mirror of https://github.com/digital-asset/daml.git
synced 2024-09-19 08:48:21 +03:00

update canton to 20240624.13529.vdf9677d5 (#19442)

* update canton to 20240624.13529.vdf9677d5  tell-slack: canton
* fix build
* another fix
* Fix proto file listing check

---------

Co-authored-by: Azure Pipelines Daml Build <support@digitalasset.com>
Co-authored-by: Marcin Ziolek <marcin.ziolek@digitalasset.com>

This commit is contained in:
parent e233328a53
commit 0c67ba39e7
@@ -1566,7 +1566,10 @@ filegroup(
     name = "ledger-api-protos-fg-admin",
     srcs = glob(
         ["community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/*.proto"],
-        exclude = ["community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/participant_pruning_service.proto"],  # TODO: haskell grpc client for participant pruning
+        exclude = [
+            "community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/participant_pruning_service.proto",  # TODO: haskell grpc client for participant pruning
+            "community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/command_inspection_service.proto",
+        ],
     ),
     visibility = ["//visibility:private"],
 )
@@ -1592,6 +1595,7 @@ genrule(
         $(location @proto3-suite//:compile-proto-file) \
             --includeDir """ + google_protobuf_src + """ \
             --includeDir """ + google_rpc_src + """ \
+            --includeDir """ + ledger_api_value_proto_source_root + """ \
             --includeDir """ + ledger_api_proto_source_root + """ \
             --proto com/daml/ledger/api/v2/admin/$$(basename $$src) \
             --out $(@D)
@@ -31,8 +31,10 @@ message TrafficState {
   int64 extra_traffic_consumed = 2;
   // Amount of base traffic remaining
   int64 base_traffic_remainder = 3;
+  // Cost deducted at `timestamp`, only present when traffic was consumed at `timestamp`, otherwise is set to 0
+  uint64 last_consumed_cost = 4;
   // Timestamp at which the state is valid
-  int64 timestamp = 4;
+  int64 timestamp = 5;
   // Optional serial of the balance update that updated the extra traffic limit
-  google.protobuf.UInt32Value serial = 5;
+  google.protobuf.UInt32Value serial = 6;
 }
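The renumbering above is deliberate: the new field takes tag 4, so the existing fields must move to 5 and 6. A minimal sketch (plain Scala, hypothetical helper names) of why reusing a tag would be unsafe on the wire:

    // A protobuf key byte encodes (fieldNumber << 3) | wireType, so a reader that
    // still maps tag 4 to `timestamp` would silently decode `last_consumed_cost`
    // into it. Renumbering is only safe here because the enclosing protocol
    // version is allowed to break wire compatibility.
    def protoKey(fieldNumber: Int, wireType: Int): Int = (fieldNumber << 3) | wireType
    assert(protoKey(4, 0) != protoKey(5, 0)) // fields 4 and 5 are distinct on the wire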
@@ -4,6 +4,14 @@
 package com.digitalasset.canton.admin.api.client.commands

 import cats.syntax.either.*
+import cats.syntax.traverse.*
+import com.daml.ledger.api.v2.admin.command_inspection_service.CommandInspectionServiceGrpc.CommandInspectionServiceStub
+import com.daml.ledger.api.v2.admin.command_inspection_service.{
+  CommandInspectionServiceGrpc,
+  CommandState,
+  GetCommandStatusRequest,
+  GetCommandStatusResponse,
+}
 import com.daml.ledger.api.v2.admin.identity_provider_config_service.IdentityProviderConfigServiceGrpc.IdentityProviderConfigServiceStub
 import com.daml.ledger.api.v2.admin.identity_provider_config_service.*
 import com.daml.ledger.api.v2.admin.metering_report_service.MeteringReportServiceGrpc.MeteringReportServiceStub
@@ -135,6 +143,7 @@ import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConf
 import com.digitalasset.canton.logging.ErrorLoggingContext
 import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
 import com.digitalasset.canton.networking.grpc.ForwardingStreamObserver
+import com.digitalasset.canton.platform.apiserver.execution.CommandStatus
 import com.digitalasset.canton.protocol.LfContractId
 import com.digitalasset.canton.serialization.ProtoConverter
 import com.digitalasset.canton.topology.{DomainId, PartyId}
@@ -352,6 +361,34 @@ object LedgerApiCommands {
     }

   }
+
+  object CommandInspectionService {
+    abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
+      override type Svc = CommandInspectionServiceStub
+
+      override def createService(channel: ManagedChannel): CommandInspectionServiceStub =
+        CommandInspectionServiceGrpc.stub(channel)
+    }
+
+    final case class GetCommandStatus(commandIdPrefix: String, state: CommandState, limit: Int)
+        extends BaseCommand[GetCommandStatusRequest, GetCommandStatusResponse, Seq[CommandStatus]] {
+      override def createRequest(): Either[String, GetCommandStatusRequest] = Right(
+        GetCommandStatusRequest(commandIdPrefix = commandIdPrefix, state = state, limit = limit)
+      )
+
+      override def submitRequest(
+          service: CommandInspectionServiceStub,
+          request: GetCommandStatusRequest,
+      ): Future[GetCommandStatusResponse] = service.getCommandStatus(request)
+
+      override def handleResponse(
+          response: GetCommandStatusResponse
+      ): Either[String, Seq[CommandStatus]] = {
+        response.commandStatus.traverse(CommandStatus.fromProto).leftMap(_.message)
+      }
+    }
+  }
+
   object ParticipantPruningService {
     abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
       override type Svc = ParticipantPruningServiceStub
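For readers unfamiliar with the console plumbing: each such command bundles request construction, the gRPC call, and response decoding. A self-contained sketch of the pattern with simplified stand-ins (the real GrpcAdminCommand trait has more members, e.g. timeouts and service creation):

    import scala.concurrent.Future
    import scala.concurrent.ExecutionContext.Implicits.global

    trait AdminCommand[Req, Resp, Res] {
      def createRequest(): Either[String, Req]                 // pure, may fail validation
      def submitRequest(request: Req): Future[Resp]            // the network call
      def handleResponse(response: Resp): Either[String, Res]  // decode / validate
    }

    // Running a command is the same three steps for every service:
    def run[Req, Resp, Res](cmd: AdminCommand[Req, Resp, Res]): Future[Either[String, Res]] =
      cmd.createRequest() match {
        case Left(err)  => Future.successful(Left(err))
        case Right(req) => cmd.submitRequest(req).map(cmd.handleResponse)
      }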
@@ -70,6 +70,7 @@ import com.digitalasset.canton.participant.ParticipantNodeParameters
 import com.digitalasset.canton.participant.admin.AdminWorkflowConfig
 import com.digitalasset.canton.participant.config.ParticipantInitConfig.ParticipantLedgerApiInitConfig
 import com.digitalasset.canton.participant.config.*
+import com.digitalasset.canton.participant.sync.CommandProgressTrackerConfig
 import com.digitalasset.canton.platform.apiserver.SeedService.Seeding
 import com.digitalasset.canton.platform.apiserver.configuration.{
   EngineLoggingConfig,
@@ -388,6 +389,7 @@ trait CantonConfig {
       disableUpgradeValidation = participantParameters.disableUpgradeValidation,
       allowForUnauthenticatedContractIds =
         participantParameters.allowForUnauthenticatedContractIds,
+      commandProgressTracking = participantParameters.commandProgressTracker,
     )
   }

@@ -971,9 +973,12 @@ object CantonConfig {
       deriveReader[EngineLoggingConfig]
     lazy implicit val cantonEngineConfigReader: ConfigReader[CantonEngineConfig] =
       deriveReader[CantonEngineConfig]
-    lazy implicit val participantNodeParameterConfigReader
-        : ConfigReader[ParticipantNodeParameterConfig] =
+    @nowarn("cat=unused") lazy implicit val participantNodeParameterConfigReader
+        : ConfigReader[ParticipantNodeParameterConfig] = {
+      implicit val commandProgressTrackerConfigReader: ConfigReader[CommandProgressTrackerConfig] =
+        deriveReader[CommandProgressTrackerConfig]
       deriveReader[ParticipantNodeParameterConfig]
+    }
     lazy implicit val timeTrackerConfigReader: ConfigReader[DomainTimeTrackerConfig] =
       deriveReader[DomainTimeTrackerConfig]
     lazy implicit val timeRequestConfigReader: ConfigReader[TimeProofRequestConfig] =
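The @nowarn("cat=unused") plus block-scoped implicit is a standard pureconfig idiom for a reader needed only inside one derivation. A minimal sketch, assuming pureconfig's semiauto derivation (the types here are illustrative, not Canton's):

    import pureconfig.ConfigReader
    import pureconfig.generic.semiauto.deriveReader
    import scala.annotation.nowarn

    object ReaderSketch {
      final case class Tracker(enabled: Boolean)
      final case class NodeParams(tracker: Tracker, name: String)

      // The local implicit is consumed by the deriveReader macro, which the compiler
      // cannot see, hence the unused-warning suppression on the enclosing val.
      @nowarn("cat=unused")
      implicit val nodeParamsReader: ConfigReader[NodeParams] = {
        implicit val trackerReader: ConfigReader[Tracker] = deriveReader[Tracker]
        deriveReader[NodeParams]
      }
    }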
@@ -1385,9 +1390,12 @@ object CantonConfig {
       deriveWriter[EngineLoggingConfig]
     lazy implicit val cantonEngineConfigWriter: ConfigWriter[CantonEngineConfig] =
       deriveWriter[CantonEngineConfig]
-    lazy implicit val participantNodeParameterConfigWriter
-        : ConfigWriter[ParticipantNodeParameterConfig] =
+    @nowarn("cat=unused") lazy implicit val participantNodeParameterConfigWriter
+        : ConfigWriter[ParticipantNodeParameterConfig] = {
+      implicit val commandProgressTrackerConfigWriter: ConfigWriter[CommandProgressTrackerConfig] =
+        deriveWriter[CommandProgressTrackerConfig]
       deriveWriter[ParticipantNodeParameterConfig]
+    }
     lazy implicit val timeTrackerConfigWriter: ConfigWriter[DomainTimeTrackerConfig] =
       deriveWriter[DomainTimeTrackerConfig]
     lazy implicit val timeRequestConfigWriter: ConfigWriter[TimeProofRequestConfig] =
@@ -8,6 +8,7 @@ import cats.syntax.functorFilter.*
 import cats.syntax.traverse.*
 import com.daml.jwt.JwtDecoder
 import com.daml.jwt.domain.Jwt
+import com.daml.ledger.api.v2.admin.command_inspection_service.CommandState
 import com.daml.ledger.api.v2.admin.package_management_service.PackageDetails
 import com.daml.ledger.api.v2.admin.party_management_service.PartyDetails as ProtoPartyDetails
 import com.daml.ledger.api.v2.checkpoint.Checkpoint
@@ -83,6 +84,7 @@ import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConf
 import com.digitalasset.canton.logging.NamedLogging
 import com.digitalasset.canton.networking.grpc.{GrpcError, RecordingStreamObserver}
 import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil
+import com.digitalasset.canton.platform.apiserver.execution.CommandStatus
 import com.digitalasset.canton.protocol.LfContractId
 import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId}
 import com.digitalasset.canton.tracing.NoTracing
@@ -554,6 +556,36 @@ trait BaseLedgerApiAdministration extends NoTracing {
         }
       }

+      @Help.Summary("Investigate successful and failed commands", FeatureFlag.Testing)
+      @Help.Description(
+        """Find the status of commands. Note that only recent commands which are kept in memory will be returned."""
+      )
+      def status(
+          commandIdPrefix: String = "",
+          state: CommandState = CommandState.COMMAND_STATE_UNSPECIFIED,
+          limit: PositiveInt = PositiveInt.tryCreate(10),
+      ): Seq[CommandStatus] = check(FeatureFlag.Preview) {
+        consoleEnvironment.run {
+          ledgerApiCommand(
+            LedgerApiCommands.CommandInspectionService.GetCommandStatus(
+              commandIdPrefix = commandIdPrefix,
+              state = state,
+              limit = limit.unwrap,
+            )
+          )
+        }
+      }
+
+      @Help.Summary("Investigate failed commands", FeatureFlag.Testing)
+      @Help.Description(
+        """Same as status(..., state = CommandState.Failed)."""
+      )
+      def failed(commandId: String = "", limit: PositiveInt = PositiveInt.tryCreate(10)): Seq[
+        CommandStatus
+      ] = check(FeatureFlag.Preview) {
+        status(commandId, CommandState.COMMAND_STATE_FAILED, limit)
+      }
+
       @Help.Summary(
         "Submit assign command and wait for the resulting reassignment, returning the reassignment or failing otherwise",
         FeatureFlag.Testing,
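A hedged usage sketch (the exact console path to this command group depends on the environment; participant1 and the group name are illustrative, not confirmed by this diff):

    // Inspect the five most recent in-memory commands with a given ID prefix, then only failures.
    val recent = participant1.ledger_api.commands.status(
      commandIdPrefix = "my-app-",
      limit = PositiveInt.tryCreate(5),
    )
    val failures = participant1.ledger_api.commands.failed(limit = PositiveInt.tryCreate(5))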
@@ -796,6 +828,36 @@ trait BaseLedgerApiAdministration extends NoTracing {
         )
       })

+    @Help.Summary("Investigate successful and failed commands", FeatureFlag.Testing)
+    @Help.Description(
+      """Find the status of commands. Note that only recent commands which are kept in memory will be returned."""
+    )
+    def status(
+        commandIdPrefix: String = "",
+        state: CommandState = CommandState.COMMAND_STATE_UNSPECIFIED,
+        limit: PositiveInt = PositiveInt.tryCreate(10),
+    ): Seq[CommandStatus] = check(FeatureFlag.Preview) {
+      consoleEnvironment.run {
+        ledgerApiCommand(
+          LedgerApiCommands.CommandInspectionService.GetCommandStatus(
+            commandIdPrefix = commandIdPrefix,
+            state = state,
+            limit = limit.unwrap,
+          )
+        )
+      }
+    }
+
+    @Help.Summary("Investigate failed commands", FeatureFlag.Testing)
+    @Help.Description(
+      """Same as status(..., state = CommandState.Failed)."""
+    )
+    def failed(commandId: String = "", limit: PositiveInt = PositiveInt.tryCreate(10)): Seq[
+      CommandStatus
+    ] = check(FeatureFlag.Preview) {
+      status(commandId, CommandState.COMMAND_STATE_FAILED, limit)
+    }
+
     @Help.Summary("Read active contracts", FeatureFlag.Testing)
     @Help.Group("Active Contracts")
     object acs extends Helpful {
@@ -309,7 +309,7 @@ class ParticipantPartiesAdministrationGroup(
       TopologyAdminCommands.Write.Propose(
         // TODO(#14048) properly set the serial or introduce auto-detection so we don't
         // have to set it on the client side
-        mapping = PartyToParticipant(
+        mapping = PartyToParticipant.create(
           partyId,
           None,
           threshold,
@@ -326,6 +326,8 @@ class ParticipantPartiesAdministrationGroup(
         serial = None,
         store = AuthorizedStore.filterName,
         mustFullyAuthorize = mustFullyAuthorize,
+        change = TopologyChangeOp.Replace,
+        forceChanges = ForceFlags.none,
       )
     )
   }
@@ -1209,6 +1209,29 @@ class TopologyAdministrationGroup(
   @Help.Group("Party to participant mappings")
   object party_to_participant_mappings extends Helpful {

+    private def findCurrent(party: PartyId, store: String) = {
+      TopologyStoreId(store) match {
+        case TopologyStoreId.DomainStore(domainId, _) =>
+          expectAtMostOneResult(
+            list(
+              domainId,
+              filterParty = party.filterString,
+              // fetch both REPLACE and REMOVE to correctly determine the next serial
+              operation = None,
+            )
+          )
+
+        case TopologyStoreId.AuthorizedStore =>
+          expectAtMostOneResult(
+            list_from_authorized(
+              filterParty = party.filterString,
+              // fetch both REPLACE and REMOVE to correctly determine the next serial
+              operation = None,
+            )
+          )
+      }
+    }
+
     @Help.Summary("Change party to participant mapping")
     @Help.Description("""Change the association of a party to hosting participants.
       party: The unique identifier of the party whose set of participants or permission to modify.
@@ -1244,27 +1267,7 @@ class TopologyAdministrationGroup(
         store: String = AuthorizedStore.filterName,
     ): SignedTopologyTransaction[TopologyChangeOp, PartyToParticipant] = {

-      val currentO = TopologyStoreId(store) match {
-        case TopologyStoreId.DomainStore(domainId, _) =>
-          expectAtMostOneResult(
-            list(
-              domainId,
-              filterParty = party.filterString,
-              // fetch both REPLACE and REMOVE to correctly determine the next serial
-              operation = None,
-            )
-          )
-
-        case TopologyStoreId.AuthorizedStore =>
-          expectAtMostOneResult(
-            list_from_authorized(
-              filterParty = party.filterString,
-              // fetch both REPLACE and REMOVE to correctly determine the next serial
-              operation = None,
-            )
-          )
-      }
-
+      val currentO = findCurrent(party, store)
       val (existingPermissions, newSerial, threshold, groupAddressing) = currentO match {
         case Some(current) if current.context.operation == TopologyChangeOp.Remove =>
           (
@@ -1361,7 +1364,7 @@ class TopologyAdministrationGroup(
       }

       val command = TopologyAdminCommands.Write.Propose(
-        mapping = PartyToParticipant(
+        mapping = PartyToParticipant.create(
           partyId = party,
           domainId = domainId,
           threshold = threshold,
@@ -1373,6 +1376,7 @@ class TopologyAdministrationGroup(
         change = op,
         mustFullyAuthorize = mustFullyAuthorize,
         store = store,
+        forceChanges = ForceFlags.none,
       )

       synchronisation.runAdminCommand(synchronize)(command)
@@ -1969,13 +1973,16 @@ class TopologyAdministrationGroup(
         ),
     ): SignedTopologyTransaction[TopologyChangeOp, AuthorityOf] = {

-      val command = TopologyAdminCommands.Write.Propose(
-        AuthorityOf(
+      val authorityOf = AuthorityOf
+        .create(
           partyId,
           domainId,
           PositiveInt.tryCreate(threshold),
           parties,
-        ),
+        )
+        .valueOr(error => consoleEnvironment.run(GenericCommandError(error)))
+      val command = TopologyAdminCommands.Write.Propose(
+        authorityOf,
         signedBy = signedBy.toList,
         serial = serial,
         store = store,
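The AuthorityOf change swaps a bare constructor for a validating smart constructor. The create-then-valueOr shape, sketched with simplified types (assuming cats' Either syntax; sys.error stands in for consoleEnvironment.run escalating the Left):

    import cats.syntax.either.*

    // Smart constructors return Either; the caller decides how to surface the Left.
    def createChecked(threshold: Int, parties: Seq[String]): Either[String, Int] =
      Either.cond(
        threshold >= 1 && threshold <= parties.size,
        threshold,
        s"threshold $threshold not in 1..${parties.size}",
      )

    val threshold: Int = createChecked(2, Seq("alice", "bob")).valueOr(err => sys.error(err))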
@@ -82,6 +82,7 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing {
       histogramInventory = histogramInventory,
       histogramFilter = baseFilter,
       histogramConfigs = config.monitoring.metrics.histograms,
+      config.monitoring.metrics.cardinality.unwrap,
       loggerFactory,
     )
   }
@@ -12,13 +12,14 @@ import com.daml.metrics.api.{MetricQualification, MetricsContext, MetricsInfoFil
 import com.daml.metrics.grpc.DamlGrpcServerMetrics
 import com.daml.metrics.{HealthMetrics, HistogramDefinition, MetricsFilterConfig}
 import com.digitalasset.canton.config.NonNegativeFiniteDuration
-import com.digitalasset.canton.config.RequireTypes.Port
+import com.digitalasset.canton.config.RequireTypes.{Port, PositiveInt}
+import com.digitalasset.canton.discard.Implicits.DiscardOps
 import com.digitalasset.canton.domain.metrics.{MediatorMetrics, SequencerMetrics}
 import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
 import com.digitalasset.canton.metrics.MetricsConfig.JvmMetrics
 import com.digitalasset.canton.metrics.MetricsReporterConfig.{Csv, Logging, Prometheus}
 import com.digitalasset.canton.participant.metrics.ParticipantMetrics
 import com.digitalasset.canton.telemetry.OpenTelemetryFactory
 import com.typesafe.scalalogging.LazyLogging
 import io.opentelemetry.api.OpenTelemetry
 import io.opentelemetry.api.metrics.Meter
@@ -26,6 +27,7 @@ import io.opentelemetry.exporter.prometheus.PrometheusHttpServer
 import io.opentelemetry.instrumentation.runtimemetrics.java8.*
 import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder
 import io.opentelemetry.sdk.metrics.`export`.{MetricExporter, MetricReader, PeriodicMetricReader}
+import io.opentelemetry.sdk.metrics.internal.state.MetricStorage

 import java.io.File
 import java.util.concurrent.ScheduledExecutorService
@@ -43,6 +45,7 @@ final case class MetricsConfig(
     reporters: Seq[MetricsReporterConfig] = Seq.empty,
     jvmMetrics: Option[JvmMetrics] = None,
     histograms: Seq[HistogramDefinition] = Seq.empty,
+    cardinality: PositiveInt = PositiveInt.tryCreate(MetricStorage.DEFAULT_MAX_CARDINALITY),
     qualifiers: Seq[MetricQualification] = Seq[MetricQualification](
       MetricQualification.Errors,
       MetricQualification.Latency,
@@ -267,10 +270,15 @@ object MetricsRegistry extends LazyLogging {

       }
       .zip(config.reporters)
-      .foreach { case (reader, config) =>
-        sdkMeterProviderBuilder
-          .registerMetricReader(FilteringMetricsReader.create(config.filters, reader))
+      .foreach { case (reader, readerConfig) =>
+        OpenTelemetryFactory
+          .registerMetricsReaderWithCardinality(
+            sdkMeterProviderBuilder,
+            FilteringMetricsReader.create(readerConfig.filters, reader),
+            config.cardinality.unwrap,
+          )
+          .discard

       }
     sdkMeterProviderBuilder
   }
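The new cardinality knob caps how many distinct label sets a metric reader will keep. A simplified sketch of what such a cap does (the real enforcement lives inside the OpenTelemetry SDK, which folds excess series into an overflow series):

    // Each distinct label combination becomes its own time series; unbounded labels
    // (e.g. user IDs) can otherwise blow up memory in the metrics pipeline.
    final class CappedCounter(maxSeries: Int) {
      private val series = scala.collection.mutable.Map.empty[Map[String, String], Long]
      def inc(labels: Map[String, String]): Unit =
        if (series.contains(labels) || series.size < maxSeries)
          series.update(labels, series.getOrElse(labels, 0L) + 1L)
        // else: dropped / folded into an overflow bucket
    }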
@@ -52,8 +52,10 @@ message TrafficConsumed {
   uint64 extra_traffic_consumed = 2;
   // Remaining free base traffic
   uint64 base_traffic_remainder = 3;
+  // Cost deducted at `timestamp`, only present when traffic was consumed at `timestamp`, otherwise is set to 0
+  uint64 last_consumed_cost = 4;
   // Timestamp at which this state is valid - this timestamp is used to compute the base traffic remainder above
-  int64 sequencing_timestamp = 4; // in microseconds of UTC time since Unix epoch
+  int64 sequencing_timestamp = 5; // in microseconds of UTC time since Unix epoch
 }

 // Message representing a traffic purchase made on behalf of a member
@@ -77,10 +79,12 @@ message TrafficState {
   int64 extra_traffic_consumed = 2;
   // Amount of base traffic remaining
   int64 base_traffic_remainder = 3;
+  // Cost deducted at `timestamp`, only present when traffic was consumed at `timestamp`, otherwise is set to 0
+  uint64 last_consumed_cost = 4;
   // Timestamp at which the state is valid
-  int64 timestamp = 4;
+  int64 timestamp = 5;
   // Optional serial of the balance update that updated the extra traffic limit
-  google.protobuf.UInt32Value serial = 5;
+  google.protobuf.UInt32Value serial = 6;
 }

 message SetTrafficPurchasedMessage {
@@ -5,9 +5,11 @@ package com.digitalasset.canton.data

 import com.daml.lf.data.{Bytes, Ref}
 import com.daml.logging.entries.{LoggingValue, ToLoggingValue}
+import com.digitalasset.canton.data.Offset.beforeBegin
 import com.google.protobuf.ByteString

 import java.io.InputStream
+import java.nio.{ByteBuffer, ByteOrder}

 /** Offsets into streams with hierarchical addressing.
  *
@@ -31,10 +33,16 @@ final case class Offset(bytes: Bytes) extends Ordered[Offset] {
   def toByteArray: Array[Byte] = bytes.toByteArray

   def toHexString: Ref.HexString = bytes.toHexString
+
+  def toLong: Long =
+    if (this == beforeBegin) 0L
+    else ByteBuffer.wrap(bytes.toByteArray).getLong(1)
 }

 object Offset {
   val beforeBegin: Offset = new Offset(Bytes.Empty)
+  private val longBasedByteLength: Int = 9 // One byte for the version plus 8 bytes for Long
+  private val versionUpstreamOffsetsAsLong: Byte = 0

   def fromByteString(bytes: ByteString) = new Offset(Bytes.fromByteString(bytes))

@@ -44,6 +52,21 @@ object Offset {

   def fromHexString(s: Ref.HexString) = new Offset(Bytes.fromHexString(s))

+  def fromLong(l: Long): Offset =
+    if (l == 0L) beforeBegin
+    else
+      Offset(
+        com.daml.lf.data.Bytes.fromByteString(
+          ByteString.copyFrom(
+            ByteBuffer
+              .allocate(longBasedByteLength)
+              .order(ByteOrder.BIG_ENDIAN)
+              .put(0, versionUpstreamOffsetsAsLong)
+              .putLong(1, l)
+          )
+        )
+      )
+
   implicit val `Offset to LoggingValue`: ToLoggingValue[Offset] = value =>
     LoggingValue.OfString(value.toHexString)
 }
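A self-contained sketch of the 9-byte encoding that fromLong and toLong above agree on: byte 0 carries the version marker (0 = upstream offsets stored as Long), bytes 1-8 carry the value big-endian, and 0L round-trips through the empty beforeBegin offset:

    import java.nio.{ByteBuffer, ByteOrder}

    def encode(l: Long): Array[Byte] =
      ByteBuffer
        .allocate(9)
        .order(ByteOrder.BIG_ENDIAN)
        .put(0, 0: Byte) // version byte
        .putLong(1, l)   // payload, big-endian
        .array()

    def decode(bytes: Array[Byte]): Long =
      if (bytes.isEmpty) 0L // beforeBegin
      else ByteBuffer.wrap(bytes).getLong(1)

    assert(decode(encode(42L)) == 42L)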
@@ -18,6 +18,7 @@ import com.digitalasset.canton.{
   DoNotTraverseLikeFuture,
 }

+import java.util.concurrent.CompletionException
 import scala.concurrent.{Awaitable, ExecutionContext, Future}
 import scala.util.chaining.*
 import scala.util.{Failure, Success, Try}
@@ -81,8 +82,14 @@ object FutureUnlessShutdown {
     apply(f.transform({
       case Success(value) => Success(UnlessShutdown.Outcome(value))
       case Failure(AbortedDueToShutdownException(_)) => Success(UnlessShutdown.AbortedDueToShutdown)
+      case Failure(ce: CompletionException) =>
+        ce.getCause match {
+          case AbortedDueToShutdownException(_) => Success(UnlessShutdown.AbortedDueToShutdown)
+          case _ => Failure(ce)
+        }
       case Failure(other) => Failure(other)
     }))

  }

 /** Monad combination of `Future` and [[UnlessShutdown]]
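The extra case matters because Java's CompletableFuture wraps exceptions thrown in dependent stages in a CompletionException, so a shutdown signalled across Java interop arrives nested one level deep. A self-contained sketch of the unwrapping, with simplified stand-ins for Canton's types:

    import java.util.concurrent.CompletionException
    import scala.util.{Failure, Success, Try}

    final case class AbortedDueToShutdownException(msg: String) extends RuntimeException(msg)
    sealed trait UnlessShutdown[+A]
    final case class Outcome[A](value: A) extends UnlessShutdown[A]
    case object AbortedDueToShutdown extends UnlessShutdown[Nothing]

    def recoverShutdown[A](result: Try[A]): Try[UnlessShutdown[A]] = result match {
      case Success(value) => Success(Outcome(value))
      case Failure(_: AbortedDueToShutdownException) => Success(AbortedDueToShutdown)
      case Failure(ce: CompletionException) =>
        ce.getCause match { // unwrap the Java-side nesting
          case _: AbortedDueToShutdownException => Success(AbortedDueToShutdown)
          case _ => Failure(ce)
        }
      case Failure(other) => Failure(other)
    }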
@@ -293,13 +293,13 @@ object OnboardingRestriction {
   *                            Must be greater than `maxSequencingTime` specified by a participant,
   *                            practically also requires extra slack to allow clock skew between participant and sequencer.
   * @param onboardingRestriction current onboarding restrictions for participants
-  * @param catchUpParameters Optional parameters of type [[com.digitalasset.canton.protocol.AcsCommitmentsCatchUpConfig]].
-  *                          Defined starting with protobuf version v2 and protocol version v30.
-  *                          If None, the catch-up mode is disabled: the participant does not trigger the
-  *                          catch-up mode when lagging behind.
-  *                          If not None, it specifies the number of reconciliation intervals that the
-  *                          participant skips in catch-up mode, and the number of catch-up intervals
-  *                          intervals a participant should lag behind in order to enter catch-up mode.
+  * @param acsCommitmentsCatchUpConfig Optional parameters of type [[com.digitalasset.canton.protocol.AcsCommitmentsCatchUpConfig]].
+  *                          Defined starting with protobuf version v2 and protocol version v30.
+  *                          If None, the catch-up mode is disabled: the participant does not trigger the
+  *                          catch-up mode when lagging behind.
+  *                          If not None, it specifies the number of reconciliation intervals that the
+  *                          participant skips in catch-up mode, and the number of catch-up intervals
+  *                          intervals a participant should lag behind in order to enter catch-up mode.
   *
   * @throws DynamicDomainParameters$.InvalidDynamicDomainParameters
   *    if `mediatorDeduplicationTimeout` is less than twice of `ledgerTimeRecordTimeTolerance`.
@@ -12,12 +12,22 @@ import com.digitalasset.canton.topology.client.TopologySnapshot
 import com.digitalasset.canton.topology.{ParticipantId, PartyId}
 import com.digitalasset.canton.tracing.TraceContext
 import com.digitalasset.canton.util.ShowUtil.*
-import com.digitalasset.canton.util.{Checked, ErrorUtil}
+import com.digitalasset.canton.util.{Checked, ErrorUtil, SetCover}

 import scala.concurrent.{ExecutionContext, Future}

 object RootHashMessageRecipients extends HasLoggerName {

+  /** Computes the list of recipients for the root hash messages of a confirmation request.
+    * Each recipient returned is either a participant or a group address
+    * [[com.digitalasset.canton.sequencing.protocol.ParticipantsOfParty]].
+    * The group addresses can be overlapping, but a participant member recipient will only be present if it is
+    * not included in any of the group addresses.
+    *
+    * @param informees informees of the confirmation request
+    * @param ipsSnapshot topology snapshot used at submission time
+    * @return list of root hash message recipients
+    */
   def rootHashRecipientsForInformees(
       informees: Set[LfPartyId],
       ipsSnapshot: TopologySnapshot,
@@ -37,10 +47,10 @@ object RootHashMessageRecipients extends HasLoggerName {
           )
         )
       )
-      groupAddressedInformees <- ipsSnapshot.partiesWithGroupAddressing(informeesList)
-      participantsOfGroupAddressedInformees <- ipsSnapshot.activeParticipantsOfParties(
-        groupAddressedInformees.toList
-      )
+      participantsOfGroupAddressedInformees <- ipsSnapshot
+        .activeParticipantsOfPartiesWithGroupAddressing(
+          informeesList
+        )
     } yield {
       // If there are several group-addressed informees with overlapping participants,
       // we actually look for a set cover. It doesn't matter which one we pick.
@@ -86,28 +96,45 @@ object RootHashMessageRecipients extends HasLoggerName {
     } ++ directlyAddressedParticipants.map { participant =>
       MemberRecipient(participant) -> Set(participant)
     }
-    // TODO(#13883) Use a set cover for the recipients instead of all of them
-    // SetCover.greedy(sets.toMap)
-    sets.map { case (recipient, _) => recipient }.toSeq
+    SetCover.greedy(sets)
    }
  }

  /** Validate the recipients of root hash messages received by a participant in Phase 3.
    */
  def validateRecipientsOnParticipant(recipients: Recipients): Checked[Nothing, String, Unit] = {
-    recipients.asSingleGroup match {
-      case Some(group) if group.sizeCompare(2) == 0 =>
-        // group members must be participantId and mediator, due to previous checks
-        Checked.unit
-      case Some(group) =>
-        val hasGroupAddressing = group.collect { case ParticipantsOfParty(party) =>
-          party.toLf
-        }.nonEmpty
-        if (hasGroupAddressing) Checked.unit
-        else Checked.continue(s"The root hash message has an invalid recipient group.\n$recipients")
-      case _ =>
-        Checked.continue(s"The root hash message has more than one recipient group.\n$recipients")
+    // group members must be of size 2, which must be participant and mediator, due to previous checks
+    val validGroups = recipients.trees.collect {
+      case RecipientsTree(group, Seq()) if group.sizeCompare(2) == 0 => group
     }
+
+    if (validGroups.size == recipients.trees.size) {
+      val allUseGroupAddressing = validGroups.forall {
+        _.exists {
+          case ParticipantsOfParty(_) => true
+          case _ => false
+        }
+      }
+
+      // Due to how rootHashRecipientsForInformees() computes recipients, if there is more than one group,
+      // they must all address the participant using group addressing.
+      if (allUseGroupAddressing || validGroups.sizeCompare(1) == 0) Checked.unit
+      else
+        Checked.continue(
+          s"The root hash message has more than one recipient group, not all using group addressing.\n$recipients"
+        )
+    } else Checked.continue(s"The root hash message has invalid recipient groups.\n$recipients")
   }

  /** Validate the recipients of root hash messages received by a mediator in Phase 2.
+    *
+    * A recipient is valid if each recipient tree:
+    *   - contains only a single recipient group (no children)
+    *   - the recipient group is if size 2
+    *   - the recipient group contains:
+    *     - the mediator group recipient
+    *     - either a participant member recipient or a PartyOfParticipant group recipient
    */
  def wrongAndCorrectRecipients(
      recipientsList: Seq[Recipients],
      mediator: MediatorGroupRecipient,
@@ -115,18 +142,14 @@ object RootHashMessageRecipients extends HasLoggerName {
     val (wrongRecipients, correctRecipients) = recipientsList.flatMap { recipients =>
       recipients.trees.toList.map {
         case tree @ RecipientsTree(group, Seq()) =>
-          val participantCount = group.count {
-            case MemberRecipient(_: ParticipantId) => true
+          val hasMediator = group.contains(mediator)
+          val hasParticipantOrPop = group.exists {
+            case MemberRecipient(_: ParticipantId) | ParticipantsOfParty(_) => true
             case _ => false
           }
-          val groupAddressCount = group.count {
-            case ParticipantsOfParty(_) => true
-            case _ => false
-          }
-          val groupAddressingBeingUsed = groupAddressCount > 0

           Either.cond(
-            ((group.size == 2) || (groupAddressingBeingUsed && group.size >= 2)) &&
-              group.contains(mediator) && (participantCount + groupAddressCount > 0),
+            group.sizeCompare(2) == 0 && hasMediator && hasParticipantOrPop,
             group,
             tree,
           )
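SetCover.greedy above replaces "address every candidate recipient" with a greedy set cover over the participants behind each recipient. A simplified, self-contained sketch of the algorithm (Canton's SetCover is assumed to be equivalent in spirit):

    // Repeatedly pick the recipient covering the most still-uncovered participants.
    def greedyCover[R, P](sets: Seq[(R, Set[P])]): Seq[R] = {
      @scala.annotation.tailrec
      def go(uncovered: Set[P], remaining: Seq[(R, Set[P])], acc: List[R]): List[R] =
        if (uncovered.isEmpty || remaining.isEmpty) acc.reverse
        else {
          val (best, covered) = remaining.maxBy { case (_, s) => s.intersect(uncovered).size }
          go(uncovered -- covered, remaining.filterNot { case (r, _) => r == best }, best :: acc)
        }
      go(sets.flatMap { case (_, s) => s }.toSet, sets, Nil)
    }

    // Two overlapping group addresses plus one directly addressed participant:
    // greedyCover(Seq("groupA" -> Set(1, 2), "groupB" -> Set(2, 3), "p4" -> Set(4)))
    // returns Seq("groupA", "groupB", "p4") without addressing any participant twice directly.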
@@ -59,15 +59,16 @@ final case class SubmissionRequest private (

   @VisibleForTesting
   def isConfirmationRequest: Boolean = {
-    val hasParticipantRecipient = batch.allMembers.exists {
-      case _: ParticipantId => true
-      case _: Member => false
+    val hasParticipantOrPopRecipient = batch.allRecipients.exists {
+      case MemberRecipient(_: ParticipantId) => true
+      case ParticipantsOfParty(_) => true
+      case _ => false
     }
     val hasMediatorRecipient = batch.allRecipients.exists {
       case _: MediatorGroupRecipient => true
       case _: Recipient => false
     }
-    hasParticipantRecipient && hasMediatorRecipient
+    hasParticipantOrPopRecipient && hasMediatorRecipient
   }

   // Caches the serialized request to be able to do checks on its size without re-serializing
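The widened check, sketched with simplified recipient stand-ins: a submission now counts as a confirmation request if it addresses a mediator group and at least one participant, whether directly or through a participants-of-party group address:

    sealed trait Recipient
    final case class MemberRecipient(member: String) extends Recipient
    final case class ParticipantsOfParty(party: String) extends Recipient
    final case class MediatorGroupRecipient(group: Int) extends Recipient

    def isConfirmationRequest(recipients: Set[Recipient]): Boolean = {
      val hasParticipantOrPop = recipients.exists {
        case MemberRecipient(m) => m.startsWith("PAR::") // stand-in for a typed ParticipantId
        case ParticipantsOfParty(_) => true
        case _ => false
      }
      val hasMediator = recipients.exists {
        case _: MediatorGroupRecipient => true
        case _ => false
      }
      hasParticipantOrPop && hasMediator
    }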
@@ -25,31 +25,35 @@ final case class TrafficState(
     extraTrafficPurchased: NonNegativeLong,
     extraTrafficConsumed: NonNegativeLong,
     baseTrafficRemainder: NonNegativeLong,
+    lastConsumedCost: NonNegativeLong,
     timestamp: CantonTimestamp,
     serial: Option[PositiveInt],
 ) extends PrettyPrinting {
   def extraTrafficRemainder: Long = extraTrafficPurchased.value - extraTrafficConsumed.value
-  def availableTraffic: Long = extraTrafficRemainder + baseTrafficRemainder.value
+  // Need big decimal here because it could overflow a long especially if extraTrafficPurchased == Long.MAX
+  lazy val availableTraffic: BigDecimal =
+    BigDecimal(extraTrafficRemainder) + BigDecimal(baseTrafficRemainder.value)

   def toProtoV30: v30.TrafficState = v30.TrafficState(
     extraTrafficPurchased = extraTrafficPurchased.value,
     extraTrafficConsumed = extraTrafficConsumed.value,
     baseTrafficRemainder = baseTrafficRemainder.value,
+    lastConsumedCost = lastConsumedCost.value,
     timestamp = timestamp.toProtoPrimitive,
     serial = serial.map(_.value),
   )

-  def toTrafficConsumed(member: Member): TrafficConsumed = TrafficConsumed(
-    member = member,
-    sequencingTimestamp = timestamp,
-    extraTrafficConsumed = extraTrafficConsumed,
-    baseTrafficRemainder = baseTrafficRemainder,
-  )
+  def toTrafficConsumed(member: Member): TrafficConsumed =
+    TrafficConsumed(
+      member = member,
+      sequencingTimestamp = timestamp,
+      extraTrafficConsumed = extraTrafficConsumed,
+      baseTrafficRemainder = baseTrafficRemainder,
+      lastConsumedCost = lastConsumedCost,
+    )

-  def toTrafficReceipt(
-      consumedCost: NonNegativeLong
-  ): TrafficReceipt = TrafficReceipt(
-    consumedCost = consumedCost,
+  def toTrafficReceipt: TrafficReceipt = TrafficReceipt(
+    consumedCost = lastConsumedCost,
     extraTrafficConsumed = extraTrafficConsumed,
     baseTrafficRemainder = baseTrafficRemainder,
   )
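Why availableTraffic moved from Long to BigDecimal: with extraTrafficPurchased at Long.MaxValue and nothing consumed, the old Long addition silently overflows. A quick self-contained check:

    val extraTrafficRemainder = Long.MaxValue // purchased - consumed
    val baseTrafficRemainder = 1L
    assert(extraTrafficRemainder + baseTrafficRemainder < 0L) // Long overflow wraps negative
    assert(BigDecimal(extraTrafficRemainder) + BigDecimal(baseTrafficRemainder) > 0) // exact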
@@ -67,6 +71,7 @@ final case class TrafficState(
       param("extraTrafficLimit", _.extraTrafficPurchased),
       param("extraTrafficConsumed", _.extraTrafficConsumed),
       param("baseTrafficRemainder", _.baseTrafficRemainder),
+      param("lastConsumedCost", _.lastConsumedCost),
       param("timestamp", _.timestamp),
       paramIfDefined("serial", _.serial),
     )
@@ -78,13 +83,15 @@ object TrafficState {
     pp >> Some(v.extraTrafficPurchased.value)
     pp >> Some(v.extraTrafficConsumed.value)
     pp >> Some(v.baseTrafficRemainder.value)
+    pp >> Some(v.lastConsumedCost.value)
     pp >> v.timestamp
     pp >> v.serial.map(_.value)
   }

   implicit val getResultTrafficState: GetResult[Option[TrafficState]] = {
     GetResult
-      .createGetTuple5(
+      .createGetTuple6(
         nonNegativeLongOptionGetResult,
         nonNegativeLongOptionGetResult,
+        nonNegativeLongOptionGetResult,
         nonNegativeLongOptionGetResult,
@@ -98,6 +105,7 @@ object TrafficState {
       NonNegativeLong.zero,
       NonNegativeLong.zero,
       NonNegativeLong.zero,
+      NonNegativeLong.zero,
       CantonTimestamp.Epoch,
       Option.empty,
     )
@@ -106,6 +114,7 @@ object TrafficState {
       NonNegativeLong.zero,
       NonNegativeLong.zero,
      NonNegativeLong.zero,
+      NonNegativeLong.zero,
       timestamp,
       Option.empty,
     )
@@ -116,12 +125,14 @@ object TrafficState {
       extraTrafficLimit <- ProtoConverter.parseNonNegativeLong(trafficStateP.extraTrafficPurchased)
       extraTrafficConsumed <- ProtoConverter.parseNonNegativeLong(trafficStateP.extraTrafficConsumed)
       baseTrafficRemainder <- ProtoConverter.parseNonNegativeLong(trafficStateP.baseTrafficRemainder)
+      lastConsumedCost <- ProtoConverter.parseNonNegativeLong(trafficStateP.lastConsumedCost)
       timestamp <- CantonTimestamp.fromProtoPrimitive(trafficStateP.timestamp)
       serial <- trafficStateP.serial.traverse(ProtoConverter.parsePositiveInt)
     } yield TrafficState(
       extraTrafficLimit,
       extraTrafficConsumed,
       baseTrafficRemainder,
+      lastConsumedCost,
       timestamp,
       serial,
     )
@@ -24,18 +24,18 @@ import slick.jdbc.GetResult
   * @param sequencingTimestamp sequencing timestamp at which this traffic consumed state is valid
   * @param extraTrafficConsumed extra traffic consumed at this sequencing timestamp
   * @param baseTrafficRemainder base traffic remaining at this sequencing timestamp
+  * @param lastConsumedCost last cost deducted from the traffic balance (base and if not enough, extra)
   */
 final case class TrafficConsumed(
     member: Member,
     sequencingTimestamp: CantonTimestamp,
     extraTrafficConsumed: NonNegativeLong,
     baseTrafficRemainder: NonNegativeLong,
+    lastConsumedCost: NonNegativeLong,
 ) extends PrettyPrinting {

-  def toTrafficReceipt(
-      consumedCost: NonNegativeLong
-  ): TrafficReceipt = TrafficReceipt(
-    consumedCost = consumedCost,
+  def toTrafficReceipt: TrafficReceipt = TrafficReceipt(
+    consumedCost = lastConsumedCost,
     extraTrafficConsumed,
     baseTrafficRemainder,
   )
@@ -48,6 +48,7 @@ final case class TrafficConsumed(
       trafficPurchased.map(_.extraTrafficPurchased).getOrElse(NonNegativeLong.zero),
       extraTrafficConsumed,
       baseTrafficRemainder,
+      lastConsumedCost,
       trafficPurchased
         .map(_.sequencingTimestamp.max(sequencingTimestamp))
         .getOrElse(sequencingTimestamp),
@@ -105,6 +106,7 @@ final case class TrafficConsumed(
     copy(
       baseTrafficRemainder = baseTrafficRemainderAtCurrentTime,
       sequencingTimestamp = timestamp,
+      lastConsumedCost = NonNegativeLong.zero,
     )
   }

@@ -127,6 +129,7 @@ final case class TrafficConsumed(
       baseTrafficRemainder = baseTrafficRemainderAfterConsume,
       extraTrafficConsumed = this.extraTrafficConsumed + extraTrafficConsumed,
       sequencingTimestamp = sequencingTimestamp,
+      lastConsumedCost = cost,
     )
   }

@@ -157,6 +160,7 @@ final case class TrafficConsumed(
       param("member", _.member),
       param("extraTrafficConsumed", _.extraTrafficConsumed),
       param("baseTrafficRemainder", _.baseTrafficRemainder),
+      param("lastConsumedCost", _.lastConsumedCost),
       param("sequencingTimestamp", _.sequencingTimestamp),
     )

@@ -166,6 +170,7 @@ final case class TrafficConsumed(
       extraTrafficConsumed = extraTrafficConsumed.value,
       baseTrafficRemainder = baseTrafficRemainder.value,
       sequencingTimestamp = sequencingTimestamp.toProtoPrimitive,
+      lastConsumedCost = lastConsumedCost.value,
     )
   }
 }
@@ -177,7 +182,13 @@ object TrafficConsumed {

   /** TrafficConsumed object for members the first time they submit a submission request
     */
   def init(member: Member): TrafficConsumed =
-    TrafficConsumed(member, CantonTimestamp.MinValue, NonNegativeLong.zero, NonNegativeLong.zero)
+    TrafficConsumed(
+      member,
+      CantonTimestamp.MinValue,
+      NonNegativeLong.zero,
+      NonNegativeLong.zero,
+      NonNegativeLong.zero,
+    )

   def empty(
       member: Member,
@@ -188,16 +199,18 @@ object TrafficConsumed {
       timestamp,
       NonNegativeLong.zero,
       baseTraffic,
+      NonNegativeLong.zero,
     )

   implicit val trafficConsumedOrdering: Ordering[TrafficConsumed] =
     Ordering.by(_.sequencingTimestamp)

   implicit val trafficConsumedGetResult: GetResult[TrafficConsumed] =
-    GetResult.createGetTuple4[Member, CantonTimestamp, NonNegativeLong, NonNegativeLong].andThen {
-      case (member, ts, trafficConsumed, baseTraffic) =>
-        TrafficConsumed(member, ts, trafficConsumed, baseTraffic)
-    }
+    GetResult
+      .createGetTuple5[Member, CantonTimestamp, NonNegativeLong, NonNegativeLong, NonNegativeLong]
+      .andThen { case (member, ts, trafficConsumed, baseTraffic, lastConsumedCost) =>
+        TrafficConsumed(member, ts, trafficConsumed, baseTraffic, lastConsumedCost)
+      }

   def fromProtoV30(trafficConsumedP: TrafficConsumedP): ParsingResult[TrafficConsumed] =
     for {
@@ -211,10 +224,14 @@ object TrafficConsumed {
       sequencingTimestamp <- CantonTimestamp.fromProtoPrimitive(
        trafficConsumedP.sequencingTimestamp
       )
+      lastConsumedCost <- ProtoConverter.parseNonNegativeLong(
+        trafficConsumedP.lastConsumedCost
+      )
     } yield TrafficConsumed(
       member = member,
       extraTrafficConsumed = extraTrafficConsumed,
       baseTrafficRemainder = baseTrafficRemainder,
       sequencingTimestamp = sequencingTimestamp,
+      lastConsumedCost = lastConsumedCost,
     )
 }
@@ -43,6 +43,7 @@ class TrafficConsumedManager(
         current.copy(
           extraTrafficConsumed = trafficReceipt.extraTrafficConsumed,
           baseTrafficRemainder = trafficReceipt.baseTrafficRemainder,
+          lastConsumedCost = trafficReceipt.consumedCost,
           sequencingTimestamp = timestamp,
         )
       case current => current
@@ -101,7 +102,7 @@ class TrafficConsumedManager(
         }.discard
         Left(value)
       case Right(_) =>
-        val newState = trafficConsumed.updateAndGet {
+        val newState = updateAndGet {
          _.consume(timestamp, params, eventCost, logger)
         }
         logger.debug(s"Consumed ${eventCost.value} for $member at $timestamp: new state $newState")
@@ -435,23 +435,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
       with TopologyManagerError
   }

-  @Explanation(
-    "This error indicates that a threshold in the submitted transaction was higher than the number of members that would have to satisfy that threshold."
-  )
-  @Resolution(
-    """Submit the topology transaction with a lower threshold.
-      |The metadata details of this error contain the expected maximum in the field ``expectedMaximum``."""
-  )
-  object InvalidThreshold
-      extends ErrorCode(id = "INVALID_THRESHOLD", ErrorCategory.InvalidIndependentOfSystemState) {
-    final case class ThresholdTooHigh(actual: Int, expectedMaximum: Int)(implicit
-        override val loggingContext: ErrorLoggingContext
-    ) extends CantonError.Impl(
-          cause = s"Threshold must not be higher than $expectedMaximum, but was $actual."
-        )
-        with TopologyManagerError
-  }
-
   @Explanation(
     "This error indicates that members referenced in a topology transaction have not declared at least one signing key or at least 1 encryption key or both."
   )
@@ -473,6 +456,20 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
       with TopologyManagerError
   }

+  object PartyExceedsHostingLimit
+      extends ErrorCode(
+        id = "PARTY_EXCEEDS_HOSTING_LIMIT",
+        ErrorCategory.InvalidIndependentOfSystemState,
+      ) {
+    final case class Reject(party: PartyId, limit: Int, numParticipants: Int)(implicit
+        override val loggingContext: ErrorLoggingContext
+    ) extends CantonError.Impl(
+          cause =
+            s"Party $party exceeds hosting limit of $limit with desired number of $numParticipants hosting participant."
+        )
+        with TopologyManagerError
+  }
+
   @Explanation(
     "This error indicates that the topology transaction references members that are currently unknown."
   )
@@ -572,7 +569,7 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
   object InvalidTopologyMapping
       extends ErrorCode(
         id = "INVALID_TOPOLOGY_MAPPING",
-        ErrorCategory.InvalidGivenCurrentSystemStateOther,
+        ErrorCategory.InvalidIndependentOfSystemState,
       ) {
     final case class Reject(
         description: String
@@ -605,7 +602,36 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
         }
       )
         with TopologyManagerError
+
+    final case class MissingDomainParameters(effectiveTime: EffectiveTime)(implicit
+        override val loggingContext: ErrorLoggingContext
+    ) extends CantonError.Impl(
+          cause = s"Missing domain parameters at $effectiveTime"
+        )
+        with TopologyManagerError
   }

+  @Explanation(
+    """This error indicates that the namespace is already used by another entity."""
+  )
+  @Resolution(
+    """Change the namespace used in the submitted topology transaction."""
+  )
+  object NamespaceAlreadyInUse
+      extends ErrorCode(
+        id = "NAMESPACE_ALREADY_IN_USE",
+        ErrorCategory.InvalidGivenCurrentSystemStateResourceExists,
+      ) {
+    final case class Reject(
+        namespace: Namespace
+    )(implicit
+        override val loggingContext: ErrorLoggingContext
+    ) extends CantonError.Impl(
+          cause = s"The namespace $namespace is already in use by another entity."
+        )
+        with TopologyManagerError
+  }
+
   abstract class DomainErrorGroup extends ErrorGroup()
   abstract class ParticipantErrorGroup extends ErrorGroup()

@@ -167,6 +167,7 @@ class TopologyStateProcessor(
           s"${enqueuingOrStoring} topology transaction ${idx + 1}/$ln ${tx.operation} ${tx.mapping} with ts=$effective (epsilon=${epsilon} ms)"
         )
       case (ValidatedTopologyTransaction(tx, Some(r), _), idx) =>
+        // TODO(i19737): we need to emit a security alert, if the rejection is due to a malicious broadcast
         logger.info(
           s"Rejected transaction ${idx + 1}/$ln ${tx.operation} ${tx.mapping} at ts=$effective (epsilon=${epsilon} ms) due to $r"
         )
@@ -296,18 +297,13 @@ class TopologyStateProcessor(
       authValidator
         .validateAndUpdateHeadAuthState(
           effective.value,
-          Seq(toValidate),
-          inStore.map(tx => tx.mapping.uniqueKey -> tx).toList.toMap,
+          toValidate,
+          inStore,
           expectFullAuthorization,
         )
     )
-      .subflatMap { case (_, txs) =>
-        // TODO(#12390) proper error
-        txs.headOption
-          .toRight[TopologyTransactionRejection](
-            TopologyTransactionRejection.Other("expected validation result doesn't exist")
-          )
-          .flatMap(tx => tx.rejectionReason.toLeft(tx.transaction))
+      .subflatMap { case (_, tx) =>
+        tx.rejectionReason.toLeft(tx.transaction)
       }
   }

@@ -299,6 +299,10 @@ trait PartyTopologySnapshotClient {
       parties: Seq[LfPartyId]
   )(implicit traceContext: TraceContext): Future[Set[LfPartyId]]

+  def activeParticipantsOfPartiesWithGroupAddressing(
+      parties: Seq[LfPartyId]
+  )(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]]
+
   /** Returns a list of all known parties on this domain */
   def inspectKnownParties(
       filterParty: String,
@@ -841,6 +845,11 @@ private[client] trait PartyTopologySnapshotLoader
   ): Future[Set[LfPartyId]] =
     loadAndMapPartyInfos(parties, identity, _.groupAddressing).map(_.keySet)

+  final override def activeParticipantsOfPartiesWithGroupAddressing(
+      parties: Seq[LfPartyId]
+  )(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]] =
+    loadAndMapPartyInfos(parties, _.participants.keySet, _.groupAddressing)
+
   final override def consortiumThresholds(
       parties: Set[LfPartyId]
   )(implicit traceContext: TraceContext): Future[Map[LfPartyId, PositiveInt]] =
@@ -8,12 +8,12 @@ import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey}
 import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
 import com.digitalasset.canton.topology.Namespace
 import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction.AuthorizedNamespaceDelegation
+import com.digitalasset.canton.topology.transaction.TopologyChangeOp.{Remove, Replace}
 import com.digitalasset.canton.topology.transaction.*
 import com.digitalasset.canton.tracing.TraceContext
 import com.digitalasset.canton.util.ErrorUtil
 import com.digitalasset.canton.util.ShowUtil.*

 import scala.annotation.tailrec
 import scala.collection.concurrent.TrieMap
 import scala.math.Ordering.Implicits.*

@@ -35,8 +35,8 @@ object AuthorizedTopologyTransaction {

   /** Returns true if the namespace delegation is a root certificate
     *
-    * A root certificate is defined by the namespace delegation that authorizes the
-    * key f to act on namespace spanned by f, authorized by f.
+    * A root certificate is defined by a namespace delegation that authorizes the
+    * key f to act on the namespace spanned by f, authorized by f.
     */
   def isRootCertificate(namespaceDelegation: AuthorizedNamespaceDelegation): Boolean = {
     NamespaceDelegation.isRootCertificate(namespaceDelegation.transaction)
@@ -44,11 +44,7 @@ object AuthorizedTopologyTransaction {

   /** Returns true if the namespace delegation is a root certificate or a root delegation
     *
-    * A root certificate is defined by the namespace delegation that authorizes the
-    * key f to act on namespace spanned by f, authorized by f.
-    *
-    * A root delegation is defined by the namespace delegation the authorizes the
-    * key g to act on namespace spanned by f.
+    * A root delegation is a namespace delegation whose target key may be used to authorize other namespace delegations.
     */
   def isRootDelegation(namespaceDelegation: AuthorizedNamespaceDelegation): Boolean = {
     NamespaceDelegation.isRootDelegation(namespaceDelegation.transaction)
@@ -56,49 +52,45 @@ object AuthorizedTopologyTransaction {

 }

-/** maintain a dependency graph for the namespace delegations
+/** Stores a set of namespace delegations, tracks dependencies and
+  * determines which keys are authorized to sign on behalf of a namespace.
   *
-  * namespace delegations are a bit tricky as there can be an arbitrary number of delegations before we reach
-  * the actual key that will be used for authorizations. think of it as a certificate chain where we get a
+  * Namespace delegations are a bit tricky as there can be an arbitrary number of delegations between the namespace key
+  * and the key that will be used for authorizations. Think of it as a certificate chain where we get a
   * series of certificates and we need to figure out a path from one certificate to the root certificate.
   *
   * NOTE: this class is not thread-safe
   *
-  * properties of the graph:
-  *   - the nodes are the target key fingerprints
-  *   - the node with fingerprint of the namespace is the root node
-  *   - the edges between the nodes are the authorizations where key A authorizes key B to act on the namespace
-  *     in this case, the authorization is outgoing from A and incoming to B.
-  *   - the graph SHOULD be a directed acyclic graph, but we MIGHT have cycles (i.e. key A authorizing B, B authorizing A).
-  *     we don't need to make a fuss about cycles in the graph. we just ignore / report them assuming it was an admin
-  *     mistake, but we don't get confused.
-  *   - root certificates are edges pointing to the node itself. they are separate such that they don't show up
-  *     in the list of incoming / outgoing.
-  *   - we track for each node the set of outgoing edges and incoming edges. an outgoing edge is a delegation where
-  *     the source node is authorizing a target node. obviously every outgoing edge is also an incoming edge.
+  * Properties of the graph:
+  *   - Each node corresponds to a target key
+  *   - The node with key fingerprint of the namespace is the root node
+  *   - The edges between nodes are namespace delegations.
+  *     If key A signs a namespace delegation with target key B, then key A authorizes key B to act on the namespace.
+  *     In this case, the edge is outgoing from node A and incoming into node B.
+  *   - The graph may have cycles. The implementation does not get confused by this.
   *
-  * computation task:
-  *   - once we've modified the graph, we compute the nodes that are somehow connected to the root node.
+  * Computation task:
+  * The graph maintains a set of nodes that are connected to the root node. Those correspond to the keys that are
+  * authorized to sign on behalf of the namespace.
   *
-  * purpose:
-  *   - once we know which target keys are actually authorized to act on this particular namespace, we can then use
-  *     this information to find out which resulting mapping is properly authorized and which one is not.
+  * Limitation: clients need to ensure that the namespace delegations added have valid signatures.
+  * If delegations with invalid signatures are added, authorization will break.
   *
-  * authorization checks:
-  *   - when adding "single transactions", we do check that the transaction is properly authorized. otherwise we
-  *     "ignore" it (returning false). this is used during processing.
-  *   - when adding "batch transactions", we don't check that all of them are properly authorized, as we do allow
-  *     temporarily "nodes" to be unauthorized (so that errors can be fixed by adding a replacement certificate)
-  *   - when removing transactions, we do check that the authorizing key is authorized. but note that the authorizing
-  *     key of an edge REMOVAL doesn't need to match the key used to authorized the ADD.
+  * @param extraDebugInfo whether to log the authorization graph at debug level on every recomputation
   */
 class AuthorizationGraph(
     val namespace: Namespace,
     extraDebugInfo: Boolean,
-    val loggerFactory: NamedLoggerFactory,
+    override protected val loggerFactory: NamedLoggerFactory,
 ) extends AuthorizationCheck
     with NamedLogging {

+  /** @param root the last active root certificate for `target`
+    * @param outgoing all active namespace delegations (excluding root certificates) authorized by `target`
+    * @param incoming all active namespace delegations for the namespace `target`
+    *
+    * All namespace delegations are for namespace `this.namespace`.
+    */
   private case class GraphNode(
       target: Fingerprint,
       root: Option[AuthorizedNamespaceDelegation] = None,
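The "connected to the root" computation the new doc describes can be sketched compactly: keys reachable from the namespace fingerprint via delegation edges are the authorized signers, and tracking a visited set makes cycles harmless. A self-contained sketch with fingerprints as strings:

    // Breadth-first reachability from the namespace's own fingerprint.
    def authorizedKeys(rootFingerprint: String, delegations: Set[(String, String)]): Set[String] = {
      @scala.annotation.tailrec
      def go(frontier: Set[String], seen: Set[String]): Set[String] =
        if (frontier.isEmpty) seen
        else {
          val next = delegations.collect { case (from, to) if frontier(from) && !seen(to) => to }
          go(next, seen ++ next)
        }
      go(Set(rootFingerprint), Set(rootFingerprint))
    }

    // A chain ns -> k1 -> k2 authorizes k1 and k2; the unrelated kX -> kY edge does not.
    assert(authorizedKeys("ns", Set("ns" -> "k1", "k1" -> "k2", "kX" -> "kY")) == Set("ns", "k1", "k2"))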
@@ -113,9 +105,9 @@ class AuthorizationGraph(
   private abstract class AuthLevel(val isAuth: Boolean, val isRoot: Boolean)
   private object AuthLevel {

-    object NotAuthorized extends AuthLevel(false, false)
-    object Standard extends AuthLevel(true, false)
-    object RootDelegation extends AuthLevel(true, true)
+    private object NotAuthorized extends AuthLevel(false, false)
+    private object Standard extends AuthLevel(true, false)
+    private object RootDelegation extends AuthLevel(true, true)

     implicit val orderingAuthLevel: Ordering[AuthLevel] =
       Ordering.by[AuthLevel, Int](authl => Seq(authl.isAuth, authl.isRoot).count(identity))
@@ -129,23 +121,30 @@ class AuthorizationGraph(

   }

+  /** GraphNodes by GraphNode.target */
   private val nodes = new TrieMap[Fingerprint, GraphNode]()

-  /** temporary cache for the current graph authorization check results
-    *
-    * if a fingerprint is empty, then we haven't yet computed the answer
-    */
+  /** Authorized namespace delegations for namespace `this.namespace`, grouped by target */
   private val cache =
-    new TrieMap[Fingerprint, Option[AuthorizedNamespaceDelegation]]()
+    new TrieMap[Fingerprint, AuthorizedNamespaceDelegation]()

+  /** Check if `item` is authorized and, if so, add its mapping to this graph.
+    *
+    * @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REPLACE.
+    */
   def add(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = {
     ErrorUtil.requireArgument(
       item.mapping.namespace == namespace,
-      s"added namespace ${item.mapping.namespace} to $namespace",
+      s"unable to add namespace delegation for ${item.mapping.namespace} to graph for $namespace",
     )
+    ErrorUtil.requireArgument(
+      item.operation == Replace,
+      s"unable to add namespace delegation with operation ${item.operation} to graph for $namespace",
+    )

     if (
       AuthorizedTopologyTransaction.isRootCertificate(item) ||
-      this.areValidAuthorizationKeys(item.signingKeys, requireRoot = true)
+      this.existsAuthorizedKeyIn(item.signingKeys, requireRoot = true)
     ) {
       doAdd(item)
       recompute()
|
||||
} else false
|
||||
}
|
||||
|
||||
/** Add the mappings in `items` to this graph, regardless if they are authorized or not.
|
||||
* If an unauthorized namespace delegation is added to the graph, the graph will contain nodes that are not connected to the root.
|
||||
* The target key of the unauthorized delegation will still be considered unauthorized.
|
||||
*
|
||||
* @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REPLACE.
|
||||
*/
|
||||
def unauthorizedAdd(
|
||||
items: Seq[AuthorizedNamespaceDelegation]
|
||||
)(implicit traceContext: TraceContext): Unit = {
|
||||
@ -163,6 +168,15 @@ class AuthorizationGraph(
|
||||
private def doAdd(
|
||||
item: AuthorizedNamespaceDelegation
|
||||
)(implicit traceContext: TraceContext): Unit = {
|
||||
ErrorUtil.requireArgument(
|
||||
item.mapping.namespace == namespace,
|
||||
s"unable to add namespace delegation for ${item.mapping.namespace} to graph for $namespace",
|
||||
)
|
||||
ErrorUtil.requireArgument(
|
||||
item.operation == Replace,
|
||||
s"unable to add namespace delegation with operation ${item.operation} to graph for $namespace",
|
||||
)
|
||||
|
||||
val targetKey = item.mapping.target.fingerprint
|
||||
val curTarget = nodes.getOrElse(targetKey, GraphNode(targetKey))
|
||||
// if this is a root certificate, remember it separately
|
||||
@ -181,32 +195,38 @@ class AuthorizationGraph(
|
||||
}
|
||||
}
|
||||
|
||||
def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean =
|
||||
if (areValidAuthorizationKeys(item.signingKeys, requireRoot = true)) {
|
||||
/** Check if `item` is authorized and, if so, remove its mapping from this graph.
|
||||
* Note that addition and removal of a namespace delegation can be authorized by different keys.
|
||||
*
|
||||
* @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REMOVE.
|
||||
*/
|
||||
def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = {
|
||||
ErrorUtil.requireArgument(
|
||||
item.mapping.namespace == namespace,
|
||||
s"unable to remove namespace delegation for ${item.mapping.namespace} from graph for $namespace",
|
||||
)
|
||||
|
||||
ErrorUtil.requireArgument(
|
||||
item.operation == Remove,
|
||||
s"unable to remove namespace delegation with operation ${item.operation} from graph for $namespace",
|
||||
)
|
||||
|
||||
if (existsAuthorizedKeyIn(item.signingKeys, requireRoot = true)) {
|
||||
doRemove(item)
|
||||
true
|
||||
} else false
|
||||
|
||||
def unauthorizedRemove(
|
||||
items: Seq[AuthorizedNamespaceDelegation]
|
||||
)(implicit traceContext: TraceContext): Unit = {
|
||||
items.foreach(doRemove)
|
||||
}
|
||||
|
||||
/** remove a namespace delegation
|
||||
*
|
||||
* note that this one is a bit tricky as the removal might have been authorized
|
||||
* by a different key than the addition. this is fine but it complicates the book-keeping,
|
||||
* The implementation is a bit tricky as the removal might have been authorized
|
||||
* by a different key than the addition. This complicates the book-keeping,
|
||||
* as we need to track for each target key what the "incoming authorizations" were solely for the
|
||||
* purpose of being able to clean them up
|
||||
* purpose of being able to clean them up.
|
||||
*/
|
||||
private def doRemove(
|
||||
item: AuthorizedNamespaceDelegation
|
||||
)(implicit traceContext: TraceContext): Unit = {
|
||||
ErrorUtil.requireArgument(
|
||||
item.mapping.namespace == namespace,
|
||||
s"removing namespace ${item.mapping.namespace} from $namespace",
|
||||
)
|
||||
def myFilter(existing: AuthorizedNamespaceDelegation): Boolean = {
|
||||
// the auth key doesn't need to match on removals
|
||||
existing.mapping != item.mapping
|
||||
@ -248,10 +268,9 @@ class AuthorizationGraph(
|
||||
updateRemove(targetKey, curTarget.copy(incoming = curTarget.incoming.filter(myFilter)))
|
||||
}
|
||||
recompute()
|
||||
case None =>
|
||||
logger.warn(s"Superfluous removal of namespace delegation $item")
|
||||
}
|
||||
|
||||
case None => logger.warn(s"Superfluous removal of namespace delegation $item")
|
||||
}
|
||||
}
|
||||
|
||||
protected def recompute()(implicit traceContext: TraceContext): Unit = {
|
||||
@ -269,12 +288,12 @@ class AuthorizationGraph(
|
||||
fingerprint: Fingerprint,
|
||||
incoming: AuthorizedNamespaceDelegation,
|
||||
): Unit = {
|
||||
val current = cache.getOrElseUpdate(fingerprint, None)
|
||||
val current = cache.get(fingerprint)
|
||||
val currentLevel = AuthLevel.fromDelegationO(current)
|
||||
val incomingLevel = AuthLevel.fromDelegationO(Some(incoming))
|
||||
// this inherited level is higher than current, propagate it
|
||||
if (incomingLevel > currentLevel) {
|
||||
cache.update(fingerprint, Some(incoming))
|
||||
cache.update(fingerprint, incoming)
|
||||
// get the graph node of this fingerprint
|
||||
nodes.get(fingerprint).foreach { graphNode =>
|
||||
// iterate through all edges that depart from this node
|
||||
@ -310,7 +329,7 @@ class AuthorizationGraph(
|
||||
}
|
||||
if (extraDebugInfo && logger.underlying.isDebugEnabled) {
|
||||
val str =
|
||||
authorizedDelegations()
|
||||
cache.values
|
||||
.map(aud =>
|
||||
show"auth=${aud.signingKeys}, target=${aud.mapping.target.fingerprint}, root=${AuthorizedTopologyTransaction
|
||||
.isRootCertificate(aud)}"
|
||||
@ -320,144 +339,99 @@ class AuthorizationGraph(
|
||||
}
|
||||
} else
|
||||
logger.debug(
|
||||
s"Namespace ${namespace} has no root certificate, making all ${nodes.size} un-authorized"
|
||||
s"Namespace $namespace has no root certificate, making all ${nodes.size} un-authorized"
|
||||
)
|
||||
|
||||
override def areValidAuthorizationKeys(
|
||||
override def existsAuthorizedKeyIn(
|
||||
authKeys: Set[Fingerprint],
|
||||
requireRoot: Boolean,
|
||||
): Boolean = {
|
||||
authKeys.exists { authKey =>
|
||||
val authLevel = AuthLevel.fromDelegationO(cache.getOrElse(authKey, None))
|
||||
authLevel.isRoot || (authLevel.isAuth && !requireRoot)
|
||||
}
|
||||
}
|
||||
): Boolean = authKeys.exists(getAuthorizedKey(_, requireRoot).nonEmpty)
|
||||
|
||||
override def getValidAuthorizationKeys(
|
||||
authKeys: Set[Fingerprint],
|
||||
private def getAuthorizedKey(
|
||||
authKey: Fingerprint,
|
||||
requireRoot: Boolean,
|
||||
): Set[SigningPublicKey] = authKeys.flatMap(authKey =>
|
||||
): Option[SigningPublicKey] =
|
||||
cache
|
||||
.getOrElse(authKey, None)
|
||||
.map(_.mapping.target)
|
||||
.filter(_ => areValidAuthorizationKeys(Set(authKey), requireRoot))
|
||||
)
|
||||
|
||||
def authorizationChain(
|
||||
startAuthKey: Fingerprint,
|
||||
requireRoot: Boolean,
|
||||
): Option[AuthorizationChain] = {
|
||||
@tailrec
|
||||
def go(
|
||||
authKey: Fingerprint,
|
||||
requireRoot: Boolean,
|
||||
acc: List[AuthorizedNamespaceDelegation],
|
||||
): List[AuthorizedNamespaceDelegation] = {
|
||||
cache.getOrElse(authKey, None) match {
|
||||
// we've terminated with the root certificate
|
||||
case Some(delegation) if AuthorizedTopologyTransaction.isRootCertificate(delegation) =>
|
||||
delegation :: acc
|
||||
// cert is valid, append it
|
||||
case Some(delegation) if delegation.mapping.isRootDelegation || !requireRoot =>
|
||||
go(delegation.signingKeys.head1, delegation.mapping.isRootDelegation, delegation :: acc)
|
||||
// return empty to indicate failure
|
||||
case _ => List.empty
|
||||
.get(authKey)
|
||||
.filter { delegation =>
|
||||
val authLevel = AuthLevel.fromDelegationO(Some(delegation))
|
||||
authLevel.isRoot || (authLevel.isAuth && !requireRoot)
|
||||
}
|
||||
}
|
||||
go(startAuthKey, requireRoot, List.empty) match {
|
||||
case Nil => None
|
||||
case rest =>
|
||||
Some(
|
||||
AuthorizationChain(
|
||||
identifierDelegation = Seq.empty,
|
||||
namespaceDelegations = rest,
|
||||
Seq.empty,
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
.map(_.mapping.target)
|
||||
|
||||
def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] =
|
||||
cache.values.flatMap(_.toList).toSeq
|
||||
override def keysSupportingAuthorization(
|
||||
authKeys: Set[Fingerprint],
|
||||
requireRoot: Boolean,
|
||||
): Set[SigningPublicKey] = authKeys.flatMap(getAuthorizedKey(_, requireRoot))
|
||||
|
||||
override def toString: String = s"AuthorizationGraph($namespace)"
|
||||
|
||||
def debugInfo() = s"$namespace => ${nodes.mkString("\n")}"
|
||||
}
|
||||
|
||||
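// Editor's note: a minimal usage sketch (hypothetical values, not part of this patch).
// A self-signed root certificate bootstraps the graph; afterwards existsAuthorizedKeyIn
// answers authorization queries against the recomputed cache:
//
//   val graph = new AuthorizationGraph(myNamespace, extraDebugInfo = false, loggerFactory)
//   graph.add(rootCertDelegation)      // root certificates are always accepted
//   graph.add(intermediateDelegation)  // accepted only if signed by a root-capable key
//   graph.existsAuthorizedKeyIn(Set(intermediateKeyFp), requireRoot = false)
//   // true iff the intermediate key is reachable from the root certificate
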
trait AuthorizationCheck {
  def areValidAuthorizationKeys(authKeys: Set[Fingerprint], requireRoot: Boolean): Boolean

  def getValidAuthorizationKeys(
  /** Determines if a subset of the given keys is authorized to sign on behalf of the (possibly decentralized) namespace.
    *
    * @param requireRoot whether the authorization must be suitable to authorize namespace delegations
    */
  def existsAuthorizedKeyIn(authKeys: Set[Fingerprint], requireRoot: Boolean): Boolean

  /** Returns those keys that are useful for signing on behalf of the (possibly decentralized) namespace.
    * Only keys with fingerprint in `authKeys` will be returned.
    * The returned keys are not necessarily sufficient to authorize a transaction on behalf of the namespace;
    * in case of a decentralized namespace, additional signatures may be required.
    */
  def keysSupportingAuthorization(
      authKeys: Set[Fingerprint],
      requireRoot: Boolean,
  ): Set[SigningPublicKey]

  def authorizationChain(
      startAuthKey: Fingerprint,
      requireRoot: Boolean,
  ): Option[AuthorizationChain]

  def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation]
}

object AuthorizationCheck {
  val empty = new AuthorizationCheck {
    override def areValidAuthorizationKeys(
  val empty: AuthorizationCheck = new AuthorizationCheck {
    override def existsAuthorizedKeyIn(
        authKeys: Set[Fingerprint],
        requireRoot: Boolean,
    ): Boolean = false

    override def authorizationChain(
        startAuthKey: Fingerprint,
        requireRoot: Boolean,
    ): Option[AuthorizationChain] = None

    override def getValidAuthorizationKeys(
    override def keysSupportingAuthorization(
        authKeys: Set[Fingerprint],
        requireRoot: Boolean,
    ): Set[SigningPublicKey] = Set.empty

    override def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] = Seq.empty

    override def toString: String = "AuthorizationCheck.empty"
  }
}

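// Editor's note: a minimal sketch (hypothetical check and fingerprints, not from this
// patch) contrasting the two AuthorizationCheck queries. existsAuthorizedKeyIn answers a
// yes/no question; keysSupportingAuthorization returns the usable subset of keys:
//
//   val canSign: Boolean =
//     someCheck.existsAuthorizedKeyIn(Set(rootKeyFp, plainKeyFp), requireRoot = true)
//   val usable: Set[SigningPublicKey] =
//     someCheck.keysSupportingAuthorization(Set(rootKeyFp, plainKeyFp), requireRoot = false)
//   // For a decentralized namespace, usable.nonEmpty does not imply canSign,
//   // because the owner threshold may require additional signatures.
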
/** Authorization graph for a decentralized namespace.
  *
  * @throws java.lang.IllegalArgumentException if `dnd` and `direct` refer to different namespaces.
  */
final case class DecentralizedNamespaceAuthorizationGraph(
    dnd: DecentralizedNamespaceDefinition,
    direct: AuthorizationGraph,
    ownerGraphs: Seq[AuthorizationGraph],
) extends AuthorizationCheck {
  override def areValidAuthorizationKeys(
  require(
    dnd.namespace == direct.namespace,
    s"The direct graph refers to the wrong namespace (expected: ${dnd.namespace}, actual: ${direct.namespace}).",
  )

  override def existsAuthorizedKeyIn(
      authKeys: Set[Fingerprint],
      requireRoot: Boolean,
  ): Boolean = {
    val viaNamespaceDelegation = direct.areValidAuthorizationKeys(authKeys, requireRoot)
    val viaNamespaceDelegation = direct.existsAuthorizedKeyIn(authKeys, requireRoot)
    val viaCollective =
      ownerGraphs.count(_.areValidAuthorizationKeys(authKeys, requireRoot)) >= dnd.threshold.value
      ownerGraphs.count(_.existsAuthorizedKeyIn(authKeys, requireRoot)) >= dnd.threshold.value
    viaNamespaceDelegation || viaCollective
  }

  import cats.syntax.foldable.*

  override def getValidAuthorizationKeys(
  override def keysSupportingAuthorization(
      authKeys: Set[Fingerprint],
      requireRoot: Boolean,
  ): Set[SigningPublicKey] = {
    (direct +: ownerGraphs)
      .flatMap(_.getValidAuthorizationKeys(authKeys, requireRoot))
      .flatMap(_.keysSupportingAuthorization(authKeys, requireRoot))
      .toSet
  }

  override def authorizationChain(
      startAuthKey: Fingerprint,
      requireRoot: Boolean,
  ): Option[AuthorizationChain] =
    direct
      .authorizationChain(startAuthKey, requireRoot)
      .orElse(ownerGraphs.map(_.authorizationChain(startAuthKey, requireRoot)).combineAll)

  override def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] =
    direct.authorizedDelegations() ++ ownerGraphs.flatMap(_.authorizedDelegations())
}
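
// Editor's note: a minimal sketch (hypothetical values, not part of this patch) of the
// collective-authorization rule implemented above. With three owner namespaces and
// dnd.threshold = 2, a key set authorizes the decentralized namespace only if at least
// two owner graphs each contain an authorized key from the set:
//
//   val authorizingOwners = ownerGraphs.count(_.existsAuthorizedKeyIn(keys, requireRoot = false))
//   val collectivelyAuthorized = authorizingOwners >= dnd.threshold.value  // e.g. 2 of 3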

@ -5,7 +5,8 @@ package com.digitalasset.canton.topology.processing

import cats.Monoid
import cats.data.EitherT
import cats.syntax.parallel.*
import cats.syntax.bifunctor.*
import cats.syntax.foldable.*
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.CryptoPureApi
import com.digitalasset.canton.data.CantonTimestamp
@ -20,14 +21,10 @@ import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction
import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction
import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction
import com.digitalasset.canton.topology.transaction.TopologyMapping.{
  MappingHash,
  RequiredAuthAuthorizations,
}
import com.digitalasset.canton.topology.transaction.TopologyMapping.RequiredAuthAuthorizations
import com.digitalasset.canton.topology.transaction.TopologyTransaction.GenericTopologyTransaction
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.FutureInstances.*

import scala.concurrent.{ExecutionContext, Future}

@ -130,17 +127,14 @@ class IncomingTopologyTransactionAuthorizationValidator(
    */
  def validateAndUpdateHeadAuthState(
      timestamp: CantonTimestamp,
      transactionsToValidate: Seq[GenericSignedTopologyTransaction],
      transactionsInStore: Map[MappingHash, GenericSignedTopologyTransaction],
      toValidate: GenericSignedTopologyTransaction,
      inStore: Option[GenericSignedTopologyTransaction],
      expectFullAuthorization: Boolean,
  )(implicit
      traceContext: TraceContext
  ): Future[(UpdateAggregation, Seq[GenericValidatedTopologyTransaction])] = {
  ): Future[(UpdateAggregation, GenericValidatedTopologyTransaction)] = {
    for {
      authCheckResult <- determineRelevantUidsAndNamespaces(
        transactionsToValidate,
        transactionsInStore.view.mapValues(_.transaction).toMap,
      )
      authCheckResult <- determineRelevantUidsAndNamespaces(toValidate, inStore.map(_.transaction))
      (updateAggregation, targetDomainVerified) = authCheckResult
      loadGraphsF = loadAuthorizationGraphs(timestamp, updateAggregation.authNamespaces)
      loadUidsF = loadIdentifierDelegationsCascading(
@ -153,11 +147,11 @@ class IncomingTopologyTransactionAuthorizationValidator(
    } yield {

      logger.debug(s"Update aggregation yielded ${updateAggregation}")
      val validated = targetDomainVerified.map {
      val validated = targetDomainVerified match {
        case ValidatedTopologyTransaction(tx, None, _) =>
          processTransaction(
            tx,
            transactionsInStore.get(tx.mapping.uniqueKey),
            inStore,
            expectFullAuthorization,
          )
        case v => v
@ -173,101 +167,124 @@ class IncomingTopologyTransactionAuthorizationValidator(
    }
  }

  /** Validates a topology transaction as follows:
    * <ol>
    * <li>check that the transaction has valid signatures and is sufficiently authorized. If not, reject.</li>
    * <li>if there are no missing authorizers, as is the case for proposals, we update internal caches for NSD, IDD, and DND</li>
    * <li>if this validation is run to determine a final verdict, as is the case for processing topology transactions coming from the domain,
    * automatically clear the proposal flag for transactions with sufficient authorizing signatures.</li>
    * </ol>
    */
  private def processTransaction(
      toValidate: GenericSignedTopologyTransaction,
      inStore: Option[GenericSignedTopologyTransaction],
      expectFullAuthorization: Boolean,
  )(implicit traceContext: TraceContext): GenericValidatedTopologyTransaction = {
    val processedNs = toValidate.selectMapping[NamespaceDelegation].forall { sigTx =>
      processNamespaceDelegation(
        toValidate.operation,
        AuthorizedTopologyTransaction(sigTx),
      )
    }
    // See validateRootCertificate for why we need to check the removal of a root certificate explicitly here.
    val signatureCheckResult = validateRootCertificate(toValidate)
      .getOrElse(validateSignaturesAndDetermineMissingAuthorizers(toValidate, inStore))

    val processedIdent = toValidate.selectMapping[IdentifierDelegation].forall { sigTx =>
      processIdentifierDelegation(
        toValidate.operation,
        AuthorizedTopologyTransaction(sigTx),
      )
    }

    val resultDns = toValidate.selectMapping[DecentralizedNamespaceDefinition].map { sigTx =>
      processDecentralizedNamespaceDefinition(
        sigTx.operation,
        AuthorizedTopologyTransaction(sigTx),
      )
    }
    val processedDns = resultDns.forall(_._1)
    val mappingSpecificCheck = processedNs && processedIdent && processedDns

    // the transaction is fully authorized if either
    // 1. it's a root certificate, or
    // 2. there is no authorization error and there are no missing authorizers
    // We need to check explicitly for the root certificate here, because a REMOVE operation
    // removes itself from the authorization graph, and therefore `isCurrentlyAuthorized` cannot validate it.
    val authorizationResult =
      if (NamespaceDelegation.isRootCertificate(toValidate))
        Right(
          (
            toValidate,
            RequiredAuthAuthorizations.empty, // no missing authorizers
          )
        )
      else isCurrentlyAuthorized(toValidate, inStore)

    authorizationResult match {
    signatureCheckResult match {
      // propagate the rejection reason
      case Left(rejectionReason) => ValidatedTopologyTransaction(toValidate, Some(rejectionReason))

      // if a transaction wasn't outright rejected, run some additional checks
      case Right((validatedTx, missingAuthorizers)) =>
        // The mappingSpecificCheck is a necessary condition for having sufficient authorizers.
        val isFullyAuthorized =
          mappingSpecificCheck && missingAuthorizers.isEmpty

        // If a decentralizedNamespace transaction is fully authorized, reflect so in the decentralizedNamespace cache.
        // Note: It seems a bit unsafe to update the caches on the assumption that the update will also be eventually
        // persisted by the caller (a few levels up the call chain in TopologyStateProcessor.validateAndApplyAuthorization
        // as the caller performs additional checks such as the numeric value of the serial number).
        // But at least this is safer than where the check was previously (inside processDecentralizedNamespaceDefinition before even
        // `isCurrentlyAuthorized` above had finished all checks).
        if (isFullyAuthorized) {
          resultDns.foreach { case (_, updateDecentralizedNamespaceCache) =>
            updateDecentralizedNamespaceCache()
          }
        }

        val acceptMissingAuthorizers =
          validatedTx.isProposal && !expectFullAuthorization

        // if the result of this validation is final (when processing transactions for the authorized store
        // or sequenced transactions from the domain) we set the proposal flag according to whether the transaction
        // is fully authorized or not.
        // This must not be done when preliminarily validating transactions via the DomainTopologyManager, because
        // the validation outcome might change when validating the transaction again after it has been sequenced.
        val finalTransaction =
          if (validationIsFinal) validatedTx.copy(isProposal = !isFullyAuthorized)
          else validatedTx

        // Either the transaction is fully authorized or the request allows partial authorization
        if (isFullyAuthorized || acceptMissingAuthorizers) {
          ValidatedTopologyTransaction(finalTransaction, None)
        } else {
          if (!missingAuthorizers.isEmpty) {
            logger.debug(s"Missing authorizers: $missingAuthorizers")
          }
          if (!mappingSpecificCheck) {
            logger.debug(s"Mapping specific check failed")
          }
          ValidatedTopologyTransaction(
            toValidate,
            Some(TopologyTransactionRejection.NotAuthorized),
          )
        }
        handleSuccessfulSignatureChecks(
          validatedTx,
          missingAuthorizers,
          expectFullAuthorization,
        )
    }
  }

  private def handleSuccessfulSignatureChecks(
      toValidate: GenericSignedTopologyTransaction,
      missingAuthorizers: RequiredAuthAuthorizations,
      expectFullAuthorization: Boolean,
  )(implicit
      traceContext: TraceContext
  ): ValidatedTopologyTransaction[TopologyChangeOp, TopologyMapping] = {
    // if there are no missing authorizers, we can update the internal caches
    val isFullyAuthorized = if (missingAuthorizers.isEmpty) {
      val processedNSD = toValidate
        .selectMapping[NamespaceDelegation]
        .forall { sigTx => processNamespaceDelegation(AuthorizedTopologyTransaction(sigTx)) }

      val processedIDD = toValidate.selectMapping[IdentifierDelegation].forall { sigTx =>
        processIdentifierDelegation(AuthorizedTopologyTransaction(sigTx))
      }

      val processedDND =
        toValidate.selectMapping[DecentralizedNamespaceDefinition].forall { sigTx =>
          processDecentralizedNamespaceDefinition(AuthorizedTopologyTransaction(sigTx))
        }
      val mappingSpecificCheck = processedNSD && processedIDD && processedDND
      if (!mappingSpecificCheck) {
        logger.debug(s"Mapping specific check failed")
      }
      mappingSpecificCheck
    } else { false }

    val acceptMissingAuthorizers =
      toValidate.isProposal && !expectFullAuthorization

    // if the result of this validation is final (when processing transactions for the authorized store
    // or sequenced transactions from the domain) we set the proposal flag according to whether the transaction
    // is fully authorized or not.
    // This must not be done when preliminarily validating transactions via the DomainTopologyManager, because
    // the validation outcome might change when validating the transaction again after it has been sequenced.
    val finalTransaction =
      if (validationIsFinal) toValidate.copy(isProposal = !isFullyAuthorized)
      else toValidate

    // Either the transaction is fully authorized or the request allows partial authorization
    if (isFullyAuthorized || acceptMissingAuthorizers) {
      ValidatedTopologyTransaction(finalTransaction, None)
    } else {
      if (!missingAuthorizers.isEmpty) {
        logger.debug(s"Missing authorizers: $missingAuthorizers")
      }
      ValidatedTopologyTransaction(
        toValidate,
        Some(TopologyTransactionRejection.NotAuthorized),
      )
    }
  }

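// Editor's note: a condensed sketch (not part of the patch) of the acceptance rule
// implemented by handleSuccessfulSignatureChecks. isProposal, expectFullAuthorization
// and isFullyAuthorized are the values computed above:
//
//   val accept = isFullyAuthorized || (toValidate.isProposal && !expectFullAuthorization)
//   // when validationIsFinal, the proposal flag is additionally cleared once fully
//   // authorized: finalTransaction = toValidate.copy(isProposal = !isFullyAuthorized)
//   val verdict =
//     if (accept) ValidatedTopologyTransaction(finalTransaction, None)
//     else ValidatedTopologyTransaction(toValidate, Some(TopologyTransactionRejection.NotAuthorized))
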
  /** Validates the signature of the removal of a root certificate.
    * This check is done separately from the mechanism used for other topology transactions (i.e., isCurrentlyAuthorized),
    * because removing a root certificate removes it from the authorization graph and therefore
    * isCurrentlyAuthorized would not find the key to validate it.
    */
  private def validateRootCertificate(
      toValidate: GenericSignedTopologyTransaction
  ): Option[Either[
    TopologyTransactionRejection,
    (GenericSignedTopologyTransaction, RequiredAuthAuthorizations),
  ]] = {
    toValidate
      .selectMapping[NamespaceDelegation]
      .filter(NamespaceDelegation.isRootCertificate)
      .map { rootCert =>
        val result = rootCert.signatures.toSeq.forgetNE
          .traverse_(
            pureCrypto
              .verifySignature(
                rootCert.hash.hash,
                rootCert.mapping.target,
                _,
              )
          )
          .bimap(
            TopologyTransactionRejection.SignatureCheckFailed,
            _ => (toValidate, RequiredAuthAuthorizations.empty /* no missing authorizers */ ),
          )
        result
      }

  }

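// Editor's note: a minimal sketch (hypothetical transaction value) of why root-certificate
// removals are special-cased above. A root certificate is self-signed, so its REMOVE can
// only be checked against the certificate's own target key rather than the graph:
//
//   val selfSigned: Boolean = NamespaceDelegation.isRootCertificate(someRemoveTx)
//   // if selfSigned, each signature is verified directly via
//   //   pureCrypto.verifySignature(someRemoveTx.hash.hash, rootCert.mapping.target, sig)
//   // instead of looking the signing key up in the (already pruned) authorization graph.
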
  /** loads all identifier delegations into the identifier delegation cache
    *
    * This function has two "modes". On a cascading update affecting namespaces, we have
@ -291,16 +308,15 @@ class IncomingTopologyTransactionAuthorizationValidator(
  }

  private def processIdentifierDelegation(
      op: TopologyChangeOp,
      tx: AuthorizedIdentifierDelegation,
      tx: AuthorizedIdentifierDelegation
  ): Boolean = {
    // check authorization
    val check = getAuthorizationCheckForNamespace(tx.mapping.identifier.namespace)
    val keysAreValid = check.areValidAuthorizationKeys(tx.signingKeys, requireRoot = false)
    val keysAreValid = check.existsAuthorizedKeyIn(tx.signingKeys, requireRoot = false)
    // update identifier delegation cache if necessary
    if (keysAreValid) {
      val updateOp: Set[AuthorizedIdentifierDelegation] => Set[AuthorizedIdentifierDelegation] =
        op match {
        tx.operation match {
          case TopologyChangeOp.Replace =>
            x => x + tx
          case TopologyChangeOp.Remove =>
@ -313,12 +329,11 @@ class IncomingTopologyTransactionAuthorizationValidator(
  }

  private def processNamespaceDelegation(
      op: TopologyChangeOp,
      tx: AuthorizedNamespaceDelegation,
      tx: AuthorizedNamespaceDelegation
  )(implicit traceContext: TraceContext): Boolean = {
    val graph = getAuthorizationGraphForNamespace(tx.mapping.namespace)
    // add or remove including authorization check
    op match {
    tx.operation match {
      case TopologyChangeOp.Replace => graph.add(tx)
      case TopologyChangeOp.Remove => graph.remove(tx)
    }
@ -330,9 +345,8 @@ class IncomingTopologyTransactionAuthorizationValidator(
    * by the caller once the mapping is to be committed.
    */
  private def processDecentralizedNamespaceDefinition(
      op: TopologyChangeOp,
      tx: AuthorizedDecentralizedNamespaceDefinition,
  )(implicit traceContext: TraceContext): (Boolean, () => Unit) = {
      tx: AuthorizedDecentralizedNamespaceDefinition
  )(implicit traceContext: TraceContext): Boolean = {
    val decentralizedNamespace = tx.mapping.namespace
    val dnsGraph = decentralizedNamespaceCache
      .get(decentralizedNamespace)
@ -360,26 +374,30 @@ class IncomingTopologyTransactionAuthorizationValidator(
      )
      newDecentralizedNamespaceGraph
    }
    val isAuthorized = dnsGraph.areValidAuthorizationKeys(tx.signingKeys, false)
    val isAuthorized = dnsGraph.existsAuthorizedKeyIn(tx.signingKeys, requireRoot = false)

    (
      isAuthorized,
      () => {
        val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace)
        decentralizedNamespaceCache
          .put(
            decentralizedNamespace,
            (tx.mapping, dnsGraph.copy(dnd = tx.mapping, ownerGraphs = ownerGraphs)),
          )
          .discard
      },
    )
    if (isAuthorized) {
      tx.operation match {
        case TopologyChangeOp.Remove =>
          decentralizedNamespaceCache.remove(decentralizedNamespace).discard

        case TopologyChangeOp.Replace =>
          val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace)
          decentralizedNamespaceCache
            .put(
              decentralizedNamespace,
              (tx.mapping, dnsGraph.copy(dnd = tx.mapping, ownerGraphs = ownerGraphs)),
            )
            .discard
      }
    }
    isAuthorized
  }

  private def determineRelevantUidsAndNamespaces(
      transactionsToValidate: Seq[GenericSignedTopologyTransaction],
      transactionsInStore: Map[MappingHash, GenericTopologyTransaction],
  ): Future[(UpdateAggregation, Seq[GenericValidatedTopologyTransaction])] = {
      toValidate: GenericSignedTopologyTransaction,
      inStore: Option[GenericTopologyTransaction],
  ): Future[(UpdateAggregation, GenericValidatedTopologyTransaction)] = {
    def verifyDomain(
        tx: GenericSignedTopologyTransaction
    ): Either[TopologyTransactionRejection, Unit] =
@ -395,22 +413,19 @@ class IncomingTopologyTransactionAuthorizationValidator(

    // we need to figure out for which namespaces and uids we need to load the validation checks
    // and for which uids and namespaces we'll have to perform a cascading update
    import UpdateAggregation.monoid
    transactionsToValidate.parFoldMapA { toValidate =>
      EitherT
        .fromEither[Future](verifyDomain(toValidate))
        .fold(
          rejection =>
            (UpdateAggregation(), Seq(ValidatedTopologyTransaction(toValidate, Some(rejection)))),
          _ =>
            (
              UpdateAggregation().add(
                toValidate.mapping,
                transactionsInStore.get(toValidate.mapping.uniqueKey),
              ),
              Seq(ValidatedTopologyTransaction(toValidate, None)),
    EitherT
      .fromEither[Future](verifyDomain(toValidate))
      .fold(
        rejection =>
          (UpdateAggregation(), ValidatedTopologyTransaction(toValidate, Some(rejection))),
        _ =>
          (
            UpdateAggregation().add(
              toValidate.mapping,
              inStore,
            ),
          )
        }
            ValidatedTopologyTransaction(toValidate, None),
          ),
      )
  }
}

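// Editor's note: a minimal sketch (not from the patch) of the cache transition implemented
// by processDecentralizedNamespaceDefinition once a definition is authorized:
//
//   tx.operation match {
//     case TopologyChangeOp.Remove  => // drop the namespace from decentralizedNamespaceCache
//     case TopologyChangeOp.Replace => // rebuild the owner graphs and overwrite the cache entry
//   }
//
// i.e. the cache only ever reflects fully authorized definitions.
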
@ -43,7 +43,7 @@ trait TransactionAuthorizationValidator {

  protected def pureCrypto: CryptoPureApi

  def isCurrentlyAuthorized(
  def validateSignaturesAndDetermineMissingAuthorizers(
      toValidate: GenericSignedTopologyTransaction,
      inStore: Option[GenericSignedTopologyTransaction],
  )(implicit
@ -72,41 +72,41 @@ trait TransactionAuthorizationValidator {
    val namespaceWithRootAuthorizations =
      required.namespacesWithRoot.map { ns =>
        val check = getAuthorizationCheckForNamespace(ns)
        val keysWithDelegation = check.getValidAuthorizationKeys(
        val keysUsed = check.keysSupportingAuthorization(
          signingKeys,
          requireRoot = true,
        )
        val keysAuthorizeNamespace =
          check.areValidAuthorizationKeys(signingKeys, requireRoot = true)
        (ns -> (keysAuthorizeNamespace, keysWithDelegation))
          check.existsAuthorizedKeyIn(signingKeys, requireRoot = true)
        (ns -> (keysAuthorizeNamespace, keysUsed))
      }.toMap

    // Now let's determine which namespaces and uids actually delegated to any of the keys
    val namespaceAuthorizations = required.namespaces.map { ns =>
      val check = getAuthorizationCheckForNamespace(ns)
      val keysWithDelegation = check.getValidAuthorizationKeys(
      val keysUsed = check.keysSupportingAuthorization(
        signingKeys,
        requireRoot = false,
      )
      val keysAuthorizeNamespace = check.areValidAuthorizationKeys(signingKeys, requireRoot = false)
      (ns -> (keysAuthorizeNamespace, keysWithDelegation))
      val keysAuthorizeNamespace = check.existsAuthorizedKeyIn(signingKeys, requireRoot = false)
      (ns -> (keysAuthorizeNamespace, keysUsed))
    }.toMap

    val uidAuthorizations =
      required.uids.map { uid =>
        val check = getAuthorizationCheckForNamespace(uid.namespace)
        val keysWithDelegation = check.getValidAuthorizationKeys(
        val keysUsed = check.keysSupportingAuthorization(
          signingKeys,
          requireRoot = false,
        )
        val keysAuthorizeNamespace =
          check.areValidAuthorizationKeys(signingKeys, requireRoot = false)
          check.existsAuthorizedKeyIn(signingKeys, requireRoot = false)

        val keyForUid =
          getAuthorizedIdentifierDelegation(check, uid, toValidate.signatures.map(_.signedBy))
            .map(_.mapping.target)

        (uid -> (keysAuthorizeNamespace || keyForUid.nonEmpty, keysWithDelegation ++ keyForUid))
        (uid -> (keysAuthorizeNamespace || keyForUid.nonEmpty, keysUsed ++ keyForUid))
      }.toMap

    val extraKeyAuthorizations = {
@ -132,7 +132,7 @@ trait TransactionAuthorizationValidator {
        .toMap
    }

    val allAuthorizingKeys =
    val allKeysUsedForAuthorization =
      (namespaceWithRootAuthorizations.values ++
        namespaceAuthorizations.values ++
        uidAuthorizations.values ++
@ -145,9 +145,9 @@ trait TransactionAuthorizationValidator {
    logAuthorizations("Authorizations for UIDs", uidAuthorizations)
    logAuthorizations("Authorizations for extraKeys", extraKeyAuthorizations)

    logger.debug(s"All authorizing keys: ${allAuthorizingKeys.keySet}")
    logger.debug(s"All keys used for authorization: ${allKeysUsedForAuthorization.keySet}")

    val superfluousKeys = signingKeys -- allAuthorizingKeys.keys
    val superfluousKeys = signingKeys -- allKeysUsedForAuthorization.keys
    for {
      _ <- Either.cond[TopologyTransactionRejection, Unit](
        // there must be at least 1 key used for the signatures for one of the delegation mechanisms
@ -160,7 +160,7 @@ trait TransactionAuthorizationValidator {
        },
      )

      txWithValidSignatures <- toValidate
      txWithSignaturesToVerify <- toValidate
        .removeSignatures(superfluousKeys)
        .toRight({
          logger.info(
@ -169,9 +169,9 @@ trait TransactionAuthorizationValidator {
          TopologyTransactionRejection.NoDelegationFoundForKeys(superfluousKeys)
        })

      _ <- txWithValidSignatures.signatures.forgetNE.toList
      _ <- txWithSignaturesToVerify.signatures.forgetNE.toList
        .traverse_(sig =>
          allAuthorizingKeys
          allKeysUsedForAuthorization
            .get(sig.signedBy)
            .toRight({
              val msg =
@ -182,7 +182,7 @@ trait TransactionAuthorizationValidator {
        .flatMap(key =>
          pureCrypto
            .verifySignature(
              txWithValidSignatures.hash.hash,
              txWithSignaturesToVerify.hash.hash,
              key,
              sig,
            )
@ -202,7 +202,7 @@ trait TransactionAuthorizationValidator {
        extraKeys = onlyFullyAuthorized(extraKeyAuthorizations),
      )
      (
        txWithValidSignatures,
        txWithSignaturesToVerify,
        requiredAuth
          .satisfiedByActualAuthorizers(actual)
          .fold(identity, _ => RequiredAuthAuthorizations.empty),
@ -236,7 +236,7 @@ trait TransactionAuthorizationValidator {
  ): Option[AuthorizedIdentifierDelegation] = {
    getIdentifierDelegationsForUid(uid)
      .find(aid =>
        authKeys(aid.mapping.target.id) && graph.areValidAuthorizationKeys(
        authKeys(aid.mapping.target.id) && graph.existsAuthorizedKeyIn(
          aid.signingKeys,
          requireRoot = false,
        )
@ -254,9 +254,7 @@ trait TransactionAuthorizationValidator {
      namespace: Namespace
  ): AuthorizationCheck = {
    val decentralizedNamespaceCheck = decentralizedNamespaceCache.get(namespace).map(_._2)
    val namespaceCheck = namespaceCache.get(
      namespace
    )
    val namespaceCheck = namespaceCache.get(namespace)
    decentralizedNamespaceCheck
      .orElse(namespaceCheck)
      .getOrElse(AuthorizationCheck.empty)

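// Editor's note: a minimal sketch (hypothetical values) of the superfluous-key handling
// above. Signatures by keys without any delegation are stripped before verification, and
// if nothing remains the transaction is rejected rather than verified:
//
//   val superfluousKeys = signingKeys -- allKeysUsedForAuthorization.keys
//   val slimmed = toValidate.removeSignatures(superfluousKeys)
//   // None => NoDelegationFoundForKeys(superfluousKeys); otherwise each remaining
//   // signature must verify against the delegated key it names:
//   //   pureCrypto.verifySignature(slimmedTx.hash.hash, key, sig)
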
@ -10,14 +10,9 @@ import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.ErrorLoggingContext
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.OnboardingRestriction
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.EffectiveTime
import com.digitalasset.canton.topology.transaction.TopologyMapping
import com.digitalasset.canton.topology.{
  DomainId,
  Member,
  ParticipantId,
  PartyId,
  TopologyManagerError,
}

sealed trait TopologyTransactionRejection extends PrettyPrinting with Product with Serializable {
  def asString: String
@ -45,25 +40,12 @@ object TopologyTransactionRejection {
      TopologyManagerError.UnauthorizedTransaction.Failure(asString)
  }

  final case class ThresholdTooHigh(actual: Int, mustBeAtMost: Int)
      extends TopologyTransactionRejection {
    override def asString: String =
      s"Threshold must not be higher than $mustBeAtMost, but was $actual."

    override def pretty: Pretty[ThresholdTooHigh] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = {
      TopologyManagerError.InvalidThreshold.ThresholdTooHigh(actual, mustBeAtMost)
    }
  }

  final case class UnknownParties(parties: Seq[PartyId]) extends TopologyTransactionRejection {
    override def asString: String = s"Parties ${parties.sorted.mkString(", ")} are unknown."

    override def pretty: Pretty[UnknownParties.this.type] = prettyOfString(_ => asString)
    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.UnknownParties.Failure(parties)

  }

  final case class OnboardingRestrictionInPlace(
@ -192,6 +174,25 @@ object TopologyTransactionRejection {
    )
  }

  final case class PartyExceedsHostingLimit(
      partyId: PartyId,
      limit: Int,
      numParticipants: Int,
  ) extends TopologyTransactionRejection {
    override def asString: String =
      s"Party $partyId exceeds hosting limit of $limit with desired number of $numParticipants hosting participants."

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.PartyExceedsHostingLimit.Reject(partyId, limit, numParticipants)

    override def pretty: Pretty[PartyExceedsHostingLimit.this.type] =
      prettyOfClass(
        param("partyId", _.partyId),
        param("limit", _.limit),
        param("number of hosting participants", _.numParticipants),
      )
  }

  final case class MissingMappings(missing: Map[Member, Seq[TopologyMapping.Code]])
      extends TopologyTransactionRejection {
    override def asString: String = {
@ -209,4 +210,24 @@ object TopologyTransactionRejection {

    override def pretty: Pretty[MissingMappings.this.type] = prettyOfString(_ => asString)
  }

  final case class MissingDomainParameters(effective: EffectiveTime)
      extends TopologyTransactionRejection {
    override def asString: String = s"Missing domain parameters at $effective"

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.MissingTopologyMapping.MissingDomainParameters(effective)

    override def pretty: Pretty[MissingDomainParameters.this.type] = prettyOfString(_ => asString)
  }

  final case class NamespaceAlreadyInUse(namespace: Namespace)
      extends TopologyTransactionRejection {
    override def asString: String = s"The namespace $namespace is already used by another entity."

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.NamespaceAlreadyInUse.Reject(namespace)

    override def pretty: Pretty[NamespaceAlreadyInUse.this.type] = prettyOfString(_ => asString)
  }
}

@ -401,14 +401,14 @@ object NamespaceDelegation {
      target: SigningPublicKey,
      isRootDelegation: Boolean,
  ): NamespaceDelegation =
    create(namespace, target, isRootDelegation).fold(err => sys.error(err), identity)
    create(namespace, target, isRootDelegation).valueOr(err =>
      throw new IllegalArgumentException(err)
    )

  def code: TopologyMapping.Code = Code.NamespaceDelegation

  /** Returns true if the given transaction is a self-signed root certificate */
  def isRootCertificate(sit: GenericSignedTopologyTransaction): Boolean = {
    ((sit.operation == TopologyChangeOp.Replace && sit.serial == PositiveInt.one) ||
      (sit.operation == TopologyChangeOp.Remove && sit.serial != PositiveInt.one)) &&
    sit.mapping
      .select[transaction.NamespaceDelegation]
      .exists(ns =>
@ -944,8 +944,8 @@ final case class PartyHostingLimits(

  override def code: Code = Code.PartyHostingLimits

  override def namespace: Namespace = domainId.namespace
  override def maybeUid: Option[UniqueIdentifier] = Some(domainId.uid)
  override def namespace: Namespace = partyId.namespace
  override def maybeUid: Option[UniqueIdentifier] = Some(partyId.uid)

  override def restrictedToDomain: Option[DomainId] = Some(domainId)

@ -1057,7 +1057,7 @@ object HostingParticipant {
  } yield HostingParticipant(participantId, permission)
}

final case class PartyToParticipant(
final case class PartyToParticipant private (
    partyId: PartyId,
    domainId: Option[DomainId],
    threshold: PositiveInt,
@ -1135,6 +1135,51 @@ final case class PartyToParticipant(

object PartyToParticipant {

  def create(
      partyId: PartyId,
      domainId: Option[DomainId],
      threshold: PositiveInt,
      participants: Seq[HostingParticipant],
      groupAddressing: Boolean,
  ): Either[String, PartyToParticipant] = {
    val noDuplicateParticipants = {
      val duplicatePermissions =
        participants.groupBy(_.participantId).values.filter(_.size > 1).toList
      Either.cond(
        duplicatePermissions.isEmpty,
        (),
        s"Participants may only be assigned one permission: $duplicatePermissions",
      )
    }
    val thresholdCanBeMet = {
      val numConfirmingParticipants =
        participants.count(_.permission >= ParticipantPermission.Confirmation)
      Either
        .cond(
          // we allow the threshold criteria to go unmet if there are only observing participants.
          // but as soon as there is 1 confirming participant, the threshold must theoretically be satisfiable,
          // otherwise the party can never confirm a transaction.
          numConfirmingParticipants == 0 || threshold.value <= numConfirmingParticipants,
          (),
          s"Party $partyId cannot meet threshold of $threshold confirming participants with participants $participants",
        )
        .map(_ => PartyToParticipant(partyId, domainId, threshold, participants, groupAddressing))
    }

    noDuplicateParticipants.flatMap(_ => thresholdCanBeMet)
  }

  def tryCreate(
      partyId: PartyId,
      domainId: Option[DomainId],
      threshold: PositiveInt,
      participants: Seq[HostingParticipant],
      groupAddressing: Boolean,
  ): PartyToParticipant =
    create(partyId, domainId, threshold, participants, groupAddressing).valueOr(err =>
      throw new IllegalArgumentException(err)
    )

  def uniqueKey(partyId: PartyId, domainId: Option[DomainId]): MappingHash =
    TopologyMapping.buildUniqueKey(code)(
      _.add(partyId.toProtoPrimitive).add(domainId.fold("")(_.toProtoPrimitive))
@ -1158,7 +1203,7 @@ object PartyToParticipant {
}

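// Editor's note: a hypothetical usage sketch (party and participant values invented) of
// the smart constructor introduced above. `create` validates the hosting set, so an
// unsatisfiable confirmation threshold is rejected as a Left instead of constructing an
// invalid mapping:
//
//   val result = PartyToParticipant.create(
//     partyId = alice,
//     domainId = None,
//     threshold = PositiveInt.tryCreate(2),
//     participants = Seq(HostingParticipant(p1, ParticipantPermission.Confirmation)),
//     groupAddressing = false,
//   )
//   // Left(...): a threshold of 2 cannot be met by a single confirming participant.
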
// AuthorityOf
final case class AuthorityOf(
final case class AuthorityOf private (
    partyId: PartyId,
    domainId: Option[DomainId],
    threshold: PositiveInt,
@ -1199,6 +1244,21 @@ final case class AuthorityOf(

object AuthorityOf {

  def create(
      partyId: PartyId,
      domainId: Option[DomainId],
      threshold: PositiveInt,
      parties: Seq[PartyId],
  ): Either[String, AuthorityOf] = {
    Either
      .cond(
        threshold.value <= parties.size,
        (),
        s"Invalid threshold $threshold for $partyId with authorizers $parties",
      )
      .map(_ => AuthorityOf(partyId, domainId, threshold, parties))
  }

  def uniqueKey(partyId: PartyId, domainId: Option[DomainId]): MappingHash =
    TopologyMapping.buildUniqueKey(code)(
      _.add(partyId.toProtoPrimitive).add(domainId.fold("")(_.toProtoPrimitive))
@ -1217,7 +1277,9 @@ object AuthorityOf {
      if (value.domain.nonEmpty)
        DomainId.fromProtoPrimitive(value.domain, "domain").map(_.some)
      else Right(None)
    } yield AuthorityOf(partyId, domainId, threshold, parties)
      authorityOf <- create(partyId, domainId, threshold, parties)
        .leftMap(ProtoDeserializationError.OtherError)
    } yield authorityOf
  }

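// Editor's note: a hypothetical usage sketch (party values invented) of AuthorityOf.create.
// The threshold must be coverable by the listed authorizers:
//
//   AuthorityOf.create(alice, None, PositiveInt.tryCreate(3), Seq(bank1, bank2))
//   // Left("Invalid threshold ..."): 3 of 2 authorizers can never be met.
//   AuthorityOf.create(alice, None, PositiveInt.tryCreate(2), Seq(bank1, bank2))
//   // Right(AuthorityOf(...)): both authorizers must sign on Alice's behalf.
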
/** Dynamic domain parameter settings for the domain

@ -5,14 +5,19 @@ package com.digitalasset.canton.topology.transaction

import cats.data.EitherT
import cats.instances.future.*
import cats.instances.order.*
import cats.syntax.semigroup.*
import com.digitalasset.canton.crypto.KeyPurpose
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.protocol.OnboardingRestriction
import com.digitalasset.canton.protocol.{DynamicDomainParameters, OnboardingRestriction}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.EffectiveTime
import com.digitalasset.canton.topology.store.StoredTopologyTransactions.PositiveStoredTopologyTransactions
import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{
  InvalidTopologyMapping,
  NamespaceAlreadyInUse,
}
import com.digitalasset.canton.topology.store.{
  TopologyStore,
  TopologyStoreId,
@ -24,7 +29,6 @@ import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.EitherTUtil

import scala.concurrent.{ExecutionContext, Future}
import scala.math.Ordered.*

trait TopologyMappingChecks {
  def checkTransaction(
@ -127,6 +131,27 @@ class ValidatingTopologyMappingChecks(
          .select[TopologyChangeOp.Replace, AuthorityOf]
          .map(checkAuthorityOf(effective, _))

      case (
            Code.DecentralizedNamespaceDefinition,
            None | Some(Code.DecentralizedNamespaceDefinition),
          ) =>
        toValidate
          .select[TopologyChangeOp.Replace, DecentralizedNamespaceDefinition]
          .map(
            checkDecentralizedNamespaceDefinitionReplace(
              _,
              inStore.flatMap(_.select[TopologyChangeOp, DecentralizedNamespaceDefinition]),
            )
          )

      case (
            Code.NamespaceDelegation,
            None | Some(Code.NamespaceDelegation),
          ) =>
        toValidate
          .select[TopologyChangeOp.Replace, NamespaceDelegation]
          .map(checkNamespaceDelegationReplace)

      case otherwise => None
    }

@ -190,6 +215,33 @@ class ValidatingTopologyMappingChecks(
    ensureParticipantDoesNotHostParties(effective, toValidate.mapping.participantId)
  }

  private def loadDomainParameters(
      effective: EffectiveTime
  )(implicit
      traceContext: TraceContext
  ): EitherT[Future, TopologyTransactionRejection, DynamicDomainParameters] = {
    loadFromStore(effective, DomainParametersState.code).subflatMap { domainParamCandidates =>
      val params = domainParamCandidates.result.view
        .flatMap(_.selectMapping[DomainParametersState])
        .map(_.mapping.parameters)
        .toList
      params match {
        case Nil =>
          logger.error(
            "Cannot determine domain parameters."
          )
          Left(TopologyTransactionRejection.MissingDomainParameters(effective))
        case param :: Nil => Right(param)
        case param :: rest =>
          logger.error(
            s"Multiple domain parameters (${rest.size + 1}) at $effective. Using first one: $param."
          )
          Right(param)
      }
    }

  }

  private def checkDomainTrustCertificateReplace(
      effective: EffectiveTime,
      toValidate: SignedTopologyTransaction[TopologyChangeOp, DomainTrustCertificate],
@ -199,25 +251,7 @@ class ValidatingTopologyMappingChecks(

    def loadOnboardingRestriction()
        : EitherT[Future, TopologyTransactionRejection, OnboardingRestriction] = {
      loadFromStore(effective, DomainParametersState.code).map { domainParamCandidates =>
        val restrictions = domainParamCandidates.result.view
          .flatMap(_.selectMapping[DomainParametersState])
          .map(_.mapping.parameters.onboardingRestriction)
          .toList
        restrictions match {
          case Nil =>
            logger.error(
              "Cannot determine the onboarding restriction. Assuming the domain is locked."
            )
            OnboardingRestriction.RestrictedLocked
          case param :: Nil => param
          case param :: rest =>
            logger.error(
              s"Multiple domain parameters (${rest.size + 1}) at $effective. Using first one with restriction $param."
            )
            param
        }
      }
      loadDomainParameters(effective).map(_.onboardingRestriction)
    }

    def checkDomainIsNotLocked(restriction: OnboardingRestriction) = {
@ -311,65 +345,97 @@ class ValidatingTopologyMappingChecks(
      traceContext: TraceContext
  ): EitherT[Future, TopologyTransactionRejection, Unit] = {
    import toValidate.mapping
    val numConfirmingParticipants =
      mapping.participants.count(_.permission >= ParticipantPermission.Confirmation)
    def checkParticipants() = {
      val newParticipants = mapping.participants.map(_.participantId).toSet --
        inStore.toList.flatMap(_.mapping.participants.map(_.participantId))
      for {
        participantTransactions <- EitherT.right[TopologyTransactionRejection](
          store
            .findPositiveTransactions(
              CantonTimestamp.MaxValue,
              asOfInclusive = false,
              isProposal = false,
              types = Seq(DomainTrustCertificate.code, OwnerToKeyMapping.code),
              filterUid = Some(newParticipants.toSeq.map(_.uid)),
              filterNamespace = None,
            )
        )

        // check that all participants are known on the domain
        missingParticipantCertificates = newParticipants -- participantTransactions
          .collectOfMapping[DomainTrustCertificate]
          .result
          .map(_.mapping.participantId)

        _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
          missingParticipantCertificates.isEmpty,
          TopologyTransactionRejection.UnknownMembers(missingParticipantCertificates.toSeq),
        )

        // check that all known participants have keys registered
        participantsWithInsufficientKeys =
          newParticipants -- participantTransactions
            .collectOfMapping[OwnerToKeyMapping]
            .result
            .view
            .filter { tx =>
              val keyPurposes = tx.mapping.keys.map(_.purpose).toSet
              requiredKeyPurposes.forall(keyPurposes)
            }
            .map(_.mapping.member)
            .collect { case pid: ParticipantId => pid }
            .toSeq

        _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
          participantsWithInsufficientKeys.isEmpty,
          TopologyTransactionRejection.InsufficientKeys(participantsWithInsufficientKeys.toSeq),
        )
      } yield {
        ()
      }
    }

    def checkHostingLimits(effective: EffectiveTime) = for {
      hostingLimitsCandidates <- loadFromStore(
        effective,
        code = PartyHostingLimits.code,
        filterUid = Some(Seq(toValidate.mapping.partyId.uid)),
      )
      hostingLimits = hostingLimitsCandidates.result.view
        .flatMap(_.selectMapping[PartyHostingLimits])
        .map(_.mapping.quota)
        .toList
      partyHostingLimit = hostingLimits match {
        case Nil => // No hosting limits found. This is expected if no restrictions are in place
          None
        case quota :: Nil => Some(quota)
        case multiple @ (quota :: _) =>
          logger.error(
            s"Multiple PartyHostingLimits (${multiple.size}) at $effective. Using first one with quota $quota."
          )
          Some(quota)
      }
      // TODO(#14050) load default party hosting limits from dynamic domain parameters in case the party
      // doesn't have a specific PartyHostingLimits mapping issued by the domain.
      _ <- partyHostingLimit match {
        case Some(limit) =>
          EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
            toValidate.mapping.participants.size <= limit,
            TopologyTransactionRejection.PartyExceedsHostingLimit(
              toValidate.mapping.partyId,
              limit,
              toValidate.mapping.participants.size,
            ),
          )
        case None => EitherTUtil.unit[TopologyTransactionRejection]
      }
    } yield ()

    for {
      // check the threshold
      _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
        mapping.threshold.value <= numConfirmingParticipants,
        TopologyTransactionRejection.ThresholdTooHigh(
          mapping.threshold.value,
          numConfirmingParticipants,
        ),
      )
      _ <- checkParticipants()
      _ <- checkHostingLimits(EffectiveTime.MaxValue)
    } yield ()

      newParticipants = mapping.participants.map(_.participantId).toSet --
        inStore.toList.flatMap(_.mapping.participants.map(_.participantId))
      participantTransactions <- EitherT.right[TopologyTransactionRejection](
        store
          .findPositiveTransactions(
            CantonTimestamp.MaxValue,
            asOfInclusive = false,
            isProposal = false,
            types = Seq(DomainTrustCertificate.code, OwnerToKeyMapping.code),
            filterUid = Some(newParticipants.toSeq.map(_.uid)),
            filterNamespace = None,
          )
      )

      // check that all participants are known on the domain
      missingParticipantCertificates = newParticipants -- participantTransactions
        .collectOfMapping[DomainTrustCertificate]
        .result
        .map(_.mapping.participantId)

      _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
        missingParticipantCertificates.isEmpty,
        TopologyTransactionRejection.UnknownMembers(missingParticipantCertificates.toSeq),
      )

      // check that all known participants have keys registered
      participantsWithInsufficientKeys =
        newParticipants -- participantTransactions
          .collectOfMapping[OwnerToKeyMapping]
          .result
          .view
          .filter { tx =>
            val keyPurposes = tx.mapping.keys.map(_.purpose).toSet
            requiredKeyPurposes.forall(keyPurposes)
          }
          .map(_.mapping.member)
          .collect { case pid: ParticipantId => pid }
          .toSeq

      _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
        participantsWithInsufficientKeys.isEmpty,
        TopologyTransactionRejection.InsufficientKeys(participantsWithInsufficientKeys.toSeq),
      )
    } yield {
      ()
    }
  }

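// Editor's note: a condensed sketch (not from the patch) of the PartyToParticipant
// validation order implemented above; all three checks must pass:
//
//   for {
//     _ <- thresholdNotTooHigh   // threshold <= number of confirming participants
//     _ <- checkParticipants()   // all new participants known and holding required keys
//     _ <- checkHostingLimits(EffectiveTime.MaxValue) // participants.size <= quota, if set
//   } yield ()
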
  private def checkOwnerToKeyMappingReplace(
@ -465,15 +531,7 @@ class ValidatingTopologyMappingChecks(
    val newMediators = (toValidate.mapping.allMediatorsInGroup.toSet -- inStore.toList.flatMap(
      _.mapping.allMediatorsInGroup
    )).map(identity[Member])

    val thresholdCheck = EitherTUtil.condUnitET(
      toValidate.mapping.threshold.value <= toValidate.mapping.active.size,
      TopologyTransactionRejection.ThresholdTooHigh(
        toValidate.mapping.threshold.value,
        toValidate.mapping.active.size,
      ),
    )
    thresholdCheck.flatMap(_ => checkMissingNsdAndOtkMappings(effectiveTime, newMediators))
    checkMissingNsdAndOtkMappings(effectiveTime, newMediators)
  }

  private def checkSequencerDomainStateReplace(
@ -485,14 +543,7 @@ class ValidatingTopologyMappingChecks(
      _.mapping.allSequencers
    )).map(identity[Member])

    val thresholdCheck = EitherTUtil.condUnitET(
      toValidate.mapping.threshold.value <= toValidate.mapping.active.size,
      TopologyTransactionRejection.ThresholdTooHigh(
        toValidate.mapping.threshold.value,
        toValidate.mapping.active.size,
      ),
    )
    thresholdCheck.flatMap(_ => checkMissingNsdAndOtkMappings(effectiveTime, newSequencers))
    checkMissingNsdAndOtkMappings(effectiveTime, newSequencers)
  }

  private def checkAuthorityOf(
@ -521,15 +572,85 @@ class ValidatingTopologyMappingChecks(
      }
    }

    val checkThreshold = {
      val actual = toValidate.mapping.threshold.value
      val mustBeAtMost = toValidate.mapping.parties.size
      EitherTUtil.condUnitET(
        actual <= mustBeAtMost,
        TopologyTransactionRejection.ThresholdTooHigh(actual, mustBeAtMost),
      )
    checkPartiesAreKnown()
  }

  private def checkDecentralizedNamespaceDefinitionReplace(
      toValidate: SignedTopologyTransaction[
        TopologyChangeOp.Replace,
        DecentralizedNamespaceDefinition,
      ],
      inStore: Option[SignedTopologyTransaction[
        TopologyChangeOp,
        DecentralizedNamespaceDefinition,
      ]],
  )(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {

    def checkDecentralizedNamespaceDerivedFromOwners()
        : EitherT[Future, TopologyTransactionRejection, Unit] =
      if (inStore.isEmpty) {
        // The very first decentralized namespace definition must have its namespace computed from the owners
        EitherTUtil.condUnitET(
          toValidate.mapping.namespace == DecentralizedNamespaceDefinition
            .computeNamespace(toValidate.mapping.owners),
          InvalidTopologyMapping(
            s"The decentralized namespace ${toValidate.mapping.namespace} is not derived from the owners ${toValidate.mapping.owners.toSeq.sorted}"
          ),
        )
      } else {
        EitherTUtil.unit
      }

    def checkNoClashWithRootCertificates()(implicit
        traceContext: TraceContext
    ): EitherT[Future, TopologyTransactionRejection, Unit] = {
      loadFromStore(
        EffectiveTime.MaxValue,
        Code.NamespaceDelegation,
        filterUid = None,
        filterNamespace = Some(Seq(toValidate.mapping.namespace)),
      ).flatMap { namespaceDelegations =>
        val foundRootCertWithSameNamespace = namespaceDelegations.result.exists(stored =>
          NamespaceDelegation.isRootCertificate(stored.transaction)
        )
        EitherTUtil.condUnitET(
          !foundRootCertWithSameNamespace,
          NamespaceAlreadyInUse(toValidate.mapping.namespace),
        )
      }
    }

    checkThreshold.flatMap(_ => checkPartiesAreKnown())
    for {
      _ <- checkDecentralizedNamespaceDerivedFromOwners()
      _ <- checkNoClashWithRootCertificates()
    } yield ()
  }

private def checkNamespaceDelegationReplace(
|
||||
toValidate: SignedTopologyTransaction[
|
||||
TopologyChangeOp.Replace,
|
||||
NamespaceDelegation,
|
||||
]
|
||||
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
|
||||
def checkNoClashWithDecentralizedNamespaces()(implicit
|
||||
traceContext: TraceContext
|
||||
): EitherT[Future, TopologyTransactionRejection, Unit] = {
|
||||
EitherTUtil.ifThenET(NamespaceDelegation.isRootCertificate(toValidate)) {
|
||||
loadFromStore(
|
||||
EffectiveTime.MaxValue,
|
||||
Code.DecentralizedNamespaceDefinition,
|
||||
filterUid = None,
|
||||
filterNamespace = Some(Seq(toValidate.mapping.namespace)),
|
||||
).flatMap { dns =>
|
||||
val foundDecentralizedNamespaceWithSameNamespace = dns.result.nonEmpty
|
||||
EitherTUtil.condUnitET(
|
||||
!foundDecentralizedNamespaceWithSameNamespace,
|
||||
NamespaceAlreadyInUse(toValidate.mapping.namespace),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
checkNoClashWithDecentralizedNamespaces()
|
||||
}
|
||||
}
|
||||
|
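
The hunks above repeatedly build the same guard: a mapping is accepted only if its threshold can be satisfied by the available members, and is otherwise rejected with ThresholdTooHigh. A minimal self-contained sketch of that pattern, with plain Either standing in for Canton's EitherT and rejection types (stand-ins, not the real API):

    // Sketch only: Either.cond mirrors the EitherTUtil.condUnitET guard above.
    final case class ThresholdTooHigh(actual: Int, mustBeAtMost: Int)

    def checkThreshold(threshold: Int, activeMembers: Int): Either[ThresholdTooHigh, Unit] =
      Either.cond(threshold <= activeMembers, (), ThresholdTooHigh(threshold, activeMembers))

    // checkThreshold(3, 2) == Left(ThresholdTooHigh(3, 2)); checkThreshold(2, 3) == Right(())
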
@ -6,7 +6,6 @@ package com.digitalasset.canton.tracing
import com.daml.scalautil.Statement.discard
import com.digitalasset.canton.concurrent.DirectExecutionContext
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.lifecycle.FutureUnlessShutdownImpl.AbortedDueToShutdownException
import com.digitalasset.canton.logging.TracedLogger
import com.github.blemale.scaffeine.{AsyncLoadingCache, Scaffeine}

@ -75,8 +74,10 @@ class TracedAsyncLoadingCache[K, V](
)(tracedLogger: TracedLogger) {
implicit private[this] val ec: ExecutionContext = DirectExecutionContext(tracedLogger)

/** @see com.github.blemale.scaffeine.AsyncLoadingCache.get
*/
/*
* See com.github.blemale.scaffeine.AsyncLoadingCache.get
* If shutting down, the returned future will be failed with an AbortedDueToShutdownException
*/
def get(key: K)(implicit traceContext: TraceContext): Future[V] =
underlying.get(TracedKey(key)(traceContext))

@ -85,12 +86,14 @@ class TracedAsyncLoadingCache[K, V](
discard(underlying.synchronous().asMap().filterInPlace((t, v) => !filter(t.key, v)))
}

def getUS(key: K)(implicit traceContext: TraceContext): FutureUnlessShutdown[V] = {
def getUS(key: K)(implicit traceContext: TraceContext): FutureUnlessShutdown[V] =
FutureUnlessShutdown.transformAbortedF(get(key))
}

/** @see com.github.blemale.scaffeine.AsyncLoadingCache.getAll
*/
/*
* See com.github.blemale.scaffeine.AsyncLoadingCache.getAll
* If shutting down, the returned future will be failed with an AbortedDueToShutdownException wrapped inside
* a java.util.concurrent.CompletionException
*/
def getAll(keys: Iterable[K])(implicit traceContext: TraceContext): Future[Map[K, V]] =
underlying
.getAll(keys.map(TracedKey(_)(traceContext)))
@ -98,16 +101,9 @@ class TracedAsyncLoadingCache[K, V](

def getAllUS(
keys: Iterable[K]
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[K, V]] =
try
FutureUnlessShutdown.outcomeF(
underlying
.getAll(keys.map(TracedKey(_)(traceContext)))
.map(_.map { case (tracedKey, value) => tracedKey.key -> value })(ec)
)
catch {
case _: AbortedDueToShutdownException => FutureUnlessShutdown.abortedDueToShutdown
}
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[K, V]] = {
FutureUnlessShutdown.transformAbortedF(getAll(keys))
}

override def toString = s"TracedAsyncLoadingCache($underlying)"
}
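
For context on the change above: getUS and getAllUS now delegate to FutureUnlessShutdown.transformAbortedF instead of wrapping the lookup in a try/catch. A self-contained sketch of the intended semantics, with stdlib types standing in for Canton's FutureUnlessShutdown (an assumption for illustration, not the real API):

    import scala.concurrent.{ExecutionContext, Future}

    final class AbortedDueToShutdownException extends RuntimeException

    // Sketch: a future failed by shutdown becomes an explicit "aborted" value
    // instead of an exception leaking to the caller.
    def transformAborted[A](f: Future[A])(implicit ec: ExecutionContext): Future[Either[String, A]] =
      f.map(a => Right(a): Either[String, A]).recover { case _: AbortedDueToShutdownException =>
        Left("aborted-due-to-shutdown")
      }
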
@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8
sdk-version: 3.1.0-snapshot.20240620.13140.0.v996a1164
build-options:
- --target=2.1
name: CantonExamples
@ -650,9 +650,9 @@ create table sequencer_lower_bound (
create table sequencer_events (
ts bigint primary key,
node_index smallint not null,
-- single char to indicate the event type: D for deliver event, E for deliver error
-- single char to indicate the event type: D for deliver event, E for deliver error, R for deliver receipt
event_type char(1) not null
constraint event_type_enum check (event_type = 'D' or event_type = 'E'),
constraint event_type_enum check (event_type IN ('D', 'E', 'R')),
message_id varchar null,
sender integer null,
-- null if event goes to everyone, otherwise specify member ids of recipients
@ -921,6 +921,8 @@ create table seq_traffic_control_consumed_journal (
extra_traffic_consumed bigint not null,
-- base traffic remainder at sequencing_timestamp
base_traffic_remainder bigint not null,
-- the last cost consumed at sequencing_timestamp
last_consumed_cost bigint not null,
-- traffic entries have a unique sequencing_timestamp per member
primary key (member, sequencing_timestamp)
);
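
The journal above now also records the cost deducted at each sequencing timestamp. A hypothetical Scala row model mirroring the columns (names and types are assumptions for illustration only):

    // Sketch only: one journal row per (member, sequencing timestamp).
    final case class ConsumedTrafficRow(
        member: String,
        sequencingTimestamp: Long, // primary key together with member
        extraTrafficConsumed: Long,
        baseTrafficRemainder: Long,
        lastConsumedCost: Long // new column: the cost consumed at this timestamp
    )
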
@ -1 +1 @@
8347bf5092167e6a3df9d8f3cf1d0054a779e272589f7c0f3aad50cca8f8736a
1923effb9fa5d583e6c188f401e708a5e9c03b725ed988d0928a0b61660854a2

@ -81,7 +81,8 @@ CREATE TABLE lapi_command_completions (
trace_context BINARY LARGE OBJECT
);

CREATE INDEX lapi__command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset);
CREATE INDEX lapi_command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset);
CREATE INDEX lapi_command_completions_offset_idx ON lapi_command_completions USING btree (completion_offset);

---------------------------------------------------------------------------------------------------
-- Events: create

@ -673,9 +673,9 @@ create table sequencer_lower_bound (
create table sequencer_events (
ts bigint primary key,
node_index smallint not null,
-- single char to indicate the event type: D for deliver event, E for deliver error
-- single char to indicate the event type: D for deliver event, E for deliver error, R for deliver receipt
event_type char(1) not null
constraint event_type_enum check (event_type = 'D' or event_type = 'E'),
constraint event_type_enum check (event_type IN ('D', 'E', 'R')),
message_id varchar(300) collate "C" null,
sender integer null,
-- null if event goes to everyone, otherwise specify member ids of recipients
@ -935,6 +935,8 @@ create table seq_traffic_control_consumed_journal (
extra_traffic_consumed bigint not null,
-- base traffic remainder at sequencing_timestamp
base_traffic_remainder bigint not null,
-- the last cost consumed at sequencing_timestamp
last_consumed_cost bigint not null,
-- traffic entries have a unique sequencing_timestamp per member
primary key (member, sequencing_timestamp)
);

@ -1 +1 @@
22559de6824376d64006305601db270b57afafb1eccc05e041e55bf3cb858e30
1f50894cad8a5ce3e65f5e6b0a48484d2cf0cd7cc354fc6b0aa9cdda97d9e6d3

@ -669,7 +669,8 @@ create or replace view debug.seq_traffic_control_consumed_journal as
member,
debug.canton_timestamp(sequencing_timestamp) as sequencing_timestamp,
extra_traffic_consumed,
base_traffic_remainder
base_traffic_remainder,
last_consumed_cost
from seq_traffic_control_consumed_journal;

create or replace view debug.seq_traffic_control_initial_timestamp as

@ -1 +1 @@
f4d58cc709e08a2081d761637ea8d27393decb4ed1a6f4ee8ecf4843a838eab0
d1c0b524698a1e1249785b0fe973f21f5542020215b49c4012bd774e310fb82e

@ -100,6 +100,7 @@ CREATE TABLE lapi_command_completions (
);

CREATE INDEX lapi_command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset);
CREATE INDEX lapi_command_completions_offset_idx ON lapi_command_completions USING btree (completion_offset);

---------------------------------------------------------------------------------------------------
-- Events: Assign

@ -12,7 +12,6 @@ import com.digitalasset.canton.sequencing.protocol.{
Batch,
MediatorGroupRecipient,
OpenEnvelope,
ParticipantsOfParty,
Recipients,
}
import com.digitalasset.canton.topology.client.TopologySnapshot
@ -57,32 +56,13 @@ final case class TransactionConfirmationRequest(
val rootHashMessageEnvelopes =
NonEmpty.from(recipientsOfRootHashMessage) match {
case Some(recipientsNE) =>
// TODO(#13883) Use BCC also for group addresses
// val groupsWithMediator =
// recipientsOfRootHashMessage.map(recipient => NonEmpty(Set, recipient, mediatorRecipient))
// val rootHashMessageEnvelope = OpenEnvelope(
// rootHashMessage,
// Recipients.recipientGroups(NonEmptyUtil.fromUnsafe(groupsWithMediator)),
// )(protocolVersion)
val groupAddressing = recipientsOfRootHashMessage.exists {
case ParticipantsOfParty(_) => true
case _ => false
}
// if using group addressing, we just place all recipients in one group instead of separately as before (it was separate for legacy reasons)
val rootHashMessageRecipients =
if (groupAddressing)
Recipients.recipientGroups(
NonEmpty.mk(Seq, recipientsNE.toSet ++ Seq(mediator))
)
else
Recipients.recipientGroups(
recipientsNE.map(NonEmpty.mk(Set, _, mediator))
)
List(
OpenEnvelope(rootHashMessage(ipsSnapshot.timestamp), rootHashMessageRecipients)(
protocolVersion
)
)
val groupsWithMediator = recipientsNE.map(NonEmpty(Set, _, mediator))
val rootHashMessageEnvelope = OpenEnvelope(
rootHashMessage(ipsSnapshot.timestamp),
Recipients.recipientGroups(groupsWithMediator),
)(protocolVersion)

List(rootHashMessageEnvelope)
case None =>
loggingContext.warn("Confirmation request without root hash message recipients")
List.empty
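
The rewrite above drops the special-casing of group addresses: every root hash message recipient is now paired with the mediator in its own recipient group. A self-contained sketch of that grouping, with plain collections standing in for Canton's NonEmpty and Recipients types (an illustration, not the real API):

    object RootHashGroupingSketch {
      // Each recipient gets its own group containing itself plus the mediator.
      def recipientGroups[A](recipients: Seq[A], mediator: A): Seq[Set[A]] =
        recipients.map(r => Set(r, mediator))

      def main(args: Array[String]): Unit =
        // prints: List(Set(p1, med), Set(p2, med))
        println(recipientGroups(Seq("p1", "p2"), "med"))
    }
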
@ -117,7 +117,7 @@ class QueueBasedDomainOutbox(

private def hasUnsentTransactions: Boolean = domainOutboxQueue.numUnsentTransactions > 0

def newTransactionsAddedToAuthorizedStore(
def newTransactionsAdded(
asOf: CantonTimestamp,
num: Int,
): FutureUnlessShutdown[Unit] = {

@ -162,7 +162,7 @@ class StoreBasedDomainOutbox(

final def queueSize: Int = watermarks.get().queuedApprox

final def newTransactionsAddedToAuthorizedStore(
final def newTransactionsAdded(
asOf: CantonTimestamp,
num: Int,
): FutureUnlessShutdown[Unit] = {
@ -375,7 +375,7 @@ abstract class DomainOutbox extends DomainOutboxHandle {

def targetClient: DomainTopologyClientWithInit

def newTransactionsAddedToAuthorizedStore(
def newTransactionsAdded(
asOf: CantonTimestamp,
num: Int,
): FutureUnlessShutdown[Unit]
@ -396,7 +396,7 @@ class DomainOutboxDynamicObserver(val loggerFactory: NamedLoggerFactory)
transactions: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]],
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
outboxRef.get.fold(FutureUnlessShutdown.unit)(
_.newTransactionsAddedToAuthorizedStore(timestamp, transactions.size)
_.newTransactionsAdded(timestamp, transactions.size)
)
}
@ -19,7 +19,7 @@ class RecipientsTreeTest extends AnyWordSpec with BaseTest {
lazy val p6: Member = ParticipantId("participant6")

lazy val alice = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"alice::party"))
lazy val bob = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"alice::bob"))
lazy val bob = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"bob::party"))
lazy val pop1: ParticipantsOfParty = ParticipantsOfParty(alice)
lazy val pop2: ParticipantsOfParty = ParticipantsOfParty(bob)

@ -51,6 +51,16 @@ class RecipientsTreeTest extends AnyWordSpec with BaseTest {
t5.forMember(p5, Set(pop1)) shouldBe List(t5)
}
}

"allPaths" should {
"give all paths within the tree" in {
t5.allPaths shouldBe Seq(
Seq(Set(rec(p1), pop1), Set(rec(p4), rec(p2), pop2), Set(rec(p1), rec(p5))),
Seq(Set(rec(p1), pop1), Set(rec(p4), rec(p2), pop2), Set(rec(p3))),
Seq(Set(rec(p1), pop1), Set(rec(p2), rec(p6), pop2)),
)
}
}
}

"serialization and deserialization" should {
@ -53,12 +53,14 @@ final class GeneratorsTrafficData(
extraTrafficLimit <- Arbitrary.arbitrary[NonNegativeLong]
extraTrafficConsumed <- Arbitrary.arbitrary[NonNegativeLong]
baseTrafficRemainder <- Arbitrary.arbitrary[NonNegativeLong]
lastConsumedCost <- Arbitrary.arbitrary[NonNegativeLong]
timestamp <- Arbitrary.arbitrary[CantonTimestamp]
serial <- Arbitrary.arbitrary[Option[PositiveInt]]
} yield TrafficState(
extraTrafficLimit,
extraTrafficConsumed,
baseTrafficRemainder,
lastConsumedCost,
timestamp,
serial,
)
@ -91,7 +91,7 @@ class TopologyTransactionTest extends AnyWordSpec with BaseTest with HasCryptogr
"party to participant" should {
val p1 =
mk(
PartyToParticipant(
PartyToParticipant.tryCreate(
PartyId(uid),
None,
PositiveInt.one,
@ -102,12 +102,12 @@ class TopologyTransactionTest extends AnyWordSpec with BaseTest with HasCryptogr

val p2 =
mk(
PartyToParticipant(
PartyToParticipant.tryCreate(
PartyId(uid),
Some(domainId),
PositiveInt.two,
Seq(
HostingParticipant(ParticipantId(uid2), ParticipantPermission.Observation),
HostingParticipant(ParticipantId(uid2), ParticipantPermission.Confirmation),
HostingParticipant(ParticipantId(uid), ParticipantPermission.Submission),
),
groupAddressing = true,
@ -789,6 +789,7 @@ class SequencerClientTest
CantonTimestamp.MinValue.immediateSuccessor,
trafficReceipt.extraTrafficConsumed,
trafficReceipt.baseTrafficRemainder,
trafficReceipt.consumedCost,
)
}

@ -838,6 +839,7 @@ class SequencerClientTest
CantonTimestamp.MinValue.immediateSuccessor,
trafficReceipt.extraTrafficConsumed,
trafficReceipt.baseTrafficRemainder,
trafficReceipt.consumedCost,
)
}

@ -1152,6 +1154,7 @@ class SequencerClientTest
extraTrafficConsumed =
NonNegativeLong.tryCreate(Math.abs(request.timestamp.toProtoPrimitive)),
baseTrafficRemainder = NonNegativeLong.zero,
lastConsumedCost = NonNegativeLong.zero,
timestamp = request.timestamp,
serial = None,
)
@ -76,6 +76,11 @@ class PartyTopologySnapshotClientTest extends AsyncWordSpec with BaseTest {
): Future[Set[LfPartyId]] =
???

override def activeParticipantsOfPartiesWithGroupAddressing(
parties: Seq[LfPartyId]
)(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]] =
???

override def consortiumThresholds(
parties: Set[LfPartyId]
)(implicit traceContext: TraceContext): Future[Map[LfPartyId, PositiveInt]] = ???
@ -43,7 +43,7 @@ trait StoreBasedTopologySnapshotTest extends AsyncWordSpec with BaseTest with Ha
import factory.TestingTransactions.*

lazy val party1participant1 = mkAdd(
PartyToParticipant(
PartyToParticipant.tryCreate(
party1,
None,
PositiveInt.one,
@ -52,7 +52,7 @@ trait StoreBasedTopologySnapshotTest extends AsyncWordSpec with BaseTest with Ha
)
)
lazy val party2participant1_2 = mkAdd(
PartyToParticipant(
PartyToParticipant.tryCreate(
party2,
None,
PositiveInt.one,
@ -4,6 +4,7 @@
package com.digitalasset.canton.topology.processing

import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.SigningPublicKey
import com.digitalasset.canton.topology.transaction.{NamespaceDelegation, TopologyMapping}
import com.digitalasset.canton.topology.{Namespace, TestingOwnerWithKeys}
@ -25,7 +26,7 @@ class AuthorizationGraphTest

def mkGraph = new AuthorizationGraph(namespace, extraDebugInfo = true, loggerFactory)

def mkAuth(
def mkAdd(
nsd: NamespaceDelegation,
key: SigningPublicKey,
): AuthorizedTopologyTransaction[NamespaceDelegation] = {
@ -33,16 +34,27 @@ class AuthorizationGraphTest
AuthorizedTopologyTransaction(tx)
}

def mkRemove(
nsd: NamespaceDelegation,
key: SigningPublicKey,
): AuthorizedTopologyTransaction[NamespaceDelegation] = {
val tx = factory.mkRemove(nsd, NonEmpty(Set, key), PositiveInt.two)
AuthorizedTopologyTransaction(tx)
}

def mkNs(namespace: Namespace, key: SigningPublicKey, isRootDelegation: Boolean) =
NamespaceDelegation.tryCreate(namespace, key, isRootDelegation)

val nsk1k1 = mkAuth(mkNs(namespace, key1, isRootDelegation = true), key1)
val nsk2k1 = mkAuth(mkNs(namespace, key2, isRootDelegation = true), key1)
val nsk2k1p = mkAuth(mkNs(namespace, key2, isRootDelegation = true), key1)
val nsk3k2 = mkAuth(mkNs(namespace, key3, isRootDelegation = true), key2)
val nsk1k1 = mkAdd(mkNs(namespace, key1, isRootDelegation = true), key1)
val nsk1k1_remove = mkRemove(mkNs(namespace, key1, isRootDelegation = true), key1)
val nsk2k1 = mkAdd(mkNs(namespace, key2, isRootDelegation = true), key1)
val nsk2k1_remove = mkRemove(mkNs(namespace, key2, isRootDelegation = true), key1)
val nsk3k2 = mkAdd(mkNs(namespace, key3, isRootDelegation = true), key2)
val nsk3k2_remove = mkRemove(mkNs(namespace, key3, isRootDelegation = true), key2)
val nsk1k2 =
mkAuth(mkNs(namespace, key1, isRootDelegation = true), key2) // cycle
val nsk3k1_nonRoot = mkAuth(mkNs(namespace, key3, isRootDelegation = false), key1)
mkAdd(mkNs(namespace, key1, isRootDelegation = true), key2) // cycle
val nsk3k1_nonRoot = mkAdd(mkNs(namespace, key3, isRootDelegation = false), key1)
val nsk3k1_nonRoot_remove = mkRemove(mkNs(namespace, key3, isRootDelegation = false), key1)

def replaceSignature[T <: TopologyMapping](
authTx: AuthorizedTopologyTransaction[T],
@ -65,7 +77,7 @@ class AuthorizationGraphTest
requireRoot: Boolean,
valid: Boolean,
) = {
graph.areValidAuthorizationKeys(Set(key.fingerprint), requireRoot = requireRoot) shouldBe valid
graph.existsAuthorizedKeyIn(Set(key.fingerprint), requireRoot = requireRoot) shouldBe valid
}

"authorization graph" when {
@ -93,7 +105,7 @@ class AuthorizationGraphTest
val graph = mkGraph
graph.add(nsk1k1)
graph.add(nsk2k1)
graph.remove(nsk2k1)
graph.remove(nsk2k1_remove)
check(graph, key2, requireRoot = false, valid = false)
check(graph, key1, requireRoot = false, valid = true)
}
@ -104,10 +116,13 @@ class AuthorizationGraphTest
graph.add(nsk3k2)
check(graph, key2, requireRoot = false, valid = true)
check(graph, key3, requireRoot = false, valid = true)
loggerFactory.assertLogs(graph.remove(nsk2k1), _.warningMessage should include("dangling"))
loggerFactory.assertLogs(
graph.remove(nsk2k1_remove),
_.warningMessage should include("dangling"),
)
check(graph, key2, requireRoot = false, valid = false)
check(graph, key3, requireRoot = false, valid = false)
graph.add(nsk2k1p)
graph.add(nsk2k1)
check(graph, key3, requireRoot = false, valid = true)
}
"support several chains" in {
@ -118,7 +133,7 @@ class AuthorizationGraphTest
check(graph, key3, requireRoot = false, valid = true)
graph.add(nsk3k1_nonRoot)
check(graph, key3, requireRoot = false, valid = true)
graph.remove(nsk3k1_nonRoot)
graph.remove(nsk3k1_nonRoot_remove)
check(graph, key3, requireRoot = false, valid = true)
}

@ -136,7 +151,7 @@ class AuthorizationGraphTest
graph.add(nsk1k1)
graph.add(nsk2k1)
graph.add(nsk3k2)
graph.remove(nsk1k1)
graph.remove(nsk1k1_remove)
check(graph, key1, requireRoot = false, valid = false)
check(graph, key2, requireRoot = false, valid = false)
check(graph, key3, requireRoot = false, valid = false)
@ -159,17 +174,17 @@ class AuthorizationGraphTest
// test that random key is not authorized
check(graph, key3, requireRoot = false, valid = false)
// remove first certificate
graph.remove(nsk2k1)
graph.remove(nsk2k1_remove)
check(graph, key2, requireRoot = true, valid = false)
// add other certificate (we don't remember removes, so we can do that in this test)
graph.add(nsk2k1p)
graph.add(nsk2k1)
check(graph, key2, requireRoot = true, valid = true)
}

"reject delegations with a wrong namespace" in {
val graph = mkGraph
val fakeNs = Namespace(key8.fingerprint)
val nsk1k1 = mkAuth(mkNs(fakeNs, key1, isRootDelegation = true), key1)
val nsk1k1 = mkAdd(mkNs(fakeNs, key1, isRootDelegation = true), key1)
loggerFactory.assertThrowsAndLogs[IllegalArgumentException](
graph.add(nsk1k1),
_.errorMessage should include("internal error"),
@ -184,7 +199,7 @@ class AuthorizationGraphTest
graph.add(nsk3k2)
check(graph, key3, requireRoot = true, valid = true)

graph.remove(replaceSignature(nsk3k2, key1))
graph.remove(replaceSignature(nsk3k2_remove, key1))
check(graph, key3, requireRoot = true, valid = false)
}
}
@ -202,10 +217,10 @@ class AuthorizationGraphTest
graph.add(nsk1k1)
graph.add(nsk2k1)
check(graph, key2, requireRoot = false, valid = true)
val fakeRemove = replaceSignature(nsk2k1, key6)
val fakeRemove = replaceSignature(nsk2k1_remove, key6)
graph.remove(fakeRemove) shouldBe false
check(graph, key2, requireRoot = false, valid = true)
graph.remove(nsk2k1)
graph.remove(nsk2k1_remove)
check(graph, key2, requireRoot = false, valid = false)
}
"prevent a non-root authorization to authorize a root authorization" in {
@ -213,7 +228,7 @@ class AuthorizationGraphTest
graph.add(nsk1k1)
graph.add(nsk3k1_nonRoot)
check(graph, key3, requireRoot = false, valid = true)
val nsk4k3 = mkAuth(mkNs(namespace, key4, isRootDelegation = true), key3)
val nsk4k3 = mkAdd(mkNs(namespace, key4, isRootDelegation = true), key3)
graph.add(nsk4k3) shouldBe false
check(graph, key4, requireRoot = false, valid = false)
}
@ -225,14 +240,14 @@ class AuthorizationGraphTest
graph.add(nsk2k1)
check(graph, key3, requireRoot = false, valid = true)
check(graph, key2, requireRoot = true, valid = true)
graph.remove(replaceSignature(nsk2k1, key3)) shouldBe false
graph.remove(replaceSignature(nsk2k1_remove, key3)) shouldBe false
check(graph, key2, requireRoot = true, valid = true)
}

"ensure once a delegation is revoked, all depending authorizations will become unauthorized" in {
val graph = mkGraph
val nsk4k3 = mkAuth(mkNs(namespace, key4, isRootDelegation = true), key3)
val nsk5k2 = mkAuth(mkNs(namespace, key5, isRootDelegation = true), key3)
val nsk4k3 = mkAdd(mkNs(namespace, key4, isRootDelegation = true), key3)
val nsk5k2 = mkAdd(mkNs(namespace, key5, isRootDelegation = true), key3)
graph.add(nsk1k1)
graph.add(nsk2k1)
graph.add(nsk3k2)
@ -241,7 +256,7 @@ class AuthorizationGraphTest
Seq(key3, key4, key5).foreach(check(graph, _, requireRoot = false, valid = true))
loggerFactory.assertLogs(
{
graph.remove(nsk2k1)
graph.remove(nsk2k1_remove)
Seq(key3, key4, key5).foreach(check(graph, _, requireRoot = false, valid = false))
},
_.warningMessage should include("The following target keys"),
@ -70,7 +70,7 @@ class DecentralizedNamespaceAuthorizationGraphTest

}

def mkAuth(
def mkAdd(
nsd: NamespaceDelegation,
key: SigningPublicKey,
): AuthorizedTopologyTransaction[NamespaceDelegation] = {
@ -78,20 +78,30 @@ class DecentralizedNamespaceAuthorizationGraphTest
AuthorizedTopologyTransaction(tx)
}

def mkRemove(
nsd: NamespaceDelegation,
key: SigningPublicKey,
): AuthorizedTopologyTransaction[NamespaceDelegation] = {
val tx = factory.mkRemove(nsd, NonEmpty(Set, key), PositiveInt.two)
AuthorizedTopologyTransaction(tx)
}

def mkNs(namespace: Namespace, key: SigningPublicKey, isRootDelegation: Boolean) =
NamespaceDelegation.tryCreate(namespace, key, isRootDelegation)

val ns1k1k1 = mkAuth(mkNs(ns1, key1, isRootDelegation = true), key1)
val ns1k4k1 = mkAuth(mkNs(ns1, key4, isRootDelegation = true), key1)
val ns1k1k1 = mkAdd(mkNs(ns1, key1, isRootDelegation = true), key1)

val ns2k2k2 = mkAuth(mkNs(ns2, key2, isRootDelegation = true), key2)
val ns2k5k2 = mkAuth(mkNs(ns2, key5, isRootDelegation = true), key2)
val ns2k2k5 = mkAuth(mkNs(ns2, key5, isRootDelegation = true), key2)
val ns2k8k5 = mkAuth(mkNs(ns2, key8, isRootDelegation = true), key5)
val ns2k8k2_nonRoot = mkAuth(mkNs(ns2, key8, isRootDelegation = false), key2)
val ns2k2k2 = mkAdd(mkNs(ns2, key2, isRootDelegation = true), key2)
val ns2k2k2_remove = mkRemove(mkNs(ns2, key2, isRootDelegation = true), key2)
val ns2k5k2 = mkAdd(mkNs(ns2, key5, isRootDelegation = true), key2)
val ns2k5k2_remove = mkRemove(mkNs(ns2, key5, isRootDelegation = true), key2)
val ns2k2k5 = mkAdd(mkNs(ns2, key2, isRootDelegation = true), key5)
val ns2k8k5 = mkAdd(mkNs(ns2, key8, isRootDelegation = true), key5)
val ns2k8k5_remove = mkRemove(mkNs(ns2, key8, isRootDelegation = true), key5)
val ns2k8k2_nonRoot = mkAdd(mkNs(ns2, key8, isRootDelegation = false), key2)
val ns2k8k2_nonRoot_remove = mkRemove(mkNs(ns2, key8, isRootDelegation = false), key2)

val ns3k3k3 = mkAuth(mkNs(ns3, key3, isRootDelegation = true), key3)
val ns3k6k3 = mkAuth(mkNs(ns3, key6, isRootDelegation = true), key3)
val ns3k3k3 = mkAdd(mkNs(ns3, key3, isRootDelegation = true), key3)

def replaceSignature[T <: TopologyMapping](
authTx: AuthorizedTopologyTransaction[T],
@ -114,7 +124,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
requireRoot: Boolean,
valid: Boolean,
)(keys: SigningPublicKey*) = {
graph.areValidAuthorizationKeys(
graph.existsAuthorizedKeyIn(
keys.map(_.fingerprint).toSet,
requireRoot = requireRoot,
) shouldBe valid
@ -164,7 +174,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
graph.addAuth(ns2k2k2)
graph.addAuth(ns3k3k3)

graph.removeAuth(ns2k2k2)
graph.removeAuth(ns2k2k2_remove)
check(graph, requireRoot = false, valid = false)(key1, key2)
check(graph, requireRoot = false, valid = true)(key1, key3)
}
@ -180,7 +190,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
check(graph, requireRoot = false, valid = true)(key1, key5)
check(graph, requireRoot = false, valid = true)(key1, key8)
loggerFactory.assertLogs(
graph.removeAuth(ns2k5k2),
graph.removeAuth(ns2k5k2_remove),
_.warningMessage should include("dangling"),
)
check(graph, requireRoot = false, valid = false)(key1, key5)
@ -200,7 +210,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
check(graph, requireRoot = false, valid = true)(key1, key8)
graph.addAuth(ns2k8k2_nonRoot)
check(graph, requireRoot = false, valid = true)(key1, key8)
graph.removeAuth(ns2k8k2_nonRoot)
graph.removeAuth(ns2k8k2_nonRoot_remove)
check(graph, requireRoot = false, valid = true)(key1, key8)
}

@ -222,7 +232,7 @@ class DecentralizedNamespaceAuthorizationGraphTest

graph.addAuth(ns2k5k2)
graph.addAuth(ns2k8k5)
graph.removeAuth(ns2k2k2)
graph.removeAuth(ns2k2k2_remove)
check(graph, requireRoot = false, valid = false)(key1, key2)
check(graph, requireRoot = false, valid = false)(key1, key5)
check(graph, requireRoot = false, valid = false)(key1, key8)
@ -247,7 +257,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
// test that random key is not authorized
check(graph, requireRoot = false, valid = false)(key1, key3)
// remove first certificate
graph.removeAuth(ns2k5k2)
graph.removeAuth(ns2k5k2_remove)
check(graph, requireRoot = true, valid = false)(key1, key5)
// add other certificate (we don't remember removes, so we can do that in this test)
graph.addAuth(ns2k5k2)
@ -264,7 +274,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
graph.addAuth(ns2k8k5)
check(graph, requireRoot = true, valid = true)(key1, key8)

graph.removeAuth(replaceSignature(ns2k8k5, key2))
graph.removeAuth(replaceSignature(ns2k8k5_remove, key2))
check(graph, requireRoot = true, valid = false)(key1, key8)
}
}
@ -5,9 +5,11 @@ package com.digitalasset.canton.topology.processing

import cats.Apply
import cats.instances.list.*
import cats.syntax.foldable.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.SigningPublicKey
import com.digitalasset.canton.crypto.SignatureCheckError.InvalidSignature
import com.digitalasset.canton.crypto.{Signature, SigningPublicKey}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore
@ -21,10 +23,13 @@ import com.digitalasset.canton.topology.store.{
TopologyTransactionRejection,
ValidatedTopologyTransaction,
}
import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction
import com.digitalasset.canton.topology.transaction.TopologyMapping.MappingHash
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.MonadUtil
import com.digitalasset.canton.{BaseTest, HasExecutionContext, ProtocolVersionChecksAsyncWordSpec}
import com.google.protobuf.ByteString
import org.scalatest.wordspec.AsyncWordSpec

class IncomingTopologyTransactionAuthorizationValidatorTest
@ -69,12 +74,35 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
succeed
}

def validate(
validator: IncomingTopologyTransactionAuthorizationValidator,
timestamp: CantonTimestamp,
toValidate: Seq[GenericSignedTopologyTransaction],
inStore: Map[MappingHash, GenericSignedTopologyTransaction],
expectFullAuthorization: Boolean,
)(implicit traceContext: TraceContext) = {
MonadUtil
.sequentialTraverse(toValidate)(tx =>
validator.validateAndUpdateHeadAuthState(
timestamp,
tx,
inStore.get(tx.mapping.uniqueKey),
expectFullAuthorization,
)
)
.map { results =>
val (aggregations, transactions) = results.unzip
(aggregations.combineAll, transactions)
}
}

"receiving transactions with signatures" should {
"succeed to add if the signature is valid" in {
val validator = mk()
import Factory.*
for {
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(0),
List(ns1k1_k1, ns1k2_k1),
Map.empty,
@ -89,7 +117,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
import Factory.*
val invalid = ns1k2_k1.copy(signatures = ns1k1_k1.signatures)
for {
(_, validatedTopologyTransactions) <- validator.validateAndUpdateHeadAuthState(
(_, validatedTopologyTransactions) <- validate(
validator,
ts(0),
List(ns1k1_k1, invalid),
Map.empty,
@ -116,7 +145,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val okmS1k7_k1_missing_k7 =
okmS1k7_k1.removeSignatures(Set(SigningKeys.key7.fingerprint)).value
for {
(_, validatedTopologyTransactions) <- validator.validateAndUpdateHeadAuthState(
(_, validatedTopologyTransactions) <- validate(
validator,
ts(0),
List(ns1k1_k1, okmS1k7_k1_missing_k7),
Map.empty,
@ -133,37 +163,41 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
}
}

// TODO(#12390) resuscitate
// "reject if the transaction is for the wrong domain" in {
// val validator = mk()
// import Factory.*
// val wrongDomain = DomainId(UniqueIdentifier.tryCreate("wrong", ns1.fingerprint.unwrap))
// val pid = ParticipantId(UniqueIdentifier.tryCreate("correct", ns1.fingerprint.unwrap))
// val wrong = mkAdd(
// ParticipantState(
// RequestSide.Both,
// wrongDomain,
// pid,
// ParticipantPermission.Submission,
// TrustLevel.Ordinary,
// ),
// Factory.SigningKeys.key1,
// )
// for {
// res <- validator.validateAndUpdateHeadAuthState(ts(0), List(ns1k1_k1, wrong))
// } yield {
// check(
// res._2,
// Seq(
// None,
// Some({
// case TopologyTransactionRejection.WrongDomain(_) => true
// case _ => false
// }),
// ),
// )
// }
// }
"reject if the transaction is for the wrong domain" in {
val validator = mk()
import Factory.*
val wrongDomain = DomainId(UniqueIdentifier.tryCreate("wrong", ns1.fingerprint.unwrap))
val pid = ParticipantId(UniqueIdentifier.tryCreate("correct", ns1.fingerprint.unwrap))
val wrong = mkAdd(
DomainTrustCertificate(
pid,
wrongDomain,
false,
Seq.empty,
),
Factory.SigningKeys.key1,
)
for {
res <- validate(
validator,
ts(0),
List(ns1k1_k1, wrong),
Map.empty,
expectFullAuthorization = false,
)
} yield {
check(
res._2,
Seq(
None,
Some({
case TopologyTransactionRejection.WrongDomain(_) => true
case _ => false
}),
),
)
}
}
}

"observing namespace delegations" should {
@ -171,7 +205,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(0),
List(ns1k1_k1, ns1k2_k1, ns1k3_k2),
Map.empty,
@ -181,11 +216,46 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
check(res._2, Seq(None, None, None))
}
}
"fail if the signature of a root certificate is not valid" in {
val validator = mk()
import Factory.*

val sig_k1_emptySignature = Signature
.fromProtoV30(ns1k1_k1.signatures.head1.toProtoV30.copy(signature = ByteString.empty()))
.value
val ns1k1_k1WithEmptySignature =
ns1k1_k1.copy(signatures = NonEmpty(Set, sig_k1_emptySignature))

for {
res <- validate(
validator,
ts(0),
List(ns1k1_k1WithEmptySignature, ns1k2_k1),
Map.empty,
expectFullAuthorization = true,
)
} yield {
check(
res._2,
Seq(
Some({
case TopologyTransactionRejection.SignatureCheckFailed(
InvalidSignature(`sig_k1_emptySignature`, _, _)
) =>
true
case _ => false
}),
Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key1.fingerprint))),
),
)
}
}
"fail if transaction is not properly authorized" in {
val validator = mk()
import Factory.*
for {
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(0),
List(ns1k1_k1, ns6k3_k6, ns1k3_k2, ns1k2_k1, ns1k3_k2),
Map.empty,
@ -217,7 +287,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
removeTxs = Set.empty,
additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)),
)
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(1),
List(ns1k2_k1, ns1k3_k2),
Map.empty,
@ -232,7 +303,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(1),
List(ns1k1_k1, ns1k3_k2, id1ak4_k2, ns1k2_k1, ns6k3_k6, id1ak4_k1),
Map.empty,
@ -261,7 +333,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(0),
List(ns1k1_k1, id1ak4_k1, ns1k2_k1, id1ak4_k2),
Map.empty,
@ -275,7 +348,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(0),
List(id1ak4_k1, ns1k1_k1, id1ak4_k1, id6k4_k1),
Map.empty,
@ -301,7 +375,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(0),
List(ns1k1_k1, ns1k2_k1, okm1ak5k1E_k2, p1p1B_k2, id1ak4_k1, ns6k6_k6, p1p6_k2k6),
Map.empty,
@ -315,21 +390,41 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validator.validateAndUpdateHeadAuthState(
resultExpectFullAuthorization <- validate(
validator,
ts(0),
List(ns1k1_k1, okm1ak5k1E_k2, p1p1B_k2),
Map.empty,
expectFullAuthorization = true,
)
// also check that insufficiently authorized non-proposals get rejected with expectFullAuthorization
resultDontExpectFullAuthorization <- validate(
validator,
ts(0),
List(ns1k1_k1, okm1ak5k1E_k2, p1p1B_k2),
Map.empty,
expectFullAuthorization = false,
)

} yield {
check(
res._2,
resultExpectFullAuthorization._2,
Seq(
None,
Some(_ == NotAuthorized),
Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))),
),
)

check(
resultDontExpectFullAuthorization._2,
Seq(
None,
Some(_ == NotAuthorized),
Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))),
),
)

}
}
"succeed with loading existing identifier delegations" in {
@ -345,7 +440,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
removeTxs = Set.empty,
additions = List(ns1k1_k1, ns6k6_k6, id1ak4_k1).map(ValidatedTopologyTransaction(_)),
)
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(1),
List(ns1k2_k1, p1p6_k2k6, p1p1B_k2),
Map.empty,
@ -364,7 +460,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val Rns1k2_k1 = mkTrans(ns1k2_k1.transaction.reverse)
val Rid1ak4_k1 = mkTrans(id1ak4_k1.transaction.reverse)
for {
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(0),
List(ns1k1_k1, ns1k2_k1, id1ak4_k1, Rns1k2_k1, Rid1ak4_k1),
Map.empty,
@ -381,7 +478,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val Rns1k2_k1 = mkTrans(ns1k2_k1.transaction.reverse)
val Rid1ak4_k1 = mkTrans(id1ak4_k1.transaction.reverse)
for {
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(0),
List(ns1k1_k1, ns1k2_k1, id1ak4_k1, Rns1k2_k1, Rid1ak4_k1, okm1ak5k1E_k2, p1p6_k2),
Map.empty,
@ -419,7 +517,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
removeTxs = Set.empty,
additions = List(ns6k6_k6).map(ValidatedTopologyTransaction(_)),
)
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(1),
List(ns1k1_k1, okm1bk5k1E_k1, p1p6_k6),
Map.empty,
@ -444,7 +543,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
removeTxs = Set.empty,
additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)),
)
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(1),
List(Rns1k1_k1, okm1bk5k1E_k1),
Map(Rns1k1_k1.mapping.uniqueKey -> ns1k1_k1),
@ -473,13 +573,15 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
removeTxs = Set.empty,
additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)),
)
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(1),
List(id1ak4_k1),
Map.empty,
expectFullAuthorization = true,
)
res2 <- validator.validateAndUpdateHeadAuthState(
res2 <- validate(
validator,
ts(2),
List(Rid1ak4_k1),
Map.empty,
@ -511,7 +613,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
additions =
List(ns1k1_k1, ns1k2_k1, id1ak4_k2, ns6k6_k6).map(ValidatedTopologyTransaction(_)),
)
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(1),
List(p1p6_k2k6, Rns1k2_k1, id6ak7_k6, p1p6_k2),
Map(
@ -543,29 +646,44 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
import Factory.*

val pid2 = ParticipantId(UniqueIdentifier.tryCreate("participant2", ns2))
val participant2HostsParty1 = mkAddMultiKey(
PartyToParticipant(
val participants_1_2_6_HostParty1 = mkAddMultiKey(
PartyToParticipant.tryCreate(
party1b, // lives in the namespace of p1, corresponding to `SigningKeys.key1`
None,
threshold = PositiveInt.two,
Seq(
HostingParticipant(participant1, ParticipantPermission.Submission),
HostingParticipant(pid2, ParticipantPermission.Submission),
HostingParticipant(participant6, ParticipantPermission.Submission),
),
groupAddressing = false,
),
// both the party's owner and the participant sign
NonEmpty(Set, SigningKeys.key1, SigningKeys.key2),
NonEmpty(Set, SigningKeys.key1, SigningKeys.key2, SigningKeys.key6),
serial = PositiveInt.one,
)

val unhostingMapping = PartyToParticipant(
val unhostingMapping = PartyToParticipant.tryCreate(
party1b,
None,
threshold = PositiveInt.two,
Seq(HostingParticipant(participant1, ParticipantPermission.Submission)),
Seq(
HostingParticipant(participant1, ParticipantPermission.Submission),
HostingParticipant(participant6, ParticipantPermission.Submission),
),
groupAddressing = false,
)
val unhostingMappingAndThresholdChange = PartyToParticipant.tryCreate(
party1b,
None,
threshold = PositiveInt.one,
Seq(
HostingParticipant(participant1, ParticipantPermission.Submission),
HostingParticipant(participant6, ParticipantPermission.Submission),
),
groupAddressing = false,
)

val participant2RemovesItselfUnilaterally = mkAdd(
unhostingMapping,
// only the unhosting participant signs
@ -580,53 +698,54 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
serial = PositiveInt.two,
)

val ptpMappingHash = participant2HostsParty1.mapping.uniqueKey
import monocle.syntax.all.*
val ptpMappingHash = participants_1_2_6_HostParty1.mapping.uniqueKey
for {
_ <- store.update(
SequencedTime(ts(0)),
EffectiveTime(ts(0)),
removeMapping = Map.empty,
removeTxs = Set.empty,
additions = List(ns1k1_k1, ns2k2_k2).map(
additions = List(ns1k1_k1, ns2k2_k2, ns6k6_k6).map(
ValidatedTopologyTransaction(_)
),
)
hostingResult <- validator.validateAndUpdateHeadAuthState(
hostingResult <- validate(
validator,
ts(1),
List(participant2HostsParty1),
transactionsInStore = Map.empty,
List(participants_1_2_6_HostParty1),
inStore = Map.empty,
expectFullAuthorization = false,
)

// unilateral unhosting by participant2 only signed by the participant
unhostingResult <- validator.validateAndUpdateHeadAuthState(
unhostingResult <- validate(
validator,
ts(2),
List(participant2RemovesItselfUnilaterally),
transactionsInStore = Map(ptpMappingHash -> participant2HostsParty1),
inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1),
expectFullAuthorization = false,
)

// it is still allowed to have a mix of signatures for unhosting
unhostingMixedResult <- validator.validateAndUpdateHeadAuthState(
unhostingMixedResult <- validate(
validator,
ts(2),
List(participant2RemovedFullyAuthorized),
transactionsInStore = Map(ptpMappingHash -> participant2HostsParty1),
inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1),
expectFullAuthorization = false,
)

// the participant being removed may not sign if anything else changes
unhostingAndThresholdChangeResult <- validator.validateAndUpdateHeadAuthState(
unhostingAndThresholdChangeResult <- validate(
validator,
ts(2),
List(
mkAddMultiKey(
unhostingMapping
.focus(_.threshold)
.replace(PositiveInt.one),
unhostingMappingAndThresholdChange,
NonEmpty(Set, SigningKeys.key2),
)
),
transactionsInStore = Map(ptpMappingHash -> participant2HostsParty1),
inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1),
expectFullAuthorization = false,
)
} yield {
@ -657,7 +776,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
ValidatedTopologyTransaction(_)
),
)
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(1),
List(dns2),
decentralizedNamespaceWithMultipleOwnerThreshold
@ -695,7 +815,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
ValidatedTopologyTransaction(_)
),
)
res <- validator.validateAndUpdateHeadAuthState(
res <- validate(
validator,
ts(2),
// Analogously to how the TopologyStateProcessor merges the signatures of proposals
// with the same serial, combine the signature of the previous proposal to the current proposal.
@ -711,9 +832,93 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
check(res._2, Seq(None))
}
}

"remove from cache for TopologyChangeOp.REMOVAL" in {
val store =
new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts)
val validator = mk(store)
import Factory.*
for {
// 1. validate and store the decentralized namespace owners root certificates
resultAddOwners <- validate(
validator,
ts(0),
decentralizedNamespaceOwners,
Map.empty,
expectFullAuthorization = true,
)
_ = resultAddOwners._2.foreach(_.rejectionReason shouldBe None)
_ <- store.update(
SequencedTime(ts(0)),
EffectiveTime(ts(0)),
removeMapping = Map.empty,
removeTxs = Set.empty,
additions = resultAddOwners._2,
)

// 2. validate and store the decentralized namespace definition
// this puts the DND authorization graph into the cache
resultAddDND <- validate(
validator,
ts(1),
List(dns1),
Map.empty,
expectFullAuthorization = true,
)
_ = resultAddDND._2.foreach(_.rejectionReason shouldBe None)
_ <- store.update(
SequencedTime(ts(1)),
EffectiveTime(ts(1)),
removeMapping = Map.empty,
removeTxs = Set.empty,
additions = resultAddDND._2,
)

// 3. now process the removal of the decentralized namespace definition
// this should remove the DND authorization graph from the cache
resRemoveDND <- validate(
validator,
ts(2),
List(dns1Removal),
Map(dns1.mapping.uniqueKey -> dns1),
expectFullAuthorization = true,
)
_ = resRemoveDND._2.foreach(_.rejectionReason shouldBe None)
_ <- store.update(
SequencedTime(ts(2)),
EffectiveTime(ts(2)),
removeMapping = Map(dns1Removal.mapping.uniqueKey -> dns1Removal.serial),
removeTxs = Set.empty,
additions = resRemoveDND._2,
)

// 4. Now to the actual test: try to authorize something for the decentralized namespace.
// this should be rejected because the namespace is not valid anymore, and the
// authorization cache has been properly cleaned up.
resultUnauthorizedIDD <- validate(
validator,
ts(3),
List(dns1Idd),
Map.empty,
expectFullAuthorization = true,
)
} yield {
check(
resultUnauthorizedIDD._2,
Seq(
Some(
_ == NoDelegationFoundForKeys(
Set(SigningKeys.key1, SigningKeys.key8, SigningKeys.key9).map(_.fingerprint)
)
)
),
)
}

}
}

def checkProposalFlatAfterValidation(validationIsFinal: Boolean, expectProposal: Boolean) = {
def checkProposalFlagAfterValidation(validationIsFinal: Boolean, expectProposal: Boolean) = {
val store =
new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts)
val validator = mk(store, validationIsFinal)
@ -751,18 +956,18 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
),
BaseTest.testedProtocolVersion,
)
result_packageVetting <- validator
.validateAndUpdateHeadAuthState(
ts(1),
transactionsToValidate = List(
// Setting isProposal=true despite having enough keys.
// This simulates processing a proposal with the signature of a node,
// that got merged with another proposal already in the store.
mkTrans(pkgTx, signingKeys = NonEmpty(Set, key1, key8), isProposal = true)
),
transactionsInStore = Map.empty,
expectFullAuthorization = false,
)
result_packageVetting <- validate(
validator,
ts(1),
toValidate = List(
// Setting isProposal=true despite having enough keys.
// This simulates processing a proposal with the signature of a node,
// that got merged with another proposal already in the store.
mkTrans(pkgTx, signingKeys = NonEmpty(Set, key1, key8), isProposal = true)
),
inStore = Map.empty,
expectFullAuthorization = false,
)

} yield {
val validatedPkgTx = result_packageVetting._2.loneElement
@ -775,11 +980,11 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
}

"change the proposal status when the validation is final" in {
checkProposalFlatAfterValidation(validationIsFinal = true, expectProposal = false)
checkProposalFlagAfterValidation(validationIsFinal = true, expectProposal = false)
}

"not change the proposal status when the validation is not final" in {
checkProposalFlatAfterValidation(validationIsFinal = false, expectProposal = true)
checkProposalFlagAfterValidation(validationIsFinal = false, expectProposal = true)
}

"remove superfluous signatures" in {
@ -820,26 +1025,27 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
),
BaseTest.testedProtocolVersion,
)
resultPackageVetting <- validator
.validateAndUpdateHeadAuthState(
ts(1),
transactionsToValidate = List(
// Signing this transaction also with key9 simulates that ns9 was part of the
// decentralized namespace before and was eligible for signing the transaction.
// After this validation, we expect the signature of key9 to be removed
mkTrans(pkgTx, signingKeys = NonEmpty(Set, key9, key1, key8), isProposal = true)
),
transactionsInStore = Map.empty,
expectFullAuthorization = false,
)
resultPackageVetting <- validate(
validator,
ts(1),
toValidate = List(
// Signing this transaction also with key9 simulates that ns9 was part of the
// decentralized namespace before and was eligible for signing the transaction.
// After this validation, we expect the signature of key9 to be removed
mkTrans(pkgTx, signingKeys = NonEmpty(Set, key9, key1, key8), isProposal = true)
),
inStore = Map.empty,
expectFullAuthorization = false,
)

// if there are only superfluous signatures, reject the transaction
resultOnlySuperfluousSignatures <- validator.validateAndUpdateHeadAuthState(
resultOnlySuperfluousSignatures <- validate(
validator,
ts(2),
transactionsToValidate = List(
toValidate = List(
mkTrans(pkgTx, signingKeys = NonEmpty(Set, key3, key5), isProposal = true)
),
transactionsInStore = Map.empty,
inStore = Map.empty,
expectFullAuthorization = false,
)

@ -893,19 +1099,19 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
expectFullAuthorization: Boolean,
signingKeys: SigningPublicKey*
) = TraceContext.withNewTraceContext { freshTraceContext =>
validator
.validateAndUpdateHeadAuthState(
ts(1),
transactionsToValidate = List(
mkTrans(
pkgTx,
isProposal = isProposal,
signingKeys = NonEmpty.from(signingKeys.toSet).value,
)
),
transactionsInStore = Map.empty,
expectFullAuthorization = expectFullAuthorization,
)(freshTraceContext)
validate(
validator,
ts(1),
toValidate = List(
mkTrans(
pkgTx,
isProposal = isProposal,
signingKeys = NonEmpty.from(signingKeys.toSet).value,
)
),
inStore = Map.empty,
expectFullAuthorization = expectFullAuthorization,
)(freshTraceContext)
.map(_._2.loneElement)
}
|
||||
|
||||
|
@ -5,7 +5,7 @@ package com.digitalasset.canton.topology.processing

import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey}
import com.digitalasset.canton.crypto.SigningPublicKey
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.protocol.TestDomainParameters
import com.digitalasset.canton.time.NonNegativeFiniteDuration
@ -98,7 +98,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:

  val p1p1B_k2 =
    mkAdd(
      PartyToParticipant(
      PartyToParticipant.tryCreate(
        party1b,
        None,
        threshold = PositiveInt.one,
@ -109,7 +109,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
    )
  val p1p6_k2 =
    mkAdd(
      PartyToParticipant(
      PartyToParticipant.tryCreate(
        party1b,
        None,
        threshold = PositiveInt.one,
@ -120,20 +120,20 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
      isProposal = true,
    )
  val p1p6_k6 =
    mkAdd(
      PartyToParticipant(
    mkAddMultiKey(
      PartyToParticipant.tryCreate(
        party1b,
        None,
        threshold = PositiveInt.one,
        Seq(HostingParticipant(participant6, ParticipantPermission.Submission)),
        groupAddressing = false,
      ),
      key6,
      NonEmpty(Set, key1, key6),
      isProposal = true,
    )
  val p1p6_k2k6 =
    mkAddMultiKey(
      PartyToParticipant(
      PartyToParticipant.tryCreate(
        party1b,
        None,
        threshold = PositiveInt.one,
@ -145,7 +145,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:

  val p1p6B_k3 =
    mkAdd(
      PartyToParticipant(
      PartyToParticipant.tryCreate(
        party1b,
        Some(domainId1),
        threshold = PositiveInt.one,
@ -192,6 +192,15 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
    NonEmpty(Set, key1, key8, key9),
    serial = PositiveInt.one,
  )
  val dns1Removal = mkRemove(
    dns1.mapping,
    NonEmpty(Set, key1, key8, key9),
    serial = PositiveInt.two,
  )
  val dns1Idd = mkAddMultiKey(
    IdentifierDelegation(UniqueIdentifier.tryCreate("test", dns1.mapping.namespace), key4),
    NonEmpty(Set, key1, key8, key9),
  )
  val dns2 = mkAdd(
    DecentralizedNamespaceDefinition
      .create(ns7, PositiveInt.one, NonEmpty(Set, ns1))
@ -214,15 +223,19 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
    serial = PositiveInt.two,
    isProposal = true,
  )
  val decentralizedNamespaceOwners = List(ns1k1_k1, ns8k8_k8, ns9k9_k9)
  val decentralizedNamespaceWithMultipleOwnerThreshold =
    List(ns1k1_k1, ns8k8_k8, ns9k9_k9, ns7k7_k7, dns1)

  private val dndOwners =
    NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_))
  private val dndNamespace = DecentralizedNamespaceDefinition.computeNamespace(dndOwners)
  val dnd_proposal_k1 = mkAdd(
    DecentralizedNamespaceDefinition
      .create(
        Namespace(Fingerprint.tryCreate("dnd-namespace")),
        dndNamespace,
        PositiveInt.two,
        NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)),
        dndOwners,
      )
      .fold(sys.error, identity),
    signingKey = key1,
@ -231,7 +244,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
  val dnd_proposal_k2 = mkAdd(
    DecentralizedNamespaceDefinition
      .create(
        Namespace(Fingerprint.tryCreate("dnd-namespace")),
        dndNamespace,
        PositiveInt.two,
        NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)),
      )
@ -242,7 +255,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
  val dnd_proposal_k3 = mkAdd(
    DecentralizedNamespaceDefinition
      .create(
        Namespace(Fingerprint.tryCreate("dnd-namespace")),
        dndNamespace,
        PositiveInt.two,
        NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)),
      )

@ -89,7 +89,7 @@ class TopologyStoreTestData(
    serial = PositiveInt.tryCreate(1),
  )
  val tx3_PTP_Proposal = makeSignedTx(
    PartyToParticipant(
    PartyToParticipant.tryCreate(
      partyId = fredOfCanton,
      domainId = None,
      threshold = PositiveInt.one,
@ -116,7 +116,7 @@ class TopologyStoreTestData(
    serial = PositiveInt.tryCreate(2),
  )
  val tx5_PTP = makeSignedTx(
    PartyToParticipant(
    PartyToParticipant.tryCreate(
      partyId = fredOfCanton,
      domainId = None,
      threshold = PositiveInt.one,

@ -12,6 +12,8 @@ import com.digitalasset.canton.topology.{
  GeneratorsTopology,
  MediatorId,
  Namespace,
  ParticipantId,
  PartyId,
  SequencerId,
}
import com.digitalasset.canton.version.ProtocolVersion
@ -20,6 +22,8 @@ import magnolify.scalacheck.auto.*
import org.scalacheck.{Arbitrary, Gen}
import org.scalatest.EitherValues.*

import scala.math.Ordering.Implicits.*

final class GeneratorsTransaction(
    protocolVersion: ProtocolVersion,
    generatorsProtocol: GeneratorsProtocol,
@ -49,6 +53,18 @@ final class GeneratorsTransaction(
    Arbitrary(Generators.nonEmptySetGen[PublicKey].map(_.toSeq))
  implicit val topologyTransactionMappingsArb: Arbitrary[NonEmpty[Seq[TopologyMapping]]] =
    Arbitrary(Generators.nonEmptySetGen[TopologyMapping].map(_.toSeq))
  implicit val topologyTransactionPartyIdsArb: Arbitrary[NonEmpty[Seq[PartyId]]] =
    Arbitrary(Generators.nonEmptySetGen[PartyId].map(_.toSeq))
  implicit val topologyTransactionHostingParticipantsArb
      : Arbitrary[NonEmpty[Seq[HostingParticipant]]] =
    Arbitrary(Generators.nonEmptySetGen[HostingParticipant].map(_.toSeq))

  implicit val hostingParticipantArb: Arbitrary[HostingParticipant] = Arbitrary(
    for {
      pid <- Arbitrary.arbitrary[ParticipantId]
      permission <- Arbitrary.arbitrary[ParticipantPermission]
    } yield HostingParticipant(pid, permission)
  )

  implicit val topologyMappingArb: Arbitrary[TopologyMapping] = genArbitrary

@ -89,6 +105,31 @@ final class GeneratorsTransaction(
    } yield PurgeTopologyTransaction.create(domain, mappings).value
  )

  implicit val authorityOfTopologyTransactionArb: Arbitrary[AuthorityOf] = Arbitrary(
    for {
      partyId <- Arbitrary.arbitrary[PartyId]
      domain <- Arbitrary.arbitrary[Option[DomainId]]
      authorizers <- Arbitrary.arbitrary[NonEmpty[Seq[PartyId]]]
      // Not using Arbitrary.arbitrary[PositiveInt] for threshold to honor constraint
      threshold <- Gen.choose(1, authorizers.size).map(PositiveInt.tryCreate)
    } yield AuthorityOf.create(partyId, domain, threshold, authorizers).value
  )

  implicit val partyToParticipantTopologyTransactionArb: Arbitrary[PartyToParticipant] = Arbitrary(
    for {
      partyId <- Arbitrary.arbitrary[PartyId]
      domain <- Arbitrary.arbitrary[Option[DomainId]]
      participants <- Arbitrary.arbitrary[NonEmpty[Seq[HostingParticipant]]]
      // Not using Arbitrary.arbitrary[PositiveInt] for threshold to honor constraint
      threshold <- Gen
        .choose(1, participants.count(_.permission >= ParticipantPermission.Confirmation).max(1))
        .map(PositiveInt.tryCreate)
      groupAddressing <- Arbitrary.arbitrary[Boolean]
    } yield PartyToParticipant
      .create(partyId, domain, threshold, participants, groupAddressing)
      .value
  )

  implicit val sequencerDomainStateArb: Arbitrary[SequencerDomainState] = Arbitrary(
    for {
      domain <- Arbitrary.arbitrary[DomainId]

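The two new generators above share a pattern worth noting: instead of drawing an arbitrary `PositiveInt` and filtering out invalid samples, the threshold is drawn directly from the valid range so every generated mapping satisfies its own constraint. A small self-contained sketch of the same idea, with a hypothetical stand-in generator in place of the real `PartyId` arbitrary:

import org.scalacheck.Gen

// partyGen is a stand-in; the real code uses Arbitrary[PartyId].
val partyGen: Gen[String] = Gen.identifier

// Draw the threshold from [1, authorizers.size], so no sample is ever discarded.
val authorityOfLikeGen: Gen[(List[String], Int)] = for {
  authorizers <- Gen.nonEmptyListOf(partyGen)
  threshold <- Gen.choose(1, authorizers.size)
} yield (authorizers, threshold)
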
@ -5,12 +5,16 @@ package com.digitalasset.canton.topology.transaction

import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.crypto.Fingerprint
import com.digitalasset.canton.protocol.{DynamicDomainParameters, OnboardingRestriction}
import com.digitalasset.canton.topology.DefaultTestIdentities.{mediatorId, sequencerId}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore
import com.digitalasset.canton.topology.store.TopologyTransactionRejection.InvalidTopologyMapping
import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{
  InvalidTopologyMapping,
  PartyExceedsHostingLimit,
}
import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore
import com.digitalasset.canton.topology.store.{
  StoredTopologyTransaction,
@ -97,35 +101,64 @@ class ValidatingTopologyMappingChecksTest
      }
    }

    "validating PartyToParticipant" should {
    "validating DecentralizedNamespaceDefinition" should {
      "reject namespaces not derived from their owners' namespaces" in {
        val (checks, store) = mk()
        val keys = NonEmpty.mk(
          Set,
          factory.SigningKeys.key1,
          factory.SigningKeys.key2,
          factory.SigningKeys.key3,
        )
        val (namespaces, rootCerts) =
          keys.map { key =>
            val namespace = Namespace(key.fingerprint)
            namespace -> factory.mkAdd(
              NamespaceDelegation.tryCreate(
                namespace,
                key,
                isRootDelegation = true,
              ),
              signingKey = key,
            )
          }.unzip

      "reject an invalid threshold" in {
        val (checks, _) = mk()
        addToStore(store, rootCerts.toSeq*)

        val failureCases = Seq[(PositiveInt, Seq[HostingParticipant])](
          PositiveInt.two -> Seq(participant1 -> Observation, participant2 -> Confirmation),
          PositiveInt.two -> Seq(participant1 -> Observation, participant2 -> Submission),
          PositiveInt.two -> Seq(participant1 -> Submission),
          PositiveInt.one -> Seq(participant1 -> Observation),
        val dns = factory.mkAddMultiKey(
          DecentralizedNamespaceDefinition
            .create(
              Namespace(Fingerprint.tryCreate("bogusNamespace")),
              PositiveInt.one,
              NonEmpty.from(namespaces).value.toSet,
            )
            .value,
          signingKeys = keys,
          // using serial=2 here to test that we don't special case serial=1
          serial = PositiveInt.two,
        )

        failureCases.foreach { case (threshold, participants) =>
          val ptp = factory.mkAdd(
            PartyToParticipant(
              party1,
              None,
              threshold,
              participants,
              groupAddressing = false,
            )
          )

          checkTransaction(checks, ptp) should matchPattern {
            case Left(TopologyTransactionRejection.ThresholdTooHigh(`threshold`.value, _)) =>
          }
        checkTransaction(checks, dns, None) should matchPattern {
          case Left(TopologyTransactionRejection.InvalidTopologyMapping(err))
              if err.contains("not derived from the owners") =>
        }
      }

      // TODO(#19716) how does one produce a key with a specific hash? by using symbolic crypto?
      "reject if a root certificate with the same namespace already exists" ignore {
        fail("TODO(#19716)")
      }
    }

    "validating NamespaceDelegation" should {
      // TODO(#19715) how does one produce a key with a specific hash? by using symbolic crypto?
      "reject a root certificate if a decentralized namespace with the same namespace already exists" ignore {
        fail("TODO(#19715)")
      }
    }

    "validating PartyToParticipant" should {

      "reject when participants don't have a DTC" in {
        val (checks, store) = mk()
        addToStore(store, p2_dtc)
@ -134,7 +167,7 @@ class ValidatingTopologyMappingChecksTest

        failureCases.foreach { participants =>
          val ptp = factory.mkAdd(
            PartyToParticipant(
            PartyToParticipant.tryCreate(
              party1,
              None,
              PositiveInt.one,
@ -163,7 +196,7 @@ class ValidatingTopologyMappingChecksTest

        missingKeyCases.foreach { participant =>
          val ptp = factory.mkAdd(
            PartyToParticipant(
            PartyToParticipant.tryCreate(
              party1,
              None,
              PositiveInt.one,
@ -177,6 +210,40 @@ class ValidatingTopologyMappingChecksTest
        }
      }

      "reject when the party exceeds the explicitly issued PartyHostingLimits" in {
        def mkPTP(numParticipants: Int) = {
          val hostingParticipants = Seq[HostingParticipant](
            participant1 -> Observation,
            participant2 -> Submission,
            participant3 -> Submission,
          )
          factory.mkAdd(
            PartyToParticipant.tryCreate(
              partyId = party1,
              domainId = None,
              threshold = PositiveInt.one,
              participants = hostingParticipants.take(numParticipants),
              groupAddressing = false,
            )
          )
        }

        val (checks, store) = mk()
        val limits = factory.mkAdd(PartyHostingLimits(domainId, party1, 2))
        addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc, p3_otk, p3_dtc, limits)

        // 2 participants are at the limit
        val twoParticipants = mkPTP(numParticipants = 2)
        checkTransaction(checks, twoParticipants) shouldBe Right(())

        // 3 participants exceed the limit imposed by the domain
        val threeParticipants = mkPTP(numParticipants = 3)
        checkTransaction(checks, threeParticipants) shouldBe Left(
          PartyExceedsHostingLimit(party1, 2, 3)
        )

      }

      "report no errors for valid mappings" in {
        val (checks, store) = mk()
        addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc, p3_otk, p3_dtc)
@ -195,7 +262,7 @@ class ValidatingTopologyMappingChecksTest

        validCases.foreach { case (threshold, participants) =>
          val ptp = factory.mkAdd(
            PartyToParticipant(
            PartyToParticipant.tryCreate(
              party1,
              None,
              threshold,
@ -213,7 +280,7 @@ class ValidatingTopologyMappingChecksTest
      "reject a removal when the participant still hosts a party" in {
        val (checks, store) = mk()
        val ptp = factory.mkAdd(
          PartyToParticipant(
          PartyToParticipant.tryCreate(
            party1,
            None,
            PositiveInt.one,
@ -385,35 +452,6 @@ class ValidatingTopologyMappingChecksTest
          )
        )
      }

      "report ThresholdTooHigh" in {
        val (checks, store) = mk()
        val (Seq(med1, med2), transactions) = generateMemberIdentities(2, MediatorId(_))
        addToStore(store, transactions*)

        // using reflection to create an instance via the private constructor
        // so we can bypass the checks in MediatorDomainState.create
        val ctr = classOf[MediatorDomainState].getConstructor(
          classOf[DomainId],
          classOf[NonNegativeInt],
          classOf[PositiveInt],
          classOf[Object],
          classOf[Seq[MediatorId]],
        )
        val invalidMapping = ctr.newInstance(
          domainId,
          NonNegativeInt.zero,
          PositiveInt.three, // threshold higher than number of active mediators
          NonEmpty(Seq, med1, med2),
          Seq.empty,
        )

        val mds = factory.mkAdd(invalidMapping, factory.SigningKeys.key1)

        checkTransaction(checks, mds) shouldBe Left(
          TopologyTransactionRejection.ThresholdTooHigh(3, 2)
        )
      }
    }

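The removed ThresholdTooHigh test above relies on constructing a mapping that the `create` factory would refuse, by calling the class constructor reflectively. A self-contained sketch of that trick, under the assumption that the validated type keeps its constructor private and funnels all checked construction through `create`; `Config` is a stand-in for MediatorDomainState or SequencerDomainState:

// Stand-in for a factory-validated topology mapping type.
final class Config private (val threshold: Int, val members: Seq[String])
object Config {
  def create(threshold: Int, members: Seq[String]): Either[String, Config] =
    Either.cond(threshold <= members.size, new Config(threshold, members), "threshold too high")
}

val ctor = classOf[Config].getDeclaredConstructor(classOf[Int], classOf[Seq[String]])
ctor.setAccessible(true)
// threshold 3 with only 2 members: deliberately invalid, bypassing Config.create
val invalid = ctor.newInstance(Int.box(3), Seq("m1", "m2"))
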
"validating SequencerDomainState" should {
|
||||
@ -491,33 +529,6 @@ class ValidatingTopologyMappingChecksTest
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
"report ThresholdTooHigh" in {
|
||||
val (checks, store) = mk()
|
||||
val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_))
|
||||
addToStore(store, transactions*)
|
||||
|
||||
// using reflection to create an instance via the private constructor
|
||||
// so we can bypass the checks in SequencerDomainState.create
|
||||
val ctr = classOf[SequencerDomainState].getConstructor(
|
||||
classOf[DomainId],
|
||||
classOf[PositiveInt],
|
||||
classOf[Object],
|
||||
classOf[Seq[SequencerId]],
|
||||
)
|
||||
val invalidMapping = ctr.newInstance(
|
||||
domainId,
|
||||
PositiveInt.three, // threshold higher than number of active sequencers
|
||||
NonEmpty(Seq, seq1, seq2),
|
||||
Seq.empty,
|
||||
)
|
||||
|
||||
val sds = factory.mkAdd(invalidMapping, factory.SigningKeys.key1)
|
||||
|
||||
checkTransaction(checks, sds) shouldBe Left(
|
||||
TopologyTransactionRejection.ThresholdTooHigh(3, 2)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
"validating OwnerToKeyMapping" should {
|
||||
@ -582,7 +593,7 @@ class ValidatingTopologyMappingChecksTest
|
||||
"validating AuthorityOf" should {
|
||||
val ptps @ Seq(p1_ptp, p2_ptp, p3_ptp) = Seq(party1, party2, party3).map { party =>
|
||||
factory.mkAdd(
|
||||
PartyToParticipant(
|
||||
PartyToParticipant.tryCreate(
|
||||
party,
|
||||
None,
|
||||
PositiveInt.one,
|
||||
@ -596,7 +607,9 @@ class ValidatingTopologyMappingChecksTest
|
||||
addToStore(store, ptps*)
|
||||
|
||||
val authorityOf =
|
||||
factory.mkAdd(AuthorityOf(party1, None, PositiveInt.two, Seq(party2, party3)))
|
||||
factory.mkAdd(
|
||||
AuthorityOf.create(party1, None, PositiveInt.two, Seq(party2, party3)).value
|
||||
)
|
||||
checkTransaction(checks, authorityOf) shouldBe Right(())
|
||||
}
|
||||
|
||||
@ -605,40 +618,31 @@ class ValidatingTopologyMappingChecksTest
|
||||
addToStore(store, p1_ptp)
|
||||
|
||||
val missingAuthorizingParty =
|
||||
factory.mkAdd(AuthorityOf(party2, None, PositiveInt.one, Seq(party1)))
|
||||
factory.mkAdd(AuthorityOf.create(party2, None, PositiveInt.one, Seq(party1)).value)
|
||||
checkTransaction(checks, missingAuthorizingParty) shouldBe Left(
|
||||
TopologyTransactionRejection.UnknownParties(Seq(party2))
|
||||
)
|
||||
|
||||
val missingAuthorizedParty =
|
||||
factory.mkAdd(AuthorityOf(party1, None, PositiveInt.one, Seq(party2)))
|
||||
factory.mkAdd(AuthorityOf.create(party1, None, PositiveInt.one, Seq(party2)).value)
|
||||
checkTransaction(checks, missingAuthorizedParty) shouldBe Left(
|
||||
TopologyTransactionRejection.UnknownParties(Seq(party2))
|
||||
)
|
||||
|
||||
val missingAllParties =
|
||||
factory.mkAdd(AuthorityOf(party2, None, PositiveInt.one, Seq(party3)))
|
||||
factory.mkAdd(AuthorityOf.create(party2, None, PositiveInt.one, Seq(party3)).value)
|
||||
checkTransaction(checks, missingAllParties) shouldBe Left(
|
||||
TopologyTransactionRejection.UnknownParties(Seq(party2, party3))
|
||||
)
|
||||
|
||||
val missingMixedParties =
|
||||
factory.mkAdd(AuthorityOf(party2, None, PositiveInt.one, Seq(party1, party3)))
|
||||
factory.mkAdd(
|
||||
AuthorityOf.create(party2, None, PositiveInt.one, Seq(party1, party3)).value
|
||||
)
|
||||
checkTransaction(checks, missingMixedParties) shouldBe Left(
|
||||
TopologyTransactionRejection.UnknownParties(Seq(party2, party3))
|
||||
)
|
||||
}
|
||||
|
||||
"report ThresholdTooHigh if the threshold is higher than the number of authorized parties" in {
|
||||
val (checks, store) = mk()
|
||||
addToStore(store, ptps*)
|
||||
|
||||
val thresholdTooHigh =
|
||||
factory.mkAdd(AuthorityOf(party1, None, PositiveInt.three, Seq(party2, party3)))
|
||||
checkTransaction(checks, thresholdTooHigh) shouldBe Left(
|
||||
TopologyTransactionRejection.ThresholdTooHigh(3, 2)
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -32,7 +32,7 @@ class TracedScaffeineTest extends AsyncWordSpec with BaseTest {
      }
    }.failOnShutdown

    "Handle an AbortDueToShutdownException" in {
    "Handle AbortDueToShutdownException in get" in {
      val keysCache =
        TracedScaffeine.buildTracedAsyncFutureUS[Int, Int](
          cache = CachingConfigs.testing.mySigningKeyCache.buildScaffeine(),
@ -47,6 +47,22 @@ class TracedScaffeineTest extends AsyncWordSpec with BaseTest {
      }
    }

    // Note that Scaffeine.getAll returns a failed future that wraps the underlying exception
    // with java.util.concurrent.CompletionException
    "Handle AbortDueToShutdownException in getAll" in {
      val keysCache =
        TracedScaffeine.buildTracedAsyncFutureUS[Int, Int](
          cache = CachingConfigs.testing.mySigningKeyCache.buildScaffeine(),
          loader = traceContext => input => getValueBroken(input),
        )(logger)

      for {
        result <- keysCache.getAllUS(Set(10)).unwrap
      } yield {
        result shouldBe UnlessShutdown.AbortedDueToShutdown
      }
    }

    "Allow entries to be cleared" in {

      val loads = new AtomicInteger(0)

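The getAll test hinges on the subtlety noted in the comment: Caffeine/Scaffeine bulk loads fail their futures with java.util.concurrent.CompletionException wrapping the real cause, so shutdown detection has to inspect the cause rather than the outer exception. A minimal sketch of that unwrapping, independent of the Canton cache wrapper:

import java.util.concurrent.CompletionException
import scala.concurrent.{ExecutionContext, Future}

// Re-fail with the underlying cause so callers can match on the original exception.
def unwrapCompletion[A](f: Future[A])(implicit ec: ExecutionContext): Future[A] =
  f.recoverWith { case ce: CompletionException if ce.getCause != null =>
    Future.failed(ce.getCause)
  }
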
@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8
sdk-version: 3.1.0-snapshot.20240620.13140.0.v996a1164
build-options:
- --target=2.1
name: ai-analysis

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8
sdk-version: 3.1.0-snapshot.20240620.13140.0.v996a1164
build-options:
- --target=2.1
name: bank

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8
sdk-version: 3.1.0-snapshot.20240620.13140.0.v996a1164
build-options:
- --target=2.1
name: doctor

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8
sdk-version: 3.1.0-snapshot.20240620.13140.0.v996a1164
build-options:
- --target=2.1
name: health-insurance

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8
sdk-version: 3.1.0-snapshot.20240620.13140.0.v996a1164
build-options:
- --target=2.1
name: medical-records

@ -33,7 +33,6 @@ import com.digitalasset.canton.domain.block.update.{
import com.digitalasset.canton.domain.sequencing.integrations.state.statemanager.MemberCounters
import com.digitalasset.canton.domain.sequencing.sequencer.block.BlockSequencer
import com.digitalasset.canton.domain.sequencing.sequencer.errors.CreateSubscriptionError
import com.digitalasset.canton.domain.sequencing.sequencer.traffic.SequencerRateLimitManager
import com.digitalasset.canton.domain.sequencing.sequencer.{Sequencer, SequencerIntegration}
import com.digitalasset.canton.error.BaseAlarm
import com.digitalasset.canton.lifecycle.{
@ -50,7 +49,7 @@ import com.digitalasset.canton.topology.{DomainId, Member, SequencerId}
import com.digitalasset.canton.tracing.{TraceContext, Traced}
import com.digitalasset.canton.util.PekkoUtil.syntax.*
import com.digitalasset.canton.util.ShowUtil.*
import com.digitalasset.canton.util.{ErrorUtil, LoggerUtil, MapsUtil, MonadUtil}
import com.digitalasset.canton.util.{ErrorUtil, LoggerUtil, MapsUtil}
import com.digitalasset.canton.version.ProtocolVersion
import com.google.common.annotations.VisibleForTesting
import org.apache.pekko.stream.KillSwitches
@ -128,7 +127,6 @@ class BlockSequencerStateManager(
    override val maybeLowerTopologyTimestampBound: Option[CantonTimestamp],
    override protected val timeouts: ProcessingTimeout,
    protected val loggerFactory: NamedLoggerFactory,
    rateLimitManager: SequencerRateLimitManager,
    unifiedSequencer: Boolean,
)(implicit executionContext: ExecutionContext)
    extends BlockSequencerStateManagerBase
@ -402,12 +400,13 @@ class BlockSequencerStateManager(
      )
      .map { case (_, event) =>
        if (event.isTombstone) {
          val err =
            s"Encountered tombstone ${event.counter} and ${event.timestamp} for $member"
          logger.warn(s"Terminating subscription due to: $err")(event.traceContext)
          Left(
            SequencerSubscriptionError.TombstoneEncountered.Error(err)
          val err = SequencerSubscriptionError.TombstoneEncountered.Error(
            event.counter,
            member,
            event.timestamp,
          )
          logger.warn(s"Terminating subscription due to: ${err.cause}")(event.traceContext)
          Left(err)
        } else {
          Right(event)
        }
@ -446,20 +445,6 @@ class BlockSequencerStateManager(
    newHead
  }

  private def updateMemberCounterSupportedAfter(member: Member, counter: SequencerCounter)(implicit
      traceContext: TraceContext
  ): Future[Unit] =
    store
      .updateMemberCounterSupportedAfter(member, counter)
      .map(_ =>
        countersSupportedAfter.getAndUpdate { previousCounters =>
          if (previousCounters.get(member).exists(_ >= counter))
            previousCounters
          else
            previousCounters + (member -> counter)
        }.discard
      )

  private def handleChunkUpdate(
      priorHead: HeadState,
      update: ChunkUpdate[SignedChunkEvents],
@ -553,13 +538,6 @@ class BlockSequencerStateManager(
        membersDisabled = Seq.empty,
        inFlightAggregationUpdates = update.inFlightAggregationUpdates,
      )
      _ <- MonadUtil.sequentialTraverse[(Member, SequencerCounter), Future, Unit](
        update.events
          .flatMap(_.events)
          .collect {
            case (member, tombstone) if tombstone.isTombstone => member -> tombstone.counter
          }
      ) { case (member, counter) => updateMemberCounterSupportedAfter(member, counter) }
    } yield {
      // head state update must happen before member counters are updated
      // as otherwise, if we have a registration in between counter-signalling and head-state,
@ -788,7 +766,6 @@ object BlockSequencerStateManager {
      enableInvariantCheck: Boolean,
      timeouts: ProcessingTimeout,
      loggerFactory: NamedLoggerFactory,
      rateLimitManager: SequencerRateLimitManager,
      unifiedSequencer: Boolean,
  )(implicit
      executionContext: ExecutionContext,
@ -809,7 +786,6 @@ object BlockSequencerStateManager {
      maybeLowerTopologyTimestampBound = maybeLowerTopologyTimestampBound,
      timeouts = timeouts,
      loggerFactory = loggerFactory,
      rateLimitManager = rateLimitManager,
      unifiedSequencer = unifiedSequencer,
    )
  }

@ -336,6 +336,7 @@ private[update] final class BlockChunkProcessor(
      case Some(params) =>
        newMembers.toList
          .parTraverse_ { case (member, timestamp) =>
            // Note: in unified sequencer mode, rate limiter uses a default value if member is not present in its state
            rateLimitManager
              .registerNewMemberAt(
                member,

@ -702,30 +702,29 @@ private[update] final class SubmissionRequestValidator(
      }

      topologyTimestampO = submissionRequest.topologyTimestamp
      members =
        groupToMembers.values.flatten.toSet ++ submissionRequest.batch.allMembers + submissionRequest.sender
      events =
        if (unifiedSequencer) {
          Map.empty[Member, Deliver[ClosedEnvelope]]
        } else {
          (groupToMembers.values.flatten.toSet ++ submissionRequest.batch.allMembers + submissionRequest.sender).toSeq.map {
            member =>
              val groups = groupToMembers.collect {
                case (groupAddress, members) if members.contains(member) => groupAddress
              }.toSet
              val deliver = Deliver.create(
                state.tryNextCounter(member),
                sequencingTimestamp,
                domainId,
                Option.when(member == submissionRequest.sender)(submissionRequest.messageId),
                Batch.filterClosedEnvelopesFor(aggregatedBatch, member, groups),
                topologyTimestampO,
                protocolVersion,
                Option.empty[TrafficReceipt],
              )
              member -> deliver
          members.toSeq.map { member =>
            val groups = groupToMembers.collect {
              case (groupAddress, members) if members.contains(member) => groupAddress
            }.toSet
            val deliver = Deliver.create(
              state.tryNextCounter(member),
              sequencingTimestamp,
              domainId,
              Option.when(member == submissionRequest.sender)(submissionRequest.messageId),
              Batch.filterClosedEnvelopesFor(aggregatedBatch, member, groups),
              topologyTimestampO,
              protocolVersion,
              Option.empty[TrafficReceipt],
            )
            member -> deliver
          }.toMap
        }
      members =
        groupToMembers.values.flatten.toSet ++ submissionRequest.batch.allMembers + submissionRequest.sender
      aggregationUpdate = aggregationOutcome.map {
        case (aggregationId, inFlightAggregationUpdate, _) =>
          aggregationId -> inFlightAggregationUpdate

@ -195,9 +195,7 @@ private[update] class TrafficControlValidator(
            sender,
            // When above traffic limit we don't consume traffic, hence cost = 0
            Some(
              error.trafficState.toTrafficReceipt(
                consumedCost = NonNegativeLong.zero
              )
              error.trafficState.copy(lastConsumedCost = NonNegativeLong.zero).toTrafficReceipt
            ),
          )
        // Outdated event costs are possible if the sender is too far behind and out of the tolerance window.

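The shape of this change: the consumed cost now lives on the traffic state itself, so `toTrafficReceipt` takes no argument and zero-cost paths reset the field via `copy` before converting. A stand-in sketch of that conversion, with simplified types rather than the real Canton ones:

final case class Receipt(consumedCost: Long, extraTrafficConsumed: Long, baseTrafficRemainder: Long)

final case class TrafficState(extraTrafficConsumed: Long, baseTrafficRemainder: Long, lastConsumedCost: Long) {
  // No cost parameter: the receipt reports whatever the state last recorded.
  def toTrafficReceipt: Receipt = Receipt(lastConsumedCost, extraTrafficConsumed, baseTrafficRemainder)
}

val state = TrafficState(extraTrafficConsumed = 100, baseTrafficRemainder = 20, lastConsumedCost = 5)
// Rejected submission: nothing was consumed, so report cost = 0, as in the hunk above.
val rejected = state.copy(lastConsumedCost = 0).toTrafficReceipt
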
@ -42,7 +42,7 @@ import scala.concurrent.{ExecutionContext, Future}
/** Scalable service to validate the received MediatorConfirmationRequests and ConfirmationResponses,
  * derive a verdict, and send ConfirmationResultMessages to informee participants.
  */
private[mediator] class ConfirmationResponseProcessor(
private[mediator] class ConfirmationRequestAndResponseProcessor(
    domainId: DomainId,
    private val mediatorId: MediatorId,
    verdictSender: VerdictSender,
@ -211,7 +211,7 @@ private[mediator] class ConfirmationResponseProcessor(
      rootHashMessages: Seq[OpenEnvelope[RootHashMessage[SerializedRootHashMessagePayload]]],
      batchAlsoContainsTopologyTransaction: Boolean,
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
    withSpan("TransactionConfirmationResponseProcessor.processRequest") {
    withSpan("ConfirmationRequestAndResponseProcessor.processRequest") {
      val timeout = requestId.unwrap.plus(confirmationResponseTimeout.unwrap)
      implicit traceContext =>
        span =>
@ -755,7 +755,7 @@ private[mediator] class ConfirmationResponseProcessor(
      topologyTimestamp: Option[CantonTimestamp],
      recipients: Recipients,
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] =
    withSpan("TransactionConfirmationResponseProcessor.processResponse") {
    withSpan("ConfirmationRequestAndResponseProcessor.processResponse") {
      implicit traceContext => span =>
        span.setAttribute("timestamp", ts.toString)
        span.setAttribute("counter", counter.toString)
@ -99,7 +99,7 @@ private[mediator] class Mediator(
  private val verdictSender =
    VerdictSender(sequencerClient, syncCrypto, mediatorId, protocolVersion, loggerFactory)

  private val processor = new ConfirmationResponseProcessor(
  private val processor = new ConfirmationRequestAndResponseProcessor(
    domain,
    mediatorId,
    verdictSender,
@ -13,7 +13,7 @@ import com.digitalasset.canton.sequencing.protocol.{OpenEnvelope, Recipients}

/** The [[MediatorEventsProcessor]] looks through all sequencer events provided by the sequencer client in a batch
  * to pick out events for the Mediator with the same request-id while also scheduling timeouts and running
  * topology transactions at appropriate times. We map all the mediator events we generate into this simplified
  * structure so the [[ConfirmationResponseProcessor]] processes these events without having to perform the same extraction
  * structure so the [[ConfirmationRequestAndResponseProcessor]] processes these events without having to perform the same extraction
  * and error handling of the original SequencerEvent.
  */
private[mediator] sealed trait MediatorEvent extends PrettyPrinting {
@ -166,14 +166,14 @@ private[mediator] class MediatorEventsProcessor(
private[mediator] object MediatorEventsProcessor {
  def apply(
      identityClientEventHandler: UnsignedProtocolEventHandler,
      confirmationResponseProcessor: ConfirmationResponseProcessor,
      processor: ConfirmationRequestAndResponseProcessor,
      mediatorEventDeduplicator: MediatorEventDeduplicator,
      metrics: MediatorMetrics,
      loggerFactory: NamedLoggerFactory,
  )(implicit executionContext: ExecutionContext): MediatorEventsProcessor = {
    new MediatorEventsProcessor(
      identityClientEventHandler,
      confirmationResponseProcessor.handleRequestEvents,
      processor.handleRequestEvents,
      mediatorEventDeduplicator,
      metrics,
      loggerFactory,
@ -215,7 +215,7 @@ private[mediator] class DefaultVerdictSender(
        .map(Recipients.recipientGroups)
        .getOrElse(
          // Should never happen as the topology (same snapshot) is checked in
          // `ConfirmationResponseProcessor.validateRequest`
          // `ConfirmationRequestAndResponseProcessor.validateRequest`
          ErrorUtil.invalidState("No active participants for informees")
        )

@ -22,6 +22,7 @@ import com.digitalasset.canton.domain.sequencing.sequencer.traffic.{
  SequencerRateLimitError,
  SequencerTrafficStatus,
}
import com.digitalasset.canton.domain.sequencing.traffic.store.TrafficConsumedStore
import com.digitalasset.canton.health.admin.data.{SequencerAdminStatus, SequencerHealthStatus}
import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, Lifecycle}
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger}
@ -43,9 +44,9 @@ import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration}
import com.digitalasset.canton.topology.{DomainId, Member, SequencerId}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext
import com.digitalasset.canton.util.ErrorUtil
import com.digitalasset.canton.util.FutureUtil.doNotAwait
import com.digitalasset.canton.util.Thereafter.syntax.*
import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil}
import com.digitalasset.canton.version.ProtocolVersion
import io.opentelemetry.api.trace.Tracer
import org.apache.pekko.stream.Materializer
@ -101,6 +102,7 @@ object DatabaseSequencer {
      clock,
      domainId,
      topologyClientMember,
      trafficConsumedStore = None,
      protocolVersion,
      cryptoApi,
      metrics,
@ -125,6 +127,7 @@ class DatabaseSequencer(
    clock: Clock,
    domainId: DomainId,
    topologyClientMember: Member,
    trafficConsumedStore: Option[TrafficConsumedStore],
    protocolVersion: ProtocolVersion,
    cryptoApi: DomainSyncCryptoClient,
    metrics: SequencerMetrics,
@ -164,7 +167,7 @@ class DatabaseSequencer(
    storageForAdminChanges.isActive
  )

  private val store = writer.generalStore
  private[sequencer] val store = writer.generalStore

  protected val memberValidator: SequencerMemberValidator = store

@ -230,6 +233,7 @@ class DatabaseSequencer(
    cryptoApi,
    eventSignaller,
    topologyClientMember,
    trafficConsumedStore,
    protocolVersion,
    timeouts,
    loggerFactory,
@ -253,9 +257,10 @@ class DatabaseSequencer(
    } yield isEnabled
  }

  /** Package private to use access method in tests, see `TestDatabaseSequencerWrapper`.
    */
  override final def registerMemberInternal(member: Member, timestamp: CantonTimestamp)(implicit
  override private[sequencing] final def registerMemberInternal(
      member: Member,
      timestamp: CantonTimestamp,
  )(implicit
      traceContext: TraceContext
  ): EitherT[Future, RegisterError, Unit] = {
    EitherT
@ -396,8 +401,25 @@ class DatabaseSequencer(

  override def snapshot(timestamp: CantonTimestamp)(implicit
      traceContext: TraceContext
  ): EitherT[Future, String, SequencerSnapshot] =
    EitherT.right[String](store.readStateAtTimestamp(timestamp))
  ): EitherT[Future, String, SequencerSnapshot] = {
    for {
      safeWatermarkO <- EitherT.right(store.safeWatermark)
      // we check that the requested timestamp is not after the safe watermark, to avoid
      // snapshotting the sequencer at a timestamp that is not yet safe to read
      _ <- {
        safeWatermarkO match {
          case Some(safeWatermark) =>
            EitherTUtil.condUnitET[Future](
              timestamp <= safeWatermark,
              s"Requested snapshot at $timestamp is after the safe watermark $safeWatermark",
            )
          case None =>
            EitherT.leftT[Future, Unit](s"No safe watermark found for the sequencer")
        }
      }
      snapshot <- EitherT.right[String](store.readStateAtTimestamp(timestamp))
    } yield snapshot
  }

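The new `snapshot` body enforces an ordering invariant: a snapshot may only be taken at or before the store's safe watermark, since later timestamps may still receive writes. A self-contained sketch of the same guard with plain `Either` standing in for `EitherT`:

def checkedSnapshot[S](timestamp: Long, safeWatermark: Option[Long])(read: Long => S): Either[String, S] =
  safeWatermark match {
    case None => Left("No safe watermark found for the sequencer")
    case Some(mark) if timestamp > mark =>
      Left(s"Requested snapshot at $timestamp is after the safe watermark $mark")
    case _ => Right(read(timestamp))
  }
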
  override private[sequencing] def firstSequencerCounterServeableForSequencer: SequencerCounter =
    // Database sequencers are never bootstrapped

@ -88,8 +88,8 @@ trait Sequencer
    */
  def isEnabled(member: Member)(implicit traceContext: TraceContext): Future[Boolean]

  def registerMemberInternal(member: Member, timestamp: CantonTimestamp)(implicit
      traceContext: TraceContext
  private[sequencing] def registerMemberInternal(member: Member, timestamp: CantonTimestamp)(
      implicit traceContext: TraceContext
  ): EitherT[Future, RegisterError, Unit]

  def sendAsyncSigned(signedSubmission: SignedContent[SubmissionRequest])(implicit
@ -15,6 +15,7 @@ import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.domain.sequencing.sequencer.SequencerReader.ReadState
import com.digitalasset.canton.domain.sequencing.sequencer.errors.CreateSubscriptionError
import com.digitalasset.canton.domain.sequencing.sequencer.store.*
import com.digitalasset.canton.domain.sequencing.traffic.store.TrafficConsumedStore
import com.digitalasset.canton.lifecycle.{
  CloseContext,
  FlagCloseable,
@ -29,7 +30,7 @@ import com.digitalasset.canton.sequencing.client.{
  SequencerSubscriptionError,
}
import com.digitalasset.canton.sequencing.protocol.*
import com.digitalasset.canton.sequencing.traffic.TrafficReceipt
import com.digitalasset.canton.sequencing.traffic.{TrafficConsumed, TrafficReceipt}
import com.digitalasset.canton.sequencing.{GroupAddressResolver, OrdinarySerializedEvent}
import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent
import com.digitalasset.canton.store.db.DbDeserializationException
@ -78,6 +79,7 @@ class SequencerReader(
    syncCryptoApi: SyncCryptoClient[SyncCryptoApi],
    eventSignaller: EventSignaller,
    topologyClientMember: Member,
    trafficConsumedStoreO: Option[TrafficConsumedStore],
    protocolVersion: ProtocolVersion,
    override protected val timeouts: ProcessingTimeout,
    protected val loggerFactory: NamedLoggerFactory,
@ -355,35 +357,34 @@ class SequencerReader(
        // To not introduce gaps in the sequencer counters,
        // we deliver an empty batch to the member if it is not the sender.
        // This way, we can avoid revalidating the skipped events after the checkpoint we resubscribe from.
        val event = if (registeredMember.memberId == sender) {
          val error =
            SequencerErrors.TopoologyTimestampTooEarly(
              topologyTimestamp,
        getTrafficReceipt(sender, sequencingTimestamp).map { trafficReceiptO =>
          val event = if (registeredMember.memberId == sender) {
            val error =
              SequencerErrors.TopoologyTimestampTooEarly(
                topologyTimestamp,
                sequencingTimestamp,
              )
            DeliverError.create(
              counter,
              sequencingTimestamp,
              domainId,
              messageId,
              error,
              protocolVersion,
              trafficReceiptO,
            )
          DeliverError.create(
            counter,
            sequencingTimestamp,
            domainId,
            messageId,
            error,
            protocolVersion,
            Option
              .empty[TrafficReceipt], // TODO(i19528) wire traffic consumed for DB sequencer
          )
        } else
          Deliver.create(
            counter,
            sequencingTimestamp,
            domainId,
            None,
            emptyBatch,
            None,
            protocolVersion,
            Option
              .empty[TrafficReceipt], // TODO(i19528) wire traffic consumed for DB sequencer
          )
        Future.successful(
          } else
            Deliver.create(
              counter,
              sequencingTimestamp,
              domainId,
              None,
              emptyBatch,
              None,
              protocolVersion,
              trafficReceiptO,
            )

          // This event cannot change the topology state of the client
          // and might not reach the topology client even
          // if it was originally addressed to it.
@ -396,7 +397,7 @@ class SequencerReader(
            topologyClientTimestampBefore,
            unvalidatedEvent.traceContext,
          )
        )
      }
    }
  }

@ -530,6 +531,28 @@ class SequencerReader(
    } yield OrdinarySequencedEvent(signedEvent)(traceContext)
  }

  private def getTrafficReceipt(senderMemberId: SequencerMemberId, timestamp: CantonTimestamp)(
      implicit traceContext: TraceContext
  ): Future[Option[TrafficReceipt]] = {
    if (registeredMember.memberId == senderMemberId) { // traffic receipt is only for the sender
      trafficConsumedStoreO match { // and only if we have traffic management enabled
        case Some(trafficConsumedStore) =>
          trafficConsumedStore.lookupAt(member, timestamp).map {
            case Some(trafficConsumed) => Some(trafficConsumed.toTrafficReceipt)
            case None =>
              logger.debug(
                s"Traffic consumed not found for member $member, receipt will contain init value"
              )
              TrafficConsumed.init(member).toTrafficReceipt.some
          }
        case None =>
          Future.successful(None)
      }
    } else {
      Future.successful(None)
    }
  }

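Two rules are encoded in `getTrafficReceipt`: only the sender of an event gets a receipt, and a missing journal row falls back to the member's initial traffic state rather than failing. A condensed stand-in sketch of that decision table, with simplified types:

// storeLookup is None when traffic management is disabled; the inner lookup
// returns the recorded cost at the timestamp, if any. All names are stand-ins.
def trafficReceiptFor(
    isSender: Boolean,
    storeLookup: Option[Long => Option[Long]],
    timestamp: Long,
): Option[Long] =
  if (!isSender) None
  else storeLookup.map(lookup => lookup(timestamp).getOrElse(0L)) // 0L models the init value
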
  /** Takes our stored event and turns it back into a real sequenced event.
    */
  private def mkSequencedEvent(
@ -553,7 +576,7 @@ class SequencerReader(
          _traceContext,
        ) =>
      // message id only goes to sender
      val messageIdO = Option(messageId).filter(_ => registeredMember.memberId == sender)
      val messageIdO = Option.when(registeredMember.memberId == sender)(messageId)
      val batch: Batch[ClosedEnvelope] = Batch
        .fromByteString(protocolVersion)(
          payload.content
@ -595,6 +618,7 @@ class SequencerReader(
        memberGroupRecipients = resolvedGroupAddresses.collect {
          case (groupRecipient, groupMembers) if groupMembers.contains(member) => groupRecipient
        }.toSet
        trafficReceiptO <- getTrafficReceipt(sender, timestamp)
      } yield {
        val filteredBatch = Batch.filterClosedEnvelopesFor(batch, member, memberGroupRecipients)
        Deliver.create[ClosedEnvelope](
@ -605,12 +629,12 @@ class SequencerReader(
          filteredBatch,
          topologyTimestampO,
          protocolVersion,
          Option.empty[TrafficReceipt], // TODO(i19528) wire traffic consumed for DB sequencer
          trafficReceiptO,
        )
      }

    case ReceiptStoreEvent(_sender, messageId, topologyTimestampO, _traceContext) =>
      Future.successful(
    case ReceiptStoreEvent(sender, messageId, topologyTimestampO, _traceContext) =>
      getTrafficReceipt(sender, timestamp).map(trafficReceiptO =>
        Deliver.create[ClosedEnvelope](
          counter,
          timestamp,
@ -619,14 +643,14 @@ class SequencerReader(
          emptyBatch,
          topologyTimestampO,
          protocolVersion,
          Option.empty[TrafficReceipt], // TODO(i19528) wire traffic consumed for DB sequencer
          trafficReceiptO,
        )
      )
    case DeliverErrorStoreEvent(_, messageId, error, _traceContext) =>
    case DeliverErrorStoreEvent(sender, messageId, error, _traceContext) =>
      val status = DeliverErrorStoreEvent
        .fromByteString(error, protocolVersion)
        .valueOr(err => throw new DbDeserializationException(err.toString))
      Future.successful(
      getTrafficReceipt(sender, timestamp).map(trafficReceiptO =>
        DeliverError.create(
          counter,
          timestamp,
@ -634,7 +658,7 @@ class SequencerReader(
          messageId,
          status,
          protocolVersion,
          Option.empty[TrafficReceipt], // TODO(i19528) wire traffic consumed for DB sequencer
          trafficReceiptO,
        )
      )
  }
@ -109,6 +109,7 @@ class BlockSequencer(
      clock,
      domainId,
      sequencerId,
      Some(blockRateLimitManager.trafficConsumedStore),
      protocolVersion,
      cryptoApi,
      SequencerMetrics.noop("TODO"), // TODO(#18406)
@ -464,29 +465,34 @@ class BlockSequencer(
            .toSequencerSnapshot(protocolVersion, trafficPurchased, trafficConsumed)
            .tap(snapshot =>
              if (logger.underlying.isDebugEnabled()) {
                logger.debug(
                logger.trace(
                  s"Snapshot for timestamp $timestamp generated from ephemeral state:\n$blockEphemeralState"
                )
                logger.debug(
                  s"Resulting snapshot for timestamp $timestamp:\n$snapshot"
                )
              }
            )
        },
      )
      finalSnapshot <- {
        if (unifiedSequencer) {
          super.snapshot(timestamp).map { dbsSnapshot =>
          super.snapshot(bsSnapshot.lastTs).map { dbsSnapshot =>
            dbsSnapshot.copy(
              latestBlockHeight = bsSnapshot.latestBlockHeight,
              inFlightAggregations = bsSnapshot.inFlightAggregations,
              additional = bsSnapshot.additional,
              trafficPurchased = bsSnapshot.trafficPurchased,
              trafficConsumed = bsSnapshot.trafficConsumed,
            )(dbsSnapshot.representativeProtocolVersion)
          }
        } else {
          EitherT.pure[Future, String](bsSnapshot)
        }
      }
    } yield finalSnapshot
    } yield {
      logger.trace(
        s"Resulting snapshot for timestamp $timestamp:\n$finalSnapshot"
      )
      finalSnapshot
    }
  }

  override def pruningStatus(implicit
@ -700,14 +706,27 @@ class BlockSequencer(
  override def trafficStatus(requestedMembers: Seq[Member], selector: TimestampSelector)(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[SequencerTrafficStatus] = {
    trafficStatesForMembers(
      if (requestedMembers.isEmpty) {
        // If requestedMembers is empty, get the traffic states of all known members in the head state
        stateManager.getHeadState.chunk.ephemeral.status.membersMap.keySet
      } else requestedMembers.toSet,
      selector,
    )
      .map(SequencerTrafficStatus.apply)
    for {
      members <-
        if (requestedMembers.isEmpty) {
          // If requestedMembers is not set, get the traffic states of all known members
          if (unifiedSequencer) {
            FutureUnlessShutdown.outcomeF(
              cryptoApi.currentSnapshotApproximation.ipsSnapshot.allMembers()
            )
          } else {
            FutureUnlessShutdown.pure(
              stateManager.getHeadState.chunk.ephemeral.status.membersMap.keySet
            )
          }
        } else {
          FutureUnlessShutdown.pure(requestedMembers.toSet)
        }
      trafficState <- trafficStatesForMembers(
        members,
        selector,
      )
    } yield SequencerTrafficStatus(trafficState)
  }

  override def getTrafficStateAt(member: Member, timestamp: CantonTimestamp)(implicit
@ -222,7 +222,6 @@ abstract class BlockSequencerFactory(
      nodeParameters.enableAdditionalConsistencyChecks,
      nodeParameters.processingTimeouts,
      domainLoggerFactory,
      rateLimitManager,
      nodeParameters.useUnifiedSequencer,
    )

@ -162,10 +162,9 @@ class DbSequencerStore(

    case object Receipt extends EventTypeDiscriminator('R')
    case object Deliver extends EventTypeDiscriminator('D')

    case object Error extends EventTypeDiscriminator('E')

    private val all = Seq[EventTypeDiscriminator](Deliver, Error)
    private val all = Seq[EventTypeDiscriminator](Deliver, Error, Receipt)

    def fromChar(value: Char): Either[String, EventTypeDiscriminator] =
      all.find(_.value == value).toRight(s"Event type discriminator for value [$value] not found")
@ -841,6 +840,9 @@ class DbSequencerStore(
    query.as[Option[CantonTimestamp]].headOption.map(_.flatten)
  }

  override def safeWatermark(implicit traceContext: TraceContext): Future[Option[CantonTimestamp]] =
    storage.query(safeWaterMarkDBIO, "query safe watermark")

  override def readStateAtTimestamp(
      timestamp: CantonTimestamp
  )(implicit traceContext: TraceContext): Future[SequencerSnapshot] = {
@ -132,6 +132,9 @@ class InMemorySequencerStore(
  ): Future[Option[Watermark]] =
    Future.successful(watermark.get.map(Watermark(_, online = true)))

  override def safeWatermark(implicit traceContext: TraceContext): Future[Option[CantonTimestamp]] =
    Future.successful(watermark.get)

  override def goOffline(instanceIndex: Int)(implicit traceContext: TraceContext): Future[Unit] =
    Future.unit

@ -532,6 +532,10 @@ trait SequencerStore extends SequencerMemberValidator with NamedLogging with Aut
      traceContext: TraceContext
  ): Future[Option[Watermark]]

  /** Return the minimum watermark across all online sequencers
    */
  def safeWatermark(implicit traceContext: TraceContext): Future[Option[CantonTimestamp]]

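A sketch of what "minimum watermark across all online sequencers" means operationally, assuming a per-instance watermark with an online flag; the DB-backed store computes this in SQL and the in-memory store trivially returns its single watermark:

final case class InstanceWatermark(instanceIndex: Int, timestamp: Long, online: Boolean)

// None when no sequencer instance is online, mirroring Future[Option[CantonTimestamp]].
def safeWatermark(all: Seq[InstanceWatermark]): Option[Long] =
  all.filter(_.online).map(_.timestamp).minOption
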
  /** Flag that we're going offline (likely due to a shutdown) */
  def goOffline(instanceIndex: Int)(implicit traceContext: TraceContext): Future[Unit]

@ -790,10 +794,14 @@ trait SequencerStore extends SequencerMemberValidator with NamedLogging with Aut
          if (!memberStatus.enabled) eitherT(disableMember(id))
          else EitherT.rightT[Future, String](())
        _ <- eitherT(memberStatus.lastAcknowledged.fold(Future.unit)(ack => acknowledge(id, ack)))
        _ <- saveCounterCheckpoint(
          id,
          CounterCheckpoint(snapshot.heads(memberStatus.member), lastTs, Some(lastTs)),
        ).leftMap(_.toString)
        _ <-
          // Some members can be registered, but not have any events yet, so there can be no CounterCheckpoint in the snapshot
          snapshot.heads.get(memberStatus.member).fold(eitherT[String](Future.unit)) { counter =>
            saveCounterCheckpoint(
              id,
              CounterCheckpoint(counter, lastTs, Some(lastTs)),
            ).leftMap(_.toString)
          }
      } yield ()
    }
    _ <- saveLowerBound(lastTs).leftMap(_.toString)
@ -587,7 +587,7 @@ class EnterpriseSequencerRateLimitManager(
      }
    } yield {
      // Here we correctly consumed the traffic, so submitted cost and consumed cost are the same
      trafficConsumed.toTrafficReceipt(consumedCost = cost)
      trafficConsumed.toTrafficReceipt
    }
  }

@ -630,11 +630,13 @@ class EnterpriseSequencerRateLimitManager(
      // Update the traffic consumed at sequencing time, and convert it to a receipt. Cost = 0 because we failed to consume traffic
      ensureTrafficConsumedAtSequencingTime(snapshotAtSequencingTime)
        .map(
          _.map(
            _.toTrafficReceipt(
              consumedCost = NonNegativeLong.zero
          _.map { trafficConsumed =>
            require(
              trafficConsumed.lastConsumedCost.unwrap == 0L,
              "Consumed cost should be zero",
            )
            )
          )
            trafficConsumed.toTrafficReceipt
          }
        )
      )
    } yield {
@ -33,8 +33,9 @@ class DbTrafficConsumedStore(
      trafficConsumed: TrafficConsumed
  )(implicit traceContext: TraceContext): Future[Unit] = {
    val insertSql =
      sqlu"""insert into seq_traffic_control_consumed_journal (member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder)
             values (${trafficConsumed.member}, ${trafficConsumed.sequencingTimestamp}, ${trafficConsumed.extraTrafficConsumed}, ${trafficConsumed.baseTrafficRemainder}) on conflict do nothing"""
      sqlu"""insert into seq_traffic_control_consumed_journal (member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost)
             values (${trafficConsumed.member}, ${trafficConsumed.sequencingTimestamp}, ${trafficConsumed.extraTrafficConsumed}, ${trafficConsumed.baseTrafficRemainder}, ${trafficConsumed.lastConsumedCost})
             on conflict do nothing"""

    storage.update_(insertSql, functionFullName)
  }
@ -43,7 +44,10 @@ class DbTrafficConsumedStore(
      member: Member
  )(implicit traceContext: TraceContext): Future[Seq[TrafficConsumed]] = {
    val query =
      sql"select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder from seq_traffic_control_consumed_journal where member = $member order by sequencing_timestamp asc"
      sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost
            from seq_traffic_control_consumed_journal
            where member = $member
            order by sequencing_timestamp asc"""
    storage.query(query.as[TrafficConsumed], functionFullName)
  }

@ -51,17 +55,21 @@ class DbTrafficConsumedStore(
      member: Member
  )(implicit traceContext: TraceContext): Future[Option[TrafficConsumed]] = {
    val query =
      sql"select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder from seq_traffic_control_consumed_journal where member = $member order by sequencing_timestamp desc"
      sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost
            from seq_traffic_control_consumed_journal
            where member = $member
            order by sequencing_timestamp desc"""
    storage.querySingle(query.as[TrafficConsumed].headOption, functionFullName).value
  }

  override def lookupLatestBeforeInclusive(timestamp: CantonTimestamp)(implicit
      traceContext: TraceContext
  ): Future[Seq[TrafficConsumed]] = {
    // TODO(#18394): Check if performance of this query is good (looks a lot like a group by)
    val query =
      sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder
      sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost
            from
              (select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder,
              (select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost,
                rank() over (partition by member order by sequencing_timestamp desc) as pos
               from seq_traffic_control_consumed_journal
               where sequencing_timestamp <= $timestamp
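The TODO above remarks that this window-function lookup "looks a lot like a group by". As a hedged aside, the same "latest row per member" semantics can be written with a grouped subquery; this variant is illustrative only (slick's sql interpolator and the method's $timestamp are assumed in scope), and its performance may well differ:

    // Sketch: equivalent lookup via a group-by join instead of rank().
    val groupByVariant =
      sql"""select j.member, j.sequencing_timestamp, j.extra_traffic_consumed,
                   j.base_traffic_remainder, j.last_consumed_cost
            from seq_traffic_control_consumed_journal j
            join (select member, max(sequencing_timestamp) as max_ts
                  from seq_traffic_control_consumed_journal
                  where sequencing_timestamp <= $timestamp
                  group by member) latest
              on latest.member = j.member and latest.max_ts = j.sequencing_timestamp"""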
@ -75,10 +83,11 @@ class DbTrafficConsumedStore(
  def lookupLatestBeforeInclusiveForMember(member: Member, timestamp: CantonTimestamp)(implicit
      traceContext: TraceContext
  ): Future[Option[TrafficConsumed]] = {
    // TODO(#18394): Check if performance of this query is good (looks a lot like a group by)
    val query =
      sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder
      sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost
            from
              (select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder,
              (select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost,
                rank() over (partition by member order by sequencing_timestamp desc) as pos
               from seq_traffic_control_consumed_journal
               where sequencing_timestamp <= $timestamp and member = $member
@ -93,7 +102,9 @@ class DbTrafficConsumedStore(
      traceContext: TraceContext
  ): Future[Option[TrafficConsumed]] = {
    val query =
      sql"select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder from seq_traffic_control_consumed_journal where member = $member and sequencing_timestamp = $timestamp"
      sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost
            from seq_traffic_control_consumed_journal
            where member = $member and sequencing_timestamp = $timestamp"""
    storage.querySingle(query.as[TrafficConsumed].headOption, functionFullName).value
  }

@ -105,6 +116,7 @@ class DbTrafficConsumedStore(
    // upToExclusive, we need to keep it.
    // To do that we first find the latest timestamp for all members before the pruning timestamp.
    // Then we delete all rows below that timestamp for each member.
    // TODO(#18394): Check performance of the group by query here
    val deleteQuery =
      sqlu"""with last_before_pruning_timestamp(member, sequencing_timestamp) as (
               select member, max(sequencing_timestamp)
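Note the pruning statement above is cut off by the hunk boundary. Purely as a hedged sketch of the two-step approach the comments describe (keep each member's latest row at or before the pruning point, delete everything strictly older), the complete statement could look roughly like this; it is an assumption for illustration, not the actual query:

    // Sketch only: the CTE finds the latest row per member before the pruning
    // timestamp; the delete removes strictly older rows for that member.
    val deleteSketch =
      sqlu"""with last_before_pruning_timestamp(member, sequencing_timestamp) as (
               select member, max(sequencing_timestamp)
               from seq_traffic_control_consumed_journal
               where sequencing_timestamp <= $upToExclusive
               group by member
             )
             delete from seq_traffic_control_consumed_journal journal
             where exists (
               select 1 from last_before_pruning_timestamp last
               where last.member = journal.member
                 and journal.sequencing_timestamp < last.sequencing_timestamp
             )"""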
@ -45,7 +45,7 @@ import scala.jdk.CollectionConverters.*
import scala.language.reflectiveCalls

@nowarn("msg=match may not be exhaustive")
class ConfirmationResponseProcessorTest
class ConfirmationRequestAndResponseProcessorTest
    extends AsyncWordSpec
    with BaseTest
    with HasTestCloseContext
@ -134,8 +134,8 @@ class ConfirmationResponseProcessorTest
    SymbolicCrypto.create(testedReleaseProtocolVersion, timeouts, loggerFactory)

  private lazy val topology: TestingTopology = TestingTopology(
    Set(domainId),
    Map(
    domains = Set(domainId),
    topology = Map(
      submitter -> Map(participant -> ParticipantPermission.Confirmation),
      signatory ->
        Map(participant -> ParticipantPermission.Confirmation),
@ -144,8 +144,8 @@ class ConfirmationResponseProcessorTest
      extra ->
        Map(participant -> ParticipantPermission.Observation),
    ),
    Set(mediatorGroup, mediatorGroup2),
    sequencerGroup,
    mediatorGroups = Set(mediatorGroup, mediatorGroup2),
    sequencerGroup = sequencerGroup,
  )

  private lazy val identityFactory = TestingIdentityFactory(
@ -158,14 +158,14 @@ class ConfirmationResponseProcessorTest

  private lazy val identityFactory2 = {
    val topology2 = TestingTopology(
      Set(domainId),
      Map(
      domains = Set(domainId),
      topology = Map(
        submitter -> Map(participant1 -> ParticipantPermission.Confirmation),
        signatory -> Map(participant2 -> ParticipantPermission.Confirmation),
        observer -> Map(participant3 -> ParticipantPermission.Confirmation),
      ),
      Set(mediatorGroup),
      sequencerGroup,
      mediatorGroups = Set(mediatorGroup),
      sequencerGroup = sequencerGroup,
    )
    TestingIdentityFactory(
      topology2,
@ -190,12 +190,12 @@ class ConfirmationResponseProcessorTest
  private lazy val identityFactoryOnlySubmitter =
    TestingIdentityFactory(
      TestingTopology(
        Set(domainId),
        Map(
        domains = Set(domainId),
        topology = Map(
          submitter -> Map(participant1 -> ParticipantPermission.Confirmation)
        ),
        Set(mediatorGroup0(NonEmpty.mk(Seq, mediatorId))),
        sequencerGroup,
        mediatorGroups = Set(mediatorGroup0(NonEmpty.mk(Seq, mediatorId))),
        sequencerGroup = sequencerGroup,
      ),
      loggerFactory,
      dynamicDomainParameters = initialDomainParameters,
@ -238,7 +238,7 @@ class ConfirmationResponseProcessorTest
      timeouts,
      loggerFactory,
    )
    val processor = new ConfirmationResponseProcessor(
    val processor = new ConfirmationRequestAndResponseProcessor(
      domainId,
      mediatorId,
      verdictSender,
@ -290,7 +290,7 @@ class ConfirmationResponseProcessorTest
      .failOnShutdown
      .futureValue

  "TransactionConfirmationResponseProcessor" should {
  "ConfirmationRequestAndResponseProcessor" should {
    def shouldBeViewThresholdBelowMinimumAlarm(
        requestId: RequestId,
        viewPosition: ViewPosition,
@ -518,10 +518,24 @@ class ConfirmationResponseProcessorTest
          ),
          Recipients.cc(MemberRecipient(participant3), mediatorGroupRecipient),
        ),
        "group addresses and member recipients" -> Seq(
          Recipients.recipientGroups(
            NonEmpty.mk(
              Seq,
              NonEmpty.mk(
                Set,
                ParticipantsOfParty(PartyId.tryFromLfParty(submitter)),
                mediatorGroupRecipient,
              ),
              NonEmpty.mk(Set, MemberRecipient(participant2), mediatorGroupRecipient),
              NonEmpty.mk(Set, MemberRecipient(participant3), mediatorGroupRecipient),
            )
          )
        ),
      )

      sequentialTraverse_(tests.zipWithIndex) { case ((_testName, recipients), i) =>
        withClueF("testname") {
      sequentialTraverse_(tests.zipWithIndex) { case ((testName, recipients), i) =>
        withClueF(testName) {
          val rootHashMessages =
            recipients.map(r => OpenEnvelope(rootHashMessage, r)(testedProtocolVersion))
          val ts = CantonTimestamp.ofEpochSecond(i.toLong)
@ -645,7 +659,7 @@ class ConfirmationResponseProcessorTest
        correctRootHashMessage -> Recipients
          .cc(mediatorGroupRecipient, MemberRecipient(participant)),
        correctRootHashMessage.copy(
          payload = SerializedRootHashMessagePayload(ByteString.copyFromUtf8("other paylroosoad"))
          payload = SerializedRootHashMessagePayload(ByteString.copyFromUtf8("other payload"))
        ) -> Recipients
          .cc(mediatorGroupRecipient, MemberRecipient(otherParticipant)),
      )
@ -681,7 +695,7 @@ class ConfirmationResponseProcessorTest
        (batchWithSuperfluousRootHashMessage -> show"Superfluous root hash message for members: $otherParticipant") ->
          List(Set[Member](participant, otherParticipant) -> correctViewType),

        (batchWithDifferentPayloads -> show"Different payloads in root hash messages. Sizes: 0, 17.") ->
        (batchWithDifferentPayloads -> show"Different payloads in root hash messages. Sizes: 0, 13.") ->
          List(Set[Member](participant, otherParticipant) -> correctViewType),
      )
      // format: on
@ -12,6 +12,7 @@ import com.digitalasset.canton.domain.sequencing.sequencer.Sequencer as CantonSe
import com.digitalasset.canton.protocol.DynamicDomainParameters
import com.digitalasset.canton.resource.MemoryStorage
import com.digitalasset.canton.sequencing.protocol.{Recipients, SubmissionRequest}
import com.digitalasset.canton.sequencing.traffic.TrafficReceipt
import com.digitalasset.canton.topology.{MediatorId, TestingIdentityFactory, TestingTopology}
import org.apache.pekko.stream.Materializer

@ -55,6 +56,8 @@ class DatabaseSequencerSnapshottingTest extends SequencerApiTest {

  override protected def supportAggregation: Boolean = false

  override protected def defaultExpectedTrafficReceipt: Option[TrafficReceipt] = None

  "Database snapshotting" should {

    "allow a new separate database to be created" in { env =>
@ -95,7 +98,16 @@ class DatabaseSequencerSnapshottingTest extends SequencerApiTest {
        )
        checkMessages(List(details), messages)
      }
      snapshot <- valueOrFail(sequencer.snapshot(CantonTimestamp.MaxValue))("get snapshot")

      error <- sequencer
        .snapshot(CantonTimestamp.MaxValue)
        .leftOrFail("snapshotting after the watermark is expected to fail")
      _ <- error should include(" is after the safe watermark")

      // Note: below we use the timestamp that is currently the safe watermark in the sequencer
      snapshot <- valueOrFail(sequencer.snapshot(CantonTimestamp.Epoch.immediateSuccessor))(
        "get snapshot"
      )

      // create a second separate sequencer from the snapshot
      secondSequencer = createSequencerWithSnapshot(
@ -103,8 +115,16 @@ class DatabaseSequencerSnapshottingTest extends SequencerApiTest {
        Some(snapshot),
      )

      // TODO(#18405): Currently crash recovery of DBS resets the watermark to a wrong value (epoch) leading to
      //  the second snapshot failing due to newly added watermark check. This is a temp workaround to avoid that.
      _ <- secondSequencer.store
        .saveWatermark(instanceIndex = 0, snapshot.lastTs)
        .valueOrFail("save watermark")

      // the snapshot from the second sequencer should look the same except that the lastTs will become the lower bound
      snapshot2 <- valueOrFail(secondSequencer.snapshot(CantonTimestamp.MaxValue))("get snapshot")
      snapshot2 <- valueOrFail(
        secondSequencer.snapshot(CantonTimestamp.Epoch.immediateSuccessor)
      )("get snapshot")
      _ = {
        snapshot2 shouldBe (snapshot.copy(status =
          snapshot.status.copy(lowerBound = snapshot.lastTs)
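The test above exercises a newly added guard: snapshotting past the safe watermark must now fail with a message containing " is after the safe watermark". As a hedged illustration of what such a guard could look like inside a snapshot method (names and wiring assumed, not the actual implementation):

    // Illustrative guard only, inferred from the expected error message above.
    def snapshotGuard(
        requested: CantonTimestamp,
        safeWatermarkO: Option[CantonTimestamp],
    ): Either[String, Unit] = {
      val safe = safeWatermarkO.getOrElse(CantonTimestamp.MaxValue)
      Either.cond(
        requested <= safe,
        (),
        s"Requested snapshot timestamp $requested is after the safe watermark $safe",
      )
    }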
@ -142,6 +142,8 @@ abstract class SequencerApiTest

  protected def supportAggregation: Boolean

  protected def defaultExpectedTrafficReceipt: Option[TrafficReceipt]

  protected def runSequencerApiTests(): Unit = {
    "The sequencers" should {
      "send a batch to one recipient" in { env =>
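Many call sites below thread the new defaultExpectedTrafficReceipt through EventDetails. For orientation, here is a hedged reconstruction of the shape those call sites imply; the actual definition is not part of this diff and may differ:

    // Assumed shape, inferred from the call sites in this file: a counter,
    // the reading member, an optional message id echoed back to the sender,
    // the new optional traffic receipt, and the envelopes expected in the event.
    final case class EventDetails(
        counter: SequencerCounter,
        to: Member,
        messageId: Option[MessageId],
        trafficReceipt: Option[TrafficReceipt],
        envelopes: EnvelopeDetails*
    )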
@ -160,7 +162,7 @@ abstract class SequencerApiTest
          SequencerCounter(0),
          sender,
          Some(request.messageId),
          None,
          defaultExpectedTrafficReceipt,
          EnvelopeDetails(messageContent, recipients),
        )
        checkMessages(List(details), messages)
@ -253,7 +255,7 @@ abstract class SequencerApiTest
          SequencerCounter.Genesis,
          sender,
          Some(request1.messageId),
          None,
          defaultExpectedTrafficReceipt,
          EnvelopeDetails(normalMessageContent, recipients),
        )
        checkMessages(List(details), messages)
@ -278,7 +280,7 @@ abstract class SequencerApiTest
            SequencerCounter.Genesis,
            member,
            Option.when(member == sender)(request.messageId),
            None,
            if (member == sender) defaultExpectedTrafficReceipt else None,
            EnvelopeDetails(messageContent, recipients.forMember(member, Set.empty).value),
          )
        }
@ -323,12 +325,26 @@ abstract class SequencerApiTest
      } yield {
        // p6 gets the receipt immediately
        checkMessages(
          Seq(EventDetails(SequencerCounter.Genesis, p6, Some(request1.messageId), None)),
          Seq(
            EventDetails(
              SequencerCounter.Genesis,
              p6,
              Some(request1.messageId),
              defaultExpectedTrafficReceipt,
            )
          ),
          reads1,
        )
        // p9 gets the receipt only
        checkMessages(
          Seq(EventDetails(SequencerCounter.Genesis, p9, Some(request2.messageId), None)),
          Seq(
            EventDetails(
              SequencerCounter.Genesis,
              p9,
              Some(request2.messageId),
              defaultExpectedTrafficReceipt,
            )
          ),
          reads2,
        )
        // p10 gets the message
@ -337,8 +353,8 @@ abstract class SequencerApiTest
            EventDetails(
              SequencerCounter.Genesis,
              p10,
              None,
              None,
              messageId = None,
              trafficReceipt = None,
              EnvelopeDetails(messageContent, Recipients.cc(p10)),
            )
          ),
@ -429,7 +445,7 @@ abstract class SequencerApiTest
        }
        reads3 <- readForMembers(Seq(p6), sequencer)
      } yield {
        checkRejection(reads3, p6, request1.messageId) {
        checkRejection(reads3, p6, request1.messageId, defaultExpectedTrafficReceipt) {
          case SequencerErrors.MaxSequencingTimeTooFar(reason) =>
            reason should (
              include(s"Max sequencing time") and
@ -522,7 +538,14 @@ abstract class SequencerApiTest
        )
      } yield {
        checkMessages(
          Seq(EventDetails(SequencerCounter.Genesis, p11, Some(request1.messageId), None)),
          Seq(
            EventDetails(
              SequencerCounter.Genesis,
              p11,
              Some(request1.messageId),
              defaultExpectedTrafficReceipt,
            )
          ),
          reads11,
        )
        checkMessages(
@ -531,14 +554,14 @@ abstract class SequencerApiTest
              SequencerCounter.Genesis,
              p12,
              Some(request1.messageId),
              None,
              defaultExpectedTrafficReceipt,
              EnvelopeDetails(content2, recipients2, envs1(1).signatures ++ envs2(1).signatures),
            ),
            EventDetails(
              SequencerCounter.Genesis,
              p13,
              None,
              None,
              messageId = None,
              trafficReceipt = None,
              EnvelopeDetails(content1, recipients1, envs1(0).signatures ++ envs2(0).signatures),
              EnvelopeDetails(content2, recipients2, envs1(1).signatures ++ envs2(1).signatures),
            ),
@ -550,15 +573,15 @@ abstract class SequencerApiTest
            EventDetails(
              SequencerCounter.Genesis + 1,
              p11,
              None,
              None,
              messageId = None,
              trafficReceipt = None,
              EnvelopeDetails(content1, recipients1, envs1(0).signatures ++ envs2(0).signatures),
            )
          ),
          reads12a,
        )

        checkRejection(reads13, p13, messageId3) {
        checkRejection(reads13, p13, messageId3, defaultExpectedTrafficReceipt) {
          case SequencerErrors.AggregateSubmissionAlreadySent(reason) =>
            reason should (
              include(s"The aggregatable request with aggregation ID") and
@ -635,10 +658,17 @@ abstract class SequencerApiTest
        reads15 <- readForMembers(Seq(p15), sequencer)
      } yield {
        checkMessages(
          Seq(EventDetails(SequencerCounter.Genesis, p14, Some(request1.messageId), None)),
          Seq(
            EventDetails(
              SequencerCounter.Genesis,
              p14,
              Some(request1.messageId),
              defaultExpectedTrafficReceipt,
            )
          ),
          reads14,
        )
        checkRejection(reads14a, p14, messageId2) {
        checkRejection(reads14a, p14, messageId2, defaultExpectedTrafficReceipt) {
          case SequencerErrors.AggregateSubmissionStuffing(reason) =>
            reason should include(
              s"The sender ${p14} previously contributed to the aggregatable submission with ID"
@ -653,7 +683,13 @@ abstract class SequencerApiTest

        checkMessages(
          Seq(
            EventDetails(SequencerCounter.Genesis + 2, p14, None, None, deliveredEnvelopeDetails)
            EventDetails(
              SequencerCounter.Genesis + 2,
              p14,
              messageId = None,
              trafficReceipt = None,
              deliveredEnvelopeDetails,
            )
          ),
          reads14b,
        )
@ -663,7 +699,7 @@ abstract class SequencerApiTest
              SequencerCounter.Genesis,
              p15,
              Some(messageId3),
              None,
              defaultExpectedTrafficReceipt,
              deliveredEnvelopeDetails,
            )
          ),
@ -725,7 +761,7 @@ abstract class SequencerApiTest
        _ <- sequencer.sendAsyncSigned(sign(request)).valueOrFailShutdown("Sent async")
        reads <- readForMembers(Seq(p17), sequencer)
      } yield {
        checkRejection(reads, p17, messageId) {
        checkRejection(reads, p17, messageId, defaultExpectedTrafficReceipt) {
          case SequencerErrors.SubmissionRequestMalformed(reason) =>
            reason should include("Threshold 2 cannot be reached")
        }
@ -755,7 +791,7 @@ abstract class SequencerApiTest
        _ <- sequencer.sendAsyncSigned(sign(request)).valueOrFailShutdown("Sent async")
        reads <- readForMembers(Seq(p18), sequencer)
      } yield {
        checkRejection(reads, p18, messageId) {
        checkRejection(reads, p18, messageId, defaultExpectedTrafficReceipt) {
          case SequencerErrors.SubmissionRequestMalformed(reason) =>
            reason should include("Sender is not eligible according to the aggregation rule")
        }
@ -921,7 +957,7 @@ trait SequencerApiTestUtils
      got: Seq[(Member, OrdinarySerializedEvent)],
      sender: Member,
      expectedMessageId: MessageId,
      expectedTrafficReceipt: Option[TrafficReceipt] = None,
      expectedTrafficReceipt: Option[TrafficReceipt],
  )(assertReason: PartialFunction[Status, Assertion]): Assertion = {
    got match {
      case Seq((`sender`, event)) =>
@ -133,6 +133,7 @@ class SequencerReaderTest extends FixtureAsyncWordSpec with BaseTest {
        cryptoD,
        eventSignaller,
        topologyClientMember,
        trafficConsumedStoreO = None,
        testedProtocolVersion,
        timeouts,
        loggerFactory,

@ -48,6 +48,7 @@ trait SequencerStoreTest
    val ts1 = ts(1)
    val ts2 = ts(2)
    val ts3 = ts(3)
    val ts4 = ts(4)

    val payloadBytes1 = ByteString.copyFromUtf8("1")
    val payloadBytes2 = ByteString.copyFromUtf8("1")
@ -57,6 +58,7 @@ trait SequencerStoreTest
    val messageId1 = MessageId.tryCreate("1")
    val messageId2 = MessageId.tryCreate("2")
    val messageId3 = MessageId.tryCreate("3")
    val messageId4 = MessageId.tryCreate("4")

    val instanceDiscriminator1 = UUID.randomUUID()
    val instanceDiscriminator2 = UUID.randomUUID()
@ -101,6 +103,24 @@ trait SequencerStoreTest
        ),
      )

    def deliverReceipt(
        ts: CantonTimestamp,
        sender: Member,
        messageId: MessageId,
        topologyTimestamp: CantonTimestamp,
    ): Future[Sequenced[PayloadId]] =
      for {
        senderId <- store.registerMember(sender, ts)
      } yield Sequenced(
        ts,
        ReceiptStoreEvent(
          senderId,
          messageId,
          topologyTimestampO = Some(topologyTimestamp),
          traceContext,
        ),
      )

    def lookupRegisteredMember(member: Member): Future[SequencerMemberId] =
      for {
        registeredMemberO <- store.lookupMember(member)
@ -151,6 +171,35 @@ trait SequencerStoreTest
      }
    }

    def assertReceiptEvent(
        event: Sequenced[Payload],
        expectedTimestamp: CantonTimestamp,
        expectedSender: Member,
        expectedMessageId: MessageId,
        expectedTopologyTimestamp: Option[CantonTimestamp],
    ): Future[Assertion] = {
      for {
        senderId <- lookupRegisteredMember(expectedSender)
      } yield {
        event.timestamp shouldBe expectedTimestamp
        event.event match {
          case ReceiptStoreEvent(
                sender,
                messageId,
                topologyTimestampO,
                _traceContext,
              ) =>
            sender shouldBe senderId
            messageId shouldBe expectedMessageId
            event.event.members shouldBe Set(senderId)
            event.event.payloadO shouldBe None
            topologyTimestampO shouldBe expectedTopologyTimestamp
          case other =>
            fail(s"Expected deliver receipt but got $other")
        }
      }
    }

    /** Save payloads using the default `instanceDiscriminator1` and expecting it to succeed */
    def savePayloads(payloads: NonEmpty[Seq[Payload]]): Future[Unit] =
      valueOrFail(store.savePayloads(payloads, instanceDiscriminator1))("savePayloads")
@ -272,15 +321,16 @@ trait SequencerStoreTest
            payload2.id,
            recipients = Set(alice, bob),
          )
          receiptAlice <- env.deliverReceipt(ts4, alice, messageId4, ts3)
          deliverEventBob <- env.deliverEvent(ts3, bob, messageId3, payload3.id)
          _ <- env.store.saveEvents(
            instanceIndex,
            NonEmpty(Seq, deliverEventAlice, deliverEventAll, deliverEventBob),
            NonEmpty(Seq, deliverEventAlice, deliverEventAll, deliverEventBob, receiptAlice),
          )
          _ <- env.saveWatermark(deliverEventBob.timestamp).valueOrFail("saveWatermark")
          _ <- env.saveWatermark(receiptAlice.timestamp).valueOrFail("saveWatermark")
          aliceEvents <- env.readEvents(alice)
          bobEvents <- env.readEvents(bob)
          _ = aliceEvents should have size (2)
          _ = aliceEvents should have size (3)
          _ = bobEvents should have size (2)
          _ <- env.assertDeliverEvent(aliceEvents(0), ts1, alice, messageId1, Set(alice), payload1)
          _ <- env.assertDeliverEvent(
@ -291,6 +341,13 @@ trait SequencerStoreTest
            Set(alice, bob),
            payload2,
          )
          _ <- env.assertReceiptEvent(
            aliceEvents(2),
            ts4,
            alice,
            messageId4,
            ts3.some,
          )
          _ <- env.assertDeliverEvent(
            bobEvents(0),
            ts2,
@ -133,11 +133,12 @@ class EnterpriseSequencerRateLimitManagerTest
      expectedExtraTrafficPurchased: NonNegativeLong = trafficPurchased,
      expectedTrafficConsumed: NonNegativeLong = expectedExtraTrafficConsumed,
      expectedBaseTrafficRemainder: NonNegativeLong = NonNegativeLong.zero,
      expectedLastConsumedCost: NonNegativeLong = eventCostNonNegative,
      expectedSerial: Option[PositiveInt] = serial,
      timestamp: CantonTimestamp = sequencingTs,
  )(implicit f: Env) = for {
    states <- f.rlm
      .getStates(Set(sender), Some(sequencingTs), None, warnIfApproximate = false)
      .getStates(Set(sender), Some(timestamp), None, warnIfApproximate = false)
      .failOnShutdown
  } yield states.get(sender) shouldBe Some(
    Right(
@ -145,6 +146,7 @@ class EnterpriseSequencerRateLimitManagerTest
        expectedExtraTrafficPurchased,
        expectedTrafficConsumed,
        expectedBaseTrafficRemainder,
        expectedLastConsumedCost,
        timestamp,
        expectedSerial,
      )
@ -166,6 +168,7 @@ class EnterpriseSequencerRateLimitManagerTest
        expectedExtraTrafficPurchased,
        expectedTrafficConsumed,
        expectedBaseTrafficRemainder,
        NonNegativeLong.zero,
        sequencingTs,
        expectedSerial,
      )
@ -295,6 +298,7 @@ class EnterpriseSequencerRateLimitManagerTest
          NonNegativeLong.zero,
          NonNegativeLong.zero,
          maxBaseTrafficRemainder,
          NonNegativeLong.zero,
          sequencerTs,
          None,
        ),
@ -478,6 +482,46 @@ class EnterpriseSequencerRateLimitManagerTest
      }
    }

    "consumed cost resets to 0 when advancing the timestamp with no traffic being used" in {
      implicit f =>
        returnCorrectCost

        val expected = Right(
          Some(
            TrafficReceipt(
              consumedCost = NonNegativeLong.one,
              extraTrafficConsumed = NonNegativeLong.zero,
              baseTrafficRemainder = maxBaseTrafficRemainder.tryAdd(-1L),
            )
          )
        )

        for {
          _ <- purchaseTraffic
          res <- consume( // only uses the base traffic
            cost = Some(NonNegativeLong.one),
            correctCost = NonNegativeLong.one,
            sequencingTimestamp = sequencingTs.plusMillis(1),
          )
          _ <- assertTrafficConsumed(
            timestamp = sequencingTs.plusMillis(1),
            expectedTrafficConsumed = NonNegativeLong.zero,
            expectedBaseTrafficRemainder =
              maxBaseTrafficRemainder.tryAdd(-1L), // only uses the base traffic
            expectedLastConsumedCost = NonNegativeLong.one,
          )
          _ <- assertTrafficConsumed(
            timestamp = sequencingTs.plusSeconds(1), // after a full second
            expectedTrafficConsumed = NonNegativeLong.zero,
            expectedBaseTrafficRemainder =
              maxBaseTrafficRemainder, // base traffic is back to maximum
            expectedLastConsumedCost = NonNegativeLong.zero, // last consumed cost is reset to 0
          )
        } yield {
          res shouldBe expected
        }
    }

    "advance traffic consumed timestamp even when not consuming because not enough traffic" in {
      implicit f =>
        returnCorrectCost
@ -490,6 +534,7 @@ class EnterpriseSequencerRateLimitManagerTest
          NonNegativeLong.zero,
          NonNegativeLong.zero,
          maxBaseTrafficRemainder,
          NonNegativeLong.zero,
          sequencingTs,
          None,
        ),
@ -512,6 +557,7 @@ class EnterpriseSequencerRateLimitManagerTest
          _ <- assertTrafficConsumed(
            expectedTrafficConsumed = NonNegativeLong.zero,
            expectedBaseTrafficRemainder = NonNegativeLong.tryCreate(4),
            expectedLastConsumedCost = NonNegativeLong.one,
          )
          // then at sequencingTs.plusMillis(1)
          res2 <- consume(
@ -522,6 +568,7 @@ class EnterpriseSequencerRateLimitManagerTest
          _ <- assertTrafficConsumed(
            expectedTrafficConsumed = NonNegativeLong.zero,
            expectedBaseTrafficRemainder = NonNegativeLong.tryCreate(3),
            expectedLastConsumedCost = NonNegativeLong.one,
            timestamp = sequencingTs.plusMillis(1),
          )
          // then repeat consume at sequencingTs, which simulates a crash recovery that replays the event
@ -530,6 +577,7 @@ class EnterpriseSequencerRateLimitManagerTest
          _ <- assertTrafficConsumed(
            expectedTrafficConsumed = NonNegativeLong.zero,
            expectedBaseTrafficRemainder = NonNegativeLong.tryCreate(3),
            expectedLastConsumedCost = NonNegativeLong.one,
            timestamp = sequencingTs.plusMillis(1),
          )
        } yield {
@ -562,7 +610,10 @@ class EnterpriseSequencerRateLimitManagerTest
      for {
        _ <- purchaseTraffic
        res <- consume(cost = Some(incorrectSubmissionCostNN))
        _ <- assertTrafficConsumed(expectedTrafficConsumed = NonNegativeLong.one)
        _ <- assertTrafficConsumed(
          expectedTrafficConsumed = NonNegativeLong.one,
          expectedLastConsumedCost = incorrectSubmissionCostNN,
        )
      } yield {
        res shouldBe Right(
          Some(
@ -34,6 +34,7 @@ trait TrafficConsumedStoreTest
      t1,
      NonNegativeLong.tryCreate(3),
      NonNegativeLong.tryCreate(20L),
      NonNegativeLong.tryCreate(5L),
    )
    val consumedAlice2 = consumedAlice1.copy(sequencingTimestamp = t2)
    val consumedAlice3 = consumedAlice1.copy(sequencingTimestamp = t3)
@ -43,6 +44,7 @@ trait TrafficConsumedStoreTest
      t1,
      NonNegativeLong.tryCreate(3),
      NonNegativeLong.tryCreate(20L),
      NonNegativeLong.tryCreate(10L),
    )
    val consumedBob2 = consumedBob1.copy(sequencingTimestamp = t2)
    val consumedBob3 = consumedBob1.copy(sequencingTimestamp = t3)
@ -168,21 +170,35 @@ trait TrafficConsumedStoreTest
      val store = mk()

      val aliceConsumed = Seq(
        TrafficConsumed(alice.member, t1, NonNegativeLong.one, NonNegativeLong.tryCreate(5L)),
        TrafficConsumed(
          alice.member,
          t1,
          NonNegativeLong.one,
          NonNegativeLong.tryCreate(5L),
          NonNegativeLong.tryCreate(5L),
        ),
        TrafficConsumed(
          alice.member,
          t3,
          NonNegativeLong.tryCreate(2),
          NonNegativeLong.tryCreate(55L),
          NonNegativeLong.tryCreate(4L),
        ),
      )
      val bobConsumed = Seq(
        TrafficConsumed(bob.member, t2, NonNegativeLong.one, NonNegativeLong.tryCreate(10L)),
        TrafficConsumed(
          bob.member,
          t2,
          NonNegativeLong.one,
          NonNegativeLong.tryCreate(10L),
          NonNegativeLong.tryCreate(5L),
        ),
        TrafficConsumed(
          bob.member,
          t4,
          NonNegativeLong.tryCreate(2),
          NonNegativeLong.tryCreate(100L),
          NonNegativeLong.tryCreate(3L),
        ),
      )

@ -0,0 +1,73 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

syntax = "proto3";

package com.daml.ledger.api.v2.admin;

import "com/daml/ledger/api/v2/commands.proto";
import "com/daml/ledger/api/v2/completion.proto";
import "com/daml/ledger/api/v2/value.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Com.Daml.Ledger.Api.V2.Admin";
option java_outer_classname = "CommandInspectionServiceOuterClass";
option java_package = "com.daml.ledger.api.v2.admin";

// Status: experimental interface, will change before it is deemed production
// ready
//
// The inspection service provides methods for the ledger administrator
// to look under the hood of a running system.
// In V2 Ledger API this service is not available.
service CommandInspectionService {
  // Inquire about the status of a command.
  // This service is used for debugging only. The command status is only tracked in memory and is not persisted.
  // The service can be used to understand the failure status and the structure of a command.
  // Requires admin privileges
  // The service is alpha without backward compatibility guarantees.
  rpc GetCommandStatus(GetCommandStatusRequest) returns (GetCommandStatusResponse);
}

enum CommandState {
  COMMAND_STATE_UNSPECIFIED = 0; // This value acts as wildcard in the queries
  COMMAND_STATE_PENDING = 1;
  COMMAND_STATE_SUCCEEDED = 2;
  COMMAND_STATE_FAILED = 3;
}

message GetCommandStatusRequest {
  string command_id_prefix = 1; // optional filter by command id
  CommandState state = 2; // optional filter by state
  uint32 limit = 3; // optional limit of returned statuses, defaults to 100
}

message GetCommandStatusResponse {
  message CommandStatus {
    google.protobuf.Timestamp started = 1;
    google.protobuf.Timestamp completed = 2;
    Completion completion = 3;
    CommandState state = 4;
    repeated Command commands = 5;
    message RequestStatistics {
      uint32 envelopes = 1;
      uint32 request_size = 2;
      uint32 recipients = 3;
    }
    RequestStatistics request_statistics = 6;
    message CommandUpdates {
      message Contract {
        Identifier template_id = 1;
        string contract_id = 2;
        Value contract_key = 3;
      }
      repeated Contract created = 1;
      repeated Contract archived = 2;
      uint32 exercised = 3;
      uint32 fetched = 4;
      uint32 looked_up_by_key = 5;
    }
    CommandUpdates updates = 7;
  }
  repeated CommandStatus command_status = 1;
}
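As a hedged usage sketch, an admin client could call the new RPC through the generated Scala bindings (the generated package and message names follow the imports added elsewhere in this commit; channel address, plaintext transport, and the absence of auth interceptors are illustrative assumptions):

    // Sketch: list up to 10 pending commands via the new admin RPC.
    import com.daml.ledger.api.v2.admin.command_inspection_service.{
      CommandInspectionServiceGrpc,
      CommandState,
      GetCommandStatusRequest,
    }
    import io.grpc.ManagedChannelBuilder

    object CommandInspectionExample {
      def main(args: Array[String]): Unit = {
        val channel =
          ManagedChannelBuilder.forAddress("localhost", 6865).usePlaintext().build()
        val stub = CommandInspectionServiceGrpc.blockingStub(channel)
        val response = stub.getCommandStatus(
          GetCommandStatusRequest(
            commandIdPrefix = "", // empty prefix acts as "match all"
            state = CommandState.COMMAND_STATE_PENDING,
            limit = 10,
          )
        )
        response.commandStatus.foreach(status => println(status.state))
        channel.shutdown()
        ()
      }
    }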
@ -17,9 +17,15 @@ option java_package = "com.daml.ledger.api.v2";
// See the feature message definitions for descriptions.
message ExperimentalFeatures {
  ExperimentalStaticTime static_time = 1;
  ExperimentalCommandInspectionService command_inspection_service = 2;
}

// Ledger is in the static time mode and exposes a time service.
message ExperimentalStaticTime {
  bool supported = 1;
}

// Whether the Ledger API supports command inspection service
message ExperimentalCommandInspectionService {
  bool supported = 1;
}
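Since the feature is advertised through the experimental-features descriptor, a client can probe for it before calling the inspection service. A hedged sketch, assuming the v2 VersionService layout (service, request, and field names are assumptions, not confirmed by this diff):

    // Sketch: check the experimental flag before using CommandInspectionService.
    import com.daml.ledger.api.v2.version_service.{GetLedgerApiVersionRequest, VersionServiceGrpc}
    import io.grpc.ManagedChannel

    def commandInspectionSupported(channel: ManagedChannel): Boolean = {
      val version = VersionServiceGrpc
        .blockingStub(channel)
        .getLedgerApiVersion(GetLedgerApiVersionRequest())
      version.features
        .flatMap(_.experimental)
        .flatMap(_.commandInspectionService)
        .exists(_.supported)
    }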
@ -0,0 +1,36 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.ledger.api.auth.services

import com.daml.ledger.api.v2.admin.command_inspection_service.CommandInspectionServiceGrpc.CommandInspectionService
import com.daml.ledger.api.v2.admin.command_inspection_service.{
  CommandInspectionServiceGrpc,
  GetCommandStatusRequest,
  GetCommandStatusResponse,
}
import com.digitalasset.canton.ledger.api.ProxyCloseable
import com.digitalasset.canton.ledger.api.auth.Authorizer
import com.digitalasset.canton.ledger.api.grpc.GrpcApiService
import io.grpc.ServerServiceDefinition

import scala.concurrent.{ExecutionContext, Future}

final class CommandInspectionServiceAuthorization(
    protected val service: CommandInspectionService with AutoCloseable,
    private val authorizer: Authorizer,
)(implicit executionContext: ExecutionContext)
    extends CommandInspectionService
    with ProxyCloseable
    with GrpcApiService {

  override def bindService(): ServerServiceDefinition =
    CommandInspectionServiceGrpc.bindService(this, executionContext)

  override def close(): Unit = service.close()

  override def getCommandStatus(
      request: GetCommandStatusRequest
  ): Future[GetCommandStatusResponse] =
    authorizer.requireAdminClaims(service.getCommandStatus)(request)
}
@ -27,7 +27,7 @@ final class CommandServiceAuthorization(
    with GrpcApiService {

  override def submitAndWait(request: SubmitAndWaitRequest): Future[Empty] = {
    val effectiveSubmitters = CommandsValidator.effectiveSubmittersV2(request.commands)
    val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands)
    authorizer.requireActAndReadClaimsForParties(
      actAs = effectiveSubmitters.actAs,
      readAs = effectiveSubmitters.readAs,
@ -39,7 +39,7 @@ final class CommandServiceAuthorization(
  override def submitAndWaitForTransaction(
      request: SubmitAndWaitRequest
  ): Future[SubmitAndWaitForTransactionResponse] = {
    val effectiveSubmitters = CommandsValidator.effectiveSubmittersV2(request.commands)
    val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands)
    authorizer.requireActAndReadClaimsForParties(
      actAs = effectiveSubmitters.actAs,
      readAs = effectiveSubmitters.readAs,
@ -51,7 +51,7 @@ final class CommandServiceAuthorization(
  override def submitAndWaitForUpdateId(
      request: SubmitAndWaitRequest
  ): Future[SubmitAndWaitForUpdateIdResponse] = {
    val effectiveSubmitters = CommandsValidator.effectiveSubmittersV2(request.commands)
    val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands)
    authorizer.requireActAndReadClaimsForParties(
      actAs = effectiveSubmitters.actAs,
      readAs = effectiveSubmitters.readAs,
@ -63,7 +63,7 @@ final class CommandServiceAuthorization(
  override def submitAndWaitForTransactionTree(
      request: SubmitAndWaitRequest
  ): Future[SubmitAndWaitForTransactionTreeResponse] = {
    val effectiveSubmitters = CommandsValidator.effectiveSubmittersV2(request.commands)
    val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands)
    authorizer.requireActAndReadClaimsForParties(
      actAs = effectiveSubmitters.actAs,
      readAs = effectiveSubmitters.readAs,

@ -23,7 +23,7 @@ final class CommandSubmissionServiceAuthorization(
    with GrpcApiService {

  override def submit(request: SubmitRequest): Future[SubmitResponse] = {
    val effectiveSubmitters = CommandsValidator.effectiveSubmittersV2(request.commands)
    val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands)
    authorizer.requireActAndReadClaimsForParties(
      actAs = effectiveSubmitters.actAs,
      readAs = effectiveSubmitters.readAs,
@ -0,0 +1,18 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.ledger.api.services

import com.daml.ledger.api.v2.admin.command_inspection_service.CommandState
import com.digitalasset.canton.platform.apiserver.execution.CommandStatus

import scala.concurrent.Future

trait CommandInspectionService {
  def findCommandStatus(
      commandIdPrefix: String,
      state: CommandState,
      limit: Int,
  ): Future[Seq[CommandStatus]]

}
@ -0,0 +1,26 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.ledger.api.validation

import com.daml.error.ContextualizedErrorLogger
import com.daml.ledger.api.v2.admin.command_inspection_service.GetCommandStatusRequest
import com.daml.lf.data.Ref
import com.digitalasset.canton.ledger.api.validation.ValidationErrors.invalidField
import io.grpc.StatusRuntimeException

object CommandInspectionServiceRequestValidator {
  def validateCommandStatusRequest(
      request: GetCommandStatusRequest
  )(implicit
      contextualizedErrorLogger: ContextualizedErrorLogger
  ): Either[StatusRuntimeException, GetCommandStatusRequest] =
    if (request.commandIdPrefix.isEmpty) Right(request)
    else
      Ref.CommandId
        .fromString(request.commandIdPrefix)
        .map(_ => request)
        .left
        .map(invalidField("command_id_prefix", _))

}
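For context, a hedged sketch of how a handler could apply this validator before delegating to the service trait added above; the handler name and error handling are assumptions for illustration, not part of this commit:

    // Sketch: fail the call with the validation error, or run the lookup.
    import scala.concurrent.Future

    def handleGetCommandStatus(
        request: GetCommandStatusRequest,
        service: CommandInspectionService,
    )(implicit errorLogger: ContextualizedErrorLogger): Future[Seq[CommandStatus]] =
      CommandInspectionServiceRequestValidator
        .validateCommandStatusRequest(request)
        .fold(
          Future.failed, // the StatusRuntimeException surfaces as the gRPC status
          validated =>
            service.findCommandStatus(
              validated.commandIdPrefix,
              validated.state,
              validated.limit,
            ),
        )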
@ -271,10 +271,6 @@ object CommandsValidator {
    commands.fold(noSubmitters)(effectiveSubmitters)
  }

  def effectiveSubmittersV2(commands: Option[Commands]): Submitters[String] = {
    commands.fold(noSubmitters)(effectiveSubmitters)
  }

  def effectiveSubmitters(commands: Commands): Submitters[String] = {
    val actAs = commands.actAs.toSet
    val readAs = commands.readAs.toSet -- actAs
@ -3,23 +3,12 @@

package com.digitalasset.canton.ledger.participant.state

import com.daml.daml_lf_dev.DamlLf.Archive
import com.daml.error.ContextualizedErrorLogger
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.data.Offset
import com.digitalasset.canton.ledger.api.health.ReportsHealth
import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata
import com.digitalasset.canton.protocol.PackageDescription
import com.digitalasset.canton.topology.DomainId
import com.digitalasset.canton.topology.transaction.ParticipantPermission
import com.digitalasset.canton.tracing.{TraceContext, Traced}
import com.digitalasset.canton.{DomainAlias, LfPartyId}
import com.google.protobuf.ByteString
import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Source

import scala.concurrent.Future

/** An interface for reading the state of a ledger participant.
  * '''Please note that this interface is unstable and may significantly change.'''
  *
@ -33,7 +22,7 @@ import scala.concurrent.Future
  * information. See [[Update]] for a description of the state updates
  * communicated by [[ReadService!.stateUpdates]].
  */
trait ReadService extends ReportsHealth with InternalStateServiceProvider {
trait ReadService extends ReportsHealth {

  /** Get the stream of state [[Update]]s starting from the beginning or right
    * after the given [[com.digitalasset.canton.data.Offset]]
@ -139,65 +128,4 @@ trait ReadService extends ReportsHealth with InternalStateServiceProvider {
  def stateUpdates(
      beginAfter: Option[Offset]
  )(implicit traceContext: TraceContext): Source[(Offset, Traced[Update]), NotUsed]

  def getConnectedDomains(request: ReadService.ConnectedDomainRequest)(implicit
      traceContext: TraceContext
  ): Future[ReadService.ConnectedDomainResponse] =
    throw new UnsupportedOperationException()

  /** Get the offsets of the incomplete assigned/unassigned events for a set of stakeholders.
    *
    * @param validAt The offset of validity in participant offset terms.
    * @param stakeholders Only offsets are returned which have at least one stakeholder from this set.
    * @return All the offsets of assigned/unassigned events which do not have their counterparts visible at
    *         the validAt offset, and only for the reassignments for which this participant is reassigning.
    */
  def incompleteReassignmentOffsets(
      validAt: Offset,
      stakeholders: Set[LfPartyId],
  )(implicit traceContext: TraceContext): Future[Vector[Offset]] = {
    val _ = validAt
    val _ = stakeholders
    val _ = traceContext
    Future.successful(Vector.empty)
  }

  def getPackageMetadataSnapshot(implicit
      contextualizedErrorLogger: ContextualizedErrorLogger
  ): PackageMetadata =
    throw new UnsupportedOperationException()

  def listLfPackages()(implicit
      traceContext: TraceContext
  ): Future[Seq[PackageDescription]] =
    throw new UnsupportedOperationException()

  def getLfArchive(packageId: PackageId)(implicit
      traceContext: TraceContext
  ): Future[Option[Archive]] =
    throw new UnsupportedOperationException()

  def validateDar(
      dar: ByteString,
      darName: String,
  )(implicit
      traceContext: TraceContext
  ): Future[SubmissionResult] =
    throw new UnsupportedOperationException()
}

object ReadService {
  final case class ConnectedDomainRequest(party: LfPartyId)

  final case class ConnectedDomainResponse(
      connectedDomains: Seq[ConnectedDomainResponse.ConnectedDomain]
  )

  object ConnectedDomainResponse {
    final case class ConnectedDomain(
        domainAlias: DomainAlias,
        domainId: DomainId,
        permission: ParticipantPermission,
    )
  }
}
@ -3,15 +3,28 @@

package com.digitalasset.canton.ledger.participant.state

import com.daml.daml_lf_dev.DamlLf.Archive
import com.daml.error.ContextualizedErrorLogger
import com.daml.lf.data.Ref.PackageId
import com.daml.lf.data.{ImmArray, Ref}
import com.daml.lf.transaction.{GlobalKey, SubmittedTransaction}
import com.daml.lf.value.Value
import com.digitalasset.canton.data.ProcessedDisclosedContract
import com.digitalasset.canton.data.{Offset, ProcessedDisclosedContract}
import com.digitalasset.canton.ledger.api.health.ReportsHealth
import com.digitalasset.canton.ledger.participant.state.WriteService.{
  ConnectedDomainRequest,
  ConnectedDomainResponse,
}
import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata
import com.digitalasset.canton.protocol.PackageDescription
import com.digitalasset.canton.topology.DomainId
import com.digitalasset.canton.topology.transaction.ParticipantPermission
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.{DomainAlias, LfPartyId}
import com.google.protobuf.ByteString

import java.util.concurrent.CompletionStage
import scala.concurrent.Future

/** An interface to change a ledger via a participant.
  * '''Please note that this interface is unstable and may significantly change.'''
@ -31,14 +44,14 @@ import java.util.concurrent.CompletionStage
  * The following methods are currently available for changing the state of a Daml ledger:
  * - submitting a transaction using [[WriteService!.submitTransaction]]
  * - allocating a new party using [[WritePartyService!.allocateParty]]
  * - uploading a new package using [[WritePackagesService!.uploadDar]]
  * - pruning a participant ledger using [[WriteParticipantPruningService!.prune]]
  */
trait WriteService
    extends WritePackagesService
    with WritePartyService
    with WriteParticipantPruningService
    with ReportsHealth {
    with ReportsHealth
    with InternalStateServiceProvider {

  /** Submit a transaction for acceptance to the ledger.
    *
@ -147,4 +160,65 @@ trait WriteService
  )(implicit
      traceContext: TraceContext
  ): CompletionStage[SubmissionResult]

  def getConnectedDomains(request: ConnectedDomainRequest)(implicit
      traceContext: TraceContext
  ): Future[ConnectedDomainResponse] =
    throw new UnsupportedOperationException()

  /** Get the offsets of the incomplete assigned/unassigned events for a set of stakeholders.
    *
    * @param validAt The offset of validity in participant offset terms.
    * @param stakeholders Only offsets are returned which have at least one stakeholder from this set.
    * @return All the offsets of assigned/unassigned events which do not have their counterparts visible at
    *         the validAt offset, and only for the reassignments for which this participant is reassigning.
    */
  def incompleteReassignmentOffsets(
      validAt: Offset,
      stakeholders: Set[LfPartyId],
  )(implicit traceContext: TraceContext): Future[Vector[Offset]] = {
    val _ = validAt
    val _ = stakeholders
    val _ = traceContext
    Future.successful(Vector.empty)
  }

  def getPackageMetadataSnapshot(implicit
      contextualizedErrorLogger: ContextualizedErrorLogger
  ): PackageMetadata =
    throw new UnsupportedOperationException()

  def listLfPackages()(implicit
      traceContext: TraceContext
  ): Future[Seq[PackageDescription]] =
    throw new UnsupportedOperationException()

  def getLfArchive(packageId: PackageId)(implicit
      traceContext: TraceContext
  ): Future[Option[Archive]] =
    throw new UnsupportedOperationException()

  def validateDar(
      dar: ByteString,
      darName: String,
  )(implicit
      traceContext: TraceContext
  ): Future[SubmissionResult] =
    throw new UnsupportedOperationException()
}

object WriteService {
  final case class ConnectedDomainRequest(party: LfPartyId)

  final case class ConnectedDomainResponse(
      connectedDomains: Seq[ConnectedDomainResponse.ConnectedDomain]
  )

  object ConnectedDomainResponse {
    final case class ConnectedDomain(
        domainAlias: DomainAlias,
        domainId: DomainId,
        permission: ParticipantPermission,
    )
  }
}
@ -3,29 +3,15 @@

package com.digitalasset.canton.ledger.participant.state.metrics

import com.daml.daml_lf_dev.DamlLf.Archive
import com.daml.error.ContextualizedErrorLogger
import com.daml.lf.data.Ref.PackageId
import com.daml.metrics.Timed
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.data.Offset
import com.digitalasset.canton.ledger.api.health.HealthStatus
import com.digitalasset.canton.ledger.participant.state.{
  InternalStateService,
  ReadService,
  SubmissionResult,
  Update,
}
import com.digitalasset.canton.ledger.participant.state.{ReadService, Update}
import com.digitalasset.canton.metrics.LedgerApiServerMetrics
import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata
import com.digitalasset.canton.protocol.PackageDescription
import com.digitalasset.canton.tracing.{TraceContext, Traced}
import com.google.protobuf.ByteString
import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Source

import scala.concurrent.Future

final class TimedReadService(delegate: ReadService, metrics: LedgerApiServerMetrics)
    extends ReadService {

@ -34,60 +20,6 @@ final class TimedReadService(delegate: ReadService, metrics: LedgerApiServerMetr
  )(implicit traceContext: TraceContext): Source[(Offset, Traced[Update]), NotUsed] =
    Timed.source(metrics.services.read.stateUpdates, delegate.stateUpdates(beginAfter))

  override def getConnectedDomains(
      request: ReadService.ConnectedDomainRequest
  )(implicit traceContext: TraceContext): Future[ReadService.ConnectedDomainResponse] =
    Timed.future(
      metrics.services.read.getConnectedDomains,
      delegate.getConnectedDomains(request),
    )

  override def incompleteReassignmentOffsets(validAt: Offset, stakeholders: Set[LfPartyId])(implicit
      traceContext: TraceContext
  ): Future[Vector[Offset]] =
    Timed.future(
      metrics.services.read.getConnectedDomains,
      delegate.incompleteReassignmentOffsets(validAt, stakeholders),
    )

  override def currentHealth(): HealthStatus =
    delegate.currentHealth()

  override def registerInternalStateService(internalStateService: InternalStateService): Unit =
    delegate.registerInternalStateService(internalStateService)

  override def internalStateService: Option[InternalStateService] =
    delegate.internalStateService

  override def unregisterInternalStateService(): Unit =
    delegate.unregisterInternalStateService()

  override def getPackageMetadataSnapshot(implicit
      contextualizedErrorLogger: ContextualizedErrorLogger
  ): PackageMetadata =
    delegate.getPackageMetadataSnapshot

  override def listLfPackages()(implicit
      traceContext: TraceContext
  ): Future[Seq[PackageDescription]] =
    Timed.future(
      metrics.services.read.listLfPackages,
      delegate.listLfPackages(),
    )

  override def getLfArchive(
      packageId: PackageId
  )(implicit traceContext: TraceContext): Future[Option[Archive]] =
    Timed.future(
      metrics.services.read.getLfArchive,
      delegate.getLfArchive(packageId),
    )

  override def validateDar(dar: ByteString, darName: String)(implicit
      traceContext: TraceContext
  ): Future[SubmissionResult] =
    Timed.future(
      metrics.services.read.validateDar,
      delegate.validateDar(dar, darName),
    )
}
@ -3,14 +3,24 @@

package com.digitalasset.canton.ledger.participant.state.metrics

import com.daml.daml_lf_dev.DamlLf.Archive
import com.daml.error.ContextualizedErrorLogger
import com.daml.lf.data.Ref.PackageId
import com.daml.lf.data.{ImmArray, Ref}
import com.daml.lf.transaction.{GlobalKey, SubmittedTransaction}
import com.daml.lf.value.Value
import com.daml.metrics.Timed
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.data.{Offset, ProcessedDisclosedContract}
import com.digitalasset.canton.ledger.api.health.HealthStatus
import com.digitalasset.canton.ledger.participant.state.WriteService.{
  ConnectedDomainRequest,
  ConnectedDomainResponse,
}
import com.digitalasset.canton.ledger.participant.state.*
import com.digitalasset.canton.metrics.LedgerApiServerMetrics
import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata
import com.digitalasset.canton.protocol.PackageDescription
import com.digitalasset.canton.topology.DomainId
import com.digitalasset.canton.tracing.TraceContext
import com.google.protobuf.ByteString
@ -104,4 +114,58 @@ final class TimedWriteService(delegate: WriteService, metrics: LedgerApiServerMe

  override def currentHealth(): HealthStatus =
    delegate.currentHealth()

  override def getConnectedDomains(
      request: ConnectedDomainRequest
  )(implicit traceContext: TraceContext): Future[ConnectedDomainResponse] =
    Timed.future(
      metrics.services.read.getConnectedDomains,
      delegate.getConnectedDomains(request),
    )

  override def incompleteReassignmentOffsets(validAt: Offset, stakeholders: Set[LfPartyId])(implicit
      traceContext: TraceContext
  ): Future[Vector[Offset]] =
    Timed.future(
      metrics.services.read.getConnectedDomains,
      delegate.incompleteReassignmentOffsets(validAt, stakeholders),
    )

  override def registerInternalStateService(internalStateService: InternalStateService): Unit =
    delegate.registerInternalStateService(internalStateService)

  override def internalStateService: Option[InternalStateService] =
    delegate.internalStateService

  override def unregisterInternalStateService(): Unit =
    delegate.unregisterInternalStateService()

  override def getPackageMetadataSnapshot(implicit
      contextualizedErrorLogger: ContextualizedErrorLogger
  ): PackageMetadata =
    delegate.getPackageMetadataSnapshot

  override def listLfPackages()(implicit
      traceContext: TraceContext
  ): Future[Seq[PackageDescription]] =
    Timed.future(
      metrics.services.read.listLfPackages,
      delegate.listLfPackages(),
    )

  override def getLfArchive(
      packageId: PackageId
  )(implicit traceContext: TraceContext): Future[Option[Archive]] =
    Timed.future(
      metrics.services.read.getLfArchive,
      delegate.getLfArchive(packageId),
    )

  override def validateDar(dar: ByteString, darName: String)(implicit
      traceContext: TraceContext
  ): Future[SubmissionResult] =
    Timed.future(
      metrics.services.read.validateDar,
      delegate.validateDar(dar, darName),
    )
}
@ -6,6 +6,7 @@ package com.digitalasset.canton.platform
import com.daml.ledger.resources.ResourceOwner
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.metrics.LedgerApiServerMetrics
import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker
import com.digitalasset.canton.platform.apiserver.services.tracking.SubmissionTracker
import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd
import com.digitalasset.canton.platform.store.cache.{
@ -32,6 +33,7 @@ private[platform] class InMemoryState(
    val stringInterningView: StringInterningView,
    val dispatcherState: DispatcherState,
    val submissionTracker: SubmissionTracker,
    val commandProgressTracker: CommandProgressTracker,
    val loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext)
    extends NamedLogging {
@ -70,6 +72,7 @@ private[platform] class InMemoryState(

object InMemoryState {
  def owner(
      commandProgressTracker: CommandProgressTracker,
      apiStreamShutdownTimeout: Duration,
      bufferedStreamsPageSize: Int,
      maxContractStateCacheSize: Long,
@ -112,6 +115,7 @@ object InMemoryState {
      ),
      stringInterningView = new StringInterningView(loggerFactory),
      submissionTracker = submissionTracker,
      commandProgressTracker = commandProgressTracker,
      loggerFactory = loggerFactory,
    )(executionContext)
}
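The InMemoryState hunks above thread a CommandProgressTracker into the participant's in-memory state so that in-flight command status can be inspected via the new command inspection service. As an illustration of what such a tracker can look like, here is a hedged sketch assuming it is essentially a concurrent map from command ID to latest state; ProgressTrackerSketch and its methods are assumptions, not Canton's CommandProgressTracker API:

import scala.collection.concurrent.TrieMap

// Hypothetical stand-in for a command progress tracker: it records the most
// recent state seen for each in-flight command and exposes a point-in-time
// snapshot for inspection.
final class ProgressTrackerSketch {
  private val states = TrieMap.empty[String, String]

  // Called by the submission pipeline as a command moves through phases.
  def update(commandId: String, state: String): Unit =
    states.update(commandId, state)

  // Called by an inspection endpoint to report current command states.
  def snapshot(): Map[String, String] =
    states.readOnlySnapshot().toMap
}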
@ -6,6 +6,7 @@ package com.digitalasset.canton.platform
import com.daml.ledger.resources.ResourceOwner
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.metrics.LedgerApiServerMetrics
import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker
import com.digitalasset.canton.platform.config.IndexServiceConfig
import com.digitalasset.canton.platform.index.InMemoryStateUpdater
import com.digitalasset.canton.tracing.TraceContext
@ -15,6 +16,7 @@ import scala.concurrent.ExecutionContext

object LedgerApiServer {
  def createInMemoryStateAndUpdater(
      commandProgressTracker: CommandProgressTracker,
      indexServiceConfig: IndexServiceConfig,
      maxCommandsInFlight: Int,
      metrics: LedgerApiServerMetrics,
@ -26,6 +28,7 @@ object LedgerApiServer {
  ): ResourceOwner[(InMemoryState, InMemoryStateUpdater.UpdaterFlow)] = {
    for {
      inMemoryState <- InMemoryState.owner(
        commandProgressTracker = commandProgressTracker,
        apiStreamShutdownTimeout = indexServiceConfig.apiStreamShutdownTimeout,
        bufferedStreamsPageSize = indexServiceConfig.bufferedStreamsPageSize,
        maxContractStateCacheSize = indexServiceConfig.maxContractStateCacheSize,
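LedgerApiServer.createInMemoryStateAndUpdater receives the same tracker as a new leading parameter and passes it through to InMemoryState.owner unchanged, so the submission path that reports progress and the inspection path that reads it share one instance. A sketch of that dependency threading, reusing the hypothetical ProgressTrackerSketch from the previous sketch; LedgerApiServerSketch and InMemoryStateSketch are illustrative stand-ins, not the real types:

// Simplified stand-ins; only the dependency threading matters here.
final case class InMemoryStateSketch(tracker: ProgressTrackerSketch)

object LedgerApiServerSketch {
  // Mirrors the diff: the tracker is accepted as an argument and passed through
  // unchanged, rather than constructed locally, so the caller controls its lifetime.
  def createStateSketch(tracker: ProgressTrackerSketch): InMemoryStateSketch =
    InMemoryStateSketch(tracker)
}

object WiringExample {
  def main(args: Array[String]): Unit = {
    val tracker = new ProgressTrackerSketch
    val state = LedgerApiServerSketch.createStateSketch(tracker)
    tracker.update("cmd-1", "submitted") // write path reports progress
    println(state.tracker.snapshot())    // inspection path reads the same view
  }
}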
@ -0,0 +1,46 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.platform

import com.daml.ledger.resources.Resource
import com.digitalasset.canton.lifecycle.{AsyncCloseable, AsyncOrSyncCloseable, FlagCloseableAsync}
import com.digitalasset.canton.logging.NamedLogging
import com.digitalasset.canton.tracing.TraceContext

import scala.concurrent.blocking

@SuppressWarnings(Array("org.wartremover.warts.Var"))
abstract class ResourceCloseable extends FlagCloseableAsync with NamedLogging {
  private var closeableResource: Option[AsyncCloseable] = None

  override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = blocking(synchronized {
    List(
      closeableResource.getOrElse(
        throw new IllegalStateException(
          "Programming error: resource not registered. Please use ResourceOwnerOps.toCloseable."
        )
      )
    )
  })

  def registerResource(resource: Resource[?], name: String)(implicit
      traceContext: TraceContext
  ): this.type = blocking(synchronized {
    this.closeableResource.foreach(_ =>
      throw new IllegalStateException(
        "Programming error: resource registered multiple times. Please use ResourceOwnerFlagCloseableOps.acquireFlagCloseable."
      )
    )
    this.closeableResource = Some(
      AsyncCloseable(
        name = name,
        closeFuture = resource.release(),
        timeout = timeouts.shutdownNetwork,
        onTimeout = err =>
          logger.warn(s"Resource $name failed to close within ${timeouts.shutdownNetwork}.", err),
      )
    )
    this
  })
}
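The new ResourceCloseable base class adapts a ledger-api Resource to Canton's FlagCloseableAsync lifecycle: exactly one resource may be registered, and closeAsync releases it within the inherited shutdown timeout (timeouts and logger come from FlagCloseableAsync and NamedLogging). Below is a self-contained sketch of the same register-once/close-once discipline without Canton's lifecycle types; all names are illustrative:

import scala.concurrent.Future

// Sketch of the register-once/close-once discipline above, without Canton's
// FlagCloseableAsync machinery.
abstract class CloseableSketch {
  // Single mutable slot for the release action; guarded by `synchronized`,
  // like ResourceCloseable's closeableResource field.
  private var release: Option[() => Future[Unit]] = None

  // Register the resource's release action exactly once.
  final def register(name: String, releaseAction: () => Future[Unit]): this.type =
    synchronized {
      if (release.nonEmpty)
        throw new IllegalStateException(s"resource $name registered multiple times")
      release = Some(releaseAction)
      this
    }

  // Close by running the registered release action; failing fast when nothing
  // was ever registered mirrors ResourceCloseable.closeAsync.
  final def close(): Future[Unit] = synchronized {
    release
      .getOrElse(throw new IllegalStateException("resource not registered"))
      .apply()
  }
}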