update canton to 20231218.12048.0.v12ec1f16/2.9.0-snapshot.20231218.11653.0.v6d6cda48/3.0.0-snapshot.20231218.12048.0.v12ec1f16 (#18050)

CHANGELOG_BEGIN
CHANGELOG_END

Co-authored-by: Azure Pipelines Daml Build <support@digitalasset.com>
This commit is contained in:
azure-pipelines[bot] 2023-12-19 10:56:26 +00:00 committed by GitHub
parent 58a51f2151
commit e01b623477
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
138 changed files with 1280 additions and 5105 deletions

View File

@ -0,0 +1,76 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.admin.api.client.commands
import cats.implicits.toTraverseOps
import cats.syntax.either.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.domain.admin.v0
import com.digitalasset.canton.sequencing.SequencerConnections
import com.google.protobuf.empty.Empty
import io.grpc.ManagedChannel
import scala.concurrent.Future
object EnterpriseSequencerConnectionAdminCommands {

  /** Shared base for admin commands addressing the enterprise sequencer-connection service:
    * fixes the service type and stub creation so subclasses only provide request handling.
    */
  abstract class BaseSequencerConnectionAdminCommand[Req, Rep, Res]
      extends GrpcAdminCommand[Req, Rep, Res] {
    override type Svc =
      v0.EnterpriseSequencerConnectionServiceGrpc.EnterpriseSequencerConnectionServiceStub

    override def createService(
        channel: ManagedChannel
    ): v0.EnterpriseSequencerConnectionServiceGrpc.EnterpriseSequencerConnectionServiceStub =
      v0.EnterpriseSequencerConnectionServiceGrpc.stub(channel)
  }

  /** Fetches the currently configured sequencer connections.
    * Yields `None` when the node has no connections configured yet.
    */
  final case class GetConnection()
      extends BaseSequencerConnectionAdminCommand[
        v0.GetConnectionRequest,
        v0.GetConnectionResponse,
        Option[SequencerConnections],
      ] {
    override def createRequest(): Either[String, v0.GetConnectionRequest] =
      v0.GetConnectionRequest().asRight

    override def submitRequest(
        service: v0.EnterpriseSequencerConnectionServiceGrpc.EnterpriseSequencerConnectionServiceStub,
        request: v0.GetConnectionRequest,
    ): Future[v0.GetConnectionResponse] = service.getConnection(request)

    override def handleResponse(
        response: v0.GetConnectionResponse
    ): Either[String, Option[SequencerConnections]] = {
      // An empty connection list in the response maps to None; otherwise parse the
      // proto connections (with the trust threshold), surfacing parse errors as String.
      val connectionsO = NonEmpty.from(response.sequencerConnections)
      connectionsO.traverse { connections =>
        SequencerConnections
          .fromProtoV0(connections, response.sequencerTrustThreshold)
          .leftMap(_.message)
      }
    }
  }

  /** Replaces the node's sequencer connection configuration with `connections`. */
  final case class SetConnection(connections: SequencerConnections)
      extends BaseSequencerConnectionAdminCommand[
        v0.SetConnectionRequest,
        Empty,
        Unit,
      ] {
    override def createRequest(): Either[String, v0.SetConnectionRequest] = {
      val request = v0.SetConnectionRequest(
        connections.connections.map(_.toProtoV0),
        connections.sequencerTrustThreshold.unwrap,
      )
      request.asRight
    }

    override def submitRequest(
        service: v0.EnterpriseSequencerConnectionServiceGrpc.EnterpriseSequencerConnectionServiceStub,
        request: v0.SetConnectionRequest,
    ): Future[Empty] = service.setConnection(request)

    // The gRPC response carries no payload; success is all we need to report.
    override def handleResponse(response: Empty): Either[String, Unit] = ().asRight
  }
}

View File

@ -5,6 +5,8 @@ package com.digitalasset.canton.admin.api.client.commands
import com.daml.ledger.api.v1.event.CreatedEvent
import com.daml.ledger.api.v1.value.{Record, RecordField, Value}
import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse
import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse.ContractEntry
import com.daml.lf.data.Time
import com.daml.lf.transaction.TransactionCoder
import com.digitalasset.canton.admin.api.client.data.TemplateId
@ -15,6 +17,40 @@ import com.digitalasset.canton.protocol.{DriverContractMetadata, LfContractId}
/** Wrapper class to make scalapb LedgerApi classes more convenient to access
*/
object LedgerApiTypeWrappers {
/** Convenience wrapper around an ACS [[GetActiveContractsResponse.ContractEntry]] that
  * exposes the underlying created event, reassignment counter, contract id and template id
  * uniformly across the active / incomplete-unassigned / incomplete-assigned variants.
  * Accessors throw a RuntimeException when the wrapped entry or a required proto field is empty.
  */
final case class WrappedContractEntry(entry: GetActiveContractsResponse.ContractEntry) {

  // The created event backing this entry, independent of which variant it is.
  lazy val event: CreatedEvent = {
    val createdEventO = entry match {
      case ContractEntry.Empty =>
        throw new RuntimeException("Found empty contract entry")
      case ContractEntry.ActiveContract(active) => active.createdEvent
      case ContractEntry.IncompleteUnassigned(unassigned) => unassigned.createdEvent
      case ContractEntry.IncompleteAssigned(assigned) =>
        assigned.assignedEvent
          .getOrElse(throw new RuntimeException("Found empty assigned event"))
          .createdEvent
    }
    createdEventO.getOrElse(throw new RuntimeException("Found empty created event"))
  }

  /** Create-argument record flattened to a dotted-path -> value map (via `flatten`). */
  def arguments: Map[String, Any] =
    (for {
      record <- event.createArguments.toList
      recordField <- record.fields
      pathAndValue <- flatten(Seq(), recordField)
    } yield pathAndValue).toMap

  /** Reassignment counter of the entry, regardless of variant. */
  def reassignmentCounter: Long = entry match {
    case ContractEntry.Empty =>
      throw new RuntimeException("Found empty contract entry")
    case ContractEntry.ActiveContract(active) => active.reassignmentCounter
    case ContractEntry.IncompleteUnassigned(unassigned) =>
      unassigned.unassignedEvent
        .getOrElse(throw new RuntimeException("Found empty unassigned event"))
        .reassignmentCounter
    case ContractEntry.IncompleteAssigned(assigned) =>
      assigned.assignedEvent
        .getOrElse(throw new RuntimeException("Found empty assigned event"))
        .reassignmentCounter
  }

  def contractId: String = event.contractId

  def templateId: TemplateId =
    TemplateId.fromIdentifier(
      event.templateId.getOrElse(throw new RuntimeException("Found empty template id"))
    )
}
/*
Provide a few utilities methods on CreatedEvent.
@ -43,16 +79,6 @@ object LedgerApiTypeWrappers {
event.templateId.map(_.packageId).getOrElse(corrupt)
}
// Recursively flattens a (possibly nested) record field into dotted-path/value pairs,
// e.g. a field "a" holding a record {b -> 1} yields ("a.b", 1).
private def flatten(prefix: Seq[String], field: RecordField): Seq[(String, Any)] = {
// Walks one Value.Sum: records recurse with the extended prefix, variants unwrap
// their payload and re-dispatch, anything else is a leaf emitted under the dotted path.
def extract(args: Value.Sum): Seq[(String, Any)] =
args match {
case x: Value.Sum.Record => x.value.fields.flatMap(flatten(prefix :+ field.label, _))
case x: Value.Sum.Variant => x.value.value.toList.map(_.sum).flatMap(extract)
case x => Seq(((prefix :+ field.label).mkString("."), x.value))
}
// A field without a value contributes nothing.
field.value.map(_.sum).toList.flatMap(extract)
}
def arguments: Map[String, Any] =
event.createArguments.toList.flatMap(_.fields).flatMap(flatten(Seq(), _)).toMap
@ -97,6 +123,17 @@ object LedgerApiTypeWrappers {
}
}
/** Flattens a possibly nested record field into (dotted.path -> raw value) pairs. */
private def flatten(prefix: Seq[String], field: RecordField): Seq[(String, Any)] = {
  // Path of this field, shared by the recursive record case and the leaf case.
  val path = prefix :+ field.label
  def extract(sum: Value.Sum): Seq[(String, Any)] = sum match {
    case record: Value.Sum.Record => record.value.fields.flatMap(flatten(path, _))
    case variant: Value.Sum.Variant =>
      variant.value.value.toList.map(_.sum).flatMap(extract)
    case leaf => Seq((path.mkString("."), leaf.value))
  }
  // A field without a value contributes no pairs.
  field.value.toList.map(_.sum).flatMap(extract)
}
/** Holder of "core" contract defining fields (particularly those relevant for importing contracts) */
final case class ContractData(
templateId: TemplateId,

View File

@ -716,9 +716,8 @@ object ParticipantAdminCommands {
): Future[RegisterDomainResponse] =
service.registerDomain(request)
override def handleResponse(response: RegisterDomainResponse): Either[String, Unit] = Right(
()
)
override def handleResponse(response: RegisterDomainResponse): Either[String, Unit] =
Right(())
// can take long if we need to wait to become active
override def timeoutType: TimeoutType = DefaultUnboundedTimeout

View File

@ -7,7 +7,12 @@ import cats.syntax.either.*
import com.daml.nonempty.NonEmptyUtil
import com.digitalasset.canton.admin.api.client.data.crypto.*
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.config.{NonNegativeFiniteDuration, PositiveDurationSeconds}
import com.digitalasset.canton.config.{
CryptoConfig,
NonNegativeFiniteDuration,
PositiveDurationSeconds,
}
import com.digitalasset.canton.domain.config.DomainParametersConfig
import com.digitalasset.canton.protocol.DomainParameters.MaxRequestSize
import com.digitalasset.canton.protocol.DynamicDomainParameters.{
InvalidDynamicDomainParameters,
@ -68,6 +73,33 @@ final case class StaticDomainParameters(
object StaticDomainParameters {
// This method is unsafe. Not prefixing by `try` to have nicer docs snippets.
/** Builds console [[StaticDomainParameters]] from the given domain-parameters and crypto
  * configs. Throws an [[IllegalArgumentException]] when the config cannot be converted.
  */
def fromConfig(
    config: DomainParametersConfig,
    cryptoConfig: CryptoConfig,
): StaticDomainParameters = {
  val internalParameters = config
    .toStaticDomainParameters(cryptoConfig)
    .fold(
      err =>
        throw new IllegalArgumentException(s"Cannot instantiate static domain parameters: $err"),
      identity,
    )
  StaticDomainParameters(internalParameters)
}
// This method is unsafe. Not prefixing by `try` to have nicer docs snippets.
/** Builds console [[StaticDomainParameters]] from a default [[DomainParametersConfig]].
  * Throws an [[IllegalArgumentException]] when the defaults cannot be converted.
  */
def defaults(
    cryptoConfig: CryptoConfig
): StaticDomainParameters =
  // Deduplicated: this is exactly `fromConfig` applied to a default config; the
  // previous body repeated the conversion-and-throw logic verbatim.
  fromConfig(DomainParametersConfig(), cryptoConfig)
def apply(
domain: StaticDomainParametersInternal
): StaticDomainParameters =

View File

@ -13,6 +13,7 @@ import com.digitalasset.canton.domain.config.{
DomainBaseConfig,
RemoteDomainConfig,
}
import com.digitalasset.canton.domain.mediator.{CommunityMediatorNodeXConfig, RemoteMediatorConfig}
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger}
import com.digitalasset.canton.participant.config.{
CommunityParticipantConfig,
@ -32,9 +33,11 @@ final case class CantonCommunityConfig(
domains: Map[InstanceName, CommunityDomainConfig] = Map.empty,
participants: Map[InstanceName, CommunityParticipantConfig] = Map.empty,
participantsX: Map[InstanceName, CommunityParticipantConfig] = Map.empty,
mediatorsX: Map[InstanceName, CommunityMediatorNodeXConfig] = Map.empty,
remoteDomains: Map[InstanceName, RemoteDomainConfig] = Map.empty,
remoteParticipants: Map[InstanceName, RemoteParticipantConfig] = Map.empty,
remoteParticipantsX: Map[InstanceName, RemoteParticipantConfig] = Map.empty,
remoteMediatorsX: Map[InstanceName, RemoteMediatorConfig] = Map.empty,
monitoring: MonitoringConfig = MonitoringConfig(),
parameters: CantonParameters = CantonParameters(),
features: CantonFeatures = CantonFeatures(),
@ -43,6 +46,7 @@ final case class CantonCommunityConfig(
override type DomainConfigType = CommunityDomainConfig
override type ParticipantConfigType = CommunityParticipantConfig
override type MediatorNodeXConfigType = CommunityMediatorNodeXConfig
/** renders the config as json (used for dumping config for diagnostic purposes) */
override def dumpString: String = CantonCommunityConfig.makeConfidentialString(this)
@ -98,6 +102,8 @@ object CantonCommunityConfig {
deriveReader[CommunityDomainConfig].applyDeprecations
implicit val communityParticipantConfigReader: ConfigReader[CommunityParticipantConfig] =
deriveReader[CommunityParticipantConfig].applyDeprecations
implicit val communityMediatorNodeXConfigReader: ConfigReader[CommunityMediatorNodeXConfig] =
deriveReader[CommunityMediatorNodeXConfig]
deriveReader[CantonCommunityConfig]
}
@ -110,6 +116,8 @@ object CantonCommunityConfig {
deriveWriter[CommunityDomainConfig]
implicit val communityParticipantConfigWriter: ConfigWriter[CommunityParticipantConfig] =
deriveWriter[CommunityParticipantConfig]
implicit val communityMediatorNodeXConfigWriter: ConfigWriter[CommunityMediatorNodeXConfig] =
deriveWriter[CommunityMediatorNodeXConfig]
deriveWriter[CantonCommunityConfig]
}

View File

@ -36,6 +36,12 @@ import com.digitalasset.canton.console.{AmmoniteConsoleConfig, FeatureFlag}
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.domain.DomainNodeParameters
import com.digitalasset.canton.domain.config.*
import com.digitalasset.canton.domain.mediator.{
MediatorNodeConfigCommon,
MediatorNodeParameterConfig,
MediatorNodeParameters,
RemoteMediatorConfig,
}
import com.digitalasset.canton.domain.sequencing.sequencer.*
import com.digitalasset.canton.environment.CantonNodeParameters
import com.digitalasset.canton.http.{HttpApiConfig, StaticContentConfig, WebsocketConfig}
@ -342,6 +348,7 @@ trait CantonConfig {
DefaultPorts,
ParticipantConfigType,
]
type MediatorNodeXConfigType <: MediatorNodeConfigCommon
/** all domains that this Canton process can operate
*
@ -377,6 +384,23 @@ trait CantonConfig {
n.unwrap -> c
}
def mediatorsX: Map[InstanceName, MediatorNodeXConfigType]
/** Use `mediatorsX` instead!
*/
def mediatorsByStringX: Map[String, MediatorNodeXConfigType] = mediatorsX.map { case (n, c) =>
n.unwrap -> c
}
def remoteMediatorsX: Map[InstanceName, RemoteMediatorConfig]
/** Use `remoteMediatorsX` instead!
*/
def remoteMediatorsByStringX: Map[String, RemoteMediatorConfig] = remoteMediatorsX.map {
case (n, c) =>
n.unwrap -> c
}
/** all remotely running domains to which the console can connect and operate on */
def remoteDomains: Map[InstanceName, RemoteDomainConfig]
@ -475,6 +499,20 @@ trait CantonConfig {
InstanceName.tryCreate(name)
)
private lazy val mediatorNodeParametersX_ : Map[InstanceName, MediatorNodeParameters] =
mediatorsX.fmap { mediatorNodeConfig =>
MediatorNodeParameters(
general = CantonNodeParameterConverter.general(this, mediatorNodeConfig),
protocol = CantonNodeParameterConverter.protocol(this, mediatorNodeConfig.parameters),
)
}
private[canton] def mediatorNodeParametersX(name: InstanceName): MediatorNodeParameters =
nodeParametersFor(mediatorNodeParametersX_, "mediator-x", name)
private[canton] def mediatorNodeParametersByStringX(name: String): MediatorNodeParameters =
mediatorNodeParametersX(InstanceName.tryCreate(name))
protected def nodeParametersFor[A](
cachedNodeParameters: Map[InstanceName, A],
kind: String,
@ -899,6 +937,10 @@ object CantonConfig {
deriveReader[SequencerWriterConfig.LowLatency]
lazy implicit val communitySequencerConfigReader: ConfigReader[CommunitySequencerConfig] =
deriveReader[CommunitySequencerConfig]
lazy implicit val mediatorNodeParameterConfigReader: ConfigReader[MediatorNodeParameterConfig] =
deriveReader[MediatorNodeParameterConfig]
lazy implicit val remoteMediatorConfigReader: ConfigReader[RemoteMediatorConfig] =
deriveReader[RemoteMediatorConfig]
lazy implicit val domainParametersConfigReader: ConfigReader[DomainParametersConfig] =
deriveReader[DomainParametersConfig]
lazy implicit val domainNodeParametersConfigReader: ConfigReader[DomainNodeParametersConfig] =
@ -1274,6 +1316,10 @@ object CantonConfig {
deriveWriter[SequencerWriterConfig.LowLatency]
lazy implicit val communitySequencerConfigWriter: ConfigWriter[CommunitySequencerConfig] =
deriveWriter[CommunitySequencerConfig]
lazy implicit val mediatorNodeParameterConfigWriter: ConfigWriter[MediatorNodeParameterConfig] =
deriveWriter[MediatorNodeParameterConfig]
lazy implicit val remoteMediatorConfigWriter: ConfigWriter[RemoteMediatorConfig] =
deriveWriter[RemoteMediatorConfig]
lazy implicit val domainParametersConfigWriter: ConfigWriter[DomainParametersConfig] =
deriveWriter[DomainParametersConfig]
lazy implicit val domainNodeParametersConfigWriter: ConfigWriter[DomainNodeParametersConfig] =

View File

@ -154,9 +154,11 @@ object CommunityConfigValidations
domains,
participants,
participantsX,
mediatorsX,
remoteDomains,
remoteParticipants,
remoteParticipantsX,
remoteMediatorsX,
_,
_,
_,
@ -170,6 +172,8 @@ object CommunityConfigValidations
remoteParticipants,
participantsX,
remoteParticipantsX,
mediatorsX,
remoteMediatorsX,
)
.exists(_.nonEmpty),
(),

View File

@ -346,6 +346,13 @@ trait ConsoleEnvironment extends NamedLogging with FlagCloseable with NoTracing
environment.config.remoteDomainsByString.keys.map(createRemoteDomainReference).toSeq,
)
lazy val mediatorsX
: NodeReferences[MediatorReferenceX, RemoteMediatorReferenceX, LocalMediatorReferenceX] =
NodeReferences(
environment.config.mediatorsByStringX.keys.map(createMediatorReferenceX).toSeq,
environment.config.remoteMediatorsByStringX.keys.map(createRemoteMediatorReferenceX).toSeq,
)
// the scala compiler / wartremover gets confused here if I use ++ directly
def mergeLocalInstances(
locals: Seq[LocalInstanceReferenceCommon]*
@ -490,6 +497,12 @@ trait ConsoleEnvironment extends NamedLogging with FlagCloseable with NoTracing
protected def createDomainReference(name: String): DomainLocalRef
protected def createRemoteDomainReference(name: String): DomainRemoteRef
private def createMediatorReferenceX(name: String): LocalMediatorReferenceX =
new LocalMediatorReferenceX(this, name)
private def createRemoteMediatorReferenceX(name: String): RemoteMediatorReferenceX =
new RemoteMediatorReferenceX(this, name)
/** So we can make this available
*/
protected def selfAlias(): Bind[_] = Bind(ConsoleEnvironmentBinding.BindingName, this)

View File

@ -27,6 +27,7 @@ class ConsoleEnvironmentBinding {
|import com.digitalasset.canton.config._
|import com.digitalasset.canton.admin.api.client.data._
|import com.digitalasset.canton.participant.domain.DomainConnectionConfig
|import com.digitalasset.canton.domain.config.DomainParametersConfig
|import com.digitalasset.canton.SequencerAlias
|import com.digitalasset.canton.sequencing.SequencerConnection
|import com.digitalasset.canton.sequencing.SequencerConnections

View File

@ -3,6 +3,7 @@
package com.digitalasset.canton.console
import cats.data.EitherT
import com.digitalasset.canton.admin.api.client.GrpcCtlRunner
import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand
import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{
@ -23,7 +24,7 @@ import io.opentelemetry.api.trace.Tracer
import java.util.concurrent.TimeUnit
import scala.collection.concurrent.TrieMap
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.{ExecutionContextExecutor, blocking}
import scala.concurrent.{ExecutionContextExecutor, Future, blocking}
/** Attempt to run a grpc admin-api command against whatever is pointed at in the config
*/
@ -51,7 +52,7 @@ class GrpcAdminCommandRunner(
command: GrpcAdminCommand[_, _, Result],
clientConfig: ClientConfig,
token: Option[String],
)(implicit traceContext: TraceContext) = {
)(implicit traceContext: TraceContext): (NonNegativeDuration, EitherT[Future, String, Result]) = {
val awaitTimeout = command.timeoutType match {
case CustomClientTimeout(timeout) => timeout
// If a custom timeout for a console command is set, it involves some non-gRPC timeout mechanism

View File

@ -11,9 +11,20 @@ import com.digitalasset.canton.console.CommandErrors.NodeNotStarted
import com.digitalasset.canton.console.commands.*
import com.digitalasset.canton.crypto.Crypto
import com.digitalasset.canton.domain.config.RemoteDomainConfig
import com.digitalasset.canton.domain.mediator.{
MediatorNodeBootstrapX,
MediatorNodeConfigCommon,
MediatorNodeX,
RemoteMediatorConfig,
}
import com.digitalasset.canton.domain.{Domain, DomainNodeBootstrap}
import com.digitalasset.canton.environment.*
import com.digitalasset.canton.health.admin.data.{DomainStatus, NodeStatus, ParticipantStatus}
import com.digitalasset.canton.health.admin.data.{
DomainStatus,
MediatorNodeStatus,
NodeStatus,
ParticipantStatus,
}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger}
import com.digitalasset.canton.participant.config.{
@ -29,7 +40,7 @@ import com.digitalasset.canton.participant.{
ParticipantNodeX,
}
import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnections}
import com.digitalasset.canton.topology.{DomainId, NodeIdentity, ParticipantId}
import com.digitalasset.canton.topology.{DomainId, MediatorId, NodeIdentity, ParticipantId}
import com.digitalasset.canton.tracing.NoTracing
import com.digitalasset.canton.util.ErrorUtil
@ -924,3 +935,95 @@ class LocalParticipantReferenceX(
@Help.Group("Repair")
def repair: LocalParticipantRepairAdministration = repair_
}
/** Console members shared by local and remote mediator references. */
trait MediatorReferenceCommon extends InstanceReferenceCommon {
@Help.Summary(
"Yields the mediator id of this mediator. " +
"Throws an exception, if the id has not yet been allocated (e.g., the mediator has not yet been initialised)."
)
def id: MediatorId = topology.idHelper(MediatorId(_))

// Health/status queries on mediator references produce a MediatorNodeStatus.
override type Status = MediatorNodeStatus
}
/** Constants for mediator-x console references. */
object MediatorReferenceX {
// Display name used when rendering this instance type in the console.
val InstanceType = "MediatorX"
}
/** Base console reference for a mediator-x node (local or remote), wiring up
  * health, topology and party administration groups.
  */
abstract class MediatorReferenceX(val consoleEnvironment: ConsoleEnvironment, name: String)
    extends MediatorReferenceCommon
    with MediatorXAdministrationGroupWithInit
    with InstanceReferenceX {

  // Admin commands issued by the mixed-in groups run through this reference itself.
  override protected def runner: AdminCommandRunner = this

  override protected val instanceType: String = MediatorReferenceX.InstanceType

  override protected val loggerFactory: NamedLoggerFactory =
    consoleEnvironment.environment.loggerFactory
      .append(MediatorNodeBootstrapX.LoggerFactoryKeyName, name)

  @Help.Summary("Health and diagnostic related commands")
  @Help.Group("Health")
  override def health =
    new HealthAdministrationX[MediatorNodeStatus](
      this,
      consoleEnvironment,
      MediatorNodeStatus.fromProtoV0,
    )

  // Lazy so that `health` (and the status query it performs) is only touched on first use.
  private lazy val topology_ =
    new TopologyAdministrationGroupX(
      this,
      health.status.successOption.map(_.topologyQueue),
      consoleEnvironment,
      loggerFactory,
    )

  override def topology: TopologyAdministrationGroupX = topology_

  private lazy val parties_ = new PartiesAdministrationGroupX(this, consoleEnvironment)

  override def parties: PartiesAdministrationGroupX = parties_

  // Equality is by console environment and node name.
  override def equals(obj: Any): Boolean =
    obj match {
      case x: MediatorReferenceX => x.consoleEnvironment == consoleEnvironment && x.name == name
      case _ => false
    }

  // Fix: `equals` was overridden without `hashCode`, violating the equals/hashCode
  // contract — equal references could hash to different buckets in sets/maps.
  override def hashCode(): Int = 31 * consoleEnvironment.hashCode() + name.hashCode
}
/** Console reference to a mediator-x node running inside this Canton process. */
class LocalMediatorReferenceX(consoleEnvironment: ConsoleEnvironment, val name: String)
extends MediatorReferenceX(consoleEnvironment, name)
with LocalInstanceReferenceX
with SequencerConnectionAdministration
with BaseInspection[MediatorNodeX] {
// Console commands for this node run on the environment's shared execution context.
override protected[canton] def executionContext: ExecutionContext =
consoleEnvironment.environment.executionContext
@Help.Summary("Returns the mediator-x configuration")
override def config: MediatorNodeConfigCommon =
consoleEnvironment.environment.config.mediatorsByStringX(name)
// Process-local collection of mediator-x nodes, used below to look up this
// node's bootstrap by name.
private[console] val nodes: MediatorNodesX[?] = consoleEnvironment.environment.mediatorsX
override protected[console] def runningNode: Option[MediatorNodeBootstrapX] =
nodes.getRunning(name)
override protected[console] def startingNode: Option[MediatorNodeBootstrapX] =
nodes.getStarting(name)
}
/** Console reference to a mediator-x node running in another process, addressed
  * via its remote admin-api configuration.
  */
class RemoteMediatorReferenceX(val environment: ConsoleEnvironment, val name: String)
extends MediatorReferenceX(environment, name)
with GrpcRemoteInstanceReference {
@Help.Summary("Returns the remote mediator configuration")
def config: RemoteMediatorConfig =
environment.environment.config.remoteMediatorsByStringX(name)
// NOTE(review): `consoleEnvironment` (inherited from MediatorReferenceX) is the same
// instance as `environment` here, since it is passed to the superclass constructor above.
override protected[canton] def executionContext: ExecutionContext =
consoleEnvironment.environment.executionContext
}

View File

@ -26,10 +26,7 @@ import com.daml.ledger.api.v1.value.Value
import com.daml.ledger.api.v1.{EventQueryServiceOuterClass, ValueOuterClass}
import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdResponse as GetEventsByContractIdResponseV2
import com.daml.ledger.api.v2.participant_offset.ParticipantOffset
import com.daml.ledger.api.v2.state_service.{
GetActiveContractsResponse,
GetConnectedDomainsResponse,
}
import com.daml.ledger.api.v2.state_service.GetConnectedDomainsResponse
import com.daml.ledger.api.v2.transaction.{
Transaction as TransactionV2,
TransactionTree as TransactionTreeV2,
@ -40,7 +37,10 @@ import com.daml.lf.data.Ref
import com.daml.metrics.api.MetricHandle.{Histogram, Meter}
import com.daml.metrics.api.{MetricHandle, MetricName, MetricsContext}
import com.daml.scalautil.Statement.discard
import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.WrappedCreatedEvent
import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.{
WrappedContractEntry,
WrappedCreatedEvent,
}
import com.digitalasset.canton.admin.api.client.commands.LedgerApiV2Commands.CompletionWrapper
import com.digitalasset.canton.admin.api.client.commands.LedgerApiV2Commands.UpdateService.{
AssignedWrapper,
@ -409,8 +409,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
object commands extends Helpful {
@Help.Summary(
"Submit command and wait for the resulting transaction, returning the transaction tree or failing otherwise",
FeatureFlag.Testing,
"Submit command and wait for the resulting transaction, returning the transaction tree or failing otherwise"
)
@Help.Description(
"""Submits a command on behalf of the `actAs` parties, waits for the resulting transaction to commit and returns it.
@ -437,7 +436,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
readAs: Seq[PartyId] = Seq.empty,
disclosedContracts: Seq[DisclosedContract] = Seq.empty,
applicationId: String = applicationId,
): TransactionTreeV2 = check(FeatureFlag.Testing) {
): TransactionTreeV2 = {
val tx = consoleEnvironment.run {
ledgerApiCommand(
LedgerApiV2Commands.CommandService.SubmitAndWaitTransactionTree(
@ -459,8 +458,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
}
@Help.Summary(
"Submit command and wait for the resulting transaction, returning the flattened transaction or failing otherwise",
FeatureFlag.Testing,
"Submit command and wait for the resulting transaction, returning the flattened transaction or failing otherwise"
)
@Help.Description(
"""Submits a command on behalf of the `actAs` parties, waits for the resulting transaction to commit, and returns the "flattened" transaction.
@ -487,7 +485,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
readAs: Seq[PartyId] = Seq.empty,
disclosedContracts: Seq[DisclosedContract] = Seq.empty,
applicationId: String = applicationId,
): TransactionV2 = check(FeatureFlag.Testing) {
): TransactionV2 = {
val tx = consoleEnvironment.run {
ledgerApiCommand(
LedgerApiV2Commands.CommandService.SubmitAndWaitTransaction(
@ -747,7 +745,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
@Help.Summary("Read active contracts", FeatureFlag.Testing)
@Help.Group("Active Contracts")
object acs extends Helpful {
@Help.Summary("List the set of active contracts of a given party", FeatureFlag.Testing)
@Help.Summary("List the set of active contracts of a given party")
@Help.Description(
"""This command will return the current set of active contracts and incomplete reassignments for the given party.
|
@ -768,25 +766,27 @@ trait BaseLedgerApiAdministration extends NoTracing {
activeAtOffset: String = "",
timeout: config.NonNegativeDuration = timeouts.unbounded,
includeCreatedEventBlob: Boolean = false,
): Seq[GetActiveContractsResponse] =
check(FeatureFlag.Testing)(consoleEnvironment.run {
ledgerApiCommand(
LedgerApiV2Commands.StateService
.GetActiveContracts(
Set(party.toLf),
limit,
filterTemplates,
activeAtOffset,
verbose,
timeout.asFiniteApproximation,
includeCreatedEventBlob,
)(consoleEnvironment.environment.scheduler)
)
})
): Seq[WrappedContractEntry] =
consoleEnvironment
.run {
ledgerApiCommand(
LedgerApiV2Commands.StateService
.GetActiveContracts(
Set(party.toLf),
limit,
filterTemplates,
activeAtOffset,
verbose,
timeout.asFiniteApproximation,
includeCreatedEventBlob,
)(consoleEnvironment.environment.scheduler)
)
}
.filter(_.contractEntry.isDefined)
.map(activeContract => WrappedContractEntry(activeContract.contractEntry))
@Help.Summary(
"List the set of active contracts for all parties hosted on this participant",
FeatureFlag.Testing,
"List the set of active contracts for all parties hosted on this participant"
)
@Help.Description(
"""This command will return the current set of active contracts for all parties.
@ -809,7 +809,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
timeout: config.NonNegativeDuration = timeouts.unbounded,
identityProviderId: String = "",
includeCreatedEventBlob: Boolean = false,
): Seq[GetActiveContractsResponse] = check(FeatureFlag.Testing)(
): Seq[WrappedContractEntry] =
consoleEnvironment.runE {
for {
parties <- ledgerApiCommand(
@ -831,12 +831,14 @@ trait BaseLedgerApiAdministration extends NoTracing {
timeout.asFiniteApproximation,
includeCreatedEventBlob,
)(consoleEnvironment.environment.scheduler)
).toEither
).toEither.map(
_.filter(_.contractEntry.isDefined)
.map(activeContract => WrappedContractEntry(activeContract.contractEntry))
)
}
}
} yield res
}
)
@Help.Summary(
"Wait until the party sees the given contract in the active contract service",
@ -852,25 +854,21 @@ trait BaseLedgerApiAdministration extends NoTracing {
): Unit = check(FeatureFlag.Testing) {
ConsoleMacros.utils.retry_until_true(timeout) {
of_party(party, verbose = false)
.exists(
_.contractEntry.activeContract.exists(
_.getCreatedEvent.contractId == contractId.coid
)
)
.exists(_.contractId == contractId.coid)
}
}
@Help.Summary("Generic search for contracts", FeatureFlag.Testing)
@Help.Summary("Generic search for contracts")
@Help.Description(
"""This search function returns an untyped ledger-api event.
|The find will wait until the contract appears or throw an exception once it times out."""
)
def find_generic(
partyId: PartyId,
filter: GetActiveContractsResponse => Boolean,
filter: WrappedContractEntry => Boolean,
timeout: config.NonNegativeDuration = timeouts.ledgerCommand,
): GetActiveContractsResponse = check(FeatureFlag.Testing) {
def scan: Option[GetActiveContractsResponse] = of_party(partyId).find(filter(_))
): WrappedContractEntry = {
def scan: Option[WrappedContractEntry] = of_party(partyId).find(filter(_))
ConsoleMacros.utils.retry_until_true(timeout)(scan.isDefined)
consoleEnvironment.runE {
@ -1796,27 +1794,26 @@ trait BaseLedgerApiAdministration extends NoTracing {
subscribe_trees(observer, filterParty, end(), verbose = false)
}
@Help.Summary("Get a (tree) transaction by its ID", FeatureFlag.Testing)
@Help.Summary("Get a (tree) transaction by its ID")
@Help.Description(
"""Get a transaction tree from the transaction stream by its ID. Returns None if the transaction is not (yet)
|known at the participant or if the transaction has been pruned via `pruning.prune`."""
)
def by_id(parties: Set[PartyId], id: String): Option[TransactionTree] =
check(FeatureFlag.Testing)(consoleEnvironment.run {
consoleEnvironment.run {
ledgerApiCommand(
LedgerApiCommands.TransactionService.GetTransactionById(parties.map(_.toLf), id)(
consoleEnvironment.environment.executionContext
)
)
})
}
@Help.Summary("Get the domain that a transaction was committed over.", FeatureFlag.Testing)
@Help.Summary("Get the domain that a transaction was committed over.")
@Help.Description(
"""Get the domain that a transaction was committed over. Throws an error if the transaction is not (yet) known
|to the participant or if the transaction has been pruned via `pruning.prune`."""
)
def domain_of(transactionId: String): DomainId =
check(FeatureFlag.Testing)(domainOfTransaction(transactionId))
def domain_of(transactionId: String): DomainId = domainOfTransaction(transactionId)
}
@Help.Summary("Submit commands", FeatureFlag.Testing)
@ -1957,7 +1954,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
@Help.Summary("Read active contracts", FeatureFlag.Testing)
@Help.Group("Active Contracts")
object acs extends Helpful {
@Help.Summary("List the set of active contracts of a given party", FeatureFlag.Testing)
@Help.Summary("List the set of active contracts of a given party")
@Help.Description(
"""This command will return the current set of active contracts for the given party.
@ -1979,7 +1976,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
timeout: config.NonNegativeDuration = timeouts.unbounded,
includeCreatedEventBlob: Boolean = false,
): Seq[WrappedCreatedEvent] =
check(FeatureFlag.Testing)(consoleEnvironment.run {
consoleEnvironment.run {
ledgerApiCommand(
LedgerApiCommands.AcsService
.GetActiveContracts(
@ -1991,7 +1988,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
includeCreatedEventBlob,
)(consoleEnvironment.environment.scheduler)
)
})
}
@Help.Summary(
"List the set of active contracts for all parties hosted on this participant",
@ -2085,7 +2082,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
@Help.Group("Party Management")
object parties extends Helpful {
@Help.Summary("Allocate a new party", FeatureFlag.Testing)
@Help.Summary("Allocate a new party")
@Help.Description(
"""Allocates a new party on the ledger.
party: a hint for generating the party identifier
@ -2099,7 +2096,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
annotations: Map[String, String] = Map.empty,
identityProviderId: String = "",
): PartyDetails = {
val proto = check(FeatureFlag.Testing)(consoleEnvironment.run {
val proto = consoleEnvironment.run {
ledgerApiCommand(
LedgerApiCommands.PartyManagementService.AllocateParty(
partyIdHint = party,
@ -2108,23 +2105,23 @@ trait BaseLedgerApiAdministration extends NoTracing {
identityProviderId = identityProviderId,
)
)
})
}
PartyDetails.fromProtoPartyDetails(proto)
}
@Help.Summary("List parties known by the Ledger API server", FeatureFlag.Testing)
@Help.Summary("List parties known by the Ledger API server")
@Help.Description(
"""Lists parties known by the Ledger API server.
identityProviderId: identity provider id"""
)
def list(identityProviderId: String = ""): Seq[PartyDetails] = {
val proto = check(FeatureFlag.Testing)(consoleEnvironment.run {
val proto = consoleEnvironment.run {
ledgerApiCommand(
LedgerApiCommands.PartyManagementService.ListKnownParties(
identityProviderId = identityProviderId
)
)
})
}
proto.map(PartyDetails.fromProtoPartyDetails)
}

View File

@ -0,0 +1,88 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.console.commands
import cats.syntax.either.*
import com.digitalasset.canton.SequencerAlias
import com.digitalasset.canton.admin.api.client.commands.EnterpriseSequencerConnectionAdminCommands
import com.digitalasset.canton.console.{AdminCommandRunner, Help, Helpful, InstanceReferenceCommon}
import com.digitalasset.canton.sequencing.{SequencerConnection, SequencerConnections}
import scala.util.Try
// Console command group for inspecting and updating the sequencer connection
// configuration of a sequencer client node. All commands delegate to the
// EnterpriseSequencerConnectionService admin API via adminCommand.
trait SequencerConnectionAdministration extends Helpful {
this: AdminCommandRunner with InstanceReferenceCommon =>
@Help.Summary("Manage sequencer connection")
@Help.Group("Sequencer Connection")
object sequencer_connection extends Helpful {
@Help.Summary("Get Sequencer Connection")
@Help.Description(
"Use this command to get the currently configured sequencer connection details for this sequencer client. " +
"If this node has not yet been initialized, this will return None."
)
// Returns None when no connection is configured yet (node not initialized).
def get(): Option[SequencerConnections] = consoleEnvironment.run {
adminCommand(
EnterpriseSequencerConnectionAdminCommands.GetConnection()
)
}
@Help.Summary("Set Sequencer Connection")
@Help.Description(
"Set new sequencer connection details for this sequencer client node. " +
"This will replace any pre-configured connection details. " +
"This command will only work after the node has been initialized."
)
// Replaces the full set of configured connections with the given ones.
def set(connections: SequencerConnections): Unit = consoleEnvironment.run {
adminCommand(
EnterpriseSequencerConnectionAdminCommands.SetConnection(connections)
)
}
@Help.Summary("Set Sequencer Connection")
@Help.Description(
"Set new sequencer connection details for this sequencer client node. " +
"This will replace any pre-configured connection details. " +
"This command will only work after the node has been initialized."
)
// Convenience overload: wraps the single connection via SequencerConnections.single.
def set(connection: SequencerConnection): Unit = consoleEnvironment.run {
adminCommand(
EnterpriseSequencerConnectionAdminCommands.SetConnection(
SequencerConnections.single(connection)
)
)
}
@Help.Summary("Modify Default Sequencer Connection")
@Help.Description(
"Modify sequencer connection details for this sequencer client node, " +
"by passing a modifier function that operates on the existing default connection. "
)
// Applies the modifier only to the connection registered under SequencerAlias.Default.
def modify(modifier: SequencerConnection => SequencerConnection): Unit =
modify_connections(_.modify(SequencerAlias.Default, modifier))
@Help.Summary("Modify Sequencer Connections")
@Help.Description(
"Modify sequencer connection details for this sequencer client node, " +
"by passing a modifier function that operates on the existing connection configuration. "
)
// Read-modify-write of the connection configuration:
//  1. fetch the current connections (fails with "Node not yet initialized" if absent),
//  2. apply the caller-supplied modifier (exceptions are captured via Try and
//     surfaced as the command's error message),
//  3. write the result back.
// NOTE(review): get and set are two separate admin calls, so this sequence is
// not atomic — a concurrent update between the two calls could be overwritten.
def modify_connections(
modifier: SequencerConnections => SequencerConnections
): Unit =
consoleEnvironment.runE {
for {
connOption <- adminCommand(
EnterpriseSequencerConnectionAdminCommands.GetConnection()
).toEither
conn <- connOption.toRight("Node not yet initialized")
newConn <- Try(modifier(conn)).toEither.leftMap(_.getMessage)
_ <- adminCommand(
EnterpriseSequencerConnectionAdminCommands.SetConnection(newConn)
).toEither
} yield ()
}
}
}

View File

@ -3,6 +3,7 @@
package com.digitalasset.canton.environment
import cats.syntax.either.*
import com.digitalasset.canton.admin.api.client.data.CommunityCantonStatus
import com.digitalasset.canton.config.{CantonCommunityConfig, TestingConfigInternal}
import com.digitalasset.canton.console.{
@ -26,10 +27,19 @@ import com.digitalasset.canton.console.{
NodeReferences,
StandardConsoleOutput,
}
import com.digitalasset.canton.crypto.CommunityCryptoFactory
import com.digitalasset.canton.crypto.admin.grpc.GrpcVaultService.CommunityGrpcVaultServiceFactory
import com.digitalasset.canton.crypto.store.CryptoPrivateStore.CommunityCryptoPrivateStoreFactory
import com.digitalasset.canton.domain.DomainNodeBootstrap
import com.digitalasset.canton.domain.mediator.*
import com.digitalasset.canton.domain.metrics.MediatorNodeMetrics
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.participant.{ParticipantNodeBootstrap, ParticipantNodeBootstrapX}
import com.digitalasset.canton.resource.{CommunityDbMigrationsFactory, DbMigrationsFactory}
import com.digitalasset.canton.resource.{
CommunityDbMigrationsFactory,
CommunityStorageFactory,
DbMigrationsFactory,
}
class CommunityEnvironment(
override val config: CantonCommunityConfig,
@ -66,6 +76,37 @@ class CommunityEnvironment(
): HealthDumpGenerator[CommunityCantonStatus] = {
new CommunityHealthDumpGenerator(this, commandRunner)
}
override protected def createMediatorX(
name: String,
mediatorConfig: CommunityMediatorNodeXConfig,
): MediatorNodeBootstrapX = {
val factoryArguments = mediatorNodeFactoryArguments(name, mediatorConfig)
val arguments = factoryArguments
.toCantonNodeBootstrapCommonArguments(
new CommunityStorageFactory(mediatorConfig.storage),
new CommunityCryptoFactory(),
new CommunityCryptoPrivateStoreFactory(),
new CommunityGrpcVaultServiceFactory(),
)
.valueOr(err =>
throw new RuntimeException(s"Failed to create mediator bootstrap: $err")
): CantonNodeBootstrapCommonArguments[
MediatorNodeConfigCommon,
MediatorNodeParameters,
MediatorNodeMetrics,
]
new MediatorNodeBootstrapX(
arguments,
new CommunityMediatorReplicaManager(
config.parameters.timeouts.processing,
loggerFactory,
),
CommunityMediatorRuntimeFactory,
)
}
}
object CommunityEnvironmentFactory extends EnvironmentFactory[CommunityEnvironment] {

View File

@ -22,6 +22,8 @@ import com.digitalasset.canton.console.{
}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.domain.DomainNodeBootstrap
import com.digitalasset.canton.domain.mediator.{MediatorNodeBootstrapX, MediatorNodeParameters}
import com.digitalasset.canton.domain.metrics.MediatorNodeMetrics
import com.digitalasset.canton.environment.CantonNodeBootstrap.HealthDumpFunction
import com.digitalasset.canton.environment.Environment.*
import com.digitalasset.canton.environment.ParticipantNodes.{ParticipantNodesOld, ParticipantNodesX}
@ -30,14 +32,8 @@ import com.digitalasset.canton.lifecycle.Lifecycle
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.metrics.MetricsConfig.Prometheus
import com.digitalasset.canton.metrics.MetricsFactory
import com.digitalasset.canton.participant.*
import com.digitalasset.canton.participant.domain.DomainConnectionConfig
import com.digitalasset.canton.participant.{
ParticipantNode,
ParticipantNodeBootstrap,
ParticipantNodeBootstrapCommon,
ParticipantNodeBootstrapX,
ParticipantNodeCommon,
}
import com.digitalasset.canton.resource.DbMigrationsFactory
import com.digitalasset.canton.sequencing.SequencerConnections
import com.digitalasset.canton.telemetry.{ConfiguredOpenTelemetry, OpenTelemetryFactory}
@ -296,6 +292,15 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing {
config.participantNodeParametersByString,
loggerFactory,
)
val mediatorsX =
new MediatorNodesX(
createMediatorX,
migrationsFactory,
timeouts,
config.mediatorsByStringX,
config.mediatorNodeParametersByStringX,
loggerFactory,
)
// convenient grouping of all node collections for performing operations
// intentionally defined in the order we'd like to start them
@ -522,6 +527,11 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing {
.valueOr(err => throw new RuntimeException(s"Failed to create participant bootstrap: $err"))
}
protected def createMediatorX(
name: String,
mediatorConfig: Config#MediatorNodeXConfigType,
): MediatorNodeBootstrapX
protected def createParticipantX(
name: String,
participantConfig: Config#ParticipantConfigType,
@ -568,6 +578,26 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing {
)
.valueOr(err => throw new RuntimeException(s"Failed to create domain bootstrap: $err"))
protected def mediatorNodeFactoryArguments(
name: String,
mediatorConfig: Config#MediatorNodeXConfigType,
): NodeFactoryArguments[
Config#MediatorNodeXConfigType,
MediatorNodeParameters,
MediatorNodeMetrics,
] = NodeFactoryArguments(
name,
mediatorConfig,
config.mediatorNodeParametersByStringX(name),
createClock(Some(MediatorNodeBootstrapX.LoggerFactoryKeyName -> name)),
metricsFactory.forMediator(name),
testingConfig,
futureSupervisor,
loggerFactory.append(MediatorNodeBootstrapX.LoggerFactoryKeyName, name),
writeHealthDumpToFile,
configuredOpenTelemetry,
)
private def simClocks: Seq[SimClock] = {
val clocks = clock +: (participants.running.map(_.clock) ++ domains.running.map(_.clock))
val simclocks = clocks.collect { case sc: SimClock => sc }

View File

@ -12,6 +12,12 @@ import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService
import com.digitalasset.canton.config.{DbConfig, LocalNodeConfig, ProcessingTimeout, StorageConfig}
import com.digitalasset.canton.domain.config.DomainConfig
import com.digitalasset.canton.domain.mediator.{
MediatorNodeBootstrapX,
MediatorNodeConfigCommon,
MediatorNodeParameters,
MediatorNodeX,
}
import com.digitalasset.canton.domain.{Domain, DomainNodeBootstrap, DomainNodeParameters}
import com.digitalasset.canton.lifecycle.*
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
@ -438,3 +444,26 @@ class DomainNodes[DC <: DomainConfig](
startUpGroup = 0,
loggerFactory,
)
class MediatorNodesX[MNC <: MediatorNodeConfigCommon](
create: (String, MNC) => MediatorNodeBootstrapX,
migrationsFactory: DbMigrationsFactory,
timeouts: ProcessingTimeout,
configs: Map[String, MNC],
parameters: String => MediatorNodeParameters,
loggerFactory: NamedLoggerFactory,
)(implicit ec: ExecutionContext)
extends ManagedNodes[
MediatorNodeX,
MNC,
MediatorNodeParameters,
MediatorNodeBootstrapX,
](
create,
migrationsFactory,
timeouts,
configs,
parameters,
startUpGroup = 1,
loggerFactory,
)

View File

@ -0,0 +1,35 @@
// Getting-started topology: two in-memory participants plus a distributed
// domain built from X-series nodes (one reference sequencer, one mediator).
canton {
participants-x {
participant1 {
storage.type = memory
admin-api.port = 5012
ledger-api.port = 5011
}
participant2 {
storage.type = memory
admin-api.port = 5022
ledger-api.port = 5021
}
}
sequencers-x {
sequencer1 {
// reference sequencer implementation with an empty driver config
sequencer {
config {
}
type = reference
}
storage {
type = memory
}
}
}
// single mediator node with default settings
mediators-x {
mediator1 {
}
}
// enable ledger_api commands for our getting started guide
features.enable-testing-commands = yes
features.enable-preview-commands = yes
}

View File

@ -1,45 +0,0 @@
// Example topology: one in-memory domain ("mydomain") and two in-memory
// participants, with testing and preview console commands enabled.
canton {
features {
enable-testing-commands = yes
enable-preview-commands = yes
}
domains {
mydomain {
storage {
type = memory
}
public-api.port = 14008
admin-api.port = 14009
}
}
participants {
participant1 {
storage {
type = memory
}
admin-api {
port = 12012
}
ledger-api {
port = 12011
}
}
participant2 {
storage {
type = memory
}
admin-api {
port = 12022
}
ledger-api {
port = 12021
}
}
}
}

View File

@ -1,46 +0,0 @@
// Bootstrap script for the create-daml-app example: connects both participants
// to mydomain, allocates the Alice/Bob/Public parties, makes "Public" readable
// on participant2, creates matching ledger API users, and uploads the dar.
import better.files._
// Location of the create-daml-app project; overridable via -Dcreate-daml-app-dir.
val createDamlAppDir = File(sys.props.get("create-daml-app-dir").getOrElse("create-daml-app"))
val createDamlAppDar = createDamlAppDir / ".daml" / "dist" / "create-daml-app-0.1.0.dar"
// check that files exist where we expect them
// NOTE(review): "working directly" in the message below is likely a typo for
// "working directory" (runtime string — left unchanged here).
createDamlAppDir.exists || sys.error(s"please setup the create-daml-app example in the current working directly under ${createDamlAppDir}")
createDamlAppDar.exists || sys.error(s"please run daml build in the create-daml-app folder to build the dar file ${createDamlAppDar}")
participant1.domains.connect_local(mydomain)
participant2.domains.connect_local(mydomain)
val alice = participant1.parties.enable("Alice")
val bob = participant2.parties.enable("Bob")
val public = participant1.parties.enable("Public")
// Grant participant2 observation rights for the shared "Public" party; both
// the From side and the To side of the mapping are authorized below.
participant1.topology.party_to_participant_mappings.authorize(
TopologyChangeOp.Add,
party = public,
participant = participant2.id,
side = RequestSide.From,
permission = ParticipantPermission.Observation,
)
participant2.topology.party_to_participant_mappings.authorize(
TopologyChangeOp.Add,
party = public,
participant = participant2.id,
side = RequestSide.To,
permission = ParticipantPermission.Observation,
)
// Ledger API users for the create-daml-app frontend: each acts as its own
// party and reads as the shared Public party.
participant1.ledger_api.users.create(
id = "alice",
actAs = Set(alice.toLf),
primaryParty = Some(alice.toLf),
readAs = Set(public.toLf),
)
participant2.ledger_api.users.create(
id = "bob",
actAs = Set(bob.toLf),
primaryParty = Some(bob.toLf),
readAs = Set(public.toLf),
)
// upload dar file to participants
participants.all.dars.upload(createDamlAppDar.pathAsString)

View File

@ -1,4 +1,4 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
sandbox-options:
- --wall-clock-time
name: contact

View File

@ -1,4 +1,4 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
sandbox-options:
- --wall-clock-time
name: message

View File

@ -0,0 +1,13 @@
// Minimal distributed-domain test config: testing commands enabled, one
// mediator and two participants (X-series nodes), all other settings default.
canton {
features.enable-testing-commands = yes
mediators-x {
mediator1 { }
}
participants-x {
participant1 { }
participant2 { }
}
}

View File

@ -61,7 +61,6 @@ trait HasConsoleScriptRunner { this: NamedLogging =>
object ExampleIntegrationTest {
lazy val examplesPath: File = "community" / "app" / "src" / "pack" / "examples"
lazy val simpleTopology: File = examplesPath / "01-simple-topology"
lazy val createDamlApp: File = examplesPath / "04-create-daml-app"
lazy val advancedConfiguration: File = examplesPath / "03-advanced-configuration"
lazy val composabilityConfiguration: File = examplesPath / "05-composability"
lazy val messagingConfiguration: File = examplesPath / "06-messaging"

View File

@ -0,0 +1,40 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.integration.tests
import com.digitalasset.canton.health.admin.data.NodeStatus
import com.digitalasset.canton.integration.CommunityTests.{
CommunityIntegrationTest,
SharedCommunityEnvironment,
}
import com.digitalasset.canton.integration.{
CommunityConfigTransforms,
CommunityEnvironmentDefinition,
}
// Community integration test exercising start-up of a distributed domain
// topology (separate mediator node) defined in distributed-single-domain.conf.
class SimplestPingDistributedCommunityIntegrationTest
extends CommunityIntegrationTest
with SharedCommunityEnvironment {
// Manual start lets the test control node start-up explicitly; uniquePorts
// avoids port clashes with other concurrently running test environments.
override def environmentDefinition: CommunityEnvironmentDefinition =
CommunityEnvironmentDefinition
.fromResource("distributed-single-domain.conf")
.addConfigTransforms(CommunityConfigTransforms.uniquePorts)
.withManualStart
// NOTE(review): despite the name, no ping is performed yet — the test only
// starts the nodes and checks the mediator's status; see the TODOs below.
"we can run a trivial ping" in { implicit env =>
import env.*
mediator1x.start()
participants.local.start()
// mediator starts but remains uninitialized at this point
mediator1x.health.status shouldBe NodeStatus.NotInitialized(true)
// TODO(i15178): test this as soon as the mediator can be initialized
// mediator1x.testing.fetch_domain_time()
// TODO(i15178): add sequencer and domain manager, then test if the distributed domain can be used for a ping
}
}

View File

@ -23,7 +23,7 @@ trait OnShutdownRunner { this: AutoCloseable =>
protected def logger: TracedLogger
/** Check whether we're closing.
* Susceptible to race conditions; unless you're using using this as a flag to the retry lib or you really know
* Susceptible to race conditions; unless you're using this as a flag to the retry lib or you really know
* what you're doing, prefer `performUnlessClosing` and friends.
*/
def isClosing: Boolean = closingFlag.get()

View File

@ -90,7 +90,7 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
override val loggingContext: ErrorLoggingContext
) extends Alarm(cause)
with TopologyManagerError {
override lazy val logOnCreation: Boolean = false
override def logOnCreation: Boolean = false
}
}

View File

@ -135,6 +135,9 @@ abstract class TopologyManagerX[+StoreID <: TopologyStoreId](
def addObserver(observer: TopologyManagerObserver): Unit =
observers.updateAndGet(_ :+ observer).discard
def removeObserver(observer: TopologyManagerObserver): Unit =
observers.updateAndGet(_.filterNot(_ == observer)).discard
@VisibleForTesting
def clearObservers(): Unit = observers.set(Seq.empty)

View File

@ -117,24 +117,30 @@ class StoreBasedTopologySnapshotX(
}
)
val requiredPackagesET = EitherT.right[PackageId](
findTransactions(
asOfInclusive = false,
types = Seq(TopologyMappingX.Code.DomainParametersStateX),
filterUid = None,
filterNamespace = None,
).map { transactions =>
collectLatestMapping(
TopologyMappingX.Code.DomainParametersStateX,
transactions.collectOfMapping[DomainParametersStateX].result,
).getOrElse(throw new IllegalStateException("Unable to locate domain parameters state"))
.discard
val requiredPackagesET = store.storeId match {
case _: TopologyStoreId.DomainStore =>
EitherT.right[PackageId](
findTransactions(
asOfInclusive = false,
types = Seq(TopologyMappingX.Code.DomainParametersStateX),
filterUid = None,
filterNamespace = None,
).map { transactions =>
collectLatestMapping(
TopologyMappingX.Code.DomainParametersStateX,
transactions.collectOfMapping[DomainParametersStateX].result,
).getOrElse(throw new IllegalStateException("Unable to locate domain parameters state"))
.discard
// TODO(#14054) Once the non-proto DynamicDomainParametersX is available, use it
// _.parameters.requiredPackages
Seq.empty[PackageId]
}
)
// TODO(#14054) Once the non-proto DynamicDomainParametersX is available, use it
// _.parameters.requiredPackages
Seq.empty[PackageId]
}
)
case TopologyStoreId.AuthorizedStore =>
EitherT.pure[Future, PackageId](Seq.empty)
}
lazy val dependenciesET = packageDependencies(packageId)

View File

@ -1,4 +1,4 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
build-options:
- --target=1.14
name: CantonExamples

View File

@ -244,6 +244,35 @@ abstract class BootstrapStageWithStorage[
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] =
completeWithExternalUS(storeAndPassResult.mapK(FutureUnlessShutdown.outcomeK))
protected def resumeIfCompleteStage()(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, String, Unit] = {
val synchronousCheck = bootstrap.queue.executeUS(
if (stageResult.get().nonEmpty) {
logger.debug(s"Stage $description is already initialized. Skipping check.")
FutureUnlessShutdown.pure(None)
} else {
performUnlessClosingF("check-already-init")(stageCompleted).map {
case Some(result) =>
logger.info(
s"Stage ${description} completed in the background. Continuing with the initialization"
)
val nextStage = Some(buildNextStage(result))
stageResult.set(nextStage)
nextStage
case None =>
logger.debug(s"Continue awaiting external trigger for start-up stage $description")
None
}
},
s"resume-if-complete-stage-$description",
)
EitherT.right[String](synchronousCheck).flatMap {
case Some(nextStage) => nextStage.start()
case None => EitherT.rightT(())
}
}
protected def completeWithExternalUS(
storeAndPassResult: => EitherT[FutureUnlessShutdown, String, M]
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] =
@ -286,7 +315,7 @@ abstract class BootstrapStageWithStorage[
}
}
final override def attempt()(implicit
final protected override def attempt()(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, String, Option[StageResult]] =
performUnlessClosingEitherUSF(description) {

View File

@ -26,7 +26,7 @@ import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore
import com.digitalasset.canton.topology.store.{InitializationStore, TopologyStoreId, TopologyStoreX}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.SimpleExecutionQueue
import com.digitalasset.canton.util.{FutureUtil, SimpleExecutionQueue}
import com.digitalasset.canton.version.{ProtocolVersion, ReleaseProtocolVersion}
import org.apache.pekko.actor.ActorSystem
@ -106,7 +106,7 @@ abstract class CantonNodeBootstrapX[
description = "Initialise storage",
bootstrapStageCallback,
) {
override def attempt()(implicit
override protected def attempt()(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, String, Option[SetupCrypto]] = {
EitherT(
@ -148,7 +148,7 @@ abstract class CantonNodeBootstrapX[
)
with HasCloseContext {
override def attempt()(implicit
override protected def attempt()(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, String, Option[SetupNodeId]] = {
// crypto factory doesn't write to the db during startup, hence,
@ -332,6 +332,35 @@ abstract class CantonNodeBootstrapX[
config.init.autoInit,
) {
private val topologyManagerObserver = new TopologyManagerObserver {
override def addedNewTransactions(
timestamp: CantonTimestamp,
transactions: Seq[SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]],
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
logger.debug(
s"Checking whether new topology transactions at $timestamp suffice for initializing the stage $description"
)
// Run the resumption check asynchronously so that
// - Topology transactions added during initialization of this stage do not deadlock
// because all stages run on a sequential queue.
// - Topology transactions added during the resumption do not deadlock
// because the topology processor runs all notifications and topology additions on a sequential queue.
FutureUtil.doNotAwaitUnlessShutdown(
resumeIfCompleteStage().value,
s"Checking whether new topology transactions completed stage $description failed ",
)
FutureUnlessShutdown.unit
}
}
override def start()(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, String, Unit] = {
// Register the observer first so that it does not race with the removal when the stage has finished.
manager.addObserver(topologyManagerObserver)
super.start()
}
override protected def stageCompleted(implicit
traceContext: TraceContext
): Future[Option[Unit]] = {
@ -346,23 +375,23 @@ abstract class CantonNodeBootstrapX[
filterNamespace = None,
)
.map { res =>
Option.when(
res.result
.filterNot(_.transaction.isProposal)
.map(_.transaction.transaction.mapping)
.exists {
case OwnerToKeyMappingX(`myMember`, None, keys) =>
// stage is clear if we have a general signing key and possibly also an encryption key
// this tx can not exist without appropriate certificates, so don't need to check for them
keys.exists(_.isSigning) && (myMember.code != ParticipantId.Code || keys
.exists(x => !x.isSigning))
case _ => false
}
)(())
val done = res.result
.filterNot(_.transaction.isProposal)
.map(_.transaction.transaction.mapping)
.exists {
case OwnerToKeyMappingX(`myMember`, None, keys) =>
// stage is clear if we have a general signing key and possibly also an encryption key
// this tx can not exist without appropriate certificates, so don't need to check for them
keys.exists(_.isSigning) && (myMember.code != ParticipantId.Code || keys
.exists(x => !x.isSigning))
case _ => false
}
Option.when(done)(())
}
}
override protected def buildNextStage(result: Unit): BootstrapStageOrLeaf[T] =
override protected def buildNextStage(result: Unit): BootstrapStageOrLeaf[T] = {
manager.removeObserver(topologyManagerObserver)
customNodeStages(
storage,
crypto,
@ -371,6 +400,7 @@ abstract class CantonNodeBootstrapX[
healthReporter,
healthService,
)
}
override protected def autoCompleteStage()
: EitherT[FutureUnlessShutdown, String, Option[Unit]] = {

View File

@ -109,6 +109,7 @@ class ConfirmationPolicyTest extends AnyWordSpec with BaseTest with HasExecution
templateId,
Value.ValueUnit,
Set(ExampleTransactionFactory.signatory),
shared = true,
)
),
)

View File

@ -272,7 +272,7 @@ class WellFormedTransactionTest extends AnyWordSpec with BaseTest with HasExecut
signatories = Set(signatory),
key = Some(
LfGlobalKeyWithMaintainers
.assertBuild(templateId, contractInst.unversioned.arg, Set.empty)
.assertBuild(templateId, contractInst.unversioned.arg, Set.empty, shared = true)
),
),
ExampleTransactionFactory.exerciseNode(
@ -284,6 +284,7 @@ class WellFormedTransactionTest extends AnyWordSpec with BaseTest with HasExecut
templateId,
contractInst.unversioned.arg,
Set.empty,
shared = true,
)
),
),

View File

@ -60,7 +60,9 @@ class ResilientSequencerSubscriberPekkoTest extends StreamSpec with BaseTest {
"ResilientSequencerSubscriberPekko" should {
"not retry on an unrecoverable error" in assertAllStagesStopped {
val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory)
val factory = TestSequencerSubscriptionFactoryPekko(
loggerFactory.appendUnnamedKey("case", "unrecoverable-error")
)
val subscriber = createResilientSubscriber(factory)
factory.add(Error(UnretryableError))
val subscription = subscriber.subscribeFrom(SequencerCounter.Genesis)
@ -77,7 +79,9 @@ class ResilientSequencerSubscriberPekkoTest extends StreamSpec with BaseTest {
}
"retry on recoverable errors" in assertAllStagesStopped {
val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory)
val factory = TestSequencerSubscriptionFactoryPekko(
loggerFactory.appendUnnamedKey("case", "retry-on-error")
)
val subscriber = createResilientSubscriber(factory)
factory.add(Error(RetryableError))
factory.add(Error(RetryableError))
@ -99,7 +103,9 @@ class ResilientSequencerSubscriberPekkoTest extends StreamSpec with BaseTest {
}
"retry on exceptions until one is fatal" in {
val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory)
val factory = TestSequencerSubscriptionFactoryPekko(
loggerFactory.appendUnnamedKey("case", "retry-on-exception")
)
val subscriber = createResilientSubscriber(factory)
factory.add(Failure(RetryableExn))
factory.add(Failure(FatalExn))
@ -121,7 +127,9 @@ class ResilientSequencerSubscriberPekkoTest extends StreamSpec with BaseTest {
}
"restart from last received counter" in {
val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory)
val factory = TestSequencerSubscriptionFactoryPekko(
loggerFactory.appendUnnamedKey("case", "restart-from-counter")
)
val subscriber = createResilientSubscriber(factory)
factory.subscribe(start =>
(start to (start + 10)).map(sc => Event(sc)) :+ Error(RetryableError)
@ -162,7 +170,9 @@ class ResilientSequencerSubscriberPekkoTest extends StreamSpec with BaseTest {
override val initialDelay: FiniteDuration = 1.milli
override val warnDelayDuration: FiniteDuration = 100.millis
}
val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory)
val factory = TestSequencerSubscriptionFactoryPekko(
loggerFactory.appendUnnamedKey("case", "calculate-retry-delay")
)
val subscriber = createResilientSubscriber(factory, captureHasEvent)
// provide an event then close with a recoverable error
@ -186,7 +196,9 @@ class ResilientSequencerSubscriberPekkoTest extends StreamSpec with BaseTest {
"retry until closing if the sequencer is permanently unavailable" in assertAllStagesStopped {
val maxDelay = 100.milliseconds
val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory)
val factory = TestSequencerSubscriptionFactoryPekko(
loggerFactory.appendUnnamedKey("case", "retry-until-closing")
)
val subscriber = createResilientSubscriber(factory, retryDelay(maxDelay))
// Always close with RetryableError
for (_ <- 1 to 100) {
@ -230,11 +242,13 @@ class ResilientSequencerSubscriberPekkoTest extends StreamSpec with BaseTest {
"return to healthy when messages are received again" in assertAllStagesStopped {
val maxDelay = 100.milliseconds
val factory = TestSequencerSubscriptionFactoryPekko(loggerFactory)
val factory = TestSequencerSubscriptionFactoryPekko(
loggerFactory.appendUnnamedKey("case", "return-to-healthy")
)
val subscriber = createResilientSubscriber(factory, retryDelay(maxDelay))
// retryDelay doubles the delay upon each attempt until it hits `maxDelay`,
// so we set it to one more such that we get the chance to see the unhealthy state
val retries = (Math.log(maxDelay.toMillis.toDouble) / Math.log(2.0d)).ceil.toInt + 1
// so we set it to two more such that we get the chance to see the unhealthy state
val retries = (Math.log(maxDelay.toMillis.toDouble) / Math.log(2.0d)).ceil.toInt + 2
for (_ <- 1 to retries) {
factory.add(Error(RetryableError))
}
@ -249,13 +263,15 @@ class ResilientSequencerSubscriberPekkoTest extends StreamSpec with BaseTest {
subscription.health.isFailed shouldBe false
val (killSwitch, doneF) = subscription.source.toMat(Sink.ignore)(Keep.left).run()
// we retry until we become unhealthy
eventually() {
logger.debug("Wait until the subscription becomes unhealthy")
eventually(maxPollInterval = 10.milliseconds) {
subscription.health.isFailed shouldBe true
}
// The factory should eventually produce new elements. So we should return to healthy
eventually() {
logger.debug("Wait until the subscription becomes healthy again")
eventually(maxPollInterval = 10.milliseconds) {
subscription.health.getState shouldBe ComponentHealthState.Ok()
}
@ -301,6 +317,8 @@ class TestSequencerSubscriptionFactoryPekko(
)
)
logger.debug(s"Creating SequencerSubscriptionPekko at starting counter $startingCounter")
val source = Source(subscribe(startingCounter))
// Add an incomplete unproductive source at the end to prevent automatic completion signals
.concat(Source.never[Element])

View File

@ -1,4 +1,4 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
name: ai-analysis
source: AIAnalysis.daml
init-script: AIAnalysis:setup

View File

@ -1,4 +1,4 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
name: bank
source: Bank.daml
init-script: Bank:setup

View File

@ -1,4 +1,4 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
name: doctor
source: Doctor.daml
init-script: Doctor:setup

View File

@ -1,4 +1,4 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
name: health-insurance
source: HealthInsurance.daml
init-script: HealthInsurance:setup

View File

@ -1,4 +1,4 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
name: medical-records
source: MedicalRecord.daml
init-script: MedicalRecord:setup

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton.domain.config
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.{CryptoConfig, ProtocolConfig}
import com.digitalasset.canton.config.{CommunityCryptoConfig, CryptoConfig, ProtocolConfig}
import com.digitalasset.canton.crypto.CryptoFactory.{
selectAllowedEncryptionKeyScheme,
selectAllowedHashAlgorithms,
@ -68,7 +68,7 @@ final case class DomainParametersConfig(
* Sets the required crypto schemes based on the provided crypto config if they are unset in the config.
*/
def toStaticDomainParameters(
cryptoConfig: CryptoConfig
cryptoConfig: CryptoConfig = CommunityCryptoConfig()
): Either[String, StaticDomainParameters] = {
def selectSchemes[S](

View File

@ -69,7 +69,7 @@ final case class CommunityMediatorNodeConfig(
override val timeTracker: DomainTimeTrackerConfig = DomainTimeTrackerConfig(),
override val sequencerClient: SequencerClientConfig = SequencerClientConfig(),
override val caching: CachingConfigs = CachingConfigs(),
parameters: MediatorNodeParameterConfig = MediatorNodeParameterConfig(),
override val parameters: MediatorNodeParameterConfig = MediatorNodeParameterConfig(),
override val monitoring: NodeMonitoringConfig = NodeMonitoringConfig(),
override val topologyX: TopologyXConfig = TopologyXConfig.NotUsed,
) extends MediatorNodeConfigCommon(
@ -80,6 +80,7 @@ final case class CommunityMediatorNodeConfig(
timeTracker,
sequencerClient,
caching,
parameters,
monitoring,
)
with ConfigDefaults[DefaultPorts, CommunityMediatorNodeConfig] {
@ -103,6 +104,7 @@ abstract class MediatorNodeConfigCommon(
val timeTracker: DomainTimeTrackerConfig,
val sequencerClient: SequencerClientConfig,
val caching: CachingConfigs,
val parameters: MediatorNodeParameterConfig,
val monitoring: NodeMonitoringConfig,
) extends LocalNodeConfig {
@ -144,7 +146,7 @@ class MediatorNodeBootstrap(
MediatorNodeParameters,
MediatorNodeMetrics,
],
override protected val replicaManager: MediatorReplicaManagerStub,
override protected val replicaManager: MediatorReplicaManager,
override protected val mediatorRuntimeFactory: MediatorRuntimeFactory,
)(
implicit executionContext: ExecutionContextIdlenessExecutorService,
@ -472,7 +474,7 @@ class MediatorNodeCommon(
config: MediatorNodeConfigCommon,
mediatorId: MediatorId,
domainId: DomainId,
replicaManager: MediatorReplicaManagerStub,
replicaManager: MediatorReplicaManager,
storage: Storage,
override protected val clock: Clock,
override protected val loggerFactory: NamedLoggerFactory,
@ -492,7 +494,7 @@ class MediatorNodeCommon(
uptime(),
ports,
replicaManager.isActive,
replicaManager.getTopologyQueueStatus(),
replicaManager.getTopologyQueueStatus,
healthData,
)
)
@ -509,7 +511,7 @@ class MediatorNode(
config: MediatorNodeConfigCommon,
mediatorId: MediatorId,
domainId: DomainId,
replicaManager: MediatorReplicaManagerStub,
replicaManager: MediatorReplicaManager,
storage: Storage,
clock: Clock,
loggerFactory: NamedLoggerFactory,

View File

@ -55,7 +55,7 @@ trait MediatorNodeBootstrapCommon[
(TopologyTransactionProcessorCommon, DomainTopologyClientWithInit),
]
protected val replicaManager: MediatorReplicaManagerStub
protected val replicaManager: MediatorReplicaManager
protected def mediatorRuntimeFactory: MediatorRuntimeFactory

View File

@ -54,7 +54,7 @@ final case class CommunityMediatorNodeXConfig(
override val timeTracker: DomainTimeTrackerConfig = DomainTimeTrackerConfig(),
override val sequencerClient: SequencerClientConfig = SequencerClientConfig(),
override val caching: CachingConfigs = CachingConfigs(),
parameters: MediatorNodeParameterConfig = MediatorNodeParameterConfig(),
override val parameters: MediatorNodeParameterConfig = MediatorNodeParameterConfig(),
override val monitoring: NodeMonitoringConfig = NodeMonitoringConfig(),
override val topologyX: TopologyXConfig = TopologyXConfig(),
) extends MediatorNodeConfigCommon(
@ -65,6 +65,7 @@ final case class CommunityMediatorNodeXConfig(
timeTracker,
sequencerClient,
caching,
parameters,
monitoring,
)
with ConfigDefaults[DefaultPorts, CommunityMediatorNodeXConfig] {
@ -118,7 +119,7 @@ class MediatorNodeBootstrapX(
MediatorNodeParameters,
MediatorNodeMetrics,
],
protected val replicaManager: MediatorReplicaManagerStub,
protected val replicaManager: MediatorReplicaManager,
override protected val mediatorRuntimeFactory: MediatorRuntimeFactory,
)(
implicit executionContext: ExecutionContextIdlenessExecutorService,
@ -258,7 +259,7 @@ class MediatorNodeBootstrapX(
private val domainLoggerFactory = loggerFactory.append("domainId", domainId.toString)
override def attempt()(implicit
override protected def attempt()(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, String, Option[RunningNode[MediatorNodeX]]] = {
@ -363,7 +364,7 @@ class MediatorNodeX(
config: MediatorNodeConfigCommon,
mediatorId: MediatorId,
domainId: DomainId,
protected[canton] val replicaManager: MediatorReplicaManagerStub,
protected[canton] val replicaManager: MediatorReplicaManager,
storage: Storage,
clock: Clock,
loggerFactory: NamedLoggerFactory,

View File

@ -0,0 +1,109 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.domain.mediator
import cats.data.EitherT
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.domain.api.v0
import com.digitalasset.canton.health.admin.data.TopologyQueueStatus
import com.digitalasset.canton.lifecycle.{AsyncOrSyncCloseable, FlagCloseableAsync, SyncCloseable}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.networking.grpc.{CantonMutableHandlerRegistry, GrpcDynamicService}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil, SingleUseCell}
import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future}
trait MediatorReplicaManager extends NamedLogging with FlagCloseableAsync {
protected val mediatorRuntimeFactoryRef
: SingleUseCell[() => EitherT[Future, String, MediatorRuntime]] =
new SingleUseCell
protected def getMediatorRuntimeFactory()(implicit
traceContext: TraceContext
): () => EitherT[Future, String, MediatorRuntime] =
mediatorRuntimeFactoryRef.getOrElse {
ErrorUtil.internalError(
new IllegalStateException(
"Set active called before mediator runtime factory was initialized"
)
)
}
protected val mediatorRuntimeRef = new AtomicReference[Option[MediatorRuntime]](None)
protected[canton] def mediatorRuntime: Option[MediatorRuntime] = mediatorRuntimeRef.get()
protected val serviceUnavailableMessage = "Mediator replica is passive"
val domainTimeService =
new GrpcDynamicService(
v0.DomainTimeServiceGrpc.SERVICE,
serviceUnavailableMessage,
loggerFactory,
)
def setup(
adminServiceRegistry: CantonMutableHandlerRegistry,
factory: () => EitherT[Future, String, MediatorRuntime],
isActive: Boolean,
)(implicit traceContext: TraceContext): Future[Unit]
def isActive: Boolean
def getTopologyQueueStatus: TopologyQueueStatus = TopologyQueueStatus(
manager =
mediatorRuntime.flatMap(_.mediator.topologyManagerStatusO).map(_.queueSize).getOrElse(0),
dispatcher =
mediatorRuntime.flatMap(_.mediator.domainOutboxStatusO).map(_.queueSize).getOrElse(0),
clients = mediatorRuntime.map(x => x.mediator.topologyClient.numPendingChanges).getOrElse(0),
)
override def closeAsync(): Seq[AsyncOrSyncCloseable] = mediatorRuntimeRef
.get()
.toList
.map(runtime => SyncCloseable("mediatorRuntime", runtime.close()))
}
/** Community version of the mediator replica manager.
* Does not support high-availability.
*/
class CommunityMediatorReplicaManager(
override protected val timeouts: ProcessingTimeout,
override protected val loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext)
extends MediatorReplicaManager {
override def setup(
adminServiceRegistry: CantonMutableHandlerRegistry,
factory: () => EitherT[Future, String, MediatorRuntime],
isActive: Boolean,
)(implicit traceContext: TraceContext): Future[Unit] = {
logger.debug("Setting up replica manager")
mediatorRuntimeFactoryRef.putIfAbsent(factory).foreach { prev =>
logger.warn(
s"Mediator runtime factory was already initialized with $prev, ignoring new value"
)
}
adminServiceRegistry.addServiceU(domainTimeService.serviceDescriptor)
for {
mediatorRuntime <- EitherTUtil.toFuture(
getMediatorRuntimeFactory().apply().leftMap(new MediatorReplicaManagerException(_))
)
} yield {
mediatorRuntimeRef.set(Some(mediatorRuntime))
domainTimeService.setInstance(mediatorRuntime.timeService)
}
}
override def isActive: Boolean = true
}
/** An unexpected error occurred while transitioning between replica states */
class MediatorReplicaManagerException(message: String) extends RuntimeException(message)

View File

@ -1,26 +0,0 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.domain.mediator
import cats.data.EitherT
import com.digitalasset.canton.health.admin.data.TopologyQueueStatus
import com.digitalasset.canton.networking.grpc.CantonMutableHandlerRegistry
import com.digitalasset.canton.tracing.TraceContext
import scala.concurrent.Future
trait MediatorReplicaManagerStub extends AutoCloseable {
def setup(
adminServiceRegistry: CantonMutableHandlerRegistry,
factory: () => EitherT[Future, String, MediatorRuntime],
isActive: Boolean,
)(implicit traceContext: TraceContext): Future[Unit]
def isActive: Boolean
def getTopologyQueueStatus(): TopologyQueueStatus
protected[canton] def mediatorRuntime: Option[MediatorRuntime]
}

View File

@ -312,10 +312,10 @@ class SequencerRuntime(
override def onClosed(): Unit =
Lifecycle.close(
topologyClient,
sequencerService,
authenticationServices.memberAuthenticationService,
sequencer,
topologyClient,
)(logger)
}

View File

@ -57,5 +57,15 @@ trait ConsoleEnvironmentTestHelpers[+CE <: ConsoleEnvironment] { this: CE =>
.find(_.name == name)
.getOrElse(sys.error(s"remote domain [$name] not configured"))
def rmx(name: String): RemoteMediatorReferenceX =
mediatorsX.remote
.find(_.name == name)
.getOrElse(sys.error(s"remote mediator-x [$name] not configured"))
def mx(name: String): LocalMediatorReferenceX =
mediatorsX.local
.find(_.name == name)
.getOrElse(sys.error(s"mediator-x [$name] not configured"))
def mediatorIdForDomain(domain: String): MediatorId = MediatorId(d(domain).id)
}

View File

@ -9,10 +9,10 @@ import com.digitalasset.canton.logging.NamedLoggerFactory
/** Definition of how a environment should be configured and setup.
* @param baseConfig the base config to use (typically loaded from a pre-canned config file or sample)
* @param setup a function to configure the environment before tests can be run.
* @param testingConfig the testing specifics bits of the config
* @param setups a function to configure the environment before tests can be run.
* @param teardown a function to perform cleanup after the environment has been destroyed.
* @param configTransforms transforms to perform on the base configuration before starting the environment (typically making ports unique or some other specialization for the particular tests you're running)
* @param networkBootstrapFactory The NetworkBootstrap is created and run before the setup steps for bootstrapping the domain(s). Manual starts use a noop network bootstrap.
*/
abstract class BaseEnvironmentDefinition[E <: Environment, TCE <: TestConsoleEnvironment[E]](
val baseConfig: E#Config,

View File

@ -6,6 +6,7 @@ package com.digitalasset.canton.integration
import com.digitalasset.canton.console.{
ConsoleEnvironment,
ConsoleEnvironmentTestHelpers,
LocalMediatorReferenceX,
LocalParticipantReference,
LocalParticipantReferenceX,
ParticipantReference,
@ -33,4 +34,9 @@ trait CommonTestAliases[+CE <: ConsoleEnvironment] {
lazy val acme: CE#DomainLocalRef = d("acme")
lazy val repairDomain: CE#DomainLocalRef = d("repair")
lazy val mediator1x: LocalMediatorReferenceX = mx("mediator1")
lazy val mediator2x: LocalMediatorReferenceX = mx("mediator2")
lazy val mediator3x: LocalMediatorReferenceX = mx("mediator3")
lazy val mediator4x: LocalMediatorReferenceX = mx("mediator4")
}

View File

@ -13,6 +13,7 @@ import com.digitalasset.canton.config.{
StorageConfig,
}
import com.digitalasset.canton.domain.config.CommunityDomainConfig
import com.digitalasset.canton.domain.mediator.CommunityMediatorNodeXConfig
import com.digitalasset.canton.participant.config.CommunityParticipantConfig
import com.typesafe.config.{Config, ConfigValueFactory}
import monocle.macros.syntax.lens.*
@ -78,6 +79,19 @@ object CommunityConfigTransforms {
.focus(_.participants)
.modify(_.map { case (pName, pConfig) => (pName, update(pName.unwrap, pConfig)) })
def updateAllMediatorXConfigs_(
update: CommunityMediatorNodeXConfig => CommunityMediatorNodeXConfig
): CommunityConfigTransform =
updateAllMediatorXConfigs((_, config) => update(config))
def updateAllMediatorXConfigs(
update: (String, CommunityMediatorNodeXConfig) => CommunityMediatorNodeXConfig
): CommunityConfigTransform =
cantonConfig =>
cantonConfig
.focus(_.mediatorsX)
.modify(_.map { case (pName, pConfig) => (pName, update(pName.unwrap, pConfig)) })
def uniqueH2DatabaseNames: CommunityConfigTransform = {
updateAllDomainConfigs { case (nodeName, cfg) =>
cfg.focus(_.storage).modify(CommunityConfigTransforms.withUniqueDbName(nodeName, _))
@ -106,7 +120,14 @@ object CommunityConfigTransforms {
.replace(nextPort.some)
}
domainUpdate compose participantUpdate
val mediatorXUpdate = updateAllMediatorXConfigs_(
_.focus(_.adminApi.internalPort)
.replace(nextPort.some)
.focus(_.monitoring.grpcHealthServer)
.modify(_.map(_.copy(internalPort = nextPort.some)))
)
domainUpdate compose participantUpdate compose mediatorXUpdate
}
}

View File

@ -7,16 +7,14 @@ import com.digitalasset.canton.console.InstanceReferenceX
import com.digitalasset.canton.protocol.StaticDomainParameters
import com.digitalasset.canton.topology.DomainId
/*
* Used to bootstrap one or more domains at the start of a test.
*/
/** Used to bootstrap one or more domains at the start of a test.
*/
trait NetworkBootstrap {
def bootstrap(): Unit
}
/*
* A data container to hold useful information for initialized domains
*/
/** A data container to hold useful information for initialized domains
*/
final case class InitializedDomain(
domainId: DomainId,
staticDomainParameters: StaticDomainParameters,

View File

@ -57,17 +57,27 @@ private[auth] final class OngoingAuthorizationObserver[A](
" This can result in detached/rogue server side stream processing, and a resulting memory leak!"
)
private val cancelUserRightsChecksO =
private val cancelUserRightsChecksO: Option[() => Unit] =
userRightsCheckerO.map(
_.schedule(() => abortGRPCStreamAndCancelUpstream(staleStreamAuthError))
_.schedule { () =>
if (!isCancelled) {
// Downstream cancellation could race with emittion of errors, therefore we only emit error if the stream is
// not cancelled.
abortGRPCStreamAndCancelUpstream(staleStreamAuthError)
}
}
)
override def isCancelled: Boolean = blocking(synchronized(observer.isCancelled))
override def setOnCancelHandler(runnable: Runnable): Unit = blocking(
synchronized {
observer.setOnCancelHandler(runnable)
onCancelHandler = runnable
val newCancelHandler: Runnable = { () =>
cancelUserRightsChecksO.foreach(_.apply())
runnable.run()
}
observer.setOnCancelHandler(newCancelHandler)
onCancelHandler = newCancelHandler
}
)

View File

@ -4,8 +4,6 @@
package com.digitalasset.canton.ledger.api.validation
import com.daml.error.ContextualizedErrorLogger
import com.digitalasset.canton.ledger.api.DeduplicationPeriod
import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors
import io.grpc.StatusRuntimeException
import java.time.Duration
@ -13,35 +11,6 @@ import java.time.Duration
object DeduplicationPeriodValidator {
private val fieldName = "deduplication_period"
def validate(
deduplicationPeriod: DeduplicationPeriod,
maxDeduplicationDuration: Duration,
)(implicit
contextualizedErrorLogger: ContextualizedErrorLogger
): Either[StatusRuntimeException, DeduplicationPeriod] = {
deduplicationPeriod match {
case DeduplicationPeriod.DeduplicationDuration(duration) =>
validateDuration(duration, maxDeduplicationDuration).map(_ => deduplicationPeriod)
case DeduplicationPeriod.DeduplicationOffset(_) => Right(deduplicationPeriod)
}
}
def validateDuration(duration: Duration, maxDeduplicationDuration: Duration)(implicit
contextualizedErrorLogger: ContextualizedErrorLogger
): Either[StatusRuntimeException, Duration] =
validateNonNegativeDuration(duration).flatMap { duration =>
if (duration.compareTo(maxDeduplicationDuration) > 0)
Left(
RequestValidationErrors.InvalidDeduplicationPeriodField
.Reject(
s"The given deduplication duration of $duration exceeds the maximum deduplication duration of $maxDeduplicationDuration",
Some(maxDeduplicationDuration),
)
.asGrpcError
)
else Right(duration)
}
def validateNonNegativeDuration(duration: Duration)(implicit
contextualizedErrorLogger: ContextualizedErrorLogger
): Either[StatusRuntimeException, Duration] = if (duration.isNegative)

View File

@ -65,45 +65,6 @@ object Update {
}
}
/** Signal that a configuration change submitted by this participant was rejected. */
final case class ConfigurationChangeRejected(
recordTime: Timestamp,
submissionId: Ref.SubmissionId,
participantId: Ref.ParticipantId,
proposedConfiguration: Configuration,
rejectionReason: String,
) extends Update {
override def pretty: Pretty[ConfigurationChangeRejected] =
prettyOfClass(
param("recordTime", _.recordTime),
param("configuration", _.proposedConfiguration),
param("rejectionReason", _.rejectionReason.singleQuoted),
indicateOmittedFields,
)
}
object ConfigurationChangeRejected {
implicit val `ConfigurationChangeRejected to LoggingValue`
: ToLoggingValue[ConfigurationChangeRejected] = {
case ConfigurationChangeRejected(
recordTime,
submissionId,
participantId,
proposedConfiguration,
rejectionReason,
) =>
LoggingValue.Nested.fromEntries(
Logging.recordTime(recordTime),
Logging.submissionId(submissionId),
Logging.participantId(participantId),
Logging.configGeneration(proposedConfiguration.generation),
Logging.maxDeduplicationDuration(proposedConfiguration.maxDeduplicationDuration),
Logging.rejectionReason(rejectionReason),
)
}
}
/** Signal that a party is hosted at a participant.
*
* Repeated `PartyAddedToParticipant` updates are interpreted in the order of their offsets as follows:
@ -471,10 +432,6 @@ object Update {
implicit val `Update to LoggingValue`: ToLoggingValue[Update] = {
case update: ConfigurationChanged =>
ConfigurationChanged.`ConfigurationChanged to LoggingValue`.toLoggingValue(update)
case update: ConfigurationChangeRejected =>
ConfigurationChangeRejected.`ConfigurationChangeRejected to LoggingValue`.toLoggingValue(
update
)
case update: PartyAddedToParticipant =>
PartyAddedToParticipant.`PartyAddedToParticipant to LoggingValue`.toLoggingValue(update)
case update: PartyAllocationRejected =>

View File

@ -1,38 +0,0 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.ledger.participant.state.v2
import com.daml.lf.data.Ref
import com.daml.lf.data.Time.Timestamp
import com.digitalasset.canton.ledger.configuration.Configuration
import com.digitalasset.canton.tracing.TraceContext
import java.util.concurrent.CompletionStage
trait WriteConfigService {
/** Submit a new configuration to the ledger. If the configuration is accepted
* a [[com.digitalasset.canton.ledger.participant.state.v2.Update.ConfigurationChanged]] event will be emitted to all participants.
* In case of rejection a [[com.digitalasset.canton.ledger.participant.state.v2.Update.ConfigurationChangeRejected]] will be emitted.
*
* The [[com.digitalasset.canton.ledger.configuration.Configuration]] contains the identity of the participant that is allowed
* to further change the configuration. The initial configuration can be submitted
* by any participant.
*
* If configuration changes are not supported by the implementation then the
* [[com.digitalasset.canton.ledger.participant.state.v2.SubmissionResult.SynchronousError]] should be returned.
* *
* @param maxRecordTime: The maximum record time after which the request is rejected.
* @param submissionId: Client picked submission identifier for matching the responses with the request.
* @param config: The new ledger configuration.
* @return an async result of a SubmissionResult
*/
def submitConfiguration(
maxRecordTime: Timestamp,
submissionId: Ref.SubmissionId,
config: Configuration,
)(implicit
traceContext: TraceContext
): CompletionStage[SubmissionResult]
}

View File

@ -37,7 +37,6 @@ import java.util.concurrent.CompletionStage
trait WriteService
extends WritePackagesService
with WritePartyService
with WriteConfigService
with WriteParticipantPruningService
with ReportsHealth {

View File

@ -4,13 +4,12 @@
package com.digitalasset.canton.ledger.participant.state.v2.metrics
import com.daml.daml_lf_dev.DamlLf
import com.daml.lf.data.{ImmArray, Ref, Time}
import com.daml.lf.data.{ImmArray, Ref}
import com.daml.lf.transaction.{GlobalKey, SubmittedTransaction}
import com.daml.lf.value.Value
import com.daml.metrics.Timed
import com.digitalasset.canton.data.ProcessedDisclosedContract
import com.digitalasset.canton.ledger.api.health.HealthStatus
import com.digitalasset.canton.ledger.configuration.Configuration
import com.digitalasset.canton.ledger.offset.Offset
import com.digitalasset.canton.ledger.participant.state.v2.{
PruningResult,
@ -100,18 +99,6 @@ final class TimedWriteService(delegate: WriteService, metrics: Metrics) extends
delegate.allocateParty(hint, displayName, submissionId),
)
override def submitConfiguration(
maxRecordTime: Time.Timestamp,
submissionId: Ref.SubmissionId,
config: Configuration,
)(implicit
traceContext: TraceContext
): CompletionStage[SubmissionResult] =
Timed.completionStage(
metrics.daml.services.write.submitConfiguration,
delegate.submitConfiguration(maxRecordTime, submissionId, config),
)
override def prune(
pruneUpToInclusive: Offset,
submissionId: Ref.SubmissionId,

View File

@ -470,8 +470,6 @@ object ApiServices {
val apiConfigManagementService = ApiConfigManagementService.createApiService(
configManagementService,
writeService,
timeProvider,
telemetry = telemetry,
loggerFactory = loggerFactory,
)

View File

@ -548,7 +548,7 @@ private[apiserver] final class StoreBackedCommandExecutor(
Versioned(
unusedTxVersion,
GlobalKeyWithMaintainers
.assertBuild(disclosedContract.templateId, value, maintainers),
.assertBuild(disclosedContract.templateId, value, maintainers, shared = true),
)
},
),

View File

@ -3,26 +3,16 @@
package com.digitalasset.canton.platform.apiserver.services.admin
import com.daml.error.ContextualizedErrorLogger
import com.daml.ledger.api.v1.admin.config_management_service.ConfigManagementServiceGrpc.ConfigManagementService
import com.daml.ledger.api.v1.admin.config_management_service.*
import com.daml.lf.data.{Ref, Time}
import com.daml.logging.LoggingContext
import com.daml.tracing.Telemetry
import com.digitalasset.canton.ledger.api.domain.{ConfigurationEntry, LedgerOffset}
import com.digitalasset.canton.error.TransactionError
import com.digitalasset.canton.ledger.api.grpc.GrpcApiService
import com.digitalasset.canton.ledger.api.util.{
DurationConversion,
TimeProvider,
TimestampConversion,
}
import com.digitalasset.canton.ledger.api.validation.FieldValidator
import com.digitalasset.canton.ledger.api.validation.ValidationErrors.*
import com.digitalasset.canton.ledger.api.{ValidationLogger, domain}
import com.digitalasset.canton.ledger.configuration.{Configuration, LedgerTimeModel}
import com.digitalasset.canton.ledger.error.groups.{AdminServiceErrors, RequestValidationErrors}
import com.digitalasset.canton.ledger.api.util.DurationConversion
import com.digitalasset.canton.ledger.configuration.Configuration
import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors
import com.digitalasset.canton.ledger.participant.state.index.v2.IndexConfigManagementService
import com.digitalasset.canton.ledger.participant.state.v2 as state
import com.digitalasset.canton.logging.LoggingContextUtil.createLoggingContext
import com.digitalasset.canton.logging.LoggingContextWithTrace.{
implicitExtractTraceContext,
@ -30,27 +20,17 @@ import com.digitalasset.canton.logging.LoggingContextWithTrace.{
}
import com.digitalasset.canton.logging.TracedLoggerOps.TracedLoggerOps
import com.digitalasset.canton.logging.*
import com.digitalasset.canton.platform.apiserver.services.admin.ApiConfigManagementService.*
import com.digitalasset.canton.platform.apiserver.services.logging
import io.grpc.{ServerServiceDefinition, StatusRuntimeException}
import org.apache.pekko.stream.Materializer
import org.apache.pekko.stream.scaladsl.Source
import io.grpc.ServerServiceDefinition
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future}
import scala.jdk.FutureConverters.CompletionStageOps
import scala.util.{Failure, Success}
private[apiserver] final class ApiConfigManagementService private (
index: IndexConfigManagementService,
writeService: state.WriteConfigService,
timeProvider: TimeProvider,
submissionIdGenerator: String => Ref.SubmissionId,
telemetry: Telemetry,
val loggerFactory: NamedLoggerFactory,
)(implicit
materializer: Materializer,
executionContext: ExecutionContext,
executionContext: ExecutionContext
) extends ConfigManagementService
with GrpcApiService
with NamedLogging {
@ -58,16 +38,7 @@ private[apiserver] final class ApiConfigManagementService private (
private implicit val loggingContext: LoggingContext =
createLoggingContext(loggerFactory)(identity)
private val synchronousResponse = new SynchronousResponse(
new SynchronousResponseStrategy(
writeService,
index,
loggerFactory,
),
loggerFactory,
)
override def close(): Unit = synchronousResponse.close()
override def close(): Unit = {}
override def bindService(): ServerServiceDefinition =
ConfigManagementServiceGrpc.bindService(this, executionContext)
@ -117,193 +88,22 @@ private[apiserver] final class ApiConfigManagementService private (
logging.submissionId(request.submissionId)
) { implicit loggingContext =>
logger.info(s"Setting time model, ${loggingContext.serializeFiltered("submissionId")}.")
implicit val errorLoggingContext: ContextualizedErrorLogger =
LedgerErrorLoggingContext(
logger,
loggingContext.toPropertiesMap,
loggingContext.traceContext,
request.submissionId,
)
val response = for {
// Validate and convert the request parameters
params <- validateParameters(request).fold(
t => Future.failed(ValidationLogger.logFailureWithTrace(logger, request, t)),
Future.successful,
)
// Lookup latest configuration to check generation and to extend it with the new time model.
configuration <- index
.lookupConfiguration()
.flatMap {
case Some(result) =>
Future.successful(result)
case None =>
logger.warn(
"Could not get the current time model. The index does not yet have any ledger configuration."
)
Future.failed(
RequestValidationErrors.NotFound.LedgerConfiguration
.Reject()
.asGrpcError
)
}
(ledgerEndBeforeRequest, currentConfig) = configuration
// Verify that we're modifying the current configuration.
expectedGeneration = currentConfig.generation
_ <-
if (request.configurationGeneration != expectedGeneration) {
Future.failed(
ValidationLogger.logFailureWithTrace(
logger,
request,
invalidArgument(
s"Mismatching configuration generation, expected $expectedGeneration, received ${request.configurationGeneration}"
),
)
)
} else {
Future.unit
}
// Create the new extended configuration.
newConfig = currentConfig.copy(
generation = currentConfig.generation + 1,
timeModel = params.newTimeModel,
)
// Submit configuration to the ledger, and start polling for the result.
augmentedSubmissionId = submissionIdGenerator(request.submissionId)
entry <- synchronousResponse.submitAndWait(
augmentedSubmissionId,
(params.maximumRecordTime, newConfig),
Some(ledgerEndBeforeRequest),
params.timeToLive,
)
} yield SetTimeModelResponse(entry.configuration.generation)
response.andThen(logger.logErrorsOnCall[SetTimeModelResponse])
Future.failed(TransactionError.NotSupported.exception)
}
private case class SetTimeModelParameters(
newTimeModel: LedgerTimeModel,
maximumRecordTime: Time.Timestamp,
timeToLive: FiniteDuration,
)
private def validateParameters(
request: SetTimeModelRequest
)(implicit
contextualizedErrorLogger: ContextualizedErrorLogger
): Either[StatusRuntimeException, SetTimeModelParameters] = {
import FieldValidator.*
for {
pTimeModel <- requirePresence(request.newTimeModel, "new_time_model")
pAvgTransactionLatency <- requirePresence(
pTimeModel.avgTransactionLatency,
"avg_transaction_latency",
)
pMinSkew <- requirePresence(pTimeModel.minSkew, "min_skew")
pMaxSkew <- requirePresence(pTimeModel.maxSkew, "max_skew")
newTimeModel <- LedgerTimeModel(
avgTransactionLatency = DurationConversion.fromProto(pAvgTransactionLatency),
minSkew = DurationConversion.fromProto(pMinSkew),
maxSkew = DurationConversion.fromProto(pMaxSkew),
) match {
case Failure(err) => Left(invalidArgument(err.toString))
case Success(ok) => Right(ok)
}
pMaxRecordTime <- requirePresence(request.maximumRecordTime, "maximum_record_time")
mrtInstant = TimestampConversion.toInstant(pMaxRecordTime)
timeToLive = {
val ttl = java.time.Duration.between(timeProvider.getCurrentTime, mrtInstant)
if (ttl.isNegative) Duration.Zero
else Duration.fromNanos(ttl.toNanos)
}
maximumRecordTime <- Time.Timestamp
.fromInstant(mrtInstant)
.fold(err => Left(invalidArgument(err)), Right(_))
} yield SetTimeModelParameters(newTimeModel, maximumRecordTime, timeToLive)
}
}
private[apiserver] object ApiConfigManagementService {
def createApiService(
readBackend: IndexConfigManagementService,
writeBackend: state.WriteConfigService,
timeProvider: TimeProvider,
submissionIdGenerator: String => Ref.SubmissionId = augmentSubmissionId,
telemetry: Telemetry,
loggerFactory: NamedLoggerFactory,
)(implicit
materializer: Materializer,
executionContext: ExecutionContext,
executionContext: ExecutionContext
): ConfigManagementServiceGrpc.ConfigManagementService & GrpcApiService =
new ApiConfigManagementService(
readBackend,
writeBackend,
timeProvider,
submissionIdGenerator,
telemetry,
loggerFactory,
)
private final class SynchronousResponseStrategy(
writeConfigService: state.WriteConfigService,
configManagementService: IndexConfigManagementService,
val loggerFactory: NamedLoggerFactory,
) extends SynchronousResponse.Strategy[
(Time.Timestamp, Configuration),
ConfigurationEntry,
ConfigurationEntry.Accepted,
]
with NamedLogging {
override def submit(
submissionId: Ref.SubmissionId,
input: (Time.Timestamp, Configuration),
)(implicit
loggingContext: LoggingContextWithTrace
): Future[state.SubmissionResult] = {
val (maximumRecordTime, newConfiguration) = input
writeConfigService
.submitConfiguration(maximumRecordTime, submissionId, newConfiguration)
.asScala
}
override def entries(offset: Option[LedgerOffset.Absolute])(implicit
loggingContext: LoggingContextWithTrace
): Source[ConfigurationEntry, _] =
configManagementService.configurationEntries(offset).map(_._2)
override def accept(
submissionId: Ref.SubmissionId
): PartialFunction[ConfigurationEntry, ConfigurationEntry.Accepted] = {
case entry @ domain.ConfigurationEntry.Accepted(`submissionId`, _) =>
entry
}
override def reject(
submissionId: Ref.SubmissionId
)(implicit
loggingContext: LoggingContextWithTrace
): PartialFunction[ConfigurationEntry, StatusRuntimeException] = {
case domain.ConfigurationEntry.Rejected(`submissionId`, reason, _) =>
AdminServiceErrors.ConfigurationEntryRejected
.Reject(reason)(
LedgerErrorLoggingContext(
logger,
loggingContext.toPropertiesMap,
loggingContext.traceContext,
submissionId,
)
)
.asGrpcError
}
}
}

View File

@ -125,14 +125,14 @@ private[apiserver] final class CommandSubmissionServiceImpl private[services] (
val cmds = request.commands.commands.commands
logger.debug(show"Submitted commands are: ${if (cmds.length > 1) "\n " else ""}${cmds
.map {
case ApiCommand.Create(tmplRef, _) =>
s"create ${tmplRef.qName}"
case ApiCommand.Exercise(tmplRef, _, choiceId, _) =>
s"exercise @${tmplRef.qName} ${choiceId}"
case ApiCommand.ExerciseByKey(tmplRef, _, choiceId, _) =>
s"exerciseByKey @${tmplRef.qName} $choiceId"
case ApiCommand.CreateAndExercise(tmplRef, _, choiceId, _) =>
s"createAndExercise ${tmplRef.qName} ... $choiceId ..."
case ApiCommand.Create(templateRef, _) =>
s"create ${templateRef.qName}"
case ApiCommand.Exercise(templateRef, _, choiceId, _) =>
s"exercise @${templateRef.qName} ${choiceId}"
case ApiCommand.ExerciseByKey(templateRef, _, choiceId, _) =>
s"exerciseByKey @${templateRef.qName} $choiceId"
case ApiCommand.CreateAndExercise(templateRef, _, choiceId, _) =>
s"createAndExercise ${templateRef.qName} ... $choiceId ..."
}
.map(_.singleQuoted)
.toSeq

View File

@ -88,23 +88,6 @@ object UpdateToDbDto {
)
)
case u: ConfigurationChangeRejected =>
incrementCounterForEvent(
metrics.daml.indexerEvents,
IndexedUpdatesMetrics.Labels.eventType.configurationChange,
IndexedUpdatesMetrics.Labels.status.rejected,
)
Iterator(
DbDto.ConfigurationEntry(
ledger_offset = offset.toHexString,
recorded_at = u.recordTime.micros,
submission_id = u.submissionId,
typ = JdbcLedgerDao.rejectType,
configuration = Configuration.encode(u.proposedConfiguration).toByteArray,
rejection_reason = Some(u.rejectionReason),
)
)
case u: PartyAddedToParticipant =>
incrementCounterForEvent(
metrics.daml.indexerEvents,

View File

@ -93,7 +93,7 @@ private[dao] sealed class EventsReader(
maxIterations: Int,
)(implicit loggingContext: LoggingContextWithTrace): Future[GetEventsByContractKeyResponse] = {
val keyHash: String =
platform.Key.assertBuild(templateId, contractKey, shared = false).hash.bytes.toHexString
platform.Key.assertBuild(templateId, contractKey, shared = true).hash.bytes.toHexString
val eventProjectionProperties = EventProjectionProperties(
// Used by LfEngineToApi

View File

@ -32,6 +32,7 @@ import com.digitalasset.canton.ledger.api.auth.services.TransactionServiceAuthor
import com.digitalasset.canton.ledger.api.domain.UserRight.CanReadAs
import com.digitalasset.canton.ledger.api.domain.{IdentityProviderId, User}
import com.digitalasset.canton.ledger.api.grpc.StreamingServiceLifecycleManagement
import com.digitalasset.canton.logging.SuppressionRule.{FullSuppression, LoggerNameContains}
import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory}
import com.digitalasset.canton.metrics.Metrics
import com.digitalasset.canton.platform.apiserver.{ApiServiceOwner, GrpcServer}
@ -69,6 +70,8 @@ class StreamAuthorizationComponentSpec
with Matchers
with PekkoBeforeAndAfterAll {
private val OngoingAuthorizationObserverLoggerName = "OngoingAuthorizationObserver"
private implicit val ec: ExecutionContextExecutor = materializer.executionContext
behavior of s"Stream authorization"
@ -84,26 +87,69 @@ class StreamAuthorizationComponentSpec
.map(_ => fixture.waitForServerPekkoStream shouldBe None)
}
it should "cancel streams if user rights changed" in test { fixture =>
fixture.clientStream
.take(10)
.zipWithIndex
.map { case (_, index) =>
if (index == 1) {
// after 2 received entries (400 millis) the user right change,
// which triggers a STALE_STREAM_AUTHORIZATION
fixture.changeUserRights
it should "not emit STALE_STREAM_AUTHORIZATION after it was cancelled downstream" in test {
fixture =>
// this stream takes 10 elements (takes 2 seconds to produce), then it is closed (user side cancellation).
// after one second a scheduled user right check will commence, this check expected to be successful
fixture.clientStream
.take(10)
.zipWithIndex
.map { case (_, index) =>
if (index == 9) {
// towards the end we change the user rights, which makes the next scheduled user right check fail
fixture.changeUserRights
}
logger.debug("received")
}
logger.debug(s"received #$index")
}
.run()
.failed
.map { t =>
// the client stream should be cancelled with error
t.getMessage should include("STALE_STREAM_AUTHORIZATION")
// the server stream should be completed
fixture.waitForServerPekkoStream shouldBe None
.run()
.map { _ =>
// now the stream is cancelled from downstream because of the take(10) above
fixture.waitForServerPekkoStream shouldBe None
val suppressionRules = FullSuppression &&
LoggerNameContains(OngoingAuthorizationObserverLoggerName)
loggerFactory.suppress(suppressionRules) {
// waiting 2 seconds for the user right checker schedule task to execute
Threading.sleep(2000)
loggerFactory.fetchRecordedLogEntries shouldBe Nil
}
}
}
it should "cancel streams if user rights changed" in test { fixture =>
val suppressionRules = FullSuppression &&
LoggerNameContains(OngoingAuthorizationObserverLoggerName)
loggerFactory.suppress(suppressionRules) {
val result = fixture.clientStream
.take(10)
.zipWithIndex
.map { case (_, index) =>
if (index == 1) {
// after 2 received entries (400 millis) the user right change,
// which triggers a STALE_STREAM_AUTHORIZATION
fixture.changeUserRights
}
logger.debug(s"received #$index")
}
.run()
.failed
.map { t =>
// the client stream should be cancelled with error
t.getMessage should include("STALE_STREAM_AUTHORIZATION")
// the server stream should be completed
fixture.waitForServerPekkoStream shouldBe None
}
// Please note: asserting on the log message is important because in the previous test
// "not emit STALE_STREAM_AUTHORIZATION after it was cancelled downstream" we are doing
// a negative lookup, and we need to make sure that the negative lookup looks for the
// right log messages.
eventually() {
loggerFactory.fetchRecordedLogEntries should have size (1)
loggerFactory.fetchRecordedLogEntries(0).infoMessage should include(
"STALE_STREAM_AUTHORIZATION"
)
}
result
}
}
it should "cancel streams if authorization expired" in test { fixture =>

View File

@ -1,43 +0,0 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.ledger.api.validation
import com.daml.error.{ContextualizedErrorLogger, NoLogging}
import com.digitalasset.canton.ledger.api.DeduplicationPeriod.DeduplicationDuration
import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors.InvalidDeduplicationPeriodField.ValidMaxDeduplicationFieldKey
import io.grpc.Status.Code.FAILED_PRECONDITION
import org.scalatest.matchers.should.Matchers
import org.scalatest.prop.TableDrivenPropertyChecks
import org.scalatest.wordspec.AnyWordSpec
import java.time
import java.time.Duration
class DeduplicationPeriodValidatorSpec
extends AnyWordSpec
with Matchers
with ValidatorTestUtils
with TableDrivenPropertyChecks {
private implicit val contextualizedErrorLogger: ContextualizedErrorLogger = NoLogging
private val maxDeduplicationDuration = time.Duration.ofSeconds(5)
"not allow deduplication duration exceeding maximum deduplication duration" in {
val durationSecondsExceedingMax = maxDeduplicationDuration.plusSeconds(1).getSeconds
requestMustFailWith(
request = DeduplicationPeriodValidator.validate(
DeduplicationDuration(
Duration.ofSeconds(durationSecondsExceedingMax)
),
maxDeduplicationDuration,
),
code = FAILED_PRECONDITION,
description = s"INVALID_DEDUPLICATION_PERIOD(9,0): The submitted command had an invalid deduplication period: The given deduplication duration of ${java.time.Duration
.ofSeconds(durationSecondsExceedingMax)} exceeds the maximum deduplication duration of ${maxDeduplicationDuration}",
metadata = Map(
ValidMaxDeduplicationFieldKey -> maxDeduplicationDuration.toString
),
)
}
}

View File

@ -277,6 +277,7 @@ class StoreBackedCommandExecutorSpec
identifier,
someContractKey(signatory, "some key"),
Set(signatory),
shared = true,
)
),
resume = verdict => {

View File

@ -4,41 +4,20 @@
package com.digitalasset.canton.platform.apiserver.services.admin
import com.daml.error.ErrorsAssertions
import com.daml.error.utils.ErrorDetails
import com.daml.error.utils.ErrorDetails.RetryInfoDetail
import com.daml.grpc.{GrpcException, GrpcStatus}
import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll
import com.daml.ledger.api.v1.admin.config_management_service.{
GetTimeModelRequest,
SetTimeModelRequest,
TimeModel,
}
import com.daml.lf.data.Ref.SubmissionId
import com.daml.lf.data.{Ref, Time}
import com.daml.tracing.TelemetrySpecBase.*
import com.daml.tracing.{DefaultOpenTelemetry, NoOpTelemetry}
import com.daml.ledger.api.v1.admin.config_management_service.{GetTimeModelRequest, TimeModel}
import com.daml.lf.data.Ref
import com.daml.tracing.NoOpTelemetry
import com.digitalasset.canton.BaseTest
import com.digitalasset.canton.ledger.api.domain.{ConfigurationEntry, LedgerOffset}
import com.digitalasset.canton.ledger.api.util.TimeProvider
import com.digitalasset.canton.ledger.configuration.{Configuration, LedgerTimeModel}
import com.digitalasset.canton.ledger.participant.state.index.v2.IndexConfigManagementService
import com.digitalasset.canton.ledger.participant.state.v2.{
SubmissionResult,
WriteConfigService,
WriteService,
}
import com.digitalasset.canton.ledger.participant.state.v2 as state
import com.digitalasset.canton.logging.LoggingContextWithTrace
import com.digitalasset.canton.platform.apiserver.services.admin.ApiConfigManagementServiceSpec.*
import com.digitalasset.canton.tracing.{TestTelemetrySetup, TraceContext}
import com.digitalasset.canton.{BaseTest, DiscardOps}
import com.digitalasset.canton.tracing.TestTelemetrySetup
import com.google.protobuf.duration.Duration as DurationProto
import com.google.protobuf.timestamp.Timestamp
import io.grpc.Status.Code
import io.grpc.StatusRuntimeException
import io.opentelemetry.api.trace.Tracer
import io.opentelemetry.sdk.OpenTelemetrySdk
import org.apache.pekko.NotUsed
import org.apache.pekko.stream.Materializer
import org.apache.pekko.stream.scaladsl.Source
import org.mockito.{ArgumentMatchersSugar, MockitoSugar}
import org.scalatest.matchers.should.Matchers
@ -46,12 +25,8 @@ import org.scalatest.wordspec.AsyncWordSpec
import org.scalatest.{BeforeAndAfterEach, Inside}
import java.time.Duration
import java.util.concurrent.CompletableFuture.completedFuture
import java.util.concurrent.atomic.{AtomicLong, AtomicReference}
import java.util.concurrent.{CompletableFuture, CompletionStage}
import scala.collection.immutable
import scala.concurrent.duration.{Duration as ScalaDuration, DurationInt}
import scala.concurrent.{Await, Future, Promise}
import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}
class ApiConfigManagementServiceSpec
@ -88,14 +63,11 @@ class ApiConfigManagementServiceSpec
maxSkew = Some(DurationProto.of(2 * 60, 0)),
)
val writeService = mock[state.WriteConfigService]
val apiConfigManagementService = ApiConfigManagementService.createApiService(
new FakeCurrentIndexConfigManagementService(
LedgerOffset.Absolute(Ref.LedgerString.assertFromString("0")),
Configuration(aConfigurationGeneration, indexedTimeModel, Duration.ZERO),
),
writeService,
TimeProvider.UTC,
telemetry = NoOpTelemetry,
loggerFactory = loggerFactory,
)
@ -104,17 +76,13 @@ class ApiConfigManagementServiceSpec
.getTimeModel(GetTimeModelRequest.defaultInstance)
.map { response =>
response.timeModel should be(Some(expectedTimeModel))
verifyZeroInteractions(writeService)
succeed
}
}
"return a `NOT_FOUND` error if a time model is not found" in {
val writeService = mock[WriteConfigService]
val apiConfigManagementService = ApiConfigManagementService.createApiService(
EmptyIndexConfigManagementService,
writeService,
TimeProvider.UTC,
telemetry = NoOpTelemetry,
loggerFactory = loggerFactory,
)
@ -127,230 +95,13 @@ class ApiConfigManagementServiceSpec
}
}
}
"set a new time model" in {
val maximumDeduplicationDuration = Duration.ofHours(6)
val initialGeneration = 2L
val initialTimeModel = LedgerTimeModel(
avgTransactionLatency = Duration.ofMinutes(1),
minSkew = Duration.ofMinutes(2),
maxSkew = Duration.ofMinutes(3),
).get
val initialConfiguration = Configuration(
generation = initialGeneration,
timeModel = initialTimeModel,
maxDeduplicationDuration = maximumDeduplicationDuration,
)
val expectedGeneration = 3L
val expectedTimeModel = LedgerTimeModel(
avgTransactionLatency = Duration.ofMinutes(2),
minSkew = Duration.ofMinutes(1),
maxSkew = Duration.ofSeconds(30),
).get
val expectedConfiguration = Configuration(
generation = expectedGeneration,
timeModel = expectedTimeModel,
maxDeduplicationDuration = maximumDeduplicationDuration,
)
val timeProvider = TimeProvider.UTC
val maximumRecordTime = timeProvider.getCurrentTime.plusSeconds(60)
val (indexService, writeService, currentConfiguration) = bridgedServices(
startingOffset = 7,
submissions = Seq(Ref.SubmissionId.assertFromString("one") -> initialConfiguration),
)
val apiConfigManagementService = ApiConfigManagementService.createApiService(
indexService,
writeService,
timeProvider,
telemetry = NoOpTelemetry,
loggerFactory = loggerFactory,
)
apiConfigManagementService
.setTimeModel(
SetTimeModelRequest.of(
submissionId = "some submission ID",
maximumRecordTime = Some(Timestamp.of(maximumRecordTime.getEpochSecond, 0)),
configurationGeneration = initialGeneration,
newTimeModel = Some(
TimeModel(
avgTransactionLatency = Some(DurationProto.of(2 * 60, 0)),
minSkew = Some(DurationProto.of(60, 0)),
maxSkew = Some(DurationProto.of(30, 0)),
)
),
)
)
.map { response =>
response.configurationGeneration should be(expectedGeneration)
currentConfiguration() should be(Some(expectedConfiguration))
succeed
}
}
"refuse to set a new time model if none is indexed" in {
val initialGeneration = 0L
val timeProvider = TimeProvider.UTC
val maximumRecordTime = timeProvider.getCurrentTime.plusSeconds(60)
val writeService = mock[WriteService]
val apiConfigManagementService = ApiConfigManagementService.createApiService(
EmptyIndexConfigManagementService,
writeService,
timeProvider,
telemetry = NoOpTelemetry,
loggerFactory = loggerFactory,
)
loggerFactory.assertLogs(
within = {
apiConfigManagementService
.setTimeModel(
SetTimeModelRequest.of(
"a submission ID",
maximumRecordTime = Some(Timestamp.of(maximumRecordTime.getEpochSecond, 0)),
configurationGeneration = initialGeneration,
newTimeModel = Some(
TimeModel(
avgTransactionLatency = Some(DurationProto.of(10, 0)),
minSkew = Some(DurationProto.of(20, 0)),
maxSkew = Some(DurationProto.of(40, 0)),
)
),
)
)
.transform(Success.apply)
.map { response =>
verifyZeroInteractions(writeService)
response should matchPattern {
case Failure(GrpcException(GrpcStatus.NOT_FOUND(), _)) =>
}
}
},
assertions = _.warningMessage should include("Could not get the current time model."),
)
}
"propagate trace context" in {
val apiConfigManagementService = ApiConfigManagementService.createApiService(
new FakeStreamingIndexConfigManagementService(someConfigurationEntries),
TestWriteConfigService(testTelemetrySetup.tracer),
TimeProvider.UTC,
_ => Ref.SubmissionId.assertFromString("aSubmission"),
telemetry = new DefaultOpenTelemetry(OpenTelemetrySdk.builder().build()),
loggerFactory = loggerFactory,
)
val span = testTelemetrySetup.anEmptySpan()
val scope = span.makeCurrent()
apiConfigManagementService
.setTimeModel(aSetTimeModelRequest)
.andThen { case _ =>
scope.close()
span.end()
}
.map { _ =>
testTelemetrySetup.reportedSpanAttributes should contain(anApplicationIdSpanAttribute)
succeed
}
}
"close while setting time model" in {
val writeService = mock[state.WriteConfigService]
when(
writeService.submitConfiguration(
any[Time.Timestamp],
any[Ref.SubmissionId],
any[Configuration],
)(any[TraceContext])
).thenReturn(CompletableFuture.completedFuture(SubmissionResult.Acknowledged))
val indexConfigManagementService = new FakeCurrentIndexConfigManagementService(
LedgerOffset.Absolute(Ref.LedgerString.assertFromString("0")),
aConfiguration,
)
val apiConfigManagementService = ApiConfigManagementService.createApiService(
indexConfigManagementService,
writeService,
TimeProvider.UTC,
telemetry = NoOpTelemetry,
loggerFactory = loggerFactory,
)
indexConfigManagementService.getNextConfigurationEntriesPromise.future
.map(_ => apiConfigManagementService.close())
.discard
apiConfigManagementService
.setTimeModel(
aSetTimeModelRequest
)
.transform {
case Success(_) =>
fail("Expected a failure, but received success")
case Failure(err: StatusRuntimeException) =>
assertError(
actual = err,
expectedStatusCode = Code.UNAVAILABLE,
expectedMessage = "SERVER_IS_SHUTTING_DOWN(1,0): Server is shutting down",
expectedDetails = List(
ErrorDetails.ErrorInfoDetail(
"SERVER_IS_SHUTTING_DOWN",
Map(
"submissionId" -> s"'$aSubmissionId'",
"category" -> "1",
"definite_answer" -> "false",
"test" -> s"'${getClass.getSimpleName}'",
),
),
RetryInfoDetail(1.second),
),
verifyEmptyStackTrace = true,
)
Success(succeed)
case Failure(other) =>
fail("Unexpected error", other)
}
}
}
}
object ApiConfigManagementServiceSpec {
private val aSubmissionId = "aSubmission"
private val aConfigurationGeneration = 0L
private val aConfiguration = Configuration(
aConfigurationGeneration,
LedgerTimeModel.reasonableDefault,
Duration.ZERO,
)
private val someConfigurationEntries = List(
LedgerOffset.Absolute(Ref.LedgerString.assertFromString("0")) ->
ConfigurationEntry.Accepted(
aSubmissionId,
aConfiguration,
)
)
private val aSetTimeModelRequest = SetTimeModelRequest(
aSubmissionId,
Some(Timestamp.of(TimeProvider.UTC.getCurrentTime.plusSeconds(600).getEpochSecond, 0)),
aConfigurationGeneration,
Some(
TimeModel(
Some(DurationProto.defaultInstance),
Some(DurationProto.defaultInstance),
Some(DurationProto.defaultInstance),
)
),
)
private object EmptyIndexConfigManagementService extends IndexConfigManagementService {
override def lookupConfiguration()(implicit
loggingContext: LoggingContextWithTrace
@ -381,96 +132,5 @@ object ApiConfigManagementServiceSpec {
private val nextConfigurationEntriesPromise =
new AtomicReference[Promise[Unit]](Promise[Unit]())
def getNextConfigurationEntriesPromise: Promise[Unit] = nextConfigurationEntriesPromise.get()
}
private final class FakeStreamingIndexConfigManagementService(
entries: immutable.Iterable[(LedgerOffset.Absolute, ConfigurationEntry)]
) extends IndexConfigManagementService {
private val currentConfiguration =
entries.collect { case (offset, ConfigurationEntry.Accepted(_, configuration)) =>
offset -> configuration
}.lastOption
override def lookupConfiguration()(implicit
loggingContext: LoggingContextWithTrace
): Future[Option[(LedgerOffset.Absolute, Configuration)]] =
Future.successful(currentConfiguration)
override def configurationEntries(startExclusive: Option[LedgerOffset.Absolute])(implicit
loggingContext: LoggingContextWithTrace
): Source[(LedgerOffset.Absolute, ConfigurationEntry), NotUsed] =
Source(entries)
}
private final case class TestWriteConfigService(tracer: Tracer) extends state.WriteConfigService {
override def submitConfiguration(
maxRecordTime: Time.Timestamp,
submissionId: Ref.SubmissionId,
config: Configuration,
)(implicit
traceContext: TraceContext
): CompletionStage[state.SubmissionResult] = {
val telemetryContext = traceContext.toDamlTelemetryContext(tracer)
telemetryContext.setAttribute(
anApplicationIdSpanAttribute._1,
anApplicationIdSpanAttribute._2,
)
completedFuture(state.SubmissionResult.Acknowledged)
}
}
private def bridgedServices(
startingOffset: Long,
submissions: Iterable[(Ref.SubmissionId, Configuration)],
)(implicit
materializer: Materializer
): (IndexConfigManagementService, WriteConfigService, () => Option[Configuration]) = {
val currentOffset = new AtomicLong(startingOffset)
val (configurationQueue, configurationSource) =
Source.queue[(Long, SubmissionId, Configuration)](1).preMaterialize()
submissions.foreach { case (submissionId, configuration) =>
configurationQueue.offer((currentOffset.getAndIncrement(), submissionId, configuration))
}
val currentConfiguration =
new AtomicReference[Option[(LedgerOffset.Absolute, Configuration)]](None)
val indexService: IndexConfigManagementService = new IndexConfigManagementService {
private val atLeastOneConfig = Promise[Unit]()
private val source = configurationSource
.map { case (offset, submissionId, configuration) =>
val ledgerOffset =
LedgerOffset.Absolute(Ref.LedgerString.assertFromString(offset.toString))
currentConfiguration.set(Some(ledgerOffset -> configuration))
atLeastOneConfig.trySuccess(())
val entry = ConfigurationEntry.Accepted(submissionId, configuration)
ledgerOffset -> entry
}
.preMaterialize()
Await.result(atLeastOneConfig.future, ScalaDuration.Inf)
override def lookupConfiguration()(implicit
loggingContext: LoggingContextWithTrace
): Future[Option[(LedgerOffset.Absolute, Configuration)]] =
Future.successful(currentConfiguration.get())
override def configurationEntries(startExclusive: Option[LedgerOffset.Absolute])(implicit
loggingContext: LoggingContextWithTrace
): Source[(LedgerOffset.Absolute, ConfigurationEntry), NotUsed] =
source._2
}
val writeService = new WriteConfigService {
override def submitConfiguration(
maxRecordTime: Time.Timestamp,
submissionId: SubmissionId,
configuration: Configuration,
)(implicit
traceContext: TraceContext
): CompletionStage[SubmissionResult] = {
configurationQueue.offer((currentOffset.getAndIncrement(), submissionId, configuration))
completedFuture(state.SubmissionResult.Acknowledged)
}
}
(indexService, writeService, () => currentConfiguration.get.map(_._2))
}
}

View File

@ -85,29 +85,6 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers {
)
}
"handle ConfigurationChangeRejected" in {
val rejectionReason = "Test rejection reason"
val update = state.Update.ConfigurationChangeRejected(
someRecordTime,
someSubmissionId,
someParticipantId,
someConfiguration,
rejectionReason,
)
val dtos = updateToDtos(update)
dtos should contain theSameElementsInOrderAs List(
DbDto.ConfigurationEntry(
ledger_offset = someOffset.toHexString,
recorded_at = someRecordTime.micros,
submission_id = someSubmissionId,
typ = JdbcLedgerDao.rejectType,
configuration = Configuration.encode(someConfiguration).toByteArray,
rejection_reason = Some(rejectionReason),
)
)
}
"handle PartyAddedToParticipant (local party)" in {
val displayName = "Test party"
val update = state.Update.PartyAddedToParticipant(

View File

@ -145,7 +145,12 @@ private[dao] trait JdbcLedgerDaoContractsSpec extends LoneElement with Inside wi
it should "present the contract key state at a specific event sequential id" in {
val aTextValue = ValueText(scala.util.Random.nextString(10))
val key = GlobalKeyWithMaintainers.assertBuild(someTemplateId, aTextValue, Set(alice, bob))
val key = GlobalKeyWithMaintainers.assertBuild(
someTemplateId,
aTextValue,
Set(alice, bob),
shared = true,
)
for {
(_, tx) <- createAndStoreContract(

View File

@ -50,7 +50,12 @@ private[dao] trait JdbcLedgerDaoDivulgenceSpec extends LoneElement with Inside {
stakeholders = Set(bob),
keyOpt = Some(
GlobalKeyWithMaintainers
.assertBuild(someTemplateId, someContractKey(bob, "some key"), Set(bob))
.assertBuild(
someTemplateId,
someContractKey(bob, "some key"),
Set(bob),
shared = true,
)
),
version = TransactionVersion.minVersion,
)
@ -83,7 +88,8 @@ private[dao] trait JdbcLedgerDaoDivulgenceSpec extends LoneElement with Inside {
signatories = Set(bob),
stakeholders = Set(bob),
keyOpt = Some(
GlobalKeyWithMaintainers.assertBuild(someTemplateId, ValueParty(bob), Set(bob))
GlobalKeyWithMaintainers
.assertBuild(someTemplateId, ValueParty(bob), Set(bob), shared = true)
),
byKey = false,
version = TransactionVersion.minVersion,
@ -105,7 +111,7 @@ private[dao] trait JdbcLedgerDaoDivulgenceSpec extends LoneElement with Inside {
exerciseResult = Some(someChoiceResult),
keyOpt = Some(
GlobalKeyWithMaintainers
.assertBuild(someTemplateId, someContractKey(bob, "some key"), Set(bob))
.assertBuild(someTemplateId, someContractKey(bob, "some key"), Set(bob), shared = true)
),
byKey = false,
version = TransactionVersion.minVersion,
@ -120,7 +126,7 @@ private[dao] trait JdbcLedgerDaoDivulgenceSpec extends LoneElement with Inside {
stakeholders = Set(alice, bob),
keyOpt = Some(
GlobalKeyWithMaintainers
.assertBuild(someTemplateId, someContractKey(bob, "some key"), Set(bob))
.assertBuild(someTemplateId, someContractKey(bob, "some key"), Set(bob), shared = true)
),
version = TransactionVersion.minVersion,
)

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton.platform.store.dao
import com.daml.lf.data.Ref.Party
import com.daml.lf.transaction.{GlobalKey, GlobalKeyWithMaintainers, Util}
import com.daml.lf.transaction.{GlobalKey, GlobalKeyWithMaintainers}
import com.digitalasset.canton.platform.store.cache.MutableCacheBackedContractStore.EventSequentialId
import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers
@ -26,7 +26,7 @@ private[dao] trait JdbcLedgerDaoEventsSpec extends LoneElement with Inside with
GlobalKey.assertBuild(
someTemplateId,
someContractKey(alice, value),
shared = Util.sharedKey(testLanguageVersion),
shared = true,
),
Set(alice),
)

View File

@ -348,7 +348,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa
maintainers: Set[Party]
): GlobalKeyWithMaintainers = {
val aTextValue = ValueText(scala.util.Random.nextString(10))
GlobalKeyWithMaintainers.assertBuild(someTemplateId, aTextValue, maintainers)
GlobalKeyWithMaintainers.assertBuild(someTemplateId, aTextValue, maintainers, shared = true)
}
protected final def createAndStoreContract(
@ -739,7 +739,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa
stakeholders = Set(party),
keyOpt = Some(
GlobalKeyWithMaintainers
.assertBuild(someTemplateId, someContractKey(party, key), Set(party))
.assertBuild(someTemplateId, someContractKey(party, key), Set(party), shared = true)
),
version = txVersion,
)
@ -783,7 +783,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa
exerciseResult = Some(LfValue.ValueUnit),
keyOpt = maybeKey.map(k =>
GlobalKeyWithMaintainers
.assertBuild(someTemplateId, someContractKey(party, k), Set(party))
.assertBuild(someTemplateId, someContractKey(party, k), Set(party), shared = true)
),
byKey = false,
version = txVersion,
@ -814,7 +814,7 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa
Node.LookupByKey(
templateId = someTemplateId,
key = GlobalKeyWithMaintainers
.assertBuild(someTemplateId, someContractKey(party, key), Set(party)),
.assertBuild(someTemplateId, someContractKey(party, key), Set(party), shared = true),
result = result,
version = txVersion,
)

View File

@ -1,10 +1,10 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
build-options:
- --target=2.dev
name: benchtool-tests
build-options:
- --target=2.dev
source: .
version: 3.0.0
dependencies:
- daml-prim
- daml-stdlib
- daml3-script
dependencies:
- daml-prim
- daml-stdlib
- daml3-script

View File

@ -1,10 +1,10 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
build-options:
- --target=2.dev
name: carbonv1-tests
build-options:
- --target=2.dev
source: .
version: 3.0.0
dependencies:
- daml-prim
- daml-stdlib
- daml3-script
dependencies:
- daml-prim
- daml-stdlib
- daml3-script

View File

@ -1,12 +1,12 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
build-options:
- --target=2.dev
name: carbonv2-tests
build-options:
- --target=2.dev
data-dependencies:
- ../../scala-2.13/resource_managed/main/carbonv1-tests.dar
data-dependencies:
- ../../scala-2.13/resource_managed/main/carbonv1-tests.dar
source: .
version: 3.0.0
dependencies:
- daml-prim
- daml-stdlib
- daml3-script
dependencies:
- daml-prim
- daml-stdlib
- daml3-script

View File

@ -1,12 +1,12 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
build-options:
- --target=2.dev
name: carbonv3-tests
build-options:
- --target=2.dev
data-dependencies:
- ../../scala-2.13/resource_managed/main/carbonv2-tests.dar
data-dependencies:
- ../../scala-2.13/resource_managed/main/carbonv2-tests.dar
source: .
version: 3.0.0
dependencies:
- daml-prim
- daml-stdlib
- daml3-script
dependencies:
- daml-prim
- daml-stdlib
- daml3-script

View File

@ -1,10 +1,10 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
build-options:
- --target=2.dev
name: model-tests
build-options:
- --target=2.dev
source: .
version: 3.0.0
dependencies:
- daml-prim
- daml-stdlib
- daml3-script
dependencies:
- daml-prim
- daml-stdlib
- daml3-script

View File

@ -1,10 +1,10 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
build-options:
- --target=2.dev
name: package-management-tests
build-options:
- --target=2.dev
source: .
version: 3.0.0
dependencies:
- daml-prim
- daml-stdlib
- daml3-script
dependencies:
- daml-prim
- daml-stdlib
- daml3-script

View File

@ -1,10 +1,10 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
build-options:
- --target=2.dev
name: semantic-tests
build-options:
- --target=2.dev
source: .
version: 3.0.0
dependencies:
- daml-prim
- daml-stdlib
- daml3-script
dependencies:
- daml-prim
- daml-stdlib
- daml3-script

View File

@ -244,8 +244,6 @@ class ServicesMetrics(
@MetricDoc.FanInstanceTag
val allocateParty: Timer = metricsFactory.timer(prefix :+ "allocate_party")
@MetricDoc.FanInstanceTag
val submitConfiguration: Timer = metricsFactory.timer(prefix :+ "submit_configuration")
@MetricDoc.FanInstanceTag
val prune: Timer = metricsFactory.timer(prefix :+ "prune")
}

View File

@ -1,4 +1,4 @@
sdk-version: 2.9.0-snapshot.20231211.12491.0.vbb56339f
sdk-version: 2.9.0-snapshot.20231215.12512.0.v5a0f0a18
build-options:
- --target=1.14
name: JsonEncodingTest

View File

@ -1,56 +0,0 @@
# Collectd metrics
## Collectd installation - macOS
First, check whether the directory `/usr/local/sbin` exists on your machine.
This directory does not exist on macOS by default, and Homebrew will try to link
collectd into this directory. If it does not exist, run:
```
sudo mkdir /usr/local/sbin
sudo chown -R `whoami`:admin /usr/local/sbin
```
To install collectd, run:
```
brew install libpq
brew install --build-from-source collectd
```
Note: The collectd bottle does not contain the postgresql plugin.
You therefore need to install `libpq` first, then build collectd from source.
If everything went well, you should have the following file on your machine:
```
/usr/local/Cellar/collectd/5.11.0/lib/collectd/postgresql.so
```
## Collectd configuration
Once installed, replace `/usr/local/etc/collectd.conf` with the [file from this directory](./collectd.conf)
## Postgres configuration
To allow collectd to monitor PostgreSQL,
create a `collectd` user and grant it privileges to your databases.
```
psql -d postgres
# create user collectd with encrypted password 'collectd';
# grant all privileges on database damlsandbox to collectd;
# grant all privileges on database damlsandboxindex to collectd;
```
Note: this assumes your databases are called `damlsandbox` and `damlsandboxindex`.
If you want to monitor different databases, change also the corresponding entries in `collectd.conf`.
## Running collectd
To run collectd as a foreground process, run:
```
sudo /usr/local/sbin/collectd -f -C /usr/local/etc/collectd.conf
```

View File

@ -1,81 +0,0 @@
Hostname "localhost"
FQDNLookup false
BaseDir "/usr/local/var/lib/collectd"
PIDFile "/usr/local/var/run/collectd.pid"
TypesDB "/usr/local/Cellar/collectd/5.11.0/share/collectd/types.db"
AutoLoadPlugin true
Interval 1
MaxReadInterval 86400
Timeout 2
ReadThreads 5
WriteThreads 5
<Plugin cpu>
ReportByCpu true
ReportByState true
ValuesPercentage true
</Plugin>
<Plugin load>
ReportRelative true
</Plugin>
<Plugin memory>
ValuesAbsolute true
ValuesPercentage false
</Plugin>
<Plugin "disk">
IgnoreSelected false
</Plugin>
<Plugin processes>
ProcessMatch "docker" "com.docker.hyperkit"
Process "java"
ProcessMatch "postgres" "postgres.*"
</Plugin>
<Plugin postgresql>
<Database damlsandbox>
Host "localhost"
Port "5432"
User "collectd"
Password "collectd"
# Predefined:
Query backends
Query transactions
Query queries
Query table_states
Query disk_io
Query disk_usage
</Database>
<Database damlsandboxindex>
Host "localhost"
Port "5432"
User "collectd"
Password "collectd"
# Predefined:
Query backends
Query transactions
Query queries
Query table_states
Query disk_io
Query disk_usage
</Database>
</Plugin>
<Plugin write_graphite>
<Carbon>
Host "localhost"
Port "2003"
Prefix "collectd."
Protocol "tcp"
LogSendErrors true
StoreRates true
AlwaysAppendDS false
EscapeCharacter "_"
</Carbon>
</Plugin>

View File

@ -1,59 +0,0 @@
## Basic Usage
1. Launch Grafana & Graphite:
```
$ docker-compose up -d
```
2. Explore available dashboards:
```
$ open http://localhost:3000/
# login with admin/admin
```
3. Power off when done:
```
$ docker-compose down
```
## Fresh Restart
This will purge all data and configuration changes, and restart Graphite and Grafana:
```
$ reset-all.sh
```
## Customizing Graphite
After startup, you can explore and adjust the current Graphite configuration in `./graphite/conf`.
Configuration changes remain effective after restarts, but they will be purged by `reset-all.sh`.
At the first start, i.e., if the configuration directory is empty, Graphite will reset the configuration to the default.
If you want to change the default configuration, put your adjusted configuration files into `./graphite/default_conf`.
Changes to the default configuration will not be purged by `reset-all.sh`.
The following changes have been made to the default configuration:
- `storage-aggregation.conf`: Removed a section that would aggregate metrics ending in `.count` using `sum`.
Such metrics will be aggregated by using the `avg` function instead.
**Rationale:** Codahale `Meter`s and `Counter`s will report metrics ending in `.count` containing the total number of events measured.
Such metrics need to be aggregated with the `avg` function, as they reflect the total number of events (as opposed to the delta w.r.t. the last report).
- `storage-aggregation.conf`: Changed the default `xFilesFactor` from `0.3` to `0.0`.
**Rationale:** If a metric is reported only once per minute, Graphite would discard the metric values after the first aggregation step.
- `storage-schemas.conf`: Changed the default retention schedule to `10s:7d,1m:30d,10m:1800d`.
## Customizing Grafana
The easiest way to customize Grafana is through the web UI.
Changes will be persisted to the Grafana db and remain effective after restarts.
You can also import dashboards programmatically:
1. Export the dashboard to a json file through the Grafana UI. (Go to the dashboard, share, export, ...)
2. Put the dashboard file into `./grafana/dashboards`
3. Restart: `docker-compose restart`.

View File

@ -1,50 +0,0 @@
# Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
version: "3"
services:
grafana:
image: grafana/grafana
container_name: grafana
restart: always
ports:
- 3000:3000
networks:
- collection
volumes:
- ./grafana/provisioning:/etc/grafana/provisioning # configuration to be imported during startup
- ./grafana/dashboards:/var/lib/grafana/dashboards # dashboards to be imported during startup
- grafana-storage:/var/lib/grafana # preserve configuration changes during restarts
graphite:
image: graphiteapp/graphite-statsd
container_name: graphite
restart: always
ports:
- 2000:80
- 2003:2003
- 2004:2004
- 2023:2023
- 2024:2024
- 8125:8125/udp
- 8126:8126
networks:
- collection
volumes:
# preserve metrics data during restarts
- graphite-storage:/opt/graphite/storage
# make configuration visible on the host system
- ./graphite/conf:/opt/graphite/conf
- ./graphite/statsd_conf:/opt/statsd/config
# update default configuration during startup
- ./graphite/run_once:/etc/run_once
- ./graphite/default_conf:/etc/default_conf
networks:
collection:
volumes:
graphite-storage:
grafana-storage:

View File

@ -1,864 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 6,
"links": [],
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 6,
"w": 12,
"x": 0,
"y": 0
},
"hiddenSeries": false,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"refId": "A",
"target": "aliasByNode(offset(scale(collectd.localhost.*.percent-idle, -1), 100), 2)"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "CPU",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percent",
"label": null,
"logBase": 1,
"max": "100",
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 6,
"w": 12,
"x": 12,
"y": 0
},
"hiddenSeries": false,
"id": 10,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"refCount": 0,
"refId": "A",
"target": "aliasByMetric(collectd.localhost.memory.*)"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Memory",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 6
},
"hiddenSeries": false,
"id": 7,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"refCount": 0,
"refId": "A",
"target": "aliasByNode(collectd.localhost.*.disk_ops.*, 2, 4)"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Disk ops / second",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 6
},
"hiddenSeries": false,
"id": 8,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"refCount": 0,
"refId": "A",
"target": "aliasByNode(collectd.localhost.*.disk_octets.*, 2, 4)"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Disk bytes / second",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 8,
"x": 0,
"y": 14
},
"hiddenSeries": false,
"id": 12,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"refCount": 0,
"refId": "B",
"target": "aliasByNode(collectd.localhost.*.ps_cputime.syst, 2)"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Processes - CPU usage",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "ns",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 8,
"x": 8,
"y": 14
},
"hiddenSeries": false,
"id": 20,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"refId": "A",
"target": "aliasByNode(collectd.localhost.*.ps_rss, 2)"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Processes - Resident memory",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 8,
"x": 16,
"y": 14
},
"hiddenSeries": false,
"id": 14,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"refCount": 0,
"refId": "A",
"target": "aliasByNode(collectd.localhost.*.ps_pagefaults.majflt, 2)"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Processes - Page faults",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 22
},
"hiddenSeries": false,
"id": 16,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"refCount": 0,
"refId": "A",
"target": "aliasByMetric(collectd.localhost.postgresql-damlsandboxindex.pg_n_tup_c-ins)"
},
{
"refCount": 0,
"refId": "B",
"target": "aliasByMetric(collectd.localhost.postgresql-damlsandboxindex.pg_n_tup_c-del)"
},
{
"refCount": 0,
"refId": "C",
"target": "aliasByMetric(collectd.localhost.postgresql-damlsandboxindex.pg_n_tup_c-upd)"
},
{
"refCount": 0,
"refId": "D",
"target": "aliasByMetric(collectd.localhost.postgresql-damlsandboxindex.pg_n_tup_c-hot_upd)"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Sandbox index - row ops",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 22
},
"hiddenSeries": false,
"id": 18,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"refCount": 0,
"refId": "A",
"target": "aliasByMetric(collectd.localhost.postgresql-damlsandboxindex.pg_blks-idx_read)"
},
{
"refCount": 0,
"refId": "B",
"target": "aliasByMetric(collectd.localhost.postgresql-damlsandboxindex.pg_blks-idx_hit)"
},
{
"refCount": 0,
"refId": "C",
"target": "aliasByMetric(collectd.localhost.postgresql-damlsandboxindex.pg_blks-heap_read)"
},
{
"refCount": 0,
"refId": "D",
"target": "aliasByMetric(collectd.localhost.postgresql-damlsandboxindex.pg_blks-heap_hit)"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Sandbox index - block reads",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"schemaVersion": 22,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "CollectD",
"uid": "BifGFjzMk",
"variables": {
"list": []
},
"version": 3
}

View File

@ -1,203 +0,0 @@
{
"annotations": {
"enable": false,
"list": [
{
"$$hashKey": "object:60",
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 1,
"links": [],
"panels": [
{
"aliasColors": {},
"annotate": {
"enable": false
},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "Graphite",
"editable": true,
"fill": 0,
"fillGradient": 0,
"grid": {
"max": null,
"min": 0
},
"gridPos": {
"h": 10,
"w": 24,
"x": 0,
"y": 0
},
"hiddenSeries": false,
"id": 1,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"loadingEditor": false,
"nullPointMode": "null as zero",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"resolution": 100,
"scale": 1,
"seriesOverrides": [
{
"alias": "Points Per Update",
"yaxis": 2
},
{
"alias": "CPU",
"yaxis": 2
}
],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"refId": "A",
"target": "alias(sumSeries(carbon.agents.*.updateOperations),\"Updates\") "
},
{
"refId": "B",
"target": "alias(sumSeries(carbon.agents.*.metricsReceived),'Metrics Received')"
},
{
"refId": "C",
"target": "alias(sumSeries(carbon.agents.*.committedPoints),'Committed Points')"
},
{
"refId": "D",
"target": "alias(sumSeries(carbon.agents.*.pointsPerUpdate),'Points Per Update')"
},
{
"refId": "E",
"target": "alias(averageSeries(carbon.agents.*.cpuUsage),'CPU')"
},
{
"refId": "F",
"target": "alias(sumSeries(carbon.agents.*.creates),'Creates')"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Graphite Carbon Metrics",
"tooltip": {
"query_as_alias": true,
"shared": false,
"sort": 0,
"value_type": "cumulative"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
},
"zerofill": true
}
],
"refresh": false,
"revision": "1.0",
"schemaVersion": 22,
"style": "dark",
"tags": [
"graphite",
"carbon"
],
"templating": {
"list": []
},
"time": {
"from": "now-5m",
"to": "now"
},
"timepicker": {
"collapse": false,
"enable": true,
"notice": false,
"now": true,
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"status": "Stable",
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
],
"type": "timepicker"
},
"timezone": "browser",
"title": "Graphite Carbon Metrics",
"uid": "YEv29jjWz",
"variables": {
"list": []
},
"version": 1
}

View File

@ -1,161 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 4,
"links": [],
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 17,
"w": 21,
"x": 0,
"y": 0
},
"hiddenSeries": false,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
"alias": "daml.index.db.get_flat_transactions.translation.m1_rate",
"yaxis": 2
},
{
"alias": "daml.index.db.get_flat_transactions.wait.m1_rate",
"yaxis": 2
},
{
"alias": "daml.index.db.get_flat_transactions.exec.m1_rate",
"yaxis": 2
}
],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"refCount": 0,
"refId": "A",
"target": "daml.index.db.get_flat_transactions.exec.mean"
},
{
"refCount": 0,
"refId": "B",
"target": "daml.index.db.get_flat_transactions.exec.m1_rate"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Transaction Stream",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "ms",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "5s",
"schemaVersion": 25,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-30m",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "Transaction Stream",
"uid": "gmAihrgGz",
"version": 3
}

View File

@ -1,17 +0,0 @@
# Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
apiVersion: 1
providers:
- name: 'Daml'
orgId: 1
folder: 'daml'
folderUid: ''
type: file
disableDeletion: false
editable: true
updateIntervalSeconds: 10
allowUiUpdates: true
options:
path: /var/lib/grafana/dashboards

View File

@ -1,22 +0,0 @@
# Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
apiVersion: 1
deleteDatasources:
- name: Graphite
orgId: 1
datasources:
- name: Graphite
type: graphite
access: proxy
orgId: 1
url: http://graphite:8080
isDefault: true
jsonData:
graphiteVersion: "1.1"
tlsAuth: false
tlsAuthWithCACert: false
version: 1
editable: false

View File

@ -1,2 +0,0 @@
# These directories are materialized at runtime by the graphite container
# (mounted in docker-compose.yml); keep them out of version control.
/conf
/statsd_conf

View File

@ -1,32 +0,0 @@
# Aggregation methods for whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds
#
# [name]
# pattern = <regex>
# xFilesFactor = <float between 0 and 1>
# aggregationMethod = <average|sum|last|max|min>
#
# name: Arbitrary unique name for the rule
# pattern: Regex pattern to match against the metric name
# xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
# aggregationMethod: function to apply to data points for aggregation
#
# NOTE: compared to the stock Graphite defaults, the rule summing ".count"
# metrics has been removed (they fall through to [default_average]) and the
# default xFilesFactor is 0 — Codahale meters/counters report running totals
# in *.count, which must be averaged, and metrics reported only once per
# minute must survive aggregation (see the monitoring README).
[min]
pattern = \.lower$
xFilesFactor = 0.1
aggregationMethod = min
[max]
pattern = \.upper(_\d+)?$
xFilesFactor = 0.1
aggregationMethod = max
[sum]
pattern = \.sum$
xFilesFactor = 0
aggregationMethod = sum
[default_average]
pattern = .*
xFilesFactor = 0
aggregationMethod = average

View File

@ -1,26 +0,0 @@
# Schema definitions for Whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds.
#
# Definition Syntax:
#
# [name]
# pattern = regex
# retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ...
#
# Remember: To support accurate aggregation from higher to lower resolution
# archives, the precision of a longer retention archive must be
# cleanly divisible by precision of next lower retention archive.
#
# Valid: 60s:7d,300s:30d (300/60 = 5)
# Invalid: 180s:7d,300s:30d (300/180 = 3.333)
#
# Carbon's internal metrics. This entry should match what is specified in
# CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings
[carbon]
pattern = ^carbon\.
retentions = 10s:6h,1m:90d
# Customized default retention (see the monitoring README): 10s resolution
# for 7 days, then 1m for 30 days, then 10m for 1800 days.
[default]
pattern = .*
retentions = 10s:7d,1m:30d,10m:1800d

View File

@ -1,6 +0,0 @@
#!/bin/sh
# One-shot startup hook (mounted at /etc/run_once in the graphite container):
# copy the customized default Graphite configuration into the directory the
# container seeds its live configuration from.
echo "Initializing Graphite configuration..."
default_conf_dir="/etc/default_conf"
seed_dir="/opt/defaultconf/graphite"
cp -v "${default_conf_dir}"/*.conf "${seed_dir}/"

View File

@ -1,11 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

# Purge all metrics data and configuration changes, then restart the
# Graphite and Grafana stack with the default configuration.

# Abort on the first failing command and on unset variables.
set -eu

# The graphite/* paths below are relative. Run from the directory containing
# this script so we always delete the intended files — previously, invoking
# the script from elsewhere silently removed nothing (or the wrong thing).
cd "$(dirname "$0")"

docker-compose down -v
rm -rf graphite/conf/*
rm -rf graphite/statsd_conf/*
docker-compose up -d

View File

@ -1,93 +0,0 @@
#!/usr/bin/env node
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
const http = require("http");
const window = "6hours";
const url = target =>
`http://localhost:2000/render?target=summarize(${target},%20%27${window}%27,%20%27last%27)&from=-${window}&format=json`;
/**
 * GET `url` and resolve with the parsed JSON response body.
 *
 * Rejects on request errors (e.g. connection refused), response stream
 * errors, and malformed JSON. The original only listened for 'error' on the
 * response; request-level failures are emitted on the ClientRequest, so a
 * refused connection crashed the process with an unhandled 'error' event,
 * and a non-JSON body threw uncaught inside the 'end' handler.
 */
function fetch(url) {
  return new Promise((resolve, reject) => {
    const req = http.get(url, res => {
      res.setEncoding("utf8");
      let body = "";
      res.on("data", data => {
        body += data;
      });
      res.on("end", () => {
        try {
          resolve(JSON.parse(body));
        } catch (err) {
          reject(err);
        }
      });
      res.on("error", reject);
    });
    req.on("error", reject);
  });
}
/**
 * Regroup Graphite rows into { metricPath: { stat: value } }.
 *
 * Each row's tags.name looks like "a.b.c.<stat>": the final dot-separated
 * segment is the statistic name, the prefix is the metric path. The value
 * recorded is the first element of the row's last datapoint.
 */
function toDict(rawData) {
  const result = {};
  for (const row of rawData) {
    const segments = row.tags.name.split(".");
    const stat = segments.pop();
    const target = segments.join(".");
    const lastPoint = row.datapoints[row.datapoints.length - 1];
    if (!result[target]) {
      result[target] = {};
    }
    result[target][stat] = lastPoint[0];
  }
  return result;
}
/**
 * Round a statistic to two decimals for CSV output; blank for missing or
 * non-numeric values. (isNaN(undefined) is already true — the explicit
 * undefined check is kept for readability.)
 */
function formatNumber(num) {
  if (num === undefined || isNaN(num)) {
    return "";
  }
  return Math.round(num * 100 + Number.EPSILON) / 100;
}

/**
 * Render a { metric: { stat: value } } dictionary as CSV: a header line
 * followed by one row per metric, sorted by metric name, with count, mean,
 * total (mean * count), min and max columns.
 */
function toCsv(dictData) {
  const rows = Object.keys(dictData).map(key => {
    const stats = dictData[key];
    const cells = [
      formatNumber(stats.count),
      formatNumber(stats.mean),
      formatNumber(stats.mean * stats.count),
      formatNumber(stats.min),
      formatNumber(stats.max),
    ];
    return `${key}, ${cells.join(", ")}`;
  });
  return "Metric, Count, Mean, Total, Min, Max\n" + rows.sort().join("\n");
}
/**
 * Fetch summarized {mean,count,min,max} series for every metric up to
 * `maxDepth` path segments below each root, and flatten the per-URL
 * responses into a single array of rows.
 */
async function fetchData() {
  const maxDepth = 7;
  const roots = ["daml", "jvm"];
  const urls = [];
  for (const root of roots) {
    for (let depth = 1; depth <= maxDepth; depth++) {
      // "*", "*.*", "*.*.*", ... down to maxDepth levels below the root.
      const wildcardPath = new Array(depth).fill("*").join(".");
      urls.push(url(`${root}.${wildcardPath}.{mean,count,min,max}`));
    }
  }
  const responses = await Promise.all(urls.map(fetch));
  // Each response is itself an array of rows; flatten one level.
  return responses.flat();
}
/**
 * Entry point: fetch all metric summaries from Graphite, regroup them into
 * a dictionary, and print the CSV report to stdout.
 */
async function run() {
  const rawData = await fetchData();
  const dictData = toDict(rawData);
  const csvData = toCsv(dictData);
  console.log(csvData);
}

// The original bare `run();` left any failure (e.g. Graphite unreachable)
// as an unhandled promise rejection; report it and exit non-zero instead.
run().catch(err => {
  console.error(err);
  process.exitCode = 1;
});

View File

@ -1,30 +0,0 @@
-- Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-- SPDX-License-Identifier: Apache-2.0

-- | Minimal keyed template plus a holder contract that can re-create the
-- held contract via its key. Payload variant: list of parties.
-- NOTE(review): leading indentation was lost in this view; the layout below
-- is the canonical Daml layout reconstructed from the token sequence.
module Simple where

-- Single-constructor variant wrapping a Party.
data SimpleVariant = SV Party

template SimpleTemplate
  with
    owner : Party
    observer : Party
    contractData : [Party]
  where
    signatory owner
    observer observer
    key owner : Party
    maintainer key

    -- Consuming choice: archive this contract and create an identical copy.
    choice Replace : ContractId SimpleTemplate
      controller owner
      do create this

template SimpleTemplateHolder
  with
    owner : Party
  where
    signatory owner

    -- Look up the SimpleTemplate keyed by this owner, archive it, and
    -- create a fresh copy with the same payload.
    nonconsuming choice ReplaceHeldByKey : ()
      controller owner
      do (cid, c) <- fetchByKey @SimpleTemplate owner
         exercise cid Archive
         create c
         pure ()

View File

@ -1,30 +0,0 @@
-- Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-- SPDX-License-Identifier: Apache-2.0

-- | Minimal keyed template plus a holder contract that can re-create the
-- held contract via its key. Payload variant: optional party (contrast with
-- the sibling version of this module that uses a list of parties).
-- NOTE(review): leading indentation was lost in this view; the layout below
-- is the canonical Daml layout reconstructed from the token sequence.
module Simple where

-- Single-constructor variant wrapping a Party.
data SimpleVariant = SV Party

template SimpleTemplate
  with
    owner : Party
    observer : Party
    contractData : Optional Party
  where
    signatory owner
    observer observer
    key owner : Party
    maintainer key

    -- Consuming choice: archive this contract and create an identical copy.
    choice Replace : ContractId SimpleTemplate
      controller owner
      do create this

template SimpleTemplateHolder
  with
    owner : Party
  where
    signatory owner

    -- Look up the SimpleTemplate keyed by this owner, archive it, and
    -- create a fresh copy with the same payload.
    nonconsuming choice ReplaceHeldByKey : ()
      controller owner
      do (cid, c) <- fetchByKey @SimpleTemplate owner
         exercise cid Archive
         create c
         pure ()

Some files were not shown because too many files have changed in this diff Show More