mirror of https://github.com/digital-asset/daml.git
synced 2024-11-10 10:46:11 +03:00

update canton to 72730ce3 (#17613)

CHANGELOG_BEGIN
CHANGELOG_END

Co-authored-by: Azure Pipelines Daml Build <support@digitalasset.com>

This commit is contained in:
parent 174098ad41
commit 6f352b7642
@@ -4,8 +4,9 @@
package com.digitalasset.canton.admin.api.client.data

import com.daml.ledger.api.refinements.ApiTypes
+import com.daml.ledger.api.v1.ValueOuterClass
import com.daml.ledger.api.v1.value.Identifier
-import com.daml.ledger.client.binding.Primitive as P
+import com.daml.ledger.javaapi

final case class TemplateId(
    packageId: String,

@@ -18,10 +19,10 @@ final case class TemplateId(
    entityName = entityName,
  )

-  def toPrim: P.TemplateId[_] = P.TemplateId(
-    packageId = packageId,
-    moduleName = moduleName,
-    entityName = entityName,
-  )
+  def toJavaIdentifier: javaapi.data.Identifier = new javaapi.data.Identifier(
+    packageId,
+    moduleName,
+    entityName,
+  )

  def isModuleEntity(moduleName: String, entityName: String) =

@@ -47,4 +48,12 @@ object TemplateId {
    fromIdentifier(templateId.unwrap)
  }

+  def fromJavaProtoIdentifier(templateId: ValueOuterClass.Identifier): TemplateId = {
+    fromIdentifier(Identifier.fromJavaProto(templateId))
+  }
+
+  def fromJavaIdentifier(templateId: javaapi.data.Identifier): TemplateId = {
+    fromJavaProtoIdentifier(templateId.toProto)
+  }
+
}
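The two new converters make the console's TemplateId interoperable with the Java bindings in both directions. A minimal round-trip sketch, assuming the case class keeps the three fields shown above (packageId, moduleName, entityName) and that the canton classes are on the classpath; the identifier values are hypothetical:

import com.daml.ledger.javaapi
import com.digitalasset.canton.admin.api.client.data.TemplateId

val tid = TemplateId("some-package-id", "MyModule", "MyTemplate") // hypothetical ids
val javaId: javaapi.data.Identifier = tid.toJavaIdentifier
// fromJavaIdentifier goes through the Java protobuf form (toProto) and scalapb's
// Identifier.fromJavaProto, so the round trip should yield the original value.
assert(TemplateId.fromJavaIdentifier(javaId) == tid)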
@@ -26,6 +26,7 @@ import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.environment.Environment
import com.digitalasset.canton.lifecycle.{FlagCloseable, Lifecycle}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
+import com.digitalasset.canton.protocol.SerializableContract
import com.digitalasset.canton.sequencing.{
  GrpcSequencerConnection,
  SequencerConnection,

@@ -588,6 +589,11 @@ object ConsoleEnvironment {
    */
  implicit def toPositiveDouble(n: Double): PositiveDouble = PositiveDouble.tryCreate(n)

+  /** Implicitly map a `CantonTimestamp` to a `LedgerCreateTime`
+    */
+  implicit def toLedgerCreateTime(ts: CantonTimestamp): SerializableContract.LedgerCreateTime =
+    SerializableContract.LedgerCreateTime(ts)
+
  /** Implicitly convert a duration to a [[com.digitalasset.canton.config.NonNegativeDuration]]
    * @throws java.lang.IllegalArgumentException if `duration` is negative
    */
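With this implicit in scope, console code can keep passing a bare CantonTimestamp where the repair APIs now expect the LedgerCreateTime wrapper. A hedged sketch (it assumes the ConsoleEnvironment implicits are imported, as they are inside console scripts, and that CantonTimestamp.Epoch is available as a convenient constant):

import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime

def acceptsCreateTime(t: LedgerCreateTime): Unit = () // hypothetical call site

// The conversion wraps the timestamp on the fly:
acceptsCreateTime(CantonTimestamp.Epoch)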
@@ -36,6 +36,7 @@ import com.digitalasset.canton.participant.admin.inspection.SyncStateInspection
import com.digitalasset.canton.participant.admin.repair.RepairService
import com.digitalasset.canton.participant.config.{AuthServiceConfig, BaseParticipantConfig}
import com.digitalasset.canton.participant.ledger.api.JwtTokenUtilities
+import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.tracing.{NoTracing, TraceContext}

@@ -327,7 +328,7 @@ trait ConsoleMacros extends NamedLogging with NoTracing {
          observers,
          contractId,
          contractSaltO,
-          Some(ledgerCreateTime.underlying),
+          Some(ledgerCreateTime.ts.underlying),
        )
      }
    )

@@ -379,7 +380,7 @@ trait ConsoleMacros extends NamedLogging with NoTracing {
      generate_contract_id(
        cryptoPureApi = pureCrypto,
        rawContract = contractInstanceWithUpdatedContractIdReferences,
-        createdAt = contract.ledgerCreateTime,
+        createdAt = contract.ledgerCreateTime.ts,
        discriminator = discriminator,
        contractSalt = contractSalt,
        metadata = contract.metadata,

@@ -423,7 +424,7 @@ trait ConsoleMacros extends NamedLogging with NoTracing {
    val unicum = unicumGenerator
      .recomputeUnicum(
        contractSalt,
-        createdAt,
+        LedgerCreateTime(createdAt),
        metadata,
        rawContract,
        cantonContractIdVersion,
@@ -8,7 +8,6 @@ import cats.syntax.functorFilter.*
import cats.syntax.traverse.*
import com.daml.jwt.JwtDecoder
import com.daml.jwt.domain.Jwt
-import com.daml.ledger.api.v1.CommandsOuterClass
import com.daml.ledger.api.v1.admin.package_management_service.PackageDetails
import com.daml.ledger.api.v1.admin.party_management_service.PartyDetails as ProtoPartyDetails
import com.daml.ledger.api.v1.command_completion_service.Checkpoint

@@ -23,6 +22,8 @@ import com.daml.ledger.api.v1.ledger_configuration_service.LedgerConfiguration
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.{Transaction, TransactionTree}
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
+import com.daml.ledger.api.v1.value.Value
+import com.daml.ledger.api.v1.{CommandsOuterClass, EventQueryServiceOuterClass, ValueOuterClass}
import com.daml.ledger.api.v2.transaction.{
  Transaction as TransactionV2,
  TransactionTree as TransactionTreeV2,

@@ -1612,6 +1613,36 @@ trait BaseLedgerApiAdministration extends NoTracing {
        )
      }

+      @Help.Summary("Submit java codegen command asynchronously", FeatureFlag.Testing)
+      @Help.Description(
+        """Provides access to the command submission service of the Ledger API.
+          |See https://docs.daml.com/app-dev/services.html for documentation of the parameters."""
+      )
+      def submit_async(
+          actAs: Seq[PartyId],
+          commands: Seq[javab.data.Command],
+          workflowId: String = "",
+          commandId: String = "",
+          deduplicationPeriod: Option[DeduplicationPeriod] = None,
+          submissionId: String = "",
+          minLedgerTimeAbs: Option[Instant] = None,
+          readAs: Seq[PartyId] = Seq.empty,
+          disclosedContracts: Seq[DisclosedContract] = Seq.empty,
+          applicationId: String = applicationId,
+      ): Unit =
+        ledger_api.commands.submit_async(
+          actAs,
+          commands.map(c => Command.fromJavaProto(c.toProtoCommand)),
+          workflowId,
+          commandId,
+          deduplicationPeriod,
+          submissionId,
+          minLedgerTimeAbs,
+          readAs,
+          disclosedContracts,
+          applicationId,
+        )
+
    }

    @Help.Summary("Read from transaction stream (Java bindings)", FeatureFlag.Testing)

@@ -1742,6 +1773,49 @@ trait BaseLedgerApiAdministration extends NoTracing {

    }

+    @Help.Summary("Query event details", FeatureFlag.Testing)
+    @Help.Group("EventQuery")
+    object event_query extends Helpful {
+
+      @Help.Summary("Get events in java codegen by contract Id", FeatureFlag.Testing)
+      @Help.Description("""Return events associated with the given contract Id""")
+      def by_contract_id(
+          contractId: String,
+          requestingParties: Seq[PartyId],
+      ): EventQueryServiceOuterClass.GetEventsByContractIdResponse =
+        check(FeatureFlag.Testing)(
+          GetEventsByContractIdResponse.toJavaProto(consoleEnvironment.run {
+            ledgerApiCommand(
+              LedgerApiCommands.QueryService
+                .GetEventsByContractId(contractId, requestingParties.map(_.toLf))
+            )
+          })
+        )
+
+      @Help.Summary("Get events in java codegen format by contract key", FeatureFlag.Testing)
+      @Help.Description("""Return events associated with the given contract key""")
+      def by_contract_key(
+          contractKey: ValueOuterClass.Value,
+          requestingParties: Seq[PartyId],
+          templateId: TemplateId,
+          continuationToken: Option[String] = None,
+      ): EventQueryServiceOuterClass.GetEventsByContractKeyResponse =
+        check(FeatureFlag.Testing)(
+          GetEventsByContractKeyResponse.toJavaProto(consoleEnvironment.run {
+            ledgerApiCommand(
+              LedgerApiCommands.QueryService
+                .GetEventsByContractKey(
+                  Value.fromJavaProto(contractKey),
+                  requestingParties.map(_.toLf),
+                  templateId,
+                  continuationToken,
+                )
+            )
+          })
+        )
+
+    }
+
  }

}
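A hedged console sketch of the new Java-codegen entry points. The exact console path to the java bindings group, the participant reference, the party, and the command values are assumptions for illustration, not taken from this diff:

// Fire-and-forget submission of Java codegen commands:
participant1.ledger_api.javaapi.commands.submit_async(
  actAs = Seq(alice),
  commands = myJavaCodegenCommands, // hypothetical Seq[javab.data.Command]
)

// Events for one contract id, returned in Java protobuf form:
val response = participant1.ledger_api.javaapi.event_query.by_contract_id(
  contractId = cid, // hypothetical contract id string
  requestingParties = Seq(alice),
)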
@@ -166,7 +166,7 @@ message PartyToParticipantX {
  // the target participant that the party should be mapped to
  string participant = 1;
  // permission of the participant for this particular party (the actual
-  // will be min of ParticipantDomainPermissions and this setting)
+  // will be min of ParticipantDomainPermissionX.ParticipantPermissionX and this setting)
  ParticipantPermissionX permission = 2;
}
// the party that is to be represented by the participants
@@ -13,8 +13,6 @@ sealed trait X509CertificateEncoder[Encoding] {

  protected def unwrap(value: Either[String, Encoding]): Encoding =
    value.valueOr(err => throw new IllegalArgumentException(s"Failed to load certificate: $err"))
-
-  def tryFromBytes(encoded: ByteString): Encoding = unwrap(fromBytes(encoded))
}

/** A X509 Certificate serialized in PEM format. */
@@ -10,9 +10,8 @@ import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.version.HasVersionedToByteString

/** Reifies the subclasses of [[ViewTree]] as values */
-// This trait is not sealed so that we can extend it for unit testing
// This trait does not extend ProtoSerializable because v0.EncryptedViewMessage.ViewType is an enum, not a message.
-trait ViewType extends Product with Serializable with PrettyPrinting {
+sealed trait ViewType extends Product with Serializable with PrettyPrinting {

  /** The subclass of [[ViewTree]] that is reified. */
  type View <: ViewTree with HasVersionedToByteString

@@ -26,6 +25,9 @@ trait ViewType extends Product with Serializable with PrettyPrinting {
  override def pretty: Pretty[ViewType.this.type] = prettyOfObject[ViewType.this.type]
}

+// This trait is not sealed so that we can extend it for unit testing
+trait ViewTypeTest extends ViewType
+
object ViewType {

  def fromProtoEnum: v0.ViewType => ParsingResult[ViewType] = {
@@ -12,7 +12,7 @@ import com.digitalasset.canton.lifecycle.FlagCloseable
  * so that the [[com.digitalasset.canton.lifecycle.FlagCloseable]] gets initialized first.
  */
trait CloseableHealthElement extends FlagCloseable with HealthElement {
-  final override protected def associatedFlagCloseable: FlagCloseable = this
+  final override protected def associatedOnShutdownRunner: FlagCloseable = this
}

/** Refines the state of a [[CloseableHealthElement]] to something convertible to a [[ComponentHealthState]] */
@@ -5,7 +5,7 @@ package com.digitalasset.canton.health

import cats.Eval
import com.digitalasset.canton.DiscardOps
-import com.digitalasset.canton.lifecycle.{RunOnShutdown, UnlessShutdown}
+import com.digitalasset.canton.lifecycle.RunOnShutdown
import com.digitalasset.canton.tracing.TraceContext

import scala.collection.concurrent.TrieMap

@@ -36,17 +36,19 @@ trait CompositeHealthElement[ID, HE <: HealthElement] extends HealthElement {
  // Unregister all dependencies when this element is closed.
  locally {
    import TraceContext.Implicits.Empty.*
-    associatedFlagCloseable.runOnShutdown_(new RunOnShutdown {
+    associatedOnShutdownRunner.runOnShutdown_(new RunOnShutdown {
      override def name: String = s"unregister-$name-from-dependencies"
      override def done: Boolean = false
-      override def run(): Unit = {
-        dependencies.foreachEntry((_, element) =>
-          element.unregisterOnHealthChange(dependencyListener).discard[Boolean]
-        )
-      }
+      override def run(): Unit = unregisterFromAll()
    })
  }

+  private def unregisterFromAll(): Unit = {
+    dependencies.foreachEntry((_, element) =>
+      element.unregisterOnHealthChange(dependencyListener).discard[Boolean]
+    )
+  }
+
  protected def getDependencies: Map[ID, HE] = dependencies.readOnlySnapshot().toMap

  protected def setDependency(id: ID, dependency: HE): Unit =

@@ -67,7 +69,6 @@ trait CompositeHealthElement[ID, HE <: HealthElement] extends HealthElement {
    * This however is only temporary as in this case another state refresh will be triggered at the end.
    */
  protected def alterDependencies(remove: Set[ID], add: Map[ID, HE]): Unit = {
-    import TraceContext.Implicits.Empty.*
    def removeId(id: ID): Boolean =
      if (add.contains(id)) false
      else

@@ -87,13 +88,16 @@ trait CompositeHealthElement[ID, HE <: HealthElement] extends HealthElement {
        true
      }

-    associatedFlagCloseable
-      .performUnlessClosing("alter dependencies") {
-        val removedAtLeastOne = remove.map(removeId).exists(Predef.identity)
-        val addedAtLeastOne =
-          add.map { case (id, dependency) => addOrReplace(id, dependency) }.exists(Predef.identity)
-        if (addedAtLeastOne || removedAtLeastOne) dependencyListener.poke()(TraceContext.empty)
-      }
-      .discard[UnlessShutdown[Unit]]
+    if (!associatedOnShutdownRunner.isClosing) {
+      val removedAtLeastOne = remove.map(removeId).exists(Predef.identity)
+      val addedAtLeastOne =
+        add.map { case (id, dependency) => addOrReplace(id, dependency) }.exists(Predef.identity)
+      val dependenciesChanged = addedAtLeastOne || removedAtLeastOne
+      // Since the associatedOnShutdownRunner may have started closing while we've been modifying the dependencies,
+      // query the closing flag again and repeat the unregistration
+      if (associatedOnShutdownRunner.isClosing) {
+        unregisterFromAll()
+      } else if (dependenciesChanged) dependencyListener.poke()(TraceContext.empty)
+    }
  }
}
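The switch from performUnlessClosing to a plain isClosing check relies on a recheck-after-mutation pattern: mutate the dependency map optimistically, then read the closing flag again and undo the registration if a close raced in. A self-contained sketch of the same idea in plain Scala, independent of the canton types:

import java.util.concurrent.atomic.AtomicBoolean

final class Listeners {
  private val closing = new AtomicBoolean(false)
  private var registered = Set.empty[String]

  def close(): Unit = {
    closing.set(true) // after this point, register() must not leave entries behind
    synchronized { registered = Set.empty }
  }

  def register(id: String): Unit =
    if (!closing.get) {
      synchronized { registered += id } // mutate optimistically
      // close() may have run between the check and the mutation: recheck and undo
      if (closing.get) synchronized { registered -= id }
    }
}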
@@ -4,7 +4,7 @@
package com.digitalasset.canton.health

import cats.Eval
-import com.digitalasset.canton.lifecycle.{FlagCloseable, RunOnShutdown}
+import com.digitalasset.canton.lifecycle.{OnShutdownRunner, RunOnShutdown}
import com.digitalasset.canton.logging.pretty.Pretty
import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger}
import com.digitalasset.canton.tracing.TraceContext

@@ -71,7 +71,7 @@ trait HealthElement {
  /** The initial state upon creation */
  protected def initialHealthState: State

-  /** The state set when the [[associatedFlagCloseable]] closes */
+  /** The state set when the [[associatedOnShutdownRunner]] closes */
  protected def closingState: State

  /** The [[com.digitalasset.canton.lifecycle.FlagCloseable]] associated with this object.

@@ -79,11 +79,11 @@ trait HealthElement {
    * When this [[com.digitalasset.canton.lifecycle.FlagCloseable]] closes, the health state permanently becomes [[closingState]]
    * and all listeners are notified about this.
    */
-  protected def associatedFlagCloseable: FlagCloseable
+  protected def associatedOnShutdownRunner: OnShutdownRunner

  locally {
    import TraceContext.Implicits.Empty.*
-    associatedFlagCloseable.runOnShutdown_(new RunOnShutdown {
+    associatedOnShutdownRunner.runOnShutdown_(new RunOnShutdown {
      override def name: String = s"set-closing-state-of-${HealthElement.this.name}"
      override def done: Boolean = false
      override def run(): Unit = refreshState(Eval.now(closingState))

@@ -127,7 +127,7 @@ trait HealthElement {
    }
    // When we're closing, force the value to `closingState`.
    // This ensures that `closingState` is sticky.
-    val newStateValue = if (associatedFlagCloseable.isClosing) closingState else newState.value
+    val newStateValue = if (associatedOnShutdownRunner.isClosing) closingState else newState.value
    val previous = internalState.getAndUpdate {
      case InternalState(_, Idle) => errorOnIdle
      case InternalState(_, Refreshing) => InternalState(newStateValue, Idle)
@@ -3,7 +3,7 @@

package com.digitalasset.canton.lifecycle

-import cats.data.{EitherT, OptionT}
+import cats.data.EitherT
import cats.syntax.traverse.*
import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.concurrent.Threading

@@ -12,36 +12,21 @@ import com.digitalasset.canton.lifecycle.FlagCloseable.forceShutdownStr
import com.digitalasset.canton.logging.TracedLogger
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.Thereafter.syntax.*
-import com.digitalasset.canton.util.{Checked, CheckedT}
-import com.google.common.annotations.VisibleForTesting
+import com.digitalasset.canton.util.{Checked, CheckedT, Thereafter}

-import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong, AtomicReference}
-import scala.collection.concurrent.TrieMap
+import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import scala.collection.immutable.MultiSet
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
import scala.util.control.NonFatal

-/** Trait that can be registered with a [FlagCloseable] to run on shutdown */
-trait RunOnShutdown {
-
-  /** the name, used for logging during shutdown */
-  def name: String
-
-  /** true if the task has already run (maybe elsewhere) */
-  def done: Boolean
-
-  /** invoked by [FlagCloseable] during shutdown */
-  def run(): Unit
-}
-
/** Provides a way to synchronize closing with other running tasks in the class, such that new tasks aren't scheduled
  * while closing, and such that closing waits for the scheduled tasks.
  *
  * The component's custom shutdown behaviour should override the `onClosed` method.
  */
-trait FlagCloseable extends AutoCloseable {
+trait FlagCloseable extends OnShutdownRunner {

  protected def timeouts: ProcessingTimeout

@@ -59,68 +44,9 @@ trait FlagCloseable extends AutoCloseable {
  // count on acquires and releases happening on the same thread, since we support the synchronization of futures.
  private val readerState = new AtomicReference(ReaderState.empty)

-  private val incrementor = new AtomicLong(0L)
-  private val onShutdownTasks = TrieMap.empty[Long, RunOnShutdown]
-
-  protected def logger: TracedLogger
-
  // How often to poll to check that all tasks have completed.
  protected def maxSleepMillis: Long = 500

-  @VisibleForTesting
-  protected def runStateChanged(waitingState: Boolean = false): Unit = {} // used for unit testing
-
-  /** Register a task to run when shutdown is initiated.
-    *
-    * You can use this for example to register tasks that cancel long-running computations,
-    * whose termination you can then wait for in "closeAsync".
-    */
-  def runOnShutdown_[T](
-      task: RunOnShutdown
-  )(implicit traceContext: TraceContext): Unit = {
-    runOnShutdown(task).discard
-  }
-
-  /** Same as [[runOnShutdown_]] but returns a token that allows you to remove the task explicitly from being run
-    * using [[cancelShutdownTask]]
-    */
-  def runOnShutdown[T](
-      task: RunOnShutdown
-  )(implicit traceContext: TraceContext): Long = {
-    val token = incrementor.getAndIncrement()
-    onShutdownTasks
-      // First remove the tasks that are done
-      .filterInPlace { case (_, run) =>
-        !run.done
-      }
-      // Then add the new one
-      .put(token, task)
-      .discard
-    if (isClosing) runOnShutdownTasks()
-    token
-  }
-
-  /** Removes a shutdown task from the list using a token returned by [[runOnShutdown]]
-    */
-  def cancelShutdownTask(token: Long): Unit = onShutdownTasks.remove(token).discard
-
-  private def runOnShutdownTasks()(implicit traceContext: TraceContext): Unit = {
-    onShutdownTasks.toList.foreach { case (token, task) =>
-      Try {
-        onShutdownTasks
-          .remove(token)
-          .filterNot(_.done)
-          .foreach(_.run())
-      }.failed.foreach(t => logger.warn(s"Task ${task.name} failed on shutdown!", t))
-    }
-  }
-
-  /** Check whether we're closing.
-    * Susceptible to race conditions; unless you're using this as a flag to the retry lib or you really know
-    * what you're doing, prefer [[performUnlessClosing]] and friends.
-    */
-  def isClosing: Boolean = closingFlag.get()
-
  /** Performs the task given by `f` unless a shutdown has been initiated.
    * The shutdown will only begin after `f` completes, but other tasks may execute concurrently with `f`, if started using this
    * function, or one of the other variants ([[performUnlessClosingF]] and [[performUnlessClosingEitherT]]).

@@ -276,54 +202,44 @@ trait FlagCloseable extends AutoCloseable {
  /** Blocks until all earlier tasks have completed and then prevents further tasks from being run.
    */
  @SuppressWarnings(Array("org.wartremover.warts.While", "org.wartremover.warts.Var"))
-  final override def close(): Unit = {
+  final override def onFirstClose(): Unit = {
    import TraceContext.Implicits.Empty.*

-    /* Setting closingFlag to true first ensures that we can shut down cleanly, unless one of the
+    /* closingFlag has already been set to true. This ensures that we can shut down cleanly, unless one of the
       readers takes longer to complete than the closing timeout. After the flag is set to true, the readerCount
       can only decrease (since it only increases in performUnlessClosingF, and since the || there short-circuits).
     */
-    val firstCallToClose = closingFlag.compareAndSet(false, true)
-    runStateChanged()
-    if (firstCallToClose) {
-      // First run onShutdown tasks.
-      // Important to run them in the beginning as they may be used to cancel long-running tasks.
-      runOnShutdownTasks()
-
-      // Poll for tasks to finish. Inefficient, but we're only doing this during shutdown.
-      val deadline = closingTimeout.fromNow
-      var sleepMillis = 1L
-      while (
-        (readerState.getAndUpdate { current =>
-          if (current == ReaderState.empty) {
-            current.copy(count = -1)
-          } else current
-        }.count != 0) && deadline.hasTimeLeft()
-      ) {
-        val readers = readerState.get()
-        logger.debug(
-          s"${readers.count} active tasks (${readers.readers.mkString(",")}) preventing closing; sleeping for ${sleepMillis}ms"
-        )
-        runStateChanged(true)
-        Threading.sleep(sleepMillis)
-        sleepMillis = (sleepMillis * 2) min maxSleepMillis min deadline.timeLeft.toMillis
-      }
-      if (readerState.get.count >= 0) {
-        logger.warn(
-          s"Timeout ${closingTimeout} expired, but tasks still running. ${forceShutdownStr}"
-        )
-        dumpRunning()
-      }
-      if (keepTrackOfOpenFutures) {
-        logger.warn("Tracking of open futures is enabled, but this is only meant for debugging!")
-      }
-      try {
-        onClosed()
-      } catch {
-        case NonFatal(e) => onCloseFailure(e)
-      }
-    } else {
-      // TODO(i8594): Ensure we call close only once
-    }
+    // Poll for tasks to finish. Inefficient, but we're only doing this during shutdown.
+    val deadline = closingTimeout.fromNow
+    var sleepMillis = 1L
+    while (
+      (readerState.getAndUpdate { current =>
+        if (current == ReaderState.empty) {
+          current.copy(count = -1)
+        } else current
+      }.count != 0) && deadline.hasTimeLeft()
+    ) {
+      val readers = readerState.get()
+      logger.debug(
+        s"${readers.count} active tasks (${readers.readers.mkString(",")}) preventing closing; sleeping for ${sleepMillis}ms"
+      )
+      runStateChanged(true)
+      Threading.sleep(sleepMillis)
+      sleepMillis = (sleepMillis * 2) min maxSleepMillis min deadline.timeLeft.toMillis
+    }
+    if (readerState.get.count >= 0) {
+      logger.warn(
+        s"Timeout ${closingTimeout} expired, but tasks still running. ${forceShutdownStr}"
+      )
+      dumpRunning()
+    }
+    if (keepTrackOfOpenFutures) {
+      logger.warn("Tracking of open futures is enabled, but this is only meant for debugging!")
+    }
+    try {
+      onClosed()
+    } catch {
+      case NonFatal(e) => onCloseFailure(e)
+    }
  }

@@ -379,52 +295,43 @@ object CloseContext {
      closeContext2: CloseContext,
      processingTimeout: ProcessingTimeout,
      tracedLogger: TracedLogger,
-  )(implicit
-      traceContext: TraceContext
-  ): CloseContext = {
+  )(implicit traceContext: TraceContext): CloseContext = {
+    // TODO(#8594) Add a test that this correctly implements the performUnlessClosing semantics
+    // Currently, this is broken because if both closeContext1 and closeContext2 are closed concurrently,
+    // then the close of the created flagCloseable will terminate early for the second call to its close method
+    // and thus not delay that closeContext's closing.
    val flagCloseable = new FlagCloseable {
      override protected def timeouts: ProcessingTimeout = processingTimeout
      override protected def logger: TracedLogger = tracedLogger
    }
    closeContext1.flagCloseable.runOnShutdown_(new RunOnShutdown {
      override def name: String = s"combined-close-ctx1"
-      override def done: Boolean = flagCloseable.isClosing
+      override def done: Boolean =
+        closeContext1.flagCloseable.isClosing && closeContext2.flagCloseable.isClosing
      override def run(): Unit = flagCloseable.close()
    })
    closeContext2.flagCloseable.runOnShutdown_(new RunOnShutdown {
      override def name: String = s"combined-close-ctx2"
-      override def done: Boolean = flagCloseable.isClosing
+      override def done: Boolean =
+        closeContext1.flagCloseable.isClosing && closeContext2.flagCloseable.isClosing
      override def run(): Unit = flagCloseable.close()
    })
    CloseContext(flagCloseable)
  }

-  def withCombinedContextF[T](
+  def withCombinedContext[F[_], T, C[_]](
      closeContext1: CloseContext,
      closeContext2: CloseContext,
      processingTimeout: ProcessingTimeout,
      tracedLogger: TracedLogger,
-  )(func: CloseContext => Future[T])(implicit
+  )(func: CloseContext => F[T])(implicit
      traceContext: TraceContext,
      ex: ExecutionContext,
-  ): Future[T] = {
+      F: Thereafter[F, C],
+  ): F[T] = {
    val tmp = combineUnsafe(closeContext1, closeContext2, processingTimeout, tracedLogger)
    func(tmp).thereafter(_ => tmp.flagCloseable.close())
  }
-
-  def withCombinedContextOT[T](
-      closeContext1: CloseContext,
-      closeContext2: CloseContext,
-      processingTimeout: ProcessingTimeout,
-      tracedLogger: TracedLogger,
-  )(func: CloseContext => OptionT[Future, T])(implicit
-      traceContext: TraceContext,
-      ex: ExecutionContext,
-  ): OptionT[Future, T] = {
-    val tmp = combineUnsafe(closeContext1, closeContext2, processingTimeout, tracedLogger)
-    func(tmp).thereafter(_ => tmp.flagCloseable.close())
-  }
}

/** Mix-in to obtain a [[CloseContext]] implicit based on the class's [[FlagCloseable]] */
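The two deleted Future/OptionT variants collapse into one withCombinedContext that works for any effect with a Thereafter instance. A hedged usage sketch against the new signature; ctx1, ctx2, timeouts and logger are assumed to be in scope, and OptionT[Future, *] is assumed to have a Thereafter instance, as the Future variant did:

import cats.data.OptionT
import scala.concurrent.Future

def lookup(cc: CloseContext): OptionT[Future, Int] =
  OptionT.pure[Future](42) // hypothetical body that respects cc

// The combined context is closed again via Thereafter once the effect completes:
val result: OptionT[Future, Int] =
  CloseContext.withCombinedContext(ctx1, ctx2, timeouts, logger)(lookup)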
@@ -0,0 +1,112 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.lifecycle

import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.logging.TracedLogger
import com.digitalasset.canton.tracing.TraceContext
import com.google.common.annotations.VisibleForTesting

import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import scala.collection.concurrent.TrieMap
import scala.util.Try

trait OnShutdownRunner extends AutoCloseable {

  private val closingFlag: AtomicBoolean = new AtomicBoolean(false)

  private val incrementor: AtomicLong = new AtomicLong(0L)
  private val onShutdownTasks: TrieMap[Long, RunOnShutdown] = TrieMap.empty[Long, RunOnShutdown]

  protected def logger: TracedLogger

  /** Check whether we're closing.
    * Susceptible to race conditions; unless you're using this as a flag to the retry lib or you really know
    * what you're doing, prefer `performUnlessClosing` and friends.
    */
  def isClosing: Boolean = closingFlag.get()

  /** Register a task to run when shutdown is initiated.
    *
    * You can use this for example to register tasks that cancel long-running computations,
    * whose termination you can then wait for in "closeAsync".
    */
  def runOnShutdown_[T](
      task: RunOnShutdown
  )(implicit traceContext: TraceContext): Unit = {
    runOnShutdown(task).discard
  }

  /** Same as [[runOnShutdown_]] but returns a token that allows you to remove the task explicitly from being run
    * using [[cancelShutdownTask]]
    */
  def runOnShutdown[T](
      task: RunOnShutdown
  )(implicit traceContext: TraceContext): Long = {
    val token = incrementor.getAndIncrement()
    onShutdownTasks
      // First remove the tasks that are done
      .filterInPlace { case (_, run) =>
        !run.done
      }
      // Then add the new one
      .put(token, task)
      .discard
    if (isClosing) runOnShutdownTasks()
    token
  }

  /** Removes a shutdown task from the list using a token returned by [[runOnShutdown]]
    */
  def cancelShutdownTask(token: Long): Unit = onShutdownTasks.remove(token).discard

  private def runOnShutdownTasks()(implicit traceContext: TraceContext): Unit = {
    onShutdownTasks.toList.foreach { case (token, task) =>
      Try {
        onShutdownTasks
          .remove(token)
          .filterNot(_.done)
          // TODO(#8594) Time limit the shutdown tasks similar to how we time limit the readers in FlagCloseable
          .foreach(_.run())
      }.failed.foreach(t => logger.warn(s"Task ${task.name} failed on shutdown!", t))
    }
  }

  @VisibleForTesting
  protected def runStateChanged(waitingState: Boolean = false): Unit = {} // used for unit testing

  protected def onFirstClose(): Unit

  /** Blocks until all earlier tasks have completed and then prevents further tasks from being run.
    */
  @SuppressWarnings(Array("org.wartremover.warts.While", "org.wartremover.warts.Var"))
  final override def close(): Unit = {
    import TraceContext.Implicits.Empty.*

    val firstCallToClose = closingFlag.compareAndSet(false, true)
    runStateChanged()
    if (firstCallToClose) {
      // First run onShutdown tasks.
      // Important to run them in the beginning as they may be used to cancel long-running tasks.
      runOnShutdownTasks()

      onFirstClose()
    } else {
      // TODO(i8594): Ensure we call close only once
    }
  }
}

/** Trait that can be registered with a [FlagCloseable] to run on shutdown */
trait RunOnShutdown {

  /** the name, used for logging during shutdown */
  def name: String

  /** true if the task has already run (maybe elsewhere) */
  def done: Boolean

  /** invoked by [FlagCloseable] during shutdown */
  def run(): Unit
}
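A minimal sketch of a direct OnShutdownRunner subclass, using only what the new file declares; the polling service and its cancellation hook are hypothetical:

import com.digitalasset.canton.lifecycle.{OnShutdownRunner, RunOnShutdown}
import com.digitalasset.canton.logging.TracedLogger
import com.digitalasset.canton.tracing.TraceContext

final class PollingService(override protected val logger: TracedLogger)
    extends OnShutdownRunner {

  // Nothing to do beyond the registered shutdown tasks.
  override protected def onFirstClose(): Unit = ()

  def start()(implicit tc: TraceContext): Long =
    runOnShutdown(new RunOnShutdown {
      override def name: String = "stop-polling"
      override def done: Boolean = false
      override def run(): Unit = stopPolling() // hypothetical cancellation hook
    })

  private def stopPolling(): Unit = ()
}

// The token returned by start() can later be passed to cancelShutdownTask if the
// task becomes obsolete before shutdown.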
@@ -128,11 +128,7 @@ package object canton {
    */
  type TransferCounterO = Option[TransferCounter]

-  object RequestCounter extends CounterCompanion[RequestCounterDiscriminator] {
-
-    /** A strict lower bound on all request counters */
-    val LowerBound: RequestCounter = RequestCounter(-1)
-  }
+  object RequestCounter extends CounterCompanion[RequestCounterDiscriminator]

  /** Wrap a method call with this method to document that the caller is sure that the callee's preconditions are met. */
  def checked[A](x: => A): A = x
@@ -6,6 +6,7 @@ package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.daml.ledger.api.refinements.ApiTypes
import com.daml.ledger.client.binding.Primitive
+import com.daml.ledger.javaapi.data.codegen.ContractId
import com.daml.lf.data.Bytes
import com.digitalasset.canton.checked
import com.digitalasset.canton.config.CantonRequireTypes.String255

@@ -97,6 +98,10 @@ object ContractIdSyntax {
    def toLf: LfContractId = LfContractId.assertFromString(contractId.toString)
  }

+  implicit class JavaCodegenContractIdSyntax[T](contractId: ContractId[?]) {
+    def toLf: LfContractId = LfContractId.assertFromString(contractId.contractId)
+  }
+
  implicit class LfContractIdSyntax(private val contractId: LfContractId) extends AnyVal {
    def toProtoPrimitive: String = contractId.coid
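With the new implicit in scope, a Java-codegen ContractId converts to canton's LfContractId the same way the existing syntax classes do; a small sketch:

import com.daml.ledger.javaapi.data.codegen.ContractId
import com.digitalasset.canton.protocol.ContractIdSyntax.*
import com.digitalasset.canton.protocol.LfContractId

// Delegates to LfContractId.assertFromString(contractId.contractId) as added above.
def asLf(cid: ContractId[?]): LfContractId = cid.toLf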
@@ -7,17 +7,20 @@ import cats.implicits.toTraverseOps
import cats.syntax.either.*
import com.daml.lf.value.ValueCoder
import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError
-import com.digitalasset.canton.crypto
import com.digitalasset.canton.crypto.Salt
import com.digitalasset.canton.data.{CantonTimestamp, ProcessedDisclosedContract}
-import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
+import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting, PrettyUtil}
import com.digitalasset.canton.protocol.ContractIdSyntax.*
+import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.version.*
+import com.digitalasset.canton.{LfTimestamp, crypto}
import com.google.protobuf.ByteString
+import com.google.protobuf.timestamp.Timestamp

import java.time.Instant

/** Represents a serializable contract.
  *
  * @param contractId The ID of the contract.

@@ -35,7 +38,7 @@ case class SerializableContract(
    contractId: LfContractId,
    rawContractInstance: SerializableRawContractInstance,
    metadata: ContractMetadata,
-    ledgerCreateTime: CantonTimestamp,
+    ledgerCreateTime: LedgerCreateTime,
    contractSalt: Option[Salt],
)
// The class implements `HasVersionedWrapper` because we serialize it to an anonymous binary format (ByteString/Array[Byte]) when

@@ -77,7 +80,7 @@ case class SerializableContract(
      (serContract: SerializableContract) => serContract.rawContractInstance.contractInstance,
    )(adHocPrettyInstance), // TODO(#3269) This may leak confidential data
    param("metadata", _.metadata),
-    param("create time", _.ledgerCreateTime),
+    param("create time", _.ledgerCreateTime.ts),
    paramIfDefined("contract salt", _.contractSalt),
  )

@@ -112,6 +115,19 @@ object SerializableContract

  override def name: String = "serializable contract"

+  // Ledger time of the "repair transaction" creating the contract
+  final case class LedgerCreateTime(ts: CantonTimestamp) extends AnyVal {
+    def toProtoPrimitive: Timestamp = ts.toProtoPrimitive
+    def toInstant: Instant = ts.toInstant
+    def toLf: LfTimestamp = ts.toLf
+  }
+
+  object LedgerCreateTime extends PrettyUtil {
+    implicit val ledgerCreateTimeOrdering: Ordering[LedgerCreateTime] = Ordering.by(_.ts)
+    implicit val prettyLedgerCreateTime: Pretty[LedgerCreateTime] =
+      prettyOfClass[LedgerCreateTime](param("ts", _.ts))
+  }
+
  def apply(
      contractId: LfContractId,
      contractInstance: LfContractInst,

@@ -122,7 +138,9 @@ object SerializableContract
  ): Either[ValueCoder.EncodeError, SerializableContract] =
    SerializableRawContractInstance
      .create(contractInstance, unvalidatedAgreementText)
-      .map(SerializableContract(contractId, _, metadata, ledgerTime, contractSalt))
+      .map(
+        SerializableContract(contractId, _, metadata, LedgerCreateTime(ledgerTime), contractSalt)
+      )

  def fromDisclosedContract(
      disclosedContract: ProcessedDisclosedContract

@@ -207,5 +225,11 @@ object SerializableContract
        .required("ledger_create_time", ledgerCreateTime)
        .flatMap(CantonTimestamp.fromProtoPrimitive)
      contractSalt <- contractSaltO.traverse(Salt.fromProtoV0)
-    } yield SerializableContract(contractId, raw, metadata, ledgerTime, contractSalt)
+    } yield SerializableContract(
+      contractId,
+      raw,
+      metadata,
+      LedgerCreateTime(ledgerTime),
+      contractSalt,
+    )
  }
|
||||
package com.digitalasset.canton.protocol.messages
|
||||
|
||||
import cats.data.EitherT
|
||||
import cats.syntax.either.*
|
||||
import cats.syntax.traverse.*
|
||||
import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.TopologyRequestId
|
||||
import com.digitalasset.canton.config.CantonRequireTypes.String255
|
||||
@ -42,10 +43,10 @@ final case class DomainTopologyTransactionMessage private (
|
||||
with ProtocolMessageV2
|
||||
with UnsignedProtocolMessageV3 {
|
||||
|
||||
require(
|
||||
representativeProtocolVersion.representative < ProtocolVersion.v5 || notSequencedAfter.nonEmpty,
|
||||
"Not sequenced after must be non-empty for protocol version v5 and above",
|
||||
)
|
||||
// Ensures the invariants related to default values hold
|
||||
DomainTopologyTransactionMessage
|
||||
.validateInstance(this, representativeProtocolVersion)
|
||||
.valueOr(err => throw new IllegalArgumentException(err))
|
||||
|
||||
def hashToSign(hashOps: HashOps): Hash =
|
||||
DomainTopologyTransactionMessage.hash(
|
||||
@ -53,25 +54,22 @@ final case class DomainTopologyTransactionMessage private (
|
||||
domainId,
|
||||
notSequencedAfter,
|
||||
hashOps,
|
||||
representativeProtocolVersion.representative,
|
||||
)
|
||||
|
||||
def toProtoV0: v0.DomainTopologyTransactionMessage = {
|
||||
private[messages] def toProtoV0: v0.DomainTopologyTransactionMessage =
|
||||
v0.DomainTopologyTransactionMessage(
|
||||
signature = Some(domainTopologyManagerSignature.toProtoV0),
|
||||
transactions = transactions.map(_.getCryptographicEvidence),
|
||||
domainId = domainId.toProtoPrimitive,
|
||||
)
|
||||
}
|
||||
|
||||
def toProtoV1: v1.DomainTopologyTransactionMessage = {
|
||||
private[messages] def toProtoV1: v1.DomainTopologyTransactionMessage =
|
||||
v1.DomainTopologyTransactionMessage(
|
||||
signature = Some(domainTopologyManagerSignature.toProtoV0),
|
||||
transactions = transactions.map(_.getCryptographicEvidence),
|
||||
domainId = domainId.toProtoPrimitive,
|
||||
notSequencedAfter = notSequencedAfter.map(_.toProtoPrimitive),
|
||||
)
|
||||
}
|
||||
|
||||
override def toProtoEnvelopeContentV0: v0.EnvelopeContent =
|
||||
v0.EnvelopeContent(
|
||||
@ -109,7 +107,7 @@ object DomainTopologyTransactionMessage
|
||||
ProtocolMessageContentCast.create[DomainTopologyTransactionMessage](
|
||||
"DomainTopologyTransactionMessage"
|
||||
) {
|
||||
case ditm: DomainTopologyTransactionMessage => Some(ditm)
|
||||
case dttm: DomainTopologyTransactionMessage => Some(dttm)
|
||||
case _ => None
|
||||
}
|
||||
|
||||
@ -124,25 +122,31 @@ object DomainTopologyTransactionMessage
|
||||
v1.DomainTopologyTransactionMessage
|
||||
)(
|
||||
supportedProtoVersion(_)(fromProtoV1),
|
||||
_.toProtoV0.toByteString,
|
||||
_.toProtoV1.toByteString,
|
||||
),
|
||||
)
|
||||
|
||||
override lazy val invariants = Seq(notSequencedAfterInvariant)
|
||||
lazy val notSequencedAfterInvariant = EmptyOptionExactlyUntilExclusive(
|
||||
_.notSequencedAfter,
|
||||
"notSequencedAfter",
|
||||
protocolVersionRepresentativeFor(ProtocolVersion.v5),
|
||||
)
|
||||
|
||||
private def hash(
|
||||
transactions: List[SignedTopologyTransaction[TopologyChangeOp]],
|
||||
domainId: DomainId,
|
||||
notSequencedAfter: Option[CantonTimestamp],
|
||||
hashOps: HashOps,
|
||||
protocolVersion: ProtocolVersion,
|
||||
): Hash = {
|
||||
val builder = hashOps
|
||||
.build(HashPurpose.DomainTopologyTransactionMessageSignature)
|
||||
.add(domainId.toProtoPrimitive)
|
||||
if (protocolVersion >= ProtocolVersion.v5) {
|
||||
notSequencedAfter.foreach { ts =>
|
||||
builder.add(ts.toEpochMilli)
|
||||
}
|
||||
|
||||
notSequencedAfter.foreach { ts =>
|
||||
builder.add(ts.toEpochMilli)
|
||||
}
|
||||
|
||||
transactions.foreach(elem => builder.add(elem.getCryptographicEvidence))
|
||||
builder.finish()
|
||||
}
|
||||
@ -151,54 +155,66 @@ object DomainTopologyTransactionMessage
|
||||
transactions: List[SignedTopologyTransaction[TopologyChangeOp]],
|
||||
syncCrypto: DomainSnapshotSyncCryptoApi,
|
||||
domainId: DomainId,
|
||||
notSequencedAfter: CantonTimestamp,
|
||||
notSequencedAfter: Option[CantonTimestamp],
|
||||
protocolVersion: ProtocolVersion,
|
||||
)(implicit
|
||||
traceContext: TraceContext,
|
||||
ec: ExecutionContext,
|
||||
): EitherT[Future, SyncCryptoError, DomainTopologyTransactionMessage] = {
|
||||
val hashToSign =
|
||||
hash(
|
||||
transactions,
|
||||
domainId,
|
||||
Some(notSequencedAfter),
|
||||
syncCrypto.crypto.pureCrypto,
|
||||
protocolVersion,
|
||||
)
|
||||
syncCrypto
|
||||
.sign(hashToSign)
|
||||
.map(signature =>
|
||||
DomainTopologyTransactionMessage(
|
||||
signature,
|
||||
transactions,
|
||||
notSequencedAfter = Some(notSequencedAfter),
|
||||
domainId,
|
||||
)(protocolVersionRepresentativeFor(protocolVersion))
|
||||
): EitherT[Future, String, DomainTopologyTransactionMessage] = {
|
||||
val notSequencedAfterUpdated =
|
||||
notSequencedAfterInvariant.orValue(notSequencedAfter, protocolVersion)
|
||||
|
||||
val hashToSign = hash(
|
||||
transactions,
|
||||
domainId,
|
||||
notSequencedAfterUpdated,
|
||||
syncCrypto.crypto.pureCrypto,
|
||||
)
|
||||
|
||||
for {
|
||||
signature <- syncCrypto.sign(hashToSign).leftMap(_.toString)
|
||||
domainTopologyTransactionMessageE = Either
|
||||
.catchOnly[IllegalArgumentException](
|
||||
DomainTopologyTransactionMessage(
|
||||
signature,
|
||||
transactions,
|
||||
notSequencedAfter = notSequencedAfterUpdated,
|
||||
domainId,
|
||||
)(protocolVersionRepresentativeFor(protocolVersion))
|
||||
)
|
||||
.leftMap(_.getMessage)
|
||||
domainTopologyTransactionMessage <- EitherT.fromEither[Future](
|
||||
domainTopologyTransactionMessageE
|
||||
)
|
||||
} yield domainTopologyTransactionMessage
|
||||
}
|
||||
|
||||
def tryCreate(
|
||||
transactions: List[SignedTopologyTransaction[TopologyChangeOp]],
|
||||
crypto: DomainSnapshotSyncCryptoApi,
|
||||
domainId: DomainId,
|
||||
notSequencedAfter: CantonTimestamp,
|
||||
notSequencedAfter: Option[CantonTimestamp],
|
||||
protocolVersion: ProtocolVersion,
|
||||
)(implicit
|
||||
traceContext: TraceContext,
|
||||
ec: ExecutionContext,
|
||||
): Future[DomainTopologyTransactionMessage] =
|
||||
create(transactions, crypto, domainId, notSequencedAfter, protocolVersion).fold(
|
||||
): Future[DomainTopologyTransactionMessage] = {
|
||||
val notSequencedAfterUpdated =
|
||||
notSequencedAfterInvariant.orValue(notSequencedAfter, protocolVersion)
|
||||
|
||||
create(transactions, crypto, domainId, notSequencedAfterUpdated, protocolVersion).fold(
|
||||
err =>
|
||||
throw new IllegalStateException(
|
||||
s"Failed to create domain topology transaction message: $err"
|
||||
),
|
||||
identity,
|
||||
)
|
||||
}
|
||||
|
||||
def fromProtoV0(
|
||||
private[messages] def fromProtoV0(
|
||||
message: v0.DomainTopologyTransactionMessage
|
||||
): ParsingResult[DomainTopologyTransactionMessage] = {
|
||||
val v0.DomainTopologyTransactionMessage(signature, domainId, transactions) = message
|
||||
val v0.DomainTopologyTransactionMessage(signature, _domainId, transactions) = message
|
||||
for {
|
||||
succeededContent <- transactions.toList.traverse(SignedTopologyTransaction.fromByteString)
|
||||
signature <- ProtoConverter.parseRequired(Signature.fromProtoV0, "signature", signature)
|
||||
@ -211,7 +227,7 @@ object DomainTopologyTransactionMessage
|
||||
)(protocolVersionRepresentativeFor(ProtoVersion(0)))
|
||||
}
|
||||
|
||||
def fromProtoV1(
|
||||
private[messages] def fromProtoV1(
|
||||
message: v1.DomainTopologyTransactionMessage
|
||||
): ParsingResult[DomainTopologyTransactionMessage] = {
|
||||
val v1.DomainTopologyTransactionMessage(signature, domainId, timestamp, transactions) = message
|
||||
|
@@ -117,7 +117,7 @@ object EnvelopeContent extends HasProtocolVersionedWithContextCompanion[Envelope
  ): EnvelopeContent =
    create(message, protocolVersion).valueOr(err => throw new IllegalArgumentException(err))

-  def fromProtoV0(
+  private def fromProtoV0(
      hashOps: HashOps,
      envelopeContent: v0.EnvelopeContent,
  ): ParsingResult[EnvelopeContent] = {

@@ -147,7 +147,7 @@ object EnvelopeContent extends HasProtocolVersionedWithContextCompanion[Envelope
    messageE.map(message => new EnvelopeContentV0(message) {})
  }

-  def fromProtoV1(
+  private def fromProtoV1(
      hashOps: HashOps,
      envelopeContent: v1.EnvelopeContent,
  ): ParsingResult[EnvelopeContent] = {

@@ -177,7 +177,7 @@ object EnvelopeContent extends HasProtocolVersionedWithContextCompanion[Envelope
    messageE.map(message => new EnvelopeContentV1(message) {})
  }

-  def fromProtoV2(
+  private def fromProtoV2(
      hashOps: HashOps,
      envelopeContent: v2.EnvelopeContent,
  ): ParsingResult[EnvelopeContent] = {

@@ -202,8 +202,8 @@ object EnvelopeContent extends HasProtocolVersionedWithContextCompanion[Envelope
      case Content.RegisterTopologyTransactionResponse(messageP) =>
        RegisterTopologyTransactionResponse.fromProtoV1(messageP)
      case Content.Empty => Left(OtherError("Cannot deserialize an empty message content"))
-    }): ParsingResult[ProtocolMessageV1]
-    messageE.map(message => new EnvelopeContentV1(message) {})
+    }): ParsingResult[ProtocolMessageV2]
+    messageE.map(message => new EnvelopeContentV2(message) {})
  }

  private def fromProtoV3(
@@ -46,11 +46,11 @@ object LocalVerdict extends HasProtocolVersionedCompanion[LocalVerdict] {
  SupportedProtoVersions(
    ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v3)(v0.LocalVerdict)(
      supportedProtoVersion(_)(fromProtoV0),
-      _.toByteString,
+      _.toProtoV0.toByteString,
    ),
    ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v4)(v1.LocalVerdict)(
      supportedProtoVersion(_)(fromProtoV1),
-      _.toByteString,
+      _.toProtoV1.toByteString,
    ),
  )

@@ -563,9 +563,7 @@ object LocalReject extends LocalRejectionGroup {
      override val representativeProtocolVersion: RepresentativeProtocolVersion[
        LocalVerdict.type
      ]
-  ) extends Malformed(
-        _causePrefix = ""
-      )
+  ) extends Malformed(_causePrefix = "")

  object Reject {
    def apply(details: String, protocolVersion: ProtocolVersion): Reject =

@@ -800,6 +798,7 @@ object LocalReject extends LocalRejectionGroup {
  }

  /** Fallback for deserializing local rejects that are not known to the current Canton version.
+    * Should not be serialized.
    */
  final case class GenericReject(
      override val _causePrefix: String,
@@ -191,7 +191,7 @@ object MalformedMediatorRequestResult
          rpv == protocolVersionRepresentativeFor(
            MediatorRejectV0.applicableProtocolVersion
          ),
-          Verdict.MediatorRejectV0.wrongProtocolVersion,
+          Verdict.MediatorRejectV0.wrongProtocolVersion(rpv),
        )
      case _: Verdict.MediatorRejectV1 =>
        EitherUtil.condUnitE(

@@ -201,14 +201,14 @@ object MalformedMediatorRequestResult
          rpv <= protocolVersionRepresentativeFor(
            MediatorRejectV1.lastApplicableProtocolVersion
          ),
-          Verdict.MediatorRejectV1.wrongProtocolVersion,
+          Verdict.MediatorRejectV1.wrongProtocolVersion(rpv),
        )
      case _: Verdict.MediatorRejectV2 =>
        EitherUtil.condUnitE(
          rpv >= protocolVersionRepresentativeFor(
            MediatorRejectV2.firstApplicableProtocolVersion
          ),
-          Verdict.MediatorRejectV2.wrongProtocolVersion,
+          Verdict.MediatorRejectV2.wrongProtocolVersion(rpv),
        )
    }
  }

@@ -249,7 +249,11 @@ object MalformedMediatorRequestResult
        .flatMap(RequestId.fromProtoPrimitive)
      domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain_id")
      viewType <- ViewType.fromProtoEnum(viewTypeP)
-      reject <- ProtoConverter.parseRequired(MediatorRejectV1.fromProtoV1, "rejection", rejectionPO)
+      reject <- ProtoConverter.parseRequired(
+        MediatorRejectV1.fromProtoV1(_, Verdict.protocolVersionRepresentativeFor(ProtoVersion(1))),
+        "rejection",
+        rejectionPO,
+      )
    } yield MalformedMediatorRequestResult(requestId, domainId, viewType, reject)(
      protocolVersionRepresentativeFor(ProtoVersion(1)),
      Some(bytes),

@@ -268,7 +272,11 @@ object MalformedMediatorRequestResult
        .flatMap(RequestId.fromProtoPrimitive)
      domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain_id")
      viewType <- ViewType.fromProtoEnum(viewTypeP)
-      reject <- ProtoConverter.parseRequired(MediatorRejectV2.fromProtoV2, "rejection", rejectionPO)
+      reject <- ProtoConverter.parseRequired(
+        MediatorRejectV2.fromProtoV2,
+        "rejection",
+        rejectionPO,
+      )
    } yield MalformedMediatorRequestResult(requestId, domainId, viewType, reject)(
      protocolVersionRepresentativeFor(ProtoVersion(2)),
      Some(bytes),
@@ -75,7 +75,7 @@ case class MediatorResponse private (
    with HasDomainId
    with PrettyPrinting {

-  // Private copy method so that callers do not trigger exceptions by accident.
+  // Private copy method used by the lenses in the companion object
  private def copy(
      requestId: RequestId = requestId,
      sender: ParticipantId = sender,
@@ -178,7 +178,9 @@ object SignedProtocolMessage
      ),
    )

-  val multipleSignaturesSupportedSince = protocolVersionRepresentativeFor(ProtoVersion(1))
+  private[messages] val multipleSignaturesSupportedSince = protocolVersionRepresentativeFor(
+    ProtoVersion(1)
+  )

  def create[M <: SignedProtocolMessageContent](
      typedMessage: TypedSignedProtocolMessageContent[M],
@@ -162,7 +162,7 @@ object TransactionResultMessage
        None,
      )

-  // TODO(i12171): Remove in 3.0
+  // TODO(i12171): Remove in 3.0 and drop context HashOps
  def apply(
      requestId: RequestId,
      verdict: Verdict,
@@ -133,13 +133,13 @@ object Verdict
      v0.Verdict(someVerdict = v0.Verdict.SomeVerdict.MediatorReject(toProtoMediatorRejectionV0))

    private[messages] override def toProtoV1: v1.Verdict =
-      throw new UnsupportedOperationException(wrongProtocolVersion)
+      throw new UnsupportedOperationException(wrongProtocolVersion(representativeProtocolVersion))

    private[messages] override def toProtoV2: v2.Verdict =
-      throw new UnsupportedOperationException(wrongProtocolVersion)
+      throw new UnsupportedOperationException(wrongProtocolVersion(representativeProtocolVersion))

    private[messages] override def toProtoV3: v3.Verdict =
-      throw new UnsupportedOperationException(wrongProtocolVersion)
+      throw new UnsupportedOperationException(wrongProtocolVersion(representativeProtocolVersion))

    override def pretty: Pretty[MediatorRejectV0] = prettyOfClass(
      param("code", _.code.toString.unquoted),

@@ -172,8 +172,8 @@ object Verdict
  object MediatorRejectV0 {
    def applicableProtocolVersion: ProtocolVersion = ProtocolVersion.v3

-    lazy val wrongProtocolVersion: String =
-      s"MediatorRejectV0 can only be used in protocol version ${applicableProtocolVersion}"
+    private[messages] def wrongProtocolVersion(rpv: RepresentativeProtocolVersion[_]): String =
+      s"MediatorRejectV0 can only be used in representative protocol version $applicableProtocolVersion; found: representative protocol version=$rpv"

    def tryCreate(code: v0.MediatorRejection.Code, reason: String): MediatorRejectV0 =
      new MediatorRejectV0(code, reason)

@@ -220,11 +220,11 @@ object Verdict
      representativeProtocolVersion <= protocolVersionRepresentativeFor(
        lastApplicableProtocolVersion
      ),
-      wrongProtocolVersion,
+      wrongProtocolVersion(representativeProtocolVersion),
    )

    private[messages] override def toProtoV0: v0.Verdict =
-      throw new UnsupportedOperationException(wrongProtocolVersion)
+      throw new UnsupportedOperationException(wrongProtocolVersion(representativeProtocolVersion))

    private[messages] override def toProtoV1: v1.Verdict =
      v1.Verdict(someVerdict = v1.Verdict.SomeVerdict.MediatorReject(toProtoMediatorRejectV1))

@@ -233,7 +233,7 @@ object Verdict
      v2.Verdict(someVerdict = v2.Verdict.SomeVerdict.MediatorReject(toProtoMediatorRejectV1))

    private[messages] override def toProtoV3: v3.Verdict =
-      throw new UnsupportedOperationException(wrongProtocolVersion)
+      throw new UnsupportedOperationException(wrongProtocolVersion(representativeProtocolVersion))

    def toProtoMediatorRejectV1: v1.MediatorReject =
      v1.MediatorReject(cause = cause, errorCode = id, errorCategory = errorCategory)

@@ -270,8 +270,8 @@ object Verdict
    def firstApplicableProtocolVersion: ProtocolVersion = ProtocolVersion.v4
    def lastApplicableProtocolVersion: ProtocolVersion = ProtocolVersion.v5

-    lazy val wrongProtocolVersion: String =
-      s"MediatorRejectV1 can only be used in protocol versions from $firstApplicableProtocolVersion to $lastApplicableProtocolVersion"
+    private[messages] def wrongProtocolVersion(pv: RepresentativeProtocolVersion[_]): String =
+      s"MediatorRejectV1 can only be used in protocol versions from $firstApplicableProtocolVersion to $lastApplicableProtocolVersion; found: representative protocol version=$pv"

    private[messages] def tryCreate(
        cause: String,

@@ -288,10 +288,12 @@ object Verdict
    ): MediatorRejectV1 =
      tryCreate(cause, id, errorCategory, Verdict.protocolVersionRepresentativeFor(protocolVersion))

-    def fromProtoV1(mediatorRejectP: v1.MediatorReject): ParsingResult[MediatorRejectV1] = {
-      val representativeProtocolVersion = protocolVersionRepresentativeFor(ProtoVersion(1))
+    def fromProtoV1(
+        mediatorRejectP: v1.MediatorReject,
+        pv: RepresentativeProtocolVersion[Verdict.type],
+    ): ParsingResult[MediatorRejectV1] = {
      val v1.MediatorReject(cause, errorCodeP, errorCategoryP) = mediatorRejectP
-      Right(MediatorRejectV1(cause, errorCodeP, errorCategoryP)(representativeProtocolVersion))
+      Right(MediatorRejectV1(cause, errorCodeP, errorCategoryP)(pv))
    }
  }

@@ -305,19 +307,19 @@ object Verdict
      representativeProtocolVersion >= Verdict.protocolVersionRepresentativeFor(
        firstApplicableProtocolVersion
      ),
-      wrongProtocolVersion,
+      wrongProtocolVersion(representativeProtocolVersion),
    )

    require(status.code != com.google.rpc.Code.OK_VALUE, "Rejection must not use status code OK")

    private[messages] override def toProtoV0: v0.Verdict =
-      throw new UnsupportedOperationException(wrongProtocolVersion)
+      throw new UnsupportedOperationException(wrongProtocolVersion(representativeProtocolVersion))

    private[messages] override def toProtoV1: v1.Verdict =
-      throw new UnsupportedOperationException(wrongProtocolVersion)
+      throw new UnsupportedOperationException(wrongProtocolVersion(representativeProtocolVersion))

    private[messages] override def toProtoV2: v2.Verdict =
-      throw new UnsupportedOperationException(wrongProtocolVersion)
+      throw new UnsupportedOperationException(wrongProtocolVersion(representativeProtocolVersion))

    private[messages] override def toProtoV3: v3.Verdict =
      v3.Verdict(v3.Verdict.SomeVerdict.MediatorReject(toProtoMediatorRejectV2))

@@ -346,8 +348,8 @@ object Verdict
  object MediatorRejectV2 {
    def firstApplicableProtocolVersion: ProtocolVersion = ProtocolVersion.v6

-    lazy val wrongProtocolVersion: String =
-      s"MediatorRejectV2 can only be used in protocol versions $firstApplicableProtocolVersion and higher"
+    private[messages] def wrongProtocolVersion(pv: RepresentativeProtocolVersion[_]): String =
+      s"MediatorRejectV2 can only be used in protocol versions $firstApplicableProtocolVersion and higher; found: representative protocol version=$pv"

    private[messages] def tryCreate(
        status: com.google.rpc.status.Status,

@@ -428,9 +430,7 @@ object Verdict
      val message = show"Request was rejected with multiple reasons. $reasons"
      loggingContext.logger.info(message)(loggingContext.traceContext)
    }
-    reasons
-      .map(_._2)
-      .maxBy1(_.code.category)
+    reasons.map { case (_, localReject) => localReject }.maxBy1(_.code.category)
  }

  override def isTimeoutDeterminedByMediator: Boolean = false

@@ -443,53 +443,55 @@ object Verdict
    ): ParticipantReject =
      ParticipantReject(reasons)(Verdict.protocolVersionRepresentativeFor(protocolVersion))

-    def fromProtoV0(rejectionReasonsP: v0.RejectionReasons): ParsingResult[ParticipantReject] = {
+    private[messages] def fromProtoV0(
+        rejectionReasonsP: v0.RejectionReasons
+    ): ParsingResult[ParticipantReject] = {
      val v0.RejectionReasons(reasonsP) = rejectionReasonsP
-      fromProtoRejectionReasonsV0(reasonsP)
+      fromProtoRejectionReasonsV0(reasonsP, protocolVersionRepresentativeFor(ProtoVersion(0)))
    }

    private def fromProtoRejectionReasonsV0(
-        reasonsP: Seq[v0.RejectionReason]
+        reasonsP: Seq[v0.RejectionReason],
+        pv: RepresentativeProtocolVersion[Verdict.type],
    ): ParsingResult[ParticipantReject] =
      for {
        reasons <- reasonsP.traverse(fromProtoReasonV0)
        reasonsNE <- NonEmpty
          .from(reasons.toList)
          .toRight(InvariantViolation("Field reasons must not be empty!"))
-      } yield ParticipantReject(reasonsNE)(
-        Verdict.protocolVersionRepresentativeFor(ProtoVersion(0))
-      )
+      } yield ParticipantReject(reasonsNE)(pv)

-    def fromProtoV1(
-        participantRejectP: v1.ParticipantReject
+    private[messages] def fromProtoV1(
+        participantRejectP: v1.ParticipantReject,
+        pv: RepresentativeProtocolVersion[Verdict.type],
    ): ParsingResult[ParticipantReject] = {
      val v1.ParticipantReject(reasonsP) = participantRejectP
-      fromProtoRejectionReasonsV0(reasonsP)
+      fromProtoRejectionReasonsV0(reasonsP, pv)
    }
|
||||
private def fromProtoRejectionReasonsV2(
|
||||
reasonsP: Seq[v2.RejectionReason]
|
||||
reasonsP: Seq[v2.RejectionReason],
|
||||
pv: RepresentativeProtocolVersion[Verdict.type],
|
||||
): ParsingResult[ParticipantReject] =
|
||||
for {
|
||||
reasons <- reasonsP.traverse(fromProtoReasonV2)
|
||||
reasonsNE <- NonEmpty
|
||||
.from(reasons.toList)
|
||||
.toRight(InvariantViolation("Field reasons must not be empty!"))
|
||||
} yield ParticipantReject(reasonsNE)(
|
||||
Verdict.protocolVersionRepresentativeFor(ProtoVersion(2))
|
||||
)
|
||||
} yield ParticipantReject(reasonsNE)(pv)
|
||||
|
||||
def fromProtoV2(
|
||||
participantRejectP: v2.ParticipantReject
|
||||
participantRejectP: v2.ParticipantReject,
|
||||
pv: RepresentativeProtocolVersion[Verdict.type],
|
||||
): ParsingResult[ParticipantReject] = {
|
||||
val v2.ParticipantReject(reasonsP) = participantRejectP
|
||||
fromProtoRejectionReasonsV2(reasonsP)
|
||||
fromProtoRejectionReasonsV2(reasonsP, pv)
|
||||
}
|
||||
}
|
||||
|
||||
override def name: String = "verdict"
|
||||
|
||||
def fromProtoV0(verdictP: v0.Verdict): ParsingResult[Verdict] = {
|
||||
private[messages] def fromProtoV0(verdictP: v0.Verdict): ParsingResult[Verdict] = {
|
||||
val v0.Verdict(someVerdictP) = verdictP
|
||||
import v0.Verdict.{SomeVerdict as V}
|
||||
|
||||
@ -510,7 +512,7 @@ object Verdict
|
||||
}
|
||||
}
|
||||
|
||||
def fromProtoV1(verdictP: v1.Verdict): ParsingResult[Verdict] = {
|
||||
private[messages] def fromProtoV1(verdictP: v1.Verdict): ParsingResult[Verdict] = {
|
||||
val v1.Verdict(someVerdictP) = verdictP
|
||||
import v1.Verdict.{SomeVerdict as V}
|
||||
|
||||
@ -518,9 +520,10 @@ object Verdict
|
||||
|
||||
someVerdictP match {
|
||||
case V.Approve(empty.Empty(_)) => Right(Approve()(representativeProtocolVersion))
|
||||
case V.MediatorReject(mediatorRejectP) => MediatorRejectV1.fromProtoV1(mediatorRejectP)
|
||||
case V.MediatorReject(mediatorRejectP) =>
|
||||
MediatorRejectV1.fromProtoV1(mediatorRejectP, representativeProtocolVersion)
|
||||
case V.ParticipantReject(participantRejectP) =>
|
||||
ParticipantReject.fromProtoV1(participantRejectP)
|
||||
ParticipantReject.fromProtoV1(participantRejectP, representativeProtocolVersion)
|
||||
case V.Empty => Left(NotImplementedYet("empty verdict type"))
|
||||
}
|
||||
}
|
||||
@ -533,9 +536,10 @@ object Verdict
|
||||
|
||||
someVerdictP match {
|
||||
case V.Approve(empty.Empty(_)) => Right(Approve()(representativeProtocolVersion))
|
||||
case V.MediatorReject(mediatorRejectP) => MediatorRejectV1.fromProtoV1(mediatorRejectP)
|
||||
case V.MediatorReject(mediatorRejectP) =>
|
||||
MediatorRejectV1.fromProtoV1(mediatorRejectP, representativeProtocolVersion)
|
||||
case V.ParticipantReject(participantRejectP) =>
|
||||
ParticipantReject.fromProtoV2(participantRejectP)
|
||||
ParticipantReject.fromProtoV2(participantRejectP, representativeProtocolVersion)
|
||||
case V.Empty => Left(OtherError("empty verdict type"))
|
||||
}
|
||||
}
|
||||
@ -547,9 +551,10 @@ object Verdict
|
||||
val representativeProtocolVersion = protocolVersionRepresentativeFor(ProtoVersion(3))
|
||||
someVerdictP match {
|
||||
case V.Approve(empty.Empty(_)) => Right(Approve()(representativeProtocolVersion))
|
||||
case V.MediatorReject(mediatorRejectP) => MediatorRejectV2.fromProtoV2(mediatorRejectP)
|
||||
case V.MediatorReject(mediatorRejectP) =>
|
||||
MediatorRejectV2.fromProtoV2(mediatorRejectP)
|
||||
case V.ParticipantReject(participantRejectP) =>
|
||||
ParticipantReject.fromProtoV2(participantRejectP)
|
||||
ParticipantReject.fromProtoV2(participantRejectP, representativeProtocolVersion)
|
||||
case V.Empty => Left(OtherError("empty verdict type"))
|
||||
}
|
||||
}
|
||||
|
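
The recurring edit in the Verdict hunks above replaces fixed `wrongProtocolVersion` strings with functions that take the offending representative protocol version, so the thrown exceptions and `Either.cond` failures can report what was actually found. A minimal sketch of the pattern, using a hypothetical `Rpv` stand-in instead of Canton's `RepresentativeProtocolVersion` (values 4 and 5 taken from the diff):

object WrongVersionMessageSketch {
  final case class Rpv(value: Int) // hypothetical stand-in, not Canton's type
  private val first = 4
  private val last = 5

  // Before: a fixed message that cannot name the version that failed the check.
  lazy val staticMessage: String =
    s"MediatorRejectV1 can only be used in protocol versions from $first to $last"

  // After: the caller passes the version it found, so the diagnostic pinpoints it.
  def message(found: Rpv): String =
    s"MediatorRejectV1 can only be used in protocol versions from $first to $last; " +
      s"found: representative protocol version=$found"
}
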
@ -27,7 +27,7 @@ import scala.concurrent.{ExecutionContext, Future}
trait SequencerCounterTrackerStore extends FlagCloseable {
protected[store] val cursorStore: CursorPreheadStore[SequencerCounterDiscriminator]

/** Gets the prehead clean sequencer counter. All sequencer counters below are assumed to be clean. */
/** Gets the prehead clean sequencer counter. This sequencer counter and all the ones below are assumed to be clean. */
def preheadSequencerCounter(implicit
traceContext: TraceContext
): Future[Option[SequencerCounterCursorPrehead]] =
@ -137,7 +137,7 @@ class DbSequencedEventStore(
else
processingTime.event {
withLock(functionFullName) {
CloseContext.withCombinedContextF(closeContext, externalCloseContext, timeouts, logger) {
CloseContext.withCombinedContext(closeContext, externalCloseContext, timeouts, logger) {
combinedCloseContext =>
storage
.queryAndUpdate(bulkInsertQuery(events), functionFullName)(
@ -5,7 +5,7 @@ package com.digitalasset.canton.topology

import com.daml.error.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveLong}
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.crypto.store.{CryptoPrivateStoreError, CryptoPublicStoreError}
import com.digitalasset.canton.data.CantonTimestamp
@ -16,13 +16,7 @@ import com.digitalasset.canton.time.NonNegativeFiniteDuration
import com.digitalasset.canton.topology.processing.EffectiveTime
import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction
import com.digitalasset.canton.topology.transaction.TopologyTransactionX.TxHash
import com.digitalasset.canton.topology.transaction.{
TopologyChangeOp,
TopologyMapping,
TopologyMappingX,
TopologyStateElement,
TopologyTransaction,
}
import com.digitalasset.canton.topology.transaction.*

sealed trait TopologyManagerError extends CantonError

@ -384,6 +378,31 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
with TopologyManagerError
}

@Explanation(
"This error indicates that the attempted update of the extra traffic limits for a particular member failed because the new limit is lower than the current limit."
)
@Resolution(
"""Extra traffic limits can only be increased. Submit the topology transaction with a higher limit.
|The metadata details of this error contain the expected minimum value in the field ``expectedMinimum``."""
)
object InvalidTrafficLimit
extends ErrorCode(
id = "INVALID_TRAFFIC_LIMIT",
ErrorCategory.InvalidIndependentOfSystemState,
) {
final case class TrafficLimitTooLow(
member: Member,
actual: PositiveLong,
expectedMinimum: PositiveLong,
)(implicit
override val loggingContext: ErrorLoggingContext
) extends CantonError.Impl(
cause =
s"The extra traffic limit for $member should be at least $expectedMinimum, but was $actual."
)
with TopologyManagerError
}

abstract class DomainErrorGroup extends ErrorGroup()
abstract class ParticipantErrorGroup extends ErrorGroup()
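
The new `InvalidTrafficLimit` error encodes a monotonicity rule: extra traffic limits may only grow. A hedged, self-contained sketch of that rule with plain stand-ins (`Limit` is hypothetical; Canton uses `PositiveLong`):

object TrafficLimitRuleSketch {
  final case class Limit(value: Long) {
    require(value > 0, "limit must be positive") // mirrors the PositiveLong constraint
  }

  // Accept the proposed limit only if it does not fall below the current one.
  def checkIncrease(current: Limit, proposed: Limit): Either[String, Unit] =
    Either.cond(
      proposed.value >= current.value,
      (),
      s"The extra traffic limit should be at least ${current.value}, but was ${proposed.value}.",
    )
}
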
@ -14,7 +14,7 @@ import com.digitalasset.canton.config.CantonRequireTypes.{
String256M,
}
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveLong}
import com.digitalasset.canton.crypto.{PublicKey, SignatureCheckError}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown}
@ -231,7 +231,7 @@ sealed trait TopologyTransactionRejection extends PrettyPrinting {
}
object TopologyTransactionRejection {
object NotAuthorized extends TopologyTransactionRejection {
def asString: String = "Not authorized"
override def asString: String = "Not authorized"
override def pretty: Pretty[NotAuthorized.type] = prettyOfString(_ => asString)

override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
@ -240,7 +240,8 @@ object TopologyTransactionRejection {

final case class ThresholdTooHigh(actual: Int, mustBeAtMost: Int)
extends TopologyTransactionRejection {
def asString: String = s"Threshold must not be higher than $mustBeAtMost, but was $actual."
override def asString: String =
s"Threshold must not be higher than $mustBeAtMost, but was $actual."

override def pretty: Pretty[ThresholdTooHigh] = prettyOfString(_ => asString)

@ -251,28 +252,28 @@ object TopologyTransactionRejection {

final case class SignatureCheckFailed(err: SignatureCheckError)
extends TopologyTransactionRejection {
def asString: String = err.toString
override def asString: String = err.toString
override def pretty: Pretty[SignatureCheckFailed] = prettyOfClass(param("err", _.err))

override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
TopologyManagerError.InvalidSignatureError.Failure(err)
}
final case class WrongDomain(wrong: DomainId) extends TopologyTransactionRejection {
def asString: String = show"Wrong domain $wrong"
override def asString: String = show"Wrong domain $wrong"
override def pretty: Pretty[WrongDomain] = prettyOfClass(param("wrong", _.wrong))

override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
TopologyManagerError.WrongDomain.Failure(wrong)
}
final case class Duplicate(old: CantonTimestamp) extends TopologyTransactionRejection {
def asString: String = show"Duplicate transaction from ${old}"
override def asString: String = show"Duplicate transaction from ${old}"
override def pretty: Pretty[Duplicate] = prettyOfClass(param("old", _.old))
override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
TopologyManagerError.DuplicateTransaction.ExistsAt(old)
}
final case class SerialMismatch(expected: PositiveInt, actual: PositiveInt)
extends TopologyTransactionRejection {
def asString: String =
override def asString: String =
show"The given serial $actual does not match the expected serial $expected"
override def pretty: Pretty[SerialMismatch] =
prettyOfClass(param("expected", _.expected), param("actual", _.actual))
@ -280,12 +281,31 @@ object TopologyTransactionRejection {
TopologyManagerError.SerialMismatch.Failure(expected, actual)
}
final case class Other(str: String) extends TopologyTransactionRejection {
def asString: String = str
override def asString: String = str
override def pretty: Pretty[Other] = prettyOfString(_ => asString)

override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
TopologyManagerError.InternalError.Other(str)
}

final case class ExtraTrafficLimitTooLow(
member: Member,
actual: PositiveLong,
expectedMinimum: PositiveLong,
) extends TopologyTransactionRejection {
override def asString: String =
s"Extra traffic limit for $member should be at least $expectedMinimum, but was $actual."

override def pretty: Pretty[ExtraTrafficLimitTooLow] =
prettyOfClass(
param("member", _.member),
param("actual", _.actual),
param("expectedMinimum", _.expectedMinimum),
)

override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.InvalidTrafficLimit.TrafficLimitTooLow(member, actual, expectedMinimum)
}
}

final case class ValidatedTopologyTransaction(
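
A side effect of this hunk is that every `asString` implementation now carries `override`, because the member is declared on the sealed trait. That turns any later rename or retyping of the trait member into a compile-time error in all implementations. A minimal illustration with hypothetical types:

object OverrideGuardSketch {
  trait Rejection { def asString: String }

  final case class Other(str: String) extends Rejection {
    // Fails to compile if `asString` is renamed or retyped on Rejection.
    override def asString: String = str
  }
}
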
@ -21,6 +21,7 @@ import com.digitalasset.canton.version.{
ProtocolVersion,
RepresentativeProtocolVersion,
}
import com.google.common.annotations.VisibleForTesting
import com.google.protobuf.ByteString
import slick.jdbc.{GetResult, PositionedParameters, SetParameter}

@ -34,7 +35,7 @@ import scala.concurrent.{ExecutionContext, Future}
* Whether the key is eligible to authorize the topology transaction depends on the topology state
*/
@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests
case class SignedTopologyTransaction[+Op <: TopologyChangeOp](
case class SignedTopologyTransaction[+Op <: TopologyChangeOp] private (
transaction: TopologyTransaction[Op],
key: SigningPublicKey,
signature: Signature,
@ -67,6 +68,17 @@ case class SignedTopologyTransaction[+Op <: TopologyChangeOp](
pureApi.verifySignature(hash, key, signature)
}

@VisibleForTesting
def update[NewOp >: Op <: TopologyChangeOp](
transaction: TopologyTransaction[NewOp] = transaction,
key: SigningPublicKey = key,
signature: Signature = signature,
): SignedTopologyTransaction[NewOp] =
this.copy(transaction = transaction, key = key, signature = signature)(
representativeProtocolVersion,
None,
)

override def pretty: Pretty[SignedTopologyTransaction.this.type] =
prettyOfClass(unnamedParam(_.transaction), param("key", _.key))

@ -94,6 +106,14 @@ object SignedTopologyTransaction

import com.digitalasset.canton.resource.DbStorage.Implicits.*

def apply[Op <: TopologyChangeOp](
transaction: TopologyTransaction[Op],
key: SigningPublicKey,
signature: Signature,
rpv: RepresentativeProtocolVersion[SignedTopologyTransaction.type],
): SignedTopologyTransaction[Op] =
SignedTopologyTransaction(transaction, key, signature)(rpv, None)

/** Sign the given topology transaction. */
def create[Op <: TopologyChangeOp](
transaction: TopologyTransaction[Op],
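
Making the `SignedTopologyTransaction` constructor private while adding a four-argument `apply` on the companion funnels all construction through one place that controls the second parameter list (the representative protocol version plus `None`). A sketch of the same pattern with simplified, hypothetical types:

object PrivateCtorSketch {
  final case class Versioned private (payload: String)(val version: Int)

  object Versioned {
    // The only public way to build a Versioned; the companion may still
    // invoke the private constructor.
    def build(payload: String, version: Int): Versioned =
      new Versioned(payload)(version)
  }
}
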
@ -16,6 +16,7 @@ import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedM
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.version.*
import com.digitalasset.canton.{LfPackageId, ProtoDeserializationError}
import com.google.common.annotations.VisibleForTesting
import com.google.protobuf.ByteString

import scala.Ordered.orderingToOrdered
@ -207,7 +208,8 @@ object OwnerToKeyMapping {

}

final case class SignedLegalIdentityClaim(
// Using private because the `claim` needs to be a `LegalIdentityClaim`
final case class SignedLegalIdentityClaim private (
uid: UniqueIdentifier,
claim: ByteString,
signature: Signature,
@ -236,6 +238,10 @@ object SignedLegalIdentityClaim {

def dbType: DomainTopologyTransactionType = DomainTopologyTransactionType.SignedLegalIdentityClaim

@VisibleForTesting
def create(claim: LegalIdentityClaim, signature: Signature): SignedLegalIdentityClaim =
SignedLegalIdentityClaim(claim.uid, claim.toByteString, signature)

def fromProtoV0(
value: v0.SignedLegalIdentityClaim
): ParsingResult[SignedLegalIdentityClaim] =
@ -255,7 +261,6 @@ final case class LegalIdentityClaim private (
override val deserializedFrom: Option[ByteString],
) extends ProtocolVersionedMemoizedEvidence
with HasProtocolVersionedWrapper[LegalIdentityClaim] {

@transient override protected lazy val companionObj: LegalIdentityClaim.type = LegalIdentityClaim

protected def toProtoV0: v0.LegalIdentityClaim =
@ -637,12 +642,12 @@ final case class DomainParametersChange(
domainId: DomainId,
domainParameters: DynamicDomainParameters,
) extends DomainGovernanceMapping {
def toProtoV0: v0.DomainParametersChange = v0.DomainParametersChange(
private[transaction] def toProtoV0: v0.DomainParametersChange = v0.DomainParametersChange(
domain = domainId.toProtoPrimitive,
Option(domainParameters.toProtoV0),
)

def toProtoV1: v1.DomainParametersChange = v1.DomainParametersChange(
private[transaction] def toProtoV1: v1.DomainParametersChange = v1.DomainParametersChange(
domain = domainId.toProtoPrimitive,
Option(domainParameters.toProtoV1),
)
@ -5,7 +5,7 @@ package com.digitalasset.canton.topology.transaction

import cats.data.EitherT
import cats.instances.future.*
import cats.syntax.parallel.*
import com.digitalasset.canton.config.RequireTypes.PositiveLong
import com.digitalasset.canton.crypto.KeyPurpose
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.NamedLoggerFactory
@ -16,9 +16,9 @@ import com.digitalasset.canton.topology.store.{
TopologyTransactionRejection,
}
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.TopologyMappingX.Code
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.EitherTUtil
import com.digitalasset.canton.util.FutureInstances.*

import scala.concurrent.{ExecutionContext, Future}
import scala.math.Ordered.*
@ -53,17 +53,30 @@ class ValidatingTopologyMappingXChecks(
inStore: Option[GenericSignedTopologyTransactionX],
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {

val checkDomainTrustCertificateX = toValidate
.selectMapping[DomainTrustCertificateX]
.map(checkDomainTrustCertificate(effective, _))
.getOrElse(EitherTUtil.unit[TopologyTransactionRejection])
val checkOpt = (toValidate.mapping.code, inStore.map(_.mapping.code)) match {
case (Code.DomainTrustCertificateX, None | Some(Code.DomainTrustCertificateX)) =>
toValidate
.selectMapping[DomainTrustCertificateX]
.map(checkDomainTrustCertificate(effective, _))

val checkPartyToParticipantX = toValidate
.select[TopologyChangeOpX.Replace, PartyToParticipantX]
.map(checkPartyToParticipantMapping(_, inStore.flatMap(_.selectMapping[PartyToParticipantX])))
.getOrElse(EitherTUtil.unit)
case (Code.PartyToParticipantX, None | Some(Code.PartyToParticipantX)) =>
toValidate
.select[TopologyChangeOpX.Replace, PartyToParticipantX]
.map(checkPartyToParticipant(_, inStore.flatMap(_.selectMapping[PartyToParticipantX])))

Seq(checkDomainTrustCertificateX, checkPartyToParticipantX).parSequence_
case (Code.TrafficControlStateX, None | Some(Code.TrafficControlStateX)) =>
toValidate
.select[TopologyChangeOpX.Replace, TrafficControlStateX]
.map(
checkTrafficControl(
_,
inStore.flatMap(_.selectMapping[TrafficControlStateX]),
)
)

case otherwise => None
}
checkOpt.getOrElse(EitherTUtil.unit)
}

/** Checks that the DTC is not being removed if the participant still hosts a party.
@ -118,7 +131,7 @@ class ValidatingTopologyMappingXChecks(
* - new participants have a valid DTC
* - new participants have an OTK with at least 1 signing key and 1 encryption key
*/
private def checkPartyToParticipantMapping(
private def checkPartyToParticipant(
toValidate: SignedTopologyTransactionX[TopologyChangeOpX, PartyToParticipantX],
inStore: Option[SignedTopologyTransactionX[TopologyChangeOpX, PartyToParticipantX]],
)(implicit
@ -186,4 +199,27 @@ class ValidatingTopologyMappingXChecks(
}
}

/** Checks that the extraTrafficLimit is monotonically increasing */
private def checkTrafficControl(
toValidate: SignedTopologyTransactionX[TopologyChangeOpX.Replace, TrafficControlStateX],
inStore: Option[SignedTopologyTransactionX[TopologyChangeOpX, TrafficControlStateX]],
): EitherT[Future, TopologyTransactionRejection, Unit] = {
val minimumExtraTrafficLimit = inStore match {
case None => PositiveLong.one
case Some(TopologyChangeOpX(TopologyChangeOpX.Remove)) =>
// if the transaction in the store is a removal, we "reset" the monotonicity requirement
PositiveLong.one
case Some(tx) => tx.mapping.totalExtraTrafficLimit
}

EitherTUtil.condUnitET(
toValidate.mapping.totalExtraTrafficLimit >= minimumExtraTrafficLimit,
TopologyTransactionRejection.ExtraTrafficLimitTooLow(
toValidate.mapping.member,
toValidate.mapping.totalExtraTrafficLimit,
minimumExtraTrafficLimit,
),
)

}
}
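
The `checkTrafficControl` check above resets its monotonicity floor when the stored transaction is a removal. A hedged sketch of just that decision logic, with plain Scala stand-ins rather than Canton's types:

object TrafficFloorSketch {
  sealed trait Op
  case object Replace extends Op
  case object Remove extends Op

  final case class StoredTx(op: Op, totalExtraTrafficLimit: Long)

  def minimumLimit(inStore: Option[StoredTx]): Long = inStore match {
    case None => 1L // nothing stored yet: any positive limit is acceptable
    case Some(StoredTx(Remove, _)) => 1L // a removal resets the requirement
    case Some(tx) => tx.totalExtraTrafficLimit // otherwise: never decrease
  }
}
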
@ -40,6 +40,13 @@ object TopologyChangeOpX {
type Remove = Remove.type
type Replace = Replace.type

def unapply(
tx: TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
): Option[TopologyChangeOpX] = Some(tx.op)
def unapply(
tx: SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
): Option[TopologyChangeOpX] = Some(tx.transaction.op)

def fromProtoV2(
protoOp: v2.TopologyChangeOpX
): ParsingResult[TopologyChangeOpX] =
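
The two `unapply` helpers make `TopologyChangeOpX` usable as an extractor, which is what the traffic-control check above relies on in `case Some(TopologyChangeOpX(TopologyChangeOpX.Remove))`. The mechanics in isolation, with simplified types:

object ExtractorSketch {
  final case class Tx(op: String) // stand-in for a (signed) topology transaction

  object TxOp {
    def unapply(tx: Tx): Option[String] = Some(tx.op)
  }

  def isRemoval(stored: Option[Tx]): Boolean = stored match {
    case Some(TxOp("Remove")) => true // the extractor exposes the operation directly
    case _ => false
  }
}
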
@ -6,7 +6,12 @@ package com.digitalasset.canton.util
import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.concurrent.Threading
import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, UnlessShutdown}
import com.digitalasset.canton.lifecycle.{
FlagCloseable,
FutureUnlessShutdown,
OnShutdownRunner,
UnlessShutdown,
}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.tracing.TraceContext

@ -64,17 +69,17 @@ object DelayUtil extends NamedLogging {
promise.future
}

/** Creates a future that succeeds after the given delay provided that `flagCloseable` has not yet been closed then.
* The future completes fast with UnlessShutdown.AbortedDueToShutdown if `flagCloseable` is already closing.
/** Creates a future that succeeds after the given delay provided that `onShutdownRunner` has not yet been closed then.
* The future completes fast with UnlessShutdown.AbortedDueToShutdown if `onShutdownRunner` is already closing.
*/
def delayIfNotClosing(name: String, delay: FiniteDuration, flagCloseable: FlagCloseable)(implicit
traceContext: TraceContext
def delayIfNotClosing(name: String, delay: FiniteDuration, onShutdownRunner: OnShutdownRunner)(
implicit traceContext: TraceContext
): FutureUnlessShutdown[Unit] = {
val promise = Promise[UnlessShutdown[Unit]]()
val future = promise.future

import com.digitalasset.canton.lifecycle.RunOnShutdown
val cancelToken = flagCloseable.runOnShutdown(new RunOnShutdown() {
val cancelToken = onShutdownRunner.runOnShutdown(new RunOnShutdown() {
val name = s"$functionFullName-shutdown"
def done = promise.isCompleted
def run(): Unit = {
@ -86,7 +91,7 @@ object DelayUtil extends NamedLogging {
promise.trySuccess(UnlessShutdown.Outcome(())).discard
// No need to complete the promise on shutdown with an AbortedDueToShutdown since we succeeded, and also
// keeps the list of shutdown tasks from growing indefinitely with each retry
flagCloseable.cancelShutdownTask(cancelToken)
onShutdownRunner.cancelShutdownTask(cancelToken)
}

// TODO(i4245): Use Clock instead
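
`delayIfNotClosing` races a scheduled completion against shutdown: whichever side first calls `trySuccess` on the promise wins, and the loser is a no-op. A minimal, library-free sketch of that race, with hypothetical hook signatures standing in for Canton's shutdown and scheduling APIs:

import scala.concurrent.{Future, Promise}

object DelayRaceSketch {
  sealed trait Result
  case object Completed extends Result
  case object AbortedDueToShutdown extends Result

  def delayOrShutdown(
      onShutdown: (() => Unit) => Unit, // register a shutdown callback (assumed API)
      schedule: (() => Unit) => Unit, // run a callback after the delay (assumed API)
  ): Future[Result] = {
    val promise = Promise[Result]()
    onShutdown(() => promise.trySuccess(AbortedDueToShutdown)) // fast path when closing
    schedule(() => promise.trySuccess(Completed)) // normal path after the delay
    promise.future
  }
}
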
@ -255,11 +255,10 @@ abstract class RetryWithDelay(
// Run the task again on the normal execution context as the task might take a long time.
// `performUnlessClosingF` guards against closing the execution context.
val nextRunUnlessShutdown =
flagCloseable
.performUnlessClosingF(operationName)(runTask())(
executionContext,
traceContext,
)
flagCloseable.performUnlessClosingF(operationName)(runTask())(
executionContext,
traceContext,
)
@SuppressWarnings(Array("org.wartremover.warts.TryPartial"))
val nextRunF = nextRunUnlessShutdown
.onShutdown {
@ -110,7 +110,7 @@ trait HasProtocolVersionedWrapper[ValueClass <: HasRepresentativeProtocolVersion
/** Will check that default value rules defined in `companionObj.defaultValues` hold.
*/
def validateInstance(): Either[String, Unit] =
companionObj.invariants.traverse_(_.validateInstance(this, representativeProtocolVersion))
companionObj.validateInstance(this, representativeProtocolVersion)

/** Yields the proto representation of the class inside an `UntypedVersionedMessage` wrapper.
*
@ -230,7 +230,7 @@ trait HasSupportedProtoVersions[ValueClass] {
// Serializer: (ValueClass => Proto)
type Serializer = ValueClass => ByteString

private type ThisRepresentativeProtocolVersion = RepresentativeProtocolVersion[this.type]
protected type ThisRepresentativeProtocolVersion = RepresentativeProtocolVersion[this.type]

trait Invariant {
def validateInstance(
@ -341,7 +341,7 @@ trait HasSupportedProtoVersions[ValueClass] {
Either.cond(
v.isEmpty == pv < untilExclusive.representative,
(),
s"expecting None if and only if $pv < ${untilExclusive.representative}; found: $v",
s"expecting None if and only if pv < ${untilExclusive.representative}; for $pv, found: $v",
)
}

@ -644,6 +644,14 @@ trait HasProtocolVersionedWrapperCompanion[
type OriginalByteString = ByteString // What is passed to the fromByteString method
type DataByteString = ByteString // What is inside the parsed UntypedVersionedMessage message

/** Will check that default value rules defined in `companionObj.defaultValues` hold.
*/
def validateInstance(
instance: ValueClass,
representativeProtocolVersion: ThisRepresentativeProtocolVersion,
): Either[String, Unit] =
invariants.traverse_(_.validateInstance(instance, representativeProtocolVersion))

protected def deserializeForVersion(
rpv: RepresentativeProtocolVersion[this.type],
deserializeLegacyProto: Deserializer => ParsingResult[DeserializedValueClass],
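
This hunk hoists invariant validation from the instance trait into the companion, so the instance method and external callers share one implementation. A sketch of the shape of that move, with simplified hypothetical types and a stdlib fold instead of cats' `traverse_`:

object CompanionValidationSketch {
  trait Invariant[A] {
    def validateInstance(a: A): Either[String, Unit]
  }

  final case class Wrapper(value: Int) {
    // The instance method now just delegates to the companion.
    def validateInstance(): Either[String, Unit] = Wrapper.validateInstance(this)
  }

  object Wrapper {
    val invariants: List[Invariant[Wrapper]] =
      List(w => Either.cond(w.value >= 0, (), "value must be non-negative"))

    // Single shared implementation; stops at the first failing invariant.
    def validateInstance(w: Wrapper): Either[String, Unit] =
      invariants.foldLeft[Either[String, Unit]](Right(())) { (acc, inv) =>
        acc.flatMap(_ => inv.validateInstance(w))
      }
  }
}
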
@ -19,5 +19,25 @@ ALTER TABLE commitment_pruning ADD COLUMN succeeded bigint null;
ALTER TABLE contract_key_pruning ADD COLUMN succeeded bigint null;
ALTER TABLE sequenced_event_store_pruning ADD COLUMN succeeded bigint null;

-- changes to the event logs to accommodate more complex offsets (for topology events)
ALTER TABLE event_log ADD COLUMN local_offset_effective_time bigint NOT NULL DEFAULT 0; -- timestamp, micros from epoch
ALTER TABLE event_log ADD COLUMN local_offset_discriminator smallint NOT NULL DEFAULT 0; -- 0 for requests, 1 for topology events
ALTER TABLE event_log RENAME COLUMN local_offset TO local_offset_tie_breaker;

ALTER TABLE linearized_event_log ADD COLUMN local_offset_effective_time bigint NOT NULL DEFAULT 0; -- timestamp, micros from epoch
ALTER TABLE linearized_event_log ADD COLUMN local_offset_discriminator smallint NOT NULL DEFAULT 0; -- 0 for requests, 1 for topology events
ALTER TABLE linearized_event_log RENAME COLUMN local_offset TO local_offset_tie_breaker;

-- changes to the indexes, keys and constraints of the event logs
ALTER TABLE linearized_event_log DROP CONSTRAINT foreign_key_event_log;
DROP INDEX idx_linearized_event_log_offset;

CREATE UNIQUE INDEX idx_linearized_event_log_offset ON linearized_event_log (log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker);

ALTER TABLE event_log DROP PRIMARY KEY;
ALTER TABLE event_log ADD PRIMARY KEY (log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker);
CREATE INDEX idx_event_log_local_offset ON event_log (local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker);

ALTER TABLE linearized_event_log
ADD CONSTRAINT foreign_key_event_log FOREIGN KEY (log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker)
REFERENCES event_log(log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker) ON DELETE CASCADE;
@ -19,3 +19,24 @@ ALTER TABLE commitment_pruning ADD COLUMN succeeded bigint null;
ALTER TABLE contract_key_pruning ADD COLUMN succeeded bigint null;
ALTER TABLE sequenced_event_store_pruning ADD COLUMN succeeded bigint null;

-- changes to the event logs to accommodate more complex offsets (for topology events)
ALTER TABLE event_log ADD COLUMN local_offset_effective_time bigint NOT NULL DEFAULT 0; -- timestamp, micros from epoch
ALTER TABLE event_log ADD COLUMN local_offset_discriminator smallint NOT NULL DEFAULT 0; -- 0 for requests, 1 for topology events
ALTER TABLE event_log RENAME COLUMN local_offset TO local_offset_tie_breaker;
ALTER TABLE linearized_event_log ADD COLUMN local_offset_effective_time bigint NOT NULL DEFAULT 0; -- timestamp, micros from epoch
ALTER TABLE linearized_event_log ADD COLUMN local_offset_discriminator smallint NOT NULL DEFAULT 0; -- 0 for requests, 1 for topology events
ALTER TABLE linearized_event_log RENAME COLUMN local_offset TO local_offset_tie_breaker;

-- changes to the indexes, keys and constraints of the event logs
ALTER TABLE linearized_event_log DROP CONSTRAINT foreign_key_event_log;
DROP INDEX idx_linearized_event_log_offset;

CREATE UNIQUE INDEX idx_linearized_event_log_offset ON linearized_event_log (log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker);

ALTER TABLE event_log DROP CONSTRAINT event_log_pkey;
ALTER TABLE event_log ADD PRIMARY KEY (log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker);
CREATE INDEX idx_event_log_local_offset ON event_log (local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker);

ALTER TABLE linearized_event_log
ADD CONSTRAINT foreign_key_event_log FOREIGN KEY (log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker)
REFERENCES event_log(log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker) ON DELETE CASCADE;
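
Both migrations (H2 above, Postgres here; only the primary-key drop syntax differs) replace the single `local_offset` column with a composite offset. A hedged Scala sketch of how such a composite offset orders, with a hypothetical type whose column semantics are taken from the migration comments:

object LocalOffsetSketch {
  // effectiveTimeMicros: micros from epoch; discriminator: 0 = request, 1 = topology event
  final case class LocalOffset(effectiveTimeMicros: Long, discriminator: Short, tieBreaker: Long)

  // Same ordering as the new primary key and indexes: by effective time,
  // then discriminator, then tie breaker.
  implicit val localOffsetOrdering: Ordering[LocalOffset] =
    Ordering.by(o => (o.effectiveTimeMicros, o.discriminator, o.tieBreaker))
}
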
@ -4,7 +4,8
package com.digitalasset.canton.protocol

import com.digitalasset.canton.crypto.{Hash, HashOps, HashPurpose, HmacOps, Salt}
import com.digitalasset.canton.data.{CantonTimestamp, ViewPosition}
import com.digitalasset.canton.data.ViewPosition
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
import com.digitalasset.canton.serialization.DeterministicEncoding
import com.digitalasset.canton.topology.{DomainId, MediatorRef}

@ -96,7 +97,7 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
* @param viewParticipantDataSalt the salt of the [[com.digitalasset.canton.data.ViewParticipantData]] of the view whose core creates the contract
* @param createIndex the index of the node creating the contract (starting at 0).
* Only create nodes and only nodes that belong to the core of the view with salt `viewActionSalt` have an index.
* @param ledgerTime the ledger time at which the contract is created
* @param ledgerCreateTime the ledger time at which the contract is created
* @param metadata contract metadata
* @param suffixedContractInstance the serializable raw contract instance of the contract where contract IDs have already been suffixed.
* @param contractIdVersion version of contract ID used
@ -110,7 +111,7 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
viewPosition: ViewPosition,
viewParticipantDataSalt: Salt,
createIndex: Int,
ledgerTime: CantonTimestamp,
ledgerCreateTime: LedgerCreateTime,
metadata: ContractMetadata,
suffixedContractInstance: SerializableRawContractInstance,
contractIdVersion: CantonContractIdVersion,
@ -128,7 +129,7 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
val unicumHash =
if (contractIdVersion == AuthenticatedContractIdVersionV2) {
computeUnicumV2Hash(
ledgerTime = ledgerTime,
ledgerCreateTime = ledgerCreateTime,
metadata,
suffixedContractInstance = suffixedContractInstance,
contractSalt = contractSalt.unwrap,
@ -136,7 +137,7 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
)
} else {
computeUnicumV1Hash(
ledgerTime = ledgerTime,
ledgerCreateTime = ledgerCreateTime,
suffixedContractInstance = suffixedContractInstance,
contractSalt = contractSalt.unwrap,
contractIdVersion = contractIdVersion,
@ -150,7 +151,7 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
* Used for authenticating contracts.
*
* @param contractSalt the [[ContractSalt]] computed when the original contract id was generated.
* @param ledgerTime the ledger time at which the contract is created
* @param ledgerCreateTime the ledger time at which the contract is created
* @param metadata contract metadata
* @param suffixedContractInstance the serializable raw contract instance of the contract where contract IDs have already been suffixed.
* @param contractIdVersion version of contract ID used
@ -158,7 +159,7 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
*/
def recomputeUnicum(
contractSalt: Salt,
ledgerTime: CantonTimestamp,
ledgerCreateTime: LedgerCreateTime,
metadata: ContractMetadata,
suffixedContractInstance: SerializableRawContractInstance,
contractIdVersion: CantonContractIdVersion,
@ -169,7 +170,7 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
contractSaltSize.toLong == cryptoOps.defaultHmacAlgorithm.hashAlgorithm.length,
Unicum(
computeUnicumV2Hash(
ledgerTime,
ledgerCreateTime,
metadata,
suffixedContractInstance,
contractSalt,
@ -182,7 +183,12 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
Either.cond(
contractSaltSize.toLong == cryptoOps.defaultHmacAlgorithm.hashAlgorithm.length,
Unicum(
computeUnicumV1Hash(ledgerTime, suffixedContractInstance, contractSalt, contractIdVersion)
computeUnicumV1Hash(
ledgerCreateTime,
suffixedContractInstance,
contractSalt,
contractIdVersion,
)
),
s"Invalid contract salt size ($contractSaltSize)",
)
@ -190,7 +196,7 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
}

private def computeUnicumV1Hash(
ledgerTime: CantonTimestamp,
ledgerCreateTime: LedgerCreateTime,
suffixedContractInstance: SerializableRawContractInstance,
contractSalt: Salt,
contractIdVersion: CantonContractIdVersion,
@ -200,13 +206,13 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
// The salt's length is determined by the hash algorithm and the contract ID version determines the hash algorithm,
// so salts have fixed length.
.addWithoutLengthPrefix(contractSalt.forHashing(contractIdVersion))
.addWithoutLengthPrefix(DeterministicEncoding.encodeInstant(ledgerTime.toInstant))
.addWithoutLengthPrefix(DeterministicEncoding.encodeInstant(ledgerCreateTime.toInstant))
// The hash of the contract instance has a fixed length, so we do not need a length prefix
.addWithoutLengthPrefix(suffixedContractInstance.contractHash.bytes.toByteString)
.finish()

private def computeUnicumV2Hash(
ledgerTime: CantonTimestamp,
ledgerCreateTime: LedgerCreateTime,
metadata: ContractMetadata,
suffixedContractInstance: SerializableRawContractInstance,
contractSalt: Salt,
@ -219,7 +225,7 @@ class UnicumGenerator(cryptoOps: HashOps with HmacOps) {
// The salt's length is determined by the hash algorithm and the contract ID version determines the hash algorithm,
// so salts have fixed length.
.addWithoutLengthPrefix(contractSalt.forHashing(contractIdVersion))
.addWithoutLengthPrefix(DeterministicEncoding.encodeInstant(ledgerTime.toInstant))
.addWithoutLengthPrefix(DeterministicEncoding.encodeInstant(ledgerCreateTime.toInstant))
.add(
DeterministicEncoding.encodeSeqWith(metadata.signatories.toSeq.sorted)(
DeterministicEncoding.encodeParty
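
Threading `LedgerCreateTime` instead of a bare timestamp means the compiler now distinguishes the contract's create time from any other timestamp flowing through the unicum computation. A minimal sketch of the wrapper idea, with hypothetical simplified types (Canton's actual wrapper holds a `CantonTimestamp`):

import java.time.Instant

object CreateTimeWrapperSketch {
  final case class LedgerCreateTime(toInstant: Instant) // stand-in wrapper type

  // A signature taking LedgerCreateTime cannot accidentally be fed, say, a record time.
  def encodeCreateTime(createTime: LedgerCreateTime): Array[Byte] =
    s"${createTime.toInstant.getEpochSecond}:${createTime.toInstant.getNano}".getBytes("UTF-8")
}
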
@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC4iQ6l9+JpqJ6R
DrqJuyVmjvBK7ibL0VjJNs4taOyNYDzbm3oYqlc69A+Uqjf7cOclzHgW/NtuEKZE
F71e7iHbgexiOoDAbl3HFa/2LN7fGnp6q1kgXOKeQ/wJfPsM20pMoJTq6+ug+4L/
q/qSEOGK9DDi/5Jd1QYtFLiukqL5/ElfzEHWPO+5Tzrj81buKGlJMsjDZ6IGTzsZ
AgfQi+r3zzL0ICoi239vXIKeGoSz62Fm36wa+RlG3MB4QpBelfFMryBx9BVs/avY
sPe9VvBLqtwVppMDUTSfXTaydNDnkjrTwpdbJwMNHoR/Yd4QHNHqGDiUa6aF58H3
t2aAbQVjpkKB2epLl81o0hkv4cXhCQZciwxlvNN00Zl631WHoOp5buTLy6wzQAC6
TOnJW8Y9ZBcvoKMOtLATWYHGF7sJhnr7c9MKo6hu2uccAlISyOvB9uVOk9ygO6k3
DWM4IxJSOhCXVX+EOiy8RGg1tEPbVn2HBhm2iV6rgniURD0FoujDkNFJXBkJmTAW
GBbu8tzjTQdxlnnpw8J4LDcowyV/tWjx9f4eIsbqoyhBAircxCReUJYpwo5/QC3f
FYwXH8F6OlSEi4FdY3ExYrE1p5W37t8yQ+fSOAD8aWIg9K4HbIdYcy+wmLQojlSR
F+EO0Se/boQjiDywxONUuS907/sjqwIDAQABAoICAQCY12QbkQGlc7TJ/oIAG9Jx
bIiX90KI4/7+wXbpY5Mx0Xj2h4MMNOIFASp6frXZe3zn8L+ygoIYWo/rrCH8I45F
agnonf4P5BmmEE9qvWVnMl4phgxO4sAMR9DT5wFkd6jFaOeO4BKMhmTM8JucKYE1
fuWSzg1jzFHOOvBwHyH/nbcEMoCBOn1vZbEoJl9k6HGz3+0q84XeZfdT0ju4vnrB
/4p7arybnGOjT+MbomkxQA6vjyjUOmd2Ktb7hF5hCR4P3qkn2Pt6B20zD/hRiVWD
xqN9BzeSssq8CA+KpXh1pMc2p2CHbft3/UPO4bd0bvpqQt9yBuuo0vjz/KJYUnP3
2UaG6B/eolYTSHYtBx6RRH2TZ1p1mB4BR7bAuB4sfWgj3qELJDKXpNHzjwqbHyvK
jNg/ZSAuIURQLI20ndOP+goLMWyJX3s3LpgDEJrhtiUkeKRo7NXjEk+3RyuRVtOA
p+wNtmdshVNJ0wri/RCW3Cvn39bEBKkQA8RQTrqBgVNG9FEPZJk3trWWdHxuZ1Vr
KAAHVJ0GIQAkMKhDW8JtKGFi6ZWYGLoLqmHuUzivvlo6v0iGxz89bdbxLMhOrSc0
ro9qWds+B+/6pFvExtf6ABc/OoRJ0e/nMFVtKeaK0Q2KTBqAO4a7pL9pLOvICPfs
MorYU6h161jTXZp5hA7lcQKCAQEA6UAkiEjCU+S6vFFRICRY8lWM4tSs6GQaZ6SH
i0iMnJZSm9XrxvGgx5/edyy9g9NKSbxDsQQdc2fQ5oaJ+ChAEC2c12PAIlUC17/6
dqqNrqNgVT2651z5xLLNEkIqX+16fqVWRt+u3tq53zZ95eQi6PhkIyBsfarqMgRx
J4qfqs0o141RXUPRaJX+XYs5/UnZVwUXtHPUhikIIGzKzFHUPLs5uNEwUbQdUWdf
600ANu5yRY/Jy6XqMfGqZniOS4EeMT7v+CofW+XTpGzztxpnxHDgZONzl77TMymE
O1Wve18DvjJtBjK06f7+HEASl/z2DDDnlbyecrnR65gB6u6J+QKCAQEAyoiU/F4u
Oj34NQEpBCjbPCW/89Z9BV9E4YzQla6MyE6xlxsveJydmAMGAKLgFAncuyQJwAR+
v5Dam8bucLguwPlE63OCHlbp2wWU5DOGBa7dvnT8/EKHXEUWOzMMYh9+BMCkg8/T
aP0hl2URXL5poXS5Q4onFlUEhVqNKkG+/2Zs8gn+bhD3sQNfIuWaVS1iNpZUaLnm
Ea+A/gf9I/dygV40BAgaB5BRLXM1pTTJNKBbVCv+5ZElO/FdRwtLCj41rzisjSob
2uQxbGt2c4z9fY639YjTxBHhcvGrNeuPnmCL3CJ8wY38kKh0/bV6MunlTAziHFke
JTaYVJzHLYMjwwKCAQAiuc72j0aHMO1wLfcS/vZTFdOEzb70VjrsJT2cye9NPMAN
vVp94ulZ7v4y5fFNF/7eXW4Cg6dS0Cj0uSD/Vha7kd7g3lPziI6EymsikZ/IWUB1
UFxsJ5Zz517Kkhk28Ockxb7DjHe/a/byOW2i9UURRDG8XQ5p4zE8wxaJmYTN6/9h
oAN2DCu4E2SsTpcSk5UM9JAj6H7CfodcNHY41xHu/LzKPDKiZ1taDDbmlC8s/nD6
1D5p2Ei2qNECsn4U1aNGF6WoX7UNfHj26NWb9A1w3JE70/UYqWtOGac7IjWe+3VW
MrGyk4gFslXopM1f3WRQSlOgIlXApNOQ7K+UZ3EZAoIBAGLvYUf+gbp1tx+9V1nu
01mIX0IvzpanOTCs5t+XirH2rw3Pe9sBNoKhR3dcGPXw82B/dDvWdkwB6Kgb/zIU
5tHhcmdJ35AOiOIy0c6/1IJGL4/v+f0ISrMqFHgKscvk8lxD9pllFjK9JIGeH2Mc
qODz8eQqQnnIQhMabsbrou+EC0gY1a7SnwxbR0f2vPNK3iUoPkeCN/7/qpVtNZ0y
LT9A00v84So8t0rqcpQnOvp4MuTlMPjXdEbZ2uTvgVKZnQPhPXkfzseYnQQHYq+D
efixlO9eTYQD8AuEPkxyq4+EPg3k0uiCAZRwleWxluX0F8yUpoeOiQlIqBT9GNcE
rp0CggEAbyLH7THIKvDMDq49z4JKvHeF6FnwfqbnSGonbwlPXpJADSiHwKbgJuBy
c4PtWsKLAR/iiUNcmC8M3GTeNtoHmp+d8lvj9jjUpXvUt/oWwg5k4VtMZStdZ1Wl
KUnJuH0/8t3UtHzvHffhzpt61hjZMzT0ThxLkG2OYd0Xwg10tpQop/KXOol1+I1k
rpvP8v300LDHCwU0Ug2xiFxRw+7sZtTA4/8Nl7Nj1+KzQJUFpmV//E1XxU2/Kmhp
33zcG41hHx5SKW3FmOHw/omFRiNX+sGoUaPs0Uz3+dnquiADyZZci1Rv0w9CGvhr
IdiRR2cexNXF+WLvgrQxSI6HosPnxg==
-----END PRIVATE KEY-----
@ -5,6 +5,7 @@ package com.digitalasset.canton

import com.daml.ledger.api.v1.value.Identifier as ApiIdentifier
import com.daml.ledger.client.binding
import com.daml.ledger.javaapi.data.Identifier
import com.daml.lf.data.{FrontStack, ImmArray}
import com.daml.lf.transaction.NodeId
import com.daml.lf.transaction.test.NodeIdTransactionBuilder
@ -69,6 +70,14 @@ trait ComparesLfTransactions {
toIdentifier(s"${moduleName}:${entityName}")(toPackageId(packageId))
}
}

protected def templateIdFromIdentifier(
identifier: Identifier
): LfInterfaceId =
toIdentifier(s"${identifier.getModuleName}:${identifier.getEntityName}")(
toPackageId(identifier.getPackageId)
)

}

object ComparesLfTransactions {
@ -46,6 +46,10 @@ object GeneratorsLf {
Gen.stringOfN(8, Gen.alphaChar).map(LfChoiceName.assertFromString)
)

implicit val lfPackageId: Arbitrary[LfPackageId] = Arbitrary(
Gen.stringOfN(64, Gen.alphaChar).map(LfPackageId.assertFromString)
)

implicit val lfTemplateIdArb: Arbitrary[LfTemplateId] = Arbitrary(for {
packageName <- Gen.stringOfN(8, Gen.alphaChar)
moduleName <- Gen.stringOfN(8, Gen.alphaChar)
@ -3,10 +3,16 @@

package com.digitalasset.canton.config

import com.digitalasset.canton.config.CantonRequireTypes.{String185, String255, String68}
import com.digitalasset.canton.config.RequireTypes.*
import com.digitalasset.canton.{Generators, config}
import org.scalacheck.{Arbitrary, Gen}

import scala.concurrent.duration

object GeneratorsConfig {
import org.scalatest.EitherValues.*

// Refined Int
implicit val nonNegativeIntArb: Arbitrary[NonNegativeInt] = Arbitrary(
Gen.choose(0, Int.MaxValue).map(NonNegativeInt.tryCreate)
@ -22,4 +28,22 @@ object GeneratorsConfig {
implicit val positiveLongArb: Arbitrary[PositiveLong] = Arbitrary(
Gen.choose(1, Long.MaxValue).map(PositiveLong.tryCreate)
)

implicit val string68Arb: Arbitrary[String68] = Arbitrary(
Generators.lengthLimitedStringGen(String68)
)
implicit val string185Arb: Arbitrary[String185] = Arbitrary(
Generators.lengthLimitedStringGen(String185)
)
implicit val string255Arb: Arbitrary[String255] = Arbitrary(
Generators.lengthLimitedStringGen(String255)
)

implicit val nonNegativeFiniteDurationArb: Arbitrary[config.NonNegativeFiniteDuration] =
Arbitrary(
Arbitrary
.arbitrary[NonNegativeLong]
.map(i => scala.concurrent.duration.FiniteDuration(i.unwrap, duration.NANOSECONDS))
.map(d => config.NonNegativeFiniteDuration.fromDuration(d).value)
)
}
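
With `Arbitrary` instances such as the ones above in scope, ScalaCheck properties over the refined types become one-liners. A hedged usage sketch with a hypothetical refined type (plain ScalaCheck API only):

import org.scalacheck.{Arbitrary, Gen, Prop}

object RefinedArbUsageSketch {
  final case class PositiveLongLike(value: Long) {
    require(value > 0, "must be positive") // hypothetical stand-in for PositiveLong
  }

  implicit val positiveLongLikeArb: Arbitrary[PositiveLongLike] =
    Arbitrary(Gen.choose(1L, Long.MaxValue).map(PositiveLongLike(_)))

  // The implicit Arbitrary drives generation; the property holds by construction.
  val positivity: Prop = Prop.forAll { (n: PositiveLongLike) => n.value > 0 }
}
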
@ -65,7 +65,7 @@ object GeneratorsCrypto {
private lazy val loggerFactoryNotUsed =
NamedLoggerFactory.unnamedKey("test", "NotUsed-GeneratorsCrypto")

private lazy val cryptoFactory =
lazy val cryptoFactory =
TestingIdentityFactory(loggerFactoryNotUsed).forOwnerAndDomain(
DefaultTestIdentities.sequencerId
)
@ -76,6 +76,25 @@ object GeneratorsCrypto {
private lazy val privateCrypto = cryptoFactory.crypto.privateCrypto
private lazy val pureCryptoApi: CryptoPureApi = cryptoFactory.pureCrypto

implicit val signingPublicKeyArb: Arbitrary[SigningPublicKey] = Arbitrary(for {
id <- Arbitrary.arbitrary[Fingerprint]
format <- Arbitrary.arbitrary[CryptoKeyFormat]
key <- Arbitrary.arbitrary[ByteString]
scheme <- Arbitrary.arbitrary[SigningKeyScheme]
} yield new SigningPublicKey(id, format, key, scheme))

implicit val encryptionPublicKeyArb: Arbitrary[EncryptionPublicKey] = Arbitrary(for {
id <- Arbitrary.arbitrary[Fingerprint]
format <- Arbitrary.arbitrary[CryptoKeyFormat]
key <- Arbitrary.arbitrary[ByteString]
scheme <- Arbitrary.arbitrary[EncryptionKeyScheme]
} yield new EncryptionPublicKey(id, format, key, scheme))

// TODO(#14515) Check that the generator is exhaustive
implicit val publicKeyArb: Arbitrary[PublicKey] = Arbitrary(
Gen.oneOf(Arbitrary.arbitrary[SigningPublicKey], Arbitrary.arbitrary[EncryptionPublicKey])
)

def sign(str: String, purpose: HashPurpose)(implicit
executionContext: ExecutionContext
): Signature = {
@ -12,7 +12,7 @@ import com.digitalasset.canton.data.ActionDescription.{
FetchActionDescription,
LookupByKeyActionDescription,
}
import com.digitalasset.canton.data.ViewPosition.MerklePathElement
import com.digitalasset.canton.data.ViewPosition.{MerklePathElement, MerkleSeqIndex}
import com.digitalasset.canton.ledger.api.DeduplicationPeriod
import com.digitalasset.canton.protocol.{
ConfirmationPolicy,
@ -57,7 +57,23 @@ object GeneratorsData {
Gen.choose(0, tenYears.getSeconds).map(CantonTimestampSecond.ofEpochSecond)
)

implicit val merklePathElementArg: Arbitrary[MerklePathElement] = genArbitrary
implicit val viewPositionArb: Arbitrary[ViewPosition] = Arbitrary(
Gen.listOf(merklePathElementArg.arbitrary).map(ViewPosition(_))
)

// If this pattern match is not exhaustive anymore, update the generator below
{
((_: MerklePathElement) match {
case _: ViewPosition.ListIndex =>
() // This one is excluded because it is not made to be serialized
case _: ViewPosition.MerkleSeqIndex => ()
case _: ViewPosition.MerkleSeqIndexFromRoot =>
() // This one is excluded because it is not made to be serialized
}).discard
}
implicit val merklePathElementArg: Arbitrary[MerklePathElement] = Arbitrary(
Arbitrary.arbitrary[MerkleSeqIndex]
)

implicit val commonMetadataArb: Arbitrary[CommonMetadata] = Arbitrary(
for {
@ -319,4 +335,21 @@ object GeneratorsData {
pv,
)
)

// If this pattern match is not exhaustive anymore, update the generator below
{
((_: ViewType) match {
case ViewType.TransactionViewType => ()
case _: ViewType.TransferViewType => ()
case _: ViewTypeTest => () // Only for tests, so we don't use it in the generator
}).discard
}
implicit val viewTypeArb: Arbitrary[ViewType] = Arbitrary(
Gen.oneOf[ViewType](
ViewType.TransactionViewType,
ViewType.TransferInViewType,
ViewType.TransferOutViewType,
)
)

}
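
The discarded pattern matches above are compile-time tripwires: because the matched hierarchy is sealed, adding a subtype makes the match non-exhaustive and the compiler flags it, pointing maintainers at the generator that needs updating. The trick in isolation:

object ExhaustivenessGuardSketch {
  sealed trait Shape
  case object Circle extends Shape
  case object Square extends Shape

  // Never executed for effect; exists only so the compiler checks exhaustiveness.
  // Adding a new Shape without extending the generator triggers a warning here.
  private def guard(s: Shape): Unit = s match {
    case Circle => ()
    case Square => ()
  }
}
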
@ -34,6 +34,10 @@ object GeneratorsTransferData {
import org.scalatest.EitherValues.*

@SuppressWarnings(Array("com.digitalasset.canton.GlobalExecutionContext"))
/*
Execution context is needed for crypto operations. Since wiring a proper ec would be
too complex here, using the global one.
*/
private implicit val ec = ExecutionContext.global

implicit val transferInCommonData: Arbitrary[TransferInCommonData] = Arbitrary(
@ -5,7 +5,7 @@ package com.digitalasset.canton.protocol

import com.digitalasset.canton.BaseTest
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.examples.Iou.Iou
import com.digitalasset.canton.examples.java.iou.Iou
import com.digitalasset.canton.version.ProtocolVersion
import org.scalatest.wordspec.AnyWordSpec
|
@ -26,6 +26,7 @@ import com.digitalasset.canton.data.ViewPosition.MerklePathElement
|
||||
import com.digitalasset.canton.data.*
|
||||
import com.digitalasset.canton.ledger.api.DeduplicationPeriod.DeduplicationDuration
|
||||
import com.digitalasset.canton.protocol.ExampleTransactionFactory.{contractInstance, *}
|
||||
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
|
||||
import com.digitalasset.canton.topology.client.TopologySnapshot
|
||||
import com.digitalasset.canton.topology.transaction.ParticipantPermission.{
|
||||
Confirmation,
|
||||
@ -289,7 +290,7 @@ object ExampleTransactionFactory {
|
||||
contractId,
|
||||
asSerializableRaw(contractInstance, agreementText),
|
||||
metadata,
|
||||
ledgerTime,
|
||||
LedgerCreateTime(ledgerTime),
|
||||
salt,
|
||||
)
|
||||
|
||||
@ -513,7 +514,7 @@ class ExampleTransactionFactory(
|
||||
viewPosition,
|
||||
viewParticipantDataSalt,
|
||||
createIndex,
|
||||
ledgerTime,
|
||||
LedgerCreateTime(ledgerTime),
|
||||
metadata,
|
||||
asSerializableRaw(suffixedContractInstance, agreementText),
|
||||
cantonContractIdVersion,
|
||||
|
@ -8,7 +8,8 @@ import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto
import com.digitalasset.canton.data.{CantonTimestamp, ViewPosition}
import com.digitalasset.canton.data.ViewPosition
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
import com.digitalasset.canton.time.NonNegativeFiniteDuration
import com.digitalasset.canton.topology.{DomainId, MediatorId, MediatorRef}
import com.digitalasset.canton.version.{GeneratorsVersion, ProtocolVersion}
@ -19,7 +20,7 @@ object GeneratorsProtocol {

import com.digitalasset.canton.Generators.*
import com.digitalasset.canton.GeneratorsLf.*
import com.digitalasset.canton.config.GeneratorsConfig.*
import com.digitalasset.canton.config.GeneratorsConfig.{nonNegativeFiniteDurationArb as _, *}
import com.digitalasset.canton.crypto.GeneratorsCrypto.*
import com.digitalasset.canton.data.GeneratorsData.*
import com.digitalasset.canton.time.GeneratorsTime.*
@ -70,67 +71,80 @@ object GeneratorsProtocol {
} yield parameters)
}

def dynamicDomainParametersGenFor(pv: ProtocolVersion): Gen[DynamicDomainParameters] =
for {
participantResponseTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration]
mediatorReactionTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration]
transferExclusivityTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration]
topologyChangeDelay <- Arbitrary.arbitrary[NonNegativeFiniteDuration]

mediatorDeduplicationMargin <- Arbitrary.arbitrary[NonNegativeFiniteDuration]
// Because of the potential multiplication by 2 below, we want a reasonably small value
ledgerTimeRecordTimeTolerance <- Gen
.choose(0L, 10000L)
.map(NonNegativeFiniteDuration.tryOfMicros)

representativePV = DynamicDomainParameters.protocolVersionRepresentativeFor(pv)

reconciliationInterval <- defaultValueArb(
representativePV,
DynamicDomainParameters.defaultReconciliationIntervalUntil,
)

maxRatePerParticipant <- defaultValueArb(
representativePV,
DynamicDomainParameters.defaultMaxRatePerParticipantUntil,
)

maxRequestSize <- defaultValueArb(
representativePV,
DynamicDomainParameters.defaultMaxRequestSizeUntil,
)

trafficControlConfig <- defaultValueArb(
representativePV,
DynamicDomainParameters.defaultTrafficControlParametersUntil,
)

// Starting from pv=4, there is an additional constraint on the mediatorDeduplicationTimeout
updatedMediatorDeduplicationTimeout =
if (pv > ProtocolVersion.v3)
ledgerTimeRecordTimeTolerance * NonNegativeInt.tryCreate(2) + mediatorDeduplicationMargin
else
ledgerTimeRecordTimeTolerance * NonNegativeInt.tryCreate(2)

// TODO(#14691) Use generator properly when dynamic domain parameters are properly versioned
sequencerAggregateSubmissionTimeout =
DynamicDomainParameters.defaultSequencerAggregateSubmissionTimeoutUntilExclusive.defaultValue

dynamicDomainParameters = DynamicDomainParameters.tryCreate(
participantResponseTimeout,
mediatorReactionTimeout,
transferExclusivityTimeout,
topologyChangeDelay,
ledgerTimeRecordTimeTolerance,
updatedMediatorDeduplicationTimeout,
reconciliationInterval,
maxRatePerParticipant,
maxRequestSize,
sequencerAggregateSubmissionTimeout,
trafficControlConfig,
)(representativePV)

} yield dynamicDomainParameters

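dynamicDomainParametersGenFor deliberately takes the protocol version as an argument so that properties can be checked per version. A hedged sketch of how such a version-indexed generator can be driven from a ScalaCheck property (toy types, not Canton's):

import org.scalacheck.{Gen, Prop}

final case class ProtocolVersion(v: Int)
final case class Params(timeoutMicros: Long)

// Version-indexed generator: the shape of the generated data depends on pv,
// loosely mirroring the pv > v3 mediator-deduplication rule in the diff above.
def paramsGenFor(pv: ProtocolVersion): Gen[Params] =
  for {
    t <- Gen.choose(0L, 10000L)
    adjusted = if (pv.v > 3) t * 2 else t
  } yield Params(adjusted)

// A property can then be instantiated for each version under test.
val evenTimeouts: Prop = Prop.forAll(paramsGenFor(ProtocolVersion(4))) { p =>
  p.timeoutMicros % 2 == 0
}
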
implicit val dynamicDomainParametersArb: Arbitrary[DynamicDomainParameters] = Arbitrary(for {
participantResponseTimeout <- nonNegativeFiniteDurationArb.arbitrary
mediatorReactionTimeout <- nonNegativeFiniteDurationArb.arbitrary
transferExclusivityTimeout <- nonNegativeFiniteDurationArb.arbitrary
topologyChangeDelay <- nonNegativeFiniteDurationArb.arbitrary

mediatorDeduplicationMargin <- nonNegativeFiniteDurationArb.arbitrary
// Because of the potential multiplication by 2 below, we want a reasonably small value
ledgerTimeRecordTimeTolerance <- Gen
.choose(0L, 10000L)
.map(NonNegativeFiniteDuration.tryOfMicros)

representativePV <- GeneratorsVersion.representativeProtocolVersionGen(DynamicDomainParameters)

reconciliationInterval <- defaultValueArb(
representativePV,
DynamicDomainParameters.defaultReconciliationIntervalUntil,
)

maxRatePerParticipant <- defaultValueArb(
representativePV,
DynamicDomainParameters.defaultMaxRatePerParticipantUntil,
)

maxRequestSize <- defaultValueArb(
representativePV,
DynamicDomainParameters.defaultMaxRequestSizeUntil,
)

trafficControlConfig <- defaultValueArb(
representativePV,
DynamicDomainParameters.defaultTrafficControlParametersUntil,
)

// Starting from pv=4, there is an additional constraint on the mediatorDeduplicationTimeout
updatedMediatorDeduplicationTimeout =
if (representativePV.representative > ProtocolVersion.v3)
ledgerTimeRecordTimeTolerance * NonNegativeInt.tryCreate(2) + mediatorDeduplicationMargin
else
ledgerTimeRecordTimeTolerance * NonNegativeInt.tryCreate(2)

// TODO(#14691) Use generator properly when dynamic domain parameters are properly versioned
sequencerAggregateSubmissionTimeout =
DynamicDomainParameters.defaultSequencerAggregateSubmissionTimeoutUntilExclusive.defaultValue

dynamicDomainParameters = DynamicDomainParameters.tryCreate(
participantResponseTimeout,
mediatorReactionTimeout,
transferExclusivityTimeout,
topologyChangeDelay,
ledgerTimeRecordTimeTolerance,
updatedMediatorDeduplicationTimeout,
reconciliationInterval,
maxRatePerParticipant,
maxRequestSize,
sequencerAggregateSubmissionTimeout,
trafficControlConfig,
)(representativePV)

dynamicDomainParameters <- dynamicDomainParametersGenFor(representativePV.representative)
} yield dynamicDomainParameters)

implicit val rootHashArb: Arbitrary[RootHash] = Arbitrary(
Arbitrary.arbitrary[Hash].map(RootHash(_))
)
implicit val viewHashArb: Arbitrary[ViewHash] = Arbitrary(
Arbitrary.arbitrary[Hash].map(ViewHash(_))
)

implicit val confirmationPolicyArb: Arbitrary[ConfirmationPolicy] = genArbitrary

implicit val serializableRawContractInstanceArb: Arbitrary[SerializableRawContractInstance] =
@ -173,7 +187,7 @@ object GeneratorsProtocol {
for {
rawContractInstance <- Arbitrary.arbitrary[SerializableRawContractInstance]
metadata <- contractMetadataArb(canHaveEmptyKey).arbitrary
ledgerCreateTime <- Arbitrary.arbitrary[CantonTimestamp]
ledgerCreateTime <- Arbitrary.arbitrary[LedgerCreateTime]

contractIdVersion <- Gen.oneOf(contractIdVersions)

@ -190,7 +204,7 @@ object GeneratorsProtocol {
viewPosition = ViewPosition(List.empty),
viewParticipantDataSalt = TestSalt.generateSalt(saltIndex),
createIndex = 0,
ledgerTime = ledgerCreateTime,
ledgerCreateTime = ledgerCreateTime,
metadata = metadata,
suffixedContractInstance = rawContractInstance,
contractIdVersion = contractIdVersion,

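With ledgerCreateTime now a LedgerCreateTime, the generator asks for Arbitrary[LedgerCreateTime] rather than Arbitrary[CantonTimestamp]. For a single-field wrapper this is typically one map away; a sketch under that assumption (stand-in types, illustration only):

import org.scalacheck.{Arbitrary, Gen}

final case class CantonTimestamp(micros: Long)          // stand-in for the real type
final case class LedgerCreateTime(ts: CantonTimestamp)  // wrapper shape from the diff

implicit val cantonTimestampArb: Arbitrary[CantonTimestamp] =
  Arbitrary(Gen.choose(0L, Long.MaxValue).map(CantonTimestamp(_)))

// Lift the existing Arbitrary through the wrapper's constructor.
implicit val ledgerCreateTimeArb: Arbitrary[LedgerCreateTime] =
  Arbitrary(Arbitrary.arbitrary[CantonTimestamp].map(LedgerCreateTime(_)))
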
@ -7,6 +7,7 @@ import com.daml.lf.data.Bytes
import com.daml.lf.value.Value
import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, TestHash, TestSalt}
import com.digitalasset.canton.data.{CantonTimestamp, ProcessedDisclosedContract}
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.canton.{BaseTest, LfPartyId, LfTimestamp, LfValue}
import org.scalatest.wordspec.AnyWordSpec
@ -96,7 +97,7 @@ class SerializableContractTest extends AnyWordSpec with BaseTest {
)
.value,
metadata = ContractMetadata.tryCreate(Set(alice), Set(alice), None),
ledgerCreateTime = CantonTimestamp(createdAt),
ledgerCreateTime = LedgerCreateTime(CantonTimestamp(createdAt)),
contractSalt = Some(contractSalt),
)
}

@ -3,14 +3,14 @@

package com.digitalasset.canton.protocol

import com.daml.ledger.client.binding
import com.daml.ledger.javaapi.data.Identifier
import com.daml.lf.transaction.test.TestNodeBuilder.CreateKey
import com.daml.lf.transaction.test.{TestNodeBuilder, TransactionBuilder}
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.ComparesLfTransactions.{TxTree, buildLfTransaction}
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.examples.Iou
import com.digitalasset.canton.examples.java.iou
import com.digitalasset.canton.protocol.RollbackContext.RollbackScope
import com.digitalasset.canton.protocol.WellFormedTransaction.WithSuffixes
import com.digitalasset.canton.topology.{PartyId, UniqueIdentifier}
@ -43,10 +43,10 @@ class WellFormedTransactionMergeTest
import TransactionBuilder.Implicits.*

private val subTxTree0 = TxTree(
tb.fetch(create(newLfContractId(), Iou.Iou.id, alice, bob), byKey = false)
tb.fetch(create(newLfContractId(), iou.Iou.TEMPLATE_ID, alice, bob), byKey = false)
)
private val subTxTree1 = TxTree(create(newLfContractId(), Iou.Iou.id, alice, bob))
private val contractCreate = create(newLfContractId(), Iou.Iou.id, alice, alice)
private val subTxTree1 = TxTree(create(newLfContractId(), iou.Iou.TEMPLATE_ID, alice, bob))
private val contractCreate = create(newLfContractId(), iou.Iou.TEMPLATE_ID, alice, alice)
private val subTxTree2 = Seq(
TxTree(contractCreate),
TxTree(tb.fetch(contractCreate, byKey = false)),
@ -64,7 +64,7 @@ class WellFormedTransactionMergeTest
TxTree(
create(
newLfContractId(),
Iou.GetCash.id,
iou.GetCash.TEMPLATE_ID,
alice,
alice,
arg = args(
@ -77,10 +77,12 @@ class WellFormedTransactionMergeTest
),
),
)
private val subTxTree3 = TxTree(create(newLfContractId(), Iou.Iou.id, carol, alice, Seq(bob)))
private val subTxTree3 = TxTree(
create(newLfContractId(), iou.Iou.TEMPLATE_ID, carol, alice, Seq(bob))
)
private val subTxTree4 = TxTree(
tb.exercise(
contract = create(newLfContractId(), Iou.Iou.id, bob, bob),
contract = create(newLfContractId(), iou.Iou.TEMPLATE_ID, bob, bob),
choice = "Archive",
consuming = true,
actingParties = Set(bob.toLf),
@ -297,7 +299,7 @@ class WellFormedTransactionMergeTest

private def create[T](
cid: LfContractId,
template: binding.Primitive.TemplateId[T],
template: Identifier,
payer: PartyId,
owner: PartyId,
viewers: Seq[PartyId] = Seq.empty,
@ -308,13 +310,13 @@ class WellFormedTransactionMergeTest
val lfViewers = viewers.map(_.toLf)
val lfObservers = Set(lfOwner) ++ lfViewers.toSet

val templateId = templateIdFromTemplate(template)
val lfTemplateId = templateIdFromIdentifier(template)

tb.create(
id = cid,
templateId = templateId,
templateId = lfTemplateId,
argument = template match {
case Iou.Iou.id =>
case iou.Iou.TEMPLATE_ID =>
require(
arg == notUsed,
"For IOUs, this function figures out the sig and obs parameters by itself",

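The test now builds template IDs from the Java codegen (iou.Iou.TEMPLATE_ID, a javaapi.data.Identifier) instead of the Scala binding's Primitive.TemplateId. A sketch of what a templateIdFromIdentifier-style helper can look like; the helper name and its exact signature in Canton's test utilities are assumptions, only the conversion direction is taken from the diff:

import com.daml.ledger.javaapi.data.Identifier
import com.daml.lf.data.Ref

// Convert a Java-codegen identifier into an LF Ref.Identifier, which is
// what the LF transaction builders ultimately consume.
def templateIdFromIdentifier(id: Identifier): Ref.Identifier =
  Ref.Identifier(
    Ref.PackageId.assertFromString(id.getPackageId),
    Ref.QualifiedName(
      Ref.ModuleName.assertFromString(id.getModuleName),
      Ref.DottedName.assertFromString(id.getEntityName),
    ),
  )
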
@ -3,9 +3,10 @@

package com.digitalasset.canton.protocol.messages

import com.daml.error.ErrorCategory
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.data.CantonTimestampSecond
import com.digitalasset.canton.crypto.{GeneratorsCrypto, Signature}
import com.digitalasset.canton.data.{CantonTimestampSecond, ViewPosition, ViewType}
import com.digitalasset.canton.error.GeneratorsError
import com.digitalasset.canton.protocol.messages.LocalReject.ConsistencyRejections.{
DuplicateKey,
@ -17,7 +18,6 @@ import com.digitalasset.canton.protocol.messages.LocalReject.ConsistencyRejectio
import com.digitalasset.canton.protocol.messages.LocalReject.MalformedRejects.{
BadRootHashMessages,
CreatesExistingContracts,
MalformedRequest,
ModelConformance,
Payloads,
}
@ -33,23 +33,37 @@ import com.digitalasset.canton.protocol.messages.LocalReject.TransferInRejects.{
ContractIsLocked,
}
import com.digitalasset.canton.protocol.messages.LocalReject.TransferOutRejects.ActivenessCheckFailed
import com.digitalasset.canton.protocol.messages.Verdict.ParticipantReject
import com.digitalasset.canton.protocol.{RequestId, RootHash, TransferDomainId, ViewHash}
import com.digitalasset.canton.time.PositiveSeconds
import com.digitalasset.canton.topology.transaction.GeneratorsTransaction
import com.digitalasset.canton.topology.{DomainId, ParticipantId}
import com.digitalasset.canton.version.RepresentativeProtocolVersion
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.version.{ProtocolVersion, RepresentativeProtocolVersion}
import magnolify.scalacheck.auto.*
import org.scalacheck.{Arbitrary, Gen}

import scala.Ordered.orderingToOrdered
import scala.concurrent.duration.*
import scala.concurrent.{Await, ExecutionContext}

object GeneratorsMessages {
import com.digitalasset.canton.topology.GeneratorsTopology.*
import com.digitalasset.canton.version.GeneratorsVersion.*
import com.digitalasset.canton.Generators.*
import com.digitalasset.canton.GeneratorsLf.*
import com.digitalasset.canton.crypto.GeneratorsCrypto.*
import com.digitalasset.canton.data.GeneratorsData.*
import com.digitalasset.canton.protocol.GeneratorsProtocol.*
import org.scalatest.EitherValues.*

implicit val acsCommitmentArb = Arbitrary(
@SuppressWarnings(Array("com.digitalasset.canton.GlobalExecutionContext"))
/*
Execution context is needed for crypto operations. Since wiring a proper ec would be
too complex here, using the global one.
*/
private implicit val ec = ExecutionContext.global

implicit val acsCommitmentArb: Arbitrary[AcsCommitment] = Arbitrary(
for {
domainId <- Arbitrary.arbitrary[DomainId]
sender <- Arbitrary.arbitrary[ParticipantId]
@ -75,133 +89,361 @@ object GeneratorsMessages {
implicit val protoMediatorRejectionCodeArb
: Arbitrary[com.digitalasset.canton.protocol.v0.MediatorRejection.Code] = genArbitrary

private implicit val mediatorRejectV0Gen: Gen[Verdict.MediatorRejectV0] = {
import com.digitalasset.canton.protocol.v0.MediatorRejection.Code
for {
code <- protoMediatorRejectionCodeArb.arbitrary
if {
code match {
case Code.MissingCode | Code.Unrecognized(_) => false
case _ => true
object GeneratorsVerdict {
private implicit val mediatorRejectV0Gen: Gen[Verdict.MediatorRejectV0] = {
import com.digitalasset.canton.protocol.v0.MediatorRejection.Code
for {
code <- protoMediatorRejectionCodeArb.arbitrary
if {
code match {
case Code.MissingCode | Code.Unrecognized(_) => false
case _ => true
}
}
}
reason <- Gen.alphaNumStr
} yield Verdict.MediatorRejectV0.tryCreate(code, reason)
}

private def mediatorRejectV1Gen(
pv: RepresentativeProtocolVersion[Verdict.type]
): Gen[Verdict.MediatorRejectV1] = for {
cause <- Gen.alphaNumStr
id <- Gen.alphaNumStr
damlError <- GeneratorsError.damlErrorCategoryArb.arbitrary
} yield Verdict.MediatorRejectV1.tryCreate(cause, id, damlError.asInt, pv)

private def mediatorRejectV2Gen(
pv: RepresentativeProtocolVersion[Verdict.type]
): Gen[Verdict.MediatorRejectV2] =
// TODO(#14515): do we want randomness here?
Gen.const {
val status = com.google.rpc.status.Status()
Verdict.MediatorRejectV2.tryCreate(status, pv)
reason <- Gen.alphaNumStr
} yield Verdict.MediatorRejectV0.tryCreate(code, reason)
}

private def mediatorRejectGen(
pv: RepresentativeProtocolVersion[Verdict.type]
): Gen[Verdict.MediatorReject] = {
if (
pv >= Verdict.protocolVersionRepresentativeFor(
Verdict.MediatorRejectV2.firstApplicableProtocolVersion
private def mediatorRejectV1Gen(
pv: RepresentativeProtocolVersion[Verdict.type]
): Gen[Verdict.MediatorRejectV1] = for {
cause <- Gen.alphaNumStr
id <- Gen.alphaNumStr
damlError <- GeneratorsError.damlErrorCategoryArb.arbitrary
} yield Verdict.MediatorRejectV1.tryCreate(cause, id, damlError.asInt, pv)

private def mediatorRejectV2Gen(
pv: RepresentativeProtocolVersion[Verdict.type]
): Gen[Verdict.MediatorRejectV2] =
// TODO(#14515): do we want randomness here?
Gen.const {
val status = com.google.rpc.status.Status(com.google.rpc.Code.CANCELLED_VALUE)
Verdict.MediatorRejectV2.tryCreate(status, pv)
}

// If this pattern match is not exhaustive anymore, update the generator below
{
((_: Verdict.MediatorReject) match {
case _: Verdict.MediatorRejectV0 => ()
case _: Verdict.MediatorRejectV1 => ()
case _: Verdict.MediatorRejectV2 => ()
}).discard
}

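The block ending above is a small compile-time trick worth calling out: matching a dummy function over the sealed hierarchy makes the build flag this file if someone adds a MediatorReject variant without updating the generator. A self-contained sketch of the pattern with toy types (the diff uses the equivalent ((_: T) match { ... }).discard spelling):

sealed trait Shape
final case class Circle(r: Double) extends Shape
final case class Square(side: Double) extends Shape

object ShapeGenerators {
  // Never invoked: if a new Shape subclass appears, this match becomes
  // non-exhaustive and the compiler warns here, right next to the
  // generator that must be extended.
  locally {
    val _: Shape => Unit = {
      case _: Circle => ()
      case _: Square => ()
    }
  }
}
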
private[messages] def mediatorRejectGen(
pv: RepresentativeProtocolVersion[Verdict.type]
): Gen[Verdict.MediatorReject] = {
if (
pv >= Verdict.protocolVersionRepresentativeFor(
Verdict.MediatorRejectV2.firstApplicableProtocolVersion
)
) mediatorRejectV2Gen(pv)
else if (
pv >= Verdict.protocolVersionRepresentativeFor(
Verdict.MediatorRejectV1.firstApplicableProtocolVersion
)
) mediatorRejectV1Gen(pv)
else mediatorRejectV0Gen
}

// TODO(#14515) Check that the generator is exhaustive
implicit val mediatorRejectArb: Arbitrary[Verdict.MediatorReject] =
Arbitrary(
representativeProtocolVersionGen(Verdict).flatMap(mediatorRejectGen)
)
) mediatorRejectV2Gen(pv)
else if (
pv >= Verdict.protocolVersionRepresentativeFor(
Verdict.MediatorRejectV1.firstApplicableProtocolVersion
)
) mediatorRejectV1Gen(pv)
else mediatorRejectV0Gen
}

// TODO(#14515) Check that the generator is exhaustive
implicit val mediatorRejectArb: Arbitrary[Verdict.MediatorReject] = Arbitrary(
representativeProtocolVersionGen(Verdict).flatMap(mediatorRejectGen)
)

// TODO(#14515) Check that the generator is exhaustive
private lazy val localRejectImplGen: Gen[LocalRejectImpl] = {
import LocalReject.*
val resources = List("resource1", "resource2")
val details = "details"

val builders: Seq[RepresentativeProtocolVersion[LocalVerdict.type] => LocalRejectImpl] = Seq(
LockedContracts.Reject(resources),
LockedKeys.Reject(resources),
InactiveContracts.Reject(resources),
DuplicateKey.Reject(resources),
InconsistentKey.Reject(resources),
LedgerTime.Reject(details),
SubmissionTime.Reject(details),
LocalTimeout.Reject(),
ActivenessCheckFailed.Reject(details),
ContractAlreadyArchived.Reject(details),
ContractAlreadyActive.Reject(details),
ContractIsLocked.Reject(details),
AlreadyCompleted.Reject(details),
GenericReject("cause", details, resources, "some id", ErrorCategory.TransientServerFailure),
private val verdictApproveArb: Arbitrary[Verdict.Approve] = Arbitrary(
representativeProtocolVersionGen(Verdict).map(Verdict.Approve())
)

for {
pv <- representativeProtocolVersionGen(LocalVerdict)
builder <- Gen.oneOf(builders)
} yield builder(pv)
private def participantRejectGenFor(pv: ProtocolVersion): Gen[Verdict.ParticipantReject] =
nonEmptyListGen[(Set[LfPartyId], LocalReject)](
GeneratorsLocalVerdict.participantRejectReasonArbFor(pv)
).map { reasons =>
Verdict.ParticipantReject(reasons)(Verdict.protocolVersionRepresentativeFor(pv))
}

// If this pattern match is not exhaustive anymore, update the generator below
{
((_: Verdict) match {
case _: Verdict.Approve => ()
case _: Verdict.MediatorReject => ()
case _: Verdict.ParticipantReject => ()
}).discard
}
def verdictGenFor(pv: ProtocolVersion): Gen[Verdict] = {
val rpv = Verdict.protocolVersionRepresentativeFor(pv)
Gen.oneOf(
verdictApproveArb.arbitrary,
mediatorRejectGen(rpv),
participantRejectGenFor(pv),
)
}

implicit val verdictArb: Arbitrary[Verdict] = Arbitrary(
Arbitrary.arbitrary[ProtocolVersion].flatMap(verdictGenFor(_))
)
}

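verdictArb above composes two layers of generation: first draw a ProtocolVersion, then draw a verdict from the generator for that version. That is ordinary monadic composition on Gen; a sketch with toy types:

import org.scalacheck.{Arbitrary, Gen}

final case class ProtocolVersion(v: Int)
sealed trait Verdict
final case class Approve(pv: ProtocolVersion) extends Verdict
final case class Reject(pv: ProtocolVersion, reason: String) extends Verdict

implicit val pvArb: Arbitrary[ProtocolVersion] =
  Arbitrary(Gen.choose(1, 6).map(ProtocolVersion(_)))

def verdictGenFor(pv: ProtocolVersion): Gen[Verdict] = {
  val approve: Gen[Verdict] = Gen.const(Approve(pv))
  val reject: Gen[Verdict] = Gen.alphaNumStr.map(Reject(pv, _))
  Gen.oneOf(approve, reject)
}

// flatMap chains the two draws: version first, then a verdict for it.
implicit val verdictArb: Arbitrary[Verdict] =
  Arbitrary(Arbitrary.arbitrary[ProtocolVersion].flatMap(verdictGenFor))
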
// TODO(#14515) Check that the generator is exhaustive
private lazy val localVerdictMalformedGen: Gen[Malformed] = {
val resources = List("resource1", "resource2")
val details = "details"
object GeneratorsLocalVerdict {
// TODO(#14515) Check that the generator is exhaustive
private def localRejectImplGen(pv: ProtocolVersion): Gen[LocalRejectImpl] = {
val resources = List("resource1", "resource2")
val details = "details"

val builders: Seq[RepresentativeProtocolVersion[LocalVerdict.type] => Malformed] = Seq(
MalformedRequest.Reject(details),
Payloads.Reject(details),
ModelConformance.Reject(details),
BadRootHashMessages.Reject(details),
CreatesExistingContracts.Reject(resources),
val builders: Seq[RepresentativeProtocolVersion[LocalVerdict.type] => LocalRejectImpl] = Seq(
LockedContracts.Reject(resources),
LockedKeys.Reject(resources),
InactiveContracts.Reject(resources),
DuplicateKey.Reject(resources),
InconsistentKey.Reject(resources),
LedgerTime.Reject(details),
SubmissionTime.Reject(details),
LocalTimeout.Reject(),
ActivenessCheckFailed.Reject(details),
ContractAlreadyArchived.Reject(details),
ContractAlreadyActive.Reject(details),
ContractIsLocked.Reject(details),
AlreadyCompleted.Reject(details),
/*
GenericReject is intentionally excluded
Reason: it should not be serialized.
*/
// GenericReject("cause", details, resources, "SOME_ID", ErrorCategory.TransientServerFailure),
)

val rpv = LocalVerdict.protocolVersionRepresentativeFor(pv)
Gen.oneOf(builders).map(_(rpv))
}

// TODO(#14515) Check that the generator is exhaustive
private def localVerdictMalformedGen(pv: ProtocolVersion): Gen[Malformed] = {
val resources = List("resource1", "resource2")
val details = "details"

val builders: Seq[RepresentativeProtocolVersion[LocalVerdict.type] => Malformed] = Seq(
/*
MalformedRequest.Reject is intentionally excluded
For backward-compatibility reasons, its `v0.LocalReject.Code` does not correspond to the id
(`v0.LocalReject.Code.MalformedPayloads` vs "LOCAL_VERDICT_MALFORMED_REQUEST")
*/
// MalformedRequest.Reject(details),
Payloads.Reject(details),
ModelConformance.Reject(details),
BadRootHashMessages.Reject(details),
CreatesExistingContracts.Reject(resources),
)

val rpv = LocalVerdict.protocolVersionRepresentativeFor(pv)
Gen.oneOf(builders).map(_(rpv))
}

// TODO(#14515) Check that the generator is exhaustive
private def localRejectGenFor(pv: ProtocolVersion): Gen[LocalReject] =
Gen.oneOf(localRejectImplGen(pv), localVerdictMalformedGen(pv))

private def localApproveGenFor(pv: ProtocolVersion): Gen[LocalApprove] =
Gen.const(LocalApprove(pv))

// If this pattern match is not exhaustive anymore, update the generator below
{
((_: LocalVerdict) match {
case _: LocalApprove => ()
case _: LocalReject => ()
}).discard
}

def localVerdictArbFor(pv: ProtocolVersion): Gen[LocalVerdict] =
Gen.oneOf(localApproveGenFor(pv), localRejectGenFor(pv))

implicit val localVerdictArb: Arbitrary[LocalVerdict] = Arbitrary(
for {
rpv <- representativeProtocolVersionGen(LocalVerdict)
localVerdict <- localVerdictArbFor(rpv.representative)
} yield localVerdict
)

for {
pv <- representativeProtocolVersionGen(LocalVerdict)
builder <- Gen.oneOf(builders)
} yield builder(pv)
private[GeneratorsMessages] def participantRejectReasonArbFor(
pv: ProtocolVersion
): Arbitrary[(Set[LfPartyId], LocalReject)] = Arbitrary(
for {
parties <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId])
reject <- localRejectGenFor(pv)
} yield (parties, reject)
)
}

// TODO(#14515) Check that the generator is exhaustive
implicit val localRejectArb: Arbitrary[LocalReject] = Arbitrary(
Gen.oneOf(localRejectImplGen, localVerdictMalformedGen)
)
implicit val transferResultArb: Arbitrary[TransferResult[TransferDomainId]] = Arbitrary(for {
pv <- Arbitrary.arbitrary[ProtocolVersion]
requestId <- Arbitrary.arbitrary[RequestId]
informees <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId])
domain <- Arbitrary.arbitrary[TransferDomainId]
verdict <- GeneratorsVerdict.verdictGenFor(pv)
} yield TransferResult.create(requestId, informees, domain, verdict, pv))

implicit val verdictApproveArb: Arbitrary[Verdict.Approve] = Arbitrary(
representativeProtocolVersionGen(Verdict).map(Verdict.Approve())
)
implicit val malformedMediatorRequestResultArb: Arbitrary[MalformedMediatorRequestResult] =
Arbitrary(
for {
pv <- Arbitrary.arbitrary[ProtocolVersion]
requestId <- Arbitrary.arbitrary[RequestId]
domainId <- Arbitrary.arbitrary[DomainId]
viewType <- Arbitrary.arbitrary[ViewType]
mediatorReject <- GeneratorsVerdict.mediatorRejectGen(
Verdict.protocolVersionRepresentativeFor(pv)
)
} yield MalformedMediatorRequestResult.tryCreate(
requestId,
domainId,
viewType,
mediatorReject,
pv,
)
)

implicit val participantRejectReasonArb: Arbitrary[(Set[LfPartyId], LocalReject)] = Arbitrary(
implicit val transactionResultMessage: Arbitrary[TransactionResultMessage] = Arbitrary(for {
pv <- Gen.oneOf(ProtocolVersion.v6, ProtocolVersion.CNTestNet)

verdict <- GeneratorsVerdict.verdictGenFor(pv)
rootHash <- Arbitrary.arbitrary[RootHash]
requestId <- Arbitrary.arbitrary[RequestId]
domainId <- Arbitrary.arbitrary[DomainId]

// TODO(#14241) Also generate instance that contains InformeeTree + make pv above cover all the values
} yield TransactionResultMessage(requestId, verdict, rootHash, domainId, pv))

implicit val mediatorResponseArb: Arbitrary[MediatorResponse] = Arbitrary(
for {
parties <- Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId])
reject <- localRejectArb.arbitrary
} yield (parties, reject)
pv <- Arbitrary.arbitrary[ProtocolVersion]
requestId <- Arbitrary.arbitrary[RequestId]
sender <- Arbitrary.arbitrary[ParticipantId]
localVerdict <- GeneratorsLocalVerdict.localVerdictArbFor(pv)

domainId <- Arbitrary.arbitrary[DomainId]

confirmingParties <- localVerdict match {
case _: Malformed =>
Gen.const(Set.empty[LfPartyId])
case _: LocalApprove | _: LocalReject =>
nonEmptySet(implicitly[Arbitrary[LfPartyId]]).arbitrary.map(_.forgetNE)
case _ => Gen.containerOf[Set, LfPartyId](Arbitrary.arbitrary[LfPartyId])
}

rootHash <- localVerdict match {
case _: LocalApprove | _: LocalReject => Gen.some(Arbitrary.arbitrary[RootHash])
case _ => Gen.option(Arbitrary.arbitrary[RootHash])
}

rpv = MediatorResponse.protocolVersionRepresentativeFor(pv)

viewHashO <- localVerdict match {
case _: LocalApprove | _: LocalReject
if rpv < MediatorResponse.protocolVersionRepresentativeFor(ProtocolVersion.v5) =>
Gen.some(Arbitrary.arbitrary[ViewHash])
case _ => Gen.option(Arbitrary.arbitrary[ViewHash])
}

viewPositionO <- localVerdict match {
case _: LocalApprove | _: LocalReject
if rpv >= MediatorResponse.protocolVersionRepresentativeFor(ProtocolVersion.v5) =>
Gen.some(Arbitrary.arbitrary[ViewPosition])
case _ => Gen.option(Arbitrary.arbitrary[ViewPosition])
}

} yield MediatorResponse.tryCreate(
requestId,
sender,
viewHashO,
viewPositionO,
localVerdict,
rootHash,
confirmingParties,
domainId,
pv,
)
)

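In the MediatorResponse generator above, later fields are constrained by the already-drawn localVerdict: approvals and rejections must carry a root hash, while Malformed verdicts must not name confirming parties. The general technique, generating a field conditionally on a previous draw, looks like this in isolation (toy types, illustration only):

import org.scalacheck.Gen

sealed trait Outcome
case object Approve extends Outcome
case object Malformed extends Outcome

final case class Response(outcome: Outcome, rootHash: Option[String])

val responseGen: Gen[Response] = for {
  outcome <- Gen.oneOf[Outcome](Approve, Malformed)
  rootHash <- outcome match {
    // An approval must reference a root hash: Gen.some always wraps in Some.
    case Approve   => Gen.some(Gen.identifier)
    // Malformed responses may or may not carry one: Gen.option may yield None.
    case Malformed => Gen.option(Gen.identifier)
  }
} yield Response(outcome, rootHash)
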
implicit val participantRejectArb: Arbitrary[ParticipantReject] = Arbitrary(for {
pv <- representativeProtocolVersionGen(Verdict)
reasons <- nonEmptyListGen[(Set[LfPartyId], LocalReject)](participantRejectReasonArb)
} yield ParticipantReject(reasons)(pv))
// TODO(#14515) Check that the generator is exhaustive
implicit val mediatorResultArb: Arbitrary[MediatorResult] = Arbitrary(
Gen.oneOf[MediatorResult](
Arbitrary.arbitrary[MalformedMediatorRequestResult],
Arbitrary.arbitrary[TransactionResultMessage],
Arbitrary.arbitrary[TransferResult[TransferDomainId]],
)
)

// TODO(#14515) Check that the generator is exhaustive
implicit val verdictArb: Arbitrary[Verdict] = Arbitrary(
implicit val signedProtocolMessageContentArb: Arbitrary[SignedProtocolMessageContent] = Arbitrary(
Gen.oneOf(
verdictApproveArb.arbitrary,
mediatorRejectArb.arbitrary,
participantRejectArb.arbitrary,
Arbitrary.arbitrary[AcsCommitment],
Arbitrary.arbitrary[MalformedMediatorRequestResult],
Arbitrary.arbitrary[MediatorResponse],
Arbitrary.arbitrary[MediatorResult],
)
)

implicit val typedSignedProtocolMessageContent
: Arbitrary[TypedSignedProtocolMessageContent[SignedProtocolMessageContent]] = Arbitrary(for {
pv <- representativeProtocolVersionGen(TypedSignedProtocolMessageContent)
content <- Arbitrary.arbitrary[SignedProtocolMessageContent]
} yield TypedSignedProtocolMessageContent(content, pv.representative))

implicit val signedProtocolMessageArb
: Arbitrary[SignedProtocolMessage[SignedProtocolMessageContent]] = Arbitrary(for {
rpv <- representativeProtocolVersionGen(SignedProtocolMessage)
typedMessage <- Arbitrary
.arbitrary[TypedSignedProtocolMessageContent[SignedProtocolMessageContent]]

signatures <- nonEmptyListGen(implicitly[Arbitrary[Signature]]).map { signatures =>
if (rpv >= SignedProtocolMessage.multipleSignaturesSupportedSince) signatures
else NonEmpty(List, signatures.head1)
}
} yield SignedProtocolMessage.create(typedMessage, signatures, rpv).value)

private implicit val emptyTraceContext: TraceContext = TraceContext.empty
private lazy val syncCrypto = GeneratorsCrypto.cryptoFactory.headSnapshot

private def domainTopologyTransactionMessageGenFor(
pv: ProtocolVersion
): Gen[DomainTopologyTransactionMessage] =
for {
transactions <- Gen.listOf(GeneratorsTransaction.signedTopologyTransactionGenFor(pv))
domainId <- Arbitrary.arbitrary[DomainId]
notSequencedAfter <- valueForEmptyOptionExactlyUntilExclusive(
pv,
DomainTopologyTransactionMessage.notSequencedAfterInvariant,
)
} yield Await.result(
DomainTopologyTransactionMessage.tryCreate(
transactions,
syncCrypto,
domainId,
notSequencedAfter,
pv,
),
10.seconds,
)

// TODO(#14241) Once we have more generators for merkle trees base classes, make these generators exhaustive
private def protocolMessageV1GenFor(pv: ProtocolVersion): Gen[ProtocolMessageV1] =
domainTopologyTransactionMessageGenFor(pv)
private def protocolMessageV2GenFor(pv: ProtocolVersion): Gen[ProtocolMessageV2] =
domainTopologyTransactionMessageGenFor(pv)
private def unsignedProtocolMessageV3GenFor(pv: ProtocolVersion): Gen[UnsignedProtocolMessageV3] =
domainTopologyTransactionMessageGenFor(pv)

// TODO(#14515) Check that the generator is exhaustive
implicit val envelopeContentArb: Arbitrary[EnvelopeContent] = Arbitrary(for {
rpv <- representativeProtocolVersionFilteredGen(EnvelopeContent)(
List(EnvelopeContent.representativeV0)
)
pv = rpv.representative
// We don't test EnvelopeContentV0 because it uses legacy converter which is incompatible with this test
protocolMessageGen = Map(
EnvelopeContent.representativeV1 -> protocolMessageV1GenFor(pv),
EnvelopeContent.representativeV2 -> protocolMessageV2GenFor(pv),
EnvelopeContent.representativeV3 -> unsignedProtocolMessageV3GenFor(pv),
)(rpv)
protocolMessage <- protocolMessageGen
} yield EnvelopeContent.tryCreate(protocolMessage, rpv.representative))

}

@ -9,8 +9,7 @@ import magnolify.scalacheck.auto.*
import org.scalacheck.Arbitrary

object GeneratorsTopology {
import com.digitalasset.canton.crypto.GeneratorsCrypto.*
import com.digitalasset.canton.sequencing.protocol.GeneratorsProtocol.*
import com.digitalasset.canton.config.GeneratorsConfig.*

implicit val identifierArb: Arbitrary[Identifier] = Arbitrary(
Generators.lengthLimitedStringGen(String185).map(s => Identifier.tryCreate(s.str))

@ -160,11 +160,7 @@ class AuthorizationGraphTest extends AnyWordSpec with BaseTestWordSpec {
graph.add(nsk3k2)
check(graph, key3, requireRoot = true, should = true)

graph.remove(
nsk3k2.copy(transaction =
nsk3k2.transaction.copy(key = key1)(signedTransactionProtocolVersionRepresentative, None)
)
)
graph.remove(nsk3k2.copy(transaction = nsk3k2.transaction.update(key = key1)))
check(graph, key3, requireRoot = true, should = false)
}

@ -181,9 +177,7 @@ class AuthorizationGraphTest extends AnyWordSpec with BaseTestWordSpec {
graph.add(nsk1k1)
graph.add(nsk2k1)
check(graph, key2, requireRoot = false, should = true)
val fakeRemove = nsk2k1.copy(transaction =
nsk2k1.transaction.copy(key = key6)(signedTransactionProtocolVersionRepresentative, None)
)
val fakeRemove = nsk2k1.copy(transaction = nsk2k1.transaction.update(key = key6))
graph.remove(fakeRemove) shouldBe false
check(graph, key2, requireRoot = false, should = true)
graph.remove(nsk2k1)
@ -207,10 +201,7 @@ class AuthorizationGraphTest extends AnyWordSpec with BaseTestWordSpec {
check(graph, key3, requireRoot = false, should = true)
check(graph, key2, requireRoot = true, should = true)
graph.remove(
nsk2k1.copy(transaction =
nsk2k1.transaction
.copy(key = key3)(signedTransactionProtocolVersionRepresentative, None)
)
nsk2k1.copy(transaction = nsk2k1.transaction.update(key = key3))
) shouldBe false
check(graph, key2, requireRoot = true, should = true)
}

@ -134,7 +134,7 @@ class DomainTopologyTransactionMessageValidatorTest
txs,
mgrCryptoClient.headSnapshot,
DefaultTestIdentities.domainId,
ts.plusMillis(10),
Some(ts.plusMillis(10)),
testedProtocolVersion,
)
.futureValue

@ -85,10 +85,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
"fail to add if the signature is invalid" in {
val validator = mk()
import Factory.*
val invalid = ns1k2_k1.copy(signature = ns1k1_k1.signature)(
signedTransactionProtocolVersionRepresentative,
None,
)
val invalid = ns1k2_k1.update(signature = ns1k1_k1.signature)
for {
(_, validatedTopologyTransactions) <- validator.validateAndUpdateHeadAuthState(
ts(0),

@ -48,10 +48,7 @@ class TopologyTransactionCollectionTest extends AnyWordSpec with BaseTest with H
EffectiveTime(tm),
None,
addStoredTx.transaction
.copy(transaction = reversedTransaction)(
addStoredTx.transaction.representativeProtocolVersion,
None,
)
.update(transaction = reversedTransaction)
.asInstanceOf[SignedTopologyTransaction[Remove]],
)
}

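Several tests above shrink from re-supplying the representative protocol version on every copy to a single update call. A sketch of the kind of helper that enables this; the real SignedTopologyTransaction.update signature is assumed from its call sites, and the toy types are hypothetical:

// Toy model: the second parameter list carries derived/contextual state
// that plain `copy` would force every caller to restate.
final case class SignedTx(key: String, signature: String)(
    val representativeProtocolVersion: Int
) {
  // `update` is copy-like but reuses the existing representative version,
  // so call sites stay a one-liner: tx.update(key = otherKey)
  def update(key: String = key, signature: String = signature): SignedTx =
    SignedTx(key, signature)(representativeProtocolVersion)
}
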
@ -0,0 +1,133 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.transaction

import better.files.File
import com.digitalasset.canton.crypto.{Signature, SigningPublicKey, X509CertificatePem}
import com.digitalasset.canton.protocol.GeneratorsProtocol
import com.digitalasset.canton.testing.utils.TestResourceUtils
import com.digitalasset.canton.topology.{DomainId, ParticipantId, UniqueIdentifier}
import com.digitalasset.canton.version.ProtocolVersion
import magnolify.scalacheck.auto.*
import org.scalacheck.{Arbitrary, Gen}

object GeneratorsTransaction {
import com.digitalasset.canton.GeneratorsLf.*
import com.digitalasset.canton.config.GeneratorsConfig.*
import com.digitalasset.canton.crypto.GeneratorsCrypto.*
import com.digitalasset.canton.protocol.GeneratorsProtocol.*
import com.digitalasset.canton.version.GeneratorsVersion.*
import com.digitalasset.canton.topology.GeneratorsTopology.*

implicit val addRemoveChangeOp: Arbitrary[AddRemoveChangeOp] = genArbitrary
implicit val topologyChangeOpArb: Arbitrary[TopologyChangeOp] = genArbitrary

implicit val ownerToKeyMappingArb: Arbitrary[OwnerToKeyMapping] = genArbitrary
private lazy val legalIdentityClaimEvidence: LegalIdentityClaimEvidence = {
val pemPath = TestResourceUtils.resourceFile("tls/participant.pem").toPath
LegalIdentityClaimEvidence.X509Cert(X509CertificatePem.tryFromFile(File(pemPath)))
}
implicit val legalIdentityClaimArb: Arbitrary[LegalIdentityClaim] = Arbitrary(
for {
pv <- Arbitrary.arbitrary[ProtocolVersion]
uid <- Arbitrary.arbitrary[UniqueIdentifier]
evidence = legalIdentityClaimEvidence
} yield LegalIdentityClaim.create(uid, evidence, pv)
)
implicit val signedLegalIdentityClaimArb: Arbitrary[SignedLegalIdentityClaim] = Arbitrary(
for {
legalIdentityClaim <- Arbitrary.arbitrary[LegalIdentityClaim]
signature <- Arbitrary.arbitrary[Signature]
} yield SignedLegalIdentityClaim.create(legalIdentityClaim, signature)
)
implicit val vettedPackagesArb: Arbitrary[VettedPackages] = genArbitrary

// If the pattern match is not exhaustive, update the list below and the generator of ParticipantState
{
((_: TrustLevel) match {
case TrustLevel.Ordinary => ()
case TrustLevel.Vip => ()
}).discard
}
private val trustLevels: Seq[TrustLevel] = Seq(TrustLevel.Vip, TrustLevel.Ordinary)
implicit val participantStateArb: Arbitrary[ParticipantState] = Arbitrary(for {
side <- Arbitrary.arbitrary[RequestSide]
domain <- Arbitrary.arbitrary[DomainId]
participant <- Arbitrary.arbitrary[ParticipantId]
permission <- Arbitrary.arbitrary[ParticipantPermission]
trustLevel <-
if (permission.canConfirm) Gen.oneOf(trustLevels) else Gen.const(TrustLevel.Ordinary)
} yield ParticipantState(side, domain, participant, permission, trustLevel))

implicit val topologyStateUpdateMappingArb: Arbitrary[TopologyStateUpdateMapping] = genArbitrary
implicit val topologyStateUpdateElementArb: Arbitrary[TopologyStateUpdateElement] = genArbitrary

def domainParametersChangeGenFor(pv: ProtocolVersion): Gen[DomainParametersChange] = for {
domainId <- Arbitrary.arbitrary[DomainId]
parameters <- GeneratorsProtocol.dynamicDomainParametersGenFor(pv)
} yield DomainParametersChange(domainId, parameters)

// If the pattern match is not exhaustive, update generator below
{
((_: DomainGovernanceMapping) match {
case _: DomainParametersChange => ()
}).discard
}
def domainGovernanceMappingGenFor(pv: ProtocolVersion): Gen[DomainGovernanceMapping] =
domainParametersChangeGenFor(pv)

def domainGovernanceElementGenFor(pv: ProtocolVersion): Gen[DomainGovernanceElement] =
domainGovernanceMappingGenFor(pv).map(DomainGovernanceElement)
implicit val domainGovernanceElementArb: Arbitrary[DomainGovernanceElement] = genArbitrary

implicit val topologyStateUpdateArb: Arbitrary[TopologyStateUpdate[AddRemoveChangeOp]] =
Arbitrary(for {
op <- Arbitrary.arbitrary[AddRemoveChangeOp]
element <- Arbitrary.arbitrary[TopologyStateUpdateElement]
rpv <- representativeProtocolVersionGen(TopologyTransaction)
} yield TopologyStateUpdate(op, element, rpv.representative))

implicit val domainGovernanceTransactionArb: Arbitrary[DomainGovernanceTransaction] =
Arbitrary(for {
rpv <- representativeProtocolVersionGen(TopologyTransaction)
element <- domainGovernanceElementGenFor(rpv.representative)
} yield DomainGovernanceTransaction(element, rpv.representative))
def domainGovernanceTransactionGenFor(pv: ProtocolVersion): Gen[DomainGovernanceTransaction] =
for {
element <- domainGovernanceElementGenFor(pv)
} yield DomainGovernanceTransaction(element, pv)

// If this pattern match is not exhaustive anymore, update the generator below
{
((_: TopologyTransaction[TopologyChangeOp]) match {
case _: TopologyStateUpdate[_] => ()
case _: DomainGovernanceTransaction => ()
}).discard
}
implicit val topologyTransactionArb: Arbitrary[TopologyTransaction[TopologyChangeOp]] = Arbitrary(
Gen.oneOf[TopologyTransaction[TopologyChangeOp]](
topologyStateUpdateArb.arbitrary,
domainGovernanceTransactionArb.arbitrary,
)
)

def signedTopologyTransactionGenFor(
pv: ProtocolVersion
): Gen[SignedTopologyTransaction[TopologyChangeOp]] = for {
transaction <- Arbitrary.arbitrary[TopologyTransaction[TopologyChangeOp]]
key <- Arbitrary.arbitrary[SigningPublicKey]
signature <- Arbitrary.arbitrary[Signature]
} yield SignedTopologyTransaction(
transaction,
key,
signature,
SignedTopologyTransaction.protocolVersionRepresentativeFor(pv),
)

implicit val signedTopologyTransactionArb
: Arbitrary[SignedTopologyTransaction[TopologyChangeOp]] = Arbitrary(for {
rpv <- representativeProtocolVersionGen(SignedTopologyTransaction)
tx <- signedTopologyTransactionGenFor(rpv.representative)
} yield tx)
}

@ -25,7 +25,7 @@ class TopologyChangeOpTest extends AnyWordSpec with BaseTest with HasExecutionCo
val reversedTx = addSignedTx.transaction.reverse

addSignedTx
.copy(transaction = reversedTx)(addSignedTx.representativeProtocolVersion, None)
.update(transaction = reversedTx)
.asInstanceOf[SignedTopologyTransaction[Remove]]
}

@ -4,7 +4,7 @@
package com.digitalasset.canton.topology.transaction

import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveLong}
import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore
import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStoreX
@ -204,6 +204,59 @@ class ValidatingTopologyMappingXChecksTest

}
}

"validating TrafficControlStateX" should {
def trafficControlState(limit: Int): TrafficControlStateX =
TrafficControlStateX
.create(domainId, participant1, PositiveLong.tryCreate(limit.toLong))
.getOrElse(sys.error("Error creating TrafficControlStateX"))

val limit5 = factory.mkAdd(trafficControlState(5))
val limit10 = factory.mkAdd(trafficControlState(10))
val removal10 = factory.mkRemove(trafficControlState(10))

"reject non-monotonically increasing extra traffic limits" in {
val (checks, _) = mk()

val result =
checks.checkTransaction(
EffectiveTime.MaxValue,
toValidate = limit5,
inStore = Some(limit10),
)
result.value.futureValue should matchPattern {
case Left(TopologyTransactionRejection.ExtraTrafficLimitTooLow(_, _, _)) =>
}
}

"report no errors for valid mappings" in {
val (checks, _) = mk()

def runSuccessfulCheck(
toValidate: SignedTopologyTransactionX[TopologyChangeOpX, TrafficControlStateX],
inStore: Option[SignedTopologyTransactionX[TopologyChangeOpX, TrafficControlStateX]],
) =
checks
.checkTransaction(EffectiveTime.MaxValue, toValidate, inStore)
.value
.futureValue shouldBe Right(())

// first limit for member
runSuccessfulCheck(limit10, None)

// increase limit
runSuccessfulCheck(limit10, Some(limit5))

// same limit
runSuccessfulCheck(limit5, Some(limit5))

// reset monotonicity after removal
runSuccessfulCheck(limit5, Some(removal10))

// remove traffic control state for member
runSuccessfulCheck(removal10, Some(limit10))
}
}
}

private def addToStore(

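The new TrafficControlStateX cases test a monotonicity rule: a member's extra traffic limit may only stay equal or grow, unless the previous mapping was a removal. The decision logic under test reduces to something like the following sketch, inferred from the test cases above rather than the real checker:

// Sketch of the monotonicity rule exercised by the tests above.
sealed trait Op
final case class Add(limit: Long) extends Op
final case class Remove(limit: Long) extends Op

def checkLimit(toValidate: Op, inStore: Option[Op]): Either[String, Unit] =
  (toValidate, inStore) match {
    // Lowering an active limit is rejected...
    case (Add(newLimit), Some(Add(oldLimit))) if newLimit < oldLimit =>
      Left(s"ExtraTrafficLimitTooLow: $newLimit < $oldLimit")
    // ...but anything goes after a removal, or with no prior mapping.
    case _ => Right(())
  }
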
@ -5,32 +5,10 @@ package com.digitalasset.canton.version

import com.digitalasset.canton.SerializationDeserializationTestHelpers.DefaultValueUntilExclusive
import com.digitalasset.canton.crypto.TestHash
import com.digitalasset.canton.data.{
ActionDescription,
CommonMetadata,
ParticipantMetadata,
SubmitterMetadata,
TransferInCommonData,
TransferInView,
TransferOutCommonData,
TransferOutView,
ViewCommonData,
ViewParticipantData,
}
import com.digitalasset.canton.protocol.messages.AcsCommitment
import com.digitalasset.canton.protocol.{
ConfirmationPolicy,
ContractMetadata,
DynamicDomainParameters,
GeneratorsProtocol,
SerializableContract,
StaticDomainParameters,
}
import com.digitalasset.canton.sequencing.protocol.{
AcknowledgeRequest,
AggregationRule,
ClosedEnvelope,
}
import com.digitalasset.canton.data.*
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.protocol.messages.*
import com.digitalasset.canton.topology.transaction.{LegalIdentityClaim, SignedTopologyTransaction}
import com.digitalasset.canton.{BaseTest, SerializationDeserializationTestHelpers}
import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
@ -44,16 +22,31 @@ class SerializationDeserializationTest
import com.digitalasset.canton.data.GeneratorsTransferData.*
import com.digitalasset.canton.protocol.GeneratorsProtocol.*
import com.digitalasset.canton.sequencing.protocol.GeneratorsProtocol.*
import com.digitalasset.canton.topology.transaction.GeneratorsTransaction.*
import com.digitalasset.canton.protocol.messages.GeneratorsMessages.*
import com.digitalasset.canton.protocol.messages.GeneratorsMessages.GeneratorsVerdict.*
import com.digitalasset.canton.protocol.messages.GeneratorsMessages.GeneratorsLocalVerdict.*

"Serialization and deserialization methods" should {
"compose to the identity" in {
testProtocolVersioned(StaticDomainParameters)
testProtocolVersioned(DynamicDomainParameters)
testProtocolVersioned(AcknowledgeRequest)
testProtocolVersioned(AggregationRule)
testProtocolVersioned(com.digitalasset.canton.protocol.DynamicDomainParameters)

testProtocolVersioned(AcsCommitment)
testProtocolVersioned(ClosedEnvelope)
testProtocolVersioned(Verdict)
testProtocolVersioned(MediatorResponse)
testMemoizedProtocolVersionedWithCtx(TypedSignedProtocolMessageContent, TestHash)
testProtocolVersionedWithCtx(SignedProtocolMessage, TestHash)

testProtocolVersioned(LocalVerdict)
testProtocolVersioned(TransferResult)
testProtocolVersioned(MalformedMediatorRequestResult)
testProtocolVersionedWithCtx(EnvelopeContent, TestHash)
testMemoizedProtocolVersionedWithCtx(TransactionResultMessage, TestHash)

testProtocolVersioned(com.digitalasset.canton.sequencing.protocol.AcknowledgeRequest)
testProtocolVersioned(com.digitalasset.canton.sequencing.protocol.AggregationRule)
testProtocolVersioned(com.digitalasset.canton.sequencing.protocol.ClosedEnvelope)

testVersioned(ContractMetadata)(
GeneratorsProtocol.contractMetadataArb(canHaveEmptyKey = true)
@ -63,22 +56,44 @@ class SerializationDeserializationTest
List(DefaultValueUntilExclusive(_.copy(contractSalt = None), ProtocolVersion.v4)),
)(GeneratorsProtocol.serializableContractArb(canHaveEmptyKey = true))

testProtocolVersioned(ActionDescription)
testProtocolVersioned(com.digitalasset.canton.data.ActionDescription)

// Merkle tree leaves
testProtocolVersionedWithContext(CommonMetadata, TestHash)
testProtocolVersionedWithContext(ParticipantMetadata, TestHash)
testProtocolVersionedWithContext(SubmitterMetadata, TestHash)
testProtocolVersionedWithContext(TransferInCommonData, TestHash)
testProtocolVersionedWithContext(TransferInView, TestHash)
testProtocolVersionedWithContext(TransferOutCommonData, TestHash)
testProtocolVersionedWithContext(TransferOutView, TestHash)
testMemoizedProtocolVersionedWithCtx(CommonMetadata, TestHash)
testMemoizedProtocolVersionedWithCtx(ParticipantMetadata, TestHash)
testMemoizedProtocolVersionedWithCtx(SubmitterMetadata, TestHash)
testMemoizedProtocolVersionedWithCtx(TransferInCommonData, TestHash)
testMemoizedProtocolVersionedWithCtx(TransferInView, TestHash)
testMemoizedProtocolVersionedWithCtx(TransferOutCommonData, TestHash)
testMemoizedProtocolVersionedWithCtx(TransferOutView, TestHash)

Seq(ConfirmationPolicy.Vip, ConfirmationPolicy.Signatory).map { confirmationPolicy =>
testProtocolVersionedWithContext(ViewCommonData, (TestHash, confirmationPolicy))
testMemoizedProtocolVersionedWithCtx(
com.digitalasset.canton.data.ViewCommonData,
(TestHash, confirmationPolicy),
)
}

testProtocolVersionedWithContext(ViewParticipantData, TestHash)
testMemoizedProtocolVersioned(SignedTopologyTransaction)
testMemoizedProtocolVersioned(LegalIdentityClaim)

testMemoizedProtocolVersionedWithCtx(
com.digitalasset.canton.data.ViewParticipantData,
TestHash,
)
}

"be exhaustive" in {
val requiredTests =
findHasProtocolVersionedWrapperSubClasses("com.digitalasset.canton.protocol.messages")

val missingTests = requiredTests.diff(testedClasses.toList)

/*
If this test fails, it means that one class inheriting from HasProtocolVersionWrapper in the
package is not tested in the SerializationDeserializationTests
*/
clue(s"Missing tests should be empty but found: $missingTests")(missingTests shouldBe empty)
}
}
}

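Every testProtocolVersioned* call above asserts the same shape of property: serialize an arbitrary instance, deserialize, and require the original back. A minimal sketch of such a round-trip property with a toy codec (the real helpers additionally pin default field values per protocol version):

import org.scalacheck.{Gen, Prop}

final case class Message(id: Long, body: String)

def serialize(m: Message): String = s"${m.id}|${m.body}"
def deserialize(s: String): Message = {
  val Array(id, body) = s.split("\\|", 2)
  Message(id.toLong, body)
}

val messageGen: Gen[Message] = for {
  id <- Gen.choose(0L, Long.MaxValue)
  body <- Gen.alphaNumStr
} yield Message(id, body)

// "Serialization and deserialization compose to the identity".
val roundTrip: Prop = Prop.forAll(messageGen) { m =>
  deserialize(serialize(m)) == m
}
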
@ -109,10 +109,10 @@ object TopologyManagementInitialization {
transactions.toList,
recentSnapshot,
id,
maxSequencingTime,
Some(maxSequencingTime),
protocolVersion,
)
.leftMap(err => SendAsyncClientError.RequestInvalid(err.toString))
.leftMap(SendAsyncClientError.RequestInvalid)
batch = domainMembers.map(member =>
OpenEnvelope(content, Recipients.cc(member))(protocolVersion)
)

@ -203,7 +203,7 @@ private[mediator] class DbFinalizedResponseStore(
) on conflict do nothing"""
}

CloseContext.withCombinedContextF(callerCloseContext, closeContext, timeouts, logger) {
CloseContext.withCombinedContext(callerCloseContext, closeContext, timeouts, logger) {
closeContext =>
storage.update_(
insert,
@ -217,7 +217,7 @@ private[mediator] class DbFinalizedResponseStore(
callerCloseContext: CloseContext,
): OptionT[Future, FinalizedResponse] =
processingTime.optionTEvent {
CloseContext.withCombinedContextOT(callerCloseContext, closeContext, timeouts, logger) {
CloseContext.withCombinedContext(callerCloseContext, closeContext, timeouts, logger) {
closeContext =>
storage.querySingle(
sql"""select request_id, mediator_request, version, verdict, request_trace_context
@ -240,7 +240,7 @@ private[mediator] class DbFinalizedResponseStore(
override def prune(
timestamp: CantonTimestamp
)(implicit traceContext: TraceContext, callerCloseContext: CloseContext): Future[Unit] =
CloseContext.withCombinedContextF(callerCloseContext, closeContext, timeouts, logger) {
CloseContext.withCombinedContext(callerCloseContext, closeContext, timeouts, logger) {
closeContext =>
for {
removedCount <- storage.update(
@ -254,7 +254,7 @@ private[mediator] class DbFinalizedResponseStore(
traceContext: TraceContext,
callerCloseContext: CloseContext,
): Future[Long] = {
CloseContext.withCombinedContextF(callerCloseContext, closeContext, timeouts, logger) {
CloseContext.withCombinedContext(callerCloseContext, closeContext, timeouts, logger) {
closeContext =>
storage.query(
sql"select count(request_id) from response_aggregations".as[Long].head,
@ -269,7 +269,7 @@ private[mediator] class DbFinalizedResponseStore(
traceContext: TraceContext,
callerCloseContext: CloseContext,
): Future[Option[CantonTimestamp]] = {
CloseContext.withCombinedContextF(callerCloseContext, closeContext, timeouts, logger) {
CloseContext.withCombinedContext(callerCloseContext, closeContext, timeouts, logger) {
closeContext =>
storage
.query(

@ -856,7 +856,7 @@ class DbSequencerStore(

EitherT {
val CounterCheckpoint(counter, ts, latestTopologyClientTimestamp) = checkpoint
CloseContext.withCombinedContextF(closeContext, externalCloseContext, timeouts, logger)(
CloseContext.withCombinedContext(closeContext, externalCloseContext, timeouts, logger)(
combinedCloseContext =>
storage.queryAndUpdate(
for {

@ -855,8 +855,7 @@ object DomainTopologySender extends TopologyDispatchingErrorGroup {
recipients: Recipients,
)(maxSequencingTime: CantonTimestamp) =
DomainTopologyTransactionMessage
.create(batch.toList, snapshot, domainId, maxSequencingTime, protocolVersion)
.leftMap(_.toString)
.create(batch.toList, snapshot, domainId, Some(maxSequencingTime), protocolVersion)
.map(batchMessage =>
Batch(
List(

@ -49,7 +49,8 @@ class DomainTopologyManagerEventHandlerTest extends AsyncWordSpec with BaseTest
),
SymbolicCrypto.signingPublicKey("keyId"),
SymbolicCrypto.emptySignature,
)(signedTransactionProtocolVersionRepresentative, None)
signedTransactionProtocolVersionRepresentative,
)
private val request = RegisterTopologyTransactionRequest
.create(
participantId,

@ -304,10 +304,7 @@ class DomainTopologyManagerRequestServiceTest
ns1k3_k2,
okm1ak1E_k3,
okm1ak5_k3,
ps1d1T_k3.copy(signature = okm1ak5_k3.signature)(
ps1d1T_k3.representativeProtocolVersion,
None,
),
ps1d1T_k3.update(signature = okm1ak5_k3.signature),
),
)
.failOnShutdown

@ -0,0 +1,141 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.participant

import cats.syntax.either.*
import com.digitalasset.canton.RequestCounter
import com.digitalasset.canton.config.RequireTypes.NegativeLong
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.store.db.DbSerializationException
import slick.jdbc.{GetResult, SetParameter}

/** A LocalOffset is represented by a tuple (effectiveTime, tieBreaker)
 *
 * The effectiveTime is:
 * - recordTime for [[RequestOffset]]
 * - topology effectiveTime for [[TopologyOffset]]
 *
 * The tie breaker is:
 * - The non-negative request counter for [[RequestOffset]]
 * - A negative value for [[TopologyOffset]]. This allows splitting one topology transaction into several events.
 *
 * The ordering is such that for the same effective time, a request offset is smaller than a topology offset.
 * The rationale is that topology transactions have an exclusive valid-from time.
 *
 * The discriminator is only used to define a lexicographic ordering on the [[LocalOffset]].
 */
sealed trait LocalOffset
    extends PrettyPrinting
    with Ordered[LocalOffset]
    with Product
    with Serializable {
  override def pretty: Pretty[LocalOffset.this.type] =
    prettyOfClass(param("effectiveTime", _.effectiveTime), param("tieBreaker", _.tieBreaker))

  def effectiveTime: CantonTimestamp

  def tieBreaker: Long

  def raw: (CantonTimestamp, Long) = (effectiveTime, tieBreaker)

  /** The discriminator allows distinguishing between [[RequestOffset]] and [[TopologyOffset]].
   * Moreover, the ordering for [[LocalOffset]] is the lexicographic ordering on the tuple
   * (effectiveTime, discriminator, tieBreaker)
   *
   * In particular, for equal [[effectiveTime]], a [[TopologyOffset]] is bigger than a [[RequestOffset]] independently of the [[tieBreaker]].
   */
  final def discriminator: Int =
    if (tieBreaker >= 0) LocalOffset.RequestOffsetDiscriminator
    else LocalOffset.TopologyOffsetDiscriminator

  /** The equality methods automatically generated by the case classes that inherit LocalOffset are
   * compatible with this compare method: a [[RequestOffset]] is never equal to a [[TopologyOffset]]
   */
  override def compare(that: LocalOffset): Int = LocalOffset.orderingLocalOffset.compare(this, that)

  override def equals(other: Any): Boolean = other match {
    case offset: LocalOffset => offset.compare(this) == 0
    case _ => false
  }
}

final case class RequestOffset(effectiveTime: CantonTimestamp, requestCounter: RequestCounter)
    extends LocalOffset {
  override def tieBreaker: Long = requestCounter.unwrap
}

object RequestOffset {
  implicit val getResultRequestOffset: GetResult[RequestOffset] = GetResult { r =>
    val recordTime = r.<<[CantonTimestamp]
    val discriminator = r.<<[Int]
    val tieBreaker = r.<<[Long]

    if (tieBreaker >= 0 && discriminator == LocalOffset.RequestOffsetDiscriminator)
      RequestOffset(recordTime, RequestCounter(tieBreaker))
    else
      throw new DbSerializationException(
        s"Incompatible tieBreaker=$tieBreaker and discriminator=$discriminator"
      )
  }
}

final case class TopologyOffset private (
    effectiveTime: CantonTimestamp,
    topologyTieBreaker: NegativeLong,
) extends LocalOffset {
  require(topologyTieBreaker != NegativeLong.MinValue, "topology tie breaker cannot be MinValue")

  override def tieBreaker: Long = topologyTieBreaker.unwrap
}

object TopologyOffset {
  def tryCreate(effectiveTime: CantonTimestamp, topologyTieBreaker: NegativeLong): TopologyOffset =
    TopologyOffset(effectiveTime, topologyTieBreaker)

  def create(
      effectiveTime: CantonTimestamp,
      topologyTieBreaker: NegativeLong,
  ): Either[String, TopologyOffset] = Either
    .catchOnly[IllegalArgumentException](tryCreate(effectiveTime, topologyTieBreaker))
    .leftMap(_.getMessage)
}

object LocalOffset {
  val MaxValue: LocalOffset =
    TopologyOffset.tryCreate(CantonTimestamp.MaxValue, NegativeLong.tryCreate(Long.MinValue + 1))

  // Do not change these constants, as they are used in the DBs
  val RequestOffsetDiscriminator: Int = 0
  val TopologyOffsetDiscriminator: Int = 1

  implicit val orderingLocalOffset: Ordering[LocalOffset] = Ordering.by { offset =>
    /*
      NonNegative tieBreakers have orderingDiscriminator=0
      Negative tieBreakers have orderingDiscriminator=1
     */
    (offset.effectiveTime, offset.discriminator, offset.tieBreaker)
  }

  implicit val setParameterLocalOffset: SetParameter[LocalOffset] = (v, p) => {
    p >> v.effectiveTime
    p >> v.discriminator
    p >> v.tieBreaker
  }

  implicit val getResultLocalOffset: GetResult[LocalOffset] = GetResult { r =>
    val recordTime = r.<<[CantonTimestamp]
    val discriminator = r.<<[Int]
    val tieBreaker = r.<<[Long]

    if (tieBreaker >= 0 && discriminator == LocalOffset.RequestOffsetDiscriminator)
      RequestOffset(recordTime, RequestCounter(tieBreaker))
    else if (tieBreaker < 0 && discriminator == LocalOffset.TopologyOffsetDiscriminator)
      TopologyOffset.tryCreate(recordTime, NegativeLong.tryCreate(tieBreaker))
    else
      throw new DbSerializationException(
        s"Incompatible tieBreaker=$tieBreaker and discriminator=$discriminator"
      )
  }
}
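// Editor's sketch, not part of the commit: demonstrates the lexicographic ordering
// (effectiveTime, discriminator, tieBreaker) documented above. At an equal effectiveTime,
// any RequestOffset (discriminator 0) sorts below any TopologyOffset (discriminator 1),
// regardless of the tieBreaker values. Uses only names defined or imported in this file.
object LocalOffsetOrderingExample {
  private val ts = CantonTimestamp.Epoch

  private val request: LocalOffset = RequestOffset(ts, RequestCounter(42))
  private val topology: LocalOffset = TopologyOffset.tryCreate(ts, NegativeLong.tryCreate(-1L))

  // 42 > -1, yet the request offset is smaller: the discriminator dominates the tieBreaker.
  assert(request < topology)
  // At a strictly earlier effectiveTime, the timestamp dominates everything else.
  assert(TopologyOffset.tryCreate(ts.immediatePredecessor, NegativeLong.tryCreate(-1L)) < request)
}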
@ -11,7 +11,7 @@ import com.daml.lf.data.Bytes
import com.digitalasset.canton.*
import com.digitalasset.canton.crypto.SyncCryptoApiProvider
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.participant.RichRequestCounter
import com.digitalasset.canton.participant.admin.repair.MigrateContracts.MigratedContract
import com.digitalasset.canton.participant.store.ActiveContractStore.ContractState
import com.digitalasset.canton.participant.store.*
import com.digitalasset.canton.participant.sync.{LedgerSyncEvent, TimestampedEvent}
@ -186,7 +186,7 @@ private final class MigrateContracts(
)(implicit executionContext: ExecutionContext, traceContext: TraceContext): EitherT[
Future,
String,
List[MigrateContracts.Data[(SerializableContract, TransferCounter, Boolean)]],
List[MigrateContracts.Data[MigratedContract]],
] =
readContractsFromSource(contractIdsWithTransferCounters).flatMap {
_.parTraverse {
@ -210,17 +210,15 @@ private final class MigrateContracts(
}
.getOrElse(EitherT.rightT[Future, String](()))
} yield data.copy(payload =
(serializedSource, transferCounter, serializedTargetO.isEmpty)
MigratedContract(serializedSource, transferCounter, serializedTargetO.isEmpty)
)
}
}

private def adjustContractKeys(
request: RepairRequest,
timeOfChange: MigrateContracts.Data[
(SerializableContract, TransferCounter, Boolean)
] => TimeOfChange,
contracts: List[MigrateContracts.Data[(SerializableContract, TransferCounter, Boolean)]],
timeOfChange: MigrateContracts.Data[MigratedContract] => TimeOfChange,
contracts: List[MigrateContracts.Data[MigratedContract]],
newStatus: ContractKeyJournal.Status,
)(implicit
executionContext: ExecutionContext,
@ -234,7 +232,7 @@ private final class MigrateContracts(
contracts.parTraverseFilter(contract =>
getKeyIfOneMaintainerIsLocal(
request.domain.topologySnapshot,
contract.payload._1.metadata.maybeKeyWithMaintainers,
contract.payload.contract.metadata.maybeKeyWithMaintainers,
participantId,
).map(_.map(_ -> timeOfChange(contract)))
)
@ -246,7 +244,7 @@ private final class MigrateContracts(

private def persistContracts(
transactionId: TransactionId,
contracts: List[MigrateContracts.Data[(SerializableContract, TransferCounter, Boolean)]],
contracts: List[MigrateContracts.Data[MigratedContract]],
)(implicit
executionContext: ExecutionContext,
traceContext: TraceContext,
@ -266,12 +264,12 @@ private final class MigrateContracts(
)
_ <- EitherT.right {
contracts.parTraverse_ { contract =>
if (contract.payload._3)
if (contract.payload.isNew)
repairTarget.domain.persistentState.contractStore
.storeCreatedContract(
contract.targetTimeOfChange.rc,
transactionId,
contract.payload._1,
contract.payload.contract,
)
else Future.unit
}
@ -279,7 +277,7 @@ private final class MigrateContracts(
} yield ()

private def persistTransferOutAndIn(
contracts: List[MigrateContracts.Data[(SerializableContract, TransferCounter, Boolean)]]
contracts: List[MigrateContracts.Data[MigratedContract]]
)(implicit
executionContext: ExecutionContext,
traceContext: TraceContext,
@ -292,9 +290,9 @@ private final class MigrateContracts(
.transferOutContracts(
contracts.map { contract =>
(
contract.payload._1.contractId,
contract.payload.contract.contractId,
targetDomainId,
whenTransferCounterIsSupported(repairSource)(contract.payload._2),
whenTransferCounterIsSupported(repairSource)(contract.payload.transferCounter),
contract.sourceTimeOfChange,
)
}
@ -305,9 +303,9 @@ private final class MigrateContracts(
.transferInContracts(
contracts.map { contract =>
(
contract.payload._1.contractId,
contract.payload.contract.contractId,
sourceDomainId,
whenTransferCounterIsSupported(repairTarget)(contract.payload._2),
whenTransferCounterIsSupported(repairTarget)(contract.payload.transferCounter),
contract.targetTimeOfChange,
)
}
@ -319,25 +317,25 @@ private final class MigrateContracts(

private def insertTransferEventsInLog(
transactionId: TransactionId,
contracts: List[MigrateContracts.Data[(SerializableContract, TransferCounter, Boolean)]],
migratedContracts: List[MigrateContracts.Data[MigratedContract]],
)(implicit
executionContext: ExecutionContext,
traceContext: TraceContext,
): EitherT[Future, String, Unit] = {

val justContracts = contracts.map(_.payload._1)
val contracts = migratedContracts.map(_.payload.contract)

val insertTransferOutEvents =
for {
hostedParties <- EitherT.right(hostedParties(repairSource, justContracts, participantId))
transferOutEvents = contracts.map(transferOut(hostedParties))
hostedParties <- EitherT.right(hostedParties(repairSource, contracts, participantId))
transferOutEvents = migratedContracts.map(transferOut(hostedParties))
_ <- insertMany(repairSource, transferOutEvents)
} yield ()

val insertTransferInEvents =
for {
hostedParties <- EitherT.right(hostedParties(repairTarget, justContracts, participantId))
transferInEvents = contracts.map(transferIn(transactionId, hostedParties))
hostedParties <- EitherT.right(hostedParties(repairTarget, contracts, participantId))
transferInEvents = migratedContracts.map(transferIn(transactionId, hostedParties))
_ <- insertMany(repairTarget, transferInEvents)
} yield ()

@ -371,31 +369,31 @@ private final class MigrateContracts(
}

private def transferOut(hostedParties: Set[LfPartyId])(
contract: MigrateContracts.Data[(SerializableContract, TransferCounter, Boolean)]
contract: MigrateContracts.Data[MigratedContract]
)(implicit traceContext: TraceContext): TimestampedEvent =
TimestampedEvent(
event = LedgerSyncEvent.TransferredOut(
updateId = randomTransactionId(syncCrypto).tryAsLedgerTransactionId,
optCompletionInfo = None,
submitter = None,
contractId = contract.payload._1.contractId,
templateId = Option(contract.payload._1.contractInstance.unversioned.template),
contractStakeholders = contract.payload._1.metadata.stakeholders,
contractId = contract.payload.contract.contractId,
templateId = Option(contract.payload.contract.contractInstance.unversioned.template),
contractStakeholders = contract.payload.contract.metadata.stakeholders,
transferId = transferId,
targetDomain = targetDomainId,
transferInExclusivity = None,
workflowId = None,
isTransferringParticipant = false,
hostedStakeholders =
hostedParties.intersect(contract.payload._1.metadata.stakeholders).toList,
transferCounter = contract.payload._2,
hostedParties.intersect(contract.payload.contract.metadata.stakeholders).toList,
transferCounter = contract.payload.transferCounter,
),
localOffset = contract.sourceTimeOfChange.rc.asLocalOffset,
localOffset = contract.sourceTimeOfChange.asLocalOffset,
requestSequencerCounter = None,
)

private def transferIn(transactionId: TransactionId, hostedParties: Set[LfPartyId])(
contract: MigrateContracts.Data[(SerializableContract, TransferCounter, Boolean)]
contract: MigrateContracts.Data[MigratedContract]
)(implicit traceContext: TraceContext) =
TimestampedEvent(
event = LedgerSyncEvent.TransferredIn(
@ -403,11 +401,12 @@ private final class MigrateContracts(
optCompletionInfo = None,
submitter = None,
recordTime = repairTarget.timestamp.toLf,
ledgerCreateTime = contract.payload._1.ledgerCreateTime.toLf,
createNode = contract.payload._1.toLf,
ledgerCreateTime = contract.payload.contract.ledgerCreateTime.toLf,
createNode = contract.payload.contract.toLf,
creatingTransactionId = transactionId.tryAsLedgerTransactionId,
contractMetadata = Bytes.fromByteString(
contract.payload._1.metadata.toByteString(repairTarget.domain.parameters.protocolVersion)
contract.payload.contract.metadata
.toByteString(repairTarget.domain.parameters.protocolVersion)
),
transferId = transferId,
targetDomain = targetDomainId,
@ -415,10 +414,10 @@ private final class MigrateContracts(
workflowId = None,
isTransferringParticipant = false,
hostedStakeholders =
hostedParties.intersect(contract.payload._1.metadata.stakeholders).toList,
transferCounter = contract.payload._2,
hostedParties.intersect(contract.payload.contract.metadata.stakeholders).toList,
transferCounter = contract.payload.transferCounter,
),
localOffset = contract.targetTimeOfChange.rc.asLocalOffset,
localOffset = contract.targetTimeOfChange.asLocalOffset,
requestSequencerCounter = None,
)

@ -433,6 +432,16 @@ private[repair] object MigrateContracts {
targetTimeOfChange: TimeOfChange,
)

/** @param contract Contract to be migrated
* @param transferCounter Transfer counter
* @param isNew true if the contract was not seen before, false if already in the store
*/
final case class MigratedContract(
contract: SerializableContract,
transferCounter: TransferCounter,
isNew: Boolean,
)
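// Editor's sketch, not part of the commit: the named fields above replace the positional
// accessors on the previous (SerializableContract, TransferCounter, Boolean) tuple
// (_1 -> contract, _2 -> transferCounter, _3 -> isNew). A hypothetical call site:
private def describe(migrated: MigratedContract): String =
  if (migrated.isNew)
    s"migrating new contract ${migrated.contract.contractId}, transfer counter ${migrated.transferCounter}"
  else
    s"migrating known contract ${migrated.contract.contractId}"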
def apply(
contractIds: Iterable[MigrateContracts.Data[LfContractId]],
repairSource: RepairRequest,

@ -48,8 +48,9 @@ import com.digitalasset.canton.participant.sync.{
}
import com.digitalasset.canton.participant.util.DAMLe.ContractWithMetadata
import com.digitalasset.canton.participant.util.{DAMLe, TimeOfChange}
import com.digitalasset.canton.participant.{ParticipantNodeParameters, RichRequestCounter}
import com.digitalasset.canton.participant.{ParticipantNodeParameters, RequestOffset}
import com.digitalasset.canton.platform.participant.util.LfEngineToApi
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
import com.digitalasset.canton.protocol.{LfChoiceName, LfGlobalKey, *}
import com.digitalasset.canton.resource.TransactionalStoreUpdate
import com.digitalasset.canton.store.{
@ -336,9 +337,7 @@ final class RepairService(
contractsByCreation = filteredContracts
.groupBy(_.contract.ledgerCreateTime)
.toList
.sortBy { case (ts, _) =>
ts
}
.sortBy { case (ledgerCreateTime, _) => ledgerCreateTime }

_ <- PositiveInt
.create(contractsByCreation.size)
@ -948,7 +947,7 @@ final class RepairService(
repair: RepairRequest,
)(implicit traceContext: TraceContext): Future[Unit] = {
val transactionId = repair.transactionId.tryAsLedgerTransactionId
val offset = repair.tryExactlyOneRequestCounter.asLocalOffset
val offset = RequestOffset(repair.timestamp, repair.tryExactlyOneRequestCounter)
val event =
TimestampedEvent(
LedgerSyncEvent.ContractsPurged(
@ -975,11 +974,11 @@ final class RepairService(
repair: RepairRequest,
hostedParties: Set[LfPartyId],
requestCounter: RequestCounter,
timestamp: CantonTimestamp,
ledgerCreateTime: LedgerCreateTime,
contractsAdded: Seq[ContractToAdd],
)(implicit traceContext: TraceContext): Future[Unit] = {
val transactionId = randomTransactionId(syncCrypto).tryAsLedgerTransactionId
val offset = requestCounter.asLocalOffset
val offset = RequestOffset(repair.timestamp, requestCounter)
val contractMetadata = contractsAdded.view
.map(c => c.contract.contractId -> c.driverMetadata(repair.domain.parameters.protocolVersion))
.toMap
@ -990,7 +989,7 @@ final class RepairService(
contracts = contractsAdded.map(_.contract.toLf),
domainId = repair.domain.id,
recordTime = repair.timestamp.toLf,
ledgerTime = timestamp.toLf,
ledgerTime = ledgerCreateTime.toLf,
hostedWitnesses = contractsAdded.flatMap(_.witnesses.intersect(hostedParties)),
contractMetadata = contractMetadata,
),
@ -1019,7 +1018,7 @@ final class RepairService(
private def writeContractsAddedEvents(
repair: RepairRequest,
hostedParties: Set[LfPartyId],
contractsAdded: Seq[(TimeOfChange, (CantonTimestamp, Seq[ContractToAdd]))],
contractsAdded: Seq[(TimeOfChange, (LedgerCreateTime, Seq[ContractToAdd]))],
)(implicit traceContext: TraceContext): Future[Unit] =
MonadUtil.sequentialTraverse_(contractsAdded) {
case (timeOfChange, (timestamp, contractsToAdd)) =>
@ -1157,7 +1156,7 @@ final class RepairService(
(),
log(
s"""Cannot apply a repair command as events have been published up to
|${domain.startingPoints.eventPublishingNextLocalOffset} offset exclusive
|${domain.startingPoints.lastPublishedLocalOffset} offset inclusive
|and the repair command would be assigned the offset ${domain.startingPoints.processing.nextRequestCounter}.
|Reconnect to the domain to reprocess the dirty requests and retry repair afterwards.""".stripMargin
),
@ -1380,7 +1379,7 @@ object RepairService {
ContractMetadata
.tryCreate(signatoriesAsParties, signatoriesAsParties ++ observersAsParties, None)
),
ledgerCreateTime = time,
ledgerCreateTime = LedgerCreateTime(time),
contractSalt = contractSalt,
)
}
@ -1389,7 +1388,7 @@ object RepairService {
contract: SerializableContract
): Either[
String,
(Identifier, Record, Set[String], Set[String], LfContractId, Option[Salt], CantonTimestamp),
(Identifier, Record, Set[String], Set[String], LfContractId, Option[Salt], LedgerCreateTime),
] = {
val contractInstance = contract.rawContractInstance.contractInstance
LfEngineToApi

@ -352,9 +352,9 @@ class RecordOrderPublisher(
override def perform(): FutureUnlessShutdown[Unit] = {
// If the requestCounterCommitSetPairO is not set, then by default the commit set is empty, and
// the request counter is the smallest possible value that does not throw an exception in
// ActiveContractStore.bulkContractsTransferCounterSnapshot, i.e., lowerBound + 1
// ActiveContractStore.bulkContractsTransferCounterSnapshot, i.e., Genesis
val (requestCounter, commitSet) =
requestCounterCommitSetPairO.getOrElse((RequestCounter.LowerBound + 1, CommitSet.empty))
requestCounterCommitSetPairO.getOrElse((RequestCounter.Genesis, CommitSet.empty))
// Augments the commit set with the updated transfer counters for archive events,
// computes the acs change and publishes it
logger.debug(

@ -11,6 +11,10 @@ import com.digitalasset.canton.participant.util.TimeOfChange
/** Canton-internal record time
* @param timestamp ACS change timestamp
* @param tieBreaker ordering tie-breaker for changes that have the same timestamp (currently, this happens only with repair requests)
*
* Value of the `tieBreaker`:
* - Requests (regular as well as repair requests) use the request counter as `tieBreaker`.
* - Empty ACS changes (ticks, received ACS commitments, time proofs) use `Long.MinValue`
*/
final case class RecordTime(timestamp: CantonTimestamp, tieBreaker: Long) extends PrettyPrinting {
override lazy val pretty: Pretty[RecordTime] = prettyOfClass(
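// Editor's sketch, not part of the commit: the documented tieBreaker convention, using
// the constructor above. Assumes RequestCounter and CantonTimestamp are in scope; the
// values `ts` and `rc` are hypothetical.
object RecordTimeTieBreakerExample {
  private val ts: CantonTimestamp = CantonTimestamp.Epoch
  private val rc: RequestCounter = RequestCounter(7)

  // Requests (regular and repair) reuse the request counter as the tie-breaker ...
  private val requestChange = RecordTime(timestamp = ts, tieBreaker = rc.unwrap)
  // ... while empty ACS changes (ticks, received commitments, time proofs) use Long.MinValue.
  private val emptyChange = RecordTime(timestamp = ts, tieBreaker = Long.MinValue)
}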
@ -4,7 +4,6 @@
package com.digitalasset.canton

import com.daml.lf.data.Time
import com.digitalasset.canton.data.{Counter, CounterCompanion}
import com.digitalasset.canton.ledger.offset

package object participant {
@ -13,23 +12,6 @@ package object participant {
type LedgerSyncOffset = offset.Offset
val LedgerSyncOffset: offset.Offset.type = offset.Offset

implicit class RichRequestCounter(val rc: RequestCounter) extends AnyVal {

/** Use this method to indicate that unwrapping to use the request counter as
* a local offset is fine.
*/
def asLocalOffset: LocalOffset = LocalOffset(rc.unwrap)
}

type LocalOffsetDiscriminator
type LocalOffset = Counter[LocalOffsetDiscriminator]
val LocalOffset: CounterCompanion[LocalOffsetDiscriminator] =
new CounterCompanion[LocalOffsetDiscriminator] {}

implicit class RichLocalOffset(val offset: LocalOffset) extends AnyVal {
def asRequestCounter: RequestCounter = RequestCounter(offset.unwrap)
}

// Ledger record time is "single-dimensional"
type LedgerSyncRecordTime = Time.Timestamp
val LedgerSyncRecordTime: Time.Timestamp.type = Time.Timestamp

@ -8,11 +8,13 @@ import com.digitalasset.canton.*
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.participant.protocol.ProcessingStartingPoints.InvalidStartingPointsException
import com.digitalasset.canton.participant.{LocalOffset, RichRequestCounter}
import com.digitalasset.canton.participant.{LocalOffset, RequestOffset}
import com.digitalasset.canton.store.CursorPrehead.SequencerCounterCursorPrehead
import com.google.common.annotations.VisibleForTesting

/** Summarizes the counters and timestamps where request processing or replay can start
/** Summarizes the counters and timestamps where request processing can start
*
* @param cleanRequestPrehead The request offset corresponding to the prehead of the clean request if any.
* @param nextRequestCounter The request counter for the next request to be replayed or processed.
* @param nextSequencerCounter The sequencer counter for the next event to be replayed or processed.
* @param prenextTimestamp A strict lower bound on the timestamp for the `nextSequencerCounter`.
@ -26,21 +28,62 @@ import com.digitalasset.canton.store.CursorPrehead.SequencerCounterCursorPrehead
* and a lower request counter than `nextRequestCounter`.
*/
final case class MessageProcessingStartingPoint(
cleanRequestPrehead: Option[RequestOffset],
nextRequestCounter: RequestCounter,
nextSequencerCounter: SequencerCounter,
prenextTimestamp: CantonTimestamp,
) extends PrettyPrinting {
override def pretty: Pretty[MessageProcessingStartingPoint] = prettyOfClass(
paramIfDefined("last clean offset", _.cleanRequestPrehead),
param("next request counter", _.nextRequestCounter),
param("next sequencer counter", _.nextSequencerCounter),
param("prenext timestamp", _.prenextTimestamp),
)

def toMessageCleanReplayStartingPoint: MessageCleanReplayStartingPoint =
MessageCleanReplayStartingPoint(nextRequestCounter, nextSequencerCounter, prenextTimestamp)
}

object MessageProcessingStartingPoint {
def default: MessageProcessingStartingPoint =
MessageProcessingStartingPoint(
None,
RequestCounter.Genesis,
SequencerCounter.Genesis,
CantonTimestamp.MinValue,
)
}

/** Summarizes the counters and timestamps where replay can start
*
* @param nextRequestCounter The request counter for the next request to be replayed
* @param nextSequencerCounter The sequencer counter for the next event to be replayed
* @param prenextTimestamp A strict lower bound on the timestamp for the `nextSequencerCounter`.
* The bound must be tight, i.e., if a sequenced event has sequencer counter lower than
* `nextSequencerCounter` or request counter lower than `nextRequestCounter`,
* then the timestamp of the event must be less than or equal to `prenextTimestamp`.
*
* No sequenced event has both a higher timestamp than `prenextTimestamp`
* and a lower sequencer counter than `nextSequencerCounter`.
* No request has both a higher timestamp than `prenextTimestamp`
* and a lower request counter than `nextRequestCounter`.
*/
final case class MessageCleanReplayStartingPoint(
nextRequestCounter: RequestCounter,
nextSequencerCounter: SequencerCounter,
prenextTimestamp: CantonTimestamp,
) extends PrettyPrinting {

override def pretty: Pretty[MessageProcessingStartingPoint] = prettyOfClass(
override def pretty: Pretty[MessageCleanReplayStartingPoint] = prettyOfClass(
param("next request counter", _.nextRequestCounter),
param("next sequencer counter", _.nextSequencerCounter),
param("prenext timestamp", _.prenextTimestamp),
)
}

object MessageProcessingStartingPoint {
def default: MessageProcessingStartingPoint =
MessageProcessingStartingPoint(
object MessageCleanReplayStartingPoint {
def default: MessageCleanReplayStartingPoint =
MessageCleanReplayStartingPoint(
RequestCounter.Genesis,
SequencerCounter.Genesis,
CantonTimestamp.MinValue,
@ -55,7 +98,7 @@ object MessageProcessingStartingPoint {
* It refers to the first request that is not known to be clean.
* The [[MessageProcessingStartingPoint.prenextTimestamp]] must be the timestamp of a sequenced event
* or [[com.digitalasset.canton.data.CantonTimestamp.MinValue]].
* @param eventPublishingNextLocalOffset The next local offset that may be published to the
* @param lastPublishedLocalOffset The last local offset that was published to the
* [[com.digitalasset.canton.participant.store.MultiDomainEventLog]]
* @param rewoundSequencerCounterPrehead The point to which the sequencer counter prehead needs to be reset as part of the recovery clean-up.
* This is the minimum of the following:
@ -64,9 +107,9 @@ object MessageProcessingStartingPoint {
* @throws ProcessingStartingPoints.InvalidStartingPointsException if `cleanReplay` is after (in any component) `processing`
*/
final case class ProcessingStartingPoints private (
cleanReplay: MessageProcessingStartingPoint,
cleanReplay: MessageCleanReplayStartingPoint,
processing: MessageProcessingStartingPoint,
eventPublishingNextLocalOffset: LocalOffset,
lastPublishedLocalOffset: Option[LocalOffset],
rewoundSequencerCounterPrehead: Option[SequencerCounterCursorPrehead],
) extends PrettyPrinting {

@ -91,13 +134,20 @@ final case class ProcessingStartingPoints private (
* Some tests reset the clean request prehead and thus violate this invariant.
* In such a case, this method returns `false.`
*/
def processingAfterPublished: Boolean =
processing.nextRequestCounter.asLocalOffset >= eventPublishingNextLocalOffset
def processingAfterPublished: Boolean = {
(lastPublishedLocalOffset, processing.cleanRequestPrehead) match {
case (Some(lastPublishedLocalOffset), Some(lastProcessed)) =>
lastProcessed >= lastPublishedLocalOffset
case (Some(_lastPublishedLocalOffset), None) => false
case (None, Some(_lastProcessed)) => true
case (None, None) => true
}
}

override def pretty: Pretty[ProcessingStartingPoints] = prettyOfClass(
param("clean replay", _.cleanReplay),
param("processing", _.processing),
param("event publishing next local offset", _.eventPublishingNextLocalOffset),
paramIfDefined("last published local offset", _.lastPublishedLocalOffset),
param("rewound sequencer counter prehead", _.rewoundSequencerCounterPrehead),
)
}
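// Editor's sketch, not part of the commit: the decision table implemented by
// processingAfterPublished above, as a standalone function over the two optional offsets.
private def processingAfterPublishedSpec(
    lastPublished: Option[LocalOffset],
    cleanRequestPrehead: Option[RequestOffset],
): Boolean =
  (lastPublished, cleanRequestPrehead) match {
    case (Some(published), Some(processed)) => processed >= published // processing must not lag publication
    case (Some(_), None) => false // events published but nothing recorded as clean: invariant violated
    case (None, _) => true // nothing published yet: trivially consistent
  }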
@ -106,22 +156,22 @@ object ProcessingStartingPoints {
final case class InvalidStartingPointsException(message: String) extends RuntimeException(message)

def tryCreate(
cleanReplay: MessageProcessingStartingPoint,
cleanReplay: MessageCleanReplayStartingPoint,
processing: MessageProcessingStartingPoint,
eventPublishingNextLocalOffset: LocalOffset,
lastPublishedLocalOffset: Option[LocalOffset],
rewoundSequencerCounterPrehead: Option[SequencerCounterCursorPrehead],
): ProcessingStartingPoints =
new ProcessingStartingPoints(
cleanReplay,
processing,
eventPublishingNextLocalOffset,
lastPublishedLocalOffset,
rewoundSequencerCounterPrehead,
)

def create(
cleanReplay: MessageProcessingStartingPoint,
cleanReplay: MessageCleanReplayStartingPoint,
processing: MessageProcessingStartingPoint,
eventPublishingNextLocalOffset: LocalOffset,
lastPublishedLocalOffset: Option[LocalOffset],
rewoundSequencerCounterPrehead: Option[SequencerCounterCursorPrehead],
): Either[String, ProcessingStartingPoints] =
Either
@ -129,17 +179,18 @@ object ProcessingStartingPoints {
tryCreate(
cleanReplay,
processing,
eventPublishingNextLocalOffset,
lastPublishedLocalOffset,
rewoundSequencerCounterPrehead,
)
)
.leftMap(_.message)

def default: ProcessingStartingPoints =
@VisibleForTesting
private[protocol] def default: ProcessingStartingPoints =
new ProcessingStartingPoints(
cleanReplay = MessageProcessingStartingPoint.default,
cleanReplay = MessageCleanReplayStartingPoint.default,
processing = MessageProcessingStartingPoint.default,
eventPublishingNextLocalOffset = RequestCounter.Genesis.asLocalOffset,
lastPublishedLocalOffset = None,
rewoundSequencerCounterPrehead = None,
)
}

@ -5,7 +5,7 @@ package com.digitalasset.canton.participant.protocol

import com.daml.lf.value.Value.ContractId
import com.digitalasset.canton.crypto.Salt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
import com.digitalasset.canton.protocol.{
AuthenticatedContractIdVersion,
AuthenticatedContractIdVersionV2,
@ -69,7 +69,7 @@ class SerializableContractAuthenticatorImpl(unicumGenerator: UnicumGenerator)
def authenticate(
contractId: LfContractId,
contractSalt: Option[Salt],
ledgerTime: CantonTimestamp,
ledgerTime: LedgerCreateTime,
metadata: ContractMetadata,
rawContractInstance: SerializableRawContractInstance,
): Either[String, Unit] = {
@ -85,7 +85,7 @@ class SerializableContractAuthenticatorImpl(unicumGenerator: UnicumGenerator)
recomputedUnicum <- unicumGenerator
.recomputeUnicum(
contractSalt = salt,
ledgerTime = ledgerTime,
ledgerCreateTime = ledgerTime,
metadata = metadata,
suffixedContractInstance = rawContractInstance,
contractIdVersion = contractIdVersion,

@ -26,7 +26,7 @@ import com.digitalasset.canton.ledger.participant.state.v2.*
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.metrics.*
import com.digitalasset.canton.participant.RichRequestCounter
import com.digitalasset.canton.participant.RequestOffset
import com.digitalasset.canton.participant.metrics.TransactionProcessingMetrics
import com.digitalasset.canton.participant.protocol.ProtocolProcessor.{
MalformedPayload,
@ -1167,7 +1167,7 @@ class TransactionProcessingSteps(
requestType,
Some(domainId),
),
rc.asLocalOffset,
RequestOffset(ts, rc),
Some(sc),
)
} -> None // Transaction processing doesn't use pending submissions
@ -1227,7 +1227,7 @@ class TransactionProcessingSteps(
TimestampedEvent(
LedgerSyncEvent
.CommandRejected(requestTime.toLf, info, rejection, requestType, Some(domainId)),
requestCounter.asLocalOffset,
RequestOffset(requestTime, requestCounter),
Some(requestSequencerCounter),
)
)
@ -1408,7 +1408,7 @@ class TransactionProcessingSteps(

timestampedEvent = TimestampedEvent(
acceptedEvent,
requestCounter.asLocalOffset,
RequestOffset(requestTime, requestCounter),
Some(requestSequencerCounter),
)
} yield CommitAndStoreContractsAndPublishEvent(

@ -374,7 +374,7 @@ class InFlightSubmissionTracker(

for {
foundLocalEvents <- domainState.singleDimensionEventLog.lookupEventRange(
fromInclusive = None,
fromExclusive = None,
toInclusive = None,
fromTimestampInclusive = first.sequencingInfo.sequencingTime.some,
toTimestampInclusive = last.sequencingInfo.sequencingTime.some,

@ -18,6 +18,7 @@ import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory,
import com.digitalasset.canton.participant.protocol.submission.TransactionTreeFactory.*
import com.digitalasset.canton.protocol.ContractIdSyntax.*
import com.digitalasset.canton.protocol.RollbackContext.RollbackScope
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
import com.digitalasset.canton.protocol.WellFormedTransaction.{WithSuffixes, WithoutSuffixes}
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.topology.client.TopologySnapshot
@ -435,7 +436,7 @@ abstract class TransactionTreeFactoryImpl(
viewPosition,
viewParticipantDataSalt,
createIndex,
state.ledgerTime,
LedgerCreateTime(state.ledgerTime),
contractMetadata,
serializedCantonContractInst,
cantonContractIdVersion,
@ -449,7 +450,7 @@ abstract class TransactionTreeFactoryImpl(
contractId = contractId,
rawContractInstance = serializedCantonContractInst,
metadata = contractMetadata,
ledgerCreateTime = state.ledgerTime,
ledgerCreateTime = LedgerCreateTime(state.ledgerTime),
contractSalt = Option.when(protocolVersion >= ProtocolVersion.v4)(contractSalt.unwrap),
)
state.setCreatedContractInfo(contractId, createdInfo)

@ -16,7 +16,7 @@ import com.digitalasset.canton.data.*
import com.digitalasset.canton.ledger.participant.state.v2.CompletionInfo
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.participant.RichRequestCounter
import com.digitalasset.canton.participant.RequestOffset
import com.digitalasset.canton.participant.protocol.ProcessingSteps.PendingRequestData
import com.digitalasset.canton.participant.protocol.conflictdetection.{
ActivenessCheck,
@ -539,7 +539,11 @@ private[transfer] class TransferInProcessingSteps(
hostedStakeholders.toList,
)
timestampEvent = Some(
TimestampedEvent(event, requestCounter.asLocalOffset, Some(requestSequencerCounter))
TimestampedEvent(
event,
RequestOffset(requestId.unwrap, requestCounter),
Some(requestSequencerCounter),
)
)
} yield CommitAndStoreContractsAndPublishEvent(
commitSetO,

@ -20,7 +20,7 @@ import com.digitalasset.canton.data.{
import com.digitalasset.canton.ledger.participant.state.v2.CompletionInfo
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.participant.RichRequestCounter
import com.digitalasset.canton.participant.RequestOffset
import com.digitalasset.canton.participant.protocol.ProcessingSteps.PendingRequestData
import com.digitalasset.canton.participant.protocol.conflictdetection.{
ActivenessCheck,
@ -618,7 +618,7 @@ class TransferOutProcessingSteps(
Some(
TimestampedEvent(
transferOutEvent,
requestCounter.asLocalOffset,
RequestOffset(requestId.unwrap, requestCounter),
Some(requestSequencerCounter),
)
),

@ -22,7 +22,7 @@ import com.digitalasset.canton.ledger.participant.state.v2.CompletionInfo
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.logging.{NamedLogging, TracedLogger}
import com.digitalasset.canton.participant.RichRequestCounter
import com.digitalasset.canton.participant.RequestOffset
import com.digitalasset.canton.participant.protocol.ProcessingSteps.WrapsProcessorError
import com.digitalasset.canton.participant.protocol.ProtocolProcessor.{
MalformedPayload,
@ -249,7 +249,7 @@ trait TransferProcessingSteps[
TimestampedEvent(
LedgerSyncEvent
.CommandRejected(ts.toLf, completionInfo, rejection, requestType, Some(domainId.unwrap)),
rc.asLocalOffset,
RequestOffset(ts, rc),
Some(sc),
)
)
@ -289,7 +289,7 @@ trait TransferProcessingSteps[
requestType,
Some(domainId.unwrap),
),
pendingTransfer.requestCounter.asLocalOffset,
RequestOffset(pendingTransfer.requestId.unwrap, pendingTransfer.requestCounter),
Some(pendingTransfer.requestSequencerCounter),
)
)

@ -34,7 +34,7 @@ object ContractConsistencyChecker {
): Either[List[ReferenceToFutureContractError], Unit] =
inputContracts
.traverse_ { case (coid, contract) =>
val let = contract.ledgerCreateTime
val let = contract.ledgerCreateTime.ts
Validated.condNec(
let <= ledgerTime,
(),

@ -137,7 +137,7 @@ final case class StoredContract(

override def pretty: Pretty[StoredContract] = prettyOfClass(
param("id", _.contractId),
param("let", _.contract.ledgerCreateTime),
param("let", _.contract.ledgerCreateTime.ts),
param("request counter", _.requestCounter),
paramIfDefined("creating transaction id", _.creatingTransactionIdO),
)

@ -38,7 +38,12 @@ import com.digitalasset.canton.participant.sync.{
SyncDomainPersistentStateLookup,
TimestampedEvent,
}
import com.digitalasset.canton.participant.{GlobalOffset, LocalOffset}
import com.digitalasset.canton.participant.{
GlobalOffset,
LocalOffset,
RequestOffset,
TopologyOffset,
}
import com.digitalasset.canton.protocol.TargetDomainId
import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage}
import com.digitalasset.canton.store.{IndexedDomain, IndexedStringStore}
@ -143,7 +148,7 @@ trait MultiDomainEventLog extends AutoCloseable { this: NamedLogging =>
* or equal to `upToInclusive`.
*
* @return `(domainLastOffsets, participantLastOffset)`, where `domainLastOffsets` maps the domains in `domainIds`
* to the greatest offset of the corresponding domain event log (if it exists) and
* to the greatest local offset and the greatest request offset of the corresponding domain event log and
* `participantLastOffset` is the greatest participant offset.
*/
def lastDomainOffsetsBeforeOrAtGlobalOffset(
@ -152,17 +157,35 @@ trait MultiDomainEventLog extends AutoCloseable { this: NamedLogging =>
participantEventLogId: ParticipantEventLogId,
)(implicit
traceContext: TraceContext
): Future[(Map[DomainId, LocalOffset], Option[LocalOffset])] = {
): Future[(Map[DomainId, (Option[LocalOffset], Option[RequestOffset])], Option[LocalOffset])] = {
for {
domainLogIds <- domainIds.parTraverse(IndexedDomain.indexed(indexedStringStore))
domainOffsets <- domainLogIds.parTraverseFilter { domainId =>
lastLocalOffsetBeforeOrAt(
DomainEventLogId(domainId),
upToInclusive,
None,
)
.map(_.map(domainId.item -> _))

domainOffsets <- domainLogIds.parTraverse { domainId =>
for {
localOffset <- lastLocalOffsetBeforeOrAt(
DomainEventLogId(domainId),
upToInclusive,
None,
)

requestOffset <- localOffset match {
// Last local offset is a request offset -> no need to query again
case Some(requestOffset: RequestOffset) => Future.successful(Some(requestOffset))

// No known offset -> no need to query again
case None => Future.successful(None)

case Some(_: TopologyOffset) =>
lastRequestOffsetBeforeOrAt(
DomainEventLogId(domainId),
upToInclusive,
None,
)
}
} yield domainId.domainId -> (localOffset, requestOffset)
}

participantOffset <- lastLocalOffsetBeforeOrAt(
participantEventLogId,
upToInclusive,
@ -184,6 +207,20 @@ trait MultiDomainEventLog extends AutoCloseable { this: NamedLogging =>
timestampInclusive: Option[CantonTimestamp],
)(implicit traceContext: TraceContext): Future[Option[LocalOffset]]

/** Returns the greatest request offset of the [[SingleDimensionEventLog]] given by `eventLogId`, if any,
* such that the following holds:
* <ol>
* <li>The assigned global offset is below or at `upToInclusive`.</li>
* <li>The record time of the event is below or at `timestampInclusive` (if defined)</li>
* </ol>
*/
// TODO(#14381) Update coverage of this method with interleaved topology events
def lastRequestOffsetBeforeOrAt(
eventLogId: EventLogId,
upToInclusive: GlobalOffset,
timestampInclusive: Option[CantonTimestamp],
)(implicit traceContext: TraceContext): Future[Option[RequestOffset]]
/** Yields the `deltaFromBeginning`-lowest global offset (if it exists).
* I.e., `locateOffset(0)` yields the smallest offset, `locateOffset(1)` the second smallest offset, and so on.
*/

@ -5,13 +5,14 @@ package com.digitalasset.canton.participant.store

import com.digitalasset.canton.checked
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.participant.LocalOffset
import com.digitalasset.canton.participant.store.EventLogId.ParticipantEventLogId
import com.digitalasset.canton.participant.store.db.DbParticipantEventLog
import com.digitalasset.canton.participant.store.memory.InMemoryParticipantEventLog
import com.digitalasset.canton.participant.sync.TimestampedEvent
import com.digitalasset.canton.participant.{LocalOffset, RequestOffset}
import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage}
import com.digitalasset.canton.store.IndexedStringStore
import com.digitalasset.canton.topology.DomainId
@ -38,7 +39,7 @@ trait ParticipantEventLog

/** Allocates the next local offset */
def nextLocalOffset()(implicit traceContext: TraceContext): Future[LocalOffset] =
nextLocalOffsets(1).map(
nextLocalOffsets(NonNegativeInt.one).map(
_.headOption.getOrElse(
ErrorUtil.internalError(
new RuntimeException("failed to allocate at least one local offset")
@ -48,11 +49,18 @@ trait ParticipantEventLog

/** Allocates `count` many new offsets and returns all of them.
*/
def nextLocalOffsets(count: Int)(implicit traceContext: TraceContext): Future[Seq[LocalOffset]]
def nextLocalOffsets(count: NonNegativeInt)(implicit
traceContext: TraceContext
): Future[Seq[RequestOffset]]
}

object ParticipantEventLog {
val InitialLocalOffset: LocalOffset = LocalOffset.Genesis

/** There is no meaningful `effectiveTime` for the RequestOffset: since the participant event log contains
* data related to several domains as well as participant local events, there are several incomparable
* clocks in scope. Thus, we consider all `effectiveTime` to be the same.
*/
private[store] val EffectiveTime: CantonTimestamp = CantonTimestamp.Epoch
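// Editor's sketch, not part of the commit: every offset allocated here shares the Epoch
// effectiveTime, so allocated offsets are ordered purely by their request counter (the
// tieBreaker). `log` stands in for any concrete ParticipantEventLog implementation, and
// NonNegativeInt.tryCreate is assumed to be available alongside NonNegativeInt.one.
private def allocationIsMonotonic(log: ParticipantEventLog)(implicit
    ec: ExecutionContext,
    traceContext: TraceContext,
): Future[Boolean] =
  log.nextLocalOffsets(NonNegativeInt.tryCreate(2)).map {
    case Seq(first, second) => first < second // same effectiveTime, increasing tieBreaker
    case _ => false
  }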
val ProductionParticipantEventLogId: ParticipantEventLogId = checked(
ParticipantEventLogId.tryCreate(0)

@ -33,7 +33,7 @@ trait SingleDimensionEventLogLookup {
): OptionT[Future, TimestampedEvent]

def lookupEventRange(
fromInclusive: Option[LocalOffset],
fromExclusive: Option[LocalOffset],
toInclusive: Option[LocalOffset],
fromTimestampInclusive: Option[CantonTimestamp],
toTimestampInclusive: Option[CantonTimestamp],
@ -131,10 +131,10 @@ trait SingleDimensionEventLog[+Id <: EventLogId] extends SingleDimensionEventLog
traceContext: TraceContext
): Future[Boolean]

/** Deletes all events whose local offset is at least `inclusive`.
/** Deletes all events whose local offset is greater than `exclusive`.
* This operation need not execute atomically.
*/
def deleteSince(inclusive: LocalOffset)(implicit traceContext: TraceContext): Future[Unit]
def deleteAfter(exclusive: LocalOffset)(implicit traceContext: TraceContext): Future[Unit]
}
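// Editor's sketch, not part of the commit: the boundary change from deleteSince to
// deleteAfter. Given events at offsets o1 < o2 < o3, deleteSince(o2) (old, inclusive)
// removed o2 and o3, whereas deleteAfter(o2) (new, exclusive) removes only o3.
private def survivorsAfterDeleteAfter(
    offsets: Seq[LocalOffset],
    exclusive: LocalOffset,
): Seq[LocalOffset] =
  offsets.filter(_ <= exclusive) // the bound itself is kept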
|
||||
|
||||
sealed trait EventLogId extends PrettyPrinting with Product with Serializable {
|
||||
|
@ -9,15 +9,18 @@ import com.digitalasset.canton.config.ProcessingTimeout
|
||||
import com.digitalasset.canton.data.CantonTimestamp
|
||||
import com.digitalasset.canton.lifecycle.CloseContext
|
||||
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging}
|
||||
import com.digitalasset.canton.participant.RequestOffset
|
||||
import com.digitalasset.canton.participant.admin.repair.RepairService
|
||||
import com.digitalasset.canton.participant.metrics.SyncDomainMetrics
|
||||
import com.digitalasset.canton.participant.protocol.*
|
||||
import com.digitalasset.canton.participant.protocol.submission.InFlightSubmissionTracker
|
||||
import com.digitalasset.canton.participant.store.EventLogId.DomainEventLogId
|
||||
import com.digitalasset.canton.participant.util.TimeOfChange
|
||||
import com.digitalasset.canton.participant.{LocalOffset, RichRequestCounter}
|
||||
import com.digitalasset.canton.sequencing.PossiblyIgnoredSerializedEvent
|
||||
import com.digitalasset.canton.store.CursorPrehead.SequencerCounterCursorPrehead
|
||||
import com.digitalasset.canton.store.CursorPrehead.{
|
||||
RequestCounterCursorPrehead,
|
||||
SequencerCounterCursorPrehead,
|
||||
}
|
||||
import com.digitalasset.canton.store.SequencedEventStore.{ByTimestamp, LatestUpto}
|
||||
import com.digitalasset.canton.store.*
|
||||
import com.digitalasset.canton.time.DomainTimeTracker
|
||||
@ -129,11 +132,6 @@ object SyncDomainEphemeralStateFactory {
|
||||
implicit val traceContext: TraceContext = loggingContext.traceContext
|
||||
val logger = loggingContext.logger
|
||||
|
||||
def eventLogNextOffset: Future[LocalOffset] =
|
||||
multiDomainEventLog
|
||||
.lastLocalOffset(DomainEventLogId(domainId))
|
||||
.map(_.fold(RequestCounter.Genesis.asLocalOffset)(_ + 1L))
|
||||
|
||||
def noEventForRequestTimestamp(rc: RequestCounter)(err: SequencedEventNotFoundError): Nothing =
|
||||
ErrorUtil.internalError(
|
||||
new IllegalStateException(s"No sequenced event found for request $rc: ${err.criterion}.")
|
||||
@ -141,7 +139,7 @@ object SyncDomainEphemeralStateFactory {
|
||||
|
||||
def isRepairOnEmptyDomain(requestData: RequestJournal.RequestData): Boolean = {
|
||||
val RequestJournal.RequestData(
|
||||
rcProcess,
|
||||
_rcProcess,
|
||||
_state,
|
||||
requestTimestamp,
|
||||
_commitTime,
|
||||
@ -152,11 +150,14 @@ object SyncDomainEphemeralStateFactory {
|
||||
|
||||
def processingStartingPointAndRewoundSequencerCounterPrehead(
|
||||
cleanSequencerCounterPreheadO: Option[SequencerCounterCursorPrehead],
|
||||
firstDirtyRc: RequestCounter,
|
||||
requestTimestampPreheadO: Option[CantonTimestamp],
|
||||
cleanRequestPreheadO: Option[RequestCounterCursorPrehead],
|
||||
): Future[
|
||||
(MessageProcessingStartingPoint, Option[SequencerCounterCursorPrehead])
|
||||
] = {
|
||||
val firstDirtyRc = cleanRequestPreheadO.fold(RequestCounter.Genesis)(_.counter + 1)
|
||||
val cleanRequestPreheadLocalOffsetO = cleanRequestPreheadO.map { case CursorPrehead(rc, ts) =>
|
||||
RequestOffset(ts, rc)
|
||||
}
|
||||
|
||||
// Cap the rewound sequencer counter prehead by the clean sequencer counter prehead
|
||||
// so that we do not skip dirty sequencer counters.
|
||||
@ -189,18 +190,21 @@ object SyncDomainEphemeralStateFactory {
|
||||
Future.successful(MessageProcessingStartingPoint.default -> None)
|
||||
case Some(CursorPrehead(preheadSc, preheadScTs)) =>
|
||||
val processingStartingPoint =
|
||||
MessageProcessingStartingPoint(firstDirtyRc, preheadSc + 1L, preheadScTs)
|
||||
MessageProcessingStartingPoint(
|
||||
cleanRequestPreheadLocalOffsetO,
|
||||
firstDirtyRc,
|
||||
preheadSc + 1L,
|
||||
preheadScTs,
|
||||
)
|
||||
Future.successful(processingStartingPoint -> cleanSequencerCounterPreheadO)
|
||||
}
|
||||
|
||||
requestTimestampPreheadO match {
|
||||
cleanRequestPreheadLocalOffsetO match {
|
||||
case None => startAtCleanSequencerCounterPrehead
|
||||
case Some(requestTimestampPrehead) =>
|
||||
case Some(lastCleanOffset @ RequestOffset(requestTimestampPrehead, rcPrehead)) =>
|
||||
if (cleanSequencerCounterPreheadO.exists(_.timestamp >= requestTimestampPrehead)) {
|
||||
startAtCleanSequencerCounterPrehead
|
||||
} else {
|
||||
val rcPrehead = firstDirtyRc - 1L
|
||||
|
||||
def noEventForRcPrehead(err: SequencedEventNotFoundError): Future[
|
||||
(
|
||||
MessageProcessingStartingPoint,
|
||||
@ -219,9 +223,10 @@ object SyncDomainEphemeralStateFactory {
|
||||
requestDataForRcPreheadF.map { requestData =>
|
||||
if (isRepairOnEmptyDomain(requestData))
|
||||
MessageProcessingStartingPoint(
|
||||
rcPrehead + 1L,
|
||||
SequencerCounter.Genesis,
|
||||
CantonTimestamp.MinValue,
|
||||
cleanRequestPrehead = Some(lastCleanOffset),
|
||||
nextRequestCounter = rcPrehead + 1L,
|
||||
nextSequencerCounter = SequencerCounter.Genesis,
|
||||
prenextTimestamp = CantonTimestamp.MinValue,
|
||||
) -> None
|
||||
else
|
||||
ErrorUtil.internalError(
|
||||
@ -241,6 +246,7 @@ object SyncDomainEphemeralStateFactory {
|
||||
val sequencerCounter = tracedEvent.counter
|
||||
val processingStartingPoint =
|
||||
MessageProcessingStartingPoint(
|
||||
Some(lastCleanOffset),
|
||||
rcPrehead + 1L,
|
||||
sequencerCounter + 1L,
|
||||
requestTimestampPrehead,
|
||||
@ -262,6 +268,7 @@ object SyncDomainEphemeralStateFactory {
|
||||
logger.debug(show"First dirty request is repair request $rcProcess on an empty domain.")
|
||||
Future.successful(
|
||||
MessageProcessingStartingPoint(
|
||||
cleanRequestPreheadLocalOffsetO,
|
||||
rcProcess,
|
||||
SequencerCounter.Genesis,
RepairService.RepairTimestampOnEmptyDomain,
@@ -289,12 +296,14 @@ object SyncDomainEphemeralStateFactory {
preStartingEventF.map {
case None =>
MessageProcessingStartingPoint(
cleanRequestPreheadLocalOffsetO,
rcProcess,
SequencerCounter.Genesis,
CantonTimestamp.MinValue,
) -> None
case Some(preStartingEvent) =>
val startingPoint = MessageProcessingStartingPoint(
cleanRequestPreheadLocalOffsetO,
rcProcess,
startingEvent.counter,
preStartingEvent.timestamp,
@@ -308,6 +317,7 @@ object SyncDomainEphemeralStateFactory {
// as there is no dirty request since the clean request prehead.
val startingPoint =
MessageProcessingStartingPoint(
cleanRequestPreheadLocalOffsetO,
rcProcess,
startingEvent.counter + 1L,
startingEvent.timestamp,
@@ -325,7 +335,7 @@ object SyncDomainEphemeralStateFactory {

logger.debug(s"Computing starting points for $domainId")
for {
nextLocalOffset <- eventLogNextOffset
lastPublishedLocalOffsetO <- multiDomainEventLog.lastLocalOffset(DomainEventLogId(domainId))
cleanSequencerCounterPrehead <- sequencerCounterTrackerStore.preheadSequencerCounter
_ = logger.debug(show"Clean sequencer counter prehead is $cleanSequencerCounterPrehead")
preheadClean <- requestJournalStore.preheadClean
@@ -336,28 +346,26 @@ object SyncDomainEphemeralStateFactory {
for {
x <- processingStartingPointAndRewoundSequencerCounterPrehead(
cleanSequencerCounterPrehead,
RequestCounter.Genesis,
None,
)
} yield {
val (processingStartingPoint, rewoundCleanSequencerCounterPrehead) = x
checked(
ProcessingStartingPoints.tryCreate(
processingStartingPoint,
processingStartingPoint,
nextLocalOffset,
rewoundCleanSequencerCounterPrehead,
cleanReplay = processingStartingPoint.toMessageCleanReplayStartingPoint,
processing = processingStartingPoint,
lastPublishedLocalOffset = lastPublishedLocalOffsetO,
rewoundSequencerCounterPrehead = rewoundCleanSequencerCounterPrehead,
)
)
}

case Some(cleanRequestPrehead @ CursorPrehead(rcPrehead, requestTimestampPrehead)) =>
case Some(cleanRequestPrehead) =>
logger.debug(show"Found clean request prehead at $cleanRequestPrehead")
for {
x <- processingStartingPointAndRewoundSequencerCounterPrehead(
cleanSequencerCounterPrehead,
rcPrehead + 1L,
Some(requestTimestampPrehead),
cleanSequencerCounterPreheadO = cleanSequencerCounterPrehead,
cleanRequestPreheadO = Some(cleanRequestPrehead),
)
(processingStartingPoint, rewoundSequencerCounterPrehead) = x
firstReplayedRequest <- requestJournalStore.firstRequestWithCommitTimeAfter(
@@ -374,7 +382,7 @@ object SyncDomainEphemeralStateFactory {
_commitTime,
_repairContext,
)
) if rcReplay <= rcPrehead =>
) if rcReplay <= cleanRequestPrehead.counter =>
// This request cannot be a repair request on an empty domain because a repair request on the empty domain
// commits at CantonTimestamp.MinValue, i.e., its commit time cannot be after the prenext timestamp.
sequencedEventStore
@@ -383,24 +391,24 @@ object SyncDomainEphemeralStateFactory {
noEventForRequestTimestamp(rcReplay),
event => {
logger.debug(s"Found sequenced event ${event.counter} at ${event.timestamp}")
MessageProcessingStartingPoint(
rcReplay,
event.counter,
requestTimestampReplay.immediatePredecessor,
MessageCleanReplayStartingPoint(
nextRequestCounter = rcReplay,
nextSequencerCounter = event.counter,
prenextTimestamp = requestTimestampReplay.immediatePredecessor,
)
},
)
case _ =>
// No need to replay clean requests
// because no requests to be reprocessed were in flight at the processing starting point.
Future.successful(processingStartingPoint)
Future.successful(processingStartingPoint.toMessageCleanReplayStartingPoint)
}
} yield checked(
ProcessingStartingPoints
.tryCreate(
replayStartingPoint,
processingStartingPoint,
nextLocalOffset,
lastPublishedLocalOffsetO,
rewoundSequencerCounterPrehead,
)
)
@@ -461,20 +469,25 @@ object SyncDomainEphemeralStateFactory {
// If there's a dirty repair request, this will delete its unpublished event from the event log,
//
// Some tests overwrite the clean request prehead.
// We therefore can not delete all events in the SingleDimensionEventLog after the processingStartingPoint
// We therefore cannot delete all events in the SingleDimensionEventLog after the processingStartingPoint
// because some of them may already have been published to the MultiDomainEventLog
processingStartingPoint = startingPoints.processing
unpublishedOffsetAfterCleanPrehead =
unpublishedOffsetAfterCleanPreheadO =
if (startingPoints.processingAfterPublished) {
processingStartingPoint.nextRequestCounter.asLocalOffset
processingStartingPoint.cleanRequestPrehead
} else {
logger.warn(
s"The clean request head ${processingStartingPoint.nextRequestCounter} precedes the next local offset at ${startingPoints.eventPublishingNextLocalOffset}. Has the clean request prehead been manipulated?"
s"The clean request prehead ${processingStartingPoint.cleanRequestPrehead} precedes the last published event at ${startingPoints.lastPublishedLocalOffset}. Has the clean request prehead been manipulated?"
)
startingPoints.eventPublishingNextLocalOffset
startingPoints.lastPublishedLocalOffset
}
_ = logger.debug(s"Deleting unpublished events since $unpublishedOffsetAfterCleanPrehead")
_ <- persistentState.eventLog.deleteSince(unpublishedOffsetAfterCleanPrehead)
_ <- unpublishedOffsetAfterCleanPreheadO.fold(Future.unit) {
unpublishedOffsetAfterCleanPrehead =>
logger.debug(
s"Deleting unpublished events after $unpublishedOffsetAfterCleanPrehead"
)
persistentState.eventLog.deleteAfter(unpublishedOffsetAfterCleanPrehead)
}
_ = logger.debug("Deleting dirty requests")
_ <- persistentState.requestJournalStore.deleteSince(
processingStartingPoint.nextRequestCounter

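The cleanup step above no longer deletes events since a computed offset but only strictly after the clean request prehead, and that prehead may be absent on a fresh domain, so the delete becomes conditional. A minimal sketch of the Option-guarded control flow, assuming a simplified event-log interface (the names below are illustrative stand-ins, not the actual Canton API):

  import scala.concurrent.Future

  // Illustrative stand-in for the event log: offsets are plain Longs here.
  final class SimpleEventLog(initial: Vector[Long]) {
    private var events: Vector[Long] = initial
    def deleteAfter(exclusive: Long): Future[Unit] =
      Future.successful { events = events.filter(_ <= exclusive) } // keep offsets up to the bound
  }

  // Mirrors the `unpublishedOffsetAfterCleanPreheadO.fold(Future.unit) { ... }` shape:
  // with no prehead there is nothing to delete, otherwise delete strictly after it.
  def cleanupUnpublished(log: SimpleEventLog, cleanPreheadO: Option[Long]): Future[Unit] =
    cleanPreheadO.fold(Future.unit)(prehead => log.deleteAfter(prehead))
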
@@ -9,11 +9,11 @@ import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.participant.GlobalOffset
import com.digitalasset.canton.participant.protocol.transfer.TransferData.*
import com.digitalasset.canton.participant.protocol.transfer.{IncompleteTransferData, TransferData}
import com.digitalasset.canton.participant.sync.SyncDomainPersistentStateLookup
import com.digitalasset.canton.participant.util.TimeOfChange
import com.digitalasset.canton.participant.{GlobalOffset, LocalOffset}
import com.digitalasset.canton.protocol.messages.DeliveredTransferOutResult
import com.digitalasset.canton.protocol.{SourceDomainId, TargetDomainId, TransferId}
import com.digitalasset.canton.tracing.TraceContext
@@ -311,25 +311,6 @@ trait TransferLookup {
traceContext: TraceContext
): Future[Seq[TransferData]]

/** Find utility to look for in-flight transfers.
* Transfers are ordered by request timestamp.
*
* @param sourceDomain source domain of the transfer
* @param onlyCompletedTransferOut select only transfers that are successfully transferred-out
* @param transferOutRequestNotAfter select only transfers whose transfer-out request counter `rc`
* satisfies `rc <= transferOutRequestNotAfter`.
* @param stakeholders if non-empty, select only transfers of contracts whose set of stakeholders
* intersects `stakeholders`.
* @param limit limit the number of results
*/
def findInFlight(
sourceDomain: SourceDomainId,
onlyCompletedTransferOut: Boolean,
transferOutRequestNotAfter: LocalOffset,
stakeholders: Option[NonEmpty[Set[LfPartyId]]],
limit: NonNegativeInt,
)(implicit traceContext: TraceContext): Future[Seq[TransferData]]

/** Find utility to look for incomplete transfers.
* Transfers are ordered by global offset.
*

@@ -19,6 +19,7 @@ import com.digitalasset.canton.logging.pretty.Pretty
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger}
import com.digitalasset.canton.metrics.TimedLoadGauge
import com.digitalasset.canton.participant.store.*
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.resource.DbStorage.{DbAction, SQLActionBuilderChain}
import com.digitalasset.canton.resource.{DbStorage, DbStore}
@@ -75,7 +76,13 @@ class DbContractStore(
val contractSalt = r.<<[Option[Salt]]

val contract =
SerializableContract(contractId, contractInstance, metadata, ledgerCreateTime, contractSalt)
SerializableContract(
contractId,
contractInstance,
metadata,
LedgerCreateTime(ledgerCreateTime),
contractSalt,
)
StoredContract(contract, requestCounter, creatingTransactionIdO)
}

@@ -268,7 +275,7 @@ class DbContractStore(
contractId: LfContractId,
instance: SerializableRawContractInstance,
metadata: ContractMetadata,
ledgerCreateTime: CantonTimestamp,
ledgerCreateTime: LedgerCreateTime,
contractSalt: Option[Salt],
),
requestCounter: RequestCounter,
@@ -282,7 +289,7 @@ class DbContractStore(
pp >> domainId
pp >> contractId
pp >> metadata
pp >> ledgerCreateTime
pp >> ledgerCreateTime.ts
pp >> requestCounter
pp >> creatingTransactionId
pp >> packageId

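The change above replaces a bare CantonTimestamp with the SerializableContract.LedgerCreateTime wrapper and unwraps it (`ledgerCreateTime.ts`) when binding SQL parameters. A sketch of the value-class pattern, with simplified stand-in types rather than the real Canton definitions:

  // Illustrative stand-ins; the real types carry more structure.
  final case class TimestampSketch(micros: Long)
  final case class LedgerCreateTimeSketch(ts: TimestampSketch) extends AnyVal

  // Persistence code unwraps explicitly, mirroring `pp >> ledgerCreateTime.ts` above;
  // the wrapper makes it a type error to pass some other timestamp by accident.
  def bindCreateTime(bind: Long => Unit, createTime: LedgerCreateTimeSketch): Unit =
    bind(createTime.ts.micros)
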
@@ -34,7 +34,7 @@ import com.digitalasset.canton.participant.store.db.DbMultiDomainEventLog.*
import com.digitalasset.canton.participant.store.{EventLogId, MultiDomainEventLog, TransferStore}
import com.digitalasset.canton.participant.sync.TimestampedEvent.TransactionEventId
import com.digitalasset.canton.participant.sync.{LedgerSyncEvent, TimestampedEvent}
import com.digitalasset.canton.participant.{GlobalOffset, LocalOffset}
import com.digitalasset.canton.participant.{GlobalOffset, LocalOffset, RequestOffset}
import com.digitalasset.canton.platform.akkastreams.dispatcher.Dispatcher
import com.digitalasset.canton.platform.akkastreams.dispatcher.SubSource.RangeSource
import com.digitalasset.canton.protocol.TargetDomainId
@@ -54,6 +54,7 @@ import com.digitalasset.canton.util.ShowUtil.*
import com.digitalasset.canton.util.*
import com.digitalasset.canton.{DiscardOps, LedgerTransactionId}
import com.google.common.annotations.VisibleForTesting
import slick.jdbc.GetResult

import java.util.concurrent.atomic.AtomicReference
import scala.collection.concurrent.TrieMap
@@ -369,16 +370,16 @@ class DbMultiDomainEventLog private[db] (
processingTime.event {
val insertStatement = storage.profile match {
case _: DbStorage.Profile.Oracle =>
"""merge /*+ INDEX ( linearized_event_log ( local_offset, log_id ) ) */
"""merge /*+ INDEX ( linearized_event_log ( local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, log_id ) ) */
|into linearized_event_log lel
|using (select ? log_id, ? local_offset from dual) input
|on (lel.local_offset = input.local_offset and lel.log_id = input.log_id)
|using (select ? log_id, ? local_offset_effective_time, ? local_offset_discriminator, ? local_offset_tie_breaker from dual) input
|on (lel.local_offset_effective_time = input.local_offset_effective_time and lel.local_offset_discriminator = input.local_offset_discriminator and lel.local_offset_tie_breaker = input.local_offset_tie_breaker and lel.log_id = input.log_id)
|when not matched then
| insert (log_id, local_offset, publication_time)
| values (input.log_id, input.local_offset, ?)""".stripMargin
| insert (log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, publication_time)
| values (input.log_id, input.local_offset_effective_time, input.local_offset_discriminator, input.local_offset_tie_breaker, ?)""".stripMargin
case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 =>
"""insert into linearized_event_log (log_id, local_offset, publication_time)
|values (?, ?, ?)
"""insert into linearized_event_log (log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, publication_time)
|values (?, ?, ?, ?, ?)
|on conflict do nothing""".stripMargin
}
val bulkInsert = DbStorage.bulkOperation_(
@@ -398,17 +399,16 @@ class DbMultiDomainEventLog private[db] (
override def fetchUnpublished(id: EventLogId, upToInclusiveO: Option[LocalOffset])(implicit
traceContext: TraceContext
): Future[List[PendingPublish]] = {
val fromExclusive = lastLocalOffsets.getOrElse(id.index, LocalOffset.MinValue)
val upToInclusive = upToInclusiveO.getOrElse(LocalOffset.MaxValue)
logger.info(s"Fetch unpublished from $id up to ${upToInclusiveO}")
val fromExclusive = lastLocalOffsets.get(id.index)
logger.info(s"Fetch unpublished from $id up to $upToInclusiveO")

processingTime.event {
for {
unpublishedLocalOffsets <- DbSingleDimensionEventLog.lookupEventRange(
storage = storage,
eventLogId = id,
fromInclusive = (fromExclusive + 1L).some,
toInclusive = upToInclusive.some,
fromExclusive = fromExclusive,
toInclusive = upToInclusiveO,
fromTimestampInclusive = None,
toTimestampInclusive = None,
limit = None,
@@ -450,7 +450,8 @@ class DbMultiDomainEventLog private[db] (
.mapAsync(1) { case (batchFromExcl, batchToIncl) =>
storage.query(
sql"""select /*+ INDEX (linearized_event_log pk_linearized_event_log, event_log pk_event_log) */ global_offset, content, trace_context
from linearized_event_log lel join event_log el on lel.log_id = el.log_id and lel.local_offset = el.local_offset
from linearized_event_log lel
join event_log el on lel.log_id = el.log_id and lel.local_offset_effective_time = el.local_offset_effective_time and lel.local_offset_tie_breaker = el.local_offset_tie_breaker
where global_offset > $batchFromExcl and global_offset <= $batchToIncl
order by global_offset asc"""
.as[
@@ -474,8 +475,9 @@ class DbMultiDomainEventLog private[db] (
processingTime.event {
storage
.query(
sql"""select global_offset, el.local_offset, request_sequencer_counter, el.event_id, content, trace_context
from linearized_event_log lel join event_log el on lel.log_id = el.log_id and lel.local_offset = el.local_offset
sql"""select global_offset, el.local_offset_effective_time, el.local_offset_discriminator, el.local_offset_tie_breaker, request_sequencer_counter, el.event_id, content, trace_context
from linearized_event_log lel join event_log el on lel.log_id = el.log_id
and lel.local_offset_effective_time = el.local_offset_effective_time and lel.local_offset_discriminator = el.local_offset_discriminator and lel.local_offset_tie_breaker = el.local_offset_tie_breaker
where global_offset <= ${upToInclusive.fold(Long.MaxValue)(_.toLong)}
order by global_offset asc #${storage.limit(limit.getOrElse(Int.MaxValue))}"""
.as[(GlobalOffset, TimestampedEvent)],
@@ -497,8 +499,9 @@ class DbMultiDomainEventLog private[db] (
val queries = inClauses.map { inClause =>
import DbStorage.Implicits.BuilderChain.*
(sql"""
select global_offset, el.local_offset, request_sequencer_counter, el.event_id, content, trace_context, publication_time
from linearized_event_log lel join event_log el on lel.log_id = el.log_id and lel.local_offset = el.local_offset
select global_offset, el.local_offset_effective_time, el.local_offset_discriminator, el.local_offset_tie_breaker, request_sequencer_counter, el.event_id, content, trace_context, publication_time
from linearized_event_log lel
join event_log el on lel.log_id = el.log_id and lel.local_offset_effective_time = el.local_offset_effective_time and lel.local_offset_discriminator = el.local_offset_discriminator and lel.local_offset_tie_breaker = el.local_offset_tie_breaker
where
""" ++ inClause).as[(GlobalOffset, TimestampedEvent, CantonTimestamp)]
}
@@ -531,31 +534,55 @@ class DbMultiDomainEventLog private[db] (
.map(_.domainId)
}

override def lastLocalOffsetBeforeOrAt(
private def lastLocalOffsetBeforeOrAt[T <: LocalOffset](
eventLogId: EventLogId,
upToInclusive: GlobalOffset,
timestampInclusive: Option[CantonTimestamp],
)(implicit traceContext: TraceContext): Future[Option[LocalOffset]] = {
localOffsetDiscriminator: Option[Int],
)(implicit traceContext: TraceContext, getResult: GetResult[T]): Future[Option[T]] = {
import DbStorage.Implicits.BuilderChain.*

processingTime.event {
val tsFilter = timestampInclusive.map(ts => sql" and el.ts <= $ts").getOrElse(sql" ")
val localOffsetDiscriminatorFilter =
localOffsetDiscriminator.fold(sql" ")(disc => sql" and el.local_offset_discriminator=$disc")

val ordering = sql" order by global_offset desc #${storage.limit(1)}"

// Note for idempotent retries, we don't require that the global offset has an actual ledger entry reference
val base =
sql"""select lel.local_offset
from linearized_event_log lel join event_log el on lel.log_id = el.log_id and lel.local_offset = el.local_offset
sql"""select lel.local_offset_effective_time, lel.local_offset_discriminator, lel.local_offset_tie_breaker
from linearized_event_log lel
join event_log el on lel.log_id = el.log_id and lel.local_offset_effective_time = el.local_offset_effective_time and lel.local_offset_discriminator = el.local_offset_discriminator and lel.local_offset_tie_breaker = el.local_offset_tie_breaker
where lel.log_id = ${eventLogId.index} and global_offset <= $upToInclusive
"""

val query = (base ++ tsFilter ++ ordering).as[LocalOffset].headOption
val query =
(base ++ tsFilter ++ localOffsetDiscriminatorFilter ++ ordering).as[T].headOption

storage.query(query, functionFullName)
}
}

override def lastLocalOffsetBeforeOrAt(
eventLogId: EventLogId,
upToInclusive: GlobalOffset,
timestampInclusive: Option[CantonTimestamp],
)(implicit traceContext: TraceContext): Future[Option[LocalOffset]] =
lastLocalOffsetBeforeOrAt(eventLogId, upToInclusive, timestampInclusive, None)

override def lastRequestOffsetBeforeOrAt(
eventLogId: EventLogId,
upToInclusive: GlobalOffset,
timestampInclusive: Option[CantonTimestamp],
)(implicit traceContext: TraceContext): Future[Option[RequestOffset]] =
lastLocalOffsetBeforeOrAt[RequestOffset](
eventLogId,
upToInclusive,
timestampInclusive,
Some(LocalOffset.RequestOffsetDiscriminator),
)

override def locateOffset(
deltaFromBeginning: Long
)(implicit traceContext: TraceContext): OptionT[Future, GlobalOffset] =
@@ -593,7 +620,7 @@ class DbMultiDomainEventLog private[db] (
processingTime.optionTEvent {
storage
.querySingle(
sql"select log_id, local_offset, publication_time from linearized_event_log where global_offset = $globalOffset"
sql"select log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, publication_time from linearized_event_log where global_offset = $globalOffset"
.as[(Int, LocalOffset, CantonTimestamp)]
.headOption,
functionFullName,
@@ -613,7 +640,7 @@ class DbMultiDomainEventLog private[db] (
sql"""
select lel.global_offset, lel.publication_time
from linearized_event_log lel
where lel.log_id = ${eventLogId.index} and lel.local_offset = $localOffset
where lel.log_id = ${eventLogId.index} and lel.local_offset_effective_time = ${localOffset.effectiveTime} and lel.local_offset_tie_breaker = ${localOffset.tieBreaker}
#${storage.limit(1)}
""".as[(GlobalOffset, CantonTimestamp)].headOption,
functionFullName,
@@ -647,7 +674,7 @@ class DbMultiDomainEventLog private[db] (
// can be used for the where clause and the ordering
val query =
sql"""
select global_offset, log_id, local_offset
select global_offset, log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker
from linearized_event_log
where publication_time >= $fromInclusive
order by publication_time asc, global_offset asc
@@ -662,17 +689,25 @@ class DbMultiDomainEventLog private[db] (

override def lastLocalOffset(
id: EventLogId
)(implicit traceContext: TraceContext): Future[Option[LocalOffset]] =
)(implicit traceContext: TraceContext): Future[Option[LocalOffset]] = {

/*
We want the maximum local offset.
Since global offset increases monotonically with the local offset on a given log, we sort by global offset.
*/
val query = sql"""
select local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker from linearized_event_log where log_id=${id.index} order by global_offset desc #${storage
.limit(1)}
"""

processingTime.event {
storage
.query(
sql"""select local_offset from linearized_event_log where log_id = ${id.index} order by local_offset desc #${storage
.limit(1)}"""
.as[LocalOffset]
.headOption,
query.as[LocalOffset].headOption,
functionFullName,
)
}
}

override def lastGlobalOffset(upToInclusive: Option[GlobalOffset] = None)(implicit
traceContext: TraceContext
@@ -701,10 +736,10 @@ class DbMultiDomainEventLog private[db] (
): Future[Seq[(GlobalOffset, EventLogId, LocalOffset, CantonTimestamp)]] = {
val query = storage.profile match {
case Profile.Oracle(_jdbc) =>
sql"select * from ((select global_offset, log_id, local_offset, publication_time from linearized_event_log order by global_offset desc)) where rownum < ${count + 1}"
sql"select * from ((select global_offset, log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, publication_time from linearized_event_log order by global_offset desc)) where rownum < ${count + 1}"
.as[(GlobalOffset, Int, LocalOffset, CantonTimestamp)]
case _ =>
sql"select global_offset, log_id, local_offset, publication_time from linearized_event_log order by global_offset desc #${storage
sql"select global_offset, log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, publication_time from linearized_event_log order by global_offset desc #${storage
.limit(count)}"
.as[(GlobalOffset, Int, LocalOffset, CantonTimestamp)]
}
@@ -819,8 +854,14 @@ object DbMultiDomainEventLog {
storage.query(
{
for {
/*
We want the maximum local offset.
Since global offset increases monotonically with the local offset on a given log, we sort by global offset.
*/
rows <-
sql"""select log_id, max(local_offset) from linearized_event_log group by log_id"""
sql"""select log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker
from linearized_event_log
where global_offset in (select max(global_offset) from linearized_event_log group by log_id)"""
.as[(Int, LocalOffset)]
} yield {
val result = new TrieMap[Int, LocalOffset]()

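Throughout this file the single `local_offset` column becomes the triple (`local_offset_effective_time`, `local_offset_discriminator`, `local_offset_tie_breaker`), and several queries now sort by global offset because it grows monotonically with the local offset on a given log. A sketch of the composite offset and its lexicographic ordering, with simplified field types (the actual LocalOffset/RequestOffset definitions differ in detail):

  // Illustrative composite offset: ordered by effective time, then discriminator, then tie breaker.
  sealed trait OffsetSketch {
    def effectiveTime: Long
    def discriminator: Int
    def tieBreaker: Long
  }

  // Stand-in for RequestOffset; 0 plays the role of LocalOffset.RequestOffsetDiscriminator.
  final case class RequestOffsetSketch(effectiveTime: Long, tieBreaker: Long) extends OffsetSketch {
    val discriminator: Int = 0
  }

  object OffsetSketch {
    implicit val ordering: Ordering[OffsetSketch] =
      Ordering.by((o: OffsetSketch) => (o.effectiveTime, o.discriminator, o.tieBreaker))
  }
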
@@ -4,10 +4,12 @@
package com.digitalasset.canton.participant.store.db

import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.RequestCounter
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.participant.LocalOffset
import com.digitalasset.canton.participant.RequestOffset
import com.digitalasset.canton.participant.store.EventLogId.ParticipantEventLogId
import com.digitalasset.canton.participant.store.ParticipantEventLog
import com.digitalasset.canton.participant.sync.TimestampedEvent
@@ -21,7 +23,7 @@ import scala.concurrent.{ExecutionContext, Future}

class DbParticipantEventLog(
id: ParticipantEventLogId,
storage: DbStorage,
override val storage: DbStorage,
indexedStringStore: IndexedStringStore,
releaseProtocolVersion: ReleaseProtocolVersion,
override protected val timeouts: ProcessingTimeout,
@@ -54,18 +56,18 @@ class DbParticipantEventLog(
val query = storage.profile match {
case _: DbStorage.Profile.Oracle =>
sql"""
select local_offset, request_sequencer_counter, event_id, content, trace_context from event_log
select local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, request_sequencer_counter, event_id, content, trace_context from event_log
where (case when log_id = '#${id.index}' then associated_domain end) = $associatedDomainIndex
and (case when log_id = '#${id.index}' and associated_domain is not null then ts end) >= $atOrAfter
order by local_offset asc
order by local_offset_effective_time asc, local_offset_discriminator asc, local_offset_tie_breaker asc
#${storage.limit(1)}
""".as[TimestampedEvent]
case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 =>
sql"""
select local_offset, request_sequencer_counter, event_id, content, trace_context from event_log
select local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, request_sequencer_counter, event_id, content, trace_context from event_log
where log_id = '#${id.index}' and associated_domain is not null
and associated_domain = $associatedDomainIndex and ts >= $atOrAfter
order by local_offset asc
order by (local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker) asc
#${storage.limit(1)}
""".as[TimestampedEvent]
}
@@ -75,23 +77,25 @@ class DbParticipantEventLog(
}

override def nextLocalOffsets(
count: Int
)(implicit traceContext: TraceContext): Future[Seq[LocalOffset]] =
if (count > 0) {
count: NonNegativeInt
)(implicit traceContext: TraceContext): Future[Seq[RequestOffset]] =
if (count.unwrap > 0) {
val query = storage.profile match {
case _: DbStorage.Profile.Postgres =>
sql"""select nextval('participant_event_publisher_local_offsets') from generate_series(1, #$count)"""
.as[LocalOffset]
sql"""select nextval('participant_event_publisher_local_offsets') from generate_series(1, #${count.unwrap})"""
.as[RequestCounter]
case _: DbStorage.Profile.Oracle =>
sql"""select participant_event_publisher_local_offsets.nextval from (select level from dual connect by level <= #$count)"""
.as[LocalOffset]
sql"""select participant_event_publisher_local_offsets.nextval from (select level from dual connect by level <= #${count.unwrap})"""
.as[RequestCounter]
case _: DbStorage.Profile.H2 =>
import DbStorage.Implicits.BuilderChain.*
(sql"select nextval('participant_event_publisher_local_offsets') from (values " ++
(1 to count).toList.map(i => sql"(#$i)").intercalate(sql", ") ++
(1 to count.unwrap).toList.map(i => sql"(#$i)").intercalate(sql", ") ++
sql")")
.as[LocalOffset]
.as[RequestCounter]
}
storage.queryAndUpdate(query, functionFullName)
storage
.queryAndUpdate(query, functionFullName)
.map(_.map(RequestOffset(ParticipantEventLog.EffectiveTime, _)))
} else Future.successful(Seq.empty)
}

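nextLocalOffsets now draws RequestCounter values from the database sequence and lifts each one into a RequestOffset at a fixed effective time, per the final `.map(...)` above. A small sketch of that lifting step, using hypothetical simplified types:

  // Hypothetical shapes, for illustration only.
  final case class RequestOffsetSketch(effectiveTime: Long, requestCounter: Long)

  // Mirrors `.map(_.map(RequestOffset(ParticipantEventLog.EffectiveTime, _)))` above.
  def toRequestOffsets(counters: Seq[Long], effectiveTime: Long): Seq[RequestOffsetSketch] =
    counters.map(RequestOffsetSketch(effectiveTime, _))
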
@@ -17,6 +17,7 @@ import com.digitalasset.canton.participant.LocalOffset
import com.digitalasset.canton.participant.store.*
import com.digitalasset.canton.participant.sync.TimestampedEvent
import com.digitalasset.canton.participant.sync.TimestampedEvent.EventId
import com.digitalasset.canton.resource.DbStorage.Profile
import com.digitalasset.canton.resource.{DbStorage, DbStore}
import com.digitalasset.canton.store.{IndexedDomain, IndexedStringStore}
import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext}
@@ -130,10 +131,10 @@ class DbSingleDimensionEventLog[+Id <: EventLogId](
case _: DbStorage.Profile.Oracle =>
val query =
"""merge into event_log e
using dual on ( (e.event_id = ?) or (e.log_id = ? and e.local_offset = ?))
using dual on ( (e.event_id = ?) or (e.log_id = ? and e.local_offset_effective_time = ? and e.local_offset_discriminator = ? and e.local_offset_tie_breaker = ?))
when not matched then
insert (log_id, local_offset, ts, request_sequencer_counter, event_id, associated_domain, content, trace_context)
values (?, ?, ?, ?, ?, ?, ?, ?)"""
insert (log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, ts, request_sequencer_counter, event_id, associated_domain, content, trace_context)
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"""
DbStorage.bulkOperation(
query,
eventsWithAssociatedDomainId,
@@ -155,8 +156,8 @@ class DbSingleDimensionEventLog[+Id <: EventLogId](
}
case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Postgres =>
val query =
"""insert into event_log (log_id, local_offset, ts, request_sequencer_counter, event_id, associated_domain, content, trace_context)
values (?, ?, ?, ?, ?, ?, ?, ?)
"""insert into event_log (log_id, local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, ts, request_sequencer_counter, event_id, associated_domain, content, trace_context)
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
on conflict do nothing"""
DbStorage.bulkOperation(query, eventsWithAssociatedDomainId, storage.profile) {
pp => eventWithDomain =>
@@ -191,14 +192,31 @@ class DbSingleDimensionEventLog[+Id <: EventLogId](
beforeAndIncluding: LocalOffset
)(implicit traceContext: TraceContext): Future[Unit] =
processingTime.event {
storage.update_(
sqlu"""delete from event_log where log_id = $log_id and local_offset <= $beforeAndIncluding""",
functionFullName,
)
val query = storage.profile match {
case _: Profile.H2 | _: Profile.Postgres =>
sqlu"""
delete from event_log
where
log_id = $log_id
and (local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker) <= (${beforeAndIncluding.effectiveTime}, ${beforeAndIncluding.discriminator}, ${beforeAndIncluding.tieBreaker})"""

case _: Profile.Oracle =>
val t = beforeAndIncluding.effectiveTime
val disc = beforeAndIncluding.discriminator
val tie = beforeAndIncluding.tieBreaker

sqlu"""
delete from event_log
where
log_id = $log_id
and ((local_offset_effective_time<$t) or (local_offset_effective_time=$t and local_offset_discriminator<$disc) or (local_offset_effective_time=$t and local_offset_discriminator=$disc and local_offset_tie_breaker<=$tie))"""
}

storage.update_(query, functionFullName)
}

override def lookupEventRange(
fromInclusive: Option[LocalOffset],
fromExclusive: Option[LocalOffset],
toInclusive: Option[LocalOffset],
fromTimestampInclusive: Option[CantonTimestamp],
toTimestampInclusive: Option[CantonTimestamp],
@@ -209,13 +227,13 @@ class DbSingleDimensionEventLog[+Id <: EventLogId](

processingTime.event {
DbSingleDimensionEventLog.lookupEventRange(
storage,
id,
fromInclusive,
toInclusive,
fromTimestampInclusive,
toTimestampInclusive,
limit,
storage = storage,
eventLogId = id,
fromExclusive = fromExclusive,
toInclusive = toInclusive,
fromTimestampInclusive = fromTimestampInclusive,
toTimestampInclusive = toTimestampInclusive,
limit = limit,
)
}
}
@@ -224,14 +242,21 @@ class DbSingleDimensionEventLog[+Id <: EventLogId](
offset: LocalOffset
)(implicit traceContext: TraceContext): OptionT[Future, TimestampedEvent] =
processingTime.optionTEvent {
val query = storage.profile match {
case _: Profile.H2 | _: Profile.Postgres =>
sql"""select local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, request_sequencer_counter, event_id, content, trace_context
from event_log
where log_id = $log_id and (local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker)=(${offset.effectiveTime}, ${offset.discriminator}, ${offset.tieBreaker})"""
case _: Profile.Oracle =>
sql"""select /*+ INDEX (event_log pk_event_log) */
local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, request_sequencer_counter, event_id, content, trace_context
from event_log
where log_id = $log_id and local_offset_effective_time=${offset.effectiveTime} and local_offset_discriminator=${offset.discriminator} and local_offset_tie_breaker=${offset.tieBreaker}"""
}

storage
.querySingle(
sql"""select /*+ INDEX (event_log pk_event_log) */
local_offset, request_sequencer_counter, event_id, content, trace_context
from event_log
where log_id = $log_id and local_offset = $offset"""
.as[TimestampedEvent]
.headOption,
query.as[TimestampedEvent].headOption,
functionFullName,
)
}
@@ -239,7 +264,8 @@ class DbSingleDimensionEventLog[+Id <: EventLogId](
override def lastOffset(implicit traceContext: TraceContext): OptionT[Future, LocalOffset] =
processingTime.optionTEvent {
storage.querySingle(
sql"""select local_offset from event_log where log_id = $log_id order by local_offset desc #${storage
sql"""select local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker from event_log where log_id = $log_id
order by local_offset_effective_time desc, local_offset_discriminator desc, local_offset_tie_breaker desc #${storage
.limit(1)}"""
.as[LocalOffset]
.headOption,
@@ -253,7 +279,7 @@ class DbSingleDimensionEventLog[+Id <: EventLogId](
processingTime.optionTEvent {
storage
.querySingle(
sql"""select local_offset, request_sequencer_counter, event_id, content, trace_context
sql"""select local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, request_sequencer_counter, event_id, content, trace_context
from event_log
where log_id = $log_id and event_id = $eventId"""
.as[TimestampedEvent]
@@ -265,34 +291,80 @@ class DbSingleDimensionEventLog[+Id <: EventLogId](
override def existsBetween(
timestampInclusive: CantonTimestamp,
localOffsetInclusive: LocalOffset,
)(implicit traceContext: TraceContext): Future[Boolean] = processingTime.event {
val query =
sql"""
select 1 from event_log where log_id = $log_id and local_offset <= $localOffsetInclusive and ts >= $timestampInclusive
#${storage.limit(1)}
"""
.as[Int]
.headOption
storage.query(query, "exists between").map(_.isDefined)
)(implicit traceContext: TraceContext): Future[Boolean] = {
import DbStorage.Implicits.BuilderChain.*

processingTime.event {
val queryLocalOffset =
DbSingleDimensionEventLog.localOffsetComparison(storage)("<=", localOffsetInclusive)

val query = sql"""
select 1 from event_log where
log_id = $log_id
""" ++ queryLocalOffset ++ sql" and ts >= $timestampInclusive #${storage.limit(1)}"

storage.query(query.as[Int].headOption, "exists between").map(_.isDefined)
}
}

override def deleteSince(
inclusive: LocalOffset
)(implicit traceContext: TraceContext): Future[Unit] =
processingTime.event {
storage.update_(
sqlu"""delete from event_log where log_id = $log_id and local_offset >= $inclusive""",
functionFullName,
)
override def deleteAfter(
exclusive: LocalOffset
)(implicit traceContext: TraceContext): Future[Unit] = {

val query = storage.profile match {
case _: Profile.H2 | _: Profile.Postgres =>
sqlu"""
delete from event_log
where
log_id = $log_id
and (local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker) > (${exclusive.effectiveTime}, ${exclusive.discriminator}, ${exclusive.tieBreaker})"""

case _: Profile.Oracle =>
val t = exclusive.effectiveTime
val disc = exclusive.discriminator
val tie = exclusive.tieBreaker

sqlu"""
delete from event_log
where
log_id = $log_id
and ((local_offset_effective_time>$t) or (local_offset_effective_time=$t and local_offset_discriminator>$disc) or (local_offset_effective_time=$t and local_offset_discriminator=$disc and local_offset_tie_breaker>$tie))"""
}

processingTime.event {
storage.update_(query, functionFullName)
}
}
}

object DbSingleDimensionEventLog {

/** @param op One comparison operator (<, <=, >, >=)
* @param offset Local offset to compare to
* @return
*/
private def localOffsetComparison(
storage: DbStorage
)(op: String, offset: LocalOffset): canton.SQLActionBuilder = {
import storage.api.*

val t = offset.effectiveTime
val disc = offset.discriminator
val tie = offset.tieBreaker

storage.profile match {
case _: Profile.H2 | _: Profile.Postgres =>
sql" and ((local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker) #$op ($t, $disc, $tie))"

case _: Profile.Oracle =>
sql" and ((local_offset_effective_time#$op$t) or (local_offset_effective_time=$t and local_offset_discriminator#$op$disc) or (local_offset_effective_time=$t and local_offset_discriminator=$disc and local_offset_tie_breaker#$op$tie))"
}
}

private[store] def lookupEventRange(
storage: DbStorage,
eventLogId: EventLogId,
fromInclusive: Option[LocalOffset],
fromExclusive: Option[LocalOffset],
toInclusive: Option[LocalOffset],
fromTimestampInclusive: Option[CantonTimestamp],
toTimestampInclusive: Option[CantonTimestamp],
@@ -309,18 +381,19 @@ object DbSingleDimensionEventLog {
import storage.converters.*

val filters = List(
fromInclusive.map(n => sql" and local_offset >= $n"),
toInclusive.map(n => sql" and local_offset <= $n"),
fromExclusive.map(localOffsetComparison(storage)(">", _)),
toInclusive.map(localOffsetComparison(storage)("<=", _)),
fromTimestampInclusive.map(n => sql" and ts >= $n"),
toTimestampInclusive.map(n => sql" and ts <= $n"),
).flattenOption.intercalate(sql"")

for {
eventsVector <- storage.query(
(sql"""select local_offset, request_sequencer_counter, event_id, content, trace_context
(sql"""select local_offset_effective_time, local_offset_discriminator, local_offset_tie_breaker, request_sequencer_counter, event_id, content, trace_context
from event_log
where log_id = ${eventLogId.index}""" ++ filters ++
sql""" order by local_offset asc #${storage.limit(limit.getOrElse(Int.MaxValue))}""")
sql""" order by local_offset_effective_time asc, local_offset_discriminator asc, local_offset_tie_breaker asc #${storage
.limit(limit.getOrElse(Int.MaxValue))}""")
.as[TimestampedEvent]
.map(_.map { event => event.localOffset -> event }),
functionFullName,

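The Postgres/H2 branches above can compare the offset triple as a single row value, but the Oracle branches must expand it into a disjunction. The inline Oracle delete statements expand it the standard lexicographic way: strict comparison on the leading columns, with the bound operator applied only to the last. A sketch of the same predicate in plain Scala (illustrative only, not part of the commit):

  // (a, b, c) <= (x, y, z) in lexicographic order, spelled out the way the
  // Oracle delete statements above spell it out in SQL.
  def lexLeq(a: Long, b: Int, c: Long)(x: Long, y: Int, z: Long): Boolean =
    (a < x) ||
      (a == x && b < y) ||
      (a == x && b == y && c <= z)
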
@@ -18,13 +18,13 @@ import com.digitalasset.canton.data.{CantonTimestamp, FullTransferOutTree}
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.metrics.TimedLoadGauge
import com.digitalasset.canton.participant.GlobalOffset
import com.digitalasset.canton.participant.protocol.transfer.TransferData.TransferGlobalOffset
import com.digitalasset.canton.participant.protocol.transfer.{IncompleteTransferData, TransferData}
import com.digitalasset.canton.participant.store.TransferStore
import com.digitalasset.canton.participant.store.TransferStore.*
import com.digitalasset.canton.participant.store.db.DbTransferStore.RawDeliveredTransferOutResult
import com.digitalasset.canton.participant.util.TimeOfChange
import com.digitalasset.canton.participant.{GlobalOffset, LocalOffset}
import com.digitalasset.canton.protocol.messages.*
import com.digitalasset.canton.protocol.{
SerializableContract,
@@ -510,34 +510,6 @@ class DbTransferStore(
)
}

private def findInFlightDbQuery(
sourceDomain: SourceDomainId,
transferredOutOnly: Boolean,
transferOutRequestNotAfter: LocalOffset,
start: Long,
)(implicit traceContext: TraceContext): Future[Seq[TransferData]] =
storage.query(
{
import DbStorage.Implicits.BuilderChain.*

val offsetFilter = sql" and transfer_out_request_counter <= $transferOutRequestNotAfter"

val transferredOutOnlyFilter =
if (transferredOutOnly)
sql" and transfer_out_result is not null"
else sql""

val order = sql" order by transfer_out_timestamp "
val limitSql =
storage.limitSql(numberOfItems = DbTransferStore.dbQueryLimit, skipItems = start)

val base = findPendingBase(sourceDomain, onlyNotFinished = true)

(base ++ offsetFilter ++ transferredOutOnlyFilter ++ order ++ limitSql).as[TransferData]
},
functionFullName,
)

private def findIncomplete(
sourceDomain: Option[SourceDomainId],
validAt: GlobalOffset,
@@ -601,29 +573,6 @@ class DbTransferStore(
}
}

override def findInFlight(
sourceDomain: SourceDomainId,
onlyCompletedTransferOut: Boolean,
transferOutRequestNotAfter: LocalOffset,
stakeholders: Option[NonEmpty[Set[LfPartyId]]],
limit: NonNegativeInt,
)(implicit traceContext: TraceContext): Future[Seq[TransferData]] = processingTime.event {
val queryFrom = (start: Long, traceContext: TraceContext) =>
findInFlightDbQuery(
sourceDomain,
onlyCompletedTransferOut,
transferOutRequestNotAfter,
start = start,
)(traceContext)

queryWithFiltering(
stakeholders = stakeholders,
limit = limit,
queryFrom = queryFrom,
dbQueryLimit = DbTransferStore.dbQueryLimit,
)
}

override def findIncomplete(
sourceDomain: Option[SourceDomainId],
validAt: GlobalOffset,

@@ -50,7 +50,7 @@ import com.digitalasset.canton.participant.sync.{
SyncDomainPersistentStateLookup,
TimestampedEvent,
}
import com.digitalasset.canton.participant.{GlobalOffset, LocalOffset}
import com.digitalasset.canton.participant.{GlobalOffset, LocalOffset, RequestOffset}
import com.digitalasset.canton.platform.akkastreams.dispatcher.Dispatcher
import com.digitalasset.canton.platform.akkastreams.dispatcher.SubSource.RangeSource
import com.digitalasset.canton.protocol.TargetDomainId
@@ -65,16 +65,14 @@ import com.digitalasset.canton.util.{ErrorUtil, FutureUtil, SimpleExecutionQueue
import java.util.concurrent.atomic.AtomicReference
import scala.collection.immutable.{SortedMap, TreeMap}
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag

class InMemoryMultiDomainEventLog(
lookupEvent: NamedLoggingContext => (
EventLogId,
LocalOffset,
) => Future[TimestampedEvent],
lookupOffsetsBetween: NamedLoggingContext => EventLogId => (
LocalOffset,
LocalOffset,
) => Future[Seq[LocalOffset]],
offsetsLookup: InMemoryOffsetsLookup,
byEventId: NamedLoggingContext => EventId => OptionT[Future, (EventLogId, LocalOffset)],
clock: Clock,
metrics: ParticipantMetrics,
@@ -223,12 +221,11 @@ class InMemoryMultiDomainEventLog(
override def fetchUnpublished(id: EventLogId, upToInclusiveO: Option[LocalOffset])(implicit
traceContext: TraceContext
): Future[Seq[PendingPublish]] = {
val fromExclusive = entriesRef.get().lastLocalOffsets.getOrElse(id, LocalOffset.MinValue)
val upToInclusive = upToInclusiveO.getOrElse(LocalOffset.MaxValue)
val fromExclusive = entriesRef.get().lastLocalOffsets.get(id)
for {
unpublishedOffsets <- lookupOffsetsBetween(namedLoggingContext)(id)(
fromExclusive + 1,
upToInclusive,
unpublishedOffsets <- offsetsLookup.lookupOffsetsBetween(id)(
fromExclusive,
upToInclusiveO,
)
unpublishedEvents <- unpublishedOffsets.parTraverse(offset =>
lookupEvent(namedLoggingContext)(id, offset)
@@ -347,11 +344,27 @@ class InMemoryMultiDomainEventLog(
eventLogId: EventLogId,
upToInclusive: GlobalOffset,
timestampInclusive: Option[CantonTimestamp],
)(implicit traceContext: TraceContext): Future[Option[LocalOffset]] = {
)(implicit traceContext: TraceContext): Future[Option[LocalOffset]] =
lastLocalOffsetBeforeOrAt[LocalOffset](eventLogId, upToInclusive, timestampInclusive)

override def lastRequestOffsetBeforeOrAt(
eventLogId: EventLogId,
upToInclusive: GlobalOffset,
timestampInclusive: Option[CantonTimestamp],
)(implicit traceContext: TraceContext): Future[Option[RequestOffset]] =
lastLocalOffsetBeforeOrAt[RequestOffset](eventLogId, upToInclusive, timestampInclusive)

private def lastLocalOffsetBeforeOrAt[T <: LocalOffset: ClassTag](
eventLogId: EventLogId,
upToInclusive: GlobalOffset,
timestampInclusive: Option[CantonTimestamp],
)(implicit traceContext: TraceContext): Future[Option[T]] = {
val referencesUpTo = entriesRef.get().referencesByOffset.rangeTo(upToInclusive).values
val reversedLocalOffsets =
referencesUpTo
.collect { case (id, localOffset, _processingTime) if id == eventLogId => localOffset }
.collect {
case (id, localOffset: T, _processingTime) if id == eventLogId => localOffset
}
.toList
.reverse

@@ -503,9 +516,12 @@ object InMemoryMultiDomainEventLog extends HasLoggerName {
(eventLog.id: EventLogId) -> eventLog
} + (participantEventLog.id -> participantEventLog)

val offsetsLooker =
new InMemoryOffsetsLookupImpl(syncDomainPersistentStates, participantEventLog)

new InMemoryMultiDomainEventLog(
lookupEvent(allEventLogs),
lookupOffsetsBetween(allEventLogs),
offsetsLooker,
byEventId(allEventLogs),
clock,
metrics,
@@ -534,25 +550,6 @@ object InMemoryMultiDomainEventLog extends HasLoggerName {
)
}

private def lookupOffsetsBetween(
allEventLogs: => Map[EventLogId, SingleDimensionEventLog[EventLogId]]
)(namedLoggingContext: NamedLoggingContext)(
id: EventLogId
)(fromInclusive: LocalOffset, upToInclusive: LocalOffset): Future[Seq[LocalOffset]] = {
implicit val loggingContext: NamedLoggingContext = namedLoggingContext
implicit val tc: TraceContext = loggingContext.traceContext
implicit val ec: ExecutionContext = DirectExecutionContext(loggingContext.tracedLogger)
for {
events <- allEventLogs(id).lookupEventRange(
Some(fromInclusive),
Some(upToInclusive),
None,
None,
None,
)
} yield events.rangeFrom(fromInclusive).rangeTo(upToInclusive).keySet.toSeq
}

private def byEventId(allEventLogs: => Map[EventLogId, SingleDimensionEventLog[EventLogId]])(
namedLoggingContext: NamedLoggingContext
)(eventId: EventId): OptionT[Future, (EventLogId, LocalOffset)] = {

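The private lastLocalOffsetBeforeOrAt above takes a ClassTag bound so the `case localOffset: T` pattern in `collect` can keep only offsets of the requested subtype at runtime, which is how lastRequestOffsetBeforeOrAt narrows to RequestOffset. A sketch of that mechanism with hypothetical offset types:

  import scala.reflect.ClassTag

  sealed trait OffsetSketch
  final case class RequestOffsetSketch(counter: Long) extends OffsetSketch
  final case class OtherOffsetSketch(ts: Long) extends OffsetSketch

  // With a ClassTag in scope, `case t: T` compiles to a runtime instance check,
  // so offsets of other subtypes are simply skipped.
  def lastOfType[T <: OffsetSketch: ClassTag](offsets: List[OffsetSketch]): Option[T] =
    offsets.reverseIterator.collectFirst { case t: T => t }
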
@@ -0,0 +1,49 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.participant.store.memory

import com.digitalasset.canton.participant.LocalOffset
import com.digitalasset.canton.participant.store.*
import com.digitalasset.canton.participant.sync.SyncDomainPersistentStateLookup
import com.digitalasset.canton.tracing.TraceContext

import scala.concurrent.{ExecutionContext, Future}

trait InMemoryOffsetsLookup {
def lookupOffsetsBetween(
id: EventLogId
)(fromExclusive: Option[LocalOffset], upToInclusive: Option[LocalOffset])(implicit
executionContext: ExecutionContext,
traceContext: TraceContext,
): Future[Seq[LocalOffset]]
}

class InMemoryOffsetsLookupImpl(
syncDomainPersistentStates: SyncDomainPersistentStateLookup,
participantEventLog: ParticipantEventLog,
) extends InMemoryOffsetsLookup {

def lookupOffsetsBetween(
id: EventLogId
)(fromExclusive: Option[LocalOffset], upToInclusive: Option[LocalOffset])(implicit
executionContext: ExecutionContext,
traceContext: TraceContext,
): Future[Seq[LocalOffset]] = {
for {
events <- allEventLogs(id).lookupEventRange(
fromExclusive,
upToInclusive,
None,
None,
None,
)
} yield events.keySet.toSeq
}

private def allEventLogs: Map[EventLogId, SingleDimensionEventLog[EventLogId]] =
syncDomainPersistentStates.getAll.map { case (_, state) =>
val eventLog = state.eventLog
(eventLog.id: EventLogId) -> eventLog
} + (participantEventLog.id -> participantEventLog)
}

@@ -3,15 +3,16 @@

package com.digitalasset.canton.participant.store.memory

import com.digitalasset.canton.RequestCounter
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.participant.LocalOffset
import com.digitalasset.canton.participant.RequestOffset
import com.digitalasset.canton.participant.store.EventLogId.ParticipantEventLogId
import com.digitalasset.canton.participant.store.ParticipantEventLog
import com.digitalasset.canton.participant.sync.TimestampedEvent
import com.digitalasset.canton.topology.DomainId
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ErrorUtil

import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future}
@@ -21,19 +22,18 @@ class InMemoryParticipantEventLog(id: ParticipantEventLogId, loggerFactory: Name
) extends InMemorySingleDimensionEventLog[ParticipantEventLogId](id, loggerFactory)
with ParticipantEventLog {

private val nextLocalOffsetRef =
new AtomicReference[LocalOffset](ParticipantEventLog.InitialLocalOffset)
private val nextRequestCounterRef =
new AtomicReference[RequestCounter](InMemoryParticipantEventLog.InitialCounter)

override def nextLocalOffsets(
count: Int
)(implicit traceContext: TraceContext): Future[Seq[LocalOffset]] =
count: NonNegativeInt
)(implicit traceContext: TraceContext): Future[Seq[RequestOffset]] =
Future.successful {
ErrorUtil.requireArgument(
count >= 0,
s"allocation count for offsets must be non-negative: $count",
)
val oldOffset = nextLocalOffsetRef.getAndUpdate(offset => offset + count)
oldOffset until (oldOffset + count)
val oldCounter = nextRequestCounterRef.getAndUpdate(offset => offset + count.unwrap.toLong)

oldCounter
.until(oldCounter + count.unwrap.toLong)
.map(RequestOffset(ParticipantEventLog.EffectiveTime, _))
}

override def firstEventWithAssociatedDomainAtOrAfter(
@@ -42,7 +42,7 @@ class InMemoryParticipantEventLog(id: ParticipantEventLogId, loggerFactory: Name
)(implicit traceContext: TraceContext): Future[Option[TimestampedEvent]] =
Future.successful {
state.get().eventsByOffset.collectFirst {
case (localOffset, event)
case (_localOffset, event)
if event.eventId.exists(
_.associatedDomain.contains(associatedDomain)
) && event.timestamp >= atOrAfter =>
@@ -52,3 +52,7 @@ class InMemoryParticipantEventLog(id: ParticipantEventLogId, loggerFactory: Name

override def close(): Unit = ()
}

object InMemoryParticipantEventLog {
private val InitialCounter: RequestCounter = RequestCounter.Genesis
}

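The in-memory log above replaces its LocalOffset cursor with a RequestCounter cursor but still reserves a whole block of values per call via getAndUpdate. A minimal sketch of that allocation pattern, simplified to plain Longs (not the actual Canton types):

  import java.util.concurrent.atomic.AtomicLong

  final class CounterBlockAllocator(start: Long) {
    private val next = new AtomicLong(start)

    // Atomically reserves the half-open block [old, old + count) in one step,
    // so concurrent callers never receive overlapping counters.
    def allocate(count: Int): Seq[Long] = {
      val old = next.getAndAdd(count.toLong)
      old until (old + count)
    }
  }
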
@@ -109,7 +109,7 @@ class InMemorySingleDimensionEventLog[+Id <: EventLogId](
}

override def lookupEventRange(
fromInclusive: Option[LocalOffset],
fromExclusive: Option[LocalOffset],
toInclusive: Option[LocalOffset],
fromTimestampInclusive: Option[CantonTimestamp],
toTimestampInclusive: Option[CantonTimestamp],
@@ -119,14 +119,19 @@ class InMemorySingleDimensionEventLog[+Id <: EventLogId](
): Future[SortedMap[LocalOffset, TimestampedEvent]] =
Future.successful {
val allEvents = state.get().eventsByOffset
val filteredEvents = allEvents
.rangeFrom(fromInclusive.getOrElse(LocalOffset.MinValue))

def timestampFilter(event: TimestampedEvent): Boolean =
fromTimestampInclusive.forall(_ <= event.timestamp) && toTimestampInclusive.forall(
event.timestamp <= _
)

val filteredEvents = fromExclusive
.fold(allEvents)(allEvents.rangeFrom)
.rangeTo(toInclusive.getOrElse(LocalOffset.MaxValue))
.filter { case (_, event) =>
fromTimestampInclusive.forall(_ <= event.timestamp) && toTimestampInclusive.forall(
event.timestamp <= _
)
.filter { case (_offset, event) =>
timestampFilter(event)
}

limit match {
case Some(n) => filteredEvents.take(n)
case None => filteredEvents
@@ -159,15 +164,15 @@ class InMemorySingleDimensionEventLog[+Id <: EventLogId](
}
}

override def deleteSince(
inclusive: LocalOffset
override def deleteAfter(
exclusive: LocalOffset
)(implicit traceContext: TraceContext): Future[Unit] =
Future.successful {
val _ = state.updateAndGet { case Entries(ledger, _) =>
val newLedger = ledger.filter { case (offset, _) => offset < inclusive }
state.updateAndGet { case Entries(ledger, _) =>
val newLedger = ledger.filter { case (offset, _) => offset <= exclusive }
val newTransactionIds = byEventIds(newLedger)
Entries(newLedger, newTransactionIds)
}
}.discard
}

private[this] def byEventIds(

@@ -11,10 +11,10 @@ import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.participant.GlobalOffset
import com.digitalasset.canton.participant.protocol.transfer.{IncompleteTransferData, TransferData}
import com.digitalasset.canton.participant.store.TransferStore
import com.digitalasset.canton.participant.util.TimeOfChange
import com.digitalasset.canton.participant.{GlobalOffset, LocalOffset, RichRequestCounter}
import com.digitalasset.canton.protocol.messages.DeliveredTransferOutResult
import com.digitalasset.canton.protocol.{SourceDomainId, TargetDomainId, TransferId}
import com.digitalasset.canton.tracing.TraceContext
@@ -206,31 +206,6 @@ class InMemoryTransferStore(
.take(limit)
}

override def findInFlight(
sourceDomain: SourceDomainId,
onlyCompletedTransferOut: Boolean,
transferOutRequestNotAfter: LocalOffset,
stakeholders: Option[NonEmpty[Set[LfPartyId]]],
limit: NonNegativeInt,
)(implicit traceContext: TraceContext): Future[Seq[TransferData]] = {
def filter(entry: TransferEntry): Boolean = {
entry.transferData.sourceDomain == sourceDomain &&
entry.timeOfCompletion.isEmpty && // Always filter out completed transfer-in
entry.transferData.transferOutRequestCounter.asLocalOffset <= transferOutRequestNotAfter &&
(!onlyCompletedTransferOut || entry.transferData.transferOutResult.isDefined == onlyCompletedTransferOut) && // Transfer-out is completed condition
stakeholders.forall(_.exists(entry.transferData.contract.metadata.stakeholders))
}

val values = transferDataMap.values
.to(LazyList)
.filter(filter)
.sortBy(_.transferData.transferOutTimestamp)
.take(limit.unwrap)
.map(_.transferData)

Future.successful(values)
}

override def findIncomplete(
sourceDomain: Option[SourceDomainId],
validAt: GlobalOffset,

@ -9,6 +9,7 @@ import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.participant.GlobalOffset
import com.digitalasset.canton.participant.protocol.transfer.{IncompleteTransferData, TransferData}
import com.digitalasset.canton.participant.store.TransferStore.{
TransferAlreadyCompleted,
@ -18,7 +19,6 @@ import com.digitalasset.canton.participant.store.TransferStore.{
import com.digitalasset.canton.participant.store.memory.TransferCache.PendingTransferCompletion
import com.digitalasset.canton.participant.store.{TransferLookup, TransferStore}
import com.digitalasset.canton.participant.util.TimeOfChange
import com.digitalasset.canton.participant.{GlobalOffset, LocalOffset}
import com.digitalasset.canton.protocol.{SourceDomainId, TransferId}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.{Checked, CheckedT}
@ -132,23 +132,6 @@ class TransferCache(transferStore: TransferStore, override val loggerFactory: Na
.findAfter(requestAfter, limit)
.map(_.filter(transferData => !pendingCompletions.contains(transferData.transferId)))

override def findInFlight(
sourceDomain: SourceDomainId,
onlyCompletedTransferOut: Boolean,
transferOutRequestNotAfter: LocalOffset,
stakeholders: Option[NonEmpty[Set[LfPartyId]]],
limit: NonNegativeInt,
)(implicit traceContext: TraceContext): Future[Seq[TransferData]] =
transferStore
.findInFlight(
sourceDomain,
onlyCompletedTransferOut,
transferOutRequestNotAfter,
stakeholders,
limit,
)
.map(_.filter(transferData => !pendingCompletions.contains(transferData.transferId)))

/** Transfer-out/in global offsets will be updated upon publication in the multi-domain event log, when
* the global offset is assigned to the event.
* In order to avoid race conditions, the multi-domain event log will wait for the calls to

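TransferCache keeps delegating lookups to the underlying TransferStore and then masks transfers whose completion is pending in memory but not yet persisted; the removed findInFlight followed the same decorate-then-filter shape as the surviving findAfter. A rough sketch of that pattern, with simplified stand-ins for TransferLookup and TransferData:

import scala.concurrent.{ExecutionContext, Future}

object CacheFiltering {
  type TransferId = String
  final case class TransferData(transferId: TransferId)

  trait TransferLookup {
    def findAfter(limit: Int)(implicit ec: ExecutionContext): Future[Seq[TransferData]]
  }

  // Decorates a store lookup and hides transfers whose completion is
  // already pending in memory, mirroring TransferCache's filtering.
  final class FilteringLookup(store: TransferLookup, pendingCompletions: () => Set[TransferId])
      extends TransferLookup {
    def findAfter(limit: Int)(implicit ec: ExecutionContext): Future[Seq[TransferData]] =
      store.findAfter(limit).map(_.filter(td => !pendingCompletions().contains(td.transferId)))
  }
}
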
@ -179,16 +179,10 @@ class CantonSyncService(
participantNodePersistentState.value.settingsStore.settings.maxDeduplicationDuration
.getOrElse(throw new RuntimeException("Max deduplication duration is not available"))

private val excludedPackageIds = if (parameters.excludeInfrastructureTransactions) {
Set(
workflows.PackageID.PingPong,
workflows.PackageID.DarDistribution,
workflows.PackageID.PingPongVacuum,
)
.map(Ref.PackageId.assertFromString)
} else {
Set.empty[Ref.PackageId]
}
val eventTranslationStrategy = new EventTranslationStrategy(
multiDomainLedgerAPIEnabled = multiDomainLedgerAPIEnabled,
excludeInfrastructureTransactions = parameters.excludeInfrastructureTransactions,
)

type ConnectionListener = DomainAlias => Unit

@ -576,48 +570,17 @@ class CantonSyncService(
beginStartingAt =>
participantNodePersistentState.value.multiDomainEventLog
.subscribe(beginStartingAt)
.map { case (offset, tracedEvent) =>
tracedEvent
.map(augmentTransactionStatistics)
.map(_.toDamlUpdate(populateTransfers = multiDomainLedgerAPIEnabled))
.sequence
.map { tracedUpdate =>
implicit val traceContext: TraceContext = tracedEvent.traceContext
logger
.debug(show"Emitting event at offset $offset. Event: ${tracedEvent.value}")
(UpstreamOffsetConvert.fromGlobalOffset(offset), tracedUpdate)
.mapConcat { case (offset, event) =>
event
.traverse(eventTranslationStrategy.translate)
.map { e =>
logger.debug(show"Emitting event at offset $offset. Event: ${event.value}")
(UpstreamOffsetConvert.fromGlobalOffset(offset), e)
}
}
.collect { case Some(tuple) => tuple },
},
)
}

// Augment event with transaction statistics "as late as possible" as stats are redundant data and so that
// we don't need to persist stats and deal with versioning stats changes. Also every event is usually consumed
// only once.
private[sync] def augmentTransactionStatistics(event: LedgerSyncEvent): LedgerSyncEvent =
event match {
case ta @ LedgerSyncEvent.TransactionAccepted(
Some(completionInfo),
_transactionMeta,
transaction,
_transactionId,
_recordTime,
_divulgedContracts,
_blindingInfo,
_hostedWitnesses,
_contractMetadata,
) =>
ta.copy(completionInfoO =
Some(
completionInfo.copy(statistics =
Some(LedgerTransactionNodeStatistics(transaction, excludedPackageIds))
)
)
)
case event => event
}

override def allocateParty(
hint: Option[LfPartyId],
displayName: Option[String],

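The subscription above now pushes every event through a single translation step that yields Option[Update], and mapConcat flattens that Option, so events without a ledger-API representation (for example transfer events while the multi-domain API is disabled) are dropped at the source instead of being carried as None and collected later. The shape of that filtering on plain collections, with Event and Update as stand-ins for the Canton types:

object TranslateAndFlatten {
  sealed trait Event
  final case class TransactionEvent(id: String) extends Event
  final case class TransferEvent(id: String) extends Event

  final case class Update(description: String)

  // Stand-in for EventTranslationStrategy.translate: transfers only get an
  // update when the multi-domain ledger API is enabled.
  def translate(multiDomainEnabled: Boolean)(e: Event): Option[Update] = e match {
    case TransactionEvent(id) => Some(Update(s"tx $id"))
    case TransferEvent(id) => Option.when(multiDomainEnabled)(Update(s"transfer $id"))
  }

  def main(args: Array[String]): Unit = {
    val events = List(TransactionEvent("1"), TransferEvent("2"), TransactionEvent("3"))
    // flatMap over Option plays the role of the stream's mapConcat: None vanishes.
    val emitted = events.flatMap(translate(multiDomainEnabled = false))
    assert(emitted.map(_.description) == List("tx 1", "tx 3"))
  }
}
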
@ -0,0 +1,63 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.participant.sync

import com.digitalasset.canton.LfPackageId
import com.digitalasset.canton.ledger.participant.state.v2.Update
import com.digitalasset.canton.participant.admin.workflows.PackageID
import com.digitalasset.canton.participant.protocol.ProcessingSteps.RequestType
import com.digitalasset.canton.protocol.LedgerTransactionNodeStatistics

final class EventTranslationStrategy(
multiDomainLedgerAPIEnabled: Boolean,
excludeInfrastructureTransactions: Boolean,
) {

def translate(e: LedgerSyncEvent): Option[Update] =
e match {
case e: LedgerSyncEvent.TransferredOut =>
Option.when(multiDomainLedgerAPIEnabled)(e.toDamlUpdate)
case e: LedgerSyncEvent.TransferredIn =>
Option
.when(multiDomainLedgerAPIEnabled)(e.toDamlUpdate)
.orElse(e.asTransactionAccepted)
case e: LedgerSyncEvent.CommandRejected =>
e.kind match {
case RequestType.TransferIn | RequestType.TransferOut =>
Option.when(multiDomainLedgerAPIEnabled)(e.toDamlUpdate)
case RequestType.Transaction =>
Option(e.toDamlUpdate)
}
case e: LedgerSyncEvent.TransactionAccepted =>
Option(augmentTransactionStatistics(e).toDamlUpdate)
case e =>
Option(e.toDamlUpdate)
}

// Augment event with transaction statistics "as late as possible" as stats are redundant data and so that
// we don't need to persist stats and deal with versioning stats changes. Also every event is usually consumed
// only once.
private[sync] def augmentTransactionStatistics(
e: LedgerSyncEvent.TransactionAccepted
): LedgerSyncEvent.TransactionAccepted =
e.copy(completionInfoO =
e.completionInfoO.map(completionInfo =>
completionInfo.copy(statistics =
Some(LedgerTransactionNodeStatistics(e.transaction, excludedPackageIds))
)
)
)

private val excludedPackageIds: Set[LfPackageId] =
if (excludeInfrastructureTransactions) {
Set(
LfPackageId.assertFromString(PackageID.PingPong),
LfPackageId.assertFromString(PackageID.DarDistribution),
LfPackageId.assertFromString(PackageID.PingPongVacuum),
)
} else {
Set.empty[LfPackageId]
}

}

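As the comment in the new file says, statistics are derived data, so they are attached only at emission time rather than persisted. The copy-through pattern used by augmentTransactionStatistics, rewriting a nested optional field while leaving the rest of the event untouched, in isolation with simplified case classes (not the Canton ones):

object LateStatistics {
  final case class Statistics(nodes: Int)
  final case class CompletionInfo(commandId: String, statistics: Option[Statistics])
  final case class TransactionAccepted(completionInfoO: Option[CompletionInfo], nodeCount: Int)

  // Compute statistics at emission time; absent completion info stays absent.
  def augment(e: TransactionAccepted): TransactionAccepted =
    e.copy(completionInfoO =
      e.completionInfoO.map(ci => ci.copy(statistics = Some(Statistics(e.nodeCount))))
    )

  def main(args: Array[String]): Unit = {
    val e = TransactionAccepted(Some(CompletionInfo("cmd-1", None)), nodeCount = 3)
    assert(augment(e).completionInfoO.flatMap(_.statistics).contains(Statistics(3)))
    assert(augment(e.copy(completionInfoO = None)).completionInfoO.isEmpty)
  }
}
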
@ -19,7 +19,6 @@ import com.digitalasset.canton.ledger.participant.state.v2.{
}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.participant.protocol.ProcessingSteps
import com.digitalasset.canton.participant.protocol.ProcessingSteps.RequestType
import com.digitalasset.canton.protocol.{
LfCommittedTransaction,
LfContractId,
@ -58,7 +57,7 @@ import scala.collection.immutable.HashMap
sealed trait LedgerSyncEvent extends Product with Serializable with PrettyPrinting {
def description: String
def recordTime: LfTimestamp
def toDamlUpdate(populateTransfers: Boolean = false): Option[Update]
def toDamlUpdate: Update

def setTimestamp(timestamp: LfTimestamp): LedgerSyncEvent =
this match {
@ -110,9 +109,8 @@ object LedgerSyncEvent {
param("submissionId", _.submissionId),
param("newConfiguration", _.newConfiguration),
)
def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] = Some(
def toDamlUpdate: Update =
this.transformInto[Update.ConfigurationChanged]
)
}

final case class ConfigurationChangeRejected(
@ -134,9 +132,8 @@ object LedgerSyncEvent {
param("rejectionReason", _.rejectionReason.doubleQuoted),
param("proposedConfiguration", _.proposedConfiguration),
)
def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] = Some(
def toDamlUpdate: Update =
this.transformInto[Update.ConfigurationChangeRejected]
)
}

final case class PartyAddedToParticipant(
@ -157,9 +154,8 @@ object LedgerSyncEvent {
param("party", _.party),
param("displayName", _.displayName.singleQuoted),
)
def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] = Some(
def toDamlUpdate: Update =
this.transformInto[Update.PartyAddedToParticipant]
)
}

final case class PartyAllocationRejected(
@ -179,9 +175,8 @@ object LedgerSyncEvent {
param("rejectionReason", _.rejectionReason.doubleQuoted),
)

def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] = Some(
def toDamlUpdate: Update =
this.transformInto[Update.PartyAllocationRejected]
)
}

final case class PublicPackageUpload(
@ -201,9 +196,8 @@ object LedgerSyncEvent {
paramWithoutValue("archives"),
)

def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] = Some(
def toDamlUpdate: Update =
this.transformInto[Update.PublicPackageUpload]
)
}

final case class PublicPackageUploadRejected(
@ -221,9 +215,8 @@ object LedgerSyncEvent {
param("rejectionReason", _.rejectionReason.doubleQuoted),
)

def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] = Some(
def toDamlUpdate: Update =
this.transformInto[Update.PublicPackageUploadRejected]
)
}

final case class TransactionAccepted(
@ -251,9 +244,8 @@ object LedgerSyncEvent {
paramWithoutValue("hostedWitnesses"),
paramWithoutValue("contractMetadata"),
)
def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] = Some(
def toDamlUpdate: Update =
this.transformInto[Update.TransactionAccepted]
)

override def domainId: Option[DomainId] = transactionMeta.optDomainId
}
@ -283,28 +275,26 @@ object LedgerSyncEvent {
override def description: String = s"Contracts added $transactionId"

// TODO(i12964) expose the migration event as its own type of event (not as a transaction)
override def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] =
Option(
Update.TransactionAccepted(
completionInfoO = None,
transactionMeta = TransactionMeta(
ledgerEffectiveTime = ledgerTime,
workflowId = None,
submissionTime = recordTime,
submissionSeed = LedgerSyncEvent.noOpSeed,
optUsedPackages = None,
optNodeSeeds = None,
optByKeyNodes = None,
optDomainId = Option(domainId),
),
transaction = mkTx(contracts),
transactionId = transactionId,
recordTime = recordTime,
divulgedContracts = List.empty,
blindingInfoO = None,
hostedWitnesses = hostedWitnesses.toList,
contractMetadata = contractMetadata,
)
override def toDamlUpdate: Update =
Update.TransactionAccepted(
completionInfoO = None,
transactionMeta = TransactionMeta(
ledgerEffectiveTime = ledgerTime,
workflowId = None,
submissionTime = recordTime,
submissionSeed = LedgerSyncEvent.noOpSeed,
optUsedPackages = None,
optNodeSeeds = None,
optByKeyNodes = None,
optDomainId = Option(domainId),
),
transaction = mkTx(contracts),
transactionId = transactionId,
recordTime = recordTime,
divulgedContracts = List.empty,
blindingInfoO = None,
hostedWitnesses = hostedWitnesses.toList,
contractMetadata = contractMetadata,
)

override def pretty: Pretty[ContractsAdded] =
@ -330,28 +320,26 @@ object LedgerSyncEvent {
override def description: String = s"Contracts purged $transactionId"

// TODO(i12964) expose the migration event as its own type of event (not as a transaction)
override def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] =
Option(
Update.TransactionAccepted(
completionInfoO = None,
transactionMeta = TransactionMeta(
ledgerEffectiveTime = recordTime,
workflowId = None,
submissionTime = recordTime,
submissionSeed = LedgerSyncEvent.noOpSeed,
optUsedPackages = None,
optNodeSeeds = None,
optByKeyNodes = None,
optDomainId = Option(domainId),
),
transaction = mkTx(contracts),
transactionId = transactionId,
recordTime = recordTime,
divulgedContracts = List.empty,
blindingInfoO = None,
hostedWitnesses = hostedWitnesses.toList,
contractMetadata = Map.empty,
)
override def toDamlUpdate: Update =
Update.TransactionAccepted(
completionInfoO = None,
transactionMeta = TransactionMeta(
ledgerEffectiveTime = recordTime,
workflowId = None,
submissionTime = recordTime,
submissionSeed = LedgerSyncEvent.noOpSeed,
optUsedPackages = None,
optNodeSeeds = None,
optByKeyNodes = None,
optDomainId = Option(domainId),
),
transaction = mkTx(contracts),
transactionId = transactionId,
recordTime = recordTime,
divulgedContracts = List.empty,
blindingInfoO = None,
hostedWitnesses = hostedWitnesses.toList,
contractMetadata = Map.empty,
)

override def pretty: Pretty[ContractsPurged] =
@ -387,14 +375,8 @@ object LedgerSyncEvent {
paramIfDefined("domainId", _.domainId),
)

def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] = {
val selector = kind match {
case RequestType.Transaction => Some(())
case _: RequestType.Transfer => Option.when(populateTransfers)(())
}

selector.map(_ => this.transformInto[Update.CommandRejected])
}
def toDamlUpdate: Update =
this.transformInto[Update.CommandRejected]
}

object CommandRejected {
@ -482,33 +464,31 @@ object LedgerSyncEvent {
param("transferCounter", _.transferCounter),
)

def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] =
Option.when(populateTransfers) {
Update.ReassignmentAccepted(
optCompletionInfo = optCompletionInfo,
workflowId = workflowId,
updateId = updateId,
recordTime = recordTime,
reassignmentInfo = ReassignmentInfo(
sourceDomain = transferId.sourceDomain,
targetDomain = targetDomain,
submitter = submitter,
reassignmentCounter = transferCounter.v,
hostedStakeholders = hostedStakeholders,
unassignId = transferId.transferOutTimestamp,
def toDamlUpdate: Update =
Update.ReassignmentAccepted(
optCompletionInfo = optCompletionInfo,
workflowId = workflowId,
updateId = updateId,
recordTime = recordTime,
reassignmentInfo = ReassignmentInfo(
sourceDomain = transferId.sourceDomain,
targetDomain = targetDomain,
submitter = submitter,
reassignmentCounter = transferCounter.v,
hostedStakeholders = hostedStakeholders,
unassignId = transferId.transferOutTimestamp,
),
reassignment = Reassignment.Unassign(
contractId = contractId,
templateId = templateId.getOrElse(
throw new IllegalStateException(
s"templateId should not be empty in transfer-id: $transferId"
)
),
reassignment = Reassignment.Unassign(
contractId = contractId,
templateId = templateId.getOrElse(
throw new IllegalStateException(
s"templateId should not be empty in transfer-id: $transferId"
)
),
stakeholders = contractStakeholders.toList,
assignmentExclusivity = transferInExclusivity,
),
)
}
stakeholders = contractStakeholders.toList,
assignmentExclusivity = transferInExclusivity,
),
)
}

/** Signal the transfer-in of a contract from the source domain to the target domain.
@ -570,69 +550,62 @@ object LedgerSyncEvent {

override def domainId: Option[DomainId] = Option(targetDomain.id)

private lazy val transactionMeta: TransactionMeta = TransactionMeta(
ledgerEffectiveTime = ledgerCreateTime,
workflowId = workflowId,
submissionTime = recordTime, // TODO(M41): Upstream mismatch, replace with enter/leave view
submissionSeed = LedgerSyncEvent.noOpSeed,
optUsedPackages = None,
optNodeSeeds = None,
optByKeyNodes = None,
optDomainId = Some(targetDomain.unwrap),
)

/** Workaround to create an update for informing the ledger API server about a transferred-in contract.
* Creates a TransactionAccepted event consisting of a single create action that creates the given contract.
*
* The transaction has the same ledger time and transaction id as the creation of the contract.
*/
def toDamlUpdate(populateTransfers: Boolean = false): Option[Update] =
Option
.when(populateTransfers)(
Update.ReassignmentAccepted(
optCompletionInfo = optCompletionInfo,
def asTransactionAccepted: Option[Update] =
Option.when(createTransactionAccepted)(
Update.TransactionAccepted(
completionInfoO = optCompletionInfo,
transactionMeta = TransactionMeta(
ledgerEffectiveTime = ledgerCreateTime,
workflowId = workflowId,
updateId = updateId,
recordTime = recordTime,
reassignmentInfo = ReassignmentInfo(
sourceDomain = transferId.sourceDomain,
targetDomain = targetDomain,
submitter = submitter,
reassignmentCounter = transferCounter.v,
hostedStakeholders = hostedStakeholders,
unassignId = transferId.transferOutTimestamp,
),
reassignment = Reassignment.Assign(
ledgerEffectiveTime = ledgerCreateTime,
createNode = createNode,
contractMetadata = contractMetadata,
),
)
)
.orElse(
Option.when(createTransactionAccepted) {
val nodeId = LfNodeId(0)
val committedTransaction = LfCommittedTransaction(
LfVersionedTransaction(
version = createNode.version,
nodes = HashMap((nodeId, createNode)),
roots = ImmArray(nodeId),
)
submissionTime =
recordTime, // TODO(M41): Upstream mismatch, replace with enter/leave view
submissionSeed = LedgerSyncEvent.noOpSeed,
optUsedPackages = None,
optNodeSeeds = None,
optByKeyNodes = None,
optDomainId = Some(targetDomain.unwrap),
),
transaction = LfCommittedTransaction(
LfVersionedTransaction(
version = createNode.version,
nodes = HashMap((LfNodeId(0), createNode)),
roots = ImmArray(LfNodeId(0)),
)
),
transactionId = updateId,
recordTime = recordTime,
divulgedContracts = Nil,
blindingInfoO = None,
hostedWitnesses = hostedStakeholders,
contractMetadata = Map(createNode.coid -> contractMetadata),
)
)

Update.TransactionAccepted(
completionInfoO = optCompletionInfo,
transactionMeta = transactionMeta,
transaction = committedTransaction,
transactionId = updateId,
recordTime = recordTime,
divulgedContracts = Nil,
blindingInfoO = None,
hostedWitnesses = hostedStakeholders,
contractMetadata = Map(createNode.coid -> contractMetadata),
)
}
)
def toDamlUpdate: Update =
Update.ReassignmentAccepted(
optCompletionInfo = optCompletionInfo,
workflowId = workflowId,
updateId = updateId,
recordTime = recordTime,
reassignmentInfo = ReassignmentInfo(
sourceDomain = transferId.sourceDomain,
targetDomain = targetDomain,
submitter = submitter,
reassignmentCounter = transferCounter.v,
hostedStakeholders = hostedStakeholders,
unassignId = transferId.transferOutTimestamp,
),
reassignment = Reassignment.Assign(
ledgerEffectiveTime = ledgerCreateTime,
createNode = createNode,
contractMetadata = contractMetadata,
),
)

}
}

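The asTransactionAccepted workaround above wraps the transferred-in contract's create node into a one-node transaction rooted at node 0, so ledger API clients that do not understand reassignments still observe the contract's creation. Its essential construction, reduced to simplified stand-in types (the real code uses LfNodeId, HashMap, ImmArray, and LfVersionedTransaction):

object SingleCreateTransaction {
  final case class NodeId(index: Int)
  final case class CreateNode(contractId: String)
  final case class VersionedTransaction(nodes: Map[NodeId, CreateNode], roots: Seq[NodeId])

  // Wrap a single create node into a transaction rooted at node 0,
  // mirroring the shape built in asTransactionAccepted.
  def singleCreate(createNode: CreateNode): VersionedTransaction = {
    val nodeId = NodeId(0)
    VersionedTransaction(nodes = Map(nodeId -> createNode), roots = Seq(nodeId))
  }

  def main(args: Array[String]): Unit = {
    val tx = singleCreate(CreateNode("contract-1"))
    assert(tx.roots == Seq(NodeId(0)))
    assert(tx.nodes(NodeId(0)).contractId == "contract-1")
  }
}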