update canton to 20240419.13141.v8e75f5c1 (#19057)

* update canton to 20240419.13141.v8e75f5c1

tell-slack: canton

* fix the compilation of community_ledger_ledger-json-api

---------

Co-authored-by: Azure Pipelines Daml Build <support@digitalasset.com>
Co-authored-by: Paul Brauner <paul.brauner@digitalasset.com>
This commit is contained in:
azure-pipelines[bot] 2024-04-23 13:43:11 +00:00 committed by GitHub
parent af57809e3d
commit 612c9bdd74
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
47 changed files with 6049 additions and 5954 deletions

View File

@@ -720,6 +720,8 @@ scala_library(
"@maven//:org_apache_pekko_pekko_actor_2_13",
"@maven//:org_apache_pekko_pekko_http_2_13",
"@maven//:org_apache_pekko_pekko_http_core_2_13",
"@maven//:org_bouncycastle_bcpkix_jdk15on",
"@maven//:org_bouncycastle_bcprov_jdk15on",
"@maven//:org_scala_lang_scala_reflect",
"@maven//:org_scalaz_scalaz_core_2_13",
"@maven//:org_slf4j_slf4j_api",

View File

@@ -23,6 +23,9 @@ service PackageService {
// Upload a DAR file and all packages inside to the participant node
rpc UploadDar(UploadDarRequest) returns (UploadDarResponse);
// Performs the same checks that UploadDar would perform, but doesn't upload the DAR
rpc ValidateDar(ValidateDarRequest) returns (ValidateDarResponse);
// Remove a package that is not vetted
rpc RemovePackage(RemovePackageRequest) returns (RemovePackageResponse);
@@ -139,6 +142,17 @@ message UploadDarResponse {
}
}
// Performs the same checks that UploadDarRequest would perform, but doesn't
// upload the DAR.
message ValidateDarRequest {
// Raw bytes of the DAR file to validate.
bytes data = 1;
// File name of the DAR (NOTE(review): presumably used for logging/error reporting; confirm against the server implementation).
string filename = 2;
}
message ValidateDarResponse {
// The hash of the DAR that passed validation.
string hash = 1;
}
// Requests a DAR identified by its hash.
message GetDarRequest {
// Hash identifying the DAR to retrieve.
string hash = 1;
}

View File

@@ -151,6 +151,42 @@ object ParticipantAdminCommands {
}
/** Validates a DAR file on the participant without uploading it.
  * Reads the DAR bytes from `darPath` and returns the DAR hash reported by the server.
  */
final case class ValidateDar(
    darPath: Option[String],
    logger: TracedLogger,
) extends PackageCommand[ValidateDarRequest, ValidateDarResponse, String] {

  // Resolves and checks the path, then reads the file into the request.
  override def createRequest(): Either[String, ValidateDarRequest] = {
    val checkedPath = darPath
      .toRight("DAR path not provided")
      .flatMap(p => Either.cond(p.nonEmpty, p, "Provided DAR path is empty"))
    checkedPath.flatMap { path =>
      BinaryFileUtil
        .readByteStringFromFile(path)
        .map(bytes => ValidateDarRequest(bytes, Paths.get(path).getFileName.toString))
    }
  }

  override def submitRequest(
      service: PackageServiceStub,
      request: ValidateDarRequest,
  ): Future[ValidateDarResponse] = service.validateDar(request)

  // The server answers with the hash of the validated DAR.
  override def handleResponse(response: ValidateDarResponse): Either[String, String] =
    Right(response.hash)

  // file can be big. checking & vetting might take a while
  override def timeoutType: TimeoutType = DefaultUnboundedTimeout
}
final case class RemovePackage(
packageId: String,
force: Boolean,

View File

@@ -10,23 +10,611 @@ import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{
TimeoutType,
}
import com.digitalasset.canton.admin.api.client.data.*
import com.digitalasset.canton.admin.api.client.data.topologyx.*
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.Fingerprint
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.admin.grpc.BaseQueryX
import com.digitalasset.canton.topology.admin.v30
import com.digitalasset.canton.topology.admin.v30.AuthorizeRequest.Type.{Proposal, TransactionHash}
import com.digitalasset.canton.topology.admin.v30.IdentityInitializationXServiceGrpc.IdentityInitializationXServiceStub
import com.digitalasset.canton.topology.admin.v30.TopologyAggregationServiceGrpc.TopologyAggregationServiceStub
import com.digitalasset.canton.topology.admin.v30.TopologyManagerReadServiceGrpc.TopologyManagerReadServiceStub
import com.digitalasset.canton.topology.admin.v30.TopologyManagerWriteServiceGrpc.TopologyManagerWriteServiceStub
import com.digitalasset.canton.topology.admin.v30.{
AddTransactionsRequest,
AddTransactionsResponse,
AuthorizeRequest,
AuthorizeResponse,
ImportTopologySnapshotRequest,
ImportTopologySnapshotResponse,
ListTrafficStateRequest,
SignTransactionsRequest,
SignTransactionsResponse,
}
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.{
SignedTopologyTransactionX,
TopologyChangeOpX,
TopologyMappingX,
}
import com.digitalasset.canton.version.ProtocolVersionValidation
import com.google.protobuf.ByteString
import com.google.protobuf.timestamp.Timestamp
import io.grpc.ManagedChannel
import java.time.Instant
import scala.concurrent.Future
import scala.reflect.ClassTag
// TODO(#15161): Move commands to other file, e.g. related to VaultAdministration
object TopologyAdminCommands {
object Read {
// Base for all topology read-service admin commands: binds the read-service
// gRPC stub and applies an unbounded timeout.
abstract class BaseCommand[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] {
override type Svc = TopologyManagerReadServiceStub
// Creates the read-service stub on the given channel.
override def createService(channel: ManagedChannel): TopologyManagerReadServiceStub =
v30.TopologyManagerReadServiceGrpc.stub(channel)
// command will potentially take a long time
override def timeoutType: TimeoutType = DefaultUnboundedTimeout
}
/** Lists the traffic control state for members matching `filterMember`. */
final case class ListTrafficControlState(
    query: BaseQueryX,
    filterMember: String,
) extends BaseCommand[
      v30.ListTrafficStateRequest,
      v30.ListTrafficStateResponse,
      Seq[ListTrafficStateResult],
    ] {

  override def createRequest(): Either[String, v30.ListTrafficStateRequest] =
    Right(
      // Consistency fix: qualify the request type with `v30` like every sibling
      // command in this object (the unqualified name relied on an extra import).
      new v30.ListTrafficStateRequest(
        baseQuery = Some(query.toProtoV1),
        filterMember = filterMember,
      )
    )

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListTrafficStateRequest,
  ): Future[v30.ListTrafficStateResponse] =
    service.listTrafficState(request)

  // Converts each proto result, mapping any deserialization error to its string form.
  override def handleResponse(
      response: v30.ListTrafficStateResponse
  ): Either[String, Seq[ListTrafficStateResult]] =
    response.results
      .traverse(ListTrafficStateResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists namespace delegations matching the namespace and target-key filters. */
final case class ListNamespaceDelegation(
    query: BaseQueryX,
    filterNamespace: String,
    filterTargetKey: Option[Fingerprint],
) extends BaseCommand[
      v30.ListNamespaceDelegationRequest,
      v30.ListNamespaceDelegationResponse,
      Seq[ListNamespaceDelegationResult],
    ] {

  override def createRequest(): Either[String, v30.ListNamespaceDelegationRequest] = {
    val request = v30.ListNamespaceDelegationRequest(
      baseQuery = Some(query.toProtoV1),
      filterNamespace = filterNamespace,
      // An absent key filter is encoded as the empty string on the wire.
      filterTargetKeyFingerprint = filterTargetKey.fold("")(_.toProtoPrimitive),
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListNamespaceDelegationRequest,
  ): Future[v30.ListNamespaceDelegationResponse] =
    service.listNamespaceDelegation(request)

  override def handleResponse(
      response: v30.ListNamespaceDelegationResponse
  ): Either[String, Seq[ListNamespaceDelegationResult]] =
    response.results
      .traverse(ListNamespaceDelegationResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists decentralized namespace definitions matching the namespace filter. */
final case class ListDecentralizedNamespaceDefinition(
    query: BaseQueryX,
    filterNamespace: String,
) extends BaseCommand[
      v30.ListDecentralizedNamespaceDefinitionRequest,
      v30.ListDecentralizedNamespaceDefinitionResponse,
      Seq[ListDecentralizedNamespaceDefinitionResult],
    ] {

  override def createRequest(): Either[String, v30.ListDecentralizedNamespaceDefinitionRequest] = {
    val request = v30.ListDecentralizedNamespaceDefinitionRequest(
      baseQuery = Some(query.toProtoV1),
      filterNamespace = filterNamespace,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListDecentralizedNamespaceDefinitionRequest,
  ): Future[v30.ListDecentralizedNamespaceDefinitionResponse] =
    service.listDecentralizedNamespaceDefinition(request)

  override def handleResponse(
      response: v30.ListDecentralizedNamespaceDefinitionResponse
  ): Either[String, Seq[ListDecentralizedNamespaceDefinitionResult]] =
    response.results
      .traverse(ListDecentralizedNamespaceDefinitionResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists identifier delegations matching the uid and target-key filters. */
final case class ListIdentifierDelegation(
    query: BaseQueryX,
    filterUid: String,
    filterTargetKey: Option[Fingerprint],
) extends BaseCommand[
      v30.ListIdentifierDelegationRequest,
      v30.ListIdentifierDelegationResponse,
      Seq[ListIdentifierDelegationResult],
    ] {

  override def createRequest(): Either[String, v30.ListIdentifierDelegationRequest] = {
    val request = v30.ListIdentifierDelegationRequest(
      baseQuery = Some(query.toProtoV1),
      filterUid = filterUid,
      // An absent key filter is encoded as the empty string on the wire.
      filterTargetKeyFingerprint = filterTargetKey.fold("")(_.toProtoPrimitive),
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListIdentifierDelegationRequest,
  ): Future[v30.ListIdentifierDelegationResponse] =
    service.listIdentifierDelegation(request)

  override def handleResponse(
      response: v30.ListIdentifierDelegationResponse
  ): Either[String, Seq[ListIdentifierDelegationResult]] =
    response.results
      .traverse(ListIdentifierDelegationResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists owner-to-key mappings matching the owner type and uid filters. */
final case class ListOwnerToKeyMapping(
    query: BaseQueryX,
    filterKeyOwnerType: Option[MemberCode],
    filterKeyOwnerUid: String,
) extends BaseCommand[v30.ListOwnerToKeyMappingRequest, v30.ListOwnerToKeyMappingResponse, Seq[
      ListOwnerToKeyMappingResult
    ]] {

  override def createRequest(): Either[String, v30.ListOwnerToKeyMappingRequest] = {
    val request = v30.ListOwnerToKeyMappingRequest(
      baseQuery = Some(query.toProtoV1),
      // An absent owner-type filter is encoded as the empty string on the wire.
      filterKeyOwnerType = filterKeyOwnerType.fold("")(_.toProtoPrimitive),
      filterKeyOwnerUid = filterKeyOwnerUid,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListOwnerToKeyMappingRequest,
  ): Future[v30.ListOwnerToKeyMappingResponse] =
    service.listOwnerToKeyMapping(request)

  override def handleResponse(
      response: v30.ListOwnerToKeyMappingResponse
  ): Either[String, Seq[ListOwnerToKeyMappingResult]] =
    response.results
      .traverse(ListOwnerToKeyMappingResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists domain trust certificates matching the uid filter. */
final case class ListDomainTrustCertificate(
    query: BaseQueryX,
    filterUid: String,
) extends BaseCommand[
      v30.ListDomainTrustCertificateRequest,
      v30.ListDomainTrustCertificateResponse,
      Seq[ListDomainTrustCertificateResult],
    ] {

  override def createRequest(): Either[String, v30.ListDomainTrustCertificateRequest] = {
    val request = v30.ListDomainTrustCertificateRequest(
      baseQuery = Some(query.toProtoV1),
      filterUid = filterUid,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListDomainTrustCertificateRequest,
  ): Future[v30.ListDomainTrustCertificateResponse] =
    service.listDomainTrustCertificate(request)

  override def handleResponse(
      response: v30.ListDomainTrustCertificateResponse
  ): Either[String, Seq[ListDomainTrustCertificateResult]] =
    response.results
      .traverse(ListDomainTrustCertificateResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists participant domain permissions matching the uid filter. */
final case class ListParticipantDomainPermission(
    query: BaseQueryX,
    filterUid: String,
) extends BaseCommand[
      v30.ListParticipantDomainPermissionRequest,
      v30.ListParticipantDomainPermissionResponse,
      Seq[ListParticipantDomainPermissionResult],
    ] {

  override def createRequest(): Either[String, v30.ListParticipantDomainPermissionRequest] = {
    val request = v30.ListParticipantDomainPermissionRequest(
      baseQuery = Some(query.toProtoV1),
      filterUid = filterUid,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListParticipantDomainPermissionRequest,
  ): Future[v30.ListParticipantDomainPermissionResponse] =
    service.listParticipantDomainPermission(request)

  override def handleResponse(
      response: v30.ListParticipantDomainPermissionResponse
  ): Either[String, Seq[ListParticipantDomainPermissionResult]] =
    response.results
      .traverse(ListParticipantDomainPermissionResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists party hosting limits matching the uid filter. */
final case class ListPartyHostingLimits(
    query: BaseQueryX,
    filterUid: String,
) extends BaseCommand[
      v30.ListPartyHostingLimitsRequest,
      v30.ListPartyHostingLimitsResponse,
      Seq[ListPartyHostingLimitsResult],
    ] {

  override def createRequest(): Either[String, v30.ListPartyHostingLimitsRequest] = {
    val request = v30.ListPartyHostingLimitsRequest(
      baseQuery = Some(query.toProtoV1),
      filterUid = filterUid,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListPartyHostingLimitsRequest,
  ): Future[v30.ListPartyHostingLimitsResponse] =
    service.listPartyHostingLimits(request)

  override def handleResponse(
      response: v30.ListPartyHostingLimitsResponse
  ): Either[String, Seq[ListPartyHostingLimitsResult]] =
    response.results
      .traverse(ListPartyHostingLimitsResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists vetted-packages topology state matching the participant filter. */
final case class ListVettedPackages(
    query: BaseQueryX,
    filterParticipant: String,
) extends BaseCommand[
      v30.ListVettedPackagesRequest,
      v30.ListVettedPackagesResponse,
      Seq[ListVettedPackagesResult],
    ] {

  override def createRequest(): Either[String, v30.ListVettedPackagesRequest] = {
    val request = v30.ListVettedPackagesRequest(
      baseQuery = Some(query.toProtoV1),
      filterParticipant = filterParticipant,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListVettedPackagesRequest,
  ): Future[v30.ListVettedPackagesResponse] =
    service.listVettedPackages(request)

  override def handleResponse(
      response: v30.ListVettedPackagesResponse
  ): Either[String, Seq[ListVettedPackagesResult]] =
    response.results
      .traverse(ListVettedPackagesResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists party-to-participant mappings matching the party and participant filters. */
final case class ListPartyToParticipant(
    query: BaseQueryX,
    filterParty: String,
    filterParticipant: String,
) extends BaseCommand[
      v30.ListPartyToParticipantRequest,
      v30.ListPartyToParticipantResponse,
      Seq[ListPartyToParticipantResult],
    ] {

  override def createRequest(): Either[String, v30.ListPartyToParticipantRequest] = {
    val request = v30.ListPartyToParticipantRequest(
      baseQuery = Some(query.toProtoV1),
      filterParty = filterParty,
      filterParticipant = filterParticipant,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListPartyToParticipantRequest,
  ): Future[v30.ListPartyToParticipantResponse] =
    service.listPartyToParticipant(request)

  override def handleResponse(
      response: v30.ListPartyToParticipantResponse
  ): Either[String, Seq[ListPartyToParticipantResult]] =
    response.results
      .traverse(ListPartyToParticipantResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists authority-of topology state matching the party filter. */
final case class ListAuthorityOf(
    query: BaseQueryX,
    filterParty: String,
) extends BaseCommand[
      v30.ListAuthorityOfRequest,
      v30.ListAuthorityOfResponse,
      Seq[ListAuthorityOfResult],
    ] {

  override def createRequest(): Either[String, v30.ListAuthorityOfRequest] = {
    val request = v30.ListAuthorityOfRequest(
      baseQuery = Some(query.toProtoV1),
      filterParty = filterParty,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListAuthorityOfRequest,
  ): Future[v30.ListAuthorityOfResponse] =
    service.listAuthorityOf(request)

  override def handleResponse(
      response: v30.ListAuthorityOfResponse
  ): Either[String, Seq[ListAuthorityOfResult]] =
    response.results
      .traverse(ListAuthorityOfResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists domain parameters state matching the domain filter. */
final case class DomainParametersState(
    query: BaseQueryX,
    filterDomain: String,
) extends BaseCommand[
      v30.ListDomainParametersStateRequest,
      v30.ListDomainParametersStateResponse,
      Seq[ListDomainParametersStateResult],
    ] {

  override def createRequest(): Either[String, v30.ListDomainParametersStateRequest] = {
    val request = v30.ListDomainParametersStateRequest(
      baseQuery = Some(query.toProtoV1),
      filterDomain = filterDomain,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListDomainParametersStateRequest,
  ): Future[v30.ListDomainParametersStateResponse] =
    service.listDomainParametersState(request)

  override def handleResponse(
      response: v30.ListDomainParametersStateResponse
  ): Either[String, Seq[ListDomainParametersStateResult]] =
    response.results
      .traverse(ListDomainParametersStateResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists mediator domain state matching the domain filter. */
final case class MediatorDomainState(
    query: BaseQueryX,
    filterDomain: String,
) extends BaseCommand[
      v30.ListMediatorDomainStateRequest,
      v30.ListMediatorDomainStateResponse,
      Seq[ListMediatorDomainStateResult],
    ] {

  override def createRequest(): Either[String, v30.ListMediatorDomainStateRequest] = {
    val request = v30.ListMediatorDomainStateRequest(
      baseQuery = Some(query.toProtoV1),
      filterDomain = filterDomain,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListMediatorDomainStateRequest,
  ): Future[v30.ListMediatorDomainStateResponse] =
    service.listMediatorDomainState(request)

  override def handleResponse(
      response: v30.ListMediatorDomainStateResponse
  ): Either[String, Seq[ListMediatorDomainStateResult]] =
    response.results
      .traverse(ListMediatorDomainStateResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists sequencer domain state matching the domain filter. */
final case class SequencerDomainState(
    query: BaseQueryX,
    filterDomain: String,
) extends BaseCommand[
      v30.ListSequencerDomainStateRequest,
      v30.ListSequencerDomainStateResponse,
      Seq[ListSequencerDomainStateResult],
    ] {

  override def createRequest(): Either[String, v30.ListSequencerDomainStateRequest] = {
    val request = v30.ListSequencerDomainStateRequest(
      baseQuery = Some(query.toProtoV1),
      filterDomain = filterDomain,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListSequencerDomainStateRequest,
  ): Future[v30.ListSequencerDomainStateResponse] =
    service.listSequencerDomainState(request)

  override def handleResponse(
      response: v30.ListSequencerDomainStateResponse
  ): Either[String, Seq[ListSequencerDomainStateResult]] =
    response.results
      .traverse(ListSequencerDomainStateResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists purge-topology-transaction state matching the domain filter. */
final case class PurgeTopologyTransaction(
    query: BaseQueryX,
    filterDomain: String,
) extends BaseCommand[
      v30.ListPurgeTopologyTransactionRequest,
      v30.ListPurgeTopologyTransactionResponse,
      Seq[ListPurgeTopologyTransactionResult],
    ] {

  override def createRequest(): Either[String, v30.ListPurgeTopologyTransactionRequest] = {
    val request = v30.ListPurgeTopologyTransactionRequest(
      baseQuery = Some(query.toProtoV1),
      filterDomain = filterDomain,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListPurgeTopologyTransactionRequest,
  ): Future[v30.ListPurgeTopologyTransactionResponse] =
    service.listPurgeTopologyTransaction(request)

  override def handleResponse(
      response: v30.ListPurgeTopologyTransactionResponse
  ): Either[String, Seq[ListPurgeTopologyTransactionResult]] =
    response.results
      .traverse(ListPurgeTopologyTransactionResult.fromProtoV30)
      .leftMap(_.toString)
}
/** Lists the identifiers of all available topology stores. */
final case class ListStores()
    extends BaseCommand[v30.ListAvailableStoresRequest, v30.ListAvailableStoresResponse, Seq[
      String
    ]] {

  // The request carries no parameters.
  override def createRequest(): Either[String, v30.ListAvailableStoresRequest] =
    Right(v30.ListAvailableStoresRequest())

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListAvailableStoresRequest,
  ): Future[v30.ListAvailableStoresResponse] =
    service.listAvailableStores(request)

  // The response already carries plain store ids; no conversion needed.
  override def handleResponse(
      response: v30.ListAvailableStoresResponse
  ): Either[String, Seq[String]] = Right(response.storeIds)
}
/** Lists all stored topology transactions matching the query, minus excluded mappings. */
final case class ListAll(
    query: BaseQueryX,
    excludeMappings: Seq[String],
    filterNamespace: String,
) extends BaseCommand[
      v30.ListAllRequest,
      v30.ListAllResponse,
      GenericStoredTopologyTransactionsX,
    ] {

  override def createRequest(): Either[String, v30.ListAllRequest] = {
    val request = v30.ListAllRequest(
      baseQuery = Some(query.toProtoV1),
      excludeMappings = excludeMappings,
      filterNamespace = filterNamespace,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ListAllRequest,
  ): Future[v30.ListAllResponse] = service.listAll(request)

  override def handleResponse(
      response: v30.ListAllResponse
  ): Either[String, GenericStoredTopologyTransactionsX] =
    response.result match {
      // An absent result is treated as an empty transaction collection.
      case None => Right(StoredTopologyTransactionsX.empty)
      case Some(collection) =>
        StoredTopologyTransactionsX.fromProtoV30(collection).leftMap(_.toString)
    }
}
/** Exports the matching topology transactions as a serialized snapshot. */
final case class ExportTopologySnapshot(
    query: BaseQueryX,
    excludeMappings: Seq[String],
    filterNamespace: String,
) extends BaseCommand[
      v30.ExportTopologySnapshotRequest,
      v30.ExportTopologySnapshotResponse,
      ByteString,
    ] {

  override def createRequest(): Either[String, v30.ExportTopologySnapshotRequest] = {
    val request = v30.ExportTopologySnapshotRequest(
      baseQuery = Some(query.toProtoV1),
      excludeMappings = excludeMappings,
      filterNamespace = filterNamespace,
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerReadServiceStub,
      request: v30.ExportTopologySnapshotRequest,
  ): Future[v30.ExportTopologySnapshotResponse] =
    service.exportTopologySnapshot(request)

  // The snapshot bytes are returned as-is.
  override def handleResponse(
      response: v30.ExportTopologySnapshotResponse
  ): Either[String, ByteString] = Right(response.result)
}
}
object Aggregation {
abstract class BaseCommand[Req, Res, Result] extends GrpcAdminCommand[Req, Res, Result] {
override type Svc = TopologyAggregationServiceStub
override def createService(channel: ManagedChannel): TopologyAggregationServiceStub =
v30.TopologyAggregationServiceGrpc.stub(channel)
}
@@ -105,4 +693,239 @@ object TopologyAdminCommands {
}
}
object Write {
// Base for all topology write-service admin commands: binds the write-service
// gRPC stub and applies an unbounded timeout.
abstract class BaseWriteCommand[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] {
override type Svc = TopologyManagerWriteServiceStub
// Creates the write-service stub on the given channel.
override def createService(channel: ManagedChannel): TopologyManagerWriteServiceStub =
v30.TopologyManagerWriteServiceGrpc.stub(channel)
// command will potentially take a long time
override def timeoutType: TimeoutType = DefaultUnboundedTimeout
}
/** Adds the given signed topology transactions to the target store. */
final case class AddTransactions(
    transactions: Seq[GenericSignedTopologyTransactionX],
    store: String,
) extends BaseWriteCommand[AddTransactionsRequest, AddTransactionsResponse, Unit] {

  override def createRequest(): Either[String, AddTransactionsRequest] = {
    val protoTransactions = transactions.map(_.toProtoV30)
    // forceChange is not requested for plain additions.
    Right(AddTransactionsRequest(protoTransactions, forceChange = false, store))
  }

  override def submitRequest(
      service: TopologyManagerWriteServiceStub,
      request: AddTransactionsRequest,
  ): Future[AddTransactionsResponse] = service.addTransactions(request)

  // Success carries no payload.
  override def handleResponse(response: AddTransactionsResponse): Either[String, Unit] =
    Right(())
}
/** Imports a previously exported topology snapshot into the target store. */
final case class ImportTopologySnapshot(
    topologySnapshot: ByteString,
    store: String,
) extends BaseWriteCommand[
      ImportTopologySnapshotRequest,
      ImportTopologySnapshotResponse,
      Unit,
    ] {

  override def createRequest(): Either[String, ImportTopologySnapshotRequest] =
    // forceChange is not requested for snapshot imports.
    Right(ImportTopologySnapshotRequest(topologySnapshot, forceChange = false, store))

  override def submitRequest(
      service: TopologyManagerWriteServiceStub,
      request: ImportTopologySnapshotRequest,
  ): Future[ImportTopologySnapshotResponse] = service.importTopologySnapshot(request)

  // Success carries no payload.
  override def handleResponse(
      response: ImportTopologySnapshotResponse
  ): Either[String, Unit] = Right(())
}
/** Signs the given topology transactions with the specified keys. */
final case class SignTransactions(
    transactions: Seq[GenericSignedTopologyTransactionX],
    signedBy: Seq[Fingerprint],
) extends BaseWriteCommand[SignTransactionsRequest, SignTransactionsResponse, Seq[
      GenericSignedTopologyTransactionX
    ]] {

  override def createRequest(): Either[String, SignTransactionsRequest] = {
    val request = SignTransactionsRequest(
      transactions.map(_.toProtoV30),
      signedBy.map(_.toProtoPrimitive),
    )
    Right(request)
  }

  override def submitRequest(
      service: TopologyManagerWriteServiceStub,
      request: SignTransactionsRequest,
  ): Future[SignTransactionsResponse] = service.signTransactions(request)

  // Deserializes the signed transactions, skipping protocol-version validation.
  override def handleResponse(
      response: SignTransactionsResponse
  ): Either[String, Seq[GenericSignedTopologyTransactionX]] =
    response.transactions
      .traverse(
        SignedTopologyTransactionX.fromProtoV30(ProtocolVersionValidation.NoValidation, _)
      )
      .leftMap(_.message)
}
/** Proposes a topology mapping change (add/replace/remove) to the topology manager.
  * On success, returns the resulting signed transaction narrowed to mapping type `M`.
  */
final case class Propose[M <: TopologyMappingX: ClassTag](
    mapping: Either[String, M],
    signedBy: Seq[Fingerprint],
    change: TopologyChangeOpX,
    serial: Option[PositiveInt],
    mustFullyAuthorize: Boolean,
    forceChange: Boolean,
    store: String,
) extends BaseWriteCommand[
      AuthorizeRequest,
      AuthorizeResponse,
      SignedTopologyTransactionX[TopologyChangeOpX, M],
    ] {

  // Fails fast if the mapping itself could not be constructed.
  override def createRequest(): Either[String, AuthorizeRequest] = mapping.map(m =>
    AuthorizeRequest(
      Proposal(
        AuthorizeRequest.Proposal(
          change.toProto,
          // Serial 0 signals "let the server pick the next serial".
          serial.map(_.value).getOrElse(0),
          Some(m.toProtoV30),
        )
      ),
      mustFullyAuthorize = mustFullyAuthorize,
      forceChange = forceChange,
      signedBy = signedBy.map(_.toProtoPrimitive),
      store,
    )
  )

  override def submitRequest(
      service: TopologyManagerWriteServiceStub,
      request: AuthorizeRequest,
  ): Future[AuthorizeResponse] = service.authorize(request)

  // Deserializes the returned transaction and checks it carries the expected mapping type.
  override def handleResponse(
      response: AuthorizeResponse
  ): Either[String, SignedTopologyTransactionX[TopologyChangeOpX, M]] = response.transaction
    .toRight("no transaction in response")
    .flatMap(
      SignedTopologyTransactionX
        .fromProtoV30(ProtocolVersionValidation.NoValidation, _)
        .leftMap(_.message)
        .flatMap(tx =>
          tx.selectMapping[M]
            .toRight(
              // Bug fix: `ClassTag[M].getClass` named the class of the ClassTag value
              // itself, not the expected mapping type M; use the tag's runtimeClass.
              s"Expected mapping ${implicitly[ClassTag[M]].runtimeClass.getSimpleName}, but received: ${tx.mapping.getClass.getSimpleName}"
            )
        )
    )
}
/** Convenience constructor for [[Propose]] starting from an already-valid mapping. */
object Propose {
  def apply[M <: TopologyMappingX: ClassTag](
      mapping: M,
      signedBy: Seq[Fingerprint],
      store: String,
      serial: Option[PositiveInt] = None,
      change: TopologyChangeOpX = TopologyChangeOpX.Replace,
      mustFullyAuthorize: Boolean = false,
      forceChange: Boolean = false,
  ): Propose[M] =
    Propose(
      mapping = Right(mapping),
      signedBy = signedBy,
      change = change,
      serial = serial,
      mustFullyAuthorize = mustFullyAuthorize,
      forceChange = forceChange,
      store = store,
    )
}
/** Authorizes a pending topology proposal identified by its transaction hash.
  * On success, returns the resulting signed transaction narrowed to mapping type `M`.
  */
final case class Authorize[M <: TopologyMappingX: ClassTag](
    transactionHash: String,
    mustFullyAuthorize: Boolean,
    signedBy: Seq[Fingerprint],
    store: String,
) extends BaseWriteCommand[
      AuthorizeRequest,
      AuthorizeResponse,
      SignedTopologyTransactionX[TopologyChangeOpX, M],
    ] {

  override def createRequest(): Either[String, AuthorizeRequest] = Right(
    AuthorizeRequest(
      TransactionHash(transactionHash),
      mustFullyAuthorize = mustFullyAuthorize,
      forceChange = false,
      signedBy = signedBy.map(_.toProtoPrimitive),
      store = store,
    )
  )

  override def submitRequest(
      service: TopologyManagerWriteServiceStub,
      request: AuthorizeRequest,
  ): Future[AuthorizeResponse] = service.authorize(request)

  // Deserializes the returned transaction and checks it carries the expected mapping type.
  override def handleResponse(
      response: AuthorizeResponse
  ): Either[String, SignedTopologyTransactionX[TopologyChangeOpX, M]] = response.transaction
    .toRight("no transaction in response")
    .flatMap(
      SignedTopologyTransactionX
        .fromProtoV30(ProtocolVersionValidation.NoValidation, _)
        .leftMap(_.message)
        .flatMap(tx =>
          tx.selectMapping[M]
            .toRight(
              // Bug fix: `ClassTag[M].getClass` named the class of the ClassTag value
              // itself, not the expected mapping type M; use the tag's runtimeClass.
              s"Expected mapping ${implicitly[ClassTag[M]].runtimeClass.getSimpleName}, but received: ${tx.mapping.getClass.getSimpleName}"
            )
        )
    )
}
}
object Init {
// Base for identity-initialization admin commands: binds the
// identity-initialization gRPC stub.
abstract class BaseInitializationService[Req, Resp, Res]
extends GrpcAdminCommand[Req, Resp, Res] {
override type Svc = IdentityInitializationXServiceStub
// Creates the initialization-service stub on the given channel.
override def createService(channel: ManagedChannel): IdentityInitializationXServiceStub =
v30.IdentityInitializationXServiceGrpc.stub(channel)
}
/** Initializes the node with the given identifier. */
final case class InitId(identifier: String)
    extends BaseInitializationService[v30.InitIdRequest, v30.InitIdResponse, Unit] {

  override def createRequest(): Either[String, v30.InitIdRequest] =
    Right(v30.InitIdRequest(identifier))

  override def submitRequest(
      service: IdentityInitializationXServiceStub,
      request: v30.InitIdRequest,
  ): Future[v30.InitIdResponse] = service.initId(request)

  // Success carries no payload.
  override def handleResponse(response: v30.InitIdResponse): Either[String, Unit] =
    Right(())
}
/** Fetches the node's unique identifier; fails if the node is not yet initialized. */
final case class GetId()
    extends BaseInitializationService[v30.GetIdRequest, v30.GetIdResponse, UniqueIdentifier] {

  override def createRequest(): Either[String, v30.GetIdRequest] =
    Right(v30.GetIdRequest())

  override def submitRequest(
      service: IdentityInitializationXServiceStub,
      request: v30.GetIdRequest,
  ): Future[v30.GetIdResponse] = service.getId(request)

  override def handleResponse(
      response: v30.GetIdResponse
  ): Either[String, UniqueIdentifier] =
    // Guard clause: an empty identifier means no id has been assigned yet.
    if (response.uniqueIdentifier.isEmpty)
      Left(s"Node is not initialized and therefore does not have an Id assigned yet.")
    else
      UniqueIdentifier.fromProtoPrimitive_(response.uniqueIdentifier)
}
}
}

View File

@@ -1,843 +0,0 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.admin.api.client.commands
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{
DefaultUnboundedTimeout,
TimeoutType,
}
import com.digitalasset.canton.admin.api.client.data.topologyx.*
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.Fingerprint
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.admin.grpc.BaseQueryX
import com.digitalasset.canton.topology.admin.v30
import com.digitalasset.canton.topology.admin.v30.AuthorizeRequest.Type.{Proposal, TransactionHash}
import com.digitalasset.canton.topology.admin.v30.IdentityInitializationXServiceGrpc.IdentityInitializationXServiceStub
import com.digitalasset.canton.topology.admin.v30.TopologyManagerReadServiceGrpc.TopologyManagerReadServiceStub
import com.digitalasset.canton.topology.admin.v30.TopologyManagerWriteServiceGrpc.TopologyManagerWriteServiceStub
import com.digitalasset.canton.topology.admin.v30.{
AddTransactionsRequest,
AddTransactionsResponse,
AuthorizeRequest,
AuthorizeResponse,
ImportTopologySnapshotRequest,
ImportTopologySnapshotResponse,
ListTrafficStateRequest,
SignTransactionsRequest,
SignTransactionsResponse,
}
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.{
SignedTopologyTransactionX,
TopologyChangeOpX,
TopologyMappingX,
}
import com.digitalasset.canton.version.ProtocolVersionValidation
import com.google.protobuf.ByteString
import io.grpc.ManagedChannel
import scala.concurrent.Future
import scala.reflect.ClassTag
object TopologyAdminCommandsX {
object Read {
// Base for all topology read-service admin commands (pre-rename X variant):
// binds the read-service gRPC stub and applies an unbounded timeout.
abstract class BaseCommand[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] {
override type Svc = TopologyManagerReadServiceStub
// Creates the read-service stub on the given channel.
override def createService(channel: ManagedChannel): TopologyManagerReadServiceStub =
v30.TopologyManagerReadServiceGrpc.stub(channel)
// command will potentially take a long time
override def timeoutType: TimeoutType = DefaultUnboundedTimeout
}
final case class ListTrafficControlState(
query: BaseQueryX,
filterMember: String,
) extends BaseCommand[
v30.ListTrafficStateRequest,
v30.ListTrafficStateResponse,
Seq[ListTrafficStateResult],
] {
override def createRequest(): Either[String, v30.ListTrafficStateRequest] =
Right(
new ListTrafficStateRequest(
baseQuery = Some(query.toProtoV1),
filterMember = filterMember,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListTrafficStateRequest,
): Future[v30.ListTrafficStateResponse] =
service.listTrafficState(request)
override def handleResponse(
response: v30.ListTrafficStateResponse
): Either[String, Seq[ListTrafficStateResult]] =
response.results
.traverse(ListTrafficStateResult.fromProtoV30)
.leftMap(_.toString)
}
final case class ListNamespaceDelegation(
query: BaseQueryX,
filterNamespace: String,
filterTargetKey: Option[Fingerprint],
) extends BaseCommand[
v30.ListNamespaceDelegationRequest,
v30.ListNamespaceDelegationResponse,
Seq[ListNamespaceDelegationResult],
] {
override def createRequest(): Either[String, v30.ListNamespaceDelegationRequest] =
Right(
new v30.ListNamespaceDelegationRequest(
baseQuery = Some(query.toProtoV1),
filterNamespace = filterNamespace,
filterTargetKeyFingerprint = filterTargetKey.map(_.toProtoPrimitive).getOrElse(""),
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListNamespaceDelegationRequest,
): Future[v30.ListNamespaceDelegationResponse] =
service.listNamespaceDelegation(request)
override def handleResponse(
response: v30.ListNamespaceDelegationResponse
): Either[String, Seq[ListNamespaceDelegationResult]] =
response.results.traverse(ListNamespaceDelegationResult.fromProtoV30).leftMap(_.toString)
}
final case class ListDecentralizedNamespaceDefinition(
query: BaseQueryX,
filterNamespace: String,
) extends BaseCommand[
v30.ListDecentralizedNamespaceDefinitionRequest,
v30.ListDecentralizedNamespaceDefinitionResponse,
Seq[ListDecentralizedNamespaceDefinitionResult],
] {
override def createRequest()
: Either[String, v30.ListDecentralizedNamespaceDefinitionRequest] =
Right(
new v30.ListDecentralizedNamespaceDefinitionRequest(
baseQuery = Some(query.toProtoV1),
filterNamespace = filterNamespace,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListDecentralizedNamespaceDefinitionRequest,
): Future[v30.ListDecentralizedNamespaceDefinitionResponse] =
service.listDecentralizedNamespaceDefinition(request)
override def handleResponse(
response: v30.ListDecentralizedNamespaceDefinitionResponse
): Either[String, Seq[ListDecentralizedNamespaceDefinitionResult]] =
response.results
.traverse(ListDecentralizedNamespaceDefinitionResult.fromProtoV30)
.leftMap(_.toString)
}
final case class ListIdentifierDelegation(
query: BaseQueryX,
filterUid: String,
filterTargetKey: Option[Fingerprint],
) extends BaseCommand[
v30.ListIdentifierDelegationRequest,
v30.ListIdentifierDelegationResponse,
Seq[ListIdentifierDelegationResult],
] {
override def createRequest(): Either[String, v30.ListIdentifierDelegationRequest] =
Right(
new v30.ListIdentifierDelegationRequest(
baseQuery = Some(query.toProtoV1),
filterUid = filterUid,
filterTargetKeyFingerprint = filterTargetKey.map(_.toProtoPrimitive).getOrElse(""),
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListIdentifierDelegationRequest,
): Future[v30.ListIdentifierDelegationResponse] =
service.listIdentifierDelegation(request)
override def handleResponse(
response: v30.ListIdentifierDelegationResponse
): Either[String, Seq[ListIdentifierDelegationResult]] =
response.results.traverse(ListIdentifierDelegationResult.fromProtoV30).leftMap(_.toString)
}
final case class ListOwnerToKeyMapping(
query: BaseQueryX,
filterKeyOwnerType: Option[MemberCode],
filterKeyOwnerUid: String,
) extends BaseCommand[v30.ListOwnerToKeyMappingRequest, v30.ListOwnerToKeyMappingResponse, Seq[
ListOwnerToKeyMappingResult
]] {
override def createRequest(): Either[String, v30.ListOwnerToKeyMappingRequest] =
Right(
new v30.ListOwnerToKeyMappingRequest(
baseQuery = Some(query.toProtoV1),
filterKeyOwnerType = filterKeyOwnerType.map(_.toProtoPrimitive).getOrElse(""),
filterKeyOwnerUid = filterKeyOwnerUid,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListOwnerToKeyMappingRequest,
): Future[v30.ListOwnerToKeyMappingResponse] =
service.listOwnerToKeyMapping(request)
override def handleResponse(
response: v30.ListOwnerToKeyMappingResponse
): Either[String, Seq[ListOwnerToKeyMappingResult]] =
response.results.traverse(ListOwnerToKeyMappingResult.fromProtoV30).leftMap(_.toString)
}
final case class ListDomainTrustCertificate(
query: BaseQueryX,
filterUid: String,
) extends BaseCommand[
v30.ListDomainTrustCertificateRequest,
v30.ListDomainTrustCertificateResponse,
Seq[ListDomainTrustCertificateResult],
] {
override def createRequest(): Either[String, v30.ListDomainTrustCertificateRequest] =
Right(
new v30.ListDomainTrustCertificateRequest(
baseQuery = Some(query.toProtoV1),
filterUid = filterUid,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListDomainTrustCertificateRequest,
): Future[v30.ListDomainTrustCertificateResponse] =
service.listDomainTrustCertificate(request)
override def handleResponse(
response: v30.ListDomainTrustCertificateResponse
): Either[String, Seq[ListDomainTrustCertificateResult]] =
response.results.traverse(ListDomainTrustCertificateResult.fromProtoV30).leftMap(_.toString)
}
final case class ListParticipantDomainPermission(
query: BaseQueryX,
filterUid: String,
) extends BaseCommand[
v30.ListParticipantDomainPermissionRequest,
v30.ListParticipantDomainPermissionResponse,
Seq[ListParticipantDomainPermissionResult],
] {
override def createRequest(): Either[String, v30.ListParticipantDomainPermissionRequest] =
Right(
new v30.ListParticipantDomainPermissionRequest(
baseQuery = Some(query.toProtoV1),
filterUid = filterUid,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListParticipantDomainPermissionRequest,
): Future[v30.ListParticipantDomainPermissionResponse] =
service.listParticipantDomainPermission(request)
override def handleResponse(
response: v30.ListParticipantDomainPermissionResponse
): Either[String, Seq[ListParticipantDomainPermissionResult]] =
response.results
.traverse(ListParticipantDomainPermissionResult.fromProtoV30)
.leftMap(_.toString)
}
final case class ListPartyHostingLimits(
query: BaseQueryX,
filterUid: String,
) extends BaseCommand[
v30.ListPartyHostingLimitsRequest,
v30.ListPartyHostingLimitsResponse,
Seq[ListPartyHostingLimitsResult],
] {
override def createRequest(): Either[String, v30.ListPartyHostingLimitsRequest] =
Right(
new v30.ListPartyHostingLimitsRequest(
baseQuery = Some(query.toProtoV1),
filterUid = filterUid,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListPartyHostingLimitsRequest,
): Future[v30.ListPartyHostingLimitsResponse] =
service.listPartyHostingLimits(request)
override def handleResponse(
response: v30.ListPartyHostingLimitsResponse
): Either[String, Seq[ListPartyHostingLimitsResult]] =
response.results
.traverse(ListPartyHostingLimitsResult.fromProtoV30)
.leftMap(_.toString)
}
final case class ListVettedPackages(
query: BaseQueryX,
filterParticipant: String,
) extends BaseCommand[
v30.ListVettedPackagesRequest,
v30.ListVettedPackagesResponse,
Seq[ListVettedPackagesResult],
] {
override def createRequest(): Either[String, v30.ListVettedPackagesRequest] =
Right(
new v30.ListVettedPackagesRequest(
baseQuery = Some(query.toProtoV1),
filterParticipant = filterParticipant,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListVettedPackagesRequest,
): Future[v30.ListVettedPackagesResponse] =
service.listVettedPackages(request)
override def handleResponse(
response: v30.ListVettedPackagesResponse
): Either[String, Seq[ListVettedPackagesResult]] =
response.results
.traverse(ListVettedPackagesResult.fromProtoV30)
.leftMap(_.toString)
}
final case class ListPartyToParticipant(
query: BaseQueryX,
filterParty: String,
filterParticipant: String,
) extends BaseCommand[
v30.ListPartyToParticipantRequest,
v30.ListPartyToParticipantResponse,
Seq[ListPartyToParticipantResult],
] {
override def createRequest(): Either[String, v30.ListPartyToParticipantRequest] =
Right(
new v30.ListPartyToParticipantRequest(
baseQuery = Some(query.toProtoV1),
filterParty = filterParty,
filterParticipant = filterParticipant,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListPartyToParticipantRequest,
): Future[v30.ListPartyToParticipantResponse] =
service.listPartyToParticipant(request)
override def handleResponse(
response: v30.ListPartyToParticipantResponse
): Either[String, Seq[ListPartyToParticipantResult]] =
response.results
.traverse(ListPartyToParticipantResult.fromProtoV30)
.leftMap(_.toString)
}
final case class ListAuthorityOf(
query: BaseQueryX,
filterParty: String,
) extends BaseCommand[
v30.ListAuthorityOfRequest,
v30.ListAuthorityOfResponse,
Seq[ListAuthorityOfResult],
] {
override def createRequest(): Either[String, v30.ListAuthorityOfRequest] =
Right(
new v30.ListAuthorityOfRequest(
baseQuery = Some(query.toProtoV1),
filterParty = filterParty,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListAuthorityOfRequest,
): Future[v30.ListAuthorityOfResponse] =
service.listAuthorityOf(request)
override def handleResponse(
response: v30.ListAuthorityOfResponse
): Either[String, Seq[ListAuthorityOfResult]] =
response.results
.traverse(ListAuthorityOfResult.fromProtoV30)
.leftMap(_.toString)
}
final case class DomainParametersState(
query: BaseQueryX,
filterDomain: String,
) extends BaseCommand[
v30.ListDomainParametersStateRequest,
v30.ListDomainParametersStateResponse,
Seq[ListDomainParametersStateResult],
] {
override def createRequest(): Either[String, v30.ListDomainParametersStateRequest] =
Right(
new v30.ListDomainParametersStateRequest(
baseQuery = Some(query.toProtoV1),
filterDomain = filterDomain,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListDomainParametersStateRequest,
): Future[v30.ListDomainParametersStateResponse] =
service.listDomainParametersState(request)
override def handleResponse(
response: v30.ListDomainParametersStateResponse
): Either[String, Seq[ListDomainParametersStateResult]] =
response.results
.traverse(ListDomainParametersStateResult.fromProtoV30)
.leftMap(_.toString)
}
final case class MediatorDomainState(
query: BaseQueryX,
filterDomain: String,
) extends BaseCommand[
v30.ListMediatorDomainStateRequest,
v30.ListMediatorDomainStateResponse,
Seq[ListMediatorDomainStateResult],
] {
override def createRequest(): Either[String, v30.ListMediatorDomainStateRequest] =
Right(
v30.ListMediatorDomainStateRequest(
baseQuery = Some(query.toProtoV1),
filterDomain = filterDomain,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListMediatorDomainStateRequest,
): Future[v30.ListMediatorDomainStateResponse] =
service.listMediatorDomainState(request)
override def handleResponse(
response: v30.ListMediatorDomainStateResponse
): Either[String, Seq[ListMediatorDomainStateResult]] =
response.results
.traverse(ListMediatorDomainStateResult.fromProtoV30)
.leftMap(_.toString)
}
final case class SequencerDomainState(
query: BaseQueryX,
filterDomain: String,
) extends BaseCommand[
v30.ListSequencerDomainStateRequest,
v30.ListSequencerDomainStateResponse,
Seq[ListSequencerDomainStateResult],
] {
override def createRequest(): Either[String, v30.ListSequencerDomainStateRequest] =
Right(
new v30.ListSequencerDomainStateRequest(
baseQuery = Some(query.toProtoV1),
filterDomain = filterDomain,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListSequencerDomainStateRequest,
): Future[v30.ListSequencerDomainStateResponse] =
service.listSequencerDomainState(request)
override def handleResponse(
response: v30.ListSequencerDomainStateResponse
): Either[String, Seq[ListSequencerDomainStateResult]] =
response.results
.traverse(ListSequencerDomainStateResult.fromProtoV30)
.leftMap(_.toString)
}
final case class PurgeTopologyTransaction(
query: BaseQueryX,
filterDomain: String,
) extends BaseCommand[
v30.ListPurgeTopologyTransactionRequest,
v30.ListPurgeTopologyTransactionResponse,
Seq[ListPurgeTopologyTransactionResult],
] {
override def createRequest(): Either[String, v30.ListPurgeTopologyTransactionRequest] =
Right(
new v30.ListPurgeTopologyTransactionRequest(
baseQuery = Some(query.toProtoV1),
filterDomain = filterDomain,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListPurgeTopologyTransactionRequest,
): Future[v30.ListPurgeTopologyTransactionResponse] =
service.listPurgeTopologyTransaction(request)
override def handleResponse(
response: v30.ListPurgeTopologyTransactionResponse
): Either[String, Seq[ListPurgeTopologyTransactionResult]] =
response.results
.traverse(ListPurgeTopologyTransactionResult.fromProtoV30)
.leftMap(_.toString)
}
final case class ListStores()
extends BaseCommand[v30.ListAvailableStoresRequest, v30.ListAvailableStoresResponse, Seq[
String
]] {
override def createRequest(): Either[String, v30.ListAvailableStoresRequest] =
Right(v30.ListAvailableStoresRequest())
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListAvailableStoresRequest,
): Future[v30.ListAvailableStoresResponse] =
service.listAvailableStores(request)
override def handleResponse(
response: v30.ListAvailableStoresResponse
): Either[String, Seq[String]] =
Right(response.storeIds)
}
final case class ListAll(
query: BaseQueryX,
excludeMappings: Seq[String],
filterNamespace: String,
) extends BaseCommand[
v30.ListAllRequest,
v30.ListAllResponse,
GenericStoredTopologyTransactionsX,
] {
override def createRequest(): Either[String, v30.ListAllRequest] =
Right(
new v30.ListAllRequest(
baseQuery = Some(query.toProtoV1),
excludeMappings = excludeMappings,
filterNamespace = filterNamespace,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ListAllRequest,
): Future[v30.ListAllResponse] = service.listAll(request)
override def handleResponse(
response: v30.ListAllResponse
): Either[String, GenericStoredTopologyTransactionsX] =
response.result
.fold[Either[String, GenericStoredTopologyTransactionsX]](
Right(StoredTopologyTransactionsX.empty)
) { collection =>
StoredTopologyTransactionsX.fromProtoV30(collection).leftMap(_.toString)
}
}
final case class ExportTopologySnapshot(
query: BaseQueryX,
excludeMappings: Seq[String],
filterNamespace: String,
) extends BaseCommand[
v30.ExportTopologySnapshotRequest,
v30.ExportTopologySnapshotResponse,
ByteString,
] {
override def createRequest(): Either[String, v30.ExportTopologySnapshotRequest] =
Right(
new v30.ExportTopologySnapshotRequest(
baseQuery = Some(query.toProtoV1),
excludeMappings = excludeMappings,
filterNamespace = filterNamespace,
)
)
override def submitRequest(
service: TopologyManagerReadServiceStub,
request: v30.ExportTopologySnapshotRequest,
): Future[v30.ExportTopologySnapshotResponse] = service.exportTopologySnapshot(request)
override def handleResponse(
response: v30.ExportTopologySnapshotResponse
): Either[String, ByteString] =
Right(response.result)
}
}
object Write {
abstract class BaseWriteCommand[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] {
override type Svc = TopologyManagerWriteServiceStub
override def createService(channel: ManagedChannel): TopologyManagerWriteServiceStub =
v30.TopologyManagerWriteServiceGrpc.stub(channel)
// command will potentially take a long time
override def timeoutType: TimeoutType = DefaultUnboundedTimeout
}
final case class AddTransactions(
transactions: Seq[GenericSignedTopologyTransactionX],
store: String,
) extends BaseWriteCommand[AddTransactionsRequest, AddTransactionsResponse, Unit] {
override def createRequest(): Either[String, AddTransactionsRequest] = {
Right(AddTransactionsRequest(transactions.map(_.toProtoV30), forceChange = false, store))
}
override def submitRequest(
service: TopologyManagerWriteServiceStub,
request: AddTransactionsRequest,
): Future[AddTransactionsResponse] = service.addTransactions(request)
override def handleResponse(response: AddTransactionsResponse): Either[String, Unit] =
Right(())
}
final case class ImportTopologySnapshot(
topologySnapshot: ByteString,
store: String,
) extends BaseWriteCommand[
ImportTopologySnapshotRequest,
ImportTopologySnapshotResponse,
Unit,
] {
override def createRequest(): Either[String, ImportTopologySnapshotRequest] = {
Right(
ImportTopologySnapshotRequest(
topologySnapshot,
forceChange = false,
store,
)
)
}
override def submitRequest(
service: TopologyManagerWriteServiceStub,
request: ImportTopologySnapshotRequest,
): Future[ImportTopologySnapshotResponse] = service.importTopologySnapshot(request)
override def handleResponse(
response: ImportTopologySnapshotResponse
): Either[String, Unit] = Right(())
}
final case class SignTransactions(
transactions: Seq[GenericSignedTopologyTransactionX],
signedBy: Seq[Fingerprint],
) extends BaseWriteCommand[SignTransactionsRequest, SignTransactionsResponse, Seq[
GenericSignedTopologyTransactionX
]] {
override def createRequest(): Either[String, SignTransactionsRequest] = {
Right(
SignTransactionsRequest(transactions.map(_.toProtoV30), signedBy.map(_.toProtoPrimitive))
)
}
override def submitRequest(
service: TopologyManagerWriteServiceStub,
request: SignTransactionsRequest,
): Future[SignTransactionsResponse] = service.signTransactions(request)
override def handleResponse(
response: SignTransactionsResponse
): Either[String, Seq[GenericSignedTopologyTransactionX]] =
response.transactions
.traverse(tx =>
SignedTopologyTransactionX.fromProtoV30(ProtocolVersionValidation.NoValidation, tx)
)
.leftMap(_.message)
}
final case class Propose[M <: TopologyMappingX: ClassTag](
mapping: Either[String, M],
signedBy: Seq[Fingerprint],
change: TopologyChangeOpX,
serial: Option[PositiveInt],
mustFullyAuthorize: Boolean,
forceChange: Boolean,
store: String,
) extends BaseWriteCommand[
AuthorizeRequest,
AuthorizeResponse,
SignedTopologyTransactionX[TopologyChangeOpX, M],
] {
override def createRequest(): Either[String, AuthorizeRequest] = mapping.map(m =>
AuthorizeRequest(
Proposal(
AuthorizeRequest.Proposal(
change.toProto,
serial.map(_.value).getOrElse(0),
Some(m.toProtoV30),
)
),
mustFullyAuthorize = mustFullyAuthorize,
forceChange = forceChange,
signedBy = signedBy.map(_.toProtoPrimitive),
store,
)
)
override def submitRequest(
service: TopologyManagerWriteServiceStub,
request: AuthorizeRequest,
): Future[AuthorizeResponse] = service.authorize(request)
override def handleResponse(
response: AuthorizeResponse
): Either[String, SignedTopologyTransactionX[TopologyChangeOpX, M]] = response.transaction
.toRight("no transaction in response")
.flatMap(
SignedTopologyTransactionX
.fromProtoV30(ProtocolVersionValidation.NoValidation, _)
.leftMap(_.message)
.flatMap(tx =>
tx.selectMapping[M]
.toRight(
s"Expected mapping ${ClassTag[M].getClass.getSimpleName}, but received: ${tx.mapping.getClass.getSimpleName}"
)
)
)
}
object Propose {
def apply[M <: TopologyMappingX: ClassTag](
mapping: M,
signedBy: Seq[Fingerprint],
store: String,
serial: Option[PositiveInt] = None,
change: TopologyChangeOpX = TopologyChangeOpX.Replace,
mustFullyAuthorize: Boolean = false,
forceChange: Boolean = false,
): Propose[M] =
Propose(Right(mapping), signedBy, change, serial, mustFullyAuthorize, forceChange, store)
}
final case class Authorize[M <: TopologyMappingX: ClassTag](
transactionHash: String,
mustFullyAuthorize: Boolean,
signedBy: Seq[Fingerprint],
store: String,
) extends BaseWriteCommand[
AuthorizeRequest,
AuthorizeResponse,
SignedTopologyTransactionX[TopologyChangeOpX, M],
] {
override def createRequest(): Either[String, AuthorizeRequest] = Right(
AuthorizeRequest(
TransactionHash(transactionHash),
mustFullyAuthorize = mustFullyAuthorize,
forceChange = false,
signedBy = signedBy.map(_.toProtoPrimitive),
store = store,
)
)
override def submitRequest(
service: TopologyManagerWriteServiceStub,
request: AuthorizeRequest,
): Future[AuthorizeResponse] = service.authorize(request)
override def handleResponse(
response: AuthorizeResponse
): Either[String, SignedTopologyTransactionX[TopologyChangeOpX, M]] = response.transaction
.toRight("no transaction in response")
.flatMap(
SignedTopologyTransactionX
.fromProtoV30(ProtocolVersionValidation.NoValidation, _)
.leftMap(_.message)
.flatMap(tx =>
tx.selectMapping[M]
.toRight(
s"Expected mapping ${ClassTag[M].getClass.getSimpleName}, but received: ${tx.mapping.getClass.getSimpleName}"
)
)
)
}
}
  /** Console/admin-API commands for node identity initialization. */
  object Init {

    abstract class BaseInitializationService[Req, Resp, Res]
        extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = IdentityInitializationXServiceStub

      override def createService(channel: ManagedChannel): IdentityInitializationXServiceStub =
        v30.IdentityInitializationXServiceGrpc.stub(channel)
    }

    // Initializes the node with the given unique identifier string.
    final case class InitId(identifier: String)
        extends BaseInitializationService[v30.InitIdRequest, v30.InitIdResponse, Unit] {

      override def createRequest(): Either[String, v30.InitIdRequest] =
        Right(v30.InitIdRequest(identifier))

      override def submitRequest(
          service: IdentityInitializationXServiceStub,
          request: v30.InitIdRequest,
      ): Future[v30.InitIdResponse] =
        service.initId(request)

      override def handleResponse(response: v30.InitIdResponse): Either[String, Unit] =
        Right(())
    }

    // Fetches the node's unique identifier; fails if the node is not yet initialized.
    final case class GetId()
        extends BaseInitializationService[v30.GetIdRequest, v30.GetIdResponse, UniqueIdentifier] {
      override def createRequest(): Either[String, v30.GetIdRequest] =
        Right(v30.GetIdRequest())

      override def submitRequest(
          service: IdentityInitializationXServiceStub,
          request: v30.GetIdRequest,
      ): Future[v30.GetIdResponse] =
        service.getId(request)

      override def handleResponse(
          response: v30.GetIdResponse
      ): Either[String, UniqueIdentifier] = {
        // an empty identifier on the wire means the node has not been initialized yet
        if (response.uniqueIdentifier.nonEmpty)
          UniqueIdentifier.fromProtoPrimitive_(response.uniqueIdentifier)
        else
          Left(
            s"Node is not initialized and therefore does not have an Id assigned yet."
          )
      }
    }
  }
}

View File

@ -7,7 +7,7 @@ import better.files.File
import ch.qos.logback.classic.Level
import com.digitalasset.canton.admin.api.client.commands.{
StatusAdminCommands,
TopologyAdminCommandsX,
TopologyAdminCommands,
}
import com.digitalasset.canton.config.{ConsoleCommandTimeout, NonNegativeDuration}
import com.digitalasset.canton.console.CommandErrors.{CommandError, GenericCommandError}
@ -188,7 +188,7 @@ class HealthAdministrationX[S <: data.NodeStatus.Status](
override def has_identity(): Boolean = runner
.adminCommand(
TopologyAdminCommandsX.Init.GetId()
TopologyAdminCommands.Init.GetId()
)
.toEither
.isRight

View File

@ -98,6 +98,16 @@ private[console] object ParticipantCommands {
.UploadDar(Some(path), vetAllPackages, synchronizeVetting, logger)
)
def validate(
runner: AdminCommandRunner,
path: String,
logger: TracedLogger,
): ConsoleCommandResult[String] =
runner.adminCommand(
ParticipantAdminCommands.Package
.ValidateDar(Some(path), logger)
)
}
object domains {

View File

@ -10,7 +10,6 @@ import com.digitalasset.canton.LedgerParticipantId
import com.digitalasset.canton.admin.api.client.commands.{
ParticipantAdminCommands,
TopologyAdminCommands,
TopologyAdminCommandsX,
}
import com.digitalasset.canton.admin.api.client.data.{
ListConnectedDomainsResult,
@ -307,7 +306,7 @@ class ParticipantPartiesAdministrationGroupX(
runner
.adminCommand(
TopologyAdminCommandsX.Write.Propose(
TopologyAdminCommands.Write.Propose(
// TODO(#14048) properly set the serial or introduce auto-detection so we don't
// have to set it on the client side
mapping = PartyToParticipantX(

View File

@ -135,7 +135,7 @@ class SecretKeyAdministration(
private def findPublicKey(
fingerprint: String,
topologyAdmin: TopologyAdministrationGroupCommon,
topologyAdmin: TopologyAdministrationGroup,
owner: Member,
): PublicKey =
findPublicKeys(topologyAdmin, owner).find(_.fingerprint.unwrap == fingerprint) match {
@ -241,24 +241,17 @@ class SecretKeyAdministration(
/** Helper to find public keys for topology/x shared between community and enterprise
*/
protected def findPublicKeys(
topologyAdmin: TopologyAdministrationGroupCommon,
topologyAdmin: TopologyAdministrationGroup,
owner: Member,
): Seq[PublicKey] =
topologyAdmin match {
case tx: TopologyAdministrationGroup =>
tx.owner_to_key_mappings
.list(
filterStore = AuthorizedStore.filterName,
filterKeyOwnerUid = owner.filterString,
filterKeyOwnerType = Some(owner.code),
)
.flatMap(_.item.keys)
case _ =>
// TODO(#15161): Remove the match when flattening TopologyAdministrationGroup and Common
throw new IllegalStateException(
"Impossible to encounter topology admin group besides X"
)
}
): Seq[PublicKey] = {
topologyAdmin.owner_to_key_mappings
.list(
filterStore = AuthorizedStore.filterName,
filterKeyOwnerUid = owner.filterString,
filterKeyOwnerType = Some(owner.code),
)
.flatMap(_.item.keys)
}
/** Helper to name new keys generated during a rotation with a ...-rotated-<timestamp> tag to better identify
* the new keys after a rotation

View File

@ -5,7 +5,6 @@ package com.digitalasset.canton.sequencing.protocol
import cats.Functor
import cats.data.EitherT
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.checked

View File

@ -0,0 +1,83 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.topology.store
import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.DisplayName
import com.digitalasset.canton.config.CantonRequireTypes.String255
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.store.db.DbPartyMetadataStore
import com.digitalasset.canton.topology.store.memory.InMemoryPartyMetadataStore
import com.digitalasset.canton.tracing.TraceContext
import scala.concurrent.{ExecutionContext, Future}
/** the party metadata used to inform the ledger api server
*
* the first class parameters correspond to the relevant information, whereas the
* second class parameters are synchronisation information used during crash recovery.
* we don't want these in an equality comparison.
*/
final case class PartyMetadata(
    partyId: PartyId,
    displayName: Option[DisplayName],
    participantId: Option[ParticipantId],
)( // second parameter list: recovery/synchronisation data, excluded from equals/hashCode
    val effectiveTimestamp: CantonTimestamp,
    val submissionId: String255,
    val notified: Boolean = false, // whether the domain has already been notified
)
/** Persistence interface for the party metadata served to the ledger api server. */
trait PartyMetadataStore extends AutoCloseable {

  /** Returns the stored metadata for the given party, if any. */
  def metadataForParty(partyId: PartyId)(implicit
      traceContext: TraceContext
  ): Future[Option[PartyMetadata]]

  /** Upserts the given metadata record (convenience overload delegating to the field-wise variant). */
  final def insertOrUpdatePartyMetadata(metadata: PartyMetadata)(implicit
      traceContext: TraceContext
  ): Future[Unit] = {
    insertOrUpdatePartyMetadata(
      partyId = metadata.partyId,
      participantId = metadata.participantId,
      displayName = metadata.displayName,
      effectiveTimestamp = metadata.effectiveTimestamp,
      submissionId = metadata.submissionId,
    )
  }

  /** Upserts party metadata keyed by party id. */
  def insertOrUpdatePartyMetadata(
      partyId: PartyId,
      participantId: Option[ParticipantId],
      displayName: Option[DisplayName],
      effectiveTimestamp: CantonTimestamp,
      submissionId: String255,
  )(implicit traceContext: TraceContext): Future[Unit]

  /** mark the given metadata as having been successfully forwarded to the domain */
  def markNotified(metadata: PartyMetadata)(implicit traceContext: TraceContext): Future[Unit]

  /** fetch the current set of party data which still needs to be notified */
  def fetchNotNotified()(implicit traceContext: TraceContext): Future[Seq[PartyMetadata]]

}
object PartyMetadataStore {

  /** Creates the storage-appropriate implementation: database-backed for
    * [[DbStorage]], in-memory otherwise.
    */
  def apply(
      storage: Storage,
      timeouts: ProcessingTimeout,
      loggerFactory: NamedLoggerFactory,
  )(implicit
      ec: ExecutionContext
  ): PartyMetadataStore =
    storage match {
      case db: DbStorage => new DbPartyMetadataStore(db, timeouts, loggerFactory)
      case _: MemoryStorage => new InMemoryPartyMetadataStore()
    }
}

View File

@ -3,93 +3,46 @@
package com.digitalasset.canton.topology.store
import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.DisplayName
import com.digitalasset.canton.config.CantonRequireTypes.{
LengthLimitedString,
String255,
String256M,
}
import cats.syntax.traverse.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.ProtoDeserializationError
import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String255}
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveLong}
import com.digitalasset.canton.crypto.{Fingerprint, SignatureCheckError}
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory}
import com.digitalasset.canton.protocol.OnboardingRestriction
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.time.NonNegativeFiniteDuration
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.store.db.DbPartyMetadataStore
import com.digitalasset.canton.topology.store.memory.InMemoryPartyMetadataStore
import com.digitalasset.canton.topology.admin.v30 as topoV30
import com.digitalasset.canton.topology.client.DomainTopologyClient
import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
import com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.{
GenericStoredTopologyTransactionsX,
PositiveStoredTopologyTransactionsX,
}
import com.digitalasset.canton.topology.store.TopologyTransactionRejection.Duplicate
import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX
import com.digitalasset.canton.topology.store.db.DbTopologyStoreX
import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStoreX
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash
import com.digitalasset.canton.topology.transaction.TopologyTransactionX.{
GenericTopologyTransactionX,
TxHash,
}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.version.ProtocolVersion
import com.google.common.annotations.VisibleForTesting
import scala.concurrent.duration.Duration
import scala.concurrent.{ExecutionContext, Future}
// TODO(#15161): Rename file to PartyMetadata or split up helpers into multiple files
/** the party metadata used to inform the ledger api server
*
* the first class parameters correspond to the relevant information, whereas the
* second class parameters are synchronisation information used during crash recovery.
* we don't want these in an equality comparison.
*/
final case class PartyMetadata(
partyId: PartyId,
displayName: Option[DisplayName],
participantId: Option[ParticipantId],
)(
val effectiveTimestamp: CantonTimestamp,
val submissionId: String255,
val notified: Boolean = false,
)
/** Persistence interface for [[PartyMetadata]] records used to inform the ledger api server. */
trait PartyMetadataStore extends AutoCloseable {

  /** look up the stored metadata for the given party, if any */
  def metadataForParty(partyId: PartyId)(implicit
      traceContext: TraceContext
  ): Future[Option[PartyMetadata]]

  /** convenience overload that unpacks a [[PartyMetadata]] and delegates to the field-wise variant */
  final def insertOrUpdatePartyMetadata(metadata: PartyMetadata)(implicit
      traceContext: TraceContext
  ): Future[Unit] = {
    insertOrUpdatePartyMetadata(
      partyId = metadata.partyId,
      participantId = metadata.participantId,
      displayName = metadata.displayName,
      effectiveTimestamp = metadata.effectiveTimestamp,
      submissionId = metadata.submissionId,
    )
  }

  /** insert new party metadata or update the existing record for `partyId`.
    * NOTE(review): the `notified` flag is not part of this signature — presumably it is
    * reset/managed separately via [[markNotified]]; confirm against implementations.
    */
  def insertOrUpdatePartyMetadata(
      partyId: PartyId,
      participantId: Option[ParticipantId],
      displayName: Option[DisplayName],
      effectiveTimestamp: CantonTimestamp,
      submissionId: String255,
  )(implicit traceContext: TraceContext): Future[Unit]

  /** mark the given metadata as having been successfully forwarded to the domain */
  def markNotified(metadata: PartyMetadata)(implicit traceContext: TraceContext): Future[Unit]

  /** fetch the current set of party data which still needs to be notified */
  def fetchNotNotified()(implicit traceContext: TraceContext): Future[Seq[PartyMetadata]]
}
object PartyMetadataStore {

  /** Instantiates the [[PartyMetadataStore]] implementation matching the given storage backend:
    * a database-backed store for [[DbStorage]], an in-memory store otherwise.
    */
  def apply(
      storage: Storage,
      timeouts: ProcessingTimeout,
      loggerFactory: NamedLoggerFactory,
  )(implicit
      ec: ExecutionContext
  ): PartyMetadataStore =
    storage match {
      case dbStorage: DbStorage => new DbPartyMetadataStore(dbStorage, timeouts, loggerFactory)
      case _: MemoryStorage => new InMemoryPartyMetadataStore()
    }
}
import scala.reflect.ClassTag
sealed trait TopologyStoreId extends PrettyPrinting {
def filterName: String = dbString.unwrap
@ -182,150 +135,417 @@ object TopologyStoreId {
}
sealed trait TopologyTransactionRejection extends PrettyPrinting with Product with Serializable {
def asString: String
def asString1GB: String256M =
String256M.tryCreate(asString, Some("topology transaction rejection"))
final case class StoredTopologyTransactionX[+Op <: TopologyChangeOpX, +M <: TopologyMappingX](
sequenced: SequencedTime,
validFrom: EffectiveTime,
validUntil: Option[EffectiveTime],
transaction: SignedTopologyTransactionX[Op, M],
) extends DelegatedTopologyTransactionLike[Op, M]
with PrettyPrinting {
override protected def transactionLikeDelegate: TopologyTransactionLike[Op, M] = transaction
def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError
}
/** Concrete reasons why a topology transaction may be rejected by the store/processor.
  * Each case renders itself as a string, a [[Pretty]] instance, and the corresponding
  * [[TopologyManagerError]] surfaced to callers.
  */
object TopologyTransactionRejection {
  /** none of the given signing keys is covered by a valid (namespace/identifier) delegation */
  final case class NoDelegationFoundForKeys(keys: Set[Fingerprint])
      extends TopologyTransactionRejection {
    override def asString: String = s"No delegation found for keys ${keys.mkString(", ")}"
    override def pretty: Pretty[NoDelegationFoundForKeys] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.UnauthorizedTransaction.Failure(asString)
  }
  /** generic authorization failure without further detail */
  case object NotAuthorized extends TopologyTransactionRejection {
    override def asString: String = "Not authorized"
    override def pretty: Pretty[NotAuthorized.type] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.UnauthorizedTransaction.Failure(asString)
  }
  /** the requested threshold exceeds the allowed maximum (e.g. number of available signers) */
  final case class ThresholdTooHigh(actual: Int, mustBeAtMost: Int)
      extends TopologyTransactionRejection {
    override def asString: String =
      s"Threshold must not be higher than $mustBeAtMost, but was $actual."

    override def pretty: Pretty[ThresholdTooHigh] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = {
      TopologyManagerError.InvalidThreshold.ThresholdTooHigh(actual, mustBeAtMost)
    }
  }
  /** participant onboarding blocked by an active onboarding restriction on the domain */
  final case class OnboardingRestrictionInPlace(
      participant: ParticipantId,
      restriction: OnboardingRestriction,
      loginAfter: Option[CantonTimestamp],
  ) extends TopologyTransactionRejection {
    override def asString: String =
      s"Participant ${participant} onboarding rejected as restrictions ${restriction} are in place."

    override def pretty: Pretty[OnboardingRestrictionInPlace] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = {
      TopologyManagerError.ParticipantOnboardingRefused.Reject(participant, restriction)
    }
  }
  /** cryptographic signature verification failed */
  final case class SignatureCheckFailed(err: SignatureCheckError)
      extends TopologyTransactionRejection {
    override def asString: String = err.toString
    override def pretty: Pretty[SignatureCheckFailed] = prettyOfClass(param("err", _.err))

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.InvalidSignatureError.Failure(err)
  }
  /** the transaction targets a different domain than the one processing it */
  final case class WrongDomain(wrong: DomainId) extends TopologyTransactionRejection {
    override def asString: String = show"Wrong domain $wrong"
    override def pretty: Pretty[WrongDomain] = prettyOfClass(param("wrong", _.wrong))

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.WrongDomain.Failure(wrong)
  }
  /** the same transaction already exists in the store (first seen at `old`) */
  final case class Duplicate(old: CantonTimestamp) extends TopologyTransactionRejection {
    override def asString: String = show"Duplicate transaction from ${old}"
    override def pretty: Pretty[Duplicate] = prettyOfClass(param("old", _.old))

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.DuplicateTransaction.ExistsAt(old)
  }
  /** the submitted serial number does not match the next expected serial */
  final case class SerialMismatch(expected: PositiveInt, actual: PositiveInt)
      extends TopologyTransactionRejection {
    override def asString: String =
      show"The given serial $actual does not match the expected serial $expected"
    override def pretty: Pretty[SerialMismatch] =
      prettyOfClass(param("expected", _.expected), param("actual", _.actual))

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.SerialMismatch.Failure(expected, actual)
  }
  /** catch-all rejection carrying a free-form message; surfaced as an internal error */
  final case class Other(str: String) extends TopologyTransactionRejection {
    override def asString: String = str
    override def pretty: Pretty[Other] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.InternalError.Other(str)
  }
  /** the requested extra traffic limit is below the required minimum for the member */
  final case class ExtraTrafficLimitTooLow(
      member: Member,
      actual: PositiveLong,
      expectedMinimum: PositiveLong,
  ) extends TopologyTransactionRejection {
    override def asString: String =
      s"Extra traffic limit for $member should be at least $expectedMinimum, but was $actual."

    override def pretty: Pretty[ExtraTrafficLimitTooLow] =
      prettyOfClass(
        param("member", _.member),
        param("actual", _.actual),
        param("expectedMinimum", _.expectedMinimum),
      )

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.InvalidTrafficLimit.TrafficLimitTooLow(member, actual, expectedMinimum)
  }
final case class InsufficientKeys(members: Seq[Member]) extends TopologyTransactionRejection {
override def asString: String =
s"Members ${members.sorted.mkString(", ")} are missing a signing key or an encryption key or both."
override def pretty: Pretty[InsufficientKeys] = prettyOfClass(
param("members", _.members)
override def pretty: Pretty[StoredTopologyTransactionX.this.type] =
prettyOfClass(
unnamedParam(_.transaction),
param("sequenced", _.sequenced.value),
param("validFrom", _.validFrom.value),
paramIfDefined("validUntil", _.validUntil.map(_.value)),
)
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.InsufficientKeys.Failure(members)
@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
def selectMapping[TargetMapping <: TopologyMappingX: ClassTag] = transaction
.selectMapping[TargetMapping]
.map(_ => this.asInstanceOf[StoredTopologyTransactionX[Op, TargetMapping]])
@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
def selectOp[TargetOp <: TopologyChangeOpX: ClassTag] = transaction
.selectOp[TargetOp]
.map(_ => this.asInstanceOf[StoredTopologyTransactionX[TargetOp, M]])
}
object StoredTopologyTransactionX {
type GenericStoredTopologyTransactionX =
StoredTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
}
final case class ValidatedTopologyTransactionX[+Op <: TopologyChangeOpX, +M <: TopologyMappingX](
transaction: SignedTopologyTransactionX[Op, M],
rejectionReason: Option[TopologyTransactionRejection] = None,
expireImmediately: Boolean = false,
) extends DelegatedTopologyTransactionLike[Op, M]
with PrettyPrinting {
override protected def transactionLikeDelegate: TopologyTransactionLike[Op, M] = transaction
def nonDuplicateRejectionReason: Option[TopologyTransactionRejection] = rejectionReason match {
case Some(Duplicate(_)) => None
case otherwise => otherwise
}
final case class UnknownMembers(members: Seq[Member]) extends TopologyTransactionRejection {
override def asString: String = s"Members ${members.toSeq.sorted.mkString(", ")} are unknown."
def collectOfMapping[TargetM <: TopologyMappingX: ClassTag]
: Option[ValidatedTopologyTransactionX[Op, TargetM]] =
transaction.selectMapping[TargetM].map(tx => copy[Op, TargetM](transaction = tx))
override def pretty: Pretty[UnknownMembers] = prettyOfClass(param("members", _.members))
def collectOf[TargetO <: TopologyChangeOpX: ClassTag, TargetM <: TopologyMappingX: ClassTag]
: Option[ValidatedTopologyTransactionX[TargetO, TargetM]] =
transaction.select[TargetO, TargetM].map(tx => copy[TargetO, TargetM](transaction = tx))
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.UnknownMembers.Failure(members)
override def pretty: Pretty[ValidatedTopologyTransactionX.this.type] =
prettyOfClass(
unnamedParam(_.transaction),
paramIfDefined("rejectionReason", _.rejectionReason),
paramIfTrue("expireImmediately", _.expireImmediately),
)
}
object ValidatedTopologyTransactionX {
type GenericValidatedTopologyTransactionX =
ValidatedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
}
abstract class TopologyStoreX[+StoreID <: TopologyStoreId](implicit
protected val ec: ExecutionContext
) extends FlagCloseable {
this: NamedLogging =>
def storeId: StoreID
/** fetch the effective time updates greater than or equal to a certain timestamp
*
* this function is used to recover the future effective timestamp such that we can reschedule "pokes" of the
* topology client and updates of the acs commitment processor on startup
*/
def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit
traceContext: TraceContext
): Future[Seq[TopologyStoreX.Change]]
def maxTimestamp()(implicit
traceContext: TraceContext
): Future[Option[(SequencedTime, EffectiveTime)]]
/** returns the current dispatching watermark
*
* for topology transaction dispatching, we keep track up to which point in time
* we have mirrored the authorized store to the remote store
*
* the timestamp always refers to the timestamp of the authorized store!
*/
def currentDispatchingWatermark(implicit
traceContext: TraceContext
): Future[Option[CantonTimestamp]]
/** update the dispatching watermark for this target store */
def updateDispatchingWatermark(timestamp: CantonTimestamp)(implicit
traceContext: TraceContext
): Future[Unit]
def findTransactionsByTxHash(asOfExclusive: EffectiveTime, hashes: Set[TxHash])(implicit
traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]]
def findProposalsByTxHash(asOfExclusive: EffectiveTime, hashes: NonEmpty[Set[TxHash]])(implicit
traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]]
def findTransactionsForMapping(asOfExclusive: EffectiveTime, hashes: NonEmpty[Set[MappingHash]])(
implicit traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]]
/** returns the set of positive transactions
*
* this function is used by the topology processor to determine the set of transaction, such that
* we can perform cascading updates if there was a certificate revocation
*
* @param asOfInclusive whether the search interval should include the current timepoint or not. the state at t is
* defined as "exclusive" of t, whereas for updating the state, we need to be able to query inclusive.
*/
def findPositiveTransactions(
asOf: CantonTimestamp,
asOfInclusive: Boolean,
isProposal: Boolean,
types: Seq[TopologyMappingX.Code],
filterUid: Option[Seq[UniqueIdentifier]],
filterNamespace: Option[Seq[Namespace]],
)(implicit
traceContext: TraceContext
): Future[PositiveStoredTopologyTransactionsX]
/** add validated topology transaction as is to the topology transaction table */
def update(
sequenced: SequencedTime,
effective: EffectiveTime,
removeMapping: Map[MappingHash, PositiveInt],
removeTxs: Set[TxHash],
additions: Seq[GenericValidatedTopologyTransactionX],
)(implicit
traceContext: TraceContext
): Future[Unit]
@VisibleForTesting
protected[topology] def dumpStoreContent()(implicit
traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX]
/** store an initial set of topology transactions as given into the store */
def bootstrap(snapshot: GenericStoredTopologyTransactionsX)(implicit
traceContext: TraceContext
): Future[Unit]
/** query optimized for inspection
*
* @param proposals if true, query only for proposals instead of approved transaction mappings
* @param recentTimestampO if exists, use this timestamp for the head state to prevent race conditions on the console
*/
def inspect(
proposals: Boolean,
timeQuery: TimeQuery,
// TODO(#14048) - consider removing `recentTimestampO` and moving callers to TimeQueryX.Snapshot
recentTimestampO: Option[CantonTimestamp],
op: Option[TopologyChangeOpX],
types: Seq[TopologyMappingX.Code],
idFilter: Option[String],
namespaceFilter: Option[String],
)(implicit
traceContext: TraceContext
): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]]
def inspectKnownParties(
timestamp: CantonTimestamp,
filterParty: String,
filterParticipant: String,
limit: Int,
)(implicit traceContext: TraceContext): Future[Set[PartyId]]
/** Finds the topology transaction that first onboarded the sequencer with ID `sequencerId`
*/
def findFirstSequencerStateForSequencer(
sequencerId: SequencerId
)(implicit
traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, SequencerDomainStateX]]]
/** Finds the topology transaction that first onboarded the mediator with ID `mediatorId`
*/
def findFirstMediatorStateForMediator(
mediatorId: MediatorId
)(implicit
traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, MediatorDomainStateX]]]
/** Finds the topology transaction that first onboarded the participant with ID `participantId`
*/
def findFirstTrustCertificateForParticipant(
participant: ParticipantId
)(implicit
traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, DomainTrustCertificateX]]]
def findEssentialStateAtSequencedTime(
asOfInclusive: SequencedTime
)(implicit traceContext: TraceContext): Future[GenericStoredTopologyTransactionsX]
protected def signedTxFromStoredTx(
storedTx: GenericStoredTopologyTransactionX
): SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX] = storedTx.transaction
def providesAdditionalSignatures(
transaction: GenericSignedTopologyTransactionX
)(implicit traceContext: TraceContext): Future[Boolean] = {
findStored(CantonTimestamp.MaxValue, transaction).map(_.forall { inStore =>
// check whether source still could provide an additional signature
transaction.signatures.diff(inStore.transaction.signatures.forgetNE).nonEmpty &&
// but only if the transaction in the target store is a valid proposal
inStore.transaction.isProposal &&
inStore.validUntil.isEmpty
})
}
final case class ParticipantStillHostsParties(participantId: ParticipantId, parties: Seq[PartyId])
extends TopologyTransactionRejection {
override def asString: String =
s"Cannot remove domain trust certificate for $participantId because it still hosts parties ${parties
.mkString(",")}"
/** returns initial set of onboarding transactions that should be dispatched to the domain */
def findParticipantOnboardingTransactions(participantId: ParticipantId, domainId: DomainId)(
implicit traceContext: TraceContext
): FutureUnlessShutdown[Seq[GenericSignedTopologyTransactionX]]
override def pretty: Pretty[ParticipantStillHostsParties] =
prettyOfClass(param("participantId", _.participantId), param("parties", _.parties))
def findDispatchingTransactionsAfter(
timestampExclusive: CantonTimestamp,
limit: Option[Int],
)(implicit
traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX]
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.IllegalRemovalOfDomainTrustCertificate.ParticipantStillHostsParties(
participantId,
parties,
)
def findStoredForVersion(
asOfExclusive: CantonTimestamp,
transaction: GenericTopologyTransactionX,
protocolVersion: ProtocolVersion,
)(implicit
traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]]
final def exists(transaction: GenericSignedTopologyTransactionX)(implicit
traceContext: TraceContext
): Future[Boolean] = findStored(CantonTimestamp.MaxValue, transaction).map(
_.exists(signedTxFromStoredTx(_) == transaction)
)
def findStored(
asOfExclusive: CantonTimestamp,
transaction: GenericSignedTopologyTransactionX,
includeRejected: Boolean = false,
)(implicit
traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]]
}
object TopologyStoreX {
  /** An upcoming effective-time change, as returned by
    * [[TopologyStoreX.findUpcomingEffectiveChanges]].
    */
  sealed trait Change extends Product with Serializable {
    def sequenced: SequencedTime
    def effective: EffectiveTime
  }

  object Change {
    /** a change to the topology change delay (epsilon) itself */
    final case class TopologyDelay(
        sequenced: SequencedTime,
        effective: EffectiveTime,
        epsilon: NonNegativeFiniteDuration,
    ) extends Change

    /** any other effective-time change */
    final case class Other(sequenced: SequencedTime, effective: EffectiveTime) extends Change
  }

  /** Converts stored transactions into deduplicated [[Change]]s sorted by effective time.
    * Domain-parameter transactions are mapped to [[Change.TopologyDelay]] (carrying the
    * topology change delay); everything else becomes [[Change.Other]].
    */
  def accumulateUpcomingEffectiveChanges(
      items: Seq[StoredTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]]
  ): Seq[Change] = {
    items
      .map(x => (x, x.mapping))
      .map {
        case (tx, x: DomainParametersStateX) =>
          Change.TopologyDelay(tx.sequenced, tx.validFrom, x.parameters.topologyChangeDelay)
        case (tx, _) => Change.Other(tx.sequenced, tx.validFrom)
      }
      .sortBy(_.effective)
      .distinct
  }

  /** Instantiates the store implementation matching the storage backend (in-memory or DB). */
  def apply[StoreID <: TopologyStoreId](
      storeId: StoreID,
      storage: Storage,
      timeouts: ProcessingTimeout,
      loggerFactory: NamedLoggerFactory,
  )(implicit
      ec: ExecutionContext
  ): TopologyStoreX[StoreID] = {
    // tag the logger with the store id so log lines are attributable to a specific store
    val storeLoggerFactory = loggerFactory.append("store", storeId.toString)
    storage match {
      case _: MemoryStorage =>
        new InMemoryTopologyStoreX(storeId, storeLoggerFactory, timeouts)
      case dbStorage: DbStorage =>
        new DbTopologyStoreX(dbStorage, storeId, timeouts, storeLoggerFactory)
    }
  }

  // mapping codes considered relevant for a participant's initial dispatching to a domain
  lazy val initialParticipantDispatchingSet = Set(
    TopologyMappingX.Code.DomainTrustCertificateX,
    TopologyMappingX.Code.OwnerToKeyMappingX,
    // TODO(#14060) - potentially revisit this once we implement TopologyStoreX.filterInitialParticipantDispatchingTransactions
    TopologyMappingX.Code.NamespaceDelegationX,
    TopologyMappingX.Code.IdentifierDelegationX,
    TopologyMappingX.Code.DecentralizedNamespaceDefinitionX,
  )

  /** Selects from `transactions` those relevant for onboarding `participantId` to `domainId`:
    * the participant's own trust certificate and key mappings, delegations within the
    * participant's namespace / for its uid, and decentralized namespace definitions.
    * Note the backquoted patterns match on the given `participantId`/`domainId` values.
    */
  def filterInitialParticipantDispatchingTransactions(
      participantId: ParticipantId,
      domainId: DomainId,
      transactions: Seq[GenericStoredTopologyTransactionX],
  ): Seq[GenericSignedTopologyTransactionX] = {
    // TODO(#14060): Extend filtering along the lines of:
    //               TopologyStore.filterInitialParticipantDispatchingTransactions
    transactions.map(_.transaction).collect {
      case tx @ SignedTopologyTransactionX(
            TopologyTransactionX(_, _, DomainTrustCertificateX(`participantId`, `domainId`, _, _)),
            _,
            _,
          ) =>
        tx
      case tx @ SignedTopologyTransactionX(
            TopologyTransactionX(_, _, OwnerToKeyMappingX(`participantId`, _, _)),
            _,
            _,
          ) =>
        tx
      case tx @ SignedTopologyTransactionX(
            TopologyTransactionX(_, _, NamespaceDelegationX(ns, _, _)),
            _,
            _,
          ) if ns == participantId.uid.namespace =>
        tx
      case tx @ SignedTopologyTransactionX(
            TopologyTransactionX(_, _, IdentifierDelegationX(uid, _)),
            _,
            _,
          ) if uid == participantId.uid =>
        tx
      case tx @ SignedTopologyTransactionX(
            TopologyTransactionX(_, _, _: DecentralizedNamespaceDefinitionX),
            _,
            _,
          ) =>
        tx
    }
  }

  /** convenience method waiting until the last eligible transaction inserted into the source store has been dispatched successfully to the target domain */
  def awaitTxObserved(
      client: DomainTopologyClient,
      transaction: GenericSignedTopologyTransactionX,
      target: TopologyStoreX[?],
      timeout: Duration,
  )(implicit
      traceContext: TraceContext,
      executionContext: ExecutionContext,
  ): FutureUnlessShutdown[Boolean] = {
    client.await(
      // we know that the transaction is stored and effective once we find it in the target
      // domain store and once the effective time (valid from) is smaller than the client timestamp
      sp => target.findStored(sp.timestamp, transaction, includeRejected = true).map(_.nonEmpty),
      timeout,
    )
  }
}
/** Specifies which point (or range) of topology history a query should look at;
  * serializable to its protobuf representation.
  */
sealed trait TimeQuery {
  def toProtoV30: topoV30.BaseQuery.TimeQuery
}
object TimeQuery {

  /** Query the current head state. */
  object HeadState extends TimeQuery {
    override def toProtoV30: topoV30.BaseQuery.TimeQuery =
      topoV30.BaseQuery.TimeQuery.HeadState(com.google.protobuf.empty.Empty())
  }

  /** Query the state as of a specific timestamp. */
  final case class Snapshot(asOf: CantonTimestamp) extends TimeQuery {
    override def toProtoV30: topoV30.BaseQuery.TimeQuery =
      topoV30.BaseQuery.TimeQuery.Snapshot(asOf.toProtoTimestamp)
  }

  /** Query changes within a time range; either bound may be left open. */
  final case class Range(from: Option[CantonTimestamp], until: Option[CantonTimestamp])
      extends TimeQuery {
    override def toProtoV30: topoV30.BaseQuery.TimeQuery = topoV30.BaseQuery.TimeQuery.Range(
      topoV30.BaseQuery.TimeRange(from.map(_.toProtoTimestamp), until.map(_.toProtoTimestamp))
    )
  }

  /** Deserializes a [[TimeQuery]] from protobuf; an unset field yields a
    * [[ProtoDeserializationError.FieldNotSet]] tagged with `fieldName`.
    */
  def fromProto(
      proto: topoV30.BaseQuery.TimeQuery,
      fieldName: String,
  ): ParsingResult[TimeQuery] =
    proto match {
      case topoV30.BaseQuery.TimeQuery.HeadState(_) => Right(HeadState)
      case topoV30.BaseQuery.TimeQuery.Snapshot(ts) =>
        CantonTimestamp.fromProtoTimestamp(ts).map(Snapshot(_))
      case topoV30.BaseQuery.TimeQuery.Range(range) =>
        for {
          lowerBound <- range.from.traverse(CantonTimestamp.fromProtoTimestamp)
          upperBound <- range.until.traverse(CantonTimestamp.fromProtoTimestamp)
        } yield Range(lowerBound, upperBound)
      case topoV30.BaseQuery.TimeQuery.Empty =>
        Left(ProtoDeserializationError.FieldNotSet(fieldName))
    }
}

View File

@ -1,459 +0,0 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.topology.store
import cats.syntax.traverse.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.ProtoDeserializationError
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.time.NonNegativeFiniteDuration
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.admin.v30 as topoV30
import com.digitalasset.canton.topology.client.DomainTopologyClient
import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
import com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.{
GenericStoredTopologyTransactionsX,
PositiveStoredTopologyTransactionsX,
}
import com.digitalasset.canton.topology.store.TopologyTransactionRejection.Duplicate
import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX
import com.digitalasset.canton.topology.store.db.DbTopologyStoreX
import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStoreX
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash
import com.digitalasset.canton.topology.transaction.TopologyTransactionX.{
GenericTopologyTransactionX,
TxHash,
}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.version.ProtocolVersion
import com.google.common.annotations.VisibleForTesting
import scala.concurrent.duration.Duration
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag
/** A signed topology transaction together with its validity interval in the store.
  *
  * @param sequenced  time at which the transaction was sequenced
  * @param validFrom  effective time from which the transaction is valid
  * @param validUntil effective time until which the transaction is valid; `None` if still valid
  * @param transaction the underlying signed topology transaction
  */
final case class StoredTopologyTransactionX[+Op <: TopologyChangeOpX, +M <: TopologyMappingX](
    sequenced: SequencedTime,
    validFrom: EffectiveTime,
    validUntil: Option[EffectiveTime],
    transaction: SignedTopologyTransactionX[Op, M],
) extends DelegatedTopologyTransactionLike[Op, M]
    with PrettyPrinting {
  override protected def transactionLikeDelegate: TopologyTransactionLike[Op, M] = transaction

  override def pretty: Pretty[StoredTopologyTransactionX.this.type] =
    prettyOfClass(
      unnamedParam(_.transaction),
      param("sequenced", _.sequenced.value),
      param("validFrom", _.validFrom.value),
      paramIfDefined("validUntil", _.validUntil.map(_.value)),
    )

  /** Narrows to the given mapping type, if the wrapped transaction has it.
    * The cast is safe because `selectMapping` only succeeds for matching mappings.
    */
  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
  def selectMapping[TargetMapping <: TopologyMappingX: ClassTag] = transaction
    .selectMapping[TargetMapping]
    .map(_ => this.asInstanceOf[StoredTopologyTransactionX[Op, TargetMapping]])

  /** Narrows to the given change operation, if the wrapped transaction has it.
    * The cast is safe because `selectOp` only succeeds for matching operations.
    */
  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
  def selectOp[TargetOp <: TopologyChangeOpX: ClassTag] = transaction
    .selectOp[TargetOp]
    .map(_ => this.asInstanceOf[StoredTopologyTransactionX[TargetOp, M]])
}
object StoredTopologyTransactionX {
  /** Stored transaction with fully general operation and mapping type parameters. */
  type GenericStoredTopologyTransactionX =
    StoredTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
}
/** A signed topology transaction together with its validation outcome.
  *
  * @param transaction       the validated signed transaction
  * @param rejectionReason   set iff validation rejected the transaction
  * @param expireImmediately whether the transaction should expire right away
  */
final case class ValidatedTopologyTransactionX[+Op <: TopologyChangeOpX, +M <: TopologyMappingX](
    transaction: SignedTopologyTransactionX[Op, M],
    rejectionReason: Option[TopologyTransactionRejection] = None,
    expireImmediately: Boolean = false,
) extends DelegatedTopologyTransactionLike[Op, M]
    with PrettyPrinting {

  override protected def transactionLikeDelegate: TopologyTransactionLike[Op, M] = transaction

  /** The rejection reason, unless it is merely a duplicate-transaction rejection. */
  def nonDuplicateRejectionReason: Option[TopologyTransactionRejection] =
    rejectionReason.filter {
      case Duplicate(_) => false
      case _ => true
    }

  /** Narrows the mapping type, keeping rejection state, if the transaction matches. */
  def collectOfMapping[TargetM <: TopologyMappingX: ClassTag]
      : Option[ValidatedTopologyTransactionX[Op, TargetM]] =
    transaction
      .selectMapping[TargetM]
      .map(narrowed => copy[Op, TargetM](transaction = narrowed))

  /** Narrows both operation and mapping type, keeping rejection state, if the transaction matches. */
  def collectOf[TargetO <: TopologyChangeOpX: ClassTag, TargetM <: TopologyMappingX: ClassTag]
      : Option[ValidatedTopologyTransactionX[TargetO, TargetM]] =
    transaction
      .select[TargetO, TargetM]
      .map(narrowed => copy[TargetO, TargetM](transaction = narrowed))

  override def pretty: Pretty[ValidatedTopologyTransactionX.this.type] =
    prettyOfClass(
      unnamedParam(_.transaction),
      paramIfDefined("rejectionReason", _.rejectionReason),
      paramIfTrue("expireImmediately", _.expireImmediately),
    )
}
object ValidatedTopologyTransactionX {
  /** Validated transaction with fully general operation and mapping type parameters. */
  type GenericValidatedTopologyTransactionX =
    ValidatedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
}
abstract class TopologyStoreX[+StoreID <: TopologyStoreId](implicit
protected val ec: ExecutionContext
) extends FlagCloseable {
this: NamedLogging =>
def storeId: StoreID
/** fetch the effective time updates greater than or equal to a certain timestamp
*
* this function is used to recover the future effective timestamp such that we can reschedule "pokes" of the
* topology client and updates of the acs commitment processor on startup
*/
def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit
traceContext: TraceContext
): Future[Seq[TopologyStoreX.Change]]
def maxTimestamp()(implicit
traceContext: TraceContext
): Future[Option[(SequencedTime, EffectiveTime)]]
/** returns the current dispatching watermark
*
* for topology transaction dispatching, we keep track up to which point in time
* we have mirrored the authorized store to the remote store
*
* the timestamp always refers to the timestamp of the authorized store!
*/
def currentDispatchingWatermark(implicit
traceContext: TraceContext
): Future[Option[CantonTimestamp]]
/** update the dispatching watermark for this target store */
def updateDispatchingWatermark(timestamp: CantonTimestamp)(implicit
traceContext: TraceContext
): Future[Unit]
def findTransactionsByTxHash(asOfExclusive: EffectiveTime, hashes: Set[TxHash])(implicit
traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]]
def findProposalsByTxHash(asOfExclusive: EffectiveTime, hashes: NonEmpty[Set[TxHash]])(implicit
traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]]
def findTransactionsForMapping(asOfExclusive: EffectiveTime, hashes: NonEmpty[Set[MappingHash]])(
implicit traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]]
/** returns the set of positive transactions
*
* this function is used by the topology processor to determine the set of transaction, such that
* we can perform cascading updates if there was a certificate revocation
*
* @param asOfInclusive whether the search interval should include the current timepoint or not. the state at t is
* defined as "exclusive" of t, whereas for updating the state, we need to be able to query inclusive.
*/
def findPositiveTransactions(
asOf: CantonTimestamp,
asOfInclusive: Boolean,
isProposal: Boolean,
types: Seq[TopologyMappingX.Code],
filterUid: Option[Seq[UniqueIdentifier]],
filterNamespace: Option[Seq[Namespace]],
)(implicit
traceContext: TraceContext
): Future[PositiveStoredTopologyTransactionsX]
/** add validated topology transaction as is to the topology transaction table */
def update(
sequenced: SequencedTime,
effective: EffectiveTime,
removeMapping: Map[MappingHash, PositiveInt],
removeTxs: Set[TxHash],
additions: Seq[GenericValidatedTopologyTransactionX],
)(implicit
traceContext: TraceContext
): Future[Unit]
@VisibleForTesting
protected[topology] def dumpStoreContent()(implicit
traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX]
/** store an initial set of topology transactions as given into the store */
def bootstrap(snapshot: GenericStoredTopologyTransactionsX)(implicit
traceContext: TraceContext
): Future[Unit]
/** query optimized for inspection
*
* @param proposals if true, query only for proposals instead of approved transaction mappings
* @param recentTimestampO if exists, use this timestamp for the head state to prevent race conditions on the console
*/
def inspect(
proposals: Boolean,
timeQuery: TimeQuery,
// TODO(#14048) - consider removing `recentTimestampO` and moving callers to TimeQueryX.Snapshot
recentTimestampO: Option[CantonTimestamp],
op: Option[TopologyChangeOpX],
types: Seq[TopologyMappingX.Code],
idFilter: Option[String],
namespaceFilter: Option[String],
)(implicit
traceContext: TraceContext
): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]]
def inspectKnownParties(
timestamp: CantonTimestamp,
filterParty: String,
filterParticipant: String,
limit: Int,
)(implicit traceContext: TraceContext): Future[Set[PartyId]]
/** Finds the topology transaction that first onboarded the sequencer with ID `sequencerId`
*/
def findFirstSequencerStateForSequencer(
sequencerId: SequencerId
)(implicit
traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, SequencerDomainStateX]]]
/** Finds the topology transaction that first onboarded the mediator with ID `mediatorId`
*/
def findFirstMediatorStateForMediator(
mediatorId: MediatorId
)(implicit
traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, MediatorDomainStateX]]]
/** Finds the topology transaction that first onboarded the participant with ID `participantId`
*/
def findFirstTrustCertificateForParticipant(
participant: ParticipantId
)(implicit
traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, DomainTrustCertificateX]]]
def findEssentialStateAtSequencedTime(
asOfInclusive: SequencedTime
)(implicit traceContext: TraceContext): Future[GenericStoredTopologyTransactionsX]
/** Extracts the signed transaction payload from a stored entry.
  * Overridable hook so subclasses can adjust how stored entries are compared
  * (used by [[exists]]); the default returns the entry's transaction unchanged.
  */
protected def signedTxFromStoredTx(
    storedTx: GenericStoredTopologyTransactionX
): SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX] = storedTx.transaction
/** Returns whether `transaction` would contribute at least one signature not yet present
  * in this store.
  *
  * Evaluates to true when nothing matching is stored at all (`forall` on an empty
  * Option). When a matching entry exists, the extra signatures only count if that entry
  * is still an open proposal (`isProposal`) that has not expired (`validUntil.isEmpty`).
  */
def providesAdditionalSignatures(
    transaction: GenericSignedTopologyTransactionX
)(implicit traceContext: TraceContext): Future[Boolean] = {
  findStored(CantonTimestamp.MaxValue, transaction).map(_.forall { inStore =>
    // check whether source still could provide an additional signature
    transaction.signatures.diff(inStore.transaction.signatures.forgetNE).nonEmpty &&
    // but only if the transaction in the target store is a valid proposal
    inStore.transaction.isProposal &&
    inStore.validUntil.isEmpty
  })
}
/** returns initial set of onboarding transactions that should be dispatched to the domain */
def findParticipantOnboardingTransactions(participantId: ParticipantId, domainId: DomainId)(
implicit traceContext: TraceContext
): FutureUnlessShutdown[Seq[GenericSignedTopologyTransactionX]]
def findDispatchingTransactionsAfter(
timestampExclusive: CantonTimestamp,
limit: Option[Int],
)(implicit
traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX]
def findStoredForVersion(
asOfExclusive: CantonTimestamp,
transaction: GenericTopologyTransactionX,
protocolVersion: ProtocolVersion,
)(implicit
traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]]
/** Returns whether `transaction` is already present in this store.
  * Rejected entries are not considered (findStored defaults to includeRejected = false);
  * comparison goes through [[signedTxFromStoredTx]].
  */
final def exists(transaction: GenericSignedTopologyTransactionX)(implicit
    traceContext: TraceContext
): Future[Boolean] = findStored(CantonTimestamp.MaxValue, transaction).map(
  _.exists(signedTxFromStoredTx(_) == transaction)
)
def findStored(
asOfExclusive: CantonTimestamp,
transaction: GenericSignedTopologyTransactionX,
includeRejected: Boolean = false,
)(implicit
traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]]
}
object TopologyStoreX {

  /** An upcoming topology change, keyed by when it was sequenced and when it becomes effective. */
  sealed trait Change extends Product with Serializable {
    def sequenced: SequencedTime
    def effective: EffectiveTime
  }

  object Change {
    /** A change of the topology-change-delay domain parameter (`epsilon`). */
    final case class TopologyDelay(
        sequenced: SequencedTime,
        effective: EffectiveTime,
        epsilon: NonNegativeFiniteDuration,
    ) extends Change

    /** Any other upcoming topology change. */
    final case class Other(sequenced: SequencedTime, effective: EffectiveTime) extends Change
  }

  /** Maps stored transactions to their upcoming effective [[Change]]s, sorted by effective
    * time and deduplicated. DomainParametersStateX transactions are singled out as
    * [[Change.TopologyDelay]] so that callers can track topology-change-delay updates.
    */
  def accumulateUpcomingEffectiveChanges(
      items: Seq[StoredTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]]
  ): Seq[Change] = {
    items
      .map(x => (x, x.mapping))
      .map {
        case (tx, x: DomainParametersStateX) =>
          Change.TopologyDelay(tx.sequenced, tx.validFrom, x.parameters.topologyChangeDelay)
        case (tx, _) => Change.Other(tx.sequenced, tx.validFrom)
      }
      .sortBy(_.effective)
      .distinct
  }

  /** Creates an in-memory or DB-backed topology store depending on the configured storage. */
  def apply[StoreID <: TopologyStoreId](
      storeId: StoreID,
      storage: Storage,
      timeouts: ProcessingTimeout,
      loggerFactory: NamedLoggerFactory,
  )(implicit
      ec: ExecutionContext
  ): TopologyStoreX[StoreID] = {
    // tag the store id onto the logger so log lines can be attributed to a specific store
    val storeLoggerFactory = loggerFactory.append("store", storeId.toString)
    storage match {
      case _: MemoryStorage =>
        new InMemoryTopologyStoreX(storeId, storeLoggerFactory, timeouts)
      case dbStorage: DbStorage =>
        new DbTopologyStoreX(dbStorage, storeId, timeouts, storeLoggerFactory)
    }
  }

  /** Mapping codes that are relevant for a participant's initial onboarding dispatch. */
  lazy val initialParticipantDispatchingSet = Set(
    TopologyMappingX.Code.DomainTrustCertificateX,
    TopologyMappingX.Code.OwnerToKeyMappingX,
    // TODO(#14060) - potentially revisit this once we implement TopologyStoreX.filterInitialParticipantDispatchingTransactions
    TopologyMappingX.Code.NamespaceDelegationX,
    TopologyMappingX.Code.IdentifierDelegationX,
    TopologyMappingX.Code.DecentralizedNamespaceDefinitionX,
  )

  /** Selects from `transactions` only those relevant for onboarding `participantId` to
    * `domainId`: its trust certificate for that domain, its keys, delegations for its
    * namespace or uid, and any decentralized namespace definitions.
    */
  def filterInitialParticipantDispatchingTransactions(
      participantId: ParticipantId,
      domainId: DomainId,
      transactions: Seq[GenericStoredTopologyTransactionX],
  ): Seq[GenericSignedTopologyTransactionX] = {
    // TODO(#14060): Extend filtering along the lines of:
    // TopologyStore.filterInitialParticipantDispatchingTransactions
    transactions.map(_.transaction).collect {
      case tx @ SignedTopologyTransactionX(
            TopologyTransactionX(_, _, DomainTrustCertificateX(`participantId`, `domainId`, _, _)),
            _,
            _,
          ) =>
        tx
      case tx @ SignedTopologyTransactionX(
            TopologyTransactionX(_, _, OwnerToKeyMappingX(`participantId`, _, _)),
            _,
            _,
          ) =>
        tx
      case tx @ SignedTopologyTransactionX(
            TopologyTransactionX(_, _, NamespaceDelegationX(ns, _, _)),
            _,
            _,
          ) if ns == participantId.uid.namespace =>
        tx
      case tx @ SignedTopologyTransactionX(
            TopologyTransactionX(_, _, IdentifierDelegationX(uid, _)),
            _,
            _,
          ) if uid == participantId.uid =>
        tx
      case tx @ SignedTopologyTransactionX(
            TopologyTransactionX(_, _, _: DecentralizedNamespaceDefinitionX),
            _,
            _,
          ) =>
        tx
    }
  }

  /** convenience method waiting until the last eligible transaction inserted into the source store has been dispatched successfully to the target domain */
  def awaitTxObserved(
      client: DomainTopologyClient,
      transaction: GenericSignedTopologyTransactionX,
      target: TopologyStoreX[?],
      timeout: Duration,
  )(implicit
      traceContext: TraceContext,
      executionContext: ExecutionContext,
  ): FutureUnlessShutdown[Boolean] = {
    client.await(
      // we know that the transaction is stored and effective once we find it in the target
      // domain store and once the effective time (valid from) is smaller than the client timestamp
      sp => target.findStored(sp.timestamp, transaction, includeRejected = true).map(_.nonEmpty),
      timeout,
    )
  }
}
/** Describes the time window a topology store query is interested in
  * (head state, a point-in-time snapshot, or a range); serializable to protobuf.
  */
sealed trait TimeQuery {
  def toProtoV30: topoV30.BaseQuery.TimeQuery
}
object TimeQuery {

  /** Query the latest, currently valid state. */
  object HeadState extends TimeQuery {
    override def toProtoV30: topoV30.BaseQuery.TimeQuery =
      topoV30.BaseQuery.TimeQuery.HeadState(com.google.protobuf.empty.Empty())
  }

  /** Query the state as it was valid at `asOf`. */
  final case class Snapshot(asOf: CantonTimestamp) extends TimeQuery {
    override def toProtoV30: topoV30.BaseQuery.TimeQuery =
      topoV30.BaseQuery.TimeQuery.Snapshot(asOf.toProtoTimestamp)
  }

  /** Query all entries within the given, optionally open-ended, interval. */
  final case class Range(from: Option[CantonTimestamp], until: Option[CantonTimestamp])
      extends TimeQuery {
    override def toProtoV30: topoV30.BaseQuery.TimeQuery = {
      val interval =
        topoV30.BaseQuery.TimeRange(from.map(_.toProtoTimestamp), until.map(_.toProtoTimestamp))
      topoV30.BaseQuery.TimeQuery.Range(interval)
    }
  }

  /** Parses the protobuf oneof back into a [[TimeQuery]]; an unset field is a
    * deserialization error reported under `fieldName`.
    */
  def fromProto(
      proto: topoV30.BaseQuery.TimeQuery,
      fieldName: String,
  ): ParsingResult[TimeQuery] =
    proto match {
      case topoV30.BaseQuery.TimeQuery.Empty =>
        Left(ProtoDeserializationError.FieldNotSet(fieldName))
      case topoV30.BaseQuery.TimeQuery.HeadState(_) =>
        Right(HeadState)
      case topoV30.BaseQuery.TimeQuery.Snapshot(ts) =>
        CantonTimestamp.fromProtoTimestamp(ts).map(Snapshot)
      case topoV30.BaseQuery.TimeQuery.Range(value) =>
        value.from
          .traverse(CantonTimestamp.fromProtoTimestamp)
          .flatMap { lower =>
            value.until.traverse(CantonTimestamp.fromProtoTimestamp).map(Range(lower, _))
          }
    }
}

View File

@ -0,0 +1,167 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.topology.store
import com.digitalasset.canton.config.CantonRequireTypes.String256M
import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveLong}
import com.digitalasset.canton.crypto.{Fingerprint, SignatureCheckError}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.ErrorLoggingContext
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.OnboardingRestriction
import com.digitalasset.canton.topology.{
DomainId,
Member,
ParticipantId,
PartyId,
TopologyManagerError,
}
/** Reason why a topology transaction was rejected. */
sealed trait TopologyTransactionRejection extends PrettyPrinting with Product with Serializable {
  /** Human-readable description of the rejection. */
  def asString: String

  /** The description, length-limited for DB persistence (String256M). */
  def asString1GB: String256M =
    String256M.tryCreate(asString, Some("topology transaction rejection"))

  /** Maps this rejection onto the corresponding error-code backed [[TopologyManagerError]]. */
  def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError
}
object TopologyTransactionRejection {

  // None of the transaction's signing keys is covered by a valid delegation.
  final case class NoDelegationFoundForKeys(keys: Set[Fingerprint])
      extends TopologyTransactionRejection {
    override def asString: String = s"No delegation found for keys ${keys.mkString(", ")}"
    override def pretty: Pretty[NoDelegationFoundForKeys] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.UnauthorizedTransaction.Failure(asString)
  }

  // Generic authorization failure.
  case object NotAuthorized extends TopologyTransactionRejection {
    override def asString: String = "Not authorized"
    override def pretty: Pretty[NotAuthorized.type] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.UnauthorizedTransaction.Failure(asString)
  }

  // The requested signature threshold exceeds the allowed maximum.
  final case class ThresholdTooHigh(actual: Int, mustBeAtMost: Int)
      extends TopologyTransactionRejection {
    override def asString: String =
      s"Threshold must not be higher than $mustBeAtMost, but was $actual."

    override def pretty: Pretty[ThresholdTooHigh] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = {
      TopologyManagerError.InvalidThreshold.ThresholdTooHigh(actual, mustBeAtMost)
    }
  }

  // Participant onboarding blocked by a domain onboarding restriction; `loginAfter`
  // optionally indicates when a retry may succeed.
  final case class OnboardingRestrictionInPlace(
      participant: ParticipantId,
      restriction: OnboardingRestriction,
      loginAfter: Option[CantonTimestamp],
  ) extends TopologyTransactionRejection {
    override def asString: String =
      s"Participant ${participant} onboarding rejected as restrictions ${restriction} are in place."

    override def pretty: Pretty[OnboardingRestrictionInPlace] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = {
      TopologyManagerError.ParticipantOnboardingRefused.Reject(participant, restriction)
    }
  }

  // A signature on the transaction failed cryptographic verification.
  final case class SignatureCheckFailed(err: SignatureCheckError)
      extends TopologyTransactionRejection {
    override def asString: String = err.toString
    override def pretty: Pretty[SignatureCheckFailed] = prettyOfClass(param("err", _.err))

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.InvalidSignatureError.Failure(err)
  }

  // The transaction is addressed to a different domain.
  final case class WrongDomain(wrong: DomainId) extends TopologyTransactionRejection {
    override def asString: String = show"Wrong domain $wrong"
    override def pretty: Pretty[WrongDomain] = prettyOfClass(param("wrong", _.wrong))

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.WrongDomain.Failure(wrong)
  }

  // An identical transaction already exists, stored at timestamp `old`.
  final case class Duplicate(old: CantonTimestamp) extends TopologyTransactionRejection {
    override def asString: String = show"Duplicate transaction from ${old}"
    override def pretty: Pretty[Duplicate] = prettyOfClass(param("old", _.old))
    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.DuplicateTransaction.ExistsAt(old)
  }

  // The transaction's serial does not match the next expected serial.
  final case class SerialMismatch(expected: PositiveInt, actual: PositiveInt)
      extends TopologyTransactionRejection {
    override def asString: String =
      show"The given serial $actual does not match the expected serial $expected"
    override def pretty: Pretty[SerialMismatch] =
      prettyOfClass(param("expected", _.expected), param("actual", _.actual))
    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.SerialMismatch.Failure(expected, actual)
  }

  // Catch-all rejection with a free-form message; surfaces as an internal error.
  final case class Other(str: String) extends TopologyTransactionRejection {
    override def asString: String = str
    override def pretty: Pretty[Other] = prettyOfString(_ => asString)

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext) =
      TopologyManagerError.InternalError.Other(str)
  }

  // The requested extra traffic limit is lower than the required minimum for `member`.
  final case class ExtraTrafficLimitTooLow(
      member: Member,
      actual: PositiveLong,
      expectedMinimum: PositiveLong,
  ) extends TopologyTransactionRejection {
    override def asString: String =
      s"Extra traffic limit for $member should be at least $expectedMinimum, but was $actual."

    override def pretty: Pretty[ExtraTrafficLimitTooLow] =
      prettyOfClass(
        param("member", _.member),
        param("actual", _.actual),
        param("expectedMinimum", _.expectedMinimum),
      )

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.InvalidTrafficLimit.TrafficLimitTooLow(member, actual, expectedMinimum)
  }

  // One or more members lack a required signing and/or encryption key.
  final case class InsufficientKeys(members: Seq[Member]) extends TopologyTransactionRejection {
    override def asString: String =
      s"Members ${members.sorted.mkString(", ")} are missing a signing key or an encryption key or both."

    override def pretty: Pretty[InsufficientKeys] = prettyOfClass(
      param("members", _.members)
    )

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.InsufficientKeys.Failure(members)
  }

  // The transaction references members that are not known to the store.
  final case class UnknownMembers(members: Seq[Member]) extends TopologyTransactionRejection {
    override def asString: String = s"Members ${members.toSeq.sorted.mkString(", ")} are unknown."

    override def pretty: Pretty[UnknownMembers] = prettyOfClass(param("members", _.members))

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.UnknownMembers.Failure(members)
  }

  // Removing a participant's domain trust certificate is refused while it still hosts parties.
  final case class ParticipantStillHostsParties(participantId: ParticipantId, parties: Seq[PartyId])
      extends TopologyTransactionRejection {
    override def asString: String =
      s"Cannot remove domain trust certificate for $participantId because it still hosts parties ${parties
          .mkString(",")}"

    override def pretty: Pretty[ParticipantStillHostsParties] =
      prettyOfClass(param("participantId", _.participantId), param("parties", _.parties))

    override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
      TopologyManagerError.IllegalRemovalOfDomainTrustCertificate.ParticipantStillHostsParties(
        participantId,
        parties,
      )
  }
}

View File

@ -0,0 +1,137 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.topology.store.db
import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.DisplayName
import com.digitalasset.canton.config.CantonRequireTypes.{String255, String300}
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.resource.DbStorage.{DbAction, SQLActionBuilderChain}
import com.digitalasset.canton.resource.{DbStorage, DbStore}
import com.digitalasset.canton.topology.store.{PartyMetadata, PartyMetadataStore}
import com.digitalasset.canton.topology.{ParticipantId, PartyId, UniqueIdentifier}
import com.digitalasset.canton.tracing.TraceContext
import scala.concurrent.{ExecutionContext, Future}
/** DB-backed implementation of [[PartyMetadataStore]], persisting party metadata
  * (display name, hosting participant, submission id, effective time, notification flag)
  * in the `common_party_metadata` table.
  */
class DbPartyMetadataStore(
    override protected val storage: DbStorage,
    override protected val timeouts: ProcessingTimeout,
    override protected val loggerFactory: NamedLoggerFactory,
)(implicit
    ec: ExecutionContext
) extends PartyMetadataStore
    with DbStore {

  import DbStorage.Implicits.BuilderChain.*
  import storage.api.*

  /** Looks up the metadata row for the given party, if any. */
  override def metadataForParty(
      partyId: PartyId
  )(implicit traceContext: TraceContext): Future[Option[PartyMetadata]] = {
    storage
      .query(
        metadataForPartyQuery(sql"party_id = $partyId #${storage.limit(1)}"),
        functionFullName,
      )
      .map(_.headOption)
  }

  /** Builds the select statement for the given WHERE clause and maps rows to
    * [[PartyMetadata]]. Unparseable participant ids / display names are silently dropped
    * (`toOption`).
    */
  private def metadataForPartyQuery(
      where: SQLActionBuilderChain
  ): DbAction.ReadOnly[Seq[PartyMetadata]] = {

    val query =
      sql"select party_id, display_name, participant_id, submission_id, effective_at, notified from common_party_metadata where " ++ where

    for {
      data <- query
        .as[(PartyId, Option[String], Option[String], String255, CantonTimestamp, Boolean)]
    } yield {
      data.map {
        case (partyId, displayNameS, participantIdS, submissionId, effectiveAt, notified) =>
          val participantId =
            participantIdS
              .flatMap(x => UniqueIdentifier.fromProtoPrimitive_(x).toOption)
              .map(ParticipantId(_))
          val displayName = displayNameS.flatMap(String255.create(_).toOption)
          PartyMetadata(
            partyId,
            displayName,
            participantId = participantId,
          )(
            effectiveTimestamp = effectiveAt,
            submissionId = submissionId,
            notified = notified,
          )
      }
    }
  }

  /** Upserts the metadata row for `partyId`; any update resets `notified` to false. */
  override def insertOrUpdatePartyMetadata(
      partyId: PartyId,
      participantId: Option[ParticipantId],
      displayName: Option[DisplayName],
      effectiveTimestamp: CantonTimestamp,
      submissionId: String255,
  )(implicit traceContext: TraceContext): Future[Unit] = {
    val participantS = dbValue(participantId)
    // upsert syntax differs per profile: Postgres uses ON CONFLICT, H2/Oracle use MERGE
    val query = storage.profile match {
      case _: DbStorage.Profile.Postgres =>
        sqlu"""insert into common_party_metadata (party_id, display_name, participant_id, submission_id, effective_at)
                    VALUES ($partyId, $displayName, $participantS, $submissionId, $effectiveTimestamp)
                 on conflict (party_id) do update
                  set
                    display_name = $displayName,
                    participant_id = $participantS,
                    submission_id = $submissionId,
                    effective_at = $effectiveTimestamp,
                    notified = false
                 """
      case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Oracle =>
        sqlu"""merge into common_party_metadata
                  using dual
                  on (party_id = $partyId)
                  when matched then
                    update set
                      display_name = $displayName,
                      participant_id = $participantS,
                      submission_id = $submissionId,
                      effective_at = $effectiveTimestamp,
                      notified = ${false}
                  when not matched then
                    insert (party_id, display_name, participant_id, submission_id, effective_at)
                    values ($partyId, $displayName, $participantS, $submissionId, $effectiveTimestamp)
                 """
    }
    storage.update_(query, functionFullName)
  }

  // Serializes an optional participant id into its length-limited string DB representation.
  private def dbValue(participantId: Option[ParticipantId]): Option[String300] =
    participantId.map(_.uid.toLengthLimitedString.asString300)

  /** mark the given metadata has having been successfully forwarded to the domain */
  override def markNotified(
      metadata: PartyMetadata
  )(implicit traceContext: TraceContext): Future[Unit] = {
    val partyId = metadata.partyId
    val effectiveAt = metadata.effectiveTimestamp
    val query =
      sqlu"UPDATE common_party_metadata SET notified = ${true} WHERE party_id = $partyId and effective_at = $effectiveAt"
    storage.update_(query, functionFullName)
  }

  /** fetch the current set of party data which still needs to be notified */
  override def fetchNotNotified()(implicit
      traceContext: TraceContext
  ): Future[Seq[PartyMetadata]] = {
    storage
      .query(
        metadataForPartyQuery(sql"notified = ${false}"),
        functionFullName,
      )
  }
}

View File

@ -3,136 +3,868 @@
package com.digitalasset.canton.topology.store.db
import cats.syntax.option.*
import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.DisplayName
import com.digitalasset.canton.config.CantonRequireTypes.{String255, String300}
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String185}
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.resource.DbStorage.{DbAction, SQLActionBuilderChain}
import com.digitalasset.canton.resource.DbStorage.SQLActionBuilderChain
import com.digitalasset.canton.resource.{DbStorage, DbStore}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
import com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.{
GenericStoredTopologyTransactionsX,
PositiveStoredTopologyTransactionsX,
}
import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX
import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.TopologyChangeOpX.Replace
import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash
import com.digitalasset.canton.topology.transaction.TopologyTransactionX.{
GenericTopologyTransactionX,
TxHash,
}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.FutureInstances.*
import com.digitalasset.canton.util.MonadUtil
import com.digitalasset.canton.version.ProtocolVersion
import com.google.common.annotations.VisibleForTesting
import slick.jdbc.canton.SQLActionBuilder
import slick.jdbc.{GetResult, TransactionIsolation}
import slick.sql.SqlStreamingAction
import scala.concurrent.{ExecutionContext, Future}
class DbPartyMetadataStore(
class DbTopologyStoreX[StoreId <: TopologyStoreId](
override protected val storage: DbStorage,
val storeId: StoreId,
override protected val timeouts: ProcessingTimeout,
override protected val loggerFactory: NamedLoggerFactory,
)(implicit
ec: ExecutionContext
) extends PartyMetadataStore
protected val maxItemsInSqlQuery: PositiveInt = PositiveInt.tryCreate(100),
)(implicit ec: ExecutionContext)
extends TopologyStoreX[StoreId]
with DbStore {
import DbStorage.Implicits.BuilderChain.*
import storage.api.*
import storage.converters.*
override def metadataForParty(
partyId: PartyId
)(implicit traceContext: TraceContext): Future[Option[PartyMetadata]] = {
storage
.query(
metadataForPartyQuery(sql"party_id = $partyId #${storage.limit(1)}"),
functionFullName,
private implicit val getResultSignedTopologyTransaction
: GetResult[GenericSignedTopologyTransactionX] =
SignedTopologyTransactionX.createGetResultDomainTopologyTransaction
protected val transactionStoreIdName: LengthLimitedString = storeId.dbString
def findTransactionsByTxHash(asOfExclusive: EffectiveTime, hashes: Set[TxHash])(implicit
traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]] = {
if (hashes.isEmpty) Future.successful(Seq.empty)
else {
logger.debug(s"Querying transactions for tx hashes $hashes as of $asOfExclusive")
findAsOfExclusive(
asOfExclusive,
sql" AND (" ++ hashes
.map(txHash => sql"tx_hash = ${txHash.hash.toLengthLimitedHexString}")
.toList
.intercalate(sql" OR ") ++ sql")",
operation = "transactionsByTxHash",
)
.map(_.headOption)
}
}
private def metadataForPartyQuery(
where: SQLActionBuilderChain
): DbAction.ReadOnly[Seq[PartyMetadata]] = {
/** Finds proposals (is_proposal = true) valid strictly before `asOfExclusive` whose
  * transaction hash is one of `hashes`.
  */
override def findProposalsByTxHash(
    asOfExclusive: EffectiveTime,
    hashes: NonEmpty[Set[TxHash]],
)(implicit traceContext: TraceContext): Future[Seq[GenericSignedTopologyTransactionX]] = {
  logger.debug(s"Querying proposals for tx hashes $hashes as of $asOfExclusive")
  findAsOfExclusive(
    asOfExclusive,
    // OR together one equality predicate per requested hash
    sql" AND is_proposal = true AND (" ++ hashes
      .map(txHash => sql"tx_hash = ${txHash.hash.toLengthLimitedHexString}")
      .forgetNE
      .toList
      .intercalate(sql" OR ") ++ sql")",
    "proposalsByTxHash",
  )
}
/** Finds fully authorized transactions (is_proposal = false) valid strictly before
  * `asOfExclusive` whose mapping unique-key hash is one of `hashes`.
  */
override def findTransactionsForMapping(
    asOfExclusive: EffectiveTime,
    hashes: NonEmpty[Set[MappingHash]],
)(implicit
    traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]] = {
  logger.debug(s"Querying proposals for mapping hashes $hashes as of $asOfExclusive")
  findAsOfExclusive(
    asOfExclusive,
    // OR together one equality predicate per requested mapping hash
    sql" AND is_proposal = false AND (" ++ hashes
      .map(mappingHash => sql"mapping_key_hash = ${mappingHash.hash.toLengthLimitedHexString}")
      .forgetNE
      .toList
      .intercalate(sql" OR ") ++ sql")",
    operation = "transactionsForMapping",
  )
}
/** Splits `elements` into chunks of at most `maxItemsInSqlQuery` and runs one DB update
  * per chunk; a no-op on an empty input.
  *
  * @param elements Elements to be batched
  * @param operationName Name of the operation (for metrics/logging)
  * @param processInParallel Whether chunks may run concurrently (parallelism derived
  *                          from the storage's available write threads) or strictly
  *                          one after another
  * @param f Create a DBIOAction from a batch
  */
private def performBatchedDbOperation[X](
    elements: Seq[X],
    operationName: String,
    processInParallel: Boolean,
)(
    f: Seq[X] => DBIOAction[_, NoStream, Effect.Write with Effect.Transactional]
)(implicit traceContext: TraceContext) = if (elements.isEmpty) Future.successful(())
else
  MonadUtil.batchedSequentialTraverse_(
    parallelism =
      if (processInParallel) PositiveInt.two * storage.threadsAvailableForWriting
      else PositiveInt.one,
    chunkSize = maxItemsInSqlQuery,
  )(elements) { elementsBatch =>
    storage.update_(
      f(elementsBatch),
      operationName = operationName,
    )
  }
/** add validated topology transaction as is to the topology transaction table
  *
  * In one serializable DB transaction: expires (sets valid_until) currently-valid rows
  * matching `removeMapping` (up to the given serial) or `removeTxs`, then inserts
  * `additions`. Rejected or immediately-expiring additions are inserted already expired
  * at `effective`.
  */
override def update(
    sequenced: SequencedTime,
    effective: EffectiveTime,
    removeMapping: Map[TopologyMappingX.MappingHash, PositiveInt],
    removeTxs: Set[TopologyTransactionX.TxHash],
    additions: Seq[GenericValidatedTopologyTransactionX],
)(implicit traceContext: TraceContext): Future[Unit] = {
  val effectiveTs = effective.value

  // predicates selecting the rows to expire: by mapping key (bounded by serial) or by tx hash
  val transactionRemovals = removeMapping.toList.map { case (mappingHash, serial) =>
    sql"mapping_key_hash=${mappingHash.hash.toLengthLimitedHexString} and serial_counter <= $serial"
  } ++ removeTxs.map(txHash => sql"tx_hash=${txHash.hash.toLengthLimitedHexString}")

  lazy val updateRemovals =
    (sql"UPDATE common_topology_transactions SET valid_until = ${Some(effectiveTs)} WHERE store_id=$transactionStoreIdName AND (" ++
      transactionRemovals
        .intercalate(
          sql" OR "
        ) ++ sql") AND valid_from < $effectiveTs AND valid_until is null").asUpdate

  lazy val insertAdditions =
    insertSignedTransaction[GenericValidatedTopologyTransactionX](vtx =>
      TransactionEntry(
        sequenced,
        effective,
        // rejected or immediately-expired transactions are stored with valid_until = effective
        Option.when(
          vtx.rejectionReason.nonEmpty || vtx.expireImmediately
        )(effective),
        vtx.transaction,
        vtx.rejectionReason,
      )
    )(additions)

  storage.update_(
    DBIO
      .seq(
        if (transactionRemovals.nonEmpty) updateRemovals else DBIO.successful(0),
        if (additions.nonEmpty) insertAdditions
        else DBIO.successful(0),
      )
      .transactionally
      .withTransactionIsolation(TransactionIsolation.Serializable),
    operationName = "update-topology-transactions",
  )
}
@VisibleForTesting
override protected[topology] def dumpStoreContent()(implicit
traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
// Helper case class to produce comparable output to the InMemoryStore
case class TopologyStoreEntry(
transaction: GenericSignedTopologyTransactionX,
sequenced: SequencedTime,
from: EffectiveTime,
until: Option[EffectiveTime],
rejected: Option[String],
)
val query =
sql"select party_id, display_name, participant_id, submission_id, effective_at, notified from common_party_metadata where " ++ where
sql"SELECT instance, sequenced, valid_from, valid_until, rejection_reason FROM common_topology_transactions WHERE store_id = $transactionStoreIdName ORDER BY id"
val entriesF =
storage
.query(
query.as[
(
GenericSignedTopologyTransactionX,
CantonTimestamp,
CantonTimestamp,
Option[CantonTimestamp],
Option[String],
)
],
functionFullName,
)
.map(_.map { case (tx, sequencedTs, validFrom, validUntil, rejectionReason) =>
TopologyStoreEntry(
tx,
SequencedTime(sequencedTs),
EffectiveTime(validFrom),
validUntil.map(EffectiveTime(_)),
rejectionReason,
)
})
entriesF.map { entries =>
logger.debug(
entries
.map(_.toString)
.mkString("Topology Store Content[", ", ", "]")
)
StoredTopologyTransactionsX(
entries.map(e => StoredTopologyTransactionX(e.sequenced, e.from, e.until, e.transaction))
)
}
}
/** Console/inspection query: combines a time filter (head state, snapshot, or range)
  * with optional operation, id, namespace, and mapping-type filters, restricted to
  * either proposals or approved transactions.
  */
override def inspect(
    proposals: Boolean,
    timeQuery: TimeQuery,
    recentTimestampO: Option[CantonTimestamp],
    op: Option[TopologyChangeOpX],
    types: Seq[TopologyMappingX.Code],
    idFilter: Option[String],
    namespaceFilter: Option[String],
)(implicit
    traceContext: TraceContext
): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] = {
  logger.debug(s"Inspecting store for types=$types, filter=$idFilter, time=$timeQuery")

  val timeFilter: SQLActionBuilderChain = timeQuery match {
    case TimeQuery.HeadState =>
      getHeadStateQuery(recentTimestampO)
    case TimeQuery.Snapshot(asOf) =>
      asOfQuery(asOf = asOf, asOfInclusive = false)
    case TimeQuery.Range(None, None) =>
      sql"" // The case below inserts an additional `AND` that we don't want
    case TimeQuery.Range(from, until) =>
      sql" AND " ++ ((from.toList.map(ts => sql"valid_from >= $ts") ++ until.toList.map(ts =>
        sql"valid_from <= $ts"
      ))
        .intercalate(sql" AND "))
  }

  val operationFilter = op.map(value => sql" AND operation = $value").getOrElse(sql"")
  val mappingIdFilter = getIdFilter(idFilter)
  val mappingNameSpaceFilter = getNamespaceFilter(namespaceFilter)
  val mappingTypeFilter = typeFilter(types.toSet)
  val mappingProposalsAndPreviousFilter =
    timeFilter ++ operationFilter ++ mappingIdFilter ++ mappingNameSpaceFilter ++ mappingTypeFilter ++ sql" AND is_proposal = $proposals"
  queryForTransactions(mappingProposalsAndPreviousFilter, "inspect")
}
/** Returns parties known at `timestamp`, matching the optional party/participant uid
  * prefix filters. Parties come from PartyToParticipantX mappings and from admin
  * parties of DomainTrustCertificateX mappings; participant filtering of
  * PartyToParticipantX results happens locally after the query (see TODO below).
  */
@SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
override def inspectKnownParties(
    timestamp: CantonTimestamp,
    filterParty: String,
    filterParticipant: String,
    limit: Int,
)(implicit traceContext: TraceContext): Future[Set[PartyId]] = {
  logger.debug(
    s"Inspecting known parties at t=$timestamp with filterParty=$filterParty and filterParticipant=$filterParticipant"
  )

  // splits a uid filter into (identifier-prefix, namespace-prefix) and the matching SQL LIKE patterns
  def splitFilterPrefixAndSql(uidFilter: String): (String, String, String, String) =
    UniqueIdentifier.splitFilter(uidFilter) match {
      case (id, ns) => (id, ns, id + "%", ns + "%")
    }

  val (prefixPartyIdentifier, prefixPartyNS, sqlPartyIdentifier, sqlPartyNS) =
    splitFilterPrefixAndSql(filterParty)
  val (
    prefixParticipantIdentifier,
    prefixParticipantNS,
    sqlParticipantIdentifier,
    sqlParticipantNS,
  ) =
    splitFilterPrefixAndSql(filterParticipant)

  // conditional append avoids "like '%'" filters on empty filters
  def conditionalAppend(filter: String, sqlIdentifier: String, sqlNamespace: String) =
    if (filter.nonEmpty)
      sql" AND identifier LIKE ${sqlIdentifier} AND namespace LIKE ${sqlNamespace}"
    else sql""

  queryForTransactions(
    asOfQuery(timestamp, asOfInclusive = false) ++
      sql" AND NOT is_proposal AND operation = ${TopologyChangeOpX.Replace} AND ("
      // PartyToParticipantX filtering
      ++ Seq(
        sql"(transaction_type = ${PartyToParticipantX.code}"
          ++ conditionalAppend(filterParty, sqlPartyIdentifier, sqlPartyNS)
          ++ sql")"
      )
      ++ sql" OR "
      // DomainTrustCertificateX filtering
      ++ Seq(
        sql"(transaction_type = ${DomainTrustCertificateX.code}"
          // In DomainTrustCertificateX part of the filter, compare not only to participant, but also to party identifier
          // to enable searching for the admin party
          ++ conditionalAppend(filterParty, sqlPartyIdentifier, sqlPartyNS)
          ++ conditionalAppend(filterParticipant, sqlParticipantIdentifier, sqlParticipantNS)
          ++ sql")"
      )
      ++ sql")",
    storage.limit(limit),
  )
    .map(
      _.result.toSet
        .flatMap[PartyId](_.mapping match {
          // TODO(#14061): post-filtering for participantId non-columns results in fewer than limit results being returned
          // - add indexed secondary uid and/or namespace columns for participant-ids - also to support efficient lookup
          // of "what parties a particular participant hosts" (ParticipantId => Set[PartyId])
          case ptp: PartyToParticipantX
              if filterParticipant.isEmpty || ptp.participants
                .exists(
                  _.participantId.uid
                    .matchesPrefixes(prefixParticipantIdentifier, prefixParticipantNS)
                ) =>
            Set(ptp.partyId)
          case cert: DomainTrustCertificateX
              if filterParty.isEmpty || cert.participantId.adminParty.uid
                .matchesPrefixes(prefixPartyIdentifier, prefixPartyNS) =>
            Set(cert.participantId.adminParty)
          case _ => Set.empty
        })
    )
}
/** Finds approved Replace transactions matching the given type/uid/namespace filters,
  * delegating to the uid-batching query helper and narrowing the result to Replace ops.
  */
override def findPositiveTransactions(
    asOf: CantonTimestamp,
    asOfInclusive: Boolean,
    isProposal: Boolean,
    types: Seq[TopologyMappingX.Code],
    filterUid: Option[Seq[UniqueIdentifier]],
    filterNamespace: Option[Seq[Namespace]],
)(implicit traceContext: TraceContext): Future[PositiveStoredTopologyTransactionsX] =
  findTransactionsBatchingUidFilter(
    asOf,
    asOfInclusive,
    isProposal,
    types.toSet,
    filterUid,
    filterNamespace,
    TopologyChangeOpX.Replace.some,
  ).map(_.collectOfType[TopologyChangeOpX.Replace])
/** Finds the earliest (lowest-serial) SequencerDomainStateX that includes `sequencerId`. */
override def findFirstSequencerStateForSequencer(sequencerId: SequencerId)(implicit
    traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[Replace, SequencerDomainStateX]]] = {
  logger.debug(s"Querying first sequencer state for $sequencerId")

  queryForTransactions(
    // We don't expect too many SequencerDomainStateX mappings in a single domain, so fetching them all from the db
    // is acceptable and also because we don't expect to run this query frequently. We can only evaluate the
    // `sequencerId` field locally as the sequencer-id is not exposed in a separate column.
    sql" AND is_proposal = false" ++
      sql" AND operation = ${TopologyChangeOpX.Replace}" ++
      sql" AND transaction_type = ${SequencerDomainStateX.code}",
    operation = "firstSequencerState",
  ).map(
    _.collectOfMapping[SequencerDomainStateX]
      .collectOfType[Replace]
      .result
      .filter {
        _.mapping.allSequencers.contains(sequencerId)
      }
      .sortBy(_.serial)
      .headOption
  )
}
/** Find the earliest (lowest serial) fully-authorized MediatorDomainStateX that lists
  * `mediatorId` either as active or as observer.
  */
override def findFirstMediatorStateForMediator(mediatorId: MediatorId)(implicit
    traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[Replace, MediatorDomainStateX]]] = {
  logger.debug(s"Querying first mediator state for $mediatorId")
  queryForTransactions(
    // We don't expect too many MediatorDomainStateX mappings in a single domain, so fetching them all from the db
    // is acceptable and also because we don't expect to run this query frequently. We can only evaluate the
    // `mediatorId` field locally as the mediator-id is not exposed in a separate column.
    sql" AND is_proposal = false" ++
      sql" AND operation = ${TopologyChangeOpX.Replace}" ++
      sql" AND transaction_type = ${MediatorDomainStateX.code}",
    operation = "firstMediatorState",
  ).map(
    _.collectOfMapping[MediatorDomainStateX]
      .collectOfType[Replace]
      .result
      .collect {
        // membership (active or observer) can only be checked in memory
        case tx
            if tx.mapping.observers.contains(mediatorId) ||
              tx.mapping.active.contains(mediatorId) =>
          tx
      }
      .sortBy(_.serial) // earliest state = smallest serial
      .headOption
  )
}
/** Find the first (lowest serial_counter) domain trust certificate of the given participant.
  * The uid filter is pushed into SQL here, so the db can apply the limit of 1 directly.
  */
override def findFirstTrustCertificateForParticipant(participant: ParticipantId)(implicit
    traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[Replace, DomainTrustCertificateX]]] = {
  logger.debug(s"Querying first trust certificate for participant $participant")
  queryForTransactions(
    sql" AND is_proposal = false" ++
      sql" AND operation = ${TopologyChangeOpX.Replace}" ++
      sql" AND transaction_type = ${DomainTrustCertificateX.code}" ++
      sql" AND identifier = ${participant.uid.id} AND namespace = ${participant.uid.namespace}",
    limit = storage.limit(1),
    orderBy = " ORDER BY serial_counter ",
    operation = "participantFirstTrustCertificate",
  ).map(
    _.collectOfMapping[DomainTrustCertificateX]
      .collectOfType[Replace]
      .result
      .headOption
  )
}
/** Fetch the essential topology state: all transactions sequenced up to and including
  * `asOfInclusive`, reduced to a snapshot at the max effective time that retains the
  * authorized history and effective proposals.
  */
override def findEssentialStateAtSequencedTime(
    asOfInclusive: SequencedTime
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  // the topology transactions with sequenced time <= asOfInclusive
  val timeFilter = sql" AND sequenced <= ${asOfInclusive.value}"
  // BUGFIX: the log message used a plain "asOfInclusive" literal; the `$` interpolation was missing
  logger.debug(s"Querying essential state as of $asOfInclusive")
  queryForTransactions(timeFilter, "essentialState").map(
    _.asSnapshotAtMaxEffectiveTime.retainAuthorizedHistoryAndEffectiveProposals
  )
}
/** Import a complete topology snapshot wholesale into this store. */
override def bootstrap(snapshot: GenericStoredTopologyTransactionsX)(implicit
    traceContext: TraceContext
): Future[Unit] =
  // inserts must not be processed in parallel to keep the insertion order (as indicated by the `id` column)
  // in sync with the monotonicity of sequenced
  performBatchedDbOperation(snapshot.result, "bootstrap", processInParallel = false) { txs =>
    insertSignedTransaction[GenericStoredTopologyTransactionX](TransactionEntry.fromStoredTx)(txs)
  }
/** List effective-time changes whose validity starts at or after `asOfInclusive`,
  * accumulated into TopologyStoreX.Change entries ordered by valid_from.
  */
override def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit
    traceContext: TraceContext
): Future[Seq[TopologyStoreX.Change]] = {
  logger.debug(s"Querying upcoming effective changes as of $asOfInclusive")
  queryForTransactions(
    sql" AND valid_from >= $asOfInclusive ",
    orderBy = " ORDER BY valid_from",
    operation = "upcomingEffectiveChanges",
  ).map(res => TopologyStoreX.accumulateUpcomingEffectiveChanges(res.result))
}
/** Sequenced and effective time of the most recently inserted transaction (highest `id`), if any. */
override def maxTimestamp()(implicit
    traceContext: TraceContext
): Future[Option[(SequencedTime, EffectiveTime)]] = {
  logger.debug(s"Querying max timestamp")
  // NOTE(review): per the `queryForTransactions` signature visible in this file, the second
  // positional argument is `operation`, so `storage.limit(1)` would land there rather than in
  // `limit` — verify against the actual signature (this chunk looks diff-corrupted).
  queryForTransactions(sql"", storage.limit(1), orderBy = " ORDER BY id DESC")
    .map(_.result.headOption.map(tx => (tx.sequenced, tx.validFrom)))
}
/** Transactions that still need dispatching after `timestampExclusive`: everything becoming
  * valid strictly after that timestamp, excluding proposals that have already been expired
  * (valid_until set).
  */
override def findDispatchingTransactionsAfter(
    timestampExclusive: CantonTimestamp,
    limitO: Option[Int],
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  val subQuery =
    sql" AND valid_from > $timestampExclusive AND (not is_proposal OR valid_until is NULL)"
  val limitQ = limitO.fold("")(storage.limit(_))
  logger.debug(s"Querying dispatching transactions after $timestampExclusive")
  // NOTE(review): with the `queryForTransactions` signature visible in this file, `limitQ` is
  // passed positionally into the `operation` parameter — verify (looks like it should be
  // `limit = limitQ` plus an operation name; this chunk looks diff-corrupted).
  queryForTransactions(subQuery, limitQ)
}
/** Look up the stored counterpart of `transaction` that became valid before `asOfExclusive`,
  * optionally including rejected rows; returns the most recently inserted match.
  */
override def findStored(
    asOfExclusive: CantonTimestamp,
    transaction: GenericSignedTopologyTransactionX,
    includeRejected: Boolean = false,
)(implicit
    traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]] = {
  logger.debug(s"Querying for transaction at $asOfExclusive: $transaction")
  findStoredSql(asOfExclusive, transaction.transaction, includeRejected = includeRejected).map(
    _.result.lastOption // last = most recently inserted match
  )
}
/** Like [[findStored]], but additionally constrains the stored row to the representative
  * protocol version corresponding to `protocolVersion`.
  */
override def findStoredForVersion(
    asOfExclusive: CantonTimestamp,
    transaction: GenericTopologyTransactionX,
    protocolVersion: ProtocolVersion,
)(implicit
    traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]] = {
  val rpv = TopologyTransactionX.protocolVersionRepresentativeFor(protocolVersion)
  logger.debug(s"Querying for transaction $transaction with protocol version $protocolVersion")
  findStoredSql(
    asOfExclusive,
    transaction,
    subQuery = sql" AND representative_protocol_version = ${rpv.representative}",
  ).map(_.result.lastOption)
}
/** Collect the non-proposal topology transactions of the initial dispatching set that a
  * participant needs for onboarding onto `domainId`, filtered down to those relevant for
  * `participantId`.
  *
  * BUGFIX: a bad merge/diff had interleaved party-metadata row-mapping code (referencing
  * identifiers not in scope here, e.g. `data`, `PartyMetadata`) into this for-comprehension,
  * leaving it syntactically invalid; the foreign lines were removed to restore the method.
  */
override def findParticipantOnboardingTransactions(
    participantId: ParticipantId,
    domainId: DomainId,
)(implicit
    traceContext: TraceContext
): FutureUnlessShutdown[Seq[GenericSignedTopologyTransactionX]] = {
  logger.debug(
    s"Querying participant onboarding transactions for participant $participantId on domain $domainId"
  )
  for {
    transactions <- FutureUnlessShutdown
      .outcomeF(
        queryForTransactions(
          sql" AND not is_proposal " ++
            sql" AND transaction_type IN (" ++ TopologyStoreX.initialParticipantDispatchingSet.toList
              .map(s => sql"$s")
              .intercalate(sql", ") ++ sql") ",
          operation = "participantOnboardingTransactions",
        )
      )
    // in-memory filtering of the per-participant relevant subset
    filteredTransactions = TopologyStoreX.filterInitialParticipantDispatchingTransactions(
      participantId,
      domainId,
      transactions.result,
    )
  } yield filteredTransactions
}
// Insert helper shared by bootstrap and update: renders one multi-row INSERT statement for the
// given transactions, each converted to a row via `toTxEntry`. Idempotent thanks to
// ON CONFLICT DO NOTHING on the table's unique constraint.
private def insertSignedTransaction[T](toTxEntry: T => TransactionEntry)(
    transactions: Seq[T]
): SqlStreamingAction[Vector[Int], Int, slick.dbio.Effect.Write]#ResultAction[
  Int,
  NoStream,
  Effect.Write,
] = {
  // Renders the "(...)" VALUES tuple for a single transaction.
  def sqlTransactionParameters(transaction: T) = {
    val txEntry = toTxEntry(transaction)
    val signedTx = txEntry.signedTx
    val validFrom = txEntry.validFrom.value
    val validUntil = txEntry.validUntil.map(_.value)
    val sequencedTs = txEntry.sequenced.value
    val operation = signedTx.operation
    val mapping = signedTx.mapping
    val transactionType = mapping.code
    val namespace = mapping.namespace
    // mappings without a uid are stored with an empty identifier
    val identifier = mapping.maybeUid.map(_.id.toLengthLimitedString).getOrElse(String185.empty)
    val serial = signedTx.serial
    val mappingHash = mapping.uniqueKey.hash.toLengthLimitedHexString
    val reason = txEntry.rejectionReason.map(_.asString1GB)
    val txHash = signedTx.hash.hash.toLengthLimitedHexString
    val isProposal = signedTx.isProposal
    val representativeProtocolVersion = signedTx.transaction.representativeProtocolVersion
    val hashOfSignatures = signedTx.hashOfSignatures.toLengthLimitedHexString
    storage.profile match {
      case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 =>
        sql"""($transactionStoreIdName, $sequencedTs, $validFrom, $validUntil, $transactionType, $namespace,
          $identifier, $mappingHash, $serial, $operation, $signedTx, $txHash, $isProposal, $reason, $representativeProtocolVersion, $hashOfSignatures)"""
      case _: DbStorage.Profile.Oracle =>
        throw new IllegalStateException("Oracle not supported by daml 3.0/X yet")
    }
  }
  // TODO(#14061): Decide whether we want additional indices by mapping_key_hash and tx_hash (e.g. for update/removal and lookups)
  // TODO(#14061): Come up with columns/indexing for efficient ParticipantId => Seq[PartyId] lookup
  // TODO(#12390) should mapping_key_hash rather be tx_hash?
  storage.profile match {
    case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 =>
      (sql"""INSERT INTO common_topology_transactions (store_id, sequenced, valid_from, valid_until, transaction_type, namespace,
        identifier, mapping_key_hash, serial_counter, operation, instance, tx_hash, is_proposal, rejection_reason, representative_protocol_version, hash_of_signatures) VALUES""" ++
        transactions
          .map(sqlTransactionParameters)
          .toList
          .intercalate(sql", ")
        ++ sql" ON CONFLICT DO NOTHING" // idempotency-"conflict" based on common_topology_transactions unique constraint
      ).asUpdate
    case _: DbStorage.Profile.Oracle =>
      throw new IllegalStateException("Oracle not supported by daml 3.0/X yet")
  }
}
/** Upsert party metadata keyed by party_id, resetting `notified` to false so the change gets
  * re-dispatched.
  *
  * BUGFIX: the method body had been truncated by a bad merge/diff (its upsert SQL was
  * interleaved into `updateDispatchingWatermark` further below); reconstructed here from those
  * fragments so the method is complete and syntactically valid.
  */
override def insertOrUpdatePartyMetadata(
    partyId: PartyId,
    participantId: Option[ParticipantId],
    displayName: Option[DisplayName],
    effectiveTimestamp: CantonTimestamp,
    submissionId: String255,
)(implicit traceContext: TraceContext): Future[Unit] = {
  val participantS = dbValue(participantId)
  val query = storage.profile match {
    case _: DbStorage.Profile.Postgres =>
      sqlu"""insert into common_party_metadata (party_id, display_name, participant_id, submission_id, effective_at)
                  VALUES ($partyId, $displayName, $participantS, $submissionId, $effectiveTimestamp)
               on conflict (party_id) do update
                set
                  display_name = $displayName,
                  participant_id = $participantS,
                  submission_id = $submissionId,
                  effective_at = $effectiveTimestamp,
                  notified = false
               """
    case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Oracle =>
      sqlu"""merge into common_party_metadata
                using dual
                on (party_id = $partyId)
                when matched then
                  update set
                    display_name = $displayName,
                    participant_id = $participantS,
                    submission_id = $submissionId,
                    effective_at = $effectiveTimestamp,
                    notified = ${false}
                when not matched then
                  insert (party_id, display_name, participant_id, submission_id, effective_at)
                  values ($partyId, $displayName, $participantS, $submissionId, $effectiveTimestamp)
               """
  }
  storage.update_(query, functionFullName)
}
// Helper to break up large uid-filters into batches to limit the size of sql "in-clauses".
// Fashioned to reuse lessons learned in 2.x-based DbTopologyStore
private def findTransactionsBatchingUidFilter(
    asOf: CantonTimestamp,
    asOfInclusive: Boolean,
    isProposal: Boolean,
    types: Set[TopologyMappingX.Code],
    filterUid: Option[Seq[UniqueIdentifier]],
    filterNamespace: Option[Seq[Namespace]],
    filterOp: Option[TopologyChangeOpX],
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  // issues one query for a (possibly reduced) batch of uid filters
  def forwardBatch(filterUidsNew: Option[Seq[UniqueIdentifier]]) =
    findTransactionsSingleBatch(
      asOf,
      asOfInclusive,
      isProposal,
      types,
      filterUidsNew,
      filterNamespace,
      filterOp,
    )
  filterUid.map(
    // Optimization: remove uid-filters made redundant by namespace filters
    _.filterNot(uid => filterNamespace.exists(_.contains(uid.namespace)))
  ) match {
    case None => forwardBatch(None)
    case Some(uids) =>
      MonadUtil
        .batchedSequentialTraverse(
          parallelism = storage.threadsAvailableForWriting,
          chunkSize = maxItemsInSqlQuery, // caps the number of uid terms per query
        )(uids) { batchedUidFilters => forwardBatch(Some(batchedUidFilters)).map(_.result) }
        .map(StoredTopologyTransactionsX(_))
  }
}
// Runs one query for a single batch of uid/namespace filters; callers keep the batch small
// (see findTransactionsBatchingUidFilter).
private def findTransactionsSingleBatch(
    asOf: CantonTimestamp,
    asOfInclusive: Boolean,
    isProposal: Boolean,
    types: Set[TopologyMappingX.Code],
    filterUid: Option[Seq[UniqueIdentifier]],
    filterNamespace: Option[Seq[Namespace]],
    filterOp: Option[TopologyChangeOpX],
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  val hasUidFilter = filterUid.nonEmpty || filterNamespace.nonEmpty
  // exit early if the caller produced an empty uid/namespace filter batch:
  if (hasUidFilter && filterUid.forall(_.isEmpty) && filterNamespace.forall(_.isEmpty)) {
    Future.successful(StoredTopologyTransactionsX.empty)
  } else {
    logger.debug(s"Querying transactions as of $asOf for types $types")
    val timeRangeFilter = asOfQuery(asOf, asOfInclusive)
    val isProposalFilter = sql" AND is_proposal = $isProposal"
    val changeOpFilter = filterOp.fold(sql"")(op => sql" AND operation = $op")
    val mappingTypeFilter = typeFilter(types)
    val uidNamespaceFilter =
      if (hasUidFilter) {
        val namespaceFilter = filterNamespace.toList.flatMap(_.map(ns => sql"namespace = $ns"))
        val uidFilter =
          filterUid.toList.flatten.map(uid =>
            sql"(identifier = ${uid.id} AND namespace = ${uid.namespace})"
          )
        // any namespace match OR any exact uid match qualifies
        sql" AND (" ++ (namespaceFilter ++ uidFilter).intercalate(sql" OR ") ++ sql")"
      } else SQLActionBuilderChain(sql"")
    queryForTransactions(
      timeRangeFilter ++ isProposalFilter ++ changeOpFilter ++ mappingTypeFilter ++ uidNamespaceFilter,
      operation = "singleBatch",
    )
  }
}
/** SQL fragment restricting `transaction_type` to the given mapping codes; empty when no
  * restriction is requested.
  */
private def typeFilter(types: Set[TopologyMappingX.Code]): SQLActionBuilderChain =
  if (types.isEmpty) sql""
  else {
    val codeList = types.toSeq.map(code => sql"$code").intercalate(sql", ")
    sql" AND transaction_type IN (" ++ codeList ++ sql")"
  }
// Fetch the signed transactions valid strictly before `effective`, further narrowed by
// `subQuery`; strips the storage metadata and returns only the signed transactions.
private def findAsOfExclusive(
    effective: EffectiveTime,
    subQuery: SQLActionBuilder,
    operation: String,
)(implicit traceContext: TraceContext): Future[Seq[GenericSignedTopologyTransactionX]] = {
  queryForTransactions(asOfQuery(effective.value, asOfInclusive = false) ++ subQuery, operation)
    .map(_.result.map(_.transaction))
}
// Shared lookup for findStored/findStoredForVersion: matches a transaction by type, uid,
// mapping key hash, serial, tx hash and operation, valid strictly before `asOfExclusive`.
private def findStoredSql(
    asOfExclusive: CantonTimestamp,
    transaction: GenericTopologyTransactionX,
    subQuery: SQLActionBuilder = sql"",
    includeRejected: Boolean = false,
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  val mapping = transaction.mapping
  queryForTransactions(
    // Query for leading fields of `idx_common_topology_transactions` to enable use of this index
    sql" AND transaction_type = ${mapping.code} AND namespace = ${mapping.namespace} AND identifier = ${mapping.maybeUid
        .fold(String185.empty)(_.id.toLengthLimitedString)}"
      ++ sql" AND valid_from < $asOfExclusive"
      ++ sql" AND mapping_key_hash = ${mapping.uniqueKey.hash.toLengthLimitedHexString}"
      ++ sql" AND serial_counter = ${transaction.serial}"
      ++ sql" AND tx_hash = ${transaction.hash.hash.toLengthLimitedHexString}"
      ++ sql" AND operation = ${transaction.operation}"
      ++ subQuery,
    includeRejected = includeRejected,
    operation = "findStored",
  )
}
// Central SELECT over common_topology_transactions: appends `subQuery` to the store-id filter,
// optionally hides rejected rows, and maps each row into a StoredTopologyTransactionX.
// `orderBy` and `limit` are spliced in literally via #$ — callers must pass trusted fragments only.
private def queryForTransactions(
    subQuery: SQLActionBuilder,
    operation: String,
    limit: String = "",
    orderBy: String = " ORDER BY id ",
    includeRejected: Boolean = false,
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  val query =
    sql"SELECT instance, sequenced, valid_from, valid_until FROM common_topology_transactions WHERE store_id = $transactionStoreIdName" ++
      subQuery ++ (if (!includeRejected) sql" AND rejection_reason IS NULL"
                   else sql"") ++ sql" #${orderBy} #${limit}"
  storage
    .query(
      query.as[
        (
            GenericSignedTopologyTransactionX,
            CantonTimestamp,
            CantonTimestamp,
            Option[CantonTimestamp],
        )
      ],
      s"$functionFullName-$operation",
    )
    .map(_.map { case (tx, sequencedTs, validFrom, validUntil) =>
      StoredTopologyTransactionX(
        SequencedTime(sequencedTs),
        EffectiveTime(validFrom),
        validUntil.map(EffectiveTime(_)),
        tx,
      )
    })
    .map(StoredTopologyTransactionsX(_))
}
/** Read the persisted dispatching watermark for this store, if any. */
override def currentDispatchingWatermark(implicit
    traceContext: TraceContext
): Future[Option[CantonTimestamp]] = {
  val query =
    sql"SELECT watermark_ts FROM common_topology_dispatching WHERE store_id =$transactionStoreIdName"
      .as[CantonTimestamp]
      .headOption
  storage.query(query, functionFullName)
}
/** Upsert the dispatching watermark for this store.
  *
  * BUGFIX: a bad merge/diff had interleaved the party-metadata upsert SQL
  * (`common_party_metadata`, `$partyId`, `$displayName`, ... — identifiers not in scope here)
  * into both profile branches; the foreign lines were removed so that each branch is a single
  * coherent upsert of `common_topology_dispatching`.
  */
override def updateDispatchingWatermark(timestamp: CantonTimestamp)(implicit
    traceContext: TraceContext
): Future[Unit] = {
  val query = storage.profile match {
    case _: DbStorage.Profile.Postgres =>
      sqlu"""insert into common_topology_dispatching (store_id, watermark_ts)
                  VALUES ($transactionStoreIdName, $timestamp)
               on conflict (store_id) do update
                set
                  watermark_ts = $timestamp
               """
    case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Oracle =>
      sqlu"""merge into common_topology_dispatching
                using dual
                on (store_id = $transactionStoreIdName)
                when matched then
                  update set
                     watermark_ts = $timestamp
                when not matched then
                  insert (store_id, watermark_ts)
                  values ($transactionStoreIdName, $timestamp)
               """
  }
  storage.update_(query, functionFullName)
}
/** Db representation of an optional participant id: its length-limited uid string. */
private def dbValue(participantId: Option[ParticipantId]): Option[String300] =
  participantId.map(pid => pid.uid.toLengthLimitedString.asString300)
// Validity-window filter: inclusive => valid_from <= asOf < valid_until;
// exclusive => valid_from < asOf <= valid_until. A NULL valid_until means "still valid".
private def asOfQuery(asOf: CantonTimestamp, asOfInclusive: Boolean): SQLActionBuilder =
  if (asOfInclusive)
    sql" AND valid_from <= $asOf AND (valid_until is NULL OR $asOf < valid_until)"
  else
    sql" AND valid_from < $asOf AND (valid_until is NULL OR $asOf <= valid_until)"
/** Mark the given metadata as having been successfully forwarded to the domain.
  *
  * BUGFIX: the method's closing brace was missing (diff corruption), which broke the
  * surrounding class body; also fixed the "has having" typo in the scaladoc.
  */
override def markNotified(
    metadata: PartyMetadata
)(implicit traceContext: TraceContext): Future[Unit] = {
  val partyId = metadata.partyId
  val effectiveAt = metadata.effectiveTimestamp
  val query =
    sqlu"UPDATE common_party_metadata SET notified = ${true} WHERE party_id = $partyId and effective_at = $effectiveAt"
  storage.update_(query, functionFullName)
}
// Time filter for "head state": either as-of the given recent timestamp (exclusive) or,
// absent one, all rows that are still valid (valid_until is NULL).
private def getHeadStateQuery(
    recentTimestampO: Option[CantonTimestamp]
): SQLActionBuilderChain = recentTimestampO match {
  case Some(value) => asOfQuery(value, asOfInclusive = false)
  case None => sql" AND valid_until is NULL"
}
/** fetch the current set of party data which still needs to be notified */
override def fetchNotNotified()(implicit
    traceContext: TraceContext
): Future[Seq[PartyMetadata]] = {
  // NOTE(review): `metadataForPartyQuery` is not visible in this chunk — presumably it builds
  // the SELECT over common_party_metadata with the given WHERE fragment; verify against the
  // party-metadata store this method belongs to.
  storage
    .query(
      metadataForPartyQuery(sql"notified = ${false}"),
      functionFullName,
    )
}
/** Prefix filter on the identifier column; yields an empty fragment for an absent/empty filter. */
@SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
private def getIdFilter(
    idFilter: Option[String]
): SQLActionBuilderChain =
  idFilter match {
    case Some(prefix) if prefix.nonEmpty => sql" AND identifier like ${prefix + "%"}"
    case _ => sql""
  }
/** Prefix filter on the namespace column; yields an empty fragment for an absent/empty filter. */
@SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
private def getNamespaceFilter(namespaceFilter: Option[String]): SQLActionBuilderChain =
  namespaceFilter match {
    case Some(prefix) if prefix.nonEmpty => sql" AND namespace LIKE ${prefix + "%"}"
    case _ => sql""
  }
}
// Helper case class to hold StoredTopologyTransactionX-fields in update() providing umbrella
// values for all transactions.
private[db] final case class TransactionEntry(
    sequenced: SequencedTime,
    validFrom: EffectiveTime,
    validUntil: Option[EffectiveTime],
    signedTx: GenericSignedTopologyTransactionX,
    // set for rejected rows; None for accepted ones
    rejectionReason: Option[TopologyTransactionRejection] = None,
)
private[db] object TransactionEntry {
  /** Build a row entry from an already-stored transaction (used by bootstrap). */
  def fromStoredTx(stx: GenericStoredTopologyTransactionX): TransactionEntry = TransactionEntry(
    stx.sequenced,
    stx.validFrom,
    stx.validUntil,
    stx.transaction,
    rejectionReason = None,
  )
}

View File

@ -1,873 +0,0 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.topology.store.db
import cats.syntax.option.*
import com.daml.nameof.NameOf.functionFullName
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String185}
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.resource.DbStorage.SQLActionBuilderChain
import com.digitalasset.canton.resource.{DbStorage, DbStore}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
import com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.{
GenericStoredTopologyTransactionsX,
PositiveStoredTopologyTransactionsX,
}
import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX
import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.{
GenericSignedTopologyTransactionX,
setParameterTopologyTransaction,
}
import com.digitalasset.canton.topology.transaction.TopologyChangeOpX.Replace
import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash
import com.digitalasset.canton.topology.transaction.TopologyTransactionX.{
GenericTopologyTransactionX,
TxHash,
}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.FutureInstances.*
import com.digitalasset.canton.util.MonadUtil
import com.digitalasset.canton.version.ProtocolVersion
import com.google.common.annotations.VisibleForTesting
import slick.jdbc.canton.SQLActionBuilder
import slick.jdbc.{GetResult, TransactionIsolation}
import slick.sql.SqlStreamingAction
import scala.concurrent.{ExecutionContext, Future}
class DbTopologyStoreX[StoreId <: TopologyStoreId](
override protected val storage: DbStorage,
val storeId: StoreId,
override protected val timeouts: ProcessingTimeout,
override protected val loggerFactory: NamedLoggerFactory,
protected val maxItemsInSqlQuery: PositiveInt = PositiveInt.tryCreate(100),
)(implicit ec: ExecutionContext)
extends TopologyStoreX[StoreId]
with DbStore {
import DbStorage.Implicits.BuilderChain.*
import storage.api.*
import storage.converters.*
private implicit val getResultSignedTopologyTransaction
: GetResult[GenericSignedTopologyTransactionX] =
SignedTopologyTransactionX.createGetResultDomainTopologyTransaction
protected val transactionStoreIdName: LengthLimitedString = storeId.dbString
/** Fetch transactions valid strictly before `asOfExclusive` whose tx_hash is in `hashes`.
  * Returns empty without touching the db for an empty hash set.
  */
def findTransactionsByTxHash(asOfExclusive: EffectiveTime, hashes: Set[TxHash])(implicit
    traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]] = {
  if (hashes.isEmpty) Future.successful(Seq.empty)
  else {
    logger.debug(s"Querying transactions for tx hashes $hashes as of $asOfExclusive")
    findAsOfExclusive(
      asOfExclusive,
      // OR-chain of equality checks on the tx_hash column
      sql" AND (" ++ hashes
        .map(txHash => sql"tx_hash = ${txHash.hash.toLengthLimitedHexString}")
        .toList
        .intercalate(sql" OR ") ++ sql")",
      operation = "transactionsByTxHash",
    )
  }
}
/** Fetch proposals (is_proposal = true) valid strictly before `asOfExclusive` whose tx_hash is
  * in the non-empty set `hashes`.
  */
override def findProposalsByTxHash(
    asOfExclusive: EffectiveTime,
    hashes: NonEmpty[Set[TxHash]],
)(implicit traceContext: TraceContext): Future[Seq[GenericSignedTopologyTransactionX]] = {
  logger.debug(s"Querying proposals for tx hashes $hashes as of $asOfExclusive")
  findAsOfExclusive(
    asOfExclusive,
    sql" AND is_proposal = true AND (" ++ hashes
      .map(txHash => sql"tx_hash = ${txHash.hash.toLengthLimitedHexString}")
      .forgetNE
      .toList
      .intercalate(sql" OR ") ++ sql")",
    "proposalsByTxHash",
  )
}
/** Fetch fully-authorized (non-proposal) transactions valid strictly before `asOfExclusive`
  * whose mapping unique-key hash is in the non-empty set `hashes`.
  */
override def findTransactionsForMapping(
    asOfExclusive: EffectiveTime,
    hashes: NonEmpty[Set[MappingHash]],
)(implicit
    traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]] = {
  // BUGFIX: the log message claimed "Querying proposals" although this query explicitly
  // filters is_proposal = false (copy-paste from findProposalsByTxHash)
  logger.debug(s"Querying transactions for mapping hashes $hashes as of $asOfExclusive")
  findAsOfExclusive(
    asOfExclusive,
    sql" AND is_proposal = false AND (" ++ hashes
      .map(mappingHash => sql"mapping_key_hash = ${mappingHash.hash.toLengthLimitedHexString}")
      .forgetNE
      .toList
      .intercalate(sql" OR ") ++ sql")",
    operation = "transactionsForMapping",
  )
}
/** Run a db write over `elements` split into batches of at most `maxItemsInSqlQuery` items.
  *
  * @param elements Elements to be batched
  * @param operationName Name of the operation
  * @param processInParallel whether batches may run concurrently; sequential mode preserves
  *                          insertion order (needed e.g. by bootstrap)
  * @param f Create a DBIOAction from a batch
  */
private def performBatchedDbOperation[X](
    elements: Seq[X],
    operationName: String,
    processInParallel: Boolean,
)(
    f: Seq[X] => DBIOAction[_, NoStream, Effect.Write with Effect.Transactional]
)(implicit traceContext: TraceContext) = if (elements.isEmpty) Future.successful(())
else
  MonadUtil.batchedSequentialTraverse_(
    parallelism =
      if (processInParallel) PositiveInt.two * storage.threadsAvailableForWriting
      else PositiveInt.one,
    chunkSize = maxItemsInSqlQuery,
  )(elements) { elementsBatch =>
    storage.update_(
      f(elementsBatch),
      operationName = operationName,
    )
  }
/** add validated topology transaction as is to the topology transaction table
  *
  * Expires (sets valid_until) every row matched by `removeMapping`/`removeTxs` and inserts the
  * `additions`, both inside one serializable transaction so readers never observe a partial
  * update.
  */
override def update(
    sequenced: SequencedTime,
    effective: EffectiveTime,
    removeMapping: Map[TopologyMappingX.MappingHash, PositiveInt],
    removeTxs: Set[TopologyTransactionX.TxHash],
    additions: Seq[GenericValidatedTopologyTransactionX],
)(implicit traceContext: TraceContext): Future[Unit] = {
  val effectiveTs = effective.value
  // rows to expire: by mapping hash up to a serial, or by exact tx hash
  val transactionRemovals = removeMapping.toList.map { case (mappingHash, serial) =>
    sql"mapping_key_hash=${mappingHash.hash.toLengthLimitedHexString} and serial_counter <= $serial"
  } ++ removeTxs.map(txHash => sql"tx_hash=${txHash.hash.toLengthLimitedHexString}")
  lazy val updateRemovals =
    (sql"UPDATE common_topology_transactions SET valid_until = ${Some(effectiveTs)} WHERE store_id=$transactionStoreIdName AND (" ++
      transactionRemovals
        .intercalate(
          sql" OR "
        ) ++ sql") AND valid_from < $effectiveTs AND valid_until is null").asUpdate
  lazy val insertAdditions =
    insertSignedTransaction[GenericValidatedTopologyTransactionX](vtx =>
      TransactionEntry(
        sequenced,
        effective,
        // rejected or immediately-expiring transactions are inserted already expired
        Option.when(
          vtx.rejectionReason.nonEmpty || vtx.expireImmediately
        )(effective),
        vtx.transaction,
        vtx.rejectionReason,
      )
    )(additions)
  storage.update_(
    DBIO
      .seq(
        if (transactionRemovals.nonEmpty) updateRemovals else DBIO.successful(0),
        if (additions.nonEmpty) insertAdditions
        else DBIO.successful(0),
      )
      .transactionally
      .withTransactionIsolation(TransactionIsolation.Serializable),
    operationName = "update-topology-transactions",
  )
}
// Dump the full store content (including rejection reasons in the debug log) in a shape
// comparable to the InMemoryStore — for debugging and tests only.
@VisibleForTesting
override protected[topology] def dumpStoreContent()(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  // Helper case class to produce comparable output to the InMemoryStore
  case class TopologyStoreEntry(
      transaction: GenericSignedTopologyTransactionX,
      sequenced: SequencedTime,
      from: EffectiveTime,
      until: Option[EffectiveTime],
      rejected: Option[String],
  )
  val query =
    sql"SELECT instance, sequenced, valid_from, valid_until, rejection_reason FROM common_topology_transactions WHERE store_id = $transactionStoreIdName ORDER BY id"
  val entriesF =
    storage
      .query(
        query.as[
          (
              GenericSignedTopologyTransactionX,
              CantonTimestamp,
              CantonTimestamp,
              Option[CantonTimestamp],
              Option[String],
          )
        ],
        functionFullName,
      )
      .map(_.map { case (tx, sequencedTs, validFrom, validUntil, rejectionReason) =>
        TopologyStoreEntry(
          tx,
          SequencedTime(sequencedTs),
          EffectiveTime(validFrom),
          validUntil.map(EffectiveTime(_)),
          rejectionReason,
        )
      })
  entriesF.map { entries =>
    logger.debug(
      entries
        .map(_.toString)
        .mkString("Topology Store Content[", ", ", "]")
    )
    // rejection reasons are logged above but not part of the returned stored transactions
    StoredTopologyTransactionsX(
      entries.map(e => StoredTopologyTransactionX(e.sequenced, e.from, e.until, e.transaction))
    )
  }
}
/** General-purpose inspection query combining time, operation, mapping type, identifier and
  * namespace filters plus the proposal flag.
  */
override def inspect(
    proposals: Boolean,
    timeQuery: TimeQuery,
    recentTimestampO: Option[CantonTimestamp],
    op: Option[TopologyChangeOpX],
    types: Seq[TopologyMappingX.Code],
    idFilter: Option[String],
    namespaceFilter: Option[String],
)(implicit
    traceContext: TraceContext
): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] = {
  logger.debug(s"Inspecting store for types=$types, filter=$idFilter, time=$timeQuery")
  val timeFilter: SQLActionBuilderChain = timeQuery match {
    case TimeQuery.HeadState =>
      getHeadStateQuery(recentTimestampO)
    case TimeQuery.Snapshot(asOf) =>
      asOfQuery(asOf = asOf, asOfInclusive = false)
    case TimeQuery.Range(None, None) =>
      sql"" // The case below inserts an additional `AND` that we don't want
    case TimeQuery.Range(from, until) =>
      sql" AND " ++ ((from.toList.map(ts => sql"valid_from >= $ts") ++ until.toList.map(ts =>
        sql"valid_from <= $ts"
      ))
        .intercalate(sql" AND "))
  }
  val operationFilter = op.map(value => sql" AND operation = $value").getOrElse(sql"")
  val mappingIdFilter = getIdFilter(idFilter)
  val mappingNameSpaceFilter = getNamespaceFilter(namespaceFilter)
  val mappingTypeFilter = typeFilter(types.toSet)
  val mappingProposalsAndPreviousFilter =
    timeFilter ++ operationFilter ++ mappingIdFilter ++ mappingNameSpaceFilter ++ mappingTypeFilter ++ sql" AND is_proposal = $proposals"
  queryForTransactions(mappingProposalsAndPreviousFilter, "inspect")
}
/** Find party ids known at `timestamp` matching the party and/or participant filters, bounded
  * by `limit`. Searches PartyToParticipantX mappings as well as DomainTrustCertificateX admin
  * parties; participant membership is post-filtered in memory (see TODO below).
  */
@SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
override def inspectKnownParties(
    timestamp: CantonTimestamp,
    filterParty: String,
    filterParticipant: String,
    limit: Int,
)(implicit traceContext: TraceContext): Future[Set[PartyId]] = {
  logger.debug(
    s"Inspecting known parties at t=$timestamp with filterParty=$filterParty and filterParticipant=$filterParticipant"
  )
  // splits a uid filter into exact (identifier, namespace) prefixes and their sql LIKE patterns
  def splitFilterPrefixAndSql(uidFilter: String): (String, String, String, String) =
    UniqueIdentifier.splitFilter(uidFilter) match {
      case (id, ns) => (id, ns, id + "%", ns + "%")
    }
  val (prefixPartyIdentifier, prefixPartyNS, sqlPartyIdentifier, sqlPartyNS) =
    splitFilterPrefixAndSql(filterParty)
  val (
    prefixParticipantIdentifier,
    prefixParticipantNS,
    sqlParticipantIdentifier,
    sqlParticipantNS,
  ) =
    splitFilterPrefixAndSql(filterParticipant)
  // conditional append avoids "like '%'" filters on empty filters
  def conditionalAppend(filter: String, sqlIdentifier: String, sqlNamespace: String) =
    if (filter.nonEmpty)
      sql" AND identifier LIKE ${sqlIdentifier} AND namespace LIKE ${sqlNamespace}"
    else sql""
  queryForTransactions(
    asOfQuery(timestamp, asOfInclusive = false) ++
      sql" AND NOT is_proposal AND operation = ${TopologyChangeOpX.Replace} AND ("
      // PartyToParticipantX filtering
      ++ Seq(
        sql"(transaction_type = ${PartyToParticipantX.code}"
          ++ conditionalAppend(filterParty, sqlPartyIdentifier, sqlPartyNS)
          ++ sql")"
      )
      ++ sql" OR "
      // DomainTrustCertificateX filtering
      ++ Seq(
        sql"(transaction_type = ${DomainTrustCertificateX.code}"
          // In DomainTrustCertificateX part of the filter, compare not only to participant, but also to party identifier
          // to enable searching for the admin party
          ++ conditionalAppend(filterParty, sqlPartyIdentifier, sqlPartyNS)
          ++ conditionalAppend(filterParticipant, sqlParticipantIdentifier, sqlParticipantNS)
          ++ sql")"
      )
      ++ sql")",
    storage.limit(limit),
  )
    .map(
      _.result.toSet
        .flatMap[PartyId](_.mapping match {
          // TODO(#14061): post-filtering for participantId non-columns results in fewer than limit results being returned
          //   - add indexed secondary uid and/or namespace columns for participant-ids - also to support efficient lookup
          //     of "what parties a particular participant hosts" (ParticipantId => Set[PartyId])
          case ptp: PartyToParticipantX
              if filterParticipant.isEmpty || ptp.participants
                .exists(
                  _.participantId.uid
                    .matchesPrefixes(prefixParticipantIdentifier, prefixParticipantNS)
                ) =>
            Set(ptp.partyId)
          case cert: DomainTrustCertificateX
              if filterParty.isEmpty || cert.participantId.adminParty.uid
                .matchesPrefixes(prefixPartyIdentifier, prefixPartyNS) =>
            Set(cert.participantId.adminParty)
          case _ => Set.empty
        })
    )
}
/** Fetch all "positive" (Replace-operation) transactions valid at `asOf` that match the given
  * type/uid/namespace filters; delegates to the batching helper to keep in-clauses small.
  */
override def findPositiveTransactions(
    asOf: CantonTimestamp,
    asOfInclusive: Boolean,
    isProposal: Boolean,
    types: Seq[TopologyMappingX.Code],
    filterUid: Option[Seq[UniqueIdentifier]],
    filterNamespace: Option[Seq[Namespace]],
)(implicit traceContext: TraceContext): Future[PositiveStoredTopologyTransactionsX] =
  findTransactionsBatchingUidFilter(
    asOf,
    asOfInclusive,
    isProposal,
    types.toSet,
    filterUid,
    filterNamespace,
    TopologyChangeOpX.Replace.some, // only Replace ops qualify as "positive"
  ).map(_.collectOfType[TopologyChangeOpX.Replace])
/** Find the earliest (lowest serial) fully-authorized SequencerDomainStateX that lists
  * `sequencerId` as a member.
  */
override def findFirstSequencerStateForSequencer(sequencerId: SequencerId)(implicit
    traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[Replace, SequencerDomainStateX]]] = {
  logger.debug(s"Querying first sequencer state for $sequencerId")
  queryForTransactions(
    // We don't expect too many SequencerDomainStateX mappings in a single domain, so fetching them
    // all from the db is acceptable, also because we don't expect to run this query frequently.
    // Sequencer membership can only be evaluated locally as it is not exposed in a separate column.
    sql" AND is_proposal = false" ++
      sql" AND operation = ${TopologyChangeOpX.Replace}" ++
      sql" AND transaction_type = ${SequencerDomainStateX.code}",
    operation = "firstSequencerState",
  ).map(
    _.collectOfMapping[SequencerDomainStateX]
      .collectOfType[Replace]
      .result
      .filter {
        // post-filter in memory: membership is not a db column
        _.mapping.allSequencers.contains(sequencerId)
      }
      .sortBy(_.serial) // earliest state = smallest serial
      .headOption
  )
}
/** Returns the earliest (lowest serial) committed MediatorDomainStateX in which `mediatorId`
  * appears as either an active or an observing mediator, if any.
  */
override def findFirstMediatorStateForMediator(mediatorId: MediatorId)(implicit
    traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[Replace, MediatorDomainStateX]]] = {
  logger.debug(s"Querying first mediator state for $mediatorId")

  queryForTransactions(
    // We don't expect too many MediatorDomainStateX mappings in a single domain, so fetching them all from the db
    // is acceptable and also because we don't expect to run this query frequently. We can only evaluate the
    // `mediatorId` field locally as the mediator-id is not exposed in a separate column.
    sql" AND is_proposal = false" ++
      sql" AND operation = ${TopologyChangeOpX.Replace}" ++
      sql" AND transaction_type = ${MediatorDomainStateX.code}",
    operation = "firstMediatorState",
  ).map(
    _.collectOfMapping[MediatorDomainStateX]
      .collectOfType[Replace]
      .result
      // keep entries where the mediator is either active or an observer
      .collect {
        case tx
            if tx.mapping.observers.contains(mediatorId) ||
              tx.mapping.active.contains(mediatorId) =>
          tx
      }
      .sortBy(_.serial)
      .headOption
  )
}
/** Returns the participant's first (lowest serial) domain trust certificate, if any.
  * Unlike the mediator/sequencer lookups above, the participant uid is stored in indexed
  * columns, so filtering and LIMIT 1 happen in the database.
  */
override def findFirstTrustCertificateForParticipant(participant: ParticipantId)(implicit
    traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[Replace, DomainTrustCertificateX]]] = {
  logger.debug(s"Querying first trust certificate for participant $participant")

  queryForTransactions(
    sql" AND is_proposal = false" ++
      sql" AND operation = ${TopologyChangeOpX.Replace}" ++
      sql" AND transaction_type = ${DomainTrustCertificateX.code}" ++
      sql" AND identifier = ${participant.uid.id} AND namespace = ${participant.uid.namespace}",
    limit = storage.limit(1),
    orderBy = " ORDER BY serial_counter ",
    operation = "participantFirstTrustCertificate",
  ).map(
    _.collectOfMapping[DomainTrustCertificateX]
      .collectOfType[Replace]
      .result
      .headOption
  )
}
/** Returns the essential topology state sequenced up to and including `asOfInclusive`,
  * reduced to the authorized history plus still-effective proposals.
  */
override def findEssentialStateAtSequencedTime(
    asOfInclusive: SequencedTime
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  val timeFilter = sql" AND sequenced <= ${asOfInclusive.value}"
  // Fixed: the interpolator was missing the `$`, so the log printed the literal text
  // "asOfInclusive" instead of the actual timestamp.
  logger.debug(s"Querying essential state as of $asOfInclusive")

  queryForTransactions(timeFilter, "essentialState").map(
    _.asSnapshotAtMaxEffectiveTime.retainAuthorizedHistoryAndEffectiveProposals
  )
}
/** Initializes the store from a topology snapshot.
  * Inserts must not be processed in parallel to keep the insertion order (as indicated by the
  * `id` column) in sync with the monotonicity of `sequenced`.
  */
override def bootstrap(snapshot: GenericStoredTopologyTransactionsX)(implicit
    traceContext: TraceContext
): Future[Unit] =
  performBatchedDbOperation(snapshot.result, "bootstrap", processInParallel = false) { txs =>
    insertSignedTransaction[GenericStoredTopologyTransactionX](TransactionEntry.fromStoredTx)(txs)
  }
/** Returns the topology changes whose effective time is at or after `asOfInclusive`. */
override def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit
    traceContext: TraceContext
): Future[Seq[TopologyStoreX.Change]] = {
  logger.debug(s"Querying upcoming effective changes as of $asOfInclusive")
  val upcomingRows = queryForTransactions(
    sql" AND valid_from >= $asOfInclusive ",
    orderBy = " ORDER BY valid_from",
    operation = "upcomingEffectiveChanges",
  )
  upcomingRows.map(stored => TopologyStoreX.accumulateUpcomingEffectiveChanges(stored.result))
}
/** Returns the (sequenced, effective) times of the most recently inserted transaction, if any. */
override def maxTimestamp()(implicit
    traceContext: TraceContext
): Future[Option[(SequencedTime, EffectiveTime)]] = {
  logger.debug(s"Querying max timestamp")
  // Fixed: `storage.limit(1)` was previously passed positionally as the `operation` argument
  // of queryForTransactions (its second parameter), so no LIMIT clause was emitted and the
  // whole table was fetched just to take the head.
  queryForTransactions(
    sql"",
    operation = "maxTimestamp",
    limit = storage.limit(1),
    orderBy = " ORDER BY id DESC",
  )
    .map(_.result.headOption.map(tx => (tx.sequenced, tx.validFrom)))
}
/** Returns transactions to dispatch with valid_from strictly after `timestampExclusive`,
  * keeping proposals only while they are still open (valid_until NULL).
  * At most `limitO` rows are returned when a limit is set.
  */
override def findDispatchingTransactionsAfter(
    timestampExclusive: CantonTimestamp,
    limitO: Option[Int],
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  val subQuery =
    sql" AND valid_from > $timestampExclusive AND (not is_proposal OR valid_until is NULL)"
  val limitQ = limitO.fold("")(storage.limit(_))
  logger.debug(s"Querying dispatching transactions after $timestampExclusive")
  // Fixed: `limitQ` was previously passed positionally as the `operation` argument of
  // queryForTransactions (its second parameter), so the requested limit was silently ignored.
  queryForTransactions(subQuery, operation = "dispatchingTransactionsAfter", limit = limitQ)
}
/** Looks up the stored form of `transaction` among entries valid before `asOfExclusive`,
  * returning the most recently inserted match.
  */
override def findStored(
    asOfExclusive: CantonTimestamp,
    transaction: GenericSignedTopologyTransactionX,
    includeRejected: Boolean = false,
)(implicit
    traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]] = {
  logger.debug(s"Querying for transaction at $asOfExclusive: $transaction")
  findStoredSql(asOfExclusive, transaction.transaction, includeRejected = includeRejected)
    .map(stored => stored.result.lastOption)
}
/** Looks up the stored form of `transaction` serialized at the given protocol version. */
override def findStoredForVersion(
    asOfExclusive: CantonTimestamp,
    transaction: GenericTopologyTransactionX,
    protocolVersion: ProtocolVersion,
)(implicit
    traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]] = {
  logger.debug(s"Querying for transaction $transaction with protocol version $protocolVersion")
  val representativeVersion =
    TopologyTransactionX.protocolVersionRepresentativeFor(protocolVersion)
  findStoredSql(
    asOfExclusive,
    transaction,
    subQuery = sql" AND representative_protocol_version = ${representativeVersion.representative}",
  ).map(stored => stored.result.lastOption)
}
/** Collects the non-proposal transactions needed to onboard `participantId` to `domainId`:
  * fetches the initial dispatching transaction types from the db and then reduces them to the
  * participant-relevant subset in memory.
  */
override def findParticipantOnboardingTransactions(
    participantId: ParticipantId,
    domainId: DomainId,
)(implicit
    traceContext: TraceContext
): FutureUnlessShutdown[Seq[GenericSignedTopologyTransactionX]] = {
  logger.debug(
    s"Querying participant onboarding transactions for participant $participantId on domain $domainId"
  )
  for {
    transactions <- FutureUnlessShutdown
      .outcomeF(
        queryForTransactions(
          // restrict to the transaction types relevant for initial participant dispatching
          sql" AND not is_proposal " ++
            sql" AND transaction_type IN (" ++ TopologyStoreX.initialParticipantDispatchingSet.toList
              .map(s => sql"$s")
              .intercalate(sql", ") ++ sql") ",
          operation = "participantOnboardingTransactions",
        )
      )
    // second stage: in-memory filtering down to what this participant needs
    filteredTransactions = TopologyStoreX.filterInitialParticipantDispatchingTransactions(
      participantId,
      domainId,
      transactions.result,
    )
  } yield filteredTransactions
}
// Insert helper shared by bootstrap and update.
/** Builds one multi-row INSERT statement for `transactions`, converting each element to a
  * [[TransactionEntry]] via `toTxEntry`. Rows violating the table's unique constraint are
  * skipped (ON CONFLICT DO NOTHING), making the insert idempotent.
  */
private def insertSignedTransaction[T](toTxEntry: T => TransactionEntry)(
    transactions: Seq[T]
): SqlStreamingAction[Vector[Int], Int, slick.dbio.Effect.Write]#ResultAction[
  Int,
  NoStream,
  Effect.Write,
] = {
  // Renders the VALUES tuple for a single row.
  def sqlTransactionParameters(transaction: T) = {
    val txEntry = toTxEntry(transaction)
    val signedTx = txEntry.signedTx
    val validFrom = txEntry.validFrom.value
    val validUntil = txEntry.validUntil.map(_.value)
    val sequencedTs = txEntry.sequenced.value
    val operation = signedTx.operation
    val mapping = signedTx.mapping
    val transactionType = mapping.code
    val namespace = mapping.namespace
    val identifier = mapping.maybeUid.map(_.id.toLengthLimitedString).getOrElse(String185.empty)
    val serial = signedTx.serial
    val mappingHash = mapping.uniqueKey.hash.toLengthLimitedHexString
    val reason = txEntry.rejectionReason.map(_.asString1GB)
    val txHash = signedTx.hash.hash.toLengthLimitedHexString
    val isProposal = signedTx.isProposal
    val representativeProtocolVersion = signedTx.transaction.representativeProtocolVersion
    val hashOfSignatures = signedTx.hashOfSignatures.toLengthLimitedHexString

    storage.profile match {
      case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 =>
        sql"""($transactionStoreIdName, $sequencedTs, $validFrom, $validUntil, $transactionType, $namespace,
          $identifier, $mappingHash, $serial, $operation, $signedTx, $txHash, $isProposal, $reason, $representativeProtocolVersion, $hashOfSignatures)"""
      case _: DbStorage.Profile.Oracle =>
        throw new IllegalStateException("Oracle not supported by daml 3.0/X yet")
    }
  }

  // TODO(#14061): Decide whether we want additional indices by mapping_key_hash and tx_hash (e.g. for update/removal and lookups)
  // TODO(#14061): Come up with columns/indexing for efficient ParticipantId => Seq[PartyId] lookup
  // TODO(#12390) should mapping_key_hash rather be tx_hash?
  storage.profile match {
    case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 =>
      (sql"""INSERT INTO common_topology_transactions (store_id, sequenced, valid_from, valid_until, transaction_type, namespace,
          identifier, mapping_key_hash, serial_counter, operation, instance, tx_hash, is_proposal, rejection_reason, representative_protocol_version, hash_of_signatures) VALUES""" ++
        transactions
          .map(sqlTransactionParameters)
          .toList
          .intercalate(sql", ")
        ++ sql" ON CONFLICT DO NOTHING" // idempotency-"conflict" based on common_topology_transactions unique constraint
      ).asUpdate
    case _: DbStorage.Profile.Oracle =>
      throw new IllegalStateException("Oracle not supported by daml 3.0/X yet")
  }
}
// Helper to break up large uid-filters into batches to limit the size of sql "in-clauses".
// Fashioned to reuse lessons learned in 2.x-based DbTopologyStore
private def findTransactionsBatchingUidFilter(
    asOf: CantonTimestamp,
    asOfInclusive: Boolean,
    isProposal: Boolean,
    types: Set[TopologyMappingX.Code],
    filterUid: Option[Seq[UniqueIdentifier]],
    filterNamespace: Option[Seq[Namespace]],
    filterOp: Option[TopologyChangeOpX],
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  // Issues a single db query for the given (possibly reduced) uid filter batch.
  def forwardBatch(filterUidsNew: Option[Seq[UniqueIdentifier]]) =
    findTransactionsSingleBatch(
      asOf,
      asOfInclusive,
      isProposal,
      types,
      filterUidsNew,
      filterNamespace,
      filterOp,
    )

  filterUid.map(
    // Optimization: remove uid-filters made redundant by namespace filters
    _.filterNot(uid => filterNamespace.exists(_.contains(uid.namespace)))
  ) match {
    case None => forwardBatch(None)
    case Some(uids) =>
      // split the uid list into chunks of maxItemsInSqlQuery and concatenate the per-batch results
      MonadUtil
        .batchedSequentialTraverse(
          parallelism = storage.threadsAvailableForWriting,
          chunkSize = maxItemsInSqlQuery,
        )(uids) { batchedUidFilters => forwardBatch(Some(batchedUidFilters)).map(_.result) }
        .map(StoredTopologyTransactionsX(_))
  }
}
/** Runs a single db query for one batch of uid filters (see findTransactionsBatchingUidFilter). */
private def findTransactionsSingleBatch(
    asOf: CantonTimestamp,
    asOfInclusive: Boolean,
    isProposal: Boolean,
    types: Set[TopologyMappingX.Code],
    filterUid: Option[Seq[UniqueIdentifier]],
    filterNamespace: Option[Seq[Namespace]],
    filterOp: Option[TopologyChangeOpX],
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  val hasUidFilter = filterUid.nonEmpty || filterNamespace.nonEmpty
  // exit early if the caller produced an empty uid/namespace filter batch:
  if (hasUidFilter && filterUid.forall(_.isEmpty) && filterNamespace.forall(_.isEmpty)) {
    Future.successful(StoredTopologyTransactionsX.empty)
  } else {
    logger.debug(s"Querying transactions as of $asOf for types $types")
    val timeRangeFilter = asOfQuery(asOf, asOfInclusive)
    val isProposalFilter = sql" AND is_proposal = $isProposal"
    val changeOpFilter = filterOp.fold(sql"")(op => sql" AND operation = $op")
    val mappingTypeFilter = typeFilter(types)
    // uid and namespace conditions are OR-ed: a row matches if either its namespace is in the
    // namespace filter or its full uid is in the uid filter
    val uidNamespaceFilter =
      if (hasUidFilter) {
        val namespaceFilter = filterNamespace.toList.flatMap(_.map(ns => sql"namespace = $ns"))
        val uidFilter =
          filterUid.toList.flatten.map(uid =>
            sql"(identifier = ${uid.id} AND namespace = ${uid.namespace})"
          )
        sql" AND (" ++ (namespaceFilter ++ uidFilter).intercalate(sql" OR ") ++ sql")"
      } else SQLActionBuilderChain(sql"")

    queryForTransactions(
      timeRangeFilter ++ isProposalFilter ++ changeOpFilter ++ mappingTypeFilter ++ uidNamespaceFilter,
      operation = "singleBatch",
    )
  }
}
/** Renders an " AND transaction_type IN (...)" clause, or nothing when `types` is empty. */
private def typeFilter(types: Set[TopologyMappingX.Code]): SQLActionBuilderChain =
  if (types.isEmpty) sql""
  else {
    val inList = types.toSeq.map(code => sql"$code").intercalate(sql", ")
    sql" AND transaction_type IN (" ++ inList ++ sql")"
  }
/** Fetches the signed transactions valid strictly before `effective` that match `subQuery`. */
private def findAsOfExclusive(
    effective: EffectiveTime,
    subQuery: SQLActionBuilder,
    operation: String,
)(implicit traceContext: TraceContext): Future[Seq[GenericSignedTopologyTransactionX]] = {
  val timeFilter = asOfQuery(effective.value, asOfInclusive = false)
  queryForTransactions(timeFilter ++ subQuery, operation)
    .map(stored => stored.result.map(_.transaction))
}
/** Shared lookup for findStored/findStoredForVersion: matches a transaction by mapping key,
  * serial, hash and operation among rows valid strictly before `asOfExclusive`.
  */
private def findStoredSql(
    asOfExclusive: CantonTimestamp,
    transaction: GenericTopologyTransactionX,
    subQuery: SQLActionBuilder = sql"",
    includeRejected: Boolean = false,
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  val mapping = transaction.mapping
  queryForTransactions(
    // Query for leading fields of `idx_common_topology_transactions` to enable use of this index
    sql" AND transaction_type = ${mapping.code} AND namespace = ${mapping.namespace} AND identifier = ${mapping.maybeUid
      .fold(String185.empty)(_.id.toLengthLimitedString)}"
      ++ sql" AND valid_from < $asOfExclusive"
      ++ sql" AND mapping_key_hash = ${mapping.uniqueKey.hash.toLengthLimitedHexString}"
      ++ sql" AND serial_counter = ${transaction.serial}"
      ++ sql" AND tx_hash = ${transaction.hash.hash.toLengthLimitedHexString}"
      ++ sql" AND operation = ${transaction.operation}"
      ++ subQuery,
    includeRejected = includeRejected,
    operation = "findStored",
  )
}
/** Central query helper: selects this store's transactions matching `subQuery`.
  *
  * @param subQuery        additional WHERE conditions; must start with " AND ..."
  * @param operation       short label appended to the metrics/function name for this call
  * @param limit           pre-rendered LIMIT clause (via storage.limit), empty for no limit
  * @param orderBy         ORDER BY clause; defaults to insertion order (id)
  * @param includeRejected whether to return rows with a non-NULL rejection_reason
  */
private def queryForTransactions(
    subQuery: SQLActionBuilder,
    operation: String,
    limit: String = "",
    orderBy: String = " ORDER BY id ",
    includeRejected: Boolean = false,
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  val query =
    sql"SELECT instance, sequenced, valid_from, valid_until FROM common_topology_transactions WHERE store_id = $transactionStoreIdName" ++
      subQuery ++ (if (!includeRejected) sql" AND rejection_reason IS NULL"
                   else sql"") ++ sql" #${orderBy} #${limit}"
  storage
    .query(
      query.as[
        (
            GenericSignedTopologyTransactionX,
            CantonTimestamp,
            CantonTimestamp,
            Option[CantonTimestamp],
        )
      ],
      s"$functionFullName-$operation",
    )
    // wrap each raw row into a StoredTopologyTransactionX
    .map(_.map { case (tx, sequencedTs, validFrom, validUntil) =>
      StoredTopologyTransactionX(
        SequencedTime(sequencedTs),
        EffectiveTime(validFrom),
        validUntil.map(EffectiveTime(_)),
        tx,
      )
    })
    .map(StoredTopologyTransactionsX(_))
}
/** Reads the dispatching watermark recorded for this store, if one has been set. */
override def currentDispatchingWatermark(implicit
    traceContext: TraceContext
): Future[Option[CantonTimestamp]] = {
  val watermarkQuery =
    sql"SELECT watermark_ts FROM common_topology_dispatching WHERE store_id =$transactionStoreIdName"
      .as[CantonTimestamp]
      .headOption
  storage.query(watermarkQuery, functionFullName)
}
/** Upserts the dispatching watermark for this store (one row per store_id). */
override def updateDispatchingWatermark(timestamp: CantonTimestamp)(implicit
    traceContext: TraceContext
): Future[Unit] = {
  val query = storage.profile match {
    case _: DbStorage.Profile.Postgres =>
      // Postgres native upsert
      sqlu"""insert into common_topology_dispatching (store_id, watermark_ts)
                VALUES ($transactionStoreIdName, $timestamp)
             on conflict (store_id) do update
               set
                  watermark_ts = $timestamp
             """
    case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Oracle =>
      // MERGE-based upsert for H2/Oracle
      sqlu"""merge into common_topology_dispatching
                using dual
                on (store_id = $transactionStoreIdName)
                when matched then
                  update set
                    watermark_ts = $timestamp
                when not matched then
                  insert (store_id, watermark_ts)
                  values ($transactionStoreIdName, $timestamp)
             """
  }
  storage.update_(query, functionFullName)
}
/** Renders the validity-window condition for a point-in-time lookup at `asOf`. */
private def asOfQuery(asOf: CantonTimestamp, asOfInclusive: Boolean): SQLActionBuilder =
  if (!asOfInclusive)
    sql" AND valid_from < $asOf AND (valid_until is NULL OR $asOf <= valid_until)"
  else
    sql" AND valid_from <= $asOf AND (valid_until is NULL OR $asOf < valid_until)"
/** Head-state condition: restrict to a recent timestamp when given, else to open rows. */
private def getHeadStateQuery(
    recentTimestampO: Option[CantonTimestamp]
): SQLActionBuilderChain = recentTimestampO match {
  case None => sql" AND valid_until is NULL"
  case Some(recentTimestamp) => asOfQuery(recentTimestamp, asOfInclusive = false)
}
/** Renders an identifier prefix condition, or nothing for an absent/empty filter. */
@SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
private def getIdFilter(
    idFilter: Option[String]
): SQLActionBuilderChain = idFilter match {
  case Some(prefix) if prefix.nonEmpty => sql" AND identifier like ${prefix + "%"}"
  case _ => sql""
}
/** Renders a namespace prefix condition, or nothing for an absent/empty filter. */
@SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
private def getNamespaceFilter(namespaceFilter: Option[String]): SQLActionBuilderChain =
  namespaceFilter match {
    case Some(prefix) if prefix.nonEmpty => sql" AND namespace LIKE ${prefix + "%"}"
    case _ => sql""
  }
}
// Helper case class to hold StoredTopologyTransactionX-fields in update() providing umbrella
// values for all transactions.
private[db] final case class TransactionEntry(
    sequenced: SequencedTime, // time at which the transaction was sequenced
    validFrom: EffectiveTime, // effective time from which the transaction is valid
    validUntil: Option[EffectiveTime], // None while the transaction has not been superseded
    signedTx: GenericSignedTopologyTransactionX,
    rejectionReason: Option[TopologyTransactionRejection] = None, // set when the tx was rejected
)
private[db] object TransactionEntry {
  /** Builds a [[TransactionEntry]] from a stored transaction; stored transactions never
    * carry a rejection reason.
    */
  def fromStoredTx(stx: GenericStoredTopologyTransactionX): TransactionEntry =
    TransactionEntry(
      sequenced = stx.sequenced,
      validFrom = stx.validFrom,
      validUntil = stx.validUntil,
      signedTx = stx.transaction,
      rejectionReason = None,
    )
}

View File

@ -0,0 +1,69 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.topology.store.memory
import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.DisplayName
import com.digitalasset.canton.config.CantonRequireTypes.String255
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.topology.store.{PartyMetadata, PartyMetadataStore}
import com.digitalasset.canton.topology.{ParticipantId, PartyId}
import com.digitalasset.canton.tracing.TraceContext
import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
class InMemoryPartyMetadataStore extends PartyMetadataStore {

  // All party metadata, keyed by party id; TrieMap gives thread-safe access.
  private val store = TrieMap[PartyId, PartyMetadata]()

  /** Records (or replaces) the metadata for `partyId`. */
  override def insertOrUpdatePartyMetadata(
      partyId: PartyId,
      participantId: Option[ParticipantId],
      displayName: Option[DisplayName],
      effectiveTimestamp: CantonTimestamp,
      submissionId: String255,
  )(implicit traceContext: TraceContext): Future[Unit] = {
    val updated = PartyMetadata(partyId, displayName, participantId)(
      effectiveTimestamp = effectiveTimestamp,
      submissionId = submissionId,
    )
    store.update(partyId, updated)
    Future.unit
  }

  /** Looks up the metadata stored for `partyId`, if any. */
  override def metadataForParty(partyId: PartyId)(implicit
      traceContext: TraceContext
  ): Future[Option[PartyMetadata]] = Future.successful(store.get(partyId))

  /** Flags the stored entry as notified, but only if it still refers to the same effective
    * timestamp as `metadata` (i.e. it has not been superseded in the meantime).
    */
  override def markNotified(
      metadata: PartyMetadata
  )(implicit traceContext: TraceContext): Future[Unit] = {
    store.get(metadata.partyId).foreach { existing =>
      if (existing.effectiveTimestamp == metadata.effectiveTimestamp) {
        store.update(
          metadata.partyId,
          metadata.copy()(
            effectiveTimestamp = metadata.effectiveTimestamp,
            submissionId = metadata.submissionId,
            notified = true,
          ),
        )
      }
    }
    Future.unit
  }

  /** Returns every entry whose notification is still pending. */
  override def fetchNotNotified()(implicit traceContext: TraceContext): Future[Seq[PartyMetadata]] =
    Future.successful(store.values.filterNot(_.notified).toSeq)

  override def close(): Unit = ()
}

View File

@ -3,82 +3,592 @@
package com.digitalasset.canton.topology.store.memory
import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.DisplayName
import com.digitalasset.canton.config.CantonRequireTypes.String255
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.Hash
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.NamedLogging
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
import com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.{
GenericStoredTopologyTransactionsX,
PositiveStoredTopologyTransactionsX,
}
import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX
import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash
import com.digitalasset.canton.topology.transaction.TopologyTransactionX.{
GenericTopologyTransactionX,
TxHash,
}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.version.{ProtocolVersion, RepresentativeProtocolVersion}
import com.google.common.annotations.VisibleForTesting
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.nowarn
import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{ExecutionContext, Future, blocking}
import scala.math.Ordering.Implicits.*
// TODO(#15161): Rename file to InMemoryPartyMetadataStore
class InMemoryPartyMetadataStore extends PartyMetadataStore {
class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId](
val storeId: StoreId,
val loggerFactory: NamedLoggerFactory,
override val timeouts: ProcessingTimeout,
)(implicit ec: ExecutionContext)
extends TopologyStoreX[StoreId]
with NamedLogging {
private val store = TrieMap[PartyId, PartyMetadata]()
override def onClosed(): Unit = ()
override def insertOrUpdatePartyMetadata(
partyId: PartyId,
participantId: Option[ParticipantId],
displayName: Option[DisplayName],
effectiveTimestamp: CantonTimestamp,
submissionId: String255,
)(implicit traceContext: TraceContext): Future[Unit] = {
store
.put(
partyId,
PartyMetadata(partyId, displayName, participantId)(
effectiveTimestamp = effectiveTimestamp,
submissionId = submissionId,
),
)
.discard
Future.unit
private case class TopologyStoreEntry(
transaction: GenericSignedTopologyTransactionX,
sequenced: SequencedTime,
from: EffectiveTime,
rejected: Option[String],
until: Option[EffectiveTime],
) extends DelegatedTopologyTransactionLike[TopologyChangeOpX, TopologyMappingX] {
override protected def transactionLikeDelegate
: TopologyTransactionLike[TopologyChangeOpX, TopologyMappingX] = transaction
def toStoredTransaction: StoredTopologyTransactionX[TopologyChangeOpX, TopologyMappingX] =
StoredTopologyTransactionX(sequenced, from, until, transaction)
}
override def metadataForParty(partyId: PartyId)(implicit
traceContext: TraceContext
): Future[Option[PartyMetadata]] =
Future.successful(store.get(partyId))
private val topologyTransactionStore = ArrayBuffer[TopologyStoreEntry]()
// the unique key is defined in the database migration file for the common_topology_transactions table
private val topologyTransactionsStoreUniqueIndex = mutable.Set.empty[
(
MappingHash,
PositiveInt,
EffectiveTime,
TopologyChangeOpX,
RepresentativeProtocolVersion[SignedTopologyTransactionX.type],
Hash,
)
]
private val watermark = new AtomicReference[Option[CantonTimestamp]](None)
override def markNotified(
metadata: PartyMetadata
)(implicit traceContext: TraceContext): Future[Unit] = {
store.get(metadata.partyId) match {
case Some(cur) if cur.effectiveTimestamp == metadata.effectiveTimestamp =>
store
.put(
metadata.partyId,
metadata.copy()(
effectiveTimestamp = metadata.effectiveTimestamp,
submissionId = metadata.submissionId,
notified = true,
),
def findTransactionsByTxHash(asOfExclusive: EffectiveTime, hashes: Set[TxHash])(implicit
traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]] =
if (hashes.isEmpty) Future.successful(Seq.empty)
else
findFilter(
asOfExclusive,
entry => hashes.contains(entry.hash),
)
override def findProposalsByTxHash(
asOfExclusive: EffectiveTime,
hashes: NonEmpty[Set[TxHash]],
)(implicit traceContext: TraceContext): Future[Seq[GenericSignedTopologyTransactionX]] = {
findFilter(
asOfExclusive,
entry => hashes.contains(entry.hash) && entry.transaction.isProposal,
)
}
private def findFilter(
asOfExclusive: EffectiveTime,
filter: TopologyStoreEntry => Boolean,
): Future[Seq[GenericSignedTopologyTransactionX]] = Future.successful {
blocking {
synchronized {
topologyTransactionStore
.filter(x =>
x.from.value < asOfExclusive.value
&& x.rejected.isEmpty
&& x.until.forall(_.value >= asOfExclusive.value)
&& filter(x)
)
.discard
case _ => ()
.map(_.transaction)
.toSeq
}
}
}
override def findTransactionsForMapping(
asOfExclusive: EffectiveTime,
hashes: NonEmpty[Set[MappingHash]],
)(implicit
traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]] = {
findFilter(
asOfExclusive,
entry =>
!entry.transaction.isProposal && hashes.contains(
entry.mapping.uniqueKey
),
)
}
override def update(
sequenced: SequencedTime,
effective: EffectiveTime,
removeMapping: Map[TopologyMappingX.MappingHash, PositiveInt],
removeTxs: Set[TopologyTransactionX.TxHash],
additions: Seq[GenericValidatedTopologyTransactionX],
)(implicit traceContext: TraceContext): Future[Unit] = {
blocking {
synchronized {
// transactionally
// UPDATE txs SET valid_until = effective WHERE effective < $effective AND valid_from is NULL
// AND ((mapping_key_hash IN $removeMapping AND serial_counter <= $serial) OR (tx_hash IN $removeTxs))
// INSERT IGNORE DUPLICATES (...)
topologyTransactionStore.zipWithIndex.foreach { case (tx, idx) =>
if (
tx.from.value < effective.value && tx.until.isEmpty &&
(removeMapping
.get(tx.mapping.uniqueKey)
.exists(_ >= tx.serial)
||
removeTxs.contains(tx.hash))
) {
topologyTransactionStore.update(idx, tx.copy(until = Some(effective)))
}
}
additions.foreach { tx =>
val uniqueKey = (
tx.mapping.uniqueKey,
tx.serial,
effective,
tx.operation,
tx.transaction.representativeProtocolVersion,
tx.transaction.hashOfSignatures,
)
if (topologyTransactionsStoreUniqueIndex.add(uniqueKey)) {
topologyTransactionStore.append(
TopologyStoreEntry(
tx.transaction,
sequenced,
from = effective,
rejected = tx.rejectionReason.map(_.toString),
until = Option.when(
tx.rejectionReason.nonEmpty || tx.expireImmediately
)(effective),
)
)
}
}
}
}
Future.unit
}
override def fetchNotNotified()(implicit traceContext: TraceContext): Future[Seq[PartyMetadata]] =
Future.successful(store.values.filterNot(_.notified).toSeq)
@VisibleForTesting
override protected[topology] def dumpStoreContent()(implicit
traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
val entries = blocking {
synchronized {
logger.debug(
topologyTransactionStore
.map(_.toString)
.mkString("Topology Store Content[", ", ", "]")
)
topologyTransactionStore.toSeq
override def close(): Unit = ()
}
}
}
Future.successful(
StoredTopologyTransactionsX(
entries.map(e => StoredTopologyTransactionX(e.sequenced, e.from, e.until, e.transaction))
)
)
}
// TODO(#15161) collapse into InMemoryTopologyStoreX
trait InMemoryTopologyStoreCommon[+StoreId <: TopologyStoreId] extends NamedLogging {
this: TopologyStoreX[StoreId] =>
/** Builds a predicate over (validFrom, validUntil) selecting entries valid at `asOf`.
  * Inclusive mode accepts validFrom == asOf; exclusive mode accepts validUntil == asOf.
  */
private def asOfFilter(
    asOf: CantonTimestamp,
    asOfInclusive: Boolean,
): (CantonTimestamp, Option[CantonTimestamp]) => Boolean =
  (validFrom, validUntil) =>
    if (asOfInclusive) validFrom <= asOf && validUntil.forall(asOf < _)
    else validFrom < asOf && validUntil.forall(asOf <= _)
private val watermark = new AtomicReference[Option[CantonTimestamp]](None)
/** Projects the in-memory `table` onto stored transactions matching `filter`.
  * Rejected entries are dropped unless `includeRejected` is set.
  */
private def filteredState(
    table: Seq[TopologyStoreEntry],
    filter: TopologyStoreEntry => Boolean,
    includeRejected: Boolean = false,
): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] =
  Future.successful(
    StoredTopologyTransactionsX(
      table.collect {
        case entry if filter(entry) && (entry.rejected.isEmpty || includeRejected) =>
          entry.toStoredTransaction
      }
    )
  )
/** In-memory party inspection: returns up to `limit` party ids active at `timestamp` that
  * match the party/participant prefix filters, sourced either from PartyToParticipantX
  * mappings or from participants' admin parties (DomainTrustCertificateX).
  */
override def inspectKnownParties(
    timestamp: CantonTimestamp,
    filterParty: String,
    filterParticipant: String,
    limit: Int,
)(implicit traceContext: TraceContext): Future[Set[PartyId]] = {
  val (prefixPartyIdentifier, prefixPartyNS) = UniqueIdentifier.splitFilter(filterParty)
  val (prefixParticipantIdentifier, prefixParticipantNS) =
    UniqueIdentifier.splitFilter(filterParticipant)

  // Accepts committed (non-proposal), non-rejected Replace entries that are active at
  // `timestamp` and whose mapping matches the prefix filters.
  def filter(entry: TopologyStoreEntry): Boolean = {
    // active
    entry.from.value < timestamp && entry.until.forall(until => timestamp <= until.value) &&
    // not rejected
    entry.rejected.isEmpty &&
    // is not a proposal
    !entry.transaction.isProposal &&
    // is of type Replace
    entry.operation == TopologyChangeOpX.Replace &&
    // matches a party to participant mapping (with appropriate filters)
    (entry.mapping match {
      case ptp: PartyToParticipantX =>
        ptp.partyId.uid.matchesPrefixes(prefixPartyIdentifier, prefixPartyNS) &&
        (filterParticipant.isEmpty ||
          ptp.participants.exists(
            _.participantId.uid
              .matchesPrefixes(prefixParticipantIdentifier, prefixParticipantNS)
          ))
      case cert: DomainTrustCertificateX =>
        cert.participantId.adminParty.uid
          .matchesPrefixes(prefixPartyIdentifier, prefixPartyNS) &&
        cert.participantId.uid
          .matchesPrefixes(prefixParticipantIdentifier, prefixParticipantNS)
      case _ => false
    })
  }

  val topologyStateStoreSeq = blocking(synchronized(topologyTransactionStore.toSeq))
  Future.successful(
    topologyStateStoreSeq
      // stop accumulating once `limit` parties have been collected
      .foldLeft(Set.empty[PartyId]) {
        case (acc, elem) if acc.size >= limit || !filter(elem) => acc
        case (acc, elem) =>
          // NOTE(review): the mapping's uid is turned into the resulting PartyId; for
          // DomainTrustCertificateX this appears to rely on the participant uid doubling as
          // its admin party uid — confirm against the mapping definitions.
          elem.mapping.maybeUid.fold(acc)(x => acc + PartyId(x))
      }
  )
}
/** General-purpose inspection over the in-memory store, combining a time query with
  * proposal-flag, operation, mapping-type, id-prefix and namespace-prefix filters.
  */
override def inspect(
    proposals: Boolean,
    timeQuery: TimeQuery,
    recentTimestampO: Option[CantonTimestamp],
    op: Option[TopologyChangeOpX],
    types: Seq[TopologyMappingX.Code],
    idFilter: Option[String],
    namespaceFilter: Option[String],
)(implicit
    traceContext: TraceContext
): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] = {
  def mkAsOfFilter(asOf: CantonTimestamp): TopologyStoreEntry => Boolean = entry =>
    asOfFilter(asOf, asOfInclusive = false)(entry.from.value, entry.until.map(_.value))

  // time-based filter derived from the requested TimeQuery
  val filter1: TopologyStoreEntry => Boolean = timeQuery match {
    case TimeQuery.HeadState =>
      // use recent timestamp to avoid race conditions (as we are looking
      // directly into the store, while the recent time still needs to propagate)
      recentTimestampO.map(mkAsOfFilter).getOrElse(entry => entry.until.isEmpty)
    case TimeQuery.Snapshot(asOf) => mkAsOfFilter(asOf)
    case TimeQuery.Range(from, until) =>
      entry =>
        from.forall(ts => entry.from.value >= ts) && until.forall(ts => entry.from.value <= ts)
  }

  // operation filter (None matches any operation)
  val filter2: TopologyStoreEntry => Boolean = entry => op.forall(_ == entry.operation)

  // identifier prefix filter
  val filter3: TopologyStoreEntry => Boolean = {
    idFilter match {
      case Some(value) if value.nonEmpty =>
        (entry: TopologyStoreEntry) =>
          entry.mapping.maybeUid.exists(_.id.unwrap.startsWith(value))
      case _ => _ => true
    }
  }

  // namespace prefix filter
  val filter4: TopologyStoreEntry => Boolean = {
    namespaceFilter match {
      case Some(value) if value.nonEmpty =>
        (entry: TopologyStoreEntry) =>
          entry.mapping.namespace.fingerprint.unwrap.startsWith(value)
      case _ => _ => true
    }
  }

  // mapping-type filter (empty `types` means "all types")
  val filter0: TopologyStoreEntry => Boolean = entry =>
    types.isEmpty || types.contains(entry.mapping.code)

  filteredState(
    blocking(synchronized(topologyTransactionStore.toSeq)),
    entry =>
      filter0(entry) && (entry.transaction.isProposal == proposals) && filter1(entry) && filter2(
        entry
      ) && filter3(entry) && filter4(entry),
  )
}
/** Returns the Replace transactions valid at `asOf` that match the given filters. */
override def findPositiveTransactions(
    asOf: CantonTimestamp,
    asOfInclusive: Boolean,
    isProposal: Boolean,
    types: Seq[TopologyMappingX.Code],
    filterUid: Option[Seq[UniqueIdentifier]],
    filterNamespace: Option[Seq[Namespace]],
)(implicit traceContext: TraceContext): Future[PositiveStoredTopologyTransactionsX] = {
  val matching =
    findTransactionsInStore(asOf, asOfInclusive, isProposal, types, filterUid, filterNamespace)
  matching.map(_.collectOfType[TopologyChangeOpX.Replace])
}
/** Core in-memory lookup: entries valid at `asOf` whose mapping code is in `types`, matching
  * the uid/namespace filters and the proposal flag.
  * NOTE(review): unlike the db-store's typeFilter, an empty `types` matches nothing here
  * (`types.contains` is false for every entry) — confirm callers always pass non-empty types.
  */
private def findTransactionsInStore(
    asOf: CantonTimestamp,
    asOfInclusive: Boolean,
    isProposal: Boolean,
    types: Seq[TopologyMappingX.Code],
    filterUid: Option[Seq[UniqueIdentifier]],
    filterNamespace: Option[Seq[Namespace]],
): Future[GenericStoredTopologyTransactionsX] = {
  val timeFilter = asOfFilter(asOf, asOfInclusive)

  // An entry passes when no uid/namespace filter is set, or when its uid or namespace is listed.
  def pathFilter(mapping: TopologyMappingX): Boolean = {
    if (filterUid.isEmpty && filterNamespace.isEmpty)
      true
    else {
      mapping.maybeUid.exists(uid => filterUid.exists(_.contains(uid))) ||
      filterNamespace.exists(_.contains(mapping.namespace))
    }
  }

  filteredState(
    blocking(synchronized { topologyTransactionStore.toSeq }),
    entry => {
      timeFilter(entry.from.value, entry.until.map(_.value)) &&
      types.contains(entry.mapping.code) &&
      (pathFilter(entry.mapping)) &&
      entry.transaction.isProposal == isProposal
    },
  )
}
/** Returns the earliest (lowest-serial) fully authorized Replace
  * [[SequencerDomainStateX]] that lists `sequencerId` among its sequencers,
  * or None if no such transaction exists.
  */
override def findFirstSequencerStateForSequencer(
    sequencerId: SequencerId
)(implicit
    traceContext: TraceContext
): Future[
  Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, SequencerDomainStateX]]
] = {
  filteredState(
    blocking(synchronized(topologyTransactionStore.toSeq)),
    entry =>
      !entry.transaction.isProposal &&
        entry.operation == TopologyChangeOpX.Replace &&
        entry.mapping
          .select[SequencerDomainStateX]
          .exists(m => m.allSequencers.contains(sequencerId)),
  ).map(
    // lowest serial first => headOption is the earliest matching state
    _.collectOfType[TopologyChangeOpX.Replace]
      .collectOfMapping[SequencerDomainStateX]
      .result
      .sortBy(_.serial)
      .headOption
  )
}
/** Returns the earliest (lowest-serial) fully authorized Replace
  * [[MediatorDomainStateX]] that lists `mediatorId` as active or observer,
  * or None if no such transaction exists.
  */
override def findFirstMediatorStateForMediator(
    mediatorId: MediatorId
)(implicit
    traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, MediatorDomainStateX]]] = {
  filteredState(
    blocking(synchronized(topologyTransactionStore.toSeq)),
    entry =>
      !entry.transaction.isProposal &&
        entry.operation == TopologyChangeOpX.Replace &&
        entry.mapping
          .select[MediatorDomainStateX]
          .exists(m => m.observers.contains(mediatorId) || m.active.contains(mediatorId)),
  ).map(
    // lowest serial first => headOption is the earliest matching state
    _.collectOfType[TopologyChangeOpX.Replace]
      .collectOfMapping[MediatorDomainStateX]
      .result
      .sortBy(_.serial)
      .headOption
  )
}
/** Returns the earliest (lowest-serial) fully authorized Replace
  * [[DomainTrustCertificateX]] issued for `participant`, or None if absent.
  */
def findFirstTrustCertificateForParticipant(
    participant: ParticipantId
)(implicit
    traceContext: TraceContext
): Future[
  Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, DomainTrustCertificateX]]
] = {
  filteredState(
    blocking(synchronized(topologyTransactionStore.toSeq)),
    entry =>
      !entry.transaction.isProposal &&
        entry.operation == TopologyChangeOpX.Replace &&
        entry.mapping
          .select[DomainTrustCertificateX]
          .exists(_.participantId == participant),
  ).map(
    // lowest serial first => headOption is the earliest matching certificate
    _.collectOfType[TopologyChangeOpX.Replace]
      .collectOfMapping[DomainTrustCertificateX]
      .result
      .sortBy(_.serial)
      .headOption
  )
}
/** Returns the essential topology state for a member onboarded at `asOfInclusive`:
  * all transactions (including proposals) sequenced up to and including that time,
  * adjusted so validity windows reflect the snapshot's max effective time.
  */
override def findEssentialStateAtSequencedTime(
    asOfInclusive: SequencedTime
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  // asOfInclusive is the effective time of the transaction that onboarded the member.
  // 1. load all transactions with a sequenced time <= asOfInclusive, including proposals
  filteredState(
    blocking(synchronized {
      topologyTransactionStore.toSeq
    }),
    entry => entry.sequenced <= asOfInclusive,
  ).map(
    // 2. transform the result such that the validUntil fields are set as they were at maxEffective time of the snapshot
    _.asSnapshotAtMaxEffectiveTime
      // and remove proposals that have been superseded by full authorized transactions
      .retainAuthorizedHistoryAndEffectiveProposals
  )
}
/** Stores an initial set of topology transactions as given into the store.
  *
  * Appends every transaction from `snapshot` verbatim (preserving its sequenced,
  * validFrom and validUntil times, with no rejection reason). Note that entries are
  * appended without consulting the uniqueness index, so this is intended for
  * initializing an empty store.
  */
override def bootstrap(
    snapshot: GenericStoredTopologyTransactionsX
)(implicit traceContext: TraceContext): Future[Unit] = Future {
  blocking {
    synchronized {
      topologyTransactionStore
        .appendAll(
          snapshot.result.map { tx =>
            TopologyStoreEntry(
              tx.transaction,
              tx.sequenced,
              tx.validFrom,
              rejected = None,
              until = tx.validUntil,
            )
          }
        )
        .discard
    }
  }
}
/** Returns the accumulated upcoming effective changes: all stored transactions whose
  * effective (from) time is at or after `asOfInclusive`, folded via
  * [[TopologyStoreX.accumulateUpcomingEffectiveChanges]].
  */
override def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit
    traceContext: TraceContext
): Future[Seq[TopologyStoreX.Change]] =
  Future {
    blocking {
      synchronized {
        TopologyStoreX.accumulateUpcomingEffectiveChanges(
          topologyTransactionStore
            .filter(_.from.value >= asOfInclusive)
            .map(_.toStoredTransaction)
            .toSeq
        )
      }
    }
  }
/** Returns the sequenced and effective time of the most recently appended entry,
  * or None if the store is empty. Relies on the buffer being append-ordered.
  */
override def maxTimestamp()(implicit
    traceContext: TraceContext
): Future[Option[(SequencedTime, EffectiveTime)]] = Future {
  blocking {
    synchronized {
      topologyTransactionStore.lastOption.map(x => (x.sequenced, x.from))
    }
  }
}
/** Returns transactions to dispatch: non-rejected entries effective strictly after
  * `timestampExclusive`, keeping proposals only while they are still pending
  * (i.e. have no `until`). The result is truncated to `limit` when given.
  */
override def findDispatchingTransactionsAfter(
    timestampExclusive: CantonTimestamp,
    limit: Option[Int],
)(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] =
  Future.successful(blocking(synchronized {
    val selected = topologyTransactionStore
      .filter(x =>
        x.from.value > timestampExclusive && (!x.transaction.isProposal || x.until.isEmpty) && x.rejected.isEmpty
      )
      .map(_.toStoredTransaction)
      .toSeq
    StoredTopologyTransactionsX(limit.fold(selected)(selected.take))
  }))
/** Returns every transaction in the store (optionally including rejected ones),
  * snapshotting the buffer under the store lock.
  */
private def allTransactions(
    includeRejected: Boolean = false
): Future[GenericStoredTopologyTransactionsX] =
  filteredState(
    blocking(synchronized(topologyTransactionStore.toSeq)),
    _ => true,
    includeRejected,
  )
/** Looks up the latest stored copy of `transaction` (matched by signed-transaction
  * hash) that became valid strictly before `asOfExclusive`, optionally also
  * considering rejected entries.
  */
override def findStored(
    asOfExclusive: CantonTimestamp,
    transaction: GenericSignedTopologyTransactionX,
    includeRejected: Boolean = false,
)(implicit
    traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]] =
  allTransactions(includeRejected).map { stored =>
    def matches(tx: GenericStoredTopologyTransactionX): Boolean =
      tx.hash == transaction.hash && tx.validFrom.value < asOfExclusive
    stored.result.findLast(matches)
  }
/** Looks up the latest stored copy of the (unsigned) `transaction` serialized for
  * `protocolVersion` that became valid strictly before `asOfExclusive`. Rejected
  * entries are not considered.
  */
override def findStoredForVersion(
    asOfExclusive: CantonTimestamp,
    transaction: GenericTopologyTransactionX,
    protocolVersion: ProtocolVersion,
)(implicit
    traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]] = {
  val rpv = TopologyTransactionX.protocolVersionRepresentativeFor(protocolVersion)
  allTransactions().map(
    _.result.findLast(tx =>
      tx.transaction.transaction == transaction && tx.transaction.representativeProtocolVersion == rpv && tx.validFrom.value < asOfExclusive
    )
  )
}
/** Collects the fully authorized transactions needed to onboard `participantId` to
  * `domainId`: entries whose mapping code is in
  * [[TopologyStoreX.initialParticipantDispatchingSet]], further narrowed by
  * [[TopologyStoreX.filterInitialParticipantDispatchingTransactions]].
  */
override def findParticipantOnboardingTransactions(
    participantId: ParticipantId,
    domainId: DomainId,
)(implicit
    traceContext: TraceContext
): FutureUnlessShutdown[Seq[GenericSignedTopologyTransactionX]] = {
  val res = blocking(synchronized {
    topologyTransactionStore.filter(x =>
      !x.transaction.isProposal && TopologyStoreX.initialParticipantDispatchingSet.contains(
        x.mapping.code
      )
    )
  })
  FutureUnlessShutdown.pure(
    TopologyStoreX.filterInitialParticipantDispatchingTransactions(
      participantId,
      domainId,
      res.map(_.toStoredTransaction).toSeq,
    )
  )
}
@nowarn("cat=unused")
override def currentDispatchingWatermark(implicit
traceContext: TraceContext
): Future[Option[CantonTimestamp]] =
@ -96,5 +606,4 @@ trait InMemoryTopologyStoreCommon[+StoreId <: TopologyStoreId] extends NamedLogg
}
Future.unit
}
}

View File

@ -1,591 +0,0 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.topology.store.memory
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.Hash
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
import com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.{
GenericStoredTopologyTransactionsX,
PositiveStoredTopologyTransactionsX,
}
import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX
import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash
import com.digitalasset.canton.topology.transaction.TopologyTransactionX.{
GenericTopologyTransactionX,
TxHash,
}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.version.{ProtocolVersion, RepresentativeProtocolVersion}
import com.google.common.annotations.VisibleForTesting
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{ExecutionContext, Future, blocking}
import scala.math.Ordering.Implicits.*
class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId](
val storeId: StoreId,
val loggerFactory: NamedLoggerFactory,
override val timeouts: ProcessingTimeout,
)(implicit ec: ExecutionContext)
extends TopologyStoreX[StoreId]
with InMemoryTopologyStoreCommon[StoreId]
with NamedLogging {
override def onClosed(): Unit = ()
/** In-memory row of the topology store.
  *
  * @param transaction the stored signed topology transaction
  * @param sequenced   time at which the transaction was sequenced
  * @param from        effective (validFrom) time
  * @param rejected    rejection reason, if the transaction was rejected
  * @param until       validUntil time; None while the entry is still valid
  */
private case class TopologyStoreEntry(
    transaction: GenericSignedTopologyTransactionX,
    sequenced: SequencedTime,
    from: EffectiveTime,
    rejected: Option[String],
    until: Option[EffectiveTime],
) extends DelegatedTopologyTransactionLike[TopologyChangeOpX, TopologyMappingX] {
  // operation/mapping/serial/hash accessors are delegated to the wrapped transaction
  override protected def transactionLikeDelegate
      : TopologyTransactionLike[TopologyChangeOpX, TopologyMappingX] = transaction

  def toStoredTransaction: StoredTopologyTransactionX[TopologyChangeOpX, TopologyMappingX] =
    StoredTopologyTransactionX(sequenced, from, until, transaction)
}
// Append-ordered backing storage; all access must go through blocking(synchronized(...)).
private val topologyTransactionStore = ArrayBuffer[TopologyStoreEntry]()
// Deduplication index mirroring the DB-backed store:
// the unique key is defined in the database migration file for the common_topology_transactions table
private val topologyTransactionsStoreUniqueIndex = mutable.Set.empty[
  (
      MappingHash,
      PositiveInt,
      EffectiveTime,
      TopologyChangeOpX,
      RepresentativeProtocolVersion[SignedTopologyTransactionX.type],
      Hash,
  )
]
/** Finds transactions valid at `asOfExclusive` whose transaction hash is in `hashes`;
  * an empty hash set short-circuits to an empty result.
  */
def findTransactionsByTxHash(asOfExclusive: EffectiveTime, hashes: Set[TxHash])(implicit
    traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]] =
  if (hashes.nonEmpty)
    findFilter(asOfExclusive, entry => hashes.contains(entry.hash))
  else Future.successful(Seq.empty)
/** Finds proposals valid at `asOfExclusive` whose transaction hash is in `hashes`.
  * `hashes` is non-empty by type, so no empty-set short-circuit is needed.
  */
override def findProposalsByTxHash(
    asOfExclusive: EffectiveTime,
    hashes: NonEmpty[Set[TxHash]],
)(implicit traceContext: TraceContext): Future[Seq[GenericSignedTopologyTransactionX]] = {
  findFilter(
    asOfExclusive,
    entry => hashes.contains(entry.hash) && entry.transaction.isProposal,
  )
}
/** Shared lookup helper: returns the signed transactions of all non-rejected entries
  * that are valid at `asOfExclusive` (validFrom < asOfExclusive <= validUntil, with an
  * open validUntil counting as valid) and that satisfy `filter`.
  */
private def findFilter(
    asOfExclusive: EffectiveTime,
    filter: TopologyStoreEntry => Boolean,
): Future[Seq[GenericSignedTopologyTransactionX]] = Future.successful {
  blocking {
    synchronized {
      topologyTransactionStore
        .filter(x =>
          x.from.value < asOfExclusive.value
            && x.rejected.isEmpty
            && x.until.forall(_.value >= asOfExclusive.value)
            && filter(x)
        )
        .map(_.transaction)
        .toSeq
    }
  }
}
/** Finds fully authorized (non-proposal) transactions valid at `asOfExclusive` whose
  * mapping unique key is in `hashes`.
  */
override def findTransactionsForMapping(
    asOfExclusive: EffectiveTime,
    hashes: NonEmpty[Set[MappingHash]],
)(implicit
    traceContext: TraceContext
): Future[Seq[GenericSignedTopologyTransactionX]] = {
  findFilter(
    asOfExclusive,
    entry =>
      !entry.transaction.isProposal && hashes.contains(
        entry.mapping.uniqueKey
      ),
  )
}
/** Applies one update batch atomically (under the store lock):
  * first expires matching open entries by setting their `until` to `effective`,
  * then appends `additions`, deduplicated via the unique-key index.
  *
  * @param removeMapping mapping hashes to expire, each up to the given serial
  * @param removeTxs     transaction hashes to expire
  * @param additions     validated transactions to append; rejected or
  *                      expire-immediately additions are stored already closed
  */
override def update(
    sequenced: SequencedTime,
    effective: EffectiveTime,
    removeMapping: Map[TopologyMappingX.MappingHash, PositiveInt],
    removeTxs: Set[TopologyTransactionX.TxHash],
    additions: Seq[GenericValidatedTopologyTransactionX],
)(implicit traceContext: TraceContext): Future[Unit] = {
  blocking {
    synchronized {
      // transactionally
      // UPDATE txs SET valid_until = effective WHERE effective < $effective AND valid_from is NULL
      // AND ((mapping_key_hash IN $removeMapping AND serial_counter <= $serial) OR (tx_hash IN $removeTxs))
      // INSERT IGNORE DUPLICATES (...)
      // Phase 1: close still-open entries matched by mapping (up to serial) or by tx hash.
      topologyTransactionStore.zipWithIndex.foreach { case (tx, idx) =>
        if (
          tx.from.value < effective.value && tx.until.isEmpty &&
          (removeMapping
            .get(tx.mapping.uniqueKey)
            .exists(_ >= tx.serial)
            ||
              removeTxs.contains(tx.hash))
        ) {
          topologyTransactionStore.update(idx, tx.copy(until = Some(effective)))
        }
      }
      // Phase 2: append additions, skipping exact duplicates via the unique-key index
      // (mirrors the DB unique constraint).
      additions.foreach { tx =>
        val uniqueKey = (
          tx.mapping.uniqueKey,
          tx.serial,
          effective,
          tx.operation,
          tx.transaction.representativeProtocolVersion,
          tx.transaction.hashOfSignatures,
        )
        if (topologyTransactionsStoreUniqueIndex.add(uniqueKey)) {
          topologyTransactionStore.append(
            TopologyStoreEntry(
              tx.transaction,
              sequenced,
              from = effective,
              rejected = tx.rejectionReason.map(_.toString),
              // rejected or immediately-expired additions are stored already closed
              until = Option.when(
                tx.rejectionReason.nonEmpty || tx.expireImmediately
              )(effective),
            )
          )
        }
      }
    }
  }
  Future.unit
}
/** Test/debug helper: logs the full store content at debug level and returns it as
  * stored transactions (including rejected and expired entries).
  */
@VisibleForTesting
override protected[topology] def dumpStoreContent()(implicit
    traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
  val entries = blocking {
    synchronized {
      logger.debug(
        topologyTransactionStore
          .map(_.toString)
          .mkString("Topology Store Content[", ", ", "]")
      )
      topologyTransactionStore.toSeq
    }
  }
  Future.successful(
    StoredTopologyTransactionsX(
      entries.map(e => StoredTopologyTransactionX(e.sequenced, e.from, e.until, e.transaction))
    )
  )
}
/** Builds a validity-window predicate over (validFrom, validUntil) for `asOf`.
  *
  * Inclusive: validFrom <= asOf < validUntil; exclusive: validFrom < asOf <= validUntil.
  * An open validUntil (None) always counts as still valid.
  */
private def asOfFilter(
    asOf: CantonTimestamp,
    asOfInclusive: Boolean,
): (CantonTimestamp, Option[CantonTimestamp]) => Boolean = { (validFrom, validUntil) =>
  val startedByAsOf = if (asOfInclusive) validFrom <= asOf else validFrom < asOf
  val notYetExpired =
    validUntil.forall(until => if (asOfInclusive) asOf < until else asOf <= until)
  startedByAsOf && notYetExpired
}
/** Projects a snapshot of store entries to stored transactions, keeping only entries
  * that satisfy `filter` and dropping rejected ones unless `includeRejected` is set.
  */
private def filteredState(
    table: Seq[TopologyStoreEntry],
    filter: TopologyStoreEntry => Boolean,
    includeRejected: Boolean = false,
): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] = {
  val selected = table
    .filter(entry => filter(entry) && (includeRejected || entry.rejected.isEmpty))
    .map(_.toStoredTransaction)
  Future.successful(StoredTopologyTransactionsX(selected))
}
/** Inspection query: returns up to `limit` party ids known at `timestamp`, matching
  * the party/participant prefix filters against PartyToParticipant mappings and
  * domain trust certificates (a participant's admin party).
  *
  * @param filterParty       prefix filter on the party uid (identifier::namespace split)
  * @param filterParticipant prefix filter on the hosting participant uid
  */
override def inspectKnownParties(
    timestamp: CantonTimestamp,
    filterParty: String,
    filterParticipant: String,
    limit: Int,
)(implicit traceContext: TraceContext): Future[Set[PartyId]] = {
  val (prefixPartyIdentifier, prefixPartyNS) = UniqueIdentifier.splitFilter(filterParty)
  val (prefixParticipantIdentifier, prefixParticipantNS) =
    UniqueIdentifier.splitFilter(filterParticipant)
  def filter(entry: TopologyStoreEntry): Boolean = {
    // active
    entry.from.value < timestamp && entry.until.forall(until => timestamp <= until.value) &&
    // not rejected
    entry.rejected.isEmpty &&
    // is not a proposal
    !entry.transaction.isProposal &&
    // is of type Replace
    entry.operation == TopologyChangeOpX.Replace &&
    // matches a party to participant mapping (with appropriate filters)
    (entry.mapping match {
      case ptp: PartyToParticipantX =>
        ptp.partyId.uid.matchesPrefixes(prefixPartyIdentifier, prefixPartyNS) &&
        (filterParticipant.isEmpty ||
          ptp.participants.exists(
            _.participantId.uid
              .matchesPrefixes(prefixParticipantIdentifier, prefixParticipantNS)
          ))
      case cert: DomainTrustCertificateX =>
        cert.participantId.adminParty.uid
          .matchesPrefixes(prefixPartyIdentifier, prefixPartyNS) &&
        cert.participantId.uid
          .matchesPrefixes(prefixParticipantIdentifier, prefixParticipantNS)
      case _ => false
    })
  }
  val topologyStateStoreSeq = blocking(synchronized(topologyTransactionStore.toSeq))
  Future.successful(
    topologyStateStoreSeq
      // fold instead of filter+take so we stop accumulating once `limit` is reached
      .foldLeft(Set.empty[PartyId]) {
        case (acc, elem) if acc.size >= limit || !filter(elem) => acc
        case (acc, elem) =>
          elem.mapping.maybeUid.fold(acc)(x => acc + PartyId(x))
      }
  )
}
/** General inspection query combining independent filters: time (head state /
  * snapshot / range), operation, mapping-uid prefix, namespace-fingerprint prefix,
  * mapping types (empty `types` means all types) and proposal flag.
  */
override def inspect(
    proposals: Boolean,
    timeQuery: TimeQuery,
    recentTimestampO: Option[CantonTimestamp],
    op: Option[TopologyChangeOpX],
    types: Seq[TopologyMappingX.Code],
    idFilter: Option[String],
    namespaceFilter: Option[String],
)(implicit
    traceContext: TraceContext
): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] = {
  def mkAsOfFilter(asOf: CantonTimestamp): TopologyStoreEntry => Boolean = entry =>
    asOfFilter(asOf, asOfInclusive = false)(entry.from.value, entry.until.map(_.value))
  // time filter
  val filter1: TopologyStoreEntry => Boolean = timeQuery match {
    case TimeQuery.HeadState =>
      // use recent timestamp to avoid race conditions (as we are looking
      // directly into the store, while the recent time still needs to propagate)
      recentTimestampO.map(mkAsOfFilter).getOrElse(entry => entry.until.isEmpty)
    case TimeQuery.Snapshot(asOf) => mkAsOfFilter(asOf)
    case TimeQuery.Range(from, until) =>
      // range bounds apply to the effective (from) time; open bounds match everything
      entry =>
        from.forall(ts => entry.from.value >= ts) && until.forall(ts => entry.from.value <= ts)
  }
  // operation filter (None matches any operation)
  val filter2: TopologyStoreEntry => Boolean = entry => op.forall(_ == entry.operation)
  // uid-identifier prefix filter
  val filter3: TopologyStoreEntry => Boolean = {
    idFilter match {
      case Some(value) if value.nonEmpty =>
        (entry: TopologyStoreEntry) =>
          entry.mapping.maybeUid.exists(_.id.unwrap.startsWith(value))
      case _ => _ => true
    }
  }
  // namespace-fingerprint prefix filter
  val filter4: TopologyStoreEntry => Boolean = {
    namespaceFilter match {
      case Some(value) if value.nonEmpty =>
        (entry: TopologyStoreEntry) =>
          entry.mapping.namespace.fingerprint.unwrap.startsWith(value)
      case _ => _ => true
    }
  }
  // mapping-type filter; empty `types` matches all (unlike findTransactionsInStore)
  val filter0: TopologyStoreEntry => Boolean = entry =>
    types.isEmpty || types.contains(entry.mapping.code)
  filteredState(
    blocking(synchronized(topologyTransactionStore.toSeq)),
    entry =>
      filter0(entry) && (entry.transaction.isProposal == proposals) && filter1(entry) && filter2(
        entry
      ) && filter3(entry) && filter4(entry),
  )
}
override def findPositiveTransactions(
asOf: CantonTimestamp,
asOfInclusive: Boolean,
isProposal: Boolean,
types: Seq[TopologyMappingX.Code],
filterUid: Option[Seq[UniqueIdentifier]],
filterNamespace: Option[Seq[Namespace]],
)(implicit traceContext: TraceContext): Future[PositiveStoredTopologyTransactionsX] =
findTransactionsInStore(asOf, asOfInclusive, isProposal, types, filterUid, filterNamespace).map(
_.collectOfType[TopologyChangeOpX.Replace]
)
private def findTransactionsInStore(
asOf: CantonTimestamp,
asOfInclusive: Boolean,
isProposal: Boolean,
types: Seq[TopologyMappingX.Code],
filterUid: Option[Seq[UniqueIdentifier]],
filterNamespace: Option[Seq[Namespace]],
): Future[GenericStoredTopologyTransactionsX] = {
val timeFilter = asOfFilter(asOf, asOfInclusive)
def pathFilter(mapping: TopologyMappingX): Boolean = {
if (filterUid.isEmpty && filterNamespace.isEmpty)
true
else {
mapping.maybeUid.exists(uid => filterUid.exists(_.contains(uid))) ||
filterNamespace.exists(_.contains(mapping.namespace))
}
}
filteredState(
blocking(synchronized { topologyTransactionStore.toSeq }),
entry => {
timeFilter(entry.from.value, entry.until.map(_.value)) &&
types.contains(entry.mapping.code) &&
(pathFilter(entry.mapping)) &&
entry.transaction.isProposal == isProposal
},
)
}
override def findFirstSequencerStateForSequencer(
sequencerId: SequencerId
)(implicit
traceContext: TraceContext
): Future[
Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, SequencerDomainStateX]]
] = {
filteredState(
blocking(synchronized(topologyTransactionStore.toSeq)),
entry =>
!entry.transaction.isProposal &&
entry.operation == TopologyChangeOpX.Replace &&
entry.mapping
.select[SequencerDomainStateX]
.exists(m => m.allSequencers.contains(sequencerId)),
).map(
_.collectOfType[TopologyChangeOpX.Replace]
.collectOfMapping[SequencerDomainStateX]
.result
.sortBy(_.serial)
.headOption
)
}
override def findFirstMediatorStateForMediator(
mediatorId: MediatorId
)(implicit
traceContext: TraceContext
): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, MediatorDomainStateX]]] = {
filteredState(
blocking(synchronized(topologyTransactionStore.toSeq)),
entry =>
!entry.transaction.isProposal &&
entry.operation == TopologyChangeOpX.Replace &&
entry.mapping
.select[MediatorDomainStateX]
.exists(m => m.observers.contains(mediatorId) || m.active.contains(mediatorId)),
).map(
_.collectOfType[TopologyChangeOpX.Replace]
.collectOfMapping[MediatorDomainStateX]
.result
.sortBy(_.serial)
.headOption
)
}
def findFirstTrustCertificateForParticipant(
participant: ParticipantId
)(implicit
traceContext: TraceContext
): Future[
Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, DomainTrustCertificateX]]
] = {
filteredState(
blocking(synchronized(topologyTransactionStore.toSeq)),
entry =>
!entry.transaction.isProposal &&
entry.operation == TopologyChangeOpX.Replace &&
entry.mapping
.select[DomainTrustCertificateX]
.exists(_.participantId == participant),
).map(
_.collectOfType[TopologyChangeOpX.Replace]
.collectOfMapping[DomainTrustCertificateX]
.result
.sortBy(_.serial)
.headOption
)
}
override def findEssentialStateAtSequencedTime(
asOfInclusive: SequencedTime
)(implicit
traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] = {
// asOfInclusive is the effective time of the transaction that onboarded the member.
// 1. load all transactions with a sequenced time <= asOfInclusive, including proposals
filteredState(
blocking(synchronized {
topologyTransactionStore.toSeq
}),
entry => entry.sequenced <= asOfInclusive,
).map(
// 2. transform the result such that the validUntil fields are set as they were at maxEffective time of the snapshot
_.asSnapshotAtMaxEffectiveTime
// and remove proposals that have been superseded by full authorized transactions
.retainAuthorizedHistoryAndEffectiveProposals
)
}
/** store an initial set of topology transactions as given into the store */
override def bootstrap(
snapshot: GenericStoredTopologyTransactionsX
)(implicit traceContext: TraceContext): Future[Unit] = Future {
blocking {
synchronized {
topologyTransactionStore
.appendAll(
snapshot.result.map { tx =>
TopologyStoreEntry(
tx.transaction,
tx.sequenced,
tx.validFrom,
rejected = None,
until = tx.validUntil,
)
}
)
.discard
}
}
}
override def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit
traceContext: TraceContext
): Future[Seq[TopologyStoreX.Change]] =
Future {
blocking {
synchronized {
TopologyStoreX.accumulateUpcomingEffectiveChanges(
topologyTransactionStore
.filter(_.from.value >= asOfInclusive)
.map(_.toStoredTransaction)
.toSeq
)
}
}
}
override def maxTimestamp()(implicit
traceContext: TraceContext
): Future[Option[(SequencedTime, EffectiveTime)]] = Future {
blocking {
synchronized {
topologyTransactionStore.lastOption.map(x => (x.sequenced, x.from))
}
}
}
override def findDispatchingTransactionsAfter(
timestampExclusive: CantonTimestamp,
limit: Option[Int],
)(implicit
traceContext: TraceContext
): Future[GenericStoredTopologyTransactionsX] =
Future.successful(blocking(synchronized {
val selected = topologyTransactionStore
.filter(x =>
x.from.value > timestampExclusive && (!x.transaction.isProposal || x.until.isEmpty) && x.rejected.isEmpty
)
.map(_.toStoredTransaction)
.toSeq
StoredTopologyTransactionsX(limit.fold(selected)(selected.take))
}))
private def allTransactions(
includeRejected: Boolean = false
): Future[GenericStoredTopologyTransactionsX] =
filteredState(
blocking(synchronized(topologyTransactionStore.toSeq)),
_ => true,
includeRejected,
)
override def findStored(
asOfExclusive: CantonTimestamp,
transaction: GenericSignedTopologyTransactionX,
includeRejected: Boolean = false,
)(implicit
traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]] =
allTransactions(includeRejected).map(
_.result.findLast(tx => tx.hash == transaction.hash && tx.validFrom.value < asOfExclusive)
)
override def findStoredForVersion(
asOfExclusive: CantonTimestamp,
transaction: GenericTopologyTransactionX,
protocolVersion: ProtocolVersion,
)(implicit
traceContext: TraceContext
): Future[Option[GenericStoredTopologyTransactionX]] = {
val rpv = TopologyTransactionX.protocolVersionRepresentativeFor(protocolVersion)
allTransactions().map(
_.result.findLast(tx =>
tx.transaction.transaction == transaction && tx.transaction.representativeProtocolVersion == rpv && tx.validFrom.value < asOfExclusive
)
)
}
override def findParticipantOnboardingTransactions(
participantId: ParticipantId,
domainId: DomainId,
)(implicit
traceContext: TraceContext
): FutureUnlessShutdown[Seq[GenericSignedTopologyTransactionX]] = {
val res = blocking(synchronized {
topologyTransactionStore.filter(x =>
!x.transaction.isProposal && TopologyStoreX.initialParticipantDispatchingSet.contains(
x.mapping.code
)
)
})
FutureUnlessShutdown.pure(
TopologyStoreX.filterInitialParticipantDispatchingTransactions(
participantId,
domainId,
res.map(_.toStoredTransaction).toSeq,
)
)
}
}

View File

@ -4,7 +4,6 @@
package com.digitalasset.canton.domain.sequencing.sequencer.block
import cats.data.EitherT
import cats.syntax.bifunctor.*
import cats.syntax.either.*
import com.digitalasset.canton.SequencerCounter
import com.digitalasset.canton.concurrent.FutureSupervisor

View File

@ -31,6 +31,13 @@ service PackageManagementService {
// was already uploaded before.
// - Respond with a gRPC error
rpc UploadDarFile(UploadDarFileRequest) returns (UploadDarFileResponse);
// Performs the same checks that the UploadDarFile call performs, but doesn't
// upload the DAR and does not make it available on the whole ledger.
// This call may:
// - Succeed if the package is valid
// - Respond with a gRPC error if the package is not valid
rpc ValidateDarFile(ValidateDarFileRequest) returns (ValidateDarFileResponse);
}
message ListKnownPackagesRequest {}
@ -75,5 +82,21 @@ message UploadDarFileRequest {
string submission_id = 2;
}
// An empty message that is received when the upload operation succeeded.
// A message that is received when the upload operation succeeded.
message UploadDarFileResponse {}
// Performs the same checks that UploadDarFileRequest would perform, but doesn't
// upload the DAR.
message ValidateDarFileRequest {
// Contains a Daml archive DAR file, which in turn is a jar like zipped
// container for ``daml_lf`` archives. See further details in
// ``daml_lf.proto``.
// Required
bytes dar_file = 1;
// Unique submission identifier.
// Optional, defaults to a random identifier.
string submission_id = 2;
}
message ValidateDarFileResponse {}

View File

@ -28,6 +28,9 @@ final class PackageManagementServiceAuthorization(
override def uploadDarFile(request: UploadDarFileRequest): Future[UploadDarFileResponse] =
authorizer.requireAdminClaims(service.uploadDarFile)(request)
override def validateDarFile(request: ValidateDarFileRequest): Future[ValidateDarFileResponse] =
authorizer.requireAdminClaims(service.validateDarFile)(request)
override def bindService(): ServerServiceDefinition =
PackageManagementServiceGrpc.bindService(this, executionContext)

View File

@ -223,8 +223,6 @@ final class CommandsValidator(
} yield Submitters(actAs, readAs)
}
// TODO(i12279): Address usage of deprecated class DeduplicationTime
/** We validate only using current time because we set the currentTime as submitTime so no need to check both
*/
def validateDeduplicationPeriod(

View File

@ -19,12 +19,4 @@ trait IndexCompletionsService extends LedgerEndService {
applicationId: Ref.ApplicationId,
parties: Set[Ref.Party],
)(implicit loggingContext: LoggingContextWithTrace): Source[CompletionStreamResponse, NotUsed]
// TODO(i12282): Remove, as possible. This is solely serving KV Deduplication Offset -> Duration conversion
def getCompletions(
startExclusive: ParticipantOffset,
endInclusive: ParticipantOffset,
applicationId: Ref.ApplicationId,
parties: Set[Ref.Party],
)(implicit loggingContext: LoggingContextWithTrace): Source[CompletionStreamResponse, NotUsed]
}

View File

@ -14,7 +14,7 @@ import com.daml.ledger.api.v2.update_service.{
GetUpdatesResponse,
}
import com.daml.lf.data.Ref
import com.daml.lf.data.Ref.{ApplicationId, Party}
import com.daml.lf.data.Ref.ApplicationId
import com.daml.lf.data.Time.Timestamp
import com.daml.lf.transaction.GlobalKey
import com.daml.lf.value.Value
@ -173,17 +173,6 @@ final class TimedIndexService(delegate: IndexService, metrics: Metrics) extends
delegate.prune(pruneUpToInclusive, pruneAllDivulgedContracts, incompletReassignmentOffsets),
)
override def getCompletions(
startExclusive: ParticipantOffset,
endInclusive: ParticipantOffset,
applicationId: Ref.ApplicationId,
parties: Set[Party],
)(implicit loggingContext: LoggingContextWithTrace): Source[CompletionStreamResponse, NotUsed] =
Timed.source(
metrics.services.index.getCompletionsLimited,
delegate.getCompletions(startExclusive, endInclusive, applicationId, parties),
)
override def currentHealth(): HealthStatus =
delegate.currentHealth()

View File

@ -136,6 +136,15 @@ private[apiserver] final class ApiPackageManagementService private (
} yield dar
}
override def validateDarFile(request: ValidateDarFileRequest): Future[ValidateDarFileResponse] = {
val submissionId = submissionIdGenerator(request.submissionId)
LoggingContextWithTrace.withEnrichedLoggingContext(telemetry)(
logging.submissionId(submissionId)
) { implicit loggingContext: LoggingContextWithTrace =>
decodeAndValidate(request.darFile).map((_: Dar[Archive]) => ValidateDarFileResponse())
}
}
override def uploadDarFile(request: UploadDarFileRequest): Future[UploadDarFileResponse] = {
val submissionId = submissionIdGenerator(request.submissionId)
LoggingContextWithTrace.withEnrichedLoggingContext(telemetry)(

View File

@ -235,24 +235,6 @@ private[index] class IndexServiceImpl(
}
.buffered(metrics.index.completionsBufferSize, LedgerApiStreamsBufferSize)
override def getCompletions(
startExclusive: ParticipantOffset,
endInclusive: ParticipantOffset,
applicationId: Ref.ApplicationId,
parties: Set[Ref.Party],
)(implicit loggingContext: LoggingContextWithTrace): Source[CompletionStreamResponse, NotUsed] =
between(startExclusive, Some(endInclusive)) { (start, end) =>
dispatcher()
.startingAt(
start.getOrElse(Offset.beforeBegin),
RangeSource(commandCompletionsReader.getCommandCompletions(_, _, applicationId, parties)),
end,
)
.mapError(shutdownError)
.map(_._2)
}
.buffered(metrics.index.completionsBufferSize, LedgerApiStreamsBufferSize)
override def getActiveContracts(
transactionFilter: TransactionFilter,
verbose: Boolean,

View File

@ -12,6 +12,8 @@ import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll
import com.daml.ledger.api.v2.admin.package_management_service.{
PackageManagementServiceGrpc,
UploadDarFileRequest,
ValidateDarFileRequest,
ValidateDarFileResponse,
}
import com.daml.lf.archive.testing.Encode
import com.daml.lf.archive.{Dar, GenDarReader}
@ -118,6 +120,15 @@ class ApiPackageManagementServiceSpec
)
}
"validate a dar" in {
val apiService = createApiService()
apiService
.validateDarFile(ValidateDarFileRequest(ByteString.EMPTY, aSubmissionId))
.map { case ValidateDarFileResponse() =>
succeed
}
}
"close while uploading dar" in {
val writeService = mock[state.WritePackagesService]
when(

View File

@ -8,6 +8,7 @@ import com.daml.ledger.api.v2.admin.package_management_service.{
ListKnownPackagesRequest,
PackageDetails,
UploadDarFileRequest,
ValidateDarFileRequest,
}
import com.digitalasset.canton.ledger.client.LedgerClient
import com.google.protobuf.ByteString
@ -30,10 +31,21 @@ final class PackageManagementClient(service: PackageManagementServiceStub)(impli
.listKnownPackages(PackageManagementClient.listKnownPackagesRequest)
.map(_.packageDetails)
def uploadDarFile(darFile: ByteString, token: Option[String] = None): Future[Unit] =
def uploadDarFile(
darFile: ByteString,
token: Option[String] = None,
): Future[Unit] =
LedgerClient
.stub(service, token)
.uploadDarFile(UploadDarFileRequest(darFile))
.map(_ => ())
def validateDarFile(
darFile: ByteString,
token: Option[String] = None,
): Future[Unit] =
LedgerClient
.stub(service, token)
.validateDarFile(ValidateDarFileRequest(darFile))
.map(_ => ())
}

View File

@ -308,7 +308,7 @@ object PackageServiceErrors extends PackageServiceErrorGroup {
ErrorCategory.InvalidIndependentOfSystemState,
) {
final case class Error(
uploadedPackage: Ref.PackageId,
uploadedPackageId: Ref.PackageId,
existingPackage: Ref.PackageId,
packageVersion: Ref.PackageVersion,
)(implicit
@ -316,7 +316,7 @@ object PackageServiceErrors extends PackageServiceErrorGroup {
) extends DamlError(
cause = "A DAR with the same version number has previously been uploaded.",
extraContext = Map(
"uploadedPackage" -> uploadedPackage,
"uploadedPackageId" -> uploadedPackageId,
"existingPackage" -> existingPackage,
"packageVersion" -> packageVersion.toString,
),

View File

@ -3,6 +3,7 @@
package com.digitalasset.canton.http
import com.digitalasset.canton.ledger.api.tls.TlsConfiguration
import com.digitalasset.canton.pureconfigutils.HttpServerConfig
final case class HttpApiConfig(
@ -14,10 +15,11 @@ final case class HttpApiConfig(
) {
// TODO(#13303) Use directly instead of using JsonApiConfig as indirection
def toConfig: JsonApiConfig = {
def toConfig(tls: Option[TlsConfiguration]): JsonApiConfig = {
JsonApiConfig(
address = server.address,
httpPort = server.port,
httpsConfiguration = tls,
portFile = server.portFile,
staticContentConfig = staticContent,
allowNonHttps = allowInsecureTokens,

View File

@ -3,21 +3,15 @@
package com.digitalasset.canton.http
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.http.scaladsl.Http
import org.apache.pekko.http.scaladsl.Http.ServerBinding
import org.apache.pekko.http.scaladsl.server.Route
import org.apache.pekko.http.scaladsl.settings.ServerSettings
import org.apache.pekko.stream.Materializer
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.jwt.JwtDecoder
import com.daml.jwt.domain.Jwt
import com.digitalasset.canton.ledger.api.refinements.ApiTypes.ApplicationId
import com.daml.ledger.resources.{Resource, ResourceContext, ResourceOwner}
import com.daml.logging.LoggingContextOf
import com.daml.metrics.pekkohttp.HttpMetricsInterceptor
import com.daml.ports.{Port, PortFiles}
import com.digitalasset.canton.concurrent.DirectExecutionContext
import com.digitalasset.canton.http.HttpService.buildKeyStore
import com.digitalasset.canton.http.json.{
ApiValueToJsValueConverter,
DomainJsonDecoder,
@ -28,23 +22,34 @@ import com.digitalasset.canton.http.metrics.HttpApiMetrics
import com.digitalasset.canton.http.util.ApiValueToLfValueConverter
import com.digitalasset.canton.http.util.FutureUtil.*
import com.digitalasset.canton.http.util.Logging.InstanceUUID
import com.digitalasset.canton.ledger.api.refinements.ApiTypes.ApplicationId
import com.digitalasset.canton.ledger.api.tls.TlsConfiguration
import com.digitalasset.canton.ledger.client.LedgerClient as DamlLedgerClient
import com.digitalasset.canton.ledger.client.configuration.{
CommandClientConfiguration,
LedgerClientConfiguration,
}
import com.digitalasset.canton.ledger.client.services.pkg.PackageClient
import com.digitalasset.canton.ledger.client.LedgerClient as DamlLedgerClient
import com.digitalasset.canton.ledger.service.LedgerReader
import com.digitalasset.canton.ledger.service.LedgerReader.PackageStore
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.tracing.NoTracing
import io.grpc.Channel
import io.grpc.health.v1.health.{HealthCheckRequest, HealthGrpc}
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.http.scaladsl.Http.ServerBinding
import org.apache.pekko.http.scaladsl.server.Route
import org.apache.pekko.http.scaladsl.settings.ServerSettings
import org.apache.pekko.http.scaladsl.{ConnectionContext, Http, HttpsConnectionContext}
import org.apache.pekko.stream.Materializer
import scalaz.*
import scalaz.Scalaz.*
import java.nio.file.Path
import java.nio.file.{Files, Path}
import java.security.{Key, KeyStore}
import javax.net.ssl.SSLContext
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Using
class HttpService(
startSettings: StartSettings,
@ -151,7 +156,7 @@ class HttpService(
(encoder, decoder) = HttpService.buildJsonCodecs(packageService)
jsonEndpoints = new Endpoints(
allowNonHttps,
startSettings.httpsConfiguration.isEmpty,
HttpService.decodeJwt,
commandService,
contractsService,
@ -197,12 +202,18 @@ class HttpService(
EndpointsCompanion.notFound(logger),
)
binding <- liftET[HttpService.Error](
Http()
binding <- liftET[HttpService.Error] {
val serverBuilder = Http()
.newServerAt(address, httpPort.getOrElse(0))
.withSettings(settings)
httpsConfiguration
.fold(serverBuilder) { config =>
logger.info(s"Enabling HTTPS with $config")
serverBuilder.enableHttps(HttpService.httpsConnectionContext(config))
}
.bind(allEndpoints)
)
}
_ <- either(portFile.cata(f => HttpService.createPortFile(f, binding), \/-(()))): ET[Unit]
@ -216,6 +227,7 @@ class HttpService(
logger.info(s"Stopping JSON API server..., ${lc.makeString}")
binding.unbind().void
})
}
object HttpService {
@ -277,5 +289,65 @@ object HttpService {
PortFiles.write(file, Port(binding.localAddress.getPort)).liftErr(Error.apply)
}
def buildSSLContext(config: TlsConfiguration): SSLContext = {
import java.security.SecureRandom
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}
val keyStore = buildKeyStore(config)
val keyManagerFactory = KeyManagerFactory.getInstance("SunX509")
keyManagerFactory.init(keyStore, null)
val trustManagerFactory = TrustManagerFactory.getInstance("SunX509")
trustManagerFactory.init(keyStore)
val context = SSLContext.getInstance("TLS")
context.init(
keyManagerFactory.getKeyManagers,
trustManagerFactory.getTrustManagers,
new SecureRandom,
)
context
}
private def httpsConnectionContext(config: TlsConfiguration): HttpsConnectionContext =
ConnectionContext.httpsServer(buildSSLContext(config))
private def buildKeyStore(config: TlsConfiguration): KeyStore = buildKeyStore(
config.certChainFile.get.toPath,
config.privateKeyFile.get.toPath,
config.trustCollectionFile.get.toPath,
)
private def buildKeyStore(certFile: Path, privateKeyFile: Path, caCertFile: Path): KeyStore = {
import java.security.cert.CertificateFactory
val alias = "key" // This can be anything as long as it's consistent.
val cf = CertificateFactory.getInstance("X.509")
val cert = Using.resource(Files.newInputStream(certFile)) { cf.generateCertificate(_) }
val caCert = Using.resource(Files.newInputStream(caCertFile)) { cf.generateCertificate(_) }
val privateKey = loadPrivateKey(privateKeyFile)
val keyStore = KeyStore.getInstance("PKCS12")
keyStore.load(null)
keyStore.setCertificateEntry(alias, cert)
keyStore.setCertificateEntry(alias, caCert)
keyStore.setKeyEntry(alias, privateKey, null, Array(cert, caCert))
keyStore.setCertificateEntry("trusted-ca", caCert)
keyStore
}
private def loadPrivateKey(pkRsaPemFile: Path): Key = {
import org.bouncycastle.asn1.pkcs.PrivateKeyInfo
import org.bouncycastle.openssl.PEMParser
import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter
Using.resource(Files.newBufferedReader(pkRsaPemFile)) { reader =>
val pemParser = new PEMParser(reader)
val pkInfo = PrivateKeyInfo.getInstance(pemParser.readObject())
new JcaPEMKeyConverter().getPrivateKey(pkInfo)
}
}
final case class Error(message: String)
}

View File

@ -5,6 +5,7 @@ package com.digitalasset.canton.http
import org.apache.pekko.stream.ThrottleMode
import com.digitalasset.canton.http.WebsocketConfig as WSC
import com.digitalasset.canton.ledger.api.tls.TlsConfiguration
import scalaz.Show
import java.io.File
@ -15,13 +16,14 @@ import scala.concurrent.duration.*
// users should extend StartSettings or DefaultStartSettings themselves
// TODO(#13303): Move to LocalParticipantConfig
final case class JsonApiConfig(
address: String = com.digitalasset.canton.cliopts.Http.defaultAddress,
httpPort: Option[Int] = None,
portFile: Option[Path] = None,
staticContentConfig: Option[StaticContentConfig] = None,
allowNonHttps: Boolean = false,
wsConfig: Option[WebsocketConfig] = None,
debugLoggingOfHttpBodies: Boolean = false,
address: String = com.digitalasset.canton.cliopts.Http.defaultAddress,
httpPort: Option[Int] = None,
portFile: Option[Path] = None,
httpsConfiguration: Option[TlsConfiguration] = None,
staticContentConfig: Option[StaticContentConfig] = None,
allowNonHttps: Boolean = false,
wsConfig: Option[WebsocketConfig] = None,
debugLoggingOfHttpBodies: Boolean = false,
) extends StartSettings
object JsonApiConfig {

View File

@ -3,6 +3,8 @@
package com.digitalasset.canton.http
import com.digitalasset.canton.ledger.api.tls.TlsConfiguration
import java.nio.file.Path
// defined separately from Config so
@ -12,8 +14,8 @@ trait StartSettings {
val address: String
val httpPort: Option[Int]
val portFile: Option[Path]
val httpsConfiguration:Option[TlsConfiguration]
val wsConfig: Option[WebsocketConfig]
val allowNonHttps: Boolean
val staticContentConfig: Option[StaticContentConfig]
val debugLoggingOfHttpBodies: Boolean
}

View File

@ -88,7 +88,7 @@ private[http] final class RouteSetup(
inputJsVal(req).flatMap(x => withJwtPayload[JsValue, P](x).leftMap(it => it: Error))
def withJwtPayload[A, P](fa: (Jwt, A))(implicit
createFromUserToken: CreateFromUserToken[P],
createFromUserToken: CreateFromUserToken[P]
): EitherT[Future, Error, (Jwt, P, A)] =
decodeAndParsePayload[P](fa._1, decodeJwt, userManagementClient).map(t2 =>
(t2._1, t2._2, fa._2)
@ -155,10 +155,12 @@ private[http] final class RouteSetup(
)
}
private def isHttps(req: HttpRequest): Boolean = req.uri.scheme == "https"
private[this] def ensureHttpsForwarded(req: HttpRequest)(implicit
lc: LoggingContextOf[InstanceUUID with RequestID]
): Unauthorized \/ Unit =
if (allowNonHttps || isForwardedForHttps(req.headers)) \/-(())
if (allowNonHttps || isForwardedForHttps(req.headers) || isHttps(req)) \/-(())
else {
logger.warn(s"$nonHttpsErrorMessage, ${lc.makeString}")
\/-(())

View File

@ -137,7 +137,14 @@ class CantonLedgerApiServerFactory(
.initialize(
CantonLedgerApiServerWrapper.Config(
serverConfig = config.ledgerApi,
jsonApiConfig = config.httpLedgerApiExperimental.map(_.toConfig),
jsonApiConfig = config.httpLedgerApiExperimental.map(
_.toConfig(
config.ledgerApi.tls
.map(
LedgerApiServerConfig.ledgerApiServerTlsConfigFromCantonServerConfig
)
)
),
indexerConfig = parameters.ledgerApiServerParameters.indexer,
indexerHaConfig = indexerHaConfig,
participantId = participantId,

View File

@ -6,6 +6,7 @@ package com.digitalasset.canton.participant.admin
import cats.data.EitherT
import cats.implicits.toBifunctorOps
import cats.syntax.either.*
import cats.syntax.functor.*
import cats.syntax.functorFilter.*
import cats.syntax.parallel.*
import com.daml.daml_lf_dev.DamlLf
@ -64,6 +65,12 @@ trait DarService {
vetAllPackages: Boolean,
synchronizeVetting: Boolean,
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, DamlError, Hash]
def validateByteString(
payload: ByteString,
filename: String,
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, DamlError, Hash]
def getDar(hash: Hash)(implicit traceContext: TraceContext): Future[Option[PackageService.Dar]]
def listDars(limit: Option[Int])(implicit
traceContext: TraceContext
@ -308,13 +315,24 @@ class PackageService(
filename: String,
vetAllPackages: Boolean,
synchronizeVetting: Boolean,
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, DamlError, Hash] =
appendDar(
payload,
PathUtils.getFilenameWithoutExtension(Paths.get(filename).getFileName),
vetAllPackages,
synchronizeVetting,
)
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, DamlError, Hash] = {
val darName = PathUtils.getFilenameWithoutExtension(Paths.get(filename).getFileName)
for {
// Validate the packages before storing them in the DAR store or the package store
res <- validateByteStringToDar(payload, darName)
(hash, lengthValidatedName, dar) = res
_ <- storeValidatedPackagesAndSyncEvent(
dar.all,
lengthValidatedName.asString1GB,
LedgerSubmissionId.assertFromString(UUID.randomUUID().toString),
Some(
PackageService.Dar(DarDescriptor(hash, lengthValidatedName), payload.toByteArray)
),
vetAllPackages = vetAllPackages,
synchronizeVetting = synchronizeVetting,
)
} yield hash
}
private def catchUpstreamErrors[E](
attempt: Either[LfArchiveError, E]
@ -337,15 +355,21 @@ class PackageService(
Left(PackageServiceErrors.InternalError.Unhandled(e))
})
private def appendDar(
def validateByteString(
payload: ByteString,
darName: String,
vetAllPackages: Boolean,
synchronizeVetting: Boolean,
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, DamlError, Hash] = {
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, DamlError, Hash] =
validateByteStringToDar(payload, darName).map(_._1)
private def validateByteStringToDar(
payload: ByteString,
darName: String,
)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, DamlError, (Hash, String255, archive.Dar[DamlLf.Archive])] = {
val hash = hashOps.digest(HashPurpose.DarIdentifier, payload)
val stream = new ZipInputStream(payload.newInput())
val ret: EitherT[FutureUnlessShutdown, DamlError, Hash] = for {
val ret = for {
lengthValidatedName <- EitherT
.fromEither[FutureUnlessShutdown](
String255.create(darName, Some("DAR file name"))
@ -353,20 +377,8 @@ class PackageService(
.leftMap(PackageServiceErrors.Reading.InvalidDarFileName.Error(_))
dar <- catchUpstreamErrors(DarParser.readArchive(darName, stream))
.mapK(FutureUnlessShutdown.outcomeK)
// Validate the packages before storing them in the DAR store or the package store
_ <- validateArchives(dar).mapK(FutureUnlessShutdown.outcomeK)
_ <- storeValidatedPackagesAndSyncEvent(
dar.all,
lengthValidatedName.asString1GB,
LedgerSubmissionId.assertFromString(UUID.randomUUID().toString),
Some(
PackageService.Dar(DarDescriptor(hash, lengthValidatedName), payload.toByteArray)
),
vetAllPackages = vetAllPackages,
synchronizeVetting = synchronizeVetting,
)
} yield hash
} yield (hash, lengthValidatedName, dar)
ret.transform { res =>
stream.close()
res
@ -394,7 +406,7 @@ class PackageService(
private def validateArchives(archives: archive.Dar[DamlLf.Archive])(implicit
traceContext: TraceContext
): EitherT[Future, DamlError, Unit] =
): EitherT[Future, DamlError, PackageId] =
for {
mainPackage <- catchUpstreamErrors(Decode.decodeArchive(archives.main))
dependencies <- archives.dependencies
@ -417,7 +429,7 @@ class PackageService(
LoggingContextWithTrace(loggerFactory)
)
)
} yield ()
} yield mainPackage._1
def vetPackages(packages: Seq[PackageId], syncVetting: Boolean)(implicit
traceContext: TraceContext

View File

@ -39,18 +39,35 @@ class GrpcPackageService(
})
}
override def validateDar(request: ValidateDarRequest): Future[ValidateDarResponse] = {
implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
val ret =
service
.validateByteString(
request.data,
request.filename,
)
.map((hash: Hash) => ValidateDarResponse(hash.toHexString))
EitherTUtil.toFuture(
ret
.leftMap(ErrorCode.asGrpcError)
.onShutdown(Left(GrpcErrors.AbortedDueToShutdown.Error().asGrpcError))
)
}
override def uploadDar(request: UploadDarRequest): Future[UploadDarResponse] = {
implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
val ret = for {
hash <- service.appendDarFromByteString(
request.data,
request.filename,
request.vetAllPackages,
request.synchronizeVetting,
val ret =
for {
hash <- service.appendDarFromByteString(
request.data,
request.filename,
request.vetAllPackages,
request.synchronizeVetting,
)
} yield UploadDarResponse(
UploadDarResponse.Value.Success(UploadDarResponse.Success(hash.toHexString))
)
} yield UploadDarResponse(
UploadDarResponse.Value.Success(UploadDarResponse.Success(hash.toHexString))
)
EitherTUtil.toFuture(
ret
.leftMap(ErrorCode.asGrpcError)

View File

@ -1342,10 +1342,8 @@ class CantonSyncService(
partyNotifier,
missingKeysAlerter,
domainHandle.topologyClient,
domainCrypto,
trafficStateController,
ephemeral.recordOrderPublisher,
domainHandle.staticParameters.protocolVersion,
parameters.useNewTrafficControl,
),
missingKeysAlerter,

View File

@ -20,10 +20,7 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.participant.ParticipantNodeParameters
import com.digitalasset.canton.participant.domain.{DomainAliasResolution, DomainRegistryError}
import com.digitalasset.canton.participant.store.*
import com.digitalasset.canton.participant.topology.{
TopologyComponentFactory,
TopologyComponentFactoryX,
}
import com.digitalasset.canton.participant.topology.TopologyComponentFactory
import com.digitalasset.canton.protocol.StaticDomainParameters
import com.digitalasset.canton.resource.Storage
import com.digitalasset.canton.store.{IndexedDomain, IndexedStringStore, SequencedEventStore}
@ -296,7 +293,7 @@ class SyncDomainPersistentStateManagerX(
override def topologyFactoryFor(domainId: DomainId): Option[TopologyComponentFactory] = {
get(domainId).map(state =>
new TopologyComponentFactoryX(
new TopologyComponentFactory(
domainId,
crypto,
clock,

View File

@ -12,7 +12,7 @@ import com.digitalasset.canton.config.{
ProcessingTimeout,
TopologyConfig,
}
import com.digitalasset.canton.crypto.{Crypto, DomainSyncCryptoClient}
import com.digitalasset.canton.crypto.Crypto
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.participant.event.RecordOrderPublisher
@ -37,54 +37,7 @@ import com.digitalasset.canton.version.ProtocolVersion
import scala.concurrent.{ExecutionContext, Future}
trait TopologyComponentFactory {
def createTopologyClient(
protocolVersion: ProtocolVersion,
packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]],
)(implicit executionContext: ExecutionContext): DomainTopologyClientWithInit
def createCachingTopologyClient(
protocolVersion: ProtocolVersion,
packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]],
)(implicit
executionContext: ExecutionContext,
traceContext: TraceContext,
): Future[DomainTopologyClientWithInit]
def createTopologySnapshot(
asOf: CantonTimestamp,
packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]],
preferCaching: Boolean,
)(implicit executionContext: ExecutionContext): TopologySnapshot
def createHeadTopologySnapshot()(implicit
executionContext: ExecutionContext
): TopologySnapshot =
createTopologySnapshot(
CantonTimestamp.MaxValue,
StoreBasedDomainTopologyClient.NoPackageDependencies,
preferCaching = false,
)
def createTopologyProcessorFactory(
partyNotifier: LedgerServerPartyNotifier,
missingKeysAlerter: MissingKeysAlerter,
topologyClient: DomainTopologyClientWithInit,
// this is the client above, wrapped with some crypto methods, but only the base client is accessible, so we
// need to pass both.
// TODO(#15208) remove me with 3.0
syncCrypto: DomainSyncCryptoClient,
trafficStateController: TrafficStateController,
recordOrderPublisher: RecordOrderPublisher,
protocolVersion: ProtocolVersion,
useNewTrafficControl: Boolean,
): TopologyTransactionProcessorCommon.Factory
}
// TODO(#15161) collapse with base trait
class TopologyComponentFactoryX(
class TopologyComponentFactory(
domainId: DomainId,
crypto: Crypto,
clock: Clock,
@ -95,16 +48,14 @@ class TopologyComponentFactoryX(
topologyXConfig: TopologyConfig,
topologyStore: TopologyStoreX[DomainStore],
loggerFactory: NamedLoggerFactory,
) extends TopologyComponentFactory {
) {
override def createTopologyProcessorFactory(
def createTopologyProcessorFactory(
partyNotifier: LedgerServerPartyNotifier,
missingKeysAlerter: MissingKeysAlerter,
topologyClient: DomainTopologyClientWithInit,
syncCrypto: DomainSyncCryptoClient,
trafficStateController: TrafficStateController,
recordOrderPublisher: RecordOrderPublisher,
protocolVersion: ProtocolVersion,
useNewTrafficControl: Boolean,
): TopologyTransactionProcessorCommon.Factory = new TopologyTransactionProcessorCommon.Factory {
override def create(
@ -139,7 +90,7 @@ class TopologyComponentFactoryX(
}
}
override def createTopologyClient(
def createTopologyClient(
protocolVersion: ProtocolVersion,
packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]],
)(implicit executionContext: ExecutionContext): DomainTopologyClientWithInit =
@ -154,7 +105,7 @@ class TopologyComponentFactoryX(
loggerFactory,
)
override def createCachingTopologyClient(
def createCachingTopologyClient(
protocolVersion: ProtocolVersion,
packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]],
)(implicit
@ -173,7 +124,7 @@ class TopologyComponentFactoryX(
loggerFactory,
)
override def createTopologySnapshot(
def createTopologySnapshot(
asOf: CantonTimestamp,
packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]],
preferCaching: Boolean,
@ -190,4 +141,12 @@ class TopologyComponentFactoryX(
snapshot
}
def createHeadTopologySnapshot()(implicit
executionContext: ExecutionContext
): TopologySnapshot =
createTopologySnapshot(
CantonTimestamp.MaxValue,
StoreBasedDomainTopologyClient.NoPackageDependencies,
preferCaching = false,
)
}

View File

@ -147,6 +147,30 @@ class PackageServiceTest extends AsyncWordSpec with BaseTest {
}
}
"validate DAR and packages from bytes" in withEnv { env =>
import env.*
val expectedPackageIdsAndState = examplePackages
.map(DamlPackageStore.readPackageId)
.map(PackageDescription(_, cantonExamplesDescription))
for {
hash <- sut
.validateByteString(
ByteString.copyFrom(bytes),
"some/path/CantonExamples.dar",
)
.value
.map(_.valueOrFail("couldn't validate a dar file"))
.failOnShutdown
packages <- packageStore.listPackages()
dar <- packageStore.getDar(hash)
} yield {
expectedPackageIdsAndState.foreach(packages should not contain _)
dar shouldBe None
}
}
"fetching dependencies" in withEnv { env =>
import env.*
@ -175,6 +199,29 @@ class PackageServiceTest extends AsyncWordSpec with BaseTest {
}
}
"validateDar validates the package" in withEnv { env =>
import env.*
val badDarPath = PackageServiceTest.badDarPath
val payload = BinaryFileUtil
.readByteStringFromFile(badDarPath)
.valueOrFail(s"could not load bad dar file at $badDarPath")
for {
error <- leftOrFail(
sut.validateByteString(
payload,
badDarPath,
)
)("append illformed.dar").failOnShutdown
} yield {
error match {
case validation: PackageServiceErrors.Validation.ValidationError.Error =>
validation.validationError shouldBe a[com.daml.lf.validation.ETypeMismatch]
case _ => fail(s"$error is not a validation error")
}
}
}
"appendDar validates the package" in withEnv { env =>
import env.*

View File

@ -1 +1 @@
20240419.13137.v501a60ec
20240419.13141.v8e75f5c1

View File

@ -707,7 +707,7 @@ abstract class UpgradesSpec(val suffix: String)
case _ => {}
}
cantonLogSrc should include regex (
s"KNOWN_DAR_VERSION\\(.+,.+\\): A DAR with the same version number has previously been uploaded. err-context:\\{existingPackage=$testPackageV2Id, location=.+, packageVersion=$packageVersion, uploadedPackage=$testPackageV1Id\\}"
s"KNOWN_DAR_VERSION\\(.+,.+\\): A DAR with the same version number has previously been uploaded. err-context:\\{existingPackage=$testPackageV2Id, location=.+, packageVersion=$packageVersion, uploadedPackageId=$testPackageV1Id\\}"
)
uploadV2Result match {
case None =>