update canton to 20240709.13636.vd03d4972 (#19547)

Co-authored-by: Azure Pipelines Daml Build <support@digitalasset.com>
Co-authored-by: Remy Haemmerle <Remy.Haemmerle@daml.com>
This commit is contained in:
azure-pipelines[bot] 2024-07-10 09:23:38 +00:00 committed by GitHub
parent 2d6b7f87bc
commit 5078d1e043
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
89 changed files with 656 additions and 596 deletions

View File

@ -11,7 +11,7 @@ import "scalapb/scalapb.proto";
// Schema definition for the exported ACS snapshot
message ActiveContract {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// The ID of the domain where the contract was assigned at the time of the export
// Required

View File

@ -5,8 +5,8 @@ object ProtocolVersionAnnotation {
/** Type-level marker for whether a protocol version is stable */
sealed trait Status
/** Marker for unstable protocol versions */
sealed trait Unstable extends Status
/** Marker for alpha protocol versions */
sealed trait Alpha extends Status
/** Marker for stable protocol versions */
sealed trait Stable extends Status
@ -19,17 +19,17 @@ object ProtocolVersionAnnotation {
* that are used in some stable protocol versions
*
* Implements both [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Stable]] and
* [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Unstable]] means that [[StableProtoVersion]]
* messages can be used in stable and unstable protocol versions.
* [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Alpha]] means that [[StableProtoVersion]]
* messages can be used in stable and alpha protocol versions.
*/
trait StableProtoVersion
extends ProtocolVersionAnnotation.Stable
with ProtocolVersionAnnotation.Unstable
with ProtocolVersionAnnotation.Alpha
/** Marker trait for Protobuf messages generated by scalapb
* that are used only unstable protocol versions
* that are used only in alpha protocol versions
*/
trait UnstableProtoVersion extends ProtocolVersionAnnotation.Unstable
trait AlphaProtoVersion extends ProtocolVersionAnnotation.Alpha
/** Marker trait for Protobuf messages generated by scalapb
* that are used only to persist data in node storage.

View File

@ -4,12 +4,23 @@
package com.digitalasset.canton.admin.api.client.commands
import com.digitalasset.canton.domain.sequencing.sequencer.block.bftordering.admin.EnterpriseSequencerBftAdminData.{
OrderingTopology,
PeerNetworkStatus,
endpointToProto,
}
import com.digitalasset.canton.networking.Endpoint
import com.digitalasset.canton.sequencer.admin.v30
import com.digitalasset.canton.sequencer.admin.v30.*
import com.digitalasset.canton.sequencer.admin.v30.SequencerBftAdministrationServiceGrpc.SequencerBftAdministrationServiceStub
import com.digitalasset.canton.sequencer.admin.v30.{
AddPeerEndpointRequest,
AddPeerEndpointResponse,
GetOrderingTopologyRequest,
GetOrderingTopologyResponse,
GetPeerNetworkStatusRequest,
GetPeerNetworkStatusResponse,
RemovePeerEndpointRequest,
RemovePeerEndpointResponse,
SequencerBftAdministrationServiceGrpc,
}
import io.grpc.ManagedChannel
import scala.concurrent.Future
@ -19,11 +30,11 @@ object EnterpriseSequencerBftAdminCommands {
abstract class BaseSequencerBftAdministrationCommand[Req, Rep, Res]
extends GrpcAdminCommand[Req, Rep, Res] {
override type Svc =
v30.SequencerBftAdministrationServiceGrpc.SequencerBftAdministrationServiceStub
SequencerBftAdministrationServiceStub
override def createService(
channel: ManagedChannel
): v30.SequencerBftAdministrationServiceGrpc.SequencerBftAdministrationServiceStub =
v30.SequencerBftAdministrationServiceGrpc.stub(channel)
): SequencerBftAdministrationServiceStub =
SequencerBftAdministrationServiceGrpc.stub(channel)
}
final case class AddPeerEndpoint(endpoint: Endpoint)
@ -38,7 +49,7 @@ object EnterpriseSequencerBftAdminCommands {
)
override def submitRequest(
service: v30.SequencerBftAdministrationServiceGrpc.SequencerBftAdministrationServiceStub,
service: SequencerBftAdministrationServiceStub,
request: AddPeerEndpointRequest,
): Future[AddPeerEndpointResponse] =
service.addPeerEndpoint(request)
@ -61,7 +72,7 @@ object EnterpriseSequencerBftAdminCommands {
)
override def submitRequest(
service: v30.SequencerBftAdministrationServiceGrpc.SequencerBftAdministrationServiceStub,
service: SequencerBftAdministrationServiceStub,
request: RemovePeerEndpointRequest,
): Future[RemovePeerEndpointResponse] =
service.removePeerEndpoint(request)
@ -84,7 +95,7 @@ object EnterpriseSequencerBftAdminCommands {
)
override def submitRequest(
service: v30.SequencerBftAdministrationServiceGrpc.SequencerBftAdministrationServiceStub,
service: SequencerBftAdministrationServiceStub,
request: GetPeerNetworkStatusRequest,
): Future[GetPeerNetworkStatusResponse] =
service.getPeerNetworkStatus(request)
@ -94,4 +105,27 @@ object EnterpriseSequencerBftAdminCommands {
): Either[String, PeerNetworkStatus] =
PeerNetworkStatus.fromProto(response)
}
final case class GetOrderingTopology()
extends BaseSequencerBftAdministrationCommand[
GetOrderingTopologyRequest,
GetOrderingTopologyResponse,
OrderingTopology,
] {
override def createRequest(): Either[String, GetOrderingTopologyRequest] = Right(
GetOrderingTopologyRequest.of()
)
override def submitRequest(
service: SequencerBftAdministrationServiceStub,
request: GetOrderingTopologyRequest,
): Future[GetOrderingTopologyResponse] =
service.getOrderingTopology(request)
override def handleResponse(
response: GetOrderingTopologyResponse
): Either[String, OrderingTopology] =
OrderingTopology.fromProto(response)
}
}

View File

@ -238,7 +238,7 @@ final case class RetentionPeriodDefaults(
* @param manualStart If set to true, the nodes have to be manually started via console (default false)
* @param startupParallelism Start up to N nodes in parallel (default is num-threads)
* @param nonStandardConfig don't fail config validation on non-standard configuration settings
* @param devVersionSupport If true, allow domain nodes to use unstable protocol versions and participant nodes to connect to such domains
* @param alphaVersionSupport If true, allow domain nodes to use alpha protocol versions and participant nodes to connect to such domains
* @param betaVersionSupport If true, allow domain nodes to use beta protocol versions and participant nodes to connect to such domains
* @param timeouts Sets the timeouts used for processing and console
* @param portsFile A ports file name, where the ports of all participants will be written to after startup
@ -252,7 +252,7 @@ final case class CantonParameters(
// TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version
nonStandardConfig: Boolean = true,
// TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version
devVersionSupport: Boolean = true,
alphaVersionSupport: Boolean = true,
betaVersionSupport: Boolean = false,
portsFile: Option[String] = None,
timeouts: TimeoutSettings = TimeoutSettings(),
@ -380,7 +380,7 @@ trait CantonConfig {
participantParameters.transferTimeProofFreshnessProportion,
protocolConfig = ParticipantProtocolConfig(
minimumProtocolVersion = participantParameters.minimumProtocolVersion.map(_.unwrap),
devVersionSupport = participantParameters.devVersionSupport,
alphaVersionSupport = participantParameters.alphaVersionSupport,
betaVersionSupport = participantParameters.BetaVersionSupport,
dontWarnOnDeprecatedPV = participantParameters.dontWarnOnDeprecatedPV,
),
@ -523,7 +523,7 @@ private[canton] object CantonNodeParameterConverter {
def protocol(parent: CantonConfig, config: ProtocolConfig): CantonNodeParameters.Protocol =
CantonNodeParameters.Protocol.Impl(
devVersionSupport = parent.parameters.devVersionSupport || config.devVersionSupport,
alphaVersionSupport = parent.parameters.alphaVersionSupport || config.alphaVersionSupport,
betaVersionSupport = parent.parameters.betaVersionSupport || config.betaVersionSupport,
dontWarnOnDeprecatedPV = config.dontWarnOnDeprecatedPV,
)

View File

@ -184,14 +184,14 @@ object CommunityConfigValidations
name: String,
nodeTypeName: String,
nonStandardConfig: Boolean,
devVersionSupport: Boolean,
alphaVersionSupport: Boolean,
): Validated[NonEmpty[Seq[String]], Unit] = {
Validated.cond(
nonStandardConfig || !devVersionSupport,
nonStandardConfig || !alphaVersionSupport,
(),
NonEmpty(
Seq,
s"Enabling dev-version-support for $nodeTypeName $name requires you to explicitly set canton.parameters.non-standard-config = yes",
s"Enabling alpha-version-support for $nodeTypeName $name requires you to explicitly set canton.parameters.non-standard-config = yes",
),
)
}
@ -201,7 +201,7 @@ object CommunityConfigValidations
name = name.unwrap,
nodeTypeName = nodeConfig.nodeTypeName,
nonStandardConfig = config.parameters.nonStandardConfig,
devVersionSupport = nodeConfig.parameters.devVersionSupport,
alphaVersionSupport = nodeConfig.parameters.alphaVersionSupport,
)
}

View File

@ -23,7 +23,10 @@ import com.digitalasset.canton.domain.sequencing.config.{
RemoteSequencerConfig,
SequencerNodeConfigCommon,
}
import com.digitalasset.canton.domain.sequencing.sequencer.block.bftordering.admin.EnterpriseSequencerBftAdminData.PeerNetworkStatus
import com.digitalasset.canton.domain.sequencing.sequencer.block.bftordering.admin.EnterpriseSequencerBftAdminData.{
OrderingTopology,
PeerNetworkStatus,
}
import com.digitalasset.canton.domain.sequencing.sequencer.{
SequencerClients,
SequencerPruningStatus,
@ -1162,6 +1165,12 @@ abstract class SequencerReference(
consoleEnvironment.run {
runner.adminCommand(EnterpriseSequencerBftAdminCommands.GetPeerNetworkStatus(endpoints))
}
@Help.Summary("Get the currently active ordering topology")
def get_ordering_topology(): OrderingTopology =
consoleEnvironment.run {
runner.adminCommand(EnterpriseSequencerBftAdminCommands.GetOrderingTopology())
}
}
}

View File

@ -562,9 +562,7 @@ class LocalSecretKeyAdministration(
.leftMap(err => s"Error retrieving private key [$fingerprint] $err")
publicKey <- crypto.cryptoPublicStore
.publicKey(fingerprint)
.leftMap(_.toString)
.subflatMap(_.toRight(s"no public key found for [$fingerprint]"))
.leftMap(err => s"Error retrieving public key [$fingerprint] $err")
.toRight(s"Error retrieving public key [$fingerprint]: no public key found")
keyPair: CryptoKeyPair[PublicKey, PrivateKey] = (publicKey, privateKey) match {
case (pub: SigningPublicKey, pkey: SigningPrivateKey) =>
new SigningKeyPair(pub, pkey)

View File

@ -206,7 +206,7 @@ class ManagedNodes[
for {
cAndP <- configAndParams(name)
(config, params) = cAndP
_ <- runMigration(name, config.storage, params.devVersionSupport)
_ <- runMigration(name, config.storage, params.alphaVersionSupport)
} yield ()
}
)
@ -216,7 +216,7 @@ class ManagedNodes[
for {
cAndP <- configAndParams(name)
(config, params) = cAndP
_ <- runRepairMigration(name, config.storage, params.devVersionSupport)
_ <- runRepairMigration(name, config.storage, params.alphaVersionSupport)
} yield ()
}
)
@ -299,7 +299,7 @@ class ManagedNodes[
params: CantonNodeParameters,
): Either[StartupError, Unit] =
runIfUsingDatabase[Id](storageConfig) { dbConfig =>
val migrations = migrationsFactory.create(dbConfig, name, params.devVersionSupport)
val migrations = migrationsFactory.create(dbConfig, name, params.alphaVersionSupport)
import TraceContext.Implicits.Empty.*
logger.info(s"Setting up database schemas for $name")
@ -332,11 +332,11 @@ class ManagedNodes[
private def runMigration(
name: InstanceName,
storageConfig: StorageConfig,
devVersionSupport: Boolean,
alphaVersionSupport: Boolean,
): Either[StartupError, Unit] =
runIfUsingDatabase[Id](storageConfig) { dbConfig =>
migrationsFactory
.create(dbConfig, name, devVersionSupport)
.create(dbConfig, name, alphaVersionSupport)
.migrateDatabase()
.leftMap(FailedDatabaseMigration(name, _))
.value
@ -346,11 +346,11 @@ class ManagedNodes[
private def runRepairMigration(
name: InstanceName,
storageConfig: StorageConfig,
devVersionSupport: Boolean,
alphaVersionSupport: Boolean,
): Either[StartupError, Unit] =
runIfUsingDatabase[Id](storageConfig) { dbConfig =>
migrationsFactory
.create(dbConfig, name, devVersionSupport)
.create(dbConfig, name, alphaVersionSupport)
.repairFlywayMigration()
.leftMap(FailedDatabaseRepairMigration(name, _))
.value

View File

@ -3,15 +3,15 @@
// anymore.
_shared {
participant-dev-params = {
dev-version-support = true
alpha-version-support = true
}
// domain parameters config
domain-dev-params = {
dev-version-support = true
alpha-version-support = true
protocol-version = dev
}
}
canton.parameters {
non-standard-config = yes
dev-version-support = yes
alpha-version-support = yes
}

View File

@ -0,0 +1,14 @@
canton.parameters {
# turn on non-standard configuration support
non-standard-config = yes
# turn on alpha version support for domain nodes
alpha-version-support = yes
}
canton.participants.participant1.parameters = {
# enable alpha version support on the participant (this will allow the participant to connect to a domain running protocol version dev or any other alpha protocol)
# and it will turn on support for unsafe daml lf dev versions
# not to be used in production and requires you to define non-standard-config = yes
alpha-version-support = yes
}

View File

@ -2,13 +2,13 @@ canton.parameters {
# turn on non-standard configuration support
non-standard-config = yes
# turn on support of development version support for domain nodes
dev-version-support = yes
# turn on alpha version support for domain nodes
alpha-version-support = yes
}
canton.participants.participant1.parameters = {
# enable dev version on the participant (this will allow the participant to connect to a domain with dev protocol version)
# enable alpha version support on the participant (this will allow the participant to connect to a domain with dev protocol version)
# and it will turn on support for unsafe daml lf dev versions
# not to be used in production and requires you to define non-standard-config = yes
dev-version-support = yes
alpha-version-support = yes
}

View File

@ -78,7 +78,7 @@ class NodesTest extends FixtureAnyWordSpec with BaseTest with HasExecutionContex
override def batching: BatchingConfig = BatchingConfig()
override def caching: CachingConfigs = CachingConfigs()
override def useUnifiedSequencer: Boolean = false
override def devVersionSupport: Boolean = false
override def alphaVersionSupport: Boolean = false
override def watchdog: Option[WatchdogConfig] = None
}
}
@ -97,7 +97,7 @@ class NodesTest extends FixtureAnyWordSpec with BaseTest with HasExecutionContex
nonStandardConfig: Boolean = false,
dbMigrateAndStart: Boolean = false,
disableUpgradeValidation: Boolean = false,
devVersionSupport: Boolean = false,
alphaVersionSupport: Boolean = false,
betaVersionSupport: Boolean = false,
dontWarnOnDeprecatedPV: Boolean = false,
initialProtocolVersion: ProtocolVersion = testedProtocolVersion,

View File

@ -202,7 +202,7 @@ message AcknowledgeSignedRequest {
message AcknowledgeSignedResponse {}
message DownloadTopologyStateForInitRequest {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
string member = 1;
}
@ -212,7 +212,7 @@ message DownloadTopologyStateForInitResponse {
}
message GetTrafficStateForMemberRequest {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// Member for which to get the traffic state
string member = 1;
@ -221,7 +221,7 @@ message GetTrafficStateForMemberRequest {
}
message GetTrafficStateForMemberResponse {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
com.digitalasset.canton.protocol.v30.TrafficState traffic_state = 1;
}

View File

@ -30,7 +30,7 @@ message GlobalKey {
}
message AggregationRule {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// Members who are allowed to send a request for the underlying aggregation.
// Must contain SubmissionRequest.sender, otherwise the request is rejected.

View File

@ -11,7 +11,7 @@ import "scalapb/scalapb.proto";
// Definition of the ConfirmationResponse message which is shared between the transaction and transfer protocol
message LocalVerdict {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
VerdictCode code = 1;
google.rpc.Status reason = 2; // ok iff code is approve
@ -25,7 +25,7 @@ message LocalVerdict {
}
message ConfirmationResponse {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
int64 request_id = 1; // in microseconds of UTC time since Unix epoch
string sender = 2;

View File

@ -47,7 +47,7 @@ enum OnboardingRestriction {
}
message DynamicDomainParameters {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
google.protobuf.Duration confirmation_response_timeout = 1;
google.protobuf.Duration mediator_reaction_timeout = 2;

View File

@ -35,7 +35,7 @@ message MediatorReject {
}
message Verdict {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
oneof some_verdict {
google.protobuf.Empty approve = 1;
@ -46,7 +46,7 @@ message Verdict {
// This covers transactions and transfers
message ConfirmationResultMessage {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
string domain_id = 1;
com.digitalasset.canton.protocol.v30.ViewType view_type = 2;

View File

@ -9,7 +9,7 @@ import "google/protobuf/wrappers.proto";
import "scalapb/scalapb.proto";
message OrderingRequest {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
string sequencer_uid = 1; // UID of the sequencer requesting ordering of the request
google.protobuf.BytesValue content = 2; // Content of the request to be ordered

View File

@ -63,7 +63,7 @@ message ViewCommonData {
}
message Informee {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
string party = 1;
int32 weight = 2; // optional: only set if party is confirming
@ -87,14 +87,14 @@ message ViewParticipantMessage {
// InformeeMessage
message InformeeMessage {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
FullInformeeTree full_informee_tree = 1;
com.digitalasset.canton.crypto.v30.Signature submitting_participant_signature = 2;
}
message LightTransactionViewTree {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
GenTransactionTree tree = 1;
repeated bytes subview_hashes = 2;
@ -118,7 +118,7 @@ message InputContract {
}
message CommonMetadata {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
com.digitalasset.canton.crypto.v30.Salt salt = 1;
// this used to contain a confirmation policy (Signatory or VIP) that no longer exists
@ -129,7 +129,7 @@ message CommonMetadata {
}
message SubmitterMetadata {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
com.digitalasset.canton.crypto.v30.Salt salt = 1;
repeated string act_as = 2;
@ -153,7 +153,7 @@ message SessionKeyLookup {
}
message EncryptedViewMessage {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
bytes view_tree = 1;

View File

@ -21,7 +21,7 @@ message TransferId {
// Messages sent by a participant as part of the transfer protocol
message TransferOutCommonData {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
com.digitalasset.canton.crypto.v30.Salt salt = 1;
string source_domain = 2;
@ -33,7 +33,7 @@ message TransferOutCommonData {
}
message TransferViewTree {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
bytes common_data = 1;
BlindableNode participant_data = 2;
@ -47,14 +47,14 @@ message TransferInMediatorMessage {
}
message TransferOutMediatorMessage {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
TransferViewTree tree = 1;
com.digitalasset.canton.crypto.v30.Signature submitting_participant_signature = 2;
}
message TransferInCommonData {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
com.digitalasset.canton.crypto.v30.Salt salt = 1;
string target_domain = 2;
@ -65,7 +65,7 @@ message TransferInCommonData {
}
message TransferSubmitterMetadata {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
string submitter = 1;
string submitting_participant_uid = 2;
@ -76,7 +76,7 @@ message TransferSubmitterMetadata {
}
message TransferOutView {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
com.digitalasset.canton.crypto.v30.Salt salt = 1;
string target_domain = 3;
@ -88,7 +88,7 @@ message TransferOutView {
}
message TransferInView {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
com.digitalasset.canton.crypto.v30.Salt salt = 1;
v30.SerializableContract contract = 3;

View File

@ -73,7 +73,7 @@ message Handshake {
// Submission cost computed by the sender for a SubmissionRequest
message SequencingSubmissionCost {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// Computed cost
int64 cost = 1;
}
@ -90,7 +90,7 @@ message StaticDomainParameters {
}
message Envelope {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// Contains a v30.EnvelopeContent if signatures are empty and a v30.TypedSignedProtocolMessageContent otherwise
bytes content = 1;
@ -102,13 +102,13 @@ message Envelope {
}
message Batch {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
repeated Envelope envelopes = 1;
}
message CompressedBatch {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
enum CompressionAlgorithm {
COMPRESSION_ALGORITHM_UNSPECIFIED = 0;
@ -121,7 +121,7 @@ message CompressedBatch {
}
message SequencedEvent {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// A sequence number for all events emitted to a subscriber. Starting at 0.
// The same event may have different counter values for different recipients.
@ -158,7 +158,7 @@ message SequencedEvent {
// Messages used for synchronization between sequencer nodes
message SubmissionRequest {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// Sender of the request.
// This request must be wrapped in a SignedContent and

View File

@ -8,7 +8,7 @@ package com.digitalasset.canton.protocol.v30;
import "scalapb/scalapb.proto";
message DynamicSequencingParameters {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// Sequencing dynamic domain parameters can only be interpreted by a sequencer implementation
// and are opaque to the rest of the domain.

View File

@ -10,7 +10,7 @@ import "google/protobuf/wrappers.proto";
import "scalapb/scalapb.proto";
message SignedContent {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
google.protobuf.BytesValue content = 1;
repeated com.digitalasset.canton.crypto.v30.Signature signatures = 2;

View File

@ -14,7 +14,7 @@ import "scalapb/scalapb.proto";
// Messages depending on both participant_transaction.proto and participant_transfer.proto.
message TypedSignedProtocolMessageContent {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
oneof some_signed_protocol_message {
bytes confirmation_response = 2;
@ -25,14 +25,14 @@ message TypedSignedProtocolMessageContent {
}
message SignedProtocolMessage {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
repeated com.digitalasset.canton.crypto.v30.Signature signature = 1;
bytes typed_signed_protocol_message_content = 2;
}
message EnvelopeContent {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
oneof some_envelope_content {
v30.InformeeMessage informee_message = 1;

View File

@ -323,7 +323,7 @@ message TopologyMapping {
}
message TopologyTransaction {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
Enums.TopologyChangeOp operation = 1;
@ -337,7 +337,7 @@ message TopologyTransaction {
}
message SignedTopologyTransaction {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// serialized topology transaction (protobuf bytestring)
bytes transaction = 1;
@ -359,7 +359,7 @@ message SignedTopologyTransaction {
* including the member that submitted the broadcast.
*/
message TopologyTransactionsBroadcast {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
message Broadcast {
string broadcast_id = 1;

View File

@ -10,7 +10,7 @@ import "google/protobuf/wrappers.proto";
import "scalapb/scalapb.proto";
message TrafficControlParameters {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// In bytes, the maximum amount of base traffic that can be accumulated
uint64 max_base_traffic_amount = 1;
@ -34,7 +34,7 @@ message TrafficControlParameters {
// Message representing a traffic receipt included in SequencedEvent receipts to update sender about
// the traffic consumed state after sequencing of the event
message TrafficReceipt {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// Cost effectively consumed by this specific event
uint64 consumed_cost = 1;
// Total amount of extra traffic consumed
@ -45,7 +45,7 @@ message TrafficReceipt {
// Message representing traffic consumed by a member at a given point in time
message TrafficConsumed {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// Member consuming the traffic
string member = 1;
// Total extra traffic consumed
@ -60,7 +60,7 @@ message TrafficConsumed {
// Message representing a traffic purchase made on behalf of a member
message TrafficPurchased {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// Member receiving the traffic
string member = 1;
// Serial of the update
@ -88,7 +88,7 @@ message TrafficState {
}
message SetTrafficPurchasedMessage {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
// Member to update the balance for
string member = 1;

View File

@ -242,10 +242,10 @@ object CommunityStorageConfig {
trait DbConfig extends StorageConfig with PrettyPrinting {
/** Function to combine the defined migration path together with dev version changes */
final def buildMigrationsPaths(devVersionSupport: Boolean): Seq[String] = {
final def buildMigrationsPaths(alphaVersionSupport: Boolean): Seq[String] = {
if (parameters.migrationsPaths.nonEmpty)
parameters.migrationsPaths
else if (devVersionSupport)
else if (alphaVersionSupport)
Seq(stableMigrationPath, devMigrationPath)
else Seq(stableMigrationPath)
}

View File

@ -46,9 +46,7 @@ class Crypto(
): EitherT[FutureUnlessShutdown, SigningKeyGenerationError, SigningPublicKey] =
for {
publicKey <- privateCrypto.generateSigningKey(scheme, name)
_ <- cryptoPublicStore
.storeSigningKey(publicKey, name)
.leftMap[SigningKeyGenerationError](SigningKeyGenerationError.SigningPublicStoreError)
_ <- EitherT.right(cryptoPublicStore.storeSigningKey(publicKey, name))
} yield publicKey
/** Helper method to generate a new encryption key pair and store the public key in the public store as well. */
@ -60,11 +58,7 @@ class Crypto(
): EitherT[FutureUnlessShutdown, EncryptionKeyGenerationError, EncryptionPublicKey] =
for {
publicKey <- privateCrypto.generateEncryptionKey(scheme, name)
_ <- cryptoPublicStore
.storeEncryptionKey(publicKey, name)
.leftMap[EncryptionKeyGenerationError](
EncryptionKeyGenerationError.EncryptionPublicStoreError
)
_ <- EitherT.right(cryptoPublicStore.storeEncryptionKey(publicKey, name))
} yield publicKey
override def onClosed(): Unit =

View File

@ -457,9 +457,15 @@ object EncryptionPublicKey
final case class EncryptionPublicKeyWithName(
override val publicKey: EncryptionPublicKey,
override val name: Option[KeyName],
) extends PublicKeyWithName {
) extends PublicKeyWithName
with PrettyPrinting {
type K = EncryptionPublicKey
override val id: Fingerprint = publicKey.id
override def pretty: Pretty[EncryptionPublicKeyWithName] =
prettyOfClass(param("publicKey", _.publicKey), param("name", _.name))
}
object EncryptionPublicKeyWithName {

View File

@ -387,9 +387,15 @@ object SigningPublicKey
final case class SigningPublicKeyWithName(
override val publicKey: SigningPublicKey,
override val name: Option[KeyName],
) extends PublicKeyWithName {
) extends PublicKeyWithName
with PrettyPrinting {
type K = SigningPublicKey
override val id: Fingerprint = publicKey.id
override def pretty: Pretty[SigningPublicKeyWithName] =
prettyOfClass(param("publicKey", _.publicKey), param("name", _.name))
}
object SigningPublicKeyWithName {

View File

@ -3,13 +3,14 @@
package com.digitalasset.canton.crypto.store
import cats.data.EitherT
import cats.data.OptionT
import cats.syntax.functor.*
import com.daml.error.{ErrorCategory, ErrorCode, Explanation, Resolution}
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.crypto.store.db.DbCryptoPublicStore
import com.digitalasset.canton.crypto.store.memory.InMemoryCryptoPublicStore
import com.digitalasset.canton.crypto.{KeyName, *}
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.error.{BaseCantonError, CantonErrorGroups}
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.NamedLoggerFactory
@ -34,31 +35,25 @@ trait CryptoPublicStore extends AutoCloseable {
// Write methods that the underlying store has to implement for the caching
protected def writeSigningKey(key: SigningPublicKey, name: Option[KeyName])(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit]
): FutureUnlessShutdown[Unit]
protected def writeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName])(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit]
protected[crypto] def listAllKeyFingerprints(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[Fingerprint]] =
for {
signingKeys <- listSigningKeys
encryptionKeys <- listEncryptionKeys
} yield signingKeys.map(_.publicKey.id) ++ encryptionKeys.map(_.publicKey.id)
): FutureUnlessShutdown[Unit]
@VisibleForTesting
private[store] def listSigningKeys(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[SigningPublicKeyWithName]]
): FutureUnlessShutdown[Set[SigningPublicKeyWithName]]
@VisibleForTesting
private[store] def listEncryptionKeys(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]]
): FutureUnlessShutdown[Set[EncryptionPublicKeyWithName]]
def storePublicKey(publicKey: PublicKey, name: Option[KeyName])(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
): FutureUnlessShutdown[Unit] =
(publicKey: @unchecked) match {
case sigKey: SigningPublicKey => storeSigningKey(sigKey, name)
case encKey: EncryptionPublicKey => storeEncryptionKey(encKey, name)
@ -66,50 +61,35 @@ trait CryptoPublicStore extends AutoCloseable {
def publicKey(keyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[PublicKey]] =
publicKeyWithName(keyId).map(_.map(_.publicKey))
def publicKeyWithName(keyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[PublicKeyWithName]] =
for {
sigKeyOption <- readSigningKey(keyId)
pubKeyOption <- sigKeyOption.fold(readEncryptionKey(keyId).widen[Option[PublicKeyWithName]])(
key => EitherT.rightT(Some(key))
)
} yield pubKeyOption
def existsPublicKey(keyId: Fingerprint, purpose: KeyPurpose)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Boolean] =
purpose match {
case KeyPurpose.Signing => signingKey(keyId).map(_.nonEmpty)
case KeyPurpose.Encryption => encryptionKey(keyId).map(_.nonEmpty)
}
): OptionT[FutureUnlessShutdown, PublicKey] =
readSigningKey(keyId)
.widen[PublicKeyWithName]
.orElse(readEncryptionKey(keyId).widen[PublicKeyWithName])
.map(_.publicKey)
def findSigningKeyIdByName(keyName: KeyName)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKey]] =
listSigningKeys.map(_.find(_.name.contains(keyName)).map(_.publicKey))
): OptionT[FutureUnlessShutdown, SigningPublicKey] =
OptionT(listSigningKeys.map(_.find(_.name.contains(keyName)).map(_.publicKey)))
def findSigningKeyIdByFingerprint(fingerprint: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKey]] =
listSigningKeys.map(_.find(_.publicKey.fingerprint == fingerprint).map(_.publicKey))
): OptionT[FutureUnlessShutdown, SigningPublicKey] =
OptionT(listSigningKeys.map(_.find(_.publicKey.fingerprint == fingerprint).map(_.publicKey)))
def findEncryptionKeyIdByName(keyName: KeyName)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKey]] =
listEncryptionKeys.map(_.find(_.name.contains(keyName)).map(_.publicKey))
): OptionT[FutureUnlessShutdown, EncryptionPublicKey] =
OptionT(listEncryptionKeys.map(_.find(_.name.contains(keyName)).map(_.publicKey)))
def findEncryptionKeyIdByFingerprint(fingerprint: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKey]] =
listEncryptionKeys.map(_.find(_.publicKey.fingerprint == fingerprint).map(_.publicKey))
): OptionT[FutureUnlessShutdown, EncryptionPublicKey] =
OptionT(listEncryptionKeys.map(_.find(_.publicKey.fingerprint == fingerprint).map(_.publicKey)))
def publicKeysWithName(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[PublicKeyWithName]] =
): FutureUnlessShutdown[Set[PublicKeyWithName]] =
for {
sigKeys <- listSigningKeys
encKeys <- listEncryptionKeys
@ -117,64 +97,68 @@ trait CryptoPublicStore extends AutoCloseable {
def signingKey(signingKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKey]] =
): OptionT[FutureUnlessShutdown, SigningPublicKey] =
retrieveKeyAndUpdateCache(signingKeyMap, readSigningKey(_))(signingKeyId)
protected def readSigningKey(signingKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKeyWithName]]
): OptionT[FutureUnlessShutdown, SigningPublicKeyWithName]
def signingKeys(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[SigningPublicKey]] =
): FutureUnlessShutdown[Set[SigningPublicKey]] =
retrieveKeysAndUpdateCache(listSigningKeys, signingKeyMap)
def storeSigningKey(key: SigningPublicKey, name: Option[KeyName] = None)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
): FutureUnlessShutdown[Unit] =
writeSigningKey(key, name).map { _ =>
val _ = signingKeyMap.put(key.id, SigningPublicKeyWithName(key, name))
signingKeyMap.put(key.id, SigningPublicKeyWithName(key, name)).discard
}
def encryptionKey(encryptionKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKey]] =
): OptionT[FutureUnlessShutdown, EncryptionPublicKey] =
retrieveKeyAndUpdateCache(encryptionKeyMap, readEncryptionKey(_))(encryptionKeyId)
protected def readEncryptionKey(encryptionKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]]
): OptionT[FutureUnlessShutdown, EncryptionPublicKeyWithName]
def encryptionKeys(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[EncryptionPublicKey]] =
): FutureUnlessShutdown[Set[EncryptionPublicKey]] =
retrieveKeysAndUpdateCache(listEncryptionKeys, encryptionKeyMap)
def storeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName] = None)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
): FutureUnlessShutdown[Unit] =
writeEncryptionKey(key, name)
.map { _ =>
val _ = encryptionKeyMap.put(key.id, EncryptionPublicKeyWithName(key, name))
encryptionKeyMap.put(key.id, EncryptionPublicKeyWithName(key, name)).discard
}
private[crypto] def deleteKey(keyId: Fingerprint)(implicit
traceContext: TraceContext
): FutureUnlessShutdown[Unit]
private def retrieveKeyAndUpdateCache[KN <: PublicKeyWithName](
cache: TrieMap[Fingerprint, KN],
readKey: Fingerprint => EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[KN]],
)(keyId: Fingerprint): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[KN#K]] =
readKey: Fingerprint => OptionT[FutureUnlessShutdown, KN],
)(keyId: Fingerprint): OptionT[FutureUnlessShutdown, KN#K] =
cache.get(keyId) match {
case Some(value) => EitherT.rightT(Some(value.publicKey))
case Some(key) => OptionT.some(key.publicKey)
case None =>
readKey(keyId).map { keyOption =>
keyOption.foreach(key => cache.putIfAbsent(keyId, key))
keyOption.map(_.publicKey)
readKey(keyId).map { key =>
cache.putIfAbsent(keyId, key).discard
key.publicKey
}
}
private def retrieveKeysAndUpdateCache[KN <: PublicKeyWithName](
keysFromDb: EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[KN]],
keysFromDb: FutureUnlessShutdown[Set[KN]],
cache: TrieMap[Fingerprint, KN],
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[KN#K]] =
): FutureUnlessShutdown[Set[KN#K]] =
for {
// we always rebuild the cache here just in case new keys have been added by another process
// this should not be a problem since these operations to get all keys are infrequent and typically
@ -194,7 +178,7 @@ object CryptoPublicStore {
ec: ExecutionContext
): CryptoPublicStore = {
storage match {
case _: MemoryStorage => new InMemoryCryptoPublicStore
case _: MemoryStorage => new InMemoryCryptoPublicStore(loggerFactory)
case dbStorage: DbStorage =>
new DbCryptoPublicStore(dbStorage, releaseProtocolVersion, timeouts, loggerFactory)
}
@ -218,27 +202,22 @@ object CryptoPublicStoreError extends CantonErrorGroups.CommandErrorGroup {
extends BaseCantonError.Impl(cause = "An error occurred with the public crypto store")
}
final case class FailedToListKeys(reason: String) extends CryptoPublicStoreError {
override def pretty: Pretty[FailedToListKeys] = prettyOfClass(unnamedParam(_.reason.unquoted))
}
final case class FailedToReadKey(keyId: Fingerprint, reason: String)
extends CryptoPublicStoreError {
override def pretty: Pretty[FailedToReadKey] = prettyOfClass(unnamedParam(_.reason.unquoted))
}
final case class FailedToInsertKey(keyId: Fingerprint, reason: String)
extends CryptoPublicStoreError {
override def pretty: Pretty[FailedToInsertKey] =
prettyOfClass(param("keyId", _.keyId), param("reason", _.reason.unquoted))
}
final case class KeyAlreadyExists(keyId: Fingerprint, existingKeyName: Option[String])
extends CryptoPublicStoreError {
override def pretty: Pretty[KeyAlreadyExists] =
final case class KeyAlreadyExists[K <: PublicKeyWithName: Pretty](
keyId: Fingerprint,
existingPublicKey: K,
newPublicKey: K,
) extends CryptoPublicStoreError {
override def pretty: Pretty[KeyAlreadyExists[K]] =
prettyOfClass(
param("keyId", _.keyId),
param("existingKeyName", _.existingKeyName.getOrElse("").unquoted),
param("existingPublicKey", _.existingPublicKey),
param("newPublicKey", _.newPublicKey),
)
}

View File

@ -17,7 +17,6 @@ import com.digitalasset.canton.resource.DbStorage.DbAction
import com.digitalasset.canton.resource.DbStorage.Implicits.*
import com.digitalasset.canton.resource.{DbStorage, DbStore}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.EitherTUtil
import com.digitalasset.canton.version.ReleaseProtocolVersion
import com.google.common.annotations.VisibleForTesting
import com.google.protobuf.ByteString
@ -149,18 +148,16 @@ class DbCryptoPrivateStore(
purpose: KeyPurpose,
)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Option[StoredPrivateKey]] =
EitherTUtil
.fromFuture[CryptoPrivateStoreError, Option[StoredPrivateKey]](
storage
.querySingle(
queryKey(keyId, purpose),
functionFullName,
)
.value,
err => CryptoPrivateStoreError.FailedToReadKey(keyId, err.toString),
)
.mapK(FutureUnlessShutdown.outcomeK)
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Option[StoredPrivateKey]] = {
EitherT.right(
storage
.querySingleUnlessShutdown(
queryKey(keyId, purpose),
functionFullName,
)
.value
)
}
private[crypto] def writePrivateKey(
key: StoredPrivateKey
@ -172,15 +169,13 @@ class DbCryptoPrivateStore(
@VisibleForTesting
private[canton] def listPrivateKeys(purpose: KeyPurpose, encrypted: Boolean)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Set[StoredPrivateKey]] =
EitherTUtil
.fromFuture[CryptoPrivateStoreError, Set[StoredPrivateKey]](
storage
.query(queryKeys(purpose), functionFullName)
.map(keys => keys.filter(_.isEncrypted == encrypted)),
err => CryptoPrivateStoreError.FailedToListKeys(err.toString),
)
.mapK(FutureUnlessShutdown.outcomeK)
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Set[StoredPrivateKey]] = {
EitherT.right(
storage
.queryUnlessShutdown(queryKeys(purpose), functionFullName)
.map(keys => keys.filter(_.isEncrypted == encrypted))
)
}
private def deleteKey(keyId: Fingerprint): SqlAction[Int, NoStream, Effect.Write] =
sqlu"delete from common_crypto_private_keys where key_id = $keyId"
@ -191,28 +186,24 @@ class DbCryptoPrivateStore(
private[crypto] def replaceStoredPrivateKeys(newKeys: Seq[StoredPrivateKey])(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Unit] =
EitherTUtil
.fromFuture[CryptoPrivateStoreError, Unit](
storage
.update_(
DBIOAction
.sequence(
newKeys.map(key => deleteKey(key.id).andThen(insertKeyUpdate(key)))
)
.transactionally,
functionFullName,
),
err => CryptoPrivateStoreError.FailedToReplaceKeys(newKeys.map(_.id), err.toString),
)
.mapK(FutureUnlessShutdown.outcomeK)
EitherT.right(
storage
.updateUnlessShutdown_(
DBIOAction
.sequence(
newKeys.map(key => deleteKey(key.id).andThen(insertKeyUpdate(key)))
)
.transactionally,
functionFullName,
)
)
private[crypto] def deletePrivateKey(keyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Unit] =
EitherTUtil.fromFuture(
EitherT.right(
storage
.updateUnlessShutdown_(deleteKey(keyId), functionFullName),
err => CryptoPrivateStoreError.FailedToDeleteKey(keyId, err.toString),
.updateUnlessShutdown_(deleteKey(keyId), functionFullName)
)
private[crypto] def encrypted(
@ -235,27 +226,23 @@ class DbCryptoPrivateStore(
private[crypto] def getWrapperKeyId()(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Option[String300]] =
EitherTUtil
.fromFuture(
storage.queryUnlessShutdown(
{
EitherT
.right(
storage
.queryUnlessShutdown(
sql"select distinct wrapper_key_id from common_crypto_private_keys"
.as[Option[String300]]
.map(_.toSeq)
},
functionFullName,
),
err => CryptoPrivateStoreError.FailedToGetWrapperKeyId(err.toString),
.map(_.toSeq),
functionFullName,
)
)
.transform {
case Left(err) => Left(err)
case Right(wrapper_keys) =>
if (wrapper_keys.size > 1)
Left(
CryptoPrivateStoreError
.FailedToGetWrapperKeyId("Found more than one distinct wrapper_key_id")
)
else
Right(wrapper_keys.flatten.headOption)
.subflatMap { wrapperKeys =>
if (wrapperKeys.size > 1)
Left(
CryptoPrivateStoreError
.FailedToGetWrapperKeyId("Found more than one distinct wrapper_key_id")
)
else
Right(wrapperKeys.flatten.headOption)
}
}

View File

@ -3,8 +3,7 @@
package com.digitalasset.canton.crypto.store.db
import cats.data.EitherT
import cats.syntax.bifunctor.*
import cats.data.OptionT
import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.crypto.*
@ -12,9 +11,8 @@ import com.digitalasset.canton.crypto.store.*
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.resource.DbStorage.DbAction
import com.digitalasset.canton.resource.{DbStorage, DbStore}
import com.digitalasset.canton.resource.{DbStorage, DbStore, IdempotentInsert}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.EitherTUtil
import com.digitalasset.canton.version.ReleaseProtocolVersion
import slick.jdbc.{GetResult, SetParameter}
@ -42,7 +40,7 @@ class DbCryptoPublicStore(
.as[K]
.map(_.toSet)
private def queryKey[K <: PublicKeyWithName: GetResult](
private def queryKeyO[K <: PublicKeyWithName: GetResult](
keyId: Fingerprint,
purpose: KeyPurpose,
): DbAction.ReadOnly[Option[K]] =
@ -50,109 +48,86 @@ class DbCryptoPublicStore(
.as[K]
.headOption
private def insertKeyUpdate[K <: PublicKey: SetParameter, KN <: PublicKeyWithName: GetResult](
key: K,
name: Option[KeyName],
): DbAction.WriteOnly[Int] =
storage.profile match {
case _: DbStorage.Profile.Oracle =>
sqlu"""insert
/*+ IGNORE_ROW_ON_DUPKEY_INDEX ( common_crypto_public_keys ( key_id ) ) */
into common_crypto_public_keys (key_id, purpose, data, name)
values (${key.id}, ${key.purpose}, $key, $name)"""
case _ =>
sqlu"""insert into common_crypto_public_keys (key_id, purpose, data, name)
values (${key.id}, ${key.purpose}, $key, $name)
on conflict do nothing"""
}
private def queryKey[K <: PublicKeyWithName: GetResult](
keyId: Fingerprint,
purpose: KeyPurpose,
): DbAction.ReadOnly[K] =
sql"select data, name from common_crypto_public_keys where key_id = $keyId and purpose = $purpose"
.as[K]
.head
private def insertKey[K <: PublicKey: SetParameter, KN <: PublicKeyWithName: GetResult](
key: K,
name: Option[KeyName],
)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
for {
inserted <- EitherT.right(
storage.updateUnlessShutdown(insertKeyUpdate(key, name), functionFullName)
)
res <-
if (inserted == 0) {
// If no key was inserted by the insert query, check that the existing value matches
storage
.querySingleUnlessShutdown(queryKey(key.id, key.purpose), functionFullName)
.toRight(
CryptoPublicStoreError.FailedToInsertKey(key.id, "No key inserted and no key found")
)
.flatMap { existingKey =>
EitherT
.cond[FutureUnlessShutdown](
existingKey.publicKey == key && existingKey.name == name,
(),
CryptoPublicStoreError.KeyAlreadyExists(key.id, existingKey.name.map(_.unwrap)),
)
.leftWiden[CryptoPublicStoreError]
}
} else EitherT.rightT[FutureUnlessShutdown, CryptoPublicStoreError](())
} yield res
): FutureUnlessShutdown[Unit] = {
storage.queryAndUpdateUnlessShutdown(
IdempotentInsert.insertVerifyingConflicts(
storage,
"common_crypto_public_keys ( key_id )",
sql"common_crypto_public_keys (key_id, purpose, data, name) values (${key.id}, ${key.purpose}, $key, $name)",
queryKey(key.id, key.purpose),
)(
existingKey => existingKey.publicKey == key && existingKey.name == name,
_ => s"Existing public key for ${key.id} is different than inserted key",
),
functionFullName,
)
}
override def readSigningKey(signingKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKeyWithName]] =
EitherTUtil.fromFuture(
storage
.querySingleUnlessShutdown(
queryKey[SigningPublicKeyWithName](signingKeyId, KeyPurpose.Signing),
functionFullName,
)
.value,
err => CryptoPublicStoreError.FailedToReadKey(signingKeyId, err.toString),
)
): OptionT[FutureUnlessShutdown, SigningPublicKeyWithName] =
storage
.querySingleUnlessShutdown(
queryKeyO[SigningPublicKeyWithName](signingKeyId, KeyPurpose.Signing),
functionFullName,
)
override def readEncryptionKey(encryptionKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]] =
EitherTUtil.fromFuture(
storage
.querySingleUnlessShutdown(
queryKey[EncryptionPublicKeyWithName](encryptionKeyId, KeyPurpose.Encryption),
functionFullName,
)
.value,
err => CryptoPublicStoreError.FailedToReadKey(encryptionKeyId, err.toString),
)
): OptionT[FutureUnlessShutdown, EncryptionPublicKeyWithName] =
storage
.querySingleUnlessShutdown(
queryKeyO[EncryptionPublicKeyWithName](encryptionKeyId, KeyPurpose.Encryption),
functionFullName,
)
override protected def writeSigningKey(key: SigningPublicKey, name: Option[KeyName])(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
): FutureUnlessShutdown[Unit] =
insertKey[SigningPublicKey, SigningPublicKeyWithName](key, name)
override protected def writeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName])(
implicit traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
): FutureUnlessShutdown[Unit] =
insertKey[EncryptionPublicKey, EncryptionPublicKeyWithName](key, name)
override private[store] def listSigningKeys(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[SigningPublicKeyWithName]] =
EitherTUtil.fromFuture(
storage.queryUnlessShutdown(
queryKeys[SigningPublicKeyWithName](KeyPurpose.Signing),
functionFullName,
),
err => CryptoPublicStoreError.FailedToListKeys(err.toString),
): FutureUnlessShutdown[Set[SigningPublicKeyWithName]] =
storage.queryUnlessShutdown(
queryKeys[SigningPublicKeyWithName](KeyPurpose.Signing),
functionFullName,
)
override private[store] def listEncryptionKeys(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]] =
EitherTUtil
.fromFuture(
storage
.queryUnlessShutdown(
queryKeys[EncryptionPublicKeyWithName](KeyPurpose.Encryption),
functionFullName,
),
err => CryptoPublicStoreError.FailedToListKeys(err.toString),
): FutureUnlessShutdown[Set[EncryptionPublicKeyWithName]] =
storage
.queryUnlessShutdown(
queryKeys[EncryptionPublicKeyWithName](KeyPurpose.Encryption),
functionFullName,
)
// Removes the public key (of either purpose) with the given fingerprint from the
// `common_crypto_public_keys` table. Fire-and-forget semantics: deleting a key that
// does not exist is a no-op (the DELETE simply affects zero rows).
// NOTE(review): callers must also evict the in-memory caches kept by the enclosing
// CryptoPublicStore trait — this method only touches the database; confirm the
// trait-level deleteKey handles cache invalidation.
override private[crypto] def deleteKey(
keyId: Fingerprint
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
storage
.updateUnlessShutdown_(
// Parameterized via slick's sql interpolator, so keyId is bound safely (no SQL injection).
sqlu"delete from common_crypto_public_keys where key_id = $keyId",
functionFullName,
)
}
}

View File

@ -3,76 +3,102 @@
package com.digitalasset.canton.crypto.store.memory
import cats.data.EitherT
import cats.data.OptionT
import cats.syntax.either.*
import com.digitalasset.canton.crypto.store.{CryptoPublicStore, CryptoPublicStoreError}
import com.digitalasset.canton.crypto.{KeyName, *}
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.pretty.Pretty
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.TrieMapUtil
import com.digitalasset.canton.util.{ErrorUtil, TrieMapUtil}
import scala.collection.concurrent.TrieMap
import scala.concurrent.ExecutionContext
class InMemoryCryptoPublicStore(override implicit val ec: ExecutionContext)
extends CryptoPublicStore {
class InMemoryCryptoPublicStore(override protected val loggerFactory: NamedLoggerFactory)(
override implicit val ec: ExecutionContext
) extends CryptoPublicStore
with NamedLogging {
private val storedSigningKeyMap: TrieMap[Fingerprint, SigningPublicKeyWithName] = TrieMap.empty
private val storedEncryptionKeyMap: TrieMap[Fingerprint, EncryptionPublicKeyWithName] =
TrieMap.empty
private def errorKeyDuplicate[K <: PublicKeyWithName](
private def errorKeyDuplicate[K <: PublicKeyWithName: Pretty](
keyId: Fingerprint,
oldKey: K,
newKey: K,
): CryptoPublicStoreError =
CryptoPublicStoreError.KeyAlreadyExists(keyId, oldKey.name.map(_.unwrap))
CryptoPublicStoreError.KeyAlreadyExists(keyId, oldKey, newKey)
override protected def writeSigningKey(key: SigningPublicKey, name: Option[KeyName])(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] = {
TrieMapUtil
.insertIfAbsent(
storedSigningKeyMap,
key.id,
SigningPublicKeyWithName(key, name),
errorKeyDuplicate[SigningPublicKeyWithName] _,
)
.toEitherT
): FutureUnlessShutdown[Unit] = {
FutureUnlessShutdown.wrap {
TrieMapUtil
.insertIfAbsent(
storedSigningKeyMap,
key.id,
SigningPublicKeyWithName(key, name),
errorKeyDuplicate[SigningPublicKeyWithName] _,
)
.valueOr { err =>
ErrorUtil.invalidState(
s"Existing public key for ${key.id} is different than inserted key: $err"
)
}
}
}
override def readSigningKey(signingKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKeyWithName]] =
EitherT.rightT(storedSigningKeyMap.get(signingKeyId))
): OptionT[FutureUnlessShutdown, SigningPublicKeyWithName] =
OptionT.fromOption(storedSigningKeyMap.get(signingKeyId))
override def readEncryptionKey(encryptionKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]] =
EitherT.rightT(storedEncryptionKeyMap.get(encryptionKeyId))
): OptionT[FutureUnlessShutdown, EncryptionPublicKeyWithName] =
OptionT.fromOption(storedEncryptionKeyMap.get(encryptionKeyId))
override protected def writeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName])(
implicit traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] = {
TrieMapUtil
.insertIfAbsent(
storedEncryptionKeyMap,
key.id,
EncryptionPublicKeyWithName(key, name),
errorKeyDuplicate[EncryptionPublicKeyWithName] _,
)
.toEitherT
): FutureUnlessShutdown[Unit] = {
FutureUnlessShutdown.wrap {
TrieMapUtil
.insertIfAbsent(
storedEncryptionKeyMap,
key.id,
EncryptionPublicKeyWithName(key, name),
errorKeyDuplicate[EncryptionPublicKeyWithName] _,
)
.valueOr { _ =>
ErrorUtil.invalidState(
s"Existing public key for ${key.id} is different than inserted key"
)
}
}
}
override private[store] def listSigningKeys(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[SigningPublicKeyWithName]] =
EitherT.rightT(storedSigningKeyMap.values.toSet)
): FutureUnlessShutdown[Set[SigningPublicKeyWithName]] =
FutureUnlessShutdown.pure(storedSigningKeyMap.values.toSet)
override private[store] def listEncryptionKeys(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]] =
EitherT.rightT(storedEncryptionKeyMap.values.toSet)
): FutureUnlessShutdown[Set[EncryptionPublicKeyWithName]] =
FutureUnlessShutdown.pure(storedEncryptionKeyMap.values.toSet)
// In-memory counterpart of the DB store's deleteKey: drops the fingerprint from
// both the signing-key and encryption-key maps (a fingerprint can only occupy one,
// but removing from both avoids a purpose lookup). Removing an absent key is a no-op.
// The TrieMap.remove results are discarded deliberately via DiscardOps.
override private[crypto] def deleteKey(
keyId: Fingerprint
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
storedSigningKeyMap.remove(keyId).discard
storedEncryptionKeyMap.remove(keyId).discard
// Purely synchronous mutation, so we can return an already-completed future.
FutureUnlessShutdown.unit
}
override def close(): Unit = ()
}

View File

@ -55,14 +55,14 @@ object CantonNodeParameters {
) extends CantonNodeParameters.General
}
trait Protocol {
def devVersionSupport: Boolean
def alphaVersionSupport: Boolean
def betaVersionSupport: Boolean
def dontWarnOnDeprecatedPV: Boolean
}
object Protocol {
final case class Impl(
devVersionSupport: Boolean,
alphaVersionSupport: Boolean,
betaVersionSupport: Boolean,
dontWarnOnDeprecatedPV: Boolean,
) extends CantonNodeParameters.Protocol
@ -95,7 +95,7 @@ trait HasProtocolCantonNodeParameters extends CantonNodeParameters.Protocol {
protected def protocol: CantonNodeParameters.Protocol
def devVersionSupport: Boolean = protocol.devVersionSupport
def alphaVersionSupport: Boolean = protocol.alphaVersionSupport
def betaVersionSupport: Boolean = protocol.betaVersionSupport
def dontWarnOnDeprecatedPV: Boolean = protocol.dontWarnOnDeprecatedPV
}

View File

@ -47,6 +47,10 @@ object FutureUnlessShutdown {
Future.successful(x)
)
/** Analog to Future.apply that handles an exception of `x` as a failed future.
  *
  * `x` is by-name: it is evaluated inside `Future(...)` on the given execution
  * context, so a thrown exception becomes a failed future rather than escaping
  * to the caller.
  */
def wrap[A](x: => A)(implicit ec: ExecutionContext): FutureUnlessShutdown[A] =
FutureUnlessShutdown.outcomeF(Future(x))
/** Wraps the result of a [[scala.concurrent.Future]] into an [[UnlessShutdown.Outcome]],
  * i.e. lifts an ordinary future into one that carries shutdown information
  * (always marked as a normal Outcome, never AbortedDueToShutdown).
  */
def outcomeF[A](f: Future[A])(implicit ec: ExecutionContext): FutureUnlessShutdown[A] =
FutureUnlessShutdown(f.map(UnlessShutdown.Outcome(_)))

View File

@ -154,9 +154,11 @@ object EncryptedView {
traceContext: TraceContext,
): EitherT[FutureUnlessShutdown, InvalidEncryptionKey, Unit] =
for {
encryptionKey <- cryptoPublicStore
.findEncryptionKeyIdByFingerprint(keyId)
.leftMap(err => DecryptionError.InvalidEncryptionKey(err.show))
encryptionKey <- EitherT.right(
cryptoPublicStore
.findEncryptionKeyIdByFingerprint(keyId)
.value
)
_ <- encryptionKey match {
case Some(encPubKey) =>
EitherT.cond[FutureUnlessShutdown](

View File

@ -49,14 +49,14 @@ trait DbMigrations { this: NamedLogging =>
* A user that does that, won't be able to upgrade to new Canton versions, as we reserve our right to just
* modify the dev version files in any way we like.
*/
protected def devVersionSupport: Boolean
protected def alphaVersionSupport: Boolean
/** Database is migrated using Flyway, which looks at the migration files at
* src/main/resources/db/migration/canton as explained at https://flywaydb.org/documentation/getstarted/firststeps/api
*/
protected def createFlyway(dataSource: DataSource): Flyway = {
Flyway.configure
.locations(dbConfig.buildMigrationsPaths(devVersionSupport)*)
.locations(dbConfig.buildMigrationsPaths(alphaVersionSupport)*)
.dataSource(dataSource)
.cleanDisabled(!dbConfig.parameters.unsafeCleanOnValidationError)
.cleanOnValidationError(dbConfig.parameters.unsafeCleanOnValidationError)
@ -298,7 +298,7 @@ class CommunityDbMigrationsFactory(loggerFactory: NamedLoggerFactory) extends Db
class CommunityDbMigrations(
protected val dbConfig: DbConfig,
protected val devVersionSupport: Boolean,
protected val alphaVersionSupport: Boolean,
protected val loggerFactory: NamedLoggerFactory,
)(implicit override protected val closeContext: CloseContext)
extends DbMigrations

View File

@ -10,12 +10,12 @@ import com.digitalasset.canton.buildinfo.BuildInfo
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.version.ProtocolVersion.{
alpha,
beta,
deleted,
deprecated,
stable,
supported,
unstable,
}
import pureconfig.error.FailureReason
import pureconfig.{ConfigReader, ConfigWriter}
@ -38,11 +38,11 @@ import slick.jdbc.{GetResult, PositionedParameters, SetParameter}
*
* How to add a new protocol version `N`:
* - Define a new constant `v<N>` in the [[ProtocolVersion$]] object via
* {{{lazy val v<N>: ProtocolVersionWithStatus[Unstable] = ProtocolVersion.unstable(<N>)}}}
* {{{lazy val v<N>: ProtocolVersionWithStatus[Alpha] = ProtocolVersion.alpha(<N>)}}}
*
* - The new protocol version should be declared as unstable until it is released:
* Define it with type argument [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Unstable]]
* and add it to the list in [[com.digitalasset.canton.version.ProtocolVersion.unstable]].
* - The new protocol version should be declared as alpha until it is released:
* Define it with type argument [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Alpha]]
* and add it to the list in [[com.digitalasset.canton.version.ProtocolVersion.alpha]].
*
* - Add a new test job for the protocol version `N` to the canton_build workflow.
* Make a sensible decision how often it should run.
@ -51,16 +51,16 @@ import slick.jdbc.{GetResult, PositionedParameters, SetParameter}
*
* How to release a protocol version `N`:
* - Switch the type parameter of the protocol version constant `v<N>` from
* [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Unstable]] to [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Stable]]
* [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Alpha]] to [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Stable]]
* As a result, you may have to modify a couple of protobuf definitions and mark them as stable as well.
*
* - Remove `v<N>` from [[com.digitalasset.canton.version.ProtocolVersion.unstable]]
* - Remove `v<N>` from [[com.digitalasset.canton.version.ProtocolVersion.alpha]]
* and add it to [[com.digitalasset.canton.buildinfo.BuildInfo.stableProtocolVersions]].
*
* How to release a protocol version `N` as Beta:
* - Switch the type parameter of the protocol version constant `v<N>` from
* [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Unstable]] to [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Beta]]
* - Remove `v<N>` from [[com.digitalasset.canton.version.ProtocolVersion.unstable]]
* [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Alpha]] to [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Beta]]
* - Remove `v<N>` from [[com.digitalasset.canton.version.ProtocolVersion.alpha]]
* and add it to [[com.digitalasset.canton.buildinfo.BuildInfo.betaProtocolVersions]].
*
* - Check the test jobs for protocol versions:
@ -79,7 +79,7 @@ sealed case class ProtocolVersion private[version] (v: Int)
def isDeprecated: Boolean = deprecated.contains(this)
def isUnstable: Boolean = unstable.contains(this)
def isUnstable: Boolean = alpha.contains(this)
def isBeta: Boolean = beta.contains(this)
@ -110,10 +110,11 @@ object ProtocolVersion {
v: Int
): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Stable] =
createWithStatus[ProtocolVersionAnnotation.Stable](v)
private[version] def createUnstable(
private[version] def createAlpha(
v: Int
): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable] =
createWithStatus[ProtocolVersionAnnotation.Unstable](v)
): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Alpha] =
createWithStatus[ProtocolVersionAnnotation.Alpha](v)
private[version] def createBeta(
v: Int
): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Beta] =
@ -236,31 +237,31 @@ object ProtocolVersion {
ProtocolVersion(30),
)
val unstable: NonEmpty[List[ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable]]] =
val alpha: NonEmpty[List[ProtocolVersionWithStatus[ProtocolVersionAnnotation.Alpha]]] =
NonEmpty.mk(List, ProtocolVersion.v31, ProtocolVersion.dev)
val beta: List[ProtocolVersionWithStatus[ProtocolVersionAnnotation.Beta]] =
parseFromBuildInfo(BuildInfo.betaProtocolVersions.toSeq)
.map(pv => ProtocolVersion.createBeta(pv.v))
val supported: NonEmpty[List[ProtocolVersion]] = (unstable ++ beta ++ stable).sorted
val supported: NonEmpty[List[ProtocolVersion]] = (alpha ++ beta ++ stable).sorted
private val allProtocolVersions = deprecated ++ deleted ++ unstable ++ beta ++ stable
private val allProtocolVersions = deprecated ++ deleted ++ alpha ++ beta ++ stable
require(
allProtocolVersions.sizeCompare(allProtocolVersions.distinct) == 0,
s"All the protocol versions should be distinct." +
s"Found: ${Map("deprecated" -> deprecated, "deleted" -> deleted, "unstable" -> unstable, "stable" -> stable)}",
s"Found: ${Map("deprecated" -> deprecated, "deleted" -> deleted, "alpha" -> alpha, "stable" -> stable)}",
)
// TODO(i15561): change back to `stableAndSupported.max1` once there is a stable Daml 3 protocol version
val latest: ProtocolVersion = stable.lastOption.getOrElse(unstable.head1)
val latest: ProtocolVersion = stable.lastOption.getOrElse(alpha.head1)
lazy val dev: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable] =
ProtocolVersion.createUnstable(Int.MaxValue)
lazy val dev: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Alpha] =
ProtocolVersion.createAlpha(Int.MaxValue)
lazy val v31: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable] =
ProtocolVersion.createUnstable(31)
lazy val v31: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Alpha] =
ProtocolVersion.createAlpha(31)
// Minimum stable protocol version introduced
lazy val minimum: ProtocolVersion = v31

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --target=2.1
name: CantonExamples

View File

@ -32,7 +32,7 @@ trait LocalNodeParametersConfig {
/** Various cache sizes */
def caching: CachingConfigs
def useUnifiedSequencer: Boolean
def devVersionSupport: Boolean
def alphaVersionSupport: Boolean
def watchdog: Option[WatchdogConfig]
}

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton.config
trait ProtocolConfig {
def devVersionSupport: Boolean
def alphaVersionSupport: Boolean
def betaVersionSupport: Boolean
def dontWarnOnDeprecatedPV: Boolean
}

View File

@ -11,7 +11,7 @@ import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFai
import com.digitalasset.canton.config.CantonRequireTypes.String300
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.crypto.admin.v30
import com.digitalasset.canton.crypto.store.{CryptoPrivateStoreError, CryptoPublicStoreError}
import com.digitalasset.canton.crypto.store.CryptoPrivateStoreError
import com.digitalasset.canton.crypto.{v30 as cryptoproto, *}
import com.digitalasset.canton.error.BaseCantonError
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
@ -59,11 +59,7 @@ class GrpcVaultService(
override def listMyKeys(request: v30.ListMyKeysRequest): Future[v30.ListMyKeysResponse] = {
implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
val result = for {
keys <- crypto.cryptoPublicStore.publicKeysWithName
.leftMap[BaseCantonError] { err =>
CryptoPublicStoreError.ErrorCode
.WrapStr(s"Failed to retrieve public keys: $err")
}
keys <- EitherT.right(crypto.cryptoPublicStore.publicKeysWithName)
publicKeys <-
keys.toList.parFilterA(pk =>
crypto.cryptoPrivateStore
@ -100,7 +96,7 @@ class GrpcVaultService(
implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
for {
publicKey <-
FutureUnlessShutdown.pure(
FutureUnlessShutdown.wrap(
ProtoConverter
.parse(
cryptoproto.PublicKey.parseFrom,
@ -109,14 +105,12 @@ class GrpcVaultService(
)
.valueOr(err => throw ProtoDeserializationFailure.WrapNoLogging(err).asGrpcError)
)
name <- FutureUnlessShutdown.pure(
name <- FutureUnlessShutdown.wrap(
KeyName
.fromProtoPrimitive(request.name)
.valueOr(err => throw ProtoDeserializationFailure.WrapNoLogging(err).asGrpcError)
)
_ <- crypto.cryptoPublicStore
.storePublicKey(publicKey, name.emptyStringAsNone)
.valueOr(err => throw CryptoPublicStoreError.ErrorCode.Wrap(err).asGrpcError)
_ <- crypto.cryptoPublicStore.storePublicKey(publicKey, name.emptyStringAsNone)
} yield v30.ImportPublicKeyResponse(fingerprint = publicKey.fingerprint.unwrap)
}.failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError)
@ -125,15 +119,11 @@ class GrpcVaultService(
): Future[v30.ListPublicKeysResponse] = {
implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
crypto.cryptoPublicStore.publicKeysWithName
.map(keys =>
.map { keys =>
v30.ListPublicKeysResponse(listPublicKeys(request.filters, keys).map(_.toProtoV30))
)
.valueOr(err =>
throw CryptoPublicStoreError.ErrorCode
.WrapStr(s"Failed to retrieve public keys: $err")
.asGrpcError
)
}.failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError)
}
.failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError)
}
override def generateSigningKey(
request: v30.GenerateSigningKeyRequest
@ -232,7 +222,7 @@ class GrpcVaultService(
.asRuntimeException()
)
cryptoPrivateStore <-
FutureUnlessShutdown.pure(
FutureUnlessShutdown.wrap(
crypto.cryptoPrivateStore.toExtended.getOrElse(
throw Status.FAILED_PRECONDITION
.withDescription(
@ -242,7 +232,7 @@ class GrpcVaultService(
)
)
fingerprint <-
FutureUnlessShutdown.pure(
FutureUnlessShutdown.wrap(
Fingerprint
.fromProtoPrimitive(request.fingerprint)
.valueOr(err =>
@ -252,7 +242,7 @@ class GrpcVaultService(
)
)
protocolVersion <-
FutureUnlessShutdown.pure(
FutureUnlessShutdown.wrap(
ProtocolVersion
.fromProtoPrimitive(request.protocolVersion)
.valueOr(err =>
@ -276,8 +266,7 @@ class GrpcVaultService(
publicKey <- EitherTUtil.toFutureUnlessShutdown(
crypto.cryptoPublicStore
.publicKey(fingerprint)
.leftMap(_.toString)
.subflatMap(_.toRight(s"no public key found for [$fingerprint]"))
.toRight(s"no public key found for [$fingerprint]")
.leftMap(err =>
Status.FAILED_PRECONDITION
.withDescription(s"Error retrieving public key [$fingerprint] $err")
@ -351,7 +340,7 @@ class GrpcVaultService(
keyPair: CryptoKeyPair[PublicKey, PrivateKey],
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] =
for {
cryptoPrivateStore <- FutureUnlessShutdown.pure(
cryptoPrivateStore <- FutureUnlessShutdown.wrap(
crypto.cryptoPrivateStore.toExtended.getOrElse(
throw Status.FAILED_PRECONDITION
.withDescription(
@ -360,20 +349,7 @@ class GrpcVaultService(
.asRuntimeException()
)
)
_ <- crypto.cryptoPublicStore
.storePublicKey(keyPair.publicKey, validatedName)
.recoverWith {
// if the existing key is the same, then ignore error
case error: CryptoPublicStoreError.KeyAlreadyExists =>
for {
existing <- crypto.cryptoPublicStore.publicKey(keyPair.publicKey.fingerprint)
_ <-
if (existing.contains(keyPair.publicKey))
EitherT.rightT[FutureUnlessShutdown, CryptoPublicStoreError](())
else EitherT.leftT[FutureUnlessShutdown, Unit](error: CryptoPublicStoreError)
} yield ()
}
.valueOr(err => throw CryptoPublicStoreError.ErrorCode.Wrap(err).asGrpcError)
_ <- crypto.cryptoPublicStore.storePublicKey(keyPair.publicKey, validatedName)
_ = logger.info(s"Uploading key ${validatedName}")
_ <- cryptoPrivateStore
.storePrivateKey(keyPair.privateKey, validatedName)
@ -381,7 +357,7 @@ class GrpcVaultService(
} yield ()
for {
validatedName <- FutureUnlessShutdown.pure(
validatedName <- FutureUnlessShutdown.wrap(
OptionUtil
.emptyStringAsNone(request.name)
.traverse(KeyName.create)
@ -423,24 +399,25 @@ class GrpcVaultService(
): Future[v30.DeleteKeyPairResponse] = {
implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
for {
fingerprint <- Future(
fingerprint <- FutureUnlessShutdown.wrap(
Fingerprint
.fromProtoPrimitive(request.fingerprint)
.valueOr(err =>
.valueOr { err =>
throw ProtoDeserializationFailure
.WrapNoLoggingStr(s"Failed to parse key fingerprint: $err")
.asGrpcError
)
}
)
_ <- CantonGrpcUtil.mapErrNewEUS {
crypto.cryptoPrivateStore
.removePrivateKey(fingerprint)
.leftMap(err =>
ProtoDeserializationFailure.WrapNoLoggingStr(s"Failed to remove private key: $err")
)
}
_ <- crypto.cryptoPrivateStore
.removePrivateKey(fingerprint)
.valueOr { err =>
throw Status.FAILED_PRECONDITION
.withDescription(s"Failed to remove private key: $err")
.asRuntimeException()
}
_ <- crypto.cryptoPublicStore.deleteKey(fingerprint)
} yield v30.DeleteKeyPairResponse()
}
}.failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError)
}
object GrpcVaultService {

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton.environment
import better.files.File
import cats.data.EitherT
import cats.data.{EitherT, OptionT}
import cats.syntax.functorFilter.*
import com.daml.metrics.HealthMetrics
import com.daml.metrics.api.MetricHandle.LabeledMetricsFactory
@ -29,7 +29,7 @@ import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.crypto.admin.grpc.GrpcVaultService.GrpcVaultServiceFactory
import com.digitalasset.canton.crypto.admin.v30.VaultServiceGrpc
import com.digitalasset.canton.crypto.store.CryptoPrivateStore.CryptoPrivateStoreFactory
import com.digitalasset.canton.crypto.store.{CryptoPrivateStoreError, CryptoPublicStoreError}
import com.digitalasset.canton.crypto.store.CryptoPrivateStoreError
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.environment.CantonNodeBootstrap.HealthDumpFunction
@ -697,14 +697,11 @@ abstract class CantonNodeBootstrapImpl[
override protected def autoCompleteStage()
: EitherT[FutureUnlessShutdown, String, Option[Unit]] = {
for {
namespaceKeyO <- crypto.cryptoPublicStore
namespaceKey <- crypto.cryptoPublicStore
.signingKey(nodeId.fingerprint)
.leftMap(_.toString)
namespaceKey <- EitherT.fromEither[FutureUnlessShutdown](
namespaceKeyO.toRight(
.toRight(
s"Performing auto-init but can't find key ${nodeId.fingerprint} from previous step"
)
)
// init topology manager
nsd <- EitherT.fromEither[FutureUnlessShutdown](
NamespaceDelegation.create(
@ -820,46 +817,34 @@ object CantonNodeBootstrapImpl {
private def getKeyByFingerprint[P <: PublicKey](
typ: String,
findPubKeyIdByFingerprint: Fingerprint => EitherT[
FutureUnlessShutdown,
CryptoPublicStoreError,
Option[P],
],
findPubKeyIdByFingerprint: Fingerprint => OptionT[FutureUnlessShutdown, P],
existPrivateKeyByFp: Fingerprint => EitherT[
FutureUnlessShutdown,
CryptoPrivateStoreError,
Boolean,
],
fingerprint: Fingerprint,
)(implicit ec: ExecutionContext): EitherT[FutureUnlessShutdown, String, P] = for {
keyIdO <- findPubKeyIdByFingerprint(fingerprint)
.leftMap(err =>
s"Failure while looking for $typ fingerprint $fingerprint in public store: $err"
)
pubKey <- keyIdO.fold(
EitherT.leftT[FutureUnlessShutdown, P](
s"$typ key with fingerprint $fingerprint does not exist"
)
) { keyWithFingerprint =>
val fingerprint = keyWithFingerprint.fingerprint
existPrivateKeyByFp(fingerprint)
.leftMap(err =>
s"Failure while looking for $typ key $fingerprint in private key store: $err"
)
.transform {
case Right(true) => Right(keyWithFingerprint)
case Right(false) =>
Left(s"Broken private key store: Could not find $typ key $fingerprint")
case Left(err) => Left(err)
}
}
} yield pubKey
)(implicit ec: ExecutionContext): EitherT[FutureUnlessShutdown, String, P] = {
findPubKeyIdByFingerprint(fingerprint)
.toRight(s"$typ key with fingerprint $fingerprint does not exist")
.flatMap { keyWithFingerprint =>
val fingerprint = keyWithFingerprint.fingerprint
existPrivateKeyByFp(fingerprint)
.leftMap(err =>
s"Failure while looking for $typ key $fingerprint in private key store: $err"
)
.transform {
case Right(true) => Right(keyWithFingerprint)
case Right(false) =>
Left(s"Broken private key store: Could not find $typ key $fingerprint")
case Left(err) => Left(err)
}
}
}
private def getOrCreateKey[P <: PublicKey](
typ: String,
findPubKeyIdByName: KeyName => EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[
P
]],
findPubKeyIdByName: KeyName => OptionT[FutureUnlessShutdown, P],
generateKey: Option[KeyName] => EitherT[FutureUnlessShutdown, String, P],
existPrivateKeyByFp: Fingerprint => EitherT[
FutureUnlessShutdown,
@ -869,8 +854,7 @@ object CantonNodeBootstrapImpl {
name: String,
)(implicit ec: ExecutionContext): EitherT[FutureUnlessShutdown, String, P] = for {
keyName <- EitherT.fromEither[FutureUnlessShutdown](KeyName.create(name))
keyIdO <- findPubKeyIdByName(keyName)
.leftMap(err => s"Failure while looking for $typ key $name in public store: $err")
keyIdO <- EitherT.right(findPubKeyIdByName(keyName).value)
pubKey <- keyIdO.fold(
generateKey(Some(keyName))
.leftMap(err => s"Failure while generating $typ key for $name: $err")

View File

@ -27,8 +27,8 @@ object ProtocolVersionCompatibility {
release: ReleaseVersion = ReleaseVersion.current,
): NonEmpty[List[ProtocolVersion]] = {
val unstableAndBeta =
if (cantonNodeParameters.devVersionSupport && cantonNodeParameters.nonStandardConfig)
ProtocolVersion.unstable.forgetNE ++ ReleaseVersionToProtocolVersions
if (cantonNodeParameters.alphaVersionSupport && cantonNodeParameters.nonStandardConfig)
ProtocolVersion.alpha.forgetNE ++ ReleaseVersionToProtocolVersions
.getBetaProtocolVersions(release)
else if (cantonNodeParameters.betaVersionSupport)
ReleaseVersionToProtocolVersions.getBetaProtocolVersions(release)
@ -58,7 +58,7 @@ object ProtocolVersionCompatibility {
val unstable =
if (includeUnstableVersions)
ProtocolVersion.unstable.forgetNE
ProtocolVersion.alpha.forgetNE
else List.empty
ReleaseVersionToProtocolVersions.getOrElse(
@ -77,8 +77,8 @@ object ProtocolVersionCompatibility {
release: ReleaseVersion = ReleaseVersion.current,
): NonEmpty[List[ProtocolVersion]] = {
val unstableAndBeta =
if (cantonNodeParameters.devVersionSupport && cantonNodeParameters.nonStandardConfig)
ProtocolVersion.unstable.forgetNE ++ ReleaseVersionToProtocolVersions
if (cantonNodeParameters.alphaVersionSupport && cantonNodeParameters.nonStandardConfig)
ProtocolVersion.alpha.forgetNE ++ ReleaseVersionToProtocolVersions
.getBetaProtocolVersions(release)
else if (cantonNodeParameters.betaVersionSupport)
ReleaseVersionToProtocolVersions.getBetaProtocolVersions(release)
@ -108,7 +108,7 @@ object ProtocolVersionCompatibility {
val unstable =
if (includeUnstableVersions)
ProtocolVersion.unstable.forgetNE
ProtocolVersion.alpha.forgetNE
else List.empty
ReleaseVersionToProtocolVersions.getOrElse(

View File

@ -19,7 +19,7 @@ message VersionedMessageV1 {
}
message VersionedMessageV2 {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
string msg = 1;
int32 iValue = 2;
double dValue = 3;

View File

@ -29,10 +29,10 @@ trait CryptoPublicStoreTest extends BaseTest { this: AsyncWordSpec =>
"save encryption keys correctly when added incrementally" in {
val store = newStore
for {
_ <- store.storeEncryptionKey(encKey1, encKey1WithName.name).valueOrFail("store encKey1")
_ <- store.storeEncryptionKey(encKey2, None).valueOrFail("store encKey2")
result <- store.encryptionKeys.valueOrFail("get encryption keys")
result2 <- store.listEncryptionKeys.valueOrFail("list keys")
_ <- store.storeEncryptionKey(encKey1, encKey1WithName.name)
_ <- store.storeEncryptionKey(encKey2, None)
result <- store.encryptionKeys
result2 <- store.listEncryptionKeys
} yield {
result shouldEqual Set(encKey1, encKey2)
result2 shouldEqual Set(encKey1WithName, encKey2WithName)
@ -44,15 +44,15 @@ trait CryptoPublicStoreTest extends BaseTest { this: AsyncWordSpec =>
val store = newStore
val separateStore = newStore
for {
_ <- store.storeEncryptionKey(encKey1, encKey1WithName.name).valueOrFail("store encKey1")
_ <- store.storeEncryptionKey(encKey2, None).valueOrFail("store encKey2")
result1 <- separateStore.encryptionKey(encKey1.fingerprint).valueOrFail("read encKey1")
result2 <- separateStore.encryptionKey(encKey2.fingerprint).valueOrFail("read encKey2")
_ <- store.storeEncryptionKey(encKey1, encKey1WithName.name)
_ <- store.storeEncryptionKey(encKey2, None)
result1 <- separateStore.encryptionKey(encKey1.fingerprint).value
result2 <- separateStore.encryptionKey(encKey2.fingerprint).value
_ <- store.storeSigningKey(sigKey1, sigKey1WithName.name).valueOrFail("store sigKey1")
_ <- store.storeSigningKey(sigKey2, None).valueOrFail("store sigKey2")
result3 <- separateStore.signingKey(sigKey1.fingerprint).valueOrFail("read sigKey1")
result4 <- separateStore.signingKey(sigKey2.fingerprint).valueOrFail("read sigKey2")
_ <- store.storeSigningKey(sigKey1, sigKey1WithName.name)
_ <- store.storeSigningKey(sigKey2, None)
result3 <- separateStore.signingKey(sigKey1.fingerprint).value
result4 <- separateStore.signingKey(sigKey2.fingerprint).value
} yield {
result1 shouldEqual Some(encKey1)
result2 shouldEqual Some(encKey2)
@ -66,34 +66,46 @@ trait CryptoPublicStoreTest extends BaseTest { this: AsyncWordSpec =>
"save signing keys correctly when added incrementally" in {
val store = newStore
for {
_ <- store.storeSigningKey(sigKey1, sigKey1WithName.name).valueOrFail("store sigKey1")
_ <- store.storeSigningKey(sigKey2, None).valueOrFail("store sigKey2")
result <- store.signingKeys.valueOrFail("list keys")
result2 <- store.listSigningKeys.valueOrFail("list keys")
_ <- store.storeSigningKey(sigKey1, sigKey1WithName.name)
_ <- store.storeSigningKey(sigKey2, None)
result <- store.signingKeys
result2 <- store.listSigningKeys
} yield {
result shouldEqual Set(sigKey1, sigKey2)
result2 shouldEqual Set(sigKey1WithName, sigKey2WithName)
}
}.failOnShutdown
"delete public keys" in {
val store = newStore
for {
_ <- store.storeSigningKey(sigKey1, sigKey1WithName.name)
result1 <- store.signingKeys
_ <- store.deleteKey(sigKey1.id)
result2 <- store.signingKeys
_ <- store.storeSigningKey(sigKey1, None)
} yield {
result1 shouldEqual Set(sigKey1)
result2 shouldEqual Set()
}
}.failOnShutdown
"idempotent store of encryption keys" in {
val store = newStore
for {
_ <- store
.storeEncryptionKey(encKey1, encKey1WithName.name)
.valueOrFail("store key 1 with name")
_ <- store.storeEncryptionKey(encKey1, encKey1WithName.name)
// Should succeed
_ <- store
.storeEncryptionKey(encKey1, encKey1WithName.name)
.valueOrFail("store key 1 with name again")
_ <- store.storeEncryptionKey(encKey1, encKey1WithName.name)
// Should fail due to different name
failedInsert <- store.storeEncryptionKey(encKey1, None).value
_failedInsert <- loggerFactory.assertInternalErrorAsyncUS[IllegalStateException](
store.storeEncryptionKey(encKey1, None),
_.getMessage shouldBe s"Existing public key for ${encKey1.id} is different than inserted key",
)
result <- store.listEncryptionKeys.valueOrFail("listing encryption keys")
result <- store.listEncryptionKeys
} yield {
failedInsert.left.value shouldBe a[CryptoPublicStoreError]
result shouldEqual Set(encKey1WithName)
}
}.failOnShutdown
@ -103,19 +115,21 @@ trait CryptoPublicStoreTest extends BaseTest { this: AsyncWordSpec =>
for {
_ <- store
.storeSigningKey(sigKey1, sigKey1WithName.name)
.valueOrFail("store key 1 with name")
// Should succeed
_ <- store
.storeSigningKey(sigKey1, sigKey1WithName.name)
.valueOrFail("store key 1 with name again")
// Should fail due to different name
failedInsert <- store.storeSigningKey(sigKey1, None).value
_failedInsert <- loggerFactory.assertInternalErrorAsyncUS[IllegalStateException](
store.storeSigningKey(sigKey1, None),
_.getMessage should startWith(
s"Existing public key for ${sigKey1.id} is different than inserted key"
),
)
result <- store.listSigningKeys.valueOrFail("listing encryption keys")
result <- store.listSigningKeys
} yield {
failedInsert.left.value shouldBe a[CryptoPublicStoreError]
result shouldEqual Set(sigKey1WithName)
}
}.failOnShutdown

View File

@ -8,6 +8,9 @@ import org.scalatest.wordspec.AsyncWordSpec
class CryptoPublicStoreTestInMemory extends AsyncWordSpec with CryptoPublicStoreTest {
"InMemoryCryptoPublicStore" should {
behave like cryptoPublicStore(new InMemoryCryptoPublicStore, backedByDatabase = false)
behave like cryptoPublicStore(
new InMemoryCryptoPublicStore(loggerFactory),
backedByDatabase = false,
)
}
}

View File

@ -123,7 +123,7 @@ class HasProtocolVersionedWrapperTest extends AnyWordSpec with BaseTest {
// Used by the compiled string below
val stablePV = ProtocolVersion.createStable(10)
val unstablePV = ProtocolVersion.createUnstable(11)
val alphaPV = ProtocolVersion.createAlpha(11)
def name: String = "message"
@ -139,27 +139,27 @@ class HasProtocolVersionedWrapperTest extends AnyWordSpec with BaseTest {
): Assertion
}
clue("can use a stable proto message in an unstable protocol version") {
clue("can use a stable proto message in an alpha protocol version") {
assertCompiles(
"""
val _ = VersionedProtoConverter(unstablePV)(VersionedMessageV1)(
val _ = VersionedProtoConverter(alphaPV)(VersionedMessageV1)(
supportedProtoVersionMemoized(_)(fromProtoV1),
_.toProtoV1.toByteString
)"""
): Assertion
}
clue("can use an unstable proto message in an unstable protocol version") {
clue("can use an alpha proto message in an alpha protocol version") {
assertCompiles(
"""
val _ = VersionedProtoConverter(unstablePV)(VersionedMessageV2)(
val _ = VersionedProtoConverter(alphaPV)(VersionedMessageV2)(
supportedProtoVersionMemoized(_)(fromProtoV2),
_.toProtoV2.toByteString
)"""
): Assertion
}
clue("can not use an unstable proto message in a stable protocol version") {
clue("can not use an alpha proto message in a stable protocol version") {
assertTypeError(
"""
val _ = VersionedProtoConverter(stablePV)(VersionedMessageV2)(
@ -213,7 +213,7 @@ object HasProtocolVersionedWrapperTest {
protocolVersion 30 31 32 33 34 ...
*/
override val supportedProtoVersions = SupportedProtoVersions(
ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.createUnstable((basePV + 2).v))(
ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.createAlpha((basePV + 2).v))(
VersionedMessageV1
)(
supportedProtoVersionMemoized(_)(fromProtoV1),
@ -226,9 +226,9 @@ object HasProtocolVersionedWrapperTest {
supportedProtoVersionMemoized(_)(fromProtoV0),
_.toProtoV0.toByteString,
),
// Can use an unstable Protobuf message in an unstable protocol version
// Can use an alpha Protobuf message in an alpha protocol version
ProtoVersion(2) -> VersionedProtoConverter(
ProtocolVersion.createUnstable((basePV + 3).v)
ProtocolVersion.createAlpha((basePV + 3).v)
)(VersionedMessageV2)(
supportedProtoVersionMemoized(_)(fromProtoV2),
_.toProtoV2.toByteString,

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --target=2.1
name: ai-analysis

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --target=2.1
name: bank

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --target=2.1
name: doctor

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --target=2.1
name: health-insurance

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --target=2.1
name: medical-records

View File

@ -122,7 +122,7 @@ message OnboardingStateResponse {
}
message OnboardingStateForSequencer {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
com.digitalasset.canton.topology.admin.v30.TopologyTransactions topology_snapshot = 1;
com.digitalasset.canton.protocol.v30.StaticDomainParameters static_domain_parameters = 2;

View File

@ -11,6 +11,7 @@ service SequencerBftAdministrationService {
rpc AddPeerEndpoint(AddPeerEndpointRequest) returns (AddPeerEndpointResponse);
rpc RemovePeerEndpoint(RemovePeerEndpointRequest) returns (RemovePeerEndpointResponse);
rpc GetPeerNetworkStatus(GetPeerNetworkStatusRequest) returns (GetPeerNetworkStatusResponse);
rpc GetOrderingTopology(GetOrderingTopologyRequest) returns (GetOrderingTopologyResponse);
}
message AddPeerEndpointRequest {
@ -60,3 +61,12 @@ message GetPeerNetworkStatusRequest {
message GetPeerNetworkStatusResponse {
repeated PeerEndpointStatus statuses = 1;
}
message GetOrderingTopologyRequest {}
message GetOrderingTopologyResponse {
// The current epoch, through which the topology is valid.
int64 current_epoch = 1;
// The sequencer IDs of the active BFT ordering nodes in the network.
repeated string sequencer_ids = 2;
}

View File

@ -12,7 +12,7 @@ import "google/protobuf/wrappers.proto";
import "scalapb/scalapb.proto";
message SequencerSnapshot {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.AlphaProtoVersion";
int64 latest_timestamp = 1; // in microseconds of UTC time since Unix epoch
uint64 last_block_height = 2;

View File

@ -36,7 +36,7 @@ final case class DomainParametersConfig(
requiredHashAlgorithms: Option[NonEmpty[Set[HashAlgorithm]]] = None,
requiredCryptoKeyFormats: Option[NonEmpty[Set[CryptoKeyFormat]]] = None,
// TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version
override val devVersionSupport: Boolean = true,
override val alphaVersionSupport: Boolean = true,
override val betaVersionSupport: Boolean = false,
override val dontWarnOnDeprecatedPV: Boolean = false,
) extends ProtocolConfig
@ -48,7 +48,8 @@ final case class DomainParametersConfig(
param("requiredSymmetricKeySchemes", _.requiredSymmetricKeySchemes),
param("requiredHashAlgorithms", _.requiredHashAlgorithms),
param("requiredCryptoKeyFormats", _.requiredCryptoKeyFormats),
param("devVersionSupport", _.devVersionSupport),
param("alphaVersionSupport", _.alphaVersionSupport),
param("betaVersionSupport", _.betaVersionSupport),
param("dontWarnOnDeprecatedPV", _.dontWarnOnDeprecatedPV),
)

View File

@ -90,7 +90,7 @@ abstract class MediatorNodeConfigCommon(
*/
final case class MediatorNodeParameterConfig(
// TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version
override val devVersionSupport: Boolean = true,
override val alphaVersionSupport: Boolean = true,
override val betaVersionSupport: Boolean = false,
override val dontWarnOnDeprecatedPV: Boolean = false,
override val batching: BatchingConfig = BatchingConfig(),
@ -488,7 +488,7 @@ class MediatorNodeBootstrap(
timeouts = timeouts,
traceContextPropagation = parameters.tracing.propagation,
clientProtocolVersions =
if (parameterConfig.devVersionSupport) ProtocolVersion.supported
if (parameterConfig.alphaVersionSupport) ProtocolVersion.supported
else
// TODO(#15561) Remove NonEmpty construct once stableAndSupported is NonEmpty again
NonEmpty

View File

@ -125,7 +125,7 @@ class BftOrderingMetrics(
val batchesOrdered: Meter = openTelemetryMetricsFactory.meter(
MetricInfo(
prefix :+ s"ordered-batches",
prefix :+ "ordered-batches",
summary = "Batches ordered",
description = "Measures the total batches ordered.",
qualification = MetricQualification.Traffic,
@ -134,7 +134,7 @@ class BftOrderingMetrics(
val blocksOrdered: Meter = openTelemetryMetricsFactory.meter(
MetricInfo(
prefix :+ s"ordered-blocks",
prefix :+ "ordered-blocks",
summary = "Blocks ordered",
description = "Measures the total blocks ordered.",
qualification = MetricQualification.Traffic,

View File

@ -14,13 +14,13 @@ import com.digitalasset.canton.config.{
/** Various parameters for non-standard sequencer settings
*
* @param devVersionSupport if true, then dev version will be turned on, but we will brick this sequencer node if it is used for production.
* @param alphaVersionSupport if true, then dev version will be turned on, but we will brick this sequencer node if it is used for production.
* @param dontWarnOnDeprecatedPV if true, then this sequencer will not emit a warning when configured to use protocol version 2.0.0.
* @param maxConfirmationRequestsBurstFactor how forgiving the rate limit is in case of bursts (so rate limit starts after observing an initial burst of factor * max_rate commands)
*/
final case class SequencerNodeParameterConfig(
// TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version
override val devVersionSupport: Boolean = true,
override val alphaVersionSupport: Boolean = true,
override val betaVersionSupport: Boolean = false,
override val dontWarnOnDeprecatedPV: Boolean = false,
maxConfirmationRequestsBurstFactor: PositiveDouble = PositiveDouble.tryCreate(0.5),

View File

@ -7,12 +7,14 @@ import cats.implicits.*
import com.digitalasset.canton.config.RequireTypes.Port
import com.digitalasset.canton.networking.Endpoint
import com.digitalasset.canton.sequencer.admin.v30.{
GetOrderingTopologyResponse,
GetPeerNetworkStatusResponse,
PeerEndpoint,
PeerEndpointHealth as ProtoPeerEndpointHealth,
PeerEndpointHealthStatus as ProtoPeerEndpointHealthStatus,
PeerEndpointStatus as ProtoPeerEndpointStatus,
}
import com.digitalasset.canton.topology.SequencerId
object EnterpriseSequencerBftAdminData {
@ -89,4 +91,24 @@ object EnterpriseSequencerBftAdminData {
.sequence
.map(PeerNetworkStatus(_))
}
final case class OrderingTopology(currentEpoch: Long, sequencerIds: Seq[SequencerId]) {
def toProto: GetOrderingTopologyResponse =
GetOrderingTopologyResponse.of(currentEpoch, sequencerIds.map(_.toProtoPrimitive))
}
object OrderingTopology {
def fromProto(response: GetOrderingTopologyResponse): Either[String, OrderingTopology] =
response.sequencerIds
.map { sequencerIdString =>
for {
sequencerId <- SequencerId
.fromProtoPrimitive(sequencerIdString, "sequencerId")
.leftMap(_.toString)
} yield sequencerId
}
.sequence
.map(OrderingTopology(response.currentEpoch, _))
}
}

View File

@ -7,7 +7,6 @@ import cats.Order.*
import cats.data.EitherT
import cats.kernel.Order
import cats.syntax.either.*
import cats.syntax.functor.*
import cats.syntax.parallel.*
import cats.{Functor, Show}
import com.daml.nonempty.NonEmpty
@ -29,7 +28,7 @@ import com.digitalasset.canton.tracing.{HasTraceContext, TraceContext, Traced}
import com.digitalasset.canton.util.EitherTUtil.condUnitET
import com.digitalasset.canton.util.FutureInstances.*
import com.digitalasset.canton.util.ShowUtil.*
import com.digitalasset.canton.util.retry
import com.digitalasset.canton.util.{ErrorUtil, retry}
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.canton.{ProtoDeserializationError, SequencerCounter}
import com.google.common.annotations.VisibleForTesting
@ -668,14 +667,15 @@ trait SequencerStore extends SequencerMemberValidator with NamedLogging with Aut
def saveRecentCheckpoints(): Future[Unit] = for {
checkpoints <- checkpointsAtTimestamp(requestedTimestamp.immediatePredecessor)
_ <- checkpoints.toList.parTraverse { case (member, checkpoint) =>
lookupMember(member).map {
case Some(RegisteredMember(memberId, _)) =>
saveCounterCheckpoint(
memberId,
checkpoint,
).value
case _ => Right(())
}
for {
memberId <- lookupMember(member).map(
_.fold(ErrorUtil.invalidState(s"Member $member should be registered"))(_.memberId)
)
_ <- saveCounterCheckpoint(
memberId,
checkpoint,
).value
} yield ()
}
} yield ()

View File

@ -17,13 +17,7 @@ import com.digitalasset.canton.lifecycle.{FlagCloseable, HasCloseContext}
import com.digitalasset.canton.sequencing.protocol.{MessageId, SequencerErrors}
import com.digitalasset.canton.store.db.DbTest
import com.digitalasset.canton.time.NonNegativeFiniteDuration
import com.digitalasset.canton.topology.{
DefaultTestIdentities,
Member,
ParticipantId,
SequencerId,
UniqueIdentifier,
}
import com.digitalasset.canton.topology.{DefaultTestIdentities, Member, ParticipantId}
import com.digitalasset.canton.util.FutureInstances.*
import com.digitalasset.canton.{BaseTest, ProtocolVersionChecksAsyncWordSpec, SequencerCounter}
import com.google.protobuf.ByteString
@ -43,9 +37,7 @@ trait SequencerStoreTest
with FlagCloseable
with ProtocolVersionChecksAsyncWordSpec {
lazy val sequencerMember: Member = SequencerId(
UniqueIdentifier.tryFromProtoPrimitive("sequencer::namespace")
)
lazy val sequencerMember: Member = DefaultTestIdentities.sequencerId
def sequencerStore(mk: () => SequencerStore): Unit = {

View File

@ -13,5 +13,5 @@ object TestProtocolVersions {
/** A valid, supported protocol version that is not part of the released protocol versions.
*/
val UnreleasedValidPV: ProtocolVersion = ProtocolVersion.unstable.head
val UnreleasedValidPV: ProtocolVersion = ProtocolVersion.alpha.head
}

View File

@ -4,13 +4,13 @@
package com.digitalasset.canton.platform.apiserver.services.admin
import cats.data.EitherT
import com.digitalasset.daml.lf.archive.DamlLf.Archive
import com.daml.error.DamlError
import com.daml.logging.entries.LoggingValue.OfString
import com.digitalasset.canton.ledger.error.PackageServiceErrors.{InternalError, Validation}
import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext
import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.platform.apiserver.services.admin.ApiPackageManagementService.ErrorValidations
import com.digitalasset.daml.lf.archive.DamlLf.Archive
import com.digitalasset.daml.lf.archive.Decode
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.language.Ast

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --enable-interfaces=yes
name: carbonv1-tests

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --enable-interfaces=yes
name: carbonv2-tests

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
name: experimental-tests
source: .
version: 3.1.0

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --enable-interfaces=yes
name: model-tests

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
name: package-management-tests
source: .
version: 3.1.0

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --enable-interfaces=yes
name: semantic-tests

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
name: upgrade-tests
source: .
version: 1.0.0

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
name: upgrade-tests
source: .
version: 2.0.0

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
name: upgrade-tests
source: .
version: 3.0.0

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --target=2.1
name: JsonEncodingTest

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --target=2.dev
name: JsonEncodingTestDev

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240705.13166.0.v801ce9b3
sdk-version: 3.1.0-snapshot.20240708.13168.0.v7ed18470
build-options:
- --target=2.1
name: AdminWorkflows

View File

@ -882,7 +882,7 @@ object ParticipantNodeBootstrap {
override protected def createEngine(arguments: Arguments): Engine =
DAMLe.newEngine(
enableLfDev = arguments.parameterConfig.devVersionSupport,
enableLfDev = arguments.parameterConfig.alphaVersionSupport,
enableLfBeta = arguments.parameterConfig.betaVersionSupport,
enableStackTraces = arguments.parameterConfig.engine.enableEngineStackTraces,
iterationsBetweenInterruptions =

View File

@ -45,7 +45,7 @@ final case class ParticipantNodeParameters(
) extends CantonNodeParameters
with HasGeneralCantonNodeParameters {
override def dontWarnOnDeprecatedPV: Boolean = protocolConfig.dontWarnOnDeprecatedPV
override def devVersionSupport: Boolean = protocolConfig.devVersionSupport
override def alphaVersionSupport: Boolean = protocolConfig.alphaVersionSupport
override def betaVersionSupport: Boolean = protocolConfig.betaVersionSupport
}
@ -82,7 +82,7 @@ object ParticipantNodeParameters {
protocolConfig = ParticipantProtocolConfig(
Some(testedProtocolVersion),
// TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version
devVersionSupport = true,
alphaVersionSupport = true,
betaVersionSupport = true,
dontWarnOnDeprecatedPV = false,
),

View File

@ -95,7 +95,7 @@ object PartyNotificationConfig {
final case class ParticipantProtocolConfig(
minimumProtocolVersion: Option[ProtocolVersion],
override val devVersionSupport: Boolean,
override val alphaVersionSupport: Boolean,
override val betaVersionSupport: Boolean,
override val dontWarnOnDeprecatedPV: Boolean,
) extends ProtocolConfig
@ -331,7 +331,7 @@ object TestingTimeServiceConfig {
* Setting to zero will disable reusing recent time proofs and will instead always fetch a new proof.
* @param minimumProtocolVersion The minimum protocol version that this participant will speak when connecting to a domain
* @param initialProtocolVersion The initial protocol version used by the participant (default latest), e.g., used to create the initial topology transactions.
* @param devVersionSupport If set to true, will allow the participant to connect to a domain with dev protocol version and will turn on unsafe Daml LF versions.
* @param alphaVersionSupport If set to true, will allow the participant to connect to a domain with dev protocol version and will turn on unsafe Daml LF versions.
* @param dontWarnOnDeprecatedPV If true, then this participant will not emit a warning when connecting to a sequencer using a deprecated protocol version (such as 2.0.0).
* @param warnIfOverloadedFor If all incoming commands have been rejected due to PARTICIPANT_BACKPRESSURE during this interval, the participant will log a warning.
* @param excludeInfrastructureTransactions If set, infrastructure transactions (i.e. ping, bong and dar distribution) will be excluded from participant metering.
@ -357,7 +357,7 @@ final case class ParticipantNodeParameterConfig(
ProtocolVersion.latest
),
// TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version
devVersionSupport: Boolean = true,
alphaVersionSupport: Boolean = true,
BetaVersionSupport: Boolean = false,
dontWarnOnDeprecatedPV: Boolean = false,
warnIfOverloadedFor: Option[config.NonNegativeFiniteDuration] = Some(

View File

@ -41,7 +41,7 @@ object MockedNodeParameters {
override def loggingConfig: LoggingConfig = _loggingConfig
override def devVersionSupport: Boolean = ???
override def alphaVersionSupport: Boolean = ???
override def betaVersionSupport: Boolean = ???

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.crypto.provider.symbolic
import cats.data.EitherT
import cats.data.{EitherT, OptionT}
import com.digitalasset.canton.concurrent.DirectExecutionContext
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.crypto.*
@ -37,27 +37,34 @@ class SymbolicCrypto(
loggerFactory,
) {
private def process[E, A](
private def processE[E, A](
description: String
)(fn: TraceContext => EitherT[FutureUnlessShutdown, E, A]): A = {
)(fn: TraceContext => EitherT[FutureUnlessShutdown, E, A]): A =
process(description)(fn(_).valueOr(err => sys.error(s"Failed operation $description: $err")))
private def processO[A](
description: String
)(fn: TraceContext => OptionT[FutureUnlessShutdown, A]): Option[A] =
process(description)(fn(_).value)
private def process[A](description: String)(fn: TraceContext => FutureUnlessShutdown[A]): A = {
TraceContext.withNewTraceContext { implicit traceContext =>
timeouts.default.await(description) {
fn(traceContext)
.valueOr(err => sys.error(s"Failed operation $description: $err"))
.onShutdown(sys.error("aborted due to shutdown"))
}
}
}
def getOrGenerateSymbolicSigningKey(name: String): SigningPublicKey = {
process("get or generate symbolic signing key") { implicit traceContext =>
processO("get or generate symbolic signing key") { implicit traceContext =>
cryptoPublicStore
.findSigningKeyIdByName(KeyName.tryCreate(name))
}.getOrElse(generateSymbolicSigningKey(Some(name)))
}
def getOrGenerateSymbolicEncryptionKey(name: String): EncryptionPublicKey = {
process("get or generate symbolic encryption key") { implicit traceContext =>
processO("get or generate symbolic encryption key") { implicit traceContext =>
cryptoPublicStore
.findEncryptionKeyIdByName(KeyName.tryCreate(name))
}.getOrElse(generateSymbolicEncryptionKey(Some(name)))
@ -67,7 +74,7 @@ class SymbolicCrypto(
def generateSymbolicSigningKey(
name: Option[String] = None
): SigningPublicKey = {
process("generate symbolic signing key") { implicit traceContext =>
processE("generate symbolic signing key") { implicit traceContext =>
// We don't care about the signing key scheme in symbolic crypto
generateSigningKey(SigningKeyScheme.Ed25519, name.map(KeyName.tryCreate))
}
@ -75,7 +82,7 @@ class SymbolicCrypto(
/** Generates a new symbolic signing keypair but does not store it in the public store */
def newSymbolicSigningKeyPair(): SigningKeyPair = {
process("generate symbolic signing keypair") { implicit traceContext =>
processE("generate symbolic signing keypair") { implicit traceContext =>
// We don't care about the signing key scheme in symbolic crypto
privateCrypto
.generateSigningKeypair(SigningKeyScheme.Ed25519)
@ -85,7 +92,7 @@ class SymbolicCrypto(
def generateSymbolicEncryptionKey(
name: Option[String] = None
): EncryptionPublicKey =
process("generate symbolic encryption key") { implicit traceContext =>
processE("generate symbolic encryption key") { implicit traceContext =>
// We don't care about the encryption key scheme in symbolic crypto
generateEncryptionKey(
EncryptionKeyScheme.EciesP256HkdfHmacSha256Aes128Gcm,
@ -94,7 +101,7 @@ class SymbolicCrypto(
}
def newSymbolicEncryptionKeyPair(): EncryptionKeyPair = {
process("generate symbolic encryption keypair") { implicit traceContext =>
processE("generate symbolic encryption keypair") { implicit traceContext =>
// We don't care about the encryption key scheme in symbolic crypto
privateCrypto
.generateEncryptionKeypair(EncryptionKeyScheme.EciesP256HkdfHmacSha256Aes128Gcm)
@ -102,7 +109,7 @@ class SymbolicCrypto(
}
def sign(hash: Hash, signingKeyId: Fingerprint): Signature =
process("symbolic signing") { implicit traceContext =>
processE("symbolic signing") { implicit traceContext =>
privateCrypto.sign(hash, signingKeyId)
}
@ -128,7 +135,7 @@ object SymbolicCrypto {
DirectExecutionContext(loggerFactory.getLogger(this.getClass))
val pureCrypto = new SymbolicPureCrypto()
val cryptoPublicStore = new InMemoryCryptoPublicStore
val cryptoPublicStore = new InMemoryCryptoPublicStore(loggerFactory)
val cryptoPrivateStore = new InMemoryCryptoPrivateStore(releaseProtocolVersion, loggerFactory)
val privateCrypto = new SymbolicPrivateCrypto(pureCrypto, cryptoPrivateStore)

View File

@ -7,6 +7,7 @@ import cats.data.{EitherT, OptionT}
import com.digitalasset.canton.BaseTest
import com.digitalasset.canton.concurrent.{DirectExecutionContext, ExecutionContextMonitor}
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown}
import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality
import com.digitalasset.canton.util.ErrorUtil
import com.digitalasset.canton.util.Thereafter.syntax.*
@ -195,6 +196,20 @@ class SuppressingLogger private[logging] (
checkLogsInternalError(assertion),
)
def assertInternalErrorAsyncUS[T <: Throwable](
within: => FutureUnlessShutdown[_],
assertion: T => Assertion,
)(implicit c: ClassTag[T], pos: source.Position): FutureUnlessShutdown[Assertion] =
assertLogs(
within.transform {
case Success(_) =>
fail(s"An exception of type $c was expected, but no exception was thrown.")
case Failure(c(t)) => Success(UnlessShutdown.Outcome(assertion(t)))
case Failure(t) => fail(s"Exception has wrong type. Expected type: $c.", t)
}(directExecutionContext),
checkLogsInternalError(assertion),
)
/** Asserts that the sequence of logged warnings/errors meets a given sequence of assertions.
* Use this if the expected sequence of logged warnings/errors is deterministic.
*

View File

@ -1 +1 @@
20240708.13628.v22bd0def
20240709.13636.vd03d4972

View File

@ -1,7 +1,7 @@
canton {
parameters {
non-standard-config = yes
dev-version-support = yes
alpha-version-support = yes
}
participants {
build-and-lint-test {
@ -10,7 +10,7 @@ canton {
# This test is run in exclusive mode to avoid clashes.
ledger-api.port = 5011
admin-api.port = 5012
parameters.dev-version-support = yes
parameters.alpha-version-support = yes
http-ledger-api-experimental {
allow-insecure-tokens = true
server {

View File

@ -185,9 +185,9 @@ getCantonConfig conf@SandboxConfig{..} portFile mCerts (ledgerPort, adminPort, s
[ "type" Aeson..= ("sim-clock" :: T.Text) ]
| Static <- [timeMode] ]
-- TODO(https://github.com/DACH-NY/canton/issues/16458): once ProtocolVersion.latest
-- is stable, revert dev-version-support and non-standard-config to
-- is stable, revert alpha-version-support and non-standard-config to
-- devVersionSupport here and below.
, [ "dev-version-support" Aeson..= True]
, [ "alpha-version-support" Aeson..= True]
, [ "non-standard-config" Aeson..= True]
] )
, "participants" Aeson..= Aeson.object
@ -207,7 +207,7 @@ getCantonConfig conf@SandboxConfig{..} portFile mCerts (ledgerPort, adminPort, s
] ]
| Just secret <- [mbSharedSecret] ]
)
, "parameters" Aeson..= Aeson.object [ "dev-version-support" Aeson..= True ]
, "parameters" Aeson..= Aeson.object [ "alpha-version-support" Aeson..= True ]
] <>
[ "testing-time" Aeson..= Aeson.object [ "type" Aeson..= ("monotonic-time" :: T.Text) ]
| Static <- [timeMode]
@ -223,13 +223,13 @@ getCantonConfig conf@SandboxConfig{..} portFile mCerts (ledgerPort, adminPort, s
, storage
, "public-api" Aeson..= port sequencerPublicPort
, "admin-api" Aeson..= port sequencerAdminPort
, "parameters" Aeson..= Aeson.object [ "dev-version-support" Aeson..= True ]
, "parameters" Aeson..= Aeson.object [ "alpha-version-support" Aeson..= True ]
]
]
, "mediators" Aeson..= Aeson.object
[ "mediator1" Aeson..= Aeson.object
[ "admin-api" Aeson..= port mediatorAdminPort
, "parameters" Aeson..= Aeson.object [ "dev-version-support" Aeson..= True ]
, "parameters" Aeson..= Aeson.object [ "alpha-version-support" Aeson..= True ]
]
]
]

View File

@ -89,7 +89,7 @@ object CantonRunner {
val (adminPort, ledgerApiPort) = ports(i)
val participantId = config.participantIds(i)
// TODO(https://github.com/DACH-NY/canton/issues/16458): once ProtocolVersion.latest
// is stable, revert dev-version-support and non-standard-config to
// is stable, revert alpha-version-support and non-standard-config to
// devMode here and below.
s"""${participantId} {
| admin-api.port = ${adminPort.port}
@ -102,7 +102,7 @@ object CantonRunner {
| storage.type = memory
| parameters = {
| engine.enable-engine-stack-traces = true
| dev-version-support = yes
| alpha-version-support = yes
| disable-upgrade-validation = ${config.disableUpgradeValidation}
| }
| ${timeType.fold("")(x => "testing-time.type = " + x)}
@ -114,7 +114,7 @@ object CantonRunner {
s"""canton {
| parameters {
| non-standard-config = yes
| dev-version-support = yes
| alpha-version-support = yes
| ports-file = ${toJson(files.portsFile)}
| ${clockType.fold("")(x => "clock.type = " + x)}
| }