fix build_canton_3x_with_bazel (#18081)

* fix build_canton_3x_with_bazel

* Code pull

* fix issues with the new admin-api project

* fix the way we were passing java_conversions to the scalapb plugin

* Enable `proto_gen` to correctly generate Java conversions when using the `scalapb` plugin (#18087)

* Enable `proto_gen` to correctly generate Java conversions when using the `scalapb` plugin

* Remove reference to removed parameter

---------

Co-authored-by: Rafael Guglielmetti <rafael.guglielmetti@digitalasset.com>
Co-authored-by: Stefano Baghino <43749967+stefanobaghino-da@users.noreply.github.com>
Meriam Lachkar 2024-01-05 12:10:14 +01:00 committed by GitHub
parent 0b5c4b2a08
commit de9ce628d3
271 changed files with 7681 additions and 2882 deletions

View File

@ -51,7 +51,8 @@ def _proto_gen_impl(ctx):
descriptors = [depset for src in ctx.attr.srcs for depset in src[ProtoInfo].transitive_descriptor_sets.to_list()]
args = [
"--descriptor_set_in=" + descriptor_set_delim.join([depset.path for depset in descriptors]),
"--{}_out={}:{}".format(ctx.attr.plugin_name, ",".join(ctx.attr.plugin_options), sources_out.path),
"--{}_out={}".format(ctx.attr.plugin_name, sources_out.path),
"--{}_opt={}".format(ctx.attr.plugin_name, ",".join(ctx.attr.plugin_options)),
]
plugins = []
plugin_runfiles = []
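This hunk is the core of the scalapb fix named in the commit title: plugin options are no longer packed into the `--<plugin>_out` flag together with the output path, but passed through protoc's separate `--<plugin>_opt` flag. A minimal Scala sketch of the two argument shapes, with a hypothetical output path:

// Before: options and the output path were fused into a single flag.
val before = Seq("--scalapb_out=grpc,java_conversions:bazel-out/gen")
// After: the output path and the comma-joined options travel separately.
val after = Seq(
  "--scalapb_out=bazel-out/gen",
  "--scalapb_opt=grpc,java_conversions",
)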
@ -179,7 +180,7 @@ def _proto_scala_srcs(name, grpc):
"@com_github_grpc_grpc//src/proto/grpc/health/v1:health_proto_descriptor",
] if grpc else [])
def _proto_scala_deps(grpc, proto_deps):
def _proto_scala_deps(grpc, proto_deps, java_conversions):
return [
"@maven//:com_google_api_grpc_proto_google_common_protos",
"@maven//:com_google_protobuf_protobuf_java",
@ -194,7 +195,9 @@ def _proto_scala_deps(grpc, proto_deps):
] if grpc else []) + [
"%s_scala" % label
for label in proto_deps
]
] + ([
"@maven//:io_grpc_grpc_services",
] if java_conversions else [])
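The extra `io_grpc_grpc_services` dependency exists because, with `java_conversions` switched on, the generated scalapb companions expose converters to the corresponding protobuf-java classes, and for the gRPC health proto referenced above those Java classes ship in grpc-services. A hedged sketch of the converter surface (scalapb companions mix in `scalapb.JavaProtoSupport`, whose shape is roughly this):

// Sketch of the trait generated companions implement when java_conversions
// is enabled; the type parameters stand for the scalapb-generated and the
// protobuf-java-generated message classes.
trait JavaProtoSupport[ScalaPB, JavaPB] {
  def toJavaProto(scalaPb: ScalaPB): JavaPB
  def fromJavaProto(javaPb: JavaPB): ScalaPB
}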
def proto_jars(
name,
@ -289,10 +292,10 @@ def proto_jars(
srcs = _proto_scala_srcs(name, grpc),
plugin_exec = "//scala-protoc-plugins/scalapb:protoc-gen-scalapb",
plugin_name = "scalapb",
plugin_options = ["grpc"] if grpc else [],
plugin_options = (["grpc"] if grpc else []) + (["java_conversions"] if java_conversions else []),
)
all_scala_deps = _proto_scala_deps(grpc, proto_deps)
all_scala_deps = _proto_scala_deps(grpc, proto_deps, java_conversions)
scala_library(
name = "%s_scala" % name,

View File

@ -280,6 +280,42 @@ scala_library(
],
)
### community/admin-api
proto_library(
name = "community_admin-api_proto",
srcs = glob(["community/admin-api/src/main/protobuf/**/*.proto"]),
strip_import_prefix = "community/admin-api/src/main/protobuf",
deps = [
"@com_google_protobuf//:duration_proto",
"@com_google_protobuf//:empty_proto",
"@com_google_protobuf//:timestamp_proto",
"@com_google_protobuf//:wrappers_proto",
"@go_googleapis//google/rpc:status_proto",
"@scalapb//:scalapb_proto",
],
)
proto_gen(
name = "community_admin-api_proto_scala",
srcs = [":community_admin-api_proto"],
plugin_exec = "//scala-protoc-plugins/scalapb:protoc-gen-scalapb",
plugin_name = "scalapb",
plugin_options = [
"flat_package",
"grpc",
],
)
scala_library(
name = "community_admin-api",
srcs = glob(["community/admin-api/src/main/protobuf/com/digitalasset/canton/version/ProtocolVersionAnnotation.scala"]),
scalacopts = [
"-Xsource:3",
"-language:postfixOps",
],
)
### community/base ###
proto_library(
@ -309,7 +345,7 @@ proto_gen(
scala_library(
name = "community_base",
srcs = glob(["community/base/src/main/scala/**/*.scala"]) + [":community_base_proto_scala"],
srcs = glob(["community/base/src/main/scala/**/*.scala"]) + [":community_base_proto_scala"] + [":community_admin-api_proto_scala"],
plugins = [kind_projector_plugin],
resource_strip_prefix = "canton-3x/community/base/src/main/resources",
resources = glob(["community/base/src/main/resources/**"]),
@ -324,6 +360,7 @@ scala_library(
],
deps = [
":bindings-java",
":community_admin-api",
":community_buildinfo",
":community_ledger_ledger-common",
":community_lib_slick_slick-fork",
@ -416,6 +453,7 @@ scala_library(
],
deps = [
":bindings-java",
":community_admin-api",
":community_base",
":community_buildinfo",
":community_ledger_ledger-common",
@ -683,6 +721,7 @@ proto_library(
srcs = glob(["community/domain/src/main/protobuf/**/*.proto"]),
strip_import_prefix = "community/domain/src/main/protobuf",
deps = [
":community_admin-api_proto",
":community_base_proto",
"@com_google_protobuf//:duration_proto",
"@com_google_protobuf//:empty_proto",
@ -715,6 +754,7 @@ scala_library(
unused_dependency_checker_mode = "error",
deps = [
":bindings-java",
":community_admin-api",
":community_base",
":community_common",
":community_ledger_ledger-common",
@ -770,33 +810,12 @@ scala_library(
### community/participant/ ###
# For now we include only the package service as the rest is not standalone
proto_library(
name = "community_participant_admin_proto",
srcs = glob(["community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/package_service.proto"]),
strip_import_prefix = "community/participant/src/main/protobuf",
deps = [
"@com_google_protobuf//:empty_proto",
],
)
proto_gen(
name = "community_participant_admin_proto_scala",
srcs = [":community_participant_admin_proto"],
plugin_exec = "//scala-protoc-plugins/scalapb:protoc-gen-scalapb",
plugin_name = "scalapb",
plugin_options = [
"grpc",
"flat_package",
],
visibility = ["//daml-script:__subpackages__"],
)
proto_library(
name = "community_participant_proto",
srcs = glob(["community/participant/src/main/protobuf/**/*.proto"]),
strip_import_prefix = "community/participant/src/main/protobuf",
deps = [
":community_admin-api_proto",
":community_base_proto",
"@com_google_protobuf//:duration_proto",
"@com_google_protobuf//:empty_proto",
@ -818,10 +837,25 @@ proto_gen(
],
)
copy_file(
genrule(
name = "community_participant_admin-workflows_dar",
src = "community/participant/src/main/resources/dar/AdminWorkflows.dar",
out = "AdminWorkflows.dar",
srcs = glob(["community/participant/src/main/daml/*"]) + [
"//daml-script/daml3:daml3-script-2.1.dar",
],
outs = ["AdminWorkflows.dar"],
cmd = """
set -euo pipefail
project_dir=$$(dirname $(location community/participant/src/main/daml/daml.yaml))
tmpdir=$$(mktemp -d)
trap "rm -rf $$tmpdir" EXIT
cp -r $$project_dir/* $$tmpdir
cp $(location //daml-script/daml3:daml3-script-2.1.dar) $$tmpdir
sed -i 's/sdk-version:.*/sdk-version: {sdk_version}/' $$tmpdir/daml.yaml
sed -i 's/daml3-script/daml3-script-2.1.dar/' $$tmpdir/daml.yaml
$(location //compiler/damlc) build --project-root=$$tmpdir --ghc-option=-Werror -o $$PWD/$@
""".format(sdk_version = sdk_version),
tools = ["//compiler/damlc"],
visibility = ["//visibility:public"],
)
genrule(
@ -882,6 +916,7 @@ scala_library(
],
deps = [
":bindings-java",
":community_admin-api",
":community_base",
":community_common",
":community_ledger_ledger-api-core",
@ -966,6 +1001,7 @@ scala_library(
unused_dependency_checker_mode = "error",
deps = [
":bindings-java",
":community_admin-api",
":community_base",
":community_buildinfo",
":community_common",

View File

@ -3,15 +3,11 @@
syntax = "proto3";
package com.digitalasset.canton.domain.api.v0;
package com.digitalasset.canton.admin.domain.v0;
import "google/protobuf/wrappers.proto";
import "scalapb/scalapb.proto";
// Client configuration for how members should connect to the sequencer of a domain.
message SequencerConnection {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StorageProtoVersion";
oneof type {
Grpc grpc = 2;
}
@ -26,7 +22,3 @@ message SequencerConnection {
google.protobuf.BytesValue customTrustCertificates = 3;
}
}
enum SequencerApiType {
Grpc = 0;
}
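This file is the first of a series of renames that gather the admin protos under a common `com.digitalasset.canton.admin` package prefix (here `domain.api.v0` becomes `admin.domain.v0`). The generated Scala packages move along with the proto packages, so consumers update their imports accordingly, as the Scala diffs further down show; for example:

// before (old generated package):
// import com.digitalasset.canton.domain.api.v0.SequencerConnection
// after (new generated package):
import com.digitalasset.canton.admin.domain.v0.SequencerConnection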

View File

@ -3,15 +3,15 @@
syntax = "proto3";
package com.digitalasset.canton.domain.api.v1;
package com.digitalasset.canton.admin.domain.v1;
import "com/digitalasset/canton/domain/api/v0/sequencer_connection.proto";
import "com/digitalasset/canton/admin/domain/v0/sequencer_connection.proto";
import "scalapb/scalapb.proto";
message SequencerConnections {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StorageProtoVersion";
repeated com.digitalasset.canton.domain.api.v0.SequencerConnection sequencer_connections = 1;
repeated com.digitalasset.canton.admin.domain.v0.SequencerConnection sequencer_connections = 1;
// This field determines the minimum level of agreement, or consensus, required among the sequencers before a message
// is considered reliable and accepted by the system.

View File

@ -3,10 +3,10 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
import "com/digitalasset/canton/domain/api/v0/sequencer_connection.proto";
import "com/digitalasset/canton/time/admin/v0/time_tracker_config.proto";
import "com/digitalasset/canton/admin/time/v0/time_tracker_config.proto";
import "com/digitalasset/canton/admin/domain/v0/sequencer_connection.proto";
import "google/protobuf/duration.proto";
/**
@ -42,7 +42,7 @@ message DomainConnectionConfig {
// participant local identifier of the target domain
string domain_alias = 1;
// connection information to sequencer
repeated com.digitalasset.canton.domain.api.v0.SequencerConnection sequencerConnections = 2;
repeated com.digitalasset.canton.admin.domain.v0.SequencerConnection sequencerConnections = 2;
// if false, then domain needs to be manually connected to (default false)
bool manual_connect = 3;
// optional domainId (if TLS isn't to be trusted)
@ -54,7 +54,7 @@ message DomainConnectionConfig {
// maximum delay before an attempt to reconnect to the sequencer
google.protobuf.Duration maxRetryDelay = 7;
// configuration for how time is tracked and requested on this domain
com.digitalasset.canton.time.admin.v0.DomainTimeTrackerConfig timeTracker = 8;
com.digitalasset.canton.admin.time.v0.DomainTimeTrackerConfig timeTracker = 8;
// This field determines the minimum level of agreement, or consensus, required among the sequencers before a message
// is considered reliable and accepted by the system.
// The value set here should not be zero. However, to maintain backward compatibility with older clients, a zero value

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
service EnterpriseParticipantReplicationService {
rpc SetPassive(SetPassive.Request) returns (SetPassive.Response);

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
import "google/protobuf/timestamp.proto";

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
import "google/protobuf/empty.proto";

View File

@ -3,9 +3,9 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
import "com/digitalasset/canton/participant/admin/v0/domain_connectivity.proto";
import "com/digitalasset/canton/admin/participant/v0/domain_connectivity.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
/**
* Local participant service allowing to set the display name for a party

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
service PingService {
rpc ping(PingRequest) returns (PingResponse);

View File

@ -3,9 +3,9 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
import "com/digitalasset/canton/pruning/admin/v0/pruning.proto";
import "com/digitalasset/canton/admin/pruning/v0/pruning.proto";
import "google/protobuf/timestamp.proto";
// Canton-internal pruning service that prunes only canton state, but leaves the ledger-api
@ -32,25 +32,25 @@ service PruningService {
// or duration.
// - ``FAILED_PRECONDITION``: if automatic background pruning has not been enabled
// or if invoked on a participant running the Community Edition.
rpc SetSchedule(com.digitalasset.canton.pruning.admin.v0.SetSchedule.Request) returns (com.digitalasset.canton.pruning.admin.v0.SetSchedule.Response);
rpc SetSchedule(com.digitalasset.canton.admin.pruning.v0.SetSchedule.Request) returns (com.digitalasset.canton.admin.pruning.v0.SetSchedule.Response);
// Enable automatic pruning with participant-specific schedule parameters.
rpc SetParticipantSchedule(com.digitalasset.canton.pruning.admin.v0.SetParticipantSchedule.Request) returns (com.digitalasset.canton.pruning.admin.v0.SetParticipantSchedule.Response);
rpc SetParticipantSchedule(com.digitalasset.canton.admin.pruning.v0.SetParticipantSchedule.Request) returns (com.digitalasset.canton.admin.pruning.v0.SetParticipantSchedule.Response);
// Modify individual pruning schedule parameters.
// - ``INVALID_ARGUMENT``: if the payload is malformed or no schedule is configured
rpc SetCron(com.digitalasset.canton.pruning.admin.v0.SetCron.Request) returns (com.digitalasset.canton.pruning.admin.v0.SetCron.Response);
rpc SetMaxDuration(com.digitalasset.canton.pruning.admin.v0.SetMaxDuration.Request) returns (com.digitalasset.canton.pruning.admin.v0.SetMaxDuration.Response);
rpc SetRetention(com.digitalasset.canton.pruning.admin.v0.SetRetention.Request) returns (com.digitalasset.canton.pruning.admin.v0.SetRetention.Response);
rpc SetCron(com.digitalasset.canton.admin.pruning.v0.SetCron.Request) returns (com.digitalasset.canton.admin.pruning.v0.SetCron.Response);
rpc SetMaxDuration(com.digitalasset.canton.admin.pruning.v0.SetMaxDuration.Request) returns (com.digitalasset.canton.admin.pruning.v0.SetMaxDuration.Response);
rpc SetRetention(com.digitalasset.canton.admin.pruning.v0.SetRetention.Request) returns (com.digitalasset.canton.admin.pruning.v0.SetRetention.Response);
// Disable automatic pruning and remove the persisted schedule configuration.
rpc ClearSchedule(com.digitalasset.canton.pruning.admin.v0.ClearSchedule.Request) returns (com.digitalasset.canton.pruning.admin.v0.ClearSchedule.Response);
rpc ClearSchedule(com.digitalasset.canton.admin.pruning.v0.ClearSchedule.Request) returns (com.digitalasset.canton.admin.pruning.v0.ClearSchedule.Response);
// Retrieve the automatic pruning configuration.
rpc GetSchedule(com.digitalasset.canton.pruning.admin.v0.GetSchedule.Request) returns (com.digitalasset.canton.pruning.admin.v0.GetSchedule.Response);
rpc GetSchedule(com.digitalasset.canton.admin.pruning.v0.GetSchedule.Request) returns (com.digitalasset.canton.admin.pruning.v0.GetSchedule.Response);
// Retrieve the automatic, participant-specific pruning configuration.
rpc GetParticipantSchedule(com.digitalasset.canton.pruning.admin.v0.GetParticipantSchedule.Request) returns (com.digitalasset.canton.pruning.admin.v0.GetParticipantSchedule.Response);
rpc GetParticipantSchedule(com.digitalasset.canton.admin.pruning.v0.GetParticipantSchedule.Request) returns (com.digitalasset.canton.admin.pruning.v0.GetParticipantSchedule.Response);
}
message PruneRequest {

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
import "google/protobuf/empty.proto";

View File

@ -3,9 +3,9 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
import "com/digitalasset/canton/traffic/v0/member_traffic_status.proto";
import "com/digitalasset/canton/admin/traffic/v0/member_traffic_status.proto";
/*
* Service to retrieve information about the traffic state of the participant.
@ -19,5 +19,5 @@ message TrafficControlStateRequest {
}
message TrafficControlStateResponse {
com.digitalasset.canton.traffic.v0.MemberTrafficStatus traffic_state = 1;
com.digitalasset.canton.admin.traffic.v0.MemberTrafficStatus traffic_state = 1;
}

View File

@ -3,9 +3,8 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v0;
package com.digitalasset.canton.admin.participant.v0;
import "com/digitalasset/canton/protocol/v0/participant_transfer.proto";
import "google/protobuf/timestamp.proto";
// Supports transferring contracts from one domain to another
@ -20,6 +19,11 @@ service TransferService {
rpc TransferSearch(AdminTransferSearchQuery) returns (AdminTransferSearchResponse);
}
message TransferId {
string source_domain = 1;
google.protobuf.Timestamp timestamp = 2;
}
message AdminTransferOutRequest {
string submitting_party = 1;
string contract_id = 2;
@ -32,13 +36,13 @@ message AdminTransferOutRequest {
}
message AdminTransferOutResponse {
com.digitalasset.canton.protocol.v0.TransferId transfer_id = 1;
TransferId transfer_id = 1;
}
message AdminTransferInRequest {
string submitting_party_id = 1;
string target_domain = 2;
com.digitalasset.canton.protocol.v0.TransferId transfer_id = 3;
TransferId transfer_id = 3;
string application_id = 4;
string submission_id = 5; // optional
string workflow_id = 6; // optional
@ -60,7 +64,7 @@ message AdminTransferSearchResponse {
message TransferSearchResult {
string contract_id = 1;
com.digitalasset.canton.protocol.v0.TransferId transfer_id = 2;
TransferId transfer_id = 2;
string origin_domain = 3;
string target_domain = 4;
string submitting_party = 5;

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package com.digitalasset.canton.participant.admin.v1;
package com.digitalasset.canton.admin.participant.v1;
import "scalapb/scalapb.proto";

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package com.digitalasset.canton.pruning.admin.v0;
package com.digitalasset.canton.admin.pruning.v0;
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";

View File

@ -0,0 +1,15 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
syntax = "proto3";
import "scalapb/scalapb.proto";
package com.digitalasset.canton.admin;
option (scalapb.options) = {
scope: PACKAGE
preserve_unknown_fields: false
no_default_values_in_constructor: true
};
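This new file attaches package-scoped scalapb options to everything under `com.digitalasset.canton.admin`. A hedged sketch of what `no_default_values_in_constructor: true` means for the generated Scala (the message name is hypothetical):

// With the option on, generated case classes require every field explicitly:
final case class AdminMessage(alias: String)
// whereas scalapb's default behaviour emits proto3 defaults in the constructor:
final case class AdminMessageWithDefaults(alias: String = "")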

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package com.digitalasset.canton.time.admin.v0;
package com.digitalasset.canton.admin.time.v0;
import "google/protobuf/duration.proto";

View File

@ -3,7 +3,7 @@
syntax = "proto3";
package com.digitalasset.canton.traffic.v0;
package com.digitalasset.canton.admin.traffic.v0;
import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";

View File

@ -0,0 +1,35 @@
package com.digitalasset.canton.version
object ProtocolVersionAnnotation {
/** Type-level marker for whether a protocol version is stable */
sealed trait Status
/** Marker for unstable protocol versions */
sealed trait Unstable extends Status
/** Marker for stable protocol versions */
sealed trait Stable extends Status
}
/** Marker trait for Protobuf messages generated by scalapb
* that are used in some stable protocol versions
*
* Extending both [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Stable]] and
* [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Unstable]] means that [[StableProtoVersion]]
* messages can be used in both stable and unstable protocol versions.
*/
trait StableProtoVersion
extends ProtocolVersionAnnotation.Stable
with ProtocolVersionAnnotation.Unstable
/** Marker trait for Protobuf messages generated by scalapb
* that are used only in unstable protocol versions
*/
trait UnstableProtoVersion extends ProtocolVersionAnnotation.Unstable
/** Marker trait for Protobuf messages generated by scalapb
* that are used only to persist data in node storage.
* These messages are never exchanged as part of a protocol.
*/
trait StorageProtoVersion
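A small sketch of how these phantom markers constrain call sites (the message companion here is hypothetical; real scalapb companions pick a marker up via the `companion_extends` option visible in the protos above):

// Stands in for a scalapb-generated companion object:
object MyMessage extends StableProtoVersion

def requireStable(companion: ProtocolVersionAnnotation.Stable): Unit = ()

requireStable(MyMessage) // compiles, since StableProtoVersion extends Stable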

View File

@ -9,6 +9,7 @@ import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{
DefaultUnboundedTimeout,
TimeoutType,
}
import com.digitalasset.canton.admin.pruning.v0.LocatePruningTimestamp
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.{Fingerprint, PublicKey}
import com.digitalasset.canton.data.CantonTimestamp
@ -21,7 +22,6 @@ import com.digitalasset.canton.domain.mediator.admin.gprc.{
InitializeMediatorResponseX,
}
import com.digitalasset.canton.protocol.StaticDomainParameters
import com.digitalasset.canton.pruning.admin.v0.LocatePruningTimestamp
import com.digitalasset.canton.sequencing.SequencerConnections
import com.digitalasset.canton.topology.store.StoredTopologyTransactions
import com.digitalasset.canton.topology.transaction.TopologyChangeOp

View File

@ -10,6 +10,7 @@ import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{
TimeoutType,
}
import com.digitalasset.canton.admin.api.client.data.StaticDomainParameters
import com.digitalasset.canton.admin.pruning.v0.LocatePruningTimestamp
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.domain.admin.v2.SequencerInitializationServiceGrpc
@ -21,7 +22,6 @@ import com.digitalasset.canton.domain.sequencing.admin.grpc.{
InitializeSequencerResponseX,
}
import com.digitalasset.canton.domain.sequencing.sequencer.{LedgerIdentity, SequencerSnapshot}
import com.digitalasset.canton.pruning.admin.v0.LocatePruningTimestamp
import com.digitalasset.canton.topology.store.StoredTopologyTransactions
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX
import com.digitalasset.canton.topology.transaction.TopologyChangeOp

View File

@ -101,11 +101,19 @@ object LedgerApiV2Commands {
object UpdateService {
sealed trait UpdateTreeWrapper
sealed trait UpdateWrapper
sealed trait UpdateTreeWrapper {
def updateId: String
}
sealed trait UpdateWrapper {
def updateId: String
}
final case class TransactionTreeWrapper(transactionTree: TransactionTree)
extends UpdateTreeWrapper
final case class TransactionWrapper(transaction: Transaction) extends UpdateWrapper
extends UpdateTreeWrapper {
override def updateId: String = transactionTree.updateId
}
final case class TransactionWrapper(transaction: Transaction) extends UpdateWrapper {
override def updateId: String = transaction.updateId
}
sealed trait ReassignmentWrapper extends UpdateTreeWrapper with UpdateWrapper {
def reassignment: Reassignment
}
@ -125,9 +133,13 @@ object LedgerApiV2Commands {
}
}
final case class AssignedWrapper(reassignment: Reassignment, assignedEvent: AssignedEvent)
extends ReassignmentWrapper
extends ReassignmentWrapper {
override def updateId: String = reassignment.updateId
}
final case class UnassignedWrapper(reassignment: Reassignment, unassignedEvent: UnassignedEvent)
extends ReassignmentWrapper
extends ReassignmentWrapper {
override def updateId: String = reassignment.updateId
}
trait BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
override type Svc = UpdateServiceStub
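Lifting `updateId` onto the sealed traits lets callers read an update's ID uniformly instead of pattern-matching on the concrete wrapper; a minimal sketch (the enclosing `LedgerApiV2Commands.UpdateService` path is elided):

def render(wrapper: UpdateWrapper): String =
  s"saw update ${wrapper.updateId}" // one code path for transactions and reassignments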
@ -279,7 +291,7 @@ object LedgerApiV2Commands {
def submissionId: String
def minLedgerTimeAbs: Option[Instant]
def disclosedContracts: Seq[DisclosedContract]
def domainId: DomainId
def domainId: Option[DomainId]
def applicationId: String
protected def mkCommand: Commands = Commands(
@ -304,7 +316,7 @@ object LedgerApiV2Commands {
minLedgerTimeAbs = minLedgerTimeAbs.map(ProtoConverter.InstantConverter.toProtoPrimitive),
submissionId = submissionId,
disclosedContracts = disclosedContracts,
domainId = domainId.toProtoPrimitive,
domainId = domainId.map(_.toProtoPrimitive).getOrElse(""),
)
override def pretty: Pretty[this.type] =
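Since `domainId` is now an `Option[DomainId]`, absence is encoded on the wire via proto3's empty-string default, which is exactly what the `getOrElse("")` above does; a hedged, self-contained sketch of the round-trip, with a plain string standing in for `DomainId`:

val domainId: Option[String] = None // stands in for Option[DomainId]
val encoded: String = domainId.getOrElse("")                     // None encodes as ""
val decoded: Option[String] = Option(encoded).filter(_.nonEmpty) // "" decodes as None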
@ -337,7 +349,7 @@ object LedgerApiV2Commands {
override val submissionId: String,
override val minLedgerTimeAbs: Option[Instant],
override val disclosedContracts: Seq[DisclosedContract],
override val domainId: DomainId,
override val domainId: Option[DomainId],
override val applicationId: String,
) extends SubmitCommand
with BaseCommand[SubmitRequest, SubmitResponse, Unit] {
@ -457,7 +469,7 @@ object LedgerApiV2Commands {
override val submissionId: String,
override val minLedgerTimeAbs: Option[Instant],
override val disclosedContracts: Seq[DisclosedContract],
override val domainId: DomainId,
override val domainId: Option[DomainId],
override val applicationId: String,
) extends SubmitCommand
with BaseCommand[
@ -494,7 +506,7 @@ object LedgerApiV2Commands {
override val submissionId: String,
override val minLedgerTimeAbs: Option[Instant],
override val disclosedContracts: Seq[DisclosedContract],
override val domainId: DomainId,
override val domainId: Option[DomainId],
override val applicationId: String,
) extends SubmitCommand
with BaseCommand[SubmitAndWaitRequest, SubmitAndWaitForTransactionResponse, Transaction] {

View File

@ -16,29 +16,30 @@ import com.digitalasset.canton.admin.api.client.data.{
ListConnectedDomainsResult,
ParticipantPruningSchedule,
}
import com.digitalasset.canton.admin.participant.v0
import com.digitalasset.canton.admin.participant.v0.DomainConnectivityServiceGrpc.DomainConnectivityServiceStub
import com.digitalasset.canton.admin.participant.v0.EnterpriseParticipantReplicationServiceGrpc.EnterpriseParticipantReplicationServiceStub
import com.digitalasset.canton.admin.participant.v0.InspectionServiceGrpc.InspectionServiceStub
import com.digitalasset.canton.admin.participant.v0.PackageServiceGrpc.PackageServiceStub
import com.digitalasset.canton.admin.participant.v0.ParticipantRepairServiceGrpc.ParticipantRepairServiceStub
import com.digitalasset.canton.admin.participant.v0.PartyNameManagementServiceGrpc.PartyNameManagementServiceStub
import com.digitalasset.canton.admin.participant.v0.PingServiceGrpc.PingServiceStub
import com.digitalasset.canton.admin.participant.v0.PruningServiceGrpc.PruningServiceStub
import com.digitalasset.canton.admin.participant.v0.ResourceManagementServiceGrpc.ResourceManagementServiceStub
import com.digitalasset.canton.admin.participant.v0.TransferServiceGrpc.TransferServiceStub
import com.digitalasset.canton.admin.participant.v0.{ResourceLimits as _, *}
import com.digitalasset.canton.admin.pruning
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.TracedLogger
import com.digitalasset.canton.participant.admin.ResourceLimits
import com.digitalasset.canton.participant.admin.grpc.{
GrpcParticipantRepairService,
TransferSearchResult,
}
import com.digitalasset.canton.participant.admin.v0.DomainConnectivityServiceGrpc.DomainConnectivityServiceStub
import com.digitalasset.canton.participant.admin.v0.EnterpriseParticipantReplicationServiceGrpc.EnterpriseParticipantReplicationServiceStub
import com.digitalasset.canton.participant.admin.v0.InspectionServiceGrpc.InspectionServiceStub
import com.digitalasset.canton.participant.admin.v0.PackageServiceGrpc.PackageServiceStub
import com.digitalasset.canton.participant.admin.v0.ParticipantRepairServiceGrpc.ParticipantRepairServiceStub
import com.digitalasset.canton.participant.admin.v0.PartyNameManagementServiceGrpc.PartyNameManagementServiceStub
import com.digitalasset.canton.participant.admin.v0.PingServiceGrpc.PingServiceStub
import com.digitalasset.canton.participant.admin.v0.PruningServiceGrpc.PruningServiceStub
import com.digitalasset.canton.participant.admin.v0.ResourceManagementServiceGrpc.ResourceManagementServiceStub
import com.digitalasset.canton.participant.admin.v0.TransferServiceGrpc.TransferServiceStub
import com.digitalasset.canton.participant.admin.v0.{ResourceLimits as _, *}
import com.digitalasset.canton.participant.admin.{ResourceLimits, v0}
import com.digitalasset.canton.participant.domain.DomainConnectionConfig as CDomainConnectionConfig
import com.digitalasset.canton.participant.sync.UpstreamOffsetConvert
import com.digitalasset.canton.protocol.{LfContractId, TransferId, v0 as v0proto}
import com.digitalasset.canton.pruning.admin
import com.digitalasset.canton.protocol.{LfContractId, TransferId}
import com.digitalasset.canton.serialization.ProtoConverter.InstantConverter
import com.digitalasset.canton.topology.{DomainId, PartyId}
import com.digitalasset.canton.tracing.TraceContext
@ -820,14 +821,14 @@ object ParticipantAdminCommands {
override def handleResponse(response: AdminTransferOutResponse): Either[String, TransferId] =
response match {
case AdminTransferOutResponse(Some(transferIdP)) =>
TransferId.fromProtoV0(transferIdP).leftMap(_.toString)
TransferId.fromAdminProtoV0(transferIdP).leftMap(_.toString)
case AdminTransferOutResponse(None) => Left("Empty TransferOutResponse")
}
}
final case class TransferIn(
submittingParty: PartyId,
transferId: v0proto.TransferId,
transferId: v0.TransferId,
targetDomain: DomainAlias,
applicationId: LedgerApplicationId,
submissionId: String,
@ -1094,17 +1095,17 @@ object ParticipantAdminCommands {
retention: config.PositiveDurationSeconds,
pruneInternallyOnly: Boolean,
) extends Base[
admin.v0.SetParticipantSchedule.Request,
admin.v0.SetParticipantSchedule.Response,
pruning.v0.SetParticipantSchedule.Request,
pruning.v0.SetParticipantSchedule.Response,
Unit,
] {
override def createRequest(): Right[String, admin.v0.SetParticipantSchedule.Request] =
override def createRequest(): Right[String, pruning.v0.SetParticipantSchedule.Request] =
Right(
admin.v0.SetParticipantSchedule.Request(
pruning.v0.SetParticipantSchedule.Request(
Some(
admin.v0.ParticipantPruningSchedule(
pruning.v0.ParticipantPruningSchedule(
Some(
admin.v0.PruningSchedule(
pruning.v0.PruningSchedule(
cron,
Some(maxDuration.toProtoPrimitive),
Some(retention.toProtoPrimitive),
@ -1118,35 +1119,37 @@ object ParticipantAdminCommands {
override def submitRequest(
service: Svc,
request: admin.v0.SetParticipantSchedule.Request,
): Future[admin.v0.SetParticipantSchedule.Response] = service.setParticipantSchedule(request)
request: pruning.v0.SetParticipantSchedule.Request,
): Future[pruning.v0.SetParticipantSchedule.Response] =
service.setParticipantSchedule(request)
override def handleResponse(
response: admin.v0.SetParticipantSchedule.Response
response: pruning.v0.SetParticipantSchedule.Response
): Either[String, Unit] =
response match {
case admin.v0.SetParticipantSchedule.Response() => Right(())
case pruning.v0.SetParticipantSchedule.Response() => Right(())
}
}
final case class GetParticipantScheduleCommand()
extends Base[
admin.v0.GetParticipantSchedule.Request,
admin.v0.GetParticipantSchedule.Response,
pruning.v0.GetParticipantSchedule.Request,
pruning.v0.GetParticipantSchedule.Response,
Option[ParticipantPruningSchedule],
] {
override def createRequest(): Right[String, admin.v0.GetParticipantSchedule.Request] =
override def createRequest(): Right[String, pruning.v0.GetParticipantSchedule.Request] =
Right(
admin.v0.GetParticipantSchedule.Request()
pruning.v0.GetParticipantSchedule.Request()
)
override def submitRequest(
service: Svc,
request: admin.v0.GetParticipantSchedule.Request,
): Future[admin.v0.GetParticipantSchedule.Response] = service.getParticipantSchedule(request)
request: pruning.v0.GetParticipantSchedule.Request,
): Future[pruning.v0.GetParticipantSchedule.Response] =
service.getParticipantSchedule(request)
override def handleResponse(
response: admin.v0.GetParticipantSchedule.Response
response: pruning.v0.GetParticipantSchedule.Response
): Either[String, Option[ParticipantPruningSchedule]] =
response.schedule.fold(
Right(None): Either[String, Option[ParticipantPruningSchedule]]

View File

@ -5,9 +5,9 @@ package com.digitalasset.canton.admin.api.client.commands
import cats.syntax.either.*
import com.digitalasset.canton.admin.api.client.data.PruningSchedule
import com.digitalasset.canton.admin.pruning.v0
import com.digitalasset.canton.admin.pruning.v0.{PruningSchedule as PruningScheduleP, *}
import com.digitalasset.canton.config.PositiveDurationSeconds
import com.digitalasset.canton.pruning.admin.v0
import com.digitalasset.canton.pruning.admin.v0.{PruningSchedule as PruningScheduleP, *}
import io.grpc.ManagedChannel
import io.grpc.stub.AbstractStub

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton.admin.api.client.data
import com.digitalasset.canton.DomainAlias
import com.digitalasset.canton.participant.admin.{v0 as participantAdminV0}
import com.digitalasset.canton.admin.participant.{v0 as participantAdminV0}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.topology.*

View File

@ -8,6 +8,7 @@ import com.daml.nonempty.NonEmptyUtil
import com.digitalasset.canton.admin.api.client.data.crypto.*
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.config.{
CommunityCryptoConfig,
CryptoConfig,
NonNegativeFiniteDuration,
PositiveDurationSeconds,
@ -87,6 +88,9 @@ object StaticDomainParameters {
StaticDomainParameters(internal)
}
lazy val defaultsWithoutKMS: StaticDomainParameters =
defaults(CommunityCryptoConfig())
// This method is unsafe. We deliberately do not prefix its name with `try`, to keep the docs snippets nicer.
def defaults(
cryptoConfig: CryptoConfig

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.admin.api.client.data
import com.digitalasset.canton.pruning.admin.v0
import com.digitalasset.canton.admin.pruning.v0
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.{config, participant, scheduler}

View File

@ -14,6 +14,10 @@ import com.digitalasset.canton.domain.config.{
RemoteDomainConfig,
}
import com.digitalasset.canton.domain.mediator.{CommunityMediatorNodeXConfig, RemoteMediatorConfig}
import com.digitalasset.canton.domain.sequencing.config.{
CommunitySequencerNodeXConfig,
RemoteSequencerConfig,
}
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger}
import com.digitalasset.canton.participant.config.{
CommunityParticipantConfig,
@ -33,10 +37,12 @@ final case class CantonCommunityConfig(
domains: Map[InstanceName, CommunityDomainConfig] = Map.empty,
participants: Map[InstanceName, CommunityParticipantConfig] = Map.empty,
participantsX: Map[InstanceName, CommunityParticipantConfig] = Map.empty,
sequencersX: Map[InstanceName, CommunitySequencerNodeXConfig] = Map.empty,
mediatorsX: Map[InstanceName, CommunityMediatorNodeXConfig] = Map.empty,
remoteDomains: Map[InstanceName, RemoteDomainConfig] = Map.empty,
remoteParticipants: Map[InstanceName, RemoteParticipantConfig] = Map.empty,
remoteParticipantsX: Map[InstanceName, RemoteParticipantConfig] = Map.empty,
remoteSequencersX: Map[InstanceName, RemoteSequencerConfig] = Map.empty,
remoteMediatorsX: Map[InstanceName, RemoteMediatorConfig] = Map.empty,
monitoring: MonitoringConfig = MonitoringConfig(),
parameters: CantonParameters = CantonParameters(),
@ -47,6 +53,7 @@ final case class CantonCommunityConfig(
override type DomainConfigType = CommunityDomainConfig
override type ParticipantConfigType = CommunityParticipantConfig
override type MediatorNodeXConfigType = CommunityMediatorNodeXConfig
override type SequencerNodeXConfigType = CommunitySequencerNodeXConfig
/** renders the config as json (used for dumping config for diagnostic purposes) */
override def dumpString: String = CantonCommunityConfig.makeConfidentialString(this)
@ -102,6 +109,8 @@ object CantonCommunityConfig {
deriveReader[CommunityDomainConfig].applyDeprecations
implicit val communityParticipantConfigReader: ConfigReader[CommunityParticipantConfig] =
deriveReader[CommunityParticipantConfig].applyDeprecations
implicit val communitySequencerNodeXConfigReader: ConfigReader[CommunitySequencerNodeXConfig] =
deriveReader[CommunitySequencerNodeXConfig]
implicit val communityMediatorNodeXConfigReader: ConfigReader[CommunityMediatorNodeXConfig] =
deriveReader[CommunityMediatorNodeXConfig]
@ -116,6 +125,8 @@ object CantonCommunityConfig {
deriveWriter[CommunityDomainConfig]
implicit val communityParticipantConfigWriter: ConfigWriter[CommunityParticipantConfig] =
deriveWriter[CommunityParticipantConfig]
implicit val communitySequencerNodeXConfigWriter: ConfigWriter[CommunitySequencerNodeXConfig] =
deriveWriter[CommunitySequencerNodeXConfig]
implicit val communityMediatorNodeXConfigWriter: ConfigWriter[CommunityMediatorNodeXConfig] =
deriveWriter[CommunityMediatorNodeXConfig]

View File

@ -42,6 +42,13 @@ import com.digitalasset.canton.domain.mediator.{
MediatorNodeParameters,
RemoteMediatorConfig,
}
import com.digitalasset.canton.domain.sequencing.config.{
RemoteSequencerConfig,
SequencerNodeConfigCommon,
SequencerNodeInitXConfig,
SequencerNodeParameterConfig,
SequencerNodeParameters,
}
import com.digitalasset.canton.domain.sequencing.sequencer.*
import com.digitalasset.canton.environment.CantonNodeParameters
import com.digitalasset.canton.http.{HttpApiConfig, StaticContentConfig, WebsocketConfig}
@ -349,6 +356,7 @@ trait CantonConfig {
ParticipantConfigType,
]
type MediatorNodeXConfigType <: MediatorNodeConfigCommon
type SequencerNodeXConfigType <: SequencerNodeConfigCommon
/** all domains that this Canton process can operate
*
@ -384,6 +392,23 @@ trait CantonConfig {
n.unwrap -> c
}
def sequencersX: Map[InstanceName, SequencerNodeXConfigType]
/** Use `sequencersX` instead!
*/
def sequencersByStringX: Map[String, SequencerNodeXConfigType] = sequencersX.map { case (n, c) =>
n.unwrap -> c
}
def remoteSequencersX: Map[InstanceName, RemoteSequencerConfig]
/** Use `remoteSequencersX` instead!
*/
def remoteSequencersByStringX: Map[String, RemoteSequencerConfig] = remoteSequencersX.map {
case (n, c) =>
n.unwrap -> c
}
def mediatorsX: Map[InstanceName, MediatorNodeXConfigType]
/** Use `mediatorsX` instead!
@ -499,6 +524,21 @@ trait CantonConfig {
InstanceName.tryCreate(name)
)
private lazy val sequencerNodeParametersX_ : Map[InstanceName, SequencerNodeParameters] =
sequencersX.fmap { sequencerNodeXConfig =>
SequencerNodeParameters(
general = CantonNodeParameterConverter.general(this, sequencerNodeXConfig),
protocol = CantonNodeParameterConverter.protocol(this, sequencerNodeXConfig.parameters),
maxBurstFactor = sequencerNodeXConfig.parameters.maxBurstFactor,
)
}
private[canton] def sequencerNodeParametersX(name: InstanceName): SequencerNodeParameters =
nodeParametersFor(sequencerNodeParametersX_, "sequencer-x", name)
private[canton] def sequencerNodeParametersByStringX(name: String): SequencerNodeParameters =
sequencerNodeParametersX(InstanceName.tryCreate(name))
private lazy val mediatorNodeParametersX_ : Map[InstanceName, MediatorNodeParameters] =
mediatorsX.fmap { mediatorNodeConfig =>
MediatorNodeParameters(
@ -800,9 +840,6 @@ object CantonConfig {
deriveReader[CryptoSchemeConfig[S]]
lazy implicit val communityCryptoReader: ConfigReader[CommunityCryptoConfig] =
deriveReader[CommunityCryptoConfig]
lazy implicit val apiTypeGrpcConfigReader: ConfigReader[ApiType.Grpc.type] =
deriveReader[ApiType.Grpc.type]
lazy implicit val apiTypeConfigReader: ConfigReader[ApiType] = deriveReader[ApiType]
lazy implicit val clientConfigReader: ConfigReader[ClientConfig] = deriveReader[ClientConfig]
lazy implicit val remoteDomainConfigReader: ConfigReader[RemoteDomainConfig] =
deriveReader[RemoteDomainConfig]
@ -935,8 +972,22 @@ object CantonConfig {
lazy implicit val communityNewDatabaseSequencerWriterConfigLowLatencyReader
: ConfigReader[SequencerWriterConfig.LowLatency] =
deriveReader[SequencerWriterConfig.LowLatency]
lazy implicit val sequencerNodeInitXConfigReader: ConfigReader[SequencerNodeInitXConfig] =
deriveReader[SequencerNodeInitXConfig]
.enableNestedOpt("auto-init", _.copy(identity = None))
lazy implicit val communitySequencerConfigReader: ConfigReader[CommunitySequencerConfig] =
deriveReader[CommunitySequencerConfig]
lazy implicit val sequencerNodeParametersConfigReader
: ConfigReader[SequencerNodeParameterConfig] =
deriveReader[SequencerNodeParameterConfig]
lazy implicit val SequencerHealthConfigReader: ConfigReader[SequencerHealthConfig] =
deriveReader[SequencerHealthConfig]
lazy implicit val remoteSequencerConfigGrpcReader: ConfigReader[RemoteSequencerConfig.Grpc] =
deriveReader[RemoteSequencerConfig.Grpc]
lazy implicit val remoteSequencerConfigReader: ConfigReader[RemoteSequencerConfig] =
deriveReader[RemoteSequencerConfig]
// since the vast majority of users will use gRPC, default to it so that they don't need to specify `type = grpc`
.orElse(ConfigReader[RemoteSequencerConfig.Grpc])
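The `orElse` fallback above is what allows a remote sequencer block to omit the `type = grpc` discriminator entirely. A self-contained sketch of the pattern with pureconfig (the `Transport`/`Grpc` types are hypothetical):

import pureconfig.*
import pureconfig.generic.semiauto.*

object TransportConfig {
  sealed trait Transport
  final case class Grpc(port: Int) extends Transport

  implicit val grpcReader: ConfigReader[Grpc] = deriveReader[Grpc]
  // Try the tagged sealed-trait encoding first; when no `type` field is
  // present, fall back to reading the block directly as Grpc.
  implicit val transportReader: ConfigReader[Transport] =
    deriveReader[Transport].orElse(ConfigReader[Grpc])
}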
lazy implicit val mediatorNodeParameterConfigReader: ConfigReader[MediatorNodeParameterConfig] =
deriveReader[MediatorNodeParameterConfig]
lazy implicit val remoteMediatorConfigReader: ConfigReader[RemoteMediatorConfig] =
@ -1216,9 +1267,6 @@ object CantonConfig {
deriveWriter[CommunityAdminServerConfig]
lazy implicit val tlsBaseServerConfigWriter: ConfigWriter[TlsBaseServerConfig] =
deriveWriter[TlsBaseServerConfig]
lazy implicit val apiTypeGrpcConfigWriter: ConfigWriter[ApiType.Grpc.type] =
deriveWriter[ApiType.Grpc.type]
lazy implicit val apiTypeConfigWriter: ConfigWriter[ApiType] = deriveWriter[ApiType]
lazy implicit val communityPublicServerConfigWriter: ConfigWriter[CommunityPublicServerConfig] =
deriveWriter[CommunityPublicServerConfig]
lazy implicit val clockConfigRemoteClockWriter: ConfigWriter[ClockConfig.RemoteClock] =
@ -1314,8 +1362,19 @@ object CantonConfig {
lazy implicit val communityDatabaseSequencerWriterConfigLowLatencyWriter
: ConfigWriter[SequencerWriterConfig.LowLatency] =
deriveWriter[SequencerWriterConfig.LowLatency]
lazy implicit val sequencerNodeInitXConfigWriter: ConfigWriter[SequencerNodeInitXConfig] =
deriveWriter[SequencerNodeInitXConfig]
lazy implicit val communitySequencerConfigWriter: ConfigWriter[CommunitySequencerConfig] =
deriveWriter[CommunitySequencerConfig]
lazy implicit val sequencerNodeParameterConfigWriter
: ConfigWriter[SequencerNodeParameterConfig] =
deriveWriter[SequencerNodeParameterConfig]
lazy implicit val SequencerHealthConfigWriter: ConfigWriter[SequencerHealthConfig] =
deriveWriter[SequencerHealthConfig]
lazy implicit val remoteSequencerConfigGrpcWriter: ConfigWriter[RemoteSequencerConfig.Grpc] =
deriveWriter[RemoteSequencerConfig.Grpc]
lazy implicit val remoteSequencerConfigWriter: ConfigWriter[RemoteSequencerConfig] =
deriveWriter[RemoteSequencerConfig]
lazy implicit val mediatorNodeParameterConfigWriter: ConfigWriter[MediatorNodeParameterConfig] =
deriveWriter[MediatorNodeParameterConfig]
lazy implicit val remoteMediatorConfigWriter: ConfigWriter[RemoteMediatorConfig] =

View File

@ -154,10 +154,12 @@ object CommunityConfigValidations
domains,
participants,
participantsX,
sequencersX,
mediatorsX,
remoteDomains,
remoteParticipants,
remoteParticipantsX,
remoteSequencersX,
remoteMediatorsX,
_,
_,
@ -174,6 +176,8 @@ object CommunityConfigValidations
remoteParticipantsX,
mediatorsX,
remoteMediatorsX,
sequencersX,
remoteSequencersX,
)
.exists(_.nonEmpty),
(),
@ -182,9 +186,6 @@ object CommunityConfigValidations
}
private[config] val backwardsCompatibleLoggingConfigErr =
"Inconsistent configuration of canton.monitoring.log-message-payloads and canton.monitoring.logging.api.message-payloads. Please use the latter in your configuration"
private def developmentProtocolSafetyCheckDomains(
config: CantonConfig
): Validated[NonEmpty[Seq[String]], Unit] = {

View File

@ -26,6 +26,7 @@ import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.environment.Environment
import com.digitalasset.canton.lifecycle.{FlagCloseable, Lifecycle}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.participant.ParticipantNodeCommon
import com.digitalasset.canton.protocol.SerializableContract
import com.digitalasset.canton.sequencing.{
GrpcSequencerConnection,
@ -346,6 +347,16 @@ trait ConsoleEnvironment extends NamedLogging with FlagCloseable with NoTracing
environment.config.remoteDomainsByString.keys.map(createRemoteDomainReference).toSeq,
)
lazy val sequencersX: NodeReferences[
SequencerNodeReferenceX,
RemoteSequencerNodeReferenceX,
LocalSequencerNodeReferenceX,
] =
NodeReferences(
environment.config.sequencersByStringX.keys.map(createSequencerReferenceX).toSeq,
environment.config.remoteSequencersByStringX.keys.map(createRemoteSequencerReferenceX).toSeq,
)
lazy val mediatorsX
: NodeReferences[MediatorReferenceX, RemoteMediatorReferenceX, LocalMediatorReferenceX] =
NodeReferences(
@ -367,8 +378,20 @@ trait ConsoleEnvironment extends NamedLogging with FlagCloseable with NoTracing
LocalInstanceReferenceCommon,
] = {
NodeReferences(
mergeLocalInstances(participants.local, participantsX.local, domains.local),
mergeRemoteInstances(participants.remote, participantsX.remote, domains.remote),
mergeLocalInstances(
participants.local,
participantsX.local,
domains.local,
sequencersX.local,
mediatorsX.local,
),
mergeRemoteInstances(
participants.remote,
participantsX.remote,
domains.remote,
sequencersX.remote,
mediatorsX.remote,
),
)
}
@ -430,6 +453,22 @@ trait ConsoleEnvironment extends NamedLogging with FlagCloseable with NoTracing
d,
)
)
val localMediatorXBinds: Seq[TopLevelValue[_]] =
mediatorsX.local.map(d =>
TopLevelValue(d.name, helpText("local mediator-x", d.name), d, nodeTopic)
)
val remoteMediatorXBinds: Seq[TopLevelValue[_]] =
mediatorsX.remote.map(d =>
TopLevelValue(d.name, helpText("remote mediator-x", d.name), d, nodeTopic)
)
val localSequencerXBinds: Seq[TopLevelValue[_]] =
sequencersX.local.map(d =>
TopLevelValue(d.name, helpText("local sequencer-x", d.name), d, nodeTopic)
)
val remoteSequencerXBinds: Seq[TopLevelValue[_]] =
sequencersX.remote.map(d =>
TopLevelValue(d.name, helpText("remote sequencer-x", d.name), d, nodeTopic)
)
val clockBinds: Option[TopLevelValue[_]] =
environment.simClock.map(cl =>
TopLevelValue("clock", "Simulated time", new SimClockCommand(cl))
@ -437,7 +476,7 @@ trait ConsoleEnvironment extends NamedLogging with FlagCloseable with NoTracing
val referencesTopic = Seq(topicGenericNodeReferences)
localParticipantBinds ++ remoteParticipantBinds ++
localParticipantXBinds ++ remoteParticipantXBinds ++
localDomainBinds ++ remoteDomainBinds ++ clockBinds.toList :+
localDomainBinds ++ remoteDomainBinds ++ localSequencerXBinds ++ remoteSequencerXBinds ++ localMediatorXBinds ++ remoteMediatorXBinds ++ clockBinds.toList :+
TopLevelValue(
"participants",
"All participant nodes" + genericNodeReferencesDoc,
@ -455,6 +494,18 @@ trait ConsoleEnvironment extends NamedLogging with FlagCloseable with NoTracing
.Partial("domains", "All domain nodes" + genericNodeReferencesDoc, referencesTopic),
domains,
) :+
TopLevelValue(
"mediatorsX",
"All mediator-x nodes" + genericNodeReferencesDoc,
mediatorsX,
referencesTopic,
) :+
TopLevelValue(
"sequencersX",
"All sequencer-x nodes" + genericNodeReferencesDoc,
sequencersX,
referencesTopic,
) :+
TopLevelValue("nodes", "All nodes" + genericNodeReferencesDoc, nodes, referencesTopic)
}
@ -497,6 +548,12 @@ trait ConsoleEnvironment extends NamedLogging with FlagCloseable with NoTracing
protected def createDomainReference(name: String): DomainLocalRef
protected def createRemoteDomainReference(name: String): DomainRemoteRef
private def createSequencerReferenceX(name: String): LocalSequencerNodeReferenceX =
new LocalSequencerNodeReferenceX(this, name)
private def createRemoteSequencerReferenceX(name: String): RemoteSequencerNodeReferenceX =
new RemoteSequencerNodeReferenceX(this, name)
private def createMediatorReferenceX(name: String): LocalMediatorReferenceX =
new LocalMediatorReferenceX(this, name)
@ -551,11 +608,13 @@ object ConsoleEnvironment {
): ParticipantReferencesExtensions =
new ParticipantReferencesExtensions(participants)
implicit def toLocalParticipantReferencesExtensions(
participants: Seq[LocalParticipantReferenceCommon]
implicit def toLocalParticipantReferencesExtensions[ParticipantNodeT <: ParticipantNodeCommon](
participants: Seq[LocalParticipantReferenceCommon[ParticipantNodeT]]
)(implicit
consoleEnvironment: ConsoleEnvironment
): LocalParticipantReferencesExtensions[LocalParticipantReferenceCommon] =
): LocalParticipantReferencesExtensions[ParticipantNodeT, LocalParticipantReferenceCommon[
ParticipantNodeT
]] =
new LocalParticipantReferencesExtensions(participants)
/** Implicitly map strings to DomainAlias, Fingerprint and Identifier

View File

@ -6,6 +6,7 @@ package com.digitalasset.canton.console
import better.files.File
import cats.syntax.either.*
import cats.syntax.functor.*
import cats.syntax.functorFilter.*
import ch.qos.logback.classic.spi.ILoggingEvent
import ch.qos.logback.classic.{Level, Logger}
import ch.qos.logback.core.spi.AppenderAttachable
@ -22,8 +23,10 @@ import com.daml.ledger.api.v1.value.{
Value,
}
import com.daml.lf.value.Value.ContractId
import com.digitalasset.canton.DomainAlias
import com.daml.nonempty.NonEmpty
import com.daml.nonempty.NonEmptyReturningOps.*
import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.ContractData
import com.digitalasset.canton.admin.api.client.data
import com.digitalasset.canton.admin.api.client.data.{ListPartiesResult, TemplateId}
import com.digitalasset.canton.concurrent.Threading
import com.digitalasset.canton.config.NonNegativeDuration
@ -38,10 +41,24 @@ import com.digitalasset.canton.participant.config.{AuthServiceConfig, BasePartic
import com.digitalasset.canton.participant.ledger.api.JwtTokenUtilities
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.sequencing.SequencerConnections
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.{
GenericSignedTopologyTransactionX,
PositiveSignedTopologyTransactionX,
}
import com.digitalasset.canton.topology.transaction.{
DecentralizedNamespaceDefinitionX,
NamespaceDelegationX,
OwnerToKeyMappingX,
SignedTopologyTransactionX,
TopologyChangeOpX,
}
import com.digitalasset.canton.tracing.{NoTracing, TraceContext}
import com.digitalasset.canton.util.BinaryFileUtil
import com.digitalasset.canton.util.{BinaryFileUtil, EitherUtil}
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.canton.{DomainAlias, SequencerAlias}
import com.google.protobuf.ByteString
import com.typesafe.scalalogging.LazyLogging
import io.circe.Encoder
@ -751,6 +768,217 @@ trait ConsoleMacros extends NamedLogging with NoTracing {
}
}
@Help.Summary("Functions to bootstrap/setup decentralized namespaces or full domains")
@Help.Group("Bootstrap")
object bootstrap extends Helpful {
@Help.Summary("Bootstraps a decentralized namespace for the provided owners")
@Help.Description(
"""Returns the decentralized namespace, the fully authorized transaction of its definition, as well
|as all root certificates of the owners. This allows other nodes to import and
|fully validate the decentralized namespace definition.
|After this call has finished successfully, all of the owners have stored the co-owners' identity topology
|transactions as well as the fully authorized decentralized namespace definition in the specified topology store."""
)
def decentralized_namespace(
owners: Seq[InstanceReferenceX],
store: String = AuthorizedStore.filterName,
): (Namespace, Seq[GenericSignedTopologyTransactionX]) = {
val decentralizedNamespaceDefinition = NonEmpty
.from(owners)
.getOrElse(
throw new IllegalArgumentException(
"There must be at least 1 owner for a decentralizedNamespace."
)
)
.map(
_.topology.decentralized_namespaces.propose(
owners.map(_.id.member.uid.namespace).toSet,
PositiveInt.tryCreate(1.max(owners.size - 1)),
store = store,
)
)
// merging the signatures here is an "optimization" so that later we only upload a single
// decentralizedNamespace transaction, instead of a transaction per owner.
.reduceLeft[SignedTopologyTransactionX[
TopologyChangeOpX,
DecentralizedNamespaceDefinitionX,
]]((txA, txB) => txA.addSignatures(txB.signatures.forgetNE.toSeq))
val ownerNSDs = owners.flatMap(_.topology.transactions.identity_transactions())
val foundingTransactions = ownerNSDs :+ decentralizedNamespaceDefinition
owners.foreach(_.topology.transactions.load(foundingTransactions, store = store))
(decentralizedNamespaceDefinition.transaction.mapping.namespace, foundingTransactions)
}
private def expected_namespace(
owners: NonEmpty[Set[InstanceReferenceX]]
): Either[String, Option[Namespace]] = {
val expectedNamespace =
DecentralizedNamespaceDefinitionX.computeNamespace(
owners.forgetNE.map(_.id.member.uid.namespace)
)
val recordedNamespaces =
owners.map(
_.topology.decentralized_namespaces
.list(filterStore = AuthorizedStore.filterName)
.collectFirst {
case result if result.item.namespace == expectedNamespace => result.item.namespace
}
)
Either.cond(
recordedNamespaces.sizeIs == 1,
recordedNamespaces.head1,
"the domain owners do not agree on the decentralizedNamespace",
)
}
private def in_domain(
sequencers: NonEmpty[Set[SequencerNodeReferenceX]],
mediators: NonEmpty[Set[MediatorReferenceX]],
)(domainId: DomainId): Either[String, Unit] =
EitherUtil.condUnitE(
sequencers.forall(_.health.status.successOption.exists(_.domainId == domainId)) &&
mediators.forall(_.health.status.successOption.exists(_.domainId == domainId)),
"the domain has already been bootstrapped but not all the given sequencers and mediators are in it",
)
private def no_domain(nodes: NonEmpty[Set[InstanceReferenceX]]): Either[String, Unit] =
EitherUtil.condUnitE(
!nodes.exists(_.health.initialized()),
"the domain has not yet been bootstrapped but some sequencers or mediators are already part of one",
)
private def check_domain_bootstrap_status(
name: String,
owners: Seq[InstanceReferenceX],
sequencers: Seq[SequencerNodeReferenceX],
mediators: Seq[MediatorReferenceX],
): Either[String, Option[DomainId]] =
for {
neOwners <- NonEmpty.from(owners.toSet).toRight("you need at least one domain owner")
neSequencers <- NonEmpty.from(sequencers.toSet).toRight("you need at least one sequencer")
neMediators <- NonEmpty.from(mediators.toSet).toRight("you need at least one mediator")
nodes = neOwners ++ neSequencers ++ neMediators
_ = EitherUtil.condUnitE(nodes.forall(_.health.running()), "all nodes must be running")
ns <- expected_namespace(neOwners)
id = ns.map(ns => DomainId(UniqueIdentifier.tryCreate(name, ns.toProtoPrimitive)))
_ <- id.fold(no_domain(neSequencers ++ neMediators))(in_domain(neSequencers, neMediators))
} yield id
private def run_bootstrap(
domainName: String,
staticDomainParameters: data.StaticDomainParameters,
domainOwners: Seq[InstanceReferenceX],
sequencers: Seq[SequencerNodeReferenceX],
mediators: Seq[MediatorReferenceX],
): DomainId = {
val (decentralizedNamespace, foundingTxs) =
bootstrap.decentralized_namespace(domainOwners, store = AuthorizedStore.filterName)
val domainId = DomainId(
UniqueIdentifier.tryCreate(domainName, decentralizedNamespace.toProtoPrimitive)
)
val seqMedIdentityTxs =
(sequencers ++ mediators).flatMap(_.topology.transactions.identity_transactions())
domainOwners.foreach(
_.topology.transactions.load(seqMedIdentityTxs, store = AuthorizedStore.filterName)
)
val domainGenesisTxs = domainOwners.flatMap(
_.topology.domain_bootstrap.generate_genesis_topology(
domainId,
domainOwners.map(_.id.member),
sequencers.map(_.id),
mediators.map(_.id),
)
)
val initialTopologyState = (foundingTxs ++ seqMedIdentityTxs ++ domainGenesisTxs)
.mapFilter(_.selectOp[TopologyChangeOpX.Replace])
// TODO(#12390) replace this merge / active with proper tooling and checks that things are really fully authorized
val orderingMap =
Seq(
NamespaceDelegationX.code,
OwnerToKeyMappingX.code,
DecentralizedNamespaceDefinitionX.code,
).zipWithIndex.toMap
.withDefaultValue(5)
val merged = initialTopologyState
.groupBy1(_.transaction.hash)
.values
.map(
// combine signatures of transactions with the same hash
_.reduceLeft[PositiveSignedTopologyTransactionX] { (a, b) =>
a.addSignatures(b.signatures.toSeq)
}.copy(isProposal = false)
)
.toSeq
.sortBy(tx => orderingMap(tx.transaction.mapping.code))
// TODO(#14075) resolve with raf on what to use here (right now we use internal case structure)
sequencers.foreach(_.setup.assign_from_beginning(merged, staticDomainParameters).discard)
mediators.foreach { mediator =>
mediator.setup
.assign(
domainId,
staticDomainParameters,
SequencerConnections.tryMany(
sequencers.map(s =>
s.sequencerConnection.withAlias(SequencerAlias.tryCreate(s.name))
),
PositiveInt.tryCreate(1),
),
)
}
domainId
}
@Help.Summary(
"""Bootstraps a new domain with the given static domain parameters and members. Any participants as domain owners
|must still manually connect to the domain afterwards."""
)
def domain(
domainName: String,
sequencers: Seq[SequencerNodeReferenceX],
mediators: Seq[MediatorReferenceX],
domainOwners: Seq[InstanceReferenceX] = Seq.empty,
staticDomainParameters: data.StaticDomainParameters =
data.StaticDomainParameters.defaultsWithoutKMS,
): DomainId = {
val domainOwnersOrDefault = if (domainOwners.isEmpty) sequencers else domainOwners
check_domain_bootstrap_status(
domainName,
domainOwnersOrDefault,
sequencers,
mediators,
) match {
case Right(Some(domainId)) =>
logger.info(s"Domain $domainName has already been bootstrapped with ID $domainId")
domainId
case Right(None) =>
run_bootstrap(
domainName,
staticDomainParameters,
domainOwnersOrDefault,
sequencers,
mediators,
)
case Left(error) =>
val message = s"The domain cannot be bootstrapped: $error"
logger.error(message)
sys.error(message)
}
}
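// Illustrative sketch (not part of the macro itself): with hypothetical node
// references `sequencer1` and `mediator1` from the local configuration, the
// sequencer can double as the sole domain owner, since `domainOwners` defaults
// to the sequencers when left empty:
//
//   val daId = bootstrap.domain(
//     domainName = "da",
//     sequencers = Seq(sequencer1),
//     mediators = Seq(mediator1),
//   )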
}
}
object ConsoleMacros extends ConsoleMacros with NamedLogging {

View File

@ -3,12 +3,15 @@
package com.digitalasset.canton.console
import cats.syntax.either.*
import com.digitalasset.canton.*
import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand
import com.digitalasset.canton.config.RequireTypes.Port
import com.digitalasset.canton.admin.api.client.data.StaticDomainParameters as ConsoleStaticDomainParameters
import com.digitalasset.canton.common.domain.grpc.GrpcSequencerConnectClient
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, Port, PositiveInt}
import com.digitalasset.canton.config.*
import com.digitalasset.canton.console.CommandErrors.NodeNotStarted
import com.digitalasset.canton.console.commands.*
import com.digitalasset.canton.console.commands.{SequencerNodeAdministrationGroupXWithInit, *}
import com.digitalasset.canton.crypto.Crypto
import com.digitalasset.canton.domain.config.RemoteDomainConfig
import com.digitalasset.canton.domain.mediator.{
@ -17,6 +20,11 @@ import com.digitalasset.canton.domain.mediator.{
MediatorNodeX,
RemoteMediatorConfig,
}
import com.digitalasset.canton.domain.sequencing.config.{
RemoteSequencerConfig,
SequencerNodeConfigCommon,
}
import com.digitalasset.canton.domain.sequencing.{SequencerNodeBootstrapX, SequencerNodeX}
import com.digitalasset.canton.domain.{Domain, DomainNodeBootstrap}
import com.digitalasset.canton.environment.*
import com.digitalasset.canton.health.admin.data.{
@ -24,6 +32,7 @@ import com.digitalasset.canton.health.admin.data.{
MediatorNodeStatus,
NodeStatus,
ParticipantStatus,
SequencerNodeStatus,
}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger}
@ -40,11 +49,19 @@ import com.digitalasset.canton.participant.{
ParticipantNodeX,
}
import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnections}
import com.digitalasset.canton.topology.{DomainId, MediatorId, NodeIdentity, ParticipantId}
import com.digitalasset.canton.tracing.NoTracing
import com.digitalasset.canton.topology.{
DomainId,
MediatorId,
Member,
NodeIdentity,
ParticipantId,
SequencerId,
}
import com.digitalasset.canton.tracing.{NoTracing, TraceContext, TracingConfig}
import com.digitalasset.canton.util.ErrorUtil
import scala.concurrent.{ExecutionContext, TimeoutException}
import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{Await, ExecutionContext, TimeoutException}
import scala.util.hashing.MurmurHash3
trait InstanceReferenceCommon
@ -202,7 +219,7 @@ trait LocalInstanceReferenceCommon extends InstanceReferenceCommon with NoTracin
ErrorUtil.withThrowableLogging(clear_cache())
}
protected def migrateInstanceDb(): Either[StartupError, _] = nodes.migrateDatabase(name)
protected def migrateInstanceDb(): Either[StartupError, ?] = nodes.migrateDatabase(name)
protected def repairMigrationOfInstance(force: Boolean): Either[StartupError, Unit] = {
Either
.cond(force, (), DidntUseForceOnRepairMigration(name))
@ -223,7 +240,7 @@ trait LocalInstanceReferenceCommon extends InstanceReferenceCommon with NoTracin
NodeNotStarted.ErrorCanton(this)
override protected[console] def adminCommand[Result](
grpcCommand: GrpcAdminCommand[_, _, Result]
grpcCommand: GrpcAdminCommand[?, ?, Result]
): ConsoleCommandResult[Result] = {
runCommandIfRunning(
consoleEnvironment.grpcAdminCommandRunner
@ -248,7 +265,7 @@ trait GrpcRemoteInstanceReference extends RemoteInstanceReference {
def config: NodeConfig
override protected[console] def adminCommand[Result](
grpcCommand: GrpcAdminCommand[_, _, Result]
grpcCommand: GrpcAdminCommand[?, ?, Result]
): ConsoleCommandResult[Result] =
consoleEnvironment.grpcAdminCommandRunner.runCommand(
name,
@ -439,7 +456,7 @@ class ExternalLedgerApiClient(
throw new NotImplementedError("domain_of is not implemented for external ledger api clients")
override protected[console] def ledgerApiCommand[Result](
command: GrpcAdminCommand[_, _, Result]
command: GrpcAdminCommand[?, ?, Result]
): ConsoleCommandResult[Result] =
consoleEnvironment.grpcAdminCommandRunner
.runCommand("sourceLedger", command, ClientConfig(hostname, port, tls), token)
@ -454,7 +471,10 @@ class ExternalLedgerApiClient(
object ExternalLedgerApiClient {
def forReference(participant: LocalParticipantReferenceCommon, token: String)(implicit
def forReference[ParticipantNodeT <: ParticipantNodeCommon](
participant: LocalParticipantReferenceCommon[ParticipantNodeT],
token: String,
)(implicit
env: ConsoleEnvironment
): ExternalLedgerApiClient = {
val cc = participant.config.ledgerApi.clientConfig
@ -629,7 +649,7 @@ sealed trait RemoteParticipantReferenceCommon
def config: RemoteParticipantConfig
override protected[console] def ledgerApiCommand[Result](
command: GrpcAdminCommand[_, _, Result]
command: GrpcAdminCommand[?, ?, Result]
): ConsoleCommandResult[Result] =
consoleEnvironment.grpcAdminCommandRunner.runCommand(
name,
@ -680,17 +700,20 @@ class RemoteParticipantReference(environment: ConsoleEnvironment, override val n
}
sealed trait LocalParticipantReferenceCommon
sealed trait LocalParticipantReferenceCommon[ParticipantNodeT <: ParticipantNodeCommon]
extends LedgerApiCommandRunner
with ParticipantReferenceCommon
with LocalInstanceReferenceCommon {
with LocalInstanceReferenceCommon
with BaseInspection[ParticipantNodeT] {
override val name: String
def config: LocalParticipantConfig
def adminToken: Option[String]
override protected[console] def ledgerApiCommand[Result](
command: GrpcAdminCommand[_, _, Result]
command: GrpcAdminCommand[?, ?, Result]
): ConsoleCommandResult[Result] =
runCommandIfRunning(
consoleEnvironment.grpcAdminCommandRunner
@ -712,9 +735,8 @@ class LocalParticipantReference(
override val consoleEnvironment: ConsoleEnvironment,
name: String,
) extends ParticipantReference(consoleEnvironment, name)
with LocalParticipantReferenceCommon
with LocalInstanceReference
with BaseInspection[ParticipantNode] {
with LocalParticipantReferenceCommon[ParticipantNode]
with LocalInstanceReference {
override private[console] val nodes = consoleEnvironment.environment.participants
@ -885,9 +907,8 @@ class LocalParticipantReferenceX(
override val consoleEnvironment: ConsoleEnvironment,
name: String,
) extends ParticipantReferenceX(consoleEnvironment, name)
with LocalParticipantReferenceCommon
with LocalInstanceReferenceX
with BaseInspection[ParticipantNodeX] {
with LocalParticipantReferenceCommon[ParticipantNodeX]
with LocalInstanceReferenceX {
override private[console] val nodes = consoleEnvironment.environment.participantsX
@ -936,6 +957,374 @@ class LocalParticipantReferenceX(
def repair: LocalParticipantRepairAdministration = repair_
}
trait SequencerNodeReferenceCommon
extends InstanceReferenceCommon
with InstanceReferenceWithSequencerConnection {
override type Status = SequencerNodeStatus
@Help.Summary(
"Yields the globally unique id of this sequencer. " +
"Throws an exception, if the id has not yet been allocated (e.g., the sequencer has not yet been started)."
)
def id: SequencerId = topology.idHelper(SequencerId(_))
}
object SequencerNodeReference {
val InstanceType = "Sequencer"
}
abstract class SequencerNodeReference(
val consoleEnvironment: ConsoleEnvironment,
name: String,
) extends SequencerNodeReferenceCommon
with InstanceReferenceWithSequencer
with InstanceReference
with SequencerNodeAdministration {
override def equals(obj: Any): Boolean = {
obj match {
case x: SequencerNodeReference => x.consoleEnvironment == consoleEnvironment && x.name == name
case _ => false
}
}
override protected val instanceType: String = SequencerNodeReference.InstanceType
override protected val loggerFactory: NamedLoggerFactory =
consoleEnvironment.environment.loggerFactory.append("sequencer", name)
private lazy val parties_ = new PartiesAdministrationGroup(this, consoleEnvironment)
override def parties: PartiesAdministrationGroup = parties_
private lazy val topology_ =
new TopologyAdministrationGroup(
this,
health.status.successOption.map(_.topologyQueue),
consoleEnvironment,
loggerFactory,
)
override def topology: TopologyAdministrationGroup = topology_
private lazy val sequencer_ =
new SequencerAdministrationGroup(this, consoleEnvironment, loggerFactory)
@Help.Summary("Manage the sequencer")
@Help.Group("Sequencer")
override def sequencer: SequencerAdministrationGroup = sequencer_
@Help.Summary("Health and diagnostic related commands")
@Help.Group("Health")
override def health =
new HealthAdministration[SequencerNodeStatus](
this,
consoleEnvironment,
SequencerNodeStatus.fromProtoV0,
)
}
object SequencerNodeReferenceX {
val InstanceType = "SequencerX"
}
abstract class SequencerNodeReferenceX(
val consoleEnvironment: ConsoleEnvironment,
name: String,
) extends SequencerNodeReferenceCommon
with InstanceReferenceX
with SequencerNodeAdministrationGroupXWithInit {
self =>
override protected def runner: AdminCommandRunner = this
override protected def disable_member(member: Member): Unit =
repair.disable_member(member)
override def equals(obj: Any): Boolean = {
obj match {
case x: SequencerNodeReferenceX =>
x.consoleEnvironment == consoleEnvironment && x.name == name
case _ => false
}
}
override protected val instanceType: String = SequencerNodeReferenceX.InstanceType
override protected val loggerFactory: NamedLoggerFactory =
consoleEnvironment.environment.loggerFactory.append("sequencerx", name)
private lazy val topology_ =
new TopologyAdministrationGroupX(
this,
health.status.successOption.map(_.topologyQueue),
consoleEnvironment,
loggerFactory,
)
private lazy val grpcSequencerConnectClient = new GrpcSequencerConnectClient(
sequencerConnection = sequencerConnection,
timeouts = ProcessingTimeout(),
traceContextPropagation = TracingConfig.Propagation.Enabled,
loggerFactory = loggerFactory,
)(consoleEnvironment.environment.executionContext)
override def topology: TopologyAdministrationGroupX = topology_
private lazy val parties_ = new PartiesAdministrationGroupX(this, consoleEnvironment)
override def parties: PartiesAdministrationGroupX = parties_
private val staticDomainParameters: AtomicReference[Option[ConsoleStaticDomainParameters]] =
new AtomicReference[Option[ConsoleStaticDomainParameters]](None)
private val domainId: AtomicReference[Option[DomainId]] =
new AtomicReference[Option[DomainId]](None)
@Help.Summary("Health and diagnostic related commands")
@Help.Group("Health")
override def health =
new HealthAdministrationX[SequencerNodeStatus](
this,
consoleEnvironment,
SequencerNodeStatus.fromProtoV0,
)
private lazy val sequencerXTrafficControl = new TrafficControlSequencerAdministrationGroup(
this,
topology,
this,
consoleEnvironment,
loggerFactory,
)
@Help.Summary("Admin traffic control related commands")
@Help.Group("Traffic")
override def traffic_control: TrafficControlSequencerAdministrationGroup =
sequencerXTrafficControl
@Help.Summary("Return domain id of the domain")
def domain_id: DomainId = {
domainId.get() match {
case Some(id) => id
case None =>
val id = TraceContext.withNewTraceContext { implicit traceContext =>
Await
.result(
grpcSequencerConnectClient.getDomainId(name).value,
consoleEnvironment.commandTimeouts.bounded.duration,
)
.valueOr(_ => throw new CommandFailure())
}
domainId.set(Some(id))
id
}
}
object mediators {
object groups {
@Help.Summary("Propose a new mediator group")
@Help.Description("""
group: the mediator group identifier
threshold: the minimum number of mediators that need to come to a consensus for a message to be sent to other members.
active: the list of mediators that will take part in the mediator consensus in this mediator group
observers: the mediators that will receive all messages but will not participate in mediator consensus
""")
def propose_new_group(
group: NonNegativeInt,
threshold: PositiveInt,
active: Seq[MediatorReferenceX],
observers: Seq[MediatorReferenceX] = Nil,
): Unit = {
val domainId = domain_id
val staticDomainParameters = domain_parameters.static.get()
val mediators = active ++ observers
mediators.foreach { mediator =>
val identityState = mediator.topology.transactions.identity_transactions()
topology.transactions.load(identityState, domainId.filterString)
}
topology.mediators
.propose(
domainId = domainId,
threshold = threshold,
active = active.map(_.id),
observers = observers.map(_.id),
group = group,
)
.discard
mediators.foreach(
_.setup.assign(
domainId,
staticDomainParameters,
SequencerConnections.single(sequencerConnection),
)
)
}
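// Illustrative sketch (hypothetical mediator references `med1`, `med2`):
// create mediator group 0 with a threshold of one active mediator:
//
//   sequencer1.mediators.groups.propose_new_group(
//     group = NonNegativeInt.tryCreate(0),
//     threshold = PositiveInt.tryCreate(1),
//     active = Seq(med1),
//     observers = Seq(med2),
//   )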
@Help.Summary("Propose an update to a mediator group")
@Help.Description("""
group: the mediator group identifier
threshold: the minimum number of mediators that need to come to a consensus for a message to be sent to other members.
additionalActive: the new mediators that will take part in the mediator consensus in this mediator group
additionalObservers: the new mediators that will receive all messages but will not participate in mediator consensus
""")
def propose_delta(
group: NonNegativeInt,
threshold: PositiveInt,
additionalActive: Seq[MediatorReferenceX],
additionalObservers: Seq[MediatorReferenceX] = Nil,
): Unit = {
val staticDomainParameters = domain_parameters.static.get()
val domainId = domain_id
val currentMediators = topology.mediators
.list(filterStore = domainId.filterString, group = Some(group))
.maxByOption(_.context.serial)
.getOrElse(throw new IllegalArgumentException(s"Unknown mediator group $group"))
val currentActive = currentMediators.item.active
val currentObservers = currentMediators.item.observers
val current = currentActive ++ currentObservers
val newMediators =
(additionalActive ++ additionalObservers).filterNot(m => current.contains(m.id))
newMediators.foreach { med =>
val identityState = med.topology.transactions.identity_transactions()
topology.transactions.load(identityState, domainId.filterString)
}
topology.mediators
.propose(
domainId = domainId,
threshold = threshold,
active = (currentActive ++ additionalActive.map(_.id)).distinct,
observers = (currentObservers ++ additionalObservers.map(_.id)).distinct,
group = group,
)
.discard
newMediators.foreach(
_.setup.assign(
domainId,
staticDomainParameters,
SequencerConnections.single(sequencerConnection),
)
)
}
}
}
@Help.Summary("Domain parameters related commands")
@Help.Group("Domain parameters")
object domain_parameters {
object static {
@Help.Summary("Return static domain parameters of the domain")
def get(): ConsoleStaticDomainParameters = {
staticDomainParameters.get() match {
case Some(parameters) => parameters
case None =>
val parameters = TraceContext.withNewTraceContext { implicit traceContext =>
Await
.result(
grpcSequencerConnectClient.getDomainParameters(name).value,
consoleEnvironment.commandTimeouts.bounded.duration,
)
.map(ConsoleStaticDomainParameters(_))
.valueOr(_ => throw new CommandFailure())
}
staticDomainParameters.set(Some(parameters))
parameters
}
}
}
}
}
trait LocalSequencerNodeReferenceCommon extends LocalInstanceReferenceCommon {
this: SequencerNodeReferenceCommon =>
def config: SequencerNodeConfigCommon
override lazy val sequencerConnection: GrpcSequencerConnection =
config.publicApi.toSequencerConnectionConfig.toConnection
.fold(err => sys.error(s"Sequencer $name has invalid connection config: $err"), identity)
}
class LocalSequencerNodeReferenceX(
override val consoleEnvironment: ConsoleEnvironment,
val name: String,
) extends SequencerNodeReferenceX(consoleEnvironment, name)
with LocalSequencerNodeReferenceCommon
with LocalInstanceReferenceX
with BaseInspection[SequencerNodeX] {
override protected[canton] def executionContext: ExecutionContext =
consoleEnvironment.environment.executionContext
@Help.Summary("Returns the sequencerx configuration")
override def config: SequencerNodeConfigCommon =
consoleEnvironment.environment.config.sequencersByStringX(name)
private[console] val nodes: SequencerNodesX[?] =
consoleEnvironment.environment.sequencersX
override protected[console] def runningNode: Option[SequencerNodeBootstrapX] =
nodes.getRunning(name)
override protected[console] def startingNode: Option[SequencerNodeBootstrapX] =
nodes.getStarting(name)
}
trait RemoteSequencerNodeReferenceCommon
extends SequencerNodeReferenceCommon
with RemoteInstanceReference {
def environment: ConsoleEnvironment
@Help.Summary("Returns the remote sequencer configuration")
def config: RemoteSequencerConfig
override def sequencerConnection: GrpcSequencerConnection =
config.publicApi.toConnection
.fold(err => sys.error(s"Sequencer $name has invalid connection config: $err"), identity)
override protected[console] def adminCommand[Result](
grpcCommand: GrpcAdminCommand[?, ?, Result]
): ConsoleCommandResult[Result] =
config match {
case config: RemoteSequencerConfig.Grpc =>
consoleEnvironment.grpcAdminCommandRunner.runCommand(
name,
grpcCommand,
config.clientAdminApi,
None,
)
}
}
class RemoteSequencerNodeReferenceX(val environment: ConsoleEnvironment, val name: String)
extends SequencerNodeReferenceX(environment, name)
with RemoteSequencerNodeReferenceCommon {
override protected[canton] def executionContext: ExecutionContext =
consoleEnvironment.environment.executionContext
@Help.Summary("Returns the sequencerx configuration")
override def config: RemoteSequencerConfig =
environment.environment.config.remoteSequencersByStringX(name)
}
trait MediatorReferenceCommon extends InstanceReferenceCommon {
@Help.Summary(

View File

@ -8,6 +8,7 @@ import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.NonNegativeDuration
import com.digitalasset.canton.console.commands.ParticipantCommands
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.participant.ParticipantNodeCommon
import com.digitalasset.canton.participant.domain.DomainConnectionConfig
import com.digitalasset.canton.{DomainAlias, SequencerAlias}
@ -138,7 +139,10 @@ class ParticipantReferencesExtensions(participants: Seq[ParticipantReferenceComm
}
class LocalParticipantReferencesExtensions[LocalParticipantRef <: LocalParticipantReferenceCommon](
class LocalParticipantReferencesExtensions[
ParticipantNodeT <: ParticipantNodeCommon,
LocalParticipantRef <: LocalParticipantReferenceCommon[ParticipantNodeT],
](
participants: Seq[LocalParticipantRef]
)(implicit
override val consoleEnvironment: ConsoleEnvironment

View File

@ -26,12 +26,14 @@ import com.daml.ledger.api.v1.value.Value
import com.daml.ledger.api.v1.{EventQueryServiceOuterClass, ValueOuterClass}
import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdResponse as GetEventsByContractIdResponseV2
import com.daml.ledger.api.v2.participant_offset.ParticipantOffset
import com.daml.ledger.api.v2.reassignment.Reassignment
import com.daml.ledger.api.v2.state_service.GetConnectedDomainsResponse
import com.daml.ledger.api.v2.transaction.{
Transaction as TransactionV2,
TransactionTree as TransactionTreeV2,
}
import com.daml.ledger.api.v2.transaction_filter.TransactionFilter as TransactionFilterV2
import com.daml.ledger.javaapi.data.ReassignmentV2
import com.daml.ledger.{api, javaapi as javab}
import com.daml.lf.data.Ref
import com.daml.metrics.api.MetricHandle.{Histogram, Meter}
@ -105,6 +107,7 @@ import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.nowarn
import scala.concurrent.{Await, ExecutionContext}
import scala.util.chaining.scalaUtilChainingOps
import scala.util.{Failure, Success, Try}
trait BaseLedgerApiAdministration extends NoTracing {
@ -425,7 +428,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
def submit(
actAs: Seq[PartyId],
commands: Seq[Command],
domainId: DomainId,
domainId: Option[DomainId] = None,
workflowId: String = "",
commandId: String = "",
// TODO(#15280) This feature won't work after V1 is removed. Also after witness blinding is implemented, the underlying algorithm will be broken. Idea: drop this feature and wait explicitly with some additional tooling.
@ -474,7 +477,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
def submit_flat(
actAs: Seq[PartyId],
commands: Seq[Command],
domainId: DomainId,
domainId: Option[DomainId] = None,
workflowId: String = "",
commandId: String = "",
// TODO(#15280) This feature won't work after V1 is removed. Also after witness blinding is implemented, the underlying algorithm will be broken. Idea: drop this feature and wait explicitly with some additional tooling.
@ -514,7 +517,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
def submit_async(
actAs: Seq[PartyId],
commands: Seq[Command],
domainId: DomainId,
domainId: Option[DomainId] = None,
workflowId: String = "",
commandId: String = "",
deduplicationPeriod: Option[DeduplicationPeriod] = None,
@ -1518,10 +1521,450 @@ trait BaseLedgerApiAdministration extends NoTracing {
})
}
// TODO(#15274)
@Help.Summary("Group of commands that utilize java bindings", FeatureFlag.Testing)
@Help.Group("Ledger Api (Java bindings)")
object javaapi extends Helpful
object javaapi extends Helpful {
@Help.Summary("Submit commands (Java bindings)", FeatureFlag.Testing)
@Help.Group("Command Submission (Java bindings)")
object commands extends Helpful {
@Help.Summary(
"Submit java codegen commands and wait for the resulting transaction, returning the transaction tree or failing otherwise",
FeatureFlag.Testing,
)
@Help.Description(
"""Submits a command on behalf of the `actAs` parties, waits for the resulting transaction to commit and returns it.
| If the timeout is set, it also waits for the transaction to appear at all other configured
| participants who were involved in the transaction. The call blocks until the transaction commits or fails;
| the timeout only specifies how long to wait at the other participants.
| Fails if the transaction doesn't commit, or if it doesn't become visible to the involved participants in
| the allotted time.
| Note that if the optTimeout is set and the involved parties are concurrently enabled/disabled or their
| participants are connected/disconnected, the command may currently result in spurious timeouts or may
| return before the transaction appears at all the involved participants."""
)
def submit(
actAs: Seq[PartyId],
commands: Seq[javab.data.Command],
domainId: Option[DomainId] = None,
workflowId: String = "",
commandId: String = "",
// TODO(#15280) This feature won't work after V1 is removed. Also after witness blinding is implemented, the underlying algorithm will be broken. Idea: drop this feature and wait explicitly with some additional tooling.
optTimeout: Option[NonNegativeDuration] = Some(timeouts.ledgerCommand),
deduplicationPeriod: Option[DeduplicationPeriod] = None,
submissionId: String = "",
minLedgerTimeAbs: Option[Instant] = None,
readAs: Seq[PartyId] = Seq.empty,
disclosedContracts: Seq[javab.data.DisclosedContract] = Seq.empty,
applicationId: String = applicationId,
): javab.data.TransactionTreeV2 = check(FeatureFlag.Testing) {
val tx = consoleEnvironment.run {
ledgerApiCommand(
LedgerApiV2Commands.CommandService.SubmitAndWaitTransactionTree(
actAs.map(_.toLf),
readAs.map(_.toLf),
commands.map(c => Command.fromJavaProto(c.toProtoCommand)),
workflowId,
commandId,
deduplicationPeriod,
submissionId,
minLedgerTimeAbs,
disclosedContracts.map(c => DisclosedContract.fromJavaProto(c.toProto)),
domainId,
applicationId,
)
)
}
javab.data.TransactionTreeV2.fromProto(
TransactionTreeV2.toJavaProto(optionallyAwait(tx, tx.updateId, optTimeout))
)
}
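// Illustrative sketch (assumptions: `alice` is a PartyId and `createCmds` is a
// Seq of javab.data.Command values produced by a Java codegen model):
//
//   val tree = submit(actAs = Seq(alice), commands = createCmds)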
@Help.Summary(
"Submit java codegen command and wait for the resulting transaction, returning the flattened transaction or failing otherwise",
FeatureFlag.Testing,
)
@Help.Description(
"""Submits a command on behalf of the `actAs` parties, waits for the resulting transaction to commit, and returns the "flattened" transaction.
| If the timeout is set, it also waits for the transaction to appear at all other configured
| participants who were involved in the transaction. The call blocks until the transaction commits or fails;
| the timeout only specifies how long to wait at the other participants.
| Fails if the transaction doesn't commit, or if it doesn't become visible to the involved participants in
| the allotted time.
| Note that if the optTimeout is set and the involved parties are concurrently enabled/disabled or their
| participants are connected/disconnected, the command may currently result in spurious timeouts or may
| return before the transaction appears at all the involved participants."""
)
def submit_flat(
actAs: Seq[PartyId],
commands: Seq[javab.data.Command],
domainId: Option[DomainId] = None,
workflowId: String = "",
commandId: String = "",
// TODO(#15280) This feature won't work after V1 is removed. Also after witness blinding is implemented, the underlying algorithm will be broken. Idea: drop this feature and wait explicitly with some additional tooling.
optTimeout: Option[config.NonNegativeDuration] = Some(timeouts.ledgerCommand),
deduplicationPeriod: Option[DeduplicationPeriod] = None,
submissionId: String = "",
minLedgerTimeAbs: Option[Instant] = None,
readAs: Seq[PartyId] = Seq.empty,
disclosedContracts: Seq[javab.data.DisclosedContract] = Seq.empty,
applicationId: String = applicationId,
): javab.data.TransactionV2 = check(FeatureFlag.Testing) {
val tx = consoleEnvironment.run {
ledgerApiCommand(
LedgerApiV2Commands.CommandService.SubmitAndWaitTransaction(
actAs.map(_.toLf),
readAs.map(_.toLf),
commands.map(c => Command.fromJavaProto(c.toProtoCommand)),
workflowId,
commandId,
deduplicationPeriod,
submissionId,
minLedgerTimeAbs,
disclosedContracts.map(c => DisclosedContract.fromJavaProto(c.toProto)),
domainId,
applicationId,
)
)
}
javab.data.TransactionV2.fromProto(
TransactionV2.toJavaProto(optionallyAwait(tx, tx.updateId, optTimeout))
)
}
@Help.Summary("Submit java codegen command asynchronously", FeatureFlag.Testing)
@Help.Description(
"""Provides access to the command submission service of the Ledger API.
|See https://docs.daml.com/app-dev/services.html for documentation of the parameters."""
)
def submit_async(
actAs: Seq[PartyId],
commands: Seq[javab.data.Command],
domainId: Option[DomainId] = None,
workflowId: String = "",
commandId: String = "",
deduplicationPeriod: Option[DeduplicationPeriod] = None,
submissionId: String = "",
minLedgerTimeAbs: Option[Instant] = None,
readAs: Seq[PartyId] = Seq.empty,
disclosedContracts: Seq[javab.data.DisclosedContract] = Seq.empty,
applicationId: String = applicationId,
): Unit =
ledger_api_v2.commands.submit_async(
actAs,
commands.map(c => Command.fromJavaProto(c.toProtoCommand)),
domainId,
workflowId,
commandId,
deduplicationPeriod,
submissionId,
minLedgerTimeAbs,
readAs,
disclosedContracts.map(c => DisclosedContract.fromJavaProto(c.toProto)),
applicationId,
)
@Help.Summary(
"Submit assign command and wait for the resulting java codegen reassignment, returning the reassignment or failing otherwise",
FeatureFlag.Testing,
)
@Help.Description(
"""Submits an unassignment command on behalf of `submitter` party, waits for the resulting unassignment to commit, and returns the reassignment.
| If waitForParticipants is set, it also waits for the reassignment(s) to appear at all other configured
| participants who were involved in the unassignment. The call blocks until the unassignment commits or fails.
| Fails if the unassignment doesn't commit, or if it doesn't become visible to the involved participants in time.
| Timeout specifies how long to wait until the reassignment appears in the update stream for the submitting and all the specified participants."""
)
def submit_unassign(
submitter: PartyId,
contractId: LfContractId,
source: DomainId,
target: DomainId,
workflowId: String = "",
applicationId: String = applicationId,
submissionId: String = UUID.randomUUID().toString,
waitForParticipants: Map[ParticipantReferenceCommon, PartyId] = Map.empty,
timeout: config.NonNegativeDuration = timeouts.ledgerCommand,
): ReassignmentV2 =
ledger_api_v2.commands
.submit_unassign(
submitter,
contractId,
source,
target,
workflowId,
applicationId,
submissionId,
waitForParticipants,
timeout,
)
.reassignment
.pipe(Reassignment.toJavaProto)
.pipe(ReassignmentV2.fromProto)
@Help.Summary(
"Submit assign command and wait for the resulting java codegen reassignment, returning the reassignment or failing otherwise",
FeatureFlag.Testing,
)
@Help.Description(
"""Submits a assignment command on behalf of `submitter` party, waits for the resulting assignment to commit, and returns the reassignment.
| If waitForParticipants is set, it also waits for the reassignment(s) to appear at all other configured
| participants who were involved in the assignment. The call blocks until the assignment commits or fails.
| Fails if the assignment doesn't commit, or if it doesn't become visible to the involved participants in time.
| Timeout specifies how long to wait until the reassignment appears in the update stream for the submitting and all the specified participants.
| The unassignId should be the one returned by the corresponding submit_unassign command."""
)
def submit_assign(
submitter: PartyId,
unassignId: String,
source: DomainId,
target: DomainId,
workflowId: String = "",
applicationId: String = applicationId,
submissionId: String = UUID.randomUUID().toString,
waitForParticipants: Map[ParticipantReferenceCommon, PartyId] = Map.empty,
timeout: config.NonNegativeDuration = timeouts.ledgerCommand,
): ReassignmentV2 =
ledger_api_v2.commands
.submit_assign(
submitter,
unassignId,
source,
target,
workflowId,
applicationId,
submissionId,
waitForParticipants,
timeout,
)
.reassignment
.pipe(Reassignment.toJavaProto)
.pipe(ReassignmentV2.fromProto)
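// Illustrative round-trip sketch (hypothetical party, contract id and domain
// ids; how the unassign id is read off the returned reassignment is elided):
//
//   val unassigned = submit_unassign(alice, cid, daId, acmeId)
//   submit_assign(alice, unassignId /* from `unassigned` */, daId, acmeId)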
}
@Help.Summary("Read from update stream (Java bindings)", FeatureFlag.Testing)
@Help.Group("Updates (Java bindings)")
object updates extends Helpful {
@Help.Summary(
"Get update trees in the format expected by the Java bindings",
FeatureFlag.Testing,
)
@Help.Description(
"""This function connects to the update tree stream for the given parties and collects update trees
|until either `completeAfter` update trees have been received or `timeout` has elapsed.
|The returned update trees can be filtered to be between the given offsets (default: no filtering).
|If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset,
|this command fails with a `NOT_FOUND` error."""
)
def trees(
partyIds: Set[PartyId],
completeAfter: Int,
beginOffset: ParticipantOffset = new ParticipantOffset().withBoundary(
ParticipantOffset.ParticipantBoundary.PARTICIPANT_BEGIN
),
endOffset: Option[ParticipantOffset] = None,
verbose: Boolean = true,
timeout: config.NonNegativeDuration = timeouts.ledgerCommand,
resultFilter: UpdateTreeWrapper => Boolean = _ => true,
): Seq[javab.data.GetUpdateTreesResponseV2] = check(FeatureFlag.Testing)(
ledger_api_v2.updates
.trees(partyIds, completeAfter, beginOffset, endOffset, verbose, timeout, resultFilter)
.map {
case tx: TransactionTreeWrapper =>
tx.transactionTree
.pipe(TransactionTreeV2.toJavaProto)
.pipe(javab.data.TransactionTreeV2.fromProto)
.pipe(new javab.data.GetUpdateTreesResponseV2(_))
case reassignment: ReassignmentWrapper =>
reassignment.reassignment
.pipe(Reassignment.toJavaProto)
.pipe(ReassignmentV2.fromProto)
.pipe(new javab.data.GetUpdateTreesResponseV2(_))
}
)
@Help.Summary(
"Get flat updates in the format expected by the Java bindings",
FeatureFlag.Testing,
)
@Help.Description(
"""This function connects to the flat update stream for the given parties and collects updates
|until either `completeAfter` flat updates have been received or `timeout` has elapsed.
|The returned updates can be filtered to be between the given offsets (default: no filtering).
|If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset,
|this command fails with a `NOT_FOUND` error. If you need to specify filtering conditions for template IDs or
|to include create event blobs for explicit disclosure, consider using `flat_with_tx_filter`."""
)
def flat(
partyIds: Set[PartyId],
completeAfter: Int,
beginOffset: ParticipantOffset = new ParticipantOffset().withBoundary(
ParticipantOffset.ParticipantBoundary.PARTICIPANT_BEGIN
),
endOffset: Option[ParticipantOffset] = None,
verbose: Boolean = true,
timeout: config.NonNegativeDuration = timeouts.ledgerCommand,
resultFilter: UpdateWrapper => Boolean = _ => true,
): Seq[javab.data.GetUpdatesResponseV2] = check(FeatureFlag.Testing)(
ledger_api_v2.updates
.flat(partyIds, completeAfter, beginOffset, endOffset, verbose, timeout, resultFilter)
.map {
case tx: TransactionWrapper =>
tx.transaction
.pipe(TransactionV2.toJavaProto)
.pipe(javab.data.TransactionV2.fromProto)
.pipe(new javab.data.GetUpdatesResponseV2(_))
case reassignment: ReassignmentWrapper =>
reassignment.reassignment
.pipe(Reassignment.toJavaProto)
.pipe(ReassignmentV2.fromProto)
.pipe(new javab.data.GetUpdatesResponseV2(_))
}
)
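// Illustrative sketch (hypothetical PartyId `alice`): collect five flat updates
// for a single party, starting from the participant begin boundary:
//
//   val responses = flat(partyIds = Set(alice), completeAfter = 5)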
@Help.Summary(
"Get flat updates in the format expected by the Java bindings",
FeatureFlag.Testing,
)
@Help.Description(
"""This function connects to the flat update stream for the given transaction filter and collects updates
|until either `completeAfter` transactions have been received or `timeout` has elapsed.
|The returned transactions can be filtered to be between the given offsets (default: no filtering).
|If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset,
|this command fails with a `NOT_FOUND` error. If you only need to filter by a set of parties, consider using
|`flat` instead."""
)
def flat_with_tx_filter(
filter: javab.data.TransactionFilterV2,
completeAfter: Int,
beginOffset: ParticipantOffset = new ParticipantOffset().withBoundary(
ParticipantOffset.ParticipantBoundary.PARTICIPANT_BEGIN
),
endOffset: Option[ParticipantOffset] = None,
verbose: Boolean = true,
timeout: config.NonNegativeDuration = timeouts.ledgerCommand,
resultFilter: UpdateWrapper => Boolean = _ => true,
): Seq[javab.data.GetUpdatesResponseV2] = check(FeatureFlag.Testing)(
ledger_api_v2.updates
.flat_with_tx_filter(
TransactionFilterV2.fromJavaProto(filter.toProto),
completeAfter,
beginOffset,
endOffset,
verbose,
timeout,
resultFilter,
)
.map {
case tx: TransactionWrapper =>
tx.transaction
.pipe(TransactionV2.toJavaProto)
.pipe(javab.data.TransactionV2.fromProto)
.pipe(new javab.data.GetUpdatesResponseV2(_))
case reassignment: ReassignmentWrapper =>
reassignment.reassignment
.pipe(Reassignment.toJavaProto)
.pipe(ReassignmentV2.fromProto)
.pipe(new javab.data.GetUpdatesResponseV2(_))
}
)
}
@Help.Summary("Collection of Ledger API state endpoints (Java bindings)", FeatureFlag.Testing)
@Help.Group("State (Java bindings)")
object state extends Helpful {
@Help.Summary("Read active contracts (Java bindings)", FeatureFlag.Testing)
@Help.Group("Active Contracts (Java bindings)")
object acs extends Helpful {
@Help.Summary(
"Wait until a contract becomes available and return the Java codegen contract",
FeatureFlag.Testing,
)
@Help.Description(
"""This function can be used for contracts with a code-generated Scala model.
|You can refine your search using the `filter` function argument.
|The command will wait until the contract appears or throw an exception once it times out."""
)
def await[
TC <: javab.data.codegen.Contract[TCid, T],
TCid <: javab.data.codegen.ContractId[T],
T <: javab.data.Template,
](companion: javab.data.codegen.ContractCompanion[TC, TCid, T])(
partyId: PartyId,
predicate: TC => Boolean = (_: TC) => true,
timeout: config.NonNegativeDuration = timeouts.ledgerCommand,
): TC = check(FeatureFlag.Testing)({
val result = new AtomicReference[Option[TC]](None)
ConsoleMacros.utils.retry_until_true(timeout) {
val tmp = filter(companion)(partyId, predicate)
result.set(tmp.headOption)
tmp.nonEmpty
}
consoleEnvironment.runE {
result
.get()
.toRight(s"Failed to find contract of type ${companion.TEMPLATE_ID} after $timeout")
}
})
@Help.Summary(
"Filter the ACS for contracts of a particular Java code-generated template",
FeatureFlag.Testing,
)
@Help.Description(
"""To use this function, ensure a code-generated Java model for the target template exists.
|You can refine your search using the `predicate` function argument."""
)
def filter[
TC <: javab.data.codegen.Contract[TCid, T],
TCid <: javab.data.codegen.ContractId[T],
T <: javab.data.Template,
](templateCompanion: javab.data.codegen.ContractCompanion[TC, TCid, T])(
partyId: PartyId,
predicate: TC => Boolean = (_: TC) => true,
): Seq[TC] = check(FeatureFlag.Testing) {
val javaTemplateId = templateCompanion.TEMPLATE_ID
val templateId = TemplateId(
javaTemplateId.getPackageId,
javaTemplateId.getModuleName,
javaTemplateId.getEntityName,
)
ledger_api_v2.state.acs
.of_party(partyId, filterTemplates = Seq(templateId))
.map(_.event)
.flatMap(ev =>
JavaDecodeUtil
.decodeCreated(templateCompanion)(
javab.data.CreatedEvent.fromProto(CreatedEvent.toJavaProto(ev))
)
.toList
)
.filter(predicate)
}
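// Illustrative sketch (assumes a Java codegen model exposing a contract
// companion `Iou.COMPANION` and a PartyId `alice` in scope):
//
//   val ious = filter(Iou.COMPANION)(alice)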
}
}
@Help.Summary("Query event details", FeatureFlag.Testing)
@Help.Group("EventQuery")
object event_query extends Helpful {
@Help.Summary("Get events in java codegen by contract Id", FeatureFlag.Testing)
@Help.Description("""Return events associated with the given contract Id""")
def by_contract_id(
contractId: String,
requestingParties: Seq[PartyId],
): com.daml.ledger.api.v2.EventQueryServiceOuterClass.GetEventsByContractIdResponse =
ledger_api_v2.event_query
.by_contract_id(contractId, requestingParties)
.pipe(GetEventsByContractIdResponseV2.toJavaProto)
}
}
private def waitForUpdateId(
administration: BaseLedgerApiAdministration,
@ -3236,7 +3679,7 @@ trait LedgerApiAdministration extends BaseLedgerApiAdministration {
// A participant identity equality check that doesn't blow up if the participant isn't running
def identityIs(pRef: ParticipantReferenceCommon, id: ParticipantId): Boolean = pRef match {
case lRef: LocalParticipantReferenceCommon =>
case lRef: LocalParticipantReferenceCommon[?] =>
lRef.is_running && lRef.health.initialized() && lRef.id == id
case rRef: RemoteParticipantReferenceCommon =>
rRef.health.initialized() && rRef.id == id

View File

@ -26,6 +26,9 @@ import com.digitalasset.canton.admin.api.client.data.{
ListConnectedDomainsResult,
ParticipantPruningSchedule,
}
import com.digitalasset.canton.admin.participant.v0
import com.digitalasset.canton.admin.participant.v0.PruningServiceGrpc
import com.digitalasset.canton.admin.participant.v0.PruningServiceGrpc.PruningServiceStub
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.config.{DomainTimeTrackerConfig, NonNegativeDuration}
import com.digitalasset.canton.console.{
@ -48,11 +51,9 @@ import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.health.admin.data.ParticipantStatus
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger}
import com.digitalasset.canton.participant.ParticipantNodeCommon
import com.digitalasset.canton.participant.admin.ResourceLimits
import com.digitalasset.canton.participant.admin.grpc.TransferSearchResult
import com.digitalasset.canton.participant.admin.inspection.SyncStateInspection
import com.digitalasset.canton.participant.admin.v0.PruningServiceGrpc
import com.digitalasset.canton.participant.admin.v0.PruningServiceGrpc.PruningServiceStub
import com.digitalasset.canton.participant.admin.{ResourceLimits, v0}
import com.digitalasset.canton.participant.domain.DomainConnectionConfig
import com.digitalasset.canton.participant.sync.TimestampedEvent
import com.digitalasset.canton.protocol.messages.{
@ -1524,7 +1525,7 @@ trait ParticipantAdministration extends FeatureFlagFilter {
ParticipantAdminCommands.Transfer
.TransferIn(
submittingParty,
transferId.toProtoV0,
transferId.toAdminProto,
targetDomain,
applicationId = applicationId,
submissionId = submissionId,

View File

@ -8,6 +8,7 @@ import com.digitalasset.canton.admin.api.client.commands.{
GrpcAdminCommand,
ParticipantAdminCommands,
}
import com.digitalasset.canton.admin.participant.v0.{ExportAcsRequest, ExportAcsResponse}
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.console.CommandErrors.GenericCommandError
import com.digitalasset.canton.console.{
@ -24,7 +25,6 @@ import com.digitalasset.canton.console.{
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.networking.grpc.GrpcError
import com.digitalasset.canton.participant.ParticipantNodeCommon
import com.digitalasset.canton.participant.admin.v0.{ExportAcsRequest, ExportAcsResponse}
import com.digitalasset.canton.participant.domain.DomainConnectionConfig
import com.digitalasset.canton.protocol.{LfContractId, SerializableContractWithWitnesses}
import com.digitalasset.canton.topology.{DomainId, PartyId}

View File

@ -0,0 +1,157 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.console.commands
import cats.syntax.option.*
import com.digitalasset.canton.admin.api.client.commands.EnterpriseSequencerAdminCommands
import com.digitalasset.canton.admin.api.client.commands.EnterpriseSequencerAdminCommands.{
BootstrapTopology,
Initialize,
InitializeX,
}
import com.digitalasset.canton.admin.api.client.data.StaticDomainParameters
import com.digitalasset.canton.console.{
AdminCommandRunner,
FeatureFlagFilter,
Help,
Helpful,
SequencerNodeReference,
}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.domain.sequencing.admin.grpc.{
InitializeSequencerResponse,
InitializeSequencerResponseX,
}
import com.digitalasset.canton.domain.sequencing.sequencer.SequencerSnapshot
import com.digitalasset.canton.topology.DomainId
import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX
import com.digitalasset.canton.topology.store.{
StoredTopologyTransactionX,
StoredTopologyTransactions,
StoredTopologyTransactionsX,
}
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.PositiveSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.{
TopologyChangeOp,
TopologyChangeOpX,
TopologyMappingX,
}
trait SequencerNodeAdministration {
self: AdminCommandRunner with FeatureFlagFilter with SequencerNodeReference =>
private lazy val _init = new Initialization()
def initialization: Initialization = _init
@Help.Summary("Manage sequencer initialization")
@Help.Group("initialization")
class Initialization extends Helpful {
@Help.Summary(
"Initialize a sequencer from the beginning of the event stream. This should only be called for " +
"sequencer nodes being initialized at the same time as the corresponding domain node. " +
"This is called as part of the domain.setup.bootstrap command, so you are unlikely to need to call this directly."
)
def initialize_from_beginning(
domainId: DomainId,
domainParameters: StaticDomainParameters,
): InitializeSequencerResponse =
consoleEnvironment.run {
adminCommand(
Initialize(domainId, StoredTopologyTransactions.empty, domainParameters)
)
}
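// Illustrative sketch (hypothetical `daId` and `params` in scope, with
// `sequencer1` a local sequencer reference):
//
//   sequencer1.initialization.initialize_from_beginning(daId, params)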
@Help.Summary(
"Dynamically initialize a sequencer from a point later than the beginning of the event stream." +
"This is called as part of the domain.setup.onboard_new_sequencer command, so you are unlikely to need to call this directly."
)
def initialize_from_snapshot(
domainId: DomainId,
topologySnapshot: StoredTopologyTransactions[TopologyChangeOp.Positive],
sequencerSnapshot: SequencerSnapshot,
domainParameters: StaticDomainParameters,
): InitializeSequencerResponse =
consoleEnvironment.run {
adminCommand(
Initialize(domainId, topologySnapshot, domainParameters, sequencerSnapshot.some)
)
}
@Help.Summary("Bootstrap topology data")
@Help.Description(
"Use this to sequence the initial batch of topology transactions which must include at least the IDM's and sequencer's" +
"key mappings. This is called as part of domain.setup.bootstrap"
)
def bootstrap_topology(
topologySnapshot: StoredTopologyTransactions[TopologyChangeOp.Positive]
): Unit =
consoleEnvironment.run {
adminCommand(BootstrapTopology(topologySnapshot))
}
}
}
trait SequencerNodeAdministrationGroupXWithInit extends SequencerAdministrationGroupX {
@Help.Summary("Methods used for node initialization")
object setup extends ConsoleCommandGroup.Impl(this) with InitNodeId {
@Help.Summary(
"Download sequencer snapshot at given point in time to bootstrap another sequencer"
)
def snapshot(timestamp: CantonTimestamp): SequencerSnapshot = {
// TODO(#14074) add something like "snapshot for sequencer-id", rather than timestamp based
// we still need to keep the timestamp-based variant so that we can provide recovery for corrupted sequencers
consoleEnvironment.run {
runner.adminCommand(EnterpriseSequencerAdminCommands.Snapshot(timestamp))
}
}
@Help.Summary(
"Initialize a sequencer from the beginning of the event stream. This should only be called for " +
"sequencer nodes being initialized at the same time as the corresponding domain node. " +
"This is called as part of the domain.setup.bootstrap command, so you are unlikely to need to call this directly."
)
def assign_from_beginning(
genesisState: Seq[PositiveSignedTopologyTransactionX],
domainParameters: StaticDomainParameters,
): InitializeSequencerResponseX =
consoleEnvironment.run {
runner.adminCommand(
InitializeX(
StoredTopologyTransactionsX[TopologyChangeOpX.Replace, TopologyMappingX](
genesisState.map(signed =>
StoredTopologyTransactionX(
SequencedTime(CantonTimestamp.MinValue.immediateSuccessor),
EffectiveTime(CantonTimestamp.MinValue.immediateSuccessor),
None,
signed,
)
)
),
domainParameters.toInternal,
None,
)
)
}
@Help.Summary(
"Dynamically initialize a sequencer from a point later than the beginning of the event stream." +
"This is called as part of the domain.setup.onboard_new_sequencer command, so you are unlikely to need to call this directly."
)
def assign_from_snapshot(
topologySnapshot: GenericStoredTopologyTransactionsX,
sequencerSnapshot: SequencerSnapshot,
domainParameters: StaticDomainParameters,
): InitializeSequencerResponseX =
consoleEnvironment.run {
runner.adminCommand(
InitializeX(topologySnapshot, domainParameters.toInternal, sequencerSnapshot.some)
)
}
}
}

View File

@ -14,8 +14,8 @@ import com.digitalasset.canton.admin.api.client.data.{
TrafficControlParameters,
}
import com.digitalasset.canton.config
import com.digitalasset.canton.config.NonNegativeDuration
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt, PositiveLong}
import com.digitalasset.canton.config.{NonNegativeDuration, RequireTypes}
import com.digitalasset.canton.console.CommandErrors.GenericCommandError
import com.digitalasset.canton.console.{
CommandErrors,
@ -692,7 +692,7 @@ class TopologyAdministrationGroupX(
),
// configurable in case of a key under a decentralized namespace
mustFullyAuthorize: Boolean = true,
): Unit = propose(
): Unit = update(
key,
purpose,
keyOwner,
@ -734,7 +734,7 @@ class TopologyAdministrationGroupX(
// configurable in case of a key under a decentralized namespace
mustFullyAuthorize: Boolean = true,
force: Boolean = false,
): Unit = propose(
): Unit = update(
key,
purpose,
keyOwner,
@ -792,7 +792,7 @@ class TopologyAdministrationGroupX(
// Authorize the new key
// The owner will now have two keys, but by convention the first one added is always
// used by everybody.
propose(
update(
newKey.fingerprint,
newKey.purpose,
member,
@ -803,7 +803,7 @@ class TopologyAdministrationGroupX(
)
// Remove the old key by sending the matching `Remove` transaction
propose(
update(
currentKey.fingerprint,
currentKey.purpose,
member,
@ -819,7 +819,7 @@ class TopologyAdministrationGroupX(
)
}
private def propose(
private def update(
key: Fingerprint,
purpose: KeyPurpose,
keyOwner: Member,
@ -834,20 +834,18 @@ class TopologyAdministrationGroupX(
nodeInstance: InstanceReferenceX,
): Unit = {
// Ensure the specified key has a private key in the vault.
val publicKey =
nodeInstance.keys.secret
.list(
filterFingerprint = key.toProtoPrimitive,
purpose = Set(purpose),
) match {
case privateKeyMetadata +: Nil => privateKeyMetadata.publicKey
case Nil =>
throw new IllegalArgumentException("The specified key is unknown to the key owner")
case multipleKeys =>
throw new IllegalArgumentException(
s"Found ${multipleKeys.size} keys where only one key was expected. Specify a full key instead of a prefix"
)
}
val publicKey = nodeInstance.keys.secret.list(
filterFingerprint = key.toProtoPrimitive,
purpose = Set(purpose),
) match {
case privateKeyMetadata +: Nil => privateKeyMetadata.publicKey
case Nil =>
throw new IllegalArgumentException("The specified key is unknown to the key owner")
case multipleKeys =>
throw new IllegalArgumentException(
s"Found ${multipleKeys.size} keys where only one key was expected. Specify a full key instead of a prefix"
)
}
// Look for an existing authorized OKM mapping.
val maybePreviousState = expectAtMostOneResult(
@ -906,21 +904,42 @@ class TopologyAdministrationGroupX(
}
}
synchronisation
.runAdminCommand(synchronize)(
TopologyAdminCommandsX.Write
.Propose(
mapping = proposedMapping,
signedBy = signedBy.toList,
change = ops,
serial = Some(serial),
mustFullyAuthorize = mustFullyAuthorize,
forceChange = force,
store = AuthorizedStore.filterName,
)
)
.discard
propose(
proposedMapping,
serial,
ops,
signedBy,
AuthorizedStore.filterName,
synchronize,
mustFullyAuthorize,
force,
).discard
}
def propose(
proposedMapping: OwnerToKeyMappingX,
serial: RequireTypes.PositiveNumeric[Int],
ops: TopologyChangeOpX = TopologyChangeOpX.Replace,
signedBy: Option[Fingerprint] = None,
store: String = AuthorizedStore.filterName,
synchronize: Option[config.NonNegativeDuration] = Some(
consoleEnvironment.commandTimeouts.bounded
),
// configurable in case of a key under a decentralized namespace
mustFullyAuthorize: Boolean = true,
force: Boolean = false,
): SignedTopologyTransactionX[TopologyChangeOpX, OwnerToKeyMappingX] =
synchronisation.runAdminCommand(synchronize)(
TopologyAdminCommandsX.Write.Propose(
mapping = proposedMapping,
signedBy = signedBy.toList,
store = store,
change = ops,
serial = Some(serial),
mustFullyAuthorize = mustFullyAuthorize,
forceChange = force,
)
)
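// Illustrative sketch (assumes `mapping` is an OwnerToKeyMappingX already in
// scope): propose a replacement with a bumped serial, using the defaults for
// store, signing key and synchronization:
//
//   val signedTx = propose(
//     proposedMapping = mapping,
//     serial = PositiveInt.tryCreate(2),
//   )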
}
@Help.Summary("Manage party to participant mappings")
@ -1992,7 +2011,7 @@ class TopologyAdministrationGroupX(
force: must be set to true when performing a dangerous operation, such as increasing the ledgerTimeRecordTimeTolerance"""
)
def propose_update(
domainId: DomainId, // TODO(#15803) check whether we can infer domainId
domainId: DomainId,
update: ConsoleDynamicDomainParameters => ConsoleDynamicDomainParameters,
mustFullyAuthorize: Boolean = false,
// TODO(#14056) don't use the instance's root namespace key by default.

View File

@ -4,6 +4,7 @@
package com.digitalasset.canton.environment
import cats.syntax.either.*
import cats.syntax.option.*
import com.digitalasset.canton.admin.api.client.data.CommunityCantonStatus
import com.digitalasset.canton.config.{CantonCommunityConfig, TestingConfigInternal}
import com.digitalasset.canton.console.{
@ -23,7 +24,9 @@ import com.digitalasset.canton.console.{
Help,
LocalDomainReference,
LocalInstanceReferenceCommon,
LocalMediatorReferenceX,
LocalParticipantReference,
LocalSequencerNodeReferenceX,
NodeReferences,
StandardConsoleOutput,
}
@ -31,9 +34,14 @@ import com.digitalasset.canton.crypto.CommunityCryptoFactory
import com.digitalasset.canton.crypto.admin.grpc.GrpcVaultService.CommunityGrpcVaultServiceFactory
import com.digitalasset.canton.crypto.store.CryptoPrivateStore.CommunityCryptoPrivateStoreFactory
import com.digitalasset.canton.domain.DomainNodeBootstrap
import com.digitalasset.canton.domain.admin.v0.EnterpriseSequencerAdministrationServiceGrpc
import com.digitalasset.canton.domain.mediator.*
import com.digitalasset.canton.domain.metrics.MediatorNodeMetrics
import com.digitalasset.canton.domain.sequencing.SequencerNodeBootstrapX
import com.digitalasset.canton.domain.sequencing.config.CommunitySequencerNodeXConfig
import com.digitalasset.canton.domain.sequencing.sequencer.CommunitySequencerFactory
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.networking.grpc.StaticGrpcServices
import com.digitalasset.canton.participant.{ParticipantNodeBootstrap, ParticipantNodeBootstrapX}
import com.digitalasset.canton.resource.{
CommunityDbMigrationsFactory,
@ -77,6 +85,44 @@ class CommunityEnvironment(
new CommunityHealthDumpGenerator(this, commandRunner)
}
override protected def createSequencerX(
name: String,
sequencerConfig: CommunitySequencerNodeXConfig,
): SequencerNodeBootstrapX = {
val nodeFactoryArguments = NodeFactoryArguments(
name,
sequencerConfig,
config.sequencerNodeParametersByStringX(name),
createClock(Some(SequencerNodeBootstrapX.LoggerFactoryKeyName -> name)),
metricsFactory.forSequencer(name),
testingConfig,
futureSupervisor,
loggerFactory.append(SequencerNodeBootstrapX.LoggerFactoryKeyName, name),
writeHealthDumpToFile,
configuredOpenTelemetry,
)
val bootstrapCommonArguments = nodeFactoryArguments
.toCantonNodeBootstrapCommonArguments(
new CommunityStorageFactory(sequencerConfig.storage),
new CommunityCryptoFactory(),
new CommunityCryptoPrivateStoreFactory(),
new CommunityGrpcVaultServiceFactory,
)
.valueOr(err =>
throw new RuntimeException(s"Failed to create sequencer-x node $name: $err")
) // TODO(i3168): Handle node startup errors gracefully
new SequencerNodeBootstrapX(
bootstrapCommonArguments,
CommunitySequencerFactory,
(_, _) =>
StaticGrpcServices
.notSupportedByCommunity(EnterpriseSequencerAdministrationServiceGrpc.SERVICE, logger)
.some,
)
}
override protected def createMediatorX(
name: String,
mediatorConfig: CommunityMediatorNodeXConfig,
@ -139,9 +185,11 @@ class CommunityConsoleEnvironment(
override def startupOrderPrecedence(instance: LocalInstanceReferenceCommon): Int =
instance match {
case _: LocalDomainReference => 1
case _: LocalParticipantReference => 2
case _ => 3
case _: LocalSequencerNodeReferenceX => 1
case _: LocalDomainReference => 2
case _: LocalMediatorReferenceX => 3
case _: LocalParticipantReference => 4
case _ => 5
}
override protected def createDomainReference(name: String): DomainLocalRef =

View File

@ -24,6 +24,7 @@ import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.domain.DomainNodeBootstrap
import com.digitalasset.canton.domain.mediator.{MediatorNodeBootstrapX, MediatorNodeParameters}
import com.digitalasset.canton.domain.metrics.MediatorNodeMetrics
import com.digitalasset.canton.domain.sequencing.SequencerNodeBootstrapX
import com.digitalasset.canton.environment.CantonNodeBootstrap.HealthDumpFunction
import com.digitalasset.canton.environment.Environment.*
import com.digitalasset.canton.environment.ParticipantNodes.{ParticipantNodesOld, ParticipantNodesX}
@ -292,6 +293,16 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing {
config.participantNodeParametersByString,
loggerFactory,
)
val sequencersX = new SequencerNodesX(
createSequencerX,
migrationsFactory,
timeouts,
config.sequencersByStringX,
config.sequencerNodeParametersByStringX,
loggerFactory,
)
val mediatorsX =
new MediatorNodesX(
createMediatorX,
@ -305,7 +316,7 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing {
// convenient grouping of all node collections for performing operations
// intentionally defined in the order we'd like to start them
protected def allNodes: List[Nodes[CantonNode, CantonNodeBootstrap[CantonNode]]] =
List(domains, participants, participantsX)
List(sequencersX, domains, mediatorsX, participants, participantsX)
private def runningNodes: Seq[CantonNodeBootstrap[CantonNode]] = allNodes.flatMap(_.running)
private def autoConnectLocalNodes(): Either[StartupError, Unit] = {
@ -527,6 +538,11 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing {
.valueOr(err => throw new RuntimeException(s"Failed to create participant bootstrap: $err"))
}
protected def createSequencerX(
name: String,
sequencerConfig: Config#SequencerNodeXConfigType,
): SequencerNodeBootstrapX
protected def createMediatorX(
name: String,
mediatorConfig: Config#MediatorNodeXConfigType,

View File

@ -18,6 +18,11 @@ import com.digitalasset.canton.domain.mediator.{
MediatorNodeParameters,
MediatorNodeX,
}
import com.digitalasset.canton.domain.sequencing.config.{
SequencerNodeConfigCommon,
SequencerNodeParameters,
}
import com.digitalasset.canton.domain.sequencing.{SequencerNodeBootstrapX, SequencerNodeX}
import com.digitalasset.canton.domain.{Domain, DomainNodeBootstrap, DomainNodeParameters}
import com.digitalasset.canton.lifecycle.*
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
@ -445,6 +450,24 @@ class DomainNodes[DC <: DomainConfig](
loggerFactory,
)
class SequencerNodesX[SC <: SequencerNodeConfigCommon](
create: (String, SC) => SequencerNodeBootstrapX,
migrationsFactory: DbMigrationsFactory,
timeouts: ProcessingTimeout,
configs: Map[String, SC],
parameters: String => SequencerNodeParameters,
loggerFactory: NamedLoggerFactory,
)(implicit ec: ExecutionContext)
extends ManagedNodes[SequencerNodeX, SC, SequencerNodeParameters, SequencerNodeBootstrapX](
create,
migrationsFactory,
timeouts,
configs,
parameters,
startUpGroup = 0,
loggerFactory,
)
class MediatorNodesX[MNC <: MediatorNodeConfigCommon](
create: (String, MNC) => MediatorNodeBootstrapX,
migrationsFactory: DbMigrationsFactory,

View File

@ -0,0 +1,17 @@
Reference Configurations
========================
This directory contains a set of reference configurations. The configurations aim to provide a
starting point for your own setup. The following configurations are included:
* `sandbox`: A simple setup for a single participant node connected to a single
domain node, using in-memory stores for testing.
* `participant`: A participant node storing its data in a PostgreSQL database.
* `sequencer`: A sequencer node.
* `mediator`: A mediator node.
If you use TLS, note that you need to have an appropriate set of TLS certificates to run the example configurations.
You can use the `tls/gen-test-certs.sh` script to generate a set of self-signed certificates for testing purposes.
The script requires openssl to be installed on your system.
Please check the [installation guide](https://docs.daml.com/canton/usermanual/installation.html) for further details on how to run these configurations.
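As a quick smoke test, you can generate the test certificates and then launch the sandbox. The following is a minimal sketch that assumes you run it from the unpacked release directory (where `bin/canton` and this `config/` directory live):

```
# generate self-signed test certificates into config/tls
# (required by the TLS-enabled configs; needs openssl)
(cd config/tls && bash gen-test-certs.sh)

# start the in-memory sandbox (participant + sequencer + mediator) and auto-connect it
./bin/canton -c config/sandbox.conf --auto-connect-local
```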

View File

@ -0,0 +1,23 @@
// Example Mediator configuration
// Include the shared configuration file (which includes storage and monitoring)
include required("shared.conf")
// TLS configuration
// Please check with: https://docs.daml.com/2.8.0/canton/usermanual/apis.html#tls-configuration
// Comment out the following line to disable TLS
include required("tls/mtls-admin-api.conf")
canton.mediators-x.mediator {
// Storage configuration (references included storage from shared.conf)
storage = ${_shared.storage}
storage.config.properties.databaseName = "canton_mediator"
admin-api {
address = localhost
port = 10042
tls = ${?_shared.admin-api-mtls}
}
}

View File

@ -0,0 +1,8 @@
canton.monitoring {
// Enables detailed query monitoring, which you can use to diagnose database performance issues.
log-query-cost.every = 60s
// Logs all messages that enter or exit the server. Has a significant performance impact, but can
// be very useful for debugging.
logging.api.message-payloads = true
}

View File

@ -0,0 +1,18 @@
// The following configuration options turn on future features of the system. These features are not
// stable and not supported for production. If you enable them, you will no longer be able to
// upgrade to a stable version of Canton.
_shared {
participant-dev-params = {
dev-version-support = true
initial-protocol-version = dev
}
// domain parameters config
domain-dev-params = {
dev-version-support = true
protocol-version = dev
}
}
canton.parameters {
non-standard-config = yes
dev-version-support = yes
}

View File

@ -0,0 +1,9 @@
// The following parameters enable various commands that are dangerous or not yet GA-supported.
// Please use them with care: they are not supported for production deployments and come without
// any backwards-compatibility guarantees.
canton.features {
enable-testing-commands = yes
enable-repair-commands = yes
enable-preview-commands = yes
}

View File

@ -0,0 +1,19 @@
// Parameter set that reduces the sequencer latency at the expense of a higher
// database load. Please note that this change targets the original
// high-throughput parameter set.
// The other parameter set, `low-latency`, is optimised for testing, such that the ledger
// response time is as low as possible at the cost of reduced throughput.
_shared {
sequencer-writer {
// If you need lower latency, you can use these low latency parameters
payload-write-batch-max-duration = 1ms
event-write-batch-max-duration = 1ms
payload-write-max-concurrency = 10
}
sequencer-reader {
// How often should the reader poll the database for updates
// low value = low latency, higher db load
polling-interval = 1ms
read-batch-size = 1000
}
}

View File

@ -0,0 +1,9 @@
canton.monitoring.metrics {
report-jvm-metrics = yes
reporters = [{
type = prometheus
address = 0.0.0.0
// This will expose the Prometheus metrics on port 9000
port = 9000
}]
}
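Once a node is running with this mixin, you can check that the exporter is up by scraping the endpoint manually; a minimal sketch, assuming the node runs on the local host with the port configured above:

```
# the metrics are served as plain text on the configured port
curl -s http://localhost:9000/metrics | head
```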

View File

@ -0,0 +1,6 @@
canton.monitoring.tracing.tracer.exporter = {
// zipkin or otlp are alternatives
type = jaeger
address = 169.254.0.0
port = 14250
}
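For local testing you can point this exporter at a Jaeger instance; one way is the standard `jaegertracing/all-in-one` Docker image (the image name and ports are Jaeger defaults, not part of this commit), after which you would set the `address` above to the host running Jaeger, e.g. `localhost`:

```
# UI on 16686, gRPC span collector on 14250 (the port used above)
docker run --rm -p 16686:16686 -p 14250:14250 jaegertracing/all-in-one
```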

View File

@ -0,0 +1,73 @@
// Example Participant Configuration
// Include the shared configuration file (which includes storage and monitoring)
include required("shared.conf")
// TLS configuration
// Please check with: https://docs.daml.com/2.8.0/canton/usermanual/apis.html#tls-configuration
// Comment out the following two lines to disable TLS
include required("tls/tls-ledger-api.conf")
include required("tls/mtls-admin-api.conf")
// JWT Configuration
// Enable JWT Authorization on the Ledger API
// Please check with: https://docs.daml.com/2.8.0/canton/usermanual/apis.html#jwt-authorization
include required("jwt/unsafe-hmac256.conf")
// include required("jwt/certificate.conf")
// include required("jwt/jwks.conf")
canton.participants-x.participant {
// Configure the node identifier
init.identity.node-identifier = ${?_shared.identifier}
// Storage configuration (references included storage from shared.conf)
storage = ${_shared.storage}
storage.config.properties.databaseName = "canton_participant"
// The following database parameter set assumes that the participant runs on a host machine with 8-16 cores
// and that the database server has 8 cores available for this node.
// https://docs.daml.com/2.8.0/canton/usermanual/persistence.html#performance
// Ideal allocation depends on your use-case.
// https://docs.daml.com/2.8.0/canton/usermanual/persistence.html#max-connection-settings
// Large: 18 = (6,6,6), Medium: 9 = (3,3,3), Small: 6 = (2,2,2)
storage.parameters {
connection-allocation {
num-ledger-api = 6
num-reads = 6
num-writes = 6
}
max-connections = 18
// Optionally define the ledger-api jdbc URL directly (used for Oracle backends)
ledger-api-jdbc-url = ${?_shared.storage.ledger-api-jdbc-url}
}
// Ledger API Configuration Section
ledger-api {
// by default, canton binds to 127.0.0.1, only enabling localhost connections
// you need to explicitly set the address to enable connections from other hosts
address = localhost
port = 10001
tls = ${?_shared.ledger-api-tls}
// Include JWT Authorization
auth-services = ${?_shared.ledger-api.auth-services}
}
admin-api {
address = localhost
port = 10002
tls = ${?_shared.admin-api-mtls}
}
// Configure GRPC Health Server for monitoring
// See https://docs.daml.com/canton/usermanual/monitoring.html#grpc-health-check-service
monitoring.grpc-health-server {
address = localhost
port = 10003
}
// Optionally include parameters defined in `misc/dev-protocol.conf`
// Please note that you cannot use dev features in production.
parameters = ${?_shared.participant-dev-params}
}
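With the node running, the gRPC health endpoint configured above can be probed from the command line. A sketch using `grpcurl` (a third-party tool, not shipped with Canton); the health server speaks the standard gRPC health-checking protocol:

```
# query the grpc.health.v1 service exposed on the monitoring port
grpcurl -plaintext localhost:10003 grpc.health.v1.Health/Check
```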

View File

@ -0,0 +1,14 @@
// Example remote mediators configuration
// Include TLS configuration
include required("../tls/mtls-admin-api.conf")
canton {
remote-mediators-x.mediator {
admin-api {
address = localhost
port = 10042
tls = ${?_shared.admin-api-client-mtls}
}
}
}

View File

@ -0,0 +1,19 @@
// Example remote participant configuration
// Include TLS configuration
include required("../tls/mtls-admin-api.conf")
include required("../tls/tls-ledger-api.conf")
canton {
remote-participants-x.participant {
ledger-api {
address = localhost
port = 10001
tls = ${?_shared.ledger-api-client-tls}
}
admin-api {
address = localhost
port = 10002
tls = ${?_shared.admin-api-client-mtls}
}
}
}
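A remote configuration like this gives you a console against a node that is already running elsewhere. A minimal sketch, assuming the participant was started as a daemon with the reference configuration on the same host and that this file lives at `config/remote/participant.conf`:

```
./bin/canton -c config/remote/participant.conf
```

Inside the console, the instance is available as `participant`, so commands such as `participant.health.status` run against the remote node.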

View File

@ -0,0 +1,19 @@
// Example remote sequencer configuration
// Include TLS configuration
include required("../tls/mtls-admin-api.conf")
include required("../tls/tls-public-api.conf")
canton {
remote-sequencers-x.sequencer {
public-api = ${?_shared.public-api-client-tls}
public-api {
address = localhost
port = 10038
}
admin-api {
address = localhost
port = 10039
tls = ${?_shared.admin-api-client-mtls}
}
}
}

View File

@ -0,0 +1,38 @@
// Sandbox configuration
//
// You can start & auto-connect the sandbox with
// ./bin/canton -c config/sandbox.conf --auto-connect-local
//
include required("misc/debug.conf")
include required("misc/dev.conf")
canton {
participants-x.sandbox {
// Enable engine stack traces for debugging
parameters.enable-engine-stack-traces = true
ledger-api {
address = localhost
port = 10021
}
admin-api {
address = localhost
port = 10022
}
}
sequencers-x.local {
public-api {
address = localhost
port = 10028
}
admin-api {
address = localhost
port = 10029
}
}
mediators-x.localMediator {
admin-api {
address = localhost
port = 10024
}
}
}

View File

@ -0,0 +1,45 @@
// Example Sequencer Configuration
// Include the shared configuration file (which includes storage and monitoring)
include required("shared.conf")
// TLS configuration
// Please check with: https://docs.daml.com/2.8.0/canton/usermanual/apis.html#tls-configuration
// Comment out the following two lines to disable TLS
include required("tls/tls-public-api.conf")
include required("tls/mtls-admin-api.conf")
// Optionally include the lower-latency configuration. This is necessary to push
// the transaction latencies from ~800ms down to ~600ms, at the expense of higher db
// load due to intensive polling.
// include required("misc/low-latency-sequencer.conf")
canton.sequencers-x.sequencer {
// Storage configuration (references included storage from shared.conf)
storage = ${_shared.storage}
storage.config.properties.databaseName = "canton_sequencer"
public-api {
address = localhost
port = 10038
tls = ${?_shared.public-api-tls}
}
admin-api {
address = localhost
port = 10039
tls = ${?_shared.admin-api-mtls}
}
sequencer {
type = database
writer = ${?_shared.sequencer-writer}
reader = ${?_shared.sequencer-reader}
// Default parameterization for the writer;
// high-throughput and low-latency are the two available parameter sets
writer.type = high-throughput
high-availability.enabled = true
}
}

View File

@ -0,0 +1,28 @@
// ------------------------------------
// Storage Choice
// ------------------------------------
// Include the Postgres persistence configuration mixin.
// You can define the Postgres connectivity settings either by using the environment
// variables POSTGRES_HOST, POSTGRES_PORT, POSTGRES_USER, POSTGRES_PASSWORD
// (see storage/postgres.conf for details) or setting the values directly in the config file.
// You can also remove them from the postgres.conf and add them below directly.
include required("storage/postgres.conf")
// If you do not need persistence, you can pick
// include required("storage/memory.conf")
// Monitoring Configuration
// Turn on Prometheus metrics
include required("monitoring/prometheus.conf")
// Turn on tracing with Jaeger, Zipkin or OTLP
// include required("monitoring/tracing.conf")
// Upon automatic initialisation, pick the following prefix for the node identifier
// the node will then be <prefix>::<fingerprint of a randomly generated key>
// Random is good for larger networks where you do not want others to know who you
// are. Explicit is better for troubleshooting.
_shared.identifier = {
type = random
// type = explicit
// name = "myNodeIdentifier"
}

View File

@ -3,11 +3,8 @@
# This file defines a shared configuration resource. You can mix it into your configuration by
# referring to the shared storage resource and adding the database name.
#
# Check nodes/participant1.conf as an example
# Please note that using H2 is unstable and not supported other than for testing.
#
# Please note that using H2 is currently not advised and not supported.
#
_shared {
storage {
type = "h2"
@ -17,4 +14,4 @@ _shared {
driver = org.h2.Driver
}
}
}
}

View File

@ -0,0 +1,5 @@
_shared {
storage {
type = "memory"
}
}

View File

@ -0,0 +1,47 @@
# Postgres persistence configuration mixin
#
# This file defines a shared configuration resource. You can mix it into your configuration by
# referring to the shared storage resource and adding the database name.
#
# Example:
# participant1 {
# storage = ${_shared.storage}
# storage.config.properties.databaseName = "participant1"
# }
#
# The user and password are not set. You will want to either change this configuration file or pass
# the settings in via the environment variables POSTGRES_USER and POSTGRES_PASSWORD.
#
_shared {
storage {
type = postgres
config {
dataSourceClass = "org.postgresql.ds.PGSimpleDataSource"
properties = {
serverName = "localhost"
# the next line will override the above "serverName" if the environment variable POSTGRES_HOST exists,
# which makes it optional
serverName = ${?POSTGRES_HOST}
portNumber = "5432"
portNumber = ${?POSTGRES_PORT}
# user and password are required
user = ${POSTGRES_USER}
password = ${POSTGRES_PASSWORD}
}
}
parameters {
# If defined, will configure the number of database connections per node.
# Please note that the number of connections can be fine-tuned for participant nodes (see participant.conf)
max-connections = ${?POSTGRES_NUM_CONNECTIONS}
# If true, then database migrations will be applied on startup automatically
# Otherwise, you will have to run the migration manually using participant.db.migrate()
migrate-and-start = false
# If true (default), then the node will fail to start if it cannot connect to the database.
# The setting is useful during initial deployment to get immediate feedback when the
# database is not available.
# In a production setup, you might want to set this to false to allow uncoordinated startups between
# the database and the node.
fail-fast-on-startup = true
}
}
}
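Because the connectivity settings are read from the environment, a node can be pointed at a different database server without touching this file. A sketch with placeholder credentials:

```
export POSTGRES_HOST=localhost
export POSTGRES_PORT=5432
export POSTGRES_USER=canton        # placeholder user
export POSTGRES_PASSWORD=changeme  # placeholder password
./bin/canton -c config/participant.conf
```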

View File

@ -0,0 +1,56 @@
#!/bin/bash
# Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# architecture-handbook-entry-begin: GenTestCertsCmds
DAYS=3650
function create_key {
local name=$1
openssl genrsa -out "${name}.key" 4096
# netty requires the keys in pkcs8 format, therefore convert them appropriately
openssl pkcs8 -topk8 -nocrypt -in "${name}.key" -out "${name}.pem"
}
# create self signed certificate
function create_certificate {
local name=$1
local subj=$2
openssl req -new -x509 -sha256 -key "${name}.key" \
-out "${name}.crt" -days ${DAYS} -subj "$subj"
}
# create certificate signing request with subject and SAN
# we need the SANs as our certificates also need to include localhost or the
# loopback IP for the console access to the admin-api and the ledger-api
function create_csr {
local name=$1
local subj=$2
local san=$3
(
echo "authorityKeyIdentifier=keyid,issuer"
echo "basicConstraints=CA:FALSE"
echo "keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment"
) > ${name}.ext
if [[ -n $san ]]; then
echo "subjectAltName=${san}" >> ${name}.ext
fi
# create the certificate signing request (ensure that localhost is present as a SAN, as otherwise local admin connections won't work)
openssl req -new -sha256 -key "${name}.key" -out "${name}.csr" -subj "$subj"
}
function sign_csr {
local name=$1
local sign=$2
openssl x509 -req -sha256 -in "${name}.csr" -extfile "${name}.ext" -CA "${sign}.crt" -CAkey "${sign}.key" -CAcreateserial \
-out "${name}.crt" -days ${DAYS}
rm "${name}.ext" "${name}.csr"
}
function print_certificate {
local name=$1
openssl x509 -in "${name}.crt" -text -noout
}
# architecture-handbook-entry-end: GenTestCertsCmds

View File

@ -0,0 +1,38 @@
#!/bin/bash
# Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# architecture-handbook-entry-begin: GenTestCerts
# include certs-common.sh from config/tls
. "$(dirname "${BASH_SOURCE[0]}")/certs-common.sh"
# create root certificate such that we can issue self-signed certs
create_key "root-ca"
create_certificate "root-ca" "/O=TESTING/OU=ROOT CA/emailAddress=canton@digitalasset.com"
print_certificate "root-ca"
# create public api certificate
create_key "public-api"
create_csr "public-api" "/O=TESTING/OU=DOMAIN/CN=localhost/emailAddress=canton@digitalasset.com" "DNS:localhost,IP:127.0.0.1"
sign_csr "public-api" "root-ca"
print_certificate "public-api"
# create participant ledger-api certificate
create_key "ledger-api"
create_csr "ledger-api" "/O=TESTING/OU=PARTICIPANT/CN=localhost/emailAddress=canton@digitalasset.com" "DNS:localhost,IP:127.0.0.1"
sign_csr "ledger-api" "root-ca"
# create participant admin-api certificate
create_key "admin-api"
create_csr "admin-api" "/O=TESTING/OU=PARTICIPANT ADMIN/CN=localhost/emailAddress=canton@digitalasset.com" "DNS:localhost,IP:127.0.0.1"
sign_csr "admin-api" "root-ca"
# create participant client key and certificate
create_key "admin-client"
create_csr "admin-client" "/O=TESTING/OU=PARTICIPANT ADMIN CLIENT/CN=localhost/emailAddress=canton@digitalasset.com"
sign_csr "admin-client" "root-ca"
print_certificate "admin-client"
# architecture-handbook-entry-end: GenTestCerts
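After running the script, you can verify that each generated leaf certificate chains up to the test root; a quick sketch, run from the directory containing the generated files:

```
# all leaf certificates were signed by root-ca, so each check should print "OK"
openssl verify -CAfile root-ca.crt public-api.crt ledger-api.crt admin-api.crt admin-client.crt
```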

View File

@ -0,0 +1,38 @@
include required("tls-cert-location.conf")
_shared {
admin-api-mtls {
// Certificate and Key used by Admin API server
cert-chain-file = ${?_TLS_CERT_LOCATION}"/admin-api.crt"
private-key-file = ${?_TLS_CERT_LOCATION}"/admin-api.pem"
// Certificate used to validate client certificates. The file also needs to be provided
// if we use a self-signed certificate, such that the internal processes can connect to
// the APIs.
trust-collection-file = ${?_TLS_CERT_LOCATION}"/root-ca.crt"
client-auth = {
// none, optional and require are supported
type = require
// If clients are required to authenticate as well, we need to provide a client
// certificate and the key, as Canton has internal processes that need to connect to these
// APIs. If the server certificate is trusted by the trust-collection, then you can
// just use the server certificates (which usually happens if you use self-signed certs as we
// do in this example). Otherwise, you need to create separate ones.
admin-client {
// In this example, we use the same certificate as the server certificate.
// Please see the remote participant config for how to configure a remote client.
cert-chain-file = ${?_TLS_CERT_LOCATION}"/admin-api.crt"
private-key-file = ${?_TLS_CERT_LOCATION}"/admin-api.pem"
}
}
}
admin-api-client-mtls {
// Certificate and Key used by remote client
client-cert {
cert-chain-file = ${?_TLS_CERT_LOCATION}"/admin-api.crt"
private-key-file = ${?_TLS_CERT_LOCATION}"/admin-api.pem"
}
// The trust collection used to verify the server certificate. Used here because of the self-signed certs.
trust-collection-file = ${?_TLS_CERT_LOCATION}"/root-ca.crt"
}
}

View File

@ -0,0 +1,2 @@
_TLS_CERT_LOCATION="config/tls"
_TLS_CERT_LOCATION=${?TLS_CERT_LOCATION}
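Thanks to the `${?TLS_CERT_LOCATION}` substitution, the certificate directory can be overridden per environment without editing any configuration file; a sketch with a hypothetical path:

```
# point all TLS mixins at an alternative certificate directory
export TLS_CERT_LOCATION=/etc/canton/tls
./bin/canton -c config/participant.conf
```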

View File

@ -0,0 +1,19 @@
include required("tls-cert-location.conf")
_shared {
ledger-api-tls {
// Certificate to be used by the server
cert-chain-file = ${?_TLS_CERT_LOCATION}"/ledger-api.crt"
// The private key of the server
private-key-file = ${?_TLS_CERT_LOCATION}"/ledger-api.pem"
// The trust collection. We use it in this example as our certificates are self-signed,
// but we need it such that the internal Canton processes can connect to the
// Ledger API. In a production environment, you would use a proper CA and therefore
// not require this.
trust-collection-file = ${?_TLS_CERT_LOCATION}"/root-ca.crt"
}
ledger-api-client-tls {
// The trust collection used to verify the server certificate. Used here because of the self-signed certs.
trust-collection-file = ${?_TLS_CERT_LOCATION}"/root-ca.crt"
}
}

View File

@ -0,0 +1,14 @@
include required("tls-cert-location.conf")
_shared {
public-api-tls {
// certificate to be used by the server
cert-chain-file = ${?_TLS_CERT_LOCATION}"/public-api.crt"
// the private key of the server
private-key-file = ${?_TLS_CERT_LOCATION}"/public-api.pem"
}
public-api-client-tls {
transport-security = true
// The trust collection used to verify the server certificate. Used here because of the self-signed certs.
custom-trust-certificates.pem-file = ${?_TLS_CERT_LOCATION}"/root-ca.crt"
}
}

View File

@ -0,0 +1,3 @@
canton_participant
canton_mediator
canton_sequencer

View File

@ -0,0 +1,10 @@
#!/bin/bash
# user-manual-entry-begin: PostgresDbEnvConfiguration
export POSTGRES_HOST="localhost"
export POSTGRES_USER="test-user"
export POSTGRES_PASSWORD="test-password"
export POSTGRES_DB=postgres
export POSTGRES_PORT=5432
# user-manual-entry-end: PostgresDbEnvConfiguration
export DBPREFIX=""

View File

@ -0,0 +1,128 @@
#!/usr/bin/env bash
# Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
function check_file() {
local -r file="$1"
if [[ ! -e $file ]]; then
echo "Please run this script from the directory containing the $file file."
exit 1
fi
}
function do_usage() {
echo "Usage: $0 <setup|reset|drop|create-user|start|stop>"
echo " setup: create databases"
echo " reset: drop and recreate databases"
echo " drop: drop databases"
echo " create-user: create user"
echo " start [durable]: start docker db. Without durable, it will remove the container after exit"
echo " resume: resume durable docker db"
echo " stop: stop docker db"
}
function do_setup() {
for db in $(cat "databases")
do
echo "creating db ${db}"
echo "create database ${DBPREFIX}${db}; grant all on database ${DBPREFIX}${db} to current_user;" | \
PGPASSWORD=$POSTGRES_PASSWORD psql -h $POSTGRES_HOST -p $POSTGRES_PORT $POSTGRES_DB $POSTGRES_USER
done
}
function do_drop() {
for db in $(cat "databases")
do
echo "dropping db ${db}"
echo "drop database if exists ${DBPREFIX}${db};" | \
PGPASSWORD=$POSTGRES_PASSWORD psql -h $POSTGRES_HOST -p $POSTGRES_PORT $POSTGRES_DB $POSTGRES_USER
done
}
function do_create_user() {
echo "Creating user ${POSTGRES_USER} (assumes your default user can do that on ${POSTGRES_DB})..."
echo "CREATE ROLE \"${POSTGRES_USER}\" LOGIN PASSWORD '${POSTGRES_PASSWORD}';ALTER USER \"${POSTGRES_USER}\" createdb;" | \
psql -h $POSTGRES_HOST -p $POSTGRES_PORT ${POSTGRES_DB}
}
function do_start_docker_db() {
if [ "$1" == "durable" ]; then
removeDockerAfterExit=""
echo "starting durable docker based postgres"
else
echo "starting non-durable docker based postgres"
removeDockerAfterExit="--rm"
fi
docker run -d ${removeDockerAfterExit} --name canton-postgres \
--shm-size 1024mb \
--publish ${POSTGRES_PORT}:5432 \
-e POSTGRES_USER=$POSTGRES_USER \
-e POSTGRES_PASSWORD=$POSTGRES_PASSWORD \
-e POSTGRES_DB=$POSTGRES_DB \
-v "$PWD/postgres.conf":/etc/postgresql/postgresql.conf \
postgres:12 \
-c 'config_file=/etc/postgresql/postgresql.conf'
}
function do_resume_docker_db() {
echo "resuming docker based postgres"
docker start canton-postgres
}
function do_stop_docker_db() {
echo "stopping docker based postgres"
docker stop canton-postgres
}
function check_env {
if [[ -z "$POSTGRES_USER" || -z "$POSTGRES_HOST" || -z "$POSTGRES_DB" || -z "$POSTGRES_PASSWORD" || -z "$POSTGRES_PORT" ]]; then
echo 1
else
echo 0
fi
}
check_file "databases"
if [[ $(check_env) -ne 0 ]]; then
echo "Looking for db.env as environment variables are not set: POSTGRES_USER, POSTGRES_HOST, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_PORT."
echo $(env | grep -v POSTGRES_PASSWORD | grep POSTGRES)
check_file "db.env"
source "db.env"
echo $(env | grep -v POSTGRES_PASSWORD | grep POSTGRES)
if [[ $(check_env) -ne 0 ]]; then
echo "POSTGRES_ environment is not properly set in db.env"
exit 1
fi
else
echo "Using host=${POSTGRES_HOST}, port=${POSTGRES_PORT} user=${POSTGRES_USER}, db=${POSTGRES_DB} from environment"
fi
case "$1" in
setup)
do_setup
;;
reset)
do_drop
do_setup
;;
drop)
do_drop
;;
create-user)
do_create_user
;;
start)
do_start_docker_db $2
;;
resume)
do_resume_docker_db
;;
stop)
do_stop_docker_db
;;
*)
do_usage
;;
esac
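A typical first-time flow with this script is to start the database, create the user, and then create the databases; a sketch, assuming the script is saved as `db.sh` (its file name is not shown in this diff) next to the `databases` and `db.env` files:

```
./db.sh start durable   # start a durable dockerised postgres
./db.sh create-user     # create the POSTGRES_USER role
./db.sh setup           # create the databases listed in the "databases" file
```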

View File

@ -0,0 +1,37 @@
# Note, this config has been created using https://pgtune.leopard.in.ua/
# It targets a standard small docker deployment.
# DB Version: 12
# OS Type: linux
# DB Type: oltp
# Total Memory (RAM): 8 GB
# CPUs num: 4
# Connections num: 250
# Data Storage: ssd
listen_addresses = '*'
log_destination = 'stderr'
logging_collector = on
log_directory = '/var/log/postgresql/'
log_file_mode = 0644
log_filename = 'postgresql-%Y-%m-%d-%H.log'
log_min_messages = info
log_min_duration_statement = 2500
max_connections = 250
shared_buffers = 4GB
effective_cache_size = 6GB
maintenance_work_mem = 512MB
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
work_mem = 4194kB
huge_pages = off
min_wal_size = 2GB
max_wal_size = 8GB
max_worker_processes = 4
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_parallel_maintenance_workers = 2

View File

@ -1,33 +0,0 @@
# Connection to Canton.Global
***
WARNING: The global Canton domain is currently not running. This example does not work at the moment.
***
TODO(#7564) Make this example work again once the global domain is up
***
Participants require a domain to communicate with each other. Digital Asset is running a generally available
global Canton domain (Canton.Global). Any participant can decide to connect to the global domain and use it
for bilateral communication.
The global domain connectivity example demonstrates how to connect a participant node
to the global Canton domain. Currently, the global domain is operated as a test-net.
Longer term, the global domain will serve as a global fall-back committer which can be
used if no closer committer is available.
The global domain connectivity example contains two files, a configuration file and a
script which invokes the necessary registration call and subsequently tests the connection
by pinging the digital asset node.
```
../../bin/canton -c global-domain-participant.conf --bootstrap global-domain-participant.canton
```
After invoking the above script, you will be prompted with the terms of service for using the global
domain. You have to accept them once in order to be able to use it.
Please note that right now, the global domain is a pure test-net and we are regularly resetting
the domain entirely, wiping all the content, as we are still developing the protocol. Therefore,
just use it for demonstration purposes.

View File

@ -1,13 +0,0 @@
nodes.local.start()
val domainUrl = sys.env.get("DOMAIN_URL").getOrElse("https://canton.global")
val myself = participant1
myself.domains.connect("global", domainUrl)
myself.health.ping(myself)
val da = myself.parties.list(filterParty="digitalasset").head.participants.head.participant
myself.health.ping(da)

View File

@ -1,16 +0,0 @@
canton {
participants {
participant1 {
admin-api {
port= 6012
}
ledger-api {
port = 6011
}
storage {
type = memory
}
parameters.admin-workflow.bong-test-max-level = 12
}
}
}

View File

@ -1,137 +1,3 @@
# Advanced Configuration Example
This example directory contains a collection of configuration files that can be used to setup domains or
participants for various purposes. The directory contains a set of sub-folders:
- storage: contains "storage mixins" such as [memory.conf](storage/memory.conf) or [postgres.conf](storage/postgres.conf)
- nodes: contains a set of node definitions for domains and participants
- api: contains "api mixins" that modify the API behaviour such as binding to a public address or including jwt authorization
- remote: contains a set of remote node definitions for the nodes in the nodes directory.
- parameters: contains "parameter mixins" that modify the node behaviour in various ways.
## Persistence
For every setup, you need to decide which persistence layer you want to use. Supported are [memory.conf](storage/memory.conf),
[postgres.conf](storage/postgres.conf) or Oracle (Enterprise). Please [consult the manual](https://docs.daml.com/canton/usermanual/installation.html#persistence-using-postgres)
for further instructions. The examples here will illustrate the usage using the in-memory configuration.
There is a small helper script in [dbinit.py](storage/dbinit.py) which you can use to create the appropriate SQL commands
to create users and databases for a series of nodes. This is convenient if you are setting up a test-network. You can
run it using:
```
python3 examples/03-advanced-configuration/storage/dbinit.py \
--type=postgres --user=canton --pwd=<choose-wisely> --participants=2 --domains=1 --drop
```
Please run the script with ``--help`` to get an overview of all commands. Generally, you would just pipe the output
to your SQL console.
## Nodes
The nodes directory contains a set of base configuration files that can be used together with the mix-ins.
### Domain
Start a domain with the following command:
```
./bin/canton -c examples/03-advanced-configuration/storage/memory.conf,examples/03-advanced-configuration/nodes/domain1.conf
```
The domain can be started without any bootstrap script, as it self-initialises by default, waiting for incoming connections.
If you pass in multiple configuration files, they will be combined. It doesn't matter if you separate the
configurations using `,` or if you pass them with several `-c` options.
NOTE: If you unpacked the zip directory, then you might have to make the canton startup script executable
(`chmod u+x bin/canton`).
### Participants
The participant(s) can be started the same way, just by pointing to the participant configuration file.
However, before we can use the participant for any Daml processing, we need to connect it to a domain. You can
connect to the domain interactively, or use the [initialisation script](participant-init.canton).
```
./bin/canton -c examples/03-advanced-configuration/storage/memory.conf \
-c examples/03-advanced-configuration/nodes/participant1.conf,examples/03-advanced-configuration/nodes/participant2.conf \
--bootstrap=examples/03-advanced-configuration/participant-init.canton
```
The initialisation script assumes that the domain can be reached via `localhost`, which needs to change if the domain
runs on a different server.
A setup with more participant nodes can be created using the [participant](nodes/participant1.conf) as a template.
The same applies to the domain configuration. The instance names should be changed (`participant1` to something else),
as otherwise, distinguishing the nodes in a trial run will be difficult.
## API
By default, all the APIs only bind to localhost. If you want to expose them on the network, you should secure them using
TLS and JWT. You can use the mixins configuration in the ``api`` subdirectory for your convenience.
## Parameters
The parameters directory contains a set of mix-ins to modify the behaviour of your nodes.
- [nonuck.conf](nodes/nonuck.conf) enables non-UCK mode such that you can use multiple domains per participant node (preview).
## Test Your Setup
Assuming that you have started both participants and a domain, you can verify that the system works by having
participant2 pinging participant1 (the other way around also works). A ping here is just a built-in Daml
contract which gets sent from one participant to another, and the other responds by exercising a choice.
First, just make sure that the `participant2` is connected to the domain by testing whether the following command
returns `true`
```
@ participant2.domains.active("mydomain")
```
In order to ping participant1, participant2 must know participant1's `ParticipantId`. You could obtain this from
participant1's instance of the Canton console using the command `participant1.id` and copy-pasting the resulting
`ParticipantId` to participant2's Canton console. Another option is to lookup participant1's ID directly using
participant2's console:
```
@ val participant1Id = participant2.parties.list(filterParticipant="participant1").head.participants.head.participant
```
Using the console for participant2, you can now get the two participants to ping each other:
```
@ participant2.health.ping(participant1Id)
```
## Running as Background Process
If you start Canton with the commands above, you will always be in interactive mode within the Canton console.
You can start Canton as well as a non-interactive process using
```
./bin/canton daemon -c examples/03-advanced-configuration/storage/memory.conf \
-c examples/03-advanced-configuration/nodes/participant1.conf \
--bootstrap examples/03-advanced-configuration/participant-init.canton
```
## Connect To Remote Nodes
In many cases, the nodes will run in a background process, started as `daemon`, while the user would
still like the convenience of using the console. This can be achieved by defining remote domains and
participants in the configuration file.
A participant or domain configuration can be turned into a remote config using
```
./bin/canton generate remote-config -c examples/03-advanced-configuration/storage/memory.conf,examples/03-advanced-configuration/nodes/participant1.conf
```
Then, if you start Canton using
```
./bin/canton -c remote-participant1.conf
```
you will have a new instance `participant1`, which will expose most but not all commands
that a node exposes. As an example, run:
```
participant1.health.status
```
Please note that depending on your setup, you might have to adjust the target ip address.
Please note that the configuration examples have been replaced by the reference configuration in the config directory.

View File

@ -1,8 +0,0 @@
_shared {
parameters.ledger-api-server-parameters.jwt-timestamp-leeway {
default = 5
expires-at = 10
issued-at = 15
not-before = 20
}
}

View File

@ -1,7 +0,0 @@
_shared {
ledger-api {
index-service {
max-transactions-in-memory-fan-out-buffer-size = 10000 // default 1000
}
}
}

View File

@ -1,8 +0,0 @@
_shared {
ledger-api {
index-service {
max-contract-state-cache-size = 100000 // default 1e4
max-contract-key-state-cache-size = 100000 // default 1e4
}
}
}

View File

@ -1,7 +0,0 @@
_shared {
admin-api {
// by default, canton binds to 127.0.0.1, only enabling localhost connections
// you need to explicitly set the address to enable connections from other hosts
address = 0.0.0.0
}
}

View File

@ -1,11 +0,0 @@
_shared {
public-api {
// by default, canton binds to 127.0.0.1, only enabling localhost connections
// you need to explicitly set the address to enable connections from other hosts
address = 0.0.0.0
}
ledger-api {
// same as for public-api
address = 0.0.0.0
}
}

View File

@ -1,7 +0,0 @@
_shared {
ledger-api {
auth-services = [{
type = wildcard
}]
}
}

View File

@ -1,18 +0,0 @@
canton {
domains {
domain1 {
storage = ${_shared.storage}
storage.config.properties.databaseName = "domain1"
init.domain-parameters.unique-contract-keys = ${?_shared.unique-contract-keys}
public-api {
port = 10018
// if defined, this include will override the address we bind to. default is 127.0.0.1
address = ${?_shared.public-api.address}
}
admin-api {
port = 10019
address = ${?_shared.admin-api.address}
}
}
}
}

View File

@ -1,19 +0,0 @@
canton {
participants {
participant1 {
storage = ${_shared.storage}
storage.config.properties.databaseName = "participant1"
init.parameters.unique-contract-keys = ${?_shared.unique-contract-keys}
admin-api {
port = 10012
// if defined, this include will override the address we bind to. default is 127.0.0.1
address = ${?_shared.admin-api.address}
}
ledger-api {
port = 10011
address = ${?_shared.ledger-api.address}
auth-services = ${?_shared.ledger-api.auth-services}
}
}
}
}

View File

@ -1,19 +0,0 @@
canton {
participants {
participant2 {
storage = ${_shared.storage}
storage.config.properties.databaseName = "participant2"
init.parameters.unique-contract-keys = ${?_shared.unique-contract-keys}
admin-api {
port = 10022
// if defined, this include will override the address we bind to. default is 127.0.0.1
address = ${?_shared.admin-api.address}
}
ledger-api {
port = 10021
address = ${?_shared.ledger-api.address}
auth-services = ${?_shared.ledger-api.auth-services}
}
}
}
}

View File

@ -1,19 +0,0 @@
canton {
participants {
participant3 {
storage = ${_shared.storage}
storage.config.properties.databaseName = "participant3"
init.parameters.unique-contract-keys = ${?_shared.unique-contract-keys}
admin-api {
port = 10032
// if defined, this include will override the address we bind to. default is 127.0.0.1
address = ${?_shared.admin-api.address}
}
ledger-api {
port = 10031
address = ${?_shared.ledger-api.address}
auth-services = ${?_shared.ledger-api.auth-services}
}
}
}
}

View File

@ -1,19 +0,0 @@
canton {
participants {
participant4 {
storage = ${_shared.storage}
storage.config.properties.databaseName = "participant4"
init.parameters.unique-contract-keys = ${?_shared.unique-contract-keys}
admin-api {
port = 10042
// if defined, this include will override the address we bind to. default is 127.0.0.1
address = ${?_shared.admin-api.address}
}
ledger-api {
port = 10041
address = ${?_shared.ledger-api.address}
auth-services = ${?_shared.ledger-api.auth-services}
}
}
}
}

View File

@ -1,3 +0,0 @@
_shared {
unique-contract-keys = no
}

View File

@ -1,22 +0,0 @@
val participant = participants.local.head
// only run once
if(participant.domains.list_registered().isEmpty) {
// connect all local participants to the domain passing a user chosen alias and the domain port as the argument
participants.local.foreach(_.domains.connect("mydomain", "http://localhost:10018"))
// above connect operation is asynchronous. it is generally at the discretion of the domain
// to decide if a participant can join and when. therefore, we need to asynchronously wait here
// until the participant observes its activation on the domain
utils.retry_until_true {
participant.domains.active("mydomain")
}
// synchronize vetting to ensure the participant has the package needed for the ping
participant.packages.synchronize_vetting()
// verify that the connection works
participant.health.ping(participant)
}

View File

@ -1,14 +0,0 @@
canton {
remote-domains {
remoteDomain1 {
public-api {
address = 127.0.0.1
port = 10018
}
admin-api {
port = 10019
address = 127.0.0.1 // default value if omitted
}
}
}
}

View File

@ -1,14 +0,0 @@
canton {
remote-participants {
remoteParticipant1 {
admin-api {
port = 10012
address = 127.0.0.1 // is the default value if omitted
}
ledger-api {
port = 10011
address = 127.0.0.1 // is the default value if omitted
}
}
}
}

View File

@ -1,51 +0,0 @@
#!/usr/bin/python3
#
# Trivial helper script to create users / databases for Canton nodes
#
import argparse
import sys
def get_parser():
parser = argparse.ArgumentParser(description = "Helper utility to setup Canton databases for a set of nodes")
parser.add_argument("--type", help="Type of database to be setup", choices=["postgres"], default="postgres")
parser.add_argument("--participants", type=int, help="Number of participant dbs to generate (will create dbs named participantX for 1 to N)", default=0)
parser.add_argument("--domains", type=int, help="Number of domain dbs to generate (will create dbs named domainX for 1 to N)", default=0)
parser.add_argument("--sequencers", type=int, help="Number of sequencer dbs to generate (will create dbs named sequencerX for 1 to N", default=0)
parser.add_argument("--mediators", type=int, help="Number of mediators dbs to generate (will create dbs named mediatorX for 1 to N", default=0)
parser.add_argument("--user", type=str, help="Database user name. If given, the script will also generate a SQL command to create the user", required=True)
parser.add_argument("--pwd", type=str, help="Database password")
parser.add_argument("--drop", help="Drop existing", action="store_true")
return parser.parse_args()
def do_postgres(args):
print("""
DO
$do$
BEGIN
IF NOT EXISTS (
SELECT FROM pg_catalog.pg_roles
WHERE rolname = '%s') THEN
CREATE ROLE \"%s\" LOGIN PASSWORD '%s';
END IF;
END
$do$;
""" % (args.user, args.user, args.pwd))
for num, prefix in [(args.domains, "domain"), (args.participants, "participant"), (args.mediators, "mediator"), (args.sequencers, "sequencer")]:
for ii in range(1, num + 1):
dbname = prefix + str(ii)
if args.drop:
print("DROP DATABASE IF EXISTS %s;" % (dbname))
print("CREATE DATABASE %s;" % dbname)
print("GRANT ALL ON DATABASE %s to \"%s\";" % (dbname, args.user))
if __name__ == "__main__":
args = get_parser()
if args.type == "postgres":
do_postgres(args)
else:
raise Exception("Unknown database type %s" % (args.type))

View File

@ -1,5 +0,0 @@
_shared {
storage {
type = "memory"
}
}

View File

@ -1,37 +0,0 @@
# Postgres persistence configuration mixin
#
# This file defines a shared configuration resources. You can mix it into your configuration by
# refer to the shared storage resource and add the database name.
#
# Example:
# participant1 {
# storage = ${_shared.storage}
# storage.config.properties.databaseName = "participant1"
# }
#
# The user and password credentials are set to "canton" and "supersafe". As this is not "supersafe", you might
# want to either change this configuration file or pass the settings in via environment variables.
#
_shared {
storage {
type = postgres
config {
dataSourceClass = "org.postgresql.ds.PGSimpleDataSource"
properties = {
serverName = "localhost"
# the next line will override above "serverName" in case the environment variable POSTGRES_HOST exists
serverName = ${?POSTGRES_HOST}
portNumber = "5432"
portNumber = ${?POSTGRES_PORT}
# the next line will fail configuration parsing if the POSTGRES_USER environment variable is not set
user = ${POSTGRES_USER}
password = ${POSTGRES_PASSWORD}
}
}
// If defined, will configure the number of database connections per node.
// Please ensure that your database is setup with sufficient connections.
// If not configured explicitly, every node will create one connection per core on the host machine. This is
// subject to change with future improvements.
parameters.max-connections = ${?POSTGRES_NUM_CONNECTIONS}
}
}

View File

@ -1,141 +0,0 @@
# Messaging via the global domain
***
WARNING: The global Canton domain is currently not running. This example does not work at the moment.
You need to start your own Canton domain and set the environment variable canton-examples.domain-url
to the URL of your domain.
***
TODO(#7564) Make this example work again once the global domain is up
***
Participants require a domain to communicate with each other. Digital
Asset is running a generally available global Canton domain
(Canton.Global). Any participant can decide to connect to the global
domain and use it for bilateral communication.
The messaging example provides a simple messaging application via the
global domain.
The example is structured as follows:
```
.
|-- message Daml model for messages
| |- .daml/dist/message-0.0.1.dar Compiled DAR file
| |- daml/Message.daml Daml source code for messages
| |- daml.yaml Daml configuration file
| |- frontend-config.js Configuration file for Daml Navigator
|
|-- contact Daml model for contacts
| |- daml/Contact.daml Incomplete Daml source code for contacts
| |- daml/Contact.solution Example solution for the Daml exercise below
| |- daml.yaml Daml configuration file
| |- frontend-config.js Configuration file for Daml Navigator
|
|-- canton.conf Configuration file for one participant
|-- init.canton Initialization script for Canton
```
The files in `message` must not be changed because it defines the
format of messages to be exchanged. So `message-0.0.1.dar` must be
the same on all participants that want to exchange messages.
Run the application by performing the following steps:
1. Compile the contact model by issuing the command `daml build` in
the `contact` folder. This should generate the file
`contact/.daml/dist/contact-0.0.1.dar`.
2. Start Canton from the `06-messaging` folder with the following command
```
../../bin/canton -c canton.conf --bootstrap init.canton
```
If you have never connected to the global domain before, you will
be shown the terms of service for using the global domain. You will
have to accept it once in order to be able to use it.
Next, you will be asked for your username in the messaging
application. Canton usernames may contain only letters, numbers,
`-` and `_`, and may not be longer than 189 characters. Canton will suffix your
username to make it globally unique. Your suffixed user name will
be output on the screen.
You can set the username in the Java system property
`canton-examples.username` as a command-line argument:
```
../../bin/canton -c canton.conf --bootstrap init.canton -Dcanton-examples.username=Alice
```
3. Start Daml Navigator.
After step 2, Canton outputs the command that you need to run to
start Daml Navigator. Run the command in a separate terminal from
the `contact` folder. Typically, the command looks as follows:
```
daml navigator server localhost 7011 -t wallclock --port 7015 -c ui-backend-participant1.conf
```
This will start the frontend on port 7015.
4. Open a browser and point it to `http://localhost:7015`.
Login with your chosen username.
5. Find someone else whom you want to send a message. You can search
for usernames with the following command in the Canton console:
```
findUser("Alice")
```
This will list all suffixed usernames that contain the string
`Alice`. Note that these users need not be currently online.
Click on the `Message:Message` template in the `Templates` view of
Navigator to create a new message. Put your suffixed username as
`sender` and the recipient's suffixed username as `receiver`.
Click `Submit` to send the message. A `Message:Message` contract
should soon be shown in the `Contracts` table as well as under `Sent`.
The receiver can use the `Reply` choice to send a message back.
Stop Canton and Navigator after that.
Note: Canton is configured to run with a file-based database.
Your username suffix and the messages will be persisted
on your computer in the file `participant1.mv.db`.
Delete this file if you want to start afresh.
6. Extend the `Contact` Daml model. As is, you must specify the suffixed
usernames of yourself and your contact whenever you send a new
message. The `Contact` template in `contact/daml/Contact.daml`
can store these usernames, but it does not have any choices yet.
Add a non-consuming choice `Send` to the `Contact` template that
takes a message as parameter. It shall create a `Message` with
`myself` as sender, `other` as recipient, and the given message.
Write a script to test the message sending via a `Contact` contract
and run the script in Daml studio.
Compile the extended `Contact` Daml model by running `daml build`
in the `contact` folder.
7. Restart Canton and Navigator as described in Step 5.
You will be shown a reminder of your suffixed user name
instead of being asked for one.
Create a `Contact` contract for your counterparty.
Use the `Send` choice on the `Contact` to send a message.
Since you have modified the `Contact` template, there will be now
several `Contact` templates in the `Templates` tab; one for each
version. Your existing `Contact` contracts refer to the old
version and therefore do not offer the `Send` choice. You would
have to explicitly upgrade the contracts; this process is explained
in the Daml documentation at https://docs.daml.com/upgrade/index.html.

View File

@ -1,22 +0,0 @@
canton {
participants {
participant1 {
admin-api {
port= 7012
}
ledger-api {
port = 7011
}
storage {
type = "h2"
config = {
connectionPool = disabled
url = "jdbc:h2:file:./participant1;MODE=PostgreSQL;LOCK_TIMEOUT=10000;DB_CLOSE_DELAY=-1"
user = "participant1"
password = "morethansafe"
driver = org.h2.Driver
}
}
}
}
}

View File

@ -1,2 +0,0 @@
/.daml
ui-backend-participant1.conf

Some files were not shown because too many files have changed in this diff.