[docs] remove manually-edited rst files (#16376)

They are moving to the docs repo. I'm removing them here first and will
add to the docs repo afterwards, to avoid losing edits in the meantime.
This commit is contained in:
Gary Verhaegen 2023-02-23 15:55:04 +01:00 committed by GitHub
parent 370a468524
commit 7f36d5e4b1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
271 changed files with 0 additions and 26587 deletions

View File

@ -65,10 +65,6 @@ jobs:
bazel build //release:release
./bazel-bin/release/release --release-dir "$(mktemp -d)"
condition: and(succeeded(), ne(variables['is_release'], 'true'))
- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: 'bazel-bin/docs/html.tar.gz'
artifactName: 'Docs bundle'
- template: tell-slack-failed.yml
parameters:
trigger_sha: '$(trigger_sha)'

View File

@ -311,110 +311,6 @@ genrule(
],
) if not is_windows else None
# Build Sphinx documentation bundles from the assembled source tree.
# This list comprehension emits one genrule per (name, target) pair
# below — "sphinx-html" (HTML output) and "sphinx-pdf" (LaTeX output,
# later compiled by the :pdf-docs rule). Skipped entirely on Windows.
[
genrule(
name = "sphinx-{}".format(name),
# Inputs: the canton cross-reference file, the full Sphinx source
# tree tarball, and the closing-quotes lint script + its allowlist.
# On Linux a glibc locale archive is added so UTF-8 locales resolve
# inside the sandbox.
srcs = [
":canton-refs.rst",
":sphinx-source-tree",
":scripts/check-closing-quotes.sh",
":scripts/check-closing-quotes.sh.allow",
] + (["@glibc_locales//:locale-archive"] if is_linux else []),
outs = ["sphinx-{}.tar.gz".format(name)],
# The shell below: unpack the source tree into a temp dir, run the
# RST closing-quotes check, invoke sphinx-build (-W turns warnings
# into errors; output is captured and only shown on failure to keep
# builds quiet), then package the result with mktgz.
# `$$` is Bazel escaping for a literal `$`; `{target}` / `{name}`
# are substituted by the .format() call after the string.
cmd = ("""
export LOCALE_ARCHIVE="$$PWD/$(location @glibc_locales//:locale-archive)"
""" if is_linux else "") +
"""
set -eou pipefail
DIR=$$(mktemp -d)
mkdir -p $$DIR/source $$DIR/target
tar xf $(location sphinx-source-tree) -C $$DIR/source --strip-components=1
mv $(location :canton-refs.rst) $$DIR/source/source
if ! docs/scripts/check-closing-quotes.sh $$DIR/source docs/scripts/check-closing-quotes.sh.allow; then
exit 1
fi
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
SPHINX_BUILD_EXIT_CODE=0
# We hide the output unless we get a failure to make the builds less noisy.
SPHINX_BUILD_OUTPUT=$$($(location @sphinx_nix//:bin/sphinx-build) -W -b {target} -c $$DIR/source/configs/{name} $$DIR/source/source $$DIR/target 2>&1) || SPHINX_BUILD_EXIT_CODE=$$?
if [ "$$SPHINX_BUILD_EXIT_CODE" -ne 0 ]; then
>&2 echo "## SPHINX-BUILD OUTPUT:"
>&2 echo "$$SPHINX_BUILD_OUTPUT"
>&2 echo "## SPHINX-BUILD OUTPUT END"
exit 1
fi
MKTGZ=$$PWD/$(execpath //bazel_tools/sh:mktgz)
OUT_PATH=$$PWD/$@
cd $$DIR
$$MKTGZ $$OUT_PATH target
""".format(
target = target,
name = name,
),
# Tools resolved by Bazel and referenced via $(location)/$(execpath).
tools = [
"//bazel_tools/sh:mktgz",
"@sphinx_nix//:bin/sphinx-build",
],
)
# (name, target): rule-name suffix and the sphinx-build builder to use.
for (name, target) in [
("html", "html"),
("pdf", "latex"),
]
] if not is_windows else None
# Compile the Sphinx LaTeX output (:sphinx-pdf) into the final PDF
# using lualatex, with the bundled fonts copied alongside the sources.
# Skipped on Windows.
#
# Fix: dropped the trailing `.format(sdk = sdk_version)` on `cmd` — the
# command string contains no format placeholders, so the call was dead
# code (and would break if a literal `{` were ever added to a comment).
genrule(
name = "pdf-docs",
srcs = [
":sphinx-pdf",
":pdf-fonts",
],
outs = ["DigitalAssetSDK.pdf"],
cmd = """
set -euo pipefail
# Set up tools
export PATH="$$( cd "$$(dirname "$(location @imagemagick_nix//:bin/convert)")" ; pwd -P )":$$PATH
mkdir out
tar -zxf $(location sphinx-pdf) -C out --strip-components=1
# Copy in fonts and build with lualatex
cp -L $(locations :pdf-fonts) out/
cd out
# run twice to generate all references properly (this is a latex thing...)
../$(location @texlive_nix//:bin/lualatex) -halt-on-error -interaction=batchmode --shell-escape *.tex
../$(location @texlive_nix//:bin/lualatex) -halt-on-error -interaction=batchmode --shell-escape *.tex
# NOTE, if you get errors of the following form:
#
# luaotfload | db : Font names database not found, generating new one.
# luaotfload | db : This can take several minutes; please be patient.
# luaotfload | db : Reload initiated (formats: otf,ttf,ttc); reason: "File not found: lmroman10-regular.".
#
# Then the error is most likely not font related. To debug the error
# run `bazel build` with `--sandbox_debug`, change into the sandbox
# directory and invoke lualatex from there. You will have to replicate
# the environment variable setup from above.
#
# In the past the following issues caused the error message above:
# - An update of sphinx in nixpkgs that had to be undone.
# - A missing texlive package that had to be added to the Nix derivation.
# Move output to target
mv DigitalAssetSDK.pdf ../$(location DigitalAssetSDK.pdf)""",
# "pdfdocs" tag lets CI include/exclude the slow LaTeX build.
tags = ["pdfdocs"],
tools =
[
"@imagemagick_nix//:bin/convert",
"@texlive_nix//:bin/lualatex",
],
) if not is_windows else None
filegroup(
name = "pdf-fonts",
srcs = glob(["configs/pdf/fonts/**"]) + ["@freefont//:fonts"],
@ -477,26 +373,6 @@ genrule(
tools = ["//bazel_tools/sh:mktgz"],
) if not is_windows else None
# Merge the Sphinx HTML bundle with the non-Sphinx HTML docs into a
# single html-only.tar.gz (no PDF included). Skipped on Windows.
#
# Fix: dropped the trailing `.format(sdk = sdk_version)` on `cmd` — the
# command string contains no format placeholders, so the call was dead
# code.
genrule(
name = "docs-no-pdf",
srcs = [
":sphinx-html",
":non-sphinx-html-docs",
],
outs = ["html-only.tar.gz"],
cmd = """
set -eou pipefail
mkdir -p build/html
tar xf $(location :sphinx-html) -C build/html --strip-components=1
tar xf $(location :non-sphinx-html-docs) -C build/html --strip-components=1
cd build
../$(execpath //bazel_tools/sh:mktgz) ../$@ html
""",
tools = [
"//bazel_tools/sh:mktgz",
],
) if not is_windows else None
genrule(
name = "redirects",
srcs = [
@ -521,42 +397,6 @@ genrule(
tools = ["//bazel_tools/sh:mktgz"],
)
# Produce the final html.tar.gz: unpack :docs-no-pdf, generate a
# sitemap.xml from all HTML files, copy the PDF into _downloads, strip
# Sphinx build artifacts, and repackage. Skipped on Windows.
#
# Fix: removed the unused `version = sdk_version` keyword from the
# .format() call — the command string has no `{version}` placeholder,
# so the argument was dead code.
genrule(
name = "docs",
srcs = [
":docs-no-pdf",
":pdf-docs",
],
outs = ["html.tar.gz"],
# NOTE: `$$` is Bazel escaping for `$`; `{{` / `}}` are .format
# escaping for literal braces in the shell `$${VAR}` expansions.
# VERSION_DATE is pinned to 1970-01-01 so the sitemap is
# deterministic/reproducible across builds.
cmd = """
VERSION_DATE=1970-01-01
tar -zxf $(location :docs-no-pdf)
cd html
find . -name '*.html' | sort | sed -e 's,^\\./,https://docs.daml.com/,' > sitemap
SMHEAD="{head}"
SMITEM="{item}"
SMFOOT="{foot}"
echo $$SMHEAD > sitemap.xml
while read item; do
echo $$SMITEM | sed -e "s,%DATE%,$${{VERSION_DATE}}," | sed -e "s,%LOC%,$${{item}}," >> sitemap.xml
done < sitemap
rm sitemap
echo $$SMFOOT >> sitemap.xml
cd ..
cp -L $(location :pdf-docs) html/_downloads
# Remove Sphinx build products
rm -r html/.buildinfo html/.doctrees html/objects.inv
$(execpath //bazel_tools/sh:mktgz) $@ html
""".format(
head = """<?xml version='1.0' encoding='UTF-8'?><urlset xmlns='http://www.sitemaps.org/schemas/sitemap/0.9' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' xsi:schemaLocation='http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd'>""",
item = """<url><loc>%LOC%</loc><lastmod>%DATE%</lastmod><changefreq>daily</changefreq><priority>0.8</priority></url>""",
foot = """</urlset>""",
),
# "pdfdocs" tag: this rule transitively requires the slow PDF build.
tags = ["pdfdocs"],
tools = ["//bazel_tools/sh:mktgz"],
) if not is_windows else None
filegroup(
name = "daml-assistant-iou-setup",
srcs = glob(

View File

@ -1,221 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _recommended-architecture:
Daml Application Architecture
#############################
This section describes our recommended design of a full-stack Daml application.
.. image:: ./recommended_architecture.svg
:alt: Diagram of the recommended application architecture, explained in depth immediately below.
The above image shows the recommended architecture. Here there are four types of building blocks that go into our application: user code, Daml components, generated code from Daml, and external components. In the recommended architecture the Daml model determines the DAR files that underpin both the front-end and back-end. The front-end includes user code such as a React Web Frontend, Daml React libraries or other integration libraries, and generated code from the DAR files. The back-end consists of Daml integration components (e.g. JSON API) and a participant node; the participant node communicates with an external token issuer. The Daml network, meanwhile, includes Daml drivers paired with external synchronization technologies.
Of course there are many ways that the architecture and technology
stack can be changed to fit your needs, which we'll mention in the corresponding sections.
To get started quickly with the recommended application architecture, generate a new project using the ``create-daml-app`` template:
.. code-block:: bash
daml new --template=create-daml-app my-project-name
``create-daml-app`` is a small, but fully functional demo application implementing the recommended
architecture, providing you with an excellent starting point for your own application. It showcases
- using Daml React libraries
- quick iteration against the :ref:`Daml Sandbox <sandbox-manual>`.
- authorization
- deploying your application in the cloud as a Docker container
Backend
*******
The backend for your application can be any Daml ledger implementation running your DAR
(:ref:`Daml Archive <dar-file-dalf-file>`) file.
We recommend using the :ref:`Daml JSON API <json-api>` as an interface to your frontend. It is
served by the HTTP JSON API server connected to the ledger API server. It provides simple HTTP
endpoints to interact with the ledger via GET/POST requests. However, if you prefer, you can also
use the :ref:`gRPC Ledger API <grpc>` directly.
When you use the ``create-daml-app`` template application, you can start a Daml Sandbox together
with a JSON API server by running the following command in the root of the project.
.. code-block:: bash
daml start --start-navigator=no
Daml Sandbox exposes the same Daml Ledger API as a Participant Node would
expose without requiring a fully-fledged Daml network to back the application. Once your
application matures and becomes ready for production, the ``daml deploy`` command helps you deploy
your frontend and Daml artifacts of your project to a production Daml network.
Frontend
********
We recommended building your frontend with the `React <https://reactjs.org>`_ framework. However,
you can choose virtually any language for your frontend and interact with the ledger via
:ref:`HTTP JSON <json-api>` endpoints. In addition, we provide support libraries for
:ref:`Java <java-bindings>` and you can also interact with the :ref:`gRPC Ledger API <grpc>` directly.
We provide two libraries to build your React frontend for a Daml application.
+--------------------------------------------------------------+--------------------------------------------------------------------------+
| Name | Summary |
+==============================================================+==========================================================================+
| `@daml/react <https://www.npmjs.com/package/@daml/react>`_ | React hooks to query/create/exercise Daml contracts |
+--------------------------------------------------------------+--------------------------------------------------------------------------+
| `@daml/ledger <https://www.npmjs.com/package/@daml/ledger>`_ | Daml ledger object to connect and directly submit commands to the ledger |
+--------------------------------------------------------------+--------------------------------------------------------------------------+
You can install any of these libraries by running ``npm install <library>`` in the ``ui`` directory of
your project, e.g. ``npm install @daml/react``. Please explore the ``create-daml-app`` example project
to see the usage of these libraries.
To make your life easy when interacting with the ledger, the Daml assistant can generate JavaScript
libraries with TypeScript typings from the data types declared in the deployed DAR.
.. code-block:: bash
daml codegen js .daml/dist/<your-project-name.dar> -o ui/daml.js
This command will generate a JavaScript library for each DALF in your DAR, containing metadata about
types and templates in the DALF and TypeScript typings for them. In ``create-daml-app``, ``ui/package.json`` refers to these
libraries via the ``"create-daml-app": "file:../daml.js/create-daml-app-0.1.0"`` entry in the
``dependencies`` field.
If you choose a different JavaScript based frontend framework, the packages ``@daml/ledger``,
``@daml/types`` and the generated ``daml.js`` libraries provide you with the necessary code to
connect and issue commands against your ledger.
Authorization
*************
When you deploy your application to a production ledger, you need to authenticate the identities of
your users.
Daml ledgers support a unified interface for authorization of commands. Some Daml ledgers, like for
example https://hub.daml.com, offer integrated authentication and authorization, but you can also
use an external service provider like https://auth0.com. The Daml react libraries support interfacing
with a Daml ledger that validates authorization of incoming requests. Simply initialize your
``DamlLedger`` object with the token obtained by the respective token issuer. How authorization works and the
form of the required tokens is described in the :ref:`Authorization <authorization>` section.
Developer Workflow
******************
The SDK enables a local development environment with fast iteration cycles:
1. The integrated VSCode IDE (``daml studio``) runs your Scripts on any change to your Daml models. See :ref:`Daml Script <testing-using-script>`.
#. ``daml start`` will build all of your Daml code, generate the JavaScript bindings, and start the required "backend" processes (sandbox and HTTP JSON API). It will also allow you to press ``r`` (followed by Enter on Windows) to rebuild your code, regenerate the JavaScript bindings and upload the new code to the running ledger.
#. ``npm start`` will watch your JavaScript source files for change and recompile them immediately when they are saved.
Together, these features can provide you with very tight feedback loops while developing your Daml application, all the way from your Daml contracts up to your web UI. A typical Daml developer workflow is to
1. Make a small change to your Daml data model
#. Optionally test your Daml code with :ref:`Daml Script <testing-using-script>`
#. Edit your React components to be aligned with changes made in Daml code
#. Extend the UI to make use of the newly introduced feature
#. Make further changes either to your Daml and/or React code until you're happy with what you've developed
.. image:: ./developer_workflow.svg
:alt: A simple developer workflow - iterate on the Daml model, iterate on the UI, repeat.
See :doc:`Your First Feature </getting-started/first-feature>` for a more detailed walkthrough of these steps.
Command Deduplication
=====================
The interaction of a Daml application with the ledger is inherently asynchronous: applications send commands to the ledger, and some time later they see the effect of that command on the ledger.
There are several things that can fail during this time window: the application can crash, the participant node can crash, messages can be lost on the network, or the ledger may be just slow to respond due to a high load.
If you want to make sure that a command is not executed twice, your application needs to robustly handle all failure scenarios.
Daml ledgers provide a mechanism for :doc:`command deduplication <command-deduplication>` to help deal with this problem.
For each command the application provides a command ID and an optional parameter that specifies the deduplication period.
If the latter parameter is not specified in the command submission itself, the ledger will use the configured maximum deduplication duration.
The ledger will then guarantee that commands with the same :ref:`change ID <change-id>` will generate a rejection within the effective deduplication period.
For details on how to use command deduplication, see the :doc:`Command Deduplication Guide <command-deduplication>`.
.. _dealing-with-failures:
Deal With Failures
==================
.. _crash-recovery:
Crash Recovery
--------------
In order to restart your application from a previously known ledger state,
your application must keep track of the last ledger offset received
from the :ref:`transaction service <transaction-service>` or the
:ref:`command completion service <command-completion-service>`.
By persisting this offset alongside the relevant state as part of a single,
atomic operation, your application can resume from where it left off.
.. _failing-over-between-ledger-api-endpoints:
Fail Over Between Ledger API Endpoints
--------------------------------------
Some Daml Ledgers support exposing multiple eventually consistent Ledger API
endpoints where command deduplication works across these Ledger API endpoints.
For example, these endpoints might be hosted by separate Ledger API servers
that replicate the same data and host the same parties. Contact your ledger
operator to find out whether this applies to your ledger.
Below we describe how you can build your application such that it can switch
between such eventually consistent Ledger API endpoints to tolerate server
failures. You can do this using the following two steps.
First, your application must keep track of the ledger offset as described in the
:ref:`paragraph about crash recovery <crash-recovery>`. When switching to a new
Ledger API endpoint, it must resume consumption of the transaction (tree)
and/or the command completion streams starting from this last received
offset.
Second, your application must retry on ``OUT_OF_RANGE`` errors (see
`gRPC status codes <https://grpc.github.io/grpc/core/md_doc_statuscodes.html>`_)
received from a stream subscription -- using an appropriate backoff strategy
to avoid overloading the server. Such errors can be raised because of eventual
consistency. The Ledger API endpoint that the application is newly subscribing
to might be behind the endpoint that it subscribed to before the switch, and
needs time to catch up. Thanks to eventual consistency this is guaranteed to
happen at some point in the future.
Once the application successfully subscribes to its required streams on the
new endpoint, it will resume normal operation.
.. _dealing-with-time:
Deal With Time
==============
The Daml language contains a function :ref:`getTime <daml-ref-gettime>` which returns a rough estimate of “current time” called *Ledger Time*. The notion of time comes with a lot of problems in a distributed setting: different participants might run different clocks, there may be latencies due to calculation and network, clocks may drift against each other over time, etc.
In order to provide a useful notion of time in Daml without incurring severe performance or liveness penalties, Daml has two notions of time: *Ledger Time* and *Record Time*:
- As part of command interpretation, each transaction is automatically assigned a *Ledger Time* by the participant server.
- All calls to ``getTime`` within a transaction return the *Ledger Time* assigned to that transaction.
- *Ledger Time* is chosen (and validated) to respect Causal Monotonicity: The Create action on a contract *c* always precedes all other actions on *c* in Ledger Time.
- As part of the commit/synchronization protocol of the underlying infrastructure, every transaction is assigned a *Record Time*, which can be thought of as the infrastructure's "system time". It's the best available notion of "real time", but the only guarantees on it are the guarantees the underlying infrastructure can give. It is also not known at interpretation time.
- *Ledger Time* is kept close to "real time" by bounding it against *Record Time*. Transactions where *Ledger* and *Record Time* are too far apart are rejected.
Some commands might take a long time to process, and by the time the resulting transaction is about to be committed to the ledger, it might violate the condition that *Ledger Time* should be reasonably close to *Record Time* (even when considering the ledger's tolerance interval). To avoid such problems, applications can set the optional parameters :ref:`min_ledger_time_abs <com.daml.ledger.api.v1.Commands.min_ledger_time_abs>` or :ref:`min_ledger_time_rel <com.daml.ledger.api.v1.Commands.min_ledger_time_rel>` that specify (in absolute or relative terms) the minimal *Ledger Time* for the transaction. The ledger will then process the command, but wait with committing the resulting transaction until *Ledger Time* fits within the ledger's tolerance interval.
How is this used in practice?
- Be aware that ``getTime`` is only reasonably close to real time, and not completely monotonic. Avoid Daml workflows that rely on very accurate time measurements or high frequency time changes.
- Set ``min_ledger_time_abs`` or ``min_ledger_time_rel`` if the duration of command interpretation and transmission is likely to take a long time relative to the tolerance interval set by the ledger.
- In some corner cases, the participant node may be unable to determine a suitable Ledger Time by itself. If you get an error that no Ledger Time could be found, check whether you have contention on any contract referenced by your command or whether the referenced contracts are sensitive to small changes of ``getTime``.
For more details, see :ref:`Background concepts - time <time>`.

View File

@ -1,245 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _authorization:
Authorization
#############
When developing Daml applications using SDK tools,
your local setup will most likely not perform any Ledger API request authorization --
by default, any valid Ledger API request will be accepted by the sandbox.
This is not the case for participant nodes of deployed ledgers.
For every Ledger API request, the participant node checks whether the request contains an access token that is valid and sufficient to authorize that request.
You thus need to add support for authorization using access tokens to your application to run it against a deployed ledger.
.. note:: In case of mutual (two-way) TLS authentication, the Ledger API
client must present its certificate (in addition to an access token) to
the Ledger API server as part of the authentication process. The provided
certificate must be signed by a certificate authority (CA) trusted
by the Ledger API server. Note that the identity of the application
will not be proven by using this method, i.e. the `application_id` field in the request
is not necessarily correlated with the CN (Common Name) in the certificate.
Introduction
************
Your Daml application sends requests to the :doc:`Ledger API </app-dev/ledger-api>` exposed by a participant node to submit changes to the ledger
(e.g., "*exercise choice X on contract Y as party Alice*"), or to read data from the ledger
(e.g., "*read all active contracts visible to party Alice*").
Your application might send these requests via a middleware like the :doc:`JSON API </json-api/index>`.
Whether a participant node *can* serve such a request depends on whether the participant node hosts the respective parties, and
whether the request is valid according to the :ref:`Daml Ledger Model <da-ledgers>`.
Whether a participant node *will* serve such a request to a Daml application depends on whether the
request includes an access token that is valid and sufficient to authorize the request for this participant node.
Acquire and Use Access Tokens
*****************************
How an application acquires access tokens depends on the participant node it talks to and is ultimately set up by the participant node operator.
Many setups use a flow in the style of `OAuth 2.0 <https://oauth.net/2/>`_.
In this scenario, the Daml application first contacts a token issuer to get an access token.
The token issuer verifies the identity of the requesting application, looks up the privileges of the application,
and generates a signed access token describing those privileges.
Once the access token is issued, the Daml application sends it along with every Ledger API request.
The Daml ledger verifies:
- that the token was issued by one of its trusted token issuers
- that the token has not been tampered with
- that the token has not expired
- that the privileges described in the token authorize the request
.. image:: ./images/Authentication.svg
:alt: A flowchart illustrating the process of authentication described in the two paragraphs immediately above.
How you attach tokens to requests depends on the tool or library you use to interact with the Ledger API.
See the tool's or library's documentation for more information. (E.g. relevant documentation for
the :ref:`Java bindings <ledger-api-java-bindings-authorization>`
and the :ref:`JSON API <json-api-access-tokens>`.)
.. _authorization-claims:
Access Tokens and Rights
************************
Access tokens contain information about the rights granted to the bearer of the token. These rights are specific to the API being accessed.
The Daml Ledger API uses the following rights to govern request authorization:
- ``public``: the right to retrieve publicly available information, such as the ledger identity
- ``participant_admin``: the right to administer the participant node
- ``canReadAs(p)``: the right to read information off the ledger (like the active contracts) visible to the party ``p``
- ``canActAs(p)``: same as ``canReadAs(p)``, with the added right of issuing commands on behalf of the party ``p``
The following table summarizes the rights required to access each Ledger API endpoint:
+-------------------------------------+----------------------------+--------------------------------------------------------+
| Ledger API service | Endpoint | Required right |
+=====================================+============================+========================================================+
| LedgerIdentityService | GetLedgerIdentity | public |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| ActiveContractsService | GetActiveContracts | for each requested party p: canReadAs(p) |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| CommandCompletionService | CompletionEnd | public |
| +----------------------------+--------------------------------------------------------+
| | CompletionStream | for each requested party p: canReadAs(p) |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| CommandSubmissionService | Submit | for submitting party p: canActAs(p) |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| CommandService | All | for submitting party p: canActAs(p) |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| Health | All | no access token required for health checking |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| LedgerConfigurationService | GetLedgerConfiguration | public |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| MeteringReportService | All | participant_admin |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| PackageService | All | public |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| PackageManagementService | All | participant_admin |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| PartyManagementService | All | participant_admin |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| ParticipantPruningService | All | participant_admin |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| ServerReflection | All | no access token required for gRPC service reflection |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| TimeService | GetTime | public |
| +----------------------------+--------------------------------------------------------+
| | SetTime | participant_admin |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| TransactionService | LedgerEnd | public |
| +----------------------------+--------------------------------------------------------+
| | All (except LedgerEnd) | for each requested party p: canReadAs(p) |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| UserManagementService | All | participant_admin |
| +----------------------------+--------------------------------------------------------+
| | GetUser | authenticated users can get their own user |
| +----------------------------+--------------------------------------------------------+
| | ListUserRights | authenticated users can list their own rights |
+-------------------------------------+----------------------------+--------------------------------------------------------+
| VersionService | All | public |
+-------------------------------------+----------------------------+--------------------------------------------------------+
.. _access-token-formats:
Access Token Formats
********************
Applications should treat access tokens as opaque blobs.
However, as an application developer it can be helpful to understand the format of access tokens to debug problems.
All Daml ledgers represent access tokens as `JSON Web Tokens (JWTs) <https://datatracker.ietf.org/doc/html/rfc7519>`_,
and there are two formats of the JSON payload used by Daml ledgers.
.. note:: To generate access tokens for testing purposes, you can use the `jwt.io <https://jwt.io/>`__ web site.
.. _user-access-tokens:
User Access Tokens
==================
Daml ledgers that support participant :ref:`user management <user-management-service>` also accept user access tokens.
They are useful for scenarios where an application's rights change dynamically over the application's lifetime.
User access tokens do not encode rights directly like the custom Daml claims tokens explained in the following sections.
Instead, user access tokens encode the participant user on whose behalf the request is issued.
When handling such requests, participant nodes look up the participant user's current rights
before checking request authorization per the :ref:`table above <authorization-claims>`.
Thus the rights granted to an application can be changed dynamically using
the participant user management service *without* issuing new access tokens,
as would be required for the custom Daml claims tokens.
User access tokens are `JWTs <https://datatracker.ietf.org/doc/html/rfc7519>`_ that follow the
`OAuth 2.0 standard <https://datatracker.ietf.org/doc/html/rfc6749>`_. There are two
different JSON encodings: An audience-based token format that relies
on the audience field to specify that it is designated for a specific
Daml participant and a scope-based audience token format which relies on the
scope field to designate the purpose. Both formats can be used interchangeably but
if possible, use of the audience-based token format is recommended as it
is compatible with a wider range of IAMs, e.g., Kubernetes does not
support setting the scope field and makes the participant id mandatory
which prevents misuse of a token on a different participant.
Audience-Based Tokens
---------------------
.. code-block:: json
{
"aud": "https://daml.com/jwt/aud/participant/someParticipantId",
"sub": "someUserId",
"exp": 1300819380
}
To interpret the above notation:
- ``aud`` is a required field which restricts the token to participant nodes with the given ID (e.g. ``someParticipantId``)
- ``sub`` is a required field which specifies the participant user's ID
- ``exp`` is an optional field which specifies the JWT expiration date (in seconds since EPOCH)
Scope-Based Tokens
------------------
.. code-block:: json
{
"aud": "someParticipantId",
"sub": "someUserId",
"exp": 1300819380,
"scope": "daml_ledger_api"
}
To interpret the above notation:
- ``aud`` is an optional field which restricts the token to participant nodes with the given ID
- ``sub`` is a required field which specifies the participant user's ID
- ``exp`` is an optional field which specifies the JWT expiration date (in seconds since EPOCH)
- ``scope`` is a space-separated list of `OAuth 2.0 scopes <https://datatracker.ietf.org/doc/html/rfc6749#section-3.3>`_
that must contain the ``"daml_ledger_api"`` scope
Requirements for User IDs
-------------------------
User IDs must be non-empty strings of at most 128 characters that are either alphanumeric ASCII characters or one of the symbols "@^$.!`-#+'~_|:".
Custom Daml Claims Access Tokens
================================
This format represents the :ref:`rights <authorization-claims>` granted by the access token as custom claims in the JWT's payload, like so:
.. code-block:: json
{
"https://daml.com/ledger-api": {
"ledgerId": null,
"participantId": "123e4567-e89b-12d3-a456-426614174000",
"applicationId": null,
"admin": true,
"actAs": ["Alice"],
"readAs": ["Bob"]
},
"exp": 1300819380
}
where all of the fields are optional, and if present,
- ``ledgerId`` and ``participantId`` restrict the validity of the token to the given ledger or participant node
- ``applicationId`` requires requests with this token to use that application id or not set an application id at all, which should be used to distinguish requests from different applications
- ``exp`` is the standard JWT expiration date (in seconds since EPOCH)
- ``actAs``, ``readAs`` and (participant) ``admin`` encode the rights granted by this access token
The ``public`` right is implicitly granted to any request bearing a non-expired JWT issued by a trusted issuer with matching ``ledgerId``, ``participantId`` and ``applicationId`` values.
.. note:: All Daml ledgers also support a deprecated legacy format of custom Daml claims
access tokens whose format is equal to the above except that the custom claims
are present at the same level as ``exp`` in the token above,
instead of being nested below ``"https://daml.com/ledger-api"``.

View File

@ -1,603 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _daml-codegen-java:
Generate Java Code from Daml
############################
Introduction
============
When writing applications for the ledger in Java, you want to work with a representation of Daml templates and data types in Java that closely resemble the original Daml code while still being as true to the native types in Java as possible. To achieve this, you can use Daml to Java code generator ("Java codegen") to generate Java types based on a Daml model. You can then use these types in your Java code when reading information from and sending data to the ledger.
The :doc:`Daml assistant documentation </tools/codegen>` describes how to run and configure the code generator for all supported bindings, including Java.
The rest of this page describes Java-specific topics.
Understand the Generated Java Model
======================================
The Java codegen generates source files in a directory tree under the output directory specified on the command line.
.. _daml-codegen-java-primitive-types:
Map Daml Primitives to Java Types
---------------------------------
Daml built-in types are translated to the following equivalent types in Java:
+--------------------------------+--------------------------------------------+------------------------+
| Daml type | Java type | Java Bindings |
| | | Value Type |
+================================+============================================+========================+
| ``Int`` | ``java.lang.Long`` | `Int64`_ |
+--------------------------------+--------------------------------------------+------------------------+
| ``Numeric`` | ``java.math.BigDecimal`` | `Numeric`_ |
+--------------------------------+--------------------------------------------+------------------------+
| ``Text`` | ``java.lang.String`` | `Text`_ |
+--------------------------------+--------------------------------------------+------------------------+
| ``Bool`` | ``java.util.Boolean`` | `Bool`_ |
+--------------------------------+--------------------------------------------+------------------------+
| ``Party`` | ``java.lang.String`` | `Party`_ |
+--------------------------------+--------------------------------------------+------------------------+
| ``Date`` | ``java.time.LocalDate`` | `Date`_ |
+--------------------------------+--------------------------------------------+------------------------+
| ``Time`` | ``java.time.Instant`` | `Timestamp`_ |
+--------------------------------+--------------------------------------------+------------------------+
| ``List`` or ``[]`` | ``java.util.List`` | `DamlList`_ |
+--------------------------------+--------------------------------------------+------------------------+
| ``TextMap`` | ``java.util.Map`` | `DamlTextMap`_ |
| | Restricted to using ``String`` keys. | |
+--------------------------------+--------------------------------------------+------------------------+
| ``Optional`` | ``java.util.Optional`` | `DamlOptional`_ |
+--------------------------------+--------------------------------------------+------------------------+
| ``()`` (Unit)                  | **None** since the Java language doesn't   | `Unit`_                |
|                                | have a direct equivalent of Daml's Unit    |                        |
| | type ``()``, the generated code uses the | |
| | Java Bindings value type. | |
+--------------------------------+--------------------------------------------+------------------------+
| ``ContractId`` | Fields of type ``ContractId X`` refer to | `ContractId`_ |
| | the generated ``ContractId`` class of the | |
| | respective template ``X``. | |
+--------------------------------+--------------------------------------------+------------------------+
Understand Escaping Rules
-------------------------
To avoid clashes with Java keywords, the Java codegen applies escaping rules to the following Daml identifiers:
* Type names (except the already mapped :ref:`built-in types <daml-codegen-java-primitive-types>`)
* Constructor names
* Type parameters
* Module names
* Field names
If any of these identifiers match one of the `Java reserved keywords <https://docs.oracle.com/javase/specs/jls/se12/html/jls-3.html#jls-3.9>`__, the Java codegen appends a dollar sign ``$`` to the name. For example, a field with the name ``import`` will be generated as a Java field with the name ``import$``.
Understand the Generated Classes
--------------------------------
Every user-defined data type in Daml (template, record, and variant) is represented by one or more Java classes as described in this section.
The Java package for the generated classes is the equivalent of the lowercase Daml module name.
.. code-block:: daml
:caption: Daml
module Foo.Bar.Baz where
.. code-block:: java
:caption: Java
package foo.bar.baz;
Records (a.k.a Product Types)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A :ref:`Daml record <daml-ref-record-types>` is represented by a Java class with fields that have the same name as the Daml record fields. A Daml field having the type of another record is represented as a field having the type of the generated class for that record.
.. literalinclude:: ./code-snippets/Com/Acme/ProductTypes.daml
:language: daml
:start-after: -- start snippet: product types example
:end-before: -- end snippet: product types example
:caption: Com/Acme/ProductTypes.daml
A Java file is generated that defines the class for the type ``Person``:
.. code-block:: java
:caption: com/acme/producttypes/Person.java
package com.acme.producttypes;
public class Person extends DamlRecord<Person> {
public final Name name;
public final BigDecimal age;
public static Person fromValue(Value value$) { /* ... */ }
public Person(Name name, BigDecimal age) { /* ... */ }
public DamlRecord toValue() { /* ... */ }
}
A Java file is generated that defines the class for the type ``Name``:
.. code-block:: java
:caption: com/acme/producttypes/Name.java
package com.acme.producttypes;
public class Name extends DamlRecord<Name> {
public final String firstName;
public final String lastName;
public static Name fromValue(Value value$) { /* ... */ }
public Name(String firstName, String lastName) { /* ... */ }
public DamlRecord toValue() { /* ... */ }
}
.. _daml-codegen-java-templates:
Templates
^^^^^^^^^
The Java codegen generates three classes for a Daml template:
**TemplateName**
Represents the contract data or the template fields.
**TemplateName.ContractId**
Used whenever a contract ID of the corresponding template is used in another template or record, for example: ``data Foo = Foo (ContractId Bar)``. This class also provides methods to generate an ``ExerciseCommand`` for each choice that can be sent to the ledger with the Java Bindings.
**TemplateName.Contract**
Represents an actual contract on the ledger. It contains a field for the contract ID (of type ``TemplateName.ContractId``) and a field for the template data (of type ``TemplateName``). With the static method ``TemplateName.Contract.fromCreatedEvent``, you can deserialize a `CreatedEvent <https://docs.daml.com/app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/CreatedEvent.html>`__ to an instance of ``TemplateName.Contract``.
.. literalinclude:: ./code-snippets/Com/Acme/Templates.daml
:language: daml
:start-after: -- start snippet: template example
:end-before: -- end snippet: template example
:caption: Com/Acme/Templates.daml
A file is generated that defines five Java classes and an interface:
#. ``Bar``
#. ``Bar.ContractId``
#. ``Bar.Contract``
#. ``Bar.CreateAnd``
#. ``Bar.ByKey``
#. ``Bar.Exercises``
.. code-block:: java
:caption: com/acme/templates/Bar.java
:emphasize-lines: 3,21,27,35,43,47
package com.acme.templates;
public class Bar extends Template {
public static final Identifier TEMPLATE_ID = new Identifier("some-package-id", "Com.Acme.Templates", "Bar");
public static final Choice<Bar, Archive, Unit> CHOICE_Archive =
Choice.create(/* ... */);
public static final ContractCompanion.WithKey<Contract, ContractId, Bar, BarKey> COMPANION =
new ContractCompanion.WithKey<>("com.acme.templates.Bar",
TEMPLATE_ID, ContractId::new, Bar::fromValue, Contract::new, e -> BarKey.fromValue(e), List.of(CHOICE_Archive));
public final String owner;
public final String name;
public CreateAnd createAnd() { /* ... */ }
public static ByKey byKey(BarKey key) { /* ... */ }
public static class ContractId extends com.daml.ledger.javaapi.data.codegen.ContractId<Bar>
implements Exercises<ExerciseCommand> {
// inherited:
public final String contractId;
}
public interface Exercises<Cmd> extends com.daml.ledger.javaapi.data.codegen.Exercises<Cmd> {
default Cmd exerciseArchive(Unit arg) { /* ... */ }
default Cmd exerciseBar_SomeChoice(Bar_SomeChoice arg) { /* ... */ }
default Cmd exerciseBar_SomeChoice(String aName) { /* ... */ }
}
public static class Contract extends ContractWithKey<ContractId, Bar, BarKey> {
// inherited:
public final ContractId id;
public final Bar data;
public static Contract fromCreatedEvent(CreatedEvent event) { /* ... */ }
}
public static final class CreateAnd
extends com.daml.ledger.javaapi.data.codegen.CreateAnd
implements Exercises<CreateAndExerciseCommand> { /* ... */ }
public static final class ByKey
extends com.daml.ledger.javaapi.data.codegen.ByKey
implements Exercises<ExerciseByKeyCommand> { /* ... */ }
}
Note that ``byKey`` and ``ByKey`` will only be generated for templates that define a key.
Variants (a.k.a Sum Types)
^^^^^^^^^^^^^^^^^^^^^^^^^^
A :ref:`variant or sum type <daml-ref-sum-types>` is a type with multiple constructors, where each constructor wraps a value of another type. The generated code is comprised of an abstract class for the variant type itself and a subclass thereof for each constructor. Classes for variant constructors are similar to classes for records.
.. literalinclude:: ./code-snippets/Com/Acme/Variants.daml
:language: daml
:start-after: -- start snippet: variant example
:end-before: -- end snippet: variant example
:caption: Com/Acme/Variants.daml
The Java code generated for this variant is:
.. code-block:: java
:caption: com/acme/variants/BookAttribute.java
package com.acme.variants;
public class BookAttribute extends Variant<BookAttribute> {
public static BookAttribute fromValue(Value value) { /* ... */ }
public abstract Variant toValue();
}
.. code-block:: java
:caption: com/acme/variants/bookattribute/Pages.java
package com.acme.variants.bookattribute;
public class Pages extends BookAttribute {
public final Long longValue;
public static Pages fromValue(Value value) { /* ... */ }
public Pages(Long longValue) { /* ... */ }
public Variant toValue() { /* ... */ }
}
.. code-block:: java
:caption: com/acme/variants/bookattribute/Authors.java
package com.acme.variants.bookattribute;
public class Authors extends BookAttribute {
public final List<String> listValue;
public static Authors fromValue(Value value) { /* ... */ }
public Authors(List<String> listValue) { /* ... */ }
public Variant toValue() { /* ... */ }
}
.. code-block:: java
:caption: com/acme/variants/bookattribute/Title.java
package com.acme.variants.bookattribute;
public class Title extends BookAttribute {
public final String stringValue;
public static Title fromValue(Value value) { /* ... */ }
public Title(String stringValue) { /* ... */ }
public Variant toValue() { /* ... */ }
}
.. code-block:: java
:caption: com/acme/variants/bookattribute/Published.java
package com.acme.variants.bookattribute;
public class Published extends BookAttribute {
public final Long year;
public final String publisher;
public static Published fromValue(Value value) { /* ... */ }
public Published(Long year, String publisher) { /* ... */ }
public Variant toValue() { /* ... */ }
}
Parameterized Types
^^^^^^^^^^^^^^^^^^^
.. note::
This section is only included for completeness: we don't expect users to make use of the ``fromValue`` and ``toValue`` methods, because they would typically come from a template that doesn't have any unbound type parameters.
The Java codegen uses Java Generic types to represent :ref:`Daml parameterized types <daml-ref-parameterized-types>`.
This Daml fragment defines the parameterized type ``Attribute``, used by the ``BookAttribute`` type for modeling the characteristics of the book:
.. literalinclude:: ./code-snippets/Com/Acme/ParameterizedTypes.daml
:language: daml
:start-after: -- start snippet: parameterized types example
:end-before: -- end snippet: parameterized types example
:caption: Com/Acme/ParameterizedTypes.daml
The Java codegen generates a Java file with a generic class for the ``Attribute a`` data type:
.. code-block:: java
:caption: com/acme/parametrizedtypes/Attribute.java
:emphasize-lines: 3,8,10
package com.acme.parametrizedtypes;
public class Attribute<a> {
public final a value;
public Attribute(a value) { /* ... */ }
public DamlRecord toValue(Function<a, Value> toValuea) { /* ... */ }
public static <a> Attribute<a> fromValue(Value value$, Function<Value, a> fromValuea) { /* ... */ }
}
Enums
^^^^^
An enum type is a simplified :ref:`sum type <daml-ref-sum-types>` with multiple
constructors but without arguments or type parameters. The generated code is a
standard Java ``Enum`` whose constants map to the enum type constructors.
.. literalinclude:: ./code-snippets/Com/Acme/Enum.daml
:language: daml
:start-after: -- start snippet: enum example
:end-before: -- end snippet: enum example
:caption: Com/Acme/Enum.daml
The Java code generated for this enum is:
.. code-block:: java
:caption: com/acme/enum/Color.java
package com.acme.enum;
public enum Color implements DamlEnum<Color> {
RED,
GREEN,
BLUE;
/* ... */
public static final Color fromValue(Value value$) { /* ... */ }
public final DamlEnum toValue() { /* ... */ }
}
.. code-block:: java
:caption: com/acme/enum/bookattribute/Authors.java
package com.acme.enum.bookattribute;
public class Authors extends BookAttribute {
public final List<String> listValue;
public static Authors fromValue(Value value) { /* ... */ }
public Authors(List<String> listValue) { /* ... */ }
public Value toValue() { /* ... */ }
}
Convert a Value of a Generated Type to a Java Bindings Value
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
To convert an instance of the generic type ``Attribute<a>`` to a Java Bindings `Value`_, call the ``toValue`` method and pass a function as the ``toValuea`` argument for converting the field of type ``a`` to the respective Java Bindings `Value`_. The name of the parameter consists of ``toValue`` and the name of the type parameter, in this case ``a``, to form the name ``toValuea``.
Below is a Java fragment that converts an attribute with a ``java.lang.Long`` value to the Java Bindings representation using the *method reference* ``Int64::new``.
.. code-block:: java
Attribute<Long> pagesAttribute = new Attribute<>(42L);
Value serializedPages = pagesAttribute.toValue(Int64::new);
See :ref:`Daml To Java Type Mapping <daml-codegen-java-primitive-types>` for an overview of the Java Bindings `Value`_ types.
Note: If the Daml type is a record or variant with more than one type parameter, you need to pass a conversion function to the ``toValue`` method for each type parameter.
Create a Value of a Generated Type from a Java Bindings Value
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Analogous to the ``toValue`` method, to create a value of a generated type, call the method ``fromValue`` and pass conversion functions from a Java Bindings `Value`_ type to the expected Java type.
.. code-block:: java
Attribute<Long> pagesAttribute = Attribute.<Long>fromValue(serializedPages,
f -> f.asInt64().orElseThrow(() -> new IllegalArgumentException("Expected Int field")).getValue());
See Java Bindings `Value`_ class for the methods to transform the Java Bindings types into corresponding Java types.
Non-exposed Parameterized Types
"""""""""""""""""""""""""""""""
If the parameterized type is contained in a type where the *actual* type is specified (as in the ``BookAttributes`` type above), then the conversion methods of the enclosing type provides the required conversion function parameters automatically.
Convert Optional Values
"""""""""""""""""""""""
The conversion of the Java ``Optional`` requires two steps. The
``Optional`` must be mapped in order to convert its contents before
it is passed to the ``DamlOptional::of`` function.
.. code-block:: java
Attribute<Optional<Long>> idAttribute = new Attribute<>(Optional.of(42L));
Value serializedId = DamlOptional.of(idAttribute.map(Int64::new));
To convert back `DamlOptional`_ to Java ``Optional``, one must use the
container's method ``toOptional``. This method expects a function to
convert back the value possibly contained in the container.
.. code-block:: java
Attribute<Optional<Long>> idAttribute2 =
serializedId.toOptional(v -> v.asInt64().orElseThrow(() -> new IllegalArgumentException("Expected Int64 element")));
Convert Collection Values
"""""""""""""""""""""""""
`DamlCollectors`_ provides collectors to convert Java collection
containers such as ``List`` and ``Map`` to DamlValues in one pass. The
builders for those collectors require functions to convert the elements
of the container.
.. code-block:: java
Attribute<List<String>> authorsAttribute =
new Attribute<List<String>>(Arrays.asList("Homer", "Ovid", "Vergil"));
Value serializedAuthors =
authorsAttribute.toValue(f -> f.stream().collect(DamlCollectors.toList(Text::new)));
To convert back Daml containers to Java ones, one must use the
containers methods ``toList`` or ``toMap``. Those methods expect
functions to convert back the container's entries.
.. code-block:: java
Attribute<List<String>> authorsAttribute2 =
Attribute.<List<String>>fromValue(
serializedAuthors,
f0 -> f0.asList().orElseThrow(() -> new IllegalArgumentException("Expected DamlList field"))
.toList(
f1 -> f1.asText().orElseThrow(() -> new IllegalArgumentException("Expected Text element"))
.getValue()
)
);
Daml Interfaces
^^^^^^^^^^^^^^^
From this Daml definition:
.. literalinclude:: ./code-snippets/Interfaces.daml
:language: daml
:start-after: -- start snippet: interface example
:end-before: -- end snippet: interface example
:caption: Interfaces.daml
The generated file for the interface definition can be seen below.
Effectively it is a class that contains only the inner type ContractId because one will always only be able to deal with Interfaces via their ContractId.
.. code-block:: java
:caption: interfaces/TIf.java
package interfaces;
/* imports */
public final class TIf {
public static final Identifier TEMPLATE_ID = new Identifier("94fb4fa48cef1ec7d474ff3d6883a00b2f337666c302ec5e2b87e986da5c27a3", "Interfaces", "TIf");
public static final Choice<TIf, Transfer, ContractId> CHOICE_Transfer =
Choice.create(/* ... */);
public static final Choice<TIf, Archive, Unit> CHOICE_Archive =
Choice.create(/* ... */);
public static final INTERFACE INTERFACE = new INTERFACE();
public static final class ContractId extends com.daml.ledger.javaapi.data.codegen.ContractId<TIf>
implements Exercises<ExerciseCommand> {
public ContractId(String contractId) { /* ... */ }
}
public interface Exercises<Cmd> extends com.daml.ledger.javaapi.data.codegen.Exercises<Cmd> {
default Cmd exerciseUseless(Useless arg) { /* ... */ }
default Cmd exerciseHam(Ham arg) { /* ... */ }
}
public static final class CreateAnd
extends com.daml.ledger.javaapi.data.codegen.CreateAnd.ToInterface
implements Exercises<CreateAndExerciseCommand> { /* ... */ }
public static final class ByKey
extends com.daml.ledger.javaapi.data.codegen.ByKey.ToInterface
implements Exercises<ExerciseByKeyCommand> { /* ... */ }
public static final class INTERFACE extends InterfaceCompanion<TIf> { /* ... */}
}
For templates the code generation will be slightly different if a template implements interfaces.
To allow converting the ContractId of a template to an interface ContractId, an additional conversion method called ``toInterface`` is generated.
An ``unsafeFromInterface`` is also generated to make the [unchecked] conversion in the other direction.
.. code-block:: java
:caption: interfaces/Child.java
package interfaces;
/* ... */
public final class Child extends Template {
/* ... */
public static final class ContractId extends com.daml.ledger.javaapi.data.codegen.ContractId<Child>
implements Exercises<ExerciseCommand> {
/* ... */
public TIf.ContractId toInterface(TIf.INTERFACE interfaceCompanion) { /* ... */ }
public static ContractId unsafeFromInterface(TIf.ContractId interfaceContractId) { /* ... */ }
}
public interface Exercises<Cmd> extends com.daml.ledger.javaapi.data.codegen.Exercises<Cmd> {
default Cmd exerciseBar(Bar arg) { /* ... */ }
default Cmd exerciseBar() { /* ... */ }
}
/* ... */
}
.. _Value: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/Value.html
.. _Unit: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/Unit.html
.. _Bool: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/Bool.html
.. _Int64: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/Int64.html
.. _Decimal: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/Decimal.html
.. _Numeric: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/Numeric.html
.. _Date: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/Date.html
.. _Timestamp: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/Timestamp.html
.. _Text: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/Text.html
.. _Party: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/Party.html
.. _ContractId: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/ContractId.html
.. _DamlOptional: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/DamlOptional.html
.. _DamlList: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/DamlList.html
.. _DamlTextMap: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/DamlTextMap.html
.. _DamlMap: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/DamlMap.html
.. _DamlCollectors: /app-dev/bindings-java/javadocs/com/daml/ledger/javaapi/data/DamlCollectors.html

View File

@ -1,74 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
Java Bindings Example Project
#############################
To try out the Java bindings library, use the `examples on GitHub <https://github.com/digital-asset/ex-java-bindings>`__: ``PingPongReactive``.
The example implements the ``PingPong`` application, which consists of:
- a Daml model with two contract templates, ``Ping`` and ``Pong``
- two parties, ``Alice`` and ``Bob``
The logic of the application goes like this:
#. The application injects a contract of type ``Ping`` for ``Alice``.
#. ``Alice`` sees this contract and exercises the consuming choice ``RespondPong`` to create a contract of type ``Pong`` for ``Bob``.
#. ``Bob`` sees this contract and exercises the consuming choice ``RespondPing`` to create a contract of type ``Ping`` for ``Alice``.
#. Points 2 and 3 are repeated until the maximum number of contracts defined in the Daml is reached.
Set Up the Example Projects
***************************
To set up the example projects, clone the public GitHub repository at `github.com/digital-asset/ex-java-bindings <https://github.com/digital-asset/ex-java-bindings>`__ and follow the setup instruction in the `README file <https://github.com/digital-asset/ex-java-bindings/blob/master/README.rst#setting-up-the-example-projects>`__.
This project contains two examples of the PingPong application, built directly with gRPC and using the RxJava2-based Java bindings.
Example Project
***************
PingPongMain.java
=================
The entry point for the Java code is the main class ``src/main/java/examples/pingpong/grpc/PingPongMain.java``. Look at this class to see:
- how to connect to and interact with a Daml Ledger via the Java bindings
- how to use the Reactive layer to build an automation for both parties.
At high level, the code does the following steps:
- creates an instance of ``DamlLedgerClient`` connecting to an existing Ledger
- connects this instance to the Ledger with ``DamlLedgerClient.connect()``
- creates two instances of ``PingPongProcessor``, which contain the logic of the automation
(This is where the application reacts to the new ``Ping`` or ``Pong`` contracts.)
- runs the ``PingPongProcessor`` forever by connecting them to the incoming transactions
- injects some contracts for each party of both templates
- waits until the application is done
PingPongProcessor.runIndefinitely()
===================================
The core of the application is the ``PingPongProcessor.runIndefinitely()``.
The ``PingPongProcessor`` queries the transactions first via the ``TransactionsClient`` of the ``DamlLedgerClient``. Then, for each transaction, it produces ``Commands`` that will be sent to the Ledger via the ``CommandSubmissionClient`` of the ``DamlLedgerClient``.
Output
======
The application prints statements similar to these:
.. code-block:: text
Bob is exercising RespondPong on #1:0 in workflow Ping-Alice-1 at count 0
Alice is exercising RespondPing on #344:1 in workflow Ping-Alice-7 at count 9
The first line shows that:
- ``Bob`` is exercising the ``RespondPong`` choice on the contract with ID ``#1:0`` for the workflow ``Ping-Alice-1``.
- Count ``0`` means that this is the first choice after the initial ``Ping`` contract.
- The workflow ID ``Ping-Alice-1`` conveys that this is the workflow triggered by the second initial ``Ping`` contract that was created by ``Alice``.
The second line is analogous to the first one.

View File

@ -1,140 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _java-bindings:
Java Bindings
#############
.. toctree::
:hidden:
codegen
Ping Pong Example <example>
Iou Quickstart Tutorial <quickstart>
The Java bindings is a client implementation of the *Ledger API*
based on `RxJava <https://github.com/ReactiveX/RxJava>`_, a library for composing asynchronous and event-based programs using observable sequences for the Java VM. It provides an idiomatic way to write Daml Ledger applications.
.. seealso::
This documentation for the Java bindings API includes the `JavaDoc reference documentation <javadocs/index.html>`_.
Overview
********
The Java bindings library is composed of:
- The Data Layer
A Java-idiomatic layer based on the Ledger API generated classes.
This layer simplifies the code required to work with the Ledger API.
Can be found in the java package ``com.daml.ledger.javaapi.data``.
- The Reactive Layer
A thin layer built on top of the Ledger API services generated classes.
For each Ledger API service, there is a reactive counterpart with a
matching name. For instance, the reactive counterpart of ``ActiveContractsServiceGrpc``
is ``ActiveContractsClient``.
The Reactive Layer also exposes the main interface representing a client
connecting via the Ledger API. This interface is called ``LedgerClient`` and the
main implementation working against a Daml Ledger is the ``DamlLedgerClient``.
Can be found in the java package ``com.daml.ledger.rxjava``.
Generate Code
=============
When writing applications for the ledger in Java, you want to work with a representation of Daml templates and data types in Java that closely resemble the original Daml code while still being as true to the native types in Java as possible.
To achieve this, you can use Daml to Java code generator ("Java codegen") to generate Java types based on a Daml model. You can then use these types in your Java code when reading information from and sending data to the ledger.
For more information on Java code generation, see :doc:`/app-dev/bindings-java/codegen`.
Connect to the Ledger: ``LedgerClient``
=======================================
Connections to the ledger are made by creating instance of classes that implement the interface ``LedgerClient``. The class ``DamlLedgerClient`` implements this interface, and is used to connect to a Daml ledger.
This class provides access to the ledgerId, and all clients that give access to the various ledger services, such as the active contract set, the transaction service, the time service, etc. This is described :ref:`below <ledger-api-java-binding-connecting>`. Consult the `JavaDoc for DamlLedgerClient <javadocs/com/daml/ledger/rxjava/DamlLedgerClient.html>`_ for full details.
Reference Documentation
***********************
`Click here for the JavaDoc reference documentation <javadocs/index.html>`_.
Get Started
***********
The Java bindings library can be added to a `Maven <https://maven.apache.org/>`_ project.
.. _bindings-java-setup-maven:
Set Up a Maven Project
======================
To use the Java bindings library, add the following dependencies to your project's ``pom.xml``:
.. literalinclude:: ./code-snippets/pom.xml
:language: XML
:start-after: <!-- start snippet: dependencies -->
:end-before: <!-- end snippet: dependencies -->
:dedent: 4
Replace ``x.y.z`` for both dependencies with the version that you want to use. You can find the available versions by checking
the `Maven Central Repository <https://search.maven.org/artifact/com.daml/bindings-java>`__.
You can also take a look at the ``pom.xml`` file from the :ref:`quickstart project <quickstart>`.
.. _ledger-api-java-binding-connecting:
Connect to the Ledger
=====================
Before any ledger services can be accessed, a connection to the ledger must be established. This is done by creating an instance of a ``DamlLedgerClient`` using one of the factory methods ``DamlLedgerClient.forLedgerIdAndHost`` and ``DamlLedgerClient.forHostWithLedgerIdDiscovery``. This instance can then be used to access service clients.
.. _ledger-api-java-bindings-authorization:
Perform Authorization
=====================
Some ledgers will require you to send an access token along with each request.
To learn more about authorization, read the :doc:`Authorization </app-dev/authorization>` overview.
To use the same token for all Ledger API requests, the ``DamlLedgerClient`` builders expose a ``withAccessToken`` method. This will allow you to not pass a token explicitly for every call.
If your application is long-lived and your tokens are bound to expire, you can reload the necessary token when needed and pass it explicitly for every call. Every client method has an overload that allows a token to be passed, as in the following example:
.. code-block:: java
transactionClient.getLedgerEnd(); // Uses the token specified when constructing the client
transactionClient.getLedgerEnd(accessToken); // Override the token for this call exclusively
If you're communicating with a ledger that verifies authorization it's very important to secure the communication channel to prevent your tokens from being exposed to man-in-the-middle attacks. The next chapter describes how to enable TLS.
.. _ledger-api-java-binding-connecting-securely:
Connect Securely
================
The Java bindings library lets you connect to a Daml Ledger via a secure connection. The builders created by
``DamlLedgerClient.newBuilder`` default to a plaintext connection, but you can invoke ``withSslContext`` to pass an ``SslContext``.
Using the default plaintext connection is useful only when connecting to a locally running Sandbox for development purposes.
Secure connections to a Daml Ledger must be configured to use client authentication certificates, which can be provided by a Ledger Operator.
For information on how to set up an ``SslContext`` with the provided certificates for client authentication, please consult the gRPC documentation on
`TLS with OpenSSL <https://github.com/grpc/grpc-java/blob/master/SECURITY.md#tls-with-openssl>`_ as well as the
`HelloWorldClientTls <https://github.com/grpc/grpc-java/blob/70b1b1696a258ffe042c7124217e3a7894821444/examples/src/main/java/io/grpc/examples/helloworldtls/HelloWorldClientTls.java#L46-L57>`_ example of the ``grpc-java`` project.
Advanced Connection Settings
============================
Sometimes the default settings for gRPC connections/channels are not suitable for a given situation. These use cases are supported by creating a custom `NettyChannelBuilder <https://grpc.github.io/grpc-java/javadoc/io/grpc/netty/NettyChannelBuilder.html>`_ object and passing it to the ``newBuilder`` static method defined over `DamlLedgerClient <javadocs/com/daml/ledger/rxjava/DamlLedgerClient.html>`_.
Example Projects
****************
Example projects using the Java bindings are available on `GitHub <https://github.com/digital-asset/ex-java-bindings>`__. :doc:`Read more about them here </app-dev/bindings-java/example>`.

View File

@ -1,646 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _quickstart:
Daml IOU Quickstart Tutorial
############################
In this guide, you will learn about developer tools and Daml applications by:
- developing a simple ledger application for issuing, managing, transferring and trading IOUs ("I Owe You!")
- developing an integration layer that exposes some of the functionality via custom REST services
Prerequisites:
- You understand what an IOU is. If you are not sure, read the :ref:`IOU tutorial overview<tutorials-iou-overview>`.
- You have installed the SDK. See :doc:`installation </getting-started/installation>`.
.. _quickstart-download:
Download the Quickstart Application
***********************************
You can get the quickstart application using the Daml assistant (``daml``):
#. Run ``daml new quickstart --template quickstart-java``
This creates the ``quickstart-java`` application into a new folder called ``quickstart``.
#. Run ``cd quickstart`` to change into the new directory.
Folder Structure
================
The project contains the following files:
.. code-block:: none
.
├── daml
│   ├── Iou.daml
│   ├── IouTrade.daml
│   ├── Main.daml
│   └── Tests
│   ├── Iou.daml
│   └── Trade.daml
├── daml.yaml
├── frontend-config.js
├── pom.xml
└── src
└── main
├── java
│   └── com
│   └── daml
│   └── quickstart
│   └── iou
│   └── IouMain.java
└── resources
└── logback.xml
- ``daml.yaml`` is a Daml project config file used by the SDK to find out how to build the Daml project and how to run it.
- ``daml`` contains the :ref:`Daml code <quickstart-daml>` specifying the contract model for the ledger.
- ``daml/Tests`` contains :ref:`test scripts <quickstart-scripts>` for the Daml model.
- ``frontend-config.js`` is a configuration file for the :ref:`Navigator <quickstart-navigator>` frontend.
- ``pom.xml`` and ``src/main/java`` constitute a :ref:`Java application <quickstart-application>` that provides REST services to interact with the ledger.
You will explore these in more detail through the rest of this guide.
.. _tutorials-iou-overview:
Understand IOUs
***************
To run through this guide, you will need to understand what an IOU is. This section describes the properties of an IOU like a bank bill that make it useful as a representation and transfer of value.
A bank bill represents a contract between the owner of the bill and its issuer, the central bank. Historically, it is a bearer instrument - it gives anyone who holds it the right to demand a fixed amount of material value, often gold, from the issuer in exchange for the note.
To do this, the note must have certain properties. In particular, the British pound note shown below illustrates the key elements that are needed to describe money in Daml:
.. figure:: quickstart/images/poundNote.jpg
:alt: A British five pound note with sections labelled as described below.
:align: center
**1) The Legal Agreement**
For a long time, money was backed by physical gold or silver stored in a central bank. The British pound note, for example, represented a promise by the central bank to provide a certain amount of gold or silver in exchange for the note. This historical artifact is still represented by the following statement::
I promise to pay the bearer on demand the sum of five pounds.
The true value of the note comes from the fact that it physically represents a bearer right that is matched by an obligation on the issuer.
**2) The Signature of the Counterparty**
The value of a right described in a legal agreement is based on a matching obligation for a counterparty. The British pound note would be worthless if the central bank, as the issuer, did not recognize its obligation to provide a certain amount of gold or silver in exchange for the note. The chief cashier confirms this obligation by signing the note as a delegate for the Bank of England. In general, determining the parties that are involved in a contract is key to understanding its true value.
**3) The Security Token**
Another feature of the pound note is the security token embedded within the physical paper. It allows the note to be authenticated with limited effort by holding it against a light source. Even a third party can verify the note without requiring explicit confirmation from the issuer that it still acknowledges the associated obligations.
**4) The Unique Identifier**
Every note has a unique registration number that allows the issuer to track their obligations and detect duplicate bills. Once the issuer has fulfilled the obligations associated with a particular note, duplicates with the same identifier automatically become invalid.
**5) The Distribution Mechanism**
The note itself is printed on paper, and its legal owner is the person holding it. The physical form of the note allows the rights associated with it to be transferred to other parties that are not explicitly mentioned in the contract.
.. _quickstart-start:
Run the Application Using Prototyping Tools
*******************************************
In this section, you will run the quickstart application and get introduced to the main tools for prototyping Daml:
#. To compile the Daml model, run ``daml build``
This creates a :ref:`DAR file <dar-file-dalf-file>` (DAR is just the format that Daml compiles to) called ``.daml/dist/quickstart-0.0.1.dar``. The output should look like this:
.. code-block:: none
2022-09-08 14:33:41.65 [INFO] [build]
Compiling quickstart to a DAR.
2022-09-08 14:33:42.90 [INFO] [build]
Created .daml/dist/quickstart-0.0.1.dar
.. _quickstart-sandbox:
#. To run the :doc:`sandbox </tools/sandbox>` (a lightweight local version of the ledger), run::
daml sandbox --port 6865
#. In a separate terminal run the following:
- Upload the DAR file::
daml ledger upload-dar --host localhost --port 6865 .daml/dist/quickstart-0.0.1.dar
- Run the init script::
daml script --ledger-host localhost --ledger-port 6865 --dar .daml/dist/quickstart-0.0.1.dar --script-name Main:initialize --output-file output.json
.. _quickstart-navigator:
- Start the :doc:`Navigator </tools/navigator/index>`, a browser-based ledger front-end, by running::
daml navigator server localhost 6865 --port 7500
The Navigator automatically connects to the sandbox. You can access it on port ``7500``.
.. _quickstart-work:
Try the Application
*******************
Now everything is running, you can try out the quickstart application:
#. Go to `http://localhost:7500/ <http://localhost:7500/>`_. This is the :doc:`Navigator </tools/navigator/index>`, which you launched :ref:`earlier <quickstart-navigator>`.
#. On the login screen, select ``alice`` from the dropdown. This logs you in as ``alice``.
This takes you to the contracts view:
.. figure:: quickstart/images/contracts.png
:alt: The Contracts view in the Navigator, showing one existing contract.
This is showing you what contracts are currently active on the sandbox ledger and visible to ``alice``. You can see that there is a single such contract, in our case with Id ``002eb5...``, created from a *template* called ``Iou:Iou@8f199da...``.
Your contract ID will vary. The actual value doesn't matter. We'll refer to this contract as ``002eb5`` in the rest of this document, and you'll need to substitute your own value mentally.
#. On the left-hand side, you can see the pages the Navigator contains:
- Contracts
- Templates
- Issued Ious
- Owned Ious
- Iou Transfers
- Trades
**Contracts** and **Templates** are standard views, available in any application. The others are created just for this application, specified in the ``frontend-config.js`` file.
For information on creating custom Navigator views, see :ref:`navigator-custom-views`.
#. Click **Templates** to open the Templates page.
This displays all available *contract templates*. Instances of contracts (or just *contracts*) are created from these templates. The names of the templates are of the format ``module:template@hash``. Including the hash disambiguates templates, even when identical module and template names are used between packages.
On the far right, you see the number of *contracts* that you can see for each template, if any, or ``-`` for "no contract".
#. Try creating a contract from a template. Issue an Iou to yourself by clicking on the ``Iou:Iou@8f199...`` row, filling it out as shown below (use the provided auto-complete feature for the ``Party`` values in ``issuer`` and ``owner``) and clicking **Submit**.
.. figure:: quickstart/images/createIou.png
:alt: Fill out the form by entering Alice as the Issuer and as the Owner, AliceCoin as the Currency, and 1.0 as the Amount.
#. On the left-hand side, click **Issued Ious** to go to that page. You can see the Iou you just issued yourself.
#. Now, try transferring this Iou to someone else. Click on your Iou, select ``Iou_Transfer``, select ``Bob::...`` as the new owner and hit **Submit**.
#. Go to the **Owned Ious** page.
The screen shows the same contract ``002eb5`` that you already saw on the *Contracts* page. It is an Iou for €100, issued by ``EUR_Bank::...``.
#. Go to the **Iou Transfers** page. It shows the transfer of your recently issued Iou to Bob, but Bob has not accepted the transfer, so it is not settled.
This is an important part of Daml: nobody can be forced into owning an *Iou*, or indeed agreeing to any other contract. They must explicitly consent.
You could cancel the transfer by using the ``IouTransfer_Cancel`` choice within it, but for this walk-through, leave it alone for the time being.
#. Try asking *Bob* to exchange your €100 for $110. To do so, you first have to show your Iou to *Bob* so that he can verify the settlement transaction, should he accept the proposal.
Go back to **Owned Ious**, open the Iou for €100 and click on the button ``Iou_AddObserver``. Select ``Bob::...`` as the ``newObserver``.
Contracts in Daml are immutable, meaning they cannot be changed, only created and archived. If you head back to the **Owned Ious** screen, you can see that the Iou now has a new Contract ID. In our case, it's ``00018fe...``.
#. To propose the trade, go to the **Templates** screen. Click on the ``IouTrade:IouTrade@...`` template, fill in the form as shown below and submit the transaction. Remember to use the dropdown for the values of ``buyer``, ``seller``, ``baseIouCid``, ``baseIssuer``, and ``quoteIssuer``.
.. figure:: quickstart/images/tradeProp.png
:alt: Fill out the form; use the provided dropdown to select Alice as the buyer, Bob as the seller, the new contract we just created as the baseIouCid, and EUR_Bank as the baseIssuer; enter EUR as the baseCurrency, 100.00 as the baseAmount; select USD_Bank from the dropdown as the quote_Issuer; enter USD as the quoteCurrency, and 110.0 as the quoteAmount.
#. Go to the **Trades** page. It shows the just-proposed trade.
#. You are now going to switch user to Bob, so you can accept the trades you have just proposed. Start by clicking on the logout button next to the username, at the top of the screen. On the login page, select ``bob`` from the dropdown.
#. First, accept the transfer of the *AliceCoin*. Go to the **Iou Transfers** page, click on the row of the transfer, and click ``IouTransfer_Accept``, then **Submit**.
#. Go to the **Owned Ious** page. It now shows the *AliceCoin*.
It also shows an *Iou* for $110 issued by ``USD_Bank::...``. This matches the trade proposal you made earlier as Alice. Remember the first few characters of its Contract ID (in our case ``0086c84``).
.. figure:: quickstart/images/bobOwnedIous.png
:alt: List of Owned Ious for Bob at this point. Includes Bob's $110 from USD_Bank.
#. Settle the trade. Go to the **Trades** page, and click on the row of the proposal. Accept the trade by clicking ``IouTrade_Accept``. In the popup, select the Contract ID you just noted from the dropdown as the ``quoteIouCid``, then click **Submit**.
The two legs of the transfer are now settled atomically in a single transaction. The trade either fails or succeeds as a whole.
#. Privacy is an important feature of Daml. You can check that Alice and Bob's privacy relative to the Banks was preserved.
To do this, log out, then log in as ``us``, which maps to ``USD_Bank::...``.
On the **Contracts** page, select **Include archived**. The page now shows all the contracts that ``USD_Bank::...`` has ever known about.
There are just five contracts:
* Three contracts created on startup:
1. A self-issued *Iou* for $110.
2. The *IouTransfer* to transfer that *Iou* to Bob.
3. The resulting *Iou* owned by Bob.
* The transfer of Bob's *Iou* to Alice that happened as part of the trade.
Note that this is a transient contract that got archived in the same transaction
it got created in.
* The new $110 *Iou* owned by Alice. This is the only active contract.
Importantly, ``USD_Bank::...`` does not know anything about the trade or the EUR-leg. It has no idea what was exchanged for those $110, or indeed if anything was exchanged at all. For more information on privacy, refer to the :ref:`da-ledgers`.
.. note::
``USD_Bank::...`` does know about an intermediate *IouTransfer* contract that was created and consumed as part of the atomic settlement in the previous step. Since that contract was never active on the ledger, it is not shown in Navigator. You will see how to view a complete transaction graph, including who knows what, in :ref:`quickstart-scripts` below.
.. _quickstart-daml:
Get Started with Daml
*********************
The *contract model* specifies the possible contracts, as well as the allowed transactions on the ledger, and is written in Daml.
The core concept in Daml is a *contract template* - you used them earlier to create contracts. Contract templates specify:
- a type of contract that may exist on the ledger, including a corresponding data type
- the *signatories*, who need to agree to the *creation* of a contract of that type
- the *rights* or *choices* given to parties by a contract of that type
- constraints or conditions on the data on a contract
- additional parties, called observers, who can see the contract
For more information about Daml Ledgers, consult :ref:`da-ledgers` for an in-depth technical description.
Develop with Daml Studio
========================
Take a look at the Daml that specifies the contract model in the quickstart application. The core template is ``Iou``.
#. Open :doc:`Daml Studio </daml/daml-studio>`, a Daml IDE based on VS Code, by running ``daml studio`` from the root of your project.
#. Using the explorer on the left, open ``daml/Iou.daml``.
The first (uncommented, non-empty) line specifies the module name:
.. literalinclude:: quickstart/template-root/daml/Iou.daml
:language: daml
:start-after: -- BEGIN_IOU_MODULE_NAME
:end-before: -- END_IOU_MODULE_NAME
Next, a template called `Iou` is declared together with its datatype. This template has five fields:
.. literalinclude:: quickstart/template-root/daml/Iou.daml
:language: daml
:start-after: -- BEGIN_IOU_TEMPLATE_DATATYPE
:end-before: -- END_IOU_TEMPLATE_DATATYPE
Conditions for the creation of a contract are specified using the `ensure` and `signatory` keywords:
.. literalinclude:: quickstart/template-root/daml/Iou.daml
:language: daml
:start-after: -- BEGIN_IOU_TEMPLATE_CONDITIONS
:end-before: -- END_IOU_TEMPLATE_CONDITIONS
In this case, there are two conditions:
- An ``Iou`` can only be created if it is authorized by both ``issuer`` and ``owner``.
- The ``amount`` needs to be positive.
Earlier, as Alice, you authorized the creation of an ``Iou``. The ``amount`` was ``1.0``, and Alice was both ``issuer`` and ``owner``, so both conditions were satisfied, and you could successfully create the contract.
To see this in action, go back to the Navigator and try to create the same ``Iou`` again, but with Bob as ``owner`` (with Alice as issuer). It will not work. Note that the Navigator shows successes and failures as a small icon in the top right, as highlighted here (it would be a small "v" for success):
.. figure:: quickstart/images/navError.png
:alt: Navigator showing an error.
Observers are specified using the ``observer`` keyword:
.. literalinclude:: quickstart/template-root/daml/Iou.daml
:language: daml
:start-after: -- BEGIN_IOU_TEMPLATE_OBSERVERS
:end-before: -- END_IOU_TEMPLATE_OBSERVERS
Here, ``observer`` is the keyword and ``observers`` refers to the field of the template.
Next, the *rights* or *choices* are defined, in this case with ``owner`` as the ``controller``:
.. literalinclude:: quickstart/template-root/daml/Iou.daml
:language: daml
:start-after: -- BEGIN_IOU_TEMPLATE_SPLIT
:end-before: -- END_IOU_TEMPLATE_SPLIT
.. literalinclude:: quickstart/template-root/daml/Iou.daml
:language: daml
:start-after: -- BEGIN_IOU_TEMPLATE_MERGE
:end-before: -- END_IOU_TEMPLATE_MERGE
.. literalinclude:: quickstart/template-root/daml/Iou.daml
:language: daml
:start-after: -- BEGIN_IOU_TEMPLATE_TRANSFER
:end-before: -- END_IOU_TEMPLATE_TRANSFER
.. literalinclude:: quickstart/template-root/daml/Iou.daml
:language: daml
:start-after: -- BEGIN_IOU_TEMPLATE_OBSERVER_CHOICES
:end-before: -- END_IOU_TEMPLATE_OBSERVER_CHOICES
Thus, ``owner`` has the right to:
- Split the Iou.
- Merge it with another one differing only on ``amount``.
- Initiate a transfer.
- Add and remove observers.
The ``Iou_Transfer`` choice above takes a parameter called ``newOwner`` and creates a new ``IouTransfer`` contract and returns its ``ContractId``. It is important to know that, by default, choices *consume* the contract on which they are exercised. Consuming, or archiving, makes the contract no longer active. So the ``IouTransfer`` replaces the ``Iou``.
A more interesting choice is ``IouTrade_Accept``. To look at it, open ``IouTrade.daml``.
.. literalinclude:: quickstart/template-root/daml/IouTrade.daml
:language: daml
:start-after: -- BEGIN_IOU_COMPOSITE_CHOICE
:end-before: -- END_IOU_COMPOSITE_CHOICE
This choice uses the ``===`` operator from the :doc:`Daml Standard Library </daml/stdlib/index>` to check pre-conditions. The standard library is imported using ``import DA.Assert`` at the top of the module.
Then, it *composes* the ``Iou_Transfer`` and ``IouTransfer_Accept`` choices to build one big transaction. In this transaction, ``buyer`` and ``seller`` exchange their Ious atomically, without disclosing the entire transaction to all parties involved.
The *Issuers* of the two Ious, which are involved in the transaction because they are signatories on the ``Iou`` and ``IouTransfer`` contracts, only get to see the sub-transactions that concern them, as we saw earlier.
For a deeper introduction to Daml, consult the :doc:`Daml Reference </daml/reference/index>`.
.. _quickstart-scripts:
Test Using Daml Script
======================
You can check the correct authorization and privacy of a contract model using *scripts*: tests that are written in Daml.
A script is a linear sequence of transactions that is evaluated using the same consistency, conformance and authorization rules as it would be on the full ledger server or the sandbox ledger. Scripts are integrated into Daml Studio, which can show you the resulting transaction graph, making them a powerful tool to test and troubleshoot the contract model.
To take a look at the scripts in the quickstart application, open ``daml/Tests/Trade.daml`` in Daml Studio.
A script test is defined with ``trade_test = script do``. The ``submit`` function takes a submitting party and a transaction, which is specified the same way as in contract choices.
The following block, for example, issues an ``Iou`` and transfers it to Alice:
.. literalinclude:: quickstart/template-root/daml/Tests/Trade.daml
:language: daml
:start-after: -- BEGIN_SCRIPT
:end-before: -- END_SCRIPT
Compare the script with the ``initialize`` script in ``daml/Main.daml``. You will see that the script you used to initialize the sandbox is an initial segment of the ``trade_test`` script. The latter adds transactions to perform the trade you performed through Navigator, and a couple of transactions in which expectations are verified.
After a short time, the text *Script results* should appear above the test. Click on it (in ``daml/Tests/Trade.daml``) to open the visualization of the resulting ledger state.
.. figure:: quickstart/images/ledger.png
:alt: The Script Results view showing two subtransactions: one with Issuer USD_Bank and Owner Alice, the other with Issuer EUR_Bank and Owner Bob.
Each row shows a contract on the ledger. The last four columns show which parties know of which contracts. The remaining columns show the data on the contracts. You can see past contracts by checking the **Show archived** box at the top. Click the adjacent **Show transaction view** button to switch to a view of the entire transaction tree.
In the transaction view, transaction ``6`` is of particular interest, as it shows how the Ious are exchanged atomically in one transaction. The lines starting ``disclosed to (since)`` show that the Banks do indeed not know anything they should not:
.. code-block:: none
TX 6 1970-01-01T00:00:00Z (Tests.Trade:70:14)
#6:0
│ disclosed to (since): 'Alice' (6), 'Bob' (6)
└─> 'Bob' exercises IouTrade_Accept on #5:0 (IouTrade:IouTrade)
with
quoteIouCid = #3:1
children:
#6:1
│ disclosed to (since): 'Alice' (6), 'Bob' (6), 'EUR_Bank' (6)
└─> 'Alice' and 'EUR_Bank' fetch #4:1 (Iou:Iou)
#6:2
│ disclosed to (since): 'Alice' (6), 'Bob' (6), 'USD_Bank' (6)
└─> 'Bob' and 'USD_Bank' fetch #3:1 (Iou:Iou)
#6:3
│ disclosed to (since): 'Alice' (6), 'Bob' (6), 'USD_Bank' (6)
└─> 'Bob' exercises Iou_Transfer on #3:1 (Iou:Iou)
with
newOwner = 'Alice'
children:
#6:4
│ consumed by: #6:5
│ referenced by #6:5
│ disclosed to (since): 'Alice' (6), 'Bob' (6), 'USD_Bank' (6)
└─> 'Bob' and 'USD_Bank' create Iou:IouTransfer
with
iou =
(Iou:Iou with
issuer = 'USD_Bank';
owner = 'Bob';
currency = "USD";
amount = 110.0000000000;
observers = []);
newOwner = 'Alice'
#6:5
│ disclosed to (since): 'Alice' (6), 'Bob' (6), 'USD_Bank' (6)
└─> 'Alice' exercises IouTransfer_Accept on #6:4 (Iou:IouTransfer)
children:
#6:6
│ disclosed to (since): 'Alice' (6), 'Bob' (6), 'USD_Bank' (6)
└─> 'Alice' and 'USD_Bank' create Iou:Iou
with
issuer = 'USD_Bank';
owner = 'Alice';
currency = "USD";
amount = 110.0000000000;
observers = []
#6:7
│ disclosed to (since): 'Alice' (6), 'Bob' (6), 'EUR_Bank' (6)
└─> 'Alice' exercises Iou_Transfer on #4:1 (Iou:Iou)
with
newOwner = 'Bob'
children:
#6:8
│ consumed by: #6:9
│ referenced by #6:9
│ disclosed to (since): 'Alice' (6), 'Bob' (6), 'EUR_Bank' (6)
└─> 'Alice' and 'EUR_Bank' create Iou:IouTransfer
with
iou =
(Iou:Iou with
issuer = 'EUR_Bank';
owner = 'Alice';
currency = "EUR";
amount = 100.0000000000;
observers = ['Bob']);
newOwner = 'Bob'
#6:9
│ disclosed to (since): 'Alice' (6), 'Bob' (6), 'EUR_Bank' (6)
└─> 'Bob' exercises IouTransfer_Accept on #6:8 (Iou:IouTransfer)
children:
#6:10
│ disclosed to (since): 'Alice' (6), 'Bob' (6), 'EUR_Bank' (6)
└─> 'Bob' and 'EUR_Bank' create Iou:Iou
with
issuer = 'EUR_Bank';
owner = 'Bob';
currency = "EUR";
amount = 100.0000000000;
observers = []
The ``submit`` function used in this script tries to perform a transaction and fails if any of the ledger integrity rules are violated. There is also a ``submitMustFail`` function, which checks that certain transactions are not possible. This is used in ``daml/Tests/Iou.daml``, for example, to confirm that the ledger model prevents double spends.
.. Interact With the Ledger Through the Command Line
*************************************************
All interaction with a Daml ledger, be it sandbox or any other implementation, happens via the :doc:`Ledger API </app-dev/ledger-api>`. It is based on `gRPC <https://grpc.io/>`_.
The Navigator uses this API, as will any :ref:`custom integration <quickstart-application>`.
This section shows a way to fetch data and submit commands via a command-line interface.
_quickstart-api:
Not for this release
Connect to the raw API with grpCurl
===================================
.. _quickstart-application:
Integrate With the Ledger
*************************
A distributed ledger only forms the core of a full Daml application.
To build automations and integrations around the ledger, Daml has :doc:`language bindings </app-dev/bindings-java/index>` for the Ledger API in several programming languages.
To compile the Java integration for the quickstart application, we first need to run the Java codegen on the DAR we built before::
daml codegen java
Once the code has been generated (into ``target/generated-sources`` per the instructions in ``daml.yaml``), we can compile it using::
mvn compile
Now, start the Java integration with::
mvn exec:java@run-quickstart -Dparty=$(cat output.json | sed 's/\[\"//' | sed 's/".*//')
Note that this step requires that the sandbox started :ref:`earlier <quickstart-sandbox>` is still running. If it is not, you'll have to run the ``daml sandbox`` and ``daml script`` commands again to get an ``output.json`` in sync with the new state of the sandbox (party names can change with each sandbox restart).
The application provides REST services on port ``8080`` to perform basic operations on behalf of ``Alice``. For example, check that::
curl http://localhost:8080/iou
returns, for a newly-created sandbox (where you have just run the init script to get the ``output.json`` file), something like::
{"0":{"issuer":"EUR_Bank::NAMESPACE","owner":"Alice::NAMESPACE","currency":"EUR","amount":100.0000000000,"observers":[]}}
If you still have the same sandbox running against which you have run the Navigator steps above, the output might look more like::
{"0":{"issuer":"Alice::NAMESPACE","owner":"Bob::NAMESPACE","currency":"AliceCoin","amount":1.0000000000,"observers":[]},"1":{"issuer":"USD_Bank::NAMESPACE","owner":"Alice::NAMESPACE","currency":"USD","amount":110.0000000000,"observers":[]}}
To start the same application on another port, use the command-line parameter ``-Drestport=PORT``. To start it for another party, use ``-Dparty=PARTY``. For example, to start the application for Bob on ``8081``, run::
mvn exec:java@run-quickstart -Drestport=8081 -Dparty=Bob$(cat output.json | sed 's/\[\"//' | sed 's/".*//')
The following REST services are included:
- ``GET`` on ``http://localhost:8080/iou`` lists all active Ious, and their Ids.
Note that the Ids exposed by the REST API are not the ledger contract Ids, but integers. You can open the address in your browser or run ``curl -X GET http://localhost:8080/iou``.
- ``GET`` on ``http://localhost:8080/iou/ID`` returns the Iou with Id ``ID``.
For example, to get the content of the Iou with Id 0, run:
``curl -X GET http://localhost:8080/iou/0``
- ``PUT`` on ``http://localhost:8080/iou`` creates a new Iou on the ledger.
To create another *AliceCoin*, run::
curl -X PUT -d '{"issuer":"Alice::NAMESPACE","owner":"Alice::NAMESPACE","currency":"AliceCoin","amount":1.0,"observers":[]}' http://localhost:8080/iou
Note that you have to replace ``NAMESPACE`` with the real namespace assigned by the sandbox; you can find it in ``output.json``::
ns=$(cat output.json | sed 's/\[\"Alice:://' | sed 's/".*//'); curl -X PUT -d "$(printf '{"issuer":"Alice::%s","owner":"Alice::%s","currency":"AliceCoin","amount":1.0,"observers":[]}' $ns $ns)" http://localhost:8080/iou
- ``POST`` on ``http://localhost:8080/iou/ID/transfer`` transfers the Iou with Id ``ID``.
Check the index of your new *AliceCoin* by listing all active Ious. If you have just run the init script, it will be ``0``; if you have run the Navigator section, it will likely be ``2``. Once you have the index, you can run::
ns=$(cat output.json | sed 's/\[\"Alice:://' | sed 's/".*//'); curl -X POST -d "{\"newOwner\":\"Bob::${ns}\"}" http://localhost:8080/iou/0/transfer
to transfer it to Bob. If it's not ``0``, just replace the ``0`` in ``iou/0`` in the above command.
The automation is based on the :doc:`Java bindings </app-dev/bindings-java/index>` and the output of the :doc:`Java code generator </app-dev/bindings-java/codegen>`, which are included as a Maven dependency and Maven plugin respectively in the ``pom.xml`` file created by the template:
.. literalinclude:: quickstart/template-root/pom.xml
:language: xml
:lines: 22-32
It consists of the application in file ``IouMain.java``. It uses the class ``Iou`` from ``Iou.java``, which is generated from the Daml model with the Java code generator. The ``Iou`` class provides better serialization and de-serialization to JSON via `gson <https://github.com/google/gson>`_. Looking at ``src/main/java/com/daml/quickstart/iou/IouMain.java``:
#. A connection to the ledger is established using a ``DamlLedgerClient`` object.
.. literalinclude:: quickstart/template-root/src/main/java/com/daml/quickstart/iou/IouMain.java
:language: java
:lines: 46-49
:dedent: 4
#. An in-memory contract store is initialized. This is intended to provide a live view of all active contracts, with mappings between ledger and external Ids.
.. literalinclude:: quickstart/template-root/src/main/java/com/daml/quickstart/iou/IouMain.java
:language: java
:lines: 56-59
:dedent: 4
#. The Active Contracts Service (ACS) is used to quickly build up the contract store to a recent state.
.. literalinclude:: quickstart/template-root/src/main/java/com/daml/quickstart/iou/IouMain.java
:language: java
:lines: 61-73
:dedent: 4
``blockingForEach`` is used to ensure that the contract store is consistent with the ledger state at the latest offset observed by the client.
#. The Transaction Service is wired up to update the contract store on occurrences of ``ArchiveEvent`` and ``CreateEvent`` for Ious. Since ``getTransactions`` is called without end offset, it will stream transactions indefinitely, until the application is terminated.
.. literalinclude:: quickstart/template-root/src/main/java/com/daml/quickstart/iou/IouMain.java
:language: java
:lines: 76-97
:dedent: 4
#. Commands are submitted via the Command Submission Service.
.. literalinclude:: quickstart/template-root/src/main/java/com/daml/quickstart/iou/IouMain.java
:language: java
:lines: 137-141
:dedent: 2
You can find examples of ``Update`` instantiations for creating a contract and exercising a choice in the bodies of the ``transfer`` and ``iou`` endpoints, respectively.
.. literalinclude:: quickstart/template-root/src/main/java/com/daml/quickstart/iou/IouMain.java
:caption: Exercise a choice
:language: java
:lines: 119-121
:dedent: 10
.. literalinclude:: quickstart/template-root/src/main/java/com/daml/quickstart/iou/IouMain.java
:caption: Create a contract
:language: java
:lines: 109-111
:dedent: 10
The rest of the application sets up the REST services using `Spark Java <http://sparkjava.com/>`_, and does dynamic package Id detection using the Package Service. The latter is useful during development when package Ids change frequently.
For a discussion of ledger application design and architecture, take a look at :doc:`Application Architecture Guide </app-dev/app-arch>`.
Next Steps
**********
Great - you've completed the quickstart guide!
Some steps you could take next include:
- Explore `examples <https://daml.com/examples>`_ for guidance and inspiration.
- :doc:`Learn Daml </daml/intro/0_Intro>`.
- :doc:`Language reference </daml/reference/index>`.
- Learn more about :doc:`application development </app-dev/app-arch>`.
- Learn about the :doc:`conceptual models </concepts/ledger-model/index>` behind Daml.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1016 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 766 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 94 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 770 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 354 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 777 KiB

View File

@ -1,11 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _python-bindings:
Python Bindings
###############
The Python bindings (formerly known as DAZL) are a client implementation of the *Ledger API* for the Python language and are supported under the Daml Enterprise license.
The Python bindings are supported for use with Daml and with `Daml Hub <https://hub.daml.com/>`_. Documentation for the bindings can be found `here <https://digital-asset.github.io/dazl-client/>`_.

View File

@ -1,8 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
@daml/ledger
############
`@daml/ledger documentation <ts-daml-ledger_>`_

View File

@ -1,8 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
@daml/react
###########
`@daml/react documentation <ts-daml-react_>`_

View File

@ -1,8 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
@daml/types
###########
`@daml/types documentation <ts-daml-types_>`_

View File

@ -1,288 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
Use the JavaScript Code Generator
#################################
The command ``daml codegen js`` generates JavaScript (and TypeScript) that can be used in conjunction with the `JavaScript Client Libraries </app-dev/bindings-ts/index.html>`_ for interacting with a Daml ledger via the `HTTP JSON API </json-api/index.html>`_.
Inputs to the command are DAR files. Outputs are JavaScript packages with TypeScript typings containing metadata and types for all Daml packages included in the DAR files.
The generated packages use the library `@daml/types <ts-daml-types_>`_.
Generate and Use Code
*********************
In outline, the command to generate JavaScript and TypeScript typings from Daml is ``daml codegen js -o OUTDIR DAR`` where ``DAR`` is the path to a DAR file (generated via ``daml build``) and ``OUTDIR`` is a directory where you want the artifacts to be written.
Here's a complete example on a project built from the standard "skeleton" template.
.. code-block:: bash
:linenos:
daml new my-proj --template skeleton # Create a new project based off the skeleton template
cd my-proj # Enter the newly created project directory
daml build # Compile the project's Daml files into a DAR
daml codegen js -o daml.js .daml/dist/my-proj-0.0.1.dar # Generate JavaScript packages in the daml.js directory
- On execution of these commands:
- The directory ``my-proj/daml.js`` contains generated JavaScript packages with TypeScript typings;
- The files are arranged into directories;
- One of those directories will be named my-proj-0.0.1 and will contain the definitions corresponding to the Daml files in the project;
- For example, ``daml.js/my-proj-0.0.1/lib/index.js`` provides access to the definitions for ``daml/Main.daml``;
- The remaining directories correspond to modules of the Daml standard library;
- Those directories have numeric names (the names are hashes of the Daml-LF package they are derived from).
To get a quickstart idea of how to use what has been generated, you may wish to jump to the `Templates and choices`_ section and return to the reference material that follows as needed.
Primitive Daml Types: @daml/types
*********************************
To understand the TypeScript typings produced by the code generator, it is helpful to keep in mind this quick review of the TypeScript equivalents of the primitive Daml types provided by @daml/types.
**Interfaces**:
- ``Template<T extends object, K = unknown>``
- ``Choice<T extends object, C, R, K = unknown>``
**Types**:
+-------------------+--------------------+----------------------------------+
| Daml | TypeScript | TypeScript definition |
+===================+====================+==================================+
| ``()`` | ``Unit`` | ``{}`` |
+-------------------+--------------------+----------------------------------+
| ``Bool`` | ``Bool`` | ``boolean`` |
+-------------------+--------------------+----------------------------------+
| ``Int`` | ``Int`` | ``string`` |
+-------------------+--------------------+----------------------------------+
| ``Decimal`` | ``Decimal`` | ``string`` |
+-------------------+--------------------+----------------------------------+
| ``Numeric ν`` | ``Numeric`` | ``string`` |
+-------------------+--------------------+----------------------------------+
| ``Text`` | ``Text`` | ``string`` |
+-------------------+--------------------+----------------------------------+
| ``Time`` | ``Time`` | ``string`` |
+-------------------+--------------------+----------------------------------+
| ``Party`` | ``Party`` | ``string`` |
+-------------------+--------------------+----------------------------------+
| ``[τ]`` | ``List<τ>`` | ``τ[]`` |
+-------------------+--------------------+----------------------------------+
| ``Date`` | ``Date`` | ``string`` |
+-------------------+--------------------+----------------------------------+
| ``ContractId τ`` | ``ContractId<τ>`` | ``string`` |
+-------------------+--------------------+----------------------------------+
| ``Optional τ`` | ``Optional<τ>`` | ``null | (null extends τ ?`` |
| | | ``[] | [Exclude<τ, null>] : τ)`` |
+-------------------+--------------------+----------------------------------+
| ``TextMap τ`` | ``TextMap<τ>`` | ``{ [key: string]: τ }`` |
+-------------------+--------------------+----------------------------------+
| ``(τ₁, τ₂)`` | ``Tuple₂<τ₁, τ₂>`` | ``{_1: τ₁; _2: τ₂}`` |
+-------------------+--------------------+----------------------------------+
.. note::
The types given in the "TypeScript" column are defined in @daml/types.
.. note::
For *n*-tuples where *n ≥ 3*, representation is analogous with the pair case (the last line of the table).
.. note::
The TypeScript types ``Time``, ``Decimal``, ``Numeric`` and ``Int`` all alias to ``string``. These choices relate to the avoidance of precision loss under serialization over the `json-api <../json-api/index.html>`_.
.. note::
The TypeScript definition of type ``Optional<τ>`` in the above table might look complicated. It accounts for differences in the encoding of optional values when nested versus when they are not (i.e. "top-level"). For example, ``null`` and ``"foo"`` are two possible values of ``Optional<Text>`` whereas, ``[]`` and ``["foo"]`` are two possible values of type ``Optional<Optional<Text>>`` (``null`` is another possible value, ``[null]`` is **not**).
Daml to TypeScript Mappings
***************************
The mappings from Daml to TypeScript are best explained by example.
Records
=======
In Daml, we might model a person like this.
.. code-block:: daml
:linenos:
data Person =
Person with
name: Text
party: Party
age: Int
Given the above definition, the generated TypeScript code will be as follows.
.. code-block:: typescript
:linenos:
type Person = {
name: string;
party: daml.Party;
age: daml.Int;
}
Variants
========
This is a Daml type for a language of additive expressions.
.. code-block:: daml
:linenos:
data Expr a =
Lit a
| Var Text
| Add (Expr a, Expr a)
In TypeScript, it is represented as a `discriminated union <https://www.typescriptlang.org/docs/handbook/advanced-types.html#discriminated-unions>`_.
.. code-block:: typescript
:linenos:
type Expr<a> =
| { tag: 'Lit'; value: a }
| { tag: 'Var'; value: string }
| { tag: 'Add'; value: {_1: Expr<a>, _2: Expr<a>} }
Sum-of-products
===============
Let's slightly modify the ``Expr a`` type of the last section into the following.
.. code-block:: daml
:linenos:
data Expr a =
Lit a
| Var Text
| Add {lhs: Expr a, rhs: Expr a}
Compared to the earlier definition, the ``Add`` case is now in terms of a record with fields ``lhs`` and ``rhs``. This renders in TypeScript like so.
.. code-block:: typescript
:linenos:
type Expr<a> =
| { tag: 'Lit'; value: a }
| { tag: 'Var'; value: string }
| { tag: 'Add'; value: Expr.Add<a> }
namespace Expr {
type Add<a> = {
lhs: Expr<a>;
rhs: Expr<a>;
}
}
The thing to note is how the definition of the ``Add`` case has given rise to a record type definition ``Expr.Add``.
Enums
=====
Given a Daml enumeration like this,
.. code-block:: daml
:linenos:
data Color = Red | Blue | Yellow
the generated TypeScript will consist of a type declaration and the definition of an associated companion object.
.. code-block:: typescript
:linenos:
type Color = 'Red' | 'Blue' | 'Yellow'
const Color = {
Red: 'Red',
Blue: 'Blue',
Yellow: 'Yellow',
keys: ['Red','Blue','Yellow'],
} as const;
Templates and Choices
=====================
Here is a Daml template of a basic 'IOU' contract.
.. code-block:: daml
:linenos:
template Iou
with
issuer: Party
owner: Party
currency: Text
amount: Decimal
where
signatory issuer
choice Transfer: ContractId Iou
with
newOwner: Party
controller owner
do
create this with owner = newOwner
The ``daml codegen js`` command generates types for each of the choices defined on the template as well as the template itself.
.. code-block:: typescript
:linenos:
type Transfer = {
newOwner: daml.Party;
}
type Iou = {
issuer: daml.Party;
owner: daml.Party;
currency: string;
amount: daml.Numeric;
}
Each template results in the generation of a companion object. Here is a schematic of the one generated from the ``Iou`` template [2]_.
.. code-block:: typescript
:linenos:
const Iou: daml.Template<Iou, undefined> & {
Archive: daml.Choice<Iou, DA_Internal_Template.Archive, {}, undefined>;
Transfer: daml.Choice<Iou, Transfer, daml.ContractId<Iou>, undefined>;
} = {
/* ... */
}
.. [2] The ``undefined`` type parameter captures the fact that ``Iou`` has no contract key.
The exact details of these companion objects are not important - think of them as representing "metadata".
What **is** important is the use of the companion objects when creating contracts and exercising choices using the `@daml/ledger <https://github.com/digital-asset/daml/tree/main/language-support/ts/daml-ledger>`_ package. The following code snippet demonstrates their usage.
.. code-block:: typescript
:linenos:
import Ledger from '@daml/ledger';
import {Iou, Transfer} from /* ... */;
const ledger = new Ledger(/* ... */);
// Contract creation; Bank issues Alice a USD $1MM IOU.
const iouDetails: Iou = {
issuer: 'Chase',
owner: 'Alice',
currency: 'USD',
amount: 1000000.0,
};
const aliceIouCreateEvent = await ledger.create(Iou, iouDetails);
const aliceIouContractId = aliceIouCreateEvent.contractId;
// Choice execution; Alice transfers ownership of the IOU to Bob.
const transferDetails: Transfer = {
newOwner: 'Bob',
}
const [bobIouContractId, _] = await ledger.exercise(Transfer, aliceIouContractId, transferDetails);
Observe on line 14, the first argument to ``create`` is the ``Iou`` companion object and on line 22, the first argument to ``exercise`` is the ``Transfer`` companion object.

View File

@ -1,76 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
Create Your Own Bindings
########################
This page gets you started with creating custom bindings for a Daml Ledger.
Bindings for a language consist of two main components:
- Ledger API
Client "stubs" for the programming language, -- the remote API that allows sending ledger commands and receiving ledger transactions. You have to generate **Ledger API** from `the gRPC protobuf definitions in the daml repository on GitHub <https://github.com/digital-asset/daml/tree/main/ledger-api/grpc-definitions>`_. **Ledger API** is documented on this page: :doc:`/app-dev/grpc/index`. The `gRPC <https://grpc.io/docs/>`_ tutorial explains how to generate client "stubs".
- Codegen
A code generator is a program that generates classes representing Daml contract templates in the language. These classes incorporate all boilerplate code for constructing: :ref:`com.daml.ledger.api.v1.CreateCommand` and :ref:`com.daml.ledger.api.v1.ExerciseCommand` corresponding to each Daml contract template.
Technically codegen is optional. You can construct the commands manually from the auto-generated **Ledger API** classes. However, it is very tedious and error-prone. If you are creating *ad hoc* bindings for a project with a few contract templates, writing a proper codegen may be overkill. On the other hand, if you have hundreds of contract templates in your project or are planning to build language bindings that you will share across multiple projects, we recommend including a codegen in your bindings. It will save you and your users time in the long run.
Note that for different reasons we chose codegen, but that is not the only option. There is really a broad category of metaprogramming features that can solve this problem just as well or even better than codegen; they are language-specific, but often much easier to maintain (i.e. no need to add a build step). Some examples are:
- `F# Type Providers <https://docs.microsoft.com/en-us/dotnet/fsharp/tutorials/type-providers/creating-a-type-provider#a-type-provider-that-is-backed-by-local-data>`_
- `Template Haskell <https://wiki.haskell.org/Template_Haskell>`_
Build Ledger Commands
=====================
No matter what approach you take, either manually building commands or writing a codegen to do this, you need to understand how ledger commands are structured. This section demonstrates how to build create and exercise commands manually and how it can be done using contract classes.
Create Command
--------------
Let's recall an **IOU** example from the :doc:`Quickstart guide </app-dev/bindings-java/quickstart>`, where `Iou` template is defined like this:
.. literalinclude:: /app-dev/bindings-java/quickstart/template-root/daml/Iou.daml
:language: daml
:start-after: -- BEGIN_IOU_TEMPLATE_DATATYPE
:end-before: -- END_IOU_TEMPLATE_DATATYPE
If you omit any of the above fields, mistype their names or values, or do not order them exactly as they are in the Daml template, the above code will compile but fail at run time because you did not structure your create command correctly.
Exercise Command
----------------
To build :ref:`com.daml.ledger.api.v1.ExerciseCommand` for `Iou_Transfer`:
.. literalinclude:: /app-dev/bindings-java/quickstart/template-root/daml/Iou.daml
:language: daml
:start-after: -- BEGIN_IOU_TEMPLATE_TRANSFER
:end-before: -- END_IOU_TEMPLATE_TRANSFER
Summary
=======
When creating custom bindings for Daml Ledgers, you will need to:
- generate **Ledger API** from the gRPC definitions
- decide whether to write a codegen to generate ledger commands or manually build them for all contracts defined in your Daml model.
The above examples should help you get started. If you are creating custom binding or have any questions, see the :doc:`/support/support` page for how to get in touch with us.
Links
=====
- gRPC documentation: https://grpc.io/docs/
- Documentation for Protobuf "well known types": https://developers.google.com/protocol-buffers/docs/reference/google.protobuf
- Daml Ledger API gRPC Protobuf definitions
- current main: https://github.com/digital-asset/daml/tree/main/ledger-api/grpc-definitions
- for specific versions: https://github.com/digital-asset/daml/releases
- Required gRPC Protobuf definitions:
- https://raw.githubusercontent.com/grpc/grpc/v1.18.0/src/proto/grpc/status/status.proto
- https://raw.githubusercontent.com/grpc/grpc/v1.18.0/src/proto/grpc/health/v1/health.proto

View File

@ -1,388 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _command-deduplication:
Command Deduplication
#####################
The interaction of a Daml application with the ledger is inherently asynchronous: applications send commands to the ledger, and some time later they see the effect of that command on the ledger.
Many things can fail during this time window:
- The application can crash.
- The participant node can crash.
- Messages can be lost on the network.
- The ledger may be slow to respond due to a high load.
If you want to make sure that an intended ledger change is not executed twice, your application needs to robustly handle all failure scenarios.
This guide covers the following topics:
- :ref:`How command deduplication works <command-dedup-workings>`.
- :ref:`How applications can effectively use the command deduplication <command-dedup-usage>`.
.. _command-dedup-workings:
How Command Deduplication Works
*******************************
The following fields in a command submission are relevant for command deduplication.
The first three form the :ref:`change ID <change-id>` that identifies the intended ledger change.
- The union of :ref:`party <com.daml.ledger.api.v1.Commands.party>` and :ref:`act_as <com.daml.ledger.api.v1.Commands.act_as>` define the submitting parties.
- The :ref:`application ID <com.daml.ledger.api.v1.Commands.application_id>` identifies the application that submits the command.
- The :ref:`command ID <com.daml.ledger.api.v1.Commands.command_id>` is chosen by the application to identify the intended ledger change.
- The deduplication period specifies the period for which no earlier submissions with the same change ID should have been accepted, as witnessed by a completion event on the :ref:`command completion service <command-completion-service>`.
If such a change has been accepted in that period, the current submission shall be rejected.
The period is specified either as a :ref:`deduplication duration <com.daml.ledger.api.v1.Commands.deduplication_duration>` or as a :ref:`deduplication offset <com.daml.ledger.api.v1.Commands.deduplication_offset>` (inclusive).
- The :ref:`submission ID <com.daml.ledger.api.v1.Commands.submission_id>` is chosen by the application to identify a specific submission.
It is included in the corresponding completion event so that the application can correlate specific submissions to specific completions.
An application should never reuse a submission ID.
The ledger may arbitrarily extend the deduplication period specified in the submission, even beyond the maximum deduplication duration specified in the :ref:`ledger configuration <ledger-configuration-service>`.
.. note::
The maximum deduplication duration is the length of the deduplication period guaranteed to be supported by the participant.
The deduplication period chosen by the ledger is the *effective deduplication period*.
The ledger may also convert a requested deduplication duration into an effective deduplication offset or vice versa.
The effective deduplication period is reported in the command completion event in the :ref:`deduplication duration <com.daml.ledger.api.v1.Completion.deduplication_duration>` or :ref:`deduplication offset <com.daml.ledger.api.v1.Completion.deduplication_offset>` fields.
A command submission is considered a **duplicate submission** if at least one of the following holds:
- The submitting participant's completion service contains a successful completion event for the same :ref:`change ID <change-id>` within the *effective* deduplication period.
- The participant or Daml ledger are aware of another command submission in-flight with the same :ref:`change ID <change-id>` when they perform command deduplication.
The outcome of command deduplication is communicated as follows:
- Command submissions via the :ref:`command service <command-service>` indicate the command deduplication outcome as a synchronous gRPC response unless the `gRPC deadline <https://grpc.io/blog/deadlines/>`_ was exceeded.
.. note::
The outcome MAY additionally appear as a completion event on the :ref:`command completion service <command-completion-service>`,
but applications using the :ref:`command service <command-service>` typically need not process completion events.
- Command submissions via the :ref:`command submission service <command-submission-service>` can indicate the outcome as a synchronous gRPC response,
or asynchronously through the :ref:`command completion service <command-completion-service>`.
In particular, the submission may be a duplicate even if the command submission service acknowledges the submission with the gRPC status code ``OK``.
Independently of how the outcome is communicated, command deduplication generates the following outcomes of a command submission:
- If there is no conflicting submission with the same :ref:`change ID <change-id>` on the Daml ledger or in-flight, the completion event and possibly the response convey the result of the submission (success or a gRPC error; :doc:`/app-dev/grpc/error-codes` explains how errors are communicated).
- The gRPC status code ``ALREADY_EXISTS`` with error code ID :ref:`DUPLICATE_COMMAND <error_code_DUPLICATE_COMMAND>` indicates that there is an earlier command completion for the same :ref:`change ID <change-id>` within the effective deduplication period.
- The gRPC status code ``ABORTED`` with error code id :ref:`SUBMISSION_ALREADY_IN_FLIGHT <error_code_SUBMISSION_ALREADY_IN_FLIGHT>` indicates that another submission for the same :ref:`change ID <change-id>` was in flight when this submission was processed.
- The gRPC status code ``FAILED_PRECONDITION`` with error code id :ref:`INVALID_DEDUPLICATION_PERIOD <error_code_INVALID_DEDUPLICATION_PERIOD>` indicates that the specified deduplication period is not supported.
The fields ``longest_duration`` or ``earliest_offset`` in the metadata specify the longest duration or earliest offset that is currently supported on the Ledger API endpoint.
At least one of the two fields is present.
Neither deduplication durations up to the :ref:`maximum deduplication duration <com.daml.ledger.api.v1.LedgerConfiguration.max_deduplication_duration>` nor deduplication offsets published within that duration SHOULD result in this error.
Participants may accept longer periods at their discretion.
- The gRPC status code ``FAILED_PRECONDITION`` with error code id :ref:`PARTICIPANT_PRUNED_DATA_ACCESSED <error_code_PARTICIPANT_PRUNED_DATA_ACCESSED>`, when specifying a deduplication period represented by an offset, indicates that the specified deduplication offset has been pruned.
The field ``earliest_offset`` in the metadata specifies the last pruned offset.
For deduplication to work as intended, all submissions for the same ledger change must be submitted via the same participant.
Whether a submission is considered a duplicate is determined by completion events, and by default a participant outputs only the completion events for submissions that were requested via the very same participant.
.. _command-dedup-usage:
How to Use Command Deduplication
********************************
To effectuate a ledger change exactly once, the application must resubmit a command if an earlier submission was lost.
However, the application typically cannot distinguish a lost submission from slow submission processing by the ledger.
Command deduplication allows the application to resubmit the command until it is executed and reject all duplicate submissions thereafter.
Some ledger changes can be executed at most once, so no command deduplication is needed for them.
For example, if the submitted command exercises a consuming choice on a given contract ID, this command can be accepted at most once because every contract can be archived at most once.
All duplicate submissions of such a change will be rejected with :ref:`CONTRACT_NOT_ACTIVE <error_code_CONTRACT_NOT_ACTIVE>`.
In contrast, a :ref:`Create command <com.daml.ledger.api.v1.CreateCommand>` would create a fresh contract instance of the given :ref:`template <com.daml.ledger.api.v1.CreateCommand.template_id>` for each submission that reaches the ledger (unless other constraints such as the :ref:`template preconditions <daml-ref-preconditions>` or contract key uniqueness are violated).
Similarly, an :ref:`Exercise command <com.daml.ledger.api.v1.ExerciseCommand>` on a non-consuming choice or an :ref:`Exercise-By-Key command <com.daml.ledger.api.v1.ExercisebyKeyCommand>` may be executed multiple times if submitted multiple times.
With command deduplication, applications can ensure such intended ledger changes are executed only once within the deduplication period, even if the application resubmits, say because it considers the earlier submissions to be lost or forgot during a crash that it had already submitted the command.
Known Processing Time Bounds
============================
For this strategy, you must estimate a bound ``B`` on the processing time and forward clock drifts in the Daml ledger with respect to the application's clock.
If processing measured across all retries takes longer than your estimate ``B``, the ledger change may take effect several times.
Under this caveat, the following strategy works for applications that use the :ref:`Command Service <command-service>` or the :ref:`Command Submission <command-submission-service>` and :ref:`Command Completion Service <command-completion-service>`.
.. note::
The bound ``B`` should be at most the configured :ref:`maximum deduplication duration <com.daml.ledger.api.v1.LedgerConfiguration.max_deduplication_duration>`.
Otherwise you rely on the ledger accepting longer deduplication durations.
Such reliance makes your application harder to port to other Daml ledgers and fragile, as the ledger may stop accepting such extended durations at its own discretion.
.. _dedup-bounded-step-command-id:
#. Choose a command ID for the ledger change, in a way that makes sure the same ledger change is always assigned the same command ID.
Either determine the command ID deterministically (e.g., if your contract payload contains a globally unique identifier, you can use that as your command ID), or choose the command ID randomly and persist it with the ledger change so that the application can use the same command ID in resubmissions after a crash and restart.
.. note::
Make sure that you assign the same command ID to all command (re-)submissions of the same ledger change.
This is useful for the recovery procedure after an application crash/restart.
After a crash, the application in general cannot know whether it has submitted a set of commands before the crash.
If in doubt, resubmit the commands using the same command ID.
If the commands had been submitted before the crash, command deduplication on the ledger will reject the resubmissions.
.. _dedup-bounded-step-offset:
#. When you use the :ref:`Command Completion Service <command-completion-service>`, obtain a recent offset on the completion stream ``OFF1``, say the :ref:`current ledger end <com.daml.ledger.api.v1.CompletionEndRequest>`.
.. _dedup-bounded-step-submit:
#. Submit the command with the following parameters:
- Set the :ref:`command ID <com.daml.ledger.api.v1.Commands.command_id>` to the chosen command ID from :ref:`Step 1 <dedup-bounded-step-command-id>`.
- Set the :ref:`deduplication duration <com.daml.ledger.api.v1.Commands.deduplication_duration>` to the bound ``B``.
.. note::
It is prudent to explicitly set the deduplication duration to the desired bound ``B``,
to guard against the case where a ledger configuration update shortens the maximum deduplication duration.
With the bound ``B``, you will be notified of such a problem via an :ref:`INVALID_DEDUPLICATION_PERIOD <error_code_INVALID_DEDUPLICATION_PERIOD>` error
if the ledger does not support deduplication durations of length ``B`` any more.
If you omitted the deduplication period, the currently valid maximum deduplication duration would be used.
In this case, a ledger configuration update could silently shorten the deduplication period and thus invalidate your deduplication analysis.
- Set the :ref:`submission ID <com.daml.ledger.api.v1.Commands.submission_id>` to a fresh value, e.g., a random UUID.
- Set the timeout (gRPC deadline) to the expected submission processing time (Command Service) or submission hand-off time (Command Submission Service).
The **submission processing time** is the time between when the application sends off a submission to the :ref:`Command Service <command-service>` and when it receives (synchronously, unless it times out) the acceptance or rejection.
The **submission hand-off time** is the time between when the application sends off a submission to the :ref:`Command Submission Service <command-submission-service>` and when it obtains a synchronous response for this gRPC call.
After the RPC timeout, the application considers the submission as lost and enters a retry loop.
This timeout is typically much shorter than the deduplication duration.
.. _dedup-bounded-step-await:
#. Wait until the RPC call returns a response.
- Status codes other than ``OK`` should be handled according to :ref:`error handling <dedup-bounded-error-handling>`.
- When you use the :ref:`Command Service <command-service>` and the response carries the status code ``OK``, the ledger change took place.
You can report success.
- When you use the :ref:`Command Submission Service <command-submission-service>`,
subscribe with the :ref:`Command Completion Service <command-completion-service>` for completions for ``actAs`` from ``OFF1`` (exclusive) until you see a completion event for the change ID and the submission ID chosen in :ref:`Step 3 <dedup-bounded-step-submit>`.
If the completion's status is ``OK``, the ledger change took place and you can report success.
Other status codes should be handled according to :ref:`error handling <dedup-bounded-error-handling>`.
This step needs no timeout as the :ref:`Command Submission Service <command-submission-service>` acknowledges a submission only if there will eventually be a completion event, unless relevant parts of the system become permanently unavailable.
.. _dedup-bounded-error-handling:
Error Handling
--------------
Error handling is needed when the status code of the command submission RPC call or in the :ref:`completion event <com.daml.ledger.api.v1.Completion.status>` is not ``OK``.
The following table lists appropriate reactions by status code (written as ``STATUS_CODE``) and error code (written in capital letters with a link to the error code documentation).
Fields in the error metadata are written as ``field`` in lowercase letters.
.. list-table:: Command deduplication error handling with known processing time bound
:widths: 10 50
:header-rows: 1
- * Error condition
* Reaction
- * ``DEADLINE_EXCEEDED``
* Consider the submission lost.
Retry from :ref:`Step 2 <dedup-bounded-step-offset>`, obtaining the completion offset ``OFF1``, and possibly increase the timeout.
- * Application crashed
* Retry from :ref:`Step 2 <dedup-bounded-step-offset>`, obtaining the completion offset ``OFF1``.
- * ``ALREADY_EXISTS`` / :ref:`DUPLICATE_COMMAND <error_code_DUPLICATE_COMMAND>`
* The change ID has already been accepted by the ledger within the reported deduplication period.
The optional field ``completion_offset`` contains the precise offset.
The optional field ``existing_submission_id`` contains the submission ID of the successful submission.
Report success for the ledger change.
- * ``FAILED_PRECONDITION`` / :ref:`INVALID_DEDUPLICATION_PERIOD <error_code_INVALID_DEDUPLICATION_PERIOD>`
* The specified deduplication period is longer than what the Daml ledger supports or the ledger cannot handle the specified deduplication offset.
``earliest_offset`` contains the earliest deduplication offset or ``longest_duration`` contains the longest deduplication duration that can be used (at least one of the two must be provided).
Options:
- Negotiate support for longer deduplication periods with the ledger operator.
- Set the deduplication offset to ``earliest_offset`` or the deduplication duration to ``longest_duration`` and retry from :ref:`Step 2 <dedup-bounded-step-offset>`, obtaining the completion offset ``OFF1``.
This may lead to accepting the change twice within the originally intended deduplication period.
- * ``FAILED_PRECONDITION`` / :ref:`PARTICIPANT_PRUNED_DATA_ACCESSED <error_code_PARTICIPANT_PRUNED_DATA_ACCESSED>`
* The specified deduplication offset has been pruned by the participant.
``earliest_offset`` contains the last pruned offset.
Use the :ref:`Command Completion Service <command-completion-service>` by asking for the :ref:`completions <com.daml.ledger.api.v1.CompletionStreamRequest>`, starting from the last pruned offset by setting :ref:`offset <com.daml.ledger.api.v1.CompletionStreamRequest.offset>` to the value of
``earliest_offset``, and use the first received :ref:`offset <com.daml.ledger.api.v1.Checkpoint.offset>` as a deduplication offset.
- * ``ABORTED`` / :ref:`SUBMISSION_ALREADY_IN_FLIGHT <error_code_SUBMISSION_ALREADY_IN_FLIGHT>`
This error occurs only as an RPC response, not inside a completion event.
* There is already another submission in flight, with the submission ID in ``existing_submission_id``.
- When you use the :ref:`Command Service <command-service>`, wait a bit and retry from :ref:`Step 3 <dedup-bounded-step-submit>`, submitting the command.
Since the in-flight submission might still be rejected, (repeated) resubmission ensures that you (eventually) learn the outcome:
If an earlier submission was accepted, you will eventually receive a :ref:`DUPLICATE_COMMAND <error_code_DUPLICATE_COMMAND>` rejection.
Otherwise, you have a second chance to get the ledger change accepted on the ledger and learn the outcome.
- When you use the :ref:`Command Completion Service <command-completion-service>`, look for a completion for ``existing_submission_id`` instead of the chosen submission ID in :ref:`Step 4 <dedup-bounded-step-await>`.
- * ``ABORTED`` / other error codes
* Wait a bit and retry from :ref:`Step 2 <dedup-bounded-step-offset>`, obtaining the completion offset ``OFF1``.
- * other error conditions
* Use background knowledge about the business workflow and the current ledger state to decide whether earlier submissions might still get accepted.
- If you conclude that it cannot be accepted any more, stop retrying and report that the ledger change failed.
- Otherwise, retry from :ref:`Step 2 <dedup-bounded-step-offset>`, obtaining a completion offset ``OFF1``, or give up without knowing for sure that the ledger change will not happen.
For example, if the ledger change only creates a contract instance of a template, you can never be sure, as any outstanding submission might still be accepted on the ledger.
In particular, you must not draw any conclusions from not having received a :ref:`SUBMISSION_ALREADY_IN_FLIGHT <error_code_SUBMISSION_ALREADY_IN_FLIGHT>` error, because the outstanding submission may be queued somewhere and will reach the relevant processing point only later.
Failure Scenarios
-----------------
The above strategy can fail in the following scenarios:
#. The bound ``B`` is too low: The command can be executed multiple times.
Possible causes:
- You have retried for longer than the deduplication duration, but never got a meaningful answer, e.g., because the timeout (gRPC deadline) is too short.
For example, this can happen due to long-running Daml interpretation when using the :ref:`Command Service <command-service>`.
- The application clock drifts significantly from the participant's or ledger's clock.
- There are unexpected network delays.
- Submissions are retried internally in the participant or Daml ledger and those retries do not stop before ``B`` is over.
Refer to the specific ledger's documentation for more information.
#. Unacceptable changes cause infinite retries
You need business workflow knowledge to decide that retrying does not make sense any more.
Of course, you can always stop retrying and accept that you do not know the outcome for sure.
Unknown Processing Time Bounds
==============================
Finding a good bound ``B`` on the processing time is hard, and there may still be unforeseen circumstances that delay processing beyond the chosen bound ``B``.
You can avoid these problems by using deduplication offsets instead of durations.
An offset defines a point in the history of the ledger and is thus not affected by clock skews and network delays.
Offsets are arguably less intuitive and require more effort by the application developer.
We recommend the following strategy for using deduplication offsets:
#. Choose a fresh command ID for the ledger change and the ``actAs`` parties, which (together with the application ID) determine the change ID.
Remember the command ID across application crashes.
(Analogous to :ref:`Step 1 above <dedup-bounded-step-command-id>`)
.. _dedup-unbounded-step-dedup-offset:
#. Obtain a recent offset ``OFF0`` on the completion event stream and remember across crashes that you use ``OFF0`` with the chosen command ID. There are several ways to do so:
- Use the :ref:`Command Completion Service <command-completion-service>` by asking for the :ref:`current ledger end <com.daml.ledger.api.v1.CompletionEndRequest>`.
.. note::
Some ledger implementations reject deduplication offsets that do not identify a command completion visible to the submitting parties with the error code id :ref:`INVALID_DEDUPLICATION_PERIOD <error_code_INVALID_DEDUPLICATION_PERIOD>`.
In general, the ledger end need not identify a command completion that is visible to the submitting parties.
When running on such a ledger, use the Command Service approach described next.
- Use the :ref:`Command Service <command-service>` to obtain a recent offset by repeatedly submitting a dummy command, e.g., a :ref:`Create-And-Exercise command <com.daml.ledger.api.v1.CreateAndExerciseCommand>` of some single-signatory template with the :ref:`Archive <function-da-internal-template-functions-archive-2977>` choice, until you get a successful response.
The response contains the :ref:`completion offset <com.daml.ledger.api.v1.SubmitAndWaitForTransactionIdResponse.completion_offset>`.
.. _dedup-unbounded-step-offset:
#. When you use the :ref:`Command Completion Service <command-completion-service>`:
- If you execute this step the first time, set ``OFF1 = OFF0``.
- If you execute this step as part of :ref:`error handling <dedup-unbounded-error-handling>` retrying from Step 3, obtaining the completion offset ``OFF1``,
obtain a recent offset on the completion stream ``OFF1``, say its current end.
(Analogous to :ref:`step 2 above <dedup-bounded-step-offset>`)
#. Submit the command with the following parameters (analogous to :ref:`Step 3 above <dedup-bounded-step-submit>` except for the deduplication period):
- Set the :ref:`command ID <com.daml.ledger.api.v1.Commands.command_id>` to the chosen command ID from :ref:`Step 1 <dedup-bounded-step-command-id>`.
- Set the :ref:`deduplication offset <com.daml.ledger.api.v1.Commands.deduplication_offset>` to ``OFF0``.
- Set the :ref:`submission ID <com.daml.ledger.api.v1.Commands.submission_id>` to a fresh value, e.g., a random UUID.
- Set the timeout (gRPC deadline) to the expected submission processing time (Command Service) or submission hand-off time (Command Submission Service).
#. Wait until the RPC call returns a response.
- Status codes other than ``OK`` should be handled according to :ref:`error handling <dedup-bounded-error-handling>`.
- When you use the :ref:`Command Service <command-service>` and the response carries the status code ``OK``, the ledger change took place.
You can report success.
The response contains a :ref:`completion offset <com.daml.ledger.api.v1.SubmitAndWaitForTransactionIdResponse.completion_offset>` that you can use in :ref:`Step 2 <dedup-unbounded-step-dedup-offset>` of later submissions.
- When you use the :ref:`Command Submission Service <command-submission-service>`,
subscribe with the :ref:`Command Completion Service <command-completion-service>` for completions for ``actAs`` from ``OFF1`` (exclusive) until you see a completion event for the change ID and the submission ID chosen in :ref:`step 3 <dedup-bounded-step-submit>`.
If the completion's status is ``OK``, the ledger change took place and you can report success.
Other status codes should be handled according to :ref:`error handling <dedup-bounded-error-handling>`.
.. _dedup-unbounded-error-handling:
Error Handling
--------------
The same as :ref:`for known bounds <dedup-bounded-error-handling>`, except that the former retry from :ref:`Step 2 <dedup-bounded-step-offset>` becomes retry from :ref:`Step 3 <dedup-unbounded-step-offset>`.
Failure Scenarios
-----------------
The above strategy can fail in the following scenarios:
#. No success within the supported deduplication period
When the application receives a :ref:`INVALID_DEDUPLICATION_PERIOD <error_code_INVALID_DEDUPLICATION_PERIOD>` error, it cannot achieve exactly once execution any more within the originally intended deduplication period.
#. Unacceptable changes cause infinite retries
You need business workflow knowledge to decide that retrying does not make sense any more.
Of course, you can always stop retrying and accept that you do not know the outcome for sure.
..
Command deduplication on the JSON API
*************************************

View File

@ -1,396 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _custom-views:
Custom Views
############
The Custom Views library provides convenient features to continuously ingest data from a ledger into a database,
into tables of your choice, optimized for your querying requirements.
Custom Views is a Java library for **projecting** ledger events into a SQL database.
It is currently available in :doc:`Labs</support/status-definitions>` early access, and only supports PostgreSQL right now.
Use the following Maven dependency to use the Custom Views library:
.. code-block:: xml
<dependency>
<groupId>com.daml</groupId>
<artifactId>custom-views_2.13</artifactId>
<version>2.5.0</version>
</dependency>
Please see the `Custom Views github repository <https://github.com/digital-asset/custom-views/>`_ for an example Maven project of a simple application
that projects data into a SQL table and provides access to the data through a REST interface.
Overview
********
A **Projection** is a resumable process that continuously reads ledger events and transforms these into rows in SQL tables.
A projection ensures that rows are committed according to ledger transactions,
ensuring that the isolation and atomicity of changes perceived by database users are consistent with committed transactions on the ledger.
At a high level, the following types are needed to run a projection process:
- A ``BatchSource`` connects to the ledger and reads events from it.
- A ``Projection`` defines which events to process from the ``BatchSource``, from which ``Offset`` to start processing, optionally up to an end ``Offset``.
- A ``Project`` function converts the events into database actions. A ``JdbcAction`` type defines which SQL statements should be executed.
- The ``project`` method on ``Projector`` takes a ``BatchSource``, a ``Projection``, and a ``Project`` function. The ``project`` method starts the projection process. Database transactions are committed as they occur on the ledger.
A common workflow for setting up a projection process follows:
- Create a table in your SQL database.
- Create a ``Projector``.
- Choose the type of event you want to project and create a ``BatchSource`` for it.
- Create a ``Projection``. If the projection already exists, it will continue where it left off.
- Create a ``Project`` function that transforms an event into (0 to N) database actions.
- Invoke ``project`` on the ``Projector``, passing in the ``BatchSource``, the ``Projection``, and the ``Project`` function. This starts the projection process, and returns a ``Control`` to control the process.
- Cancel the projection by invoking ``control.cancel`` on shutdown of your application.
The next sections explain the most important objects in the Custom Views library in more detail.
Projector
*********
A ``Projector`` executes the projection process. The code snippet below shows how to create a ``JdbcProjector``.
.. code-block:: java
var config = new HikariConfig();
config.setJdbcUrl(url);
config.setUsername(user);
config.setPassword(password);
var ds = new HikariDataSource(config);
var system = ActorSystem.create("my-projection-app");
var projector = JdbcProjector.create(ds, system);
A ``Projector`` provides ``project`` methods to start a projection process.
A `DataSource` is used to create database connections when required. In this example a `Hikari connection pool <https://github.com/brettwooldridge/HikariCP>`_ is used.
The ``project`` methods return a ``Control`` which can be used to:
- Cancel the projection.
- Find out if the projection has completed or failed.
- Wait for the projection process to close all its resources.
A projection only completes if an end ``Offset`` is set, otherwise it continuously runs and projects events as they occur on the ledger.
The ``project`` methods take a ``BatchSource``, a ``Projection`` and a ``Project`` function, which are explained in the next sections.
BatchSource
===========
A projection connects to the ledger and reads events using a ``BatchSource``, which internally uses :doc:`the Ledger API with gRPC</app-dev/grpc/index>`.
The Ledger API provides the following types of events:
- ``Event`` (``CreatedEvent`` or ``ArchivedEvent``)
- ``ExercisedEvent``
- ``TreeEvent``
The projection library uses the ``Event``, ``ExercisedEvent`` and ``TreeEvent`` classes from the :doc:`Java Bindings</app-dev/bindings-java/index>`
in the ``com.daml.ledger.javaapi.data`` package to represent these events.
The following ``BatchSource``\s are available:
- ``BatchSource.events`` creates a ``BatchSource`` that reads ``Event``\s from the ledger.
- ``BatchSource.exercisedEvents`` creates a ``BatchSource`` that reads ``ExercisedEvent``\s from the ledger.
- ``BatchSource.treeEvents`` creates a ``BatchSource`` that reads ``TreeEvent``\s from the ledger.
The example below shows how to create a ``BatchSource`` that reads ``CreatedEvent``\s and ``ArchivedEvent``\s from the ledger at ``localhost``, port ``6865``:
.. code-block:: java
var grpcClientSettings = GrpcClientSettings.connectToServiceAt("localhost", 6865, system);
var source = BatchSource.events(grpcClientSettings);
Additionally ``BatchSource.create`` creates a ``BatchSource`` from code-generated ``Contract`` types from ``CreateEvent``\s,
or creates a ``BatchSource`` from simple values, which is convenient for unit testing.
Batch
-----
A ``BatchSource`` reads events into ``Batch``\es. A ``Batch`` consists of 1 to many events, and optionally contains a marker that indicates that a transaction has been committed on the ledger.
`Batches` make it possible to process larger than memory transactions, while tracking transactions as they occur on the ledger, and making it possible for downstream
database transactions to only commit when these transaction markers have been detected.
Envelope
--------
The events in `Batches` are wrapped in `Envelopes`. An ``Envelope`` provides additional fields providing more context about what occurred on the ledger.
It has the following fields:
- ``event``: The wrapped value. ``getEvent`` and ``unwrap()`` both provide this value.
- ``offset``: The offset of the event.
- ``table``: The (main) ``ProjectionTable`` that is projected to.
- ``workflowId`` (optional)
- ``ledgerEffectiveTime`` (optional)
- ``transactionId`` (optional)
Projection
==========
The ``Projection`` keeps track of the projection process and decides which events will be projected from the ``BatchSource``.
A Projection:
- has a `ProjectionId` that must uniquely identify the projection process.
- has an ``Offset`` which is used as a starting point to read from the ledger.
- has a ``ProjectionFilter``. The ``BatchSource`` uses this filter to select events from the ledger. (If you are familiar with the gRPC service, the ``ProjectionFilter`` translates to a ``TransactionFilter``)
- specifies an SQL table to project to with a ``ProjectionTable``.
- optionally has a ``Predicate`` to filter events that were read from the ledger.
- optionally has an end ``Offset``, if set the projection ends when a transaction for the ``Offset`` has been read from the ledger.
- is stored in the ``projection`` SQL table.
A newly created projection by default has no offset, which means a projection starts from the beginning of the ledger.
A projection updates when it successfully commits transactions into the SQL database according to transactions that were committed on the ledger.
A projection resumes from its stored offset automatically, if it can be found by its `ProjectionId`.
The code below shows an example of how to create a `Projection`:
.. code-block:: java
var projectionTable = new ProjectionTable("ious");
var eventsProjection =
Projection.<Event>create(
new ProjectionId("iou-projection-for-party"),
ProjectionFilter.parties(Set.of(partyId))
);
The ``eventsProjection`` ``Projection`` selects ``Event``\s that occurred visible to the party ``partyId`` to the ``ious`` SQL table.
The Project function
====================
The `Project<E,A>` function projects an event `Envelope<E>` into a `List<A>`.
For the ``project`` methods on ``JdbcProjector``, `A` is a ``JdbcAction``.
The code below shows an example of a ``Project`` function that handles `CreatedEvents` and `ArchivedEvents`.
.. code-block:: java
Project<Event, JdbcAction> f =
envelope -> {
var event = envelope.getEvent();
if (event instanceof CreatedEvent) {
Iou.Contract iou = Iou.Contract.fromCreatedEvent((CreatedEvent) event);
var action =
ExecuteUpdate.create(
"insert into "
+ projectionTable.getName()
+ "(contract_id, event_id, amount, currency) "
+ "values (?, ?, ?, ?)"
)
.bind(1, event.getContractId(), Bind.String())
.bind(2, event.getEventId(), Bind.String())
.bind(3, iou.data.amount, Bind.BigDecimal())
.bind(4, iou.data.currency, Bind.String());
return List.of(action);
} else {
var action =
ExecuteUpdate.create(
"delete from " +
projectionTable.getName() +
" where contract_id = ?"
)
.bind(1, event.getContractId(), Bind.String());
return List.of(action);
}
};
The ``Project`` function `f` creates an insert action for every ``CreatedEvent`` and a delete action for every ``ArchivedEvent``.
The ``JdbcAction``\s are further explained in the next section.
The JdbcAction
--------------
A database action captures a SQL statement that is executed by a ``Projector``.
The ``JdbcAction`` is an interface with one method, shown in the example below:
.. code-block:: java
public int execute(java.sql.Connection con);
All actions extend ``JdbcAction``. ``execute`` should return the number of rows affected by the action.
The ``ExecuteUpdate`` action creates an insert, delete, or update statement.
The example below shows how an insert statement can be created, and how arguments can be bound to the statement:
.. code-block:: java
ExecuteUpdate.create(
"insert into "
+ projectionTable.getName()
+ "(contract_id, event_id, amount, currency) "
+ "values (?, ?, ?, ?)")
.bind(1, event.getContractId(), Bind.String())
.bind(2, event.getEventId(), Bind.String())
.bind(3, iou.data.amount, Bind.BigDecimal())
.bind(4, iou.data.currency, Bind.String());
It is also possible to use named parameters, which is shown in the example below:
.. code-block:: java
ExecuteUpdate.create(
"insert into "
+ projectionTable.getName()
+ "(contract_id, event_id, amount, currency) "
+ "values (:cid, :eid, :amount, :currency)")
.bind("cid", event.getContractId(), Bind.String())
.bind("eid", event.getEventId(), Bind.String())
.bind("amount", iou.data.amount, Bind.BigDecimal())
.bind("currency", iou.data.currency, Bind.String());
Projecting rows in batches
--------------------------
The `ExecuteUpdate` action internally creates a new ``java.sql.PreparedStatement`` when it is executed.
Use `UpdateMany` if you want to reuse the ``java.sql.PreparedStatement`` and add statements in batches, which can make a considerable difference to performance.
The example below shows how you can use ``projectRows`` to project using ``UpdateMany``.
In this case we are using a code generated ``Iou.Contract`` class to function as a `Row`, which we use to bind to a SQL statement
which is executed in batches.
.. code-block:: java
var projectionTable = new ProjectionTable("ious");
var contracts = Projection.<Iou.Contract>create(
new ProjectionId("iou-contracts-for-party"),
ProjectionFilter.parties(Set.of(partyId)),
projectionTable
);
var batchSource = BatchSource.create(grpcClientSettings,
e -> {
return Iou.Contract.fromCreatedEvent(e);
});
Project<Iou.Contract, Iou.Contract> mkRow =
envelope -> {
return List.of(envelope.getEvent());
};
Binder<Iou.Contract> binder = Sql.<Iou.Contract>binder(
"insert into "
+ projectionTable.getName()
+ "(contract_id, event_id, amount, currency) "
+ "values (:contract_id, :event_id, :amount, :currency)")
.bind("contract_id", iou -> iou.id.contractId, Bind.String())
.bind("event_id", iou -> null, Bind.String())
.bind("amount", iou -> iou.data.amount, Bind.BigDecimal())
.bind("currency", iou -> iou.data.currency, Bind.String());
BatchRows<Iou.Contract, JdbcAction> batchRows =
UpdateMany.create(binder);
var control =
projector.projectRows(
batchSource,
contracts,
batchRows,
mkRow
);
The ``Project`` function just returns the ``Iou.Contract`` since we can use this directly for our insert statement.
Next we use a ``Binder`` to bind the ``Iou.Contract`` to the insert statement.
The ``UpdateMany.create`` creates a ``BatchRow``\s function that transforms a ``List`` of rows, in this case ``Iou.Contract``\s, into a single ``JdbcAction``.
``projectRows`` starts the projection process, converting created ``Iou.Contract``\s into rows in the ``ious`` table.
Configuration
*************
The Custom Views library uses the `Lightbend config library <https://github.com/lightbend/config>`_ for configuration.
The library is packaged with a ``reference.conf`` file which specifies default settings. The next sections describe the default configuration settings.
You can override the configuration by using an ``application.conf`` file, see `using the Lightbend config library <https://github.com/lightbend/config#using-the-library>`_ for more details.
Database migration with Flyway
==============================
`Flyway <https://flywaydb.org/documentation/>`_ is used for database migration. Resources to create and migrate the database objects
that the library needs internally are provided, for instance for the `projection` table that is used to persist `Projection`\s.
The internal SQL scripts are provided in the jar at `/db/migration/projection`.
The `reference.conf` file configures this by default, shown below:
.. code-block:: none
projection {
# The name of the projection table which keeps track of all projections by projection-id
projection-table-name = "projection"
# database migration configuration
flyway {
# location of flyway migration schemas for internal bookkeeping (the projection-table).
internal-locations = ["db/migration/projection"]
# Override locations to provide your own flyway scripts.
locations = []
# If set to true, database migration is executed automatically.
migrate-on-start = true
}
}
The `projection` table is created automatically when a projection process is started with the ``project``, ``projectRows``, or ``projectEvents`` method on ``Projector``.
Provide additional flyway locations with the `projection.flyway.locations` configuration parameter and bundle your own resources as explained
`in the Flyway documentation <https://flywaydb.org/documentation/concepts/migrations#discovery>`_. This makes it possible to create and migrate
database tables and other database objects required for your projections automatically when a projection is (re-)started.
If you do not want to use Flyway database migration, set `projection.flyway.migrate-on-start` to false. In that case you have to create the `projection` table yourself as well.
Batcher configuration
=====================
A ``Batch`` consists of 1 to many events, and optionally contains a marker that indicates that a transaction has been committed on the ledger.
Both the ``batch-size`` and the ``batch-interval`` are configured in the reference.conf:
.. code-block:: none
projection {
batch-size = 10000
batch-interval = 1 second
}
Dispatcher configuration for blocking operation
===============================================
A default dedicated dispatcher for blocking operations (e.g. db operation) is configured in reference.conf:
.. code-block:: none
projection {
blocking-io-dispatcher {
type = Dispatcher
executor = "thread-pool-executor"
thread-pool-executor {
fixed-pool-size = 16
}
throughput = 1
}
}
Ledger API Authorization
========================
The client must provide an access token when authorization is required by the Ledger.
For details of ledger authorization, please refer to `Ledger Authorization documentation <https://docs.daml.com/app-dev/authorization.html>`_.
Provide access token to custom-view library
-------------------------------------------
Applications can provide an access token when setting up the client. The example below shows how to set `LedgerCallCredentials` on the `GrpcClientSettings`.
.. code-block:: java
var grpcClientSettings = GrpcClientSettings
.connectToServiceAt("localhost", 6865, system)
.withCallCredentials(new LedgerCallCredentials(accessToken));
var source = BatchSource.events(grpcClientSettings);
var control = projector.project(source, events, f);
Provide a newly retrieved access token when the existing one has expired
------------------------------------------------------------------------
When an access token is expired, an application can retrieve a new access token with the stored refresh token.
For details on the refresh token, please refer to `Ledger auth-middleware documentation <https://docs.daml.com/tools/auth-middleware/index.html#refresh-access-token>`_.
With the new access token, an application can cancel the running projection and re-create a new one using the new token.
.. code-block:: java
control.cancel().thenApply(done -> {
var sourceWithNewToken = BatchSource.events(
grpcClientSettings.withCallCredentials(new LedgerCallCredentials(newAccessToken))
);
return projector.project(sourceWithNewToken, events, f);
});

View File

@ -1,273 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
How Daml Types are Translated to Daml-LF
########################################
This page shows how types in Daml are translated into Daml-LF. It should help you understand and predict the generated client interfaces, which is useful when you're building a Daml-based application that uses the Ledger API or client bindings in other languages.
For an introduction to Daml-LF, see :ref:`daml-lf-intro`.
Primitive Types
***************
:ref:`Built-in data types <daml-ref-built-in-types>` in Daml have straightforward mappings to Daml-LF.
This section only covers the serializable types, as these are what client applications can interact with via the generated Daml-LF. (Serializable types are ones whose values can exist on the ledger. Function types, ``Update`` and ``Scenario`` types and any types built up from these are excluded, and there are several other restrictions.)
Most built-in types have the same name in Daml-LF as in Daml. These are the exact mappings:
.. list-table::
:widths: 10 15
:header-rows: 1
* - Daml primitive type
- Daml-LF primitive type
* - ``Int``
- ``Int64``
* - ``Time``
- ``Timestamp``
* - ``()``
- ``Unit``
* - ``[]``
- ``List``
* - ``Decimal``
- ``Decimal``
* - ``Text``
- ``Text``
* - ``Date``
- ``Date``
* - ``Party``
- ``Party``
* - ``Optional``
- ``Optional``
* - ``ContractId``
- ``ContractId``
Be aware that only the Daml primitive types exported by the :ref:`Prelude <module-prelude-72703>` module map to the Daml-LF primitive types above. That means that, if you define your own type named ``Party``, it will not translate to the Daml-LF primitive ``Party``.
Tuple Types
***********
Daml tuple type constructors take types ``T1, T2, …, TN`` to the type ``(T1, T2, …, TN)``. These are exposed in the Daml surface language through the :ref:`Prelude <module-prelude-72703>` module.
The equivalent Daml-LF type constructors are ``daml-prim:DA.Types:TupleN``, for each particular N (where 2 <= N <= 20). This qualified name refers to the package name (``daml-prim``) and the module name (``DA.Types``).
For example: the Daml pair type ``(Int, Text)`` is translated to ``daml-prim:DA.Types:Tuple2 Int64 Text``.
Data Types
**********
Daml-LF has three kinds of data declarations:
- **Record** types, which define a collection of data
- **Variant** or **sum** types, which define a number of alternatives
- **Enum**, which defines simplified **sum** types without type parameters nor argument.
:ref:`Data type declarations in Daml <daml-ref-data-constructors>` (starting with the ``data`` keyword) are translated to record, variant or enum types. It's sometimes not obvious what they will be translated to, so this section lists many examples of data types in Daml and their translations in Daml-LF.
.. In the tables below, the left column uses Daml 1.2 syntax and the right column uses the notation from the `Daml-LF specification <https://github.com/digital-asset/daml/blob/main/daml-lf/spec/daml-lf-1.rst>`_.
Record Declarations
===================
This section uses the syntax for Daml :ref:`records <daml-ref-record-types>` with curly braces.
.. list-table::
:widths: 10 15
:header-rows: 1
* - Daml declaration
- Daml-LF translation
* - ``data Foo = Foo { foo1: Int; foo2: Text }``
- ``record Foo ↦ { foo1: Int64; foo2: Text }``
* - ``data Foo = Bar { bar1: Int; bar2: Text }``
- ``record Foo ↦ { bar1: Int64; bar2: Text }``
* - ``data Foo = Foo { foo: Int }``
- ``record Foo ↦ { foo: Int64 }``
* - ``data Foo = Bar { foo: Int }``
- ``record Foo ↦ { foo: Int64 }``
* - ``data Foo = Foo {}``
- ``record Foo ↦ {}``
* - ``data Foo = Bar {}``
- ``record Foo ↦ {}``
Variant Declarations
====================
.. list-table::
:widths: 10 15
:header-rows: 1
* - Daml declaration
- Daml-LF translation
* - ``data Foo = Bar Int | Baz Text``
- ``variant Foo ↦ Bar Int64 | Baz Text``
* - ``data Foo a = Bar a | Baz Text``
- ``variant Foo a ↦ Bar a | Baz Text``
* - ``data Foo = Bar Unit | Baz Text``
- ``variant Foo ↦ Bar Unit | Baz Text``
* - ``data Foo = Bar Unit | Baz``
- ``variant Foo ↦ Bar Unit | Baz Unit``
* - ``data Foo a = Bar | Baz``
- ``variant Foo a ↦ Bar Unit | Baz Unit``
* - ``data Foo = Foo Int``
- ``variant Foo ↦ Foo Int64``
* - ``data Foo = Bar Int``
- ``variant Foo ↦ Bar Int64``
* - ``data Foo = Foo ()``
- ``variant Foo ↦ Foo Unit``
* - ``data Foo = Bar ()``
- ``variant Foo ↦ Bar Unit``
* - ``data Foo = Bar { bar: Int } | Baz Text``
- ``variant Foo ↦ Bar Foo.Bar | Baz Text``, ``record Foo.Bar ↦ { bar: Int64 }``
* - ``data Foo = Foo { foo: Int } | Baz Text``
- ``variant Foo ↦ Foo Foo.Foo | Baz Text``, ``record Foo.Foo ↦ { foo: Int64 }``
* - ``data Foo = Bar { bar1: Int; bar2: Decimal } | Baz Text``
- ``variant Foo ↦ Bar Foo.Bar | Baz Text``, ``record Foo.Bar ↦ { bar1: Int64; bar2: Decimal }``
* - ``data Foo = Bar { bar1: Int; bar2: Decimal } | Baz { baz1: Text; baz2: Date }``
  - ``variant Foo ↦ Bar Foo.Bar | Baz Foo.Baz``, ``record Foo.Bar ↦ { bar1: Int64; bar2: Decimal }``, ``record Foo.Baz ↦ { baz1: Text; baz2: Date }``
Enum Declarations
=================
.. list-table::
:widths: 10 15
:header-rows: 1
* - Daml declaration
- Daml-LF declaration
* - ``data Foo = Bar | Baz``
- ``enum Foo ↦ Bar | Baz``
* - ``data Color = Red | Green | Blue``
- ``enum Color ↦ Red | Green | Blue``
Banned Declarations
===================
There are two gotchas to be aware of: things you might expect to be able to do in Daml that you can't because of Daml-LF.
The first: a single constructor data type must be made unambiguous as to whether it is a record or a variant type. Concretely, the data type declaration ``data Foo = Foo`` causes a compile-time error, because it is unclear whether it is declaring a record or a variant type.
To fix this, you must make the distinction explicitly. Write ``data Foo = Foo {}`` to declare a record type with no fields, or ``data Foo = Foo ()`` for a variant with a single constructor taking unit argument.
The second gotcha is that a constructor in a data type declaration can have at most one unlabelled argument type. This restriction is so that we can provide a straight-forward encoding of Daml-LF types in a variety of client languages.
.. list-table::
:widths: 10 15
:header-rows: 1
* - Banned declaration
- Workaround
* - ``data Foo = Foo``
- ``data Foo = Foo {}`` to produce ``record Foo ↦ {}`` OR ``data Foo = Foo ()`` to produce ``variant Foo ↦ Foo Unit``
* - ``data Foo = Bar``
- ``data Foo = Bar {}`` to produce ``record Foo ↦ {}`` OR ``data Foo = Bar ()`` to produce ``variant Foo ↦ Bar Unit``
* - ``data Foo = Foo Int Text``
- Name constructor arguments using a record declaration, for example ``data Foo = Foo { x: Int; y: Text }``
* - ``data Foo = Bar Int Text``
- Name constructor arguments using a record declaration, for example ``data Foo = Bar { x: Int; y: Text }``
* - ``data Foo = Bar | Baz Int Text``
- Name arguments to the Baz constructor, for example ``data Foo = Bar | Baz { x: Int; y: Text }``
Type Synonyms
*************
:ref:`Type synonyms <daml-ref-type-synonyms>` (starting with the ``type`` keyword) are eliminated during conversion to Daml-LF. The body of the type synonym is inlined for all occurrences of the type synonym name.
For example, consider the following Daml type declarations.
.. literalinclude:: code-snippets/LfTranslation.daml
:language: daml
:start-after: -- start code snippet: type synonyms
:end-before: -- end code snippet: type synonyms
The ``Username`` type is eliminated in the Daml-LF translation, as follows:
.. code-block:: none
record User ↦ { name: Text }
Template Types
**************
A :ref:`template declaration <daml-ref-template-name>` in Daml results in one or more data type declarations behind the scenes. These data types, detailed in this section, are not written explicitly in the Daml program but are created by the compiler.
They are translated to Daml-LF using the same rules as for record declarations above.
These declarations are all at the top level of the module in which the template is defined.
Template Data Types
===================
Every contract template defines a record type for the parameters of the contract. For example, the template declaration:
.. literalinclude:: code-snippets/LfTranslation.daml
:language: daml
:start-after: -- start code snippet: template data types
:end-before: -- end code snippet: template data types
results in this record declaration:
.. literalinclude:: code-snippets/LfResults.daml
:language: daml
:start-after: -- start snippet: data from template
:end-before: -- end snippet: data from template
This translates to the Daml-LF record declaration:
.. code-block:: none
record Iou ↦ { issuer: Party; owner: Party; currency: Text; amount: Decimal }
Choice Data Types
=================
Every choice within a contract template results in a record type for the parameters of that choice. For example, let's suppose the earlier ``Iou`` template has the following choices:
.. literalinclude:: code-snippets/LfTranslation.daml
:language: daml
:start-after: -- start code snippet: choice data types
:end-before: -- end code snippet: choice data types
This results in these two record types:
.. literalinclude:: code-snippets/LfResults.daml
:language: daml
:start-after: -- start snippet: data from choices
:end-before: -- end snippet: data from choices
Whether the choice is consuming or nonconsuming is irrelevant to the data type declaration. The data type is a record even if there are no fields.
These translate to the Daml-LF record declarations:
.. code-block:: none
record DoNothing ↦ {}
record Transfer ↦ { newOwner: Party }
Names with Special Characters
*****************************
All names in Daml—of types, templates, choices, fields, and variant data constructors—are translated to the more restrictive rules of Daml-LF. ASCII letters, digits, and ``_`` underscore are unchanged in Daml-LF; all other characters must be mangled in some way, as follows:
- ``$`` changes to ``$$``,
- Unicode codepoints less than 65536 translate to ``$uABCD``, where ``ABCD`` are exactly four (zero-padded) hexadecimal digits of the codepoint in question, using only lowercase ``a-f``, and
- Unicode codepoints of 65536 or greater translate to ``$UABCD1234``, where ``ABCD1234`` are exactly eight (zero-padded) hexadecimal digits of the codepoint in question, with the same ``a-f`` rule.
.. list-table::
:widths: 10 15
:header-rows: 1
* - Daml name
- Daml-LF identifier
* - ``Foo_bar``
- ``Foo_bar``
* - ``baz'``
- ``baz$u0027``
* - ``:+:``
- ``$u003a$u002b$u003a``
* - ``naïveté``
- ``na$u00efvet$u00e9``
* - ``:🙂:``
- ``$u003a$U0001f642$u003a``

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 5.1 KiB

View File

@ -1,115 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
How Daml Types are Translated to Protobuf
#########################################
This page gives an overview and reference on how Daml types and contracts are represented by the Ledger API as protobuf messages, most notably:
- in the stream of transactions from the :ref:`com.daml.ledger.api.v1.transactionservice`
- as payload for :ref:`com.daml.ledger.api.v1.createcommand` and :ref:`com.daml.ledger.api.v1.exercisecommand` sent to :ref:`com.daml.ledger.api.v1.commandsubmissionservice` and :ref:`com.daml.ledger.api.v1.commandservice`.
The Daml code in the examples below is written in Daml *1.1*.
Notation
********
The notation used on this page for the protobuf messages is the same as you get if you invoke ``protoc --decode=Foo < some_payload.bin``. To illustrate the notation, here is a simple definition of the messages ``Foo`` and ``Bar``:
.. literalinclude:: ../code-snippets/notation.proto
:language: protobuf
:start-after: // start snippet
:end-before: // end snippet
A particular value of ``Foo`` is then represented by the Ledger API in this way:
.. literalinclude:: ../code-snippets/notation.payload
The name of messages is added as a comment after the opening curly brace.
Records and Primitive Types
***************************
Records or product types are translated to :ref:`com.daml.ledger.api.v1.record`. Here's an example Daml record type that contains a field for each primitive type:
.. literalinclude:: ../code-snippets/Types.daml
:language: daml
:start-after: -- PRODUCT_TYPE_DEF_BEGIN
:end-before: -- PRODUCT_TYPE_DEF_END
And here's an example of creating a value of type `MyProductType`:
.. literalinclude:: ../code-snippets/Types.daml
:language: daml
:start-after: -- PRODUCT_TYPE_CREATE_BEGIN
:end-before: -- PRODUCT_TYPE_CREATE_END
For this data, the respective data on the Ledger API is shown below. Note that this value would be enclosed by a particular contract containing a field of type `MyProductType`. See `Contract templates`_ for the translation of Daml contracts to the representation by the Ledger API.
.. literalinclude:: ../code-snippets/records.payload
Variants
********
Variants or sum types are types with multiple constructors. This example defines a simple variant type with two constructors:
.. literalinclude:: ../code-snippets/Types.daml
:language: daml
:start-after: -- SUM_TYPE_DEF_BEGIN
:end-before: -- SUM_TYPE_DEF_END
The constructor ``MyConstructor1`` takes a single parameter of type ``Integer``, whereas the constructor ``MyConstructor2`` takes a tuple with two fields as parameter. The snippet below shows how you can create values with either of the constructors.
.. literalinclude:: ../code-snippets/Types.daml
:language: daml
:start-after: -- SUM_TYPE_CREATE_BEGIN
:end-before: -- SUM_TYPE_CREATE_END
Similar to records, variants are also enclosed by a contract, a record, or another variant.
The snippets below shows the value of ``mySum1`` and ``mySum2`` respectively as they would be transmitted on the Ledger API within a contract.
.. literalinclude:: ../code-snippets/MySumType.payload
:lines: 1-12
:caption: mySum1
.. literalinclude:: ../code-snippets/MySumType.payload
:lines: 14-38
:caption: mySum2
Contract Templates
******************
Contract templates are represented as records with the same identifier as the template.
This first example template below contains only the signatory party and a simple choice to exercise:
.. literalinclude:: ../code-snippets/Templates.daml
:language: daml
:start-after: -- BEGIN_SIMPLE_TEMPLATE
:end-before: -- END_SIMPLE_TEMPLATE
Create a Contract
=================
Creating contracts is done by sending a :ref:`com.daml.ledger.api.v1.createcommand` to the :ref:`com.daml.ledger.api.v1.commandsubmissionservice` or the :ref:`com.daml.ledger.api.v1.commandservice`. The message to create a `MySimpleTemplate` contract with *Alice* being the owner is shown below:
.. literalinclude:: ../code-snippets/CreateMySimpleTemplate.payload
Receive a Contract
==================
Contracts are received from the :ref:`com.daml.ledger.api.v1.transactionservice` in the form of a :ref:`com.daml.ledger.api.v1.createdevent`. The data contained in the event corresponds to the data that was used to create the contract.
.. literalinclude:: ../code-snippets/CreatedEventMySimpleTemplate.payload
Exercise a Choice
=================
A choice is exercised by sending an :ref:`com.daml.ledger.api.v1.exercisecommand`. Taking the same contract template again, exercising the choice ``MyChoice`` would result in a command similar to the following:
.. literalinclude:: ../code-snippets/ExerciseMySimpleTemplate.payload
If the template specifies a key, the :ref:`com.daml.ledger.api.v1.exercisebykeycommand` can be used. It works in a similar way as :ref:`com.daml.ledger.api.v1.exercisecommand`, but instead of specifying the contract identifier you have to provide its key. The example above could be rewritten as follows:
.. literalinclude:: ../code-snippets/ExerciseByKeyMySimpleTemplate.payload

View File

@ -1,250 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. Set max depth of the local table contents (visible in the right hand sidebar in the rendered HTML)
.. See https://www.sphinx-doc.org/en/master/usage/restructuredtext/field-lists.html?highlight=tocdepth
:tocdepth: 2
Error Codes
###########
Overview
********
.. _gRPC status codes: https://grpc.github.io/grpc/core/md_doc_statuscodes.html
.. _gRPC status code: https://grpc.github.io/grpc/core/md_doc_statuscodes.html
.. _rich gRPC error model: https://cloud.google.com/apis/design/errors#error_details
.. _standard gRPC description: https://grpc.github.io/grpc-java/javadoc/io/grpc/Status.html#getDescription--
The majority of the errors are a result of some request processing.
They are logged and returned to the user as a failed gRPC response
containing the status code, an optional status message and optional metadata.
This approach remains unchanged in principle while we aim at
enhancing it by providing:
- improved consistency of the returned errors across API endpoints,
- richer error payload format with clearly distinguished machine readable parts to facilitate
automated error handling strategies,
- complete inventory of all error codes with an explanation, suggested resolution and
other useful information.
The goal is to enable users, developers and operators to act on the encountered
errors in a self-service manner, either in an automated-way or manually.
Glossary
********
Error
Represents an occurrence of a failure.
Consists of:
- an `error code id`,
- a `gRPC status code`_ (determined by its error category),
- an `error category`,
- a `correlation id`,
- a human readable message,
- and optional additional metadata.
You can think of it as an
instantiation of an error code.
Error code
Represents a class of failures.
Identified by its error code id (we may use `error code` and `error code id` interchangeably in this document).
Belongs to a single error category.
Error category
A broad categorization of error codes that you can base your error handling strategies on.
Map to exactly one `gRPC status code`_.
We recommend dealing with errors based on their error category.
However, if the error category itself is too generic,
you can act on particular error codes.
Correlation id
A value whose purpose is to allow the user to clearly identify the request,
such that the operator can lookup any log information associated with this error.
We use the request's submission id as the correlation id.
Anatomy of an Error
*******************
Errors returned to users contain a `gRPC status code`_, a description and additional machine readable information
represented in the `rich gRPC error model`_.
Error Description
=================
We use the `standard gRPC description`_ that additionally adheres to our custom message format:
.. code-block:: java
<ERROR_CODE_ID>(<CATEGORY_ID>,<CORRELATION_ID_PREFIX>):<HUMAN_READABLE_MESSAGE>
The constituent parts are:
- ``<ERROR_CODE_ID>`` - a unique non-empty string containing at most 63 characters:
upper-cased letters, underscores or digits.
Identifies corresponding error code id.
- ``<CATEGORY_ID>`` - a small integer identifying the corresponding error category.
- ``<CORRELATION_ID_PREFIX>`` - a string aimed at identifying originating request.
Absence of one is indicated by value ``0``.
If present it is an 8 character long prefix of the corresponding request's submission id.
Full correlation id can be found in error's additional machine readable information
(see `Additional Machine Readable Information`_).
- ``:`` - a colon character that serves as a separator for the machine and human readable parts.
- ``<HUMAN_READABLE_MESSAGE>`` - a message targeted at a human reader.
Should never be parsed by applications, as the description might change
in future releases to improve clarity.
In a concrete example an error description might look like this:
.. code-block:: java
TRANSACTION_NOT_FOUND(11,12345): Transaction not found, or not visible.
Additional Machine Readable Information
=======================================
We use the following error details:
- A mandatory ``com.google.rpc.ErrorInfo`` containing `error code id`.
- A mandatory ``com.google.rpc.RequestInfo`` containing (not-truncated) correlation id
(or ``0`` if correlation id is not available).
- An optional ``com.google.rpc.RetryInfo`` containing retry interval with milliseconds resolution.
- An optional ``com.google.rpc.ResourceInfo`` containing information about the resource the failure is based on.
Any request that fails due to some well-defined resource issues (such as contract, contract-key, package, party, template, domain, etc.) will contain these.
Particular resources are implementation specific and vary across ledger implementations.
Many errors will include more information,
but there is no guarantee given that additional information will be preserved across versions.
Prevent Security Leaks in Error Codes
=====================================
For any error that could leak information to an attacker, the system will return an error message via the API that
will not leak any valuable information. The log file will contain the full error message.
Work With Error Codes
*********************
This example shows how a user can extract the relevant error information.
.. code-block:: scala
object SampleClientSide {
import com.google.rpc.ResourceInfo
import com.google.rpc.{ErrorInfo, RequestInfo, RetryInfo}
import io.grpc.StatusRuntimeException
import scala.jdk.CollectionConverters._
def example(): Unit = {
try {
DummmyServer.serviceEndpointDummy()
} catch {
case e: StatusRuntimeException =>
// Converting to a status object.
val status = io.grpc.protobuf.StatusProto.fromThrowable(e)
// Extracting gRPC status code.
assert(status.getCode == io.grpc.Status.Code.ABORTED.value())
assert(status.getCode == 10)
// Extracting error message, both
// machine oriented part: "MY_ERROR_CODE_ID(2,full-cor):",
// and human oriented part: "A user oriented message".
assert(status.getMessage == "MY_ERROR_CODE_ID(2,full-cor): A user oriented message")
// Getting all the details
val rawDetails: Seq[com.google.protobuf.Any] = status.getDetailsList.asScala.toSeq
// Extracting error code id, error category id and optionally additional metadata.
assert {
rawDetails.collectFirst {
case any if any.is(classOf[ErrorInfo]) =>
val v = any.unpack(classOf[ErrorInfo])
assert(v.getReason == "MY_ERROR_CODE_ID")
assert(v.getMetadataMap.asScala.toMap == Map("category" -> "2", "foo" -> "bar"))
}.isDefined
}
// Extracting full correlation id, if present.
assert {
rawDetails.collectFirst {
case any if any.is(classOf[RequestInfo]) =>
val v = any.unpack(classOf[RequestInfo])
assert(v.getRequestId == "full-correlation-id-123456790")
}.isDefined
}
// Extracting retry information if the error is retryable.
assert {
rawDetails.collectFirst {
case any if any.is(classOf[RetryInfo]) =>
val v = any.unpack(classOf[RetryInfo])
assert(v.getRetryDelay.getSeconds == 123, v.getRetryDelay.getSeconds)
assert(v.getRetryDelay.getNanos == 456 * 1000 * 1000, v.getRetryDelay.getNanos)
}.isDefined
}
// Extracting resource if the error pertains to some well defined resource.
assert {
rawDetails.collectFirst {
case any if any.is(classOf[ResourceInfo]) =>
val v = any.unpack(classOf[ResourceInfo])
assert(v.getResourceType == "CONTRACT_ID")
assert(v.getResourceName == "someContractId")
}.isDefined
}
}
}
}
Error Categories Inventory
**************************
The error categories allow you to group errors so that application logic can be built
in a sensible way to automatically deal with errors and decide whether to retry
a request or escalate to the operator.
.. This file is generated:
.. include:: error-categories-inventory.rst.inc
Error Codes Inventory
**********************
.. This file is generated:
.. include:: error-codes-inventory.rst.inc

View File

@ -1,97 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _grpc:
Use the Ledger API With gRPC
############################
If you want to write an application for the ledger API in other languages, you'll need to use `gRPC <https://grpc.io>`__ directly.
If you're not familiar with gRPC and protobuf, we strongly recommend following the `gRPC quickstart <https://grpc.io/docs/quickstart/>`__ and `gRPC tutorials <https://grpc.io/docs/tutorials/>`__. This documentation is written assuming you already have an understanding of gRPC.
Get Started
***********
You can get the protobufs from a `GitHub release <protobufs_>`_, or from the ``daml`` repository `here <https://github.com/digital-asset/daml/tree/main/ledger-api/grpc-definitions>`__.
Protobuf Reference Documentation
********************************
For full details of all of the Ledger API services and their RPC methods, see :doc:`/app-dev/grpc/proto-docs`.
Example Project
***************
We have an example project demonstrating the use of the Ledger API with gRPC. To get the example project, ``PingPongGrpc``:
#. Configure your machine to use the example by following the instructions at :ref:`bindings-java-setup-maven`.
#. Clone the `repository from GitHub <https://github.com/digital-asset/ex-java-bindings>`__.
#. Follow the `setup instructions in the README <https://github.com/digital-asset/ex-java-bindings/blob/master/README.rst#setting-up-the-example-projects>`__. Use ``examples.pingpong.grpc.PingPongGrpcMain`` as the main class.
About the Example Project
=========================
The example shows very simply how two parties can interact via a ledger, using two Daml contract templates, ``Ping`` and ``Pong``.
The logic of the application goes like this:
#. The application injects a contract of type ``Ping`` for ``Alice``.
#. ``Alice`` sees this contract and exercises the consuming choice ``RespondPong`` to create a contract of type ``Pong`` for ``Bob``.
#. ``Bob`` sees this contract and exercises the consuming choice ``RespondPing`` to create a contract of type ``Ping`` for ``Alice``.
#. Points 2 and 3 are repeated until the maximum number of contracts defined in the Daml is reached.
The entry point for the Java code is the main class ``src/main/java/examples/pingpong/grpc/PingPongGrpcMain.java``. Look at it to see how to connect to and interact with a ledger using gRPC.
The application prints output like this:
.. code-block:: text
Bob is exercising RespondPong on #1:0 in workflow Ping-Alice-1 at count 0
Alice is exercising RespondPing on #344:1 in workflow Ping-Alice-7 at count 9
The first line shows:
- ``Bob`` is exercising the ``RespondPong`` choice on the contract with ID ``#1:0`` for the workflow ``Ping-Alice-1``.
- Count ``0`` means that this is the first choice after the initial ``Ping`` contract.
- The workflow ID ``Ping-Alice-1`` conveys that this is the workflow triggered by the second initial ``Ping`` contract that was created by ``Alice``.
This example subscribes to transactions for a single party, as different parties typically live on different participant nodes. However, if you have multiple parties registered on the same node, or are running an application against the Sandbox, you can subscribe to transactions for multiple parties in a single subscription by putting multiple entries into the ``filters_by_party`` field of the ``TransactionFilter`` message. Subscribing to transactions for an unknown party will result in an error.
Daml Types and Protobuf
***********************
For information on how Daml types and contracts are represented by the Ledger API as protobuf messages, see :doc:`/app-dev/grpc/daml-to-ledger-api`.
Error Handling
**************
The Ledger API generally uses the gRPC standard status codes for signaling response failures to client applications.
For more details on the gRPC standard status codes, see the `gRPC documentation <https://github.com/grpc/grpc/blob/600272c826b48420084c2ff76dfb0d34324ec296/doc/statuscodes.md>`__ .
Generically, on submitted commands the Ledger API responds with the following gRPC status codes:
ABORTED
The platform failed to record the result of the command due to a transient server-side error (e.g. backpressure due to high load) or a time constraint violation. You can retry the submission. In case of a time constraint violation, please refer to the section :ref:`Dealing with time <dealing-with-time>` on how to handle commands with long processing times.
DEADLINE_EXCEEDED (when returned by the Command Service)
The request might not have been processed, as its deadline expired before its completion was signalled.
ALREADY_EXISTS
The command was rejected because the resource (e.g. contract key) already exists or because it was sent within the deduplication period of a previous command with the same change ID.
NOT_FOUND
The command was rejected due to a missing resource (e.g. contract key not found).
INVALID_ARGUMENT
The submission failed because of a client error. The platform will definitely reject resubmissions of the same command.
FAILED_PRECONDITION
The command was rejected due to an interpretation error or due to a consistency error due to races.
OK (when returned by the Command Submission Service)
Assume that the command was accepted and wait for the resulting completion or a timeout from the Command Completion Service.
OK (when returned by the Command Service)
You can be sure that the command was successful.
INTERNAL, UNKNOWN (when returned by the Command Service)
An internal system fault occurred. Contact the participant operator for the resolution.
Aside from the standard gRPC status codes, the failures returned by the Ledger API are enriched with details meant to help the application
or the application developer to handle the error autonomously (e.g. by retrying on a retryable error).
For more details on the rich error details see the :doc:`error-codes`

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 192 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 21 KiB

View File

@ -1,97 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
The Ledger API
##############
.. toctree::
:hidden:
services
parties-users
grpc/index
grpc/error-codes
grpc/proto-docs
grpc/daml-to-ledger-api
daml-lf-translation
bindings-x-lang/index
To write an application around a Daml ledger, you will need to interact with the **Ledger API**.
Every ledger that Daml can run on exposes this same API.
What's in the Ledger API
************************
The Ledger API exposes the following services:
- Submitting commands to the ledger
- Use the :ref:`command submission service <command-submission-service>` to submit commands (create a contract or exercise a choice) to the ledger.
- Use the :ref:`command completion service <command-completion-service>` to track the status of submitted commands.
- Use the :ref:`command service <command-service>` for a convenient service that combines the command submission and completion services.
- Reading from the ledger
- Use the :ref:`transaction service <transaction-service>` to stream committed transactions and the resulting events (choices exercised, and contracts created or archived), and to look up transactions.
- Use the :ref:`active contracts service <active-contract-service>` to quickly bootstrap an application with the currently active contracts. It saves you the work to process the ledger from the beginning to obtain its current state.
- Utility services
- Use the :ref:`party management service <party-service>` to allocate and find information about parties on the Daml ledger.
- Use the :ref:`package service <package-service>` to query the Daml packages deployed to the ledger.
- Use the :ref:`ledger identity service <ledger-identity-service>` to retrieve the Ledger ID of the ledger the application is connected to.
- Use the :ref:`ledger configuration service <ledger-configuration-service>` to retrieve some dynamic properties of the ledger, like maximum deduplication duration for commands.
- Use the :ref:`version service <version-service>` to retrieve information about the Ledger API version.
- Use the :ref:`user management service <user-management-service>` to manage users and their rights.
- Use the :ref:`metering report service <metering-report-service>` to retrieve a participant metering report.
- Testing services (on Sandbox only, *not* for production ledgers)
- Use the :ref:`time service <time-service>` to obtain the time as known by the ledger.
For full information on the services see :doc:`/app-dev/services`.
You may also want to read the :doc:`protobuf documentation </app-dev/grpc/proto-docs>`, which explains how each service is defined as protobuf messages.
How to Access the Ledger API
****************************
You can access the Ledger API via the :doc:`Java Bindings <bindings-java/index>` or the :doc:`Python Bindings </app-dev/bindings-python>` (formerly known as DAZL).
If you don't use a language that targets the JVM or Python, you can use gRPC to generate the code to access the Ledger API in
several supported programming languages. :doc:`Further documentation <bindings-x-lang/index>` provides a few
pointers on how you may want to approach this.
You can also use the :doc:`HTTP JSON API Service </json-api/index>` to tap into the Ledger API.
At its core, this service provides a simplified view of the active contract set and additional primitives to query it and
exposing it using a well-defined JSON-based encoding over a conventional HTTP connection.
A subset of the services mentioned above is also available as part of the HTTP JSON API.
.. _daml-lf-intro:
Daml-LF
*******
When you :ref:`compile Daml source into a .dar file <assistant-manual-building-dars>`, the underlying format is Daml-LF. Daml-LF is similar to Daml, but is stripped down to a core set of features. The relationship between the surface Daml syntax and Daml-LF is loosely similar to that between Java and JVM bytecode.
As a user, you don't need to interact with Daml-LF directly. But internally, it's used for:
- Executing Daml code on the Sandbox or on another platform
- Sending and receiving values via the Ledger API (using a protocol such as gRPC)
- Generating code in other languages for interacting with Daml models (often called “codegen”)
.. Daml-LF content appears in the package service interactions. It is represented as opaque blobs that require a secondary decoding phase.
When You Need to Know About Daml-LF
===================================
Daml-LF is only really relevant when you're dealing with the objects you send to or receive from the ledger. If you use any of the provided language bindings for the Ledger API, you don't need to know about Daml-LF at all, because this generates idiomatic representations of Daml for you.
Otherwise, it can be helpful to know what the types in your Daml code look like at the Daml-LF level, so you know what to expect from the Ledger API.
For example, if you are writing an application that creates some Daml contracts, you need to construct values to pass as parameters to the contract. These values are determined by the Daml-LF types in that contract template. This means you need an idea of how the Daml-LF types correspond to the types in the original Daml model.
For the most part the translation of types from Daml to Daml-LF should not be surprising. :doc:`This page goes through all the cases in detail </app-dev/daml-lf-translation>`.
For the bindings to your specific programming language, you should refer to the language-specific documentation.

View File

@ -1,77 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
Parties and Users On a Daml Ledger
##################################
Identifying parties and users is an important part of building a workable Daml application. Recall these definitions from the :doc:`Getting Started Guide </getting-started/app-architecture>`:
- **Parties** are unique across the entire Daml network. These must be allocated before you can use them to log in, and allocation results in a random-looking (but not actually random) string that identifies the party and is used in your Daml code. Parties are a builtin concept.
- On each participant node you can create **users** with human-readable user ids. Each user can be associated with one or more parties allocated on that participant node, and refers to that party only on that node. Users are a purely local concept, meaning you can never address a user on another node by user id, and you never work with users in your Daml code; party ids are always used for these purposes. Users are also a builtin concept.
This represents a change from earlier versions of Daml, and the implications of these changes are discussed in more depth here.
Parties in SDK 2.0 and Subsequent
*********************************
In Daml 2.0 and later versions, when you allocate a party with a given hint ``Alice`` — either in the sandbox or on a production ledger — you will get back a party id like ``Alice::1220f2fe29866fd6a0009ecc8a64ccdc09f1958bd0f801166baaee469d1251b2eb72``. The prefix before the double colon corresponds to the hint specified on party allocation. If the hint is not specified, it defaults to ``party-${randomUUID}``. The suffix is the fingerprint of the public key that can authorize topology transactions for this party. Keys are generated randomly, so the suffix will look different locally and every time you restart Sandbox, you will get a different party id. This has a few new implications:
- You can no longer allocate a party with a fixed party id. While you have some control over the prefix, we do not recommend that you rely on that to identify parties.
- Party ids are no longer easily understandable by humans. You may want to display something else in your user interfaces.
- Discovering the party ID of other users might get tricky. For example, to follow the user Bob, you cannot assume that their party ID is "Bob".
Party ID Hints and Display Names
********************************
Party id hints and display names which existed in SDK 1.18.0 are still available in SDK 2.0.0. We recommend against relying on display names for new applications, but if you are migrating your existing application, they function exactly as before.
Party id hints still serve a purpose. While we recommend against parsing party ids and extracting the hint, for debugging and during development it can be helpful to see the party id hint at the beginning. Bear in mind that different parties can be allocated to different participants with the same party id hint. The full party ids will be different due to the suffix, but the party id hint would be the same.
The second remaining use for party id hints is to avoid duplicate party allocation. Consider sending a party allocation request that fails due to a network error. The client has no way of knowing whether the party has been allocated. Because a party allocation will be rejected if a party with the given hint already exists, the client can safely send the same request with the same hint, which will either allocate a party if the previous request failed or fail itself. (Note that while this works for Canton, including Sandbox as well as the VMware blockchain, it is not part of the ledger API specifications, so other ledgers might behave differently.)
Authorization and User Management
*********************************
Daml 2.0 also introduced :ref:`user management <user-management-service>`. User management allows you to create users on a participant that are associated with a primary party and a dynamic set of actAs and readAs claims. Crucially, the user id — unlike a party id — can be fully controlled when creating a user and is unique on a single participant. You can also use the user id in :ref:`authorization tokens <user-access-tokens>` instead of party tokens that have specific parties in actAs and readAs fields. This means your IAM, which can sometimes be limited in configurability, only has to work with fixed user ids.
However, users are purely local to a given participant. You cannot refer to users or parties associated with a given user on another participant via their user id. You also need admin claims to interact with the user management endpoint for users other than your own. This means that while you can have a user id in place of the primary party of your own user, you cannot generally replace party ids with user ids.
Working with Parties
********************
So how do you handle these unwieldy party ids? The primary rule is to treat them as *opaque identifiers*. In particular, don't parse them, don't make assumptions about their format, and don't try to turn arbitrary strings into party ids. The only way to get a new party id is as the result of a party allocation. Applications should never hardcode specific parties. Instead, either accept them as inputs or read them from contract or choice arguments.
To illustrate this, we'll go over the tools in the SDK and how this affects them:
Daml Script
===========
In Daml Script, ``allocateParty`` returns the party id that has been allocated. This party can then be used later, for example, in command submissions. When your script should refer to parties that have been allocated outside of the current script, accept those parties as arguments and pass them in via ``--input-file``. Similarly, if your script allocates parties and you want to refer to them outside of the script, either in a later script or somewhere else, you can store them via ``--output-file``. You can also query the party management and user management endpoints and get access to parties that way. Keep in mind though, this requires admin rights on a participant and there are no uniqueness guarantees for display names. That usually makes querying party and user management endpoints only an option for development, and we recommend passing parties as arguments where possible instead.
Daml Triggers
=============
To start a trigger via the trigger service, you still have to supply the party ids for the actAs and readAs claims for your trigger. This could, e.g., come from a party allocation in a Daml script that you wrote to a file via Daml Script's ``--output-file``. Within your trigger, you get access to those parties via ``getActAs`` and ``getReadAs``. To refer to other parties, for example when creating a contract, reference them from an existing contract. If there is no contract, consider creating a special configuration template that lists the parties your trigger should interact with outside of your trigger, and query for that template in your trigger to get access to the parties.
Navigator
=========
Navigator presents you with a list of user ids on the participant as login options. Once logged in, you will interact with the ledger as the primary party of that user. Any field that expects a party provides autocompletion, so if you know the prefix (by having chosen the hint), you don't have to remember the suffix. In addition, party ids have been shortened in the Navigator UI so that not all of the id is shown. Clicking on a party identifier will copy the full identifier to the system clipboard, making it easier to use elsewhere.
Java Bindings
=============
When writing an application using the Java bindings, we recommend that you pass parties as arguments. They can either be CLI arguments or JVM properties as used in the :doc:`quickstart-java example <bindings-java/quickstart.html>`.
Create-daml-app and UIs
=======================
Create-daml-app and UIs in general are a bit more complex. First, they often need to interact with an IAM during the login. Second, it is often important to have human-readable names in a UI — to go back to an earlier example, a user wants to follow Bob without typing a very long party id.
Logging in is going to depend on your specific IAM, but there are a few common patterns. In create-daml-app, you log in by typing your user id directly and then interacting with the primary party of that user. In an authorized setup, users might use their email address and a password, and as a result, the IAM will provide them with a token for their user id. The approach to discovering party ids corresponding to human-readable names can also vary depending on privacy requirements and other constraints. Create-daml-app addresses this by writing alias contracts on the ledger that associate human-readable names with the party id. These alias contracts are shared with everyone via a public party.

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 48 KiB

View File

@ -1,332 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _ledger-api-services:
The Ledger API Services
#######################
The Ledger API is structured as a set of services. The core services are implemented using `gRPC <https://grpc.io/>`__ and `Protobuf <https://developers.google.com/protocol-buffers/>`__, but most applications access this API through the mediation of the language bindings.
This page gives more detail about each of the services in the API, and will be relevant whichever way you're accessing it.
If you want to read low-level detail about each service, see the :doc:`protobuf documentation of the API </app-dev/grpc/proto-docs>`.
Overview
********
The API is structured as two separate data streams:
- A stream of **commands** TO the ledger that allow an application to submit transactions and change state.
- A stream of **transactions** and corresponding **events** FROM the ledger that indicate all state changes that have taken place on the ledger.
Commands are the only way an application can cause the state of the ledger to change, and events are the only mechanism to read those changes.
For an application, the most important consequence of these architectural decisions and implementation is that the Ledger API is asynchronous. This means:
- The outcome of commands is only known some time after they are submitted.
- The application must deal with successful and erroneous command completions separately from command submission.
- Ledger state changes are indicated by events received asynchronously from the command submissions that cause them.
The need to handle these issues is a major determinant of application architecture. Understanding the consequences of the API characteristics is important for a successful application design.
For more help understanding these issues so you can build correct, performant and maintainable applications, read the :doc:`application architecture guide </app-dev/app-arch>`.
Glossary
========
- The ledger is a list of ``transactions``. The transaction service returns these.
- A ``transaction`` is a tree of ``actions``, also called ``events``, which are of type ``create``, ``exercise`` or ``archive``. The transaction service can return the whole tree, or a flattened list.
- A ``submission`` is a proposed transaction, consisting of a list of ``commands``, which correspond to the top-level ``actions`` in that transaction.
- A ``completion`` indicates the success or failure of a ``submission``.
.. _ledger-api-submission-services:
Submit Commands to the Ledger
*****************************
.. _command-submission-service:
Command Submission Service
==========================
Use the **command submission service** to submit commands to the ledger. Commands either create a new contract, or exercise a choice on an existing contract.
A call to the command submission service will return as soon as the ledger server has parsed the command, and has either accepted or rejected it. This does not mean the command has been executed, only that the server has looked at the command and decided that its format is acceptable, or has rejected it for syntactic or content reasons.
The on-ledger effect of the command execution will be reported via the `transaction service <#transaction-service>`__, described below. The completion status of the command is reported via the `command completion service <#command-completion-service>`__. Your application should receive completions, correlate them with command submission, and handle errors and failed commands. Alternatively, you can use the `command service <#command-service>`__, which conveniently wraps the command submission and completion services.
.. _change-id:
Change ID
---------
Each intended ledger change is identified by its **change ID**, consisting of the following three components:
- The submitting parties, i.e., the union of :ref:`party <com.daml.ledger.api.v1.Commands.party>` and :ref:`act_as <com.daml.ledger.api.v1.Commands.act_as>`
- the :ref:`application ID <com.daml.ledger.api.v1.Commands.application_id>`
- The :ref:`command ID <com.daml.ledger.api.v1.Commands.command_id>`
Application-specific IDs
------------------------
The following application-specific IDs, all of which are included in completion events, can be set in commands:
- A :ref:`submission ID <com.daml.ledger.api.v1.Commands.submission_id>`, returned to the submitting application only. It may be used to correlate specific submissions to specific completions.
- A :ref:`command ID <com.daml.ledger.api.v1.Commands.command_id>`, returned to the submitting application only; it can be used to correlate commands to completions.
- A :ref:`workflow ID <com.daml.ledger.api.v1.Commands.workflow_id>`, returned as part of the resulting transaction to all applications receiving it. It can be used to track workflows between parties, consisting of several transactions.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.CommandSubmissionService>`.
.. _command-submission-service-deduplication:
Command Deduplication
---------------------
The command submission service deduplicates submitted commands based on their :ref:`change ID <change-id>`.
- Applications can provide a deduplication period for each command. If this parameter is not set, the default maximum deduplication duration is used.
- A command submission is considered a duplicate submission if the Ledger API server is aware of another command within the deduplication period and with the same :ref:`change ID <change-id>`.
- A command resubmission will generate a rejection until the original submission was rejected (i.e. the command failed and resulted in a rejected transaction) or until the effective deduplication period has elapsed since the completion of the original command, whichever comes first.
- Command deduplication is only *guaranteed* to work if all commands are submitted to the same participant. Ledgers are free to perform additional command deduplication across participants. Consult the respective ledger's manual for more details.
For details on how to use command deduplication, see the :doc:`Command Deduplication Guide <command-deduplication>`.
.. _command-completion-service:
Command Completion Service
==========================
Use the **command completion service** to find out the completion status of commands you have submitted.
Completions contain the :ref:`command ID <com.daml.ledger.api.v1.Commands.command_id>` of the completed command, and the completion status of the command. This status indicates failure or success, and your application should use it to update what it knows about commands in flight, and implement any application-specific error recovery.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.CommandCompletionService>`.
.. _command-service:
Command Service
===============
Use the **command service** when you want to submit a command and wait for it to be executed. This service is similar to the command submission service, but also receives completions and waits until it knows whether or not the submitted command has completed. It returns the completion status of the command execution.
You can use either the command or command submission services to submit commands to effect a ledger change. The command service is useful for simple applications, as it handles a basic form of coordination between command submission and completion, correlating submissions with completions, and returning a success or failure status. This allow simple applications to be completely stateless, and alleviates the need for them to track command submissions.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.CommandService>`.
Read From the Ledger
********************
.. _transaction-service:
Transaction Service
===================
Use the **transaction service** to listen to changes in the ledger state, reported via a stream of transactions.
Transactions detail the changes on the ledger, and contains all the events (create, exercise, archive of contracts) that had an effect in that transaction.
Transactions contain a :ref:`transaction ID <com.daml.ledger.api.v1.Transaction.transaction_id>` (assigned by the server), the :ref:`workflow ID <com.daml.ledger.api.v1.Commands.workflow_id>`, the :ref:`command ID <com.daml.ledger.api.v1.Commands.command_id>`, and the events in the transaction.
Subscribe to the transaction service to read events from an arbitrary point on the ledger. This arbitrary point is specified by the ledger offset. This is important when starting or restarting an application, and to work in conjunction with the `active contracts service <#active-contract-service>`__.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.TransactionService>`.
Transaction and transaction Trees
---------------------------------
``TransactionService`` offers several different subscriptions. The most commonly used is ``GetTransactions``. If you need more details, you can use ``GetTransactionTrees`` instead, which returns transactions as flattened trees, represented as a map of event IDs to events and a list of root event IDs.
.. _verbosity:
Verbosity
---------
The service works in a non-verbose mode by default, which means that some identifiers are omitted:
- Record IDs
- Record field labels
- Variant IDs
You can get these included in requests related to Transactions by setting the ``verbose`` field in message ``GetTransactionsRequest`` or ``GetActiveContractsRequest`` to ``true``.
.. _transaction-filter:
Transaction Filter
------------------
``TransactionService`` offers transaction subscriptions filtered by templates and interfaces using ``GetTransactions`` calls. A :ref:`transaction filter <com.daml.ledger.api.v1.TransactionFilter>` in ``GetTransactionsRequest`` allows:
- filtering by a party, when the :ref:`inclusive <com.daml.ledger.api.v1.Filters.inclusive>` field is left empty
- filtering by a party and a :ref:`template ID <com.daml.ledger.api.v1.InclusiveFilters.template_ids>`
- filtering by a party and an :ref:`interface ID <com.daml.ledger.api.v1.InterfaceFilter.interface_id>`
- exposing an interface view, when the :ref:`include_interface_view <com.daml.ledger.api.v1.InterfaceFilter.include_interface_view>` is set to ``true``
.. _active-contract-service:
Active Contracts Service
========================
Use the **active contracts service** to obtain a party-specific view of all contracts that are active on the ledger at the time of the request.
The active contracts service returns its response as a stream of batches of the created events that would re-create the state being reported (the size of these batches is left to the ledger implementation). As part of the last message, the offset at which the reported active contract set was valid is included. This offset can be used to subscribe to the "flat transactions" stream to keep a consistent view of the active contract set without querying the active contract service further.
This is most important at application start, if the application needs to synchronize its initial state with a known view of the ledger. Without this service, the only way to do this would be to read the Transaction Stream from the beginning of the ledger, which can be prohibitively expensive with a large ledger.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.ActiveContractsService>`.
Verbosity
---------
See :ref:`verbosity` above.
Transaction Filter
------------------
See :ref:`transaction-filter` above.
.. note::
The RPCs exposed as part of the transaction and active contracts services make use of offsets.
An offset is an opaque string of bytes assigned by the participant to each transaction as they are received from the ledger.
Two offsets returned by the same participant are guaranteed to be lexicographically ordered: while interacting with a single participant, the offset of two transactions can be compared to tell which was committed earlier.
The state of a ledger (i.e. the set of active contracts) as exposed by the Ledger API is valid at a specific offset, which is why the last message your application receives when calling the ``ActiveContractsService`` is precisely that offset.
In this way, the client can keep track of the relevant state without needing to invoke the ``ActiveContractsService`` again, by starting to read transactions from the given offset.
Offsets are also useful to perform crash recovery and failover as documented more in depth in the :ref:`application architecture <dealing-with-failures>` page.
You can read more about offsets in the `protobuf documentation of the API <../app-dev/grpc/proto-docs.html#ledgeroffset>`__.
.. _event-query-service:
Event Query Service (EXPERIMENTAL)
==================================
Use the **event query service** to obtain a party-specific view of contract events.
Contract events can be queried by contract id or contract key. If the events being queried are not visible to the requesting parties, the service returns an empty structure. This service returns consumed contracts up until they are pruned.
In the case of contract keys, a number of contracts may have used the contract key over time. The latest contract is returned first, with earlier contracts being returned in subsequent calls with a populated continuation token.
.. note::
When querying by contract key, the key value must be structured in the same way as the key returned in the create event.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.EventQueryService>`.
.. _ledger-api-utility-services:
Utility Services
****************
.. _party-service:
Party Management Service
========================
Use the **party management service** to allocate parties on the ledger, update party properties local to the participant and retrieve information about allocated parties.
Parties govern on-ledger access control as per :ref:`Daml's privacy model <da-model-privacy>`
and :ref:`authorization rules <da-ledgers-authorization-rules>`.
Applications and their operators are expected to allocate and use parties to manage on-ledger access control as per their business requirements.
For more information, refer to the pages on :doc:`Identity Management</concepts/identity-and-package-management>` and :ref:`the API reference documentation <com.daml.ledger.api.v1.admin.PartyManagementService>`.
.. _user-management-service:
User Management Service
=======================
Use the **user management service** to manage the set of users on a participant node and
their :ref:`access rights <authorization-claims>` to that node's Ledger API services
and as the integration point for your organization's IAM (Identity and Access Management) framework.
In contrast to parties, users are local to a participant node.
The relation between a participant node's users and Daml parties is best understood by analogy to classical databases:
a participant node's users are analogous to database users while Daml parties are analogous to database roles; and further, the rights granted to a user are analogous to the user's assigned database roles.
For more information, consult the :ref:`the API reference documentation <com.daml.ledger.api.v1.admin.UserManagementService>` for how to list, create, update and delete users and their rights.
See the :ref:`UserManagementFeature descriptor <com.daml.ledger.api.v1.UserManagementFeature>` to learn about limits of the user management service, e.g., the maximum number of rights per user.
The feature descriptor can be retrieved using the :ref:`Version service <version-service>`.
With user management enabled you can use both new user-based and old custom Daml authorization tokens.
Read the :doc:`Authorization documentation </app-dev/authorization>` to understand how Ledger API requests are authorized, and how to use user management to dynamically change an application's rights.
User management is available in Canton-enabled drivers and not yet available in the Daml for VMware Blockchain driver.
.. _package-service:
Package Service
===============
Use the **package service** to obtain information about Daml packages available on the ledger.
This is useful for obtaining type and metadata information that allow you to interpret event data in a more useful way.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.PackageService>`.
.. _ledger-identity-service:
Ledger Identity Service (DEPRECATED)
=====================================
Use the **ledger identity service** to get the identity string of the ledger that your application is connected to.
Including the identity string is optional for all Ledger API requests.
If you include it, commands with an incorrect identity string will be rejected.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.LedgerIdentityService>`.
.. _ledger-configuration-service:
Ledger Configuration Service
============================
Use the **ledger configuration service** to subscribe to changes in ledger configuration.
This configuration includes the maximum command deduplication period (see `Command Deduplication <#command-submission-service-deduplication>`__ for details).
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.LedgerConfigurationService>`.
.. _version-service:
Version Service
===============
Use the **version service** to retrieve information about the Ledger API version and what optional features are supported by the ledger server.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.VersionService>`.
.. _pruning-service:
Pruning Service
===============
Use the **pruning service** to prune archived contracts and transactions before or at a given offset.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.admin.ParticipantPruningService>`.
.. _metering-report-service:
Metering Report Service
=======================
Use the **metering report service** to retrieve a participant metering report.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.admin.MeteringReportService>`.
.. _ledger-api-testing-services:
Testing Services
****************
**These are only for use for testing with the Sandbox, not for on production ledgers.**
.. _time-service:
Time Service
============
Use the **time service** to obtain the time as known by the ledger server.
For full details, see :ref:`the proto documentation for the service <com.daml.ledger.api.v1.testing.TimeService>`.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 98 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 210 KiB

View File

@ -1,11 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
Building Applications
=====================
The Building Applications section covers the elements that are used to create, extend, and test your Daml full-stack application (including APIs and JavaScript client libraries) and the architectural best practices for bringing those elements together.
As with the Writing Daml section, you can find the Daml code for the example application and features `here <https://github.com/digital-asset/daml/tree/main/docs/source/daml/intro/daml>`_ or download it using the Daml assistant. For example, to load the sources for section 1 into a folder called ``intro1``, run ``daml new intro1 --template daml-intro-1``.
To run the examples, you will first need to `install the Daml SDK <https://docs.daml.com/getting-started/installation.html>`_.

View File

@ -1,555 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
Glossary of concepts
####################
Key Concepts
************
Daml
====
Daml is a platform for building and running sophisticated, multi-party applications. At its core, it contains a smart contract `language <#daml-language>`__ and `tooling <#developer-tools>`__
that defines the schema, semantics, and execution of transactions between parties. Daml includes `Canton <#canton-ledger>`__, a privacy-enabled distributed ledger that is enhanced when deployed
with complementary blockchains.
Daml Language
=============
The Daml language is a purpose-built language for rapid development of composable multi-party applications. It is a modern, ergonomically designed functional language that carefully avoids many
of the pitfalls that hinder multi-party application development in other languages.
Daml Ledger
===========
A Daml ledger is a distributed ledger system running `Daml smart contracts <#contract>`__ according to the :doc:`Daml ledger model </concepts/ledger-model/index>` and exposes the Daml Ledger APIs.
All current implementations of Daml ledgers consist of a Daml driver that utilises an underlying Synchronization Technology to either implement the Daml ledger directly, or to run the Canton protocol.
Canton Ledger
-------------
A Canton ledger is a privacy-enabled Daml ledger implemented using the Canton application, nodes, and protocol.
Canton Protocol
===============
The Canton protocol is the technology which synchronizes `participant nodes <#participant-node>`__ across any Daml-enabled blockchain or database. The Canton protocol not only makes Daml
applications portable between different underlying `synchronization technologies <#synchronization-technology>`__, but also allows applications to transact with each other across them.
.. Synchronization technology. Not 'Environment', 'Infrastructure layer', 'Messaging layer', 'Topology layer', 'Underlying <enter-any-previous-term>'
Synchronization Technology
==========================
The synchronization technology is the database or blockchain that Daml uses for synchronization, messaging, and topology. Daml runs on a range of synchronization technologies, from centralized
databases to fully distributed deployments, and users can employ the technology that best suits their technical and operational needs.
Daml Drivers
============
Daml drivers enable a `ledger <#daml-ledger>`__ to be implemented on top of different `synchronization technologies <#synchronization-technology>`__; a database or distributed ledger technology.
Daml Language Concepts
**********************
Contract
========
**Contracts** are items on a `ledger <#daml-ledger>`__. They are created from blueprints called `templates <#template>`__, and include:
- data (parameters)
- roles (`signatory`_, `observer`_)
- `choices <#choice>`__ (and `controllers <#controller>`__)
Contracts are immutable: once they are created on the ledger, the information in the contract cannot be changed. The only thing that can happen to them is that they can be `archived <#active-contract-archived-contract>`__.
Active Contract, Archived Contract
----------------------------------
When a `contract <#contract>`__ is created on a `ledger <#daml-ledger>`__, it becomes **active**. But that doesn't mean it will remain active forever: it can be **archived**. This can happen:
- if the `signatories <#signatory>`__ of the contract decide to archive it
- if a `consuming choice <#consuming-choice>`__ is exercised on the contract
Once the contract is archived, it is no longer valid, and `choices <#choice>`__ on the contract can no longer be exercised.
Template
========
A **template** is a blueprint for creating a `contract <#contract>`__. This is the Daml code you write.
For full documentation on what can be in a template, see :doc:`/daml/reference/templates`.
Choice
======
A **choice** is something that a `party <#party>`__ can `exercise <#exercise>`__ on a `contract <#contract>`__. You write code in the choice body that specifies what happens when the choice is exercised: for example, it could create a new contract.
Choices give one a way to transform the data in a contract: while the contract itself is immutable, you can write a choice that `archives <#active-contract-archived-contract>`__ the contract and creates a new version of it with updated data.
A choice can only be exercised by its `controller <#controller>`__. Within the choice body, you have the `authorization <#authorization-signing>`__ of all of the contract's `signatories <#signatory>`__.
For full documentation on choices, see :doc:`/daml/reference/choices`.
Consuming Choice
----------------
A **consuming choice** means that, when the choice is exercised, the `contract <#contract>`__ it is on will be `archived <#active-contract-archived-contract>`__. The alternative is a `nonconsuming choice <#nonconsuming-choice>`__.
Consuming choices can be `preconsuming <#preconsuming-choice>`__ or `postconsuming <#postconsuming-choice>`__.
Preconsuming Choice
~~~~~~~~~~~~~~~~~~~
A `choice <#choice>`__ marked **preconsuming** will be `archived <#active-contract-archived-contract>`__ at the start of that `exercise <#exercise>`__.
Postconsuming Choice
~~~~~~~~~~~~~~~~~~~~
A `choice <#choice>`__ marked **postconsuming** will not be `archived <#active-contract-archived-contract>`__ until the end of the `exercise <#exercise>`__ choice body.
Nonconsuming Choice
--------------------
A **nonconsuming choice** does NOT `archive <#active-contract-archived-contract>`__ the `contract <#contract>`__ it is on when `exercised <#exercise>`__. This means the choice can be exercised more than once on the same `contract <#contract>`__.
Disjunction Choice, Flexible Controllers
----------------------------------------
A **disjunction choice** has more than one `controller <#controller>`__.
If a contract uses **flexible controllers**, this means you don't specify the controller of the `choice <#choice>`__ at `creation <#create>`__ time of the `contract <#contract>`__, but at `exercise <#exercise>`__ time.
.. _glossary-party:
Party
=====
A **party** represents a person or legal entity. Parties can `create contracts <#create>`__ and `exercise choices <#exercise>`__.
`Signatories <#signatory>`_, `observers <#observer>`__, `controllers <#controller>`__, and `maintainers <#maintainer>`__ all must be parties, represented by the ``Party`` data type in Daml and determine who may see
contract data.
Parties are hosted on participant nodes and a participant node can host more than one party. A party can be hosted on several participant nodes simultaneously.
.. Something about how they work in the `execution engine`.
Signatory
---------
A **signatory** is a `party <#party>`__ on a `contract <#contract>`__. The signatories MUST consent to the `creation <#create>`__ of the contract by `authorizing <#authorization-signing>`__ it: if they don't, contract creation will fail. Once the contract is created, signatories can see the contracts and all exercises of that contract.
For documentation on signatories, see :doc:`/daml/reference/templates`.
Observer
--------
An **observer** is a `party <#party>`__ on a `contract <#contract>`__. Being an observer allows them to see that instance and all the information about it. They do NOT have to `consent to <#authorization-signing>`__ the creation.
For documentation on observers, see :doc:`/daml/reference/templates`.
Controller
----------
A **controller** is a `party <#party>`__ that is able to `exercise <#exercise>`__ a particular `choice <#choice>`__ on a particular `contract <#contract>`__.
Controllers must be at least an `observer`_, otherwise they can't see the contract to exercise it on. But they don't have to be a `signatory`_. This enables the :doc:`propose-accept pattern </daml/patterns/initaccept>`.
Choice Observer
---------------
A **choice observer** is a `party <#party>`__ on a `choice <#choice>`__. Choice observers are guaranteed to see the choice being exercised and all its consequences with it.
Stakeholder
-----------
**Stakeholder** is not a term used within the Daml language, but the concept refers to the `signatories <#signatory>`__ and `observers <#observer>`__ collectively. That is, it means all of the `parties <#party>`__ that are interested in a `contract <#contract>`__.
Maintainer
----------
The **maintainer** is a `party <#party>`__ that is part of a `contract key <#contract-key>`__. They must always be a `signatory`_ on the `contract <#contract>`__ that they maintain the key for.
It's not possible for keys to be globally unique, because there is no party that will necessarily know about every contract. However, by including a party as part of the key, this ensures that the maintainer *will* know about all of the contracts, and so can guarantee the uniqueness of the keys that they know about.
For documentation on contract keys, see :doc:`/daml/reference/contract-keys`.
Authorization, Signing
======================
The Daml runtime checks that every submitted transaction is **well-authorized**, according to the :doc:`authorization rules of the ledger model </concepts/ledger-model/ledger-integrity>`, which guarantee the integrity of the underlying ledger.
A Daml update is the composition of update actions created with one of the items in the table below. A Daml update is well-authorized when **all** its contained update actions are well-authorized. Each operation has an associated set of parties that need to authorize it:
.. list-table:: Updates and required authorization
:header-rows: 1
* - Update action
- Type
- Authorization
* - ``create``
- ``(Template c) => c -> Update (ContractId c)``
- All signatories of the created contract
* - ``exercise``
- ``ContractId c -> e -> Update r``
- All controllers of the choice
* - ``fetch``
- ``(Template c) => ContractId c -> Update c``
- One of the union of signatories and observers of the fetched contract
* - ``fetchByKey``
- ``k -> Update (ContractId c, c)``
- Same as ``fetch``
* - ``lookupByKey``
- ``k -> Update (Optional (ContractId c))``
- All key maintainers
At runtime, the Daml execution engine computes the required authorizing parties from this mapping. It also computes which parties have given authorization to the update in question. A party is giving authorization to an update in one of two ways:
- It is the signatory of the contract that contains the update action.
- It is an element of the controllers executing the choice containing the update action.
Only if all required parties have given their authorization to an update action is the update action well-authorized and therefore executed. A missing authorization leads to the abortion of the update action and the failure of the containing transaction.
It is noteworthy that authorizing parties are always determined only from the local context of the choice in question, that is, its controllers and the contract's signatories. Authorization is never inherited from earlier execution contexts.
Standard Library
================
The **Daml standard library** is a set of `Daml` functions, classes and more that make developing with Daml easier.
For documentation, see :doc:`/daml/stdlib/index`.
Agreement
=========
An **agreement** is part of a `contract <#contract>`__. It is text that explains what the contract represents.
It can be used to clarify the legal intent of a contract, but this text isn't evaluated programmatically.
See :doc:`/daml/reference/templates`.
Create
======
A **create** is an update that creates a `contract <#contract>`__ on the `ledger <#daml-ledger>`__.
Contract creation requires `authorization <#authorization-signing>`__ from all its `signatories <#signatory>`__, or the create will fail. For how to get authorization, see the :doc:`propose-accept </daml/patterns/initaccept>` and :doc:`multi-party agreement </daml/patterns/multiparty-agreement>` patterns.
A `party <#party>`__ `submits <#submitting-commands-writing-to-the-ledger>`__ a create `command <#commands>`__.
See :doc:`/daml/reference/updates`.
Exercise
========
An **exercise** is an action that exercises a `choice <#choice>`__ on a `contract <#contract>`__ on the `ledger <#daml-ledger>`__. If the choice is `consuming <#consuming-choice>`__, the exercise will `archive <#active-contract-archived-contract>`__ the contract; if it is `nonconsuming <#nonconsuming-choice>`__, the contract will stay active.
Exercising a choice requires `authorization <#authorization-signing>`__ from all of the `controllers <#controller>`__ of the choice.
A `party <#party>`__ `submits <#submitting-commands-writing-to-the-ledger>`__ an exercise `command <#commands>`__.
See :doc:`/daml/reference/updates`.
Daml Script
===========
**Daml Script** provides a way of testing Daml code during development. You can run Daml Script inside `Daml Studio <#daml-studio>`__, or write them to be executed on `Sandbox <#sandbox>`__ when it starts up.
They're useful for:
- expressing clearly the intended workflow of your `contracts <#contract>`__
- ensuring that parties can exclusively create contracts, observe contracts, and exercise choices that they are meant to
- acting as regression tests to confirm that everything keeps working correctly
In Daml Studio, Daml Script runs in an emulated ledger. You specify a linear sequence of actions that various parties take, and these are evaluated in order, according to the same consistency, authorization, and privacy rules as they would be on a Daml ledger. Daml Studio shows you the resulting `transaction <#transactions>`__ graph, and (if a Daml Script fails) what caused it to fail.
See :ref:`testing-using-script`.
.. Damle, Daml runtime, Daml execution engine
.. ==========================================
.. The **Daml runtime** (sometimes also called the Daml execution engine or Damle)...
Contract Key
============
A **contract key** allows you to uniquely identify a `contract <#contract>`__ of a particular `template <#template>`__, similarly to a primary key in a database table.
A contract key requires a `maintainer <#maintainer>`__: a simple key would be something like a tuple of text and maintainer, like ``(accountId, bank)``.
See :doc:`/daml/reference/contract-keys`.
.. _dar-file-dalf-file:
DAR File, DALF File
===================
A Daml Archive file, known as a ``.dar`` file is the result of compiling Daml code using the `Assistant <#assistant>`__ which can be interpreted using a Daml interpreter.
You upload ``.dar`` files to a `ledger <#daml-ledger>`__ in order to be able to create contracts from the templates in that file.
A ``.dar`` contains multiple ``.dalf`` files. A ``.dalf`` file is the output of a compiled Daml package or library. Its underlying format is `Daml-LF <#daml-lf>`__.
.. Package, module, library
.. ========================
.. TODO ask Robin
Developer Tools
***************
Assistant
=========
**Daml Assistant** is a command-line tool for many tasks related to Daml. Using it, you can create Daml projects, compile Daml projects into `.dar files <#dar-file-dalf-file>`__, launch other developer tools, and download new SDK versions.
See :doc:`/tools/assistant`.
Studio
======
**Daml Studio** is a plugin for Visual Studio Code, and is the IDE for writing Daml code.
See :doc:`/daml/daml-studio`.
Sandbox
=======
**Sandbox** is a lightweight ledger implementation. In its normal mode, you can use it for testing.
You can also run the Sandbox connected to a PostgreSQL back end, which gives you persistence and a more production-like experience.
See :doc:`/tools/sandbox`.
Navigator
=========
**Navigator** is a tool for exploring what's on the ledger. You can use it to see what contracts can be seen by different parties, and `submit commands <#submitting-commands-writing-to-the-ledger>`__ on behalf of those parties.
Navigator GUI
-------------
This is the version of Navigator that runs as a web app.
See :doc:`/tools/navigator/index`.
Building Applications
*********************
Application, Ledger Client, Integration
=======================================
**Application**, **ledger client** and **integration** are all terms for an application that sits on top of the `ledger <#daml-ledger>`__. These usually `read from the ledger <#reading-from-the-ledger>`_, `send commands <#submitting-commands-writing-to-the-ledger>`__ to the ledger, or both.
There's a lot of information available about application development, starting with the :doc:`/app-dev/app-arch` page.
Ledger API
==========
The **Ledger API** is an API that's exposed by any `ledger <#daml-ledger>`__ on a participant node. Users access and manipulate the ledger state through the Ledger API.
An alternative name for the Ledger API is the **gRPC Ledger API** if disambiguation from other technologies is needed.
See :doc:`/app-dev/ledger-api` page.
It includes the following :doc:`services </app-dev/services>`.
Command Submission Service
--------------------------
Use the **command submission service** to `submit commands <#submitting-commands-writing-to-the-ledger>`__ - either create commands or exercise commands - to the `ledger <#daml-ledger>`__. See :ref:`command-submission-service`.
Command Completion Service
--------------------------
Use the **command completion service** to find out whether or not `commands you have submitted <#submitting-commands-writing-to-the-ledger>`__ have completed, and what their status was. See :ref:`command-completion-service`.
Command Service
---------------
Use the **command service** when you want to `submit a command <#submitting-commands-writing-to-the-ledger>`__ and wait for it to be executed. See :ref:`command-service`.
Transaction Service
-------------------
Use the **transaction service** to listen to changes in the `ledger <#daml-ledger>`__, reported as a stream of `transactions <#transactions>`__. See :ref:`transaction-service`.
Active Contract Service
-----------------------
Use the **active contract service** to obtain a party-specific view of all `contracts <#contract>`__ currently `active <#active-contract-archived-contract>`__ on the `ledger <#daml-ledger>`__. See :ref:`active-contract-service`.
Package Service
---------------
Use the **package service** to obtain information about Daml packages available on the `ledger <#daml-ledger>`__. See :ref:`package-service`.
Ledger Identity Service
-----------------------
Use the **ledger identity service** to get the identity string of the `ledger <#daml-ledger>`__ that your application is connected to. See :ref:`ledger-identity-service`.
Ledger Configuration Service
----------------------------
Use the **ledger configuration service** to subscribe to changes in `ledger <#daml-ledger>`__ configuration. See :ref:`ledger-configuration-service`.
Ledger API Libraries
====================
The following libraries wrap the `ledger API <#ledger-api>`__ for more native experience applications development.
Java Bindings
-------------
An idiomatic Java library for writing `ledger applications <#application-ledger-client-integration>`__. See :doc:`/app-dev/bindings-java/index`.
Python Bindings
---------------
A Python library (formerly known as DAZL) for writing `ledger applications <#application-ledger-client-integration>`__. See :doc:`Python Bindings </app-dev/bindings-python>`.
Reading From the Ledger
=======================
`Applications <#application-ledger-client-integration>`__ get information about the `ledger <#daml-ledger>`__ by **reading** from it. You can't query the ledger, but you can subscribe to the transaction stream to get the events, or use the more sophisticated active contract service.
Submitting Commands, Writing To the Ledger
==========================================
`Applications <#application-ledger-client-integration>`__ make changes to the `ledger <#daml-ledger>`__ by **submitting commands**. You can't change it directly: an application submits a command of `transactions <#transactions>`__. The command gets evaluated by the runtime, and will only be accepted if it's valid.
For example, a command might get rejected because the transactions aren't `well-authorized <#authorization-signing>`__; because the contract isn't `active <#active-contract-archived-contract>`__ (perhaps someone else archived it); or for other reasons.
This is echoed in :ref:`Daml script <daml-script>`, where you can mock an application by having parties submit transactions/updates to the ledger. You can use ``submit`` or ``submitMustFail`` to express what should succeed and what shouldn't.
Commands
--------
A **command** is an instruction to add a transaction to the `ledger <#daml-ledger>`__.
.. Events
.. ======
.. TODO.
.. _daml-lf:
Participant Node
================
The participant node is a server that provides users consistent programmatic access to a ledger through the `Ledger API <#ledger-api>`__. The participant node handles transaction signing and
validation, such that users don't have to deal with cryptographic primitives but can trust the participant node that the data they are observing has been properly verified to be correct.
Sub-transaction Privacy
=======================
Sub-transaction privacy is where participants to a transaction only `learn about the subset of the transaction <https://docs.daml.com/concepts/ledger-model/ledger-privacy.html>`__ they are
directly involved in, but not about any other part of the transaction. This applies to both the content of the transaction as well as other involved participants.
Daml-LF
=======
When you compile Daml source code into a `.dar file <#dar-file-dalf-file>`__, the underlying format is **Daml-LF**. Daml-LF is similar to Daml, but is stripped down to a core set of features. The relationship between the surface Daml syntax and Daml-LF is loosely similar to that between Java and JVM bytecode.
As a user, you don't need to interact with Daml-LF directly. But internally, it's used for:
- executing Daml code on the Sandbox or on another platform
- sending and receiving values via the Ledger API (using a protocol such as gRPC)
- generating code in other languages for interacting with Daml models (often called “codegen”)
Composability
=============
Composability is the ability of a participant to extend an existing system with new Daml applications or new topologies unilaterally without requiring cooperation from anyone except the
directly involved participants who wish to be part of the new application functionality.
.. _trust-domain:
Trust Domain
============
A trust domain encompasses a part of the system (in particular, a Daml ledger) operated by a single real-world entity. This subsystem may consist of one or more physical nodes. A single physical machine is always assumed to be controlled by exactly one real-world entity.
Canton Concepts
***************
Domain
======
The domain provides total ordered, guaranteed delivery multi-cast to the participants. This means that participant nodes communicate with each other by sending end-to-end encrypted messages
through the domain.
The `sequencer service <#sequencer>`__ of the domain orders these messages without knowing about the content and ensures that every participant receives the messages in the same order.
The other services of the domain are the `mediator <#mediator>`__ and the `domain identity manager <#domain-identity-manager>`__.
Private Contract Store
======================
Every participant node manages its own private contract store (PCS) which contains only contracts the participant is privy to. There is no global state or global contract store.
Virtual Global Ledger
=====================
While every participant has their own private contract store (PCS), the `Canton protocol <#canton-protocol>`__ guarantees that the contracts which are stored in the PCS are well-authorized
and that any change to the store is justified, authorized and valid. The result is that every participant only possesses a small part of the *virtual global ledger*. All the local
stores together make up that *virtual global ledger* and they are thus synchronized. The Canton protocol guarantees that the virtual ledger provides integrity, privacy,
transparency and auditability. The ledger is logically global, even though physically, it runs on segregated and isolated domains that are not aware of each other.
Mediator
========
The mediator is a service provided by the `domain <#domain>`__ and used by the `Canton protocol <#canton-protocol>`__. The mediator acts as commit coordinator, collecting individual transaction verdicts issued by validating
participants and aggregates them into a single result. The mediator does not learn about the content of the transaction; it only learns about the involved participants.
Sequencer
=========
The sequencer is a service provided by the `domain <#domain>`__, used by the `Canton protocol <#canton-protocol>`__. The sequencer forwards encrypted addressed messages from participants and ensures that every member receives
the messages in the same order. Think about registered and sealed mail delivered according to the postal datestamp.
Domain Identity Manager
=======================
The Domain Identity Manager is a service provided by the `domain <#domain>`__, used by the `Canton protocol <#canton-protocol>`__. Participants join a new domain by registering with the domain identity manager. The domain
identity manager establishes a consistent identity state among all participants. The domain identity manager only forwards identity updates. It cannot invent them.
Consensus
=========
The Canton protocol does not use PBFT or any similar consensus algorithm. There is no proof of work or proof of stake involved. Instead, Canton uses a variant of a stakeholder based
two-phase commit protocol. As such, only stakeholders of a transaction are involved in it and need to process it, providing efficiency, privacy and horizontal scalability. Canton based
ledgers are resilient to malicious participants as long as there is at least a single honest participant. A domain integration itself might be using the consensus mechanism of the underlying
platform, but participant nodes will not be involved in that process.
.. Transaction
.. ===========
.. A transaction is composed of a series of actions.
.. Create (trans)action
.. --------------------
.. Exercise (trans)action
.. ----------------------
.. Fetch (trans)action
.. -------------------
.. Commit
.. ======
.. Privacy, visibility
.. ===================
.. Consistency
.. ===========
.. Conformance
.. ===========

View File

@ -1,173 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _identity-package-management:
Identity and Package Management
###############################
Since Daml ledgers enable parties to automate the management of their rights and obligations through smart contract code, they also have to provide party and code management functions.
Hence, this document addresses:
1. Management of parties' digital identifiers in a Daml ledger.
2. Distribution of smart contract code between the parties connected to the same Daml ledger.
The access to this functionality is usually more restricted compared to the other Ledger API services, as they are part of the administrative API.
This document is intended for the users and implementers of this API.
The administrative part of the Ledger API provides both a :ref:`party management service <com.daml.ledger.api.v1.admin.PartyManagementService>` and a :ref:`package service <com.daml.ledger.api.v1.admin.PackageManagementService>`.
Any implementation of the party and package services is guaranteed to accept inputs and provide outputs of the format specified by these services.
However, the services' *behavior* -- the relationship between the inputs and outputs that the various parties observe -- is largely implementation dependent.
The remainder of the document will present:
#. The minimal behavioral guarantees for identity and package services across all ledger implementations. The service users can rely on these guarantees, and the implementers must ensure that they hold.
#. Guidelines for service users, explaining how different ledgers handle the unspecified part of the behavior.
.. _identity-management:
Identity Management
*******************
A Daml ledger may freely define its own format of party and participant node identifiers, with some minor constraints on the identifiers' serialized form.
For example, a ledger may use human-readable strings as identifiers, such as "Alice" or "Alice's Bank".
A different ledger might use public keys as identifiers, or the keys' fingerprints.
The applications should thus not rely on the format of the identifier -- even a software upgrade of a Daml ledger may introduce a new format.
By definition, identifiers identify parties, and are thus unique for a ledger.
They do not, however, have to be unique across different ledgers.
That is, two identical identifiers in two different ledgers do not necessarily identify the same real-world party.
Moreover, a real-world entity can have multiple identifiers (and thus parties) within the same ledger.
Since the identifiers might be difficult to interpret and manage for humans, the ledger may also accompany each identifier with a user-friendly **display name**.
Unlike the identifier, the display name is not guaranteed to be unique, and two different participant nodes might return different display names for the same party identifier.
Furthermore, a display name is in general not guaranteed to have any link to real world identities.
For example, a party with a display name "Attorney of Nigerian Prince" might well be controlled by a real-world entity without a bar exam.
However, particular ledger deployments might make stronger guarantees about this link.
Finally, the association of identifiers to display names may change over time.
For example, a party might change its display name from "Bruce" to "Caitlyn" -- as long as the identifier remains the same, so does the party.
.. _provisioning-ledger-identifiers:
Provisioning Identifiers
========================
The set of parties of any Daml ledger is dynamic: new parties may always be added to the system.
The first step in adding a new party to the ledger is to provision a new identifier for the party.
The Ledger API provides an :ref:`AllocateParty <com.daml.ledger.api.v1.admin.AllocatePartyRequest>` method for this purpose.
The method, if successful, returns a new party identifier.
The ``AllocateParty`` call can take the desired identifier and display name as optional parameters, but these are merely hints and the ledger implementation may completely ignore them.
If the call returns a new identifier, the participant node serving this call is ready to host the party with this identifier.
For some ledgers (Daml for VMware Blockchain in particular), the returned identifier is guaranteed to be **unique** in the ledger; namely, no other call of the ``AllocateParty`` method at this or any other ledger participant may return the same identifier.
On Canton ledgers, the identifier is also unique as long as the participant node is configured correctly (in particular, it does not share its private key with other participant nodes).
After an identifier is returned, the ledger is set up in such a way that the participant node serving the call is allowed to issue commands and receive transactions on behalf of the party.
However, the newly provisioned identifier need not be visible to the other participant nodes.
For example, consider the setup with two participants ``P1`` and ``P2``, where the party ``Alice_123`` is hosted on ``P1``.
Assume that a new party ``Bob_456`` is next successfully allocated on ``P2``.
As long as ``P1`` and ``P2`` are connected to the same Canton domain or Daml ledger, ``Alice_123`` can now submit a command with ``Bob_456`` as an informee.
For diagnostics, the ledger provides a :ref:`ListKnownParties <com.daml.ledger.api.v1.admin.ListKnownPartiesRequest>` method which lists parties known to the participant node.
The parties can be local (i.e., hosted by the participant) or not.
.. _identifiers-and-authentication:
Identifiers and Authorization
=============================
To issue commands or receive transactions on behalf of a newly provisioned party, an application must provide a
proof to the party's hosting participant that they are authorized to represent the party.
Before the newly provisioned party can be used, the application will have to obtain a token for this party.
The issuance of tokens is specific to each ledger and independent of the Ledger API.
The same is true for the policy which the participants use to decide whether to accept a token.
To learn more about Ledger API security model, please read the :doc:`Authorization documentation </app-dev/authorization>`.
.. _identifiers-and-real-world:
Identifiers and the Real World
==============================
The "substrate" on which Daml workflows are built are the real-world obligations of the parties in the workflow.
To give value to these obligations, they must be connected to parties in the real world.
However, the process of linking party identifiers to real-world entities is left to the ledger implementation.
In centralized deployments, one can simplify the process by trusting the operator of the writer node(s) with providing the link to the real world.
For example, if the operator is a stock exchange, it might guarantee that a real-world exchange participant whose legal name is "Bank Inc." is represented by a ledger party with the identifier "Bank Inc.".
Alternatively, it might use a random identifier, but guarantee that the display name is "Bank Inc.".
In general, a ledger might not have such a single store of identities.
The solutions for linking the identifiers to real-world identities could rely on certificate chains, `verifiable credentials <https://www.w3.org/TR/vc-data-model/>`__, or other mechanisms.
The mechanisms can be implemented off-ledger, using Daml workflows (for instance, a "know your customer" workflow), or a combination of these.
.. _package-management:
Package Management
******************
All Daml ledgers implement endpoints that allow for provisioning new Daml code to the ledger.
The vetting process for this code, however, depends on the particular ledger implementation and its configuration.
The remainder of this section describes the endpoints and general principles behind the vetting process.
The details of the process are ledger-dependent.
.. _package-formats-and-identifiers:
Package Formats and Identifiers
===============================
Any code -- i.e., Daml templates -- to be uploaded must be compiled down to the :ref:`Daml-LF <daml-lf>` language.
The unit of packaging for Daml-LF is the :ref:`.dalf <dar-file-dalf-file>` file.
Each ``.dalf`` file is uniquely identified by its **package identifier**, which is the hash of its contents.
Templates in a ``.dalf`` file can reference templates from other ``.dalf`` files, i.e., ``.dalf`` files can depend on other ``.dalf`` files.
A :ref:`.dar <dar-file-dalf-file>` file is a simple archive containing multiple ``.dalf`` files, and has no identifier of its own.
The archive provides a convenient way to package ``.dalf`` files together with their dependencies.
The Ledger API supports only ``.dar`` file uploads.
Internally, the ledger implementation need not (and often will not) store the uploaded ``.dar`` files, but only the contained ``.dalf`` files.
.. _package-management-api:
Package Management API
======================
The package management API supports two methods:
- :ref:`UploadDarFile <com.daml.ledger.api.v1.admin.UploadDarFileRequest>` for uploading ``.dar`` files.
The ledger implementation is, however, free to reject any and all packages and return an error.
Furthermore, even if the method call succeeds, the ledger's vetting process might restrict the usability of the template.
For example, assume that Alice successfully uploads a ``.dar`` file to her participant containing a ``NewTemplate`` template.
It may happen that she can now issue commands that create ``NewTemplate`` instances with Bob as a stakeholder, but that all commands that create ``NewTemplate`` instances with Charlie as a stakeholder fail.
- :ref:`ListKnownPackages <com.daml.ledger.api.v1.admin.ListKnownPackagesRequest>` that lists the ``.dalf`` packages vetted for usage at the participant node.
Like with the previous method, the usability of the listed templates depends on the ledger's vetting process.
.. _package-management-vetting:
Package Vetting
===============
Using a Daml package entails running its Daml code.
The Daml interpreter ensures that the Daml code cannot interact with the environment of the system on which it is executing.
However, the operators of the ledger infrastructure nodes may still wish to review and vet any Daml code before allowing it to execute.
One reason for this is that the Daml interpreter currently lacks a notion of reproducible resource limits, and executing a Daml contract might result in high memory or CPU usage.
Thus, Daml ledgers generally allow some form of vetting a package before running its code on a node.
Not all nodes in a Daml ledger must vet all packages, as it is possible that some of them will not execute the code.
The exact vetting mechanism is ledger-dependent.
For example, in the :ref:`Daml Sandbox <sandbox-manual>`, the vetting is implicit: uploading a package through the Ledger API already vets the package, since it's assumed that only the system administrator has access to these API facilities.
The vetting process can be manual, where an administrator inspects each package, or it can be automated, for example, by accepting only packages with a digital signature from a trusted package issuer.
In Canton, participant nodes also only need to vet code for the contracts of the parties they host.
As only participants execute contract code, only they need to vet it.
The vetting results may also differ at different participants.
For example, participants ``P1`` and ``P2`` might vet a package containing a ``NewTemplate`` template, whereas ``P3`` might reject it.
In that case, if Alice is hosted at ``P1``, she can create ``NewTemplate`` instances with stakeholder Bob who is hosted at ``P2``, but not with stakeholder Charlie if he's hosted at ``P3``.
.. _package-upgrades:
Package Upgrades
================
The Ledger API does not have any special support for package upgrades.
A new version of an existing package is treated the same as a completely new package, and undergoes the same vetting process.
Upgrades to active contracts can be done by the Daml code of the new package version, by archiving the old contracts and creating new ones.

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 29 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 29 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 74 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 46 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 34 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 27 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 27 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 32 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 13 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 32 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 38 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 23 KiB

View File

@ -1,483 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _interoperable-ledgers:
Daml Ledger Interoperability
############################
Certain Daml ledgers can interoperate with other Daml ledgers.
That is, the contracts created on one ledger can be used and archived in transactions on other ledgers.
Some Participant Nodes can connect to multiple ledgers and provide their parties unified access to those ledgers via the :ref:`Ledger API <ledger-api-services>`.
For example, when an organization initially deploys two workflows to two Daml ledgers, it can later compose those workflows into a larger workflow that spans both ledgers.
Interoperability may limit the visibility a Participant Node has into a party's ledger projection, i.e., its :ref:`local ledger <local-ledger>`, when the party is hosted on multiple Participant Nodes.
These limitations influence what parties can observe via the Ledger API of each Participant Node.
In particular, interoperability affects which events a party observes and their order.
This document explains the visibility limitations due to interoperability and their consequences for the Transaction Service, by :ref:`example <interop-limitation-examples>` and formally by introducing interoperable versions of :ref:`causality graphs <interop-causality-graph>` and :ref:`projections <ledger-aware-projection>`.
The presentation assumes that you are familiar with the following concepts:
* The :ref:`Ledger API <ledger-api-services>`
* The :ref:`Daml Ledger Model <da-ledgers>`
* :ref:`Local ledgers and causality graphs <local-ledger>`
.. note::
Interoperability for Daml ledgers is under active development.
This document describes the vision for interoperability
and gives an idea of how the Ledger API services may change and what guarantees are provided.
The described services and guarantees may change without notice as the interoperability implementation proceeds.
.. _interop-limitation-examples:
Interoperability Examples
*************************
.. _interoperable-topology:
Topology
========
Participant Nodes connect to Daml ledgers and parties access projections of these ledgers via the Ledger API.
The following picture shows such a setup.
.. https://app.lucidchart.com/documents/edit/6b818d37-cf4c-4513-9d31-d68acddf4533
.. figure:: ./images/multiple-domains.svg
:align: center
:name: multiple-ledgers
:alt: A sample topology with two parties, three participant nodes, and two ledgers, described in detail through the remainder of this section.
Example topology with two interoperable ledgers
The components in this diagram are the following:
* There is a set of interoperable **Daml ledgers**: Ledger 1 (green) and Ledger 2 (yellow).
* Each **Participant Node** is connected to a subset of the Daml ledgers.
- Participant Nodes 1 and 3 are connected to Ledger 1 and 2.
- Participant Node 2 is connected to Ledger 1 only.
* Participant Nodes host parties on a subset of the Daml ledgers they are connected to.
A Participant Node provides a party access to the Daml ledgers that it hosts the party on.
- Participant Node 1 hosts Alice on Ledger 1 and 2.
- Participant Node 2 hosts Alice on Ledger 1.
- Participant Node 3 hosts the painter on Ledger 1 and 2.
.. _interoperable-aggregation:
Aggregation at the Participant
==============================
The Participant Node assembles the updates from these ledgers and outputs them via the party's Transaction Service and Active Contract Service.
When a Participant Node hosts a party only on a subset of the interoperable Daml ledgers,
then the transaction and active contract services of the Participant Node are derived only from those ledgers.
For example, in the :ref:`above topology <multiple-ledgers>`, when a transaction creates a contract with stakeholder Alice on Ledger 2,
then `P1`\ 's transaction stream for Alice will emit this transaction and report the contract as active, but Alice's stream at `P2` will not.
.. _enter-leave-event:
Enter and Leave Events
======================
With interoperability, a transaction can use a contract whose creation was recorded on a different ledger.
In the :ref:`above topology <multiple-ledgers>`, e.g., one transaction creates a contract `c1` with stakeholder Alice on Ledger 1 and another archives the contract on Ledger 2.
Then the Participant Node `P2` outputs the **Create** action as a ``CreatedEvent``, but not the **Exercise** in form of an ``ArchiveEvent`` on the transaction service
because Ledger 2 cannot notify `P2` as `P2` does not host Alice on Ledger 2.
Conversely, when one transaction creates a contract `c2` with stakeholder Alice on Ledger 2 and another archives the contract on Ledger 1, then `P2` outputs the ``ArchivedEvent``, but not the ``CreatedEvent``.
To keep the transaction stream consistent, `P2` additionally outputs a **Leave** `c1` action on Alice's transaction stream.
This action signals that the Participant Node no longer outputs events concerning this contract;
in particular not when the contract is archived.
The contract is accordingly no longer reported in the active contract service and cannot be used by command submissions.
Conversely, `P2` outputs an **Enter** `c2` action some time before the ``ArchivedEvent`` on the transaction stream.
This action signals that the Participant Node starts outputting events concerning this contract.
The contract is reported in the Active Contract Service and can be used by command submission.
The actions **Enter** and **Leave** are similar to a **Create** and a consuming **Exercise** action, respectively, except that **Enter** and **Leave** may occur several times for the same contract whereas
there should be at most one **Create** action and at most one consuming **Exercise** action for each contract.
These **Enter** and **Leave** events are generated by the underlying interoperability protocol.
This may happen as part of command submission or for other reasons, e.g., load balancing.
It is guaranteed that the **Enter** action precedes contract usage, subject to the trust assumptions of the underlying ledgers and the interoperability protocol.
A contract may enter and leave the visibility of a Participant Node several times.
For example, suppose that the painter submits the following commands and their commits end up on the given ledgers.
#. Create a contract `c` with signatories Alice and the painter on Ledger 2
#. Exercise a non-consuming choice `ch1` on `c` on Ledger 1.
#. Exercise a non-consuming choice `ch2` on `c` on Ledger 2.
#. Exercise a consuming choice `ch3` on `c` on Ledger 1.
Then, the transaction tree stream that `P2` provides for `A` contains five actions involving contract `c`: **Enter**, non-consuming **Exercise**, **Leave**, **Enter**, consuming **Exercise**.
Importantly, `P2` must not omit the **Leave** action and the subsequent **Enter**, even though they seem to cancel out.
This is because their presence indicates that `P2`\ 's event stream for Alice may miss some events in between; in this example, exercising the choice `ch2`.
The flat transaction stream by `P2` omits the non-consuming exercise choices.
It nevertheless contains the three actions **Enter**, **Leave**, **Enter** before the consuming **Exercise**.
This is because the Participant Node cannot know at the **Leave** action that there will be another **Enter** action coming.
In contrast, `P1` need not output the **Enter** and **Leave** actions at all in this example because `P1` hosts Alice on both ledgers.
.. _cross-ledger-transaction:
Cross-ledger Transactions
=========================
With interoperability, a cross-ledger transaction can be committed on several interoperable Daml ledgers simultaneously.
Such a cross-ledger transaction avoids some of the synchronization overhead of **Enter** and **Leave** actions.
When a cross-ledger transaction uses contracts from several Daml ledgers,
stakeholders may witness actions on their contracts that are actually not visible on the Participant Node.
For example, suppose that the :ref:`split paint counteroffer workflow <split-counteroffer-ledger>` from the causality examples is committed as follows:
The actions on `CounterOffer` and `PaintAgree` contracts are committed on Ledger 1.
All actions on `Iou`\ s are committed on Ledger 2, assuming that some Participant Node hosts the Bank on Ledger 2.
The last transaction is a cross-ledger transaction because the archival of the `CounterOffer` and the creation of the `PaintAgree`\ ment commits on Ledger 1 simultaneously with the transfer of Alice's `Iou` to the painter on Ledger 2.
For the last transaction, Participant Node 1 notifies Alice of the transaction tree, the two archivals and the `PaintAgree` creation via the Transaction Service as usual.
Participant Node 2 also outputs the whole transaction tree on Alice's transaction tree stream, which contains the consuming **Exercise** of Alice's `Iou`.
However, it has not output the **Create** of Alice's `Iou` because `Iou` actions commit on Ledger 2, on which Participant Node 2 does not host Alice.
So Alice merely *witnesses* the archival even though she is an :ref:`informee <def-informee>` of the exercise.
The **Exercise** action is therefore marked as merely being witnessed on Participant Node 2's transaction tree stream.
In general, an action is marked as **merely being witnessed** when a party is an informee of the action, but the action is not committed on a ledger on which the Participant Node hosts the party.
Unlike **Enter** and **Leave**, such witnessed actions do not affect causality from the participant's point of view and therefore provide weaker ordering guarantees.
Such witnessed actions show up neither in the flat transaction stream nor in the Active Contracts Service.
For example, suppose that the **Create** `PaintAgree` action commits on Ledger 2 instead of Ledger 1, i.e., only the `CounterOffer` actions commit on Ledger 1.
Then, Participant Node 2 marks the **Create** `PaintAgree` action also as merely being witnessed on the transaction tree stream.
Accordingly, it does not report the contract as active nor can Alice use the contract in her submissions via Participant Node 2.
.. _interop-causality-graph:
Multi-ledger Causality Graphs
*****************************
This section generalizes :ref:`causality graphs <causality-graph>` to the interoperability setting.
Every active Daml contract resides on at most one Daml ledger.
Any use of a contract must be committed on the Daml ledger where it resides.
Initially, when the contract is created, it takes up residence on the Daml ledger on which the **Create** action is committed.
To use contracts residing on different Daml ledgers, cross-ledger transactions are committed on several Daml ledgers.
However, cross-ledger transactions incur overheads and if a contract is frequently used on a Daml ledger that is not its residence, the interoperability protocol can migrate the contract to the other Daml ledger.
The process of the contract giving up residence on the origin Daml ledger and taking up residence on the target Daml ledger is called a **contract transfer**.
The **Enter** and **Leave** events on the transaction stream originate from such contract transfers, as will be explained below.
Moreover, contract transfers are synchronization points between the origin and target Daml ledgers and therefore affect the ordering guarantees.
We therefore generalize causality graphs for interoperability.
Definition »Transfer action«
A **transfer action** on a contract `c` is written **Transfer** `c`.
The **informees** of the transfer actions are the stakeholders of `c`.
In the following, the term *action* refers to transaction actions (**Create**, **Exercise**, **Fetch**, and **NoSuchKey**) as well as transfer actions.
In particular, a transfer action on a contract `c` is an action on `c`.
Transfer actions do not appear in transactions though.
So a transaction action cannot have a transfer action as a consequence and transfer actions do not have consequences at all.
Definition »Multi-Ledger causality graph«
A **multi-ledger causality graph** `G` for a set `Y` of Daml ledgers is a finite, transitively closed, directed acyclic graph.
The vertices are either transactions or transfer actions.
Every action is possibly annotated with an **incoming ledger** and an **outgoing ledger** from `Y` according to the following table:
+---------------+-----------------+-----------------+
| Action | incoming ledger | outgoing ledger |
+===============+=================+=================+
| **Create** | no | yes |
+---------------+-----------------+-----------------+
| consuming | | |
| **Exercise** | yes | no |
+---------------+-----------------+-----------------+
| non-consuming | | |
| **Exercise** | yes | yes |
+---------------+-----------------+-----------------+
| **Fetch** | yes | yes |
+---------------+-----------------+-----------------+
| **NoSuchKey** | no | no |
+---------------+-----------------+-----------------+
| **Transfer** | maybe | maybe |
+---------------+-----------------+-----------------+
For non-consuming **Exercise** and **Fetch** actions, the incoming ledger must be the same as the outgoing ledger.
**Transfer** actions must have at least one of them.
A **transfer** action with both set represents a complete transfer.
If only the incoming ledger is set, it represents the partial information of an **Enter** event;
if only outgoing is set, it is the partial information of a **Leave** event.
**Transfer** actions with missing incoming or outgoing ledger annotations are referred to as **Enter** or **Leave** actions, respectively.
The :ref:`action order <def-action-order>` generalizes to multi-ledger causality graphs accordingly.
In the :ref:`example for Enter and Leave events <enter-leave-event>` where the painter exercises three choices on contract `c` with signatories Alice and the painter, the four transactions yield the following multi-ledger causality graph.
Incoming and outgoing ledgers are encoded as colors (green for Ledger 1 and yellow for Ledger 2).
**Transfer** vertices are shown as circles, where the left half is colored with the incoming ledger and the right half with the outgoing ledger.
.. https://app.lucidchart.com/documents/edit/ef1e60ac-fa1e-40be-b1e6-7b3197d4543b
.. _interoperable-causality-graph-linear:
.. figure:: ./images/interoperable-causality-graph-linear.svg
:align: center
:width: 100%
:alt: Transactions and their colors in this graph: tx1 is yellow. tf1 is yellow and green. tx2 is green. tf2 is green and yellow. tx3 is yellow. tf3 is yellow and green. tx4 is green.
Multi-Ledger causality graph with transfer actions
.. note::
As for ordinary causality graphs, the diagrams for multi-ledger causality graphs omit transitive edges for readability.
As an example for a cross-domain transaction, consider the :ref:`split paint counteroffer workflow with the cross-domain transaction <cross-ledger-transaction>`.
The corresponding multi-ledger causality graph is shown below.
The last transaction `tx4` is a cross-ledger transaction because its actions have more than one color.
.. https://app.lucidchart.com/documents/edit/c3b120cf-1974-4ae8-8334-435642f94eed/
.. _counteroffer-interoperable-causality-graph:
.. figure:: ./images/counteroffer-interoperable-causality-graph.svg
:align: center
:width: 100%
:alt: The multi-ledger causality graph for the split paint counteroffer workflow. tx1 and tx3 are yellow. tx2 is green. tx4 is both yellow and green.
Multi-Ledger causality graph for the split paint counteroffer workflow on two Daml ledgers
Consistency
===========
Definition »Ledger trace«
A **ledger trace** is a finite list of pairs `(a`:sub:`i`\ `, b`:sub:`i`\ `)`
such that `b`:sub:`i - 1` = `a`:sub:`i` for all `i` > 0.
Here `a`:sub:`i` and `b`:sub:`i` identify Daml ledgers or are the special value `NONE`,
which is different from all Daml ledger identifiers.
Definition »Multi-Ledger causal consistency for a contract«
Let `G` be a multi-ledger causality graph and `X` be a set of actions from `G` on a contract in `c`.
The graph `G` is **multi-ledger consistent for the contract** `c` on `X` if all of the following hold:
#. If `X` is not empty, then `X` contains a **Create** or at least one **Enter** action.
If it contains a create, then this create precedes all other actions in `X`.
If it does not, then there exists one **Enter** action that precedes all other actions in `X`.
#. `X` contains at most one **Create** action.
#. If `X` contains a consuming **Exercise** action `act`, then `act` follows all other actions in `X` in `G`\ 's action order.
#. All **Transfer** actions in `X` are ordered with all other actions in `X`.
#. For every maximal chain in `X` (i.e., maximal totally ordered subset of `X`), the sequence of `(`\ incoming ledger, outgoing ledger\ `)` pairs is a ledger trace, using `NONE` if the action does not have an incoming or outgoing ledger annotation.
The first three conditions mimic the conditions of :ref:`causal consistency <def-causal-consistency-contract>` for ordinary causality graphs.
They ensure that **Create** actions come first and consuming **Exercise** actions last.
An **Enter** action takes the role of a **Create** if there is no **Create**.
The fourth condition ensures that all transfer actions are synchronization points for a contract.
The last condition about ledger traces ensures that contracts reside on only one Daml ledger and all usages happen on the ledger of residence.
In particular, the next contract action after a **Leave** must be an **Enter**.
For example, the above :ref:`multi-ledger causality graph with transfer actions <interoperable-causality-graph-linear>` is multi-ledger consistent for `c`.
In particular, there is only one maximal chain in the actions on `c`, namely
**Create** `c` -> `tf1` -> **ExeN** `B` `c` `ch1` -> `tf2` -> **ExeN** `B` `c` `ch2` -> `tf3` -> **ExeN** `B` `c` `ch3`,
and for each edge `act`:sub:`1` -> `act`:sub:`2`, the outgoing ledger color of `act`:sub:`1` is the same as the incoming ledger color of `act`:sub:`2`.
The restriction to maximal chains ensures that no node is skipped.
For example, the (non-maximal) chain
**Create** `c` -> **ExeN** `B` `c` `ch1` -> `tf2` -> **ExeN** `B` `c` `ch2` -> `tf3` -> **Exe** `B` `c` `ch3`
is not a ledger trace because the outgoing ledger of the **Create** action (yellow) is not the same as the incoming ledger of the non-consuming **Exercise** action for `ch1` (green).
Accordingly, the subgraph without the `tf1` vertex is not multi-ledger consistent for `c` even though it is a multi-ledger causality graph.
Definition »Consistency for a multi-ledger causality graph«
Let `X` be a subset of actions in a multi-ledger causality graph `G`.
Then `G` is **multi-ledger consistent** for `X` (or `X`-**multi-ledger consistent**)
if `G` is multi-ledger consistent for all contracts `c` on the set of actions on `c` in `X`.
`G` is **multi-ledger consistent** if `G` is multi-ledger consistent on all the actions in `G`.
.. note::
There is no multi-ledger consistency requirement for contract keys yet.
So interoperability does not provide consistency guarantees beyond those that come from the contracts they reference.
In particular, contract keys need not be unique and **NoSuchKey** actions do not check that the contract key is unassigned.
The :ref:`multi-ledger causality graph for the split paint counteroffer workflow <counteroffer-interoperable-causality-graph>` is multi-ledger consistent.
In particular all maximal chains of actions on a contract are ledger traces:
+-------------------------+-----------------------------------------+
| contract | maximal chains |
+=========================+=========================================+
| `Iou Bank A` | **Create** -> **Fetch** -> **Exercise** |
+-------------------------+-----------------------------------------+
| `ShowIou A P Bank` | **Create** -> **Exercise** |
+-------------------------+-----------------------------------------+
| `Counteroffer A P Bank` | **Create** -> **Exercise** |
+-------------------------+-----------------------------------------+
| `Iou Bank P` | **Create** |
+-------------------------+-----------------------------------------+
| `PaintAgree P A` | **Create** |
+-------------------------+-----------------------------------------+
Minimality and Reduction
========================
When edges are added to an `X`-multi-ledger consistent causality graph such that it remains acyclic and transitively closed,
the resulting graph is again `X`-multi-ledger consistent.
The notions :ref:`minimally consistent <minimal-consistent-causality-graph>` and :ref:`reduction <def-reduction-causality-graph>` therefore generalize from ordinary causality graphs accordingly.
Definition »Minimal multi-ledger-consistent causality graph«
An `X`-multi-ledger consistent causality graph `G` is `X`\ -**minimal** if no strict subgraph of `G` (same vertices, fewer edges) is an `X`-multi-ledger consistent causality graph.
If `X` is the set of all actions in `G`, then `X` is omitted.
Definition »Reduction of a multi-ledger consistent causality graph«
For an `X`\ -multi-ledger consistent causality graph `G`, there exists a unique minimal `X`\ -multi-ledger consistent causality graph `reduce`:sub:`X`\ `(G)` with the same vertices and the edges being a subset of `G`.
`reduce`:sub:`X`\ `(G)` is called the `X`\ -**reduction** of `G`.
As before, `X` is omitted if it contains all actions in `G`.
Since multi-ledger causality graphs are acyclic, their vertices can be sorted topologically and the resulting list is again a causality graph, where every vertex has an outgoing edge to all later vertices.
If the original causality graph is `X`\ -consistent, then so is the topological sort, as topological sorting merely adds edges.
From Multi-ledger Causality Graphs to Ledgers
=============================================
Multi-Ledger causality graphs `G` are linked to ledgers `L` in the Daml Ledger Model via topological sort and reduction.
* Given a multi-ledger causality graph `G`,
drop the incoming and outgoing ledger annotations and all transfer vertices,
topologically sort the transaction vertices,
and extend the resulting list of transactions with the requesters to obtain a sequence of commits `L`.
* Given a sequence of commits `L`,
use the transactions as vertices and add an edge from `tx1` to `tx2` whenever `tx1`\ 's commit precedes `tx2`\ 's commit in the sequence.
Then add transfer vertices and incoming and outgoing ledger annotations as needed and connect them with edges to the transaction vertices.
This link preserves consistency only to some extent.
Namely, if a multi-ledger causality graph is multi-ledger consistent for a contract `c`, then the corresponding ledger is consistent for the contract `c`, too.
However, a multi-ledger-consistent causality graph does not yield a consistent ledger because key consistency may be violated.
Conversely, a consistent ledger does not talk about the incoming and outgoing ledger annotations and therefore cannot enforce that the annotations are consistent.
.. _ledger-aware-projection:
Ledger-aware Projection
***********************
A Participant Node maintains a local ledger for each party it hosts and the Transaction Service outputs a topological sort of this local ledger.
When the Participant Node hosts the party on several ledgers, this local ledger is a multi-ledger causality graph.
This section defines the ledger-aware projection of a multi-ledger causality graph, which yields such a local ledger.
Definition »Y-labelled action«
An action with incoming and outgoing ledger annotations is **Y-labelled** for a set `Y`
if its incoming or outgoing ledger annotation is an element of `Y`.
Definition »Ledger-aware projection for transactions«
Let `Y` be a set of Daml ledgers and `tx` a transaction whose actions are annotated with incoming and outgoing ledgers.
Let `Act` be the set of `Y`\ -labelled subactions of `tx` that the party `P` is an informee of.
The **ledger-aware projection** of `tx` for `P` on `Y` (`P`-**projection on** `Y`) consists of all the maximal elements of `Act` (w.r.t. the subaction relation) in execution order.
.. note::
Every action contains all its subactions.
So if `act` is included in the `P`\ -projection on `Y` of `tx`,
then all subactions of `act` are also part of the projection.
Such a subaction `act'` may not be `Y`\ -labelled itself though, i.e., belong to a different ledger.
If `P` is an informee of `act'`, the Participant Node will mark `act'` as merely being witnessed on `P`\ 's transaction stream, as explained below.
The :ref:`cross-domain transaction in the split paint counteroffer workflow <counteroffer-interoperable-causality-graph>`, for example, has the following projections for Alice and the painter on the `Iou` ledger (yellow) and the painting ledger (green).
Here, the projections on the green ledger include the actions of the yellow ledger because a projection includes the subactions.
.. https://www.lucidchart.com/documents/edit/f8ec5741-7a37-4cf5-92a9-bf7b3132ba8e
.. image:: ./images/projecting-transactions-paint-offer-ledger-aware.svg
:align: center
:width: 60%
:alt: Projections for various parties to the split paint counteroffer workflow. The green ledger projections include the yellow ledger, but the yellow ledger projections do not include the green ledger.
Definition »Projection for transfer actions«
Let `act` be a transfer action annotated with an incoming ledger and/or an outgoing ledger.
The **projection** of `act` on a set of ledgers `Y`
removes the annotations from `act` that are not in `Y`.
If the projection removes all annotations, it is empty.
The **projection** of `act` to a party `P` on `Y` (`P`\ -**projection** on `Y`)
is the projection of `act` on `Y` if `P` is a stakeholder of the contract, and empty otherwise.
Definition »Multi-ledger consistency for a party«
A multi-ledger causality graph `G` is **consistent for a party** `P` on a set of ledgers `Y` (`P`\ -**consistent** on `Y`)
if `G` is multi-ledger consistent on the set of `Y`\ -labelled actions in `G` of which `P` is a stakeholder informee.
The notions of `X`-minimality and `X`-reduction extend to a party `P` on a set `Y` of ledgers accordingly.
Definition »Ledger-aware projection for multi-ledger causality graphs«
Let `G` be a multi-ledger consistent causality graph and `Y` be a set of Daml ledgers.
The **projection** of `G` to party `P` on `Y` (`P`\ -**projection** on `Y`) is the `P`\ -reduction on `Y` of the following causality graph `G'`, which is `P`\ -consistent on `Y`:
* The vertices of `G'` are the vertices of `G` projected to `P` on `Y`, excluding empty projections.
* There is an edge between two vertices `v`:sub:`1` and `v`:sub:`2` in `G'` if there is an edge from the `G`\ -vertex corresponding to `v`:sub:`1` to the `G`\ -vertex corresponding to `v`:sub:`2`.
If `G` is a multi-ledger consistent causality graph, then the `P`\ -projection on `Y` is `P`\ -consistent on `Y`, too.
For example, the :ref:`multi-ledger causality graph for the split paint counteroffer workflow <counteroffer-interoperable-causality-graph>` is projected as follows:
.. https://app.lucidchart.com/documents/edit/d788b464-d670-4029-b2c0-d537c023052f
.. image:: ./images/counteroffer-causality-ledgeraware-projection.svg
:align: center
:width: 100%
:alt: More projections for various parties to the split paint counteroffer workflow, showing greater detail. Alice and the painter have green and yellow, just green, and just yellow projections; the bank has only a yellow projection.
The following points are worth highlighting:
* In Alice's projection on the green ledger, Alice witnesses the archival of her `Iou`.
As explained in the :ref:`interop-ordering-guarantees` below,
the **Exercise** action is marked as merely being witnessed
in the transaction stream of a Participant Node that hosts Alice on the green ledger but not on the yellow ledger.
Similarly, the Painter merely witnesses the **Create** of his `Iou` in the Painter's projection on the green ledger.
* In the Painter's projections, the `ShowIou` transaction `tx3` is unordered w.r.t. the `CounterOffer` acceptance in `tx4`
like in the :ref:`case of ordinary causality graphs <counteroffer-causality-projections>`.
The edge `tx3` -> `tx4` is removed by the reduction step during projection.
The projection of transfer actions can be illustrated with the :ref:`interoperable-causality-graph-linear`.
The `A`-projections on the yellow and green ledger look as follows.
The white color indicates that a transfer action has no incoming or outgoing ledger annotation.
That is, a **Leave** action is white on the right hand side and an **Enter** action is white on the left hand side.
.. https://app.lucidchart.com/documents/edit/edbf9aaf-b7da-4e68-b9c9-9e631c3a87bb
.. image:: ./images/transfer-projection.svg
:align: center
:width: 100%
:alt: Causality graphs showing only the green or only the yellow ledger.
.. _interop-ordering-guarantees:
Ledger API Ordering Guarantees
******************************
The Transaction Service and the Active Contract Service are derived from the local ledger that the Participant Node maintains for the party.
Let `Y` be the set of ledgers on which the Participant Node hosts a party.
The transaction tree stream outputs a topological sort of the party's local ledger on `Y`, with the following modifications:
#. **Transfer** actions with either an incoming or an outgoing ledger annotation are output as **Enter** and **Leave** events.
**Transfer** actions with both incoming and outgoing ledger annotations are omitted.
#. The incoming and outgoing ledger annotations are not output.
Transaction actions with an incoming or outgoing ledger annotation
that is not in `Y` are marked as merely being witnessed if the
party is an informee of the action.
#. **Fetch** nodes and **NoSuchKey** are omitted.
The flat transaction stream contains precisely the ``CreatedEvent``\ s, ``ArchivedEvent``\ s, and the **Enter** and **Leave** actions that correspond to **Create**, consuming **Exercise**, **Enter** and **Leave** actions in transaction trees on the transaction tree stream where the party is a stakeholder of the affected contract and that are not marked as merely being witnessed.
Similarly, the active contract service provides the set of contracts that are active at the returned offset according to the flat transaction stream.
That is, the contract state changes of all events from the transaction event stream are taken into account in the provided set of contracts.
The :ref:`ordering guarantees <ordering-guarantees>` for single Daml ledgers extend accordingly.
In particular, interoperability ensures that all local ledgers are projections of a virtual shared multi-ledger causality graph that connects to the Daml Ledger Model as described above.
The ledger validity guarantees therefore extend via the local ledgers to the Ledger API.

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 19 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 15 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 8.6 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 26 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 12 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 8.3 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 29 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 14 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 26 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 29 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 14 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 21 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 62 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 52 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 10 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 11 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 12 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 16 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 39 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 42 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 42 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 17 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 15 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 48 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 32 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 21 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 13 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 18 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 16 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 6.5 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 28 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 17 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 17 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 14 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 21 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 21 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 42 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 38 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 15 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 15 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 9.4 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 13 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 18 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 9.4 KiB

View File

@ -1,34 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _da-ledgers:
Daml Ledger Model
#################
Daml Ledgers enable multi-party workflows by providing
parties with a virtual *shared ledger*, which encodes the current
state of their shared contracts, written in Daml. At a high level, the interactions are visualized as
follows:
.. https://www.lucidchart.com/documents/edit/505709a9-e972-4272-b1fd-c01674c323b8
.. image:: ./images/da-ledger-model.svg
:alt: A diagram of the Daml Ledger model. Three parties (Party A, B, and C) interact independently with a single box labeled virtual shared ledger. Each party has two types of interactions: request change (an arrow from the party to the ledger) and access per-party view (an arrow from the ledger to the party).
The Daml ledger model defines:
#. what the ledger looks like - the structure of Daml ledgers
#. who can request which changes - the integrity model for Daml ledgers
#. who sees which changes and data - the privacy model for Daml ledgers
The below sections review these concepts of the ledger model in turn.
They also briefly describe the link between Daml and the model.
.. toctree::
:maxdepth: 3
ledger-structure
ledger-integrity
ledger-privacy
ledger-daml
ledger-exceptions

View File

@ -1,87 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _da-model-daml:
Daml: Define Contract Models Compactly
######################################
As described in preceding sections, both the integrity and privacy notions depend on
a contract model, and such a model must specify:
#. a set of allowed actions on the contracts, and
#. the signatories, contract observers, and
#. an optional agreement text associated with each contract, and
#. the optional key associated with each contract and its maintainers.
The sets of allowed actions can in general be infinite. For instance,
the actions in the IOU contract model considered earlier can be instantiated for an
arbitrary obligor and an arbitrary owner. As enumerating all
possible actions from an infinite set is infeasible, a more
compact way of representing models is needed.
Daml provides exactly that: a compact representation of a contract model.
Intuitively, the allowed actions are:
#. **Create** actions on all instances of templates such that
the template arguments satisfy the `ensure` clause of the
template
#. **Exercise** actions on a contract corresponding to
choices on that template, with given
choice arguments, such that:
#. The actors match the controllers of the choice.
That is, the controllers define the :ref:`required authorizers <da-ledgers-required-authorizers>` of the choice.
#. The choice observers match the observers annotated in the choice.
#. The exercise kind matches.
#. All assertions in the update block hold for the given choice arguments.
#. Create, exercise, fetch and key statements in the update block are represented
as create, exercise and fetch actions and key assertions in the consequences of the exercise
action.
#. **Fetch** actions on a contract corresponding to
a *fetch* of that instance inside of an update block.
The actors must be a non-empty subset of the contract stakeholders.
The actors are determined dynamically as follows: if the fetch appears in an update block of a choice
`ch` on a contract `c1`, and the fetched contract ID resolves to a contract `c2`, then the actors are defined as the
intersection of (1) the signatories of `c1` union the controllers of `ch` with (2) the stakeholders of `c2`.
A :ref:`fetchbykey` statement also produces a **Fetch** action with the actors determined in the same way.
A :ref:`lookupbykey` statement that finds a contract also translates into a **Fetch** action, but all maintainers of the key are the actors.
#. **NoSuchKey** assertions corresponding to a :ref:`lookupByKey` update statement for the given key that does not find a contract.
An instance of a Daml template, that is, a **Daml contract**,
is a triple of:
#. a contract identifier
#. the template identifier
#. the template arguments
The signatories of a Daml contract are derived from the template arguments and the explicit signatory annotations on the contract template.
The contract observers are also derived from the template arguments and include:
1. the observers as explicitly annotated on the template
2. all controllers `c` of every choice defined using the syntax :code:`controller c can...` (as opposed to the syntax :code:`choice ... controller c`)
For example, the following template exactly describes the contract model
of a simple IOU with a unit amount, shown earlier.
.. literalinclude:: ./daml/SimpleIou.daml
:language: daml
:start-after: SNIPPET-START
:end-before: SNIPPET-END
.. _da-daml-model-controller-observer:
In this example, the owner is specified as an observer, since it must be able to see the contract to exercise the :code:`Transfer` and :code:`Settle` choices on it.
The template identifiers of contracts are created
through a content-addressing scheme. This means every contract is
self-describing in a sense: it constrains its stakeholder annotations
and all Daml-conformant actions on itself. As a consequence, one can
talk about "the" Daml contract model, as a single contract model encoding all possible
instances of all possible templates. This model is subaction-closed;
all exercise and create actions done within an update block are also
always permissible as top-level actions.

View File

@ -1,325 +0,0 @@
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _da-model-exceptions:
Exceptions
##########
The introduction of exceptions, a new Daml feature, has many implications
for the ledger model. This page describes the changes to the ledger model
introduced as part of this new feature.
..
SF: Once the dust settles on exceptions, these changes should be
incorporated into the rest of the ledger model.
Structure
*********
Under the new feature, Daml programs can raise and catch exceptions.
When an exception is caught in a `catch` block, the subtransaction
starting at the corresponding `try` block is rolled back.
To support this in our ledger model, we need to modify the transaction
structure to indicate which subtransactions were rolled back. We do this
by introducing **rollback nodes** in the transaction. Each rollback node
contains a rolled back subtransaction. Rollback nodes are not considered
ledger actions.
Therefore we define transactions as a list of **nodes**, where
each node is either a ledger action or a rollback node. This is reflected
in the updated EBNF grammar for the transaction structure:
.. code-block:: none
Transaction ::= Node*
Node ::= Action | Rollback
Rollback ::= 'Rollback' Transaction
Action ::= 'Create' contract
| 'Exercise' party* contract Kind Transaction
| 'Fetch' party* contract
| 'NoSuchKey' key
Kind ::= 'Consuming' | 'NonConsuming'
Note that `Action` and `Kind` have the same definition as before, but
since `Transaction` may now contain rollback nodes, this means that an
`Exercise` action may have a rollback node as one of its consequences.
For example, the following transaction contains a rollback node inside
an exercise. It represents a paint offer involving multiple banks.
The painter P is offering to paint A's house as long as they receive
an Iou from Bank1 or, failing that, from Bank2. When A accepts, they
confirm that transfer of an Iou via Bank1 has failed for some reason,
so they roll it back and proceed with a transfer via Bank2:
.. https://lucid.app/lucidchart/fb34c83b-8db7-4063-83f1-38e796225fe4/edit
.. image:: ./images/exception-structure-example.svg
:align: center
:width: 80%
:alt: The paint agreement flowchart incorporating a rollback from Bank1 and a transfer from Bank2.
Note also that rollback nodes may be nested, which represents a situation
where multiple exceptions are raised and caught within the same transaction.
For example, the following transaction contains the previous one under a
rollback node. It represents a case where the "accept" has failed at the last
moment, for some reason, and a "cancel" exercise has been issued in response.
.. https://lucid.app/lucidchart/8f18f7be-89b8-42f9-93a2-b995a5030a9e/edit
.. image:: ./images/exception-structure-example-nested.svg
:align: center
:width: 80%
:alt: The paint agreement flowchart with a nested rollback as described in the preceding paragraph.
Consistency
***********
In the previous section on :ref:`consistency <da-model-consistency>`,
we defined a "before-after" relation on ledger actions. This notion needs
to be revised in the presence of rollback nodes. It is no longer enough to
perform a preorder traversal of the transaction tree, because the actions under a
rollback node cannot affect actions that appear later in the transaction tree.
For example, a contract may be consumed by an exercise under a rollback node,
and immediately again after the rollback node. This is allowed because the
exercise was rolled back, and this does not represent a "double spend" of
the same contract. You can see this in the nested example above, where
the PaintOffer contract is consumed by an "agree" exercise, which is rolled
back, and then by a "cancel" exercise.
So, we now define the "before-after" relation as a partial order, rather than a
total order, on all the actions of a transaction. This relation is defined
as follows: `act1` comes before `act2` (equivalently, `act2` comes after `act1`)
if and only if `act1` appears before `act2` in a preorder traversal of the
transaction tree, and any rollback nodes that are ancestors of `act1` are
also ancestors of `act2`.
With this modified "before-after" relation, the notion of internal consistency
remains the same. Meaning that, for example, for any contract `c`, we still
forbid the creation of `c` coming after any action on `c`, and we forbid any
action on `c` coming after a consuming exercise on `c`.
In the example above, neither consuming exercise comes "after" the other.
They are part of separate "continuities", so they don't introduce inconsistency.
Here are three continuities implied by the "before-after" relation. The first:
.. image:: ./images/exception-integrity-continuity-1.svg
:align: center
:width: 80%
:alt: The paint agreement flowchart with rollbacks, showing continuity for a scenario where Bank1 provides the Iou.
The second:
.. https://lucid.app/lucidchart/f1f92199-ae41-4de2-b1bf-0925d3ab89c9/edit
.. image:: ./images/exception-integrity-continuity-2.svg
:align: center
:width: 80%
:alt: The paint agreement flowchart with rollbacks, showing continuity for a scenario where Bank2 provides the Iou.
And the third:
.. https://lucid.app/lucidchart/77d97798-8651-41dc-bb8b-abecf05f81bb/edit
.. image:: ./images/exception-integrity-continuity-3.svg
:align: center
:width: 80%
:alt: The paint agreement flowchart with rollbacks, showing continuity for a scenario where the transaction is cancelled.
As you can see, in each of these continuities, no contract was consumed twice.
Transaction Normalization
*************************
The same "before-after" relation can be represented in more than one way using
rollback nodes. For example, the following three transactions have the same
"before-after" relation among their ledger actions (`act1`, `act2`, and `act3`):
.. https://lucid.app/lucidchart/3aa5922f-ec30-4896-8bbc-56703549c7e5/edit
.. image:: ./images/exception-normalization-1.svg
:align: center
:width: 80%
:alt: Three flowcharts where a transaction leads to act1, act2, and act3; each has two rollbacks, but the rollbacks are placed differently in each.
Because of this, these three transactions are equivalent.
More generally, two transactions are equivalent if:
- The transactions are the same when you ignore all rollback nodes. That is,
if you remove every rollback node and absorb its children into its parent,
then two transactions are the same. Equivalently, the transactions have
the same ledger actions with the same preorder traversal and subaction relation.
- The transactions have the same "before-after" relation between their actions.
- The transactions have the same set of "rollback children".
A "rollback child" is an action whose direct parent is a rollback node.
For all three transactions above, the "transaction tree ignoring rollbacks"
consists only of top-level actions (`act1`, `act2`, and `act3`), the
"before-after" relation only says that `act2` comes before `act3`,
and all three actions are rollback children. Thus all three transactions
are equivalent.
**Transaction normalization** is the process by which equivalent transactions
are converted into the same transaction. In the case above, all three
transactions become the transaction in the middle when normalized.
.. https://lucid.app/lucidchart/8a5a09a1-5473-4abf-a72a-57bf03b56794/edit
.. image:: ./images/exception-normalization-2.svg
:align: center
:width: 80%
:alt: The same set of three transactions as above, illustrating how they all normalize to the center flowchart as described below.
To normalize a transaction, we apply three rules repeatedly across the whole transaction:
1. If a rollback node is empty, we drop it.
2. If a rollback node starts with another rollback node, for instance:
.. code-block:: none
'Rollback' [ 'Rollback' tx , node1, ..., nodeN ]
Then we re-associate the rollback nodes, bringing the inner rollback node out:
.. code-block:: none
'Rollback' tx, 'Rollback' [ node1, ..., nodeN ]
3. If a rollback node ends with another rollback node, for instance:
.. code-block:: none
'Rollback' [ node1, ..., nodeN, 'Rollback' [ node1', ..., nodeM' ] ]
Then we flatten the inner rollback node into its parent:
.. code-block:: none
'Rollback' [ node1, ..., nodeN, node1', ..., nodeM' ]
In the example above, using rule 3 we can turn the left transaction into the middle
transaction, and using rule 2 we can turn the right transaction into the middle
transaction. None of these rules apply to the middle transaction, so it is already
normalized.
In the end, a normalized transaction cannot contain any rollback node that starts
or ends with another rollback node, nor may it contain any empty rollback nodes.
The normalization process minimizes the number of rollback nodes and their depth
needed to represent the transaction.
To reduce the potential for information leaks, the ledger model must only
contain normalized transactions. This also applies to projected transactions.
An unnormalized transaction is always invalid.
Authorization
*************
Since they are not ledger actions, rollback nodes do not have authorizers
directly. Instead, a ledger is well-authorized exactly when the same ledger
with rollback nodes removed (that is, replacing the rollback nodes with
their children) is well-authorized, according to
:ref:`the old definition <da-ledgers-authorization-rules>`.
This is captured in the following rules:
- When a rollback node is authorized by `p`, then all of its children are
authorized by `p`. In particular:
- Top-level rollback nodes share the authorization of the requesters of
the commit with all of their children.
- Rollback nodes that are a consequence of an exercise action `act` on a
contract `c` share the authorization of the signatories of `c` and the
actors of `act` with all of their children.
- A nested rollback node shares the authorization it got from its parent
with all of its children.
- The required authorizers of a rollback node are the union of all
the required authorizers of its children.
Privacy
*******
Rollback nodes also have an interesting effect on the notion of privacy in
the ledger model. When projecting a transaction for a party `p`, it's
necessary to preserve some of the rollback structure of the transaction,
even if `p` does not have the right to observe every action under it. For
example, we need `p` to be able to verify that a rolled back exercise
(to which they are an informee) is conformant, but we also need `p` to
know that the exercise was rolled back.
We adjust the definition of projection as follows:
1. For a ledger action, the projection for `p` is the same as it was before.
That is, if `p` is an informee of the action, then the entire subtree is
preserved. Otherwise the action is dropped, and the action's consequences
are projected for `p`.
2. For a rollback node, the projection for `p` consists of the projection
for `p` of its children, wrapped up in a new rollback node. In other
words, projection happens under the rollback node, but the node is
preserved.
After applying this process, the transaction must be normalized.
Consider the deeply nested example from before. To calculate the projection
for Bank1, we note that the only visible action is the bottom left exercise.
Removing the actions that Bank1 isn't an informee of, this results in a
transaction containing a rollback node, containing a rollback node, containing
an exercise. After normalization, this becomes a simple rollback node
containing an exercise. See below:
.. https://lucid.app/lucidchart/1714e8d2-0c2d-4bbf-9b48-2266b2cd6c9d/edit
.. image:: ./images/exception-projection-example.svg
:align: center
:width: 80%
:alt: A flowchart with the portions invisible to Bank1 in light gray and the visible portion in dark gray, with the visible portion after normalization illustrated as a simpler flowchart at bottom right.
The privacy section of the ledger model makes a point of saying that a
contract model should be **subaction-closed** to support projections. But
this requirement is not necessarily true once we introduce rollbacks.
Rollback nodes may contain actions that are not valid as standalone actions,
since they may have been interrupted prematurely by an exception.
Instead, we require that the contract model be **projection-closed**, i.e.
closed under projections for any party `p`. This is a weaker requirement
that matches what we actually need.
Relation to Daml Exceptions
***************************
Rollback nodes are created when an exception is thrown and caught within
the same transaction. In particular, any exception that is caught within
a try-catch will generate a rollback node if there are any ledger actions
to roll back. For example:
.. code-block:: daml
try do
cid <- create MyContract { ... }
exercise cid MyChoice { ... }
throw MyException
catch
MyException ->
create MyOtherContract { ... }
This Daml code will try to create a contract, and exercise a choice on this
contract, before throwing an exception. That exception is caught immediately,
and then another contract is created.
Thus a rollback node is created, to reset the ledger to the state it had
at the start of the "try" block. The rollback node contains the create and
exercise nodes. After the rollback node, another contract is created.
Thus the final transaction looks like this:
.. https://lucid.app/lucidchart/2d48d3db-bfcd-4936-b3f2-efe29470b2b6/edit
.. image:: ./images/exception-daml-example.svg
:align: center
:width: 80%
:alt: A flowchart showing the transaction that results when Daml creates a rollback node due to an exception.
Note that rollback nodes are only created if an exception is *caught*. An
uncaught exception will result in an error, not a transaction.
After execution of the Daml code, the generated transaction is normalized.
.. (end of file; the following section is from a separate source file)
.. Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
.. SPDX-License-Identifier: Apache-2.0
.. _da-model-integrity:
Integrity
#########
This section addresses the question of who can request which
changes.
.. _da-model-validity:
Valid Ledgers
*************
At the core is the concept of a *valid ledger*; changes
are permissible if adding the corresponding commit to the
ledger results in a valid ledger. **Valid ledgers** are
those that fulfill three conditions:
:ref:`da-model-consistency`
Exercises and fetches on inactive contracts are not allowed, i.e.
contracts that have not yet been created or have already been
consumed by an exercise.
A contract with a contract key can be created only if the key is not associated to another unconsumed contract,
and all key assertions hold.
:ref:`da-model-conformance`
Only a restricted set of actions is allowed on a given contract.
:ref:`da-model-authorization`
The parties who may request a particular change are restricted.
Only the last of these conditions depends on the party (or
parties) requesting the change; the other two are general.
.. _da-model-consistency:
Consistency
***********
Consistency consists of two parts:
#. :ref:`Contract consistency <da-model-contract-consistency>`: Contracts must be created before they are used, and they cannot be used once they are consumed.
#. :ref:`Key consistency <da-model-key-consistency>`: Keys are unique and key assertions are satisfied.
To define this precisely, notions of "before" and "after" are needed.
These are given by putting all actions in a sequence. Technically, the
sequence is obtained by a pre-order traversal of the ledger's actions,
noting that these actions form an (ordered) forest. Intuitively, it is obtained
by always picking parent actions before their proper subactions, and otherwise
always picking the actions on the left before the actions on the right. The image
below depicts the resulting order on the paint offer example:
.. https://www.lucidchart.com/documents/edit/1ef6debb-b89a-4529-84b6-fc2c3e1857e8
.. image:: ./images/consistency-order-on-actions.svg
:align: center
:width: 100%
:alt: The time sequence of commits. In the first commit, Iou Bank A is requested by the bank. In the second, PaintOffer P A P123 is requested by P. In the last commit, requested by A, Exe A (PaintOffer P A P123) leads to Exe A (Iou Bank A) leads to Iou Bank P leads to PaintAgree P A P123
In the image, an action `act` happens before action `act'` if there is
a (non-empty) path from `act` to `act'`.
Then, `act'` happens after `act`.
.. _da-model-contract-consistency:
Contract Consistency
====================
Contract consistency ensures that contracts are used after they have been created and before they are consumed.
.. _def-contract-consistency:
Definition »contract consistency«
A ledger is **consistent for a contract c** if all of the following holds for all actions `act` on `c`:
#. either `act` is itself **Create c** or a **Create c** happens before `act`
#. `act` does not happen before any **Create c** action
#. `act` does not happen after any **Exercise** action consuming `c`.
The consistency condition rules out the double spend example.
As the red path below indicates, the second exercise in the example happens after a consuming exercise on the same
contract, violating the contract consistency criteria.
.. https://www.lucidchart.com/documents/edit/c6113536-70f4-42a4-920d-3c9497f8f7c4
.. image:: ./images/consistency-banning-double-spends.svg
:align: center
:width: 100%
:alt: Another time sequence of commits. In the first commit, Iou Bank A is requested by the bank. In the second, Exe A (Iou Bank A) leads to Iou Bank B via a red line, indicating contract consistency violations. Iou Bank B leads to Exe A (Iou Bank A) in the third commit, also via a red line, and Exe A (Iou Bank A) leads to Iou Bank P.
.. _def-contract-state:
In addition to the consistency notions, the before-after relation on actions can also be used to define the notion of
**contract state** at any point in a given transaction.
The contract state is changed by creating the contract and by exercising it consumingly.
At any point in a transaction, we can then define the latest state change in the obvious way.
Then, given a point in a transaction, the contract state of `c` is:
#. **active**, if the latest state change of `c` was a create;
#. **archived**, if the latest state change of `c` was a consuming exercise;
#. **inexistent**, if `c` never changed state.
A ledger is consistent for `c` exactly if **Exercise** and **Fetch** actions on `c` happen only when `c` is active,
and **Create** actions only when `c` is inexistent.
The figures below visualize the state of different contracts at all points in the example ledger.
.. https://www.lucidchart.com/documents/edit/19226d95-e8ba-423a-8546-e5bae6bd3ab7
.. figure:: ./images/consistency-paint-offer-activeness.svg
:align: center
:width: 100%
:alt: The first time sequence from above. Every action in the first and second commits is inexistent; in the third commit, Exe A (PaintOffer P A P123) is active while all the actions below it are archived.
Activeness of the `PaintOffer` contract
.. https://www.lucidchart.com/documents/edit/19226d95-e8ba-423a-8546-e5bae6bd3ab7
.. figure:: ./images/consistency-alice-iou-activeness.svg
:align: center
:width: 100%
:alt: The same time sequence as above, but with PaintOffer P A P123 in the second commit and Exe A (Iou Bank A) in the third commit also active.
Activeness of the `Iou Bank A` contract
The notion of order can be defined on all the different ledger structures: actions, transactions, lists of transactions,
and ledgers.
Thus, the notions of consistency, inputs and outputs, and contract state can also all be defined on all these
structures.
The **active contract set** of a ledger is the set of all contracts
that are active on the ledger. For the example above, it consists
of contracts `Iou Bank P` and `PaintAgree P A`.
.. _da-model-key-consistency:
Key Consistency
===============
Contract keys introduce a key uniqueness constraint for the ledger.
To capture this notion, the contract model must specify for every contract in the system whether the contract has a key and, if so, the key.
Every contract can have at most one key.
Like contracts, every key has a state.
An action `act` is an **action on a key** `k` if
- `act` is a **Create**, **Exercise**, or a **Fetch** action on a contract `c` with key `k`, or
- `act` is the key assertion **NoSuchKey** `k`.
.. _def-key-state:
Definition »key state«
The **key state** of a key on a ledger is determined by the last action `act` on the key:
- If `act` is a **Create**, non-consuming **Exercise**, or **Fetch** action on a contract `c`,
then the key state is **assigned** to `c`.
- If `act` is a consuming **Exercise** action or a **NoSuchKey** assertion,
then the key state is **free**.
- If there is no such action `act`, then the key state is **unknown**.
A key is **unassigned** if its key state is either **free** or **unknown**.
Key consistency ensures that there is at most one active contract for each key and that all key assertions are satisfied.
.. _def-key-consistency:
Definition »key consistency«
A ledger is **consistent for a key** `k` if for every action `act` on `k`, the key state `s` before `act` satisfies
- If `act` is a **Create** action or **NoSuchKey** assertion, then `s` is **free** or **unknown**.
- If `act` is an **Exercise** or **Fetch** action on some contract `c`, then `s` is **assigned** to `c` or **unknown**.
Key consistency rules out the problematic examples involving contract keys.
For example, suppose that the painter `P` has made a paint offer to `A` with reference number `P123`, but `A` has not yet accepted it.
When `P` tries to create another paint offer to `David` with the same reference number `P123`,
then this creation action would violate key uniqueness.
The following ledger violates key uniqueness for the key `(P, P123)`.
.. figure:: ./images/double-key-creation-highlighted.svg
:align: center
:name: double-key-creation
:alt: A ledger with two P123s, violating key uniqueness.
Key assertions can be used in workflows to evidence the inexistence of a certain kind of contract.
For example, suppose that the painter `P` is a member of the union of painters `U`.
This union maintains a blacklist of potential customers that its members must not do business with.
A customer `A` is considered to be on the blacklist if there is an active contract `Blacklist @U &A`.
To make sure that the painter `P` does not make a paint offer if `A` is blacklisted,
the painter combines its commit with a **NoSuchKey** assertion on the key `(U, A)`.
The following ledger shows the transaction, where `UnionMember U P` represents `P`'s membership in the union `U`.
It grants `P` the choice to perform such an assertion, which is needed for :ref:`authorization <da-model-authorization>`.
.. figure:: ./images/paint-offer-blacklist.svg
:align: center
:name: paint-offer-blacklist
:alt: A time sequence with UnionMember U P in the first commit and ExeN (UnionMember U P) "blacklisted", NoSuchKey (U, A) and PaintOffer A @ P Bank &P123 in the second commit.
Key consistency extends to actions, transactions and lists of transactions just like the other consistency notions.
.. _da-model-ledger-consistency:
Ledger Consistency
==================
Definition »ledger consistency«
A ledger is **consistent** if it is consistent for all contracts and for all keys.
Internal Consistency
====================
The above consistency requirement is too strong for actions and transactions
in isolation.
For example, the acceptance transaction from the paint offer example is not consistent as a ledger, because `PaintOffer A P Bank P123`
and the `Iou Bank A` contracts are used without being created before:
.. image:: ./images/action-structure-paint-offer.svg
:align: center
:width: 60%
:alt: The flowchart of Alice's original paint deal, first described in the Structure section.
However, the transaction can still be appended to a ledger
that creates these contracts and yields a consistent ledger. Such
transactions are said to be internally consistent,
and contracts such as the `PaintOffer A P Bank P123` and `Iou Bank A` are called
input contracts of the transaction.
Dually, output contracts of a transaction are the contracts that a transaction creates and does not archive.
.. _def-internal-consistency:
Definition »internal consistency for a contract«
A transaction is **internally consistent for a contract c** if the following holds for all of its subactions `act` on the contract `c`:
#. `act` does not happen before any **Create c** action
#. `act` does not happen after any exercise consuming `c`.
A transaction is **internally consistent** if it is internally consistent for all contracts and consistent for all keys.
.. _def-input-contract:
Definition »input contract«
For an internally consistent transaction,
a contract `c` is an **input contract** of the transaction
if the transaction contains an **Exercise** or a **Fetch** action on `c` but not a **Create c** action.
.. _def-output-contract:
Definition »output contract«
For an internally consistent transaction,
a contract `c` is an **output contract** of the transaction
if the transaction contains a **Create c** action, but not a consuming **Exercise** action on `c`.
Note that
the input and output contracts are undefined for transactions that are not
internally consistent. The image below shows some examples of internally consistent
and inconsistent transactions.
.. figure:: ./images/internal-consistency-examples.svg
:align: center
:width: 100%
:alt: Three transactions involving an Iou between Bank A and Bank B, as described in the caption.
The first two transactions violate the conditions of internal consistency.
The first transaction creates the `Iou` after exercising it consumingly, violating both conditions.
The second transaction contains a (non-consuming) exercise on the `Iou` after a consuming one, violating the second condition.
The last transaction is internally consistent.
Similar to input contracts, we define the input keys as the set that must be unassigned at the beginning of a transaction.
Definition »input key«
A key `k` is an **input key** to an internally consistent transaction
if the first action `act` on `k` is either a **Create** action or a **NoSuchKey** assertion.
In the :ref:`blacklisting example <paint-offer-blacklist>`, `P`\ 's transaction has two input keys: `(U, A)` due to the **NoSuchKey** action and `(P, P123)` as it creates a `PaintOffer` contract.
.. _da-model-conformance:
Conformance
***********
The *conformance* condition constrains the actions that may occur on the
ledger. This is done by considering a **contract model** `M` (or a **model** for short),
which specifies the set of all possible actions. A ledger is **conformant to M**
(or conforms to M) if all top-level actions on the ledger are members of `M`.
Like consistency, the notion of conformance does not depend on the requesters of
a commit, so it can also be applied to transactions and lists of transactions.
For example, the set of allowed actions on IOU contracts could be
described as follows.
.. https://www.lucidchart.com/documents/edit/e181e9fc-634c-49e3-911e-a07b5da28bf8/0
.. image:: ./images/models-simple-iou.svg
:align: center
:width: 80%
:alt: A set of create, transfer, and settle actions allowed on IOU contracts, as described in the paragraph immediately below.
The boxes in the image are templates in the sense that the contract
parameters in a box (such as
obligor or owner) can be instantiated by arbitrary values of the
appropriate type. To facilitate understanding, each box includes a label
describing the intuitive purpose of the corresponding set of actions.
As the image suggests, the transfer box imposes the
constraint that the bank must remain the same both in the exercised
IOU contract, and in the newly created IOU contract. However, the
owner can change arbitrarily. In contrast, in the settle actions, both
the bank and the owner must remain the same.
Furthermore, to be conformant, the actor of a transfer action must be the same as the owner of the contract.
Of course, the constraints on the relationship between the parameters can be
arbitrarily complex, and cannot conveniently be reproduced in this
graphical representation. This is the role of Daml -- it
provides a much more convenient way of representing contract models.
The link between Daml and contract models is explained in more detail in a :ref:`later section <da-model-daml>`.
To see the conformance criterion in action, assume that
the contract model allows only the following actions on `PaintOffer`
and `PaintAgree` contracts.
.. https://www.lucidchart.com/documents/edit/1ea6f551-c212-4620-9417-27784adccbcc
.. image:: ./images/models-paint-offer.svg
:align: center
:width: 90%
:alt: The available create and accept actions on the PaintOffer and PaintAgree contracts.
The problem with the example where Alice changes the
offer's outcome to avoid transferring the money now
becomes apparent.
.. image:: ./images/non-conformant-action.svg
:align: center
:alt: A time sequence illustrating the problem as described below.
`A`'s commit is not conformant to the contract model, as the model does
not contain the top-level action she is trying to commit.
.. _da-model-authorization:
Authorization
*************
The last criterion rules out the last two problematic examples,
:ref:`an obligation imposed on a painter <obligation-imposed-on-painter>`,
and :ref:`the painter stealing Alice's money <painter-stealing-ious>`.
The first of those is visualized below.
.. image:: ./images/invalid-obligation.svg
:align: center
:width: 100%
:alt: A time sequence showing only one commit, in which PaintAgree P A P123 is requested by A.
The reason why the example is intuitively impermissible is that
the `PaintAgree` contract is supposed to express that the painter has an
obligation to paint Alice's house, but he never agreed to that obligation.
On paper contracts, obligations are expressed in the body of the contract,
and imposed on the contract's *signatories*.
.. _da-signatories-agreements-maintainers:
Signatories, Agreements, and Maintainers
========================================
To capture these elements of real-world contracts, the **contract model**
additionally specifies, for each contract in the system:
#. A non-empty set of **signatories**, the parties bound by the
contract.
#. An optional **agreement text** associated with the contract,
specifying the off-ledger, real-world obligations of the
signatories.
#. If the contract is associated with a key, a non-empty set of **maintainers**,
the parties that make sure that at most one unconsumed contract exists for the key.
The maintainers must be a subset of the signatories and depend only on the key.
This dependence is captured by the function `maintainers` that takes a key and returns the key's maintainers.
In the example, the contract model specifies that
#. an `Iou obligor owner` contract has only the `obligor` as a signatory,
and no agreement text.
#. a `MustPay obligor owner` contract has both the `obligor`
and the `owner` as signatories, with an agreement text requiring
the obligor to pay the owner a certain amount, off the ledger.
#. a `PaintOffer houseOwner painter obligor refNo` contract has only the
painter as the signatory, with no agreement text.
Its associated key consists of the painter and the reference number.
The painter is the maintainer.
#. a `PaintAgree houseOwner painter refNo` contract has both the
house owner and the painter as signatories, with an agreement
text requiring the painter to paint the house.
The key consists of the painter and the reference number.
The painter is the only maintainer.
In the graphical representation below, signatories of a contract are indicated
with a dollar sign (as a mnemonic for an obligation) and use a bold
font.
Maintainers are marked with `@` (as a mnemonic who enforces uniqueness).
Since maintainers are always signatories, parties marked with `@` are implicitly signatories.
For example, annotating the paint offer acceptance action with
signatories yields the image below.
.. https://www.lucidchart.com/documents/edit/4a3fdcbc-e521-4fd8-a636-1035b4d65126/0
.. image:: ./images/signatories-paint-offer.svg
:align: center
:width: 60%
:alt: The original paint deal flowchart. P is a maintainer; A and the Bank are signatories.
.. _da-ledgers-authorization-rules:
Authorization Rules
===================
Signatories allow one to precisely state that the painter has an obligation.
The imposed obligation is intuitively invalid because the painter did not
agree to this obligation. In other words, the painter did not *authorize*
the creation of the obligation.
In a Daml ledger, a party can **authorize** a subaction of a commit in
either of the following ways:
* Every top-level action of the commit is authorized by all requesters
of the commit.
* Every consequence of an exercise action `act` on a contract `c` is
authorized by all signatories of `c` and all actors of `act`.
The second authorization rule encodes the offer-acceptance pattern,
which is a prerequisite for contract formation in contract law. The
contract `c` is effectively an offer by its signatories who act as
offerers. The exercise is an acceptance of the offer by the actors who
are the offerees. The consequences of the exercise can be interpreted
as the contract body so the authorization rules of Daml
ledgers closely model the rules for contract formation in contract
law.
.. _da-ledgers-def-well-authorized:
.. _da-ledgers-required-authorizers:
A commit is **well-authorized** if every subaction `act` of the commit is
authorized by at least all of the **required authorizers** of `act`, where:
#. the required authorizers of a **Create** action on a contract `c` are the
signatories of `c`.
#. the required authorizers of an **Exercise** or a **Fetch** action are its actors.
#. the required authorizers of a **NoSuchKey** assertion are the maintainers of the key.
We lift this notion to ledgers, whereby a ledger is well-authorized exactly when all of its commits are.
Examples
========
An intuition for how the authorization definitions work is most easily
developed by looking at some examples. The main example, the
paint offer ledger, is intuitively legitimate. It should therefore
also be well-authorized according to our definitions,
which it is indeed.
In the visualizations below,
`Π ✓ act` denotes that the parties `Π` authorize the
action `act`. The resulting authorizations are shown below.
.. https://www.lucidchart.com/documents/edit/9df74ad9-b781-4974-bbb5-e67c7f03d196/0
.. image:: ./images/authorization-paint-offer.svg
:align: center
:alt: The original paint deal time sequence, described in depth with respect to authorizations below.
In the first commit, the bank authorizes the creation of the IOU by
requesting that commit. As the bank is the sole signatory on the
IOU contract, this commit is well-authorized. Similarly, in the second
commit, the painter authorizes the creation of the paint offer contract,
and the painter is the only signatory on that contract, making this commit
also well-authorized.
The third commit is more complicated. First, Alice authorizes
the exercise on the paint offer by requesting it. She is the only actor
on this exercise, so this complies with the authorization requirement.
Since the painter is the signatory of the paint offer, and Alice
the actor of the exercise, they jointly authorize all consequences
of the exercise. The first consequence is an exercise on the IOU, with
Alice as the actor, so this is permissible.
The second consequence is the creation of the new IOU (for P) by exercising the old IOU (for A).
As the IOU was formerly signed by the bank, with Alice as the actor of the exercise, they jointly authorize this creation.
This action is permissible as the bank is the sole signatory.
The final consequence is creating the paint agreement with Alice and the painter as signatories.
Since they both authorize the action, this is also permissible.
Thus, the entire third commit is also well-authorized, and so is the ledger.
Similarly, the intuitively problematic examples
are prohibited by our authorization criterion. In the
first example, Alice forced the painter to paint her house. The
authorizations for the example are shown below.
.. https://www.lucidchart.com/documents/edit/6a05add2-7ec9-4a6a-bb9b-7103bf35390f
.. image:: ./images/authorization-invalid-obligation.svg
:align: center
:alt: A time sequence for a scenario where Alice forces the painter to paint her house, described in depth with respect to authorization below.
Alice authorizes the **Create** action on the `PaintAgree` contract by
requesting it. However, the painter is also a signatory on the
`PaintAgree` contract, but he did not authorize the **Create** action.
Thus, this ledger is indeed not well-authorized.
In the second example, the painter steals money from Alice.
.. https://www.lucidchart.com/documents/edit/e895410e-6e77-4686-9fc6-0286a064f420
.. image:: ./images/authorization-stealing-ious.svg
:align: center
:alt: A time sequence for a scenario where the painter steals Alice's money, described in depth with respect to authorization below.
The bank authorizes the creation of the IOU by requesting this action.
Similarly, the painter authorizes the exercise that transfers the IOU
to him. However, the actor of this exercise is Alice, who has not
authorized the exercise. Thus, this ledger is not
well-authorized.
The rationale for making the maintainers required authorizers for a **NoSuchKey** assertion
is discussed in the next section about :ref:`privacy <da-model-privacy-authorization>`.
Valid Ledgers, Obligations, Offers and Rights
*********************************************
Daml ledgers are designed to mimic real-world interactions between
parties, which are governed by contract law. The validity conditions
on the ledgers, and the information contained in contract models have
several subtle links to the concepts of the contract law that are
worth pointing out.
First, in addition to the explicit off-ledger obligations specified in
the agreement text, contracts also specify implicit **on-ledger
obligations**, which result from consequences of the exercises on
contracts. For example, the `PaintOffer` contains an on-ledger
obligation for `A` to transfer her IOU in case she accepts the offer. Agreement
texts are therefore only necessary to specify obligations that are not
already modeled as permissible actions on the ledger. For example,
`P`'s obligation to paint the house cannot be sensibly modeled on the
ledger, and must thus be specified by the agreement text.
Second, every contract on a Daml ledger can simultaneously model both:
* a real-world offer, whose consequences (both on- and off-ledger)
are specified by the **Exercise** actions on the contract allowed
by the contract model, and
* a real-world contract "proper", specified through the contract's
(optional) agreement text.
Third, in Daml ledgers, as in the real world, one person's rights are
another person's obligations. For example, `A`'s right to accept the
`PaintOffer` is `P`'s obligation to paint her house in case she
accepts.
In Daml ledgers, a party's rights according to a contract model are the exercise actions the party can perform according to the authorization and conformance rules.
Finally, validity conditions ensure three important properties of the Daml
ledger model, that mimic the contract law.
#. **Obligations need consent**.
Daml ledgers follow the offer-acceptance pattern of the
contract law, and thus ensure that all ledger contracts are
formed voluntarily. For example, the following
ledger is not valid.
.. https://www.lucidchart.com/documents/edit/6a05add2-7ec9-4a6a-bb9b-7103bf35390f
.. image:: ./images/authorization-invalid-obligation.svg
:align: center
:width: 100%
:alt: The time sequence for a scenario where Alice forces the painter to paint her house, explained previously in the Authorization Rules Example section.
#. **Consent is needed to take away on-ledger rights**.
As only **Exercise** actions consume contracts, the rights cannot be taken
away from the actors; the contract model specifies exactly who the
actors are, and the authorization rules require them to approve the
contract consumption.
In the examples, Alice had the right to transfer her IOUs;
the painter's attempt to take that right away from her, by performing
a transfer himself, was not valid.
.. https://www.lucidchart.com/documents/edit/e895410e-6e77-4686-9fc6-0286a064f420
.. image:: ./images/authorization-stealing-ious.svg
:align: center
:width: 100%
:alt: The time sequence for a scenario where the painter steals Alice's money, explained previously in the Authorization Rules Example section.
Parties can still **delegate** their rights to other parties. For
example, assume that Alice, instead of accepting the painter's offer,
decides to make him a counteroffer instead. The painter can
then accept this counteroffer, with the consequences as before:
.. https://www.lucidchart.com/documents/edit/ba64b0d2-776a-4c94-a9be-b76948a76632
.. image:: ./images/counteroffer-acceptance.svg
:align: center
:width: 60%
:name: counteroffer-acceptance
:alt: The original PaintAgreement flow chart, but now the topmost contract is the CounterOffer.
Here, by creating the `CounterOffer` contract, Alice delegates
her right to transfer the IOU contract to the painter. In case of
delegation, prior to submission, the requester must get informed about the contracts
that are part of the requested transaction, but where the requester
is not a signatory. In the example above, the
painter must learn about the existence of the IOU for Alice before
he can request the acceptance of the `CounterOffer`. The
concepts of observers and divulgence, introduced in the next
section, enable such scenarios.
#. **On-ledger obligations cannot be unilaterally escaped**. Once an
obligation is recorded on a Daml ledger, it can only be removed in
accordance with the contract model. For example, assuming the IOU
contract model shown earlier, if the ledger records the creation
of a `MustPay` contract, the bank cannot later simply record an
action that consumes this contract:
.. https://www.lucidchart.com/documents/edit/521f4ec6-9152-447d-bda8-c0c636d7635f
.. image:: ./images/validity-no-removal-of-obligations.svg
:align: center
:width: 100%
:alt: A time sequence in which the first commit includes the creation of a MustPay contract and the second commit includes the bank consuming this contract, as described above.
That is, this ledger is invalid, as the action above is not
conformant to the contract model.
.. (end of file)