mirror of
https://github.com/digital-asset/daml.git
synced 2024-09-20 09:17:43 +03:00
Switching to composition at StorageBackend [DPP-704] (#11520)
* Switch to StorageBackendFactory * Fixing tests changelog_begin changelog_end
This commit is contained in:
parent
d006ad0e8b
commit
6c9416492d
@ -20,7 +20,11 @@ import com.daml.platform.store.DbType.{
|
||||
}
|
||||
import com.daml.platform.store.appendonlydao.events.{CompressionStrategy, LfValueTranslation}
|
||||
import com.daml.platform.store.backend.DataSourceStorageBackend.DataSourceConfig
|
||||
import com.daml.platform.store.backend.StorageBackend
|
||||
import com.daml.platform.store.backend.{
|
||||
DataSourceStorageBackend,
|
||||
ResetStorageBackend,
|
||||
StorageBackendFactory,
|
||||
}
|
||||
import com.daml.platform.store.backend.postgresql.PostgresDataSourceConfig
|
||||
import com.daml.platform.store.{DbType, LfValueTranslationCache}
|
||||
|
||||
@ -37,7 +41,12 @@ object JdbcIndexer {
|
||||
)(implicit materializer: Materializer, loggingContext: LoggingContext) {
|
||||
|
||||
def initialized(resetSchema: Boolean = false): ResourceOwner[Indexer] = {
|
||||
val storageBackend = StorageBackend.of(DbType.jdbcType(config.jdbcUrl))
|
||||
val factory = StorageBackendFactory.of(DbType.jdbcType(config.jdbcUrl))
|
||||
val dataSourceStorageBackend = factory.createDataSourceStorageBackend
|
||||
val ingestionStorageBackend = factory.createIngestionStorageBackend
|
||||
val parameterStorageBackend = factory.createParameterStorageBackend
|
||||
val DBLockStorageBackend = factory.createDBLockStorageBackend
|
||||
val resetStorageBackend = factory.createResetStorageBackend
|
||||
val indexer = ParallelIndexerFactory(
|
||||
jdbcUrl = config.jdbcUrl,
|
||||
inputMappingParallelism = config.inputMappingParallelism,
|
||||
@ -58,14 +67,17 @@ object JdbcIndexer {
|
||||
),
|
||||
haConfig = config.haConfig,
|
||||
metrics = metrics,
|
||||
storageBackend = storageBackend,
|
||||
dbLockStorageBackend = DBLockStorageBackend,
|
||||
dataSourceStorageBackend = dataSourceStorageBackend,
|
||||
initializeParallelIngestion = InitializeParallelIngestion(
|
||||
providedParticipantId = config.participantId,
|
||||
storageBackend = storageBackend,
|
||||
parameterStorageBackend = parameterStorageBackend,
|
||||
ingestionStorageBackend = ingestionStorageBackend,
|
||||
metrics = metrics,
|
||||
),
|
||||
parallelIndexerSubscription = ParallelIndexerSubscription(
|
||||
storageBackend = storageBackend,
|
||||
parameterStorageBackend = parameterStorageBackend,
|
||||
ingestionStorageBackend = ingestionStorageBackend,
|
||||
participantId = config.participantId,
|
||||
translation = new LfValueTranslation(
|
||||
cache = lfValueTranslationCache,
|
||||
@ -89,17 +101,20 @@ object JdbcIndexer {
|
||||
readService = readService,
|
||||
)
|
||||
if (resetSchema) {
|
||||
reset(storageBackend).flatMap(_ => indexer)
|
||||
reset(resetStorageBackend, dataSourceStorageBackend).flatMap(_ => indexer)
|
||||
} else {
|
||||
indexer
|
||||
}
|
||||
}
|
||||
|
||||
private def reset(storageBackend: StorageBackend[_]): ResourceOwner[Unit] =
|
||||
private def reset(
|
||||
resetStorageBackend: ResetStorageBackend,
|
||||
dataSourceStorageBackend: DataSourceStorageBackend,
|
||||
): ResourceOwner[Unit] =
|
||||
ResourceOwner.forFuture(() =>
|
||||
Future(
|
||||
Using.resource(storageBackend.createDataSource(config.jdbcUrl).getConnection)(
|
||||
storageBackend.reset
|
||||
Using.resource(dataSourceStorageBackend.createDataSource(config.jdbcUrl).getConnection)(
|
||||
resetStorageBackend.reset
|
||||
)
|
||||
)(servicesExecutionContext)
|
||||
)
|
||||
|
@ -20,7 +20,8 @@ import scala.concurrent.{ExecutionContext, Future}
|
||||
|
||||
private[platform] case class InitializeParallelIngestion(
|
||||
providedParticipantId: Ref.ParticipantId,
|
||||
storageBackend: IngestionStorageBackend[_] with ParameterStorageBackend,
|
||||
ingestionStorageBackend: IngestionStorageBackend[_],
|
||||
parameterStorageBackend: ParameterStorageBackend,
|
||||
metrics: Metrics,
|
||||
) {
|
||||
|
||||
@ -40,7 +41,7 @@ private[platform] case class InitializeParallelIngestion(
|
||||
s"Attempting to initialize with ledger ID $providedLedgerId and participant ID $providedParticipantId"
|
||||
)
|
||||
_ <- dbDispatcher.executeSql(metrics.daml.index.db.initializeLedgerParameters)(
|
||||
storageBackend.initializeParameters(
|
||||
parameterStorageBackend.initializeParameters(
|
||||
ParameterStorageBackend.IdentityParams(
|
||||
ledgerId = providedLedgerId,
|
||||
participantId = domain.ParticipantId(providedParticipantId),
|
||||
@ -48,10 +49,10 @@ private[platform] case class InitializeParallelIngestion(
|
||||
)
|
||||
)
|
||||
ledgerEnd <- dbDispatcher.executeSql(metrics.daml.index.db.getLedgerEnd)(
|
||||
storageBackend.ledgerEnd
|
||||
parameterStorageBackend.ledgerEnd
|
||||
)
|
||||
_ <- dbDispatcher.executeSql(metrics.daml.parallelIndexer.initialization)(
|
||||
storageBackend.deletePartiallyIngestedData(ledgerEnd)
|
||||
ingestionStorageBackend.deletePartiallyIngestedData(ledgerEnd)
|
||||
)
|
||||
} yield InitializeParallelIngestion.Initialized(
|
||||
initialEventSeqId = ledgerEnd.map(_.lastEventSeqId).getOrElse(EventSequentialId.beforeBegin),
|
||||
|
@ -35,7 +35,8 @@ object ParallelIndexerFactory {
|
||||
dataSourceConfig: DataSourceConfig,
|
||||
haConfig: HaConfig,
|
||||
metrics: Metrics,
|
||||
storageBackend: DBLockStorageBackend with DataSourceStorageBackend,
|
||||
dbLockStorageBackend: DBLockStorageBackend,
|
||||
dataSourceStorageBackend: DataSourceStorageBackend,
|
||||
initializeParallelIngestion: InitializeParallelIngestion,
|
||||
parallelIndexerSubscription: ParallelIndexerSubscription[_],
|
||||
mat: Materializer,
|
||||
@ -53,7 +54,7 @@ object ParallelIndexerFactory {
|
||||
Some(metrics.daml.parallelIndexer.batching.executor -> metrics.registry),
|
||||
)
|
||||
haCoordinator <-
|
||||
if (storageBackend.dbLockSupported) {
|
||||
if (dbLockStorageBackend.dbLockSupported) {
|
||||
for {
|
||||
executionContext <- ResourceOwner
|
||||
.forExecutorService(() =>
|
||||
@ -67,10 +68,10 @@ object ParallelIndexerFactory {
|
||||
timer <- ResourceOwner.forTimer(() => new Timer)
|
||||
// this DataSource will be used to spawn the main connection where we keep the Indexer Main Lock
|
||||
// The life-cycle of such connections matches the life-cycle of a protectedExecution
|
||||
dataSource = storageBackend.createDataSource(jdbcUrl, dataSourceConfig)
|
||||
dataSource = dataSourceStorageBackend.createDataSource(jdbcUrl, dataSourceConfig)
|
||||
} yield HaCoordinator.databaseLockBasedHaCoordinator(
|
||||
connectionFactory = () => dataSource.getConnection,
|
||||
storageBackend = storageBackend,
|
||||
storageBackend = dbLockStorageBackend,
|
||||
executionContext = executionContext,
|
||||
timer = timer,
|
||||
haConfig = haConfig,
|
||||
@ -85,7 +86,7 @@ object ParallelIndexerFactory {
|
||||
.owner(
|
||||
// this is the DataSource which will be wrapped by HikariCP, and which will drive the ingestion
|
||||
// therefore this needs to be configured with the connection-init-hook, what we get from HaCoordinator
|
||||
dataSource = storageBackend.createDataSource(
|
||||
dataSource = dataSourceStorageBackend.createDataSource(
|
||||
jdbcUrl = jdbcUrl,
|
||||
dataSourceConfig = dataSourceConfig,
|
||||
connectionInitHook = Some(connectionInitializer.initialize),
|
||||
|
@ -30,7 +30,8 @@ import com.daml.platform.store.backend.{
|
||||
import scala.concurrent.Future
|
||||
|
||||
private[platform] case class ParallelIndexerSubscription[DB_BATCH](
|
||||
storageBackend: IngestionStorageBackend[DB_BATCH] with ParameterStorageBackend,
|
||||
ingestionStorageBackend: IngestionStorageBackend[DB_BATCH],
|
||||
parameterStorageBackend: ParameterStorageBackend,
|
||||
participantId: Ref.ParticipantId,
|
||||
translation: LfValueTranslation,
|
||||
compressionStrategy: CompressionStrategy,
|
||||
@ -69,12 +70,13 @@ private[platform] case class ParallelIndexerSubscription[DB_BATCH](
|
||||
seqMapperZero = seqMapperZero(initialized.initialEventSeqId),
|
||||
seqMapper = seqMapper(metrics),
|
||||
batchingParallelism = batchingParallelism,
|
||||
batcher = batcherExecutor.execute(batcher(storageBackend.batch, metrics)),
|
||||
batcher = batcherExecutor.execute(batcher(ingestionStorageBackend.batch, metrics)),
|
||||
ingestingParallelism = ingestionParallelism,
|
||||
ingester = ingester(storageBackend.insertBatch, dbDispatcher, metrics),
|
||||
tailer = tailer(storageBackend.batch(Vector.empty)),
|
||||
ingester = ingester(ingestionStorageBackend.insertBatch, dbDispatcher, metrics),
|
||||
tailer = tailer(ingestionStorageBackend.batch(Vector.empty)),
|
||||
tailingRateLimitPerSecond = tailingRateLimitPerSecond,
|
||||
ingestTail = ingestTail[DB_BATCH](storageBackend.updateLedgerEnd, dbDispatcher, metrics),
|
||||
ingestTail =
|
||||
ingestTail[DB_BATCH](parameterStorageBackend.updateLedgerEnd, dbDispatcher, metrics),
|
||||
)(
|
||||
InstrumentedSource
|
||||
.bufferedSource(
|
||||
|
@ -35,7 +35,14 @@ import com.daml.platform.server.api.validation.ErrorFactories
|
||||
import com.daml.platform.store.Conversions._
|
||||
import com.daml.platform.store._
|
||||
import com.daml.platform.store.appendonlydao.events._
|
||||
import com.daml.platform.store.backend.{ParameterStorageBackend, StorageBackend, UpdateToDbDto}
|
||||
import com.daml.platform.store.backend.{
|
||||
DeduplicationStorageBackend,
|
||||
ParameterStorageBackend,
|
||||
ReadStorageBackend,
|
||||
ResetStorageBackend,
|
||||
StorageBackendFactory,
|
||||
UpdateToDbDto,
|
||||
}
|
||||
import com.daml.platform.store.cache.MutableLedgerEndCache
|
||||
import com.daml.platform.store.entries.{
|
||||
ConfigurationEntry,
|
||||
@ -60,7 +67,10 @@ private class JdbcLedgerDao(
|
||||
enricher: Option[ValueEnricher],
|
||||
sequentialIndexer: SequentialWriteDao,
|
||||
participantId: Ref.ParticipantId,
|
||||
storageBackend: StorageBackend[_],
|
||||
readStorageBackend: ReadStorageBackend,
|
||||
parameterStorageBackend: ParameterStorageBackend,
|
||||
deduplicationStorageBackend: DeduplicationStorageBackend,
|
||||
resetStorageBackend: ResetStorageBackend,
|
||||
errorFactories: ErrorFactories,
|
||||
) extends LedgerDao {
|
||||
|
||||
@ -73,7 +83,7 @@ private class JdbcLedgerDao(
|
||||
override def lookupLedgerId()(implicit loggingContext: LoggingContext): Future[Option[LedgerId]] =
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.getLedgerId)(
|
||||
storageBackend.ledgerIdentity(_).map(_.ledgerId)
|
||||
parameterStorageBackend.ledgerIdentity(_).map(_.ledgerId)
|
||||
)
|
||||
|
||||
override def lookupParticipantId()(implicit
|
||||
@ -81,7 +91,7 @@ private class JdbcLedgerDao(
|
||||
): Future[Option[ParticipantId]] =
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.getParticipantId)(
|
||||
storageBackend.ledgerIdentity(_).map(_.participantId)
|
||||
parameterStorageBackend.ledgerIdentity(_).map(_.participantId)
|
||||
)
|
||||
|
||||
/** Defaults to Offset.begin if ledger_end is unset
|
||||
@ -89,7 +99,7 @@ private class JdbcLedgerDao(
|
||||
override def lookupLedgerEnd()(implicit loggingContext: LoggingContext): Future[Offset] =
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.getLedgerEnd)(
|
||||
storageBackend.ledgerEndOrBeforeBegin(_).lastOffset
|
||||
parameterStorageBackend.ledgerEndOrBeforeBegin(_).lastOffset
|
||||
)
|
||||
|
||||
case class InvalidLedgerEnd(msg: String) extends RuntimeException(msg)
|
||||
@ -99,7 +109,7 @@ private class JdbcLedgerDao(
|
||||
): Future[(Offset, Long)] =
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.getLedgerEndOffsetAndSequentialId) { connection =>
|
||||
val end = storageBackend.ledgerEndOrBeforeBegin(connection)
|
||||
val end = parameterStorageBackend.ledgerEndOrBeforeBegin(connection)
|
||||
end.lastOffset -> end.lastEventSeqId
|
||||
}
|
||||
|
||||
@ -108,7 +118,7 @@ private class JdbcLedgerDao(
|
||||
): Future[Option[Offset]] =
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.getInitialLedgerEnd)(
|
||||
storageBackend.ledgerEnd(_).map(_.lastOffset)
|
||||
parameterStorageBackend.ledgerEnd(_).map(_.lastOffset)
|
||||
)
|
||||
|
||||
override def initialize(
|
||||
@ -117,7 +127,7 @@ private class JdbcLedgerDao(
|
||||
)(implicit loggingContext: LoggingContext): Future[Unit] =
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.initializeLedgerParameters)(
|
||||
storageBackend.initializeParameters(
|
||||
parameterStorageBackend.initializeParameters(
|
||||
ParameterStorageBackend.IdentityParams(
|
||||
ledgerId = ledgerId,
|
||||
participantId = participantId,
|
||||
@ -129,7 +139,7 @@ private class JdbcLedgerDao(
|
||||
loggingContext: LoggingContext
|
||||
): Future[Option[(Offset, Configuration)]] =
|
||||
dbDispatcher.executeSql(metrics.daml.index.db.lookupConfiguration)(
|
||||
storageBackend.ledgerConfiguration
|
||||
readStorageBackend.configurationStorageBackend.ledgerConfiguration
|
||||
)
|
||||
|
||||
override def getConfigurationEntries(
|
||||
@ -139,7 +149,7 @@ private class JdbcLedgerDao(
|
||||
PaginatingAsyncStream(PageSize) { queryOffset =>
|
||||
withEnrichedLoggingContext("queryOffset" -> queryOffset) { implicit loggingContext =>
|
||||
dbDispatcher.executeSql(metrics.daml.index.db.loadConfigurationEntries) {
|
||||
storageBackend.configurationEntries(
|
||||
readStorageBackend.configurationStorageBackend.configurationEntries(
|
||||
startExclusive = startExclusive,
|
||||
endInclusive = endInclusive,
|
||||
pageSize = PageSize,
|
||||
@ -161,7 +171,8 @@ private class JdbcLedgerDao(
|
||||
dbDispatcher.executeSql(
|
||||
metrics.daml.index.db.storeConfigurationEntryDbMetrics
|
||||
) { implicit conn =>
|
||||
val optCurrentConfig = storageBackend.ledgerConfiguration(conn)
|
||||
val optCurrentConfig =
|
||||
readStorageBackend.configurationStorageBackend.ledgerConfiguration(conn)
|
||||
val optExpectedGeneration: Option[Long] =
|
||||
optCurrentConfig.map { case (_, c) => c.generation + 1 }
|
||||
val finalRejectionReason: Option[String] =
|
||||
@ -263,7 +274,7 @@ private class JdbcLedgerDao(
|
||||
PaginatingAsyncStream(PageSize) { queryOffset =>
|
||||
withEnrichedLoggingContext("queryOffset" -> queryOffset) { implicit loggingContext =>
|
||||
dbDispatcher.executeSql(metrics.daml.index.db.loadPartyEntries)(
|
||||
storageBackend.partyEntries(
|
||||
readStorageBackend.partyStorageBackend.partyEntries(
|
||||
startExclusive = startExclusive,
|
||||
endInclusive = endInclusive,
|
||||
pageSize = PageSize,
|
||||
@ -391,25 +402,33 @@ private class JdbcLedgerDao(
|
||||
Future.successful(List.empty)
|
||||
else
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.loadParties)(storageBackend.parties(parties))
|
||||
.executeSql(metrics.daml.index.db.loadParties)(
|
||||
readStorageBackend.partyStorageBackend.parties(parties)
|
||||
)
|
||||
|
||||
override def listKnownParties()(implicit
|
||||
loggingContext: LoggingContext
|
||||
): Future[List[PartyDetails]] =
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.loadAllParties)(storageBackend.knownParties)
|
||||
.executeSql(metrics.daml.index.db.loadAllParties)(
|
||||
readStorageBackend.partyStorageBackend.knownParties
|
||||
)
|
||||
|
||||
override def listLfPackages()(implicit
|
||||
loggingContext: LoggingContext
|
||||
): Future[Map[Ref.PackageId, PackageDetails]] =
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.loadPackages)(storageBackend.lfPackages)
|
||||
.executeSql(metrics.daml.index.db.loadPackages)(
|
||||
readStorageBackend.packageStorageBackend.lfPackages
|
||||
)
|
||||
|
||||
override def getLfArchive(
|
||||
packageId: Ref.PackageId
|
||||
)(implicit loggingContext: LoggingContext): Future[Option[Archive]] =
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.loadArchive)(storageBackend.lfArchive(packageId))
|
||||
.executeSql(metrics.daml.index.db.loadArchive)(
|
||||
readStorageBackend.packageStorageBackend.lfArchive(packageId)
|
||||
)
|
||||
.map(_.map(data => ArchiveParser.assertFromByteArray(data)))(
|
||||
servicesExecutionContext
|
||||
)
|
||||
@ -480,7 +499,7 @@ private class JdbcLedgerDao(
|
||||
PaginatingAsyncStream(PageSize) { queryOffset =>
|
||||
withEnrichedLoggingContext("queryOffset" -> queryOffset) { implicit loggingContext =>
|
||||
dbDispatcher.executeSql(metrics.daml.index.db.loadPackageEntries)(
|
||||
storageBackend.packageEntries(
|
||||
readStorageBackend.packageStorageBackend.packageEntries(
|
||||
startExclusive = startExclusive,
|
||||
endInclusive = endInclusive,
|
||||
pageSize = PageSize,
|
||||
@ -499,7 +518,7 @@ private class JdbcLedgerDao(
|
||||
dbDispatcher.executeSql(metrics.daml.index.db.deduplicateCommandDbMetrics) { conn =>
|
||||
val key = DeduplicationKeyMaker.make(commandId, submitters)
|
||||
// Insert a new deduplication entry, or update an expired entry
|
||||
val updated = storageBackend.upsertDeduplicationEntry(
|
||||
val updated = deduplicationStorageBackend.upsertDeduplicationEntry(
|
||||
key = key,
|
||||
submittedAt = submittedAt,
|
||||
deduplicateUntil = deduplicateUntil,
|
||||
@ -510,7 +529,7 @@ private class JdbcLedgerDao(
|
||||
CommandDeduplicationNew
|
||||
} else {
|
||||
// Deduplication row already exists
|
||||
CommandDeduplicationDuplicate(storageBackend.deduplicatedUntil(key)(conn))
|
||||
CommandDeduplicationDuplicate(deduplicationStorageBackend.deduplicatedUntil(key)(conn))
|
||||
}
|
||||
}
|
||||
|
||||
@ -518,7 +537,7 @@ private class JdbcLedgerDao(
|
||||
currentTime: Timestamp
|
||||
)(implicit loggingContext: LoggingContext): Future[Unit] =
|
||||
dbDispatcher.executeSql(metrics.daml.index.db.removeExpiredDeduplicationDataDbMetrics)(
|
||||
storageBackend.removeExpiredDeduplicationData(currentTime)
|
||||
deduplicationStorageBackend.removeExpiredDeduplicationData(currentTime)
|
||||
)
|
||||
|
||||
override def stopDeduplicatingCommand(
|
||||
@ -527,7 +546,7 @@ private class JdbcLedgerDao(
|
||||
)(implicit loggingContext: LoggingContext): Future[Unit] = {
|
||||
val key = DeduplicationKeyMaker.make(commandId, submitters)
|
||||
dbDispatcher.executeSql(metrics.daml.index.db.stopDeduplicatingCommandDbMetrics)(
|
||||
storageBackend.stopDeduplicatingCommand(key)
|
||||
deduplicationStorageBackend.stopDeduplicatingCommand(key)
|
||||
)
|
||||
}
|
||||
|
||||
@ -581,7 +600,7 @@ private class JdbcLedgerDao(
|
||||
dbDispatcher
|
||||
.executeSql(metrics.daml.index.db.pruneDbMetrics) { conn =>
|
||||
if (
|
||||
!storageBackend.isPruningOffsetValidAgainstMigration(
|
||||
!readStorageBackend.eventStorageBackend.isPruningOffsetValidAgainstMigration(
|
||||
pruneUpToInclusive,
|
||||
pruneAllDivulgedContracts,
|
||||
conn,
|
||||
@ -592,16 +611,24 @@ private class JdbcLedgerDao(
|
||||
)(new DamlContextualizedErrorLogger(logger, loggingContext, None))
|
||||
}
|
||||
|
||||
storageBackend.pruneEvents(pruneUpToInclusive, pruneAllDivulgedContracts)(
|
||||
readStorageBackend.eventStorageBackend.pruneEvents(
|
||||
pruneUpToInclusive,
|
||||
pruneAllDivulgedContracts,
|
||||
)(
|
||||
conn,
|
||||
loggingContext,
|
||||
)
|
||||
|
||||
storageBackend.pruneCompletions(pruneUpToInclusive)(conn, loggingContext)
|
||||
storageBackend.updatePrunedUptoInclusive(pruneUpToInclusive)(conn)
|
||||
readStorageBackend.completionStorageBackend.pruneCompletions(pruneUpToInclusive)(
|
||||
conn,
|
||||
loggingContext,
|
||||
)
|
||||
parameterStorageBackend.updatePrunedUptoInclusive(pruneUpToInclusive)(conn)
|
||||
|
||||
if (pruneAllDivulgedContracts) {
|
||||
storageBackend.updatePrunedAllDivulgedContractsUpToInclusive(pruneUpToInclusive)(conn)
|
||||
parameterStorageBackend.updatePrunedAllDivulgedContractsUpToInclusive(pruneUpToInclusive)(
|
||||
conn
|
||||
)
|
||||
}
|
||||
}
|
||||
.andThen {
|
||||
@ -615,7 +642,7 @@ private class JdbcLedgerDao(
|
||||
}
|
||||
|
||||
override def reset()(implicit loggingContext: LoggingContext): Future[Unit] =
|
||||
dbDispatcher.executeSql(metrics.daml.index.db.truncateAllTables)(storageBackend.reset)
|
||||
dbDispatcher.executeSql(metrics.daml.index.db.truncateAllTables)(resetStorageBackend.reset)
|
||||
|
||||
private val translation: LfValueTranslation =
|
||||
new LfValueTranslation(
|
||||
@ -625,13 +652,14 @@ private class JdbcLedgerDao(
|
||||
loadPackage = (packageId, loggingContext) => this.getLfArchive(packageId)(loggingContext),
|
||||
)
|
||||
|
||||
private val queryNonPruned = QueryNonPrunedImpl(storageBackend, errorFactories)
|
||||
private val queryNonPruned = QueryNonPrunedImpl(parameterStorageBackend, errorFactories)
|
||||
|
||||
override val transactionsReader: TransactionsReader =
|
||||
new TransactionsReader(
|
||||
dispatcher = dbDispatcher,
|
||||
queryNonPruned = queryNonPruned,
|
||||
storageBackend = storageBackend,
|
||||
eventStorageBackend = readStorageBackend.eventStorageBackend,
|
||||
contractStorageBackend = readStorageBackend.contractStorageBackend,
|
||||
pageSize = eventsPageSize,
|
||||
eventProcessingParallelism = eventsProcessingParallelism,
|
||||
metrics = metrics,
|
||||
@ -641,21 +669,25 @@ private class JdbcLedgerDao(
|
||||
)
|
||||
|
||||
override val contractsReader: ContractsReader =
|
||||
ContractsReader(dbDispatcher, metrics, storageBackend)(
|
||||
ContractsReader(dbDispatcher, metrics, readStorageBackend.contractStorageBackend)(
|
||||
servicesExecutionContext
|
||||
)
|
||||
|
||||
override val completions: CommandCompletionsReader =
|
||||
new CommandCompletionsReader(
|
||||
dbDispatcher,
|
||||
storageBackend,
|
||||
readStorageBackend.completionStorageBackend,
|
||||
queryNonPruned,
|
||||
metrics,
|
||||
)
|
||||
|
||||
private val postCommitValidation =
|
||||
if (performPostCommitValidation)
|
||||
new PostCommitValidation.BackedBy(storageBackend, validatePartyAllocation)
|
||||
new PostCommitValidation.BackedBy(
|
||||
readStorageBackend.partyStorageBackend,
|
||||
readStorageBackend.contractStorageBackend,
|
||||
validatePartyAllocation,
|
||||
)
|
||||
else
|
||||
PostCommitValidation.Skip
|
||||
|
||||
@ -740,9 +772,7 @@ private[platform] object JdbcLedgerDao {
|
||||
enricher: Option[ValueEnricher],
|
||||
participantId: Ref.ParticipantId,
|
||||
errorFactories: ErrorFactories,
|
||||
)(implicit loggingContext: LoggingContext): ResourceOwner[LedgerReadDao] = {
|
||||
val dbType = DbType.jdbcType(jdbcUrl)
|
||||
val storageBackend = StorageBackend.of(dbType)
|
||||
)(implicit loggingContext: LoggingContext): ResourceOwner[LedgerReadDao] =
|
||||
owner(
|
||||
serverRole,
|
||||
jdbcUrl,
|
||||
@ -757,10 +787,8 @@ private[platform] object JdbcLedgerDao {
|
||||
enricher = enricher,
|
||||
participantId = participantId,
|
||||
sequentialWriteDao = NoopSequentialWriteDao,
|
||||
storageBackend = storageBackend,
|
||||
errorFactories = errorFactories,
|
||||
).map(new MeteredLedgerReadDao(_, metrics))
|
||||
}
|
||||
|
||||
def writeOwner(
|
||||
serverRole: ServerRole,
|
||||
@ -778,7 +806,6 @@ private[platform] object JdbcLedgerDao {
|
||||
errorFactories: ErrorFactories,
|
||||
)(implicit loggingContext: LoggingContext): ResourceOwner[LedgerDao] = {
|
||||
val dbType = DbType.jdbcType(jdbcUrl)
|
||||
val storageBackend = StorageBackend.of(dbType)
|
||||
owner(
|
||||
serverRole,
|
||||
jdbcUrl,
|
||||
@ -797,10 +824,9 @@ private[platform] object JdbcLedgerDao {
|
||||
lfValueTranslationCache,
|
||||
metrics,
|
||||
CompressionStrategy.none(metrics),
|
||||
storageBackend,
|
||||
DbType.jdbcType(jdbcUrl),
|
||||
ledgerEndCache,
|
||||
),
|
||||
storageBackend = storageBackend,
|
||||
errorFactories = errorFactories,
|
||||
).map(new MeteredLedgerDao(_, metrics))
|
||||
}
|
||||
@ -823,7 +849,6 @@ private[platform] object JdbcLedgerDao {
|
||||
errorFactories: ErrorFactories,
|
||||
)(implicit loggingContext: LoggingContext): ResourceOwner[LedgerDao] = {
|
||||
val dbType = DbType.jdbcType(jdbcUrl)
|
||||
val storageBackend = StorageBackend.of(dbType)
|
||||
owner(
|
||||
serverRole,
|
||||
jdbcUrl,
|
||||
@ -843,10 +868,9 @@ private[platform] object JdbcLedgerDao {
|
||||
lfValueTranslationCache,
|
||||
metrics,
|
||||
compressionStrategy,
|
||||
storageBackend,
|
||||
dbType,
|
||||
ledgerEndCache,
|
||||
),
|
||||
storageBackend = storageBackend,
|
||||
errorFactories = errorFactories,
|
||||
).map(new MeteredLedgerDao(_, metrics))
|
||||
}
|
||||
@ -856,11 +880,13 @@ private[platform] object JdbcLedgerDao {
|
||||
lfValueTranslationCache: LfValueTranslationCache.Cache,
|
||||
metrics: Metrics,
|
||||
compressionStrategy: CompressionStrategy,
|
||||
storageBackend: StorageBackend[_],
|
||||
dbType: DbType,
|
||||
ledgerEndCache: MutableLedgerEndCache,
|
||||
): SequentialWriteDao =
|
||||
): SequentialWriteDao = {
|
||||
val factory = StorageBackendFactory.of(dbType)
|
||||
SequentialWriteDaoImpl(
|
||||
storageBackend = storageBackend,
|
||||
ingestionStorageBackend = factory.createIngestionStorageBackend,
|
||||
parameterStorageBackend = factory.createParameterStorageBackend,
|
||||
updateToDbDtos = UpdateToDbDto(
|
||||
participantId = participantId,
|
||||
translation = new LfValueTranslation(
|
||||
@ -873,6 +899,7 @@ private[platform] object JdbcLedgerDao {
|
||||
),
|
||||
ledgerEndCache = ledgerEndCache,
|
||||
)
|
||||
}
|
||||
|
||||
private def owner(
|
||||
serverRole: ServerRole,
|
||||
@ -889,12 +916,13 @@ private[platform] object JdbcLedgerDao {
|
||||
enricher: Option[ValueEnricher],
|
||||
participantId: Ref.ParticipantId,
|
||||
sequentialWriteDao: SequentialWriteDao,
|
||||
storageBackend: StorageBackend[_],
|
||||
errorFactories: ErrorFactories,
|
||||
)(implicit loggingContext: LoggingContext): ResourceOwner[LedgerDao] = {
|
||||
val dbType = DbType.jdbcType(jdbcUrl)
|
||||
val factory = StorageBackendFactory.of(dbType)
|
||||
for {
|
||||
dbDispatcher <- DbDispatcher.owner(
|
||||
storageBackend.createDataSource(jdbcUrl),
|
||||
factory.createDataSourceStorageBackend.createDataSource(jdbcUrl),
|
||||
serverRole,
|
||||
connectionPoolSize,
|
||||
connectionTimeout,
|
||||
@ -912,7 +940,10 @@ private[platform] object JdbcLedgerDao {
|
||||
enricher,
|
||||
sequentialWriteDao,
|
||||
participantId,
|
||||
storageBackend,
|
||||
StorageBackendFactory.readStorageBackendFor(dbType),
|
||||
factory.createParameterStorageBackend,
|
||||
factory.createDeduplicationStorageBackend,
|
||||
factory.createResetStorageBackend,
|
||||
errorFactories,
|
||||
)
|
||||
}
|
||||
|
@ -23,7 +23,8 @@ object NoopSequentialWriteDao extends SequentialWriteDao {
|
||||
}
|
||||
|
||||
case class SequentialWriteDaoImpl[DB_BATCH](
|
||||
storageBackend: IngestionStorageBackend[DB_BATCH] with ParameterStorageBackend,
|
||||
ingestionStorageBackend: IngestionStorageBackend[DB_BATCH],
|
||||
parameterStorageBackend: ParameterStorageBackend,
|
||||
updateToDbDtos: Offset => state.Update => Iterator[DbDto],
|
||||
ledgerEndCache: MutableLedgerEndCache,
|
||||
) extends SequentialWriteDao {
|
||||
@ -33,7 +34,7 @@ case class SequentialWriteDaoImpl[DB_BATCH](
|
||||
|
||||
private def lazyInit(connection: Connection): Unit =
|
||||
if (!lastEventSeqIdInitialized) {
|
||||
lastEventSeqId = storageBackend.ledgerEndOrBeforeBegin(connection).lastEventSeqId
|
||||
lastEventSeqId = parameterStorageBackend.ledgerEndOrBeforeBegin(connection).lastEventSeqId
|
||||
lastEventSeqIdInitialized = true
|
||||
}
|
||||
|
||||
@ -60,10 +61,10 @@ case class SequentialWriteDaoImpl[DB_BATCH](
|
||||
.getOrElse(Vector.empty)
|
||||
|
||||
dbDtos
|
||||
.pipe(storageBackend.batch)
|
||||
.pipe(storageBackend.insertBatch(connection, _))
|
||||
.pipe(ingestionStorageBackend.batch)
|
||||
.pipe(ingestionStorageBackend.insertBatch(connection, _))
|
||||
|
||||
storageBackend.updateLedgerEnd(
|
||||
parameterStorageBackend.updateLedgerEnd(
|
||||
ParameterStorageBackend.LedgerEnd(
|
||||
lastOffset = offset,
|
||||
lastEventSeqId = lastEventSeqId,
|
||||
|
@ -53,7 +53,8 @@ private[appendonlydao] object PostCommitValidation {
|
||||
}
|
||||
|
||||
final class BackedBy(
|
||||
dao: PartyStorageBackend with ContractStorageBackend,
|
||||
partyStorageBackend: PartyStorageBackend,
|
||||
contractStorageBackend: ContractStorageBackend,
|
||||
validatePartyAllocation: Boolean,
|
||||
) extends PostCommitValidation {
|
||||
|
||||
@ -90,7 +91,7 @@ private[appendonlydao] object PostCommitValidation {
|
||||
if (referredContracts.isEmpty) {
|
||||
None
|
||||
} else {
|
||||
dao
|
||||
contractStorageBackend
|
||||
.maximumLedgerTime(referredContracts)(connection)
|
||||
.map(validateCausalMonotonicity(_, transactionLedgerEffectiveTime))
|
||||
.getOrElse(Some(Rejection.UnknownContract))
|
||||
@ -116,7 +117,7 @@ private[appendonlydao] object PostCommitValidation {
|
||||
transaction: CommittedTransaction
|
||||
)(implicit connection: Connection): Option[Rejection] = {
|
||||
val informees = transaction.informees
|
||||
val allocatedInformees = dao.parties(informees.toSeq)(connection).map(_.party)
|
||||
val allocatedInformees = partyStorageBackend.parties(informees.toSeq)(connection).map(_.party)
|
||||
if (allocatedInformees.toSet == informees)
|
||||
None
|
||||
else
|
||||
@ -134,7 +135,7 @@ private[appendonlydao] object PostCommitValidation {
|
||||
transaction: CommittedTransaction
|
||||
)(implicit connection: Connection): Option[Rejection] =
|
||||
transaction
|
||||
.foldInExecutionOrder[Result](Right(State.empty(dao)))(
|
||||
.foldInExecutionOrder[Result](Right(State.empty(contractStorageBackend)))(
|
||||
exerciseBegin = (acc, _, exe) => {
|
||||
val newAcc = acc.flatMap(validateKeyUsages(exe, _))
|
||||
(newAcc, true)
|
||||
@ -206,13 +207,13 @@ private[appendonlydao] object PostCommitValidation {
|
||||
* @param rollbackStack Stack of states at the beginning of rollback nodes so we can
|
||||
* restore the state at the end of the rollback. The most recent rollback
|
||||
* comes first.
|
||||
* @param dao Dao about committed contracts for post-commit validation purposes.
|
||||
* @param contractStorageBackend For getting committed contracts for post-commit validation purposes.
|
||||
* This is never changed during the traversal of the transaction.
|
||||
*/
|
||||
private final case class State(
|
||||
private val currentState: ActiveState,
|
||||
private val rollbackStack: List[ActiveState],
|
||||
private val dao: PartyStorageBackend with ContractStorageBackend,
|
||||
private val contractStorageBackend: ContractStorageBackend,
|
||||
) {
|
||||
|
||||
def validateCreate(
|
||||
@ -264,14 +265,16 @@ private[appendonlydao] object PostCommitValidation {
|
||||
private def lookup(key: Key)(implicit connection: Connection): Option[ContractId] =
|
||||
currentState.contracts.get(key.hash).orElse {
|
||||
if (currentState.removed(key.hash)) None
|
||||
else dao.contractKeyGlobally(key)(connection)
|
||||
else contractStorageBackend.contractKeyGlobally(key)(connection)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private object State {
|
||||
def empty(dao: PartyStorageBackend with ContractStorageBackend): State =
|
||||
State(ActiveState(Map.empty, Set.empty), Nil, dao)
|
||||
def empty(
|
||||
contractStorageBackend: ContractStorageBackend
|
||||
): State =
|
||||
State(ActiveState(Map.empty, Set.empty), Nil, contractStorageBackend)
|
||||
}
|
||||
|
||||
sealed trait Rejection {
|
||||
|
@ -4,6 +4,7 @@
|
||||
package com.daml.platform.store.appendonlydao.events
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import akka.stream.OverflowStrategy
|
||||
import akka.stream.scaladsl.Source
|
||||
import akka.{Done, NotUsed}
|
||||
@ -29,7 +30,7 @@ import com.daml.platform.store.appendonlydao.{
|
||||
PaginatingAsyncStream,
|
||||
}
|
||||
import com.daml.platform.store.backend.EventStorageBackend.{FilterParams, RangeParams}
|
||||
import com.daml.platform.store.backend.StorageBackend
|
||||
import com.daml.platform.store.backend.{ContractStorageBackend, EventStorageBackend}
|
||||
import com.daml.platform.store.interfaces.TransactionLogUpdate
|
||||
import com.daml.platform.store.utils.Telemetry
|
||||
import com.daml.telemetry
|
||||
@ -49,7 +50,8 @@ import scala.util.{Failure, Success}
|
||||
private[appendonlydao] final class TransactionsReader(
|
||||
dispatcher: DbDispatcher,
|
||||
queryNonPruned: QueryNonPruned,
|
||||
storageBackend: StorageBackend[_],
|
||||
eventStorageBackend: EventStorageBackend,
|
||||
contractStorageBackend: ContractStorageBackend,
|
||||
pageSize: Int,
|
||||
eventProcessingParallelism: Int,
|
||||
metrics: Metrics,
|
||||
@ -59,11 +61,11 @@ private[appendonlydao] final class TransactionsReader(
|
||||
|
||||
private val dbMetrics = metrics.daml.index.db
|
||||
private val eventSeqIdReader =
|
||||
new EventsRange.EventSeqIdReader(storageBackend.maxEventSequentialIdOfAnObservableEvent)
|
||||
new EventsRange.EventSeqIdReader(eventStorageBackend.maxEventSequentialIdOfAnObservableEvent)
|
||||
private val getTransactions =
|
||||
new EventsTableFlatEventsRangeQueries.GetTransactions(storageBackend)
|
||||
new EventsTableFlatEventsRangeQueries.GetTransactions(eventStorageBackend)
|
||||
private val getActiveContracts =
|
||||
new EventsTableFlatEventsRangeQueries.GetActiveContracts(storageBackend)
|
||||
new EventsTableFlatEventsRangeQueries.GetActiveContracts(eventStorageBackend)
|
||||
|
||||
private val logger = ContextualizedLogger.get(this.getClass)
|
||||
|
||||
@ -156,7 +158,7 @@ private[appendonlydao] final class TransactionsReader(
|
||||
)(implicit loggingContext: LoggingContext): Future[Option[GetFlatTransactionResponse]] =
|
||||
dispatcher
|
||||
.executeSql(dbMetrics.lookupFlatTransactionById)(
|
||||
storageBackend.flatTransaction(
|
||||
eventStorageBackend.flatTransaction(
|
||||
transactionId,
|
||||
FilterParams(
|
||||
wildCardParties = requestingParties,
|
||||
@ -193,7 +195,7 @@ private[appendonlydao] final class TransactionsReader(
|
||||
queryNonPruned.executeSql(
|
||||
EventsRange.readPage(
|
||||
read = (range, limit, fetchSizeHint) =>
|
||||
storageBackend.transactionTreeEvents(
|
||||
eventStorageBackend.transactionTreeEvents(
|
||||
rangeParams = RangeParams(
|
||||
startExclusive = range.startExclusive,
|
||||
endInclusive = range.endInclusive,
|
||||
@ -256,7 +258,7 @@ private[appendonlydao] final class TransactionsReader(
|
||||
)(implicit loggingContext: LoggingContext): Future[Option[GetTransactionResponse]] =
|
||||
dispatcher
|
||||
.executeSql(dbMetrics.lookupTransactionTreeById)(
|
||||
storageBackend.transactionTree(
|
||||
eventStorageBackend.transactionTree(
|
||||
transactionId,
|
||||
FilterParams(
|
||||
wildCardParties = requestingParties,
|
||||
@ -306,7 +308,7 @@ private[appendonlydao] final class TransactionsReader(
|
||||
.mapAsync(eventProcessingParallelism) { range =>
|
||||
dispatcher.executeSql(dbMetrics.getTransactionLogUpdates) { implicit conn =>
|
||||
queryNonPruned.executeSql(
|
||||
query = storageBackend.rawEvents(
|
||||
query = eventStorageBackend.rawEvents(
|
||||
startExclusive = range.startExclusive,
|
||||
endInclusive = range.endInclusive,
|
||||
)(conn),
|
||||
@ -436,7 +438,7 @@ private[appendonlydao] final class TransactionsReader(
|
||||
.mapAsync(eventProcessingParallelism) { range =>
|
||||
dispatcher.executeSql(dbMetrics.getContractStateEvents) { implicit conn =>
|
||||
queryNonPruned.executeSql(
|
||||
storageBackend
|
||||
contractStorageBackend
|
||||
.contractStateEvents(range.startExclusive, range.endInclusive)(conn),
|
||||
startExclusive._1,
|
||||
pruned =>
|
||||
|
@ -14,13 +14,11 @@ import com.daml.lf.data.Time.Timestamp
|
||||
import com.daml.lf.ledger.EventId
|
||||
import com.daml.logging.LoggingContext
|
||||
import com.daml.platform
|
||||
import com.daml.platform.store.{DbType, EventSequentialId}
|
||||
import com.daml.platform.store.EventSequentialId
|
||||
import com.daml.platform.store.appendonlydao.events.{ContractId, EventsTable, Key, Raw}
|
||||
import com.daml.platform.store.backend.EventStorageBackend.{FilterParams, RangeParams}
|
||||
import com.daml.platform.store.backend.StorageBackend.RawTransactionEvent
|
||||
import com.daml.platform.store.backend.h2.H2StorageBackend
|
||||
import com.daml.platform.store.backend.oracle.OracleStorageBackend
|
||||
import com.daml.platform.store.backend.postgresql.{PostgresDataSourceConfig, PostgresStorageBackend}
|
||||
import com.daml.platform.store.backend.postgresql.PostgresDataSourceConfig
|
||||
import com.daml.platform.store.entries.{ConfigurationEntry, PackageLedgerEntry, PartyLedgerEntry}
|
||||
import com.daml.platform.store.interfaces.LedgerDaoContractsReader.KeyState
|
||||
import com.daml.scalautil.NeverEqualsOverride
|
||||
@ -48,7 +46,10 @@ trait StorageBackend[DB_BATCH]
|
||||
with EventStorageBackend
|
||||
with DataSourceStorageBackend
|
||||
with DBLockStorageBackend
|
||||
with IntegrityStorageBackend {
|
||||
with IntegrityStorageBackend
|
||||
with ResetStorageBackend
|
||||
|
||||
trait ResetStorageBackend {
|
||||
|
||||
/** Truncates all storage backend tables, EXCEPT the packages table.
|
||||
* Does not touch other tables, like the Flyway history table.
|
||||
@ -411,11 +412,4 @@ object StorageBackend {
|
||||
eventSequentialId: Long,
|
||||
offset: Offset,
|
||||
) extends NeverEqualsOverride
|
||||
|
||||
def of(dbType: DbType): StorageBackend[_] =
|
||||
dbType match {
|
||||
case DbType.H2Database => H2StorageBackend
|
||||
case DbType.Postgres => PostgresStorageBackend
|
||||
case DbType.Oracle => OracleStorageBackend
|
||||
}
|
||||
}
|
||||
|
@ -0,0 +1,55 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend
|
||||
|
||||
import com.daml.platform.store.DbType
|
||||
import com.daml.platform.store.backend.h2.H2StorageBackendFactory
|
||||
import com.daml.platform.store.backend.oracle.OracleStorageBackendFactory
|
||||
import com.daml.platform.store.backend.postgresql.PostgresStorageBackendFactory
|
||||
|
||||
trait StorageBackendFactory {
|
||||
def createIngestionStorageBackend: IngestionStorageBackend[_]
|
||||
def createParameterStorageBackend: ParameterStorageBackend
|
||||
def createConfigurationStorageBackend: ConfigurationStorageBackend
|
||||
def createPartyStorageBackend: PartyStorageBackend
|
||||
def createPackageStorageBackend: PackageStorageBackend
|
||||
def createDeduplicationStorageBackend: DeduplicationStorageBackend
|
||||
def createCompletionStorageBackend: CompletionStorageBackend
|
||||
def createContractStorageBackend: ContractStorageBackend
|
||||
def createEventStorageBackend: EventStorageBackend
|
||||
def createDataSourceStorageBackend: DataSourceStorageBackend
|
||||
def createDBLockStorageBackend: DBLockStorageBackend
|
||||
def createIntegrityStorageBackend: IntegrityStorageBackend
|
||||
def createResetStorageBackend: ResetStorageBackend
|
||||
}
|
||||
|
||||
object StorageBackendFactory {
|
||||
def of(dbType: DbType): StorageBackendFactory =
|
||||
dbType match {
|
||||
case DbType.H2Database => H2StorageBackendFactory
|
||||
case DbType.Postgres => PostgresStorageBackendFactory
|
||||
case DbType.Oracle => OracleStorageBackendFactory
|
||||
}
|
||||
|
||||
def readStorageBackendFor(dbType: DbType): ReadStorageBackend = {
|
||||
val factory = of(dbType)
|
||||
ReadStorageBackend(
|
||||
configurationStorageBackend = factory.createConfigurationStorageBackend,
|
||||
partyStorageBackend = factory.createPartyStorageBackend,
|
||||
packageStorageBackend = factory.createPackageStorageBackend,
|
||||
completionStorageBackend = factory.createCompletionStorageBackend,
|
||||
contractStorageBackend = factory.createContractStorageBackend,
|
||||
eventStorageBackend = factory.createEventStorageBackend,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
case class ReadStorageBackend(
|
||||
configurationStorageBackend: ConfigurationStorageBackend,
|
||||
partyStorageBackend: PartyStorageBackend,
|
||||
packageStorageBackend: PackageStorageBackend,
|
||||
completionStorageBackend: CompletionStorageBackend,
|
||||
contractStorageBackend: ContractStorageBackend,
|
||||
eventStorageBackend: EventStorageBackend,
|
||||
)
|
@ -23,19 +23,20 @@ object VerifiedDataSource {
|
||||
executionContext: ExecutionContext,
|
||||
loggingContext: LoggingContext,
|
||||
): Future[DataSource] = {
|
||||
val storageBackend = StorageBackend.of(DbType.jdbcType(jdbcUrl))
|
||||
val dataSourceStorageBackend =
|
||||
StorageBackendFactory.of(DbType.jdbcType(jdbcUrl)).createDataSourceStorageBackend
|
||||
for {
|
||||
dataSource <- RetryStrategy.constant(
|
||||
attempts = MaxInitialConnectRetryAttempts,
|
||||
waitTime = 1.second,
|
||||
) { (i, _) =>
|
||||
Future {
|
||||
val createdDatasource = storageBackend.createDataSource(jdbcUrl)
|
||||
val createdDatasource = dataSourceStorageBackend.createDataSource(jdbcUrl)
|
||||
logger.info(
|
||||
s"Attempting to connect to the database (attempt $i/$MaxInitialConnectRetryAttempts)"
|
||||
)
|
||||
Using.resource(createdDatasource.getConnection)(
|
||||
storageBackend.checkDatabaseAvailable
|
||||
dataSourceStorageBackend.checkDatabaseAvailable
|
||||
)
|
||||
createdDatasource
|
||||
}.andThen { case Failure(exception) =>
|
||||
@ -44,7 +45,7 @@ object VerifiedDataSource {
|
||||
}
|
||||
_ <- Future {
|
||||
Using.resource(dataSource.getConnection)(
|
||||
storageBackend.checkCompatibility
|
||||
dataSourceStorageBackend.checkCompatibility
|
||||
)
|
||||
}
|
||||
} yield dataSource
|
||||
|
@ -20,12 +20,11 @@ import com.daml.platform.store.backend.common.ComposableQuery.SqlStringInterpola
|
||||
import com.google.protobuf.any
|
||||
import com.google.rpc.status.{Status => StatusProto}
|
||||
|
||||
trait CompletionStorageBackendTemplate extends CompletionStorageBackend {
|
||||
class CompletionStorageBackendTemplate(queryStrategy: QueryStrategy)
|
||||
extends CompletionStorageBackend {
|
||||
|
||||
private val logger: ContextualizedLogger = ContextualizedLogger.get(this.getClass)
|
||||
|
||||
def queryStrategy: QueryStrategy
|
||||
|
||||
override def commandCompletions(
|
||||
startExclusive: Offset,
|
||||
endInclusive: Offset,
|
||||
@ -154,7 +153,7 @@ trait CompletionStorageBackendTemplate extends CompletionStorageBackend {
|
||||
.map(_.details)
|
||||
.getOrElse(Seq.empty)
|
||||
|
||||
def pruneCompletions(
|
||||
override def pruneCompletions(
|
||||
pruneUpToInclusive: Offset
|
||||
)(connection: Connection, loggingContext: LoggingContext): Unit = {
|
||||
pruneWithLogging(queryDescription = "Command completions pruning") {
|
||||
|
@ -15,7 +15,7 @@ import com.daml.platform.store.appendonlydao.JdbcLedgerDao.{acceptType, rejectTy
|
||||
import com.daml.platform.store.backend.ConfigurationStorageBackend
|
||||
import com.daml.platform.store.entries.ConfigurationEntry
|
||||
|
||||
private[backend] trait ConfigurationStorageBackendTemplate extends ConfigurationStorageBackend {
|
||||
private[backend] object ConfigurationStorageBackendTemplate extends ConfigurationStorageBackend {
|
||||
|
||||
private val SQL_GET_CONFIGURATION_ENTRIES = SQL(
|
||||
"""select
|
||||
|
@ -27,9 +27,7 @@ import com.daml.platform.store.interfaces.LedgerDaoContractsReader.{
|
||||
|
||||
import scala.util.{Failure, Success, Try}
|
||||
|
||||
trait ContractStorageBackendTemplate extends ContractStorageBackend {
|
||||
|
||||
def queryStrategy: QueryStrategy
|
||||
class ContractStorageBackendTemplate(queryStrategy: QueryStrategy) extends ContractStorageBackend {
|
||||
|
||||
override def contractKeyGlobally(key: Key)(connection: Connection): Option[ContractId] =
|
||||
contractKey(
|
||||
|
@ -6,21 +6,15 @@ package com.daml.platform.store.backend.common
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SqlParser.get
|
||||
import com.daml.platform.store.backend.DataSourceStorageBackend
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.SqlStringInterpolation
|
||||
|
||||
private[backend] trait DataSourceStorageBackendTemplate extends DataSourceStorageBackend {
|
||||
private[backend] object DataSourceStorageBackendTemplate {
|
||||
|
||||
protected def exe(statement: String): Connection => Unit = { connection =>
|
||||
val stmnt = connection.createStatement()
|
||||
try {
|
||||
stmnt.execute(statement)
|
||||
def exe(statement: String): Connection => Unit = { implicit connection =>
|
||||
SQL"#$statement".execute()
|
||||
()
|
||||
} finally {
|
||||
stmnt.close()
|
||||
}
|
||||
}
|
||||
|
||||
override def checkDatabaseAvailable(connection: Connection): Unit =
|
||||
def checkDatabaseAvailable(connection: Connection): Unit =
|
||||
assert(SQL"SELECT 1".as(get[Int](1).single)(connection) == 1)
|
||||
}
|
||||
|
@ -26,19 +26,17 @@ import com.daml.platform.store.backend.common.ComposableQuery.{CompositeSql, Sql
|
||||
|
||||
import scala.collection.compat.immutable.ArraySeq
|
||||
|
||||
trait EventStorageBackendTemplate extends EventStorageBackend {
|
||||
abstract class EventStorageBackendTemplate(
|
||||
eventStrategy: EventStrategy,
|
||||
queryStrategy: QueryStrategy,
|
||||
// TODO Refactoring: This method is needed in pruneEvents, but belongs to [[ParameterStorageBackend]].
|
||||
// Remove with the break-out of pruneEvents.
|
||||
participantAllDivulgedContractsPrunedUpToInclusive: Connection => Option[Offset],
|
||||
) extends EventStorageBackend {
|
||||
import com.daml.platform.store.Conversions.ArrayColumnToStringArray.arrayColumnToStringArray
|
||||
|
||||
private val logger: ContextualizedLogger = ContextualizedLogger.get(this.getClass)
|
||||
|
||||
def eventStrategy: EventStrategy
|
||||
def queryStrategy: QueryStrategy
|
||||
// TODO Refactoring: This method is needed in pruneEvents, but belongs to [[ParameterStorageBackend]].
|
||||
// Remove with the break-out of pruneEvents.
|
||||
def participantAllDivulgedContractsPrunedUpToInclusive(
|
||||
connection: Connection
|
||||
): Option[Offset]
|
||||
|
||||
private val selectColumnsForFlatTransactions =
|
||||
Seq(
|
||||
"event_offset",
|
||||
|
@ -6,10 +6,10 @@ package com.daml.platform.store.backend.common
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.{SQL, SqlQuery}
|
||||
import com.daml.platform.store.backend.{IngestionStorageBackend, ParameterStorageBackend}
|
||||
import com.daml.platform.store.backend.{DbDto, IngestionStorageBackend, ParameterStorageBackend}
|
||||
|
||||
private[backend] trait IngestionStorageBackendTemplate[DB_BATCH]
|
||||
extends IngestionStorageBackend[DB_BATCH] {
|
||||
private[backend] class IngestionStorageBackendTemplate(schema: Schema[DbDto])
|
||||
extends IngestionStorageBackend[AppendOnlySchema.Batch] {
|
||||
|
||||
private val SQL_DELETE_OVERSPILL_ENTRIES: List[SqlQuery] =
|
||||
List(
|
||||
@ -39,4 +39,13 @@ private[backend] trait IngestionStorageBackendTemplate[DB_BATCH]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
override def insertBatch(
|
||||
connection: Connection,
|
||||
dbBatch: AppendOnlySchema.Batch,
|
||||
): Unit =
|
||||
schema.executeUpdate(dbBatch, connection)
|
||||
|
||||
override def batch(dbDtos: Vector[DbDto]): AppendOnlySchema.Batch =
|
||||
schema.prepareData(dbDtos)
|
||||
}
|
||||
|
@ -10,7 +10,7 @@ import anorm.SqlParser.{long, str}
|
||||
import anorm.~
|
||||
import com.daml.platform.store.backend.IntegrityStorageBackend
|
||||
|
||||
private[backend] trait IntegrityStorageBackendTemplate extends IntegrityStorageBackend {
|
||||
private[backend] object IntegrityStorageBackendTemplate extends IntegrityStorageBackend {
|
||||
|
||||
private val allSequentialIds: String =
|
||||
s"""
|
||||
|
@ -17,7 +17,7 @@ import com.daml.platform.store.appendonlydao.JdbcLedgerDao.{acceptType, rejectTy
|
||||
import com.daml.platform.store.backend.PackageStorageBackend
|
||||
import com.daml.platform.store.entries.PackageLedgerEntry
|
||||
|
||||
private[backend] trait PackageStorageBackendTemplate extends PackageStorageBackend {
|
||||
private[backend] object PackageStorageBackendTemplate extends PackageStorageBackend {
|
||||
|
||||
private val SQL_SELECT_PACKAGES =
|
||||
SQL(
|
||||
|
@ -18,7 +18,7 @@ import com.daml.platform.store.backend.common.ComposableQuery.SqlStringInterpola
|
||||
import com.daml.scalautil.Statement.discard
|
||||
import scalaz.syntax.tag._
|
||||
|
||||
private[backend] trait ParameterStorageBackendTemplate extends ParameterStorageBackend {
|
||||
private[backend] object ParameterStorageBackendTemplate extends ParameterStorageBackend {
|
||||
|
||||
private val logger: ContextualizedLogger = ContextualizedLogger.get(this.getClass)
|
||||
|
||||
|
@ -17,9 +17,7 @@ import com.daml.platform.store.backend.PartyStorageBackend
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.SqlStringInterpolation
|
||||
import com.daml.platform.store.entries.PartyLedgerEntry
|
||||
|
||||
trait PartyStorageBackendTemplate extends PartyStorageBackend {
|
||||
|
||||
def queryStrategy: QueryStrategy
|
||||
class PartyStorageBackendTemplate(queryStrategy: QueryStrategy) extends PartyStorageBackend {
|
||||
|
||||
private val SQL_GET_PARTY_ENTRIES = SQL(
|
||||
"""select * from party_entries
|
||||
|
@ -0,0 +1,110 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.h2
|
||||
|
||||
import anorm.{Row, SimpleSql}
|
||||
import com.daml.platform.store.appendonlydao.events.ContractId
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.{CompositeSql, SqlStringInterpolation}
|
||||
import com.daml.platform.store.backend.common.ContractStorageBackendTemplate
|
||||
|
||||
object H2ContractStorageBackend extends ContractStorageBackendTemplate(H2QueryStrategy) {
|
||||
override def maximumLedgerTimeSqlLiteral(id: ContractId): SimpleSql[Row] = {
|
||||
import com.daml.platform.store.Conversions.ContractIdToStatement
|
||||
SQL"""
|
||||
WITH archival_event AS (
|
||||
SELECT 1
|
||||
FROM participant_events_consuming_exercise, parameters
|
||||
WHERE contract_id = $id
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
FETCH NEXT 1 ROW ONLY
|
||||
),
|
||||
create_event AS (
|
||||
SELECT ledger_effective_time
|
||||
FROM participant_events_create, parameters
|
||||
WHERE contract_id = $id
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
FETCH NEXT 1 ROW ONLY -- limit here to guide planner wrt expected number of results
|
||||
),
|
||||
divulged_contract AS (
|
||||
SELECT NULL::BIGINT
|
||||
FROM participant_events_divulgence, parameters
|
||||
WHERE contract_id = $id
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
ORDER BY event_sequential_id
|
||||
-- prudent engineering: make results more stable by preferring earlier divulgence events
|
||||
-- Results might still change due to pruning.
|
||||
FETCH NEXT 1 ROW ONLY
|
||||
),
|
||||
create_and_divulged_contracts AS (
|
||||
(SELECT * FROM create_event) -- prefer create over divulgence events
|
||||
UNION ALL
|
||||
(SELECT * FROM divulged_contract)
|
||||
)
|
||||
SELECT ledger_effective_time
|
||||
FROM create_and_divulged_contracts
|
||||
WHERE NOT EXISTS (SELECT 1 FROM archival_event)
|
||||
FETCH NEXT 1 ROW ONLY"""
|
||||
}
|
||||
|
||||
override def activeContractSqlLiteral(
|
||||
contractId: ContractId,
|
||||
treeEventWitnessesClause: CompositeSql,
|
||||
resultColumns: List[String],
|
||||
coalescedColumns: String,
|
||||
): SimpleSql[Row] = {
|
||||
import com.daml.platform.store.Conversions.ContractIdToStatement
|
||||
SQL""" WITH archival_event AS (
|
||||
SELECT 1
|
||||
FROM participant_events_consuming_exercise, parameters
|
||||
WHERE contract_id = $contractId
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
AND $treeEventWitnessesClause -- only use visible archivals
|
||||
FETCH NEXT 1 ROW ONLY
|
||||
),
|
||||
create_event AS (
|
||||
SELECT contract_id, #${resultColumns.mkString(", ")}
|
||||
FROM participant_events_create, parameters
|
||||
WHERE contract_id = $contractId
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
AND $treeEventWitnessesClause
|
||||
FETCH NEXT 1 ROW ONLY -- limit here to guide planner wrt expected number of results
|
||||
),
|
||||
-- no visibility check, as it is used to backfill missing template_id and create_arguments for divulged contracts
|
||||
create_event_unrestricted AS (
|
||||
SELECT contract_id, #${resultColumns.mkString(", ")}
|
||||
FROM participant_events_create, parameters
|
||||
WHERE contract_id = $contractId
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
FETCH NEXT 1 ROW ONLY -- limit here to guide planner wrt expected number of results
|
||||
),
|
||||
divulged_contract AS (
|
||||
SELECT divulgence_events.contract_id,
|
||||
-- Note: the divulgence_event.template_id can be NULL
|
||||
-- for certain integrations. For example, the KV integration exploits that
|
||||
-- every participant node knows about all create events. The integration
|
||||
-- therefore only communicates the change in visibility to the IndexDB, but
|
||||
-- does not include a full divulgence event.
|
||||
#$coalescedColumns
|
||||
FROM participant_events_divulgence divulgence_events LEFT OUTER JOIN create_event_unrestricted ON (divulgence_events.contract_id = create_event_unrestricted.contract_id),
|
||||
parameters
|
||||
WHERE divulgence_events.contract_id = $contractId -- restrict to aid query planner
|
||||
AND divulgence_events.event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
AND $treeEventWitnessesClause
|
||||
ORDER BY divulgence_events.event_sequential_id
|
||||
-- prudent engineering: make results more stable by preferring earlier divulgence events
|
||||
-- Results might still change due to pruning.
|
||||
FETCH NEXT 1 ROW ONLY
|
||||
),
|
||||
create_and_divulged_contracts AS (
|
||||
(SELECT * FROM create_event) -- prefer create over divulgence events
|
||||
UNION ALL
|
||||
(SELECT * FROM divulged_contract)
|
||||
)
|
||||
SELECT contract_id, #${resultColumns.mkString(", ")}
|
||||
FROM create_and_divulged_contracts
|
||||
WHERE NOT EXISTS (SELECT 1 FROM archival_event)
|
||||
FETCH NEXT 1 ROW ONLY"""
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,24 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.h2
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import com.daml.platform.store.backend.DBLockStorageBackend
|
||||
|
||||
object H2DBLockStorageBackend extends DBLockStorageBackend {
|
||||
override def tryAcquire(
|
||||
lockId: DBLockStorageBackend.LockId,
|
||||
lockMode: DBLockStorageBackend.LockMode,
|
||||
)(connection: Connection): Option[DBLockStorageBackend.Lock] =
|
||||
throw new UnsupportedOperationException("db level locks are not supported for H2")
|
||||
|
||||
override def release(lock: DBLockStorageBackend.Lock)(connection: Connection): Boolean =
|
||||
throw new UnsupportedOperationException("db level locks are not supported for H2")
|
||||
|
||||
override def lock(id: Int): DBLockStorageBackend.LockId =
|
||||
throw new UnsupportedOperationException("db level locks are not supported for H2")
|
||||
|
||||
override def dbLockSupported: Boolean = false
|
||||
}
|
@ -0,0 +1,56 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.h2
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import com.daml.logging.LoggingContext
|
||||
import com.daml.platform.store.backend.DataSourceStorageBackend
|
||||
import com.daml.platform.store.backend.common.{
|
||||
DataSourceStorageBackendTemplate,
|
||||
InitHookDataSourceProxy,
|
||||
}
|
||||
import javax.sql.DataSource
|
||||
|
||||
object H2DataSourceStorageBackend extends DataSourceStorageBackend {
|
||||
override def createDataSource(
|
||||
jdbcUrl: String,
|
||||
dataSourceConfig: DataSourceStorageBackend.DataSourceConfig,
|
||||
connectionInitHook: Option[Connection => Unit],
|
||||
)(implicit loggingContext: LoggingContext): DataSource = {
|
||||
val h2DataSource = new org.h2.jdbcx.JdbcDataSource()
|
||||
|
||||
// H2 (org.h2.jdbcx.JdbcDataSource) does not support setting the user/password within the jdbcUrl, so remove
|
||||
// those properties from the url if present and set them separately. Note that Postgres and Oracle support
|
||||
// user/password in the URLs, so we don't bother exposing user/password configs separately from the url just for h2
|
||||
// which is anyway not supported for production. (This also helps run canton h2 participants that set user and
|
||||
// password.)
|
||||
val (urlNoUserNoPassword, user, password) = extractUserPasswordAndRemoveFromUrl(jdbcUrl)
|
||||
user.foreach(h2DataSource.setUser)
|
||||
password.foreach(h2DataSource.setPassword)
|
||||
h2DataSource.setUrl(urlNoUserNoPassword)
|
||||
|
||||
InitHookDataSourceProxy(h2DataSource, connectionInitHook.toList)
|
||||
}
|
||||
|
||||
def extractUserPasswordAndRemoveFromUrl(
|
||||
jdbcUrl: String
|
||||
): (String, Option[String], Option[String]) = {
|
||||
def setKeyValueAndRemoveFromUrl(url: String, key: String): (String, Option[String]) = {
|
||||
val regex = s".*(;(?i)${key}=([^;]*)).*".r
|
||||
url match {
|
||||
case regex(keyAndValue, value) =>
|
||||
(url.replace(keyAndValue, ""), Some(value))
|
||||
case _ => (url, None)
|
||||
}
|
||||
}
|
||||
|
||||
val (urlNoUser, user) = setKeyValueAndRemoveFromUrl(jdbcUrl, "user")
|
||||
val (urlNoUserNoPassword, password) = setKeyValueAndRemoveFromUrl(urlNoUser, "password")
|
||||
(urlNoUserNoPassword, user, password)
|
||||
}
|
||||
|
||||
override def checkDatabaseAvailable(connection: Connection): Unit =
|
||||
DataSourceStorageBackendTemplate.checkDatabaseAvailable(connection)
|
||||
}
|
@ -0,0 +1,55 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.h2
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SQL
|
||||
import com.daml.lf.data.Time.Timestamp
|
||||
import com.daml.logging.{ContextualizedLogger, LoggingContext}
|
||||
import com.daml.platform.store.backend.common.DeduplicationStorageBackendTemplate
|
||||
|
||||
import scala.util.control.NonFatal
|
||||
|
||||
object H2DeduplicationStorageBackend extends DeduplicationStorageBackendTemplate {
|
||||
private val logger = ContextualizedLogger.get(this.getClass)
|
||||
|
||||
val SQL_INSERT_COMMAND: String =
|
||||
"""merge into participant_command_submissions pcs
|
||||
|using dual on deduplication_key = {deduplicationKey}
|
||||
|when not matched then
|
||||
| insert (deduplication_key, deduplicate_until)
|
||||
| values ({deduplicationKey}, {deduplicateUntil})
|
||||
|when matched and pcs.deduplicate_until < {submittedAt} then
|
||||
| update set deduplicate_until={deduplicateUntil}""".stripMargin
|
||||
|
||||
override def upsertDeduplicationEntry(
|
||||
key: String,
|
||||
submittedAt: Timestamp,
|
||||
deduplicateUntil: Timestamp,
|
||||
)(connection: Connection)(implicit loggingContext: LoggingContext): Int = {
|
||||
|
||||
// Under the default READ_COMMITTED isolation level used for the indexdb, when a deduplication
|
||||
// upsert is performed simultaneously from multiple threads, the query fails with
|
||||
// JdbcSQLIntegrityConstraintViolationException: Unique index or primary key violation
|
||||
// Simple retry helps
|
||||
def retry[T](op: => T): T =
|
||||
try {
|
||||
op
|
||||
} catch {
|
||||
case NonFatal(e) =>
|
||||
logger.debug(s"Caught exception while upserting a deduplication entry: $e")
|
||||
op
|
||||
}
|
||||
retry(
|
||||
SQL(SQL_INSERT_COMMAND)
|
||||
.on(
|
||||
"deduplicationKey" -> key,
|
||||
"submittedAt" -> submittedAt.micros,
|
||||
"deduplicateUntil" -> deduplicateUntil.micros,
|
||||
)
|
||||
.executeUpdate()(connection)
|
||||
)
|
||||
}
|
||||
}
|
@ -0,0 +1,47 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.h2
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SqlParser.get
|
||||
import anorm.SqlStringInterpolation
|
||||
import com.daml.ledger.offset.Offset
|
||||
import com.daml.platform.store.backend.common.{
|
||||
EventStorageBackendTemplate,
|
||||
ParameterStorageBackendTemplate,
|
||||
}
|
||||
|
||||
object H2EventStorageBackend
|
||||
extends EventStorageBackendTemplate(
|
||||
queryStrategy = H2QueryStrategy,
|
||||
eventStrategy = H2EventStrategy,
|
||||
participantAllDivulgedContractsPrunedUpToInclusive =
|
||||
ParameterStorageBackendTemplate.participantAllDivulgedContractsPrunedUpToInclusive,
|
||||
) {
|
||||
|
||||
override def maxEventSequentialIdOfAnObservableEvent(
|
||||
offset: Offset
|
||||
)(connection: Connection): Option[Long] = {
|
||||
import com.daml.platform.store.Conversions.OffsetToStatement
|
||||
SQL"""
|
||||
SELECT max_esi FROM (
|
||||
(SELECT max(event_sequential_id) AS max_esi FROM participant_events_consuming_exercise WHERE event_offset <= $offset GROUP BY event_offset ORDER BY event_offset DESC FETCH NEXT 1 ROW ONLY)
|
||||
UNION ALL
|
||||
(SELECT max(event_sequential_id) AS max_esi FROM participant_events_non_consuming_exercise WHERE event_offset <= $offset GROUP BY event_offset ORDER BY event_offset DESC FETCH NEXT 1 ROW ONLY)
|
||||
UNION ALL
|
||||
(SELECT max(event_sequential_id) AS max_esi FROM participant_events_create WHERE event_offset <= $offset GROUP BY event_offset ORDER BY event_offset DESC FETCH NEXT 1 ROW ONLY)
|
||||
) AS t
|
||||
ORDER BY max_esi DESC
|
||||
FETCH NEXT 1 ROW ONLY;
|
||||
""".as(get[Long](1).singleOpt)(connection)
|
||||
}
|
||||
|
||||
// Migration from mutable schema is not supported for H2
|
||||
override def isPruningOffsetValidAgainstMigration(
|
||||
pruneUpToInclusive: Offset,
|
||||
pruneAllDivulgedContracts: Boolean,
|
||||
connection: Connection,
|
||||
): Boolean = true
|
||||
}
|
@ -0,0 +1,52 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.h2
|
||||
|
||||
import com.daml.lf.data.Ref
|
||||
import com.daml.platform.store.backend.EventStorageBackend.FilterParams
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.{CompositeSql, SqlStringInterpolation}
|
||||
import com.daml.platform.store.backend.common.EventStrategy
|
||||
|
||||
object H2EventStrategy extends EventStrategy {
|
||||
override def filteredEventWitnessesClause(
|
||||
witnessesColumnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql = {
|
||||
val partiesArray = parties.view.map(_.toString).toArray
|
||||
cSQL"array_intersection(#$witnessesColumnName, $partiesArray)"
|
||||
}
|
||||
|
||||
override def submittersArePartiesClause(
|
||||
submittersColumnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql =
|
||||
H2QueryStrategy.arrayIntersectionNonEmptyClause(
|
||||
columnName = submittersColumnName,
|
||||
parties = parties,
|
||||
)
|
||||
|
||||
override def witnessesWhereClause(
|
||||
witnessesColumnName: String,
|
||||
filterParams: FilterParams,
|
||||
): CompositeSql = {
|
||||
val wildCardClause = filterParams.wildCardParties match {
|
||||
case wildCardParties if wildCardParties.isEmpty =>
|
||||
Nil
|
||||
|
||||
case wildCardParties =>
|
||||
cSQL"(${H2QueryStrategy.arrayIntersectionNonEmptyClause(witnessesColumnName, wildCardParties)})" :: Nil
|
||||
}
|
||||
val partiesTemplatesClauses =
|
||||
filterParams.partiesAndTemplates.iterator.map { case (parties, templateIds) =>
|
||||
val clause =
|
||||
H2QueryStrategy.arrayIntersectionNonEmptyClause(
|
||||
witnessesColumnName,
|
||||
parties,
|
||||
)
|
||||
val templateIdsArray = templateIds.view.map(_.toString).toArray
|
||||
cSQL"( ($clause) AND (template_id = ANY($templateIdsArray)) )"
|
||||
}.toList
|
||||
(wildCardClause ::: partiesTemplatesClauses).mkComposite("(", " OR ", ")")
|
||||
}
|
||||
}
|
@ -0,0 +1,27 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.h2
|
||||
|
||||
import com.daml.lf.data.Ref
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.{CompositeSql, SqlStringInterpolation}
|
||||
import com.daml.platform.store.backend.common.QueryStrategy
|
||||
|
||||
object H2QueryStrategy extends QueryStrategy {
|
||||
|
||||
override def arrayIntersectionNonEmptyClause(
|
||||
columnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql =
|
||||
if (parties.isEmpty)
|
||||
cSQL"false"
|
||||
else
|
||||
parties.view
|
||||
.map(p => cSQL"array_contains(#$columnName, '#${p.toString}')")
|
||||
.mkComposite("(", " or ", ")")
|
||||
|
||||
override def arrayContains(arrayColumnName: String, elementColumnName: String): String =
|
||||
s"array_contains($arrayColumnName, $elementColumnName)"
|
||||
|
||||
override def isTrue(booleanColumnName: String): String = booleanColumnName
|
||||
}
|
@ -0,0 +1,47 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.h2
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SQL
|
||||
import com.daml.platform.store.backend.ResetStorageBackend
|
||||
|
||||
object H2ResetStorageBackend extends ResetStorageBackend {
|
||||
|
||||
override def reset(connection: Connection): Unit = {
|
||||
SQL("""set referential_integrity false;
|
||||
|truncate table configuration_entries;
|
||||
|truncate table package_entries;
|
||||
|truncate table parameters;
|
||||
|truncate table participant_command_completions;
|
||||
|truncate table participant_command_submissions;
|
||||
|truncate table participant_events_divulgence;
|
||||
|truncate table participant_events_create;
|
||||
|truncate table participant_events_consuming_exercise;
|
||||
|truncate table participant_events_non_consuming_exercise;
|
||||
|truncate table party_entries;
|
||||
|set referential_integrity true;""".stripMargin)
|
||||
.execute()(connection)
|
||||
()
|
||||
}
|
||||
|
||||
override def resetAll(connection: Connection): Unit = {
|
||||
SQL("""set referential_integrity false;
|
||||
|truncate table configuration_entries;
|
||||
|truncate table packages;
|
||||
|truncate table package_entries;
|
||||
|truncate table parameters;
|
||||
|truncate table participant_command_completions;
|
||||
|truncate table participant_command_submissions;
|
||||
|truncate table participant_events_divulgence;
|
||||
|truncate table participant_events_create;
|
||||
|truncate table participant_events_consuming_exercise;
|
||||
|truncate table participant_events_non_consuming_exercise;
|
||||
|truncate table party_entries;
|
||||
|set referential_integrity true;""".stripMargin)
|
||||
.execute()(connection)
|
||||
()
|
||||
}
|
||||
}
|
@ -1,378 +0,0 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.h2
|
||||
|
||||
import java.sql.Connection
|
||||
import anorm.{Row, SQL, SimpleSql}
|
||||
import anorm.SqlParser.get
|
||||
import com.daml.ledger.offset.Offset
|
||||
import com.daml.lf.data.Ref
|
||||
import com.daml.lf.data.Time.Timestamp
|
||||
import com.daml.logging.{ContextualizedLogger, LoggingContext}
|
||||
import com.daml.platform.store.appendonlydao.events.ContractId
|
||||
import com.daml.platform.store.backend.EventStorageBackend.FilterParams
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.{CompositeSql, SqlStringInterpolation}
|
||||
import com.daml.platform.store.backend.common.{
|
||||
AppendOnlySchema,
|
||||
CompletionStorageBackendTemplate,
|
||||
ConfigurationStorageBackendTemplate,
|
||||
ContractStorageBackendTemplate,
|
||||
DataSourceStorageBackendTemplate,
|
||||
DeduplicationStorageBackendTemplate,
|
||||
EventStorageBackendTemplate,
|
||||
EventStrategy,
|
||||
IngestionStorageBackendTemplate,
|
||||
InitHookDataSourceProxy,
|
||||
IntegrityStorageBackendTemplate,
|
||||
PackageStorageBackendTemplate,
|
||||
ParameterStorageBackendTemplate,
|
||||
PartyStorageBackendTemplate,
|
||||
QueryStrategy,
|
||||
}
|
||||
import com.daml.platform.store.backend.{
|
||||
DBLockStorageBackend,
|
||||
DataSourceStorageBackend,
|
||||
DbDto,
|
||||
StorageBackend,
|
||||
common,
|
||||
}
|
||||
|
||||
import javax.sql.DataSource
|
||||
import scala.util.control.NonFatal
|
||||
|
||||
private[backend] object H2StorageBackend
|
||||
extends StorageBackend[AppendOnlySchema.Batch]
|
||||
with DataSourceStorageBackendTemplate
|
||||
with IngestionStorageBackendTemplate[AppendOnlySchema.Batch]
|
||||
with ParameterStorageBackendTemplate
|
||||
with ConfigurationStorageBackendTemplate
|
||||
with PackageStorageBackendTemplate
|
||||
with DeduplicationStorageBackendTemplate
|
||||
with EventStorageBackendTemplate
|
||||
with ContractStorageBackendTemplate
|
||||
with CompletionStorageBackendTemplate
|
||||
with PartyStorageBackendTemplate
|
||||
with IntegrityStorageBackendTemplate {
|
||||
|
||||
private val logger = ContextualizedLogger.get(this.getClass)
|
||||
|
||||
override def reset(connection: Connection): Unit = {
|
||||
SQL("""set referential_integrity false;
|
||||
|truncate table configuration_entries;
|
||||
|truncate table package_entries;
|
||||
|truncate table parameters;
|
||||
|truncate table participant_command_completions;
|
||||
|truncate table participant_command_submissions;
|
||||
|truncate table participant_events_divulgence;
|
||||
|truncate table participant_events_create;
|
||||
|truncate table participant_events_consuming_exercise;
|
||||
|truncate table participant_events_non_consuming_exercise;
|
||||
|truncate table party_entries;
|
||||
|set referential_integrity true;""".stripMargin)
|
||||
.execute()(connection)
|
||||
()
|
||||
}
|
||||
|
||||
override def resetAll(connection: Connection): Unit = {
|
||||
SQL("""set referential_integrity false;
|
||||
|truncate table configuration_entries;
|
||||
|truncate table packages;
|
||||
|truncate table package_entries;
|
||||
|truncate table parameters;
|
||||
|truncate table participant_command_completions;
|
||||
|truncate table participant_command_submissions;
|
||||
|truncate table participant_events_divulgence;
|
||||
|truncate table participant_events_create;
|
||||
|truncate table participant_events_consuming_exercise;
|
||||
|truncate table participant_events_non_consuming_exercise;
|
||||
|truncate table party_entries;
|
||||
|set referential_integrity true;""".stripMargin)
|
||||
.execute()(connection)
|
||||
()
|
||||
}
|
||||
|
||||
val SQL_INSERT_COMMAND: String =
|
||||
"""merge into participant_command_submissions pcs
|
||||
|using dual on deduplication_key = {deduplicationKey}
|
||||
|when not matched then
|
||||
| insert (deduplication_key, deduplicate_until)
|
||||
| values ({deduplicationKey}, {deduplicateUntil})
|
||||
|when matched and pcs.deduplicate_until < {submittedAt} then
|
||||
| update set deduplicate_until={deduplicateUntil}""".stripMargin
|
||||
|
||||
override def upsertDeduplicationEntry(
|
||||
key: String,
|
||||
submittedAt: Timestamp,
|
||||
deduplicateUntil: Timestamp,
|
||||
)(connection: Connection)(implicit loggingContext: LoggingContext): Int = {
|
||||
|
||||
// Under the default READ_COMMITTED isolation level used for the indexdb, when a deduplication
|
||||
// upsert is performed simultaneously from multiple threads, the query fails with
|
||||
// JdbcSQLIntegrityConstraintViolationException: Unique index or primary key violation
|
||||
// Simple retry helps
|
||||
def retry[T](op: => T): T =
|
||||
try {
|
||||
op
|
||||
} catch {
|
||||
case NonFatal(e) =>
|
||||
logger.debug(s"Caught exception while upserting a deduplication entry: $e")
|
||||
op
|
||||
}
|
||||
retry(
|
||||
SQL(SQL_INSERT_COMMAND)
|
||||
.on(
|
||||
"deduplicationKey" -> key,
|
||||
"submittedAt" -> submittedAt.micros,
|
||||
"deduplicateUntil" -> deduplicateUntil.micros,
|
||||
)
|
||||
.executeUpdate()(connection)
|
||||
)
|
||||
}
|
||||
|
||||
override def batch(dbDtos: Vector[DbDto]): AppendOnlySchema.Batch =
|
||||
H2Schema.schema.prepareData(dbDtos)
|
||||
|
||||
override def insertBatch(connection: Connection, batch: AppendOnlySchema.Batch): Unit =
|
||||
H2Schema.schema.executeUpdate(batch, connection)
|
||||
|
||||
def maxEventSequentialIdOfAnObservableEvent(
|
||||
offset: Offset
|
||||
)(connection: Connection): Option[Long] = {
|
||||
import com.daml.platform.store.Conversions.OffsetToStatement
|
||||
SQL"""
|
||||
SELECT max_esi FROM (
|
||||
(SELECT max(event_sequential_id) AS max_esi FROM participant_events_consuming_exercise WHERE event_offset <= $offset GROUP BY event_offset ORDER BY event_offset DESC FETCH NEXT 1 ROW ONLY)
|
||||
UNION ALL
|
||||
(SELECT max(event_sequential_id) AS max_esi FROM participant_events_non_consuming_exercise WHERE event_offset <= $offset GROUP BY event_offset ORDER BY event_offset DESC FETCH NEXT 1 ROW ONLY)
|
||||
UNION ALL
|
||||
(SELECT max(event_sequential_id) AS max_esi FROM participant_events_create WHERE event_offset <= $offset GROUP BY event_offset ORDER BY event_offset DESC FETCH NEXT 1 ROW ONLY)
|
||||
) AS t
|
||||
ORDER BY max_esi DESC
|
||||
FETCH NEXT 1 ROW ONLY;
|
||||
""".as(get[Long](1).singleOpt)(connection)
|
||||
}
|
||||
|
||||
object H2QueryStrategy extends QueryStrategy {
|
||||
|
||||
override def arrayIntersectionNonEmptyClause(
|
||||
columnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql =
|
||||
if (parties.isEmpty)
|
||||
cSQL"false"
|
||||
else
|
||||
parties.view
|
||||
.map(p => cSQL"array_contains(#$columnName, '#${p.toString}')")
|
||||
.mkComposite("(", " or ", ")")
|
||||
|
||||
override def arrayContains(arrayColumnName: String, elementColumnName: String): String =
|
||||
s"array_contains($arrayColumnName, $elementColumnName)"
|
||||
|
||||
override def isTrue(booleanColumnName: String): String = booleanColumnName
|
||||
}
|
||||
|
||||
override def queryStrategy: QueryStrategy = H2QueryStrategy
|
||||
|
||||
object H2EventStrategy extends EventStrategy {
|
||||
override def filteredEventWitnessesClause(
|
||||
witnessesColumnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql = {
|
||||
val partiesArray = parties.view.map(_.toString).toArray
|
||||
cSQL"array_intersection(#$witnessesColumnName, $partiesArray)"
|
||||
}
|
||||
|
||||
override def submittersArePartiesClause(
|
||||
submittersColumnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql =
|
||||
H2QueryStrategy.arrayIntersectionNonEmptyClause(
|
||||
columnName = submittersColumnName,
|
||||
parties = parties,
|
||||
)
|
||||
|
||||
override def witnessesWhereClause(
|
||||
witnessesColumnName: String,
|
||||
filterParams: FilterParams,
|
||||
): CompositeSql = {
|
||||
val wildCardClause = filterParams.wildCardParties match {
|
||||
case wildCardParties if wildCardParties.isEmpty =>
|
||||
Nil
|
||||
|
||||
case wildCardParties =>
|
||||
cSQL"(${H2QueryStrategy.arrayIntersectionNonEmptyClause(witnessesColumnName, wildCardParties)})" :: Nil
|
||||
}
|
||||
val partiesTemplatesClauses =
|
||||
filterParams.partiesAndTemplates.iterator.map { case (parties, templateIds) =>
|
||||
val clause =
|
||||
H2QueryStrategy.arrayIntersectionNonEmptyClause(
|
||||
witnessesColumnName,
|
||||
parties,
|
||||
)
|
||||
val templateIdsArray = templateIds.view.map(_.toString).toArray
|
||||
cSQL"( ($clause) AND (template_id = ANY($templateIdsArray)) )"
|
||||
}.toList
|
||||
(wildCardClause ::: partiesTemplatesClauses).mkComposite("(", " OR ", ")")
|
||||
}
|
||||
}
|
||||
|
||||
override def eventStrategy: common.EventStrategy = H2EventStrategy
|
||||
|
||||
override def createDataSource(
|
||||
jdbcUrl: String,
|
||||
dataSourceConfig: DataSourceStorageBackend.DataSourceConfig,
|
||||
connectionInitHook: Option[Connection => Unit],
|
||||
)(implicit loggingContext: LoggingContext): DataSource = {
|
||||
val h2DataSource = new org.h2.jdbcx.JdbcDataSource()
|
||||
|
||||
// H2 (org.h2.jdbcx.JdbcDataSource) does not support setting the user/password within the jdbcUrl, so remove
|
||||
// those properties from the url if present and set them separately. Note that Postgres and Oracle support
|
||||
// user/password in the URLs, so we don't bother exposing user/password configs separately from the url just for h2
|
||||
// which is anyway not supported for production. (This also helps run canton h2 participants that set user and
|
||||
// password.)
|
||||
val (urlNoUserNoPassword, user, password) = extractUserPasswordAndRemoveFromUrl(jdbcUrl)
|
||||
user.foreach(h2DataSource.setUser)
|
||||
password.foreach(h2DataSource.setPassword)
|
||||
h2DataSource.setUrl(urlNoUserNoPassword)
|
||||
|
||||
InitHookDataSourceProxy(h2DataSource, connectionInitHook.toList)
|
||||
}
|
||||
|
||||
def extractUserPasswordAndRemoveFromUrl(
|
||||
jdbcUrl: String
|
||||
): (String, Option[String], Option[String]) = {
|
||||
def setKeyValueAndRemoveFromUrl(url: String, key: String): (String, Option[String]) = {
|
||||
val regex = s".*(;(?i)${key}=([^;]*)).*".r
|
||||
url match {
|
||||
case regex(keyAndValue, value) =>
|
||||
(url.replace(keyAndValue, ""), Some(value))
|
||||
case _ => (url, None)
|
||||
}
|
||||
}
|
||||
|
||||
val (urlNoUser, user) = setKeyValueAndRemoveFromUrl(jdbcUrl, "user")
|
||||
val (urlNoUserNoPassword, password) = setKeyValueAndRemoveFromUrl(urlNoUser, "password")
|
||||
(urlNoUserNoPassword, user, password)
|
||||
}
|
||||
|
||||
override def tryAcquire(
|
||||
lockId: DBLockStorageBackend.LockId,
|
||||
lockMode: DBLockStorageBackend.LockMode,
|
||||
)(connection: Connection): Option[DBLockStorageBackend.Lock] =
|
||||
throw new UnsupportedOperationException("db level locks are not supported for H2")
|
||||
|
||||
override def release(lock: DBLockStorageBackend.Lock)(connection: Connection): Boolean =
|
||||
throw new UnsupportedOperationException("db level locks are not supported for H2")
|
||||
|
||||
override def lock(id: Int): DBLockStorageBackend.LockId =
|
||||
throw new UnsupportedOperationException("db level locks are not supported for H2")
|
||||
|
||||
override def dbLockSupported: Boolean = false
|
||||
|
||||
// Migration from mutable schema is not supported for H2
|
||||
override def isPruningOffsetValidAgainstMigration(
|
||||
pruneUpToInclusive: Offset,
|
||||
pruneAllDivulgedContracts: Boolean,
|
||||
connection: Connection,
|
||||
): Boolean = true
|
||||
|
||||
override def maximumLedgerTimeSqlLiteral(id: ContractId): SimpleSql[Row] = {
|
||||
import com.daml.platform.store.Conversions.ContractIdToStatement
|
||||
SQL"""
|
||||
WITH archival_event AS (
|
||||
SELECT 1
|
||||
FROM participant_events_consuming_exercise, parameters
|
||||
WHERE contract_id = $id
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
FETCH NEXT 1 ROW ONLY
|
||||
),
|
||||
create_event AS (
|
||||
SELECT ledger_effective_time
|
||||
FROM participant_events_create, parameters
|
||||
WHERE contract_id = $id
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
FETCH NEXT 1 ROW ONLY -- limit here to guide planner wrt expected number of results
|
||||
),
|
||||
divulged_contract AS (
|
||||
SELECT NULL::BIGINT
|
||||
FROM participant_events_divulgence, parameters
|
||||
WHERE contract_id = $id
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
ORDER BY event_sequential_id
|
||||
-- prudent engineering: make results more stable by preferring earlier divulgence events
|
||||
-- Results might still change due to pruning.
|
||||
FETCH NEXT 1 ROW ONLY
|
||||
),
|
||||
create_and_divulged_contracts AS (
|
||||
(SELECT * FROM create_event) -- prefer create over divulgence events
|
||||
UNION ALL
|
||||
(SELECT * FROM divulged_contract)
|
||||
)
|
||||
SELECT ledger_effective_time
|
||||
FROM create_and_divulged_contracts
|
||||
WHERE NOT EXISTS (SELECT 1 FROM archival_event)
|
||||
FETCH NEXT 1 ROW ONLY"""
|
||||
}
|
||||
|
||||
override def activeContractSqlLiteral(
|
||||
contractId: ContractId,
|
||||
treeEventWitnessesClause: CompositeSql,
|
||||
resultColumns: List[String],
|
||||
coalescedColumns: String,
|
||||
): SimpleSql[Row] = {
|
||||
import com.daml.platform.store.Conversions.ContractIdToStatement
|
||||
SQL""" WITH archival_event AS (
|
||||
SELECT 1
|
||||
FROM participant_events_consuming_exercise, parameters
|
||||
WHERE contract_id = $contractId
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
AND $treeEventWitnessesClause -- only use visible archivals
|
||||
FETCH NEXT 1 ROW ONLY
|
||||
),
|
||||
create_event AS (
|
||||
SELECT contract_id, #${resultColumns.mkString(", ")}
|
||||
FROM participant_events_create, parameters
|
||||
WHERE contract_id = $contractId
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
AND $treeEventWitnessesClause
|
||||
FETCH NEXT 1 ROW ONLY -- limit here to guide planner wrt expected number of results
|
||||
),
|
||||
-- no visibility check, as it is used to backfill missing template_id and create_arguments for divulged contracts
|
||||
create_event_unrestricted AS (
|
||||
SELECT contract_id, #${resultColumns.mkString(", ")}
|
||||
FROM participant_events_create, parameters
|
||||
WHERE contract_id = $contractId
|
||||
AND event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
FETCH NEXT 1 ROW ONLY -- limit here to guide planner wrt expected number of results
|
||||
),
|
||||
divulged_contract AS (
|
||||
SELECT divulgence_events.contract_id,
|
||||
-- Note: the divulgence_event.template_id can be NULL
|
||||
-- for certain integrations. For example, the KV integration exploits that
|
||||
-- every participant node knows about all create events. The integration
|
||||
-- therefore only communicates the change in visibility to the IndexDB, but
|
||||
-- does not include a full divulgence event.
|
||||
#$coalescedColumns
|
||||
FROM participant_events_divulgence divulgence_events LEFT OUTER JOIN create_event_unrestricted ON (divulgence_events.contract_id = create_event_unrestricted.contract_id),
|
||||
parameters
|
||||
WHERE divulgence_events.contract_id = $contractId -- restrict to aid query planner
|
||||
AND divulgence_events.event_sequential_id <= parameters.ledger_end_sequential_id
|
||||
AND $treeEventWitnessesClause
|
||||
ORDER BY divulgence_events.event_sequential_id
|
||||
-- prudent engineering: make results more stable by preferring earlier divulgence events
|
||||
-- Results might still change due to pruning.
|
||||
FETCH NEXT 1 ROW ONLY
|
||||
),
|
||||
create_and_divulged_contracts AS (
|
||||
(SELECT * FROM create_event) -- prefer create over divulgence events
|
||||
UNION ALL
|
||||
(SELECT * FROM divulged_contract)
|
||||
)
|
||||
SELECT contract_id, #${resultColumns.mkString(", ")}
|
||||
FROM create_and_divulged_contracts
|
||||
WHERE NOT EXISTS (SELECT 1 FROM archival_event)
|
||||
FETCH NEXT 1 ROW ONLY"""
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,71 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.h2
|
||||
|
||||
import com.daml.platform.store.backend.common.{
|
||||
CompletionStorageBackendTemplate,
|
||||
ConfigurationStorageBackendTemplate,
|
||||
IngestionStorageBackendTemplate,
|
||||
IntegrityStorageBackendTemplate,
|
||||
PackageStorageBackendTemplate,
|
||||
ParameterStorageBackendTemplate,
|
||||
PartyStorageBackendTemplate,
|
||||
}
|
||||
import com.daml.platform.store.backend.{
|
||||
CompletionStorageBackend,
|
||||
ConfigurationStorageBackend,
|
||||
ContractStorageBackend,
|
||||
DBLockStorageBackend,
|
||||
DataSourceStorageBackend,
|
||||
DeduplicationStorageBackend,
|
||||
EventStorageBackend,
|
||||
IngestionStorageBackend,
|
||||
IntegrityStorageBackend,
|
||||
PackageStorageBackend,
|
||||
ParameterStorageBackend,
|
||||
PartyStorageBackend,
|
||||
ResetStorageBackend,
|
||||
StorageBackendFactory,
|
||||
}
|
||||
|
||||
object H2StorageBackendFactory extends StorageBackendFactory {
|
||||
override val createIngestionStorageBackend: IngestionStorageBackend[_] =
|
||||
new IngestionStorageBackendTemplate(H2Schema.schema)
|
||||
|
||||
override val createParameterStorageBackend: ParameterStorageBackend =
|
||||
ParameterStorageBackendTemplate
|
||||
|
||||
override val createConfigurationStorageBackend: ConfigurationStorageBackend =
|
||||
ConfigurationStorageBackendTemplate
|
||||
|
||||
override val createPartyStorageBackend: PartyStorageBackend =
|
||||
new PartyStorageBackendTemplate(H2QueryStrategy)
|
||||
|
||||
override val createPackageStorageBackend: PackageStorageBackend =
|
||||
PackageStorageBackendTemplate
|
||||
|
||||
override val createDeduplicationStorageBackend: DeduplicationStorageBackend =
|
||||
H2DeduplicationStorageBackend
|
||||
|
||||
override val createCompletionStorageBackend: CompletionStorageBackend =
|
||||
new CompletionStorageBackendTemplate(H2QueryStrategy)
|
||||
|
||||
override val createContractStorageBackend: ContractStorageBackend =
|
||||
H2ContractStorageBackend
|
||||
|
||||
override val createEventStorageBackend: EventStorageBackend =
|
||||
H2EventStorageBackend
|
||||
|
||||
override val createDataSourceStorageBackend: DataSourceStorageBackend =
|
||||
H2DataSourceStorageBackend
|
||||
|
||||
override val createDBLockStorageBackend: DBLockStorageBackend =
|
||||
H2DBLockStorageBackend
|
||||
|
||||
override val createIntegrityStorageBackend: IntegrityStorageBackend =
|
||||
IntegrityStorageBackendTemplate
|
||||
|
||||
override val createResetStorageBackend: ResetStorageBackend =
|
||||
H2ResetStorageBackend
|
||||
}
|
@ -0,0 +1,72 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.oracle
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SqlParser.get
|
||||
import anorm.SqlStringInterpolation
|
||||
import com.daml.platform.store.backend.DBLockStorageBackend
|
||||
|
||||
object OracleDBLockStorageBackend extends DBLockStorageBackend {
|
||||
override def tryAcquire(
|
||||
lockId: DBLockStorageBackend.LockId,
|
||||
lockMode: DBLockStorageBackend.LockMode,
|
||||
)(connection: Connection): Option[DBLockStorageBackend.Lock] = {
|
||||
val oracleLockMode = lockMode match {
|
||||
case DBLockStorageBackend.LockMode.Exclusive => "6" // "DBMS_LOCK.x_mode"
|
||||
case DBLockStorageBackend.LockMode.Shared => "4" // "DBMS_LOCK.s_mode"
|
||||
}
|
||||
SQL"""
|
||||
SELECT DBMS_LOCK.REQUEST(
|
||||
id => ${oracleIntLockId(lockId)},
|
||||
lockmode => #$oracleLockMode,
|
||||
timeout => 0
|
||||
) FROM DUAL"""
|
||||
.as(get[Int](1).single)(connection) match {
|
||||
case 0 => Some(DBLockStorageBackend.Lock(lockId, lockMode))
|
||||
case 1 => None
|
||||
case 2 => throw new Exception("DBMS_LOCK.REQUEST Error 2: Acquiring lock caused a deadlock!")
|
||||
case 3 => throw new Exception("DBMS_LOCK.REQUEST Error 3: Parameter error as acquiring lock")
|
||||
case 4 => Some(DBLockStorageBackend.Lock(lockId, lockMode))
|
||||
case 5 =>
|
||||
throw new Exception("DBMS_LOCK.REQUEST Error 5: Illegal lock handle as acquiring lock")
|
||||
case unknown => throw new Exception(s"Invalid result from DBMS_LOCK.REQUEST: $unknown")
|
||||
}
|
||||
}
|
||||
|
||||
override def release(lock: DBLockStorageBackend.Lock)(connection: Connection): Boolean = {
|
||||
SQL"""
|
||||
SELECT DBMS_LOCK.RELEASE(
|
||||
id => ${oracleIntLockId(lock.lockId)}
|
||||
) FROM DUAL"""
|
||||
.as(get[Int](1).single)(connection) match {
|
||||
case 0 => true
|
||||
case 3 => throw new Exception("DBMS_LOCK.RELEASE Error 3: Parameter error as releasing lock")
|
||||
case 4 => false
|
||||
case 5 =>
|
||||
throw new Exception("DBMS_LOCK.RELEASE Error 5: Illegal lock handle as releasing lock")
|
||||
case unknown => throw new Exception(s"Invalid result from DBMS_LOCK.RELEASE: $unknown")
|
||||
}
|
||||
}
|
||||
|
||||
case class OracleLockId(id: Int) extends DBLockStorageBackend.LockId {
|
||||
// respecting Oracle limitations: https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/d_lock.htm#ARPLS021
|
||||
assert(id >= 0, s"Lock id $id is too small for Oracle")
|
||||
assert(id <= 1073741823, s"Lock id $id is too large for Oracle")
|
||||
}
|
||||
|
||||
private def oracleIntLockId(lockId: DBLockStorageBackend.LockId): Int =
|
||||
lockId match {
|
||||
case OracleLockId(id) => id
|
||||
case unknown =>
|
||||
throw new Exception(
|
||||
s"LockId $unknown not supported. Probable cause: LockId was created by a different StorageBackend"
|
||||
)
|
||||
}
|
||||
|
||||
override def lock(id: Int): DBLockStorageBackend.LockId = OracleLockId(id)
|
||||
|
||||
override def dbLockSupported: Boolean = true
|
||||
}
|
@ -0,0 +1,28 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.oracle
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SqlParser.get
|
||||
import anorm.SqlStringInterpolation
|
||||
import com.daml.logging.LoggingContext
|
||||
import com.daml.platform.store.backend.DataSourceStorageBackend
|
||||
import com.daml.platform.store.backend.common.InitHookDataSourceProxy
|
||||
import javax.sql.DataSource
|
||||
|
||||
object OracleDataSourceStorageBackend extends DataSourceStorageBackend {
|
||||
override def createDataSource(
|
||||
jdbcUrl: String,
|
||||
dataSourceConfig: DataSourceStorageBackend.DataSourceConfig,
|
||||
connectionInitHook: Option[Connection => Unit],
|
||||
)(implicit loggingContext: LoggingContext): DataSource = {
|
||||
val oracleDataSource = new oracle.jdbc.pool.OracleDataSource
|
||||
oracleDataSource.setURL(jdbcUrl)
|
||||
InitHookDataSourceProxy(oracleDataSource, connectionInitHook.toList)
|
||||
}
|
||||
|
||||
override def checkDatabaseAvailable(connection: Connection): Unit =
|
||||
assert(SQL"SELECT 1 FROM DUAL".as(get[Int](1).single)(connection) == 1)
|
||||
}
|
@ -0,0 +1,58 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.oracle
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SQL
|
||||
import com.daml.lf.data.Time.Timestamp
|
||||
import com.daml.logging.{ContextualizedLogger, LoggingContext}
|
||||
import com.daml.platform.store.backend.common.DeduplicationStorageBackendTemplate
|
||||
|
||||
import scala.util.control.NonFatal
|
||||
|
||||
object OracleDeduplicationStorageBackend extends DeduplicationStorageBackendTemplate {
|
||||
private val logger = ContextualizedLogger.get(this.getClass)
|
||||
|
||||
val SQL_INSERT_COMMAND: String =
|
||||
"""merge into participant_command_submissions pcs
|
||||
|using dual
|
||||
|on (pcs.deduplication_key ={deduplicationKey})
|
||||
|when matched then
|
||||
| update set pcs.deduplicate_until={deduplicateUntil}
|
||||
| where pcs.deduplicate_until < {submittedAt}
|
||||
|when not matched then
|
||||
| insert (pcs.deduplication_key, pcs.deduplicate_until)
|
||||
| values ({deduplicationKey}, {deduplicateUntil})""".stripMargin
|
||||
|
||||
override def upsertDeduplicationEntry(
|
||||
key: String,
|
||||
submittedAt: Timestamp,
|
||||
deduplicateUntil: Timestamp,
|
||||
)(connection: Connection)(implicit loggingContext: LoggingContext): Int = {
|
||||
|
||||
// Under the default READ_COMMITTED isolation level used for the indexdb, when a deduplication
|
||||
// upsert is performed simultaneously from multiple threads, the query fails with
|
||||
// SQLIntegrityConstraintViolationException: ORA-00001: unique constraint (INDEXDB.SYS_C007590) violated
|
||||
// Simple retry helps
|
||||
def retry[T](op: => T): T =
|
||||
try {
|
||||
op
|
||||
} catch {
|
||||
case NonFatal(e) =>
|
||||
logger.debug(s"Caught exception while upserting a deduplication entry: $e")
|
||||
op
|
||||
}
|
||||
retry(
|
||||
SQL(SQL_INSERT_COMMAND)
|
||||
.on(
|
||||
"deduplicationKey" -> key,
|
||||
"submittedAt" -> submittedAt.micros,
|
||||
"deduplicateUntil" -> deduplicateUntil.micros,
|
||||
)
|
||||
.executeUpdate()(connection)
|
||||
)
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,50 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.oracle
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SqlParser.get
|
||||
import anorm.SqlStringInterpolation
|
||||
import com.daml.ledger.offset.Offset
|
||||
import com.daml.platform.store.backend.common.{
|
||||
EventStorageBackendTemplate,
|
||||
ParameterStorageBackendTemplate,
|
||||
}
|
||||
|
||||
object OracleEventStorageBackend
|
||||
extends EventStorageBackendTemplate(
|
||||
eventStrategy = OracleEventStrategy,
|
||||
queryStrategy = OracleQueryStrategy,
|
||||
participantAllDivulgedContractsPrunedUpToInclusive =
|
||||
ParameterStorageBackendTemplate.participantAllDivulgedContractsPrunedUpToInclusive,
|
||||
) {
|
||||
|
||||
def maxEventSequentialIdOfAnObservableEvent(
|
||||
offset: Offset
|
||||
)(connection: Connection): Option[Long] = {
|
||||
import com.daml.platform.store.Conversions.OffsetToStatement
|
||||
SQL"""SELECT max(max_esi) FROM (
|
||||
(
|
||||
SELECT max(event_sequential_id) AS max_esi FROM participant_events_consuming_exercise
|
||||
WHERE event_offset = (select max(event_offset) from participant_events_consuming_exercise where event_offset <= $offset)
|
||||
) UNION ALL (
|
||||
SELECT max(event_sequential_id) AS max_esi FROM participant_events_create
|
||||
WHERE event_offset = (select max(event_offset) from participant_events_create where event_offset <= $offset)
|
||||
) UNION ALL (
|
||||
SELECT max(event_sequential_id) AS max_esi FROM participant_events_non_consuming_exercise
|
||||
WHERE event_offset = (select max(event_offset) from participant_events_non_consuming_exercise where event_offset <= $offset)
|
||||
)
|
||||
)"""
|
||||
.as(get[Long](1).?.single)(connection)
|
||||
}
|
||||
|
||||
// Migration from mutable schema is not supported for Oracle
|
||||
override def isPruningOffsetValidAgainstMigration(
|
||||
pruneUpToInclusive: Offset,
|
||||
pruneAllDivulgedContracts: Boolean,
|
||||
connection: Connection,
|
||||
): Boolean = true
|
||||
|
||||
}
|
@ -0,0 +1,54 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.oracle
|
||||
|
||||
import com.daml.lf.data.Ref
|
||||
import com.daml.platform.store.backend.EventStorageBackend.FilterParams
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.{CompositeSql, SqlStringInterpolation}
|
||||
import com.daml.platform.store.backend.common.EventStrategy
|
||||
|
||||
/** Oracle-specific SQL fragments for event queries.
  *
  * Witness/submitter columns are stored as JSON arrays on Oracle, so all party
  * filtering goes through `json_table` / `json_array` constructs.
  */
object OracleEventStrategy extends EventStrategy {

  /** Renders the witness column reduced to the witnesses contained in `parties`.
    *
    * A single party is rendered directly via `json_array`; otherwise the stored
    * array is unpacked with `json_table`, filtered, and re-aggregated.
    */
  override def filteredEventWitnessesClause(
      witnessesColumnName: String,
      parties: Set[Ref.Party],
  ): CompositeSql =
    if (parties.size != 1)
      cSQL"""
           (select json_arrayagg(value) from (select value
           from json_table(#$witnessesColumnName, '$$[*]' columns (value PATH '$$'))
           where value IN (${parties.map(_.toString)})))
           """
    else
      cSQL"(json_array(${parties.head.toString}))"

  /** SQL predicate: the submitters column has a non-empty intersection with `parties`. */
  override def submittersArePartiesClause(
      submittersColumnName: String,
      parties: Set[Ref.Party],
  ): CompositeSql = {
    val intersection =
      OracleQueryStrategy.arrayIntersectionNonEmptyClause(submittersColumnName, parties)
    cSQL"($intersection)"
  }

  /** SQL predicate selecting events visible under `filterParams`: the wildcard-party
    * clause (any template) OR-ed with one clause per (parties, templateIds) filter.
    */
  override def witnessesWhereClause(
      witnessesColumnName: String,
      filterParams: FilterParams,
  ): CompositeSql = {
    val wildcardClauses =
      if (filterParams.wildCardParties.isEmpty) Nil
      else {
        val intersection = OracleQueryStrategy.arrayIntersectionNonEmptyClause(
          witnessesColumnName,
          filterParams.wildCardParties,
        )
        cSQL"($intersection)" :: Nil
      }
    val templateScopedClauses =
      filterParams.partiesAndTemplates.iterator.map { case (parties, templateIds) =>
        val partiesClause =
          OracleQueryStrategy.arrayIntersectionNonEmptyClause(
            witnessesColumnName,
            parties,
          )
        cSQL"( ($partiesClause) AND (template_id IN (${templateIds.map(_.toString)})) )"
      }.toList
    (wildcardClauses ::: templateScopedClauses).mkComposite("(", " OR ", ")")
  }
}
|
@ -0,0 +1,28 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.oracle
|
||||
|
||||
import com.daml.lf.data.Ref
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.{CompositeSql, SqlStringInterpolation}
|
||||
import com.daml.platform.store.backend.common.QueryStrategy
|
||||
|
||||
/** Oracle-specific SQL building blocks.
  *
  * Oracle has no native boolean column type and stores party lists as JSON arrays,
  * so booleans are encoded as 1/0 and array operations use `JSON_TABLE`.
  */
object OracleQueryStrategy extends QueryStrategy {

  /** Predicate: the JSON array in `columnName` shares at least one element with `parties`. */
  override def arrayIntersectionNonEmptyClause(
      columnName: String,
      parties: Set[Ref.Party],
  ): CompositeSql = {
    val partyLiterals = parties.map(_.toString)
    cSQL"EXISTS (SELECT 1 FROM JSON_TABLE(#$columnName, '$$[*]' columns (value PATH '$$')) WHERE value IN ($partyLiterals))"
  }

  /** Encodes the equality test as an Oracle 1/0 "boolean" via CASE. */
  override def columnEqualityBoolean(column: String, value: String): String =
    s"case when ($column = $value) then 1 else 0 end"

  /** With 1/0 encoded booleans, OR-aggregation over rows is simply `max`. */
  override def booleanOrAggregationFunction: String = "max"

  /** Predicate: the JSON array in `arrayColumnName` contains `elementColumnName`'s value. */
  override def arrayContains(arrayColumnName: String, elementColumnName: String): String =
    s"EXISTS (SELECT 1 FROM JSON_TABLE($arrayColumnName, '$$[*]' columns (value PATH '$$')) WHERE value = $elementColumnName)"

  /** Truth test for a 1/0 encoded boolean column. */
  override def isTrue(booleanColumnName: String): String = s"$booleanColumnName = 1"
}
|
@ -0,0 +1,40 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.oracle
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SQL
|
||||
import com.daml.platform.store.backend.ResetStorageBackend
|
||||
|
||||
/** Oracle implementation of the reset operations used by tests and sandbox resets.
  *
  * `reset` and `resetAll` previously duplicated the same ten-entry truncate list,
  * differing only in the `packages` table; the shared list and the execution loop
  * are factored out so the two variants cannot drift apart.
  */
object OracleResetStorageBackend extends ResetStorageBackend {

  // Tables wiped by BOTH reset variants, listed in truncation order.
  private val participantTables: Vector[String] = Vector(
    "configuration_entries",
    "package_entries",
    "parameters",
    "participant_command_completions",
    "participant_command_submissions",
    "participant_events_divulgence",
    "participant_events_create",
    "participant_events_consuming_exercise",
    "participant_events_non_consuming_exercise",
    "party_entries",
  )

  // Executes "truncate table <name> cascade" for each table, in order.
  private def truncateTables(tables: Seq[String], connection: Connection): Unit =
    tables.foreach(table => SQL(s"truncate table $table cascade").execute()(connection))

  /** Clears all ledger data but keeps uploaded packages (the `packages` table). */
  override def reset(connection: Connection): Unit =
    truncateTables(participantTables, connection)

  /** Clears all ledger data including uploaded packages.
    * `packages` is truncated right after `configuration_entries`, preserving the
    * original truncation order.
    */
  override def resetAll(connection: Connection): Unit =
    truncateTables(
      participantTables.head +: "packages" +: participantTables.tail,
      connection,
    )
}
|
@ -1,302 +0,0 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.oracle
|
||||
|
||||
import anorm.SqlParser.get
|
||||
import anorm.SQL
|
||||
import com.daml.lf.data.Ref
|
||||
import com.daml.platform.store.backend.common.{
|
||||
AppendOnlySchema,
|
||||
CompletionStorageBackendTemplate,
|
||||
ConfigurationStorageBackendTemplate,
|
||||
ContractStorageBackendTemplate,
|
||||
DataSourceStorageBackendTemplate,
|
||||
DeduplicationStorageBackendTemplate,
|
||||
EventStorageBackendTemplate,
|
||||
EventStrategy,
|
||||
IngestionStorageBackendTemplate,
|
||||
InitHookDataSourceProxy,
|
||||
IntegrityStorageBackendTemplate,
|
||||
PackageStorageBackendTemplate,
|
||||
ParameterStorageBackendTemplate,
|
||||
PartyStorageBackendTemplate,
|
||||
QueryStrategy,
|
||||
}
|
||||
import com.daml.platform.store.backend.{
|
||||
DBLockStorageBackend,
|
||||
DataSourceStorageBackend,
|
||||
DbDto,
|
||||
StorageBackend,
|
||||
common,
|
||||
}
|
||||
|
||||
import java.sql.Connection
|
||||
import com.daml.ledger.offset.Offset
|
||||
import com.daml.lf.data.Time.Timestamp
|
||||
import com.daml.platform.store.backend.EventStorageBackend.FilterParams
|
||||
import com.daml.logging.{ContextualizedLogger, LoggingContext}
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.{CompositeSql, SqlStringInterpolation}
|
||||
|
||||
import javax.sql.DataSource
|
||||
import scala.util.control.NonFatal
|
||||
|
||||
private[backend] object OracleStorageBackend
|
||||
extends StorageBackend[AppendOnlySchema.Batch]
|
||||
with DataSourceStorageBackendTemplate
|
||||
with IngestionStorageBackendTemplate[AppendOnlySchema.Batch]
|
||||
with ParameterStorageBackendTemplate
|
||||
with ConfigurationStorageBackendTemplate
|
||||
with PackageStorageBackendTemplate
|
||||
with DeduplicationStorageBackendTemplate
|
||||
with EventStorageBackendTemplate
|
||||
with ContractStorageBackendTemplate
|
||||
with CompletionStorageBackendTemplate
|
||||
with PartyStorageBackendTemplate
|
||||
with IntegrityStorageBackendTemplate {
|
||||
|
||||
private val logger = ContextualizedLogger.get(this.getClass)
|
||||
|
||||
override def reset(connection: Connection): Unit =
|
||||
List(
|
||||
"truncate table configuration_entries cascade",
|
||||
"truncate table package_entries cascade",
|
||||
"truncate table parameters cascade",
|
||||
"truncate table participant_command_completions cascade",
|
||||
"truncate table participant_command_submissions cascade",
|
||||
"truncate table participant_events_divulgence cascade",
|
||||
"truncate table participant_events_create cascade",
|
||||
"truncate table participant_events_consuming_exercise cascade",
|
||||
"truncate table participant_events_non_consuming_exercise cascade",
|
||||
"truncate table party_entries cascade",
|
||||
).map(SQL(_)).foreach(_.execute()(connection))
|
||||
|
||||
override def resetAll(connection: Connection): Unit =
|
||||
List(
|
||||
"truncate table configuration_entries cascade",
|
||||
"truncate table packages cascade",
|
||||
"truncate table package_entries cascade",
|
||||
"truncate table parameters cascade",
|
||||
"truncate table participant_command_completions cascade",
|
||||
"truncate table participant_command_submissions cascade",
|
||||
"truncate table participant_events_divulgence cascade",
|
||||
"truncate table participant_events_create cascade",
|
||||
"truncate table participant_events_consuming_exercise cascade",
|
||||
"truncate table participant_events_non_consuming_exercise cascade",
|
||||
"truncate table party_entries cascade",
|
||||
).map(SQL(_)).foreach(_.execute()(connection))
|
||||
|
||||
val SQL_INSERT_COMMAND: String =
|
||||
"""merge into participant_command_submissions pcs
|
||||
|using dual
|
||||
|on (pcs.deduplication_key ={deduplicationKey})
|
||||
|when matched then
|
||||
| update set pcs.deduplicate_until={deduplicateUntil}
|
||||
| where pcs.deduplicate_until < {submittedAt}
|
||||
|when not matched then
|
||||
| insert (pcs.deduplication_key, pcs.deduplicate_until)
|
||||
| values ({deduplicationKey}, {deduplicateUntil})""".stripMargin
|
||||
|
||||
override def upsertDeduplicationEntry(
|
||||
key: String,
|
||||
submittedAt: Timestamp,
|
||||
deduplicateUntil: Timestamp,
|
||||
)(connection: Connection)(implicit loggingContext: LoggingContext): Int = {
|
||||
|
||||
// Under the default READ_COMMITTED isolation level used for the indexdb, when a deduplication
|
||||
// upsert is performed simultaneously from multiple threads, the query fails with
|
||||
// SQLIntegrityConstraintViolationException: ORA-00001: unique constraint (INDEXDB.SYS_C007590) violated
|
||||
// Simple retry helps
|
||||
def retry[T](op: => T): T =
|
||||
try {
|
||||
op
|
||||
} catch {
|
||||
case NonFatal(e) =>
|
||||
logger.debug(s"Caught exception while upserting a deduplication entry: $e")
|
||||
op
|
||||
}
|
||||
retry(
|
||||
SQL(SQL_INSERT_COMMAND)
|
||||
.on(
|
||||
"deduplicationKey" -> key,
|
||||
"submittedAt" -> submittedAt.micros,
|
||||
"deduplicateUntil" -> deduplicateUntil.micros,
|
||||
)
|
||||
.executeUpdate()(connection)
|
||||
)
|
||||
}
|
||||
|
||||
override def batch(dbDtos: Vector[DbDto]): AppendOnlySchema.Batch =
|
||||
OracleSchema.schema.prepareData(dbDtos)
|
||||
|
||||
override def insertBatch(connection: Connection, batch: AppendOnlySchema.Batch): Unit =
|
||||
OracleSchema.schema.executeUpdate(batch, connection)
|
||||
|
||||
object OracleQueryStrategy extends QueryStrategy {
|
||||
|
||||
override def arrayIntersectionNonEmptyClause(
|
||||
columnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql =
|
||||
cSQL"EXISTS (SELECT 1 FROM JSON_TABLE(#$columnName, '$$[*]' columns (value PATH '$$')) WHERE value IN (${parties
|
||||
.map(_.toString)}))"
|
||||
|
||||
override def columnEqualityBoolean(column: String, value: String): String =
|
||||
s"""case when ($column = $value) then 1 else 0 end"""
|
||||
|
||||
override def booleanOrAggregationFunction: String = "max"
|
||||
|
||||
override def arrayContains(arrayColumnName: String, elementColumnName: String): String =
|
||||
s"EXISTS (SELECT 1 FROM JSON_TABLE($arrayColumnName, '$$[*]' columns (value PATH '$$')) WHERE value = $elementColumnName)"
|
||||
|
||||
override def isTrue(booleanColumnName: String): String = s"$booleanColumnName = 1"
|
||||
}
|
||||
|
||||
override def queryStrategy: QueryStrategy = OracleQueryStrategy
|
||||
|
||||
object OracleEventStrategy extends EventStrategy {
|
||||
|
||||
override def filteredEventWitnessesClause(
|
||||
witnessesColumnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql =
|
||||
if (parties.size == 1)
|
||||
cSQL"(json_array(${parties.head.toString}))"
|
||||
else
|
||||
cSQL"""
|
||||
(select json_arrayagg(value) from (select value
|
||||
from json_table(#$witnessesColumnName, '$$[*]' columns (value PATH '$$'))
|
||||
where value IN (${parties.map(_.toString)})))
|
||||
"""
|
||||
|
||||
override def submittersArePartiesClause(
|
||||
submittersColumnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql =
|
||||
cSQL"(${OracleQueryStrategy.arrayIntersectionNonEmptyClause(submittersColumnName, parties)})"
|
||||
|
||||
override def witnessesWhereClause(
|
||||
witnessesColumnName: String,
|
||||
filterParams: FilterParams,
|
||||
): CompositeSql = {
|
||||
val wildCardClause = filterParams.wildCardParties match {
|
||||
case wildCardParties if wildCardParties.isEmpty =>
|
||||
Nil
|
||||
|
||||
case wildCardParties =>
|
||||
cSQL"(${OracleQueryStrategy.arrayIntersectionNonEmptyClause(witnessesColumnName, wildCardParties)})" :: Nil
|
||||
}
|
||||
val partiesTemplatesClauses =
|
||||
filterParams.partiesAndTemplates.iterator.map { case (parties, templateIds) =>
|
||||
val clause =
|
||||
OracleQueryStrategy.arrayIntersectionNonEmptyClause(
|
||||
witnessesColumnName,
|
||||
parties,
|
||||
)
|
||||
cSQL"( ($clause) AND (template_id IN (${templateIds.map(_.toString)})) )"
|
||||
}.toList
|
||||
(wildCardClause ::: partiesTemplatesClauses).mkComposite("(", " OR ", ")")
|
||||
}
|
||||
}
|
||||
|
||||
override def eventStrategy: common.EventStrategy = OracleEventStrategy
|
||||
|
||||
def maxEventSequentialIdOfAnObservableEvent(
|
||||
offset: Offset
|
||||
)(connection: Connection): Option[Long] = {
|
||||
import com.daml.platform.store.Conversions.OffsetToStatement
|
||||
SQL"""SELECT max(max_esi) FROM (
|
||||
(
|
||||
SELECT max(event_sequential_id) AS max_esi FROM participant_events_consuming_exercise
|
||||
WHERE event_offset = (select max(event_offset) from participant_events_consuming_exercise where event_offset <= $offset)
|
||||
) UNION ALL (
|
||||
SELECT max(event_sequential_id) AS max_esi FROM participant_events_create
|
||||
WHERE event_offset = (select max(event_offset) from participant_events_create where event_offset <= $offset)
|
||||
) UNION ALL (
|
||||
SELECT max(event_sequential_id) AS max_esi FROM participant_events_non_consuming_exercise
|
||||
WHERE event_offset = (select max(event_offset) from participant_events_non_consuming_exercise where event_offset <= $offset)
|
||||
)
|
||||
)"""
|
||||
.as(get[Long](1).?.single)(connection)
|
||||
}
|
||||
|
||||
override def createDataSource(
|
||||
jdbcUrl: String,
|
||||
dataSourceConfig: DataSourceStorageBackend.DataSourceConfig,
|
||||
connectionInitHook: Option[Connection => Unit],
|
||||
)(implicit loggingContext: LoggingContext): DataSource = {
|
||||
val oracleDataSource = new oracle.jdbc.pool.OracleDataSource
|
||||
oracleDataSource.setURL(jdbcUrl)
|
||||
InitHookDataSourceProxy(oracleDataSource, connectionInitHook.toList)
|
||||
}
|
||||
|
||||
override def checkDatabaseAvailable(connection: Connection): Unit =
|
||||
assert(SQL"SELECT 1 FROM DUAL".as(get[Int](1).single)(connection) == 1)
|
||||
|
||||
override def tryAcquire(
|
||||
lockId: DBLockStorageBackend.LockId,
|
||||
lockMode: DBLockStorageBackend.LockMode,
|
||||
)(connection: Connection): Option[DBLockStorageBackend.Lock] = {
|
||||
val oracleLockMode = lockMode match {
|
||||
case DBLockStorageBackend.LockMode.Exclusive => "6" // "DBMS_LOCK.x_mode"
|
||||
case DBLockStorageBackend.LockMode.Shared => "4" // "DBMS_LOCK.s_mode"
|
||||
}
|
||||
SQL"""
|
||||
SELECT DBMS_LOCK.REQUEST(
|
||||
id => ${oracleIntLockId(lockId)},
|
||||
lockmode => #$oracleLockMode,
|
||||
timeout => 0
|
||||
) FROM DUAL"""
|
||||
.as(get[Int](1).single)(connection) match {
|
||||
case 0 => Some(DBLockStorageBackend.Lock(lockId, lockMode))
|
||||
case 1 => None
|
||||
case 2 => throw new Exception("DBMS_LOCK.REQUEST Error 2: Acquiring lock caused a deadlock!")
|
||||
case 3 => throw new Exception("DBMS_LOCK.REQUEST Error 3: Parameter error as acquiring lock")
|
||||
case 4 => Some(DBLockStorageBackend.Lock(lockId, lockMode))
|
||||
case 5 =>
|
||||
throw new Exception("DBMS_LOCK.REQUEST Error 5: Illegal lock handle as acquiring lock")
|
||||
case unknown => throw new Exception(s"Invalid result from DBMS_LOCK.REQUEST: $unknown")
|
||||
}
|
||||
}
|
||||
|
||||
override def release(lock: DBLockStorageBackend.Lock)(connection: Connection): Boolean = {
|
||||
SQL"""
|
||||
SELECT DBMS_LOCK.RELEASE(
|
||||
id => ${oracleIntLockId(lock.lockId)}
|
||||
) FROM DUAL"""
|
||||
.as(get[Int](1).single)(connection) match {
|
||||
case 0 => true
|
||||
case 3 => throw new Exception("DBMS_LOCK.RELEASE Error 3: Parameter error as releasing lock")
|
||||
case 4 => false
|
||||
case 5 =>
|
||||
throw new Exception("DBMS_LOCK.RELEASE Error 5: Illegal lock handle as releasing lock")
|
||||
case unknown => throw new Exception(s"Invalid result from DBMS_LOCK.RELEASE: $unknown")
|
||||
}
|
||||
}
|
||||
|
||||
case class OracleLockId(id: Int) extends DBLockStorageBackend.LockId {
|
||||
// respecting Oracle limitations: https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/d_lock.htm#ARPLS021
|
||||
assert(id >= 0, s"Lock id $id is too small for Oracle")
|
||||
assert(id <= 1073741823, s"Lock id $id is too large for Oracle")
|
||||
}
|
||||
|
||||
private def oracleIntLockId(lockId: DBLockStorageBackend.LockId): Int =
|
||||
lockId match {
|
||||
case OracleLockId(id) => id
|
||||
case unknown =>
|
||||
throw new Exception(
|
||||
s"LockId $unknown not supported. Probable cause: LockId was created by a different StorageBackend"
|
||||
)
|
||||
}
|
||||
|
||||
override def lock(id: Int): DBLockStorageBackend.LockId = OracleLockId(id)
|
||||
|
||||
override def dbLockSupported: Boolean = true
|
||||
|
||||
// Migration from mutable schema is not supported for Oracle
|
||||
override def isPruningOffsetValidAgainstMigration(
|
||||
pruneUpToInclusive: Offset,
|
||||
pruneAllDivulgedContracts: Boolean,
|
||||
connection: Connection,
|
||||
): Boolean = true
|
||||
}
|
@ -0,0 +1,72 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.oracle
|
||||
|
||||
import com.daml.platform.store.backend.common.{
|
||||
CompletionStorageBackendTemplate,
|
||||
ConfigurationStorageBackendTemplate,
|
||||
ContractStorageBackendTemplate,
|
||||
IngestionStorageBackendTemplate,
|
||||
IntegrityStorageBackendTemplate,
|
||||
PackageStorageBackendTemplate,
|
||||
ParameterStorageBackendTemplate,
|
||||
PartyStorageBackendTemplate,
|
||||
}
|
||||
import com.daml.platform.store.backend.{
|
||||
CompletionStorageBackend,
|
||||
ConfigurationStorageBackend,
|
||||
ContractStorageBackend,
|
||||
DBLockStorageBackend,
|
||||
DataSourceStorageBackend,
|
||||
DeduplicationStorageBackend,
|
||||
EventStorageBackend,
|
||||
IngestionStorageBackend,
|
||||
IntegrityStorageBackend,
|
||||
PackageStorageBackend,
|
||||
ParameterStorageBackend,
|
||||
PartyStorageBackend,
|
||||
ResetStorageBackend,
|
||||
StorageBackendFactory,
|
||||
}
|
||||
|
||||
/** Wires together all storage-backend components for Oracle.
  *
  * Database-agnostic concerns reuse the common `*Template` implementations
  * (parameterized with the Oracle schema or [[OracleQueryStrategy]] where needed),
  * while genuinely Oracle-specific concerns (deduplication, events, data source,
  * DB locks, reset) are served by dedicated `Oracle*` objects.
  */
object OracleStorageBackendFactory extends StorageBackendFactory {
  // Ingestion uses the common template, fed with the Oracle-specific schema mapping.
  override val createIngestionStorageBackend: IngestionStorageBackend[_] =
    new IngestionStorageBackendTemplate(OracleSchema.schema)

  override val createParameterStorageBackend: ParameterStorageBackend =
    ParameterStorageBackendTemplate

  override val createConfigurationStorageBackend: ConfigurationStorageBackend =
    ConfigurationStorageBackendTemplate

  override val createPartyStorageBackend: PartyStorageBackend =
    new PartyStorageBackendTemplate(OracleQueryStrategy)

  override val createPackageStorageBackend: PackageStorageBackend =
    PackageStorageBackendTemplate

  // Oracle-specific: uses a MERGE-based upsert with retry (see OracleDeduplicationStorageBackend).
  override val createDeduplicationStorageBackend: DeduplicationStorageBackend =
    OracleDeduplicationStorageBackend

  override val createCompletionStorageBackend: CompletionStorageBackend =
    new CompletionStorageBackendTemplate(OracleQueryStrategy)

  override val createContractStorageBackend: ContractStorageBackend =
    new ContractStorageBackendTemplate(OracleQueryStrategy)

  override val createEventStorageBackend: EventStorageBackend =
    OracleEventStorageBackend

  override val createDataSourceStorageBackend: DataSourceStorageBackend =
    OracleDataSourceStorageBackend

  // Oracle-specific: DBMS_LOCK-based participant locking.
  override val createDBLockStorageBackend: DBLockStorageBackend =
    OracleDBLockStorageBackend

  override val createIntegrityStorageBackend: IntegrityStorageBackend =
    IntegrityStorageBackendTemplate

  override val createResetStorageBackend: ResetStorageBackend =
    OracleResetStorageBackend
}
|
@ -0,0 +1,52 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.postgresql
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SqlStringInterpolation
|
||||
import anorm.SqlParser.get
|
||||
import com.daml.platform.store.backend.DBLockStorageBackend
|
||||
|
||||
/** Postgres implementation of participant DB locking, built on session-level
  * advisory locks. Advisory locks are keyed by a signed bigint and are released
  * automatically when the owning session (connection) terminates.
  *
  * Change: `match { case true => …; case false => … }` on a Boolean result is a
  * Scala anti-pattern; replaced with a plain `if` (behavior unchanged).
  */
object PostgresDBLockStorageBackend extends DBLockStorageBackend {

  /** Attempts to acquire the lock without blocking.
    *
    * @return Some(lock) if acquired, None if it is currently held by another session
    */
  override def tryAcquire(
      lockId: DBLockStorageBackend.LockId,
      lockMode: DBLockStorageBackend.LockMode,
  )(connection: Connection): Option[DBLockStorageBackend.Lock] = {
    val lockFunction = lockMode match {
      case DBLockStorageBackend.LockMode.Exclusive => "pg_try_advisory_lock"
      case DBLockStorageBackend.LockMode.Shared => "pg_try_advisory_lock_shared"
    }
    // pg_try_advisory_lock(_shared) returns immediately: true iff the lock was obtained.
    val acquired =
      SQL"SELECT #$lockFunction(${pgBigintLockId(lockId)})"
        .as(get[Boolean](1).single)(connection)
    if (acquired) Some(DBLockStorageBackend.Lock(lockId, lockMode)) else None
  }

  /** Releases a previously acquired lock.
    *
    * @return true if the lock was held by this session and is now released, false otherwise
    */
  override def release(lock: DBLockStorageBackend.Lock)(connection: Connection): Boolean = {
    val unlockFunction = lock.lockMode match {
      case DBLockStorageBackend.LockMode.Exclusive => "pg_advisory_unlock"
      case DBLockStorageBackend.LockMode.Shared => "pg_advisory_unlock_shared"
    }
    SQL"SELECT #$unlockFunction(${pgBigintLockId(lock.lockId)})"
      .as(get[Boolean](1).single)(connection)
  }

  // Postgres advisory lock keys are bigint.
  case class PGLockId(id: Long) extends DBLockStorageBackend.LockId

  // Only PGLockId-s minted by this backend (via `lock`) are accepted.
  private def pgBigintLockId(lockId: DBLockStorageBackend.LockId): Long =
    lockId match {
      case PGLockId(id) => id
      case unknown =>
        throw new Exception(
          s"LockId $unknown not supported. Probable cause: LockId was created by a different StorageBackend"
        )
    }

  override def lock(id: Int): DBLockStorageBackend.LockId = PGLockId(id.toLong)

  override def dbLockSupported: Boolean = true
}
|
@ -1,25 +0,0 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.postgresql
|
||||
|
||||
import com.daml.platform.store.backend.postgresql.PostgresDataSourceConfig.SynchronousCommitValue
|
||||
|
||||
case class PostgresDataSourceConfig(
|
||||
synchronousCommit: Option[SynchronousCommitValue] = None,
|
||||
// TCP keepalive configuration for postgres. See https://www.postgresql.org/docs/13/runtime-config-connection.html#RUNTIME-CONFIG-CONNECTION-SETTINGS for details
|
||||
tcpKeepalivesIdle: Option[Int] = None, // corresponds to: tcp_keepalives_idle
|
||||
tcpKeepalivesInterval: Option[Int] = None, // corresponds to: tcp_keepalives_interval
|
||||
tcpKeepalivesCount: Option[Int] = None, // corresponds to: tcp_keepalives_count
|
||||
)
|
||||
|
||||
object PostgresDataSourceConfig {
|
||||
sealed abstract class SynchronousCommitValue(val pgSqlName: String)
|
||||
object SynchronousCommitValue {
|
||||
case object On extends SynchronousCommitValue("on")
|
||||
case object Off extends SynchronousCommitValue("off")
|
||||
case object RemoteWrite extends SynchronousCommitValue("remote_write")
|
||||
case object RemoteApply extends SynchronousCommitValue("remote_apply")
|
||||
case object Local extends SynchronousCommitValue("local")
|
||||
}
|
||||
}
|
@ -0,0 +1,105 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.postgresql
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SqlParser.get
|
||||
import anorm.SqlStringInterpolation
|
||||
import com.daml.logging.{ContextualizedLogger, LoggingContext}
|
||||
import com.daml.platform.store.backend.DataSourceStorageBackend
|
||||
import com.daml.platform.store.backend.common.{
|
||||
DataSourceStorageBackendTemplate,
|
||||
InitHookDataSourceProxy,
|
||||
}
|
||||
import javax.sql.DataSource
|
||||
import org.postgresql.ds.PGSimpleDataSource
|
||||
|
||||
import com.daml.platform.store.backend.postgresql.PostgresDataSourceConfig.SynchronousCommitValue
|
||||
|
||||
/** Postgres-specific configuration applied per-connection when the indexer's
  * data source is created (each set option becomes a session-level `SET`).
  */
case class PostgresDataSourceConfig(
    // If set, overrides the session's synchronous_commit setting.
    synchronousCommit: Option[SynchronousCommitValue] = None,
    // TCP keepalive configuration for postgres. See https://www.postgresql.org/docs/13/runtime-config-connection.html#RUNTIME-CONFIG-CONNECTION-SETTINGS for details
    tcpKeepalivesIdle: Option[Int] = None, // corresponds to: tcp_keepalives_idle
    tcpKeepalivesInterval: Option[Int] = None, // corresponds to: tcp_keepalives_interval
    tcpKeepalivesCount: Option[Int] = None, // corresponds to: tcp_keepalives_count
)

object PostgresDataSourceConfig {
  /** Admissible values of Postgres' `synchronous_commit` setting, with their SQL spellings. */
  sealed abstract class SynchronousCommitValue(val pgSqlName: String)
  object SynchronousCommitValue {
    case object On extends SynchronousCommitValue("on")
    case object Off extends SynchronousCommitValue("off")
    case object RemoteWrite extends SynchronousCommitValue("remote_write")
    case object RemoteApply extends SynchronousCommitValue("remote_apply")
    case object Local extends SynchronousCommitValue("local")
  }
}
|
||||
|
||||
/** Postgres data-source creation plus version compatibility checks. */
object PostgresDataSourceStorageBackend extends DataSourceStorageBackend {
  private val logger: ContextualizedLogger = ContextualizedLogger.get(this.getClass)

  /** Creates a Postgres data source whose connections execute the configured
    * session `SET` statements (and the optional caller-provided hook) on init.
    */
  override def createDataSource(
      jdbcUrl: String,
      dataSourceConfig: DataSourceStorageBackend.DataSourceConfig,
      connectionInitHook: Option[Connection => Unit],
  )(implicit loggingContext: LoggingContext): DataSource = {
    import DataSourceStorageBackendTemplate.exe
    val underlying = new PGSimpleDataSource()
    underlying.setUrl(jdbcUrl)

    val pgConfig = dataSourceConfig.postgresConfig
    // Each configured option contributes exactly one SET statement; unset options contribute none.
    val initHooks =
      pgConfig.synchronousCommit.toList
        .map(value => exe(s"SET synchronous_commit TO ${value.pgSqlName}")) ++
        pgConfig.tcpKeepalivesIdle.toList.map(i => exe(s"SET tcp_keepalives_idle TO $i")) ++
        pgConfig.tcpKeepalivesInterval.toList.map(i => exe(s"SET tcp_keepalives_interval TO $i")) ++
        pgConfig.tcpKeepalivesCount.toList.map(i => exe(s"SET tcp_keepalives_count TO $i")) ++
        connectionInitHook.toList
    InitHookDataSourceProxy(underlying, initHooks)
  }

  /** Logs an error for Postgres < 10 and a warning when the version cannot be
    * determined; never throws — the application keeps running either way.
    */
  override def checkCompatibility(
      connection: Connection
  )(implicit loggingContext: LoggingContext): Unit = {
    getPostgresVersion(connection) match {
      case Some((major, minor)) if major < 10 =>
        logger.error(
          "Deprecated Postgres version. " +
            s"Found Postgres version $major.$minor, minimum required Postgres version is 10. " +
            "This application will continue running but is at risk of data loss, as Postgres < 10 does not support crash-fault tolerant hash indices. " +
            "Please upgrade your Postgres database to version 10 or later to fix this issue."
        )
      case Some(_) =>
        () // supported version: nothing to report
      case None =>
        logger.warn(
          s"Could not determine the version of the Postgres database. Please verify that this application is compatible with this Postgres version."
        )
    }
    ()
  }

  // Queries the server for its version string and parses out (major, minor).
  private[backend] def getPostgresVersion(
      connection: Connection
  )(implicit loggingContext: LoggingContext): Option[(Int, Int)] = {
    val reported = SQL"SHOW server_version".as(get[String](1).single)(connection)
    logger.debug(s"Found Postgres version $reported")
    parsePostgresVersion(reported)
  }

  /** Extracts (major, minor) from a `server_version` string such as "12.6 (Debian …)". */
  private[backend] def parsePostgresVersion(version: String): Option[(Int, Int)] = {
    val majorMinorPattern = """(\d+)[.](\d+).*""".r
    version match {
      case majorMinorPattern(major, minor) => Some((major.toInt, minor.toInt))
      case _ => None
    }
  }

  override def checkDatabaseAvailable(connection: Connection): Unit =
    DataSourceStorageBackendTemplate.checkDatabaseAvailable(connection)
}
|
@ -0,0 +1,35 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.postgresql
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SQL
|
||||
import com.daml.lf.data.Time.Timestamp
|
||||
import com.daml.logging.LoggingContext
|
||||
import com.daml.platform.store.backend.common.DeduplicationStorageBackendTemplate
|
||||
|
||||
/** Postgres implementation of command-deduplication persistence. */
object PostgresDeduplicationStorageBackend extends DeduplicationStorageBackendTemplate {

  // Upsert: insert a fresh deduplication entry, or extend an existing one — but only
  // when the existing entry's deduplicate_until has already expired relative to the
  // submission time (see the trailing WHERE clause).
  private val SQL_INSERT_COMMAND: String =
    """insert into participant_command_submissions as pcs (deduplication_key, deduplicate_until)
      |values ({deduplicationKey}, {deduplicateUntil})
      |on conflict (deduplication_key)
      | do update
      | set deduplicate_until={deduplicateUntil}
      | where pcs.deduplicate_until < {submittedAt}""".stripMargin

  /** Inserts or refreshes the deduplication entry for `key`.
    *
    * @param key               deduplication key of the command submission
    * @param submittedAt       submission time, compared against the stored deduplicate_until
    * @param deduplicateUntil  new deduplication horizon to store
    * @return number of affected rows — 0 when a still-active entry blocked the update
    */
  override def upsertDeduplicationEntry(
      key: String,
      submittedAt: Timestamp,
      deduplicateUntil: Timestamp,
  )(connection: Connection)(implicit loggingContext: LoggingContext): Int =
    SQL(SQL_INSERT_COMMAND)
      .on(
        "deduplicationKey" -> key,
        "submittedAt" -> submittedAt.micros,
        "deduplicateUntil" -> deduplicateUntil.micros,
      )
      .executeUpdate()(connection)
}
|
@ -0,0 +1,62 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.postgresql
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SqlParser.{get, int}
|
||||
import com.daml.ledger.offset.Offset
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.SqlStringInterpolation
|
||||
import com.daml.platform.store.backend.common.{
|
||||
EventStorageBackendTemplate,
|
||||
ParameterStorageBackendTemplate,
|
||||
}
|
||||
|
||||
/** Postgres event storage: the common template parameterized with the Postgres
  * event/query strategies, plus two Postgres-specific query overrides.
  */
object PostgresEventStorageBackend
    extends EventStorageBackendTemplate(
      eventStrategy = PostgresEventStrategy,
      queryStrategy = PostgresQueryStrategy,
      participantAllDivulgedContractsPrunedUpToInclusive =
        ParameterStorageBackendTemplate.participantAllDivulgedContractsPrunedUpToInclusive,
    ) {

  // TODO FIXME: Use tables directly instead of the participant_events view.
  /** Largest event_sequential_id at the greatest event_offset <= `offset`, if any. */
  override def maxEventSequentialIdOfAnObservableEvent(
      offset: Offset
  )(connection: Connection): Option[Long] = {
    import com.daml.platform.store.Conversions.OffsetToStatement
    // This query could be: "select max(event_sequential_id) from participant_events where event_offset <= ${range.endInclusive}"
    // however tests using PostgreSQL 12 with tens of millions of events have shown that the index
    // on `event_offset` is not used unless we _hint_ at it by specifying `order by event_offset`
    SQL"select max(event_sequential_id) from participant_events where event_offset <= $offset group by event_offset order by event_offset desc limit 1"
      .as(get[Long](1).singleOpt)(connection)
  }

  /** If `pruneAllDivulgedContracts` is set, validate that the pruning offset is after
    * the last event offset that was ingested before the migration to append-only schema (if such event offset exists).
    * (see [[com.daml.platform.store.appendonlydao.JdbcLedgerDao.prune]])
    *
    * Valid means: no pre-migration event exists at or beyond the pruning offset
    * (the query below returns no row). Without the flag, the check is skipped.
    */
  override def isPruningOffsetValidAgainstMigration(
      pruneUpToInclusive: Offset,
      pruneAllDivulgedContracts: Boolean,
      connection: Connection,
  ): Boolean =
    if (pruneAllDivulgedContracts) {
      import com.daml.platform.store.Conversions.OffsetToStatement
      SQL"""
       with max_offset_before_migration as (
         select max(event_offset) as max_event_offset
         from participant_events, participant_migration_history_v100
         where event_sequential_id <= ledger_end_sequential_id_before
       )
       select 1 as result
       from max_offset_before_migration
       where max_event_offset >= $pruneUpToInclusive
      """
        .as(int("result").singleOpt)(connection)
        .isEmpty
    } else {
      true
    }
}
|
@ -0,0 +1,51 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.postgresql
|
||||
|
||||
import com.daml.platform.store.appendonlydao.events.Party
|
||||
import com.daml.platform.store.backend.EventStorageBackend.FilterParams
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.{CompositeSql, SqlStringInterpolation}
|
||||
import com.daml.platform.store.backend.common.EventStrategy
|
||||
|
||||
object PostgresEventStrategy extends EventStrategy {
|
||||
override def filteredEventWitnessesClause(
|
||||
witnessesColumnName: String,
|
||||
parties: Set[Party],
|
||||
): CompositeSql =
|
||||
if (parties.size == 1)
|
||||
cSQL"array[${parties.head.toString}]::text[]"
|
||||
else {
|
||||
val partiesArray: Array[String] = parties.view.map(_.toString).toArray
|
||||
cSQL"array(select unnest(#$witnessesColumnName) intersect select unnest($partiesArray::text[]))"
|
||||
}
|
||||
|
||||
override def submittersArePartiesClause(
|
||||
submittersColumnName: String,
|
||||
parties: Set[Party],
|
||||
): CompositeSql = {
|
||||
val partiesArray = parties.view.map(_.toString).toArray
|
||||
cSQL"(#$submittersColumnName::text[] && $partiesArray::text[])"
|
||||
}
|
||||
|
||||
override def witnessesWhereClause(
|
||||
witnessesColumnName: String,
|
||||
filterParams: FilterParams,
|
||||
): CompositeSql = {
|
||||
val wildCardClause = filterParams.wildCardParties match {
|
||||
case wildCardParties if wildCardParties.isEmpty =>
|
||||
Nil
|
||||
|
||||
case wildCardParties =>
|
||||
val partiesArray = wildCardParties.view.map(_.toString).toArray
|
||||
cSQL"(#$witnessesColumnName::text[] && $partiesArray::text[])" :: Nil
|
||||
}
|
||||
val partiesTemplatesClauses =
|
||||
filterParams.partiesAndTemplates.iterator.map { case (parties, templateIds) =>
|
||||
val partiesArray = parties.view.map(_.toString).toArray
|
||||
val templateIdsArray = templateIds.view.map(_.toString).toArray
|
||||
cSQL"( (#$witnessesColumnName::text[] && $partiesArray::text[]) AND (template_id = ANY($templateIdsArray::text[])) )"
|
||||
}.toList
|
||||
(wildCardClause ::: partiesTemplatesClauses).mkComposite("(", " OR ", ")")
|
||||
}
|
||||
}
|
@ -0,0 +1,25 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.postgresql
|
||||
|
||||
import com.daml.lf.data.Ref
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.CompositeSql
|
||||
import com.daml.platform.store.backend.common.QueryStrategy
|
||||
|
||||
object PostgresQueryStrategy extends QueryStrategy {
|
||||
|
||||
override def arrayIntersectionNonEmptyClause(
|
||||
columnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql = {
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.SqlStringInterpolation
|
||||
val partiesArray: Array[String] = parties.map(_.toString).toArray
|
||||
cSQL"#$columnName::text[] && $partiesArray::text[]"
|
||||
}
|
||||
|
||||
override def arrayContains(arrayColumnName: String, elementColumnName: String): String =
|
||||
s"$elementColumnName = any($arrayColumnName)"
|
||||
|
||||
override def isTrue(booleanColumnName: String): String = booleanColumnName
|
||||
}
|
@ -0,0 +1,44 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.postgresql
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import anorm.SQL
|
||||
import com.daml.platform.store.backend.ResetStorageBackend
|
||||
|
||||
object PostgresResetStorageBackend extends ResetStorageBackend {
|
||||
override def reset(connection: Connection): Unit = {
|
||||
SQL("""truncate table configuration_entries cascade;
|
||||
|truncate table package_entries cascade;
|
||||
|truncate table parameters cascade;
|
||||
|truncate table participant_command_completions cascade;
|
||||
|truncate table participant_command_submissions cascade;
|
||||
|truncate table participant_events_divulgence cascade;
|
||||
|truncate table participant_events_create cascade;
|
||||
|truncate table participant_events_consuming_exercise cascade;
|
||||
|truncate table participant_events_non_consuming_exercise cascade;
|
||||
|truncate table party_entries cascade;
|
||||
|""".stripMargin)
|
||||
.execute()(connection)
|
||||
()
|
||||
}
|
||||
|
||||
override def resetAll(connection: Connection): Unit = {
|
||||
SQL("""truncate table configuration_entries cascade;
|
||||
|truncate table packages cascade;
|
||||
|truncate table package_entries cascade;
|
||||
|truncate table parameters cascade;
|
||||
|truncate table participant_command_completions cascade;
|
||||
|truncate table participant_command_submissions cascade;
|
||||
|truncate table participant_events_divulgence cascade;
|
||||
|truncate table participant_events_create cascade;
|
||||
|truncate table participant_events_consuming_exercise cascade;
|
||||
|truncate table participant_events_non_consuming_exercise cascade;
|
||||
|truncate table party_entries cascade;
|
||||
|""".stripMargin)
|
||||
.execute()(connection)
|
||||
()
|
||||
}
|
||||
}
|
@ -1,322 +0,0 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.postgresql
|
||||
|
||||
import java.sql.Connection
|
||||
import anorm.SQL
|
||||
import anorm.SqlParser.{get, int}
|
||||
import com.daml.ledger.offset.Offset
|
||||
import com.daml.lf.data.Ref
|
||||
import com.daml.lf.data.Time.Timestamp
|
||||
import com.daml.logging.{ContextualizedLogger, LoggingContext}
|
||||
import com.daml.platform.store.appendonlydao.events.Party
|
||||
import com.daml.platform.store.backend.EventStorageBackend.FilterParams
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.{CompositeSql, SqlStringInterpolation}
|
||||
import com.daml.platform.store.backend.common.{
|
||||
AppendOnlySchema,
|
||||
CompletionStorageBackendTemplate,
|
||||
ConfigurationStorageBackendTemplate,
|
||||
ContractStorageBackendTemplate,
|
||||
DataSourceStorageBackendTemplate,
|
||||
DeduplicationStorageBackendTemplate,
|
||||
EventStorageBackendTemplate,
|
||||
EventStrategy,
|
||||
IngestionStorageBackendTemplate,
|
||||
InitHookDataSourceProxy,
|
||||
IntegrityStorageBackendTemplate,
|
||||
PackageStorageBackendTemplate,
|
||||
ParameterStorageBackendTemplate,
|
||||
PartyStorageBackendTemplate,
|
||||
QueryStrategy,
|
||||
}
|
||||
import com.daml.platform.store.backend.{
|
||||
DBLockStorageBackend,
|
||||
DataSourceStorageBackend,
|
||||
DbDto,
|
||||
StorageBackend,
|
||||
common,
|
||||
}
|
||||
|
||||
import javax.sql.DataSource
|
||||
import org.postgresql.ds.PGSimpleDataSource
|
||||
|
||||
private[backend] object PostgresStorageBackend
|
||||
extends StorageBackend[AppendOnlySchema.Batch]
|
||||
with DataSourceStorageBackendTemplate
|
||||
with IngestionStorageBackendTemplate[AppendOnlySchema.Batch]
|
||||
with ParameterStorageBackendTemplate
|
||||
with ConfigurationStorageBackendTemplate
|
||||
with PackageStorageBackendTemplate
|
||||
with DeduplicationStorageBackendTemplate
|
||||
with EventStorageBackendTemplate
|
||||
with ContractStorageBackendTemplate
|
||||
with CompletionStorageBackendTemplate
|
||||
with PartyStorageBackendTemplate
|
||||
with IntegrityStorageBackendTemplate {
|
||||
|
||||
private val logger: ContextualizedLogger = ContextualizedLogger.get(this.getClass)
|
||||
|
||||
override def insertBatch(
|
||||
connection: Connection,
|
||||
postgresDbBatch: AppendOnlySchema.Batch,
|
||||
): Unit =
|
||||
PGSchema.schema.executeUpdate(postgresDbBatch, connection)
|
||||
|
||||
override def batch(dbDtos: Vector[DbDto]): AppendOnlySchema.Batch =
|
||||
PGSchema.schema.prepareData(dbDtos)
|
||||
|
||||
private val SQL_INSERT_COMMAND: String =
|
||||
"""insert into participant_command_submissions as pcs (deduplication_key, deduplicate_until)
|
||||
|values ({deduplicationKey}, {deduplicateUntil})
|
||||
|on conflict (deduplication_key)
|
||||
| do update
|
||||
| set deduplicate_until={deduplicateUntil}
|
||||
| where pcs.deduplicate_until < {submittedAt}""".stripMargin
|
||||
|
||||
override def upsertDeduplicationEntry(
|
||||
key: String,
|
||||
submittedAt: Timestamp,
|
||||
deduplicateUntil: Timestamp,
|
||||
)(connection: Connection)(implicit loggingContext: LoggingContext): Int =
|
||||
SQL(SQL_INSERT_COMMAND)
|
||||
.on(
|
||||
"deduplicationKey" -> key,
|
||||
"submittedAt" -> submittedAt.micros,
|
||||
"deduplicateUntil" -> deduplicateUntil.micros,
|
||||
)
|
||||
.executeUpdate()(connection)
|
||||
|
||||
override def reset(connection: Connection): Unit = {
|
||||
SQL("""truncate table configuration_entries cascade;
|
||||
|truncate table package_entries cascade;
|
||||
|truncate table parameters cascade;
|
||||
|truncate table participant_command_completions cascade;
|
||||
|truncate table participant_command_submissions cascade;
|
||||
|truncate table participant_events_divulgence cascade;
|
||||
|truncate table participant_events_create cascade;
|
||||
|truncate table participant_events_consuming_exercise cascade;
|
||||
|truncate table participant_events_non_consuming_exercise cascade;
|
||||
|truncate table party_entries cascade;
|
||||
|""".stripMargin)
|
||||
.execute()(connection)
|
||||
()
|
||||
}
|
||||
|
||||
override def resetAll(connection: Connection): Unit = {
|
||||
SQL("""truncate table configuration_entries cascade;
|
||||
|truncate table packages cascade;
|
||||
|truncate table package_entries cascade;
|
||||
|truncate table parameters cascade;
|
||||
|truncate table participant_command_completions cascade;
|
||||
|truncate table participant_command_submissions cascade;
|
||||
|truncate table participant_events_divulgence cascade;
|
||||
|truncate table participant_events_create cascade;
|
||||
|truncate table participant_events_consuming_exercise cascade;
|
||||
|truncate table participant_events_non_consuming_exercise cascade;
|
||||
|truncate table party_entries cascade;
|
||||
|""".stripMargin)
|
||||
.execute()(connection)
|
||||
()
|
||||
}
|
||||
|
||||
/** If `pruneAllDivulgedContracts` is set, validate that the pruning offset is after
|
||||
* the last event offset that was ingested before the migration to append-only schema (if such event offset exists).
|
||||
* (see [[com.daml.platform.store.appendonlydao.JdbcLedgerDao.prune]])
|
||||
*/
|
||||
def isPruningOffsetValidAgainstMigration(
|
||||
pruneUpToInclusive: Offset,
|
||||
pruneAllDivulgedContracts: Boolean,
|
||||
connection: Connection,
|
||||
): Boolean =
|
||||
if (pruneAllDivulgedContracts) {
|
||||
import com.daml.platform.store.Conversions.OffsetToStatement
|
||||
SQL"""
|
||||
with max_offset_before_migration as (
|
||||
select max(event_offset) as max_event_offset
|
||||
from participant_events, participant_migration_history_v100
|
||||
where event_sequential_id <= ledger_end_sequential_id_before
|
||||
)
|
||||
select 1 as result
|
||||
from max_offset_before_migration
|
||||
where max_event_offset >= $pruneUpToInclusive
|
||||
"""
|
||||
.as(int("result").singleOpt)(connection)
|
||||
.isEmpty
|
||||
} else {
|
||||
true
|
||||
}
|
||||
|
||||
object PostgresQueryStrategy extends QueryStrategy {
|
||||
|
||||
override def arrayIntersectionNonEmptyClause(
|
||||
columnName: String,
|
||||
parties: Set[Ref.Party],
|
||||
): CompositeSql = {
|
||||
import com.daml.platform.store.backend.common.ComposableQuery.SqlStringInterpolation
|
||||
val partiesArray: Array[String] = parties.map(_.toString).toArray
|
||||
cSQL"#$columnName::text[] && $partiesArray::text[]"
|
||||
}
|
||||
|
||||
override def arrayContains(arrayColumnName: String, elementColumnName: String): String =
|
||||
s"$elementColumnName = any($arrayColumnName)"
|
||||
|
||||
override def isTrue(booleanColumnName: String): String = booleanColumnName
|
||||
}
|
||||
|
||||
override def queryStrategy: QueryStrategy = PostgresQueryStrategy
|
||||
|
||||
object PostgresEventStrategy extends EventStrategy {
|
||||
override def filteredEventWitnessesClause(
|
||||
witnessesColumnName: String,
|
||||
parties: Set[Party],
|
||||
): CompositeSql =
|
||||
if (parties.size == 1)
|
||||
cSQL"array[${parties.head.toString}]::text[]"
|
||||
else {
|
||||
val partiesArray: Array[String] = parties.view.map(_.toString).toArray
|
||||
cSQL"array(select unnest(#$witnessesColumnName) intersect select unnest($partiesArray::text[]))"
|
||||
}
|
||||
|
||||
override def submittersArePartiesClause(
|
||||
submittersColumnName: String,
|
||||
parties: Set[Party],
|
||||
): CompositeSql = {
|
||||
val partiesArray = parties.view.map(_.toString).toArray
|
||||
cSQL"(#$submittersColumnName::text[] && $partiesArray::text[])"
|
||||
}
|
||||
|
||||
override def witnessesWhereClause(
|
||||
witnessesColumnName: String,
|
||||
filterParams: FilterParams,
|
||||
): CompositeSql = {
|
||||
val wildCardClause = filterParams.wildCardParties match {
|
||||
case wildCardParties if wildCardParties.isEmpty =>
|
||||
Nil
|
||||
|
||||
case wildCardParties =>
|
||||
val partiesArray = wildCardParties.view.map(_.toString).toArray
|
||||
cSQL"(#$witnessesColumnName::text[] && $partiesArray::text[])" :: Nil
|
||||
}
|
||||
val partiesTemplatesClauses =
|
||||
filterParams.partiesAndTemplates.iterator.map { case (parties, templateIds) =>
|
||||
val partiesArray = parties.view.map(_.toString).toArray
|
||||
val templateIdsArray = templateIds.view.map(_.toString).toArray
|
||||
cSQL"( (#$witnessesColumnName::text[] && $partiesArray::text[]) AND (template_id = ANY($templateIdsArray::text[])) )"
|
||||
}.toList
|
||||
(wildCardClause ::: partiesTemplatesClauses).mkComposite("(", " OR ", ")")
|
||||
}
|
||||
}
|
||||
|
||||
override def eventStrategy: common.EventStrategy = PostgresEventStrategy
|
||||
|
||||
// TODO FIXME: Use tables directly instead of the participant_events view.
|
||||
override def maxEventSequentialIdOfAnObservableEvent(
|
||||
offset: Offset
|
||||
)(connection: Connection): Option[Long] = {
|
||||
import com.daml.platform.store.Conversions.OffsetToStatement
|
||||
// This query could be: "select max(event_sequential_id) from participant_events where event_offset <= ${range.endInclusive}"
|
||||
// however tests using PostgreSQL 12 with tens of millions of events have shown that the index
|
||||
// on `event_offset` is not used unless we _hint_ at it by specifying `order by event_offset`
|
||||
SQL"select max(event_sequential_id) from participant_events where event_offset <= $offset group by event_offset order by event_offset desc limit 1"
|
||||
.as(get[Long](1).singleOpt)(connection)
|
||||
}
|
||||
|
||||
override def createDataSource(
|
||||
jdbcUrl: String,
|
||||
dataSourceConfig: DataSourceStorageBackend.DataSourceConfig,
|
||||
connectionInitHook: Option[Connection => Unit],
|
||||
)(implicit loggingContext: LoggingContext): DataSource = {
|
||||
val pgSimpleDataSource = new PGSimpleDataSource()
|
||||
pgSimpleDataSource.setUrl(jdbcUrl)
|
||||
|
||||
val hookFunctions = List(
|
||||
dataSourceConfig.postgresConfig.synchronousCommit.toList
|
||||
.map(synchCommitValue => exe(s"SET synchronous_commit TO ${synchCommitValue.pgSqlName}")),
|
||||
dataSourceConfig.postgresConfig.tcpKeepalivesIdle.toList
|
||||
.map(i => exe(s"SET tcp_keepalives_idle TO $i")),
|
||||
dataSourceConfig.postgresConfig.tcpKeepalivesInterval.toList
|
||||
.map(i => exe(s"SET tcp_keepalives_interval TO $i")),
|
||||
dataSourceConfig.postgresConfig.tcpKeepalivesCount.toList
|
||||
.map(i => exe(s"SET tcp_keepalives_count TO $i")),
|
||||
connectionInitHook.toList,
|
||||
).flatten
|
||||
InitHookDataSourceProxy(pgSimpleDataSource, hookFunctions)
|
||||
}
|
||||
|
||||
override def checkCompatibility(
|
||||
connection: Connection
|
||||
)(implicit loggingContext: LoggingContext): Unit = {
|
||||
getPostgresVersion(connection) match {
|
||||
case Some((major, minor)) =>
|
||||
if (major < 10) {
|
||||
logger.error(
|
||||
"Deprecated Postgres version. " +
|
||||
s"Found Postgres version $major.$minor, minimum required Postgres version is 10. " +
|
||||
"This application will continue running but is at risk of data loss, as Postgres < 10 does not support crash-fault tolerant hash indices. " +
|
||||
"Please upgrade your Postgres database to version 10 or later to fix this issue."
|
||||
)
|
||||
}
|
||||
case None =>
|
||||
logger.warn(
|
||||
s"Could not determine the version of the Postgres database. Please verify that this application is compatible with this Postgres version."
|
||||
)
|
||||
}
|
||||
()
|
||||
}
|
||||
|
||||
private[backend] def getPostgresVersion(
|
||||
connection: Connection
|
||||
)(implicit loggingContext: LoggingContext): Option[(Int, Int)] = {
|
||||
val version = SQL"SHOW server_version".as(get[String](1).single)(connection)
|
||||
logger.debug(s"Found Postgres version $version")
|
||||
parsePostgresVersion(version)
|
||||
}
|
||||
|
||||
private[backend] def parsePostgresVersion(version: String): Option[(Int, Int)] = {
|
||||
val versionPattern = """(\d+)[.](\d+).*""".r
|
||||
version match {
|
||||
case versionPattern(major, minor) => Some((major.toInt, minor.toInt))
|
||||
case _ => None
|
||||
}
|
||||
}
|
||||
|
||||
override def tryAcquire(
|
||||
lockId: DBLockStorageBackend.LockId,
|
||||
lockMode: DBLockStorageBackend.LockMode,
|
||||
)(connection: Connection): Option[DBLockStorageBackend.Lock] = {
|
||||
val lockFunction = lockMode match {
|
||||
case DBLockStorageBackend.LockMode.Exclusive => "pg_try_advisory_lock"
|
||||
case DBLockStorageBackend.LockMode.Shared => "pg_try_advisory_lock_shared"
|
||||
}
|
||||
SQL"SELECT #$lockFunction(${pgBigintLockId(lockId)})"
|
||||
.as(get[Boolean](1).single)(connection) match {
|
||||
case true => Some(DBLockStorageBackend.Lock(lockId, lockMode))
|
||||
case false => None
|
||||
}
|
||||
}
|
||||
|
||||
override def release(lock: DBLockStorageBackend.Lock)(connection: Connection): Boolean = {
|
||||
val lockFunction = lock.lockMode match {
|
||||
case DBLockStorageBackend.LockMode.Exclusive => "pg_advisory_unlock"
|
||||
case DBLockStorageBackend.LockMode.Shared => "pg_advisory_unlock_shared"
|
||||
}
|
||||
SQL"SELECT #$lockFunction(${pgBigintLockId(lock.lockId)})"
|
||||
.as(get[Boolean](1).single)(connection)
|
||||
}
|
||||
|
||||
case class PGLockId(id: Long) extends DBLockStorageBackend.LockId
|
||||
|
||||
private def pgBigintLockId(lockId: DBLockStorageBackend.LockId): Long =
|
||||
lockId match {
|
||||
case PGLockId(id) => id
|
||||
case unknown =>
|
||||
throw new Exception(
|
||||
s"LockId $unknown not supported. Probable cause: LockId was created by a different StorageBackend"
|
||||
)
|
||||
}
|
||||
|
||||
override def lock(id: Int): DBLockStorageBackend.LockId = PGLockId(id.toLong)
|
||||
|
||||
override def dbLockSupported: Boolean = true
|
||||
}
|
@ -0,0 +1,72 @@
|
||||
// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package com.daml.platform.store.backend.postgresql
|
||||
|
||||
import com.daml.platform.store.backend.common.{
|
||||
CompletionStorageBackendTemplate,
|
||||
ConfigurationStorageBackendTemplate,
|
||||
ContractStorageBackendTemplate,
|
||||
IngestionStorageBackendTemplate,
|
||||
IntegrityStorageBackendTemplate,
|
||||
PackageStorageBackendTemplate,
|
||||
ParameterStorageBackendTemplate,
|
||||
PartyStorageBackendTemplate,
|
||||
}
|
||||
import com.daml.platform.store.backend.{
|
||||
CompletionStorageBackend,
|
||||
ConfigurationStorageBackend,
|
||||
ContractStorageBackend,
|
||||
DBLockStorageBackend,
|
||||
DataSourceStorageBackend,
|
||||
DeduplicationStorageBackend,
|
||||
EventStorageBackend,
|
||||
IngestionStorageBackend,
|
||||
IntegrityStorageBackend,
|
||||
PackageStorageBackend,
|
||||
ParameterStorageBackend,
|
||||
PartyStorageBackend,
|
||||
ResetStorageBackend,
|
||||
StorageBackendFactory,
|
||||
}
|
||||
|
||||
object PostgresStorageBackendFactory extends StorageBackendFactory {
|
||||
override val createIngestionStorageBackend: IngestionStorageBackend[_] =
|
||||
new IngestionStorageBackendTemplate(PGSchema.schema)
|
||||
|
||||
override val createParameterStorageBackend: ParameterStorageBackend =
|
||||
ParameterStorageBackendTemplate
|
||||
|
||||
override val createConfigurationStorageBackend: ConfigurationStorageBackend =
|
||||
ConfigurationStorageBackendTemplate
|
||||
|
||||
override val createPartyStorageBackend: PartyStorageBackend =
|
||||
new PartyStorageBackendTemplate(PostgresQueryStrategy)
|
||||
|
||||
override val createPackageStorageBackend: PackageStorageBackend =
|
||||
PackageStorageBackendTemplate
|
||||
|
||||
override val createDeduplicationStorageBackend: DeduplicationStorageBackend =
|
||||
PostgresDeduplicationStorageBackend
|
||||
|
||||
override val createCompletionStorageBackend: CompletionStorageBackend =
|
||||
new CompletionStorageBackendTemplate(PostgresQueryStrategy)
|
||||
|
||||
override val createContractStorageBackend: ContractStorageBackend =
|
||||
new ContractStorageBackendTemplate(PostgresQueryStrategy)
|
||||
|
||||
override val createEventStorageBackend: EventStorageBackend =
|
||||
PostgresEventStorageBackend
|
||||
|
||||
override val createDataSourceStorageBackend: DataSourceStorageBackend =
|
||||
PostgresDataSourceStorageBackend
|
||||
|
||||
override val createDBLockStorageBackend: DBLockStorageBackend =
|
||||
PostgresDBLockStorageBackend
|
||||
|
||||
override val createIntegrityStorageBackend: IntegrityStorageBackend =
|
||||
IntegrityStorageBackendTemplate
|
||||
|
||||
override val createResetStorageBackend: ResetStorageBackend =
|
||||
PostgresResetStorageBackend
|
||||
}
|
@ -7,14 +7,14 @@ import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
|
||||
import com.daml.ledger.resources.ResourceContext
|
||||
import com.daml.logging.LoggingContext
|
||||
import com.daml.platform.store.DbType
|
||||
import com.daml.platform.store.backend.StorageBackend
|
||||
import com.daml.platform.store.backend.{ParameterStorageBackend, StorageBackendFactory}
|
||||
import org.scalatest.Assertion
|
||||
import org.scalatest.concurrent.Eventually
|
||||
import org.scalatest.flatspec.AsyncFlatSpec
|
||||
import org.scalatest.matchers.should.Matchers
|
||||
import org.scalatest.time.{Millis, Seconds, Span}
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import scala.concurrent.{ExecutionContext, Future}
|
||||
|
||||
trait IndexerStabilitySpec
|
||||
@ -50,8 +50,10 @@ trait IndexerStabilitySpec
|
||||
materializer,
|
||||
)
|
||||
.use[Unit] { indexers =>
|
||||
val storageBackend = StorageBackend.of(DbType.jdbcType(jdbcUrl))
|
||||
val dataSource = storageBackend.createDataSource(jdbcUrl)
|
||||
val factory = StorageBackendFactory.of(DbType.jdbcType(jdbcUrl))
|
||||
val dataSource = factory.createDataSourceStorageBackend.createDataSource(jdbcUrl)
|
||||
val parameterStorageBackend = factory.createParameterStorageBackend
|
||||
val integrityStorageBackend = factory.createIntegrityStorageBackend
|
||||
val connection = dataSource.getConnection()
|
||||
|
||||
Iterator
|
||||
@ -61,7 +63,7 @@ trait IndexerStabilitySpec
|
||||
info(s"Indexer ${activeIndexer.readService.name} is running")
|
||||
|
||||
// Assert that state updates are being indexed
|
||||
assertLedgerEndHasMoved(storageBackend, connection)
|
||||
assertLedgerEndHasMoved(parameterStorageBackend, connection)
|
||||
info("Ledger end has moved")
|
||||
|
||||
// At this point, the indexer that was aborted by the previous iteration can be reset,
|
||||
@ -94,7 +96,7 @@ trait IndexerStabilitySpec
|
||||
Thread.sleep(1000L)
|
||||
|
||||
// Verify the integrity of the index database
|
||||
storageBackend.verifyIntegrity()(connection)
|
||||
integrityStorageBackend.verifyIntegrity()(connection)
|
||||
info(s"Integrity of the index database was checked")
|
||||
|
||||
connection.close()
|
||||
@ -120,7 +122,7 @@ trait IndexerStabilitySpec
|
||||
|
||||
// Asserts that the ledger end has moved at least the specified number of events within a short time
|
||||
private def assertLedgerEndHasMoved(
|
||||
storageBackend: StorageBackend[_],
|
||||
parameterStorageBackend: ParameterStorageBackend,
|
||||
connection: Connection,
|
||||
)(implicit pos: org.scalactic.source.Position): Assertion = {
|
||||
implicit val patienceConfig: PatienceConfig = PatienceConfig(
|
||||
@ -129,10 +131,10 @@ trait IndexerStabilitySpec
|
||||
)
|
||||
// Note: we don't know exactly at which ledger end the current indexer has started.
|
||||
// We only observe that the ledger end is moving right now.
|
||||
val initialLedgerEnd = storageBackend.ledgerEndOrBeforeBegin(connection)
|
||||
val initialLedgerEnd = parameterStorageBackend.ledgerEndOrBeforeBegin(connection)
|
||||
val minEvents = 2L
|
||||
eventually {
|
||||
val ledgerEnd = storageBackend.ledgerEndOrBeforeBegin(connection)
|
||||
val ledgerEnd = parameterStorageBackend.ledgerEndOrBeforeBegin(connection)
|
||||
assert(ledgerEnd.lastEventSeqId > initialLedgerEnd.lastEventSeqId + minEvents)
|
||||
}
|
||||
}
|
||||
|
@ -4,9 +4,10 @@
|
||||
package com.daml.platform.store.backend
|
||||
|
||||
import java.sql.Connection
|
||||
import com.daml.platform.store.backend.h2.H2StorageBackend
|
||||
import com.daml.platform.store.backend.oracle.OracleStorageBackend
|
||||
import com.daml.platform.store.backend.postgresql.PostgresStorageBackend
|
||||
|
||||
import com.daml.platform.store.backend.h2.H2StorageBackendFactory
|
||||
import com.daml.platform.store.backend.oracle.OracleStorageBackendFactory
|
||||
import com.daml.platform.store.backend.postgresql.PostgresStorageBackendFactory
|
||||
import com.daml.testing.oracle.OracleAroundAll
|
||||
import com.daml.testing.postgresql.PostgresAroundAll
|
||||
import org.scalatest.Suite
|
||||
@ -16,11 +17,12 @@ import org.scalatest.Suite
|
||||
*/
|
||||
private[backend] trait StorageBackendProvider {
|
||||
protected def jdbcUrl: String
|
||||
protected def backend: StorageBackend[_]
|
||||
protected def backendFactory: StorageBackendFactory
|
||||
|
||||
protected final def ingest(dbDtos: Vector[DbDto], connection: Connection): Unit = {
|
||||
def typeBoundIngest[T](backend: StorageBackend[T]): Unit =
|
||||
backend.insertBatch(connection, backend.batch(dbDtos))
|
||||
typeBoundIngest(backend)
|
||||
def typeBoundIngest[T](ingestionStorageBackend: IngestionStorageBackend[T]): Unit =
|
||||
ingestionStorageBackend.insertBatch(connection, ingestionStorageBackend.batch(dbDtos))
|
||||
typeBoundIngest(backendFactory.createIngestionStorageBackend)
|
||||
}
|
||||
}
|
||||
|
||||
@ -28,12 +30,12 @@ private[backend] trait StorageBackendProviderPostgres
|
||||
extends StorageBackendProvider
|
||||
with PostgresAroundAll { this: Suite =>
|
||||
override protected def jdbcUrl: String = postgresDatabase.url
|
||||
override protected val backend: StorageBackend[_] = PostgresStorageBackend
|
||||
override protected val backendFactory: StorageBackendFactory = PostgresStorageBackendFactory
|
||||
}
|
||||
|
||||
private[backend] trait StorageBackendProviderH2 extends StorageBackendProvider { this: Suite =>
|
||||
override protected def jdbcUrl: String = "jdbc:h2:mem:storage_backend_provider;db_close_delay=-1"
|
||||
override protected val backend: StorageBackend[_] = H2StorageBackend
|
||||
override protected val backendFactory: StorageBackendFactory = H2StorageBackendFactory
|
||||
}
|
||||
|
||||
private[backend] trait StorageBackendProviderOracle
|
||||
@ -41,5 +43,5 @@ private[backend] trait StorageBackendProviderOracle
|
||||
with OracleAroundAll { this: Suite =>
|
||||
override protected def jdbcUrl: String =
|
||||
s"jdbc:oracle:thin:$oracleUser/$oraclePwd@localhost:$oraclePort/ORCLPDB1"
|
||||
override protected val backend: StorageBackend[_] = OracleStorageBackend
|
||||
override protected val backendFactory: StorageBackendFactory = OracleStorageBackendFactory
|
||||
}
|
||||
|
@ -49,7 +49,7 @@ private[backend] trait StorageBackendSpec
|
||||
)
|
||||
dispatcher <- DbDispatcher
|
||||
.owner(
|
||||
dataSource = backend.createDataSource(jdbcUrl),
|
||||
dataSource = backendFactory.createDataSourceStorageBackend.createDataSource(jdbcUrl),
|
||||
serverRole = ServerRole.Testing(this.getClass),
|
||||
connectionPoolSize = connectionPoolSize,
|
||||
connectionTimeout = FiniteDuration(250, "millis"),
|
||||
@ -81,7 +81,7 @@ private[backend] trait StorageBackendSpec
|
||||
runningTests.incrementAndGet() == 1,
|
||||
"StorageBackendSpec tests must not run in parallel, as they all run against the same database.",
|
||||
)
|
||||
Await.result(executeSql(backend.resetAll), 60.seconds)
|
||||
Await.result(executeSql(backendFactory.createResetStorageBackend.resetAll), 60.seconds)
|
||||
}
|
||||
|
||||
override protected def afterEach(): Unit = {
|
||||
|
@ -15,6 +15,11 @@ private[backend] trait StorageBackendTestsCompletions
|
||||
with StorageBackendSpec {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
private val parameterStorageBackend: ParameterStorageBackend =
|
||||
backendFactory.createParameterStorageBackend
|
||||
private val completionStorageBackend: CompletionStorageBackend =
|
||||
backendFactory.createCompletionStorageBackend
|
||||
|
||||
behavior of "StorageBackend (completions)"
|
||||
|
||||
import StorageBackendTestValues._
|
||||
@ -31,20 +36,27 @@ private[backend] trait StorageBackendTestsCompletions
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(4), 3L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(4), 3L))
|
||||
)
|
||||
completions0to3 <- executeSql(
|
||||
backend.commandCompletions(Offset.beforeBegin, offset(3), applicationId, Set(party))
|
||||
completionStorageBackend.commandCompletions(
|
||||
Offset.beforeBegin,
|
||||
offset(3),
|
||||
applicationId,
|
||||
Set(party),
|
||||
)
|
||||
)
|
||||
completions1to3 <- executeSql(
|
||||
backend.commandCompletions(offset(1), offset(3), applicationId, Set(party))
|
||||
completionStorageBackend.commandCompletions(offset(1), offset(3), applicationId, Set(party))
|
||||
)
|
||||
completions2to3 <- executeSql(
|
||||
backend.commandCompletions(offset(2), offset(3), applicationId, Set(party))
|
||||
completionStorageBackend.commandCompletions(offset(2), offset(3), applicationId, Set(party))
|
||||
)
|
||||
completions1to9 <- executeSql(
|
||||
backend.commandCompletions(offset(1), offset(9), applicationId, Set(party))
|
||||
completionStorageBackend.commandCompletions(offset(1), offset(9), applicationId, Set(party))
|
||||
)
|
||||
} yield {
|
||||
completions0to3 should have length 2
|
||||
@ -64,11 +76,13 @@ private[backend] trait StorageBackendTestsCompletions
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 1L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 1L))
|
||||
)
|
||||
completions <- executeSql(
|
||||
backend.commandCompletions(offset(1), offset(2), applicationId, Set(party))
|
||||
completionStorageBackend.commandCompletions(offset(1), offset(2), applicationId, Set(party))
|
||||
)
|
||||
} yield {
|
||||
completions should have length 1
|
||||
@ -88,11 +102,18 @@ private[backend] trait StorageBackendTestsCompletions
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(3), 2L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(3), 2L))
|
||||
)
|
||||
completions <- executeSql(
|
||||
backend.commandCompletions(offset(1), offset(3), someApplicationId, Set(party))
|
||||
completionStorageBackend.commandCompletions(
|
||||
offset(1),
|
||||
offset(3),
|
||||
someApplicationId,
|
||||
Set(party),
|
||||
)
|
||||
)
|
||||
} yield {
|
||||
completions should have length 2
|
||||
@ -119,11 +140,18 @@ private[backend] trait StorageBackendTestsCompletions
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(3), 2L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(3), 2L))
|
||||
)
|
||||
completions <- executeSql(
|
||||
backend.commandCompletions(offset(1), offset(3), someApplicationId, Set(party))
|
||||
completionStorageBackend.commandCompletions(
|
||||
offset(1),
|
||||
offset(3),
|
||||
someApplicationId,
|
||||
Set(party),
|
||||
)
|
||||
)
|
||||
} yield {
|
||||
completions should have length 2
|
||||
@ -161,11 +189,18 @@ private[backend] trait StorageBackendTestsCompletions
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(3), 2L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(3), 2L))
|
||||
)
|
||||
completions <- executeSql(
|
||||
backend.commandCompletions(offset(1), offset(3), someApplicationId, Set(party))
|
||||
completionStorageBackend.commandCompletions(
|
||||
offset(1),
|
||||
offset(3),
|
||||
someApplicationId,
|
||||
Set(party),
|
||||
)
|
||||
)
|
||||
} yield {
|
||||
completions should have length 2
|
||||
@ -200,11 +235,18 @@ private[backend] trait StorageBackendTestsCompletions
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos1, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 1L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 1L))
|
||||
)
|
||||
result <- executeSql(
|
||||
backend.commandCompletions(offset(1), offset(2), someApplicationId, Set(party))
|
||||
completionStorageBackend.commandCompletions(
|
||||
offset(1),
|
||||
offset(2),
|
||||
someApplicationId,
|
||||
Set(party),
|
||||
)
|
||||
).failed
|
||||
} yield {
|
||||
result shouldBe an[IllegalArgumentException]
|
||||
@ -222,9 +264,16 @@ private[backend] trait StorageBackendTestsCompletions
|
||||
|
||||
for {
|
||||
_ <- executeSql(ingest(dtos2, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(3), 2L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(3), 2L))
|
||||
)
|
||||
result <- executeSql(
|
||||
backend.commandCompletions(offset(2), offset(3), someApplicationId, Set(party))
|
||||
completionStorageBackend.commandCompletions(
|
||||
offset(2),
|
||||
offset(3),
|
||||
someApplicationId,
|
||||
Set(party),
|
||||
)
|
||||
).failed
|
||||
} yield {
|
||||
result shouldBe an[IllegalArgumentException]
|
||||
|
@ -172,8 +172,10 @@ trait StorageBackendTestsDBLockForSuite
|
||||
with StorageBackendProvider {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
override def dbLock: DBLockStorageBackend = backend
|
||||
override val dbLock: DBLockStorageBackend = backendFactory.createDBLockStorageBackend
|
||||
|
||||
override def getConnection: Connection =
|
||||
backend.createDataSource(jdbcUrl)(LoggingContext.ForTesting).getConnection
|
||||
backendFactory.createDataSourceStorageBackend
|
||||
.createDataSource(jdbcUrl)(LoggingContext.ForTesting)
|
||||
.getConnection
|
||||
}
|
||||
|
@ -16,6 +16,11 @@ private[backend] trait StorageBackendTestsDeduplication
|
||||
with StorageBackendSpec {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
private val parameterStorageBackend: ParameterStorageBackend =
|
||||
backendFactory.createParameterStorageBackend
|
||||
private val deduplicationStorageBackend: DeduplicationStorageBackend =
|
||||
backendFactory.createDeduplicationStorageBackend
|
||||
|
||||
behavior of "DeduplicationStorageBackend"
|
||||
|
||||
import StorageBackendTestValues._
|
||||
@ -27,13 +32,15 @@ private[backend] trait StorageBackendTestsDeduplication
|
||||
val n = 8
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
insertedRows <- Future.sequence(
|
||||
Vector.fill(n)(
|
||||
executeSql(backend.upsertDeduplicationEntry(key, submittedAt, deduplicateUntil))
|
||||
executeSql(
|
||||
deduplicationStorageBackend.upsertDeduplicationEntry(key, submittedAt, deduplicateUntil)
|
||||
)
|
||||
)
|
||||
foundDeduplicateUntil <- executeSql(backend.deduplicatedUntil(key))
|
||||
)
|
||||
foundDeduplicateUntil <- executeSql(deduplicationStorageBackend.deduplicatedUntil(key))
|
||||
} yield {
|
||||
insertedRows.count(_ == 1) shouldBe 1 // One of the calls inserts a new row
|
||||
insertedRows.count(_ == 0) shouldBe (n - 1) // All other calls don't write anything
|
||||
@ -52,17 +59,23 @@ private[backend] trait StorageBackendTestsDeduplication
|
||||
val n = 8
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
insertedRows <- executeSql(
|
||||
backend.upsertDeduplicationEntry(key, submittedAt, deduplicateUntil)
|
||||
deduplicationStorageBackend.upsertDeduplicationEntry(key, submittedAt, deduplicateUntil)
|
||||
)
|
||||
foundDeduplicateUntil <- executeSql(backend.deduplicatedUntil(key))
|
||||
foundDeduplicateUntil <- executeSql(deduplicationStorageBackend.deduplicatedUntil(key))
|
||||
updatedRows <- Future.sequence(
|
||||
Vector.fill(n)(
|
||||
executeSql(backend.upsertDeduplicationEntry(key, submittedAt2, deduplicateUntil2))
|
||||
executeSql(
|
||||
deduplicationStorageBackend.upsertDeduplicationEntry(
|
||||
key,
|
||||
submittedAt2,
|
||||
deduplicateUntil2,
|
||||
)
|
||||
)
|
||||
foundDeduplicateUntil2 <- executeSql(backend.deduplicatedUntil(key))
|
||||
)
|
||||
)
|
||||
foundDeduplicateUntil2 <- executeSql(deduplicationStorageBackend.deduplicatedUntil(key))
|
||||
} yield {
|
||||
insertedRows shouldBe 1 // First call inserts a new row
|
||||
updatedRows.count(
|
||||
@ -84,15 +97,15 @@ private[backend] trait StorageBackendTestsDeduplication
|
||||
val deduplicateUntil2 = submittedAt2.addMicros(5000L)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
insertedRows <- executeSql(
|
||||
backend.upsertDeduplicationEntry(key, submittedAt, deduplicateUntil)
|
||||
deduplicationStorageBackend.upsertDeduplicationEntry(key, submittedAt, deduplicateUntil)
|
||||
)
|
||||
foundDeduplicateUntil <- executeSql(backend.deduplicatedUntil(key))
|
||||
foundDeduplicateUntil <- executeSql(deduplicationStorageBackend.deduplicatedUntil(key))
|
||||
updatedRows <- executeSql(
|
||||
backend.upsertDeduplicationEntry(key, submittedAt2, deduplicateUntil2)
|
||||
deduplicationStorageBackend.upsertDeduplicationEntry(key, submittedAt2, deduplicateUntil2)
|
||||
)
|
||||
foundDeduplicateUntil2 <- executeSql(backend.deduplicatedUntil(key))
|
||||
foundDeduplicateUntil2 <- executeSql(deduplicationStorageBackend.deduplicatedUntil(key))
|
||||
} yield {
|
||||
insertedRows shouldBe 1 // First call inserts a new row
|
||||
updatedRows shouldBe 0 // Second call doesn't write anything
|
||||
|
@ -13,6 +13,14 @@ private[backend] trait StorageBackendTestsIngestion
|
||||
with StorageBackendSpec {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
private val parameterStorageBackend: ParameterStorageBackend =
|
||||
backendFactory.createParameterStorageBackend
|
||||
private val configurationStorageBackend: ConfigurationStorageBackend =
|
||||
backendFactory.createConfigurationStorageBackend
|
||||
private val partyStorageBackend: PartyStorageBackend = backendFactory.createPartyStorageBackend
|
||||
private val packageStorageBackend: PackageStorageBackend =
|
||||
backendFactory.createPackageStorageBackend
|
||||
|
||||
behavior of "StorageBackend (ingestion)"
|
||||
|
||||
import StorageBackendTestValues._
|
||||
@ -24,11 +32,13 @@ private[backend] trait StorageBackendTestsIngestion
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos, _))
|
||||
configBeforeLedgerEndUpdate <- executeSql(backend.ledgerConfiguration)
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(someOffset, 0)))
|
||||
configAfterLedgerEndUpdate <- executeSql(backend.ledgerConfiguration)
|
||||
configBeforeLedgerEndUpdate <- executeSql(configurationStorageBackend.ledgerConfiguration)
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(someOffset, 0))
|
||||
)
|
||||
configAfterLedgerEndUpdate <- executeSql(configurationStorageBackend.ledgerConfiguration)
|
||||
} yield {
|
||||
// The first query is executed before the ledger end is updated.
|
||||
// It should not see the already ingested configuration change.
|
||||
@ -51,11 +61,13 @@ private[backend] trait StorageBackendTestsIngestion
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos, _))
|
||||
packagesBeforeLedgerEndUpdate <- executeSql(backend.lfPackages)
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(someOffset, 0)))
|
||||
packagesAfterLedgerEndUpdate <- executeSql(backend.lfPackages)
|
||||
packagesBeforeLedgerEndUpdate <- executeSql(packageStorageBackend.lfPackages)
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(someOffset, 0))
|
||||
)
|
||||
packagesAfterLedgerEndUpdate <- executeSql(packageStorageBackend.lfPackages)
|
||||
} yield {
|
||||
// The first query is executed before the ledger end is updated.
|
||||
// It should not see the already ingested package upload.
|
||||
@ -73,11 +85,13 @@ private[backend] trait StorageBackendTestsIngestion
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos, _))
|
||||
partiesBeforeLedgerEndUpdate <- executeSql(backend.knownParties)
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(someOffset, 0)))
|
||||
partiesAfterLedgerEndUpdate <- executeSql(backend.knownParties)
|
||||
partiesBeforeLedgerEndUpdate <- executeSql(partyStorageBackend.knownParties)
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(someOffset, 0))
|
||||
)
|
||||
partiesAfterLedgerEndUpdate <- executeSql(partyStorageBackend.knownParties)
|
||||
} yield {
|
||||
// The first query is executed before the ledger end is updated.
|
||||
// It should not see the already ingested party allocation.
|
||||
|
@ -12,6 +12,9 @@ import org.scalatest.matchers.should.Matchers
|
||||
private[backend] trait StorageBackendTestsInitialization extends Matchers with StorageBackendSpec {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
private val parameterStorageBackend: ParameterStorageBackend =
|
||||
backendFactory.createParameterStorageBackend
|
||||
|
||||
behavior of "StorageBackend (initialization)"
|
||||
|
||||
it should "correctly handle repeated initialization" in {
|
||||
@ -22,7 +25,7 @@ private[backend] trait StorageBackendTestsInitialization extends Matchers with S
|
||||
|
||||
for {
|
||||
_ <- executeSql(
|
||||
backend.initializeParameters(
|
||||
parameterStorageBackend.initializeParameters(
|
||||
ParameterStorageBackend.IdentityParams(
|
||||
ledgerId = ledgerId,
|
||||
participantId = participantId,
|
||||
@ -30,7 +33,7 @@ private[backend] trait StorageBackendTestsInitialization extends Matchers with S
|
||||
)
|
||||
)
|
||||
error1 <- executeSql(
|
||||
backend.initializeParameters(
|
||||
parameterStorageBackend.initializeParameters(
|
||||
ParameterStorageBackend.IdentityParams(
|
||||
ledgerId = otherLedgerId,
|
||||
participantId = participantId,
|
||||
@ -38,7 +41,7 @@ private[backend] trait StorageBackendTestsInitialization extends Matchers with S
|
||||
)
|
||||
).failed
|
||||
error2 <- executeSql(
|
||||
backend.initializeParameters(
|
||||
parameterStorageBackend.initializeParameters(
|
||||
ParameterStorageBackend.IdentityParams(
|
||||
ledgerId = ledgerId,
|
||||
participantId = otherParticipantId,
|
||||
@ -46,7 +49,7 @@ private[backend] trait StorageBackendTestsInitialization extends Matchers with S
|
||||
)
|
||||
).failed
|
||||
error3 <- executeSql(
|
||||
backend.initializeParameters(
|
||||
parameterStorageBackend.initializeParameters(
|
||||
ParameterStorageBackend.IdentityParams(
|
||||
ledgerId = otherLedgerId,
|
||||
participantId = otherParticipantId,
|
||||
@ -54,7 +57,7 @@ private[backend] trait StorageBackendTestsInitialization extends Matchers with S
|
||||
)
|
||||
).failed
|
||||
_ <- executeSql(
|
||||
backend.initializeParameters(
|
||||
parameterStorageBackend.initializeParameters(
|
||||
ParameterStorageBackend.IdentityParams(
|
||||
ledgerId = ledgerId,
|
||||
participantId = participantId,
|
||||
|
@ -15,6 +15,18 @@ private[backend] trait StorageBackendTestsInitializeIngestion
|
||||
with StorageBackendSpec {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
private val parameterStorageBackend: ParameterStorageBackend =
|
||||
backendFactory.createParameterStorageBackend
|
||||
private val configurationStorageBackend: ConfigurationStorageBackend =
|
||||
backendFactory.createConfigurationStorageBackend
|
||||
private val partyStorageBackend: PartyStorageBackend = backendFactory.createPartyStorageBackend
|
||||
private val packageStorageBackend: PackageStorageBackend =
|
||||
backendFactory.createPackageStorageBackend
|
||||
private val ingestionStorageBackend: IngestionStorageBackend[_] =
|
||||
backendFactory.createIngestionStorageBackend
|
||||
private val contractStorageBackend: ContractStorageBackend =
|
||||
backendFactory.createContractStorageBackend
|
||||
|
||||
behavior of "StorageBackend (initializeIngestion)"
|
||||
|
||||
import StorageBackendTestValues._
|
||||
@ -59,46 +71,58 @@ private[backend] trait StorageBackendTestsInitializeIngestion
|
||||
|
||||
for {
|
||||
// Initialize
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
|
||||
// Start the indexer (a no-op in this case)
|
||||
end1 <- executeSql(backend.ledgerEnd)
|
||||
_ <- executeSql(backend.deletePartiallyIngestedData(end1))
|
||||
end1 <- executeSql(parameterStorageBackend.ledgerEnd)
|
||||
_ <- executeSql(ingestionStorageBackend.deletePartiallyIngestedData(end1))
|
||||
|
||||
// Fully insert first batch of updates
|
||||
_ <- executeSql(ingest(dtos1, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ledgerEnd(5, 3L)))
|
||||
_ <- executeSql(parameterStorageBackend.updateLedgerEnd(ledgerEnd(5, 3L)))
|
||||
|
||||
// Partially insert second batch of updates (indexer crashes before updating ledger end)
|
||||
_ <- executeSql(ingest(dtos2, _))
|
||||
|
||||
// Check the contents
|
||||
parties1 <- executeSql(backend.knownParties)
|
||||
config1 <- executeSql(backend.ledgerConfiguration)
|
||||
packages1 <- executeSql(backend.lfPackages)
|
||||
parties1 <- executeSql(partyStorageBackend.knownParties)
|
||||
config1 <- executeSql(configurationStorageBackend.ledgerConfiguration)
|
||||
packages1 <- executeSql(packageStorageBackend.lfPackages)
|
||||
contract41 <- executeSql(
|
||||
backend.activeContractWithoutArgument(readers, ContractId.V0.assertFromString("#4"))
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
readers,
|
||||
ContractId.V0.assertFromString("#4"),
|
||||
)
|
||||
)
|
||||
contract91 <- executeSql(
|
||||
backend.activeContractWithoutArgument(readers, ContractId.V0.assertFromString("#9"))
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
readers,
|
||||
ContractId.V0.assertFromString("#9"),
|
||||
)
|
||||
)
|
||||
|
||||
// Restart the indexer - should delete data from the partial insert above
|
||||
end2 <- executeSql(backend.ledgerEnd)
|
||||
_ <- executeSql(backend.deletePartiallyIngestedData(end2))
|
||||
end2 <- executeSql(parameterStorageBackend.ledgerEnd)
|
||||
_ <- executeSql(ingestionStorageBackend.deletePartiallyIngestedData(end2))
|
||||
|
||||
// Move the ledger end so that any non-deleted data would become visible
|
||||
_ <- executeSql(backend.updateLedgerEnd(ledgerEnd(10, 6L)))
|
||||
_ <- executeSql(parameterStorageBackend.updateLedgerEnd(ledgerEnd(10, 6L)))
|
||||
|
||||
// Check the contents
|
||||
parties2 <- executeSql(backend.knownParties)
|
||||
config2 <- executeSql(backend.ledgerConfiguration)
|
||||
packages2 <- executeSql(backend.lfPackages)
|
||||
parties2 <- executeSql(partyStorageBackend.knownParties)
|
||||
config2 <- executeSql(configurationStorageBackend.ledgerConfiguration)
|
||||
packages2 <- executeSql(packageStorageBackend.lfPackages)
|
||||
contract42 <- executeSql(
|
||||
backend.activeContractWithoutArgument(readers, ContractId.V0.assertFromString("#4"))
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
readers,
|
||||
ContractId.V0.assertFromString("#4"),
|
||||
)
|
||||
)
|
||||
contract92 <- executeSql(
|
||||
backend.activeContractWithoutArgument(readers, ContractId.V0.assertFromString("#9"))
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
readers,
|
||||
ContractId.V0.assertFromString("#9"),
|
||||
)
|
||||
)
|
||||
} yield {
|
||||
parties1 should have length 1
|
||||
|
@ -9,6 +9,11 @@ import org.scalatest.matchers.should.Matchers
|
||||
private[backend] trait StorageBackendTestsIntegrity extends Matchers with StorageBackendSpec {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
private val parameterStorageBackend: ParameterStorageBackend =
|
||||
backendFactory.createParameterStorageBackend
|
||||
private val integrityStorageBackend: IntegrityStorageBackend =
|
||||
backendFactory.createIntegrityStorageBackend
|
||||
|
||||
import StorageBackendTestValues._
|
||||
|
||||
behavior of "IntegrityStorageBackend"
|
||||
@ -20,10 +25,12 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(updates, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(7), 7L)))
|
||||
failure <- executeSql(backend.verifyIntegrity()).failed
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(7), 7L))
|
||||
)
|
||||
failure <- executeSql(integrityStorageBackend.verifyIntegrity()).failed
|
||||
} yield {
|
||||
// Error message should contain the duplicate event sequential id
|
||||
failure.getMessage should include("7")
|
||||
@ -37,10 +44,12 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(updates, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(3), 3L)))
|
||||
failure <- executeSql(backend.verifyIntegrity()).failed
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(3), 3L))
|
||||
)
|
||||
failure <- executeSql(integrityStorageBackend.verifyIntegrity()).failed
|
||||
} yield {
|
||||
failure.getMessage should include("consecutive")
|
||||
}
|
||||
@ -56,10 +65,12 @@ private[backend] trait StorageBackendTestsIntegrity extends Matchers with Storag
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(updates, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 2L)))
|
||||
_ <- executeSql(backend.verifyIntegrity())
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 2L))
|
||||
)
|
||||
_ <- executeSql(integrityStorageBackend.verifyIntegrity())
|
||||
} yield {
|
||||
succeed
|
||||
}
|
||||
|
@ -17,6 +17,12 @@ private[backend] trait StorageBackendTestsMigrationPruning
|
||||
with StorageBackendSpec {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
private val parameterStorageBackend: ParameterStorageBackend =
|
||||
backendFactory.createParameterStorageBackend
|
||||
private val contractStorageBackend: ContractStorageBackend =
|
||||
backendFactory.createContractStorageBackend
|
||||
private val eventStorageBackend: EventStorageBackend = backendFactory.createEventStorageBackend
|
||||
|
||||
import StorageBackendTestValues._
|
||||
|
||||
it should "prune all divulgence events if pruning offset is after migration offset" in {
|
||||
@ -28,20 +34,25 @@ private[backend] trait StorageBackendTestsMigrationPruning
|
||||
val archive = dtoExercise(offset(2), 3L, consuming = true, "#1", submitter)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(Vector(create, divulgence, archive), _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 3L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 3L))
|
||||
)
|
||||
// Simulate that the archive happened after the migration to append-only schema
|
||||
_ <- executeSql(updateMigrationHistoryTable(ledgerSequentialIdBefore = 2))
|
||||
beforePruning <- executeSql(
|
||||
backend.activeContractWithoutArgument(Set(divulgee), ContractId.assertFromString("#1"))
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
Set(divulgee),
|
||||
ContractId.assertFromString("#1"),
|
||||
)
|
||||
)
|
||||
// Check that the divulgee can fetch the divulged event
|
||||
_ <- Future.successful(beforePruning should not be empty)
|
||||
// Trying to prune all divulged contracts before the migration should fail
|
||||
_ <-
|
||||
executeSql(
|
||||
backend.isPruningOffsetValidAgainstMigration(
|
||||
eventStorageBackend.isPruningOffsetValidAgainstMigration(
|
||||
offset(1),
|
||||
pruneAllDivulgedContracts = true,
|
||||
_,
|
||||
@ -49,18 +60,24 @@ private[backend] trait StorageBackendTestsMigrationPruning
|
||||
).map(_ shouldBe false)
|
||||
// Validation passes the pruning offset for all divulged contracts is after the migration
|
||||
_ <- executeSql(
|
||||
backend.isPruningOffsetValidAgainstMigration(
|
||||
eventStorageBackend.isPruningOffsetValidAgainstMigration(
|
||||
offset(2),
|
||||
pruneAllDivulgedContracts = true,
|
||||
_,
|
||||
)
|
||||
).map(_ shouldBe true)
|
||||
_ <- executeSql(
|
||||
backend.pruneEvents(offset(2), pruneAllDivulgedContracts = true)(_, loggingContext)
|
||||
eventStorageBackend.pruneEvents(offset(2), pruneAllDivulgedContracts = true)(
|
||||
_,
|
||||
loggingContext,
|
||||
)
|
||||
)
|
||||
// Ensure the divulged contract is not visible anymore
|
||||
afterPruning <- executeSql(
|
||||
backend.activeContractWithoutArgument(Set(divulgee), ContractId.assertFromString("#1"))
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
Set(divulgee),
|
||||
ContractId.assertFromString("#1"),
|
||||
)
|
||||
)
|
||||
} yield {
|
||||
// Pruning succeeded
|
||||
|
@ -12,6 +12,14 @@ import org.scalatest.matchers.should.Matchers
|
||||
private[backend] trait StorageBackendTestsPruning extends Matchers with StorageBackendSpec {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
private val parameterStorageBackend: ParameterStorageBackend =
|
||||
backendFactory.createParameterStorageBackend
|
||||
private val contractStorageBackend: ContractStorageBackend =
|
||||
backendFactory.createContractStorageBackend
|
||||
private val eventStorageBackend: EventStorageBackend = backendFactory.createEventStorageBackend
|
||||
private val completionStorageBackend: CompletionStorageBackend =
|
||||
backendFactory.createCompletionStorageBackend
|
||||
|
||||
behavior of "StorageBackend (pruning)"
|
||||
|
||||
import StorageBackendTestValues._
|
||||
@ -21,17 +29,17 @@ private[backend] trait StorageBackendTestsPruning extends Matchers with StorageB
|
||||
val offset_2 = offset(2)
|
||||
val offset_3 = offset(4)
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
initialPruningOffset <- executeSql(backend.prunedUpToInclusive)
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
initialPruningOffset <- executeSql(parameterStorageBackend.prunedUpToInclusive)
|
||||
|
||||
_ <- executeSql(backend.updatePrunedUptoInclusive(offset_1))
|
||||
updatedPruningOffset_1 <- executeSql(backend.prunedUpToInclusive)
|
||||
_ <- executeSql(parameterStorageBackend.updatePrunedUptoInclusive(offset_1))
|
||||
updatedPruningOffset_1 <- executeSql(parameterStorageBackend.prunedUpToInclusive)
|
||||
|
||||
_ <- executeSql(backend.updatePrunedUptoInclusive(offset_2))
|
||||
updatedPruningOffset_2 <- executeSql(backend.prunedUpToInclusive)
|
||||
_ <- executeSql(parameterStorageBackend.updatePrunedUptoInclusive(offset_2))
|
||||
updatedPruningOffset_2 <- executeSql(parameterStorageBackend.prunedUpToInclusive)
|
||||
|
||||
_ <- executeSql(backend.updatePrunedUptoInclusive(offset_3))
|
||||
updatedPruningOffset_3 <- executeSql(backend.prunedUpToInclusive)
|
||||
_ <- executeSql(parameterStorageBackend.updatePrunedUptoInclusive(offset_3))
|
||||
updatedPruningOffset_3 <- executeSql(parameterStorageBackend.prunedUpToInclusive)
|
||||
} yield {
|
||||
initialPruningOffset shouldBe empty
|
||||
updatedPruningOffset_1 shouldBe Some(offset_1)
|
||||
@ -46,22 +54,30 @@ private[backend] trait StorageBackendTestsPruning extends Matchers with StorageB
|
||||
val offset_2 = offset(2)
|
||||
val offset_3 = offset(4)
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
initialPruningOffset <- executeSql(backend.participantAllDivulgedContractsPrunedUpToInclusive)
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
initialPruningOffset <- executeSql(
|
||||
parameterStorageBackend.participantAllDivulgedContractsPrunedUpToInclusive
|
||||
)
|
||||
|
||||
_ <- executeSql(backend.updatePrunedAllDivulgedContractsUpToInclusive(offset_1))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updatePrunedAllDivulgedContractsUpToInclusive(offset_1)
|
||||
)
|
||||
updatedPruningOffset_1 <- executeSql(
|
||||
backend.participantAllDivulgedContractsPrunedUpToInclusive
|
||||
parameterStorageBackend.participantAllDivulgedContractsPrunedUpToInclusive
|
||||
)
|
||||
|
||||
_ <- executeSql(backend.updatePrunedAllDivulgedContractsUpToInclusive(offset_2))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updatePrunedAllDivulgedContractsUpToInclusive(offset_2)
|
||||
)
|
||||
updatedPruningOffset_2 <- executeSql(
|
||||
backend.participantAllDivulgedContractsPrunedUpToInclusive
|
||||
parameterStorageBackend.participantAllDivulgedContractsPrunedUpToInclusive
|
||||
)
|
||||
|
||||
_ <- executeSql(backend.updatePrunedAllDivulgedContractsUpToInclusive(offset_3))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updatePrunedAllDivulgedContractsUpToInclusive(offset_3)
|
||||
)
|
||||
updatedPruningOffset_3 <- executeSql(
|
||||
backend.participantAllDivulgedContractsPrunedUpToInclusive
|
||||
parameterStorageBackend.participantAllDivulgedContractsPrunedUpToInclusive
|
||||
)
|
||||
} yield {
|
||||
initialPruningOffset shouldBe empty
|
||||
@ -91,29 +107,34 @@ private[backend] trait StorageBackendTestsPruning extends Matchers with StorageB
|
||||
val range = RangeParams(0L, 2L, None, None)
|
||||
val filter = FilterParams(Set(someParty), Set.empty)
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
// Ingest a create and archive event
|
||||
_ <- executeSql(ingest(Vector(create, archive), _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 2L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 2L))
|
||||
)
|
||||
// Make sure the events are visible
|
||||
before1 <- executeSql(backend.transactionEvents(range, filter))
|
||||
before2 <- executeSql(backend.activeContractEvents(range, filter, offset(1)))
|
||||
before3 <- executeSql(backend.flatTransaction(createTransactionId, filter))
|
||||
before4 <- executeSql(backend.transactionTreeEvents(range, filter))
|
||||
before5 <- executeSql(backend.transactionTree(createTransactionId, filter))
|
||||
before6 <- executeSql(backend.rawEvents(0, 2L))
|
||||
before1 <- executeSql(eventStorageBackend.transactionEvents(range, filter))
|
||||
before2 <- executeSql(eventStorageBackend.activeContractEvents(range, filter, offset(1)))
|
||||
before3 <- executeSql(eventStorageBackend.flatTransaction(createTransactionId, filter))
|
||||
before4 <- executeSql(eventStorageBackend.transactionTreeEvents(range, filter))
|
||||
before5 <- executeSql(eventStorageBackend.transactionTree(createTransactionId, filter))
|
||||
before6 <- executeSql(eventStorageBackend.rawEvents(0, 2L))
|
||||
// Prune
|
||||
_ <- executeSql(
|
||||
backend.pruneEvents(offset(2), pruneAllDivulgedContracts = true)(_, loggingContext)
|
||||
eventStorageBackend.pruneEvents(offset(2), pruneAllDivulgedContracts = true)(
|
||||
_,
|
||||
loggingContext,
|
||||
)
|
||||
_ <- executeSql(backend.updatePrunedUptoInclusive(offset(2)))
|
||||
)
|
||||
_ <- executeSql(parameterStorageBackend.updatePrunedUptoInclusive(offset(2)))
|
||||
// Make sure the events are not visible anymore
|
||||
after1 <- executeSql(backend.transactionEvents(range, filter))
|
||||
after2 <- executeSql(backend.activeContractEvents(range, filter, offset(1)))
|
||||
after3 <- executeSql(backend.flatTransaction(createTransactionId, filter))
|
||||
after4 <- executeSql(backend.transactionTreeEvents(range, filter))
|
||||
after5 <- executeSql(backend.transactionTree(createTransactionId, filter))
|
||||
after6 <- executeSql(backend.rawEvents(0, 2L))
|
||||
after1 <- executeSql(eventStorageBackend.transactionEvents(range, filter))
|
||||
after2 <- executeSql(eventStorageBackend.activeContractEvents(range, filter, offset(1)))
|
||||
after3 <- executeSql(eventStorageBackend.flatTransaction(createTransactionId, filter))
|
||||
after4 <- executeSql(eventStorageBackend.transactionTreeEvents(range, filter))
|
||||
after5 <- executeSql(eventStorageBackend.transactionTree(createTransactionId, filter))
|
||||
after6 <- executeSql(eventStorageBackend.rawEvents(0, 2L))
|
||||
} yield {
|
||||
before1 should not be empty
|
||||
before2 should not be empty
|
||||
@ -145,29 +166,34 @@ private[backend] trait StorageBackendTestsPruning extends Matchers with StorageB
|
||||
val range = RangeParams(0L, 1L, None, None)
|
||||
val filter = FilterParams(Set(someParty), Set.empty)
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
// Ingest a create and archive event
|
||||
_ <- executeSql(ingest(Vector(partyEntry, create), _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 1L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 1L))
|
||||
)
|
||||
// Make sure the events are visible
|
||||
before1 <- executeSql(backend.transactionEvents(range, filter))
|
||||
before2 <- executeSql(backend.activeContractEvents(range, filter, offset(2)))
|
||||
before3 <- executeSql(backend.flatTransaction(createTransactionId, filter))
|
||||
before4 <- executeSql(backend.transactionTreeEvents(range, filter))
|
||||
before5 <- executeSql(backend.transactionTree(createTransactionId, filter))
|
||||
before6 <- executeSql(backend.rawEvents(0, 1L))
|
||||
before1 <- executeSql(eventStorageBackend.transactionEvents(range, filter))
|
||||
before2 <- executeSql(eventStorageBackend.activeContractEvents(range, filter, offset(2)))
|
||||
before3 <- executeSql(eventStorageBackend.flatTransaction(createTransactionId, filter))
|
||||
before4 <- executeSql(eventStorageBackend.transactionTreeEvents(range, filter))
|
||||
before5 <- executeSql(eventStorageBackend.transactionTree(createTransactionId, filter))
|
||||
before6 <- executeSql(eventStorageBackend.rawEvents(0, 1L))
|
||||
// Prune
|
||||
_ <- executeSql(
|
||||
backend.pruneEvents(offset(2), pruneAllDivulgedContracts = true)(_, loggingContext)
|
||||
eventStorageBackend.pruneEvents(offset(2), pruneAllDivulgedContracts = true)(
|
||||
_,
|
||||
loggingContext,
|
||||
)
|
||||
_ <- executeSql(backend.updatePrunedUptoInclusive(offset(2)))
|
||||
)
|
||||
_ <- executeSql(parameterStorageBackend.updatePrunedUptoInclusive(offset(2)))
|
||||
// Make sure the events are still visible - active contracts should not be pruned
|
||||
after1 <- executeSql(backend.transactionEvents(range, filter))
|
||||
after2 <- executeSql(backend.activeContractEvents(range, filter, offset(2)))
|
||||
after3 <- executeSql(backend.flatTransaction(createTransactionId, filter))
|
||||
after4 <- executeSql(backend.transactionTreeEvents(range, filter))
|
||||
after5 <- executeSql(backend.transactionTree(createTransactionId, filter))
|
||||
after6 <- executeSql(backend.rawEvents(0, 1L))
|
||||
after1 <- executeSql(eventStorageBackend.transactionEvents(range, filter))
|
||||
after2 <- executeSql(eventStorageBackend.activeContractEvents(range, filter, offset(2)))
|
||||
after3 <- executeSql(eventStorageBackend.flatTransaction(createTransactionId, filter))
|
||||
after4 <- executeSql(eventStorageBackend.transactionTreeEvents(range, filter))
|
||||
after5 <- executeSql(eventStorageBackend.transactionTree(createTransactionId, filter))
|
||||
after6 <- executeSql(eventStorageBackend.rawEvents(0, 1L))
|
||||
} yield {
|
||||
before1 should not be empty
|
||||
before2 should not be empty
|
||||
@ -214,7 +240,7 @@ private[backend] trait StorageBackendTestsPruning extends Matchers with StorageB
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
// Ingest
|
||||
_ <- executeSql(
|
||||
ingest(
|
||||
@ -227,30 +253,35 @@ private[backend] trait StorageBackendTestsPruning extends Matchers with StorageB
|
||||
_,
|
||||
)
|
||||
)
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(4), 4L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(4), 4L))
|
||||
)
|
||||
contract1_beforePruning <- executeSql(
|
||||
backend.activeContractWithoutArgument(
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
Set(divulgee),
|
||||
ContractId.assertFromString(contract1_id),
|
||||
)
|
||||
)
|
||||
contract2_beforePruning <- executeSql(
|
||||
backend.activeContractWithoutArgument(
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
Set(divulgee),
|
||||
ContractId.assertFromString(contract2_id),
|
||||
)
|
||||
)
|
||||
_ <- executeSql(
|
||||
backend.pruneEvents(offset(3), pruneAllDivulgedContracts = true)(_, loggingContext)
|
||||
eventStorageBackend.pruneEvents(offset(3), pruneAllDivulgedContracts = true)(
|
||||
_,
|
||||
loggingContext,
|
||||
)
|
||||
)
|
||||
contract1_afterPruning <- executeSql(
|
||||
backend.activeContractWithoutArgument(
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
Set(divulgee),
|
||||
ContractId.assertFromString(contract1_id),
|
||||
)
|
||||
)
|
||||
contract2_afterPruning <- executeSql(
|
||||
backend.activeContractWithoutArgument(
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
Set(divulgee),
|
||||
ContractId.assertFromString(contract2_id),
|
||||
)
|
||||
@ -298,7 +329,7 @@ private[backend] trait StorageBackendTestsPruning extends Matchers with StorageB
|
||||
)
|
||||
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
// Ingest
|
||||
_ <- executeSql(
|
||||
ingest(
|
||||
@ -312,30 +343,35 @@ private[backend] trait StorageBackendTestsPruning extends Matchers with StorageB
|
||||
)
|
||||
)
|
||||
// Set the ledger end past the last ingested event so we can prune up to it inclusively
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(5), 5L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(5), 5L))
|
||||
)
|
||||
contract1_beforePruning <- executeSql(
|
||||
backend.activeContractWithoutArgument(
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
Set(divulgee),
|
||||
ContractId.assertFromString(contract1_id),
|
||||
)
|
||||
)
|
||||
contract2_beforePruning <- executeSql(
|
||||
backend.activeContractWithoutArgument(
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
Set(divulgee),
|
||||
ContractId.assertFromString(contract2_id),
|
||||
)
|
||||
)
|
||||
_ <- executeSql(
|
||||
backend.pruneEvents(offset(4), pruneAllDivulgedContracts = false)(_, loggingContext)
|
||||
eventStorageBackend.pruneEvents(offset(4), pruneAllDivulgedContracts = false)(
|
||||
_,
|
||||
loggingContext,
|
||||
)
|
||||
)
|
||||
contract1_afterPruning <- executeSql(
|
||||
backend.activeContractWithoutArgument(
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
Set(divulgee),
|
||||
ContractId.assertFromString(contract1_id),
|
||||
)
|
||||
)
|
||||
contract2_afterPruning <- executeSql(
|
||||
backend.activeContractWithoutArgument(
|
||||
contractStorageBackend.activeContractWithoutArgument(
|
||||
Set(divulgee),
|
||||
ContractId.assertFromString(contract2_id),
|
||||
)
|
||||
@ -362,20 +398,32 @@ private[backend] trait StorageBackendTestsPruning extends Matchers with StorageB
|
||||
)
|
||||
val applicationId = dtoApplicationId(completion)
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
// Ingest a completion
|
||||
_ <- executeSql(ingest(Vector(completion), _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(1), 1L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(1), 1L))
|
||||
)
|
||||
// Make sure the completion is visible
|
||||
before <- executeSql(
|
||||
backend.commandCompletions(offset(0), offset(1), applicationId, Set(someParty))
|
||||
completionStorageBackend.commandCompletions(
|
||||
offset(0),
|
||||
offset(1),
|
||||
applicationId,
|
||||
Set(someParty),
|
||||
)
|
||||
)
|
||||
// Prune
|
||||
_ <- executeSql(backend.pruneCompletions(offset(1))(_, loggingContext))
|
||||
_ <- executeSql(backend.updatePrunedUptoInclusive(offset(1)))
|
||||
_ <- executeSql(completionStorageBackend.pruneCompletions(offset(1))(_, loggingContext))
|
||||
_ <- executeSql(parameterStorageBackend.updatePrunedUptoInclusive(offset(1)))
|
||||
// Make sure the completion is not visible anymore
|
||||
after <- executeSql(
|
||||
backend.commandCompletions(offset(0), offset(1), applicationId, Set(someParty))
|
||||
completionStorageBackend.commandCompletions(
|
||||
offset(0),
|
||||
offset(1),
|
||||
applicationId,
|
||||
Set(someParty),
|
||||
)
|
||||
)
|
||||
} yield {
|
||||
before should not be empty
|
||||
|
@ -11,18 +11,29 @@ import scala.concurrent.Future
|
||||
private[backend] trait StorageBackendTestsReset extends Matchers with StorageBackendSpec {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
private val parameterStorageBackend: ParameterStorageBackend =
|
||||
backendFactory.createParameterStorageBackend
|
||||
private val configurationStorageBackend: ConfigurationStorageBackend =
|
||||
backendFactory.createConfigurationStorageBackend
|
||||
private val partyStorageBackend: PartyStorageBackend = backendFactory.createPartyStorageBackend
|
||||
private val packageStorageBackend: PackageStorageBackend =
|
||||
backendFactory.createPackageStorageBackend
|
||||
private val contractStorageBackend: ContractStorageBackend =
|
||||
backendFactory.createContractStorageBackend
|
||||
private val resetStorageBackend: ResetStorageBackend = backendFactory.createResetStorageBackend
|
||||
|
||||
behavior of "StorageBackend (reset)"
|
||||
|
||||
import StorageBackendTestValues._
|
||||
|
||||
it should "start with an empty index" in {
|
||||
for {
|
||||
identity <- executeSql(backend.ledgerIdentity)
|
||||
end <- executeSql(backend.ledgerEnd)
|
||||
parties <- executeSql(backend.knownParties)
|
||||
config <- executeSql(backend.ledgerConfiguration)
|
||||
packages <- executeSql(backend.lfPackages)
|
||||
events <- executeSql(backend.contractStateEvents(0, Long.MaxValue))
|
||||
identity <- executeSql(parameterStorageBackend.ledgerIdentity)
|
||||
end <- executeSql(parameterStorageBackend.ledgerEnd)
|
||||
parties <- executeSql(partyStorageBackend.knownParties)
|
||||
config <- executeSql(configurationStorageBackend.ledgerConfiguration)
|
||||
packages <- executeSql(packageStorageBackend.lfPackages)
|
||||
events <- executeSql(contractStorageBackend.contractStateEvents(0, Long.MaxValue))
|
||||
} yield {
|
||||
identity shouldBe None
|
||||
end shouldBe None
|
||||
@ -36,9 +47,9 @@ private[backend] trait StorageBackendTestsReset extends Matchers with StorageBac
|
||||
it should "not see any data after advancing the ledger end" in {
|
||||
for {
|
||||
_ <- advanceLedgerEndToMakeOldDataVisible()
|
||||
parties <- executeSql(backend.knownParties)
|
||||
config <- executeSql(backend.ledgerConfiguration)
|
||||
packages <- executeSql(backend.lfPackages)
|
||||
parties <- executeSql(partyStorageBackend.knownParties)
|
||||
config <- executeSql(configurationStorageBackend.ledgerConfiguration)
|
||||
packages <- executeSql(packageStorageBackend.lfPackages)
|
||||
} yield {
|
||||
parties shouldBe empty
|
||||
packages shouldBe empty
|
||||
@ -66,23 +77,23 @@ private[backend] trait StorageBackendTestsReset extends Matchers with StorageBac
|
||||
|
||||
for {
|
||||
// Initialize and insert some data
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ledgerEnd(5, 3L)))
|
||||
_ <- executeSql(parameterStorageBackend.updateLedgerEnd(ledgerEnd(5, 3L)))
|
||||
|
||||
// Reset
|
||||
_ <- executeSql(backend.reset)
|
||||
_ <- executeSql(resetStorageBackend.reset)
|
||||
|
||||
// Check the contents
|
||||
identity <- executeSql(backend.ledgerIdentity)
|
||||
end <- executeSql(backend.ledgerEnd)
|
||||
events <- executeSql(backend.contractStateEvents(0, Long.MaxValue))
|
||||
identity <- executeSql(parameterStorageBackend.ledgerIdentity)
|
||||
end <- executeSql(parameterStorageBackend.ledgerEnd)
|
||||
events <- executeSql(contractStorageBackend.contractStateEvents(0, Long.MaxValue))
|
||||
|
||||
// Check the contents (queries that don't read beyond ledger end)
|
||||
_ <- advanceLedgerEndToMakeOldDataVisible()
|
||||
parties <- executeSql(backend.knownParties)
|
||||
config <- executeSql(backend.ledgerConfiguration)
|
||||
packages <- executeSql(backend.lfPackages)
|
||||
parties <- executeSql(partyStorageBackend.knownParties)
|
||||
config <- executeSql(configurationStorageBackend.ledgerConfiguration)
|
||||
packages <- executeSql(packageStorageBackend.lfPackages)
|
||||
} yield {
|
||||
identity shouldBe None
|
||||
end shouldBe None
|
||||
@ -113,23 +124,23 @@ private[backend] trait StorageBackendTestsReset extends Matchers with StorageBac
|
||||
|
||||
for {
|
||||
// Initialize and insert some data
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(ingest(dtos, _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ledgerEnd(5, 3L)))
|
||||
_ <- executeSql(parameterStorageBackend.updateLedgerEnd(ledgerEnd(5, 3L)))
|
||||
|
||||
// Reset
|
||||
_ <- executeSql(backend.resetAll)
|
||||
_ <- executeSql(resetStorageBackend.resetAll)
|
||||
|
||||
// Check the contents (queries that do not depend on ledger end)
|
||||
identity <- executeSql(backend.ledgerIdentity)
|
||||
end <- executeSql(backend.ledgerEnd)
|
||||
events <- executeSql(backend.contractStateEvents(0, Long.MaxValue))
|
||||
identity <- executeSql(parameterStorageBackend.ledgerIdentity)
|
||||
end <- executeSql(parameterStorageBackend.ledgerEnd)
|
||||
events <- executeSql(contractStorageBackend.contractStateEvents(0, Long.MaxValue))
|
||||
|
||||
// Check the contents (queries that don't read beyond ledger end)
|
||||
_ <- advanceLedgerEndToMakeOldDataVisible()
|
||||
parties <- executeSql(backend.knownParties)
|
||||
config <- executeSql(backend.ledgerConfiguration)
|
||||
packages <- executeSql(backend.lfPackages)
|
||||
parties <- executeSql(partyStorageBackend.knownParties)
|
||||
config <- executeSql(configurationStorageBackend.ledgerConfiguration)
|
||||
packages <- executeSql(packageStorageBackend.lfPackages)
|
||||
} yield {
|
||||
identity shouldBe None
|
||||
end shouldBe None
|
||||
@ -145,8 +156,8 @@ private[backend] trait StorageBackendTestsReset extends Matchers with StorageBac
|
||||
// queries now find any left-over data not cleaned by reset.
|
||||
private def advanceLedgerEndToMakeOldDataVisible(): Future[Unit] = {
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ledgerEnd(10000, 10000)))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.updateLedgerEnd(ledgerEnd(10000, 10000)))
|
||||
} yield ()
|
||||
}
|
||||
}
|
||||
|
@ -15,6 +15,12 @@ import scala.util.Success
|
||||
private[backend] trait StorageBackendTestsTimestamps extends Matchers with StorageBackendSpec {
|
||||
this: AsyncFlatSpec =>
|
||||
|
||||
private val parameterStorageBackend: ParameterStorageBackend =
|
||||
backendFactory.createParameterStorageBackend
|
||||
private val eventStorageBackend: EventStorageBackend = backendFactory.createEventStorageBackend
|
||||
private val contractStorageBackend: ContractStorageBackend =
|
||||
backendFactory.createContractStorageBackend
|
||||
|
||||
behavior of "StorageBackend (timestamps)"
|
||||
|
||||
import StorageBackendTestValues._
|
||||
@ -29,14 +35,20 @@ private[backend] trait StorageBackendTestsTimestamps extends Matchers with Stora
|
||||
ledgerEffectiveTime = Some(let),
|
||||
)
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
|
||||
_ <- executeSql(ingest(Vector(create), _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(1), 1L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(1), 1L))
|
||||
)
|
||||
|
||||
let1 <- executeSql(backend.maximumLedgerTime(Set(cid)))
|
||||
let2 <- executeSql(withDefaultTimeZone("GMT-1")(backend.maximumLedgerTime(Set(cid))))
|
||||
let3 <- executeSql(withDefaultTimeZone("GMT+1")(backend.maximumLedgerTime(Set(cid))))
|
||||
let1 <- executeSql(contractStorageBackend.maximumLedgerTime(Set(cid)))
|
||||
let2 <- executeSql(
|
||||
withDefaultTimeZone("GMT-1")(contractStorageBackend.maximumLedgerTime(Set(cid)))
|
||||
)
|
||||
let3 <- executeSql(
|
||||
withDefaultTimeZone("GMT+1")(contractStorageBackend.maximumLedgerTime(Set(cid)))
|
||||
)
|
||||
} yield {
|
||||
withClue("UTC") { let1 shouldBe Success(Some(let)) }
|
||||
withClue("GMT-1") { let2 shouldBe Success(Some(let)) }
|
||||
@ -54,14 +66,16 @@ private[backend] trait StorageBackendTestsTimestamps extends Matchers with Stora
|
||||
ledgerEffectiveTime = Some(let),
|
||||
)
|
||||
for {
|
||||
_ <- executeSql(backend.initializeParameters(someIdentityParams))
|
||||
_ <- executeSql(parameterStorageBackend.initializeParameters(someIdentityParams))
|
||||
|
||||
_ <- executeSql(ingest(Vector(create), _))
|
||||
_ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(1), 1L)))
|
||||
_ <- executeSql(
|
||||
parameterStorageBackend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(1), 1L))
|
||||
)
|
||||
|
||||
events1 <- executeSql(backend.rawEvents(0L, 1L))
|
||||
events2 <- executeSql(withDefaultTimeZone("GMT-1")(backend.rawEvents(0L, 1L)))
|
||||
events3 <- executeSql(withDefaultTimeZone("GMT+1")(backend.rawEvents(0L, 1L)))
|
||||
events1 <- executeSql(eventStorageBackend.rawEvents(0L, 1L))
|
||||
events2 <- executeSql(withDefaultTimeZone("GMT-1")(eventStorageBackend.rawEvents(0L, 1L)))
|
||||
events3 <- executeSql(withDefaultTimeZone("GMT+1")(eventStorageBackend.rawEvents(0L, 1L)))
|
||||
} yield {
|
||||
withClue("UTC") { events1.head.ledgerEffectiveTime shouldBe Some(let) }
|
||||
withClue("GMT-1") { events2.head.ledgerEffectiveTime shouldBe Some(let) }
|
||||
|
@ -26,7 +26,8 @@ class SequentialWriteDaoSpec extends AnyFlatSpec with Matchers {
|
||||
val storageBackendCaptor = new StorageBackendCaptor(Some(LedgerEnd(Offset.beforeBegin, 5)))
|
||||
val ledgerEndCache = MutableLedgerEndCache()
|
||||
val testee = SequentialWriteDaoImpl(
|
||||
storageBackend = storageBackendCaptor,
|
||||
parameterStorageBackend = storageBackendCaptor,
|
||||
ingestionStorageBackend = storageBackendCaptor,
|
||||
updateToDbDtos = updateToDbDtoFixture,
|
||||
ledgerEndCache = ledgerEndCache,
|
||||
)
|
||||
@ -62,7 +63,8 @@ class SequentialWriteDaoSpec extends AnyFlatSpec with Matchers {
|
||||
val storageBackendCaptor = new StorageBackendCaptor(None)
|
||||
val ledgerEndCache = MutableLedgerEndCache()
|
||||
val testee = SequentialWriteDaoImpl(
|
||||
storageBackend = storageBackendCaptor,
|
||||
parameterStorageBackend = storageBackendCaptor,
|
||||
ingestionStorageBackend = storageBackendCaptor,
|
||||
updateToDbDtos = updateToDbDtoFixture,
|
||||
ledgerEndCache = ledgerEndCache,
|
||||
)
|
||||
|
@ -27,8 +27,10 @@ final class PostCommitValidationSpec extends AnyWordSpec with Matchers {
|
||||
|
||||
"PostCommitValidation" when {
|
||||
"run without prior history" should {
|
||||
val fixture = noCommittedContract(parties = List.empty)
|
||||
val store = new PostCommitValidation.BackedBy(
|
||||
noCommittedContract(parties = List.empty),
|
||||
fixture,
|
||||
fixture,
|
||||
validatePartyAllocation = false,
|
||||
)
|
||||
|
||||
@ -257,8 +259,7 @@ final class PostCommitValidationSpec extends AnyWordSpec with Matchers {
|
||||
val committedContractLedgerEffectiveTime =
|
||||
Timestamp.assertFromInstant(Instant.ofEpochMilli(1000))
|
||||
|
||||
val store = new PostCommitValidation.BackedBy(
|
||||
committedContracts(
|
||||
val fixture = committedContracts(
|
||||
parties = List.empty,
|
||||
contractFixture = committed(
|
||||
id = committedContract.coid.coid,
|
||||
@ -267,7 +268,10 @@ final class PostCommitValidationSpec extends AnyWordSpec with Matchers {
|
||||
GlobalKey.assertBuild(committedContract.templateId, x.key)
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
val store = new PostCommitValidation.BackedBy(
|
||||
fixture,
|
||||
fixture,
|
||||
validatePartyAllocation = false,
|
||||
)
|
||||
|
||||
@ -422,11 +426,13 @@ final class PostCommitValidationSpec extends AnyWordSpec with Matchers {
|
||||
val divulgedContract = genTestCreate()
|
||||
val exerciseOnDivulgedContract = genTestExercise(divulgedContract)
|
||||
|
||||
val store = new PostCommitValidation.BackedBy(
|
||||
committedContracts(
|
||||
val fixture = committedContracts(
|
||||
parties = List.empty,
|
||||
contractFixture = divulged(divulgedContract.coid.coid),
|
||||
),
|
||||
)
|
||||
val store = new PostCommitValidation.BackedBy(
|
||||
fixture,
|
||||
fixture,
|
||||
validatePartyAllocation = false,
|
||||
)
|
||||
|
||||
@ -453,6 +459,7 @@ final class PostCommitValidationSpec extends AnyWordSpec with Matchers {
|
||||
|
||||
"run with unallocated parties" should {
|
||||
val store = new PostCommitValidation.BackedBy(
|
||||
noCommittedContract(List.empty),
|
||||
noCommittedContract(List.empty),
|
||||
validatePartyAllocation = true,
|
||||
)
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
package com.daml.platform.store.backend
|
||||
|
||||
import com.daml.platform.store.backend.postgresql.PostgresStorageBackend
|
||||
import com.daml.platform.store.backend.postgresql.PostgresDataSourceStorageBackend
|
||||
import org.scalatest.Inside
|
||||
import org.scalatest.flatspec.AsyncFlatSpec
|
||||
|
||||
@ -18,7 +18,7 @@ final class StorageBackendPostgresSpec
|
||||
|
||||
it should "find the Postgres version" in {
|
||||
for {
|
||||
version <- executeSql(PostgresStorageBackend.getPostgresVersion)
|
||||
version <- executeSql(PostgresDataSourceStorageBackend.getPostgresVersion)
|
||||
} yield {
|
||||
inside(version) { case Some(versionNumbers) =>
|
||||
// Minimum Postgres version used in tests
|
||||
@ -29,9 +29,9 @@ final class StorageBackendPostgresSpec
|
||||
}
|
||||
|
||||
it should "correctly parse a Postgres version" in {
|
||||
PostgresStorageBackend.parsePostgresVersion("1.2") shouldBe Some((1, 2))
|
||||
PostgresStorageBackend.parsePostgresVersion("1.2.3") shouldBe Some((1, 2))
|
||||
PostgresStorageBackend.parsePostgresVersion("1.2.3-alpha.4.5") shouldBe Some((1, 2))
|
||||
PostgresStorageBackend.parsePostgresVersion("10.11") shouldBe Some((10, 11))
|
||||
PostgresDataSourceStorageBackend.parsePostgresVersion("1.2") shouldBe Some((1, 2))
|
||||
PostgresDataSourceStorageBackend.parsePostgresVersion("1.2.3") shouldBe Some((1, 2))
|
||||
PostgresDataSourceStorageBackend.parsePostgresVersion("1.2.3-alpha.4.5") shouldBe Some((1, 2))
|
||||
PostgresDataSourceStorageBackend.parsePostgresVersion("10.11") shouldBe Some((10, 11))
|
||||
}
|
||||
}
|
||||
|
@ -6,43 +6,43 @@ package com.daml.platform.store.backend.h2
|
||||
import org.scalatest.matchers.should.Matchers
|
||||
import org.scalatest.wordspec.AnyWordSpec
|
||||
|
||||
class H2StorageBackendSpec extends AnyWordSpec with Matchers {
|
||||
class H2DataSourceStorageBackendSpec extends AnyWordSpec with Matchers {
|
||||
|
||||
"H2StorageBackend" should {
|
||||
"extractUserPasswordAndRemoveFromUrl" should {
|
||||
|
||||
"strip user from url with user" in {
|
||||
H2StorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
H2DataSourceStorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
"url;user=harry"
|
||||
) shouldBe (("url", Some("harry"), None))
|
||||
}
|
||||
|
||||
"strip user from url with user in the middle" in {
|
||||
H2StorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
H2DataSourceStorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
"url;user=harry;password=weak"
|
||||
) shouldBe (("url", Some("harry"), Some("weak")))
|
||||
}
|
||||
|
||||
"only strip password if user absent" in {
|
||||
H2StorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
H2DataSourceStorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
"url;password=weak"
|
||||
) shouldBe (("url", None, Some("weak")))
|
||||
}
|
||||
|
||||
"not touch other properties" in {
|
||||
H2StorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
H2DataSourceStorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
"url;alpha=1;beta=2;gamma=3"
|
||||
) shouldBe (("url;alpha=1;beta=2;gamma=3", None, None))
|
||||
}
|
||||
|
||||
"match upper-case user and password keys" in {
|
||||
H2StorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
H2DataSourceStorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
"url;USER=sally;PASSWORD=supersafe"
|
||||
) shouldBe (("url", Some("sally"), Some("supersafe")))
|
||||
}
|
||||
|
||||
"match mixed-case user and password keys" in {
|
||||
H2StorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
H2DataSourceStorageBackend.extractUserPasswordAndRemoveFromUrl(
|
||||
"url;User=sally;Password=supersafe"
|
||||
) shouldBe (("url", Some("sally"), Some("supersafe")))
|
||||
}
|
Loading…
Reference in New Issue
Block a user