extractor: >50MiB module loading test (#1551)

Test that a 51MiB DAML module makes the extractor fail when the inbound
message size limit is set to 50MiB, then that it succeeds at 60MiB. The
module's source code is generated by a Bazel genrule that shells out.

This adds about a minute of build time for the module alone: roughly 20s
to generate and 40s to compile. A later PR will reduce that by lowering
the size bracket. See #1551 for more.
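
As a rough back-of-envelope for the module's size (an estimate derived from
the genrule in the BUILD diff below; the 51MiB figure is the compiled module,
so this bounds only the raw source):

object SizeEstimate extends App {
  // Sketch: approximate size of the generated DAML source. The numbers
  // mirror the genrule below; the compiled module lands a bit above 50MiB.
  val files        = 71                      // Blob1.daml .. Blob71.daml
  val defsPerFile  = 4096                    // x1 .. x4096 per module
  val payloadChars = 16 * 8                  // 16 x "qqqqqqqq" = 128 chars
  val overhead     = "x9999 = \"\"\n".length // name, " = ", quotes, newline
  val totalBytes   = files.toLong * defsPerFile * (payloadChars + overhead)
  println(f"~${totalBytes / (1024.0 * 1024.0)}%.1f MiB of source") // ~38.6 MiB
}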

* generate "code" for VeryLargeArchive and compile to dar

* test cases for failing and succeeding based on inbound size threshold

* proper assertions in the failure case

* separate PG instance for each VeryLargeArchiveSpec test

* comment on the size choices
Stephen Compall 2019-06-07 11:49:30 -04:00 committed by GitHub
parent 9b5f152f18
commit e5fb5e0025
3 changed files with 102 additions and 1 deletion


@@ -56,6 +56,44 @@ TEST_DARS = [
for darmod in TEST_DARS
]
genrule(
    name = "VeryLargeArchive_src",
    outs = ["VeryLargeArchive/Blobs.daml"] + ["VeryLargeArchive/Blob%s.daml" % n for n in range(
        1,
        71 + 1,
    )],
    cmd =
        '''
filecount=71
outs=($(OUTS))
main="$${outs[0]}"
echo 'daml 1.2
module VeryLargeArchive.Blobs where
import VeryLargeArchive.Blob1' > "$$main"
firstfil="$${outs[1]}"
echo 'daml 1.2
module VeryLargeArchive.Blob1 where
' > "$$firstfil"
{ for linen in `seq 1 4096`; do
    echo -n "x$$linen = "\\"
    for charn in `seq 1 16`; do
      echo -n qqqqqqqq
    done;
    echo \\"
  done; } >> $$firstfil
for filen in `seq 2 $$filecount`; do
  echo "import VeryLargeArchive.Blob$$filen" >> "$$main"
  sed -e '2s/^\\(module .*\\)1/\\1'$$filen/ "$$firstfil" > "$${outs[$$filen]}"
done
''',
)

daml_compile(
    name = "VeryLargeArchive",
    srcs = [":VeryLargeArchive_src"],
    main_src = ":VeryLargeArchive/Blobs.daml",
)
testDependencies = [
    ":extractor",
    "//daml-lf/transaction-scalacheck",
@@ -105,6 +143,7 @@ da_scala_test_suite(
"//extractor:PrimitiveTypes.dar",
"//extractor:RecordsAndVariants.dar",
"//extractor:TransactionExample.dar",
"//extractor:VeryLargeArchive.dar",
],
resources = glob(["src/test/resources/**/*"]),
deps = [
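
For orientation, the genrule above writes sources of the following shape
(reconstructed from the script; the generated files themselves are not shown
in the diff). VeryLargeArchive/Blobs.daml is just an import hub:

daml 1.2
module VeryLargeArchive.Blobs where
import VeryLargeArchive.Blob1
import VeryLargeArchive.Blob2
-- ... one import per generated module, through ...
import VeryLargeArchive.Blob71

and each VeryLargeArchive/BlobN.daml carries 4096 definitions of
128-character strings:

daml 1.2
module VeryLargeArchive.Blob1 where

x1 = "qqqqqqqq...qqqqqqqq" -- 128 q characters per definition
-- ... through ...
x4096 = "qqqqqqqq...qqqqqqqq"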


@@ -43,7 +43,7 @@ trait ExtractorFixture extends SandboxFixture with PostgresAround with Types {
  protected def configureExtractor(ec: ExtractorConfig): ExtractorConfig = ec
-  protected lazy val target: PostgreSQLTarget = PostgreSQLTarget(
+  protected def target: PostgreSQLTarget = PostgreSQLTarget(
    connectUrl = postgresFixture.jdbcUrl,
    user = "test",
    password = "",
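
The switch from lazy val to def is what makes a separate PostgreSQL instance
per VeryLargeArchiveSpec test work: a lazy val would cache the first
postgresFixture.jdbcUrl it ever saw, while a def re-reads it for each test. A
minimal sketch of the difference, with illustrative names rather than the
project's code:

object LazyValVsDef extends App {
  // Stand-in for a per-test database fixture that swaps URLs between tests
  // (illustrative; not the extractor's actual fixture).
  class Fixture {
    var jdbcUrl = "jdbc:postgresql://localhost:5432/test1"
    lazy val cachedTarget: String = jdbcUrl // evaluated once, then frozen
    def freshTarget: String = jdbcUrl       // re-evaluated on every access
  }
  val f = new Fixture
  println(f.cachedTarget) // .../test1
  f.jdbcUrl = "jdbc:postgresql://localhost:5432/test2" // next test's database
  println(f.cachedTarget) // still .../test1: stale, points at the old instance
  println(f.freshTarget)  // .../test2: follows the current fixture
}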


@@ -0,0 +1,62 @@
// Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.extractor
import com.digitalasset.extractor.services.ExtractorFixture
import com.digitalasset.ledger.api.testing.utils.SuiteResourceManagementAroundAll
import com.digitalasset.platform.sandbox.persistence.PostgresAroundEach
import scala.concurrent.Await
import scala.concurrent.duration._
import io.grpc.StatusRuntimeException
import org.scalatest._
import java.io.File
@SuppressWarnings(Array("org.wartremover.warts.Any"))
class VeryLargeArchiveSpec
    extends FlatSpec
    with Suite
    with PostgresAroundEach
    with SuiteResourceManagementAroundAll
    with ExtractorFixture
    with Matchers {
  override protected def darFile = new File("extractor/VeryLargeArchive.dar")

  private def runWithInboundLimit[Z](bytes: Int)(f: => Z): Z = {
    val config = baseConfig.copy(ledgerPort = getSandboxPort, ledgerInboundMessageSizeMax = bytes)
    val extractor = new Extractor(config, target)
    Await.result(extractor.run(), Duration.Inf) // as with ExtractorFixture#run
    try f
    finally Await.result(extractor.shutdown(), Duration.Inf) // as with ExtractorFixture#kill
  }
  // there are a couple of goals with these choices:
  // 1. ensure that we can actually observe failure when the limit is too low
  // 2. ensure that no other system we aren't reconfiguring imposes a limit
  //    similar to the original 50MiB limit
  //
  // A smaller test case (with smaller numbers below) would satisfy 1, but not 2.
  //
  // That said, the only purpose is to *ensure that failure can be observed*;
  // future editors of this test should not feel obliged to synthesize a failure
  // if the system design has really changed so failures of this nature cannot
  // happen.
  val failMB = 50
  val successMB = 60

  s"${failMB}MiB" should "fail" in {
    val e = the[StatusRuntimeException] thrownBy runWithInboundLimit(failMB * 1024 * 1024) {
      fail("shouldn't successfully run")
    }
    e.getStatus.getCode should ===(io.grpc.Status.Code.RESOURCE_EXHAUSTED)
    e.getStatus.getDescription should startWith("gRPC message exceeds maximum size")
  }

  s"${successMB}MiB" should "succeed" in {
    runWithInboundLimit(successMB * 1024 * 1024) {
      ()
    }
  }
}
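
For context on the failure-case assertions: RESOURCE_EXHAUSTED with a
description starting "gRPC message exceeds maximum size" is what grpc-java
reports when a received message overruns the channel's inbound cap. A sketch
of the knob the test exercises; that ledgerInboundMessageSizeMax is wired to
this builder setting is an assumption, and the address is illustrative:

import io.grpc.ManagedChannel
import io.grpc.netty.NettyChannelBuilder

object InboundCapSketch {
  // maxInboundMessageSize is grpc-java's per-message cap on received
  // messages; a reply that exceeds it fails the call with RESOURCE_EXHAUSTED.
  def channelWithCap(bytes: Int): ManagedChannel =
    NettyChannelBuilder
      .forAddress("localhost", 6865) // illustrative sandbox address
      .usePlaintext()
      .maxInboundMessageSize(bytes)  // e.g. successMB * 1024 * 1024
      .build()
}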