Implement Decimal support for Postgres backend (#10216)

* treat scale nothing as unspecified

* cast to decimal

* float int biginteger

* conversion failure ints

* loss of decimal precision

* precision loss for mixed column to float

* mixed columns

* loss of precision on inexact float conversion

* cleanup, reuse

* changelog

* review

* no fits bd

* no warning on 0.1 conversion

* fmt

* big_decimal_fetcher

* default fetcher and statement setting

* round-trip d

* fix warning

* expr +10

* double builder retype to bigdecimal

* Use BD fetcher for underspecified postgres numeric column, not inferred builder, and do not use biginteger builder for integral bigdecimal values

* fix tests

* fix test

* cast_op_type

* no-ops for other dialects

* Types

* sum + avg

* avg + sum test

* fix test

* update agg type inference test

* wip

* is_int8, stddev

* more doc, overflow check

* fmt

* finish round-trip test

* wip
This commit is contained in:
GregoryTravis 2024-07-02 15:01:55 -04:00 committed by GitHub
parent 08ec3acdd4
commit 48fb999eb3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 225 additions and 49 deletions

View File

@ -57,6 +57,7 @@
- [Implemented fallback to Windows-1252 encoding for `Encoding.Default`.][10190]
- [Added Table.duplicates component][10323]
- [Renamed `Table.order_by` to `Table.sort`][10372]
- [Implemented `Decimal` support for Postgres backend.][10216]
[debug-shortcuts]:
@ -67,6 +68,7 @@
[10190]: https://github.com/enso-org/enso/pull/10190
[10323]: https://github.com/enso-org/enso/pull/10323
[10372]: https://github.com/enso-org/enso/pull/10372
[10216]: https://github.com/enso-org/enso/pull/10216
<br/>![Release Notes](/docs/assets/tags/release_notes.svg)

View File

@ -130,6 +130,20 @@ type Redshift_Dialect
_ = [approximate_result_type, infer_result_type_from_database_callback]
column
## PRIVATE
Add an extra cast to adjust the output type of certain operations with
certain arguments.
It is used when the normal type inference provided by the database engine
needs to be adjusted.
In most cases this method will just return the expression unchanged, it
is used only to override the type in cases where the default one that the
database uses is not what we want.
cast_op_type self (op_kind:Text) (args:(Vector Internal_Column)) (expression:SQL_Expression) =
_ = [op_kind, args]
expression
## PRIVATE
prepare_fetch_types_query : SQL_Expression -> Context -> SQL_Statement
prepare_fetch_types_query self expression context =

View File

@ -29,7 +29,7 @@ make_aggregate_column : DB_Table -> Aggregate_Column -> Text -> Dialect -> (Any
make_aggregate_column table aggregate as dialect infer_return_type problem_builder =
is_non_empty_selector v = v.is_nothing.not && v.not_empty
simple_aggregate op_kind columns =
expression = SQL_Expression.Operation op_kind (columns.map c->c.expression)
expression = dialect.cast_op_type op_kind columns (SQL_Expression.Operation op_kind (columns.map c->c.expression))
sql_type_ref = infer_return_type op_kind columns expression
Internal_Column.Value as sql_type_ref expression

View File

@ -97,6 +97,19 @@ big_integer_fetcher =
make_builder_from_java_object_builder java_builder
Column_Fetcher.Value fetch_value make_builder
## PRIVATE
big_decimal_fetcher : Column_Fetcher
big_decimal_fetcher =
fetch_value rs i =
big_decimal = rs.getBigDecimal i
if rs.wasNull then Nothing else
big_decimal
make_builder initial_size java_problem_aggregator =
_ = java_problem_aggregator
java_builder = Java_Exports.make_bigdecimal_builder initial_size
make_builder_from_java_object_builder java_builder
Column_Fetcher.Value fetch_value make_builder
## PRIVATE
text_fetcher : Value_Type -> Column_Fetcher
text_fetcher value_type =
@ -178,14 +191,14 @@ default_fetcher_for_value_type value_type =
# We currently don't distinguish timestamps without a timezone on the Enso value side.
Value_Type.Date_Time has_timezone ->
if has_timezone then date_time_fetcher else local_date_time_fetcher
# If we can determine that scale = 0
## If we can determine that scale <= 0, we use BigIntegerBuilder.
Otherwise, we use BigDecimalBuilder, since it's possible some values
will be BigDecimal.
Value_Type.Decimal _ scale ->
is_guaranteed_integer = scale.is_nothing.not && scale <= 0
case is_guaranteed_integer of
True -> big_integer_fetcher
# If we cannot guarantee that the column is integer, we will fall back to Float values, since there is no BigDecimal implementation yet.
# In another place this will trigger a Inexact_Type_Coercion warning.
False -> double_fetcher
False -> big_decimal_fetcher
_ -> fallback_fetcher
## PRIVATE

View File

@ -39,6 +39,8 @@ import project.SQL_Type.SQL_Type
from project.Errors import SQL_Error, Unsupported_Database_Operation
from project.Internal.IR.Operation_Metadata import Date_Period_Metadata
polyglot java import java.sql.Types
## PRIVATE
The dialect of PostgreSQL databases.
@ -195,6 +197,36 @@ type Postgres_Dialect
Illegal_State.Error "The type computed by our logic is Char, but the Database computed a non-text type ("+db_type.to_display_text+"). This should never happen and should be reported as a bug in the Database library."
False -> column
## PRIVATE
Add an extra cast to adjust the output type of certain operations with
certain arguments.
It is used when the normal type inference provided by the database engine
needs to be adjusted.
In most cases this method will just return the expression unchanged, it
is used only to override the type in cases where the default one that the
database uses is not what we want.
cast_op_type self (op_kind:Text) (args:(Vector Internal_Column)) (expression:SQL_Expression) =
is_int8 ic = ic.sql_type_reference.get.typeid == Types.BIGINT
is_int ic =
typeid = ic.sql_type_reference.get.typeid
typeid == Types.SMALLINT || typeid == Types.INTEGER || typeid == Types.BIGINT
cast_to = case op_kind of
"SUM" ->
if is_int8 (args.at 0) then "numeric(1000,0)" else Nothing
"AVG" ->
if is_int (args.at 0) then "float8" else Nothing
"STDDEV_POP" ->
if is_int (args.at 0) then "float8" else Nothing
"STDDEV_SAMP" ->
if is_int (args.at 0) then "float8" else Nothing
_ -> Nothing
if cast_to.is_nothing then expression else
SQL_Expression.Operation "CAST" [expression, SQL_Expression.Literal cast_to]
## PRIVATE
prepare_fetch_types_query : SQL_Expression -> Context -> SQL_Statement
prepare_fetch_types_query self expression context =

View File

@ -184,6 +184,20 @@ type SQLite_Dialect
new_sql_type_reference = SQL_Type_Reference.from_constant sql_type
Internal_Column.Value column.name new_sql_type_reference new_expression
## PRIVATE
Add an extra cast to adjust the output type of certain operations with
certain arguments.
It is used when the normal type inference provided by the database engine
needs to be adjusted.
In most cases this method will just return the expression unchanged, it
is used only to override the type in cases where the default one that the
database uses is not what we want.
cast_op_type self (op_kind:Text) (args:(Vector Internal_Column)) (expression:SQL_Expression) =
_ = [op_kind, args]
expression
## PRIVATE
prepare_fetch_types_query : SQL_Expression -> Context -> SQL_Statement
prepare_fetch_types_query self expression context =

View File

@ -41,7 +41,8 @@ fill_hole_default stmt i type_hint value = case value of
big_decimal = NumericConverter.bigIntegerAsBigDecimal value
stmt.setBigDecimal i big_decimal
False -> stmt.setLong i value
_ : Float -> stmt.setDouble i value
_ : Decimal -> stmt.setBigDecimal i value.big_decimal
_ : Float -> stmt.setDouble i value
_ : Text -> stmt.setString i value
_ : Date_Time ->
has_timezone = case type_hint of

View File

@ -184,6 +184,20 @@ type Snowflake_Dialect
_ = [approximate_result_type, infer_result_type_from_database_callback]
column
## PRIVATE
Add an extra cast to adjust the output type of certain operations with
certain arguments.
It is used when the normal type inference provided by the database engine
needs to be adjusted.
In most cases this method will just return the expression unchanged, it
is used only to override the type in cases where the default one that the
database uses is not what we want.
cast_op_type self (op_kind:Text) (args:(Vector Internal_Column)) (expression:SQL_Expression) =
_ = [op_kind, args]
expression
## PRIVATE
prepare_fetch_types_query : SQL_Expression -> Context -> SQL_Statement
prepare_fetch_types_query self expression context =

View File

@ -5,6 +5,7 @@ import project.Internal.Storage
import project.Value_Type.Bits
import project.Value_Type.Value_Type
polyglot java import org.enso.table.data.column.builder.BigDecimalBuilder
polyglot java import org.enso.table.data.column.builder.BigIntegerBuilder
polyglot java import org.enso.table.data.column.builder.BoolBuilder
polyglot java import org.enso.table.data.column.builder.DateBuilder
@ -36,6 +37,11 @@ make_biginteger_builder : Integer -> ProblemAggregator -> BigIntegerBuilder
make_biginteger_builder initial_size java_problem_aggregator=(Missing_Argument.ensure_present "java_problem_aggregator") =
BigIntegerBuilder.new initial_size java_problem_aggregator
## PRIVATE
make_bigdecimal_builder : Integer -> BigDecimalBuilder
make_bigdecimal_builder initial_size =
BigDecimalBuilder.new initial_size
## PRIVATE
make_string_builder : Integer -> Value_Type -> StringBuilder
make_string_builder initial_size value_type=Value_Type.Char =

View File

@ -36,11 +36,6 @@ public class BigDecimalBuilder extends TypedBuilderImpl<BigDecimal> {
data[currentSize++] = value;
}
@Override
public void append(Object o) {
appendNoGrow(o);
}
@Override
public boolean accepts(Object o) {
return o instanceof BigDecimal;

View File

@ -7,6 +7,7 @@ import org.enso.table.data.column.storage.Storage;
import org.enso.table.data.column.storage.numeric.AbstractLongStorage;
import org.enso.table.data.column.storage.numeric.BigIntegerStorage;
import org.enso.table.data.column.storage.type.AnyObjectType;
import org.enso.table.data.column.storage.type.BigDecimalType;
import org.enso.table.data.column.storage.type.BigIntegerType;
import org.enso.table.data.column.storage.type.FloatType;
import org.enso.table.data.column.storage.type.IntegerType;
@ -37,7 +38,9 @@ public class BigIntegerBuilder extends TypedBuilderImpl<BigInteger> {
@Override
public boolean canRetypeTo(StorageType type) {
return type instanceof FloatType || type instanceof AnyObjectType;
return type instanceof FloatType
|| type instanceof BigDecimalType
|| type instanceof AnyObjectType;
}
@Override
@ -53,6 +56,16 @@ public class BigIntegerBuilder extends TypedBuilderImpl<BigInteger> {
}
}
return res;
} else if (type instanceof BigDecimalType) {
BigDecimalBuilder res = new BigDecimalBuilder(currentSize);
for (int i = 0; i < currentSize; i++) {
if (data[i] == null) {
res.appendNulls(1);
} else {
res.appendNoGrow(data[i]);
}
}
return res;
} else if (type instanceof AnyObjectType) {
Object[] widenedData = Arrays.copyOf(data, data.length, Object[].class);
ObjectBuilder res = new MixedBuilder(widenedData);

View File

@ -11,6 +11,7 @@ import org.enso.table.data.column.storage.Storage;
import org.enso.table.data.column.storage.numeric.AbstractLongStorage;
import org.enso.table.data.column.storage.numeric.BigIntegerStorage;
import org.enso.table.data.column.storage.numeric.DoubleStorage;
import org.enso.table.data.column.storage.type.BigDecimalType;
import org.enso.table.data.column.storage.type.BigIntegerType;
import org.enso.table.data.column.storage.type.BooleanType;
import org.enso.table.data.column.storage.type.FloatType;
@ -39,12 +40,26 @@ public class DoubleBuilder extends NumericBuilder {
@Override
public boolean canRetypeTo(StorageType type) {
return false;
return type instanceof BigDecimalType;
}
@Override
public TypedBuilder retypeTo(StorageType type) {
throw new UnsupportedOperationException();
if (type instanceof BigDecimalType) {
BigDecimalBuilder res = new BigDecimalBuilder(currentSize);
for (int i = 0; i < currentSize; i++) {
if (isNothing.get(i)) {
res.appendNulls(1);
} else {
double d = Double.longBitsToDouble(data[i]);
BigDecimal bigDecimal = BigDecimal.valueOf(d);
res.appendNoGrow(bigDecimal);
}
}
return res;
} else {
throw new UnsupportedOperationException();
}
}
@Override

View File

@ -9,6 +9,7 @@ import java.util.List;
import org.enso.base.polyglot.NumericConverter;
import org.enso.base.polyglot.Polyglot_Utils;
import org.enso.table.data.column.storage.Storage;
import org.enso.table.data.column.storage.type.BigDecimalType;
import org.enso.table.data.column.storage.type.BigIntegerType;
import org.enso.table.data.column.storage.type.BooleanType;
import org.enso.table.data.column.storage.type.DateTimeType;
@ -138,9 +139,7 @@ public class InferredBuilder extends Builder {
new RetypeInfo(Long.class, IntegerType.INT_64),
new RetypeInfo(Double.class, FloatType.FLOAT_64),
new RetypeInfo(String.class, TextType.VARIABLE_LENGTH),
// TODO [RW] I think BigDecimals should not be coerced to floats, we should add Decimal
// support to in-memory tables at some point
// new RetypeInfo(BigDecimal.class, StorageType.FLOAT_64),
new RetypeInfo(BigDecimal.class, BigDecimalType.INSTANCE),
new RetypeInfo(LocalDate.class, DateType.INSTANCE),
new RetypeInfo(LocalTime.class, TimeOfDayType.INSTANCE),
new RetypeInfo(ZonedDateTime.class, DateTimeType.INSTANCE),

View File

@ -947,7 +947,7 @@ add_specs suite_builder setup =
result.to_vector.should_equal [0, 1, 3, 4, 0, -1, -3, -4]
result.name . should_equal "round([x])"
group_builder.specify "should allow round on a float column (to >0 decimal places)" <|
group_builder.specify "should allow round on a "+type.to_text+" column (to >0 decimal places)" <|
table = table_builder [["x", [0.51, 0.59, 3.51, 3.59, -0.51, -0.59, -3.51, -3.59]]]
result = table.at "x" . cast type . round 1
# TODO why it's becoming an Int?
@ -956,7 +956,7 @@ add_specs suite_builder setup =
result.to_vector.should_equal [0.5, 0.6, 3.5, 3.6, -0.5, -0.6, -3.5, -3.6]
result.name . should_equal "round([x])"
group_builder.specify "should allow round on a float column (to <0 decimal places)" <|
group_builder.specify "should allow round on a "+type.to_text+" column (to <0 decimal places)" <|
table = table_builder [["x", [51.2, 59.3, 351.45, 359.11, -51.2, -59.3, -351.23, -359.69]]]
result = table.at "x" . cast type . round -1
result.to_vector.should_equal [50.0, 60.0, 350.0, 360.0, -50.0, -60.0, -350.0, -360.0]
@ -981,8 +981,9 @@ add_specs suite_builder setup =
result.name . should_equal "floor([x])"
test_floatlike Value_Type.Float
if setup.test_selection.supports_decimal_type then
test_floatlike Value_Type.Decimal
group_builder.specify "should allow round on a Decimal column" pending="https://github.com/enso-org/enso/issues/10344" <|
if setup.test_selection.supports_decimal_type then
test_floatlike Value_Type.Decimal
group_builder.specify "should allow round on an int column" <|
table = table_builder [["x", [1, 9, 31, 39, -1, -9, -31, -39]]]
@ -1020,7 +1021,7 @@ add_specs suite_builder setup =
decimal_col.value_type.is_decimal . should_be_true
decimal_col2 = decimal_col + decimal_col*decimal_col
[(.floor), (.ceil), (.truncate), (x-> x.round 0), (x-> x.round 2)].each op->
op decimal_col2 . to_vector . should_equal [i1 + i1*i1 . to_float]
op decimal_col2 . to_vector . should_equal [i1 + i1*i1]
group_builder.specify "should allow Nothing/NULL" <|
table = table_builder [["x", [Nothing, 0.51, 0.59, 3.51, Nothing, 3.59, -0.51, -0.59, -3.51, -3.59]]]

View File

@ -107,6 +107,8 @@ type Postgres_Aggregate_Data
connection = create_connection_fn Nothing
name = Name_Generator.random_name "Ttypes"
connection.execute_update 'CREATE TEMPORARY TABLE "'+name+'" ("txt" VARCHAR, "i1" SMALLINT, "i2" INT, "i3" BIGINT, "i4" NUMERIC, "r1" REAL, "r2" DOUBLE PRECISION, "bools" BOOLEAN)'
connection.execute_update 'INSERT INTO "'+name+'" VALUES (\'a\', 1, 2, 3, 4, 5.5, 6.6, true)'
connection.execute_update 'INSERT INTO "'+name+'" VALUES (\'a\', 11, 12, 13, 14, 15.5, 16.6, false)'
t = connection.query (SQL_Query.Table_Name name)
[connection, name, t]
@ -238,7 +240,7 @@ postgres_specific_spec suite_builder create_connection_fn db_name setup =
i = data.t.aggregate columns=[Aggregate_Column.Concatenate "strs", Aggregate_Column.Sum "ints", Aggregate_Column.Count_Distinct "bools"] . column_info
i.at "Column" . to_vector . should_equal ["Concatenate strs", "Sum ints", "Count Distinct bools"]
i.at "Items Count" . to_vector . should_equal [1, 1, 1]
i.at "Value Type" . to_vector . should_equal [default_text, Value_Type.Decimal, Value_Type.Integer]
i.at "Value Type" . to_vector . should_equal [default_text, Value_Type.Decimal 1000 0, Value_Type.Integer]
group_builder.specify "should infer standard types correctly" <|
data.t.at "strs" . value_type . is_text . should_be_true
@ -274,7 +276,7 @@ postgres_specific_spec suite_builder create_connection_fn db_name setup =
expected_code = code_template.replace "{Tinfo}" data.tinfo
t.distinct ["strs"] . to_sql . prepare . should_equal [expected_code, []]
suite_builder.group "[PostgreSQL] Table.aggregate should correctly infer result types" group_builder->
suite_builder.group "[PostgreSQL] Table.aggregate should correctly infer result types, and the resulting values should be of the correct type" group_builder->
data = Postgres_Aggregate_Data.setup create_connection_fn
group_builder.teardown <|
@ -296,19 +298,47 @@ postgres_specific_spec suite_builder create_connection_fn db_name setup =
r = data.t.aggregate columns=[Aggregate_Column.Sum "i1", Aggregate_Column.Sum "i2", Aggregate_Column.Sum "i3", Aggregate_Column.Sum "i4", Aggregate_Column.Sum "r1", Aggregate_Column.Sum "r2"]
r.columns.at 0 . value_type . should_equal Value_Type.Integer
r.columns.at 1 . value_type . should_equal Value_Type.Integer
r.columns.at 2 . value_type . should_equal Value_Type.Decimal
r.columns.at 2 . value_type . should_equal (Value_Type.Decimal 1000 0)
r.columns.at 3 . value_type . should_equal Value_Type.Decimal
r.columns.at 4 . value_type . should_equal (Value_Type.Float Bits.Bits_32)
r.columns.at 5 . value_type . should_equal (Value_Type.Float Bits.Bits_64)
r.columns.at 0 . at 0 . should_be_a Integer
r.columns.at 1 . at 0 . should_be_a Integer
r.columns.at 2 . at 0 . should_be_a Integer
r.columns.at 3 . at 0 . should_be_a Decimal
r.columns.at 4 . at 0 . should_be_a Float
r.columns.at 5 . at 0 . should_be_a Float
group_builder.specify "Average" <|
r = data.t.aggregate columns=[Aggregate_Column.Average "i1", Aggregate_Column.Average "i2", Aggregate_Column.Average "i3", Aggregate_Column.Average "i4", Aggregate_Column.Average "r1", Aggregate_Column.Average "r2"]
r.columns.at 0 . value_type . should_equal Value_Type.Decimal
r.columns.at 1 . value_type . should_equal Value_Type.Decimal
r.columns.at 2 . value_type . should_equal Value_Type.Decimal
r.columns.at 0 . value_type . should_equal Value_Type.Float
r.columns.at 1 . value_type . should_equal Value_Type.Float
r.columns.at 2 . value_type . should_equal Value_Type.Float
r.columns.at 3 . value_type . should_equal Value_Type.Decimal
r.columns.at 4 . value_type . should_equal Value_Type.Float
r.columns.at 5 . value_type . should_equal Value_Type.Float
r.columns.at 0 . at 0 . should_be_a Float
r.columns.at 1 . at 0 . should_be_a Float
r.columns.at 2 . at 0 . should_be_a Float
r.columns.at 3 . at 0 . should_be_a Decimal
r.columns.at 4 . at 0 . should_be_a Float
r.columns.at 5 . at 0 . should_be_a Float
group_builder.specify "Standard Deviation" <|
[False, True].map population->
r = data.t.aggregate columns=[Aggregate_Column.Standard_Deviation population=population "i1", Aggregate_Column.Standard_Deviation population=population "i2", Aggregate_Column.Standard_Deviation population=population "i3", Aggregate_Column.Standard_Deviation population=population "i4", Aggregate_Column.Standard_Deviation population=population "r1", Aggregate_Column.Standard_Deviation population=population "r2"]
r.columns.at 0 . value_type . should_equal Value_Type.Float
r.columns.at 1 . value_type . should_equal Value_Type.Float
r.columns.at 2 . value_type . should_equal Value_Type.Float
r.columns.at 3 . value_type . should_equal Value_Type.Decimal
r.columns.at 4 . value_type . should_equal Value_Type.Float
r.columns.at 5 . value_type . should_equal Value_Type.Float
r.columns.at 0 . at 0 . should_be_a Float
r.columns.at 1 . at 0 . should_be_a Float
r.columns.at 2 . at 0 . should_be_a Float
r.columns.at 3 . at 0 . should_be_a Decimal
r.columns.at 4 . at 0 . should_be_a Float
r.columns.at 5 . at 0 . should_be_a Float
suite_builder.group "[PostgreSQL] Warning/Error handling" group_builder->
@ -404,19 +434,15 @@ postgres_specific_spec suite_builder create_connection_fn db_name setup =
# Unfortunately, performing operations on a Decimal column in postgres can lose information about it being an integer column.
t2.at "Y" . value_type . scale . should_equal Nothing
t2.at "X" . to_vector . should_equal [10, x]
# Only works by approximation:
t2.at "Y" . to_vector . should_equal [20, x+10 . to_float]
t2.at "Y" . to_vector . should_equal [20, x+10]
t2.at "Y" . cast Value_Type.Char . to_vector . should_equal ["20", (x+10).to_text]
m2 = t2.remove_warnings.read
m2.at "X" . value_type . should_be_a (Value_Type.Decimal ...)
# As noted above - once operations are performed, the scale=0 may be lost and the column will be approximated as a float.
m2.at "Y" . value_type . should_equal Value_Type.Float
m2.at "Y" . value_type . should_equal (Value_Type.Decimal Nothing Nothing)
m2.at "X" . to_vector . should_equal [10, x]
m2.at "Y" . to_vector . should_equal [20, x+10 . to_float]
w2 = Problems.expect_only_warning Inexact_Type_Coercion m2
w2.requested_type . should_equal (Value_Type.Decimal precision=Nothing scale=Nothing)
w2.actual_type . should_equal Value_Type.Float
m2.at "Y" . to_vector . should_equal [20, x+10]
# This has more than 1000 digits.
super_large = 11^2000
@ -428,18 +454,50 @@ postgres_specific_spec suite_builder create_connection_fn db_name setup =
t3 . at "X" . value_type . precision . should_equal Nothing
t3 . at "X" . value_type . scale . should_equal Nothing
# Works but only relying on imprecise float equality:
t3 . at "X" . to_vector . should_equal [super_large . to_float]
t3 . at "X" . to_vector . should_equal [super_large]
w3 = Problems.expect_only_warning Inexact_Type_Coercion t3
w3.requested_type . should_equal (Value_Type.Decimal precision=Nothing scale=0)
w3.actual_type . should_equal (Value_Type.Decimal precision=Nothing scale=Nothing)
m4 = t3.remove_warnings.read
# Because we no longer have a set scale, we cannot get a BigInteger column back - we'd need BigDecimal, but that is not fully supported yet in Enso - so we get the closest approximation - the imprecise Float.
m4 . at "X" . value_type . should_equal Value_Type.Float
m4 . at "X" . to_vector . should_equal [super_large . to_float]
w4 = Problems.expect_only_warning Inexact_Type_Coercion m4
w4.requested_type . should_equal (Value_Type.Decimal precision=Nothing scale=Nothing)
w4.actual_type . should_equal Value_Type.Float
m4 . at "X" . value_type . should_equal Value_Type.Decimal
m4 . at "X" . to_vector . should_equal [super_large]
group_builder.specify "should be able to round-trip a BigDecimal column" <|
x = Decimal.new "123.45234737459387459837493874593874937845937495837459345345345E468"
m1 = Table.new [["X", [Decimal.new 10, x, x+1]]]
m1.at "X" . value_type . should_equal (Value_Type.Decimal Nothing Nothing)
t1 = m1.select_into_database_table data.connection "BigDecimal" primary_key=[] temporary=True
t1.at "X" . value_type . should_equal (Value_Type.Decimal Nothing Nothing)
t1.at "X" . to_vector . should_equal [10, x, x+1]
v1x = t1.at "X" . to_vector
v1x.should_equal [10, x, x+1]
v1x.each e-> Test.with_clue "("+e.to_text+"): " <| e.should_be_a Decimal
t2 = t1.set (expr "[X] + 10") "Y"
t2.at "X" . value_type . should_equal (Value_Type.Decimal Nothing Nothing)
t2.at "Y" . value_type . should_equal (Value_Type.Decimal Nothing Nothing)
t2.at "X" . to_vector . should_equal [10, x, x+1]
t2.at "Y" . to_vector . should_equal [20, x+10, x+11]
t2.at "Y" . cast Value_Type.Char . to_vector . should_equal ["20", (x+10).to_text, (x+11).to_text]
m2 = t2.remove_warnings.read
m2.at "X" . value_type . should_equal (Value_Type.Decimal Nothing Nothing)
m2.at "Y" . value_type . should_equal (Value_Type.Decimal Nothing Nothing)
m2.at "X" . to_vector . should_equal [10, x, x+1]
m2.at "Y" . to_vector . should_equal [20, x+10, x+11]
group_builder.specify "type inference adjustments in cast_op_type` should not cause overflows" <|
max_long = 9223372036854775807
more_than_max_long = max_long + 50
t = table_builder [["x", [max_long, max_long]], ["y", [more_than_max_long, more_than_max_long]]]
tsum = t.aggregate [] [Aggregate_Column.Sum "x" "xs", Aggregate_Column.Sum "y" "ys"]
tsum.at "xs" . value_type . should_equal (Value_Type.Decimal 1000 0)
tsum.at "ys" . value_type . should_equal (Value_Type.Decimal Nothing Nothing)
tsum.at "xs" . to_vector . should_equal [max_long + max_long]
tsum.at "ys" . to_vector . should_equal [more_than_max_long + more_than_max_long]
group_builder.specify "should round-trip timestamptz column, preserving instant but converting to UTC" <|
table_name = Name_Generator.random_name "TimestampTZ"
@ -604,10 +662,11 @@ postgres_specific_spec suite_builder create_connection_fn db_name setup =
do_round data 231.2 -1 . should_be_a Float
group_builder.specify "round returns the correct type" <|
do_round data 231 1 . should_be_a Float
# TODO https://github.com/enso-org/enso/issues/10345
do_round data 231 1 . should_be_a Decimal
do_round data 231 0 . should_be_a Float
do_round data 231 . should_be_a Float
do_round data 231 -1 . should_be_a Float
do_round data 231 -1 . should_be_a Decimal
type Lazy_Ref
Value ~get

View File

@ -160,11 +160,9 @@ add_specs suite_builder create_connection_fn =
table.at "B" . value_type . should_equal (Value_Type.Decimal precision=100 scale=5)
m2 = table.read
m2.at "B" . value_type . should_equal Value_Type.Float
m2.at "B" . to_vector . should_equal [1.5, 2.5]
w2 = Problems.expect_only_warning Inexact_Type_Coercion m2
w2.requested_type . should_equal (Value_Type.Decimal precision=100 scale=5)
w2.actual_type . should_equal Value_Type.Float
m2.at "B" . value_type . should_equal Value_Type.Decimal
m2.at "B" . to_vector . at 0 . should_be_a Decimal
m2.at "B" . to_vector . should_equal [Decimal.new "1.5", Decimal.new "2.5"]
group_builder.specify "should warn when fetching a Binary column and coercing it to Mixed because in-memory does not support Binary" <|
table_name = Name_Generator.random_name "Bin"