Add Initial SQL Server support (#10624)

* Squash all commits to resolve merge conflicts

* Fix merge problems

* Merge fix

* Fix port

* Fix warning

* cargo fmt

* legal review

* Small fixes

* Update instructions

* Code review feedback

* Cleanup

* typo

* Fix

* Remove leftover snowflake code

* Remove comment

* Add underscore

* Type cleanup

* Code review fix

* Cleanup

* Add datetime roundtrip test

* add comment

* drop

* Refactor

* Refactor

* Fix merge

* Fix

* Fix

* fix

* Add comment
Author: AdRiley
Date: 2024-07-30 11:13:08 +01:00 (committed by GitHub)
parent b9214f052c
commit 0c552489e3
38 changed files with 2297 additions and 30 deletions

View File

@@ -57,6 +57,7 @@
- [Compare two objects with `Ordering.compare` and define comparator with
`Comparable.new`][10468]
- [Added `dec` construction function for creating `Decimal`s.][10517]
- [Added initial read support for SQLServer][10324]
[10434]: https://github.com/enso-org/enso/pull/10434
[10445]: https://github.com/enso-org/enso/pull/10445
@@ -64,6 +65,7 @@
[10467]: https://github.com/enso-org/enso/pull/10467
[10474]: https://github.com/enso-org/enso/pull/10474
[10517]: https://github.com/enso-org/enso/pull/10517
[10324]: https://github.com/enso-org/enso/pull/10324
# Enso 2024.2

View File

@@ -163,7 +163,11 @@ GatherLicenses.distributions := Seq(
makeStdLibDistribution("Database", Distribution.sbtProjects(`std-database`)),
makeStdLibDistribution("Image", Distribution.sbtProjects(`std-image`)),
makeStdLibDistribution("AWS", Distribution.sbtProjects(`std-aws`)),
makeStdLibDistribution("Snowflake", Distribution.sbtProjects(`std-snowflake`))
makeStdLibDistribution(
"Snowflake",
Distribution.sbtProjects(`std-snowflake`)
),
makeStdLibDistribution("Microsoft", Distribution.sbtProjects(`std-microsoft`))
)
GatherLicenses.licenseConfigurations := Set("compile")
@@ -346,6 +350,7 @@ lazy val enso = (project in file("."))
`std-table`,
`std-aws`,
`std-snowflake`,
`std-microsoft`,
`http-test-helper`,
`enso-test-java-helpers`,
`exploratory-benchmark-java-helpers`,
@@ -563,6 +568,7 @@ val fansiVersion = "0.4.0"
val httpComponentsVersion = "4.4.1"
val apacheArrowVersion = "14.0.1"
val snowflakeJDBCVersion = "3.15.0"
val mssqlserverJDBCVersion = "12.6.2.jre11"
val jsoniterVersion = "2.28.5"
// ============================================================================
@@ -1942,6 +1948,7 @@ lazy val runtime = (project in file("engine/runtime"))
.dependsOn(`std-table` / Compile / packageBin)
.dependsOn(`std-aws` / Compile / packageBin)
.dependsOn(`std-snowflake` / Compile / packageBin)
.dependsOn(`std-microsoft` / Compile / packageBin)
.value
)
.dependsOn(`common-polyglot-core-utils`)
@@ -3260,6 +3267,8 @@ val `std-aws-polyglot-root` =
stdLibComponentRoot("AWS") / "polyglot" / "java"
val `std-snowflake-polyglot-root` =
stdLibComponentRoot("Snowflake") / "polyglot" / "java"
val `std-microsoft-polyglot-root` =
stdLibComponentRoot("Microsoft") / "polyglot" / "java"
lazy val `std-base` = project
.in(file("std-bits") / "base")
@@ -3567,6 +3576,36 @@ lazy val `std-snowflake` = project
.dependsOn(`std-table` % "provided")
.dependsOn(`std-database` % "provided")
lazy val `std-microsoft` = project
.in(file("std-bits") / "microsoft")
.settings(
frgaalJavaCompilerSetting,
autoScalaLibrary := false,
Compile / compile / compileInputs := (Compile / compile / compileInputs)
.dependsOn(SPIHelpers.ensureSPIConsistency)
.value,
Compile / packageBin / artifactPath :=
`std-microsoft-polyglot-root` / "std-microsoft.jar",
libraryDependencies ++= Seq(
"org.netbeans.api" % "org-openide-util-lookup" % netbeansApiVersion % "provided",
"com.microsoft.sqlserver" % "mssql-jdbc" % mssqlserverJDBCVersion
),
Compile / packageBin := Def.task {
val result = (Compile / packageBin).value
val _ = StdBits
.copyDependencies(
`std-microsoft-polyglot-root`,
Seq("std-microsoft.jar"),
ignoreScalaLibrary = true
)
.value
result
}.value
)
.dependsOn(`std-base` % "provided")
.dependsOn(`std-table` % "provided")
.dependsOn(`std-database` % "provided")
/* Note [Native Image Workaround for GraalVM 20.2]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* In GraalVM 20.2 the Native Image build of even simple Scala programs has
@@ -3713,6 +3752,7 @@ val stdBitsProjects =
"Database",
"Google_Api",
"Image",
"Microsoft",
"Snowflake",
"Table"
) ++ allStdBitsSuffix
@@ -3784,6 +3824,8 @@ pkgStdLibInternal := Def.inputTask {
(`std-aws` / Compile / packageBin).value
case "Snowflake" =>
(`std-snowflake` / Compile / packageBin).value
case "Microsoft" =>
(`std-microsoft` / Compile / packageBin).value
case _ if buildAllCmd =>
(`std-base` / Compile / packageBin).value
(`enso-test-java-helpers` / Compile / packageBin).value
@@ -3795,6 +3837,7 @@ pkgStdLibInternal := Def.inputTask {
(`std-google-api` / Compile / packageBin).value
(`std-aws` / Compile / packageBin).value
(`std-snowflake` / Compile / packageBin).value
(`std-microsoft` / Compile / packageBin).value
case _ =>
}
val libs =

View File

@@ -7,8 +7,11 @@ use crate::paths::ENSO_META_TEST_ARGS;
use crate::paths::ENSO_META_TEST_COMMAND;
use crate::paths::ENSO_TEST_ANSI_COLORS;
use crate::postgres;
use crate::postgres::EndpointConfiguration;
use crate::postgres::EndpointConfiguration as PostgresEndpointConfiguration;
use crate::postgres::Postgresql;
use crate::sqlserver;
use crate::sqlserver::EndpointConfiguration as SQLServerEndpointConfiguration;
use crate::sqlserver::SQLServer;
use ide_ci::future::AsyncPolicy;
use ide_ci::programs::docker::ContainerId;
@@ -142,7 +145,12 @@ impl BuiltEnso {
let may_need_postgres = match &test_selection {
StandardLibraryTestsSelection::All => true,
StandardLibraryTestsSelection::Selected(only) =>
only.iter().any(|test| test.contains("Postgres_Tests")),
only.iter().any(|test| test.contains("Table_Tests")),
};
let may_need_sqlserver = match &test_selection {
StandardLibraryTestsSelection::All => true,
StandardLibraryTestsSelection::Selected(only) =>
only.iter().any(|test| test.contains("Microsoft_Tests")),
};
let _httpbin = crate::httpbin::get_and_spawn_httpbin_on_free_port(sbt).await?;
@@ -162,7 +170,7 @@
database_name: "enso_test_db".to_string(),
user: "enso_test_user".to_string(),
password: "enso_test_password".to_string(),
endpoint: EndpointConfiguration::deduce()?,
endpoint: PostgresEndpointConfiguration::deduce()?,
version: "latest".to_string(),
};
let postgres = Postgresql::start(config).await?;
@@ -171,6 +179,30 @@
_ => None,
};
let _sqlserver = match TARGET_OS {
OS::Linux if may_need_sqlserver => {
let runner_context_string = crate::env::ENSO_RUNNER_CONTAINER_NAME
.get_raw()
.or_else(|_| ide_ci::actions::env::RUNNER_NAME.get())
.unwrap_or_else(|_| Uuid::new_v4().to_string());
// GH-hosted runners are named like "GitHub Actions 10". Spaces are not allowed in
// the container name.
let container_name =
format!("sqlserver-for-{runner_context_string}").replace(' ', "_");
let config = sqlserver::Configuration {
sqlserver_container: ContainerId(container_name),
database_name: "tempdb".to_string(),
user: "sa".to_string(),
password: "enso_test_password_<YourStrong@Passw0rd>".to_string(),
endpoint: SQLServerEndpointConfiguration::deduce()?,
version: "latest".to_string(),
};
let sqlserver = SQLServer::start(config).await?;
Some(sqlserver)
}
_ => None,
};
let futures = std_tests.into_iter().map(|test_path| {
let command = self.run_test(test_path, ir_caches);
async move { command?.run_ok().await }

View File

@@ -53,6 +53,7 @@ pub mod release;
pub mod repo;
pub mod rust;
pub mod source;
pub mod sqlserver;
pub mod version;
pub mod web;

View File

@@ -0,0 +1,226 @@
use crate::prelude::*;
use ide_ci::env::accessor::RawVariable;
use ide_ci::env::accessor::TypedVariable;
use ide_ci::get_free_port;
use ide_ci::programs::docker::ContainerId;
use ide_ci::programs::docker::ImageId;
use ide_ci::programs::docker::Network;
use ide_ci::programs::docker::RunOptions;
use ide_ci::programs::Docker;
use std::process::Stdio;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncRead;
use tokio::io::BufReader;
use tokio::process::Child;
/// Port used by SQLServer in its container.
const SQLSERVER_CONTAINER_DEFAULT_PORT: u16 = 1433;
/// Environment variables used to configure the SQLServer container and the tests that consume it.
pub mod env {
pub mod container {
ide_ci::define_env_var! {
ACCEPT_EULA, String;
MSSQL_SA_PASSWORD, String;
}
}
pub mod tests {
ide_ci::define_env_var! {
ENSO_SQLSERVER_DATABASE, String;
ENSO_SQLSERVER_HOST, String;
ENSO_SQLSERVER_PORT, String;
ENSO_SQLSERVER_USER, String;
ENSO_SQLSERVER_PASSWORD, String;
}
}
}
#[derive(Clone, Debug)]
pub enum EndpointConfiguration {
/// Used when the SQLServer container is started directly from the host (rather than from a
/// Docker container). In that case SQLServer is exposed on the host network on a given port.
Host { port: u16 },
/// Used when SQLServer is spawned from a container. In that case it is spawned in the
/// owning container's network on the default port.
Container { owner: ContainerId },
}
impl EndpointConfiguration {
/// Tries to deduce what endpoint should be used for a spawned SQLServer service.
pub fn deduce() -> Result<Self> {
if let Ok(container_name) = crate::env::ENSO_RUNNER_CONTAINER_NAME.get() {
debug!("Assuming that I am in the Docker container named {container_name}.");
Ok(Self::Container { owner: container_name })
} else {
// If we are running on a bare machine (i.e. not in a container), we spawn SQLServer
// and expose it on a free host port, which we can then consume directly.
let port = if port_check::is_local_port_free(SQLSERVER_CONTAINER_DEFAULT_PORT) {
// Prefer the usual port.
SQLSERVER_CONTAINER_DEFAULT_PORT
} else {
get_free_port()?
};
Ok(Self::Host { port })
}
}
}
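
The deduction reduces to: if the CI env var names an owning container, join that container's network; otherwise publish on the conventional port when it is free, else on any free port. A minimal self-contained sketch of the same decision using only the Rust standard library (a simplification; the real code above goes through ide_ci's typed env accessors and `get_free_port`):

use std::net::TcpListener;

// Sketch of the endpoint deduction: env var set -> owning container's network;
// otherwise -> the conventional port if free, else any free port.
fn deduce_endpoint_sketch() -> String {
    match std::env::var("ENSO_RUNNER_CONTAINER_NAME") {
        Ok(owner) => format!("container network of {owner}, port 1433"),
        Err(_) => {
            // Binding probes availability; binding to port 0 asks the OS for any
            // free port. The listener is dropped right away, freeing the port.
            let port = TcpListener::bind(("127.0.0.1", 1433))
                .map(|l| l.local_addr().unwrap().port())
                .or_else(|_| {
                    TcpListener::bind(("127.0.0.1", 0)).map(|l| l.local_addr().unwrap().port())
                })
                .expect("no free port available");
            format!("host network, port {port}")
        }
    }
}

fn main() {
    println!("{}", deduce_endpoint_sketch());
}

Probing by bind-and-drop is inherently racy (another process may grab the port before Docker binds it), which is acceptable for CI bootstrap.
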
#[derive(Clone, Debug)]
pub struct Configuration {
pub sqlserver_container: ContainerId,
pub database_name: String,
pub user: String,
pub password: String,
pub endpoint: EndpointConfiguration,
pub version: String,
}
impl Configuration {
pub fn image_id(&self) -> ImageId {
ImageId(format!("mcr.microsoft.com/mssql/server:{}", &self.version))
}
pub fn set_enso_test_env(&self) -> Result {
env::tests::ENSO_SQLSERVER_DATABASE.set(&self.database_name)?;
env::tests::ENSO_SQLSERVER_HOST.set("localhost")?;
env::tests::ENSO_SQLSERVER_PORT.set(&match &self.endpoint {
EndpointConfiguration::Host { port } => port.to_string(),
EndpointConfiguration::Container { .. } => SQLSERVER_CONTAINER_DEFAULT_PORT.to_string(),
})?;
env::tests::ENSO_SQLSERVER_USER.set(&self.user)?;
env::tests::ENSO_SQLSERVER_PASSWORD.set(&self.password)?;
Ok(())
}
pub fn clear_enso_test_env(&self) {
env::tests::ENSO_SQLSERVER_DATABASE.remove();
env::tests::ENSO_SQLSERVER_HOST.remove();
env::tests::ENSO_SQLSERVER_PORT.remove();
env::tests::ENSO_SQLSERVER_USER.remove();
env::tests::ENSO_SQLSERVER_PASSWORD.remove();
}
pub async fn cleanup(&self) -> Result {
Docker.remove_container(&self.sqlserver_container, true).await
}
}
/// Retrieves lines from the asynchronous reader one by one and feeds each into the given function.
pub async fn process_lines<R: AsyncRead + Unpin>(reader: R, f: impl Fn(String)) -> Result<R> {
debug!("Started line processor.");
let mut reader = BufReader::new(reader);
let mut line = String::new();
while reader.read_line(&mut line).await? != 0 {
f(std::mem::take(&mut line));
}
Ok(reader.into_inner())
}
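/// Reads lines from the asynchronous reader until the given predicate accepts one,
/// then hands the reader back; fails if the stream ends before any line matches.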
pub async fn process_lines_until<R: AsyncRead + Unpin>(
reader: R,
f: &impl Fn(&str) -> bool,
) -> Result<R> {
let mut reader = BufReader::new(reader);
let mut line = String::new();
loop {
let bytes_read = reader.read_line(&mut line).await?;
ensure!(bytes_read != 0, "SQLServer container closed without being ready!");
if f(&line) {
break;
}
line.clear();
}
Ok(reader.into_inner())
}
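
A minimal usage sketch of this readiness gate, assuming `process_lines_until` and the crate's anyhow-based `Result` are in scope; tokio implements `AsyncRead` for `&[u8]`, so an in-memory log can stand in for the container's stdout:

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Stand-in for the container's stdout stream.
    let log: &[u8] =
        b"Starting SQL Server...\nRecovery is complete.\nThe tempdb database has been created.\n";
    // The same readiness predicate used when the real container is started below.
    let ready = |line: &str| line.contains("The tempdb database has");
    let _rest = process_lines_until(log, &ready).await?;
    println!("container reported ready");
    Ok(())
}
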
#[derive(Debug)]
pub struct SQLServerContainer {
_docker_run: Child,
config: Configuration,
}
impl Drop for SQLServerContainer {
fn drop(&mut self) {
self.config.clear_enso_test_env();
debug!("Will remove the SQLServer container");
let cleanup_future = self.config.cleanup();
if let Err(e) = futures::executor::block_on(cleanup_future) {
debug!(
"Failed to kill the SQLServer container named {}: {}",
self.config.sqlserver_container, e
);
} else {
debug!("SQLServer container killed.");
}
}
}
#[derive(Clone, Copy, Debug)]
pub struct SQLServer;
impl SQLServer {
pub async fn start(config: Configuration) -> Result<SQLServerContainer> {
// Attempt cleanup in case a previous script run crashed in the middle of this.
// Otherwise, SQLServer container names could collide.
let _ = config.cleanup().await;
let mut opts = RunOptions::new(config.image_id());
opts.env(&env::container::ACCEPT_EULA, "Y")?;
opts.env(&env::container::MSSQL_SA_PASSWORD, &*config.password)?;
match &config.endpoint {
EndpointConfiguration::Host { port } => {
opts.publish_port(*port, SQLSERVER_CONTAINER_DEFAULT_PORT);
}
EndpointConfiguration::Container { owner } => {
opts.network = Some(Network::Container(owner.clone()));
}
}
opts.sig_proxy = Some(true);
opts.name = Some(config.sqlserver_container.to_string());
let mut cmd = Docker.run_cmd(&opts)?;
cmd.stdout(Stdio::piped());
cmd.kill_on_drop(true);
let mut child = cmd.spawn().anyhow_err()?;
let stdout = child
.stdout
.take()
.ok_or_else(|| anyhow!("Failed to access standard output of the spawned process!"))?;
// Wait until the container is ready.
let check_line = |line: &str| {
debug!("ERR: {}", line);
line.contains("The tempdb database has")
};
let stdout = process_lines_until(stdout, &check_line).await?;
// Put back the stream we've been reading and pack the whole thing up for the caller.
child.stdout = Some(stdout);
config.set_enso_test_env()?;
Ok(SQLServerContainer { _docker_run: child, config })
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn start_sqlserver() -> Result {
let config = Configuration {
sqlserver_container: ContainerId("something".into()),
endpoint: EndpointConfiguration::deduce()?,
version: "latest".into(),
user: "test".into(),
password: "<YourStrong@Passw0rd>".into(),
database_name: "test".into(),
};
let child = SQLServer::start(config).await?;
// Deliberately leak the handle: dropping it would remove the container,
// which this test intentionally leaves running.
std::mem::forget(child);
Ok(())
}
}

View File

@@ -66,6 +66,12 @@ type Redshift_Dialect
generate_truncate_table_sql self table_name =
Base_Generator.truncate_table_delete_from_style self table_name
## PRIVATE
Generates the SQL modifier for limiting the number of rows, and its position in the query.
get_limit_sql_modifier : Integer -> Any
get_limit_sql_modifier self limit =
[700, SQL_Builder.code (" LIMIT " + limit.to_text)]
## PRIVATE
Wraps and possibly escapes the identifier so that it can be used in a
generated query regardless of what characters it contains.
@@ -139,6 +145,11 @@ type Redshift_Dialect
supports_float_round_decimal_places : Boolean
supports_float_round_decimal_places self = False
## PRIVATE
Specifies whether the Database supports CREATE TEMPORARY TABLE syntax.
supports_temporary_table_syntax : Boolean
supports_temporary_table_syntax self = True
## PRIVATE
adapt_unified_column : Internal_Column -> Value_Type -> (SQL_Expression -> SQL_Type_Reference) -> Internal_Column
adapt_unified_column self column approximate_result_type infer_result_type_from_database_callback =
@@ -162,7 +173,7 @@
## PRIVATE
prepare_fetch_types_query : SQL_Expression -> Context -> SQL_Statement
prepare_fetch_types_query self expression context =
Dialect.default_fetch_types_query self expression context
Base_Generator.default_fetch_types_query self expression context
## PRIVATE
check_aggregate_support : Aggregate_Column -> Boolean ! Unsupported_Database_Operation

View File

@@ -172,7 +172,7 @@ type Connection
result = self.jdbc_connection.with_metadata metadata->
table = Managed_Resource.bracket (metadata.getTables database schema name_like types_vector) .close result_set->
result_set_to_table result_set self.dialect.get_type_mapping.make_column_fetcher
renamed = table.rename_columns name_dict
renamed = table.rename_columns name_dict error_on_missing_columns=False on_problems=..Ignore
if all_fields then renamed else
renamed.select_columns ["Database", "Schema", "Name", "Type", "Description"]
case include_hidden of

View File

@@ -54,6 +54,13 @@ type Dialect
_ = [table_name]
Unimplemented.throw "This is an interface only."
## PRIVATE
Generates the SQL modifier for limiting the number of rows, and its position in the query.
get_limit_sql_modifier : Integer -> Any
get_limit_sql_modifier self limit =
_ = [limit]
Unimplemented.throw "This is an interface only."
## PRIVATE
Prepares an ordering descriptor.
@@ -156,6 +163,12 @@
supports_float_round_decimal_places self =
Unimplemented.throw "This is an interface only."
## PRIVATE
Specifies whether the Database supports CREATE TEMPORARY TABLE syntax.
supports_temporary_table_syntax : Boolean
supports_temporary_table_syntax self =
Unimplemented.throw "This is an interface only."
## PRIVATE
Performs any transformations on a column resulting from unifying other
columns.
@@ -258,11 +271,6 @@ sqlite = SQLite_Dialect.sqlite
postgres : Dialect
postgres = Postgres_Dialect.postgres
## PRIVATE
default_fetch_types_query dialect expression context =
empty_context = context.add_where_filters [SQL_Expression.Literal "FALSE"]
dialect.generate_sql (Query.Select [["typed_column", expression]] empty_context)
## PRIVATE
Default implementation relying on DatabaseMetaData.
default_fetch_primary_key connection table_name =

View File

@@ -418,15 +418,14 @@ generate_order dialect order_descriptor =
base_expression = generate_expression dialect order_descriptor.expression
base_expression ++ collation ++ order_suffix ++ nulls_suffix
## PRIVATE
Generates SQL code corresponding to a SELECT statement.
Arguments:
- dialect: The SQL dialect for which the code is being generated.
- columns: Pairs of column name and the expression that computes it.
- ctx: A description of the SELECT clause.
generate_select : Dialect -> Vector | Nothing -> Context -> SQL_Builder
generate_select dialect columns ctx =
generate_select_query_sql : Dialect -> Vector (Pair Text SQL_Expression) -> Context -> SQL_Builder
generate_select_query_sql dialect columns ctx =
gen_exprs exprs = exprs.map (generate_expression dialect)
gen_column pair = (generate_expression dialect pair.second) ++ alias dialect pair.first
@@ -441,10 +440,6 @@ generate_select dialect columns ctx =
orders = ctx.orders.map (generate_order dialect)
order_part = (SQL_Builder.join ", " orders) . prefix_if_present " ORDER BY "
limit_part = case ctx.limit of
Nothing -> ""
_ : Integer -> " LIMIT " + ctx.limit.to_text
extensions = ctx.extensions.map extension->
part = extension.run_generator (gen_exprs extension.expressions)
[extension.position, part]
@@ -456,7 +451,9 @@ generate_select dialect columns ctx =
builder.append [400, where_part]
builder.append [500, group_part]
builder.append [600, order_part]
builder.append [700, limit_part]
case ctx.limit of
Nothing -> Nothing
_ : Integer -> builder.append (dialect.get_limit_sql_modifier ctx.limit)
extensions.each builder.append
SQL_Builder.join "" <| parts.sort on=(.first) . map .second
@@ -488,7 +485,7 @@ generate_insert_query dialect table_name pairs =
generate_query : Dialect -> Query -> SQL_Builder
generate_query dialect query = case query of
Query.Select columns ctx ->
generate_select dialect columns ctx
generate_select_query_sql dialect columns ctx
Query.Insert table_name pairs ->
generate_insert_query dialect table_name pairs
Query.Create_Table name columns primary_key temporary ->
@@ -590,7 +587,7 @@ generate_create_table dialect name columns primary_key temporary =
column_definitions = columns.map (generate_column_description dialect)
modifiers = if primary_key.is_nothing then [] else
[SQL_Builder.code ", PRIMARY KEY (" ++ SQL_Builder.join ", " (primary_key.map dialect.wrap_identifier) ++ ")"]
table_type = if temporary then "TEMPORARY TABLE" else "TABLE"
table_type = if temporary && dialect.supports_temporary_table_syntax then "TEMPORARY TABLE" else "TABLE"
create_prefix = SQL_Builder.code ("CREATE "+table_type+" ") ++ dialect.wrap_identifier name
create_body = (SQL_Builder.join ", " column_definitions) ++ (SQL_Builder.join "" modifiers)
create_prefix ++ " (" ++ create_body ++ ")"
@@ -622,3 +619,8 @@ truncate_table_delete_from_style dialect table_name =
truncate_table_truncate_table_style : Dialect -> Text -> SQL_Builder
truncate_table_truncate_table_style dialect table_name =
SQL_Builder.code "TRUNCATE TABLE " ++ dialect.wrap_identifier table_name
## PRIVATE
default_fetch_types_query dialect expression context where_filter_always_false_literal="FALSE" =
empty_context = context.add_where_filters [SQL_Expression.Literal where_filter_always_false_literal]
dialect.generate_sql (Query.Select [["typed_column", expression]] empty_context)
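The always-false filter yields the result-set metadata (column names and types) without scanning any rows. The new `where_filter_always_false_literal` parameter exists because not every dialect accepts a bare `FALSE`: T-SQL, for one, has no boolean literals, so a predicate such as `1=0` is the usual substitute. A sketch of the trick in Rust (hypothetical helper; the real implementation keeps the query's original FROM context):

// Sketch: wrap an expression in a SELECT with an always-false filter, so the
// database returns the result-set metadata (column types) but zero rows.
fn fetch_types_query(typed_expression: &str, always_false: &str) -> String {
    format!("SELECT {typed_expression} AS typed_column FROM t WHERE {always_false}")
}

fn main() {
    // Dialects without boolean literals pass e.g. "1=0" instead of "FALSE".
    assert_eq!(
        fetch_types_query("a + 1", "1=0"),
        "SELECT a + 1 AS typed_column FROM t WHERE 1=0"
    );
}
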

View File

@@ -78,6 +78,12 @@ type Postgres_Dialect
generate_truncate_table_sql self table_name =
Base_Generator.truncate_table_truncate_table_style self table_name
## PRIVATE
Generates the SQL modifier for limiting the number of rows, and its position in the query.
get_limit_sql_modifier : Integer -> Any
get_limit_sql_modifier self limit =
[700, SQL_Builder.code (" LIMIT " + limit.to_text)]
## PRIVATE
Wraps and possibly escapes the identifier so that it can be used in a
generated query regardless of what characters it contains.
@@ -176,6 +182,11 @@ type Postgres_Dialect
supports_float_round_decimal_places : Boolean
supports_float_round_decimal_places self = False
## PRIVATE
Specifies whether the Database supports CREATE TEMPORARY TABLE syntax.
supports_temporary_table_syntax : Boolean
supports_temporary_table_syntax self = True
## PRIVATE
There is a bug in Postgres type inference, where if we unify two
fixed-length char columns of length N and M, the result type is said to
@@ -237,7 +248,7 @@
## PRIVATE
prepare_fetch_types_query : SQL_Expression -> Context -> SQL_Statement
prepare_fetch_types_query self expression context =
Dialect.default_fetch_types_query self expression context
Base_Generator.default_fetch_types_query self expression context
## PRIVATE
check_aggregate_support : Aggregate_Column -> Boolean ! Unsupported_Database_Operation

View File

@@ -70,6 +70,12 @@ type SQLite_Dialect
generate_truncate_table_sql self table_name =
Base_Generator.truncate_table_delete_from_style self table_name
## PRIVATE
Generates the SQL modifier for limiting the number of rows, and its position in the query.
get_limit_sql_modifier : Integer -> Any
get_limit_sql_modifier self limit =
[700, SQL_Builder.code (" LIMIT " + limit.to_text)]
## PRIVATE
Wraps and possibly escapes the identifier so that it can be used in a
generated query regardless of what characters it contains.
@@ -186,6 +192,11 @@ type SQLite_Dialect
supports_float_round_decimal_places : Boolean
supports_float_round_decimal_places self = True
## PRIVATE
Specifies whether the Database supports CREATE TEMPORARY TABLE syntax.
supports_temporary_table_syntax : Boolean
supports_temporary_table_syntax self = True
## PRIVATE
SQLite allows mixed type columns, but we want our columns to be uniform.
So after unifying columns with mixed types, we add a cast to ensure that.

View File

@@ -0,0 +1,7 @@
Enso
Copyright 2020 - 2024 New Byte Order sp. z o. o.
'mssql-jdbc', licensed under the MIT License, is distributed with the Microsoft.
The license file can be found at `licenses/MIT`.
Copyright notices related to this dependency can be found in the directory `com.microsoft.sqlserver.mssql-jdbc-12.6.2.jre11`.

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,201 @@
(Apache License, Version 2.0; a verbatim duplicate of the preceding license file.)

View File

@@ -0,0 +1,50 @@
/*
* Copyright 2010 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2012 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Microsoft JDBC Driver for SQL Server Copyright(c) Microsoft Corporation All rights reserved. This program is made
* available under the terms of the MIT License. See the LICENSE file in the project root for more information.
*/
/**
* Copyright (C) 2012 tamtam180
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations
* under the License.
*
* @author tamtam180 - kirscheless at gmail.com
* @see <a href=
* "http://google-opensource.blogspot.jp/2011/04/introducing-cityhash.html">http://google-opensource.blogspot.jp/2011/04/introducing-cityhash.html</a>
* @see <a href="http://code.google.com/p/cityhash/">http://code.google.com/p/cityhash/</a>
*/
Copyright 2011 Google Inc. All Rights Reserved.

View File

@@ -0,0 +1,6 @@
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,10 @@
name: Microsoft
namespace: Standard
version: 0.0.0-dev
license: APLv2
authors:
- name: Enso Team
email: contact@enso.org
maintainers:
- name: Enso Team
email: contact@enso.org

View File

@@ -0,0 +1,275 @@
private
from Standard.Base import all
import Standard.Base.Metadata.Display
from Standard.Base.Metadata.Choice import Option
from Standard.Base.Metadata.Widget import Single_Choice
import Standard.Table.Rows_To_Read.Rows_To_Read
from Standard.Table import Table
import Standard.Database.Column_Description.Column_Description
import Standard.Database.Connection.Connection.Connection
import Standard.Database.DB_Table.DB_Table
import Standard.Database.Internal.Connection.Entity_Naming_Properties.Entity_Naming_Properties
import Standard.Database.Internal.JDBC_Connection
import Standard.Database.SQL_Query.SQL_Query
import Standard.Database.SQL_Statement.SQL_Statement
from Standard.Database.Connection.Connection import make_database_selector, make_schema_selector, make_structure_creator, make_table_name_selector, make_table_types_selector
from Standard.Database.Errors import SQL_Error, Table_Already_Exists, Table_Not_Found
from Standard.Database.Internal.Upload_Table import first_column_name_in_structure
import project.Internal.SQLServer_Dialect
type SQLServer_Connection
## PRIVATE
Creates a SQLServer connection based on a URL and properties.
Arguments:
- url: The URL to connect to.
- properties: A vector of properties for the connection.
- make_new: A function that returns a new connection.
create : Text -> Vector -> (Text -> Text -> SQLServer_Connection) -> SQLServer_Connection
create url properties make_new =
jdbc_connection = JDBC_Connection.create url properties
entity_naming_properties = Entity_Naming_Properties.from_jdbc_connection jdbc_connection is_case_sensitive=False
SQLServer_Connection.Value (Connection.new jdbc_connection SQLServer_Dialect.sqlserver entity_naming_properties) make_new
## PRIVATE
A SQLServer database connection.
Arguments:
- connection: the underlying connection.
- make_new: a function that returns a new connection.
Value connection make_new
## ICON data_input
Closes the connection, releasing the underlying database resources
immediately instead of waiting for them to be automatically released.
The connection is not usable afterwards.
close : Nothing
close self = self.connection.close
## ICON metadata
Returns the list of databases (or catalogs) for the connection.
databases : Vector Text
databases self = self.connection.databases
## ICON metadata
Returns the name of the current database (or catalog).
database : Text
database self = self.connection.database
## ICON data_input
Returns a new Connection with the specified database set as default.
Arguments:
- database: The name of the database to connect to.
@database (self-> Single_Choice display=Display.Always values=(self.databases . map d-> Option d d.pretty))
set_database : Text -> Connection ! SQL_Error
set_database self database =
if database . equals_ignore_case self.database then self else
self.make_new database Nothing Nothing
## ICON metadata
Returns the list of schemas for the connection within the current database (or catalog).
schemas : Vector Text
schemas self =
self.connection.read_single_column "show schemas in database" "name"
## ICON metadata
Returns the name of the current schema.
schema : Text
schema self = self.connection.schema
## ICON data_input
Returns a new Connection with the specified schema set as default.
Arguments:
- schema: The name of the schema to connect to.
@schema make_schema_selector
set_schema : Text -> Connection ! SQL_Error
set_schema self schema =
if schema . equals_ignore_case self.schema then self else
self.make_new Nothing schema Nothing
## GROUP Standard.Base.Metadata
ICON metadata
Gets a list of the table types.
table_types : Vector Text
table_types self = self.connection.table_types
## GROUP Standard.Base.Metadata
ICON metadata
Returns a materialized Table of all the matching views and tables.
? Temporary Tables
Note that the temporary tables may be created in a different schema
than the current one, so take this into account when filtering by schema.
Arguments:
- name_like: The table name pattern to search for. Supports SQL
wildcards (`%`, `_`). Defaults to `""` which means all tables are
selected.
- database: The database name to search in (default is current).
- schema: The schema name to search in (defaults to current).
If "*" is provided, all schemas are searched.
- types: The table types to search for. The list of possible values can
be obtained using the `table_types` method. Defaults to a set of most
commonly used table types, ignoring internal system tables or indices.
- all_fields: Return all the fields in the metadata table.
@types make_table_types_selector
@database (make_database_selector include_any=True)
@schema (make_schema_selector include_any=True)
tables : Text -> Text -> Text -> Vector -> Boolean -> Table
tables self name_like:Text="" database:Text=self.database schema:Text=self.schema types=["TABLE", "VIEW"] all_fields=False =
parsed_database = if database == "*" then Nothing else (if database == "" then self.database else database)
parsed_schema = if schema == "*" then Nothing else (if schema == "" then self.schema else schema)
self.connection.tables (if name_like == "" then Nothing else name_like) parsed_database parsed_schema types all_fields
## GROUP Standard.Base.Input
ICON data_input
Set up a query returning a Table object, which can be used to work with
data within the database or load it into memory.
Arguments:
- query: name of the table or sql statement to query.
If supplied as `Text`, the name is checked against the `tables` list to
determine if it is a table or a query.
- alias: optionally specify a friendly alias for the query.
! Error Conditions
- If provided with a `Raw_SQL` query or `Text` that looks like a query, if
any SQL error occurs when executing the query, a `SQL_Error` error is
raised.
- If provided with a `Table_Name` or a text short-hand and the table is
not found, a `Table_Not_Found` error is raised.
@query make_table_name_selector
query : Text | SQL_Query -> Text -> DB_Table ! Table_Not_Found
query self query alias="" = self.connection.query query alias
## GROUP Standard.Base.Input
ICON data_input
Execute the query and load the results into memory as a Table.
Arguments:
- query: name of the table or sql statement to query.
If supplied as `Text`, the name is checked against the `tables` list to
determine if it is a table or a query.
- limit: the maximum number of rows to read.
? Side Effects
Note that the `read` method is running without restrictions when the
output context is disabled, but it can technically cause side effects,
if it is provided with a DML query. Usually it is preferred to use
`execute_update` for DML queries, or if they are supposed to return
results, the `read` should be wrapped in an execution context check.
@query make_table_name_selector
@limit Rows_To_Read.default_widget
read : Text | SQL_Query -> Rows_To_Read -> Table ! Table_Not_Found
read self query (limit : Rows_To_Read = ..First_With_Warning 1000) =
self.connection.read query limit
## GROUP Standard.Base.Output
ICON data_output
Creates a new empty table in the database and returns a query referencing
the new table.
Arguments:
- table_name: the name of the table to create.
- structure: the structure of the table, provided as either an existing
`Table` (no data will be copied) or a `Vector` of `Column_Description`.
- primary_key: the names of the columns to use as the primary key. The
first column from the table is used by default. If it is set to
`Nothing` or an empty vector, no primary key will be created.
- temporary: if set to `True`, the table will be temporary, meaning that
it will be dropped once the `connection` is closed. Defaults to
`False`.
- allow_existing: Defaults to `False`, meaning that if the table with the
provided name already exists, an error will be raised. If set to `True`,
the existing table will be returned instead. Note that the existing
table is not guaranteed to have the same structure as the one provided.
- on_problems: the behavior to use when encountering non-fatal problems.
Defaults to reporting them as warnings.
! Error Conditions
- If a table with the given name already exists, then a
`Table_Already_Exists` error is raised.
- If a column type is not supported and is coerced to a similar
supported type, an `Inexact_Type_Coercion` problem is reported
according to the `on_problems` setting.
- If a column type is not supported and there is no replacement (e.g.
native Enso types), an `Unsupported_Type` error is raised.
- If the provided primary key columns are not present in the provided
table structure, a `Missing_Input_Columns` error is raised.
- An `SQL_Error` may be reported if there is a failure on the database
side.
? Dry Run if Output disabled
If performing output actions is disabled, only a dry run is performed
and no permanent changes occur. The operation will test for errors
(like missing columns) and if successful, return a temporary table with
a `Dry_Run_Operation` warning attached.
@structure make_structure_creator
create_table : Text -> Vector Column_Description | DB_Table | Table -> Vector Text | Nothing -> Boolean -> Boolean -> Problem_Behavior -> DB_Table ! Table_Already_Exists
create_table self (table_name : Text) (structure : Vector Column_Description | DB_Table | Table) (primary_key : (Vector Text | Nothing) = [first_column_name_in_structure structure]) (temporary : Boolean = False) (allow_existing : Boolean = False) (on_problems:Problem_Behavior = Problem_Behavior.Report_Warning) =
self.connection.create_table table_name structure primary_key temporary allow_existing on_problems
## ADVANCED
GROUP Standard.Base.Output
ICON data_output
Executes a raw update query. If the query inserts, updates or deletes
rows, the number of affected rows is returned; for other types of
queries (like creating or altering tables), 0 is returned.
Arguments:
- query: either raw SQL code as Text or an instance of SQL_Statement
representing the query to execute.
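> Example
  Run a raw DML statement. A minimal sketch; the table name is
  illustrative.

      affected_rows = connection.execute_update 'DELETE FROM "my_table"'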
execute_update : Text | SQL_Statement -> Integer
execute_update self query =
self.connection.execute_update query
## PRIVATE
Access the dialect.
dialect self = self.connection.dialect
## PRIVATE
Access the underlying JDBC connection.
jdbc_connection self = self.connection.jdbc_connection
## PRIVATE
Drops a table.
Arguments:
- table_name: the name of the table to drop.
- if_exists: if set to `True`, the operation will not fail if the table
does not exist. Defaults to `False`.
drop_table : Text -> Boolean -> Nothing
drop_table self table_name if_exists=False =
self.connection.drop_table table_name if_exists
## PRIVATE
Removes all rows from a table.
Arguments:
- table_name: the name of the table to truncate.
truncate_table : Text -> Nothing ! Table_Not_Found
truncate_table self table_name =
self.connection.truncate_table table_name
## PRIVATE
Returns the base `Connection` instance.
Used so that internal helper functions do not need to be replicated
on the 'subclasses'.
base_connection : Connection
base_connection self = self.connection

View File

@ -0,0 +1,708 @@
private
from Standard.Base import all hiding First, Last
import Standard.Base.Errors.Illegal_Argument.Illegal_Argument
import Standard.Base.Errors.Illegal_State.Illegal_State
import Standard.Base.Errors.Unimplemented.Unimplemented
import Standard.Table.Internal.Problem_Builder.Problem_Builder
import Standard.Table.Internal.Vector_Builder.Vector_Builder
from Standard.Table import Aggregate_Column, Column, Value_Type
from Standard.Table.Aggregate_Column.Aggregate_Column import all
from Standard.Table.Errors import Inexact_Type_Coercion
from Standard.Table.Internal.Storage import get_storage_for_column
import Standard.Database.Connection.Connection.Connection
import Standard.Database.DB_Column.DB_Column
import Standard.Database.DB_Table.DB_Table
import Standard.Database.Dialect
import Standard.Database.Internal.Base_Generator
import Standard.Database.Internal.Common.Database_Distinct_Helper
import Standard.Database.Internal.Common.Database_Join_Helper
import Standard.Database.Internal.Error_Mapper.Error_Mapper
import Standard.Database.Internal.Internals_Access
import Standard.Database.Internal.IR.Context.Context
import Standard.Database.Internal.IR.Context.Context_Extension
import Standard.Database.Internal.IR.From_Spec.From_Spec
import Standard.Database.Internal.IR.Internal_Column.Internal_Column
import Standard.Database.Internal.IR.Nulls_Order.Nulls_Order
import Standard.Database.Internal.IR.Order_Descriptor.Order_Descriptor
import Standard.Database.Internal.IR.Query.Query
import Standard.Database.Internal.IR.SQL_Expression.SQL_Expression
import Standard.Database.Internal.IR.SQL_Join_Kind.SQL_Join_Kind
## TODO replace with custom one
import Standard.Database.Internal.Postgres.Postgres_Error_Mapper.Postgres_Error_Mapper
import Standard.Database.Internal.Replace_Params.Replace_Params
import Standard.Database.Internal.SQL_Type_Mapping.SQL_Type_Mapping
import Standard.Database.Internal.SQL_Type_Reference.SQL_Type_Reference
import Standard.Database.Internal.Statement_Setter.Statement_Setter
import Standard.Database.SQL.SQL_Builder
import Standard.Database.SQL.SQL_Fragment
import Standard.Database.SQL_Statement.SQL_Statement
import Standard.Database.SQL_Type.SQL_Type
from Standard.Database.Errors import SQL_Error, Unsupported_Database_Operation
from Standard.Database.Internal.IR.Operation_Metadata import Date_Period_Metadata
from Standard.Database.Internal.Statement_Setter import fill_hole_default
import project.Internal.SQLServer_Type_Mapping.SQLServer_Type_Mapping
polyglot java import java.sql.Types as Java_Types
polyglot java import org.enso.database.JDBCUtils
## PRIVATE
The dialect of SQL Server databases.
sqlserver : SQLServer_Dialect
sqlserver =
SQLServer_Dialect.Value make_dialect_operations
## PRIVATE
The dialect of SQL Server databases.
type SQLServer_Dialect
## PRIVATE
The dialect of SQL Server databases.
Value dialect_operations
## PRIVATE
Name of the dialect.
name : Text
name self = sqlserver_dialect_name
## PRIVATE
to_text : Text
to_text self = "SQL_Server_Dialect"
## PRIVATE
A function which generates SQL code from the internal representation
according to the specific dialect.
generate_sql : Query -> SQL_Statement
generate_sql self query =
Base_Generator.generate_query self query . build
## PRIVATE
Generates SQL to truncate a table.
generate_truncate_table_sql : Text -> SQL_Builder
generate_truncate_table_sql self table_name =
Base_Generator.truncate_table_truncate_table_style self table_name
## PRIVATE
Generates the SQL modifier for limiting the number of rows, together
with its position in the generated query. For SQL Server this is a
`TOP n` clause placed right after `SELECT`.
get_limit_sql_modifier : Integer -> Any
get_limit_sql_modifier self limit =
[150, SQL_Builder.code (" TOP " + limit.to_text)]
## PRIVATE
Wraps and possibly escapes the identifier so that it can be used in a
generated query regardless of what characters it contains.
The quotes used will depend on the dialect.
wrap_identifier : Text -> Text
wrap_identifier self identifier =
Base_Generator.wrap_in_quotes identifier
## PRIVATE
Prepares an ordering descriptor.
One of the purposes of this method is to verify if the expected ordering
settings are supported by the given database backend.
Arguments:
- internal_column: the column to order by.
- sort_direction: the direction of the ordering.
- text_ordering: If provided, specifies that the column should be treated
as text values according to the provided ordering. For non-text types,
it should be set to `Nothing`.
prepare_order_descriptor : Internal_Column -> Sort_Direction -> Nothing | Text_Ordering -> Order_Descriptor
prepare_order_descriptor self internal_column sort_direction text_ordering =
make_order_descriptor internal_column sort_direction text_ordering
## PRIVATE
Prepares a distinct operation.
prepare_distinct : DB_Table -> Vector -> Case_Sensitivity -> Problem_Builder -> DB_Table
prepare_distinct self table key_columns case_sensitivity problem_builder =
table_name_deduplicator = table.connection.base_connection.table_naming_helper.create_unique_name_strategy
table_name_deduplicator.mark_used table.name
inner_table_alias = table_name_deduplicator.make_unique table.name+"_inner"
setup = table.context.as_subquery inner_table_alias [table.internal_columns]
new_columns = setup.new_columns.first
column_mapping = Dictionary.from_vector <| new_columns.map c-> [c.name, c]
new_key_columns = key_columns.map c-> column_mapping.at c.name
type_mapping = self.get_type_mapping
distinct_expressions = new_key_columns.map column->
value_type = type_mapping.sql_type_to_value_type column.sql_type_reference.get
Database_Distinct_Helper.make_distinct_expression case_sensitivity problem_builder column value_type
new_context = Context.for_subquery setup.subquery . add_extension (make_distinct_extension distinct_expressions)
table.updated_context_and_columns new_context new_columns subquery=True
## PRIVATE
A heuristic used by `Connection.query` to determine if a given text looks
like a SQL query for the given dialect or is rather a table name.
is_probably_a_query : Text -> Boolean
is_probably_a_query self text = Base_Generator.is_probably_a_query text
## PRIVATE
Returns the mapping between SQL types of this dialect and Enso
`Value_Type`.
get_type_mapping : SQL_Type_Mapping
get_type_mapping self = SQLServer_Type_Mapping
## PRIVATE
get_statement_setter : Statement_Setter
get_statement_setter self =
custom_fill_hole stmt i type_hint value = case value of
Nothing ->
java_type = case type_hint of
Nothing -> Java_Types.NULL
_ ->
## SQLServer needs its NULLs to be typed, at least for TIME columns.
type_mapping = self.get_type_mapping
sql_type = type_mapping.value_type_to_sql type_hint Problem_Behavior.Ignore
sql_type.typeid
stmt.setNull i java_type
_ : Time_Of_Day -> JDBCUtils.setLocalTimeViaTimeStamp stmt i value
# Fallback to default logic for everything else
_ -> fill_hole_default stmt i type_hint value
Statement_Setter.Value custom_fill_hole
## PRIVATE
make_cast : Internal_Column -> SQL_Type -> (SQL_Expression -> SQL_Type_Reference) -> Internal_Column
make_cast self column target_type infer_result_type_from_database_callback =
mapping = self.get_type_mapping
source_type = mapping.sql_type_to_value_type column.sql_type_reference.get
target_value_type = mapping.sql_type_to_value_type target_type
# Boolean to Numeric casts need special handling:
transformed_expression = case source_type.is_boolean && target_value_type.is_numeric of
True ->
SQL_Expression.Operation "IIF" [Internals_Access.column_expression column, SQL_Expression.Literal "1", SQL_Expression.Literal "0"]
False -> Internals_Access.column_expression column
target_type_sql_text = mapping.sql_type_to_text target_type
new_expression = SQL_Expression.Operation "CAST" [transformed_expression, SQL_Expression.Literal target_type_sql_text]
new_sql_type_reference = infer_result_type_from_database_callback new_expression
Internal_Column.Value column.name new_sql_type_reference new_expression
## PRIVATE
needs_execute_query_for_type_inference : Text | SQL_Statement -> Boolean
needs_execute_query_for_type_inference self statement =
_ = statement
False
## PRIVATE
supports_separate_nan : Boolean
supports_separate_nan self = True
## PRIVATE
supports_negative_round_decimal_places : Boolean
supports_negative_round_decimal_places self = True
## PRIVATE
supports_float_round_decimal_places : Boolean
supports_float_round_decimal_places self = True
## PRIVATE
Specifies whether the Database supports CREATE TEMPORARY TABLE syntax.
supports_temporary_table_syntax : Boolean
supports_temporary_table_syntax self = False
## PRIVATE
adapt_unified_column : Internal_Column -> Value_Type -> (SQL_Expression -> SQL_Type_Reference) -> Internal_Column
adapt_unified_column self column approximate_result_type infer_result_type_from_database_callback =
_ = [approximate_result_type, infer_result_type_from_database_callback]
column
## PRIVATE
Add an extra cast to adjust the output type of certain operations with
certain arguments.
It is used when the normal type inference provided by the database engine
needs to be adjusted.
In most cases this method will just return the expression unchanged; it
is used only to override the type in cases where the default one that the
database uses is not what we want.
cast_op_type self (op_kind:Text) (args:(Vector Internal_Column)) (expression:SQL_Expression) =
_ = [op_kind, args]
expression
## PRIVATE
prepare_fetch_types_query : SQL_Expression -> Context -> SQL_Statement
prepare_fetch_types_query self expression context =
Base_Generator.default_fetch_types_query self expression context where_filter_always_false_literal="1=0"
## PRIVATE
check_aggregate_support : Aggregate_Column -> Boolean ! Unsupported_Database_Operation
check_aggregate_support self aggregate =
_ = aggregate
True
## PRIVATE
Checks if an operation is supported by the dialect.
is_supported : Text -> Boolean
is_supported self operation =
self.dialect_operations.is_supported operation
## PRIVATE
The default table types to use when listing tables.
default_table_types : Vector Text
default_table_types self =
["TABLE", "VIEW", "TEMPORARY TABLE", "TEMPORARY VIEW", "MATERIALIZED VIEW"]
## PRIVATE
get_error_mapper : Error_Mapper
get_error_mapper self = Postgres_Error_Mapper
## PRIVATE
The dialect-dependent strategy to get the Primary Key for a given table.
Returns `Nothing` if the key is not defined.
fetch_primary_key : Connection -> Text -> Vector Text ! Nothing
fetch_primary_key self connection table_name =
Dialect.default_fetch_primary_key connection table_name
## PRIVATE
Prepares metadata for an operation taking a date/time period and checks
if the given period is supported.
prepare_metadata_for_period : Date_Period | Time_Period -> Value_Type -> Any
prepare_metadata_for_period self period operation_input_type =
Date_Period_Metadata.Value period operation_input_type
## PRIVATE
Returns true if the `replace` parameters are supported by this backend.
if_replace_params_supports : Replace_Params -> Any -> Any
if_replace_params_supports self replace_params ~action =
if supported_replace_params.contains replace_params then action else replace_params.throw_unsupported sqlserver_dialect_name
## PRIVATE
value_type_for_upload_of_existing_column : DB_Column -> Value_Type
value_type_for_upload_of_existing_column self column = case column of
# Return the type as-is for database columns.
_ : DB_Column -> column.value_type
_ : Column ->
base_type = column.value_type
case base_type of
Value_Type.Decimal precision scale ->
used_scale = scale.if_nothing 12
used_precision = Math.min 38 (precision.if_nothing 38)
new_type = Value_Type.Decimal used_precision used_scale
if used_scale==scale && used_precision==precision then new_type else
Warning.attach (Inexact_Type_Coercion.Warning base_type new_type unavailable=False) new_type
_ -> base_type
## PRIVATE
make_dialect_operations =
cases = [["LOWER", Base_Generator.make_function "LOWER"], ["UPPER", Base_Generator.make_function "UPPER"]]
text = [starts_with, contains, ends_with, agg_shortest, agg_longest, make_case_sensitive, ["REPLACE", replace], left, right]+concat_ops+cases+trim_ops
counts = [agg_count_is_null, agg_count_empty, agg_count_not_empty, ["COUNT_DISTINCT", agg_count_distinct], ["COUNT_DISTINCT_INCLUDE_NULL", agg_count_distinct_include_null]]
arith_extensions = [is_nan, is_inf, floating_point_div, mod_op, decimal_div, decimal_mod, ["ROW_MIN", Base_Generator.make_function "LEAST"], ["ROW_MAX", Base_Generator.make_function "GREATEST"]]
bool = [bool_or]
stddev_pop = ["STDDEV_POP", Base_Generator.make_function "stddev_pop"]
stddev_samp = ["STDDEV_SAMP", Base_Generator.make_function "stddev_samp"]
stats = [agg_median, agg_mode, agg_percentile, stddev_pop, stddev_samp]
date_ops = [make_extract_as_int "year", make_extract_as_int "quarter", make_extract_as_int "month", make_extract_as_int "week", make_extract_as_int "day", make_extract_as_int "hour", make_extract_as_int "minute", make_extract_fractional_as_int "second", make_extract_fractional_as_int "millisecond" modulus=1000, make_extract_fractional_as_int "microsecond" modulus=1000, ["date_add", make_date_add], ["date_diff", make_date_diff], ["date_trunc_to_day", make_date_trunc_to_day]]
special_overrides = []
other = [["RUNTIME_ERROR", make_runtime_error_op]]
my_mappings = text + counts + stats + first_last_aggregators + arith_extensions + bool + date_ops + special_overrides + other
Base_Generator.base_dialect_operations . extend_with my_mappings
## PRIVATE
agg_count_is_null = Base_Generator.lift_unary_op "COUNT_IS_NULL" arg->
SQL_Builder.code "COUNT_IF(" ++ arg.paren ++ " IS NULL)"
## PRIVATE
agg_count_empty = Base_Generator.lift_unary_op "COUNT_EMPTY" arg->
SQL_Builder.code "COUNT_IF("++ arg.paren ++ " IS NULL OR " ++ arg.paren ++ " == '')"
## PRIVATE
agg_count_not_empty = Base_Generator.lift_unary_op "COUNT_NOT_EMPTY" arg->
SQL_Builder.code "COUNT_IF(" ++ arg.paren ++ " IS NOT NULL AND " ++ arg.paren ++ " != '')"
## PRIVATE
agg_median = Base_Generator.lift_unary_op "MEDIAN" arg->
median = SQL_Builder.code "MEDIAN(" ++ arg ++ ")"
has_nan = SQL_Builder.code "BOOLOR_AGG(" ++ arg ++ " = 'NaN'::Double)"
SQL_Builder.code "CASE WHEN " ++ has_nan ++ " THEN 'NaN'::Double ELSE " ++ median ++ " END"
## PRIVATE
agg_mode = Base_Generator.lift_unary_op "MODE" arg->
SQL_Builder.code "MODE(" ++ arg ++ ")"
## PRIVATE
agg_percentile = Base_Generator.lift_binary_op "PERCENTILE" p-> expr->
percentile = SQL_Builder.code "percentile_cont(" ++ p ++ ") WITHIN GROUP (ORDER BY " ++ expr ++ ")"
has_nan = SQL_Builder.code "BOOLOR_AGG(" ++ expr ++ " = 'NaN'::Double)"
SQL_Builder.code "CASE WHEN " ++ has_nan ++ " THEN 'NaN' ELSE " ++ percentile ++ " END"
## PRIVATE
These are not written in the most efficient way, but in a way that makes
them compatible with other group-by aggregations out of the box. In the
future, we may want to consider alternative solutions.
first_last_aggregators =
first = make_first_aggregator reverse=False ignore_null=False
first_not_null = make_first_aggregator reverse=False ignore_null=True
last = make_first_aggregator reverse=True ignore_null=False
last_not_null = make_first_aggregator reverse=True ignore_null=True
[["FIRST", first], ["FIRST_NOT_NULL", first_not_null], ["LAST", last], ["LAST_NOT_NULL", last_not_null]]
## PRIVATE
make_first_aggregator reverse ignore_null args =
if args.length < 2 then Error.throw (Illegal_State.Error "Insufficient number of arguments for the operation.") else
result_expr = args.first
order_bys = args.drop 1
method_name = if reverse then "LAST_VALUE" else "FIRST_VALUE"
filter_clause = if ignore_null then ") IGNORE NULLS OVER" else ") OVER"
order_clause = SQL_Builder.code " ORDER BY " ++ SQL_Builder.join "," order_bys
SQL_Builder.code (method_name + "(") ++ result_expr ++ filter_clause ++ order_clause
## PRIVATE
agg_shortest = Base_Generator.lift_unary_op "SHORTEST" arg->
SQL_Builder.code "FIRST_VALUE(" ++ arg ++ ") IGNORE NULLS OVER (ORDER BY LENGTH(" ++ arg ++ "))"
## PRIVATE
agg_longest = Base_Generator.lift_unary_op "LONGEST" arg->
SQL_Builder.code "FIRST_VALUE(" ++ arg ++ ") IGNORE NULLS OVER (ORDER BY LENGTH(" ++ arg ++ ") DESC)"
## PRIVATE
concat_ops =
make_raw_concat_expr expr separator =
SQL_Builder.code "string_agg(" ++ expr ++ ", " ++ separator ++ ")"
concat = Base_Generator.make_concat make_raw_concat_expr make_contains_expr
[["CONCAT", concat (has_quote=False)], ["CONCAT_QUOTE_IF_NEEDED", concat (has_quote=True)]]
## PRIVATE
trim_ops =
whitespace = "' ' || CHR(9) || CHR(10) || CHR(13)"
make_fn fn_name = Base_Generator.lift_binary_op fn_name input-> chars-> case chars of
Nothing -> SQL_Builder.code fn_name+"(" ++ input ++ ", " ++ whitespace ++ ")"
_ ->
case chars.is_constant of
True ->
const = chars.fragments.vec.first.object
if const.is_nothing || const.is_empty then SQL_Builder.code fn_name+"(" ++ input ++ ", " ++ whitespace ++ ")" else
SQL_Builder.code fn_name+"(" ++ input ++ ", " ++ chars ++ ")"
False ->
SQL_Builder.code "CASE WHEN " ++ chars ++ " IS NULL OR " ++ chars ++ " = '' THEN " ++ fn_name ++ "(" ++ input ++ ") ELSE " ++ fn_name ++ "(" ++ input ++ ", " ++ chars ++ ") END"
[make_fn "TRIM", make_fn "LTRIM", make_fn "RTRIM"]
## PRIVATE
agg_count_distinct args = if args.is_empty then (Error.throw (Illegal_Argument.Error "COUNT_DISTINCT requires at least one argument.")) else
case args.length == 1 of
True ->
## A single null value will be skipped.
SQL_Builder.code "COUNT(DISTINCT " ++ args.first ++ ")"
False ->
## A tuple of nulls is not a null, so it will not be skipped - but
we want to ignore all-null columns. So we manually filter them
out.
count = SQL_Builder.code "COUNT(DISTINCT (" ++ SQL_Builder.join ", " args ++ "))"
are_nulls = args.map arg-> arg.paren ++ " IS NULL"
all_nulls_filter = SQL_Builder.code " FILTER (WHERE NOT (" ++ SQL_Builder.join " AND " are_nulls ++ "))"
(count ++ all_nulls_filter).paren
## PRIVATE
agg_count_distinct_include_null args = case args.length == 1 of
True ->
arg = args.first
count = SQL_Builder.code "COUNT(DISTINCT " ++ arg ++ ")"
all_nulls_case = SQL_Builder.code "CASE WHEN COUNT(CASE WHEN " ++ arg ++ " IS NULL THEN 1 END) > 0 THEN 1 ELSE 0 END"
count ++ " + " ++ all_nulls_case
False -> Error.throw (Illegal_Argument.Error "COUNT_DISTINCT supports only single arguments in SQLServer.")
## PRIVATE
starts_with = Base_Generator.lift_binary_sql_function "STARTS_WITH" "STARTSWITH"
## PRIVATE
ends_with = Base_Generator.lift_binary_sql_function "ENDS_WITH" "ENDSWITH"
## PRIVATE
contains = Base_Generator.lift_binary_sql_function "CONTAINS" "CONTAINS"
## PRIVATE
make_contains_expr expr substring = contains [expr, substring]
## PRIVATE
make_case_sensitive = Base_Generator.lift_unary_op "MAKE_CASE_SENSITIVE" arg->
SQL_Builder.code "((" ++ arg ++ ') COLLATE "ucs_basic")'
## PRIVATE
left = Base_Generator.lift_binary_op "LEFT" str-> n->
SQL_Builder.code "left(" ++ str ++ ", CAST(" ++ n ++ " AS INT))"
## PRIVATE
right = Base_Generator.lift_binary_op "RIGHT" str-> n->
SQL_Builder.code "right(" ++ str ++ ", CAST(" ++ n ++ " AS INT))"
## PRIVATE
make_order_descriptor internal_column sort_direction text_ordering =
nulls = case sort_direction of
Sort_Direction.Ascending -> Nulls_Order.First
Sort_Direction.Descending -> Nulls_Order.Last
case text_ordering of
Nothing ->
Order_Descriptor.Value (Internals_Access.column_expression internal_column) sort_direction nulls_order=nulls collation=Nothing
_ ->
## In the future we can modify this error to suggest using a custom defined collation.
if text_ordering.sort_digits_as_numbers then Error.throw (Unsupported_Database_Operation.Error "Natural ordering is currently not supported. You may need to materialize the Table to perform this operation.") else
case text_ordering.case_sensitivity of
Case_Sensitivity.Default ->
Order_Descriptor.Value (Internals_Access.column_expression internal_column) sort_direction nulls_order=nulls collation=Nothing
Case_Sensitivity.Sensitive ->
Order_Descriptor.Value (Internals_Access.column_expression internal_column) sort_direction nulls_order=nulls collation="ucs_basic"
Case_Sensitivity.Insensitive locale -> case locale == Locale.default of
False ->
Error.throw (Unsupported_Database_Operation.Error "Case insensitive ordering with custom locale is currently not supported. You may need to materialize the Table to perform this operation.")
True ->
upper = SQL_Expression.Operation "UPPER" [Internals_Access.column_expression internal_column]
folded_expression = SQL_Expression.Operation "LOWER" [upper]
Order_Descriptor.Value folded_expression sort_direction nulls_order=nulls collation=Nothing
## PRIVATE
is_nan = Base_Generator.lift_unary_op "IS_NAN" arg->
(arg ++ " in (double precision 'NaN')").paren
## PRIVATE
is_inf = Base_Generator.lift_unary_op "IS_INF" arg->
(arg ++ " in (double precision 'Infinity', double precision '-Infinity')").paren
## PRIVATE
bool_or = Base_Generator.lift_unary_op "BOOL_OR" arg->
SQL_Builder.code "bool_or(" ++ arg ++ ")"
## PRIVATE
floating_point_div = Base_Generator.lift_binary_op "/" x-> y->
SQL_Builder.code "CAST(" ++ x ++ " AS double precision) / CAST(" ++ y ++ " AS double precision)"
## PRIVATE
mod_op = Base_Generator.lift_binary_op "MOD" x-> y->
x ++ " - FLOOR(CAST(" ++ x ++ " AS double precision) / CAST(" ++ y ++ " AS double precision)) * " ++ y
## PRIVATE
decimal_div = Base_Generator.lift_binary_op "DECIMAL_DIV" x-> y->
SQL_Builder.code "CAST(" ++ x ++ " AS decimal) / CAST(" ++ y ++ " AS decimal)"
## PRIVATE
decimal_mod = Base_Generator.lift_binary_op "DECIMAL_MOD" x-> y->
x ++ " - FLOOR(CAST(" ++ x ++ " AS decimal) / CAST(" ++ y ++ " AS decimal)) * " ++ y
## PRIVATE
supported_replace_params : Hashset Replace_Params
supported_replace_params =
e0 = [Replace_Params.Value Text Case_Sensitivity.Default False, Replace_Params.Value Text Case_Sensitivity.Default True, Replace_Params.Value Text Case_Sensitivity.Sensitive False]
e1 = [Replace_Params.Value Text Case_Sensitivity.Sensitive True, Replace_Params.Value Text Case_Sensitivity.Insensitive False, Replace_Params.Value Text Case_Sensitivity.Insensitive True]
e2 = [Replace_Params.Value Regex Case_Sensitivity.Default False, Replace_Params.Value Regex Case_Sensitivity.Default True, Replace_Params.Value Regex Case_Sensitivity.Sensitive False]
e3 = [Replace_Params.Value Regex Case_Sensitivity.Sensitive True, Replace_Params.Value Regex Case_Sensitivity.Insensitive False, Replace_Params.Value Regex Case_Sensitivity.Insensitive True]
e4 = [Replace_Params.Value DB_Column Case_Sensitivity.Default False, Replace_Params.Value DB_Column Case_Sensitivity.Sensitive False]
Hashset.from_vector <| e0 + e1 + e2 + e3 + e4
## PRIVATE
replace : Vector SQL_Builder -> Any -> SQL_Builder
replace args metadata =
input = args.at 0
pattern = args.at 1
replacement = args.at 2
## `raw_pattern` is a `Text` or `Regex`; it's the same value as `pattern`, but not
embedded in IR.
raw_pattern = metadata.at 0
replace_params = metadata.at 1
expression = case replace_params.input_type of
Text ->
## To use REGEXP_REPLACE on a non-regex, we have to escape it.
escaped_pattern = SQL_Builder.interpolation (Regex.escape raw_pattern)
case replace_params.only_first of
False -> case replace_params.case_sensitivity of
Case_Sensitivity.Insensitive _ ->
SQL_Builder.code "REGEXP_REPLACE(" ++ input ++ ", " ++ escaped_pattern ++ ", " ++ replacement ++ ", 'ig')"
_ ->
SQL_Builder.code "REPLACE(" ++ input ++ ", " ++ pattern ++ ", " ++ replacement ++ ")"
True -> case replace_params.case_sensitivity of
Case_Sensitivity.Insensitive _ ->
SQL_Builder.code "REGEXP_REPLACE(" ++ input ++ ", " ++ escaped_pattern ++ ", " ++ replacement ++ ", 'i')"
_ ->
SQL_Builder.code "REGEXP_REPLACE(" ++ input ++ ", " ++ escaped_pattern ++ ", " ++ replacement ++ ")"
Regex ->
pattern_string = SQL_Builder.interpolation raw_pattern.pattern_string
case replace_params.only_first of
False -> case replace_params.case_sensitivity of
Case_Sensitivity.Insensitive _ ->
SQL_Builder.code "REGEXP_REPLACE(" ++ input ++ ", " ++ pattern_string ++ ", " ++ replacement ++ ", 'ig')"
_ ->
SQL_Builder.code "REGEXP_REPLACE(" ++ input ++ ", " ++ pattern_string ++ ", " ++ replacement ++ ", 'g')"
True -> case replace_params.case_sensitivity of
Case_Sensitivity.Insensitive _ ->
SQL_Builder.code "REGEXP_REPLACE(" ++ input ++ ", " ++ pattern_string ++ ", " ++ replacement ++ ", 'i')"
_ ->
SQL_Builder.code "REGEXP_REPLACE(" ++ input ++ ", " ++ pattern_string ++ ", " ++ replacement ++ ")"
DB_Column ->
case replace_params.only_first of
False -> case replace_params.case_sensitivity of
Case_Sensitivity.Insensitive _ ->
Nothing
_ ->
SQL_Builder.code "REPLACE(" ++ input ++ ", " ++ pattern ++ ", " ++ replacement ++ ")"
True -> Nothing
expression.if_nothing (replace_params.throw_unsupported sqlserver_dialect_name)
## PRIVATE
make_extract_as_int enso_name sql_name=enso_name =
Base_Generator.lift_unary_op enso_name arg->
as_int32 <| SQL_Builder.code "EXTRACT(" ++ sql_name ++ " FROM " ++ arg ++ ")"
## PRIVATE
make_extract_fractional_as_int enso_name sql_name=enso_name modulus=Nothing =
Base_Generator.lift_unary_op enso_name arg->
result = as_int32 <| SQL_Builder.code "TRUNC(EXTRACT(" ++ sql_name ++ " FROM " ++ arg ++ "))"
case modulus of
Nothing -> result
_ : Integer ->
(result ++ (" % "+modulus.to_text)).paren
## PRIVATE
make_date_add arguments (metadata : Date_Period_Metadata) =
if arguments.length != 2 then Error.throw (Illegal_State.Error "date_add expects exactly 2 sub expressions. This is a bug in Database library.") else
expr = arguments.at 0
amount = arguments.at 1
interval_arg = case metadata.period of
Date_Period.Year ->
"years=>1"
Date_Period.Quarter ->
"months=>3"
Date_Period.Month ->
"months=>1"
Date_Period.Week _ ->
"weeks=>1"
Date_Period.Day ->
"days=>1"
Time_Period.Day ->
"hours=>24"
Time_Period.Hour ->
"hours=>1"
Time_Period.Minute ->
"mins=>1"
Time_Period.Second ->
"secs=>1"
Time_Period.Millisecond ->
"secs=>0.001"
Time_Period.Microsecond ->
"secs=>0.000001"
interval_expression = SQL_Builder.code "make_interval(" ++ interval_arg ++ ")"
shifted = SQL_Builder.code "(" ++ expr ++ " + (" ++ amount ++ " * " ++ interval_expression ++ "))"
case metadata.input_value_type of
Value_Type.Date ->
SQL_Builder.code "(" ++ shifted ++ "::date)"
_ -> shifted
## PRIVATE
make_date_diff arguments (metadata : Date_Period_Metadata) =
if arguments.length != 2 then Error.throw (Illegal_State.Error "date_diff expects exactly 2 sub expressions. This is a bug in Database library.") else
start = arguments.at 0
end = arguments.at 1
truncate expr =
SQL_Builder.code "TRUNC(" ++ expr ++ ")"
# `age` computes a 'symbolic' difference expressed in years, months and days.
extract_years =
as_int32 <| SQL_Builder.code "EXTRACT(YEARS FROM age(" ++ end ++ ", " ++ start ++ "))"
# To get total months, we need to sum up with whole years.
extract_months =
months = as_int32 <|
SQL_Builder.code "EXTRACT(MONTHS FROM age(" ++ end ++ ", " ++ start ++ "))"
SQL_Builder.code "(" ++ extract_years ++ " * 12 + " ++ months ++ ")"
## To get total days, we cannot use `age`, because we cannot convert an
amount of months to days (month lengths vary). Instead we rely on `-`
returning an interval based in 'raw' days.
extract_days =
as_int32 <| case metadata.input_value_type of
## For pure 'date' datatype, the difference is a simple integer
count of days.
Value_Type.Date -> (end ++ " - " ++ start).paren
# For others, it is an interval, so we need to extract.
_ -> SQL_Builder.code "EXTRACT(DAYS FROM (" ++ end ++ " - " ++ start ++ "))"
## We round the amount of seconds towards zero, as we only count full
elapsed seconds in the interval.
Note that it is important that the interval is computed using `-`; the
symbolic `age` has no clear mapping to a count of days, which would
skew the result.
extract_seconds =
seconds_numeric = SQL_Builder.code "EXTRACT(EPOCH FROM (" ++ end ++ " - " ++ start ++ "))"
as_int64 (truncate seconds_numeric)
case metadata.period of
Date_Period.Year -> extract_years
Date_Period.Month -> extract_months
Date_Period.Quarter -> (extract_months ++ " / 3").paren
Date_Period.Week _ -> (extract_days ++ " / 7").paren
Date_Period.Day -> extract_days
## EXTRACT HOURS/MINUTES would yield only a date part, but we need
the total, which is most easily achieved via EPOCH
Time_Period.Hour -> (extract_seconds ++ " / 3600").paren
Time_Period.Minute -> (extract_seconds ++ " / 60").paren
Time_Period.Second -> extract_seconds
Time_Period.Day -> case metadata.input_value_type of
Value_Type.Date -> extract_days
# Time_Period.Day is treated as 24 hours, so for types that support time we use the same algorithm like for hours, but divide by 24.
_ -> (extract_seconds ++ " / (3600 * 24)").paren
## The EPOCH gives back just the integer number of seconds, without
the fractional part. We recover the fractional part using
MILLISECONDS - but that does not give the _total_, just the
'seconds of minute' part, expressed in milliseconds. So we need
to merge both - but then the seconds of minute would appear twice,
so we `%` the milliseconds to keep just the fractional part and
sum both.
Time_Period.Millisecond ->
millis = truncate <|
SQL_Builder.code "EXTRACT(MILLISECONDS FROM (" ++ end ++ " - " ++ start ++ "))"
as_int64 <|
((extract_seconds ++ " * 1000").paren ++ " + " ++ (millis ++ " % 1000").paren).paren
Time_Period.Microsecond ->
micros = SQL_Builder.code "EXTRACT(MICROSECONDS FROM (" ++ end ++ " - " ++ start ++ "))"
as_int64 <|
((extract_seconds ++ " * 1000000").paren ++ " + " ++ (micros ++ " % 1000000").paren).paren
## PRIVATE
make_date_trunc_to_day arguments =
if arguments.length != 1 then Error.throw (Illegal_State.Error "date_trunc_to_day expects exactly one sub expression. This is a bug in the Database library.") else
expr = arguments.at 0
SQL_Builder.code "(DATE_TRUNC('day'," ++ expr ++ ") :: DATE)"
## PRIVATE
Alters the expression, casting the value to a 64-bit integer.
TODO probably remove
as_int64 expr =
SQL_Builder.code "(" ++ expr ++ "::int8)"
## PRIVATE
Alters the expression, casting the value to a 32-bit integer.
TODO probably remove
as_int32 expr =
SQL_Builder.code "(" ++ expr ++ "::int4)"
## PRIVATE
The RUNTIME_ERROR operation allows the query to compile fine and does
not prevent it from running, as long as the branch containing it is not
taken. But if the branch is evaluated, it ensures that the query fails.
This query never returns a value, so its type should be polymorphic. However,
that is not possible - so currently it just 'pretends' that it would return a
Boolean - because that is the type we expect in the use-case. This can be
altered if needed.
It takes a variable as the second argument - it can be any value that
is not statically known. This ensures that the optimizer will not be
able to pre-compute the expression too early (which could make the
query fail spuriously). See `make_invariant_check` in
`Lookup_Query_Helper` for an example.
make_runtime_error_op arguments =
if arguments.length != 2 then
Panic.throw (Illegal_Argument.Error "RUNTIME_ERROR takes exactly 2 arguments (error message and a variable to ensure deferred execution).")
error_message = arguments.at 0
variable_to_defer = arguments.at 1
SQL_Builder.code "CAST('[ENSO INVARIANT VIOLATED: '||" ++ error_message ++ "||'] '||COALESCE(" ++ variable_to_defer ++ "::TEXT,'NULL') AS BOOLEAN)"
## PRIVATE
make_distinct_extension expressions =
run_generator sql_expressions =
SQL_Builder.code "DISTINCT ON (" ++ (SQL_Builder.join ", " sql_expressions) ++ ") "
Context_Extension.Value position=120 expressions=expressions run_generator=run_generator
## PRIVATE
sqlserver_dialect_name = "SQL Server"

View File

@ -0,0 +1,153 @@
private
from Standard.Base import all
import Standard.Base.Data.Numbers.Positive_Integer
import Standard.Base.Errors.Illegal_Argument.Illegal_Argument
import Standard.Table.Internal.Java_Exports
from Standard.Table import Bits, Value_Type
from Standard.Table.Errors import Inexact_Type_Coercion
import Standard.Database.Internal.Column_Fetcher as Column_Fetcher_Module
import Standard.Database.Internal.Column_Fetcher.Column_Fetcher
import Standard.Database.Internal.IR.SQL_Expression.SQL_Expression
import Standard.Database.Internal.SQL_Type_Mapping
import Standard.Database.Internal.SQL_Type_Reference.SQL_Type_Reference
import Standard.Database.SQL_Type.SQL_Type
from Standard.Database.Errors import Unsupported_Database_Operation
polyglot java import java.sql.Types
## PRIVATE
type SQLServer_Type_Mapping
## PRIVATE
value_type_to_sql : Value_Type -> Problem_Behavior -> SQL_Type
value_type_to_sql value_type on_problems =
result = case value_type of
Value_Type.Boolean -> SQL_Type.Value Types.BIT "BIT"
## A SQLServer TinyInt is unsigned with a range of 0-255,
so we have to use SmallInt to be able to handle the negative values in our Byte type.
Value_Type.Byte -> SQL_Type.Value Types.SMALLINT "SMALLINT"
Value_Type.Integer Bits.Bits_16 -> SQL_Type.Value Types.SMALLINT "SMALLINT"
Value_Type.Integer Bits.Bits_32 -> SQL_Type.Value Types.INTEGER "INTEGER"
Value_Type.Integer Bits.Bits_64 -> SQL_Type.Value Types.BIGINT "BIGINT"
Value_Type.Float Bits.Bits_32 -> SQL_Type.Value Types.REAL "REAL"
Value_Type.Float Bits.Bits_64 -> SQL_Type.Value Types.DOUBLE "FLOAT"
Value_Type.Decimal precision scale -> case precision of
# If precision is not set, scale is also lost because SQL is unable to express a scale without a precision.
Nothing -> SQL_Type.Value Types.DECIMAL "DECIMAL" Nothing Nothing
# Scale can be set or not, if precision is given, so no check needed.
_ -> SQL_Type.Value Types.DECIMAL "DECIMAL" precision scale
Value_Type.Char size variable ->
case variable of
True ->
is_unbounded = case size of
Nothing -> True
Positive_Integer.Value integer -> integer == max_precision
case is_unbounded of
True -> SQL_Type.Value Types.VARCHAR "NVARCHAR(MAX)"
False -> SQL_Type.Value Types.VARCHAR "NVARCHAR" size
False -> SQL_Type.Value Types.CHAR "NCHAR" size
Value_Type.Time -> SQL_Type.Value Types.TIME "TIME"
Value_Type.Date -> SQL_Type.Value Types.DATE "DATE"
Value_Type.Date_Time with_timezone ->
type_name = if with_timezone then "DATETIMEOFFSET" else "DATETIME2"
SQL_Type.Value Types.TIMESTAMP type_name
Value_Type.Binary size variable_length ->
## Max size for BINARY and VARBINARY is 8000, so we have to use VARBINARY(MAX) for larger sizes.
need_varbinary_max = size.is_nothing || (size > 8000)
case need_varbinary_max of
True -> SQL_Type.Value Types.BINARY "VARBINARY(MAX)"
False -> case variable_length of
True -> SQL_Type.Value Types.BINARY "VARBINARY" size
False -> SQL_Type.Value Types.BINARY "BINARY" size
Value_Type.Mixed -> Error.throw (Unsupported_Database_Operation.Error "SQLServer tables do not support Mixed types.")
Value_Type.Unsupported_Data_Type type_name underlying_type ->
underlying_type.if_nothing <| Error.throw <| Illegal_Argument.Error <|
"An unsupported SQL type ["+type_name.to_text+"] cannot be converted into an SQL type because it did not contain the SQL metadata needed to reconstruct it."
approximated_value_type = SQLServer_Type_Mapping.sql_type_to_value_type result
problems = if approximated_value_type == value_type then [] else [Inexact_Type_Coercion.Warning value_type approximated_value_type]
on_problems.attach_problems_before problems result
## PRIVATE
sql_type_to_value_type : SQL_Type -> Value_Type
sql_type_to_value_type sql_type =
case sql_type.typeid of
Types.BIT -> Value_Type.Boolean
Types.TINYINT -> Value_Type.Integer Bits.Bits_16
Types.SMALLINT -> Value_Type.Integer Bits.Bits_16
Types.INTEGER -> Value_Type.Integer Bits.Bits_32
Types.BIGINT -> Value_Type.Integer Bits.Bits_64
Types.REAL -> Value_Type.Float Bits.Bits_32
Types.DOUBLE -> Value_Type.Float Bits.Bits_64
Types.VARCHAR ->
effective_size = if sql_type.precision==max_length || (sql_type.precision==9 && sql_type.scale==9) then Nothing else sql_type.precision
Value_Type.Char size=effective_size variable_length=True
Types.DATE -> Value_Type.Date
Types.TIME -> Value_Type.Time
Types.TIMESTAMP -> case sql_type.name of
"smalldatetime" -> Value_Type.Date_Time with_timezone=False
"datetime" -> Value_Type.Date_Time with_timezone=False
"datetime2" -> Value_Type.Date_Time with_timezone=False
_ -> on_unknown_type sql_type
Types.BINARY -> case sql_type.name of
"varbinary" -> Value_Type.Binary size=sql_type.precision variable_length=True
"binary" -> Value_Type.Binary size=sql_type.precision variable_length=False
_ -> on_unknown_type sql_type
_ -> case sql_type.name of
"datetimeoffset" -> Value_Type.Date_Time with_timezone=True
_ -> on_unknown_type sql_type
## PRIVATE
sql_type_to_text : SQL_Type -> Text
sql_type_to_text sql_type = SQL_Type_Mapping.default_sql_type_to_text sql_type
## PRIVATE
The SQLServer_Type_Mapping always relies on the return type determined by
the database backend.
infer_return_type : (SQL_Expression -> SQL_Type_Reference) -> Text -> Vector -> SQL_Expression -> SQL_Type_Reference
infer_return_type infer_from_database_callback op_name arguments expression =
_ = [op_name, arguments]
infer_from_database_callback expression
## PRIVATE
We want to respect any overriding references, but references that rely
on the database computing the type are resolved to Nothing, so that we
just rely on the `ResultSet` metadata and decrease overhead.
prepare_type_overrides : Nothing | Vector SQL_Type_Reference -> Nothing | Vector (Nothing | SQL_Type)
prepare_type_overrides column_type_suggestions = case column_type_suggestions of
Nothing -> Nothing
_ : Vector -> column_type_suggestions.map .to_type_override
## PRIVATE
Creates a `Column_Fetcher` used to fetch data from a result set and build
an in-memory column from it, based on the given column type.
make_column_fetcher : SQL_Type -> Column_Fetcher
make_column_fetcher self sql_type =
value_type = self.sql_type_to_value_type sql_type
Column_Fetcher_Module.default_fetcher_for_value_type value_type
## PRIVATE
is_implicit_conversion (source_type : Value_Type) (target_type : Value_Type) -> Boolean =
# Currently, we do not have any implicit conversions.
_ = [source_type, target_type]
False
## PRIVATE
should_warn_on_materialize (db_type : Value_Type) (in_memory_type : Value_Type) -> Boolean =
SQL_Type_Mapping.default_should_warn_on_materialize db_type in_memory_type
## PRIVATE
on_unknown_type sql_type =
Value_Type.Unsupported_Data_Type sql_type.name sql_type
## PRIVATE
This is the maximum size that the JDBC driver reports for 'unbounded'
types in SQLServer.
max_length = 16777216
## PRIVATE
This is the maximum precision that the JDBC driver reports for
'unbounded' types in SQLServer.
max_precision = 2147483647

View File

@ -0,0 +1 @@
export project.SQLServer_Details.SQLServer_Details

View File

@ -0,0 +1,46 @@
from Standard.Base import all
import Standard.Base.Data.Numbers.Number_Parse_Error
import Standard.Base.Errors.Illegal_State.Illegal_State
import Standard.Database.Connection.Connection_Options.Connection_Options
import Standard.Database.Connection.Credentials.Credentials
import project.Internal.SQLServer_Connection.SQLServer_Connection
type SQLServer_Details
## Connect to a SQLServer database.
Arguments:
- host: The name of the server to connect to.
- port: The port to connect to.
- credentials: The credentials to use for the connection.
- database: The name of the database to connect to.
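> Example
  Build connection details for a local server. A minimal sketch; the
  host, port and credentials are illustrative.

      details = SQLServer_Details.SQLServer "localhost" "1433" (Credentials.Username_And_Password "sa" "my_password") "master"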
SQLServer host:Text port:Text credentials:Credentials database:Text="master"
## PRIVATE
Build the Connection resource.
Arguments:
- options: Overrides for the connection properties.
connect : Connection_Options -> SQLServer_Connection
connect self options =
properties = options.merge self.jdbc_properties
make_new database =
SQLServer_Details.SQLServer self.host self.port self.credentials (database.if_nothing self.database) . connect options
SQLServer_Connection.create self.jdbc_url properties make_new
## PRIVATE
Provides the JDBC URL for the connection.
jdbc_url : Text
jdbc_url self =
'jdbc:sqlserver://' + self.host + ':' + self.port
## PRIVATE
Provides the properties for the connection.
jdbc_properties : Vector (Pair Text Text)
jdbc_properties self =
account = [Pair.new 'encrypt' 'false']
database = [Pair.new 'databaseName' self.database]
credentials = [Pair.new 'user' self.credentials.username, Pair.new 'password' self.credentials.password]
account + database + credentials

View File

@ -85,6 +85,12 @@ type Snowflake_Dialect
generate_truncate_table_sql self table_name =
Base_Generator.truncate_table_truncate_table_style self table_name
## PRIVATE
Generates the SQL modifier for limiting the number of rows, together
with its position in the generated query.
get_limit_sql_modifier : Integer -> Any
get_limit_sql_modifier self limit =
[700, SQL_Builder.code (" LIMIT " + limit.to_text)]
## PRIVATE
Wraps and possibly escapes the identifier so that it can be used in a
generated query regardless of what characters it contains.
@ -200,6 +206,11 @@ type Snowflake_Dialect
supports_float_round_decimal_places : Boolean
supports_float_round_decimal_places self = True
## PRIVATE
Specifies whether the Database supports CREATE TEMPORARY TABLE syntax.
supports_temporary_table_syntax : Boolean
supports_temporary_table_syntax self = True
## PRIVATE
adapt_unified_column : Internal_Column -> Value_Type -> (SQL_Expression -> SQL_Type_Reference) -> Internal_Column
adapt_unified_column self column approximate_result_type infer_result_type_from_database_callback =
@ -223,7 +234,7 @@ type Snowflake_Dialect
## PRIVATE
prepare_fetch_types_query : SQL_Expression -> Context -> SQL_Statement
prepare_fetch_types_query self expression context =
Dialect.default_fetch_types_query self expression context
Base_Generator.default_fetch_types_query self expression context
## PRIVATE
check_aggregate_support : Aggregate_Column -> Boolean ! Unsupported_Database_Operation

View File

@ -304,8 +304,9 @@ type Value_Type
Value_Type.Float _ -> True
Value_Type.Decimal _ _ -> True
_ -> False
Value_Type.Float self_bits -> case target_type of
Value_Type.Float target_bits -> target_bits >= self_bits
Value_Type.Float _ -> case target_type of
## Consider if we still want this after we have support for 32-bit floats in the in-memory backend. https://github.com/enso-org/enso/issues/6109
Value_Type.Float _ -> True
Value_Type.Decimal _ _ -> True
_ -> False
Value_Type.Char self_size self_variable_length -> case target_type of

View File

@ -19,7 +19,8 @@ object Editions {
"Standard.Examples",
"Standard.Searcher",
"Standard.Google_Api",
"Standard.Snowflake"
"Standard.Snowflake",
"Standard.Microsoft"
)
case class ContribLibrary(name: String, version: String)

View File

@ -3,6 +3,7 @@ package org.enso.database;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.time.LocalDate;
import java.time.LocalDateTime;
@ -12,6 +13,7 @@ import java.time.ZonedDateTime;
import org.enso.polyglot.common_utils.Core_Date_Utils;
public class JDBCUtils {
/** Gets a LocalDate from a ResultSet. */
public static LocalDate getLocalDate(ResultSet rs, int columnIndex) throws SQLException {
var sqlDate = rs.getDate(columnIndex);
@ -70,9 +72,22 @@ public class JDBCUtils {
stmt.setObject(columnIndex, localTime, Types.TIME);
}
/**
* Sets a LocalTime in a PreparedStatement by converting it to a Timestamp on the epoch date
* (1970-01-01).
*
* @param stmt the prepared statement to set the parameter on
* @param columnIndex the index of the parameter to set
* @param localTime the time value to set
*/
public static void setLocalTimeViaTimeStamp(
PreparedStatement stmt, int columnIndex, LocalTime localTime) throws SQLException {
Timestamp timestamp = Timestamp.valueOf(localTime.atDate(LocalDate.of(1970, 1, 1)));
stmt.setTimestamp(columnIndex, timestamp);
}
/** Sets a LocalDate in a PreparedStatement. */
public static void setLocalDate(PreparedStatement stmt, int columnIndex, LocalDate localDate)
throws SQLException {
stmt.setObject(columnIndex, localDate, Types.DATE);
}
}

View File

@ -0,0 +1,27 @@
package org.enso.microsoft;
import org.enso.database.DatabaseConnectionDetailsSPI;
@org.openide.util.lookup.ServiceProvider(service = DatabaseConnectionDetailsSPI.class)
public class SQLServerConnectionDetailsSPI extends DatabaseConnectionDetailsSPI {
@Override
protected String getModuleName() {
return "Standard.Mircosoft.SQLServer_Details";
}
@Override
protected String getTypeName() {
return "SQLServer_Details";
}
@Override
protected String getCodeForDefaultConstructor() {
return "(SQLServer_Details.SQLServer)";
}
@Override
protected String getUserFacingConnectionName() {
return "Microsoft SQL Server";
}
}

View File

@ -0,0 +1,25 @@
This is a set of tests for the Microsoft integration for Enso.
## Testing Microsoft SQL Server
To run the tests, you need to prepare credentials for a Microsoft SQL Server
instance that the tests can be run against.
Please set the following environment variables:
- 'ENSO_SQLSERVER_HOST' - the name of the server hosting SQLServer,
- 'ENSO_SQLSERVER_PORT' - the port SQLServer is on,
- 'ENSO_SQLSERVER_USER' - the user name to use to connect,
- 'ENSO_SQLSERVER_PASSWORD' - the password for that user,
- 'ENSO_SQLSERVER_DATABASE' - the database on the SQLServer to use.
## Docker
The easiest way to test locally is to use a Docker image:

```
docker run -e "ACCEPT_EULA=Y" -e "MSSQL_SA_PASSWORD=<YourStrong@Passw0rd>" -p 1433:1433 --name sql1 --hostname sql1 -d mcr.microsoft.com/mssql/server:2022-latest
```

Set `ENSO_SQLSERVER_DATABASE` to `tempdb` and the defaults will work for
everything else (the user is `sa` with the above password).
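Once the container is running, connecting from Enso looks roughly like the
sketch below (the host, port and password match the `docker run` command
above; `connection.tables` is only used to check that the connection works):

```
from Standard.Base import all
from Standard.Database import all
from Standard.Microsoft import all

main =
    credentials = Credentials.Username_And_Password "sa" "<YourStrong@Passw0rd>"
    details = SQLServer_Details.SQLServer "localhost" "1433" credentials "tempdb"
    connection = Database.connect details
    # Listing the tables verifies that the connection works.
    connection.tables
```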

View File

@ -0,0 +1,7 @@
name: Microsoft_Tests
namespace: enso_dev
version: 0.0.1
license: MIT
author: enso-dev@enso.org
maintainer: enso-dev@enso.org
prefer-local-libraries: true

View File

@ -0,0 +1,11 @@
from Standard.Base import all
from Standard.Test import Test
import project.SQLServer_Spec
main filter=Nothing =
suite = Test.build suite_builder->
SQLServer_Spec.add_specs suite_builder
suite.run_with_filter filter

View File

@ -0,0 +1,151 @@
from Standard.Base import all
import Standard.Base.Errors.Illegal_Argument.Illegal_Argument
import Standard.Base.Errors.Illegal_State.Illegal_State
import Standard.Base.Runtime.Ref.Ref
from Standard.Table import Table, Value_Type, Aggregate_Column, Bits, expr
from Standard.Table.Errors import Invalid_Column_Names, Inexact_Type_Coercion, Duplicate_Output_Column_Names
import Standard.Database.DB_Column.DB_Column
import Standard.Database.DB_Table.DB_Table
import Standard.Database.SQL_Type.SQL_Type
import Standard.Database.Internal.Replace_Params.Replace_Params
from Standard.Database import all
from Standard.Database.Errors import all
from Standard.Microsoft import all
from Standard.Test import all
import Standard.Test.Test_Environment
import enso_dev.Table_Tests
import enso_dev.Table_Tests.Database.Common.Common_Spec
import enso_dev.Table_Tests.Database.Transaction_Spec
import enso_dev.Table_Tests.Database.Upload_Spec
import enso_dev.Table_Tests.Database.Helpers.Name_Generator
import enso_dev.Table_Tests.Common_Table_Operations
from enso_dev.Table_Tests.Common_Table_Operations.Util import all
from enso_dev.Table_Tests.Database.Types.Postgres_Type_Mapping_Spec import default_text
from enso_dev.Table_Tests.Database.Postgres_Spec import Basic_Test_Data, Postgres_Tables_Data
from enso_dev.Table_Tests.Util import all
import enso_dev.Base_Tests.Network.Enso_Cloud.Cloud_Tests_Setup.Cloud_Tests_Setup
type SQLServer_Info_Data
Value ~data
connection self = self.data.at 0
tinfo self = self.data.at 1
t self = self.data.at 2
setup default_connection = SQLServer_Info_Data.Value <|
connection = default_connection
tinfo = Name_Generator.random_name "Tinfo"
connection.execute_update 'Create Table "'+tinfo+'" ("strs" VARCHAR(255), "ints" INTEGER, "bools" BIT, "reals" REAL, "floats" FLOAT, "tinyints" TINYINT, "smallints" SMALLINT, "bigints" BIGINT, "times" TIME, "dates" DATE, "datetimes" DATETIME, "smalldatetimes" SMALLDATETIME, "datetime2s" DATETIME2, "datetimeoffsets" DATETIMEOFFSET)'
t = connection.query (SQL_Query.Table_Name tinfo)
row1 = ["a", Nothing, False, 1.2, 1.2, 0, 0, 0, Time_Of_Day.new 12 12 12 1 1 1, Date.new 2021 1 1, Date_Time.new 2021 1 1 12 12 12 500 1 1, Date_Time.new 2021 1 1 12 12 12 1 1 1, Date_Time.new 2021 1 1 12 12 12 1 1 1, Date_Time.new 2021 1 1 12 12 12 1 1 1]
row2 = ["abc", Nothing, Nothing, 1.3, 1.3, 255, 32767, 9223372036854775807, Time_Of_Day.new 7 12 12 1 1 1, Date.new 1999 1 1, Date_Time.new 1999 1 1 12 12 12 1 1 1, Date_Time.new 1999 1 1 12 12 12 1 1 1, Date_Time.new 1999 1 1 12 12 12 1 1 1, Date_Time.new 1999 1 1 12 12 12 1 1 1]
row3 = ["def", 42, True, 1.4, 1.4, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing]
source_table = Table.from_rows ["strs", "ints", "bools", "reals", "floats", "tinyints", "smallints", "bigints", "times", "dates", "datetimes", "smalldatetimes", "datetime2s", "datetimeoffsets"] [row1, row2, row3]
. cast ['tinyints'] (Value_Type.Integer Bits.Bits_16)
. cast ['smallints'] (Value_Type.Integer Bits.Bits_16)
. cast ['ints'] (Value_Type.Integer Bits.Bits_32)
Panic.rethrow <|
t.update_rows source_table update_action=Update_Action.Insert
[connection, tinfo, t]
teardown self =
self.connection.execute_update 'DROP TABLE "'+self.tinfo+'"'
self.connection.close
get_configured_connection_details =
host = Environment.get "ENSO_SQLSERVER_HOST" if_missing="localhost"
port = Environment.get "ENSO_SQLSERVER_PORT" if_missing="1433"
user = Environment.get "ENSO_SQLSERVER_USER" if_missing="sa"
password = Environment.get "ENSO_SQLSERVER_PASSWORD" if_missing="<YourStrong@Passw0rd>"
database = Environment.get "ENSO_SQLSERVER_DATABASE"
resolved_password = if password.starts_with "enso://" then Enso_Secret.get password else password
credentials = Credentials.Username_And_Password user resolved_password
if database.is_nothing then Nothing else
SQLServer_Details.SQLServer host port credentials database
## Returns a function that takes anything and returns a new connection.
The function creates a _new_ connection on each invocation
(this is needed for some tests that need multiple distinct connections).
create_connection_builder =
connection_details = get_configured_connection_details
connection_details.if_not_nothing <|
_ -> Database.connect connection_details
add_specs suite_builder =
case create_connection_builder of
Nothing ->
message = "SQLServer test database is not configured. See README.md for instructions."
suite_builder.group "[SQLServer] Database tests" pending=message (_-> Nothing)
connection_builder ->
_ = connection_builder
suite_builder.group "[SQLServer] Info" group_builder->
default_connection = Database.connect get_configured_connection_details
data = SQLServer_Info_Data.setup default_connection
group_builder.teardown <|
data.teardown
group_builder.specify "should return Table information" <|
i = data.t.column_info
i.at "Column" . to_vector . should_equal ["strs", "ints", "bools", "reals", "floats", "tinyints", "smallints", "bigints", "times", "dates", "datetimes", "smalldatetimes", "datetime2s", "datetimeoffsets"]
i.at "Items Count" . to_vector . should_equal [3, 1, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2]
i.at "Value Type" . to_vector . should_equal [Value_Type.Char 255, Value_Type.Integer ..Bits_32, Value_Type.Boolean, Value_Type.Float ..Bits_32, Value_Type.Float, Value_Type.Integer Bits.Bits_16, Value_Type.Integer Bits.Bits_16, Value_Type.Integer Bits.Bits_64, Value_Type.Time, Value_Type.Date, Value_Type.Date_Time False, Value_Type.Date_Time False, Value_Type.Date_Time False, Value_Type.Date_Time True]
group_builder.specify "should return Table information, also for aggregated results" <|
i = data.t.aggregate columns=[Aggregate_Column.Sum "ints", Aggregate_Column.Count_Distinct "bools"] . column_info
i.at "Column" . to_vector . should_equal ["Sum ints", "Count Distinct bools"]
i.at "Items Count" . to_vector . should_equal [1, 1]
i.at "Value Type" . to_vector . should_equal [Value_Type.Integer ..Bits_32, Value_Type.Integer ..Bits_32]
group_builder.specify "should infer standard types correctly" <|
data.t.at "strs" . value_type . is_text . should_be_true
data.t.at "ints" . value_type . is_integer . should_be_true
data.t.at "bools" . value_type . is_boolean . should_be_true
data.t.at "floats" . value_type . is_floating_point . should_be_true
group_builder.specify "should preserve SQLServer types when table is materialized, where possible" pending="TODO" <|
name = Name_Generator.random_name "types-test"
Problems.assume_no_problems <|
data.connection.execute_update 'CREATE TABLE "#'+name+'" ("int4" int4, "int2" int2, "txt-limited" varchar(10), "txt-fixed" char(3))'
t1 = data.connection.query (SQL_Query.Table_Name name)
t1.at "int4" . value_type . should_equal (Value_Type.Integer Bits.Bits_32)
t1.at "int2" . value_type . should_equal (Value_Type.Integer Bits.Bits_16)
t1.at "txt-limited" . value_type . should_equal (Value_Type.Char size=10 variable_length=True)
t1.at "txt-fixed" . value_type . should_equal (Value_Type.Char size=3 variable_length=False)
in_memory = t1.read
in_memory.at "int4" . value_type . should_equal (Value_Type.Integer Bits.Bits_32)
in_memory.at "int2" . value_type . should_equal (Value_Type.Integer Bits.Bits_16)
in_memory.at "txt-limited" . value_type . should_equal (Value_Type.Char size=10 variable_length=True)
in_memory.at "txt-fixed" . value_type . should_equal (Value_Type.Char size=3 variable_length=False)
group_builder.specify "test datetime2 precision round trip" <|
name = Name_Generator.random_name "datetime2-test"
Problems.assume_no_problems <|
data.connection.execute_update 'CREATE TABLE "'+name+'" ("dt2" DATETIME2)'
t = data.connection.query (SQL_Query.Table_Name name)
row1 = [Date_Time.new 2021 1 1 12 13 14 500 1 1]
row2 = [Date_Time.new 2021 1 1 9 12 12 987 654 321]
row3 = [Nothing]
source_table = Table.from_rows ["dt2"] [row1, row2, row3]
t.update_rows source_table update_action=Update_Action.Insert
## SQLServer only supports precision down to 100 nanoseconds
expected_row1 = [Date_Time.new 2021 1 1 12 13 14 500 1 0]
expected_row2 = [Date_Time.new 2021 1 1 9 12 12 987 654 300]
expected_row3 = [Nothing]
expected_table = Table.from_rows ["dt2"] [expected_row1, expected_row2, expected_row3]
returned_table = t.read
returned_table.should_equal expected_table
data.connection.execute_update 'DROP TABLE "'+name+'"'
main filter=Nothing =
suite = Test.build suite_builder->
add_specs suite_builder
suite.run_with_filter filter

View File

@ -0,0 +1 @@
Copyright 2011 Google Inc. All Rights Reserved.

View File

@ -0,0 +1,4 @@
Copyright (C) 2012 tamtam180
Copyright 2010 Google Inc. All Rights Reserved.
Copyright 2012 Google Inc. All Rights Reserved.
Microsoft JDBC Driver for SQL Server Copyright(c) Microsoft Corporation All rights reserved. This program is made

View File

@ -0,0 +1,2 @@
mssql/googlecode/cityhash/NOTICE
mssql/googlecode/concurrentlinkedhashmap/NOTICE

View File

@ -0,0 +1,2 @@
mssql/googlecode/cityhash/LICENSE
mssql/googlecode/concurrentlinkedhashmap/LICENSE

View File

@ -0,0 +1,3 @@
5068C10867C41BBE95A1A1CDEA08739F2ABBDCD465EFC7CD39A390C5002EFC60
7FA24514EA20CACFF848C3302393DBF46A85BB64FD297636759CA81A8DC23166
0

View File

@ -0,0 +1 @@
tools/legal-review/license-texts/MIT