Remove gui1 codebase (#9242)

Removes the old GUI1 codebase, reducing the Rust code footprint by deleting the now-unused code.

# Important Notes
Updates build scripts and reformats part of the codebase with the autoformatter.
Michael Mauderer 2024-03-07 02:20:21 +00:00 committed by GitHub
parent 79a6a6a1c0
commit 7c68bf170d
919 changed files with 1188 additions and 217933 deletions

.github/CODEOWNERS

@@ -16,7 +16,6 @@ rustfmt.toml @MichaelMauderer @mwu-tow @farmaazon @vitvakatu @Frizi @kazcw
Cargo.lock
Cargo.toml
/lib/rust/ @MichaelMauderer @mwu-tow @farmaazon @kazcw @vitvakatu @Frizi
/lib/rust/ensogl/ @MichaelMauderer @farmaazon @kazcw @vitvakatu @Frizi
/lib/rust/parser/ @MichaelMauderer @mwu-tow @farmaazon @kazcw @vitvakatu @Frizi @jaroslavtulach
/integration-test/ @MichaelMauderer @farmaazon @kazcw @vitvakatu @Frizi
/tools/build-performance/ @kazcw @mwu-tow @Akirathan


@@ -53,169 +53,6 @@ jobs:
outputs:
ENSO_RELEASE_ID: ${{ steps.prepare.outputs.ENSO_RELEASE_ID }}
ENSO_VERSION: ${{ steps.prepare.outputs.ENSO_VERSION }}
enso-build-ci-gen-job-build-wasm-linux-x86_64:
name: Build GUI (WASM) (linux, x86_64)
runs-on:
- self-hosted
- Linux
steps:
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Setup conda (GH runners only)
uses: s-weigand/setup-conda@v1.2.1
with:
update-conda: false
conda-channels: anaconda, conda-forge
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Installing wasm-pack
uses: jetli/wasm-pack-action@v0.4.0
with:
version: v0.10.2
- name: Expose Artifact API and context information.
uses: actions/github-script@v7
with:
script: "\n core.exportVariable(\"ACTIONS_RUNTIME_TOKEN\", process.env[\"ACTIONS_RUNTIME_TOKEN\"])\n core.exportVariable(\"ACTIONS_RUNTIME_URL\", process.env[\"ACTIONS_RUNTIME_URL\"])\n core.exportVariable(\"GITHUB_RETENTION_DAYS\", process.env[\"GITHUB_RETENTION_DAYS\"])\n console.log(context)\n "
- name: Checking out the repository
uses: actions/checkout@v4
with:
clean: false
submodules: recursive
- name: Build Script Setup
run: ./run --help
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: "(contains(github.event.pull_request.labels.*.name, 'CI: Clean build required') || inputs.clean_build_required)"
name: Clean before
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: ./run wasm build --wasm-upload-artifact ${{ runner.os == 'Linux' }}
env:
ENSO_AG_GRID_LICENSE_KEY: ${{ secrets.ENSO_AG_GRID_LICENSE_KEY }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: failure() && runner.os == 'Windows'
name: List files if failed (Windows)
run: Get-ChildItem -Force -Recurse
- if: failure() && runner.os != 'Windows'
name: List files if failed (non-Windows)
run: ls -lAR
- if: "(always()) && (contains(github.event.pull_request.labels.*.name, 'CI: Clean build required') || inputs.clean_build_required)"
name: Clean after
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
enso-build-ci-gen-job-deploy-gui-linux-x86_64:
name: Upload GUI to S3 (linux, x86_64)
needs:
- enso-build-ci-gen-upload-ide-linux-x86_64
runs-on:
- self-hosted
- Linux
steps:
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Setup conda (GH runners only)
uses: s-weigand/setup-conda@v1.2.1
with:
update-conda: false
conda-channels: anaconda, conda-forge
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Installing wasm-pack
uses: jetli/wasm-pack-action@v0.4.0
with:
version: v0.10.2
- name: Expose Artifact API and context information.
uses: actions/github-script@v7
with:
script: "\n core.exportVariable(\"ACTIONS_RUNTIME_TOKEN\", process.env[\"ACTIONS_RUNTIME_TOKEN\"])\n core.exportVariable(\"ACTIONS_RUNTIME_URL\", process.env[\"ACTIONS_RUNTIME_URL\"])\n core.exportVariable(\"GITHUB_RETENTION_DAYS\", process.env[\"GITHUB_RETENTION_DAYS\"])\n console.log(context)\n "
- name: Checking out the repository
uses: actions/checkout@v4
with:
clean: false
submodules: recursive
- name: Build Script Setup
run: ./run --help
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: "(contains(github.event.pull_request.labels.*.name, 'CI: Clean build required') || inputs.clean_build_required)"
name: Clean before
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: ./run release deploy-gui
env:
AWS_ACCESS_KEY_ID: ${{ secrets.ARTEFACT_S3_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.ARTEFACT_S3_SECRET_ACCESS_KEY }}
ENSO_ADMIN_TOKEN: ${{ secrets.ENSO_ADMIN_TOKEN }}
GITHUB_TOKEN: ${{ secrets.CI_PRIVATE_TOKEN }}
- if: failure() && runner.os == 'Windows'
name: List files if failed (Windows)
run: Get-ChildItem -Force -Recurse
- if: failure() && runner.os != 'Windows'
name: List files if failed (non-Windows)
run: ls -lAR
- if: "(always()) && (contains(github.event.pull_request.labels.*.name, 'CI: Clean build required') || inputs.clean_build_required)"
name: Clean after
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
enso-build-ci-gen-job-deploy-runtime-linux-x86_64:
name: Upload Runtime to ECR (linux, x86_64)
needs:
- enso-build-ci-gen-draft-release-linux-x86_64
- enso-build-ci-gen-job-upload-backend-linux-x86_64
runs-on:
- self-hosted
- Linux
steps:
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Setup conda (GH runners only)
uses: s-weigand/setup-conda@v1.2.1
with:
update-conda: false
conda-channels: anaconda, conda-forge
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Installing wasm-pack
uses: jetli/wasm-pack-action@v0.4.0
with:
version: v0.10.2
- name: Expose Artifact API and context information.
uses: actions/github-script@v7
with:
script: "\n core.exportVariable(\"ACTIONS_RUNTIME_TOKEN\", process.env[\"ACTIONS_RUNTIME_TOKEN\"])\n core.exportVariable(\"ACTIONS_RUNTIME_URL\", process.env[\"ACTIONS_RUNTIME_URL\"])\n core.exportVariable(\"GITHUB_RETENTION_DAYS\", process.env[\"GITHUB_RETENTION_DAYS\"])\n console.log(context)\n "
- name: Checking out the repository
uses: actions/checkout@v4
with:
clean: false
submodules: recursive
- name: Build Script Setup
run: ./run --help
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: "(contains(github.event.pull_request.labels.*.name, 'CI: Clean build required') || inputs.clean_build_required)"
name: Clean before
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: ./run release deploy-runtime
env:
AWS_ACCESS_KEY_ID: ${{ secrets.ECR_PUSH_RUNTIME_ACCESS_KEY_ID }}
AWS_DEFAULT_REGION: eu-west-1
AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_PUSH_RUNTIME_SECRET_ACCESS_KEY }}
ENSO_BUILD_ECR_REPOSITORY: runtime
GITHUB_TOKEN: ${{ secrets.CI_PRIVATE_TOKEN }}
- if: failure() && runner.os == 'Windows'
name: List files if failed (Windows)
run: Get-ChildItem -Force -Recurse
- if: failure() && runner.os != 'Windows'
name: List files if failed (non-Windows)
run: ls -lAR
- if: "(always()) && (contains(github.event.pull_request.labels.*.name, 'CI: Clean build required') || inputs.clean_build_required)"
name: Clean after
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
env:
ENSO_RELEASE_ID: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_RELEASE_ID }}
ENSO_VERSION: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_VERSION }}
enso-build-ci-gen-job-upload-backend-linux-x86_64:
name: Upload Backend (linux, x86_64)
needs:
@@ -436,12 +273,6 @@ jobs:
name: Publish release (linux, x86_64)
needs:
- enso-build-ci-gen-draft-release-linux-x86_64
- enso-build-ci-gen-job-deploy-gui-linux-x86_64
- enso-build-ci-gen-job-deploy-runtime-linux-x86_64
- enso-build-ci-gen-upload-ide-linux-x86_64
- enso-build-ci-gen-upload-ide-macos-aarch64
- enso-build-ci-gen-upload-ide-macos-x86_64
- enso-build-ci-gen-upload-ide-windows-x86_64
- enso-build-ci-gen-upload-ide2-linux-x86_64
- enso-build-ci-gen-upload-ide2-macos-aarch64
- enso-build-ci-gen-upload-ide2-macos-x86_64
@@ -499,258 +330,6 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.ARTEFACT_S3_SECRET_ACCESS_KEY }}
ENSO_RELEASE_ID: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_RELEASE_ID }}
ENSO_VERSION: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_VERSION }}
enso-build-ci-gen-upload-ide-linux-x86_64:
name: Build Old IDE (linux, x86_64)
needs:
- enso-build-ci-gen-draft-release-linux-x86_64
- enso-build-ci-gen-job-build-wasm-linux-x86_64
- enso-build-ci-gen-job-upload-backend-linux-x86_64
runs-on:
- self-hosted
- Linux
steps:
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Setup conda (GH runners only)
uses: s-weigand/setup-conda@v1.2.1
with:
update-conda: false
conda-channels: anaconda, conda-forge
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Installing wasm-pack
uses: jetli/wasm-pack-action@v0.4.0
with:
version: v0.10.2
- name: Expose Artifact API and context information.
uses: actions/github-script@v7
with:
script: "\n core.exportVariable(\"ACTIONS_RUNTIME_TOKEN\", process.env[\"ACTIONS_RUNTIME_TOKEN\"])\n core.exportVariable(\"ACTIONS_RUNTIME_URL\", process.env[\"ACTIONS_RUNTIME_URL\"])\n core.exportVariable(\"GITHUB_RETENTION_DAYS\", process.env[\"GITHUB_RETENTION_DAYS\"])\n console.log(context)\n "
- name: Checking out the repository
uses: actions/checkout@v4
with:
clean: false
submodules: recursive
- name: Build Script Setup
run: ./run --help
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: (always())
name: Clean before
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: ./run ide upload --wasm-source current-ci-run --backend-source release --backend-release ${{env.ENSO_RELEASE_ID}}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: failure() && runner.os == 'Windows'
name: List files if failed (Windows)
run: Get-ChildItem -Force -Recurse
- if: failure() && runner.os != 'Windows'
name: List files if failed (non-Windows)
run: ls -lAR
- if: (always())
name: Clean after
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
env:
ENSO_RELEASE_ID: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_RELEASE_ID }}
ENSO_VERSION: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_VERSION }}
enso-build-ci-gen-upload-ide-macos-aarch64:
name: Build Old IDE (macos, aarch64)
needs:
- enso-build-ci-gen-draft-release-linux-x86_64
- enso-build-ci-gen-job-build-wasm-linux-x86_64
- enso-build-ci-gen-job-upload-backend-macos-aarch64
runs-on:
- self-hosted
- macOS
- ARM64
steps:
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Setup conda (GH runners only)
uses: s-weigand/setup-conda@v1.2.1
with:
update-conda: false
conda-channels: anaconda, conda-forge
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Installing wasm-pack
uses: jetli/wasm-pack-action@v0.4.0
with:
version: v0.10.2
- name: Expose Artifact API and context information.
uses: actions/github-script@v7
with:
script: "\n core.exportVariable(\"ACTIONS_RUNTIME_TOKEN\", process.env[\"ACTIONS_RUNTIME_TOKEN\"])\n core.exportVariable(\"ACTIONS_RUNTIME_URL\", process.env[\"ACTIONS_RUNTIME_URL\"])\n core.exportVariable(\"GITHUB_RETENTION_DAYS\", process.env[\"GITHUB_RETENTION_DAYS\"])\n console.log(context)\n "
- name: Checking out the repository
uses: actions/checkout@v4
with:
clean: false
submodules: recursive
- name: Build Script Setup
run: ./run --help
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: (always())
name: Clean before
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: NPM install
run: npm install
- name: Uninstall old Electron Builder
run: npm uninstall --save --workspace enso electron-builder
- name: Install new Electron Builder
run: npm install --save-dev --workspace enso electron-builder@24.6.4
- run: ./run ide upload --wasm-source current-ci-run --backend-source release --backend-release ${{env.ENSO_RELEASE_ID}}
env:
APPLEID: ${{ secrets.APPLE_NOTARIZATION_USERNAME }}
APPLEIDPASS: ${{ secrets.APPLE_NOTARIZATION_PASSWORD }}
APPLETEAMID: ${{ secrets.APPLE_NOTARIZATION_TEAM_ID }}
CSC_FOR_PULL_REQUEST: "true"
CSC_IDENTITY_AUTO_DISCOVERY: "true"
CSC_KEY_PASSWORD: ${{ secrets.APPLE_CODE_SIGNING_CERT_PASSWORD }}
CSC_LINK: ${{ secrets.APPLE_CODE_SIGNING_CERT }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: failure() && runner.os == 'Windows'
name: List files if failed (Windows)
run: Get-ChildItem -Force -Recurse
- if: failure() && runner.os != 'Windows'
name: List files if failed (non-Windows)
run: ls -lAR
- if: (always())
name: Clean after
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
env:
ENSO_RELEASE_ID: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_RELEASE_ID }}
ENSO_VERSION: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_VERSION }}
enso-build-ci-gen-upload-ide-macos-x86_64:
name: Build Old IDE (macos, x86_64)
needs:
- enso-build-ci-gen-draft-release-linux-x86_64
- enso-build-ci-gen-job-build-wasm-linux-x86_64
- enso-build-ci-gen-job-upload-backend-macos-x86_64
runs-on:
- macos-latest
steps:
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Setup conda (GH runners only)
uses: s-weigand/setup-conda@v1.2.1
with:
update-conda: false
conda-channels: anaconda, conda-forge
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Installing wasm-pack
uses: jetli/wasm-pack-action@v0.4.0
with:
version: v0.10.2
- name: Expose Artifact API and context information.
uses: actions/github-script@v7
with:
script: "\n core.exportVariable(\"ACTIONS_RUNTIME_TOKEN\", process.env[\"ACTIONS_RUNTIME_TOKEN\"])\n core.exportVariable(\"ACTIONS_RUNTIME_URL\", process.env[\"ACTIONS_RUNTIME_URL\"])\n core.exportVariable(\"GITHUB_RETENTION_DAYS\", process.env[\"GITHUB_RETENTION_DAYS\"])\n console.log(context)\n "
- name: Checking out the repository
uses: actions/checkout@v4
with:
clean: false
submodules: recursive
- name: Build Script Setup
run: ./run --help
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: (always())
name: Clean before
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: NPM install
run: npm install
- name: Uninstall old Electron Builder
run: npm uninstall --save --workspace enso electron-builder
- name: Install new Electron Builder
run: npm install --save-dev --workspace enso electron-builder@24.6.4
- run: ./run ide upload --wasm-source current-ci-run --backend-source release --backend-release ${{env.ENSO_RELEASE_ID}}
env:
APPLEID: ${{ secrets.APPLE_NOTARIZATION_USERNAME }}
APPLEIDPASS: ${{ secrets.APPLE_NOTARIZATION_PASSWORD }}
APPLETEAMID: ${{ secrets.APPLE_NOTARIZATION_TEAM_ID }}
CSC_FOR_PULL_REQUEST: "true"
CSC_IDENTITY_AUTO_DISCOVERY: "true"
CSC_KEY_PASSWORD: ${{ secrets.APPLE_CODE_SIGNING_CERT_PASSWORD }}
CSC_LINK: ${{ secrets.APPLE_CODE_SIGNING_CERT }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: failure() && runner.os == 'Windows'
name: List files if failed (Windows)
run: Get-ChildItem -Force -Recurse
- if: failure() && runner.os != 'Windows'
name: List files if failed (non-Windows)
run: ls -lAR
- if: (always())
name: Clean after
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
env:
ENSO_RELEASE_ID: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_RELEASE_ID }}
ENSO_VERSION: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_VERSION }}
enso-build-ci-gen-upload-ide-windows-x86_64:
name: Build Old IDE (windows, x86_64)
needs:
- enso-build-ci-gen-draft-release-linux-x86_64
- enso-build-ci-gen-job-build-wasm-linux-x86_64
- enso-build-ci-gen-job-upload-backend-windows-x86_64
runs-on:
- self-hosted
- Windows
steps:
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Setup conda (GH runners only)
uses: s-weigand/setup-conda@v1.2.1
with:
update-conda: false
conda-channels: anaconda, conda-forge
- if: startsWith(runner.name, 'GitHub Actions') || startsWith(runner.name, 'Hosted Agent')
name: Installing wasm-pack
uses: jetli/wasm-pack-action@v0.4.0
with:
version: v0.10.2
- name: Expose Artifact API and context information.
uses: actions/github-script@v7
with:
script: "\n core.exportVariable(\"ACTIONS_RUNTIME_TOKEN\", process.env[\"ACTIONS_RUNTIME_TOKEN\"])\n core.exportVariable(\"ACTIONS_RUNTIME_URL\", process.env[\"ACTIONS_RUNTIME_URL\"])\n core.exportVariable(\"GITHUB_RETENTION_DAYS\", process.env[\"GITHUB_RETENTION_DAYS\"])\n console.log(context)\n "
- name: Checking out the repository
uses: actions/checkout@v4
with:
clean: false
submodules: recursive
- name: Build Script Setup
run: ./run --help
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- if: (always())
name: Clean before
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: ./run ide upload --wasm-source current-ci-run --backend-source release --backend-release ${{env.ENSO_RELEASE_ID}}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
WIN_CSC_KEY_PASSWORD: ${{ secrets.MICROSOFT_CODE_SIGNING_CERT_PASSWORD }}
WIN_CSC_LINK: ${{ secrets.MICROSOFT_CODE_SIGNING_CERT }}
- if: failure() && runner.os == 'Windows'
name: List files if failed (Windows)
run: Get-ChildItem -Force -Recurse
- if: failure() && runner.os != 'Windows'
name: List files if failed (non-Windows)
run: ls -lAR
- if: (always())
name: Clean after
run: ./run git-clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
env:
ENSO_RELEASE_ID: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_RELEASE_ID }}
ENSO_VERSION: ${{ needs.enso-build-ci-gen-draft-release-linux-x86_64.outputs.ENSO_VERSION }}
enso-build-ci-gen-upload-ide2-linux-x86_64:
name: Build New IDE (linux, x86_64)
needs:
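
A note on the "Expose Artifact API and context information." step that recurs in every job above: in the workflow source its `script` value is a JSON-escaped one-liner. Unescaped, the `actions/github-script` body reads as the following JavaScript (`core` and `context` are globals provided by that action):

```js
// Re-export the Actions runtime credentials as ordinary environment
// variables so that later, non-JavaScript steps can reach the artifact API.
core.exportVariable("ACTIONS_RUNTIME_TOKEN", process.env["ACTIONS_RUNTIME_TOKEN"])
core.exportVariable("ACTIONS_RUNTIME_URL", process.env["ACTIONS_RUNTIME_URL"])
core.exportVariable("GITHUB_RETENTION_DAYS", process.env["GITHUB_RETENTION_DAYS"])
// Dump the workflow context for debugging.
console.log(context)
```

Presumably this is what lets the `./run` build-script invocations in subsequent steps upload and download workflow artifacts directly.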

Cargo.lock

File diff suppressed because it is too large


@@ -7,33 +7,24 @@ resolver = "2"
# path, e.g. `lib/rust/ensogl/examples`, or `app/gui/view/examples`; this is used to optimize the application for
# loading the IDE.
members = [
"app/gui",
"app/gui/language/parser",
"app/gui/enso-profiler-enso-data",
"app/gui2/rust-ffi",
"build/cli",
"build/macros/proc-macro",
"build/ci-gen",
"build/cli",
"build/intellij-run-config-gen",
"build/deprecated/rust-scripts",
"build/shader-tools",
"lib/rust/*",
"lib/rust/parser/doc-parser",
"lib/rust/parser/src/syntax/tree/visitor",
"lib/rust/parser/jni",
"lib/rust/parser/generate-java",
"lib/rust/parser/schema",
"lib/rust/parser/debug",
"lib/rust/ensogl/pack",
"lib/rust/profiler/data",
"lib/rust/profiler/demo-data",
"integration-test",
"tools/language-server/logstat",
"tools/language-server/wstest",
"app/gui2/rust-ffi",
"build/cli",
"build/macros/proc-macro",
"build/ci-gen",
"build/cli",
"build/intellij-run-config-gen",
"build/deprecated/rust-scripts",
"lib/rust/*",
"lib/rust/parser/doc-parser",
"lib/rust/parser/src/syntax/tree/visitor",
"lib/rust/parser/jni",
"lib/rust/parser/generate-java",
"lib/rust/parser/schema",
"lib/rust/parser/debug",
"lib/rust/enso-pack",
"tools/language-server/logstat",
"tools/language-server/wstest",
]
# The default members are those we want to check and test by default.
default-members = ["app/gui", "lib/rust/*"]
# We are using a version with extended functionality. The changes have been PR'd upstream:
# https://github.com/rustwasm/console_error_panic_hook/pull/24
@@ -88,7 +79,7 @@ console-subscriber = "0.1.8"
dirs = { version = "5.0.1" }
nix = { version = "0.27.1" }
octocrab = { git = "https://github.com/enso-org/octocrab", default-features = false, features = [
"rustls",
"rustls",
] }
platforms = { version = "3.2.0", features = ["serde"] }
portpicker = { version = "0.1.1" }
@@ -131,25 +122,25 @@ bytes = { version = "1.1.0" }
matches = { version = "0.1" }
console_error_panic_hook = { version = "0.1.6" }
reqwest = { version = "0.11.5", default-features = false, features = [
"rustls-tls",
"stream"
"rustls-tls",
"stream"
] }
proc-macro2 = { version = "1.0.50" }
syn = { version = "2.0", features = [
"full",
"extra-traits",
"printing",
"parsing",
"visit",
"visit-mut",
"full",
"extra-traits",
"printing",
"parsing",
"visit",
"visit-mut",
] }
syn_1 = { package = "syn", version = "1.0", features = [
"full",
"extra-traits",
"printing",
"parsing",
"visit",
"visit-mut",
"full",
"extra-traits",
"printing",
"parsing",
"visit",
"visit-mut",
] }
quote = { version = "1.0.23" }
semver = { version = "1.0.0", features = ["serde"] }


@@ -1,92 +0,0 @@
[package]
name = "enso-gui"
version = "0.1.0"
authors = ["Enso Team <contact@enso.org>"]
edition = "2021"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
analytics = { path = "analytics" }
double-representation = { path = "controller/double-representation" }
enso-config = { path = "config" }
enso-callback = { path = "../../lib/rust/callback" }
enso-data-structures = { path = "../../lib/rust/data-structures" }
enso-debug-api = { path = "../../lib/rust/debug-api" }
enso-debug-scene = { path = "view/examples" }
enso-frp = { path = "../../lib/rust/frp" }
enso-doc-parser = { path = "../../lib/rust/parser/doc-parser" }
enso-prelude = { path = "../../lib/rust/prelude" }
enso-profiler = { path = "../../lib/rust/profiler" }
enso-executor = { path = "../../lib/rust/executor" }
enso-notification = { path = "../../lib/rust/notification" }
enso-shapely = { path = "../../lib/rust/shapely" }
enso-text = { path = "../../lib/rust/text" }
enso-web = { path = "../../lib/rust/web" }
enso-suggestion-database = { path = "suggestion-database" }
ensogl = { path = "../../lib/rust/ensogl" }
ensogl-examples = { path = "../../lib/rust/ensogl/examples" }
ensogl-component = { path = "../../lib/rust/ensogl/component" }
ensogl-icons = { path = "../../lib/rust/ensogl/component/icons" }
ensogl-dynamic-assets = { path = "../../lib/rust/ensogl/component/dynamic-assets" }
ensogl-text-msdf = { path = "../../lib/rust/ensogl/component/text/src/font/msdf" }
ensogl-hardcoded-theme = { path = "../../lib/rust/ensogl/app/theme/hardcoded" }
ensogl-drop-manager = { path = "../../lib/rust/ensogl/component/drop-manager" }
ensogl-breadcrumbs = { path = "../../lib/rust/ensogl/component/breadcrumbs" }
fuzzly = { path = "../../lib/rust/fuzzly" }
ast = { path = "language/ast/impl" }
parser = { path = "language/parser" }
ide-view = { path = "view" }
engine-protocol = { path = "controller/engine-protocol" }
json-rpc = { path = "../../lib/rust/json-rpc" }
span-tree = { path = "language/span-tree" }
bimap = { version = "0.4.0" }
console_error_panic_hook = { workspace = true }
const_format = { workspace = true }
convert_case = { workspace = true }
failure = { workspace = true }
flo_stream = { version = "0.4.0" }
futures = { workspace = true }
itertools = { workspace = true }
js-sys = { workspace = true }
mockall = { version = "0.7.1", features = ["nightly"] }
nalgebra = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
sha3 = { version = "0.8.2" }
superslice = { workspace = true }
uuid = { version = "0.8", features = ["serde", "v4", "wasm-bindgen"] }
ordered-float = "3.4.0"
# wasm-bindgen version 0.2.79 is causing issues with clippy.
# See https://github.com/rustwasm/wasm-bindgen/issues/2774 for more information.
# Should be removed once 0.2.80 is available.
wasm-bindgen = { workspace = true }
wasm-bindgen-futures = "0.4"
[dev-dependencies]
regex = { workspace = true }
wasm-bindgen-test = { workspace = true }
[dependencies.web-sys]
version = "0.3.22"
features = [
'BinaryType',
'Blob',
'console',
'CloseEvent',
'Document',
'Element',
'ErrorEvent',
'EventTarget',
'MessageEvent',
'HtmlElement',
'Node',
'WebSocket',
'Window',
]
# Stop wasm-pack from running wasm-opt, because we run it from our build scripts in order to customize options.
[package.metadata.wasm-pack.profile.release]
wasm-opt = false


@@ -1,661 +0,0 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

View File

@ -1,88 +0,0 @@
# This is the subtree for Enso's graphical interface component. If you're looking for the repository root, you may find it at 👉 <a href="https://github.com/enso-org/enso">github.com/enso-org/enso</a> 👈
<br/>
# Enso IDE
### Overview
<p>
<a href="https://discord.gg/PMtNMP46">
<img src="https://img.shields.io/discord/401396655599124480.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2"
alt="Chat">
</a>
<a href="https://github.com/enso-org/enso/blob/develop/LICENSE">
<img src="https://img.shields.io/static/v1?label=Compiler%20License&message=Apache%20v2&color=2ec352&labelColor=2c3239"
alt="License">
</a>
<a href="https://github.com/enso-org/enso/tree/develop/gui/LICENSE">
<img src="https://img.shields.io/static/v1?label=GUI%20License&message=AGPL%20v3&color=2ec352&labelColor=2c3239"
alt="License">
</a>
</p>
Enso is an award-winning interactive programming language with dual visual and
textual representations. It is a tool that spans the entire stack, going from
high-level visualization and communication to the nitty-gritty of backend
services, all in a single language. Watch the following introduction video to
learn what Enso is, and how it helps companies build data workflows in minutes
instead of weeks.
This repository contains the source code of the Enso interface only. If you are
interested in how the interface is built or you want to develop it with us, you
are in the right place. See the
[development and contributing guidelines](docs/CONTRIBUTING.md) to learn more
about the code structure and the development process.
<br/>
### Getting Started
Enso is distributed both in the form of
[pre-built packages for macOS, Windows, or Linux](https://github.com/enso-org/ide/releases),
as well as the [source code](https://github.com/enso-org). See the
[demo scenes](http://TODO), and read the [documentation](docs/product) to learn
more.
<br/>
### Building
The project builds on macOS, Linux, and Windows. Build functionality is provided
by our build scripts, which are accessible through the `run` (Linux and macOS) or
`run.cmd` (Windows) wrappers.
To build the IDE, run `./run ide build` (on Linux or macOS) or
`.\run.cmd ide build` (on Windows). To learn more about other available
commands, use the `--help` argument. Read the detailed
[development guide](docs/CONTRIBUTING.md) to learn more.
<br/>
### License
The Enso Language Compiler is released under the terms of the
[Apache v2 License](https://github.com/enso-org/enso/blob/develop/LICENSE). The
Enso Graphical Interface and its rendering engine are released under the terms
of the
[AGPL v3 License](https://github.com/enso-org/enso/blob/develop/app/gui/LICENSE).
This license set was chosen to both provide you with complete freedom to use
Enso, create libraries, and release them under any license of your choice, and
to allow us to release commercial products on top of the platform,
including Enso Cloud and Enso Enterprise on-premise server managers.
<br/>
### Contributing
Enso is a community-driven open source project which is and will always be open
and free to use. We are committed to a fully transparent development process and
highly appreciate every contribution. If you love the vision behind Enso and you
want to redefine the data processing world, join us and help us track down bugs,
implement new features, improve the documentation or spread the word! Join our
community on a [Discord chat](http://chat.enso.org) and read the
[development and contributing guidelines](docs/CONTRIBUTING.md).
<a href="https://github.com/enso-org/ide/graphs/contributors">
<img src="https://opencollective.com/enso-language/contributors.svg?width=890&button=false" />
</a>

View File

@ -1,9 +0,0 @@
[package]
name = "analytics"
version = "0.1.0"
authors = ["Enso Team <contact@enso.org>"]
edition = "2021"
[dependencies]
js-sys = { workspace = true }
wasm-bindgen = { workspace = true }

View File

@ -1,50 +0,0 @@
//! Provides data wrappers for our analytics API. This is intended to ensure we are conscious of
//! whether we are sending public or private data. No private data should be logged at the moment.
//!
//! Note: this is meant to be a little bit un-ergonomic to ensure the data has been vetted by the
//! API user and allow the reader of the code to see the intent behind the data.
use wasm_bindgen::JsValue;
/// Trait that allows us to log an object remotely.
pub trait Loggable {
/// Return the log message as JsValue.
fn get(self) -> JsValue;
}
impl Loggable for bool {
fn get(self) -> JsValue {
self.into()
}
}
impl Loggable for &str {
fn get(self) -> JsValue {
self.into()
}
}
impl Loggable for String {
fn get(self) -> JsValue {
self.into()
}
}
impl Loggable for &String {
fn get(self) -> JsValue {
self.into()
}
}
impl<F, S> Loggable for F
where
F: Fn() -> S,
S: Loggable,
{
fn get(self) -> JsValue {
self().get()
}
}
/// Wrapper struct for data that can be made public and has no privacy implications.
#[derive(Clone, Copy, Debug)]
pub struct AnonymousData<T: Loggable>(pub T);
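// A minimal usage sketch (illustrative only; `example_usage` is hypothetical and not part of the
// original API). Wrapping values in `AnonymousData` is the explicit "this data is public" step
// that the logging functions require from their callers.
#[cfg(target_arch = "wasm32")]
#[allow(dead_code)]
fn example_usage() {
    let flag = AnonymousData(true);
    // Closures are `Loggable` too, so expensive messages can be built lazily.
    let lazy = AnonymousData(|| String::from("computed only when logged"));
    // Converting to `JsValue` is what the remote-log layer ultimately does with the payload.
    let _ = (flag.0.get(), lazy.0.get());
}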

View File

@ -1,23 +0,0 @@
//! Remote analytics logging.
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
mod data;
mod remote_log;
pub use data::*;
pub use remote_log::*;

View File

@ -1,67 +0,0 @@
//! Provides an API to send data to our remote logging service. Requires the remote logging
//! to be already set up on the JS side: there needs to exist a `window.ensoglApp.remoteLog`
//! method that takes a string and does the actual logging.
use crate::data::*;
// ==============
// === Export ===
// ==============
pub use wasm_bindgen::prelude::*;
mod js {
use super::*;
#[wasm_bindgen(inline_js = "
export function remote_log(msg, value) {
window.ensoglApp.remoteLog(msg, value).catch((error) => {
console.error(`Error while logging message. ${error}`)
})
}
export function remote_log_value(msg, field_name, value) {
const data = {}
data[field_name] = value
remote_log(msg, data)
}
")]
extern "C" {
#[allow(unsafe_code)]
pub fn remote_log_value(msg: JsValue, field_name: JsValue, value: JsValue);
#[allow(unsafe_code)]
pub fn remote_log(msg: JsValue, value: JsValue);
}
}
/// Send the provided public event to our logging service.
#[allow(unused_variables)] // used only on wasm target
pub fn remote_log_event(message: &str) {
// Note: Disabling on non-wasm targets
#[cfg(target_arch = "wasm32")]
{
js::remote_log(JsValue::from(message.to_string()), JsValue::UNDEFINED);
}
}
/// Send the provided public event with a named value to our logging service.
#[allow(unused_variables)] // used only on wasm target
pub fn remote_log_value<T: Loggable>(message: &str, field_name: &str, data: AnonymousData<T>) {
// Note: Disabling on non-wasm targets
#[cfg(target_arch = "wasm32")]
{
let msg = JsValue::from(message.to_string());
let field_name = JsValue::from(field_name.to_string());
js::remote_log_value(msg, field_name, data.0.get());
}
}
// Note: Disabling on non-wasm targets
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// The code must be disabled on non-wasm targets, because trying to construct JS values would
// immediately panic. As remote logs are invoked from controller code, that would prevent running
// some controller tests natively.
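// A hedged usage sketch (the function below is hypothetical, for illustration only): sending a
// plain public event, and a public event with a named value attached.
#[allow(dead_code)]
fn example_log_calls(node_count: usize) {
    remote_log_event("graph_opened");
    // Only `AnonymousData` payloads are accepted, forcing the caller to vet the data as public.
    remote_log_value("node_count_changed", "count", AnonymousData(node_count.to_string()));
}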

View File

@ -1,15 +0,0 @@
[package]
name = "enso-config"
version = "0.1.0"
authors = ["Enso Team <contact@enso.org>"]
edition = "2021"
[dependencies]
ensogl = { path = "../../../lib/rust/ensogl" }
enso-prelude = { path = "../../../lib/rust/prelude" }
enso-json-to-struct = { path = "../../../lib/rust/json-to-struct" }
semver = { workspace = true }
thiserror = { workspace = true }
[build-dependencies]
config-reader = { path = "../../../lib/rust/config-reader" }

View File

@ -1,16 +0,0 @@
const CONFIG_PATH: &str = "../config.yaml";
const JSON_CONFIG: &[&str] = &[
"../../../lib/rust/ensogl/pack/js/src/runner/config.json",
"../../../app/ide-desktop/lib/content-config/src/config.json",
];
fn main() {
println!("cargo:rerun-if-changed={CONFIG_PATH}");
println!("cargo:rerun-if-changed=build.rs");
for path in JSON_CONFIG {
println!("cargo:rerun-if-changed={path}");
}
config_reader::generate_config_module_from_yaml(CONFIG_PATH);
}

View File

@ -1,180 +0,0 @@
//! Startup arguments definition.
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
use enso_prelude::*;
use enso_json_to_struct::json_to_struct;
// =================
// === Constants ===
// =================
const LOCAL_ENGINE_VERSION: &str = "0.0.0-dev";
// ==============
// === Errors ===
// ==============
/// Error type indicating that the Engine version does not meet the requirements.
#[derive(Clone, Debug, thiserror::Error)]
#[error("Unsupported Engine version: required {required} (or newer), found {found}.")]
pub struct UnsupportedEngineVersion {
/// The version of the Engine that is required.
pub required: semver::Version,
/// The version of the Engine that was found.
pub found: semver::Version,
}
// ===============
// === Version ===
// ===============
include!(concat!(env!("OUT_DIR"), "/config.rs"));
pub use generated::*;
/// The minimum supported engine version.
pub fn engine_version_required() -> semver::Version {
// Safe to unwrap, as `engine_version_supported` is known at compile time and is validated by the test.
semver::Version::parse(engine_version_supported).unwrap()
}
fn local_engine_version() -> semver::Version {
// Safe to unwrap, as `LOCAL_ENGINE_VERSION` is known at compile time and is validated by the test.
semver::Version::parse(LOCAL_ENGINE_VERSION).unwrap()
}
/// Check if the given Engine version meets the requirements.
///
/// Effectively, this checks if the given version is greater or equal to the minimum supported.
/// "Greater or equal" is defined by the [Semantic Versioning specification](https://semver.org/)
/// term of precedence.
///
/// There is a special exception for locally built engine versions, as they may theoretically be
/// lower, but we don't want to treat them as unsupported.
pub fn check_engine_version_requirement(
required_version: &semver::Version,
tested_version: &semver::Version,
) -> Result<(), UnsupportedEngineVersion> {
// We don't want to rely on the `semver::VersionReq` semantics here. Unfortunately the
// [Semantic Versioning specification](https://semver.org/) does not define the semantics of
// the version requirement operators, so different implementations may behave differently.
//
// The `semver::VersionReq` implementation follows the Cargo's implementation, namely:
// ```
// In particular, in order for any VersionReq to match a pre-release version, the VersionReq
// must contain at least one Comparator that has an explicit major, minor, and patch version
// identical to the pre-release being matched, and that has a nonempty pre-release component.
// ```
// See: https://docs.rs/semver/latest/semver/struct.VersionReq.html#associatedconstant.STAR
// This leads to counter-intuitive behavior, where `2023.0.0-dev` does not fulfill the
// `>= 2022.0.0-dev` requirement.
if tested_version < required_version && tested_version != &local_engine_version() {
Err(UnsupportedEngineVersion {
required: required_version.clone(),
found: tested_version.clone(),
})
} else {
Ok(())
}
}
/// Check if the given Engine version meets the requirements for this build.
///
/// See [`check_engine_version_requirement`] for more details.
pub fn check_engine_version(
engine_version: &semver::Version,
) -> Result<(), UnsupportedEngineVersion> {
check_engine_version_requirement(&engine_version_required(), engine_version)
}
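// A worked sketch of the exception described above (the version numbers are made up): a plain
// `semver::VersionReq` check would reject the newer `-dev` pre-release for the reasons quoted in
// the comment, while this function accepts it and still rejects genuinely older versions.
#[cfg(test)]
mod requirement_check_sketch {
    use super::*;

    #[test]
    fn newer_dev_build_is_accepted_and_older_is_rejected() -> anyhow::Result<()> {
        let required = semver::Version::parse("2023.1.1-dev")?;
        let newer = semver::Version::parse("2024.1.1-dev")?;
        let older = semver::Version::parse("2022.1.1-dev")?;
        assert!(check_engine_version_requirement(&required, &newer).is_ok());
        // Older versions are rejected, unless they equal the local dev build (`0.0.0-dev`).
        assert!(check_engine_version_requirement(&required, &older).is_err());
        Ok(())
    }
}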
// ============
// === Args ===
// ============
json_to_struct!(
"../../../../lib/rust/ensogl/pack/js/src/runner/config.json",
"../../../../app/ide-desktop/lib/content-config/src/config.json"
);
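/// Read the application arguments from the JS application configuration. Falls back to the
/// default values when the JS application is not available.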
pub fn read_args() -> Args {
debug_span!("Reading application arguments from JS.").in_scope(|| {
let mut args = Args::default();
if let Ok(js_app) = ensogl::system::js::app::app() {
for param in js_app.config().params() {
if let Some(value) = param.value() {
let path = format!("{}.value", param.structural_name());
if let Some(err) = args.set(&path, value) {
error!("{}", err.display())
}
}
}
} else {
error!("Could not connect to JS application. Using default configuration.")
}
args
})
}
lazy_static! {
pub static ref ARGS: Args = read_args();
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn check_that_version_requirement_parses() {
// We just expect that it won't panic.
engine_version_required();
}
#[test]
fn check_that_local_engine_version_constant_parses() {
// We just expect that it won't panic.
local_engine_version();
}
#[test]
fn new_project_engine_version_fills_requirements() {
// Sanity check: required version must be supported.
assert!(check_engine_version(&engine_version_required()).is_ok());
}
#[test]
fn newer_prerelease_matches() -> anyhow::Result<()> {
// Whatever version we have currently defined with `-dev` prerelease.
let current =
semver::Version { pre: semver::Prerelease::new("dev")?, ..engine_version_required() };
let newer = semver::Version { major: current.major + 1, ..current.clone() };
check_engine_version_requirement(&current, &newer)?;
Ok(())
}
}

View File

@ -1,8 +0,0 @@
[package]
name = "controller"
version = "0.1.0"
authors = ["Enso Team <contact@enso.org>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

View File

@ -1,26 +0,0 @@
[package]
name = "double-representation"
version = "0.1.0"
authors = ["Enso Team <contact@enso.org>"]
edition = "2021"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
ast = { path = "../../language/ast/impl" }
parser = { path = "../../language/parser" }
engine-protocol = { path = "../engine-protocol" }
enso-data-structures = { path = "../../../../lib/rust/data-structures" }
enso-prelude = { path = "../../../../lib/rust/prelude" }
enso-profiler = { path = "../../../../lib/rust/profiler" }
enso-text = { path = "../../../../lib/rust/text" }
const_format = { workspace = true }
failure = { workspace = true }
itertools = { workspace = true }
serde = { workspace = true }
uuid = { version = "0.8", features = ["serde", "v4", "wasm-bindgen"] }
[dev-dependencies]
regex = { workspace = true }
wasm-bindgen-test = { workspace = true }

View File

@ -1,454 +0,0 @@
//! Module with alias analysis — allows telling what identifiers are used and introduced by each
//! node in the graph.
use crate::prelude::*;
use crate::definition::DefinitionInfo;
use crate::definition::ScopeKind;
use ast::crumbs::Crumb;
use ast::crumbs::InfixCrumb;
use ast::crumbs::Located;
use ast::opr::match_named_argument;
use std::borrow::Borrow;
// ==============
// === Export ===
// ==============
#[cfg(test)]
pub mod test_utils;
// =======================
// === IdentifierUsage ===
// =======================
/// Description of how some node is interacting with the graph's scope.
#[derive(Clone, Debug, Default)]
pub struct IdentifierUsage {
/// Identifiers that the node introduces into the graph's scope.
pub introduced: Vec<Located<String>>,
/// Identifiers from the graph's scope that the node uses.
pub used: Vec<Located<String>>,
}
impl IdentifierUsage {
/// Returns all identifiers that are either used from or introduced into the scope.
pub fn all_identifiers(&self) -> Vec<Located<String>> {
self.introduced.iter().chain(self.used.iter()).cloned().collect()
}
}
// ================
// === Analysis ===
// ================
// === Helper Datatypes ===
/// Says whether the identifier occurrence introduces it into scope or uses it from scope.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Display, PartialEq, Eq)]
pub enum OccurrenceKind {
Used,
Introduced,
}
/// If the current context in the AST processor is a pattern context.
// TODO [mwu] Refer to the specification once it is merged.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Display, PartialEq, Eq)]
pub enum Context {
NonPattern,
Pattern,
}
/// Represents scope and information about identifiers usage within it.
#[derive(Clone, Debug, Default)]
pub struct Scope {
#[allow(missing_docs)]
pub symbols: IdentifierUsage,
}
impl Scope {
/// Iterates over identifiers that are used in this scope but are not introduced in this scope,
/// i.e. the identifiers that the parent scope must provide.
pub fn used_from_parent(self) -> impl Iterator<Item = Located<String>> {
let available = self.symbols.introduced.into_iter().map(|located_name| located_name.item);
let available = available.collect::<HashSet<_>>();
let all_used = self.symbols.used.into_iter();
all_used.filter(move |name| !available.contains(&name.item))
}
/// Drops the information about the nested child scope by:
/// 1) disregarding any usage of identifiers introduced in the child scope;
/// 2) propagating all non-shadowed identifier usage from the child scope into this scope's usage
///    list.
fn coalesce_child(&mut self, child: Scope) {
let symbols_to_use = child.used_from_parent();
self.symbols.used.extend(symbols_to_use);
}
}
// === AliasAnalyzer ===
/// Traverses the AST and analyzes identifier usage.
#[derive(Clone, Debug, Default)]
pub struct AliasAnalyzer {
/// Root scope for this analyzer.
pub root_scope: Scope,
/// Stack of scopes that shadow the root one.
shadowing_scopes: Vec<Scope>,
/// Stack of context. Lack of any context information is considered non-pattern context.
context: Vec<Context>,
/// Current location, relative to the input AST root.
location: Vec<Crumb>,
}
impl AliasAnalyzer {
/// Creates a new analyzer.
pub fn new() -> AliasAnalyzer {
AliasAnalyzer::default()
}
/// Adds items to the target vector, calls the callback `f`, then removes the items.
fn with_items_added<T, R>(
&mut self,
vec: impl Fn(&mut Self) -> &mut Vec<T>,
items: impl IntoIterator<Item: Into<T>>,
f: impl FnOnce(&mut Self) -> R,
) -> R {
let original_count = vec(self).len();
vec(self).extend(items.into_iter().map(|item| item.into()));
let ret = f(self);
vec(self).truncate(original_count);
ret
}
/// Pushes a new scope, then runs the given function `f`. Once it finishes, the scope is removed
/// and its unshadowed variable usage is propagated onto the current scope.
fn in_new_scope(&mut self, f: impl FnOnce(&mut Self)) {
let scope = Scope::default();
self.shadowing_scopes.push(scope);
f(self);
let scope = self.shadowing_scopes.pop().unwrap();
self.current_scope_mut().coalesce_child(scope);
}
/// Temporarily sets the context and invokes `f` within it.
fn in_context(&mut self, context: Context, f: impl FnOnce(&mut Self)) {
self.with_items_added(|this| &mut this.context, iter::once(context), f);
}
/// Enters a new location (relative to the current one), invokes `f`, leaves the location.
fn in_location<Cs, F, R>(&mut self, crumbs: Cs, f: F) -> R
where
Cs: IntoIterator<Item = Crumb>,
F: FnOnce(&mut Self) -> R, {
self.with_items_added(|this| &mut this.location, crumbs, f)
}
/// Enters a new location (relative to the current one), invokes `f`, leaves the location.
fn in_location_of<T, F, R>(&mut self, located_item: &Located<T>, f: F) -> R
where F: FnOnce(&mut Self) -> R {
self.in_location(located_item.crumbs.iter().cloned(), f)
}
/// Obtains a mutable reference to the current scope.
fn current_scope_mut(&mut self) -> &mut Scope {
self.shadowing_scopes.last_mut().unwrap_or(&mut self.root_scope)
}
/// Returns the current context kind (pattern or not).
fn current_context(&self) -> Context {
self.context.last().copied().unwrap_or(Context::NonPattern)
}
/// Records identifier occurrence in the current scope.
fn record_identifier(&mut self, kind: OccurrenceKind, identifier: String) {
let identifier = Located::new(self.location.clone(), identifier);
let symbols = &mut self.current_scope_mut().symbols;
let target = match kind {
OccurrenceKind::Used => &mut symbols.used,
OccurrenceKind::Introduced => &mut symbols.introduced,
};
target.push(identifier)
}
/// Checks if we are currently in the pattern context.
fn is_in_pattern(&self) -> bool {
self.current_context() == Context::Pattern
}
/// If the given AST is an identifier, records its occurrence.
/// Returns a boolean saying whether the identifier was recorded.
fn try_recording_identifier(&mut self, kind: OccurrenceKind, ast: &Ast) -> bool {
let name = ast::identifier::name(ast);
name.map(|name| self.record_identifier(kind, name.to_owned())).is_some()
}
/// If the given located AST-like entity is an identifier, records its occurrence.
fn store_if_name<'a, T>(&mut self, kind: OccurrenceKind, located: Located<T>) -> bool
where T: Into<&'a Ast> + 'a + Copy {
let ast = located.item.into();
self.in_location_of(&located, |this| this.try_recording_identifier(kind, ast))
}
/// Processes the given AST, while crumb is temporarily pushed to the current location.
fn process_subtree_at(&mut self, crumb: impl Into<Crumb>, subtree: &Ast) {
self.in_location(crumb.into(), |this| this.process_ast(subtree))
}
/// Processes the given AST, while crumb is temporarily pushed to the current location.
fn process_located_ast(&mut self, located_ast: &Located<impl Borrow<Ast>>) {
self.in_location_of(located_ast, |this| this.process_ast(located_ast.item.borrow()))
}
/// Processes the subtrees of the given AST denoted by the given crumbs.
pub fn process_given_subtrees<C>(&mut self, ast: &C, crumbs: impl Iterator<Item = C::Crumb>)
where
C: Crumbable,
C::Crumb: Into<Crumb>, {
for crumb in crumbs {
// Failure should never happen but we don't really care enough to crash anything
// otherwise.
if let Ok(subtree) = ast.get(&crumb) {
self.process_subtree_at(crumb.into(), subtree)
}
}
}
/// Processes all subtrees of the given AST in their respective locations.
pub fn process_subtrees(&mut self, ast: &impl Crumbable) {
for (crumb, ast) in ast.enumerate() {
self.process_subtree_at(crumb, ast)
}
}
/// Processes the given AST, along with its subtree.
///
/// This is the primary function that is recursively being called as the AST is being traversed.
pub fn process_ast(&mut self, ast: &Ast) {
if let Some(definition) = DefinitionInfo::from_line_ast(ast, ScopeKind::NonRoot, default())
{
self.process_definition(&definition)
} else if let Some(assignment) = ast::opr::to_assignment(ast) {
self.process_assignment(&assignment);
} else if let Some(lambda) = ast::macros::as_lambda(ast) {
self.process_lambda(&lambda);
} else if self.is_in_pattern() {
// We are in the pattern (be it a lambda's or assignment's left side). Three options:
// 1) This is a destructuring pattern match using infix syntax, like `head,tail`.
// 2) This is a destructuring pattern match with prefix syntax, like `Point x y`.
// 3) This is a single AST node, like `foo` or `Foo`.
// (the possibility of definition has been already excluded)
if let Some(infix_chain) = ast::opr::Chain::try_new(ast) {
// An infix always acts as a pattern match when on the left-hand side.
for operand in infix_chain.enumerate_non_empty_operands() {
self.process_located_ast(&operand.map(|operand| &operand.arg))
}
for operator in infix_chain.enumerate_operators() {
// Operators in infix positions are treated as constructors, i.e. they are used.
self.store_if_name(OccurrenceKind::Used, operator);
}
} else if let Some(prefix_chain) = ast::prefix::Chain::from_ast(ast) {
// Constructor we match against is used. Its arguments introduce names.
if ast::known::Cons::try_from(&prefix_chain.func).is_ok() {
self.store_if_name(OccurrenceKind::Used, prefix_chain.located_func());
}
// We ignore the function name; in the pattern context, the arguments just introduce names.
for argument in prefix_chain.enumerate_args() {
self.process_located_ast(&argument)
}
} else {
// Single AST node on the assignment LHS. Deal with identifiers, otherwise
// recursively process subtrees.
match ast.shape() {
ast::Shape::Cons(_) => {
self.try_recording_identifier(OccurrenceKind::Used, ast);
}
ast::Shape::Var(_) => {
self.try_recording_identifier(OccurrenceKind::Introduced, ast);
}
_ => {
self.process_subtrees(ast);
}
}
}
} else {
// Non-pattern context.
if ast::known::Block::try_from(ast).is_ok() {
self.in_new_scope(|this| this.process_subtrees(ast))
} else if self.try_recording_identifier(OccurrenceKind::Used, ast) {
// Plain identifier: it was just recorded as a side effect of evaluating the condition.
// No need to do anything more.
} else if let Some(prefix_chain) = ast::prefix::Chain::from_ast(ast) {
self.process_located_ast(&prefix_chain.located_func());
for argument in prefix_chain.enumerate_args() {
// Ignore the assignment used for named arguments. Descend directly into the
// argument value.
if let Some(named) = match_named_argument(argument.item) {
let rhs = argument.descendant(InfixCrumb::RightOperand, named.rarg);
self.process_located_ast(&rhs)
} else {
self.process_located_ast(&argument)
}
}
} else {
self.process_subtrees(ast);
}
}
}
fn process_definition(&mut self, definition: &DefinitionInfo) {
// Handle the definition name.
self.in_location(definition.name.crumbs.clone(), |this|
// We take the base name (ignoring extension components) and mark it as introduced.
this.in_location(definition.name.name.crumbs.clone(), |this| {
let name = definition.name.name.item.clone();
this.record_identifier(OccurrenceKind::Introduced,name);
}));
// The scoping for definitions is not entirely clean (should each argument introduce a new
// subscope?) but we do not really care that much. Mostly we are just interested in knowing
// what identifiers are taken in / introduced into the parent scope.
// What happens in the definition body, stays in the definition body.
self.in_new_scope(|this| {
// Args are just patterns.
this.in_context(Context::Pattern, |this| {
for arg in &definition.args {
this.process_located_ast(arg)
}
});
this.process_located_ast(&definition.body());
});
}
/// Processes the assignment AST node. Left side is pattern, right side is business as usual.
fn process_assignment(&mut self, assignment: &ast::known::Infix) {
self.in_context(Context::Pattern, |this| {
this.process_subtree_at(InfixCrumb::LeftOperand, &assignment.larg)
});
self.process_subtree_at(InfixCrumb::RightOperand, &assignment.rarg);
}
/// Processes the matched lambda macro. Argument is in pattern context, and the whole lambda is
/// a new scope.
fn process_lambda(&mut self, lambda: &ast::macros::LambdaInfo) {
self.in_new_scope(|this| {
this.in_context(Context::Pattern, |this| this.process_located_ast(&lambda.arg));
this.process_located_ast(&lambda.body)
})
}
}
/// Describes the identifiers that the code represented by the AST introduces into the graph, and
/// the identifiers from the graph's scope that it uses. This logic serves as a base for connection
/// discovery, where the analyzed ASTs are typically the nodes' ASTs.
pub fn analyze_ast(ast: &Ast) -> IdentifierUsage {
let mut analyzer = AliasAnalyzer::new();
analyzer.process_ast(ast);
analyzer.root_scope.symbols
}
/// Describes variable usage within a given Ast-like crumbable entity.
pub fn analyze_crumbable(crumbable: &impl Crumbable) -> IdentifierUsage {
let mut analyzer = AliasAnalyzer::default();
analyzer.process_subtrees(crumbable);
analyzer.root_scope.symbols
}
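// A hedged usage sketch (mirroring the markdown test cases below): for the line `sum = a + b`,
// the analysis reports `sum` as introduced, and `a`, `+` and `b` as used from the graph's scope.
#[cfg(test)]
mod analysis_sketch {
    use super::*;

    #[test]
    fn analyzing_a_single_line() {
        let parser = parser::Parser::new();
        let ast = parser.parse_line_ast("sum = a + b").unwrap();
        let usage = analyze_ast(&ast);
        let introduced: Vec<_> = usage.introduced.iter().map(|name| name.item.clone()).collect();
        let used: Vec<_> = usage.used.iter().map(|name| name.item.clone()).collect();
        assert_eq!(introduced, vec!["sum".to_string()]);
        assert!(used.contains(&"a".to_string()));
        assert!(used.contains(&"b".to_string()));
    }
}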
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::test_utils::*;
use super::*;
/// Checks if actual observed sequence of located identifiers matches the expected one.
/// Expected identifiers are described as code spans in the node's text representation.
fn validate_identifiers(
name: impl Str,
ast: &Ast,
expected: Vec<Range<usize>>,
actual: &[Located<String>],
) {
let mut checker = IdentifierValidator::new(name, ast, expected);
checker.validate_identifiers(actual);
}
/// Runs the test for the given test case description.
fn run_case(parser: &parser::Parser, case: Case) {
debug!("\n===========================================================================\n");
debug!("Case: {}", case.code);
let ast = parser.parse_line_ast(&case.code).unwrap();
let result = analyze_ast(&ast);
debug!("Analysis results: {result:?}");
validate_identifiers("introduced", &ast, case.expected_introduced, &result.introduced);
validate_identifiers("used", &ast, case.expected_used, &result.used);
}
/// Runs the test for the test case expressed using markdown notation. See `Case` for details.
fn run_markdown_case(parser: &parser::Parser, marked_code: impl AsRef<str>) {
debug!("Running test case for {}", marked_code.as_ref());
let case = Case::from_markdown(marked_code.as_ref());
run_case(parser, case)
}
#[test]
fn test_alias_analysis() {
let parser = parser::Parser::new();
let test_cases = [
"»foo«",
"«five» = 5",
"»Five« = 5",
"«foo» = »bar«",
"«foo» = »foo« »+« »bar«",
"«foo» = »Bar«",
"5 = »Bar«",
"«sum» = »a« »+« »b«",
"»Point« «x» «u» = »point«",
"«x» »,« «y» = »pair«",
r"«inc» =
»foo« »+« 1",
r"«inc» =
foo = 2
foo »+« 1",
// Below should know that "foo + 1" does not use "foo" from the scope.
// That requires at least partial support for definitions.
r"«inc» =
foo x = 2
foo »+« 1",
// === Macros Match ===
"a -> a",
"a -> »b«",
"»A« -> »b«",
"a -> »A« -> a",
"a -> a -> »A«",
"(»foo«)",
"(«foo») = (»bar«)",
"if »A« then »B«",
"if »a« then »b« else »c«",
"(»foo«",
// === Definition ===
"«foo» a b c = »foo« a »d«",
"«foo» a b c = d -> a d",
"«foo» a (»Point« x y) c = »foo« a x »d«",
];
for case in &test_cases {
run_markdown_case(&parser, case)
}
}
}

View File

@ -1,205 +0,0 @@
//! Utilities to facilitate testing of alias-analysis-related code.
use crate::prelude::*;
use crate::test_utils::MarkdownProcessor;
use ast::crumbs::Located;
use regex::Captures;
use regex::Regex;
use regex::Replacer;
// ============
// === Case ===
// ============
/// Test case for testing identifier resolution for nodes.
/// Can be expressed using markdown notation, see `from_markdown` method.
#[derive(Clone, Debug, Default)]
pub struct Case {
/// The code: the text of the block line that is considered to be a node of a graph.
/// Any markers are already removed.
pub code: String,
/// List of spans in the code where the identifiers are introduced into the graph's scope.
pub expected_introduced: Vec<Range<usize>>,
/// List of spans in the code where the identifiers from the graph's scope are used.
pub expected_used: Vec<Range<usize>>,
}
impl Case {
/// Constructs a test case using markdown notation. The input should be a text representation of
/// the node's AST in which all identifiers introduced into the graph's scope are marked like
/// `«foo»`, and all identifiers used from the graph's scope are marked like `»sum«`.
pub fn from_markdown(marked_code: impl Str) -> Case {
// Regexp that matches either «sth» or »sth« into a group named `introduced` or `used`,
// respectively. See: https://regex101.com/r/pboF8O/2 for detailed explanation.
let regex = format!(r"«(?P<{INTRODUCED}>[^»]*)»|»(?P<{USED}>[^«]*)«");
// As this is test utils, we don't try nicely handling failure nor reusing the compiled
// regexp between calls to save some cycles.
let regex = Regex::new(&regex).unwrap();
let mut replacer = MarkdownReplacer::default();
let code = regex.replace_all(marked_code.as_ref(), replacer.by_ref()).into();
Case { code, expected_introduced: replacer.introduced, expected_used: replacer.used }
}
}
// ========================
// === MarkdownReplacer ===
// ========================
/// We want to recognize two kinds of marked identifiers: ones introduced into the graph's scope and
/// ones used from the graph's scope.
#[derive(Clone, Copy, Debug, Display)]
pub enum Kind {
Introduced,
Used,
}
/// Name of the pattern group matching introduced identifier.
const INTRODUCED: &str = "introduced";
/// Name of the pattern group matching used identifier.
const USED: &str = "used";
/// Replacer that is called with each marked token. Does the following:
/// * removes the markdown, i.e. replaces `»foo«` with `foo`;
/// * counts removed markdown bytes, so it is possible to translate between indices in marked and
/// unmarked code;
/// * accumulates spans of introduced and used identifiers.
#[derive(Debug, Default)]
struct MarkdownReplacer {
processor: MarkdownProcessor,
/// Spans in the unmarked code.
introduced: Vec<Range<usize>>,
/// Spans in the unmarked code.
used: Vec<Range<usize>>,
}
// Processes every single match for a marked entity.
impl Replacer for MarkdownReplacer {
fn replace_append(&mut self, captures: &Captures, dst: &mut String) {
let (kind, matched) = if let Some(introduced) = captures.name(INTRODUCED) {
(Kind::Introduced, introduced)
} else if let Some(used) = captures.name(USED) {
(Kind::Used, used)
} else {
panic!("Unexpected capture: expected named capture `{INTRODUCED}` or `{USED}`.")
};
let span = self.processor.process_match(captures, &matched, dst);
match kind {
Kind::Introduced => self.introduced.push(span),
Kind::Used => self.used.push(span),
};
}
}
// ===========================
// === IdentifierValidator ===
// ===========================
#[derive(Clone, Copy, Debug, Display, PartialEq)]
enum HasBeenValidated {
No,
Yes,
}
/// Helper test structure that requires that each given identifier is validated at least once.
/// Otherwise, it shall panic when dropped.
#[derive(Clone, Debug)]
pub struct IdentifierValidator<'a> {
ast: &'a Ast,
name: String,
validations: HashMap<String, HasBeenValidated>,
}
impl<'a> IdentifierValidator<'a> {
/// Creates a new checker, with identifier set obtained from given node's representation
/// spans.
pub fn new(name: impl Str, ast: &Ast, spans: Vec<Range<usize>>) -> IdentifierValidator {
let name = name.into();
let repr = ast.repr();
let mut validations = HashMap::default();
for span in spans {
let name = repr[span].to_owned();
validations.insert(name, HasBeenValidated::No);
}
IdentifierValidator { ast, name, validations }
}
/// Marks given identifier as checked.
pub fn validate_identifier(&mut self, name: &str) {
let err = format!("{}: unexpected identifier `{name}` validated", self.name);
let used = self.validations.get_mut(name).expect(&err);
*used = HasBeenValidated::Yes;
}
/// Marks given sequence of identifiers as checked.
pub fn validate_identifiers(
&mut self,
identifiers: impl IntoIterator<Item = &'a Located<String>>,
) {
for identifier in identifiers {
self.validate_identifier(&identifier.item);
let crumbs = &identifier.crumbs;
let ast_result = self.ast.get_traversing(crumbs);
let ast = ast_result.expect("failed to retrieve ast from crumb");
let name_err = || panic!("Failed to use AST {} as an identifier name", ast.repr());
let name = ast::identifier::name(ast).unwrap_or_else(name_err);
assert_eq!(name, identifier.item)
}
}
}
/// Panics if there are remaining identifiers that were not checked.
impl<'a> Drop for IdentifierValidator<'a> {
fn drop(&mut self) {
if !std::thread::panicking() {
for elem in &self.validations {
assert_eq!(
elem.1,
&HasBeenValidated::Yes,
"{}: identifier `{}` was not validated)",
self.name,
elem.0
)
}
} else {
debug!("Skipping identifier validation, because thread is already in panic.");
}
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn parsing_markdown_to_test_case() {
let code = "«sum» = »a« + »b«";
let case = Case::from_markdown(code);
assert_eq!(case.code, "sum = a + b");
assert_eq!(case.expected_introduced.len(), 1);
assert_eq!(case.expected_introduced[0], 0..3);
assert_eq!(&case.code[case.expected_introduced[0].clone()], "sum");
assert_eq!(case.expected_used.len(), 2);
assert_eq!(case.expected_used[0], 6..7); // these are utf-8 byte indices
assert_eq!(&case.code[case.expected_used[0].clone()], "a");
assert_eq!(case.expected_used[1], 10..11);
assert_eq!(&case.code[case.expected_used[1].clone()], "b");
}
}

View File

@ -1,266 +0,0 @@
//! Code related to connection discovery and operations.
use crate::prelude::*;
use crate::alias_analysis::analyze_crumbable;
use crate::definition::DefinitionInfo;
use crate::definition::ScopeKind;
use crate::node::Id;
use crate::node::MainLine;
use ast::crumbs::Crumb;
use ast::crumbs::Crumbs;
use ast::crumbs::Located;
// ================
// === Endpoint ===
// ================
/// A connection endpoint.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Endpoint {
/// Id of the node where the endpoint is located.
pub node: Id,
/// The AST ID and location of a port to which this endpoint is connected. The location is
/// relative to the entire node's AST, including both its expression and assignment pattern.
pub port: Located<Id>,
}
impl Endpoint {
/// The first crumb identifies a line in the given block, i.e. the node. The remaining crumbs
/// identify an AST within the node's AST.
///
/// Returns `None` if the first crumb is not present or does not denote a valid node.
fn new_in_block(block: &ast::Block<Ast>, mut crumbs: Crumbs) -> Option<Endpoint> {
let Some(Crumb::Block(line_crumb)) = crumbs.pop_front() else { return None };
let line_ast = block.get(&line_crumb).ok()?;
let definition = DefinitionInfo::from_line_ast(line_ast, ScopeKind::NonRoot, block.indent);
let is_non_def = definition.is_none();
let node = is_non_def.and_option_from(|| MainLine::from_ast(line_ast))?.id();
let port_id = line_ast.get_traversing(&crumbs).ok()?.id?;
Some(Endpoint { node, port: Located::new(crumbs, port_id) })
}
}
// ==================
// === Connection ===
// ==================
/// Describes a connection between two endpoints: from `source` to `target`.
#[allow(missing_docs)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Connection {
pub source: Endpoint,
pub target: Endpoint,
}
/// Lists all the connections in the graph for the given code block.
pub fn list_block(block: &ast::Block<Ast>) -> Vec<Connection> {
let identifiers = analyze_crumbable(block);
let introduced = identifiers.introduced.into_iter();
let used = identifiers.used.into_iter();
let introduced_names: HashMap<String, Endpoint> = introduced
.flat_map(|ident| Some((ident.item, Endpoint::new_in_block(block, ident.crumbs)?)))
.collect();
used.flat_map(|ident| {
// If name is both introduced and used in the graph's scope; and both of these
// occurrences can be represented as endpoints, then we have a connection.
let source = introduced_names.get(&ident.item)?.clone();
let target = Endpoint::new_in_block(block, ident.crumbs)?;
Some(Connection { source, target })
})
.collect()
}
/// Lists all the connections in the single-expression definition body.
pub fn list_expression(_ast: &Ast) -> Vec<Connection> {
// At this point, single-expression graphs do not have any connections.
// This will change once there are input/output pseudo-nodes.
vec![]
}
/// Lists connections in the given definition body. For now, it only makes sense for the block shape.
pub fn list(body: &Ast) -> Vec<Connection> {
match body.shape() {
ast::Shape::Block(block) => list_block(block),
_ => list_expression(body),
}
}
// ============================
// === Connections Analysis ===
// ============================
/// A function grouping a set of connections by their source node.
pub fn group_by_source_node(
connections: impl IntoIterator<Item = Connection>,
) -> HashMap<Id, Vec<Connection>> {
let mut result = HashMap::<Id, Vec<Connection>>::new();
for connection in connections {
result.entry(connection.source.node).or_default().push(connection)
}
result
}
/// Returns a set of nodes dependent of the given node in the block.
///
/// A node _A_ is dependent on node _B_ if its input is connected to _B_'s output, or to at least
/// one node dependent on _B_.
pub fn dependent_nodes_in_def(body: &Ast, node: Id) -> HashSet<Id> {
let connections = list(body);
let node_out_connections = group_by_source_node(connections);
let mut result = HashSet::new();
let mut to_visit = vec![node];
while let Some(current_node) = to_visit.pop() {
let opt_out_connections = node_out_connections.get(&current_node);
let out_connections = opt_out_connections.iter().flat_map(|v| v.iter());
let out_nodes = out_connections.map(|c| c.target.node);
let new_nodes_in_result = out_nodes.filter(|n| result.insert(*n));
for node in new_nodes_in_result {
to_visit.push(node)
}
}
result
}
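// A hedged sketch of the dependency query (the code and names below are illustrative): `b = a`
// depends on `a = 1`, and `c = b` depends on it transitively through `b`.
#[cfg(test)]
mod dependency_sketch {
    use super::*;
    use crate::definition::DefinitionInfo;
    use crate::graph::GraphInfo;
    use parser::Parser;

    #[test]
    fn transitive_dependents() {
        let parser = Parser::new();
        let code = "main =\n    a = 1\n    b = a\n    c = b";
        let module = parser.parse_module(code, default()).unwrap();
        let definition = DefinitionInfo::from_root_line(&module.lines[0]).unwrap();
        let graph = GraphInfo::from_definition(definition);
        let nodes = graph.nodes();
        let id_of = |repr: &str| nodes.iter().find(|node| node.ast().repr() == repr).unwrap().id();
        let dependent = dependent_nodes_in_def(graph.source.body().item, id_of("a = 1"));
        assert!(dependent.contains(&id_of("b = a")));
        assert!(dependent.contains(&id_of("c = b")));
    }
}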
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::definition::DefinitionInfo;
use crate::graph::GraphInfo;
use ast::crumbs;
use ast::crumbs::InfixCrumb;
use parser::Parser;
struct TestRun {
graph: GraphInfo,
connections: Vec<Connection>,
}
impl TestRun {
fn from_definition(definition: DefinitionInfo) -> TestRun {
let graph = GraphInfo::from_definition(definition);
let repr_of = |connection: &Connection| {
let endpoint = &connection.source;
let node = graph.find_node(endpoint.node).unwrap();
let ast = node.ast().get_traversing(&endpoint.port.crumbs).unwrap();
ast.repr()
};
let mut connections = graph.connections();
connections.sort_by_key(|con| repr_of(con));
TestRun { graph, connections }
}
fn from_main_def(code: impl Str) -> TestRun {
let parser = Parser::new();
let module = parser.parse_module(code, default()).unwrap();
let definition = DefinitionInfo::from_root_line(&module.lines[0]).unwrap();
Self::from_definition(definition)
}
fn from_block(code: impl Str) -> TestRun {
let body = code.as_ref().lines().map(|line| format!(" {}", line.trim())).join("\n");
let definition_code = format!("main =\n{body}");
Self::from_main_def(definition_code)
}
fn endpoint_node_repr(&self, endpoint: &Endpoint) -> String {
self.graph.find_node(endpoint.node).unwrap().ast().clone().repr()
}
}
#[test]
pub fn connection_listing_test_plain() {
use InfixCrumb::LeftOperand;
use InfixCrumb::RightOperand;
let code_block = r"
d = p
a = d
b = d
c = a + b
fun a = a b
f = fun 2";
let run = TestRun::from_block(code_block);
let c = &run.connections[0];
assert_eq!(run.endpoint_node_repr(&c.source), "a = d");
assert_eq!(&c.source.port.crumbs, &crumbs![LeftOperand]);
assert_eq!(run.endpoint_node_repr(&c.target), "c = a + b");
assert_eq!(&c.target.port.crumbs, &crumbs![RightOperand, LeftOperand]);
let c = &run.connections[1];
assert_eq!(run.endpoint_node_repr(&c.source), "b = d");
assert_eq!(&c.source.port.crumbs, &crumbs![LeftOperand]);
assert_eq!(run.endpoint_node_repr(&c.target), "c = a + b");
assert_eq!(&c.target.port.crumbs, &crumbs![RightOperand, RightOperand]);
let c = &run.connections[2];
assert_eq!(run.endpoint_node_repr(&c.source), "d = p");
assert_eq!(&c.source.port.crumbs, &crumbs![LeftOperand]);
assert_eq!(run.endpoint_node_repr(&c.target), "a = d");
assert_eq!(&c.target.port.crumbs, &crumbs![RightOperand]);
let c = &run.connections[3];
assert_eq!(run.endpoint_node_repr(&c.source), "d = p");
assert_eq!(&c.source.port.crumbs, &crumbs![LeftOperand]);
assert_eq!(run.endpoint_node_repr(&c.target), "b = d");
assert_eq!(&c.target.port.crumbs, &crumbs![RightOperand]);
// Note that the line `fun a = a b` does not introduce any connections, as it is a definition.
assert_eq!(run.connections.len(), 4);
}
#[test]
pub fn inline_definition() {
let run = TestRun::from_main_def("main = a");
assert!(run.connections.is_empty());
}
#[test]
pub fn listing_dependent_nodes() {
let code_block = "\
f,g = p
a = f
b = g
c = 2
d = a + b
e = b";
let mut expected_dependent_nodes = HashMap::<&'static str, Vec<&'static str>>::new();
expected_dependent_nodes.insert("a = f", vec!["d = a + b"]);
expected_dependent_nodes.insert("b = g", vec!["d = a + b", "e = b"]);
expected_dependent_nodes.insert("c = 2", vec![]);
expected_dependent_nodes.insert("d = a + b", vec![]);
expected_dependent_nodes.insert("e = b", vec![]);
let TestRun { graph, .. } = TestRun::from_block(code_block);
let nodes = graph.nodes();
assert_eq!(nodes.len(), expected_dependent_nodes.len());
for node in nodes {
let node_repr = node.ast().repr();
let expected = expected_dependent_nodes.get(node_repr.as_str()).unwrap();
let result = dependent_nodes_in_def(graph.source.body().item, node.id());
let result_node = result.iter().map(|id| graph.find_node(*id).unwrap());
let mut result_repr = result_node.map(|n| n.ast().repr()).collect_vec();
result_repr.sort();
assert_eq!(result_repr, *expected);
}
}
}

View File

@ -1,245 +0,0 @@
//! A module containing utilities for managing execution context switch expressions.
//!
//! The context switch expressions are used to enable or disable a certain context for a single
//! node.
//!
//! For example, the following code:
//! ```ignore
//! operator11 = Runtime.with_enabled_context Context.Output environment="design" <|
//! operator10.write "C:\Temp\Test.xlsx"
//! ```
//! will enable the [`Context::Output`] context for the `operator11` node in `design` environment.
use crate::prelude::*;
use crate::name::QualifiedName;
use crate::name::QualifiedNameRef;
use ast::known;
use ast::opr::qualified_name_chain;
// =================
// === Constants ===
// =================
/// FQN for the [`Context::Output`].
const OUTPUT: [&str; 5] = ["Standard", "Base", "Runtime", "Context", "Output"];
/// FQN for the `Runtime.with_enabled_context`, which corresponds to [`ContextSwitch::Enable`].
const ENABLE: [&str; 4] = ["Standard", "Base", "Runtime", "with_enabled_context"];
/// FQN for the `Runtime.with_disabled_context`, which corresponds to [`ContextSwitch::Disable`].
const DISABLE: [&str; 4] = ["Standard", "Base", "Runtime", "with_disabled_context"];
// ===============================
// === ContextSwitchExpression ===
// ===============================
// === ContextSwitch
/// The two possible context switches.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[allow(missing_docs)]
pub enum ContextSwitch {
Enable,
Disable,
}
impl<'a> TryFrom<QualifiedNameRef<'a>> for ContextSwitch {
type Error = anyhow::Error;
fn try_from(qualified_name: QualifiedNameRef<'a>) -> Result<Self, Self::Error> {
// Unwraps are safe, because this function is tested.
let enable_name = QualifiedName::from_all_segments(ENABLE).unwrap();
let disable_name = QualifiedName::from_all_segments(DISABLE).unwrap();
if qualified_name == enable_name {
Ok(ContextSwitch::Enable)
} else if qualified_name == disable_name {
Ok(ContextSwitch::Disable)
} else {
Err(anyhow::anyhow!("Invalid context switch name: {:?}.", qualified_name))
}
}
}
// === Context ===
/// Available execution contexts.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[allow(missing_docs)]
pub enum Context {
Output,
}
impl<'a> TryFrom<QualifiedNameRef<'a>> for Context {
type Error = anyhow::Error;
fn try_from(qualified_name: QualifiedNameRef<'a>) -> Result<Self, Self::Error> {
// Unwrap is safe, because this function is tested.
let output_name = QualifiedName::from_all_segments(OUTPUT).unwrap();
if qualified_name == output_name {
Ok(Context::Output)
} else {
Err(anyhow::anyhow!("Invalid context name: {:?}.", qualified_name))
}
}
}
// === Environment ===
im_string_newtype! {
/// The name of the execution environment.
Environment
}
// === ContextSwitchExpression ===
/// A representation of a single context switch expression.
#[derive(Debug, Clone, PartialEq)]
#[allow(missing_docs)]
pub struct ContextSwitchExpression {
pub switch: ContextSwitch,
pub context: Context,
pub environment: Environment,
}
impl ContextSwitchExpression {
/// Parse the context switch expression from the given AST.
pub fn parse(ast: &Ast) -> Option<Self> {
let infix = known::Infix::try_new(ast.clone()).ok()?;
if ast::opr::is_right_assoc_opr(&infix.opr) {
let prefix = ast::prefix::Chain::from_ast(&infix.larg)?;
let context_switch = QualifiedName::try_from(&prefix.func).ok()?;
let switch = ContextSwitch::try_from(context_switch.as_ref()).ok()?;
if let [context, environment] = &prefix.args[..] {
let context_name = QualifiedName::try_from(&context.sast.wrapped).ok()?;
let context = Context::try_from(context_name.as_ref()).ok()?;
let environment = environment.sast.wrapped.clone();
let environment = known::Tree::try_from(environment).ok();
let environment = environment.map(|t| t.as_text().map(Into::into));
let environment = environment.flatten()?;
Some(ContextSwitchExpression { switch, context, environment })
} else {
None
}
} else {
None
}
}
/// Convert the context switch expression to an AST.
pub fn to_ast(&self) -> Ast {
let func = match self.switch {
ContextSwitch::Enable => ENABLE,
ContextSwitch::Disable => DISABLE,
};
let context = match self.context {
Context::Output => OUTPUT,
};
let func = qualified_name_chain(func.into_iter()).unwrap().into_ast();
let context = qualified_name_chain(context.into_iter()).unwrap().into_ast();
let environment = format!("\"{}\"", self.environment.deref());
let environment: Ast = ast::Tree::text(environment).into();
let args = vec![context, environment];
ast::prefix::Chain::new(func, args).into_ast()
}
/// Remove the context switch expression from the given AST. The unmodified `ast` is returned
/// if it does not contain any context switch expression.
pub fn without_expression(ast: &Ast) -> Ast {
if ContextSwitchExpression::parse(ast).is_some() {
let crumb = ast::crumbs::InfixCrumb::RightOperand.into();
let rarg = ast.get(&crumb).unwrap_or(ast);
rarg.clone()
} else {
ast.clone()
}
}
}
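// A hedged round-trip sketch (illustrative only): building a context switch expression
// programmatically and rendering it back to source code with `to_ast`.
#[cfg(test)]
mod render_sketch {
    use super::*;

    #[test]
    fn rendering_a_context_switch() {
        let expression = ContextSwitchExpression {
            switch: ContextSwitch::Enable,
            context: Context::Output,
            environment: "design".into(),
        };
        let repr = expression.to_ast().repr();
        assert!(repr.contains("with_enabled_context"));
        assert!(repr.contains("Context.Output"));
        assert!(repr.contains("\"design\""));
    }
}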
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use parser::Parser;
#[test]
fn test_recognizing_execution_context_switch() {
#[derive(Debug)]
struct Case {
input: &'static str,
expected: Option<ContextSwitchExpression>,
}
#[rustfmt::skip]
let cases = vec![
Case {
input: "foo",
expected: None,
},
Case {
input: "foo <| bar",
expected: None,
},
Case {
input: "foo <| bar <| baz",
expected: None,
},
Case {
input: "Runtime.with_enabled_context blabla <| bar",
expected: None,
},
Case {
input: "Runtime.with_enabled_context Context.Output \"context\" <| bar",
expected: None,
},
Case {
input: "Standard.Base.Runtime.with_enabled_context Context.Output \"context\" <| bar",
expected: None,
},
Case {
input: "Standard.Base.Runtime.with_enabled_context Standard.Base.Runtime.Context.Output \"context\" <| bar",
expected: Some(ContextSwitchExpression {
switch: ContextSwitch::Enable,
context: Context::Output,
environment: "context".into(),
}),
},
Case {
input: "Standard.Base.Runtime.with_disabled_context Standard.Base.Runtime.Context.Output \"context_name\" <| bar",
expected: Some(ContextSwitchExpression {
switch: ContextSwitch::Disable,
context: Context::Output,
environment: "context_name".into(),
}),
},
Case {
input: "Standard.Base.Runtime.with_disabled_context Standard.Base.Runtime.Context.Output \"context_name\" <| bar <| baz",
expected: Some(ContextSwitchExpression {
switch: ContextSwitch::Disable,
context: Context::Output,
environment: "context_name".into(),
}),
},
];
let parser = Parser::new();
for case in cases.iter() {
let ast = parser.parse_line_ast(case.input).unwrap();
let result = ContextSwitchExpression::parse(&ast);
assert_eq!(result, case.expected, "{case:?}");
}
}
}

View File

@ -1,814 +0,0 @@
//! Code for definition discovery in the blocks, finding definition by name and related utilities.
use crate::prelude::*;
use crate::LineKind;
use crate::INDENT;
use ast::crumbs::ChildAst;
use ast::crumbs::Crumbable;
use ast::crumbs::InfixCrumb;
use ast::crumbs::Located;
use ast::known;
use ast::opr;
use parser::Parser;
use std::iter::FusedIterator;
// =====================
// === Definition Id ===
// =====================
#[allow(missing_docs)]
#[derive(Copy, Fail, Clone, Debug)]
#[fail(display = "Encountered an empty definition ID. It must contain at least one crumb.")]
pub struct EmptyDefinitionId;
/// A crumb describes a step that needs to be taken when going from the context (the module, in
/// the case of a graph) to the target.
// TODO [mwu]
// Currently we support only entering named definitions.
pub type Crumb = DefinitionName;
/// Identifies graph in the module.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct Id {
/// Sequence of traversals from the module root up to the identified graph.
pub crumbs: Vec<Crumb>,
}
impl Id {
/// Creates a new graph identifier consisting of a single crumb.
pub fn new_single_crumb(crumb: DefinitionName) -> Id {
let crumbs = vec![crumb];
Id { crumbs }
}
/// Creates a new identifier with a single plain name.
pub fn new_plain_name(name: impl Str) -> Id {
Self::new_plain_names(std::iter::once(name.into()))
}
/// Creates a new identifier from a sequence of plain definition names.
pub fn new_plain_names<S>(names: impl IntoIterator<Item = S>) -> Id
where S: ToString {
let crumbs =
names.into_iter().map(|name| DefinitionName::new_plain(name.to_string())).collect_vec();
Id { crumbs }
}
}
impl Display for Id {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut iter = self.crumbs.iter();
if let Some(crumb) = iter.next() {
write!(f, "{crumb}")?
}
for crumb in iter {
write!(f, "⮚{crumb}")?
}
Ok(())
}
}
// =============
// === Error ===
// =============
#[derive(Fail, Debug)]
#[fail(
display = "Cannot set Block lines because no line with Some(Ast) was found. Block must have \
at least one non-empty line."
)]
struct MissingLineWithAst;
#[allow(missing_docs)]
#[derive(Fail, Clone, Debug)]
#[fail(display = "Cannot find definition child with id {:?}.", _0)]
pub struct CannotFindChild(Crumb);
// =================
// === ScopeKind ===
// =================
/// Describes the kind of code block (scope) to which definition can belong.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ScopeKind {
/// Module scope is a file's top-level block.
Root,
/// Any other block, e.g. introduced as body of some definition binding.
NonRoot,
}
// ======================
// === DefinitionName ===
// ======================
/// Structure representing a definition name. If this is an extension method, the extended type is
/// also included.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct DefinitionName {
/// Name of the function itself.
pub name: Located<String>,
/// Used when definition is an extension method. Then it stores the segments
/// of the extended target type path.
pub extended_target: Vec<Located<String>>,
}
impl DefinitionName {
/// Creates a new name consisting of a single unqualified identifier (not an explicit extension
/// method).
pub fn new_plain(name: impl Into<String>) -> DefinitionName {
let name = Located::new_root(name.into());
DefinitionName { name, extended_target: default() }
}
/// Creates a new explicit extension method name.
pub fn new_method(extended_atom: impl Str, name: impl Str) -> DefinitionName {
let extended_atom = Located::new(InfixCrumb::LeftOperand, extended_atom.into());
let name = Located::new(InfixCrumb::RightOperand, name.into());
let extended_target = vec![extended_atom];
DefinitionName { name, extended_target }
}
/// Tries to describe the given Ast piece as a definition name. Typically, the passed Ast should
/// be the binding's left-hand side.
///
/// Returns `None` if it is not a name-like entity.
pub fn from_ast(ast: &Ast) -> Option<DefinitionName> {
let accessor_chain = opr::as_access_chain(ast);
let (extended_target, name) = match accessor_chain {
Some(accessor_chain) => {
// Not really clear how the incomplete names should be supported. For now we just
// reject them. When use-cases appear, this check might need to be relaxed.
if !accessor_chain.all_operands_set() {
return None;
}
let mut pieces = Vec::new();
for piece in accessor_chain.enumerate_non_empty_operands() {
let name = ast::identifier::name(&piece.item.arg)?.to_owned();
pieces.push(piece.map(|_| name));
}
let name = pieces.pop()?;
(pieces, name)
}
None => {
let name = match ast.shape() {
ast::Shape::Var(var) => Some(var.name.as_str()),
ast::Shape::Opr(opr) => Some(opr.name.as_str()),
ast::Shape::SectionSides(sides) => ast::identifier::name(&sides.opr),
// Shape::Cons is intentionally omitted.
// It serves to pattern-match, not as definition name.
_ => None,
}?;
let name = Located::new_root(name.to_owned());
(Vec::new(), name)
}
};
Some(DefinitionName { name, extended_target })
}
/// Iterate over name segments of this name, left to right.
pub fn name_segments(&self) -> impl Iterator<Item = &str> {
let path = self.extended_target.iter().map(|segment| segment.as_str());
let last = std::iter::once(self.name.as_str());
path.chain(last)
}
/// Get AST of this name.
pub fn ast(&self, parser: &Parser) -> FallibleResult<Ast> {
        // We can't assume that the string pieces we have are valid identifiers.
        // But neither is this our responsibility. If it parses, we can store it in the Ast.
parser.parse_line_ast(self.to_string())
}
/// Checks if the given definition name is a method defined on given expected atom name.
///
    /// E.g. `Main.foo` is a method of `Main`. Also, if `Main` is the name of a module, then a
    /// root-scoped definition named `foo` will be treated as an extension method on `Main`. To
    /// handle such cases properly, the function takes `parent_name` - the name of the module or
    /// type where the definition is located.
pub fn method_of(&self, parent_name: &str, expected_atom: &str) -> bool {
if self.extended_target.is_empty() {
parent_name == expected_atom
} else {
self.explicitly_extends_type(expected_atom)
}
}
/// Check if this name is an explicit extension method for given atom.
///
/// For example `Int.add` is an extension method for `Int`, whereas plain name `add` is not.
pub fn explicitly_extends_type(&self, expected_typename: &str) -> bool {
let expected_segments = ast::opr::name_segments(expected_typename);
let segments = &self.extended_target;
expected_segments.eq_by(segments, |lhs, rhs| lhs == rhs.item.as_str())
}
}
impl Display for DefinitionName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let text = self.name_segments().join(ast::opr::predefined::ACCESS);
write!(f, "{text}")
}
}
// ======================
// === DefinitionInfo ===
// ======================
/// Information about a definition binding.
#[derive(Clone, Debug)]
pub struct DefinitionInfo {
/// The whole definition. It is an Infix shape with `=` operator. Its left-hand side is
/// an App.
pub ast: known::Infix,
/// Name of this definition. Includes typename, if this is an extension method.
pub name: Located<DefinitionName>,
/// Arguments for this definition. Does not include any implicit ones (e.g. no `this`).
pub args: Vec<Located<Ast>>,
/// The absolute indentation of the code block that introduced this definition.
pub context_indent: usize,
}
impl DefinitionInfo {
/// Returns the definition body, i.e. Ast standing on the assignment's right-hand side.
pub fn body(&self) -> Located<&Ast> {
Located::new(InfixCrumb::RightOperand, &self.ast.rarg)
}
    /// Gets the definition block lines. If `body` is a `Block`, it returns its `BlockLine`s,
    /// concatenating `empty_lines`, `first_line` and `lines`, in this exact order. Otherwise,
    /// it returns a single `BlockLine` containing the whole body.
pub fn block_lines(&self) -> Vec<ast::BlockLine<Option<Ast>>> {
if let Ok(block) = known::Block::try_from(*self.body()) {
block.iter_all_lines().map(|line| line.map_opt(CloneRef::clone_ref)).collect()
} else {
let elem = Some((*self.body()).clone());
let off = 0;
vec![ast::BlockLine { elem, off }]
}
}
/// Sets the definition block lines. `lines` must contain at least one non-empty line to
/// succeed.
pub fn set_block_lines(
&mut self,
mut lines: Vec<ast::BlockLine<Option<Ast>>>,
) -> FallibleResult {
// FIXME [mwu]
// This doesn't deal correctly with offsets, but I have no idea how it should behave,
// as the current parser's behavior and AST is inconsistent.
// Basically `empty_lines` currently use absolute offsets, while `BlockLines` use relative
// offsets. This is not desirable, as e.g. an empty line in the middle of block is not
// possible to express with the current AST (it won't round-trip).
let indent = self.indent();
let mut empty_lines = Vec::new();
let mut line = lines.pop_front().ok_or(MissingLineWithAst)?;
while line.elem.is_none() {
empty_lines.push(line.off + indent);
line = lines.pop_front().ok_or(MissingLineWithAst)?;
}
let elem = line.elem.ok_or(MissingLineWithAst)?;
let off = line.off;
let first_line = ast::BlockLine { elem, off };
let block = ast::Block { indent, empty_lines, first_line, lines };
let body_ast = Ast::new(block, None);
self.set_body_ast(body_ast);
Ok(())
}
    /// Sets the definition body to an expression with the given Ast.
pub fn set_body_ast(&mut self, expression: Ast) {
self.ast.update_shape(|infix| {
            // Keep at least one space after `=` for inline expressions, so it doesn't look ugly
            // when converting from a previous block definition body.
match expression.shape() {
ast::Shape::Block(_) => {}
_ => infix.roff = std::cmp::max(1, infix.roff),
}
infix.rarg = expression;
})
}
/// Tries to interpret a root line (i.e. the AST being placed in a line directly in the module
/// scope) as a definition.
pub fn from_root_line(line: &ast::BlockLine<Option<Ast>>) -> Option<DefinitionInfo> {
Self::from_root_line_ast(line.elem.as_ref()?)
}
/// Tries to interpret a root line's AST as a definition.
pub fn from_root_line_ast(ast: &Ast) -> Option<DefinitionInfo> {
let indent = 0;
Self::from_line_ast(ast, ScopeKind::Root, indent)
}
/// Tries to interpret `Line`'s `Ast` as a function definition.
///
    /// Assumes that the AST represents the contents of a line (and not e.g. the right-hand side
    /// of some binding or another kind of subtree).
pub fn from_line_ast(
ast: &Ast,
kind: ScopeKind,
context_indent: usize,
) -> Option<DefinitionInfo> {
if let LineKind::Definition { ast, args, name } = LineKind::discern(ast, kind) {
Some(DefinitionInfo { ast, name, args, context_indent })
} else {
None
}
}
}
/// Definition stored under some known crumbs path.
pub type ChildDefinition = Located<DefinitionInfo>;
/// Tries to add a new crumb to the current path to obtain a deeper child.
/// Its crumbs will accumulate both the current crumbs and the passed one.
pub fn resolve_single_name(def: ChildDefinition, id: &Crumb) -> FallibleResult<ChildDefinition> {
let child = def.item.def_iter().find_by_name(id)?;
Ok(def.into_descendant(child))
}
// =================
// === Iterators ===
// =================
// === DefinitionIterator ===
/// Iterator that iterates over child definitions.
#[allow(missing_debug_implementations)]
pub struct DefinitionIterator<'a> {
/// Iterator going over ASTs of potential child definitions.
pub iterator: Box<dyn Iterator<Item = ChildAst<'a>> + 'a>,
/// What kind of scope are we getting our ASTs from.
pub scope_kind: ScopeKind,
/// Absolute indentation of the child ASTs we iterate over.
pub indent: usize,
}
impl<'a> Iterator for DefinitionIterator<'a> {
type Item = ChildDefinition;
fn next(&mut self) -> Option<Self::Item> {
let scope_kind = self.scope_kind;
let indent = self.indent;
self.iterator.find_map(|ChildAst { item, crumbs }| {
let definition_opt = DefinitionInfo::from_line_ast(item, scope_kind, indent);
definition_opt.map(|def| ChildDefinition::new(crumbs, def))
})
}
}
impl<'a> DefinitionIterator<'a> {
/// Yields vector of all child definition infos, discarding the crumbs.
pub fn infos_vec(self) -> Vec<DefinitionInfo> {
self.map(|child_def| child_def.item).collect_vec()
}
/// Looks up direct child definition by given name.
pub fn find_by_name(
mut self,
name: &DefinitionName,
) -> Result<ChildDefinition, CannotFindChild> {
let err = || CannotFindChild(name.clone());
self.find(|child_def| &*child_def.item.name == name).ok_or_else(err)
}
}
// === RecursiveDefinitionIterator ===
/// A recursive iterator over child definitions, returned by
/// [`DefinitionProvider::recursive_def_iter`].
#[derive(Clone, Debug, Default)]
pub struct RecursiveDefinitionIterator {
stack: Vec<ChildDefinition>,
}
impl Iterator for RecursiveDefinitionIterator {
type Item = ChildDefinition;
fn next(&mut self) -> Option<Self::Item> {
let next_item = self.stack.pop();
if let Some(some_item) = &next_item {
self.stack.extend(some_item.def_iter())
}
next_item
}
}
impl FusedIterator for RecursiveDefinitionIterator {}
// ==========================
// === DefinitionProvider ===
// ==========================
/// An entity that contains lines that we want to interpret as definitions.
pub trait DefinitionProvider {
/// Absolute indentation level of the scope of this provider's body.
/// (i.e. the indents of the child definitions)
fn indent(&self) -> usize;
/// What kind of scope this is.
fn scope_kind(&self) -> ScopeKind;
    /// Iterator going over all line-like ASTs that can hold a child definition.
fn enumerate_asts<'a>(&'a self) -> Box<dyn Iterator<Item = ChildAst<'a>> + 'a>;
    /// Returns a scope iterator allowing browsing definitions provided by this provider.
fn def_iter(&self) -> DefinitionIterator {
let iterator = self.enumerate_asts();
let scope_kind = self.scope_kind();
let indent = self.indent();
DefinitionIterator { iterator, scope_kind, indent }
}
/// Returns an iterator iterating recursively over definitions provided by this provider and
/// their nested definitions (as [`ChildDefinition`] is also a provider).
fn recursive_def_iter(&self) -> RecursiveDefinitionIterator {
RecursiveDefinitionIterator { stack: self.def_iter().collect() }
}
}
impl DefinitionProvider for known::Block {
fn indent(&self) -> usize {
self.indent
}
fn scope_kind(&self) -> ScopeKind {
ScopeKind::NonRoot
}
fn enumerate_asts<'a>(&'a self) -> Box<dyn Iterator<Item = ChildAst<'a>> + 'a> {
self.ast().children()
}
}
impl DefinitionProvider for DefinitionInfo {
fn indent(&self) -> usize {
match self.ast.rarg.shape() {
ast::Shape::Block(block) => block.indent,
            // If the definition has no block of its own, it does not introduce any children,
            // and the value returned here is not currently used anywhere. It might matter in
            // the future, when we deal with lambdas. Anyway, whatever block we might introduce,
            // it should be more indented than our current context.
_ => self.context_indent + INDENT,
}
}
fn scope_kind(&self) -> ScopeKind {
ScopeKind::NonRoot
}
fn enumerate_asts<'a>(&'a self) -> Box<dyn Iterator<Item = ChildAst<'a>> + 'a> {
use ast::crumbs::Crumb;
match self.ast.rarg.shape() {
ast::Shape::Block(_) => {
let parent_crumb = Crumb::Infix(InfixCrumb::RightOperand);
let rarg = &self.ast.rarg;
let iter = rarg.enumerate().map(move |(crumb, ast)| {
let crumbs = vec![parent_crumb, crumb];
ChildAst::new(crumbs, ast)
});
Box::new(iter)
}
_ => Box::new(std::iter::empty()),
}
}
}
impl DefinitionProvider for ChildDefinition {
fn indent(&self) -> usize {
self.item.indent()
}
fn scope_kind(&self) -> ScopeKind {
self.item.scope_kind()
}
fn enumerate_asts<'a>(&'a self) -> Box<dyn Iterator<Item = ChildAst<'a>> + 'a> {
Box::new(
self.item
.enumerate_asts()
.map(|child_ast| self.descendant(child_ast.crumbs, child_ast.item)),
)
}
}
// =============
// === ToAdd ===
// =============
/// Describes a definition to be created.
///
/// Type meant to be easily constructible in the code and be translated into AST.
/// Information-wise it corresponds to DefinitionInfo.
#[derive(Clone, Debug)]
pub struct ToAdd {
    /// The name of the introduced definition. May represent a plain identifier or an extension.
    /// E.g. `add` or `Int.add`.
pub name: DefinitionName,
    /// Names of explicit parameters. The `this` parameter must not be included.
pub explicit_parameter_names: Vec<String>,
/// The first non-empty line of the definition body.
pub body_head: Ast,
/// Further definition body lines. `None` represents an empty line.
pub body_tail: Vec<Option<Ast>>,
}
impl ToAdd {
/// Create ToAdd description from an arbitrary AST.
pub fn new_with_body(
name: DefinitionName,
explicit_parameter_names: Vec<String>,
body: Ast,
) -> Self {
let (body_head, body_tail) = match body.shape() {
ast::Shape::Block(ast::Block { first_line, lines, .. }) => (
first_line.elem.clone_ref(),
lines.iter().map(|line| line.elem.as_ref().cloned()).collect_vec(),
),
_ => (body.clone_ref(), default()),
};
ToAdd { name, explicit_parameter_names, body_head, body_tail }
}
/// The definition's head, i.e. the left-hand side of the primary assignment.
pub fn head(&self, parser: &Parser) -> FallibleResult<Ast> {
let name = self.name.ast(parser)?;
let args = self.explicit_parameter_names.iter().map(Ast::var);
let head = ast::prefix::Chain::new(name, args).into_ast();
Ok(head)
}
/// The definition's body, i.e. the right-hand side of the primary assignment.
pub fn body(&self, scope_indent: usize) -> Ast {
        // An assignment must always be placed in a block of its own.
if self.body_tail.is_empty() && !ast::opr::is_assignment(&self.body_head) {
self.body_head.clone_ref()
} else {
let mut block = ast::Block::from_lines(&self.body_head, &self.body_tail);
block.indent = scope_indent + INDENT;
Ast::from(block)
}
}
/// Generate the definition's Ast from the description.
pub fn ast(&self, scope_indent: usize, parser: &Parser) -> FallibleResult<Ast> {
let body = self.body(scope_indent);
let body_is_block = matches!(body.shape(), ast::Shape::Block { .. });
let infix_shape = ast::Infix {
larg: self.head(parser)?,
loff: 1,
opr: Ast::opr(ast::opr::predefined::ASSIGNMENT),
roff: if body_is_block { 0 } else { 1 },
rarg: body,
};
let ast = Ast::from(infix_shape);
Ok(ast)
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::module;
use crate::INDENT;
fn assert_eq_strings(lhs: Vec<impl Str>, rhs: Vec<impl Str>) {
let lhs = lhs.iter().map(|s| s.as_ref()).collect_vec();
let rhs = rhs.iter().map(|s| s.as_ref()).collect_vec();
assert_eq!(lhs, rhs)
}
fn to_names(defs: &[DefinitionInfo]) -> Vec<String> {
defs.iter().map(|def| def.name.to_string()).collect()
}
fn indented(line: impl Display) -> String {
format!(" {line}")
}
#[test]
fn generating_definition_to_add() {
let parser = Parser::new();
let mut to_add = ToAdd {
name: DefinitionName::new_method("Main", "add"),
explicit_parameter_names: vec!["arg1".into(), "arg2".into()],
body_head: Ast::infix_var("ret", "=", "arg2"),
body_tail: default(),
};
        // First, if we generate a definition with a single line and it is an assignment, it
        // should be placed in a block of its own.
let ast = to_add.ast(4, &parser).unwrap();
assert_eq!(ast.repr(), "Main.add arg1 arg2 =\n ret = arg2");
        // Now the single-line body is not an assignment, so it is safe to place it inline.
to_add.body_head = Ast::infix_var("arg1", "+", "arg2");
let ast = to_add.ast(4, &parser).unwrap();
assert_eq!(ast.repr(), "Main.add arg1 arg2 = arg1 + arg2");
// Having more than a single line always requires a block.
to_add.body_tail.push(Some(Ast::infix_var("arg1", "-", "arg2")));
let ast = to_add.ast(4, &parser).unwrap();
        // Note the 8-space indent of the definition block lines (as the parent scope was at 4).
        // Also, note that there is no space after the definition's assignment operator.
assert_eq!(ast.repr(), "Main.add arg1 arg2 =\n arg1 + arg2\n arg1 - arg2");
}
#[test]
fn definition_name_tests() {
let parser = parser::Parser::new();
let ast = parser.parse_line_ast("Foo.Bar.baz").unwrap();
let name = DefinitionName::from_ast(&ast).unwrap();
assert_eq!(*name.name, "baz");
assert_eq!(name.extended_target[0].as_str(), "Foo");
assert_eq!(name.extended_target[1].as_str(), "Bar");
assert_eq!(ast.get_traversing(&name.name.crumbs).unwrap().repr(), "baz");
assert_eq!(ast.get_traversing(&name.extended_target[0].crumbs).unwrap().repr(), "Foo");
assert_eq!(ast.get_traversing(&name.extended_target[1].crumbs).unwrap().repr(), "Bar");
}
#[test]
fn definition_name_rejecting_incomplete_names() {
let parser = parser::Parser::new();
let ast = parser.parse_line_ast("Foo. .baz").unwrap();
assert!(DefinitionName::from_ast(&ast).is_none());
}
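    // A small illustrative sketch of `method_of` and `explicitly_extends_type`, based on the
    // plain/extension split implemented above.
    #[test]
    fn definition_name_method_of() {
        let plain = DefinitionName::new_plain("foo");
        assert_eq!(plain.to_string(), "foo");
        // A plain name is a method of its parent module or type.
        assert!(plain.method_of("Main", "Main"));
        assert!(!plain.method_of("Main", "Other"));
        let extension = DefinitionName::new_method("Int", "add");
        assert_eq!(extension.to_string(), "Int.add");
        // An explicit extension method extends the named type, regardless of the parent name.
        assert!(extension.explicitly_extends_type("Int"));
        assert!(extension.method_of("Main", "Int"));
        assert!(!extension.method_of("Main", "Text"));
    }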
#[test]
fn definition_info_name() {
let parser = parser::Parser::new();
let ast = parser.parse_line_ast("Foo.bar a b c = baz").unwrap();
let definition = DefinitionInfo::from_root_line_ast(&ast).unwrap();
assert_eq!(definition.name.to_string(), "Foo.bar");
assert_eq!(ast.get_traversing(&definition.name.crumbs).unwrap().repr(), "Foo.bar");
}
#[test]
fn located_definition_args() {
let parser = parser::Parser::new();
let ast = parser.parse_line_ast("foo bar baz = a + b + c").unwrap();
let definition = DefinitionInfo::from_root_line_ast(&ast).unwrap();
let (arg0, arg1) = definition.args.expect_tuple();
use ast::crumbs;
use ast::crumbs::InfixCrumb::*;
use ast::crumbs::PrefixCrumb::*;
assert_eq!(arg0.crumbs, crumbs![LeftOperand, Func, Arg]);
assert_eq!(arg1.crumbs, crumbs![LeftOperand, Arg]);
assert_eq!(arg0.item.repr(), "bar");
assert_eq!(ast.get_traversing(&arg0.crumbs).unwrap(), &arg0.item);
assert_eq!(arg1.item.repr(), "baz");
assert_eq!(ast.get_traversing(&arg1.crumbs).unwrap(), &arg1.item);
}
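    // A small sketch of `set_body_ast`: replacing an inline body keeps at least one space after
    // the assignment operator, per the `roff` handling above.
    #[test]
    fn setting_definition_body() {
        let parser = parser::Parser::new();
        let ast = parser.parse_line_ast("foo a = bar").unwrap();
        let mut definition = DefinitionInfo::from_root_line_ast(&ast).unwrap();
        assert_eq!(definition.body().repr(), "bar");
        definition.set_body_ast(Ast::var("baz"));
        assert_eq!(definition.ast.repr(), "foo a = baz");
    }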
#[test]
fn match_is_not_definition() {
let cons = Ast::cons("Foo");
let arg = Ast::number(5);
let lhs = Ast::prefix(cons, arg.clone());
let rhs = Ast::var("bar");
let ast = Ast::infix(lhs, "=", rhs.clone());
        // Not a definition; it is a pattern match.
assert_eq!(ast.repr(), "Foo 5 = bar");
let def_opt = DefinitionInfo::from_line_ast(&ast, ScopeKind::NonRoot, INDENT);
assert!(def_opt.is_none());
let var = Ast::var("foo");
let lhs = Ast::prefix(var, arg);
let ast = Ast::infix(lhs, "=", rhs);
// Now it is a definition.
assert_eq!(ast.repr(), "foo 5 = bar");
let def_opt = DefinitionInfo::from_line_ast(&ast, ScopeKind::NonRoot, INDENT);
assert!(def_opt.is_some());
}
#[test]
fn list_definition_test() {
let parser = parser::Parser::new();
let definition_lines = vec![
"main = _",
"Foo.Bar.foo = _",
"Foo.Bar.baz a b = _",
"+ a = _",
"Int.+ a = _",
"bar = _",
"add a b = 50",
"* a b = _",
];
let expected_def_names_in_module =
vec!["main", "Foo.Bar.foo", "Foo.Bar.baz", "+", "Int.+", "bar", "add", "*"];
        // Inside a definition there are no extension methods or argument-less definitions.
let expected_def_names_in_def = vec!["+", "add", "*"];
// === Program with definitions in root ===
let program = definition_lines.join("\n");
let module = parser.parse_module(program, default()).unwrap();
let definitions = module.def_iter().infos_vec();
assert_eq_strings(to_names(&definitions), expected_def_names_in_module);
        // Check that definitions can be found and their bodies are properly described.
let add_name = DefinitionName::new_plain("add");
let add = module.def_iter().find_by_name(&add_name).expect("failed to find `add` function");
        let body = known::Number::try_from(*add.body()).expect("add body should be a Number");
assert_eq!(body.int, "50");
// === Program with definition in `some_func`'s body `Block` ===
let indented_lines = definition_lines.iter().map(indented).collect_vec();
let program = format!("some_func arg1 arg2 =\n{}", indented_lines.join("\n"));
let module = parser.parse_module(program, default()).unwrap();
let root_defs = module.def_iter().infos_vec();
let (only_def,) = root_defs.expect_tuple();
assert_eq!(&only_def.name.to_string(), "some_func");
let body_block = known::Block::try_from(*only_def.body()).unwrap();
let nested_defs = body_block.def_iter().infos_vec();
assert_eq_strings(to_names(&nested_defs), expected_def_names_in_def);
}
#[test]
fn finding_root_definition() {
let program_to_expected_main_pos = vec![
("main = bar", 0),
("\nmain = bar", 1),
("\n\nmain = bar", 2),
("foo = bar\nmain = bar", 1),
("foo = bar\n\nmain = bar", 2),
];
let parser = parser::Parser::new();
let main_id = Id::new_plain_name("main");
for (program, expected_line_index) in program_to_expected_main_pos {
let module = parser.parse_module(program, default()).unwrap();
let location = module::locate(&module, &main_id).unwrap();
let (crumb,) = location.crumbs.expect_tuple();
match crumb {
ast::crumbs::Crumb::Module(m) => assert_eq!(m.line_index, expected_line_index),
_ => panic!("Expected module crumb, got: {crumb:?}."),
}
}
}
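    // A small sketch of the `Id` `Display` implementation: crumbs are joined with `⮚`.
    #[test]
    fn definition_id_display() {
        assert_eq!(Id::new_plain_name("main").to_string(), "main");
        assert_eq!(Id::new_plain_names(["main", "add"]).to_string(), "main⮚add");
    }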
#[test]
fn getting_nested_definition() {
let program = r"
main =
foo = 2
add a b = a + b
baz arg =
subbaz arg = 4
baz2 arg =
subbaz2 = 4
add foo bar";
let module = parser::Parser::new().parse_module(program, default()).unwrap();
let check_def = |id, expected_body| {
let definition = module::get_definition(&module, &id).unwrap();
assert_eq!(definition.body().repr(), expected_body);
};
let check_not_found = |id| assert!(module::get_definition(&module, &id).is_err());
check_def(Id::new_plain_names(["main", "add"]), "a + b");
check_def(Id::new_plain_names(["main", "baz"]), "\n subbaz arg = 4");
check_def(Id::new_plain_names(["main", "baz", "subbaz"]), "4");
        // Nodes are not definitions.
check_not_found(Id::new_plain_names(["main", "foo"]));
check_not_found(Id::new_plain_names(["main", "baz2", "subbaz2"]));
}
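    // A sketch of `recursive_def_iter`: unlike `def_iter`, it descends into nested definitions.
    // The order below follows from the stack-based implementation above, which visits later
    // siblings first.
    #[test]
    fn recursive_definition_iteration() {
        let program = r"
main =
    add a b = a + b
    baz arg =
        subbaz arg = 4
    add foo bar";
        let module = parser::Parser::new().parse_module(program, default()).unwrap();
        let names =
            module.recursive_def_iter().map(|def| def.item.name.to_string()).collect_vec();
        assert_eq_strings(names, vec!["main", "baz", "subbaz", "add"]);
    }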
}


@ -1,543 +0,0 @@
//! Code for retrieving graph description from AST.
use crate::prelude::*;
use crate::connection;
use crate::connection::Connection;
use crate::context_switch::ContextSwitch;
use crate::definition;
use crate::definition::DefinitionInfo;
use crate::definition::DefinitionProvider;
use crate::node;
use crate::node::LocatedNode;
use crate::node::NodeInfo;
use ast::known;
use ast::Ast;
use ast::BlockLine;
use engine_protocol::language_server::ExecutionEnvironment;
/// Graph uses the same `Id` as the definition which introduces the graph.
pub type Id = definition::Id;
// ====================
// === LocationHint ===
// ====================
/// Describes the desired position of the node's line in the graph's code block.
#[derive(Clone, Copy, Debug)]
pub enum LocationHint {
/// Try placing this node's line before the line described by id.
Before(ast::Id),
/// Try placing this node's line after the line described by id.
After(ast::Id),
/// Try placing this node's line at the start of the graph's code block.
Start,
/// Try placing this node's line at the end of the graph's code block.
End,
}
// =================
// === GraphInfo ===
// =================
/// Description of the graph, based on information available in AST.
#[derive(Clone, Debug, Deref)]
pub struct GraphInfo {
/// The definition providing this graph.
pub source: DefinitionInfo,
}
impl GraphInfo {
/// Look for a node with given id in the graph.
pub fn locate_node(&self, id: node::Id) -> FallibleResult<LocatedNode> {
let lines = self.source.block_lines();
node::locate(&lines, self.source.context_indent, id)
}
/// Describe graph of the given definition.
pub fn from_definition(source: DefinitionInfo) -> GraphInfo {
GraphInfo { source }
}
/// Gets the AST of this graph definition.
pub fn ast(&self) -> Ast {
self.source.ast.clone().into()
}
/// Gets all known nodes in this graph (does not include special pseudo-nodes like graph
/// inputs and outputs).
pub fn nodes(&self) -> Vec<NodeInfo> {
let ast = &self.source.ast;
let body = &ast.rarg;
if Self::is_empty_graph_body(body) {
// Empty graph body is represented as a single `Nothing` value. It has no nodes.
vec![]
} else if let Ok(body_block) = known::Block::try_new(body.clone()) {
let context_indent = self.source.indent();
let lines_iter = body_block.enumerate_non_empty_lines();
let nodes_iter = node::NodeIterator { lines_iter, context_indent };
nodes_iter.map(|n| n.node).collect()
} else if let Some(node) = node::NodeInfo::from_main_line_ast(body) {
            // There's no way to attach a documentation comment to an inline node; it consists
            // only of the main line.
vec![node]
} else {
            // It should not be possible to have an empty definition without any nodes, but it
            // is possible to represent such a thing in the AST. Anyway, it has no nodes.
vec![]
}
}
/// Gets the list of connections between the nodes in this graph.
pub fn connections(&self) -> Vec<Connection> {
connection::list(&self.source.ast.rarg)
}
/// Adds a new node to this graph.
pub fn add_node(&mut self, node: &NodeInfo, location_hint: LocationHint) -> FallibleResult {
let body = self.source.body();
let mut lines = if Self::is_empty_graph_body(body.item) {
            // Adding the first node to an empty graph. We need to remove the placeholder value.
default()
} else {
self.source.block_lines()
};
let last_non_empty = || lines.iter().rposition(|line| line.elem.is_some());
let index = match location_hint {
LocationHint::Start => 0,
LocationHint::End => last_non_empty().map_or(lines.len(), |ix| ix + 1),
LocationHint::After(id) => self.locate_node(id)?.index.last() + 1,
LocationHint::Before(id) => self.locate_node(id)?.index.first(),
};
let elem = Some(node.ast().clone_ref());
let off = 0;
lines.insert(index, BlockLine { elem, off });
if let Some(documentation) = &node.documentation {
let elem = Some(documentation.ast().into());
let line = BlockLine { elem, off };
lines.insert(index, line);
}
self.source.set_block_lines(lines)
}
/// Locates a node with the given id.
pub fn find_node(&self, id: ast::Id) -> Option<NodeInfo> {
self.nodes().iter().find(|node| node.id() == id).cloned()
}
    /// After removing the last node, we want to insert a placeholder value as the definition
    /// body. This defines its AST. Currently it is just `Nothing`.
fn empty_graph_body() -> Ast {
Ast::cons(ast::constants::keywords::NOTHING).with_new_id()
}
    /// Check if the graph is empty (i.e. filled with [`Self::empty_graph_body`]).
fn is_empty_graph_body(ast: &Ast) -> bool {
let cons = known::Cons::try_from(ast);
cons.map_or(false, |cons| cons.name == ast::constants::keywords::NOTHING)
}
    /// Removes the node from the graph.
pub fn remove_node(&mut self, node_id: ast::Id) -> FallibleResult {
self.update_node(node_id, |_| None)
}
    /// Sets a new state for the node. The id of the described node must denote an already
    /// existing node.
pub fn set_node(&mut self, node: &NodeInfo) -> FallibleResult {
self.update_node(node.id(), |_| Some(node.clone()))
}
    /// Sets a new state for the node. The id of the described node must denote an already
    /// existing node.
pub fn update_node(
&mut self,
id: ast::Id,
f: impl FnOnce(NodeInfo) -> Option<NodeInfo>,
) -> FallibleResult {
let LocatedNode { index, node } = self.locate_node(id)?;
let mut lines = self.source.block_lines();
if let Some(updated_node) = f(node) {
lines[index.main_line].elem = Some(updated_node.main_line.ast().clone_ref());
match (index.documentation_line, updated_node.documentation) {
(Some(old_comment_index), None) => {
lines.remove(old_comment_index);
}
(Some(old_comment_index), Some(new_comment)) =>
lines[old_comment_index] = new_comment.block_line(),
(None, Some(new_comment)) =>
lines.insert(index.main_line, new_comment.block_line()),
(None, None) => {}
}
} else {
lines.remove(index.main_line);
if let Some(doc_index) = index.documentation_line {
lines.remove(doc_index);
}
}
let non_empty_lines_count = lines.iter().filter(|line| line.elem.is_some()).count();
if non_empty_lines_count == 0 {
self.source.set_body_ast(Self::empty_graph_body());
Ok(())
} else {
self.source.set_block_lines(lines)
}
// TODO tests for cases with comments involved
}
    /// Sets the expression of the given node.
#[profile(Debug)]
pub fn edit_node(&mut self, node_id: ast::Id, new_expression: Ast) -> FallibleResult {
self.update_node(node_id, |mut node| {
node.set_expression(new_expression);
Some(node)
})
}
    /// Sets the expression of the previewed node. Similar to `edit_node`, but also adds a
    /// context switch statement to disable the output context. This way execution of the
    /// previewed node will not produce unwanted side effects. `execution_environment` is the
    /// name of the environment in which the output context should be disabled.
#[profile(Debug)]
pub fn edit_preview_node(
&mut self,
node_id: ast::Id,
new_expression: Ast,
execution_environment: ExecutionEnvironment,
) -> FallibleResult {
self.update_node(node_id, |mut node| {
node.set_expression(new_expression);
node.set_context_switch(crate::context_switch::ContextSwitchExpression {
switch: ContextSwitch::Disable,
context: crate::context_switch::Context::Output,
environment: execution_environment.to_string().into(),
});
Some(node)
})
}
#[cfg(test)]
pub fn expect_code(&self, expected_code: impl Str) {
let code = self.source.ast.repr();
assert_eq!(code, expected_code.as_ref());
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::definition::DefinitionName;
use crate::definition::DefinitionProvider;
use crate::module::get_definition;
use ast::macros::DocumentationCommentInfo;
use ast::test_utils::expect_single_line;
use ast::HasRepr;
    /// Takes a program with a `main` definition in the root and returns `main`'s graph.
fn main_graph(parser: &parser::Parser, program: impl Str) -> GraphInfo {
let module = parser.parse_module(program.as_ref(), default()).unwrap();
let name = DefinitionName::new_plain("main");
let main = module.def_iter().find_by_name(&name).unwrap();
GraphInfo::from_definition(main.item)
}
fn find_graph(parser: &parser::Parser, program: impl Str, name: impl Str) -> GraphInfo {
let module = parser.parse_module(program.as_ref(), default()).unwrap();
let crumbs = name.into().split('.').map(DefinitionName::new_plain).collect();
let id = Id { crumbs };
let definition = get_definition(&module, &id).unwrap();
GraphInfo::from_definition(definition)
}
#[test]
fn detect_a_node() {
let parser = parser::Parser::new();
// Each of these programs should have a `main` definition with a single `2+2` node.
let programs = vec![
"main = 2+2",
"main = \n 2+2",
"main = \n foo = 2+2",
"main = \n foo = 2+2\n bar b = 2+2", // `bar` is a definition, not a node
];
for program in programs {
let graph = main_graph(&parser, program);
let nodes = graph.nodes();
assert_eq!(nodes.len(), 1);
let node = &nodes[0];
assert_eq!(node.expression().repr(), "2+2");
let _ = node.id(); // just to make sure it is available
}
}
fn new_expression_node(parser: &parser::Parser, expression: &str) -> NodeInfo {
let node_ast = parser.parse(expression, default());
let line_ast = expect_single_line(&node_ast).clone();
NodeInfo::from_main_line_ast(&line_ast).unwrap()
}
fn assert_all(nodes: &[NodeInfo], expected: &[NodeInfo]) {
assert_eq!(nodes.len(), expected.len());
for (left, right) in nodes.iter().zip(expected) {
assert_same(left, right)
}
}
fn assert_same(left: &NodeInfo, right: &NodeInfo) {
assert_eq!(left.id(), right.id());
assert_eq!(
left.documentation.as_ref().map(DocumentationCommentInfo::pretty_text),
right.documentation.as_ref().map(DocumentationCommentInfo::pretty_text)
);
assert_eq!(left.main_line.repr(), right.main_line.repr());
}
#[test]
fn add_node_to_graph_with_single_line() {
let program = "main = print \"hello\"";
let parser = parser::Parser::new();
let mut graph = main_graph(&parser, program);
let nodes = graph.nodes();
assert_eq!(nodes.len(), 1);
let initial_node = nodes[0].clone();
assert_eq!(initial_node.expression().repr(), "print \"hello\"");
let expr0 = "a + 2";
let expr1 = "b + 3";
let node_to_add0 = new_expression_node(&parser, expr0);
let node_to_add1 = new_expression_node(&parser, expr1);
graph.add_node(&node_to_add0, LocationHint::Start).unwrap();
assert_eq!(graph.nodes().len(), 2);
graph.add_node(&node_to_add1, LocationHint::Before(graph.nodes()[0].id())).unwrap();
let nodes = graph.nodes();
assert_all(nodes.as_slice(), &[node_to_add1, node_to_add0, initial_node]);
}
#[test]
fn add_node_to_graph_with_multiple_lines() {
// TODO [dg] Also add test for binding node when it's possible to update its id.
let program = r#"main =
foo = node
foo a = not_node
print "hello""#;
let parser = parser::Parser::new();
let mut graph = main_graph(&parser, program);
let node_to_add0 = new_expression_node(&parser, "4 + 4");
let node_to_add1 = new_expression_node(&parser, "a + b");
let node_to_add2 = new_expression_node(&parser, "x * x");
let node_to_add3 = new_expression_node(&parser, "x / x");
let node_to_add4 = new_expression_node(&parser, "2 - 2");
graph.add_node(&node_to_add0, LocationHint::Start).unwrap();
graph.add_node(&node_to_add1, LocationHint::Before(graph.nodes()[0].id())).unwrap();
graph.add_node(&node_to_add2, LocationHint::After(graph.nodes()[1].id())).unwrap();
graph.add_node(&node_to_add3, LocationHint::End).unwrap();
// Node 4 will be added later.
let nodes = graph.nodes();
assert_eq!(nodes.len(), 6);
assert_eq!(nodes[0].expression().repr(), "a + b");
assert_eq!(nodes[0].id(), node_to_add1.id());
// Sic: `node_to_add1` was added at index `0`.
assert_eq!(nodes[1].expression().repr(), "4 + 4");
assert_eq!(nodes[1].id(), node_to_add0.id());
assert_eq!(nodes[2].expression().repr(), "x * x");
assert_eq!(nodes[2].id(), node_to_add2.id());
assert_eq!(nodes[3].expression().repr(), "node");
assert_eq!(nodes[4].expression().repr(), "print \"hello\"");
assert_eq!(nodes[5].expression().repr(), "x / x");
assert_eq!(nodes[5].id(), node_to_add3.id());
let expected_code = r#"main =
a + b
4 + 4
x * x
foo = node
foo a = not_node
print "hello"
x / x"#;
graph.expect_code(expected_code);
let mut graph = find_graph(&parser, program, "main.foo");
assert_eq!(graph.nodes().len(), 1);
graph.add_node(&node_to_add4, LocationHint::Start).unwrap();
assert_eq!(graph.nodes().len(), 2);
assert_eq!(graph.nodes()[0].expression().repr(), "2 - 2");
assert_eq!(graph.nodes()[0].id(), node_to_add4.id());
assert_eq!(graph.nodes()[1].expression().repr(), "not_node");
}
#[test]
fn add_node_to_graph_with_blank_line() {
// The trailing `foo` definition is necessary for the blank line after "node2" to be
// included in the `main` block. Otherwise, the block would end on "node2" and the blank
// line would be parented to the module.
let program = r"main =
node2
foo = 5";
let parser = parser::Parser::new();
let mut graph = main_graph(&parser, program);
let id2 = graph.nodes()[0].id();
let node_to_add0 = new_expression_node(&parser, "node0");
let node_to_add1 = new_expression_node(&parser, "node1");
let node_to_add3 = new_expression_node(&parser, "node3");
let node_to_add4 = new_expression_node(&parser, "node4");
graph.add_node(&node_to_add0, LocationHint::Start).unwrap();
graph.add_node(&node_to_add1, LocationHint::Before(id2)).unwrap();
graph.add_node(&node_to_add3, LocationHint::After(id2)).unwrap();
graph.add_node(&node_to_add4, LocationHint::End).unwrap();
let expected_code = r"main =
node0
node1
node2
node3
node4";
        // `foo` is not part of the expected code, as it belongs to the module, not the `main`
        // graph.
graph.expect_code(expected_code);
}
#[test]
fn multiple_node_graph() {
let parser = parser::Parser::new();
let program = r"
main =
## Faux docstring
## Docstring 0
foo = node0
## Docstring 1
# disabled node1
foo a = not_node
## Docstring 2
node2
node3
";
// TODO [mwu]
// Add case like `Int.+ a = not_node` once https://github.com/enso-org/enso/issues/565 is fixed
let graph = main_graph(&parser, program);
let nodes = graph.nodes();
assert_eq!(nodes[0].documentation_text(), Some(" Docstring 0".into()));
assert_eq!(nodes[0].ast().repr(), "foo = node0");
assert_eq!(nodes[1].documentation_text(), Some(" Docstring 1".into()));
assert_eq!(nodes[1].ast().repr(), "# disabled node1");
assert_eq!(nodes[2].documentation_text(), Some(" Docstring 2".into()));
assert_eq!(nodes[2].ast().repr(), "node2");
assert_eq!(nodes[3].documentation_text(), None);
assert_eq!(nodes[3].ast().repr(), "node3");
assert_eq!(nodes.len(), 4);
}
#[test]
fn removing_node_from_graph() {
let parser = parser::Parser::new();
let program = r"
main =
foo = 2 + 2
bar = 3 + 17";
let mut graph = main_graph(&parser, program);
let nodes = graph.nodes();
assert_eq!(nodes.len(), 2);
assert_eq!(nodes[0].expression().repr(), "2 + 2");
assert_eq!(nodes[1].expression().repr(), "3 + 17");
graph.remove_node(nodes[0].id()).unwrap();
let nodes = graph.nodes();
assert_eq!(nodes.len(), 1);
assert_eq!(nodes[0].expression().repr(), "3 + 17");
let expected_code = "main =\n bar = 3 + 17";
graph.expect_code(expected_code);
assert!(graph.remove_node(uuid::Uuid::new_v4()).is_err());
graph.expect_code(expected_code);
}
#[test]
fn removing_last_node_from_graph() {
let parser = parser::Parser::new();
let program = r"
main =
foo = 2 + 2";
let mut graph = main_graph(&parser, program);
debug!("aa");
let (node,) = graph.nodes().expect_tuple();
assert_eq!(node.expression().repr(), "2 + 2");
debug!("vv");
graph.remove_node(node.id()).unwrap();
debug!("zz");
assert!(graph.nodes().is_empty());
graph.expect_code("main = Nothing");
}
#[test]
fn add_first_node_to_empty_graph() {
let parser = parser::Parser::new();
let program = r"main = Nothing";
let mut graph = main_graph(&parser, program);
assert!(graph.nodes().is_empty());
let node_to_add = new_expression_node(&parser, "node0");
graph.add_node(&node_to_add, LocationHint::Start).unwrap();
assert_eq!(graph.nodes().len(), 1);
assert_eq!(graph.nodes()[0].expression().repr(), "node0");
}
#[test]
fn editing_nodes_expression_in_graph() {
let parser = parser::Parser::new();
let program = r"
main =
foo = 2 + 2
bar = 3 + 17";
let new_expression = parser.parse("print \"HELLO\"", default());
let new_expression = expect_single_line(&new_expression).clone();
let mut graph = main_graph(&parser, program);
let nodes = graph.nodes();
assert_eq!(nodes.len(), 2);
assert_eq!(nodes[0].expression().repr(), "2 + 2");
assert_eq!(nodes[1].expression().repr(), "3 + 17");
graph.edit_node(nodes[0].id(), new_expression).unwrap();
let nodes = graph.nodes();
assert_eq!(nodes.len(), 2);
assert_eq!(nodes[0].expression().repr(), "print \"HELLO\"");
assert_eq!(nodes[1].expression().repr(), "3 + 17");
let expected_code = r#"main =
foo = print "HELLO"
bar = 3 + 17"#;
graph.expect_code(expected_code);
assert!(graph.edit_node(uuid::Uuid::new_v4(), Ast::var("foo")).is_err());
graph.expect_code(expected_code);
}
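    // A small sketch of `find_node`: lookup by id returns a copy of the node, while an unknown
    // id yields `None`.
    #[test]
    fn finding_node_by_id() {
        let parser = parser::Parser::new();
        let program = r"
main =
    foo = 2 + 2";
        let graph = main_graph(&parser, program);
        let node = graph.nodes()[0].clone();
        let found = graph.find_node(node.id()).expect("node should be found");
        assert_eq!(found.id(), node.id());
        assert_eq!(found.expression().repr(), "2 + 2");
        assert!(graph.find_node(uuid::Uuid::new_v4()).is_none());
    }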
}

View File

@ -1,209 +0,0 @@
//! Module for types and utilities related to dealing with identifiers.
use crate::prelude::*;
use std::cmp::Ordering;
// ==================
// === Identifier ===
// ==================
// === Errors ===
#[allow(missing_docs)]
#[derive(Clone, Debug, Fail)]
#[fail(display = "Identifier contains operator `{}`, so it cannot be made into var.", _0)]
pub struct OperatorCantBeMadeIntoVar(String);
#[allow(missing_docs)]
#[derive(Clone, Debug, Fail)]
#[fail(display = "The `{}` is not a valid identifier.", _0)]
pub struct NotAnIdentifier(String);
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Empty string is not a valid identifier.")]
pub struct IdentifierCannotBeEmpty;
// === Definition ===
/// Wrapper over an Ast that holds an atomic identifier of any kind.
///
/// Comparisons compare the underlying name strings.
///
/// Invariants: can get identifier name, the name is non-empty.
#[derive(Clone, Debug, Deref)]
pub struct Identifier(Ast);
impl Identifier {
/// Wrap the `Ast` into `Identifier` if it actually is an identifier.
pub fn new(ast: Ast) -> Option<Self> {
let name = ast::identifier::name(&ast)?;
(!name.is_empty()).as_some(Identifier(ast))
}
    /// Convert the given text into an identifier Ast and wrap it.
    ///
    /// Can fail if the given string is not a valid identifier; however, the exact scope of
    /// validation is currently unspecified.
pub fn from_text(text: impl Into<String>) -> FallibleResult<Self> {
// TODO? [mwu]
        // We should be able to call the parser or something similar to verify that the other
        // requirements for referent-form identifiers are fulfilled.
        // This is expected to become properly possible when the Rust rewrite of the parser is
        // done. See: https://github.com/enso-org/enso/issues/435
        // On the other hand, it is not clear how strict we want to be here, so as not to break
        // processing of syntactically invalid code.
let text = text.into();
let empty_string_error = failure::Error::from(IdentifierCannotBeEmpty);
let first_char = text.chars().next().ok_or(empty_string_error)?;
match first_char {
c if c.is_lowercase() => Ok(Ast::var(text)),
c if c.is_uppercase() => Ok(Ast::cons(text)),
c if ast::opr::SYMBOLS.contains(&c) => Ok(Ast::opr(text)),
_ => Err(NotAnIdentifier(text).into()),
}
.map(Identifier)
}
/// Get the identifier name.
pub fn name(&self) -> &str {
        // Unwrap here is safe, as the `Identifier` invariant guarantees that the name can be
        // obtained.
ast::identifier::name(&self.0).unwrap()
}
/// Convert identifier to the variable form (i.e. non-referent). Fails if this is an operator.
pub fn as_var(&self) -> Result<ast::Var, OperatorCantBeMadeIntoVar> {
let name = self.name();
        // Unwrap below is safe, as the identifier is always non-empty.
let first_char = name.chars().next().unwrap();
if first_char.is_alphabetic() {
let name = name.to_lowercase();
Ok(ast::Var { name })
} else {
Err(OperatorCantBeMadeIntoVar(name.to_owned()))
}
}
/// Get the identifier's node with a newly assigned, unique id.
///
    /// This is needed if the identifier from the AST is to be reused in a different part of the
    /// tree. Cloning it without generating a new ID would introduce two nodes with the same id.
pub fn with_new_id(&self) -> Self {
Self(self.0.with_new_id())
}
}
// === Implementations ===
impl PartialOrd for Identifier {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.name().partial_cmp(other.name())
}
}
impl Ord for Identifier {
fn cmp(&self, other: &Self) -> Ordering {
self.name().cmp(other.name())
}
}
impl TryFrom<String> for Identifier {
type Error = failure::Error;
fn try_from(value: String) -> Result<Self, Self::Error> {
Identifier::from_text(value)
}
}
impl From<Identifier> for String {
fn from(value: Identifier) -> Self {
value.name().into()
}
}
impl TryFrom<&str> for Identifier {
type Error = failure::Error;
fn try_from(value: &str) -> Result<Self, Self::Error> {
Identifier::from_text(value)
}
}
impl From<ast::known::Var> for Identifier {
fn from(value: ast::known::Var) -> Self {
Identifier(value.into())
}
}
impl From<ast::known::Cons> for Identifier {
fn from(value: ast::known::Cons) -> Self {
Identifier(value.into())
}
}
impl From<ast::known::Opr> for Identifier {
fn from(value: ast::known::Opr) -> Self {
Identifier(value.into())
}
}
impl From<Identifier> for Ast {
fn from(value: Identifier) -> Self {
value.0
}
}
impl From<&Identifier> for Ast {
fn from(value: &Identifier) -> Self {
value.0.clone()
}
}
impl Display for Identifier {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
Display::fmt(&self.name(), f)
}
}
impl PartialEq for Identifier {
fn eq(&self, other: &Self) -> bool {
self.name().eq(other.name())
}
}
impl Eq for Identifier {}
impl Hash for Identifier {
fn hash<H: Hasher>(&self, state: &mut H) {
self.name().hash(state)
}
}
// =================
// === Utilities ===
// =================
/// Generate an identifier name that is not present in the given sequence.
///
/// The name is generated by taking the `base` string and appending subsequent integers.
pub fn generate_name<'a>(
base: &str,
unavailable_names: impl IntoIterator<Item = &'a str>,
) -> FallibleResult<Identifier> {
let unavailable_suffixes = unavailable_names
.into_iter()
.filter_map(|name| name.strip_prefix(base).and_then(|suffix| suffix.parse::<usize>().ok()))
.collect::<HashSet<_>>();
let name = (1..)
.find_map(|i| {
let available = !unavailable_suffixes.contains(&i);
available.then(|| format!("{base}{i}"))
})
        .unwrap(); // Never yields `None`: we iterate an infinite sequence until we find a match.
Identifier::from_text(name)
}
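// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
    use super::*;
    // A small sketch of the `from_text` classification rules above: a lowercase name becomes a
    // variable, an uppercase one a constructor, and an operator symbol an operator.
    #[test]
    fn identifier_from_text() {
        assert_eq!(Identifier::from_text("foo").unwrap().name(), "foo");
        assert_eq!(Identifier::from_text("Foo").unwrap().name(), "Foo");
        assert_eq!(Identifier::from_text("+").unwrap().name(), "+");
        assert!(Identifier::from_text("").is_err());
        // Operators cannot be made into variables.
        assert!(Identifier::from_text("+").unwrap().as_var().is_err());
    }
    // A small sketch of `generate_name`: with `foo1` and `foo2` taken, the first free suffix is
    // `3`. Unrelated names do not affect the result.
    #[test]
    fn generating_fresh_name() {
        let name = generate_name("foo", ["foo1", "foo2", "bar1"]).unwrap();
        assert_eq!(name.name(), "foo3");
    }
}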


@ -1,240 +0,0 @@
//! A module with utilities managing imports.
use crate::prelude::*;
use crate::name::NamePath;
use crate::name::QualifiedName;
use ast::Ast;
use std::collections::BTreeSet;
// =================
// === Constants ===
// =================
const ALIAS_KEYWORD: &str = "as";
const ALL_KEYWORD: &str = "all";
const HIDING_KEYWORD: &str = "hiding";
// ===============
// === Aliases ===
// ===============
/// Id for an import.
pub type Id = u64;
// =====================
// === ImportedNames ===
// =====================
/// A structure describing what names are imported from the module in a specific import declaration.
#[allow(missing_docs)]
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum ImportedNames {
    /// The import is `import <module> [as <alias>]` and only the module name is imported.
Module { alias: Option<String> },
/// The import is `from <module> import all`, and all names defined in the module are imported.
All,
    /// The import is `from <module> import all hiding <not_imported>`, and all names except
    /// those specified in the `not_imported` list are imported.
AllExcept { not_imported: BTreeSet<String> },
/// The import is `from <module> import <names>`, and only the specified `names` are imported.
List { names: BTreeSet<String> },
}
/// Representation of a single import declaration.
#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd, Hash)]
pub struct Info {
/// The path of the qualified name of the imported module.
pub module: NamePath,
/// Imported names from [`module`].
pub imported: ImportedNames,
}
impl Info {
    /// Create a qualified import (i.e. `import <module-name>`) importing the given module
    /// without an alias.
pub fn new_qualified(module: impl Into<NamePath>) -> Self {
Self { module: module.into(), imported: ImportedNames::Module { alias: None } }
}
    /// Create an unqualified import importing one name from the given module (i.e. `from
    /// <module-name> import <name>`).
pub fn new_single_name(module: impl Into<NamePath>, name: impl Into<String>) -> Self {
Self {
module: module.into(),
imported: ImportedNames::List { names: [name.into()].into() },
}
}
/// Return the module path as [`QualifiedName`]. Returns [`Err`] if the path is not a valid
/// module name.
pub fn qualified_module_name(&self) -> FallibleResult<QualifiedName> {
QualifiedName::from_all_segments(&self.module)
}
/// Construct from an AST, if the Ast is an import declaration.
pub fn from_ast(ast: &Ast) -> Option<Self> {
if let ast::Shape::Tree(ast::Tree {
type_info: ast::TreeType::Import { module, imported },
..
}) = ast.shape()
{
let module = module.clone();
let imported = match imported.clone() {
ast::ImportedNames::All { except } if except.is_empty() => ImportedNames::All,
ast::ImportedNames::All { except } =>
ImportedNames::AllExcept { not_imported: except },
ast::ImportedNames::List { names } => ImportedNames::List { names },
ast::ImportedNames::Module { alias } => ImportedNames::Module { alias },
};
Some(Info { module, imported })
} else {
None
}
}
/// Return the ID of the import.
///
    /// The ID is based on a hash of the qualified name of the imported target. This ID is
    /// internal to the GUI and is not known to the engine.
pub fn id(&self) -> Id {
let mut hasher = DefaultHasher::new();
self.hash(&mut hasher);
hasher.finish()
}
}
impl Display for Info {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let module = self.module.iter().map(ImString::as_str).join(ast::opr::predefined::ACCESS);
let import_kw = ast::macros::QUALIFIED_IMPORT_KEYWORD;
let from_kw = ast::macros::UNQUALIFIED_IMPORT_KEYWORD;
match &self.imported {
ImportedNames::Module { alias } => {
write!(f, "{import_kw} {module}")?;
if let Some(alias) = alias {
write!(f, " {ALIAS_KEYWORD} {alias}")?;
}
Ok(())
}
ImportedNames::All => write!(f, "{from_kw} {module} {import_kw} {ALL_KEYWORD}"),
ImportedNames::List { names } => {
let names = names.iter().join(", ");
write!(f, "{from_kw} {module} {import_kw} {names}")
}
ImportedNames::AllExcept { not_imported: hidden_names } => {
let names = hidden_names.iter().join(", ");
write!(f, "{from_kw} {module} {import_kw} {ALL_KEYWORD} {HIDING_KEYWORD} {names}")
}
}
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use parser::Parser;
struct Fixture {
parser: Parser,
}
impl Fixture {
fn new() -> Self {
Self { parser: Parser::new() }
}
fn run_case(&self, code: &str, expected: Info) {
let ast = self.parser.parse_line_ast(code).expect("Parsing import declaration failed");
let info = Info::from_ast(&ast);
assert_eq!(info, Some(expected));
}
}
#[test]
fn qualified_import_info_from_ast() {
let test = Fixture::new();
let make_info = |module: &[&str]| Info {
module: module.iter().map(|&s| ImString::new(s)).collect(),
imported: ImportedNames::Module { alias: None },
};
let normal_case = "import Standard.Base.Data";
let normal_case_expected = make_info(&["Standard", "Base", "Data"]);
test.run_case(normal_case, normal_case_expected);
let single_segment = "import local";
let single_segment_expected = make_info(&["local"]);
test.run_case(single_segment, single_segment_expected);
}
#[test]
fn unrestricted_import_info_from_ast() {
let test = Fixture::new();
let make_info = |module: &[&str]| Info {
module: module.iter().map(|&s| ImString::new(s)).collect(),
imported: ImportedNames::All,
};
let normal_case = "from Standard.Base import all";
let normal_case_expected = make_info(&["Standard", "Base"]);
test.run_case(normal_case, normal_case_expected);
}
#[test]
fn restricted_import_info_from_ast() {
let test = Fixture::new();
let make_info = |module: &[&str], names: &[&str]| Info {
module: module.iter().map(|&s| ImString::new(s)).collect(),
imported: ImportedNames::List { names: names.iter().map(|&s| s.to_owned()).collect() },
};
let normal_case = "from Standard.Base import Foo, Bar";
let normal_case_expected = make_info(&["Standard", "Base"], &["Foo", "Bar"]);
test.run_case(normal_case, normal_case_expected);
let weird_spaces = "from Standard . Base import Foo , Bar ,Buz";
let weird_spaces_expected = make_info(&["Standard", "Base"], &["Foo", "Bar", "Buz"]);
test.run_case(weird_spaces, weird_spaces_expected);
let single_name = "from Standard.Base import Foo";
let single_name_expected = make_info(&["Standard", "Base"], &["Foo"]);
test.run_case(single_name, single_name_expected);
}
#[test]
fn hiding_import_info_from_ast() {
let test = Fixture::new();
let make_info = |module: &[&str], hidden_names: &[&str]| Info {
module: module.iter().map(|&s| ImString::new(s)).collect(),
imported: ImportedNames::AllExcept {
not_imported: hidden_names.iter().map(|&s| s.to_owned()).collect(),
},
};
let normal_case = "from Standard.Base import all hiding Foo, Bar";
let normal_case_expected = make_info(&["Standard", "Base"], &["Foo", "Bar"]);
test.run_case(normal_case, normal_case_expected);
let weird_spaces = "from Standard . Base import all hiding Foo , Bar ,Buz";
let weird_spaces_expected = make_info(&["Standard", "Base"], &["Foo", "Bar", "Buz"]);
test.run_case(weird_spaces, weird_spaces_expected);
let single_name = "from Standard.Base import all hiding Foo";
let single_name_expected = make_info(&["Standard", "Base"], &["Foo"]);
test.run_case(single_name, single_name_expected);
}
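    // A small sketch of the `Display` round trip and the hash-based `id`. It assumes the
    // keyword constants render as `import`/`from`, consistently with the parsed cases above.
    #[test]
    fn import_info_display_and_id() {
        let module: NamePath = ["Standard", "Base"].iter().map(|&s| ImString::new(s)).collect();
        let qualified = Info::new_qualified(module.clone());
        assert_eq!(qualified.to_string(), "import Standard.Base");
        let single = Info::new_single_name(module, "Foo");
        assert_eq!(single.to_string(), "from Standard.Base import Foo");
        // Ids are stable for equal imports and differ between distinct ones.
        assert_eq!(single.id(), single.id());
        assert_ne!(single.id(), qualified.id());
    }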
}


@ -1,276 +0,0 @@
//! A crate with all functions used to synchronize different representations of our language.
// === Features ===
#![feature(associated_type_bounds)]
#![feature(drain_filter)]
#![feature(iter_order_by)]
#![feature(option_result_contains)]
#![feature(type_alias_impl_trait)]
#![feature(iter_next_chunk)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
use crate::prelude::*;
use crate::definition::DefinitionName;
use crate::definition::ScopeKind;
use ast::crumbs::InfixCrumb;
use ast::crumbs::Located;
use ast::known;
use ast::macros::DocumentationCommentAst;
use ast::opr;
use ast::prefix;
use ast::Ast;
// ==============
// === Export ===
// ==============
pub mod alias_analysis;
pub mod connection;
pub mod context_switch;
pub mod definition;
pub mod graph;
pub mod identifier;
pub mod import;
pub mod module;
pub mod name;
pub mod node;
pub mod refactorings;
#[cfg(test)]
pub mod test_utils;
pub mod text;
// ===============
// === Prelude ===
// ===============
/// Common types that should be visible across the whole IDE crate.
pub mod prelude {
pub use ast::prelude::*;
pub use enso_prelude::*;
pub use enso_profiler as profiler;
pub use enso_profiler::prelude::*;
}
// ==============
// === Consts ===
// ==============
/// Indentation value from language specification:
///
/// Indentation: Indentation is four spaces, and all tabs are converted to 4 spaces. This is not
/// configurable on purpose.
///
/// Link: https://github.com/enso-org/enso/blob/develop/doc/syntax/encoding.md
pub const INDENT: usize = 4;
// ========================
// === Discerning Lines ===
// ========================
/// What kind of node or definition a line should be treated as.
#[derive(Clone, Debug)]
pub enum LineKind {
/// Definition is a binding, which defines a new entity with arguments.
Definition {
/// The binding that introduces the definition.
ast: known::Infix,
/// Name of this definition. Includes typename, if this is an extension method.
name: Located<DefinitionName>,
/// Arguments for this definition. Does not include any implicit ones (e.g. no `self`).
args: Vec<Located<Ast>>,
},
/// Node in a binding form.
ExpressionAssignment {
/// Ast of the whole binding.
ast: known::Infix,
},
/// Node consisting of a plain expression, with no pattern binding.
ExpressionPlain {
/// Ast of the whole expression.
ast: Ast,
},
/// Documentation comment lines are not nodes.
/// Instead, they are discovered and processed as part of nodes that follow them.
DocumentationComment {
/// The comment representation.
documentation: DocumentationCommentAst,
},
}
impl LineKind {
/// Tell how the given line (described by an Ast) should be treated.
// TODO [mwu] This method deserves unit tests of its own.
pub fn discern(ast: &Ast, kind: ScopeKind) -> Self {
use LineKind::*;
        // First of all, if a non-empty line is not an infix (i.e. a binding), it can only be a
        // node or a documentation comment.
let ast = match opr::to_assignment(ast) {
Some(infix) => infix,
None =>
return if let Some(documentation) = DocumentationCommentAst::new(ast) {
// e.g. `## My comment.`
DocumentationComment { documentation }
} else {
// The simplest form of node, e.g. `Point 5 10`
ExpressionPlain { ast: ast.clone_ref() }
},
};
        // Assignments can be either nodes or definitions. To discern, we check the left-hand
        // side. For a definition it is a prefix chain, where the first element is the name and
        // the rest are the arguments (if explicit). For a node it is a pattern, either in the
        // form of a Var without arguments, or a Cons application.
let crumb = InfixCrumb::LeftOperand;
let lhs = Located::new(crumb, prefix::Chain::from_ast_non_strict(&ast.larg));
let name = lhs
.entered(|chain| {
let name_ast = chain.located_func();
name_ast.map(DefinitionName::from_ast)
})
.into_opt();
        // If this is a pattern match, `name` will fail to construct and we'll treat the line as
        // a node, e.g. for `Point x y = get_point …`.
let name = match name {
Some(name) => name,
None => return ExpressionAssignment { ast },
};
let args = lhs
.enumerate_args()
.map(|Located { crumbs, item }| {
                // We are already in the left-hand side of the assignment, so we need to
                // prepend this crumb.
let crumbs = lhs.crumbs.clone().into_iter().chain(crumbs);
let ast = item.clone();
Located::new(crumbs, ast)
})
.collect_vec();
// Note [Scope Differences]
if kind == ScopeKind::NonRoot {
            // 1. Not an extension method but the old setter syntax. Currently not supported in
            //    the language; treated as a node with an invalid pattern.
            //    e.g. `point.x = 5`
let is_setter = !name.extended_target.is_empty();
// 2. No explicit args -- this is a proper node, not a definition.
// e.g. `point = Point 5 10`
let is_node = args.is_empty();
if is_setter || is_node {
return ExpressionAssignment { ast };
}
};
Definition { ast, name, args }
}
}
// Note [Scope Differences]
// ========================
// When we are in definition scope (as opposed to global scope) certain patterns should not be
// considered to be function definitions. These are:
// 1. Expressions like "Int.x = …". In a module, they'd be treated as extension methods. In
//    definition scope they are treated as invalid constructs (setter syntax in the old design).
// 2. Expressions like "foo = 5". In a module, this is treated as a method definition (with an
//    implicit `this` parameter). In a definition, this is just a node (an evaluated expression).
#[cfg(test)]
mod tests {
use super::*;
use crate::definition::DefinitionProvider;
use ast::macros::DocumentationCommentInfo;
use parser::Parser;
    /// Expect a `main` method whose first line is a documentation comment.
/// The text of this comment should match the expected one.
fn run_case(parser: &Parser, code: &str, expected_comment_text: &str) {
let ast = parser.parse_module(code, default()).unwrap();
let main_id = definition::Id::new_plain_name("main");
let main = module::get_definition(&ast, &main_id).unwrap();
let lines = main.block_lines();
let first_line = lines[0].transpose_ref().unwrap();
let doc = DocumentationCommentInfo::new(&first_line, main.indent()).unwrap();
let text = doc.pretty_text();
assert_eq!(text, expected_comment_text);
        // Now, if we convert our pretty text to code, will it be the same as the original line?
let code = DocumentationCommentInfo::text_to_repr(main.indent(), &text);
let ast2 = parser.parse_line(&code).unwrap();
let doc2 = DocumentationCommentInfo::new(&ast2.as_ref(), main.indent())
.unwrap_or_else(|| panic!("Failed to parse `{code}` as comment"));
assert_eq!(doc.line().repr(), doc2.line().repr())
}
#[test]
fn parse_single_line_comment() {
let parser = parser::Parser::new();
// Typical single line case.
let code = r#"
main =
## Single line
node"#;
let expected = " Single line";
run_case(&parser, code, expected);
// Single line case without space after `##`.
let code = r#"
main =
##Single line
node"#;
let expected = "Single line";
run_case(&parser, code, expected);
// Single line case with a single trailing space after `##`.
let code = r#"
main =
##
node"#;
let expected = " ";
run_case(&parser, code, expected);
// Single line case without content.
let code = r#"
main =
##
node"#;
let expected = "";
run_case(&parser, code, expected);
}
#[test]
fn parse_multi_line_comment() {
let parser = parser::Parser::new();
let code = r#"
main =
## First line
Second line
node"#;
let expected = " First line\nSecond line";
run_case(&parser, code, expected);
}
}


@ -1,719 +0,0 @@
//! Code for module-level double representation processing.
use crate::prelude::*;
use enso_text::index::*;
use crate::alias_analysis;
use crate::definition;
use crate::definition::DefinitionProvider;
use crate::definition::EmptyDefinitionId;
use crate::identifier;
use crate::identifier::Identifier;
use crate::import;
use crate::name::NamePath;
use crate::name::QualifiedName;
use ast::crumbs::ChildAst;
use ast::crumbs::Located;
use ast::crumbs::ModuleCrumb;
use ast::known;
use ast::BlockLine;
use engine_protocol::language_server;
use std::fmt::Formatter;
// ==============
// === Errors ===
// ==============
#[derive(Copy, Clone, Debug, Fail)]
#[fail(display = "Id segment list is empty.")]
#[allow(missing_docs)]
pub struct EmptySegments;
#[derive(Clone, Debug, Fail)]
#[fail(display = "Import `{}` was not found in the module.", _0)]
#[allow(missing_docs)]
pub struct ImportNotFound(pub String);
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Import with ID `{}` was not found in the module.", _0)]
#[allow(missing_docs)]
pub struct ImportIdNotFound(pub import::Id);
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Line index is out of bounds.")]
#[allow(missing_docs)]
pub struct LineIndexOutOfBounds;
#[allow(missing_docs)]
#[derive(Fail, Clone, Debug)]
#[fail(display = "Cannot find method with pointer {:?}.", _0)]
pub struct CannotFindMethod(language_server::MethodPointer);
#[allow(missing_docs)]
#[derive(Fail, Clone, Debug)]
#[fail(display = "The definition with crumbs {:?} is not a direct child of the module.", _0)]
pub struct NotDirectChild(ast::Crumbs);
// ==========
// === Id ===
// ==========
/// The segments of module name. Allow finding module in the project.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Id {
/// The last segment, which is the module name. For the project's main module it should be
/// equal to [`PROJECTS_MAIN_MODULE`].
pub name: ImString,
/// The segments of all parent modules, from the top module to the direct parent. Does **not**
/// include the project name.
pub parent_modules: Vec<ImString>,
}
impl Id {
/// Create a module id from a list of segments. The list must contain neither the project name
/// nor the namespace. Fails if the list is empty (the module name is required).
pub fn try_from_segments(
segments: impl IntoIterator<Item: Into<ImString>>,
) -> FallibleResult<Self> {
let mut segments = segments.into_iter().map(Into::into).collect_vec();
let name = segments.pop().ok_or(EmptySegments)?;
Ok(Self { name, parent_modules: segments })
}
/// Return an iterator over the id's segments.
pub fn segments(&self) -> impl Iterator<Item = &ImString> {
self.parent_modules.iter().chain(iter::once(&self.name))
}
}
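// A hedged sketch (not part of the original file) of constructing a module `Id` from
// segments; assumes `&str` converts into `ImString` as elsewhere in this crate.
#[cfg(test)]
mod id_sketch {
    use super::*;

    #[test]
    fn id_from_segments() {
        let id = Id::try_from_segments(["Parent", "Child"]).unwrap();
        assert_eq!(id.name, "Child");
        assert_eq!(id.to_string(), "Parent.Child");
        // The module name is required, so an empty segment list fails.
        assert!(Id::try_from_segments(Vec::<ImString>::new()).is_err());
    }
}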
impl IntoIterator for Id {
type Item = ImString;
type IntoIter = impl Iterator<Item = Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.parent_modules.into_iter().chain(iter::once(self.name))
}
}
impl From<Id> for NamePath {
fn from(id: Id) -> Self {
id.into_iter().collect()
}
}
impl Display for Id {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.segments().format("."))
}
}
// ============
// === Info ===
// ============
/// Wrapper allowing getting information about the module and updating it.
#[derive(Clone, Debug)]
pub struct Info {
#[allow(missing_docs)]
pub ast: known::Module,
}
impl Info {
/// Generate a name for a definition that can be introduced without side-effects.
///
/// The name is generated by appending a number to the given base string.
pub fn generate_name(&self, base: &str) -> FallibleResult<Identifier> {
let used_names = self.used_names();
let used_names = used_names.iter().map(|name| name.item.as_str());
identifier::generate_name(base, used_names)
}
/// Identifiers introduced or referred to in the module's scope.
///
/// Introducing an identifier not included in this list should have no side effects on name
/// resolution in this module's code.
pub fn used_names(&self) -> Vec<Located<String>> {
let usage = alias_analysis::analyze_crumbable(self.ast.shape());
usage.all_identifiers()
}
/// Iterate over all lines in the module that contain an import declaration.
pub fn enumerate_imports(&self) -> impl Iterator<Item = (ModuleCrumb, import::Info)> + '_ {
let children = self.ast.shape().enumerate();
children.filter_map(|(crumb, ast)| Some((crumb, import::Info::from_ast(ast)?)))
}
/// Iterate over all import declarations in the module.
///
/// If the caller wants to know *where* the declarations are, use `enumerate_imports`.
pub fn iter_imports(&self) -> impl Iterator<Item = import::Info> + '_ {
self.enumerate_imports().map(|(_, import)| import)
}
/// Check if the module contains an import with the given id.
pub fn contains_import(&self, id: import::Id) -> bool {
self.iter_imports().any(|import| import.id() == id)
}
/// Add a new line to the module's block.
///
/// Note that the indices are "module line" indices, which are usually quite different from
/// text API line indices (because nested blocks don't count as separate "module lines").
pub fn add_line(&mut self, index: usize, ast: Option<Ast>) {
let line = BlockLine::new(ast);
self.ast.update_shape(|shape| shape.lines.insert(index, line))
}
/// Remove the line at the given index.
///
/// Returns the removed line. Fails if the index is out of bounds.
pub fn remove_line(&mut self, index: usize) -> FallibleResult<BlockLine<Option<Ast>>> {
self.ast.update_shape(|shape| {
shape.lines.try_remove(index).ok_or_else(|| LineIndexOutOfBounds.into())
})
}
/// Remove a line that matches the given import description.
///
/// If more than one line matches, only the first one will be removed.
/// Fails if there is no import matching the given argument.
pub fn remove_import(&mut self, to_remove: &import::Info) -> FallibleResult {
let lookup_result = self.enumerate_imports().find(|(_, import)| import == to_remove);
let (crumb, _) = lookup_result.ok_or_else(|| ImportNotFound(to_remove.to_string()))?;
self.remove_line(crumb.line_index)?;
Ok(())
}
/// Remove a line that matches the given import ID.
///
/// If more than one line matches, only the first one will be removed.
/// Fails if there is no import matching the given argument.
pub fn remove_import_by_id(&mut self, to_remove: import::Id) -> FallibleResult {
let lookup_result = self.enumerate_imports().find(|(_, import)| import.id() == to_remove);
let (crumb, _) = lookup_result.ok_or(ImportIdNotFound(to_remove))?;
self.remove_line(crumb.line_index)?;
Ok(())
}
/// Add a new import declaration to a module.
///
/// This function will try to keep the imports in lexicographic order. It returns the index at
/// which the import was added (its index in the list returned by `enumerate_imports`).
// TODO [mwu]
// Ideally we should not require parser but should use some sane way of generating AST from
// the `ImportInfo` value.
pub fn add_import(&mut self, parser: &parser::Parser, to_add: import::Info) -> usize {
// Find last import that is not "after" the added one lexicographically.
let previous_import =
self.enumerate_imports().take_while(|(_, import)| &to_add > import).last();
let index_to_place_at = previous_import.map_or(0, |(crumb, _)| crumb.line_index + 1);
let import_ast = parser.parse_line_ast(to_add.to_string()).unwrap();
self.add_line(index_to_place_at, Some(import_ast));
index_to_place_at
}
/// Add a new import declaration to a module.
///
/// For more details on the mechanics, see the [`add_import`] documentation.
pub fn add_import_if_missing(
&mut self,
parser: &parser::Parser,
to_add: import::Info,
) -> Option<usize> {
(!self.contains_import(to_add.id())).then(|| self.add_import(parser, to_add))
}
/// Place the line with the given AST in the module's body.
///
/// Unlike `add_line` (which is more low-level), this will introduce blank lines around the
/// added line, and the line's location is described in relation to other definitions.
///
/// Typically used to place lines with definitions in the module.
pub fn add_ast(&mut self, ast: Ast, location: Placement) -> FallibleResult {
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum BlankLinePlacement {
Before,
After,
None,
}
let blank_line = match location {
_ if self.ast.lines.is_empty() => BlankLinePlacement::None,
Placement::Begin => BlankLinePlacement::After,
Placement::End => BlankLinePlacement::Before,
Placement::After(_) => BlankLinePlacement::Before,
Placement::Before(_) => BlankLinePlacement::After,
};
let mut index = match location {
Placement::Begin => 0,
Placement::End => self.ast.lines.len(),
Placement::Before(next_def) => locate_line_with(&self.ast, &next_def)?.line_index,
Placement::After(next_def) => locate_line_with(&self.ast, &next_def)?.line_index + 1,
};
let mut add_line = |ast_opt: Option<Ast>| {
self.add_line(index, ast_opt);
index += 1;
};
if blank_line == BlankLinePlacement::Before {
add_line(None);
}
add_line(Some(ast));
if blank_line == BlankLinePlacement::After {
add_line(None);
}
Ok(())
}
/// Add a new method definition to the module.
pub fn add_method(
&mut self,
method: definition::ToAdd,
location: Placement,
parser: &parser::Parser,
) -> FallibleResult {
let no_indent = 0;
let definition_ast = method.ast(no_indent, parser)?;
self.add_ast(definition_ast, location)
}
/// Updates the given definition using the passed invokable.
pub fn update_definition(
&mut self,
id: &definition::Id,
f: impl FnOnce(definition::DefinitionInfo) -> FallibleResult<definition::DefinitionInfo>,
) -> FallibleResult {
let definition = locate(&self.ast, id)?;
let new_definition = f(definition.item)?;
let new_ast = new_definition.ast.into();
self.ast = self.ast.set_traversing(&definition.crumbs, new_ast)?;
Ok(())
}
#[cfg(test)]
pub fn expect_code(&self, expected_code: impl AsRef<str>) {
assert_eq!(self.ast.repr(), expected_code.as_ref());
}
}
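// A hedged usage sketch (mirroring the tests at the end of this file): imports are kept in
// lexicographic order, and `add_import_if_missing` is a no-op returning `None` when the
// import is already present.
#[cfg(test)]
mod add_import_sketch {
    use super::*;

    #[test]
    fn adding_an_existing_import_is_a_noop() {
        let parser = parser::Parser::new();
        let ast = parser.parse_module("import Foo.Bar", default()).unwrap();
        let mut info = Info { ast };
        let import = |code| {
            let ast = parser.parse_line_ast(code).unwrap();
            import::Info::from_ast(&ast).unwrap()
        };
        // "Bar.Baz" sorts before "Foo.Bar", so it is inserted at index 0.
        assert_eq!(info.add_import_if_missing(&parser, import("import Bar.Baz")), Some(0));
        info.expect_code("import Bar.Baz\nimport Foo.Bar");
        // Adding the same import again changes nothing.
        assert_eq!(info.add_import_if_missing(&parser, import("import Bar.Baz")), None);
        info.expect_code("import Bar.Baz\nimport Foo.Bar");
    }
}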
impl From<known::Module> for Info {
fn from(ast: known::Module) -> Self {
Info { ast }
}
}
// =================
// === Placement ===
// =================
/// Structure describing where to place something being added to the module.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Placement {
/// Place at the beginning of the module.
Begin,
/// Place at the end of the module.
End,
/// Place before the given definition.
Before(definition::Crumb),
/// Place after the given definition.
After(definition::Crumb),
}
// =======================
// === ChildDefinition ===
// =======================
/// Represents information about a definition being a direct child of this module, including its
/// location.
///
/// Internally it is `definition::ChildDefinition` with only a single `ModuleCrumb` as location.
#[derive(Clone, Debug, Deref)]
pub struct ChildDefinition(definition::ChildDefinition);
impl ChildDefinition {
fn try_retrieving_crumb(child: &definition::ChildDefinition) -> Option<ModuleCrumb> {
match child.crumbs.as_slice() {
[ast::crumbs::Crumb::Module(crumb)] => Some(*crumb),
_ => None,
}
}
/// Try constructing the value from a `definition::ChildDefinition`. Fails if it is not a
/// direct child of a module.
pub fn new(child: definition::ChildDefinition) -> Result<Self, NotDirectChild> {
if Self::try_retrieving_crumb(&child).is_some() {
Ok(Self(child))
} else {
Err(NotDirectChild(child.crumbs))
}
}
/// The location of this definition child in the module.
pub fn crumb(&self) -> ModuleCrumb {
// Safe, because our only constructor checks that this works. This is the type's invariant.
Self::try_retrieving_crumb(&self.0).unwrap()
}
}
impl TryFrom<definition::ChildDefinition> for ChildDefinition {
type Error = NotDirectChild;
fn try_from(value: definition::ChildDefinition) -> Result<Self, Self::Error> {
Self::new(value)
}
}
// ========================
// === Module Utilities ===
// ========================
/// Looks up a definition in the module.
pub fn get_definition(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<definition::DefinitionInfo> {
Ok(locate(ast, id)?.item)
}
/// Locate the line with the given definition and return the crumb that denotes it.
///
/// Fails if there is no matching definition being a direct child of the module.
pub fn locate_line_with(
ast: &known::Module,
crumb: &definition::Crumb,
) -> FallibleResult<ModuleCrumb> {
locate_child(ast, crumb).map(|child| child.crumb())
}
/// Locate a definition that is a direct child of the module.
pub fn locate_child(
ast: &known::Module,
crumb: &definition::Crumb,
) -> FallibleResult<ChildDefinition> {
let child = ast.def_iter().find_by_name(crumb)?;
Ok(ChildDefinition::try_from(child)?)
}
/// Traverses the module's definition tree following the given Id crumbs, looking up the definition.
pub fn locate(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<definition::ChildDefinition> {
let mut crumbs_iter = id.crumbs.iter();
// Not exactly regular: the first crumb needs a special case, as it is looked up directly in
// the module rather than in a definition. After this we can go from one definition to another.
let first_crumb = crumbs_iter.next().ok_or(EmptyDefinitionId)?;
let mut child = ast.def_iter().find_by_name(first_crumb)?;
for crumb in crumbs_iter {
child = definition::resolve_single_name(child, crumb)?;
}
Ok(child)
}
/// Get a definition ID that points to a method matching the given pointer.
///
/// The module is assumed to be in the file identified by `method.file` (for the purpose of
/// desugaring implicit extension methods for modules).
///
/// The `module_name` parameter is the name of the module that contains `ast`.
pub fn lookup_method(
module_name: &QualifiedName,
ast: &known::Module,
method: &language_server::MethodPointer,
) -> FallibleResult<definition::Id> {
let qualified_typename = QualifiedName::from_text(&method.defined_on_type)?;
let defined_in_this_module = module_name == &qualified_typename;
let method_module_name = QualifiedName::from_text(&method.module)?;
let implicit_extension_allowed = method.defined_on_type == method_module_name.to_string();
for child in ast.def_iter() {
let child_name = &child.name.item;
let name_matches = child_name.name.item == method.name;
let type_matches = match child_name.extended_target.as_slice() {
[] => implicit_extension_allowed || defined_in_this_module,
[typename] => typename.item == qualified_typename.name(),
_ => child_name.explicitly_extends_type(&method.defined_on_type),
};
if name_matches && type_matches {
return Ok(definition::Id::new_single_crumb(child_name.clone()));
}
}
Err(CannotFindMethod(method.clone()).into())
}
/// Get a span in module's text representation where the given definition is located.
pub fn definition_span(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<enso_text::Range<Byte>> {
let location = locate(ast, id)?;
ast.range_of_descendant_at(&location.crumbs)
}
impl DefinitionProvider for known::Module {
fn indent(&self) -> usize {
0
}
fn scope_kind(&self) -> definition::ScopeKind {
definition::ScopeKind::Root
}
fn enumerate_asts<'a>(&'a self) -> Box<dyn Iterator<Item = ChildAst<'a>> + 'a> {
self.ast().children()
}
}
// ================
// === MethodId ===
// ================
/// A structure identifying a method.
///
/// It is very similar to the `MethodPointer` from the language_server API; however, it may
/// point to a method outside the currently opened project.
#[derive(Clone, Debug, serde::Deserialize, Eq, Hash, PartialEq, serde::Serialize)]
#[allow(missing_docs)]
pub struct MethodId {
pub module: QualifiedName,
pub defined_on_type: QualifiedName,
pub name: String,
}
// ============
// === Test ===
// ============
#[cfg(test)]
mod tests {
use super::*;
use crate::definition::DefinitionName;
use engine_protocol::language_server::MethodPointer;
#[test]
fn import_listing() {
let parser = parser::Parser::new();
let expect_imports = |code: &str, expected: &[&[&str]]| {
let ast = parser.parse_module(code, default()).unwrap();
let info = Info { ast };
let imports = info.iter_imports().collect_vec();
assert_eq!(imports.len(), expected.len());
for (import, expected_segments) in imports.iter().zip(expected) {
itertools::assert_equal(import.module.iter(), expected_segments.iter());
}
};
// TODO [mwu] waiting for fix https://github.com/enso-org/enso/issues/1016
// expect_imports("import", &[&[]]);
expect_imports("import Foo", &[&["Foo"]]);
expect_imports("import Foo.Bar", &[&["Foo", "Bar"]]);
expect_imports("foo = bar\nimport Foo.Bar", &[&["Foo", "Bar"]]);
expect_imports("import Foo.Bar\nfoo=bar\nimport Foo.Bar", &[&["Foo", "Bar"], &[
"Foo", "Bar",
]]);
}
#[test]
fn import_adding_and_removing() {
let parser = parser::Parser::new();
let code = "import Foo.Bar.Baz";
let ast = parser.parse_module(code, default()).unwrap();
let mut info = Info { ast };
let import = |code| {
let ast = parser.parse_line_ast(code).unwrap();
import::Info::from_ast(&ast).unwrap()
};
info.add_import(&parser, import("import Bar.Gar"));
info.expect_code("import Bar.Gar\nimport Foo.Bar.Baz");
info.add_import(&parser, import("import Gar.Bar"));
info.expect_code("import Bar.Gar\nimport Foo.Bar.Baz\nimport Gar.Bar");
info.remove_import(&import("import Foo.Bar.Baz")).unwrap();
info.expect_code("import Bar.Gar\nimport Gar.Bar");
info.remove_import(&import("import Foo.Bar.Baz")).unwrap_err();
info.expect_code("import Bar.Gar\nimport Gar.Bar");
info.remove_import(&import("import Gar.Bar")).unwrap();
info.expect_code("import Bar.Gar");
info.remove_import(&import("import Bar.Gar")).unwrap();
info.expect_code("");
info.add_import(&parser, import("import Bar.Gar"));
info.expect_code("import Bar.Gar");
}
#[test]
fn implicit_method_resolution() {
let parser = parser::Parser::new();
let module_name =
QualifiedName::from_all_segments(["local", "ProjectName", "Main"]).unwrap();
let expect_find = |method: &MethodPointer, code, expected: &definition::Id| {
let module = parser.parse_module(code, default()).unwrap();
let result = lookup_method(&module_name, &module, method);
assert_eq!(result.unwrap().to_string(), expected.to_string());
// TODO [mwu]
// We should be able to use `assert_eq!(result.unwrap(),expected);`
// But we can't, because definition::Id uses located fields and crumbs won't match.
// Eventually we'll likely need to split definition names into located and unlocated
// ones. Definition ID should not require any location info.
};
let expect_not_found = |method: &MethodPointer, code| {
let module = parser.parse_module(code, default()).unwrap();
lookup_method(&module_name, &module, method).expect_err("expected method not found");
};
// === Lookup the Main (local module type) extension method ===
let ptr = MethodPointer {
defined_on_type: "local.ProjectName.Main".into(),
module: "local.ProjectName.Main".into(),
name: "foo".into(),
};
// Implicit module extension method.
let id = definition::Id::new_plain_name("foo");
expect_find(&ptr, "foo a b = a + b", &id);
// Explicit module extension method.
let id = definition::Id::new_single_crumb(DefinitionName::new_method("Main", "foo"));
expect_find(&ptr, "Main.foo a b = a + b", &id);
// Matching name but extending wrong type.
expect_not_found(&ptr, "Number.foo a b = a + b");
// Mismatched name.
expect_not_found(&ptr, "bar a b = a + b");
// === Lookup the Int (non-local type) extension method ===
let ptr = MethodPointer {
defined_on_type: "std.Base.Main.Number".into(),
module: "local.ProjectName.Main".into(),
name: "foo".into(),
};
expect_not_found(&ptr, "foo a b = a + b");
let id = definition::Id::new_single_crumb(DefinitionName::new_method("Number", "foo"));
expect_find(&ptr, "Number.foo a b = a + b", &id);
expect_not_found(&ptr, "Text.foo a b = a + b");
expect_not_found(&ptr, "bar a b = a + b");
}
#[test]
fn test_definition_location() {
let code = r"
some def =
first line
second line
other def =
first line
second line
nested def =
nested body
last line of other def
last def = inline expression";
let parser = parser::Parser::new();
let module = parser.parse_module(code, default()).unwrap();
let module = Info { ast: module };
let id = definition::Id::new_plain_name("other");
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("last line of other def"));
let id = definition::Id::new_plain_name("last");
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("inline expression"));
let id = definition::Id::new_plain_names(["other", "nested"]);
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("nested body"));
}
#[test]
fn add_method() {
let parser = parser::Parser::new();
let module = r#"Main.method1 arg = body
main = Main.method1 10"#;
let module = Info::from(parser.parse_module(module, default()).unwrap());
let method1_id = DefinitionName::new_method("Main", "method1");
let main_id = DefinitionName::new_plain("main");
let to_add = definition::ToAdd {
name: DefinitionName::new_method("Main", "add"),
explicit_parameter_names: vec!["arg1".into(), "arg2".into()],
body_head: Ast::infix_var("arg1", "+", "arg2"),
body_tail: default(),
};
let repr_after_insertion = |location| {
let mut module = module.clone();
module.add_method(to_add.clone(), location, &parser).unwrap();
module.ast.repr()
};
let expected = r#"Main.add arg1 arg2 = arg1 + arg2
Main.method1 arg = body
main = Main.method1 10"#;
assert_eq!(repr_after_insertion(Placement::Begin), expected);
let expected = r#"Main.method1 arg = body
main = Main.method1 10
Main.add arg1 arg2 = arg1 + arg2"#;
assert_eq!(repr_after_insertion(Placement::End), expected);
let expected = r#"Main.method1 arg = body
Main.add arg1 arg2 = arg1 + arg2
main = Main.method1 10"#;
assert_eq!(repr_after_insertion(Placement::After(method1_id.clone())), expected);
assert_eq!(
repr_after_insertion(Placement::Before(method1_id.clone())),
repr_after_insertion(Placement::Begin)
);
assert_eq!(
repr_after_insertion(Placement::After(method1_id)),
repr_after_insertion(Placement::Before(main_id.clone()))
);
assert_eq!(
repr_after_insertion(Placement::After(main_id)),
repr_after_insertion(Placement::End)
);
// TODO [mwu]
// This test doesn't include multi-lines functions, as the result may seem somewhat
// unexpected due to the way that parser assigns blank lines to the former block
// rather than module. If anyone will care, we might revisit this after the parser
// 2.0 rewrite.
}
}


@ -1,523 +0,0 @@
//! The structures representing the name paths which may appear in Enso code.
use crate::prelude::*;
use crate::module;
use ast::constants::PROJECTS_MAIN_MODULE;
use ast::opr::predefined::ACCESS;
use enso_prelude::serde_reexports::Deserialize;
use enso_prelude::serde_reexports::Serialize;
use std::cmp::Ordering;
// ==============
// === Export ===
// ==============
pub mod project;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug, Fail)]
pub enum InvalidQualifiedName {
#[fail(display = "The qualified name is empty.")]
EmptyName,
#[fail(display = "Too few segments in qualified name.")]
TooFewSegments,
#[fail(display = "Too many segments in qualified name.")]
TooManySegments,
}
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug, Fail)]
#[fail(display = "No qualified name found in AST")]
pub struct QualifiedNameNotFoundInAst;
// ================
// === NamePath ===
// ================
/// Representation of a name path: the list of segments separated by dots in the code.
pub type NamePath = Vec<ImString>;
/// Reference to [`NamePath`] or its fragment.
pub type NamePathRef<'a> = &'a [ImString];
// =====================
// === QualifiedName ===
// =====================
/// A QualifiedName template without a specified type for the segment list container.
///
/// Usually you should use one of its specializations: the owned [`QualifiedName`] or the
/// borrowed [`QualifiedNameRef`].
#[derive(Clone, Debug, Default, Deserialize, Hash, Serialize)]
#[serde(into = "String")]
#[serde(try_from = "String")]
#[serde(bound(
serialize = "Self: Into<String>, Segments: Clone",
deserialize = "Self: TryFrom<String, Error: Display>"
))]
pub struct QualifiedNameTemplate<Segments> {
project: project::QualifiedName,
path: Segments,
}
/// The Fully Qualified Name of a language entity (type, module, method, etc.).
///
/// It's represented in the code as a list of identifiers separated by dots, where the first two
/// segments are the project namespace and name.
///
/// This structure removes the project's main module name from the path upon construction, to
/// avoid having different [`QualifiedName`]s represent the same logical path (which helps when
/// we want to, for example, look things up by their qualified name).
pub type QualifiedName = QualifiedNameTemplate<NamePath>;
/// A reference to a [`QualifiedName`] or its fragment.
pub type QualifiedNameRef<'a> = QualifiedNameTemplate<NamePathRef<'a>>;
impl_clone_ref_as_clone!(['a] QualifiedNameRef<'a>);
// === Construction ===
impl<Segments> QualifiedNameTemplate<Segments> {
fn new(project: project::QualifiedName, path: Segments) -> Self {
Self { project, path }
}
}
impl QualifiedName {
/// Create a qualified name for the project's main module.
pub fn new_main(project: project::QualifiedName) -> Self {
Self::new(project, default())
}
/// Create a qualified name for the module in `project` identified by `id`.
pub fn new_module(project: project::QualifiedName, id: module::Id) -> Self {
let without_main = id.into_iter().skip_while(|s| s == PROJECTS_MAIN_MODULE);
Self::new(project, without_main.collect_vec())
}
/// Create a qualified name with a new segment pushed at the end of the path.
pub fn new_child(mut self, child: impl Into<ImString>) -> Self {
self.push_segment(child);
self
}
/// Constructs a qualified name from its text representation.
///
/// Note that there is no guarantee that `QualifiedName::from_text(s).to_string() == s`, as the
/// `Main` segment is removed upon constructing a [`QualifiedName`].
///
/// Fails if the text is not a valid name.
///
/// # Example
///
/// ```rust
/// use double_representation::name::QualifiedName;
/// let name = QualifiedName::from_text("ns.Project.Module.Type").unwrap();
/// assert_eq!(name.project().namespace, "ns");
/// assert_eq!(name.project().project, "Project");
/// assert_eq!(name.path()[0], "Module");
/// assert_eq!(name.path()[1], "Type");
///
/// // The "Main" module segment is removed.
/// let main_module_name = QualifiedName::from_text("ns.Project.Main.Type").unwrap();
/// assert_eq!(main_module_name.to_string(), "ns.Project.Type");
/// ```
pub fn from_text(text: impl AsRef<str>) -> FallibleResult<Self> {
let text = text.as_ref();
Self::from_all_segments(text.split(ACCESS))
}
/// Build a module's full qualified name from its name segments and the project name.
///
/// ```
/// # use double_representation::name::QualifiedName;
///
/// let name = QualifiedName::from_all_segments(["Project", "Main"]).unwrap();
/// assert_eq!(name.to_string(), "Project.Main");
/// ```
pub fn from_all_segments<Seg>(segments: impl IntoIterator<Item = Seg>) -> FallibleResult<Self>
where for<'s> Seg: Into<ImString> + PartialEq<&'s str> {
let mut iter = segments.into_iter().map(|name| name.into());
let project_name = match (iter.next(), iter.next()) {
(Some(ns), Some(name)) => project::QualifiedName::new(ns, name),
_ => return Err(InvalidQualifiedName::TooFewSegments.into()),
};
let without_main = iter.skip_while(|s| *s == PROJECTS_MAIN_MODULE);
Ok(Self::new(project_name, without_main.collect()))
}
}
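// A hedged sketch (assumption, not from the original source): `new_module` drops the `Main`
// segment of the project's main module, so the resulting name is just the project name.
#[cfg(test)]
mod new_module_sketch {
    use super::*;

    #[test]
    fn main_module_segment_is_dropped() {
        let project = project::QualifiedName::new("ns", "Project");
        let id = module::Id::try_from_segments(["Main"]).unwrap();
        let name = QualifiedName::new_module(project, id);
        assert!(name.is_main_module());
        assert_eq!(name.to_string(), "ns.Project");
    }
}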
// === Methods Shared By QualifiedName and QualifiedNameRef ===
impl<Segments: AsRef<[ImString]>> QualifiedNameTemplate<Segments> {
/// The project name segments.
pub fn project(&self) -> &project::QualifiedName {
&self.project
}
/// The path part of the qualified name: everything that goes after the project name.
pub fn path(&self) -> &[ImString] {
self.path.as_ref()
}
/// Get the entity's name. In the case of the Main module it is `Main`, not the project name.
pub fn name(&self) -> &str {
self.path.as_ref().last().map_or(PROJECTS_MAIN_MODULE, ImString::as_str)
}
/// Get the entity's name as visible in the code. In the case of the Main module it is the
/// project name, not `Main`.
pub fn alias_name(&self) -> &ImString {
let module_name = (!self.is_main_module()).and_option_from(|| self.path.as_ref().last());
module_name.unwrap_or(&self.project.project)
}
/// Check if the name refers to some project's Main module.
pub fn is_main_module(&self) -> bool {
self.path.as_ref().is_empty()
}
/// Check if this name refers to a descendant of another name.
///
/// ```rust
/// # use double_representation::name::QualifiedName;
///
/// let parent = QualifiedName::from_text("ns.Project.Module").unwrap();
/// let descendant = QualifiedName::from_text("ns.Project.Module.SubModule.Element").unwrap();
/// let not_descendant = QualifiedName::from_text("ns.Project.Module2.Element").unwrap();
///
/// assert!(descendant.is_descendant_of(parent.as_ref()));
/// assert!(!not_descendant.is_descendant_of(parent.as_ref()));
/// assert!(parent.is_descendant_of(parent.as_ref()));
/// ```
pub fn is_descendant_of(&self, other: QualifiedNameRef) -> bool {
self.project == other.project && self.path.as_ref().starts_with(other.path)
}
/// The iterator over the name's segments (including the project namespace and name).
pub fn segments(&self) -> impl Iterator<Item = &ImString> {
self.project.segments().chain(self.path.as_ref())
}
/// The iterator over the name's segments (including the project namespace and name), with the
/// [`PROJECTS_MAIN_MODULE`] segment appended in the case of the main module.
pub fn segments_with_main_segment(&self) -> impl Iterator<Item = &str> {
let main_segment = self.is_main_module().then_some(PROJECTS_MAIN_MODULE);
self.segments().map(|s| s.as_str()).chain(main_segment)
}
/// Return the module identifier pointed to by this qualified name.
pub fn module_id(&self) -> module::Id {
let module_path = self.path.as_ref();
let parent_modules = &module_path[0..module_path.len().saturating_sub(1)];
module::Id { name: self.name().into(), parent_modules: parent_modules.to_vec() }
}
/// Check if the name refers to an entity defined or re-exported in the library's main module.
pub fn is_top_element(&self) -> bool {
self.path.as_ref().len() == 1
}
/// Return a qualified name referring to the same project and some fragment of the [`path`]
/// part.
pub fn sub_path(
&self,
range: impl SliceIndex<[ImString], Output = [ImString]>,
) -> QualifiedNameRef {
QualifiedNameRef { project: self.project.clone_ref(), path: &self.path.as_ref()[range] }
}
/// Split the qualified name into two parts: the qualified name of the nth parent module and
/// the remaining access chain of the requested length. Returns `None` if the requested access
/// chain length is too long to split off.
pub fn split_chain(&self, access_chain_length: usize) -> Option<(QualifiedNameRef, String)> {
let path = self.path.as_ref();
if access_chain_length >= path.len() {
return None;
}
let (path, chain) = path.split_at(path.len() - access_chain_length);
let parent_name = QualifiedNameRef { project: self.project.clone_ref(), path };
let chain = chain.iter().map(|s| s.as_str()).join(ACCESS);
Some((parent_name, chain))
}
/// Return the [`QualifiedNameRef`] referring to this name's parent.
///
/// ```rust
/// use double_representation::name::QualifiedName;
/// let name = QualifiedName::from_text("ns.Project.Module.Type").unwrap();
/// let parent = QualifiedName::from_text("ns.Project.Module").unwrap();
/// assert_eq!(name.parent(), Some(parent.as_ref()));
/// ```
pub fn parent(&self) -> Option<QualifiedNameRef> {
let shorter_len = self.path.as_ref().len().checked_sub(1)?;
Some(self.sub_path(0..shorter_len))
}
/// Returns an iterator over all parent entities. `self` itself is not included.
///
/// ```rust
/// use double_representation::name::QualifiedName;
/// let name = QualifiedName::from_text("ns.Project.Module.Type").unwrap();
/// let parents: Vec<String> = name.parents().map(|qn| qn.to_string()).collect();
/// assert_eq!(parents, vec!["ns.Project.Module", "ns.Project"]);
/// ```
pub fn parents(&self) -> impl Iterator<Item = QualifiedNameRef> {
let mut path_upper_bounds = (0..self.path.as_ref().len()).rev();
iter::from_fn(move || {
let upper_bound = path_upper_bounds.next()?;
Some(self.sub_path(0..upper_bound))
})
}
/// Convert to [`QualifiedNameRef`].
pub fn as_ref(&self) -> QualifiedNameRef {
QualifiedNameRef { project: self.project.clone_ref(), path: self.path.as_ref() }
}
/// Create a new owned version of this qualified name.
pub fn to_owned(&self) -> QualifiedName {
QualifiedName { project: self.project.clone_ref(), path: self.path.as_ref().into() }
}
/// Convert the qualified name to a [`String`], appending the [`PROJECTS_MAIN_MODULE`] segment
/// in the case of the main module.
///
/// ```rust
/// use double_representation::name::QualifiedName;
/// let name = QualifiedName::from_text("ns.Project").unwrap();
/// assert_eq!(name.to_string_with_main_segment(), "ns.Project.Main");
/// ```
pub fn to_string_with_main_segment(&self) -> String {
self.segments_with_main_segment().join(ACCESS)
}
}
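// A hedged sketch of `split_chain` (assumed behavior, mirroring the doc comment above).
#[cfg(test)]
mod split_chain_sketch {
    use super::*;

    #[test]
    fn split_off_an_access_chain() {
        let name = QualifiedName::from_text("ns.Project.Module.Type").unwrap();
        let (parent, chain) = name.split_chain(1).unwrap();
        assert_eq!(parent.to_string(), "ns.Project.Module");
        assert_eq!(chain, "Type");
        // A chain as long as the whole path cannot be split off.
        assert!(name.split_chain(2).is_none());
    }
}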
// === Owned QualifiedName only Methods ===
impl QualifiedName {
/// Add a segment to this qualified name.
///
/// Because the [`QualifiedName`] always omits the "Main" module name in its path, this
/// function may leave the name exactly as it was (see the example below).
///
/// ```rust
/// use double_representation::name::QualifiedName;
/// let mut name = QualifiedName::from_text("ns.Proj.Foo").unwrap();
/// name.push_segment("Bar");
/// assert_eq!(name.to_string(), "ns.Proj.Foo.Bar");
///
/// let mut name = QualifiedName::from_text("ns.Proj").unwrap();
/// name.push_segment("Main");
/// assert_eq!(name.to_string(), "ns.Proj");
/// ```
pub fn push_segment(&mut self, name: impl Into<ImString>) {
let name = name.into();
if name != PROJECTS_MAIN_MODULE || !self.path.is_empty() {
self.path.push(name);
}
}
/// Remove the last segment from this qualified name.
///
/// ```rust
/// use double_representation::name::QualifiedName;
/// use enso_prelude::ImString;
///
/// let mut name = QualifiedName::from_text("ns.Proj.Foo").unwrap();
/// assert_eq!(name.pop_segment(), Some(ImString::new("Foo")));
/// assert_eq!(name.pop_segment(), None);
/// ```
pub fn pop_segment(&mut self) -> Option<ImString> {
self.path.pop()
}
}
// === Conversion from AST ===
impl TryFrom<&Ast> for QualifiedName {
type Error = failure::Error;
fn try_from(ast: &Ast) -> Result<Self, Self::Error> {
let segments = ast::opr::Chain::try_new(ast)
.ok_or(QualifiedNameNotFoundInAst)?
.as_qualified_name_segments()
.ok_or(QualifiedNameNotFoundInAst)?;
Self::from_all_segments(segments)
}
}
// === Conversions From and Into String ===
impl TryFrom<&str> for QualifiedName {
type Error = failure::Error;
fn try_from(text: &str) -> Result<Self, Self::Error> {
Self::from_text(text)
}
}
impl TryFrom<String> for QualifiedName {
type Error = failure::Error;
fn try_from(text: String) -> Result<Self, Self::Error> {
Self::from_text(text)
}
}
impl TryFrom<&String> for QualifiedName {
type Error = failure::Error;
fn try_from(text: &String) -> Result<Self, Self::Error> {
Self::from_text(text)
}
}
impl From<QualifiedName> for String {
fn from(name: QualifiedName) -> Self {
String::from(&name)
}
}
impl From<&QualifiedName> for String {
fn from(name: &QualifiedName) -> Self {
name.to_string()
}
}
impl<Segments: AsRef<[ImString]>> Display for QualifiedNameTemplate<Segments> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.segments().format(ACCESS))
}
}
// === Conversion Between Name Representations ===
impl From<project::QualifiedName> for QualifiedName {
fn from(project: project::QualifiedName) -> Self {
Self::new_main(project)
}
}
impl From<QualifiedName> for NamePath {
fn from(qualified: QualifiedName) -> Self {
qualified.into_iter().collect()
}
}
impl<'a> From<&'a QualifiedName> for NamePath {
fn from(qualified: &'a QualifiedName) -> Self {
qualified.segments().cloned().collect()
}
}
impl<'a, 'b> From<&'a QualifiedNameRef<'b>> for NamePath {
fn from(qualified: &'a QualifiedNameRef<'b>) -> Self {
qualified.segments().cloned().collect()
}
}
impl<'a> From<&'a QualifiedName> for QualifiedNameRef<'a> {
fn from(qualified: &'a QualifiedName) -> Self {
qualified.as_ref()
}
}
// === Conversion Into Iterator ===
impl<'a, 'b> IntoIterator for &'a QualifiedNameRef<'b> {
type Item = &'a ImString;
type IntoIter = impl Iterator<Item = &'a ImString>;
fn into_iter(self) -> Self::IntoIter {
self.segments()
}
}
impl<'a> IntoIterator for &'a QualifiedName {
type Item = &'a ImString;
type IntoIter = impl Iterator<Item = &'a ImString>;
fn into_iter(self) -> Self::IntoIter {
self.segments()
}
}
impl IntoIterator for QualifiedName {
type Item = ImString;
type IntoIter = impl Iterator<Item = ImString>;
fn into_iter(self) -> Self::IntoIter {
iter::once(self.project.namespace).chain(iter::once(self.project.project)).chain(self.path)
}
}
// === Comparing Various Name Representations ===
impl<S1: AsRef<[ImString]>, S2: AsRef<[ImString]>> PartialEq<QualifiedNameTemplate<S1>>
for QualifiedNameTemplate<S2>
{
fn eq(&self, other: &QualifiedNameTemplate<S1>) -> bool {
self.project == other.project && self.path.as_ref() == other.path.as_ref()
}
}
impl<Segments: AsRef<[ImString]>> Eq for QualifiedNameTemplate<Segments> {}
impl<Segments: AsRef<[ImString]>> PartialEq<project::QualifiedName>
for QualifiedNameTemplate<Segments>
{
fn eq(&self, other: &project::QualifiedName) -> bool {
self.project == *other && self.path.as_ref().is_empty()
}
}
impl<Segments: AsRef<[ImString]>> PartialEq<NamePath> for QualifiedNameTemplate<Segments> {
fn eq(&self, other: &NamePath) -> bool {
self.segments().eq(other.iter())
}
}
impl<Segments: AsRef<[ImString]>> PartialEq<QualifiedNameTemplate<Segments>> for NamePath {
fn eq(&self, other: &QualifiedNameTemplate<Segments>) -> bool {
other == self
}
}
impl<'a, Segments: AsRef<[ImString]>> PartialEq<NamePathRef<'a>>
for QualifiedNameTemplate<Segments>
{
fn eq(&self, other: &NamePathRef<'a>) -> bool {
self.segments().eq(other.iter())
}
}
impl<Segments: AsRef<[ImString]>> PartialOrd for QualifiedNameTemplate<Segments> {
fn partial_cmp(&self, other: &QualifiedNameTemplate<Segments>) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<Segments: AsRef<[ImString]>> Ord for QualifiedNameTemplate<Segments> {
fn cmp(&self, other: &QualifiedNameTemplate<Segments>) -> Ordering {
self.project.cmp(&other.project).then(self.path.as_ref().cmp(other.path.as_ref()))
}
}


@ -1,238 +0,0 @@
//! The structures related to the project name in the code.
use crate::prelude::*;
use crate::name::InvalidQualifiedName;
use crate::name::NamePath;
use crate::name::NamePathRef;
use ast::opr::predefined::ACCESS;
use const_format::concatcp;
use enso_prelude::serde_reexports::Deserialize;
use enso_prelude::serde_reexports::Serialize;
// =================
// === Constants ===
// =================
/// The namespace of the standard library.
pub const STANDARD_NAMESPACE: &str = "Standard";
/// The name of the project in the [`STANDARD_NAMESPACE`] containing the base standard library.
pub const BASE_LIBRARY_NAME: &str = "Base";
/// The full path of the [`BASE_LIBRARY_NAME`] project in the [`STANDARD_NAMESPACE`].
pub const STANDARD_BASE_LIBRARY_PATH: &str = concatcp!(STANDARD_NAMESPACE, ".", BASE_LIBRARY_NAME);
// ================
// === Template ===
// ================
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug, Fail)]
pub enum InvalidTemplateName {
#[fail(display = "The template name contains invalid characters.")]
ContainsInvalidCharacters,
}
/// The project template name.
#[derive(Clone, Debug)]
pub struct Template {
name: String,
}
impl Template {
/// Create the project template from a string.
///
/// # Example
///
/// ```rust
/// # use double_representation::name::project::Template;
/// assert!(Template::from_text("hello").is_ok());
/// assert!(Template::from_text("hello_world").is_err());
/// ```
pub fn from_text(text: impl AsRef<str>) -> FallibleResult<Self> {
if text.as_ref().contains(|c: char| !c.is_ascii_alphanumeric()) {
Err(InvalidTemplateName::ContainsInvalidCharacters.into())
} else {
Ok(Template { name: text.as_ref().to_owned() })
}
}
/// Create the project template from a string, without validation.
pub fn unsafe_from_text(text: impl AsRef<str>) -> Self {
Template { name: text.as_ref().to_owned() }
}
/// Create a project name from the template name.
///
/// # Example
///
/// ```rust
/// # use double_representation::name::project::Template;
/// let template = Template::unsafe_from_text("hello");
/// assert_eq!(template.to_project_name(), "Hello".to_owned());
/// ```
pub fn to_project_name(&self) -> String {
let mut name = self.name.to_string();
// Capitalize
if let Some(r) = name.get_mut(0..1) {
r.make_ascii_uppercase();
}
name
}
}
// === Conversions From and Into String ===
impl TryFrom<&str> for Template {
type Error = failure::Error;
fn try_from(text: &str) -> Result<Self, Self::Error> {
Self::from_text(text)
}
}
impl TryFrom<String> for Template {
type Error = failure::Error;
fn try_from(text: String) -> Result<Self, Self::Error> {
Self::from_text(text)
}
}
impl From<Template> for String {
fn from(template: Template) -> Self {
String::from(&template.name)
}
}
impl From<&Template> for String {
fn from(template: &Template) -> Self {
template.name.to_owned()
}
}
impl Display for Template {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.name)
}
}
// =====================
// === QualifiedName ===
// =====================
/// The project qualified name has the form `<namespace_name>.<project_name>`. It serves as
/// a prefix for the qualified names of other entities (modules, types, etc.).
#[allow(missing_docs)]
#[derive(
Clone,
CloneRef,
Debug,
Default,
Deserialize,
Eq,
Hash,
Ord,
PartialEq,
PartialOrd,
Serialize
)]
#[serde(into = "String")]
#[serde(try_from = "String")]
pub struct QualifiedName {
pub namespace: ImString,
pub project: ImString,
}
impl QualifiedName {
/// Create qualified name from components.
pub fn new(namespace: impl Into<ImString>, project: impl Into<ImString>) -> Self {
Self { namespace: namespace.into(), project: project.into() }
}
/// Create from a text representation. May fail if the text is not a valid project qualified
/// name.
pub fn from_text(text: impl AsRef<str>) -> FallibleResult<Self> {
let source = text.as_ref();
let all_segments = source.split(ACCESS).collect_vec();
match all_segments.as_slice() {
[namespace, project] => Ok(Self::new(namespace, project)),
[] => Err(InvalidQualifiedName::EmptyName.into()),
[_] => Err(InvalidQualifiedName::TooFewSegments.into()),
_ => Err(InvalidQualifiedName::TooManySegments.into()),
}
}
/// The iterator over the name's segments: the namespace and the project name.
pub fn segments(&self) -> impl Iterator<Item = &ImString> {
iter::once(&self.namespace).chain(iter::once(&self.project))
}
/// Return the fully qualified name of the [`BASE_LIBRARY_NAME`] project in the
/// [`STANDARD_NAMESPACE`].
pub fn standard_base_library() -> Self {
Self::new(STANDARD_NAMESPACE, BASE_LIBRARY_NAME)
}
}
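// A minimal sketch (assumption, not from the original file): parsing a project qualified
// name succeeds only for exactly two segments.
#[cfg(test)]
mod qualified_name_sketch {
    use super::*;

    #[test]
    fn parsing_requires_two_segments() {
        let name = QualifiedName::from_text("ns.Project").unwrap();
        assert_eq!(name.namespace, "ns");
        assert_eq!(name.project, "Project");
        assert!(QualifiedName::from_text("ns").is_err());
        assert!(QualifiedName::from_text("ns.Project.Extra").is_err());
    }
}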
// === Conversions From and Into String ===
impl TryFrom<&str> for QualifiedName {
type Error = failure::Error;
fn try_from(text: &str) -> Result<Self, Self::Error> {
Self::from_text(text)
}
}
impl TryFrom<String> for QualifiedName {
type Error = failure::Error;
fn try_from(text: String) -> Result<Self, Self::Error> {
Self::from_text(text)
}
}
impl From<QualifiedName> for String {
fn from(name: QualifiedName) -> Self {
String::from(&name)
}
}
impl From<&QualifiedName> for String {
fn from(name: &QualifiedName) -> Self {
name.to_string()
}
}
impl Display for QualifiedName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}.{}", self.namespace, self.project)
}
}
// === Comparing with NamePath ===
impl<'a> PartialEq<NamePathRef<'a>> for QualifiedName {
fn eq(&self, other: &NamePathRef<'a>) -> bool {
match other {
[first, second] => &self.namespace == first && &self.project == second,
_ => false,
}
}
}
impl PartialEq<NamePath> for QualifiedName {
fn eq(&self, other: &NamePath) -> bool {
*self == other.as_slice()
}
}


@ -1,879 +0,0 @@
//! Code for node discovery and other node-related tasks.
use crate::prelude::*;
use crate::context_switch::ContextSwitchExpression;
use crate::definition::ScopeKind;
use crate::LineKind;
use ast::crumbs::Crumbable;
use ast::enumerate_non_empty_lines;
use ast::known;
use ast::macros::skip_and_freeze::prefix_macro_body;
use ast::macros::skip_and_freeze::prepend_with_macro;
use ast::macros::skip_and_freeze::preserving_skip;
use ast::macros::skip_and_freeze::preserving_skip_and_freeze;
use ast::macros::skip_and_freeze::without_macros;
use ast::macros::skip_and_freeze::MacrosInfo;
use ast::macros::skip_and_freeze::FREEZE_MACRO_IDENTIFIER;
use ast::macros::skip_and_freeze::SKIP_MACRO_IDENTIFIER;
use ast::macros::DocumentationCommentInfo;
use ast::macros::DocumentationCommentLine;
use ast::Ast;
use ast::BlockLine;
use std::cmp::Ordering;
/// Node Id is the AST ID attached to the node's expression.
pub type Id = ast::Id;
// =============
// === Error ===
// =============
#[allow(missing_docs)]
#[derive(Clone, Copy, Fail, Debug)]
#[fail(display = "Node with ID {} was not found.", id)]
pub struct IdNotFound {
pub id: Id,
}
/// Indices of lines belonging to a node.
#[derive(Clone, Copy, Debug)]
pub struct NodeLocation {
/// Documentation comment line index, if present.
pub documentation_line: Option<usize>,
/// The main line is the line that contains the node's expression.
pub main_line: usize,
}
impl PartialEq for NodeLocation {
fn eq(&self, other: &Self) -> bool {
self.partial_cmp(other) == Some(Ordering::Equal)
}
}
impl PartialOrd for NodeLocation {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.main_line.partial_cmp(&other.main_line)
}
}
impl NodeLocation {
/// Index for the first line belonging to the node.
pub fn first(&self) -> usize {
self.documentation_line.unwrap_or(self.main_line)
}
/// Index for the last line belonging to the node.
pub fn last(&self) -> usize {
self.main_line
}
/// Inclusive range between first and last node's lines.
///
/// Note that while a node can contain at most two lines, they may be separated by a number
/// of blank lines.
pub fn range(start: NodeLocation, last: NodeLocation) -> RangeInclusive<usize> {
start.first()..=last.last()
}
}
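// A short sketch (not from the original file) of `NodeLocation`: a documented node spans
// from its documentation line to its main line.
#[cfg(test)]
mod node_location_sketch {
    use super::*;

    #[test]
    fn documented_node_spans_both_lines() {
        let node = NodeLocation { documentation_line: Some(2), main_line: 3 };
        assert_eq!(node.first(), 2);
        assert_eq!(node.last(), 3);
        assert_eq!(NodeLocation::range(node, node), 2..=3);
    }
}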
// ===============
// === General ===
// ===============
/// Information about the node coupled with its location within a block.
#[derive(Clone, Debug, Deref)]
pub struct LocatedNode {
/// Line index in the block. Zero for inline definition nodes.
pub index: NodeLocation,
/// Information about the node.
#[deref]
pub node: NodeInfo,
}
/// Tests if the given line's contents can be seen as a node with the given id.
pub fn is_main_line_of(line: &BlockLine<Option<Ast>>, id: Id) -> bool {
let main_line = MainLine::from_block_line(line);
main_line.contains_if(|main_line| main_line.id() == id)
}
/// Searches `lines` for the node with the given `id`.
///
/// Returns an error if the id is not found.
pub fn locate<'a>(
lines: impl IntoIterator<Item = &'a BlockLine<Option<Ast>>> + 'a,
context_indent: usize,
id: Id,
) -> FallibleResult<LocatedNode> {
Ok(locate_many(lines, context_indent, [id])?.remove(&id).unwrap())
}
/// Obtain located node information for multiple nodes in a single pass.
///
/// If any of the looked-for nodes is not found, `Err` is returned.
/// Any `Ok(…)` return value is guaranteed to have a length equal to that of the `looked_for`
/// argument.
pub fn locate_many<'a>(
lines: impl IntoIterator<Item = &'a BlockLine<Option<Ast>>> + 'a,
context_indent: usize,
looked_for: impl IntoIterator<Item = Id>,
) -> FallibleResult<HashMap<ast::Id, LocatedNode>> {
let mut looked_for = looked_for.into_iter().collect::<HashSet<_>>();
let mut ret = HashMap::new();
// Skip empty lines, as they contain no nodes. However, the line indices are still important.
let lines_iter = enumerate_non_empty_lines(lines);
let nodes = NodeIterator { lines_iter, context_indent };
for node in nodes {
if looked_for.remove(&node.id()) {
ret.insert(node.id(), node);
}
if looked_for.is_empty() {
break;
}
}
if let Some(id) = looked_for.into_iter().next() {
Err(IdNotFound { id }.into())
} else {
Ok(ret)
}
}
// ===================
// === NodeAstInfo ===
// ===================
/// Information about the AST content of the node.
#[derive(Debug, Clone, PartialEq, Default)]
pub struct NodeAstInfo {
/// Information about SKIP and FREEZE macros used in the code.
pub macros_info: MacrosInfo,
/// Existing context switch expression, if any.
pub context_switch: Option<ContextSwitchExpression>,
}
impl NodeAstInfo {
/// Constructor.
pub fn from_ast(ast: &Ast) -> Self {
let macros_info = MacrosInfo::from_ast(ast);
let without_macros = without_macros(ast);
let context_switch = ContextSwitchExpression::parse(&without_macros);
Self { macros_info, context_switch }
}
/// The number of AST crumbs to bypass in order to reach the displayed AST node.
pub fn ast_crumbs_to_skip(&self) -> usize {
let skip_for_context_switch_expr = self.context_switch.as_ref().map_or(0, |_| 1);
let skip_for_macros = self.macros_info.macros_count();
skip_for_macros + skip_for_context_switch_expr
}
}
// ================
// === MainLine ===
// ================
/// Representation of the main line of the node (as opposed to a documentation line).
///
/// Each node must have exactly one main line.
/// The main line always contains an expression, either directly or under a binding. The
/// expression ID must be set, and it serves as the whole node's ID.
#[derive(Debug, Clone, Deref, DerefMut)]
pub struct MainLine {
/// Node AST, contains a node's expression and an optional pattern binding.
#[deref]
#[deref_mut]
pub ast: NodeAst,
/// Additional information about the AST.
pub ast_info: NodeAstInfo,
}
impl MainLine {
/// Tries to interpret the whole binding as a node. The right-hand side will become the node's
/// expression.
pub fn new_binding(infix: known::Infix) -> Option<MainLine> {
infix.rarg.id?;
let ast_info = NodeAstInfo::from_ast(&infix.rarg);
let ast = NodeAst::Binding { infix };
Some(Self { ast, ast_info })
}
/// Tries to interpret the AST as a node, treating the whole AST as an expression.
pub fn new_expression(ast: Ast) -> Option<MainLine> {
ast.id?;
let ast_info = NodeAstInfo::from_ast(&ast);
let ast = NodeAst::Expression { ast };
// TODO what if we are given an assignment.
Some(Self { ast, ast_info })
}
/// Tries to interpret the AST as a node, treating the whole AST as the node's primary line.
pub fn from_ast(ast: &Ast) -> Option<MainLine> {
// By definition, there are no nodes in the root scope. Since this is a node's line, we may
// assume that we are not in the root scope.
let scope = ScopeKind::NonRoot;
Self::from_discerned_line(LineKind::discern(ast, scope))
}
/// Try retrieving node information from an already discerned line data.
pub fn from_discerned_line(line: LineKind) -> Option<MainLine> {
match line {
LineKind::ExpressionPlain { ast } => Self::new_expression(ast),
LineKind::ExpressionAssignment { ast } => Self::new_binding(ast),
LineKind::Definition { .. } => None,
LineKind::DocumentationComment { .. } => None,
}
}
/// Tries to interpret the line's AST as a node's main line.
pub fn from_block_line(line: &BlockLine<Option<Ast>>) -> Option<MainLine> {
Self::from_ast(line.elem.as_ref()?)
}
}
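// A hedged sketch (assumptions: the `Ast::infix_var` helper as used in this crate's tests,
// and `Id::new_v4` being available since node ids are UUIDs): a line only becomes a
// `MainLine` once its expression bears an ID.
#[cfg(test)]
mod main_line_sketch {
    use super::*;

    #[test]
    fn expression_requires_an_id() {
        let expr = Ast::infix_var("a", "+", "b");
        // Without an ID the AST cannot serve as a node's main line.
        assert!(MainLine::new_expression(expr.clone()).is_none());
        // With an ID it can.
        let with_id = expr.with_id(Id::new_v4());
        assert!(MainLine::new_expression(with_id).is_some());
    }
}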
// ================
// === NodeInfo ===
// ================
/// Iterator over indexed line ASTs that yields nodes.
#[derive(Clone, Debug)]
pub struct NodeIterator<'a, T: Iterator<Item = (usize, BlockLine<&'a Ast>)> + 'a> {
/// Input iterator that yields pairs (line index, line's Ast).
pub lines_iter: T,
/// Absolute indent of lines in the block we are iterating over.
pub context_indent: usize,
}
impl<'a, T: Iterator<Item = (usize, BlockLine<&'a Ast>)> + 'a> Iterator for NodeIterator<'a, T> {
type Item = LocatedNode;
fn next(&mut self) -> Option<Self::Item> {
let mut indexed_documentation = None;
for (index, line) in &mut self.lines_iter {
match LineKind::discern(line.elem, ScopeKind::NonRoot) {
LineKind::DocumentationComment { documentation } => {
let doc_line = DocumentationCommentLine::from_doc_ast(documentation, line.off);
let documentation = DocumentationCommentInfo {
line: doc_line,
block_indent: self.context_indent,
};
indexed_documentation = Some((index, documentation));
}
LineKind::Definition { .. } => {
// A non-node entity consumes any preceding documentation.
indexed_documentation = None;
}
line =>
if let Some(main_line) = MainLine::from_discerned_line(line) {
let (documentation_line, documentation) = match indexed_documentation {
Some((index, documentation)) => (Some(index), Some(documentation)),
None => (None, None),
};
let node = NodeInfo { documentation, main_line };
let index = NodeLocation { main_line: index, documentation_line };
return Some(LocatedNode { index, node });
},
}
}
None
}
}
/// Information about a node, including both its main line (i.e. the line with the expression)
/// and an optionally attached documentation comment.
#[derive(Clone, Debug, Deref, DerefMut)]
pub struct NodeInfo {
/// If the node has a doc comment attached, it is represented here.
pub documentation: Option<DocumentationCommentInfo>,
/// Primary node AST that contains node's expression and optional pattern binding.
#[deref]
#[deref_mut]
pub main_line: MainLine,
}
impl NodeInfo {
/// Check if a given non-empty line's AST belongs to this node.
pub fn contains_line(&self, line_ast: &Ast) -> bool {
// TODO refactor these two lambdas into methods
let expression_id_matches =
|| MainLine::from_ast(line_ast).as_ref().map(|ml| ml.id()).contains(&self.id());
let doc_comment_id_matches = || match (self.doc_comment_id(), line_ast.id) {
(Some(node_doc_id), Some(line_ast_id)) => node_doc_id == line_ast_id,
_ => false,
};
expression_id_matches() || doc_comment_id_matches()
}
/// Get the AST ID of the line with the node's documentation comment (if present).
pub fn doc_comment_id(&self) -> Option<ast::Id> {
self.documentation.as_ref().and_then(|comment| comment.ast().id())
}
/// Construct node information for a single line, without documentation.
pub fn from_main_line_ast(ast: &Ast) -> Option<Self> {
let main_line = MainLine::from_ast(ast)?;
let documentation = None;
Some(Self { documentation, main_line })
}
/// Obtain documentation text.
pub fn documentation_text(&self) -> Option<ImString> {
self.documentation.as_ref().map(|doc| doc.pretty_text())
}
/// The info about macro calls in the expression.
pub fn macros_info(&self) -> &MacrosInfo {
&self.main_line.ast_info.macros_info
}
/// Modify the AST, adding or removing the `SKIP` macro call. Does nothing if the [`skip`]
/// argument already matches the inner state.
pub fn set_skip(&mut self, skip: bool) {
if skip != self.macros_info().skip {
if skip {
self.main_line.add_skip_macro();
} else {
self.main_line.remove_skip_macro();
}
self.main_line.ast_info.macros_info.skip = skip;
}
}
/// Modify the AST, adding or removing the `FREEZE` macro call. Does nothing if the [`freeze`]
/// argument already matches the inner state.
pub fn set_freeze(&mut self, freeze: bool) {
if freeze != self.macros_info().freeze {
if freeze {
self.main_line.add_freeze_macro();
} else {
self.main_line.remove_freeze_macro();
}
self.main_line.ast_info.macros_info.freeze = freeze;
}
}
/// Clear the pattern (the left-hand side of the assignment) for the node.
///
/// If it is already an Expression node, no change is made.
pub fn clear_pattern(&mut self) {
self.main_line.clear_pattern();
}
/// Add a context switch expression to the node. Replaces the existing one, if any.
pub fn set_context_switch(&mut self, context_switch_expr: ContextSwitchExpression) {
if self.main_line.ast_info.context_switch.is_some() {
self.clear_context_switch_expression();
}
self.main_line.modify_expression(|ast| {
*ast = preserving_skip_and_freeze(ast, |ast| {
let prefix = context_switch_expr.to_ast();
let infix = ast::Infix {
larg: prefix,
loff: 1,
opr: ast::opr::right_assoc().into(),
roff: 1,
rarg: ast.clone(),
};
*ast = infix.into();
});
});
self.main_line.ast_info.context_switch = Some(context_switch_expr);
}
/// Remove existing context switch expression from the node.
pub fn clear_context_switch_expression(&mut self) {
if self.main_line.ast_info.context_switch.is_some() {
self.main_line.modify_expression(|ast| {
*ast = preserving_skip_and_freeze(ast, |ast| {
if ContextSwitchExpression::parse(ast).is_some() {
let crumb = ast::crumbs::InfixCrumb::RightOperand.into();
let rarg = ast.get(&crumb).unwrap_or(ast);
*ast = rarg.clone();
}
});
});
self.main_line.ast_info.context_switch = None;
}
}
}
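// A hedged usage sketch of `NodeAst::set_pattern` (defined below): giving a pattern to an
// expression node introduces the `=` assignment infix. `Ast::var` is assumed to exist, as
// do the repr conventions used in this crate's other tests.
#[cfg(test)]
mod set_pattern_sketch {
    use super::*;

    #[test]
    fn expression_node_gains_a_binding() {
        let mut node = NodeAst::Expression { ast: Ast::infix_var("a", "+", "b") };
        node.set_pattern(Ast::var("sum"));
        assert_eq!(node.ast().repr(), "sum = a + b");
        assert!(matches!(node, NodeAst::Binding { .. }));
    }
}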
/// AST of a single node.
#[derive(Clone, Debug)]
#[allow(missing_docs)]
pub enum NodeAst {
/// Code with assignment, e.g. `foo = 2 + 2`
Binding { infix: known::Infix },
/// Code without assignment (no variable binding), e.g. `2 + 2`.
Expression { ast: Ast },
}
impl NodeAst {
/// Node's unique ID.
pub fn id(&self) -> Id {
// A panic must not happen, as all available constructors check that
// an ID is present.
self.whole_expression().id.expect("Node AST must bear an ID")
}
/// Updates the node's AST so the node bears the given ID.
pub fn set_id(&mut self, new_id: Id) {
match self {
NodeAst::Binding { ref mut infix, .. } => {
let new_rarg = infix.rarg.with_id(new_id);
let set = infix.set(&ast::crumbs::InfixCrumb::RightOperand.into(), new_rarg);
*infix = set.expect(
"Internal error: setting infix operand should always \
succeed.",
);
}
NodeAst::Expression { ref mut ast, .. } => {
*ast = ast.with_id(new_id);
}
};
}
/// Represents the visible portion of a node's expression. This excludes SKIP and FREEZE macro
/// calls, as well as any context switch expressions.
pub fn expression(&self) -> Ast {
let ast = without_macros(self.whole_expression());
ContextSwitchExpression::without_expression(&ast)
}
/// AST of the node's expression. Typically no external user wants to access it directly. Use
/// [`Self::expression`] instead.
pub fn whole_expression(&self) -> &Ast {
match self {
NodeAst::Binding { infix, .. } => &infix.rarg,
NodeAst::Expression { ast, .. } => ast,
}
}
/// AST of the node's pattern (assignment's left-hand side).
pub fn pattern(&self) -> Option<&Ast> {
match self {
NodeAst::Binding { infix, .. } => Some(&infix.larg),
NodeAst::Expression { .. } => None,
}
}
/// Set AST of the node's expression. Maintains ID.
pub fn set_expression(&mut self, expression: Ast) {
self.modify_expression(move |ast| {
*ast = preserving_skip_and_freeze(ast, |ast| *ast = expression.clone());
});
}
/// The whole AST of the node.
pub fn ast(&self) -> &Ast {
match self {
NodeAst::Binding { infix, .. } => infix.into(),
NodeAst::Expression { ast, .. } => ast,
}
}
/// Set the pattern (the left side of the assignment) for the node. If it is an Expression
/// node, the assignment infix will be introduced.
pub fn set_pattern(&mut self, pattern: Ast) {
match self {
NodeAst::Binding { infix, .. } => {
// Setting infix operand never fails.
infix.update_shape(|infix| infix.larg = pattern);
}
NodeAst::Expression { ast, .. } => {
let infix = ast::Infix {
larg: pattern,
loff: 1,
opr: Ast::opr("="),
roff: 1,
rarg: ast.clone(),
};
let infix = known::Infix::new(infix, None);
*self = NodeAst::Binding { infix };
}
}
}
/// Modify expression, preserving the AST ID.
fn modify_expression(&mut self, f: impl FnOnce(&mut Ast)) {
let id = self.id();
match self {
Self::Binding { infix, .. } => {
infix.update_shape(|infix| f(&mut infix.rarg));
}
Self::Expression { ast, .. } => f(ast),
}
self.set_id(id);
}
/// Add [`SKIP`] macro call to the AST. Preserves the expression ID and [`FREEZE`] macro calls.
fn add_skip_macro(&mut self) {
self.modify_expression(|ast| {
prepend_with_macro(ast, SKIP_MACRO_IDENTIFIER);
});
}
/// Remove [`SKIP`] macro call from the AST. Preserves the expression ID and [`FREEZE`] macro
/// calls.
fn remove_skip_macro(&mut self) {
self.modify_expression(|ast| {
*ast = prefix_macro_body(ast);
});
}
/// Add [`FREEZE`] macro call to the AST. Preserves the expression ID and [`SKIP`] macro calls.
fn add_freeze_macro(&mut self) {
self.modify_expression(|ast| {
*ast = preserving_skip(ast, |ast| prepend_with_macro(ast, FREEZE_MACRO_IDENTIFIER));
});
}
/// Remove [`FREEZE`] macro call from the AST. Preserves the expression ID and [`SKIP`] macro
/// calls.
fn remove_freeze_macro(&mut self) {
self.modify_expression(|ast| {
*ast = preserving_skip(ast, |ast| {
*ast = prefix_macro_body(ast);
});
});
}
/// See [`NodeInfo::clear_pattern`]. Preserves the [`MacrosInfo`].
pub fn clear_pattern(&mut self) {
match self {
NodeAst::Binding { infix } =>
*self = NodeAst::Expression { ast: infix.rarg.clone_ref() },
NodeAst::Expression { .. } => {}
}
}
}
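// A minimal sketch (added for illustration, not in the original sources): per the
// documentation above, [`NodeAst::set_pattern`] turns a pattern-less expression node into a
// binding while keeping the expression's id.
#[cfg(test)]
mod node_ast_sketch {
    use super::*;

    #[test]
    fn introducing_a_binding() {
        let id = uuid::Uuid::new_v4();
        let mut node = NodeAst::Expression { ast: Ast::var("bar").with_id(id) };
        node.set_pattern(Ast::var("foo"));
        assert_eq!(node.ast().repr(), "foo = bar");
        assert!(matches!(node, NodeAst::Binding { .. }));
        assert_eq!(node.id(), id);
    }
}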
impl ast::HasTokens for NodeAst {
fn feed_to(&self, consumer: &mut impl ast::TokenConsumer) {
self.ast().feed_to(consumer)
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use crate::context_switch::Context;
use crate::context_switch::ContextSwitch;
use super::*;
use ast::opr::predefined::ASSIGNMENT;
const ENABLE_CONTEXT: &str = "Standard.Base.Runtime.with_enabled_context";
const DISABLE_CONTEXT: &str = "Standard.Base.Runtime.with_disabled_context";
const OUTPUT_CONTEXT: &str = "Standard.Base.Runtime.Context.Output";
fn expect_node(ast: Ast, expression_text: &str, id: Id) {
let node_info = NodeInfo::from_main_line_ast(&ast).expect("expected a node");
assert_eq!(node_info.expression().repr(), expression_text);
assert_eq!(node_info.id(), id);
}
#[test]
fn expression_node_test() {
// expression: `4`
let id = Id::new_v4();
let ast = Ast::new(ast::Number { base: None, int: "4".into() }, Some(id));
expect_node(ast, "4", id);
}
#[test]
fn binding_node_test() {
// expression: `foo = 4`
let id = Id::new_v4();
let number = ast::Number { base: None, int: "4".into() };
let larg = Ast::var("foo");
let rarg = Ast::new(number, Some(id));
let ast = Ast::infix(larg, ASSIGNMENT, rarg);
expect_node(ast, "4", id);
}
#[test]
fn set_expression_binding() {
let ast = Ast::infix(Ast::var("foo"), "=", Ast::number(4).with_new_id());
assert_eq!(ast.repr(), "foo = 4");
let mut node = NodeInfo::from_main_line_ast(&ast).expect("expected a node");
let id = node.id();
node.set_expression(Ast::var("bar"));
assert_eq!(node.expression().repr(), "bar");
assert_eq!(node.ast().repr(), "foo = bar");
assert_eq!(node.id(), id);
}
#[test]
fn set_expression_plain() {
let ast = Ast::number(4).with_new_id();
assert_eq!(ast.repr(), "4");
let mut node = NodeInfo::from_main_line_ast(&ast).expect("expected a node");
let id = node.id();
node.set_expression(Ast::var("bar"));
assert_eq!(node.expression().repr(), "bar");
assert_eq!(node.ast().repr(), "bar");
assert_eq!(node.id(), id);
}
#[test]
fn clearing_pattern_test() {
// expression: `foo = 4`
let id = Id::new_v4();
let number = ast::Number { base: None, int: "4".into() };
let larg = Ast::var("foo");
let rarg = Ast::new(number, Some(id));
let ast = Ast::infix(larg, ASSIGNMENT, rarg);
let mut node = NodeInfo::from_main_line_ast(&ast).unwrap();
assert_eq!(node.repr(), "foo = 4");
assert_eq!(node.id(), id);
node.clear_pattern();
assert_eq!(node.repr(), "4");
assert_eq!(node.id(), id);
node.clear_pattern();
assert_eq!(node.repr(), "4");
assert_eq!(node.id(), id);
}
#[test]
fn setting_pattern_on_expression_node_test() {
let id = uuid::Uuid::new_v4();
let line_ast = Ast::number(2).with_id(id);
let mut node = NodeInfo::from_main_line_ast(&line_ast).unwrap();
assert_eq!(node.repr(), "2");
assert_eq!(node.id(), id);
node.set_pattern(Ast::var("foo"));
assert_eq!(node.repr(), "foo = 2");
assert_eq!(node.id(), id);
}
#[test]
fn setting_pattern_on_binding_node_test() {
let id = uuid::Uuid::new_v4();
let larg = Ast::var("foo");
let rarg = Ast::var("bar").with_id(id);
let line_ast = Ast::infix(larg, ASSIGNMENT, rarg);
let mut node = NodeInfo::from_main_line_ast(&line_ast).unwrap();
assert_eq!(node.repr(), "foo = bar");
assert_eq!(node.id(), id);
node.set_pattern(Ast::var("baz"));
assert_eq!(node.repr(), "baz = bar");
assert_eq!(node.id(), id);
}
#[test]
fn adding_skip_macro_test() {
let id = uuid::Uuid::new_v4();
let larg = Ast::var("foo");
let rarg = Ast::var("bar").with_id(id);
let line_ast = Ast::infix(larg, ASSIGNMENT, rarg);
let mut node = NodeInfo::from_main_line_ast(&line_ast).unwrap();
assert_eq!(node.repr(), "foo = bar");
assert_eq!(node.id(), id);
node.set_skip(true);
assert_eq!(node.repr(), format!("foo = {SKIP_MACRO_IDENTIFIER} bar"));
assert_eq!(node.id(), id);
node.set_skip(false);
assert_eq!(node.repr(), format!("foo = bar"));
assert_eq!(node.id(), id);
}
#[test]
fn adding_freeze_macro_test() {
let id = uuid::Uuid::new_v4();
let larg = Ast::var("foo");
let rarg = Ast::var("bar").with_id(id);
let line_ast = Ast::infix(larg, ASSIGNMENT, rarg);
let mut node = NodeInfo::from_main_line_ast(&line_ast).unwrap();
assert_eq!(node.repr(), "foo = bar");
assert_eq!(node.id(), id);
node.set_freeze(true);
assert_eq!(node.repr(), format!("foo = {FREEZE_MACRO_IDENTIFIER} bar"));
assert_eq!(node.id(), id);
node.set_freeze(false);
assert_eq!(node.repr(), format!("foo = bar"));
assert_eq!(node.id(), id);
}
#[test]
fn mixing_skip_and_freeze_macros_test() {
let id = uuid::Uuid::new_v4();
let larg = Ast::var("foo");
let rarg = Ast::var("bar").with_id(id);
let line_ast = Ast::infix(larg, ASSIGNMENT, rarg);
let mut node = NodeInfo::from_main_line_ast(&line_ast).unwrap();
assert_eq!(node.repr(), "foo = bar");
node.set_skip(true);
assert_eq!(node.repr(), format!("foo = {SKIP_MACRO_IDENTIFIER} bar"));
node.set_freeze(true);
assert_eq!(
node.repr(),
format!("foo = {SKIP_MACRO_IDENTIFIER} {FREEZE_MACRO_IDENTIFIER} bar")
);
node.set_freeze(false);
assert_eq!(node.repr(), format!("foo = {SKIP_MACRO_IDENTIFIER} bar"));
node.set_skip(false);
assert_eq!(node.repr(), format!("foo = bar"));
assert_eq!(node.id(), id);
}
#[test]
fn execution_context_switch_expressions() {
let node = || {
let id = uuid::Uuid::new_v4();
let larg = Ast::var("foo");
let rarg = Ast::var("bar").with_id(id);
let line_ast = Ast::infix(larg, ASSIGNMENT, rarg);
let node = NodeInfo::from_main_line_ast(&line_ast).unwrap();
assert_eq!(node.repr(), "foo = bar");
assert!(node.main_line.ast_info.context_switch.is_none());
node
};
fn test_round_trip(
mut node: NodeInfo,
expected: &str,
context_switch: ContextSwitchExpression,
) {
let original_repr = node.repr();
node.set_context_switch(context_switch.clone());
assert_eq!(node.main_line.ast_info.context_switch, Some(context_switch));
assert_eq!(node.repr(), expected);
node.clear_context_switch_expression();
assert_eq!(node.repr(), original_repr);
assert!(node.main_line.ast_info.context_switch.is_none());
}
let expected = format!("foo = {ENABLE_CONTEXT} {OUTPUT_CONTEXT} \"design\" <| bar");
test_round_trip(node(), &expected, ContextSwitchExpression {
switch: ContextSwitch::Enable,
context: Context::Output,
environment: "design".into(),
});
let expected = format!("foo = {DISABLE_CONTEXT} {OUTPUT_CONTEXT} \"design\" <| bar");
test_round_trip(node(), &expected, ContextSwitchExpression {
switch: ContextSwitch::Disable,
context: Context::Output,
environment: "design".into(),
});
let expected = format!("foo = {ENABLE_CONTEXT} {OUTPUT_CONTEXT} \"env\" <| bar");
test_round_trip(node(), &expected, ContextSwitchExpression {
switch: ContextSwitch::Enable,
context: Context::Output,
environment: "env".into(),
});
}
#[test]
fn mixing_skip_and_freeze_and_context_switch() {
// TODO: Doesn't work because of the broken parsing for SKIP and FREEZE.
// See https://github.com/enso-org/enso/issues/5572
//
// let parser = Parser::new();
// let line = format!("foo = {SKIP_MACRO_IDENTIFIER} {FREEZE_MACRO_IDENTIFIER}
// {ENABLE_CONTEXT} {OUTPUT_CONTEXT} \"env\" <| bar")
// let ast = parser.parse_line_ast(line).unwrap();
// let node = NodeInfo::from_main_line_ast(&ast).unwrap();
// assert!(node.ast_info. context_switch.is_some());
// assert!(node.ast_info.macros_info.skip);
// assert!(node.ast_info.macros_info.freeze);
let foo_bar = || {
let id = uuid::Uuid::new_v4();
let larg = Ast::var("foo");
let rarg = Ast::var("bar").with_id(id);
let line_ast = Ast::infix(larg, ASSIGNMENT, rarg);
NodeInfo::from_main_line_ast(&line_ast).unwrap()
};
let context_switch = || format!("{ENABLE_CONTEXT} {OUTPUT_CONTEXT} \"env\"");
#[derive(Debug)]
struct Case {
skip: bool,
freeze: bool,
}
#[rustfmt::skip]
let cases =
vec![
Case { skip: true, freeze: false },
Case { skip: false, freeze: true },
Case { skip: true, freeze: true },
];
for case in cases {
let mut node = foo_bar();
node.set_skip(case.skip);
node.set_freeze(case.freeze);
node.set_context_switch(ContextSwitchExpression {
switch: ContextSwitch::Enable,
context: Context::Output,
environment: "env".into(),
});
let expected = format!(
"foo = {skip}{space}{freeze} {context_switch} <| bar",
space = if case.skip && case.freeze { " " } else { "" },
skip = if case.skip { SKIP_MACRO_IDENTIFIER } else { "" },
freeze = if case.freeze { FREEZE_MACRO_IDENTIFIER } else { "" },
context_switch = context_switch(),
);
assert_eq!(node.repr(), expected, "{case:?}");
assert!(node.main_line.ast_info.context_switch.is_some());
node.clear_context_switch_expression();
assert!(node.main_line.ast_info.context_switch.is_none());
let expected = format!(
"foo = {skip}{space}{freeze} bar",
space = if case.skip && case.freeze { " " } else { "" },
skip = if case.skip { SKIP_MACRO_IDENTIFIER } else { "" },
freeze = if case.freeze { FREEZE_MACRO_IDENTIFIER } else { "" },
);
assert_eq!(node.repr(), expected, "{case:?}");
}
}
}


@ -1,10 +0,0 @@
//! Module contains refactorings implemented on the IDE side.
// ==============
// === Export ===
// ==============
pub mod collapse;
pub use collapse::collapse;


@ -1,548 +0,0 @@
//! Module with logic for node collapsing.
//!
//! See the [`collapse`] function for details.
use crate::prelude::*;
use crate::connection::Connection;
use crate::connection::Endpoint;
use crate::definition;
use crate::definition::DefinitionInfo;
use crate::graph::GraphInfo;
use crate::identifier::Identifier;
use crate::node;
use crate::node::MainLine;
use crate::node::NodeInfo;
use ast::crumbs::Located;
use ast::BlockLine;
use parser::Parser;
use std::collections::BTreeSet;
// ====================
// === Collapse API ===
// ====================
// === Entry point ===
/// Run the "collapse node" refactoring. Generates output describing how to apply the refactoring.
///
/// "Collapsing nodes" means extracting a number of selected nodes from a graph into a new sibling
/// method definition. In place of them a new node shall be placed that invokes the method.
///
/// Connections coming into the extracted nodes are translated into the method's arguments, and
/// connections leaving the extracted nodes are treated as function outputs. Currently only one
/// output is supported, so an extracted function can return at most one unique identifier.
pub fn collapse(
graph: &GraphInfo,
selected_nodes: impl IntoIterator<Item = node::Id>,
name: Identifier,
parser: &Parser,
module_name: String,
) -> FallibleResult<Collapsed> {
Collapser::new(graph.clone(), selected_nodes, parser.clone_ref(), module_name)?.collapse(name)
}
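// An illustrative end-to-end sketch (added for this description; names such as `extracted`
// are arbitrary): collapsing the first two nodes of a definition, wired up the same way as
// the `Case` fixture in the tests at the end of this file.
#[cfg(test)]
mod collapse_usage_sketch {
    use super::*;
    use crate::definition::DefinitionName;
    use crate::module;

    #[test]
    fn collapse_first_two_nodes() {
        let parser = Parser::new();
        let code = "main =\n    a = 1\n    b = a + 1\n    b + 1";
        let ast = parser.parse_module(code, default()).unwrap();
        let main = module::locate_child(&ast, &DefinitionName::new_plain("main")).unwrap();
        let graph = GraphInfo::from_definition(main.item.clone());
        let selected = graph.nodes().iter().take(2).map(|node| node.id()).collect_vec();
        let name = Identifier::try_from("extracted").unwrap();
        let collapsed = collapse(&graph, selected, name, &parser, "Main".to_owned()).unwrap();
        // The refactored definition now calls the new sibling method.
        assert!(collapsed.updated_definition.ast.repr().contains("Main.extracted"));
    }
}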
// === Collapsed ===
/// Result of running node collapse algorithm. Describes update to the refactored definition.
#[derive(Clone, Debug)]
pub struct Collapsed {
/// New contents of the refactored definition.
pub updated_definition: DefinitionInfo,
/// Contents of the new definition that should be placed next to the refactored one.
pub new_method: definition::ToAdd,
/// Identifier of the collapsed node in the updated definition.
pub collapsed_node: node::Id,
}
// === Errors ===
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "At least one node must be selected for collapsing refactoring.")]
pub struct NoNodesSelected;
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Fail)]
#[fail(
display = "Internal refactoring error: Endpoint refers to node {} that cannot be resolved.",
_0
)]
pub struct CannotResolveEndpointNode(node::Id);
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Internal refactoring error: Cannot generate collapsed node description.")]
pub struct CannotConstructCollapsedNode;
#[allow(missing_docs)]
#[derive(Clone, Debug, Fail)]
#[fail(display = "Internal refactoring error: Cannot resolve identifier for the endpoint {:?}", _0)]
pub struct EndpointIdentifierCannotBeResolved(Endpoint);
#[allow(missing_docs)]
#[derive(Clone, Debug, Fail)]
#[fail(
display = "Currently collapsing nodes is supported only when there would be at most one \
output from the collapsed function. Found more than one output: `{}` and `{}`.",
_0, _1
)]
pub struct MultipleOutputIdentifiers(String, String);
// ===================
// === GraphHelper ===
// ===================
/// Helper that stores the refactored graph information and provides methods for its processing.
#[derive(Clone, Debug)]
pub struct GraphHelper {
/// The graph of definition where the node collapsing takes place.
info: GraphInfo,
/// All the nodes in the graph. Cached for performance.
nodes: Vec<NodeInfo>,
}
impl GraphHelper {
/// Create a helper for the given graph.
pub fn new(graph: GraphInfo) -> Self {
GraphHelper { nodes: graph.nodes(), info: graph }
}
/// Get the information about the node described by the given ID.
pub fn lookup_node(&self, id: node::Id) -> FallibleResult<&NodeInfo> {
let found = self.nodes.iter().find(|node| node.id() == id);
found.ok_or_else(|| CannotResolveEndpointNode(id).into())
}
/// Get the identifier constituting a connection's endpoint.
pub fn endpoint_identifier(&self, endpoint: &Endpoint) -> FallibleResult<Identifier> {
let node = self.lookup_node(endpoint.node)?;
let err = || EndpointIdentifierCannotBeResolved(endpoint.clone()).into();
let endpoint_ast = node.ast().get_traversing(&endpoint.port.crumbs)?.clone_ref();
Identifier::new(endpoint_ast).ok_or_else(err)
}
/// Get the variable form of the identifier for the given connection.
pub fn connection_variable(&self, connection: &Connection) -> FallibleResult<Identifier> {
self.endpoint_identifier(&connection.source)
}
/// Rewrite the lines of the refactored definition by calling the given functor for each line.
pub fn rewrite_definition(
&self,
line_rewriter: impl Fn(&BlockLine<Option<Ast>>) -> FallibleResult<LineDisposition>,
) -> FallibleResult<DefinitionInfo> {
let mut updated_definition = self.info.source.clone();
let mut new_lines = Vec::new();
for line in updated_definition.block_lines() {
match line_rewriter(&line)? {
LineDisposition::Keep => new_lines.push(line),
LineDisposition::Remove => {}
LineDisposition::Replace(ast) => new_lines.push(BlockLine::new(Some(ast))),
}
}
updated_definition.set_block_lines(new_lines)?;
Ok(updated_definition)
}
}
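// A minimal sketch (added for illustration): an identity pass through
// [`GraphHelper::rewrite_definition`] that keeps every line, which should round-trip this
// simple definition unchanged. Real callers return `Remove` or `Replace` for the lines they
// refactor, as [`Collapser::rewrite_line`] below does.
#[cfg(test)]
mod rewrite_definition_sketch {
    use super::*;
    use crate::definition::DefinitionName;
    use crate::module;

    #[test]
    fn identity_rewrite_keeps_the_definition() {
        let parser = Parser::new();
        let ast = parser.parse_module("main =\n    a = 1\n    a", default()).unwrap();
        let main = module::locate_child(&ast, &DefinitionName::new_plain("main")).unwrap();
        let helper = GraphHelper::new(GraphInfo::from_definition(main.item.clone()));
        let rewritten = helper.rewrite_definition(|_line| Ok(LineDisposition::Keep)).unwrap();
        assert_eq!(rewritten.ast.repr(), main.item.ast.repr());
    }
}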
// =================
// === Output ===
// =================
/// Describes the output of the extracted function.
#[derive(Clone, Debug)]
pub struct Output {
/// The node that introduces output variable.
pub node: node::Id,
/// The identifier from the extracted nodes that is used outside.
pub identifier: Identifier,
}
// =================
// === Extracted ===
// =================
/// Describes the nodes to be extracted into a new definition by collapsing.
#[derive(Clone, Debug)]
pub struct Extracted {
/// Identifiers used in the collapsed nodes from the outside scope.
inputs: Vec<Identifier>,
/// Information on the node that will act as the extracted function's output.
/// Currently we allow at most one output; this is to be revisited in the future.
output: Option<Output>,
/// Nodes that are being collapsed and extracted into a separate method.
extracted_nodes: Vec<NodeInfo>,
}
impl Extracted {
/// Collect the extracted node information.
pub fn new(
graph: &GraphHelper,
selected_nodes: impl IntoIterator<Item = node::Id>,
) -> FallibleResult<Self> {
let extracted_nodes_set: HashSet<_> = selected_nodes.into_iter().collect();
let extracted_nodes: Vec<_> = graph
.nodes
.iter()
.filter(|node| extracted_nodes_set.contains(&node.id()))
.cloned()
.collect();
// A leaf is an extracted node that has no outgoing connections.
let mut leaves = extracted_nodes_set.clone();
let mut inputs = Vec::new();
let mut output = None;
for connection in graph.info.connections() {
let starts_inside = extracted_nodes_set.contains(&connection.source.node);
let ends_inside = extracted_nodes_set.contains(&connection.target.node);
let identifier = graph.connection_variable(&connection)?;
leaves.remove(&connection.source.node);
if !starts_inside && ends_inside {
inputs.push(identifier)
} else if starts_inside && !ends_inside {
match output {
Some(Output { identifier: previous_identifier, .. })
if identifier != previous_identifier =>
{
let ident1 = identifier.to_string();
let ident2 = previous_identifier.to_string();
return Err(MultipleOutputIdentifiers(ident1, ident2).into());
}
Some(_) => {} // Ignore duplicate usage of the same identifier.
None => {
let node = connection.source.node;
output = Some(Output { node, identifier });
}
}
}
}
// If no output was found so far, it means that none of our nodes is used outside
// the extracted function. In such a case we return the value of an arbitrarily chosen leaf.
output = output.or_else(|| {
let output_leaf_id = leaves.into_iter().next()?;
let output_node = extracted_nodes.iter().find(|node| node.id() == output_leaf_id)?;
let identifier = Identifier::new(output_node.pattern()?.clone_ref())?;
let node = output_node.id();
Some(Output { node, identifier })
});
Ok(Self { inputs, output, extracted_nodes })
}
/// Check if the given line belongs to the selection (i.e. is extracted into a new method).
pub fn belongs_to_selection(&self, line_ast: &Ast) -> bool {
self.extracted_nodes.iter().any(|extracted_node| extracted_node.contains_line(line_ast))
}
/// Generate the AST of a line that needs to be appended to the extracted nodes' ASTs.
/// Returns `None` if there is no such need.
pub fn return_line(&self) -> Option<Ast> {
// To return the value we just name its identifier. But the expression needs a fresh ID.
self.output.as_ref().map(|out| out.identifier.with_new_id().into())
}
/// Generate the description for the new method's definition with the extracted nodes.
pub fn generate(&self, name: Identifier) -> definition::ToAdd {
let name = definition::DefinitionName::new_plain(name);
let inputs = self.inputs.iter().collect::<BTreeSet<_>>();
let return_line = self.return_line();
let mut selected_nodes_iter = self.extracted_nodes.iter().map(|node| node.ast().clone());
let body_head = selected_nodes_iter.next().unwrap();
let body_tail = selected_nodes_iter.chain(return_line).map(Some).collect();
let explicit_parameter_names = inputs.iter().map(|input| input.name().into()).collect();
definition::ToAdd { name, explicit_parameter_names, body_head, body_tail }
}
}
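// A sketch of the classification above (illustrative, not from the original sources): for
// the graph `a = 1; b = a + 1; b + 1`, collapsing `a` and `b` yields no inputs (nothing
// flows in from outside the selection) and `b` as the single output (it is used by `b + 1`).
#[cfg(test)]
mod extracted_sketch {
    use super::*;
    use crate::definition::DefinitionName;
    use crate::module;

    #[test]
    fn inputs_and_output_detection() {
        let parser = Parser::new();
        let code = "main =\n    a = 1\n    b = a + 1\n    b + 1";
        let ast = parser.parse_module(code, default()).unwrap();
        let main = module::locate_child(&ast, &DefinitionName::new_plain("main")).unwrap();
        let graph = GraphHelper::new(GraphInfo::from_definition(main.item.clone()));
        let selected = graph.nodes.iter().take(2).map(|node| node.id()).collect_vec();
        let extracted = Extracted::new(&graph, selected).unwrap();
        assert!(extracted.inputs.is_empty());
        assert_eq!(extracted.output.as_ref().unwrap().identifier.to_string(), "b");
    }
}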
// =================
// === Collapser ===
// =================
/// The collapser rewrites the refactored definition line-by-line. This enum describes the
/// action to be taken for a given line.
#[allow(missing_docs)]
#[derive(Clone, Debug)]
pub enum LineDisposition {
Keep,
Remove,
Replace(Ast),
}
/// Helper type that stores common data used by the collapsing algorithm and implements its logic.
#[derive(Clone, Debug)]
pub struct Collapser {
/// The graph of definition where the node collapsing takes place.
graph: GraphHelper,
/// Information about nodes that are extracted into a separate definition.
extracted: Extracted,
/// Which node from the refactored graph should be replaced with a call to the extracted method.
/// This only exists because we care about this node line's position (not its state).
replaced_node: node::Id,
parser: Parser,
/// Identifier of the node to be introduced as a result of collapsing.
collapsed_node: node::Id,
module_name: String,
}
impl Collapser {
/// Does some early pre-processing and gathers common data used in various parts of the
/// refactoring algorithm.
pub fn new(
graph: GraphInfo,
selected_nodes: impl IntoIterator<Item = node::Id>,
parser: Parser,
module_name: String,
) -> FallibleResult<Self> {
let graph = GraphHelper::new(graph);
let extracted = Extracted::new(&graph, selected_nodes)?;
let last_selected = extracted.extracted_nodes.iter().last().ok_or(NoNodesSelected)?.id();
let replaced_node = extracted.output.as_ref().map(|out| out.node).unwrap_or(last_selected);
let collapsed_node = node::Id::new_v4();
Ok(Collapser { graph, extracted, replaced_node, parser, collapsed_node, module_name })
}
/// Generate the expression that calls the extracted method definition.
///
/// Does not include any pattern for assigning the resulting value.
pub fn call_to_extracted(&self, extracted: &definition::ToAdd) -> FallibleResult<Ast> {
// TODO actually check that generated name is single-identifier
let mut target = extracted.name.clone();
target.extended_target.insert(0, Located::new_root(self.module_name.clone()));
let base = target.ast(&self.parser)?;
let args = extracted.explicit_parameter_names.iter().map(Ast::var);
let chain = ast::prefix::Chain::new(base, args);
Ok(chain.into_ast())
}
/// Assign one of three dispositions to each line of the refactored definition:
/// 1) Lines that are kept intact -- not belonging to selected nodes;
/// 2) Lines that are extracted and removed -- all selected nodes, except:
/// 3) The line that introduces the output of the extracted function (if present at all) -- its
///    expression shall be replaced with a call to the extracted function.
/// If the output of the extracted function is not used anywhere, its invocation is placed
/// where the last extracted line was.
pub fn rewrite_line(
&self,
line: &BlockLine<Option<Ast>>,
extracted_definition: &definition::ToAdd,
) -> FallibleResult<LineDisposition> {
let ast = match line.elem.as_ref() {
// We leave lines without nodes (blank lines) intact.
None => return Ok(LineDisposition::Keep),
Some(ast) => ast,
};
if !self.extracted.belongs_to_selection(ast) {
Ok(LineDisposition::Keep)
} else if MainLine::from_ast(ast).contains_if(|n| n.id() == self.replaced_node) {
let no_node_err = failure::Error::from(CannotConstructCollapsedNode);
let expression_ast = self.call_to_extracted(extracted_definition)?;
let main_line = MainLine::from_ast(&expression_ast).ok_or(no_node_err)?;
let mut new_node = NodeInfo { documentation: None, main_line };
new_node.set_id(self.collapsed_node);
if let Some(Output { identifier, .. }) = &self.extracted.output {
new_node.set_pattern(identifier.with_new_id().into())
}
Ok(LineDisposition::Replace(new_node.ast().clone_ref()))
} else {
Ok(LineDisposition::Remove)
}
}
/// Run the collapsing refactoring on this input.
pub fn collapse(&self, name: Identifier) -> FallibleResult<Collapsed> {
let new_method = self.extracted.generate(name);
let updated_definition =
self.graph.rewrite_definition(|line| self.rewrite_line(line, &new_method))?;
let collapsed_node = self.collapsed_node;
Ok(Collapsed { updated_definition, new_method, collapsed_node })
}
}
// ============
// === Test ===
// ============
#[cfg(test)]
mod tests {
use super::*;
use crate::definition::DefinitionName;
use crate::graph;
use crate::module;
use ast::crumbs::Crumb;
struct Case {
module_name: String,
refactored_name: DefinitionName,
introduced_name: Identifier,
initial_method_code: &'static str,
extracted_lines: Range<usize>,
expected_generated: &'static str,
expected_refactored: &'static str,
}
impl Case {
fn run(&self, parser: &Parser) {
let ast = parser.parse_module(self.initial_method_code, default()).unwrap();
let main = module::locate_child(&ast, &self.refactored_name).unwrap();
let graph = graph::GraphInfo::from_definition(main.item.clone());
let nodes = graph.nodes();
let run_internal = |selection: &Vec<node::Id>| {
ast::test_utils::assert_unique_ids(ast.as_ref());
let selection = selection.iter().copied();
let new_name = self.introduced_name.clone();
let module_name = self.module_name.clone();
let collapsed = collapse(&graph, selection, new_name, parser, module_name).unwrap();
let new_method = collapsed.new_method.ast(0, parser).unwrap();
let placement = module::Placement::Before(self.refactored_name.clone());
let new_main = &collapsed.updated_definition.ast;
info!("Generated method:\n{new_method}");
info!("Updated method:\n{new_method}");
let mut module = module::Info { ast: ast.clone_ref() };
let main_crumb = Crumb::from(main.crumb());
module.ast = module.ast.set(&main_crumb, new_main.ast().clone()).unwrap();
module.add_method(collapsed.new_method, placement, parser).unwrap();
ast::test_utils::assert_unique_ids(module.ast.as_ref());
info!("Updated method:\n{}", &module.ast);
assert_eq!(new_method.repr(), self.expected_generated);
assert_eq!(new_main.repr(), self.expected_refactored);
};
let extracted_lines = self.extracted_lines.clone();
// We run the case twice, with reversed node selection order. This way we ensure that the
// test isn't passing just because it got the selected nodes in some specific order.
// The refactoring is expected to behave the same, no matter what the order of the
// selected nodes is.
let mut selected_nodes =
nodes[extracted_lines].iter().map(|node| node.id()).collect_vec();
run_internal(&selected_nodes);
selected_nodes.reverse();
run_internal(&selected_nodes);
}
}
#[allow(unused_parens)] // False warning.
#[test]
fn test_collapse() {
let parser = Parser::new();
let module_name = "Main".to_owned();
let introduced_name = Identifier::try_from("custom_new").unwrap();
let refactored_name = DefinitionName::new_plain("custom_old");
let initial_method_code = r"custom_old =
a = 1
b = 2
c = A + B
d = a + b
c + 7";
let extracted_lines = 1..4;
let expected_generated = r"custom_new a =
b = 2
c = A + B
d = a + b
c";
let expected_refactored = r"custom_old =
a = 1
c = Main.custom_new a
c + 7";
let mut case = Case {
module_name,
refactored_name,
introduced_name,
initial_method_code,
extracted_lines,
expected_generated,
expected_refactored,
};
case.run(&parser);
// Check that refactoring a single assignment line:
// 1) Maintains the assignment and the introduced name for the value in the extracted
// method;
// 2) Extracted method returns the same value as extracted node;
// 3) That invocation appears in the extracted node's place and maintains assignment.
case.extracted_lines = 3..4;
case.expected_generated = r"custom_new a b =
d = a + b
d";
case.expected_refactored = r"custom_old =
a = 1
b = 2
c = A + B
d = Main.custom_new a b
c + 7";
case.run(&parser);
// Check that when refactoring a single non-assignment line:
// 1) the single extracted expression is an inline body of the generated method;
// 2) the invocation appears in the extracted node's place but has no assignment.
case.initial_method_code = r"custom_old =
a = 1
b = 2
c = A + B
a + b
c + 7";
case.extracted_lines = 3..4;
case.expected_generated = r"custom_new a b = a + b";
case.expected_refactored = r"custom_old =
a = 1
b = 2
c = A + B
Main.custom_new a b
c + 7";
case.run(&parser);
// Check that:
// 1) method with no arguments can be extracted;
// 2) method with result used multiple times can be extracted.
// 3) identifiers not defined in the refactored method (`d`) are not made into parameters.
case.initial_method_code = r"custom_old =
c = 50 + d
c + c + 10";
case.extracted_lines = 0..1;
case.expected_generated = r"custom_new =
c = 50 + d
c";
case.expected_refactored = r"custom_old =
c = Main.custom_new
c + c + 10";
case.run(&parser);
// Case reported in https://github.com/enso-org/ide/issues/1234
case.initial_method_code = r"custom_old =
number1 = 1
number2 = 2
range = number1.up_to number2
vector = range.to_vector";
case.extracted_lines = 2..4;
case.expected_generated = r"custom_new number1 number2 =
range = number1.up_to number2
vector = range.to_vector
vector";
case.expected_refactored = r"custom_old =
number1 = 1
number2 = 2
vector = Main.custom_new number1 number2";
case.run(&parser);
}
}


@ -1,50 +0,0 @@
//! General-purpose utilities for creating tests for double representation.
use crate::prelude::*;
use regex::Captures;
use regex::Match;
/// Helper type for markdown-defined test cases using the `regex` library.
/// When implementing a `Replacer`, `process_match` should be called on each match.
#[derive(Clone, Copy, Debug, Default)]
pub struct MarkdownProcessor {
markdown_bytes_consumed: usize,
}
impl MarkdownProcessor {
/// Convert index from marked to unmarked code.
fn marked_to_unmarked_index(&self, i: usize) -> usize {
assert!(self.markdown_bytes_consumed <= i);
i - self.markdown_bytes_consumed
}
/// Convert indices range from marked to unmarked code.
fn marked_to_unmarked_range(&self, range: Range<usize>) -> Range<usize> {
Range {
start: self.marked_to_unmarked_index(range.start),
end: self.marked_to_unmarked_index(range.end),
}
}
/// Assumes that the given match is the part of the capture that should be passed to the `dst`
/// string. Appends the `body` match contents to `dst` and returns its span in the unmarked text.
/// All characters in the capture that do not belong to `body` are considered markdown.
pub fn process_match(
&mut self,
captures: &Captures,
body: &Match,
dst: &mut String,
) -> Range<usize> {
let whole_match = captures.get(0).expect("Capture 0 should always be present.");
let bytes_to_body = body.start() - whole_match.start();
let bytes_after_body = whole_match.end() - body.end();
self.markdown_bytes_consumed += bytes_to_body;
let ret = self.marked_to_unmarked_range(body.range());
self.markdown_bytes_consumed += bytes_after_body;
dst.push_str(body.as_str());
ret
}
}
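// An illustrative sketch (added for this description, not part of the original sources):
// driving `process_match` by hand with `captures_iter` instead of a full `Replacer`
// implementation. The `«…»` marker syntax and the `body` capture name are assumptions made
// for this example only.
#[cfg(test)]
mod markdown_processor_sketch {
    use super::*;
    use regex::Regex;

    #[test]
    fn stripping_markers_and_recovering_spans() {
        let regex = Regex::new(r"«(?P<body>[^»]*)»").unwrap();
        let mut processor = MarkdownProcessor::default();
        let marked = "foo «bar» baz";
        let mut unmarked = String::new();
        let mut spans: Vec<Range<usize>> = Vec::new();
        let mut last_end = 0;
        for captures in regex.captures_iter(marked) {
            let whole = captures.get(0).unwrap();
            // Text between matches is not markdown, so it goes to `dst` directly.
            unmarked.push_str(&marked[last_end..whole.start()]);
            let body = captures.name("body").unwrap();
            spans.push(processor.process_match(&captures, &body, &mut unmarked));
            last_end = whole.end();
        }
        unmarked.push_str(&marked[last_end..]);
        assert_eq!(unmarked, "foo bar baz");
        // The returned span addresses the body in the *unmarked* text.
        assert_eq!(spans, vec![4..7]);
    }
}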


@ -1,356 +0,0 @@
//! A module with functions supporting work with the text representation of the language.
use crate::prelude::*;
use enso_text::index::*;
use enso_text::unit::*;
use ast::IdMap;
// ================
// === Text API ===
// ================
/// Update IdMap to reflect the recent code change.
pub fn apply_code_change_to_id_map(
id_map: &mut IdMap,
change: &enso_text::text::Change<Byte, String>,
code: &str,
) {
// TODO [mwu]
// The initial provisional algorithm received some changes to better behave in our typical
// editor use-cases, i.e. to keep a node's id when editing its expression. However, this came
// at the price of not properly keeping other sub-ids on parts of the node line.
// In the future, a better and cleaner algorithm will need to be provided, likely with a
// different API. Because of this expected rewrite and deeper restructuring, we don't really
// want to spend much time on refactoring this function right now, even if it could be made nicer.
let removed = &change.range.clone();
let inserted = change.text.as_str();
let new_code = change.applied(code).unwrap_or_else(|_| code.to_owned());
let non_white = |c: char| !c.is_whitespace();
let vector = &mut id_map.vec;
let inserted_size: ByteDiff = inserted.len().into();
info!("Old code:\n```\n{code}\n```");
info!("New code:\n```\n{new_code}\n```");
info!("Updating the ID map with the following text edit: {change:?}.");
// Remove all entries fully covered by the removed span.
vector.drain_filter(|(range, _)| removed.contains_range(range));
// If the edited section ends up being the trailing part of an AST node, this is how many bytes
// should be trimmed from the id. Precalculated, as it is constant in the loop below.
let to_trim_back: ByteDiff = {
let last_non_white = inserted.rfind(non_white);
let inserted_len = || inserted.len();
let length_to_last_non_white = |index| inserted.len() - index - 1;
last_non_white.map_or_else(inserted_len, length_to_last_non_white).into()
};
// As above but for the front side.
let to_trim_front: ByteDiff = {
let first_non_white = inserted.find(non_white);
first_non_white.unwrap_or(inserted.len()).into()
};
let inserted_non_white = inserted.chars().any(non_white);
// In case of collisions (when, after resizing spans, multiple ids for the same span are
// present), the mappings from this map will be preferred over other ones.
//
// This is needed for edits like `foo f` => `foo`: the earlier `foo` in `foo f` also has an
// id map entry; however, we want it to be consistently shadowed by the id of the whole App
// expression.
let mut preferred: HashMap<enso_text::Range<Byte>, ast::Id> = default();
for (range, id) in vector.iter_mut() {
let mut trim_front = false;
let mut trim_back = false;
let initial_range = *range;
info!("Processing @{range}: `{}`.", &code[*range]);
if range.start > removed.end {
debug!("Node after the edited region.");
// AST node starts after edited region — it will be simply shifted.
let between_range: enso_text::Range<Byte> = (removed.end..range.start).into();
let code_between = &code[between_range];
*range = range.moved_left(removed.size()).moved_right(inserted_size);
// If there are only spaces between the current AST symbol and the insertion, extend the
// symbol. This is for cases like a line with `foo ` being changed into `foo j`.
debug!("Between: `{code_between}`.");
if all_spaces(code_between) && inserted_non_white {
debug!("Will extend the node leftwards.");
range.start -= inserted_size + between_range.size();
trim_front = true;
}
} else if range.start >= removed.start {
// AST node starts inside the edited region. It does not have to end inside it.
debug!("Node overlapping with the end of the edited region.");
let removed_before = range.start - removed.start;
*range = range.moved_left(removed_before);
range.end -= removed.size() - removed_before;
range.end += inserted_size;
trim_front = true;
} else if range.end >= removed.start {
// AST node starts before the edited region and reaches (or possibly goes past) its end.
debug!("Node overlapping with the beginning of the edited region.");
if range.end <= removed.end {
trim_back = true;
}
let removed_chars = (range.end - removed.start).min(removed.size());
range.end -= removed_chars;
range.end += inserted_size;
} else {
debug!("Node before the edited region.");
// If there are only spaces between the current AST symbol and the insertion, extend the
// symbol. This is for cases like a line with `foo ` being changed into `foo j`.
let between_range: enso_text::Range<Byte> = (range.end..removed.start).into();
let between = &code[between_range];
if all_spaces(between) && inserted_non_white {
debug!("Will extend ");
range.end += between_range.size() + inserted_size;
trim_back = true;
}
}
if trim_front && to_trim_front > 0.byte_diff() {
range.start += to_trim_front;
debug!("Trimming front {} chars.", to_trim_front.as_usize());
}
if trim_back {
if to_trim_back > 0.byte_diff() {
range.end += -to_trim_back;
debug!("Trimming back {} chars.", to_trim_back.as_usize());
}
let new_repr = &new_code[*range];
// Trim trailing spaces
let space_count = spaces_size(new_repr.chars().rev());
let spaces_len: ByteDiff = (space_count.value * ' '.len_utf8()).into();
if spaces_len > 0.byte_diff() {
debug!("Additionally trimming {} trailing spaces.", space_count);
debug!("The would-be code: `{new_repr}`.");
range.end -= spaces_len;
}
}
// If we edited front or end of an AST node, its extended (or shrunk) span will be
// preferred.
if trim_front || trim_back {
preferred.insert(*range, *id);
}
let old_fragment = &code[initial_range];
let new_fragment = &new_code[*range];
info!(
"Processing for id {id}: {initial_range} ->\t{range}.\n\
Code: `{old_fragment}` => `{new_fragment}`"
);
}
// If non-preferred entry collides with the preferred one, remove the former.
vector.drain_filter(|(range, id)| {
preferred.get(range).map(|preferred_id| id != preferred_id).unwrap_or(false)
});
}
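// An illustrative sketch (added for this description): ids whose spans are fully covered by
// the removed range are dropped, per the `drain_filter` above. The markdown-driven tests
// below exercise the shifting and trimming cases thoroughly.
#[cfg(test)]
mod apply_change_sketch {
    use super::*;
    use ast::HasIdMap;
    use enso_prelude::default;
    use parser::Parser;

    #[test]
    fn removal_drops_covered_ids() {
        let parser = Parser::new();
        let code = "main =\n    foo = 2 + 2";
        let ast = parser.parse_module(code, default()).unwrap();
        let mut id_map = ast.id_map();
        let before = id_map.vec.len();
        // Delete the whole `2 + 2` expression; every id wholly inside it must go.
        let start: Byte = code.find("2 + 2").unwrap().into();
        let end: Byte = code.len().into();
        let change = enso_text::Change { range: enso_text::Range::new(start, end), text: String::new() };
        apply_code_change_to_id_map(&mut id_map, &change, code);
        assert!(id_map.vec.len() < before);
    }
}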
// ===============
// === Helpers ===
// ===============
/// Returns the count of the leading space characters in the given sequence.
fn spaces_size(itr: impl Iterator<Item = char>) -> Utf16CodeUnit {
itr.take_while(|c| *c == ' ').fold(0, |acc, _| acc + 1).into()
}
/// Checks if the given string slice contains only space characters.
fn all_spaces(text: &str) -> bool {
text.chars().all(|c| c == ' ')
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod test {
use super::*;
use crate::module;
use ast::HasIdMap;
use enso_prelude::default;
use parser::Parser;
use uuid::Uuid;
/// A sample text edit used to test "text api" properties.
///
/// See `from_markdown` constructor function for helper markdown description.
#[derive(Debug)]
struct Case {
/// The initial enso program code.
pub code: String,
/// The edit made to the initial code.
pub change: enso_text::Change<Byte, String>,
}
impl Case {
/// The markdown currently supports a single edit in the given code piece. It must be of the
/// form `«aa⎀bb»`, which reads "replace `aa` with `bb`".
fn from_markdown(marked_code: impl AsRef<str>) -> Case {
let marked_code = marked_code.as_ref();
let index_of = |c| marked_code.find(c);
const START: char = '«';
const INSERTION: char = '⎀';
const END: char = '»';
match (index_of(START), index_of(INSERTION), index_of(END)) {
(Some(start), insertion, Some(end)) => {
assert!(start < end, "Markdown markers discovered in wrong order.");
let erased_finish = insertion.unwrap_or(end);
let code = {
let prefix = &marked_code[..start];
let erased = &marked_code[start + START.len_utf8()..erased_finish];
let suffix = &marked_code[end + END.len_utf8()..];
String::from_iter([prefix, erased, suffix].iter().copied())
};
let inserted_code = insertion.map_or("", |insertion| {
&marked_code[insertion + INSERTION.len_utf8()..end]
});
let range_end = (erased_finish - START.len_utf8()).into();
let range = enso_text::Range::new(start.into(), range_end);
let change = enso_text::Change { range, text: inserted_code.to_string() };
Case { code, change }
}
_ => panic!("Invalid markdown in the marked code: {marked_code}."),
}
}
/// Code after applying the change
fn resulting_code(&self) -> String {
self.change.applied(&self.code).expect("Change removed range out of bounds")
}
/// Checks if the text operation described by this case keeps the node IDs intact.
///
/// See `assert_same_node_ids` for details.
fn assert_edit_keeps_main_node_ids(&self, parser: &Parser) {
let ast1 = parser.parse_module(&self.code, default()).unwrap();
let mut id_map = ast1.id_map();
apply_code_change_to_id_map(&mut id_map, &self.change, &self.code);
let code2 = self.resulting_code();
let ast2 = parser.parse_module(code2, id_map.clone()).unwrap();
self.assert_same_node_ids(&ast1, &ast2);
}
/// Checks that both module ASTs contain a `main` function with the same sequence of node
/// IDs, as returned by the `main_nodes` function.
fn assert_same_node_ids(&self, ast1: &ast::known::Module, ast2: &ast::known::Module) {
let ids1 = main_nodes(ast1);
let ids2 = main_nodes(ast2);
assert_eq!(ids1, ids2, "Node ids mismatch in {self:?}");
}
}
/// Pretty-prints the code of a module with a single function named `main`. The `lines`
/// argument should contain the unindented block lines of the `main` function.
fn to_main(lines: impl IntoIterator<Item: AsRef<str>>) -> String {
use std::fmt::Write;
let mut ret = "main = ".to_string();
for line in lines {
write!(ret, "\n {}", line.as_ref()).unwrap();
}
ret
}
/// Returns the IDs of nodes in the `main` function in their order of line appearance.
fn main_nodes(module: &ast::known::Module) -> Vec<Uuid> {
use crate::definition::*;
use crate::graph::GraphInfo;
let id = Id::new_plain_name("main");
let definition = module::get_definition(module, &id).unwrap();
let graph = GraphInfo::from_definition(definition);
let nodes = graph.nodes();
nodes.into_iter().map(|node| node.id()).collect()
}
#[test]
fn test_case_markdown() {
let case = Case::from_markdown("foo«aa⎀bb»c");
assert_eq!(case.code, "fooaac");
assert_eq!(case.change.text, "bb");
assert_eq!(case.change.range, 3.byte()..5.byte());
assert_eq!(case.resulting_code(), "foobbc");
let case = Case::from_markdown("foo«aa»c");
assert_eq!(case.code, "fooaac");
assert_eq!(case.change.text, "");
assert_eq!(case.change.range, 3.byte()..5.byte());
assert_eq!(case.resulting_code(), "fooc");
}
#[test]
fn applying_code_changes_to_id_map() {
let parser = Parser::new();
// All the cases describe an edit to the middle line of a three-line main definition.
let cases = [
"a = \"«⎀f»foo\"",
"a = \"«⎀ »foo\"",
"a = \"foo«⎀ »\"",
"a = \"foo«⎀f»\"",
"a = \"«f»foo\"",
"a = \"« »foo\"",
"a = \"foo« »\"",
"a = \"foo«f»\"",
"a = «f»foo",
"a = «⎀f»foo",
"a = «f»foo",
"a = «⎀ »foo",
"a = « »foo",
"a = «⎀f» foo",
"a = foo«⎀ »",
"a = foo«⎀\n»",
"a = foo «⎀\n»",
"a = foo «⎀j»",
"a = foo «j»",
"a = foo«⎀j»",
// Same as above but not in an assignment form
"\"«⎀f»foo\"",
"\"«⎀ »foo\"",
"\"foo«⎀ »\"",
"\"foo«⎀f»\"",
"\"«f»foo\"",
"\"« »foo\"",
"\"foo« »\"",
"\"foo«f»\"",
"«⎀f»foo",
"«f»foo",
// Commented out tests below would fail because of leading whitespace breaking the
// block structure.
// "«⎀ »foo",
// "« »foo",
// "«⎀f» foo",
"foo«⎀ »",
"foo«⎀\n»",
"foo «⎀\n»",
"foo «⎀j»",
"foo «j»",
"foo«⎀j»",
];
for case in cases.iter() {
let all_nodes = ["previous", case, "next"];
let main_def = to_main(all_nodes.iter());
let case = Case::from_markdown(main_def);
case.assert_edit_keeps_main_node_ids(&parser);
}
}
}


@ -1,8 +0,0 @@
[package]
name = "engine-model"
version = "0.1.0"
authors = ["Enso Team <contact@enso.org>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]


@ -1,16 +0,0 @@
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
let result = 2 + 2;
assert_eq!(result, 4);
}
}


@ -1,41 +0,0 @@
[package]
name = "engine-protocol"
version = "0.1.0"
authors = ["Enso Team <contact@enso.org>"]
edition = "2021"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
chrono = { version = "0.4", features = ["serde"] }
enso-data-structures = { path = "../../../../lib/rust/data-structures" }
enso-prelude = { path = "../../../../lib/rust/prelude" }
enso-shapely = { path = "../../../../lib/rust/shapely" }
enso-text = { path = "../../../../lib/rust/text" }
failure = { workspace = true }
flatbuffers = { version = "0.5" }
futures = { workspace = true }
hex = { version = "0.4.2" }
json-rpc = { path = "../../../../lib/rust/json-rpc" }
mockall = { version = "0.7.1", features = ["nightly"] }
serde = { workspace = true }
serde_json = { workspace = true }
sha3 = { version = "0.8.2" }
strum = { workspace = true }
strum_macros = "0.24.0"
uuid = { version = "0.8", features = ["serde", "v4", "wasm-bindgen"] }
[dev-dependencies]
wasm-bindgen-test = { workspace = true }
enso-web = { path = "../../../../lib/rust/web" }
[build-dependencies]
enso-build-utilities = { path = "../../../../build/deprecated/build-utils" }
bytes = { workspace = true }
flatc-rust = { version = "0.1.2" }
futures = { workspace = true }
reqwest = { workspace = true }
tokio = { workspace = true }
# Zip is needed because the build script downloads and extracts artifacts from the Engine.
zip = { version = "0.6.2", default-features = false, features = ["deflate"] }


@ -1,146 +0,0 @@
use std::io::prelude::*;
use std::env;
use std::fs;
use std::fs::File;
use std::path::PathBuf;
// =========================
// == Hardcoded constants ==
// =========================
/// The name of the zip archive containing the engine interface files.
const ZIP_NAME: &str = "fbs-schema.zip";
/// The directory structure inside the downloaded engine interface folder.
const ZIP_CONTENT: &str = "fbs-upload/fbs-schema/";
/// The commit from the `enso` repository that will be used to obtain artifacts.
/// If you change this commit manually, you must have `flatc` installed to regenerate the
/// interface files. Run `cargo build` to do so before creating a commit.
///
/// Refer to `contribution.md` for more guidance on setting up the development environment.
const COMMIT: &str = "0b363e3e85215aa0468f7ce8c17882f60f9284d9";
/// Currently, updating the flatc-generated files works purely on an opt-in basis. This script
/// does nothing unless the following environment variable is defined.
const ENABLE_ENV_VAR_NAME: &str = "ENSO_IDE_ENABLE_FLATC";
/// A URL pointing to the engine interface files.
pub fn interface_description_url() -> reqwest::Url {
let url = format!("https://packages.luna-lang.org/fbs-schema/nightly/{COMMIT}/fbs-schema.zip");
let err = format!("{url} is an invalid URL.");
reqwest::Url::parse(&url).expect(&err)
}
// ===================================
// == Download Engine Api Artifacts ==
// ===================================
/// Struct for downloading engine artifacts.
struct ApiProvider {
/// The path where downloaded artifacts will be stored.
out_dir: PathBuf,
}
impl ApiProvider {
/// Creates a provider that can download engine artifacts.
pub fn new() -> ApiProvider {
let out_dir = env::var("OUT_DIR").expect("The OUT_DIR environment variable is not set.").into();
ApiProvider { out_dir }
}
/// Downloads the API artifacts into memory.
pub async fn download(&self) -> bytes::Bytes {
let url = interface_description_url();
let get_error = format!("Failed to get response from {}", &url);
let download_error = format!("Failed to download contents of {}", &url);
let response = reqwest::get(url).await.expect(&get_error);
response.bytes().await.expect(&download_error)
}
/// Saves the unzipped artifacts into a file.
pub fn unzip(&self, artifacts: bytes::Bytes) {
let zip_path = self.out_dir.join(ZIP_NAME);
let display_path = zip_path.display();
let open_error = format!("Failed to open {display_path}");
let write_error = format!("Failed to write {display_path}");
let flush_error = format!("Failed to flush {display_path}");
let unzip_error = format!("Failed to unzip {display_path}");
let mut file = File::create(&zip_path).expect(&open_error);
file.write_all(&artifacts).expect(&write_error);
file.flush().expect(&flush_error);
let file = File::open(&zip_path).expect(&open_error);
let mut archive = zip::ZipArchive::new(&file).expect(&open_error);
archive.extract(&self.out_dir).expect(&unzip_error);
}
/// Generates Rust files from the FlatBuffers schemas.
pub fn generate_files(&self) {
let fbs_dir = self.out_dir.join(ZIP_CONTENT);
for entry in fs::read_dir(fbs_dir).expect("Could not read content of dir") {
let path = entry.expect("Invalid content of dir").path();
let result = flatc_rust::run(flatc_rust::Args {
inputs: &[&path],
out_dir: &PathBuf::from("./src/generated"),
..Default::default()
});
if result.is_err() {
println!(
"cargo:info=Engine API files were not regenerated because `flatc` isn't \
installed."
);
break;
}
}
}
/// Places required artifacts in the target location.
pub async fn run(&self) {
let fingerprint = self.out_dir.join("engine.api.fingerprint");
let unchanged = match fs::read_to_string(&fingerprint) {
Ok(commit) => commit == COMMIT,
Err(_) => false,
};
if unchanged {
return;
}
println!("cargo:info=Engine API artifacts version changed. Rebuilding.");
let artifacts = self.download().await;
self.unzip(artifacts);
self.generate_files();
fs::write(&fingerprint, COMMIT).expect("Unable to write artifacts fingerprint.");
}
}
// ==========
// == main ==
// ==========
#[tokio::main]
async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
// Regenerating bindings is now strictly opt-in, see: https://github.com/enso-org/ide/issues/644
if env::var(ENABLE_ENV_VAR_NAME).is_ok() {
let provider = ApiProvider::new();
provider.run().await;
} else {
println!(
"cargo:info=Will not try updating flatc-generated files. Define \
`{ENABLE_ENV_VAR_NAME}` environment variable to enable regeneration of the Engine API \
flatc bindings.",
);
}
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-env-changed={ENABLE_ENV_VAR_NAME}");
Ok(())
}


@ -1,19 +0,0 @@
//! Crate containing the Engine Services binary protocol interface.
// ==============
// === Export ===
// ==============
pub mod client;
pub mod connection;
pub mod message;
pub mod serialization;
pub mod uuid;
pub use client::Client;
pub use client::Event;
pub use client::MockAPI as MockClient;
pub use client::Notification;
pub use client::API;
pub use connection::Connection;


@ -1,397 +0,0 @@
//! Module defining the LS binary protocol client `API` and its two implementations: `Client`
//! and `MockClient`.
use crate::prelude::*;
use crate::binary::message::ErrorPayload;
use crate::binary::message::FromServerPayloadOwned;
use crate::binary::message::MessageFromServerOwned;
use crate::binary::message::MessageToServerRef;
use crate::binary::message::ToServerPayload;
use crate::binary::message::VisualizationContext;
use crate::common::error::UnexpectedMessage;
use crate::handler::Disposition;
use crate::handler::Handler;
use crate::language_server::types::Path;
use crate::types::Sha3_224;
use json_rpc::Transport;
use json_rpc::TransportEvent;
use mockall::automock;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Received a text message when expecting only the binary ones.")]
pub struct UnexpectedTextMessage;
/// Errors that can cause a remote call to fail.
pub type RpcError = json_rpc::error::RpcError<ErrorPayload>;
// ====================
// === Notification ===
// ====================
/// The notifications that the binary protocol client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// New data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notifications and errors.
///
/// On a repeated call, the previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else {
Err(RpcError::MismatchedResponseType.into())
}
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn processor(
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> + 'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before the client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport + 'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R: 'static,
F: 'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for the `Client` to be able to complete
/// requests and emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
}
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
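// A minimal usage sketch (illustrative only; it assumes some `transport` value implementing
// `Transport` is available and that a `LocalPool` executor is acceptable — the `sketch_connect`
// name is not part of the original API):
fn sketch_connect(transport: impl Transport + 'static) -> FallibleResult {
use futures::task::LocalSpawnExt;
let client = Client::new(transport);
let mut pool = futures::executor::LocalPool::new();
// Without a scheduled runner, no request would ever complete.
pool.spawner().spawn_local(client.runner()).unwrap();
// The session must be initialized before any other request is made.
pool.run_until(client.init(Uuid::new_v4()))
}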
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
// ===============
// === Fixture ===
// ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat the request, but this time answer with an error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is an error".to_string();
let mut mock_reply = MessageFromServer::new(FromServerPayloadOwned::Error {
code: mock_error_code,
message: mock_error_message,
data: None,
});
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
fut.expect_err();
}
#[test]
fn test_init() {
let client_id = Uuid::new_v4();
test_request(
|client| client.init(client_id),
(),
ToServerPayloadOwned::InitSession { client_id },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_write_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.write_file(&path, &data),
(),
ToServerPayloadOwned::WriteFile { contents: data.clone(), path: path.clone() },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_read_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.read_file(&path),
data.clone(),
ToServerPayloadOwned::ReadFile { path: path.clone() },
FromServerPayloadOwned::FileContentsReply { contents: data },
);
}
// =============================
// === Testing Notifications ===
// =============================
#[test]
fn test_visualization_update() {
let mut fixture = ClientFixture::new();
let mut event_fut = fixture.client.event_stream().into_future().boxed_local();
fixture.executor.run_until_stalled();
event_fut.expect_pending();
let context = VisualizationContext {
visualization_id: Uuid::new_v4(),
expression_id: Uuid::new_v4(),
context_id: Uuid::new_v4(),
};
let data = Vec::from("Hello".as_bytes());
let message = MessageFromServer::new(FromServerPayloadOwned::VisualizationUpdate {
data: data.clone(),
context,
});
message.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
let expected_notification = Notification::VisualizationUpdate { context, data };
let (event, tail) = event_fut.expect_ready();
match event.expect("Expected some notification.") {
Event::Notification(notification) => assert_eq!(notification, expected_notification),
event => panic!("Expected notification event, got: {event:?}"),
}
tail.boxed_local().expect_pending();
}
}


@@ -1,108 +0,0 @@
//! Module for utilities regarding establishing and storing the Language Server RPC connection.
use crate::prelude::*;
use crate::binary::MockClient;
use crate::binary::API;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Fail, Debug)]
#[fail(display = "Failed to initialize language server binary connection: {}", _0)]
pub struct FailedToInitializeProtocol(failure::Error);
// ==================
// === Connection ===
// ==================
/// An established, initialized connection to the language server's RPC endpoint.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct Connection {
/// The ID of the client.
pub client_id: Uuid,
/// LS client that has already initialized the protocol.
#[derivative(Debug = "ignore")]
pub client: Box<dyn API>,
}
impl Connection {
/// Takes a client and its ID, and initializes the protocol.
pub async fn new(client: impl API + 'static, client_id: Uuid) -> FallibleResult<Self> {
let init_response = client.init(client_id).await;
init_response.map_err(FailedToInitializeProtocol)?;
let client = Box::new(client);
Ok(Connection { client_id, client })
}
/// Creates a connection which wraps a mock client.
pub fn new_mock(client: MockClient) -> Connection {
Connection { client: Box::new(client), client_id: default() }
}
/// Creates an `Rc` handle to a connection which wraps a mock client.
pub fn new_mock_rc(client: MockClient) -> Rc<Connection> {
Rc::new(Self::new_mock(client))
}
}
impl Deref for Connection {
type Target = dyn API;
fn deref(&self) -> &Self::Target {
self.client.as_ref()
}
}
impl DerefMut for Connection {
fn deref_mut(&mut self) -> &mut Self::Target {
self.client.deref_mut()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::MockClient;
use futures::task::LocalSpawnExt;
use json_rpc::error::RpcError;
use mockall::predicate::*;
fn ready<T: 'static>(t: impl Into<T>) -> StaticBoxFuture<T> {
futures::future::ready(t.into()).boxed_local()
}
#[test]
fn test_connection() {
let case = async {
let client_id = Uuid::from_u128(159);
let mock_returning = |ret: FallibleResult| {
let mut mock = MockClient::new();
mock.expect_init().with(eq(client_id)).times(1).return_once(|_| ready(ret));
mock
};
let ok = Ok(());
assert!(Connection::new(mock_returning(ok), client_id).await.is_ok());
let err = Err(RpcError::new_remote_error(0, "ErrorMessage").into());
assert!(Connection::new(mock_returning(err), client_id).await.is_err());
};
let mut pool = futures::executor::LocalPool::new();
pool.spawner().spawn_local(case).unwrap();
pool.run();
}
}
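// Because `Connection` dereferences to `dyn API`, protocol methods can be called on it directly.
// A minimal sketch (illustrative only; it assumes an already established `connection`, and the
// `sketch_write` name and file contents are made up for the example):
async fn sketch_write(connection: &Connection) -> FallibleResult {
let path = crate::language_server::Path::new(Uuid::new_v4(), &["Main.enso"]);
connection.write_file(&path, b"main = 42").await
}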


@@ -1,196 +0,0 @@
//! Module defining types representing messages being sent between client and server.
//! They correspond to the types generated by the FlatBuffers compiler from our protocol schema.
use crate::prelude::*;
use crate::language_server::Path as LSPath;
use json_rpc::Transport;
// ===============
// === Aliases ===
// ===============
/// An owning representation of the message received from a server.
pub type MessageFromServerOwned = MessageFromServer<FromServerPayloadOwned>;
/// An owning representation of the message to be sent to the server.
pub type MessageToServerOwned = MessageToServer<ToServerPayloadOwned>;
/// A non-owning representation of the message received from a server.
pub type MessageFromServerRef<'a> = MessageFromServer<FromServerPayload<'a>>;
/// A non-owning representation of the message to be sent to the server.
pub type MessageToServerRef<'a> = MessageToServer<ToServerPayload<'a>>;
// ================
// === Newtypes ===
// ================
/// A message sent from client to server (`InboundMessage` in the spec).
#[derive(Clone, Debug, Deref, DerefMut)]
pub struct MessageToServer<T>(pub Message<T>);
impl<T> MessageToServer<T> {
/// Wraps the given payload into a message envelope. Generates a unique ID for the message.
pub fn new(payload: T) -> Self {
Self(Message::new(payload))
}
}
/// A message sent from server to client (`OutboundMessage` in the spec).
#[derive(Clone, Debug, Deref, DerefMut)]
pub struct MessageFromServer<T>(pub Message<T>);
impl<T> MessageFromServer<T> {
/// Wraps the given payload into a message envelope. Generates a unique ID for the message.
pub fn new(payload: T) -> Self {
Self(Message::new(payload))
}
}
// =============
// === Types ===
// =============
/// Identifies the visualization in the update message.
#[allow(missing_docs)]
#[derive(Clone, Debug, Copy, PartialEq, Eq)]
pub struct VisualizationContext {
pub visualization_id: Uuid,
pub context_id: Uuid,
pub expression_id: Uuid,
}
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug)]
pub enum ErrorPayload {
ReadOOB { file_length: u64 },
}
#[allow(missing_docs)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FileSegment {
pub path: LSPath,
pub byte_offset: u64,
pub length: u64,
}
#[allow(missing_docs)]
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct EnsoDigest {
pub bytes: Vec<u8>,
}
// ================
// === Payloads ===
// ================
#[allow(missing_docs)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ToServerPayloadOwned {
InitSession { client_id: Uuid },
WriteFile { path: LSPath, contents: Vec<u8> },
ReadFile { path: LSPath },
WriteBytes { path: LSPath, byte_offset: u64, overwrite: bool, bytes: Vec<u8> },
ReadBytes { segment: FileSegment },
ChecksumBytes { segment: FileSegment },
}
#[allow(missing_docs)]
#[derive(Clone, Debug)]
pub enum FromServerPayloadOwned {
Error { code: i32, message: String, data: Option<ErrorPayload> },
Success {},
VisualizationUpdate { context: VisualizationContext, data: Vec<u8> },
FileContentsReply { contents: Vec<u8> },
WriteBytesReply { checksum: EnsoDigest },
ReadBytesReply { checksum: EnsoDigest, bytes: Vec<u8> },
ChecksumBytesReply { checksum: EnsoDigest },
}
#[allow(missing_docs)]
#[derive(Clone, Debug)]
pub enum ToServerPayload<'a> {
InitSession {
client_id: Uuid,
},
WriteFile {
path: &'a LSPath,
contents: &'a [u8],
},
ReadFile {
path: &'a LSPath,
},
WriteBytes {
path: &'a LSPath,
byte_offset: u64,
overwrite: bool,
bytes: &'a [u8],
},
ReadBytes {
segment: &'a FileSegment,
},
ChecksumBytes {
segment: &'a FileSegment,
},
}
#[allow(missing_docs)]
#[derive(Clone, Debug)]
pub enum FromServerPayload<'a> {
Error { code: i32, message: &'a str, data: Option<ErrorPayload> },
Success {},
VisualizationUpdate { context: VisualizationContext, data: &'a [u8] },
FileContentsReply { contents: &'a [u8] },
WriteBytesReply { checksum: EnsoDigest },
ReadBytesReply { checksum: EnsoDigest, bytes: &'a [u8] },
ChecksumBytesReply { checksum: EnsoDigest },
}
// ===============
// === Message ===
// ===============
/// Common message envelope for binary protocol.
///
/// `T` should represent the payload.
#[derive(Clone, Debug)]
pub struct Message<T> {
/// Each message bears a unique ID.
pub message_id: Uuid,
/// When sending a reply, the server sets this to the request's `message_id`.
pub correlation_id: Option<Uuid>,
#[allow(missing_docs)]
pub payload: T,
}
impl<T> Message<T> {
/// Wraps the given payload into a message envelope. Generates a unique ID for the message.
/// Private, as users should use either `MessageToServer::new` or `MessageFromServer::new`.
fn new(payload: T) -> Message<T> {
Message { message_id: Uuid::new_v4(), correlation_id: None, payload }
}
}
impl<'a> crate::handler::IsRequest for MessageToServerRef<'a> {
type Id = Uuid;
fn send(&self, transport: &mut dyn Transport) -> FallibleResult {
self.with_serialized(|data| transport.send_binary(data))
}
fn id(&self) -> Self::Id {
self.message_id
}
}
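// A sketch of the envelope semantics: a reply correlates with its request by echoing the
// request's `message_id` in `correlation_id`. (Illustrative only; `sketch_reply_to` is not part
// of the protocol implementation.)
fn sketch_reply_to(request: &MessageToServerOwned) -> MessageFromServerOwned {
let mut reply = MessageFromServer::new(FromServerPayloadOwned::Success {});
reply.correlation_id = Some(request.message_id);
reply
}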


@@ -1,681 +0,0 @@
//! Code for converting between FlatBuffer-generated wrappers and our representation of the protocol
//! messages and their parts.
use crate::generated::binary_protocol_generated::org::enso::languageserver::protocol::binary::*;
use crate::prelude::*;
use crate::binary::message;
use crate::binary::message::FromServerPayload;
use crate::binary::message::FromServerPayloadOwned;
use crate::binary::message::Message;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServer;
use crate::binary::message::ToServerPayload;
use crate::binary::message::ToServerPayloadOwned;
use crate::common::error::DeserializationError;
use crate::language_server::types::Path as LSPath;
use flatbuffers::FlatBufferBuilder;
use flatbuffers::UnionWIPOffset;
use flatbuffers::WIPOffset;
// =================
// === Constants ===
// =================
/// The initial buffer size used when serializing a binary message.
/// Should be large enough to fit most of the messages we send, while remaining as small as
/// possible.
pub const INITIAL_BUFFER_SIZE: usize = 256;
// ==========================
// === SerializableObject ===
// ==========================
// === Trait ===
/// All entities that can be serialized to FlatBuffers and represented as offsets.
/// That includes tables and vectors, but not primitives, structs, or unions.
///
/// Supports both serialization and deserialization.
trait SerializableDeserializableObject<'a>: Sized {
/// The FlatBuffers-generated type representing this type.
type Out: Sized;
/// Writes this table to the buffer and returns its handle.
fn serialize(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset<Self::Out>;
/// Instantiates Self and reads the data from the FlatBuffers representation.
fn deserialize(fbs: Self::Out) -> Result<Self, DeserializationError>;
/// Instantiates Self and reads the data from the optional FlatBuffers representation.
/// Always fails if the representation is not present.
fn deserialize_required_opt(fbs: Option<Self::Out>) -> Result<Self, DeserializationError> {
let missing_expected = || DeserializationError("Missing expected field".to_string());
Self::deserialize(fbs.ok_or_else(missing_expected)?)
}
}
// === impl Vec<String> ===
impl<'a> SerializableDeserializableObject<'a> for Vec<String> {
type Out = flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<&'a str>>;
fn serialize(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset<Self::Out> {
let strs = self.iter().map(|s| s.as_str()).collect_vec();
builder.create_vector_of_strings(&strs)
}
fn deserialize(fbs: Self::Out) -> Result<Self, DeserializationError> {
let indices = 0..fbs.len();
Ok(indices.map(|ix| fbs.get(ix).to_string()).collect())
}
}
// === impl VisualizationContext ===
impl<'a> SerializableDeserializableObject<'a> for message::VisualizationContext {
type Out = VisualizationContext<'a>;
fn serialize(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset<Self::Out> {
VisualizationContext::create(builder, &VisualizationContextArgs {
visualizationId: Some(&self.visualization_id.into()),
expressionId: Some(&self.expression_id.into()),
contextId: Some(&self.context_id.into()),
})
}
fn deserialize(fbs: Self::Out) -> Result<Self, DeserializationError> {
Ok(message::VisualizationContext {
context_id: fbs.contextId().into(),
visualization_id: fbs.visualizationId().into(),
expression_id: fbs.expressionId().into(),
})
}
}
// === impl language server's Path ===
impl<'a> SerializableDeserializableObject<'a> for LSPath {
type Out = Path<'a>;
fn serialize(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset<Self::Out> {
let root_id = self.root_id.into();
let segments = Vec::serialize(&self.segments, builder);
Path::create(builder, &PathArgs { rootId: Some(&root_id), segments: Some(segments) })
}
fn deserialize(fbs: Self::Out) -> Result<Self, DeserializationError> {
let missing_root_id = || DeserializationError("Missing root ID".to_string());
let root_id = Uuid::from(fbs.rootId().ok_or_else(missing_root_id)?);
let segments = Vec::deserialize_required_opt(fbs.segments())?;
Ok(LSPath { root_id, segments })
}
}
// === impl File Segment ===
impl<'a> SerializableDeserializableObject<'a> for message::FileSegment {
type Out = FileSegment<'a>;
fn serialize(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset<Self::Out> {
let path = self.path.serialize(builder);
FileSegment::create(builder, &FileSegmentArgs {
path: Some(path),
byteOffset: self.byte_offset,
length: self.length,
})
}
fn deserialize(fbs: Self::Out) -> Result<Self, DeserializationError> {
let path = LSPath::deserialize(fbs.path())?;
let byte_offset = fbs.byteOffset();
let length = fbs.length();
Ok(Self { path, byte_offset, length })
}
}
// === impl EnsoDigest ===
impl<'a> SerializableDeserializableObject<'a> for message::EnsoDigest {
type Out = EnsoDigest<'a>;
fn serialize(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset<Self::Out> {
let bytes = builder.create_vector(&self.bytes);
EnsoDigest::create(builder, &EnsoDigestArgs { bytes: Some(bytes) })
}
fn deserialize(fbs: Self::Out) -> Result<Self, DeserializationError> {
let bytes = fbs.bytes().to_vec();
Ok(Self { bytes })
}
}
// =========================
// === SerializableUnion ===
// =========================
/// Trait for serialization of our types that the FlatBuffers schema represents as unions.
pub trait SerializableUnion: Sized {
/// Type of the FlatBuffers-generated enumeration with the variant index.
type EnumType;
/// Write the value to the builder and return a handle to the serialized union value.
fn serialize(&self, builder: &mut FlatBufferBuilder) -> Option<WIPOffset<UnionWIPOffset>>;
/// Obtain the index of the active variant.
fn active_variant(&self) -> Self::EnumType;
}
impl<'a> SerializableUnion for ToServerPayload<'a> {
type EnumType = InboundPayload;
fn serialize(&self, builder: &mut FlatBufferBuilder) -> Option<WIPOffset<UnionWIPOffset>> {
Some(match self {
ToServerPayload::InitSession { client_id } =>
InitSessionCommand::create(builder, &InitSessionCommandArgs {
identifier: Some(&client_id.into()),
})
.as_union_value(),
ToServerPayload::WriteFile { path, contents } => {
let path = path.serialize(builder);
let contents = builder.create_vector(contents);
WriteFileCommand::create(builder, &WriteFileCommandArgs {
path: Some(path),
contents: Some(contents),
})
.as_union_value()
}
ToServerPayload::ReadFile { path } => {
let path = path.serialize(builder);
ReadFileCommand::create(builder, &ReadFileCommandArgs { path: Some(path) })
.as_union_value()
}
ToServerPayload::ReadBytes { segment } => {
let segment = segment.serialize(builder);
ReadBytesCommand::create(builder, &ReadBytesCommandArgs { segment: Some(segment) })
.as_union_value()
}
ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes } => {
let path = path.serialize(builder);
let bytes = builder.create_vector(bytes);
WriteBytesCommand::create(builder, &WriteBytesCommandArgs {
byteOffset: *byte_offset,
path: Some(path),
overwriteExisting: *overwrite,
bytes: Some(bytes),
})
.as_union_value()
}
ToServerPayload::ChecksumBytes { segment } => {
let segment = segment.serialize(builder);
ChecksumBytesCommand::create(builder, &ChecksumBytesCommandArgs {
segment: Some(segment),
})
.as_union_value()
}
})
}
fn active_variant(&self) -> Self::EnumType {
match self {
ToServerPayload::InitSession { .. } => InboundPayload::INIT_SESSION_CMD,
ToServerPayload::WriteFile { .. } => InboundPayload::WRITE_FILE_CMD,
ToServerPayload::ReadFile { .. } => InboundPayload::READ_FILE_CMD,
ToServerPayload::WriteBytes { .. } => InboundPayload::WRITE_BYTES_CMD,
ToServerPayload::ReadBytes { .. } => InboundPayload::READ_BYTES_CMD,
ToServerPayload::ChecksumBytes { .. } => InboundPayload::CHECKSUM_BYTES_CMD,
}
}
}
impl SerializableUnion for ToServerPayloadOwned {
type EnumType = InboundPayload;
fn serialize(&self, builder: &mut FlatBufferBuilder) -> Option<WIPOffset<UnionWIPOffset>> {
Some(match self {
ToServerPayloadOwned::InitSession { client_id } =>
InitSessionCommand::create(builder, &InitSessionCommandArgs {
identifier: Some(&client_id.into()),
})
.as_union_value(),
ToServerPayloadOwned::WriteFile { path, contents } => {
let path = path.serialize(builder);
let contents = builder.create_vector(contents);
WriteFileCommand::create(builder, &WriteFileCommandArgs {
path: Some(path),
contents: Some(contents),
})
.as_union_value()
}
ToServerPayloadOwned::ReadFile { path } => {
let path = path.serialize(builder);
ReadFileCommand::create(builder, &ReadFileCommandArgs { path: Some(path) })
.as_union_value()
}
ToServerPayloadOwned::ReadBytes { segment } => {
let segment = segment.serialize(builder);
ReadBytesCommand::create(builder, &ReadBytesCommandArgs { segment: Some(segment) })
.as_union_value()
}
ToServerPayloadOwned::WriteBytes { path, byte_offset, overwrite, bytes } => {
let path = path.serialize(builder);
let bytes = builder.create_vector(bytes);
WriteBytesCommand::create(builder, &WriteBytesCommandArgs {
byteOffset: *byte_offset,
path: Some(path),
overwriteExisting: *overwrite,
bytes: Some(bytes),
})
.as_union_value()
}
ToServerPayloadOwned::ChecksumBytes { segment } => {
let segment = segment.serialize(builder);
ChecksumBytesCommand::create(builder, &ChecksumBytesCommandArgs {
segment: Some(segment),
})
.as_union_value()
}
})
}
fn active_variant(&self) -> Self::EnumType {
match self {
ToServerPayloadOwned::InitSession { .. } => InboundPayload::INIT_SESSION_CMD,
ToServerPayloadOwned::WriteFile { .. } => InboundPayload::WRITE_FILE_CMD,
ToServerPayloadOwned::ReadFile { .. } => InboundPayload::READ_FILE_CMD,
ToServerPayloadOwned::WriteBytes { .. } => InboundPayload::WRITE_BYTES_CMD,
ToServerPayloadOwned::ReadBytes { .. } => InboundPayload::READ_BYTES_CMD,
ToServerPayloadOwned::ChecksumBytes { .. } => InboundPayload::CHECKSUM_BYTES_CMD,
}
}
}
impl SerializableUnion for FromServerPayloadOwned {
type EnumType = OutboundPayload;
fn serialize(&self, builder: &mut FlatBufferBuilder) -> Option<WIPOffset<UnionWIPOffset>> {
Some(match self {
FromServerPayloadOwned::Success {} =>
Success::create(builder, &SuccessArgs {}).as_union_value(),
FromServerPayloadOwned::Error { code, message, data } => {
let message = builder.create_string(message);
let data_serialized = data.serialize(builder);
Error::create(builder, &ErrorArgs {
code: *code,
message: Some(message),
data_type: data.active_variant(),
data: data_serialized,
})
.as_union_value()
}
FromServerPayloadOwned::FileContentsReply { contents } => {
let contents = builder.create_vector(contents);
FileContentsReply::create(builder, &FileContentsReplyArgs {
contents: Some(contents),
})
.as_union_value()
}
FromServerPayloadOwned::VisualizationUpdate { data, context } => {
let data = builder.create_vector(data);
let context = context.serialize(builder);
VisualizationUpdate::create(builder, &VisualizationUpdateArgs {
data: Some(data),
visualizationContext: Some(context),
})
.as_union_value()
}
FromServerPayloadOwned::WriteBytesReply { checksum } => {
let checksum = checksum.serialize(builder);
WriteBytesReply::create(builder, &WriteBytesReplyArgs { checksum: Some(checksum) })
.as_union_value()
}
FromServerPayloadOwned::ReadBytesReply { checksum, bytes } => {
let bytes = builder.create_vector(bytes);
let checksum = checksum.serialize(builder);
ReadBytesReply::create(builder, &ReadBytesReplyArgs {
checksum: Some(checksum),
bytes: Some(bytes),
})
.as_union_value()
}
FromServerPayloadOwned::ChecksumBytesReply { checksum } => {
let checksum = checksum.serialize(builder);
ChecksumBytesReply::create(builder, &ChecksumBytesReplyArgs {
checksum: Some(checksum),
})
.as_union_value()
}
})
}
fn active_variant(&self) -> Self::EnumType {
match self {
FromServerPayloadOwned::Error { .. } => OutboundPayload::ERROR,
FromServerPayloadOwned::Success { .. } => OutboundPayload::SUCCESS,
FromServerPayloadOwned::FileContentsReply { .. } =>
OutboundPayload::FILE_CONTENTS_REPLY,
FromServerPayloadOwned::VisualizationUpdate { .. } =>
OutboundPayload::VISUALIZATION_UPDATE,
FromServerPayloadOwned::WriteBytesReply { .. } => OutboundPayload::WRITE_BYTES_REPLY,
FromServerPayloadOwned::ReadBytesReply { .. } => OutboundPayload::READ_BYTES_REPLY,
FromServerPayloadOwned::ChecksumBytesReply { .. } =>
OutboundPayload::CHECKSUM_BYTES_REPLY,
}
}
}
impl SerializableUnion for Option<message::ErrorPayload> {
type EnumType = ErrorPayload;
fn serialize(&self, builder: &mut FlatBufferBuilder) -> Option<WIPOffset<UnionWIPOffset>> {
Some(match self.as_ref()? {
message::ErrorPayload::ReadOOB { file_length } => {
let args = ReadOutOfBoundsErrorArgs { fileLength: *file_length };
ReadOutOfBoundsError::create(builder, &args).as_union_value()
}
})
}
fn active_variant(&self) -> Self::EnumType {
match self {
Some(message::ErrorPayload::ReadOOB { .. }) => ErrorPayload::READ_OOB,
None => ErrorPayload::NONE,
}
}
}
// ================================
// === DeserializableUnionField ===
// ================================
/// Unfortunately, the FlatBuffers-generated code includes union accessors in the parent type, so
/// we cannot generalize union field deserialization apart from the parent type.
///
/// `ParentType` should be a FlatBuffer-generated type that contains this union field.
pub trait DeserializableUnionField<'a, ParentType: 'a>: Sized {
/// Constructs deserialized representation from the value containing this union field.
fn deserialize(owner: ParentType) -> Result<Self, DeserializationError>;
}
impl<'a> DeserializableUnionField<'a, OutboundMessage<'a>> for FromServerPayload<'a> {
fn deserialize(message: OutboundMessage<'a>) -> Result<Self, DeserializationError> {
match message.payload_type() {
OutboundPayload::ERROR => {
let payload = message.payload_as_error().unwrap();
Ok(FromServerPayload::Error {
code: payload.code(),
message: payload.message(),
data: Option::<message::ErrorPayload>::deserialize(payload)?,
})
}
OutboundPayload::FILE_CONTENTS_REPLY => {
let payload = message.payload_as_file_contents_reply().unwrap();
Ok(FromServerPayload::FileContentsReply {
contents: payload.contents().unwrap_or_default(),
})
}
OutboundPayload::SUCCESS => Ok(FromServerPayload::Success {}),
OutboundPayload::VISUALIZATION_UPDATE => {
let payload = message.payload_as_visualization_update().unwrap();
let context = payload.visualizationContext();
Ok(FromServerPayload::VisualizationUpdate {
data: payload.data(),
context: message::VisualizationContext::deserialize(context)?,
})
}
OutboundPayload::WRITE_BYTES_REPLY => {
let payload = message.payload_as_write_bytes_reply().unwrap();
Ok(FromServerPayload::WriteBytesReply {
checksum: message::EnsoDigest::deserialize(payload.checksum())?,
})
}
OutboundPayload::READ_BYTES_REPLY => {
let payload = message.payload_as_read_bytes_reply().unwrap();
Ok(FromServerPayload::ReadBytesReply {
checksum: message::EnsoDigest::deserialize(payload.checksum())?,
bytes: payload.bytes(),
})
}
OutboundPayload::CHECKSUM_BYTES_REPLY => {
let payload = message.payload_as_checksum_bytes_reply().unwrap();
let checksum = message::EnsoDigest::deserialize(payload.checksum())?;
Ok(FromServerPayload::ChecksumBytesReply { checksum })
}
OutboundPayload::NONE => Err(DeserializationError(
"Received a message without payload. This is not allowed, \
according to the spec."
.into(),
)),
}
}
}
impl<'a> DeserializableUnionField<'a, InboundMessage<'a>> for ToServerPayloadOwned {
fn deserialize(message: InboundMessage<'a>) -> Result<Self, DeserializationError> {
match message.payload_type() {
InboundPayload::INIT_SESSION_CMD => {
let payload = message.payload_as_init_session_cmd().unwrap();
Ok(ToServerPayloadOwned::InitSession { client_id: payload.identifier().into() })
}
InboundPayload::WRITE_FILE_CMD => {
let payload = message.payload_as_write_file_cmd().unwrap();
Ok(ToServerPayloadOwned::WriteFile {
path: LSPath::deserialize_required_opt(payload.path())?,
contents: Vec::from(payload.contents().unwrap_or_default()),
})
}
InboundPayload::READ_FILE_CMD => {
let payload = message.payload_as_read_file_cmd().unwrap();
Ok(ToServerPayloadOwned::ReadFile {
path: LSPath::deserialize_required_opt(payload.path())?,
})
}
InboundPayload::NONE => Err(DeserializationError(
"Received a message without payload. This is not allowed, \
according to the spec."
.into(),
)),
InboundPayload::WRITE_BYTES_CMD => {
let payload = message.payload_as_write_bytes_cmd().unwrap();
Ok(ToServerPayloadOwned::WriteBytes {
path: LSPath::deserialize(payload.path())?,
byte_offset: payload.byteOffset(),
overwrite: payload.overwriteExisting(),
bytes: payload.bytes().to_vec(),
})
}
InboundPayload::READ_BYTES_CMD => {
let payload = message.payload_as_read_bytes_cmd().unwrap();
Ok(ToServerPayloadOwned::ReadBytes {
segment: message::FileSegment::deserialize(payload.segment())?,
})
}
InboundPayload::CHECKSUM_BYTES_CMD => {
let payload = message.payload_as_checksum_bytes_cmd().unwrap();
Ok(ToServerPayloadOwned::ChecksumBytes {
segment: message::FileSegment::deserialize(payload.segment())?,
})
}
}
}
}
impl<'a> DeserializableUnionField<'a, OutboundMessage<'a>> for FromServerPayloadOwned {
fn deserialize(message: OutboundMessage<'a>) -> Result<Self, DeserializationError> {
match message.payload_type() {
OutboundPayload::ERROR => {
let payload = message.payload_as_error().unwrap();
Ok(FromServerPayloadOwned::Error {
code: payload.code(),
message: payload.message().to_string(),
data: Option::<message::ErrorPayload>::deserialize(payload)?,
})
}
OutboundPayload::FILE_CONTENTS_REPLY => {
let payload = message.payload_as_file_contents_reply().unwrap();
Ok(FromServerPayloadOwned::FileContentsReply {
contents: Vec::from(payload.contents().unwrap_or_default()),
})
}
OutboundPayload::SUCCESS => Ok(FromServerPayloadOwned::Success {}),
OutboundPayload::VISUALIZATION_UPDATE => {
let payload = message.payload_as_visualization_update().unwrap();
let context = payload.visualizationContext();
Ok(FromServerPayloadOwned::VisualizationUpdate {
data: Vec::from(payload.data()),
context: message::VisualizationContext::deserialize(context)?,
})
}
OutboundPayload::WRITE_BYTES_REPLY => {
let payload = message.payload_as_write_bytes_reply().unwrap();
Ok(FromServerPayloadOwned::WriteBytesReply {
checksum: message::EnsoDigest::deserialize(payload.checksum())?,
})
}
OutboundPayload::READ_BYTES_REPLY => {
let payload = message.payload_as_read_bytes_reply().unwrap();
Ok(FromServerPayloadOwned::ReadBytesReply {
checksum: message::EnsoDigest::deserialize(payload.checksum())?,
bytes: payload.bytes().to_vec(),
})
}
OutboundPayload::CHECKSUM_BYTES_REPLY => {
let payload = message.payload_as_checksum_bytes_reply().unwrap();
Ok(FromServerPayloadOwned::ChecksumBytesReply {
checksum: message::EnsoDigest::deserialize(payload.checksum())?,
})
}
OutboundPayload::NONE => Err(DeserializationError(
"Received a message without payload. This is not allowed, \
according to the spec."
.into(),
)),
}
}
}
impl<'a> DeserializableUnionField<'a, Error<'a>> for Option<message::ErrorPayload> {
fn deserialize(owner: Error<'a>) -> Result<Self, DeserializationError> {
match owner.data_type() {
ErrorPayload::NONE => Ok(None),
ErrorPayload::READ_OOB => {
let payload = owner.data_as_read_oob().unwrap();
Ok(Some(message::ErrorPayload::ReadOOB { file_length: payload.fileLength() }))
}
}
}
}
// ========================
// === SerializableRoot ===
// ========================
/// Representation of a value that can be written to a FlatBuffers-serialized binary blob.
pub trait SerializableRoot {
/// Stores the entity into the builder and calls `finish` on it.
fn write(&self, builder: &mut FlatBufferBuilder);
/// Returns a `finish`ed builder containing the serialized entity.
fn serialize(&self) -> FlatBufferBuilder {
let mut builder = flatbuffers::FlatBufferBuilder::new_with_capacity(INITIAL_BUFFER_SIZE);
self.write(&mut builder);
builder
}
/// Calls the given function on the binary blob containing the serialized entity.
fn with_serialized<R>(&self, f: impl FnOnce(&[u8]) -> R) -> R {
let buffer = self.serialize();
f(buffer.finished_data())
}
}
impl<T> SerializableRoot for MessageToServer<T>
where T: SerializableUnion<EnumType = InboundPayload>
{
fn write(&self, builder: &mut FlatBufferBuilder) {
let correlation_id = self.correlation_id.map(EnsoUUID::from);
let message_id = self.message_id.into();
let payload_type = self.payload.active_variant();
let payload = self.payload.serialize(builder);
let message = InboundMessage::create(builder, &InboundMessageArgs {
correlationId: correlation_id.as_ref(),
messageId: Some(&message_id),
payload_type,
payload,
});
builder.finish(message, None);
}
}
impl<T> SerializableRoot for MessageFromServer<T>
where T: SerializableUnion<EnumType = OutboundPayload>
{
fn write(&self, builder: &mut FlatBufferBuilder) {
let correlation_id = self.correlation_id.map(EnsoUUID::from);
let message_id = self.message_id.into();
let payload_type = self.payload.active_variant();
let payload = self.payload.serialize(builder);
let message = OutboundMessage::create(builder, &OutboundMessageArgs {
correlationId: correlation_id.as_ref(),
messageId: Some(&message_id),
payload_type,
payload,
});
builder.finish(message, None);
}
}
// ==========================
// === DeserializableRoot ===
// ==========================
/// Representation of a value that can be read from a FlatBuffers-serialized binary blob.
pub trait DeserializableRoot<'a>: Sized {
/// Constructs a representation of the value from a binary blob in FlatBuffers format.
fn deserialize(data: &'a [u8]) -> Result<Self, DeserializationError>;
}
impl<'a, T> DeserializableRoot<'a> for MessageToServer<T>
where T: DeserializableUnionField<'a, InboundMessage<'a>>
{
fn deserialize(data: &'a [u8]) -> Result<Self, DeserializationError> {
let message = flatbuffers::get_root::<InboundMessage>(data);
let payload = T::deserialize(message)?;
Ok(MessageToServer(Message {
message_id: message.messageId().into(),
correlation_id: message.correlationId().map(|id| id.into()),
payload,
}))
}
}
impl<'a, T> DeserializableRoot<'a> for MessageFromServer<T>
where T: DeserializableUnionField<'a, OutboundMessage<'a>>
{
fn deserialize(data: &'a [u8]) -> Result<Self, DeserializationError> {
let message = flatbuffers::get_root::<OutboundMessage>(data);
let payload = T::deserialize(message)?;
Ok(MessageFromServer(Message {
message_id: message.messageId().into(),
correlation_id: message.correlationId().map(|id| id.into()),
payload,
}))
}
}
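// A round-trip sketch of the serialization machinery (illustrative only; it mirrors how the
// client and its tests use `with_serialized` and `deserialize`):
fn sketch_round_trip() {
let path = LSPath { root_id: Uuid::new_v4(), segments: vec!["Main.enso".to_string()] };
let message = MessageToServer::new(ToServerPayloadOwned::ReadFile { path });
message.with_serialized(|data| {
// Parse the FlatBuffers blob back into an owned message.
let parsed = MessageToServer::<ToServerPayloadOwned>::deserialize(data).unwrap();
assert_eq!(parsed.message_id, message.message_id);
assert_eq!(parsed.payload, message.payload);
});
}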


@@ -1,97 +0,0 @@
//! Utilities related to UUID: extensions to the `uuid::Uuid`, the binary protocol's `EnsoUUID`
//! and conversions between them.
use crate::prelude::*;
use crate::generated::binary_protocol_generated::org::enso::languageserver::protocol::binary;
use binary::EnsoUUID;
impl EnsoUUID {
/// Creates a new random EnsoUUID.
pub fn new_v4() -> EnsoUUID {
Uuid::new_v4().into()
}
}
/// Utilities extending the Uuid class.
pub trait UuidExt {
/// The most significant 64 bits of this UUID's 128 bit value.
///
/// Compatible with `java.util.UUID.getMostSignificantBits()`.
fn most_significant_bits(&self) -> i64;
/// The least significant 64 bits of this UUID's 128 bit value.
///
/// Compatible with `java.util.UUID.getLeastSignificantBits()`.
fn least_significant_bits(&self) -> i64;
/// Constructs a new UUID using the specified data.
///
/// `most_significant` is used for the most significant 64 bits of the UUID and
/// `least_significant` becomes the least significant 64 bits of the UUID.
fn from_bytes_split(least_significant: [u8; 8], most_significant: [u8; 8]) -> Self;
}
impl UuidExt for Uuid {
fn most_significant_bits(&self) -> i64 {
i64::from_be_bytes(self.as_bytes()[..8].try_into().unwrap())
}
fn least_significant_bits(&self) -> i64 {
i64::from_be_bytes(self.as_bytes()[8..].try_into().unwrap())
}
fn from_bytes_split(least_significant: [u8; 8], most_significant: [u8; 8]) -> Self {
let all_bytes = least_significant.iter().chain(most_significant.iter()).rev();
let mut bytes: [u8; 16] = [default(); 16];
for (dst, src) in bytes.iter_mut().zip(all_bytes) {
*dst = *src;
}
Uuid::from_bytes(bytes)
}
}
impls! { From + &From <Uuid> for EnsoUUID {
|uuid|
EnsoUUID::new(uuid.least_significant_bits() as u64, uuid.most_significant_bits() as u64)
}}
impls! { From + &From <EnsoUUID> for Uuid {
|enso_uuid| {
let least_significant = enso_uuid.leastSigBits().to_le_bytes();
let most_significant = enso_uuid.mostSigBits().to_le_bytes();
Uuid::from_bytes_split(least_significant,most_significant)
}
}}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn least_significant_bits() {
let uuid = Uuid::parse_str("38400000-8cf0-11bd-b23e-10b96e4ef00d").unwrap();
assert_eq!(uuid.least_significant_bits(), -5603022497796657139);
}
#[test]
fn most_significant_bits() {
let uuid = Uuid::parse_str("38400000-8cf0-11bd-b23e-10b96e4ef00d").unwrap();
assert_eq!(uuid.most_significant_bits(), 4053239666997989821);
}
#[test]
fn uuid_round_trips() {
let uuid = Uuid::parse_str("6de39f7b-df3a-4a3c-84eb-5eaf96ddbac2").unwrap();
let enso = EnsoUUID::from(uuid);
let uuid2 = Uuid::from(enso);
assert_eq!(uuid, uuid2);
}
}


@@ -1,10 +0,0 @@
//! Common code and modules both for binary and json protocols of the Language Server.
// ==============
// === Export ===
// ==============
pub mod error;
pub mod event;
pub mod ongoing_calls;


@@ -1,36 +0,0 @@
//! A set of various error types used by the Enso Protocol RPC handler.
use crate::prelude::*;
/// Signals that there is no active request with the given ID.
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "No active request by id {}", _0)]
pub struct NoSuchRequest<Id: Sync + Send + Debug + Display + 'static>(pub Id);
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone)]
#[fail(display = "Failed to deserialize the received message. {}", _0)]
pub struct DeserializationError(pub String);
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Received a message that is neither a response nor a notification")]
pub struct UnexpectedMessage;
/// The error codes defined in Enso Protocol (see
/// https://enso.org/docs/developer/enso/language-server/protocol-language-server.html#error)
pub mod code {
/// Signals that the requested content root cannot be found.
pub const CONTENT_ROOT_NOT_FOUND: i64 = 1001;
/// Signals that the requested file doesn't exist.
pub const FILE_NOT_FOUND: i64 = 1003;
/// Signals that the requested project is already under version control.
pub const VCS_ALREADY_EXISTS: i64 = 1005;
/// Signals that the project name is invalid.
pub const PROJECT_NAME_INVALID: i64 = 4001;
}


@@ -1,14 +0,0 @@
//! Module defining the type of events emitted by the RPC handler.
/// Event emitted by the RPC handler.
#[derive(Debug)]
pub enum Event<N> {
/// The handler's transport has been closed.
Closed,
/// An error has occurred.
Error(failure::Error),
/// A notification has been received.
Notification(N),
}
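// A sketch of consuming such events, e.g. items drawn from a handler's `event_stream` (the
// `sketch_describe` helper is illustrative only):
fn sketch_describe<N>(event: &Event<N>) -> &'static str {
match event {
Event::Closed => "transport closed",
Event::Error(_) => "error",
Event::Notification(_) => "notification",
}
}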


@@ -1,90 +0,0 @@
//! Module defining the `OngoingCalls` structure.
use crate::prelude::*;
use crate::common::error::NoSuchRequest;
use futures::channel::oneshot;
/// Stores active requests, i.e. the requests that were sent to the peer but are still awaiting
/// their answer.
/// `Id` identifies the request.
/// `Reply` represents the answer.
#[derive(Debug, Derivative)]
#[derivative(Default(bound = ""))]
pub struct OngoingCalls<Id, Reply>
where Id: Hash + Eq {
ongoing_calls: HashMap<Id, oneshot::Sender<Reply>>,
}
impl<Id, Reply> OngoingCalls<Id, Reply>
where Id: Copy + Debug + Display + Hash + Eq + Send + Sync + 'static
{
/// Creates a new, empty ongoing request storage.
pub fn new() -> OngoingCalls<Id, Reply> {
default()
}
/// Removes the request from the storage and returns it (if present).
/// The removed request can be used to either feed the reply or cancel the future result.
pub fn remove_request(&mut self, id: &Id) -> Option<oneshot::Sender<Reply>> {
let ret = self.ongoing_calls.remove(id);
if ret.is_some() {
info!("Removing request {id}");
} else {
info!("Failed to remove non-present request {id}");
}
ret
}
/// Inserts a new request with the given ID and completer (i.e. the channel capable of accepting
/// the peer's reply and completing the request).
pub fn insert_request(&mut self, id: Id, completer: oneshot::Sender<Reply>) {
info!("Storing a new request {id}");
// There will be no previous request, since IDs are assumed to be unique.
// Still, if there was one, we can safely drop it.
self.ongoing_calls.insert(id, completer);
}
/// Creates a new request and inserts it into the storage.
///
/// `f` is a function that must transform the peer's reply into the request's returned value.
/// Returns a `Future` that shall yield request result, once it is completed (or cancelled).
pub fn open_new_request<F, R>(
&mut self,
id: Id,
f: F,
) -> impl Future<Output = FallibleResult<R>>
where
F: FnOnce(Reply) -> FallibleResult<R>,
{
let (sender, receiver) = oneshot::channel::<Reply>();
let ret = receiver.map(move |result_or_cancel| {
let result = result_or_cancel?;
f(result)
});
self.insert_request(id, sender);
ret
}
/// Removes all awaiting requests. Their futures will signal cancellation.
pub fn clear(&mut self) {
info!("Clearing all the requests.");
self.ongoing_calls.clear()
}
/// Passes peer's `reply` to complete request with given `id`.
/// Fails if no such request was present in the storage.
pub fn complete_request(&mut self, id: Id, reply: Reply) -> FallibleResult {
if let Some(request) = self.remove_request(&id) {
// Explicitly ignore the error. It can happen only if the other side already dropped the
// future with the call result. In that case no one needs to be notified and we are fine.
let _ = request.send(reply);
Ok(())
} else {
Err(NoSuchRequest(id).into())
}
}
}
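// A minimal usage sketch (illustrative only): open a request, complete it with a reply, and
// observe the transformed result through the returned future.
fn sketch_ongoing_calls() {
let mut calls = OngoingCalls::<u32, String>::new();
// The transformation turns the raw reply into the request's final result.
let reply_future = calls.open_new_request(1, |reply: String| Ok(reply.len()));
calls.complete_request(1, "hello".to_string()).unwrap();
assert_eq!(futures::executor::block_on(reply_future).unwrap(), 5);
}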


@@ -1,13 +0,0 @@
//! Despite the name, this file is NOT generated. It just re-exports all generated submodules.
// We don't want warnings for the generated code.
// === Non-Standard Linter Configuration ===
#![allow(warnings)]
// ==============
// === Export ===
// ==============
pub mod binary_protocol_generated;


@@ -1,305 +0,0 @@
//! Module with the Enso Protocol RPC handler.
use crate::prelude::*;
use crate::common::event::Event;
use crate::common::ongoing_calls::OngoingCalls;
use futures::channel::mpsc::UnboundedSender;
use json_rpc::Transport;
use json_rpc::TransportEvent;
use std::future::Future;
// ===================
// === Disposition ===
// ===================
/// Describes how a given message from the server should be handled.
#[derive(Debug)]
pub enum Disposition<Id, Reply, Notification>
where
Id: Debug,
Reply: Debug,
Notification: Debug, {
/// Ignore the message.
Ignore,
/// Treat as a reply to an open request.
HandleReply {
/// Remote Call ID (correlation ID).
id: Id,
/// The reply contents.
reply: Reply,
},
/// Emit the given event (usually an error or a notification).
EmitEvent {
/// Event to be emitted.
event: Event<Notification>,
},
}
impl<Id, Reply, Notification> Disposition<Id, Reply, Notification>
where
Id: Debug,
Reply: Debug,
Notification: Debug,
{
/// Creates a notification event disposition.
pub fn notify(notification: Notification) -> Self {
Disposition::EmitEvent { event: Event::Notification(notification) }
}
/// Creates an error event disposition.
pub fn error(error: impl Into<failure::Error> + Debug) -> Self {
Disposition::EmitEvent { event: Event::Error(error.into()) }
}
}
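// A sketch of a `processor` for a hypothetical line-based text protocol in which replies look
// like "<id>:<payload>" and anything else is treated as a notification. (The protocol format
// here is an assumption made for illustration; it is not part of the Enso Protocol.)
fn sketch_processor(event: TransportEvent) -> Disposition<u64, String, String> {
match event {
TransportEvent::TextMessage(text) => {
let reply = text
.split_once(':')
.and_then(|(id, payload)| Some((id.parse::<u64>().ok()?, payload.to_string())));
match reply {
Some((id, reply)) => Disposition::HandleReply { id, reply },
None => Disposition::notify(text),
}
}
_ => Disposition::Ignore,
}
}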
// ===================
// === HandlerData ===
// ===================
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
struct HandlerData<Id, Reply, Notification>
where
Id: Eq + Hash + Debug,
Notification: Debug,
Reply: Debug, {
#[derivative(Debug = "ignore")]
transport: Box<dyn Transport>,
sender: Option<UnboundedSender<Event<Notification>>>,
ongoing_calls: OngoingCalls<Id, Reply>,
#[derivative(Debug = "ignore")]
processor: Box<dyn FnMut(TransportEvent) -> Disposition<Id, Reply, Notification>>,
}
impl<Id, Reply, Notification> HandlerData<Id, Reply, Notification>
where
Id: Copy + Debug + Display + Hash + Eq + Send + Sync + 'static,
Notification: Debug,
Reply: Debug,
{
fn new<T, P>(transport: T, processor: P) -> HandlerData<Id, Reply, Notification>
where
T: Transport + 'static,
P: FnMut(TransportEvent) -> Disposition<Id, Reply, Notification> + 'static, {
HandlerData {
transport: Box::new(transport),
sender: None,
ongoing_calls: OngoingCalls::new(),
processor: Box::new(processor),
}
}
/// Emits an event. Clients can consume events through `event_stream`.
fn emit_event(&mut self, event: Event<Notification>) {
if let Some(sender) = self.sender.as_ref() {
// An error can happen if there is no listener, but we don't mind that.
let _ = sender.unbounded_send(event);
}
}
/// Feeds the reply to complete the corresponding open request.
fn process_reply(&mut self, id: Id, reply: Reply) {
info!("Processing reply to request {id}: {reply:?}");
if let Err(error) = self.ongoing_calls.complete_request(id, reply) {
self.emit_error(error);
}
}
/// Helper that wraps error into an appropriate event value and emits it.
fn emit_error(&mut self, error: impl Into<failure::Error> + Debug) {
info!("Emitting error: {error:?}");
let event = Event::Error(error.into());
self.emit_event(event);
}
/// Handles incoming transport event. The `processor` is used to decide the further processing
/// path.
///
/// Main entry point for input data while running. Should be connected to the `Transport`'s
/// output event stream.
pub fn process_event(&mut self, event: TransportEvent) {
debug_span!("Processing incoming transport event").in_scope(|| {
debug!("Transport event contents: {event:?}.");
match event {
TransportEvent::TextMessage(_) | TransportEvent::BinaryMessage(_) => {
let disposition = (self.processor)(event);
debug!("Disposition: {disposition:?}");
match disposition {
Disposition::HandleReply { id, reply } => self.process_reply(id, reply),
Disposition::EmitEvent { event } => self.emit_event(event),
Disposition::Ignore => {}
}
}
TransportEvent::Opened => {}
TransportEvent::Closed => self.emit_event(Event::Closed),
}
});
}
pub fn make_request<F, R>(
&mut self,
message: &dyn IsRequest<Id = Id>,
f: F,
) -> impl Future<Output = FallibleResult<R>>
where
F: FnOnce(Reply) -> FallibleResult<R>,
{
debug_span!("Making a new RPC call").in_scope(|| {
let id = message.id();
let ret = self.ongoing_calls.open_new_request(id, f);
debug!("Sending message {message:?}");
let sending_result = message.send(self.transport.as_mut());
if sending_result.is_err() {
// If we failed to send the request, it should be immediately removed.
// This will result in the returned future immediately yielding error.
self.ongoing_calls.remove_request(&id);
}
ret
})
}
/// Creates a new stream with events from this handler.
///
/// If such a stream already existed, it will be finished (and its
/// continuations should still be able to process any remaining events).
pub fn event_stream(&mut self) -> impl Stream<Item = Event<Notification>> {
let (transmitter, receiver) = futures::channel::mpsc::unbounded();
self.sender = Some(transmitter);
receiver
}
/// See the `runner` on the `Client`.
pub fn runner(this: &Rc<RefCell<Self>>) -> impl Future<Output = ()> {
let event_receiver = this.borrow_mut().transport.establish_event_stream();
let weak_this = Rc::downgrade(this);
event_receiver.for_each(move |event: TransportEvent| {
if let Some(state) = weak_this.upgrade() {
state.borrow_mut().process_event(event);
}
futures::future::ready(())
})
}
}
// ===============
// === Handler ===
// ===============
/// Handler is the main provider of the RPC protocol. Given a transport capable of carrying
/// messages, it manages the whole communication with a peer. It works with both binary and text
/// protocols.
///
/// It allows making requests, where each request gets a unique `Id` and its eventual result is
/// represented by a `Future`.
/// `Reply` represents peer's reply to a request.
/// `Notification` represents a notification received from a peer.
///
/// Notifications and internal messages are emitted using the `event_stream` stream.
#[derive(CloneRef, Debug, Derivative)]
#[derivative(Clone(bound = ""))]
pub struct Handler<Id, Reply, Notification: Debug>
where
Id: Eq + Hash + Debug,
Notification: Debug,
Reply: Debug, {
state: Rc<RefCell<HandlerData<Id, Reply, Notification>>>,
}
/// A value that can be used to represent a request to a remote RPC server.
pub trait IsRequest: Debug {
/// Request ID.
type Id: Copy;
/// Send the message to the peer using the provided transport.
fn send(&self, transport: &mut dyn Transport) -> FallibleResult;
/// The request ID that will be used later to associate the peer's response with the request.
fn id(&self) -> Self::Id;
}
impl<Id, Reply, Notification> Handler<Id, Reply, Notification>
where
Id: Copy + Debug + Display + Hash + Eq + Send + Sync + 'static,
Notification: Debug,
Reply: Debug,
{
/// Creates a new handler operating over given transport.
///
/// `processor` must deal with decoding incoming transport events.
pub fn new<T, P>(transport: T, processor: P) -> Self
where
T: Transport + 'static,
P: FnMut(TransportEvent) -> Disposition<Id, Reply, Notification> + 'static, {
let state = Rc::new(RefCell::new(HandlerData::new(transport, processor)));
Handler { state }
}
/// Starts a new request described by a given message.
///
/// The request shall be sent to the server immediately, and the reply shall then be awaited.
/// Once the reply to this request arrives (or the call is abandoned for another reason, e.g. due
/// to a disconnection), the returned `Future` shall complete.
///
/// `f` is a function used to handle the server's reply. It should try to return the desired
/// request result and must handle errors (hence `FallibleResult`).
///
/// We use `&dyn IsRequest` here rather than a generic `impl IsRequest` parameter only to avoid
/// lifetime issues caused by this Rust compiler bug:
/// https://github.com/rust-lang/rust/issues/42940
pub fn make_request<F, R>(
&self,
message: &dyn IsRequest<Id = Id>,
f: F,
) -> impl Future<Output = FallibleResult<R>>
where
F: FnOnce(Reply) -> FallibleResult<R>,
{
self.state.borrow_mut().make_request(message, f)
}
/// See the `runner` on the `Client`.
pub fn runner(&self) -> impl Future<Output = ()> {
HandlerData::runner(&self.state)
}
/// Creates a new stream with events from this handler.
///
/// If such a stream already existed, it will be finished (and its continuations should still be
/// able to process any remaining events).
pub fn event_stream(&self) -> impl Stream<Item = Event<Notification>> {
self.state.borrow_mut().event_stream()
}
}
#[cfg(test)]
mod tests {
use super::*;
use json_rpc::test_util::transport::mock::MockTransport;
#[test]
fn test_closed_socket_event_passing() {
let mut transport = MockTransport::new();
let processor = |msg| panic!("Must never be called in this test, but got {msg:?}!");
let handler = Handler::<i32, (), ()>::new(transport.clone_ref(), processor);
let mut runner = handler.runner().boxed_local();
let mut events = handler.event_stream().boxed_local();
events.expect_pending();
transport.mock_connection_closed();
events.expect_pending();
// Process events.
runner.expect_pending();
let event = events.expect_next();
assert!(matches!(event, Event::Closed), "Event was: {event:?}");
events.expect_pending();
}
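// A hedged end-to-end sketch (illustrative only, not an original test): it assumes a toy
// binary protocol in which the first byte of each message is the request ID and the remaining
// bytes are the payload.
#[test]
fn sketch_request_round_trip() {
#[derive(Debug)]
struct OneByteRequest {
id: u8,
}
impl IsRequest for OneByteRequest {
type Id = u8;
fn send(&self, transport: &mut dyn Transport) -> FallibleResult {
transport.send_binary(&[self.id])
}
fn id(&self) -> Self::Id {
self.id
}
}
let mut transport = MockTransport::new();
let processor = |event| match event {
TransportEvent::BinaryMessage(data) if !data.is_empty() =>
Disposition::HandleReply { id: data[0], reply: data[1..].to_vec() },
_ => Disposition::Ignore,
};
let handler = Handler::<u8, Vec<u8>, ()>::new(transport.clone_ref(), processor);
let mut runner = handler.runner().boxed_local();
let mut reply = handler.make_request(&OneByteRequest { id: 7 }, |reply| Ok(reply)).boxed_local();
// The peer replies to request 7 with the payload [42].
transport.mock_peer_binary_message(&[7, 42]);
// Drive event processing; the runner itself never completes.
runner.expect_pending();
assert_eq!(reply.expect_ok(), vec![42]);
}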
}


@@ -1,280 +0,0 @@
//! Client library for the Language Server part of the Enso Protocol.
//!
//! Please refer to https://github.com/enso-org/enso/blob/develop/docs/language-server/README.md---language-server
//! for the full protocol documentation and discussion on the types and terms used here.
//!
//! Also, the Enso Protocol specification is the source of many names and comments used here.
//! This file tries to follow the scheme of the protocol specification.
// ==============
// === Export ===
// ==============
pub mod connection;
pub mod constants;
pub mod response;
#[cfg(test)]
mod tests;
pub mod types;
pub use connection::Connection;
pub use types::*;
use crate::prelude::*;
use crate::types::Sha3_224;
use crate::types::UTCDateTime;
use json_rpc::api::Result;
use json_rpc::make_rpc_methods;
use json_rpc::Handler;
use serde::Deserialize;
use serde::Serialize;
use std::future::Future;
use uuid::Uuid;
// ====================
// === API & Client ===
// ====================
make_rpc_methods! {
/// An interface containing all the operations available in the language server's textual protocol.
trait API {
/// Initialize the connection used to send the textual protocol messages. This initialisation
/// is important so that the client identifier can be correlated between the textual and data
/// connections.
#[MethodInput=InitProtocolInput, rpc_name="session/initProtocolConnection"]
fn init_protocol_connection(&self, client_id: Uuid) -> response::InitProtocolConnection;
/// Copy a specified file system object to another location.
#[MethodInput=CopyFileInput, rpc_name="file/copy"]
fn copy_file(&self, from: Path, to: Path) -> ();
/// Delete the specified file system object.
#[MethodInput=DeleteFileInput, rpc_name="file/delete"]
fn delete_file(&self, path: Path) -> ();
/// Check if file system object exists.
#[MethodInput=FileExistsInput, rpc_name="file/exists"]
fn file_exists(&self, path: Path) -> response::FileExists;
/// List all file-system objects in the specified path.
#[MethodInput=FileListInput, rpc_name="file/list"]
fn file_list(&self, path: Path) -> response::FileList;
/// Move file system object to another location.
#[MethodInput=MoveFileInput, rpc_name="file/move"]
fn move_file(&self, from: Path, to: Path) -> ();
/// Reads a file's content as a String.
#[MethodInput=ReadFileInput, rpc_name="file/read"]
fn read_file(&self, path: Path) -> response::Read;
/// Gets a file system object's attribute information.
#[MethodInput=FileInfoInput, rpc_name="file/info"]
fn file_info(&self, path: Path) -> response::FileInfo;
/// Requests that the language server provide the checksum of the provided file.
#[MethodInput=FileChecksumInput, rpc_name="file/checksum"]
fn file_checksum(&self, path: Path) -> response::FileChecksum;
/// Creates the specified file system object.
#[MethodInput=CreateInput, rpc_name="file/create"]
fn create_file(&self, object: FileSystemObject) -> ();
/// Writes String contents to a file in the specified path.
#[MethodInput=FileWriteInput, rpc_name="file/write"]
fn write_file(&self, path: Path, contents: String) -> ();
/// Acquire capability permission.
#[MethodInput=AcquireCapabilityInput, rpc_name="capability/acquire"]
fn acquire_capability(&self, method: String, register_options: RegisterOptions) -> ();
/// Open the specified file. If no user has write lock on the opened file, the write lock
/// capability is granted to the caller.
#[MethodInput=OpenTextFileInput, rpc_name="text/openFile"]
fn open_text_file(&self, path: Path) -> response::OpenTextFile;
/// Informs the language server that a client has closed the specified file.
#[MethodInput=CloseTextFileInput, rpc_name="text/closeFile"]
fn close_text_file(&self, path: Path) -> ();
/// Save the specified file. It may fail if the user does not have permission to edit that file.
#[MethodInput=SaveTextFileInput, rpc_name="text/save"]
fn save_text_file(&self, path: Path, current_version:Sha3_224) -> ();
/// Apply edits to the specified text file. This operation may fail if the user does not
/// have permission to edit the resources for which edits are sent. This failure may be partial,
/// in that some edits are applied and others are not.
#[MethodInput=ApplyTextFileEditInput, rpc_name="text/applyEdit"]
fn apply_text_file_edit(&self, edit: FileEdit, execute: bool) -> ();
/// Create a new execution context. Returns the executionContext/canModify and
/// executionContext/receivesUpdates capabilities, containing a freshly created ContextId.
#[MethodInput=CreateExecutionContextInput, rpc_name="executionContext/create"]
fn create_execution_context(&self, context_id: ContextId) -> response::CreateExecutionContext;
/// Destroy an execution context and free its resources.
#[MethodInput=DestroyExecutionContextInput, rpc_name="executionContext/destroy"]
fn destroy_execution_context(&self, context_id: ContextId) -> ();
/// Move the execution context to a new location deeper down the stack.
#[MethodInput=PushToExecutionContextInput, rpc_name="executionContext/push"]
fn push_to_execution_context(&self, context_id: ContextId, stack_item: StackItem) -> ();
/// Move the execution context up the stack.
#[MethodInput=PopFromExecutionContextInput, rpc_name="executionContext/pop"]
fn pop_from_execution_context(&self, context_id: ContextId) -> ();
/// Attach a visualization, potentially preprocessed by some arbitrary Enso code, to a given
/// node in the program.
#[MethodInput=AttachVisualizationInput, rpc_name="executionContext/attachVisualization"]
fn attach_visualization
( &self
, visualization_id : Uuid
, expression_id : Uuid
, visualization_config : VisualizationConfiguration) -> ();
/// Detach a visualization from the executing code.
#[MethodInput=DetachVisualizationInput, rpc_name="executionContext/detachVisualization"]
fn detach_visualization
(&self, context_id: Uuid, visualization_id: Uuid, expression_id: Uuid) -> ();
/// Modify the configuration for an existing visualization.
#[MethodInput=ModifyVisualizationInput, rpc_name="executionContext/modifyVisualization"]
fn modify_visualization
(&self, visualization_id: Uuid, visualization_config: VisualizationConfiguration) -> ();
/// Interrupt the program execution.
#[MethodInput=InterruptInput, rpc_name="executionContext/interrupt"]
fn interrupt(&self, context_id: ContextId) -> ();
/// Restart the program execution.
#[MethodInput=RecomputeInput, rpc_name="executionContext/recompute"]
fn recompute(&self, context_id: ContextId, invalidated_expressions: InvalidatedExpressions, execution_environment: Option<ExecutionEnvironment>) -> ();
/// Start the profiling of the language server.
#[MethodInput=ProfilingStartInput, rpc_name="profiling/start"]
fn profiling_start(&self, memory_snapshot: Option<bool>) -> ();
/// Stop the profiling of the language server.
#[MethodInput=ProfilingStopInput, rpc_name="profiling/stop"]
fn profiling_stop(&self) -> ();
/// Obtain the full suggestions database.
#[MethodInput=GetSuggestionsDatabaseInput, rpc_name="search/getSuggestionsDatabase"]
fn get_suggestions_database(&self) -> response::GetSuggestionDatabase;
/// Receive the current version of the suggestions database.
#[MethodInput=GetSuggestionsDatabaseVersionInput,
rpc_name="search/getSuggestionsDatabaseVersion"]
fn get_suggestions_database_version(&self) -> response::GetSuggestionDatabaseVersion;
/// Receive autocomplete suggestions.
#[MethodInput=CompletionInput, rpc_name="search/completion"]
fn completion
( &self
, file : Path
, position : Position
, self_type : Option<String>
, return_type : Option<String>
, tags : Option<Vec<SuggestionEntryType>>
, is_static : Option<bool>
) -> response::Completion;
/// Get the list of component groups available in runtime.
#[MethodInput=GetComponentGroups, rpc_name="executionContext/getComponentGroups"]
fn get_component_groups(&self, context_id: ContextId) -> response::GetComponentGroups;
/// Initialize the VCS at the specified root.
#[MethodInput=VcsInitInput, rpc_name="vcs/init"]
fn init_vcs(&self, root: Path) -> ();
/// Save the project to the VCS at the specified root.
#[MethodInput=VcsWriteInput, rpc_name="vcs/save"]
fn save_vcs(&self, root: Path, name: Option<String>) -> response::SaveVcs;
/// Return a list of all project states that are saved to the VCS.
#[MethodInput=VcsListInput, rpc_name="vcs/list"]
fn list_vcs(&self, root: Path, limit: Option<usize>) -> response::ListVcs;
/// Returns the current status of the VCS, containing changes made to the project since the last
/// VCS snapshot.
#[MethodInput=VcsStatusInput, rpc_name="vcs/status"]
fn vcs_status(&self, root: Path) -> response::VcsStatus;
/// Restore the project from the VCS at the specified root. The project is restored to the last
/// VCS snapshot if no `commit_id` is provided.
#[MethodInput=VcsRestoreInput, rpc_name="vcs/restore"]
fn restore_vcs(&self, root: Path, commit_id: Option<String>) -> response::RestoreVcs;
/// Return an OpenAI-powered completion for the given prompt, with the given stop sequence.
#[MethodInput=AiCompletionInput, rpc_name="ai/completion"]
fn ai_completion(&self, prompt: String, stop_sequence: String) -> response::AiCompletion;
/// Set the execution environment of the context for future evaluations.
#[MethodInput=SetModeInput, rpc_name="executionContext/setExecutionEnvironment"]
fn set_execution_environment(&self, context_id: ContextId, execution_environment: ExecutionEnvironment) -> ();
}}
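// Illustrative sketch (helper name hypothetical, and assuming, as the tests
// below do, that a `Client` wraps a transport and an executor drives
// `client.runner()`): invoking one of the generated client methods.
async fn main_txt_exists(client: &Client, root_id: Uuid) -> bool {
    let path = Path { root_id, segments: vec!["Main.txt".into()] };
    // `file_exists` is generated from the `file/exists` entry above and yields
    // a `response::FileExists` on success.
    client.file_exists(&path).await.map(|response| response.exists).unwrap_or(false)
}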
// ==============
// === Errors ===
// ==============
/// Check if the given `Error` value corresponds to an RPC call timeout.
///
/// Recognizes both client- and server-side timeouts.
#[rustfmt::skip]
pub fn is_timeout_error(error: &failure::Error) -> bool {
use json_rpc::messages;
use json_rpc::RpcError;
use json_rpc::RpcError::*;
const TIMEOUT: i64 = constants::ErrorCodes::Timeout as i64;
matches!(error.downcast_ref::<RpcError>()
, Some(TimeoutError{..})
| Some(RemoteError(messages::Error{code:TIMEOUT,..})))
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod test {
use super::*;
#[test]
fn recognize_timeout_errors() {
type RpcError = json_rpc::RpcError<serde_json::Value>;
// Server-side errors.
let text = r#"{"code":11,"message":"Request timeout"}"#;
let msg = serde_json::from_str::<json_rpc::messages::Error>(text).unwrap();
let error = RpcError::RemoteError(msg).into();
assert!(is_timeout_error(&error));
let text = r#"{"code":2007,"message":"Evaluation of the visualization expression failed"}"#;
let msg = serde_json::from_str::<json_rpc::messages::Error>(text).unwrap();
let error = RpcError::RemoteError(msg).into();
assert!(!is_timeout_error(&error));
// Client-side errors.
let error = RpcError::TimeoutError { millis: 500 }.into();
assert!(is_timeout_error(&error));
let error = RpcError::LostConnection.into();
assert!(!is_timeout_error(&error));
}
}


@ -1,103 +0,0 @@
//! Utilities for establishing and storing the Language Server RPC connection.
use crate::prelude::*;
use crate::language_server::types::ContentRoot;
use crate::language_server::MockClient;
use crate::language_server::API;
use uuid::Uuid;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Fail, Debug)]
#[fail(display = "Failed to initialize language server RPC connection: {}", _0)]
pub struct FailedToInitializeProtocol(failure::Error);
#[allow(missing_docs)]
#[derive(Fail, Clone, Copy, Debug)]
#[fail(display = "Language Server provided no content roots.")]
pub struct MissingContentRoots;
// ==================
// === Connection ===
// ==================
/// An established, initialized connection to the language server's RPC endpoint.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct Connection {
/// The ID of the client.
pub client_id: Uuid,
/// An LS client that has already initialized the protocol.
#[derivative(Debug = "ignore")]
pub client: Box<dyn API>,
/// The project content root: the only obligatory content root received.
project_root: ContentRoot,
/// Content roots obtained during initialization, other than the `project_root`.
content_roots: Vec<ContentRoot>,
}
impl Connection {
/// Takes a client and its ID, and initializes the protocol connection.
pub async fn new(client: impl API + 'static, client_id: Uuid) -> FallibleResult<Self> {
let client = Box::new(client);
let init_response = client.init_protocol_connection(&client_id).await;
let init_response = init_response.map_err(|e| FailedToInitializeProtocol(e.into()))?;
let mut content_roots = init_response.content_roots;
let project_root = Self::extract_project_root(&mut content_roots)?;
Ok(Connection { client_id, client, project_root, content_roots })
}
fn extract_project_root(content_roots: &mut Vec<ContentRoot>) -> FallibleResult<ContentRoot> {
let opt_index =
content_roots.iter().position(|cr| matches!(cr, ContentRoot::Project { .. }));
let index = opt_index.ok_or(MissingContentRoots)?;
Ok(content_roots.drain(index..=index).next().unwrap())
}
/// Creates a connection which wraps a mock client.
pub fn new_mock(client: MockClient) -> Connection {
Connection {
client: Box::new(client),
client_id: default(),
project_root: ContentRoot::Project { id: default() },
content_roots: default(),
}
}
/// Creates an Rc handle to a connection which wraps a mock client.
pub fn new_mock_rc(client: MockClient) -> Rc<Connection> {
Rc::new(Self::new_mock(client))
}
/// Returns the first content root.
pub fn project_root(&self) -> &ContentRoot {
&self.project_root
}
/// Lists all content roots for this LS connection.
pub fn content_roots(&self) -> impl Iterator<Item = &ContentRoot> {
std::iter::once(&self.project_root).chain(self.content_roots.iter())
}
}
impl Deref for Connection {
type Target = dyn API;
fn deref(&self) -> &Self::Target {
self.client.as_ref()
}
}
impl DerefMut for Connection {
fn deref_mut(&mut self) -> &mut Self::Target {
self.client.deref_mut()
}
}
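// Illustrative sketch (helper name hypothetical; assumes `MockClient: Default`
// and `ContentRoot: Clone`): wrapping a mock client. `content_roots` always
// yields the project root first.
fn mock_connection_roots() -> Vec<ContentRoot> {
    let connection = Connection::new_mock(MockClient::default());
    connection.content_roots().cloned().collect()
}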


@ -1,27 +0,0 @@
//! Constants defined for the Language Server JSON-RPC API.
use crate::prelude::*;
/// Recognized error codes used by the Language Server messages.
///
/// They follow the `org.enso.jsonrpc.Error` object defined in the `enso` repository.
#[derive(Clone, Copy, Debug)]
pub enum ErrorCodes {
/// Server failed to parse JSON message.
ParseError = -32700,
/// JSON message sent was not a valid Request object.
InvalidRequest = -32600,
/// Requested method does not exist or is unavailable.
MethodNotFound = -32601,
/// Invalid method parameters.
InvalidParams = -32602,
/// Service error.
ServiceError = 1,
/// The requested method is not implemented.
NotImplementedError = 10,
/// Request timeout.
/// Note that timeout can also happen on the client side, as part of the Handler's logic.
Timeout = 11,
}
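// Illustrative sketch (helper name hypothetical): the variants map onto the raw
// integer codes carried by `json_rpc::messages::Error`, so classifying a remote
// error is a plain comparison.
fn is_parse_error(error: &json_rpc::messages::Error) -> bool {
    error.code == ErrorCodes::ParseError as i64
}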


@ -1,151 +0,0 @@
//! Helper structures wrapping RPC method result types.
use crate::language_server::types::*;
use crate::prelude::*;
use crate::types::Sha3_224;
use serde::Deserialize;
use serde::Serialize;
/// Response of `init_protocol_connection` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct InitProtocolConnection {
/// List of content roots.
pub content_roots: Vec<ContentRoot>,
}
/// Response of `read_file` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Read {
#[allow(missing_docs)]
pub contents: String,
}
/// Response of `file_exists` method.
#[derive(Hash, Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct FileExists {
#[allow(missing_docs)]
pub exists: bool,
}
/// Response of `file_list` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct FileList {
#[allow(missing_docs)]
pub paths: Vec<FileSystemObject>,
}
/// Response of `file_info` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct FileInfo {
#[allow(missing_docs)]
pub attributes: FileAttributes,
}
/// Response of `file_checksum` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct FileChecksum {
#[allow(missing_docs)]
pub checksum: Sha3_224,
}
/// Response of `open_text_file` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct OpenTextFile {
pub write_capability: Option<CapabilityRegistration>,
pub content: String,
pub current_version: Sha3_224,
}
/// Response of `create_execution_context` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct CreateExecutionContext {
pub context_id: ContextId,
pub can_modify: CapabilityRegistration,
pub receives_updates: CapabilityRegistration,
}
/// Response of `get_suggestions_database` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct GetSuggestionDatabase {
pub entries: Vec<SuggestionsDatabaseEntry>,
pub current_version: SuggestionsDatabaseVersion,
}
/// Response of `get_suggestions_database_version` method.
#[derive(Hash, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct GetSuggestionDatabaseVersion {
pub current_version: SuggestionsDatabaseVersion,
}
/// Response of `completion` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct Completion {
pub results: Vec<SuggestionId>,
pub current_version: SuggestionsDatabaseVersion,
}
/// Response of `ai/completion` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct AiCompletion {
pub code: String,
}
/// Response of `get_component_groups` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct GetComponentGroups {
pub component_groups: Vec<LibraryComponentGroup>,
}
/// Response of `save_vcs` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct SaveVcs {
pub commit_id: String,
pub message: String,
}
/// Response of `list_vcs` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct ListVcs {
pub saves: Vec<SaveVcs>,
}
/// Response of `vcs_status` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct VcsStatus {
pub dirty: bool,
pub changed: Vec<Path>,
pub last_save: SaveVcs,
}
/// Response of `vcs_restore` method.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
pub struct RestoreVcs {
pub changed: Vec<Path>,
}
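// Illustrative sketch (test name hypothetical): thanks to
// `#[serde(rename_all = "camelCase")]`, the wire format uses camelCase field
// names even though the Rust fields are snake_case.
#[cfg(test)]
mod camel_case_example {
    use super::*;
    #[test]
    fn open_text_file_uses_camel_case_fields() {
        let json = r#"{
            "writeCapability": null,
            "content": "main = 42",
            "currentVersion": "716596afadfa17cd1cb35133829a02b03e4eed398ce029ce78a2161d"
        }"#;
        let parsed: OpenTextFile = serde_json::from_str(json).unwrap();
        assert!(parsed.write_capability.is_none());
        assert_eq!(parsed.content, "main = 42");
    }
}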


@ -1,610 +0,0 @@
use crate::language_server::*;
use enso_prelude::*;
use futures::task::LocalSpawnExt;
use json_rpc::messages::Message;
use json_rpc::messages::RequestMessage;
use json_rpc::test_util::transport::mock::MockTransport;
use serde_json::json;
use serde_json::Value;
use std::future::Future;
// ===============
// === Fixture ===
// ===============
struct Fixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
fn setup_language_server() -> Fixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
Fixture { transport, client, executor }
}
// =============
// === Tests ===
// =============
#[test]
fn test_file_event_notification() {
let mut fixture = setup_language_server();
let mut events = Box::pin(fixture.client.events());
events.expect_pending();
let root_id = Uuid::parse_str("00000000-0000-0000-0000-000000000000");
let root_id = root_id.expect("Couldn't parse uuid.");
let expected_event = FileEvent {
path: Path { root_id, segments: vec!["Main.txt".into()] },
kind: FileEventKind::Modified,
};
let notification_text = r#"{
"jsonrpc": "2.0",
"method": "file/event",
"params": {
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : ["Main.txt"]
},
"kind" : "Modified"
}
}"#;
fixture.transport.mock_peer_text_message(notification_text);
events.expect_pending();
fixture.executor.run_until_stalled();
if let Event::Notification(n) = events.expect_next() {
assert_eq!(n, Notification::FileEvent(expected_event));
} else {
panic!("expected notification event");
}
}
/// This function tests making a request using the language server. It
/// * creates an LS client and uses `make_request` to make a request,
/// * checks that the request is made for `expected_method`,
/// * checks that the request input is `expected_input`,
/// * mocks receiving a response from the server with `result` and
/// * checks that the client-returned Future yields `expected_output`.
fn test_request<Fun, Fut, T>(
make_request: Fun,
expected_method: &str,
expected_input: Value,
result: Value,
expected_output: T,
) where
Fun: FnOnce(&mut Client) -> Fut,
Fut: Future<Output = Result<T>>,
T: Debug + PartialEq,
{
let mut fixture = setup_language_server();
let mut request_future = Box::pin(make_request(&mut fixture.client));
let request = fixture.transport.expect_json_message::<RequestMessage<Value>>();
assert_eq!(request.method, expected_method);
assert_eq!(request.params, expected_input);
let response = Message::new_success(request.id, result);
fixture.transport.mock_peer_json_message(response);
fixture.executor.run_until_stalled();
assert_eq!(request_future.expect_ok(), expected_output);
}
#[test]
fn test_file_requests() {
let root_id = Uuid::parse_str("00000000-0000-0000-0000-000000000000");
let root_id = root_id.expect("Couldn't parse uuid.");
let main = Path { root_id, segments: vec!["Main.txt".into()] };
let target = Path { root_id, segments: vec!["Target.txt".into()] };
let path_main = json!({"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : ["Main.txt"]
}
});
let from_main_to_target = json!({
"from" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : ["Main.txt"]
},
"to" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : ["Target.txt"]
}
});
let file_exists_json = json!({"exists":true});
let unit_json = json!(null);
test_request(
|client| client.copy_file(&main, &target),
"file/copy",
from_main_to_target.clone(),
unit_json.clone(),
(),
);
test_request(
|client| client.delete_file(&main),
"file/delete",
path_main.clone(),
unit_json.clone(),
(),
);
test_request(
|client| client.file_exists(&main),
"file/exists",
path_main.clone(),
file_exists_json,
response::FileExists { exists: true },
);
let list_response_json = json!({
"paths" : [
{
"type" : "File",
"name" : "foo.txt",
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : []
}
},
{
"type" : "File",
"name" : "bar.txt",
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : []
}
}
]
});
let list_response_value = response::FileList {
paths: vec![
FileSystemObject::File {
name: "foo.txt".into(),
path: Path { root_id, segments: default() },
},
FileSystemObject::File {
name: "bar.txt".into(),
path: Path { root_id, segments: default() },
},
],
};
test_request(
|client| client.file_list(&main),
"file/list",
path_main.clone(),
list_response_json,
list_response_value,
);
test_request(
|client| client.move_file(&main, &target),
"file/move",
from_main_to_target,
unit_json.clone(),
(),
);
let read_response_json = json!({"contents":"Hello world!"});
let read_response = response::Read { contents: "Hello world!".into() };
test_request(
|client| client.read_file(&main),
"file/read",
path_main.clone(),
read_response_json,
read_response,
);
let parse_rfc3339 = |s| chrono::DateTime::parse_from_rfc3339(s).unwrap();
let file_system_object = FileSystemObject::File {
name: "test.txt".into(),
path: Path { root_id, segments: default() },
};
let file_system_object_json = json!({
"type" : "File",
"name" : "test.txt",
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : []
}
});
let expected_attributes = response::FileInfo {
attributes: FileAttributes {
creation_time: parse_rfc3339("2020-01-07T21:25:26Z"),
last_access_time: parse_rfc3339("2020-01-21T22:16:51.123994500+00:00"),
last_modified_time: parse_rfc3339("2020-01-07T21:25:26Z"),
kind: file_system_object.clone(),
byte_size: 125125,
},
};
let sample_attributes_json = json!({ "attributes" : {
"creationTime" : "2020-01-07T21:25:26Z",
"lastAccessTime" : "2020-01-21T22:16:51.123994500+00:00",
"lastModifiedTime" : "2020-01-07T21:25:26Z",
"kind" : file_system_object_json,
"byteSize" : 125125
}});
test_request(
|client| client.file_info(&main),
"file/info",
path_main,
sample_attributes_json,
expected_attributes,
);
let create_file_json = json!({ "object": file_system_object_json });
test_request(
|client| client.create_file(&file_system_object),
"file/create",
create_file_json,
unit_json.clone(),
(),
);
test_request(
|client| client.write_file(&main, &"Hello world!".to_string()),
"file/write",
json!({
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : ["Main.txt"]
},
"contents" : "Hello world!"
}),
unit_json,
(),
);
}
#[test]
fn test_protocol_connection() {
let init_protocol_connection_response = response::InitProtocolConnection {
content_roots: vec![ContentRoot::Project { id: default() }],
};
test_request(
|client| client.init_protocol_connection(&Uuid::default()),
"session/initProtocolConnection",
json!({
"clientId" : "00000000-0000-0000-0000-000000000000"
}),
json!({
"contentRoots" : [{
"id" : "00000000-0000-0000-0000-000000000000",
"type" : "Project",
}]
}),
init_protocol_connection_response,
);
}
#[test]
fn test_acquire_capability() {
let root_id = Uuid::parse_str("00000000-0000-0000-0000-000000000000");
let root_id = root_id.expect("Couldn't parse uuid.");
let unit_json = json!(null);
let path = Path { root_id, segments: default() };
let method = "file/receivesTreeUpdates".to_string();
let register_options = RegisterOptions::Path { path };
test_request(
|client| client.acquire_capability(&method, &register_options),
"capability/acquire",
json!({
"method" : "file/receivesTreeUpdates",
"registerOptions" : {
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : []
}
}
}),
unit_json,
(),
);
}
#[test]
fn test_computed_value_update() {
use crate::language_server::Notification;
use json_rpc::Event;
let context_id = Uuid::parse_str("b36dea0b-b75a-40cf-aaad-5fcdf29a0573").unwrap();
let id = Uuid::parse_str("d4b540c0-3ef5-487c-9453-df9d3efd351c").unwrap();
let typename = "Number";
let notification = json!({
"jsonrpc" : "2.0",
"method" : "executionContext/expressionUpdates",
"params" : {
"contextId" : context_id,
"updates" : [{
"expressionId" : id,
"type" : typename,
"methodPointer" : null,
"profilingInfo" : [],
"fromCache" : true,
"payload" : ExpressionUpdatePayload::Value { warnings: None },
}]
}
});
let mut fixture = setup_language_server();
let mut stream = fixture.client.events();
stream.expect_pending();
fixture.transport.mock_peer_json_message(notification);
fixture.executor.run_until_stalled();
let notification = stream.expect_next();
match notification {
Event::Notification(Notification::ExpressionUpdates(expression_updates)) => {
assert_eq!(expression_updates.context_id, context_id);
let update = &expression_updates.updates.first().unwrap();
assert_eq!(update.expression_id, id);
assert_eq!(update.typename.as_deref(), Some(typename));
assert!(update.method_call.is_none());
assert!(update.from_cache);
assert!(matches!(update.payload, ExpressionUpdatePayload::Value { warnings: None }))
}
_ => panic!("Expected Notification::ExpressionUpdates"),
}
}
#[test]
fn test_execution_context() {
let root_id = Uuid::parse_str("00000000-0000-0000-0000-000000000000");
let root_id = root_id.expect("Couldn't parse uuid.");
let main = Path { root_id, segments: vec!["Main.txt".into()] };
let unit_json = json!(null);
let context_id = Uuid::default();
let method = "executionContext/canModify".to_string();
let register_options = RegisterOptions::ExecutionContextId { context_id };
let can_modify = CapabilityRegistration { method, register_options };
let register_options = RegisterOptions::ExecutionContextId { context_id };
let method = "executionContext/receivesUpdates".to_string();
let receives_updates = CapabilityRegistration { method, register_options };
let create_execution_context_response =
response::CreateExecutionContext { context_id, can_modify, receives_updates };
test_request(
|client| client.create_execution_context(&context_id),
"executionContext/create",
json!({"contextId":"00000000-0000-0000-0000-000000000000"}),
json!({
"contextId" : "00000000-0000-0000-0000-000000000000",
"canModify" : {
"method" : "executionContext/canModify",
"registerOptions" : {
"contextId" : "00000000-0000-0000-0000-000000000000"
}
},
"receivesUpdates" : {
"method" : "executionContext/receivesUpdates",
"registerOptions" : {
"contextId" : "00000000-0000-0000-0000-000000000000"
}
}
}),
create_execution_context_response,
);
test_request(
|client| client.destroy_execution_context(&context_id),
"executionContext/destroy",
json!({"contextId":"00000000-0000-0000-0000-000000000000"}),
unit_json.clone(),
(),
);
let expression_id = Uuid::default();
let local_call = LocalCall { expression_id };
let stack_item = StackItem::LocalCall(local_call);
test_request(
|client| client.push_to_execution_context(&context_id, &stack_item),
"executionContext/push",
json!({
"contextId" : "00000000-0000-0000-0000-000000000000",
"stackItem" : {
"type" : "LocalCall",
"expressionId" : "00000000-0000-0000-0000-000000000000"
}
}),
unit_json.clone(),
(),
);
test_request(
|client| client.pop_from_execution_context(&context_id),
"executionContext/pop",
json!({"contextId":"00000000-0000-0000-0000-000000000000"}),
unit_json.clone(),
(),
);
let visualization_id = Uuid::default();
let expression_id = Uuid::default();
let visualization_function = "foo";
let visualization_module = "[Foo.Bar.Baz]";
let expression = MethodPointer {
module: visualization_module.to_string(),
defined_on_type: visualization_module.to_string(),
name: visualization_function.to_string(),
};
let positional_arguments_expressions = vec![1, 2, 3].iter().map(|x| x.to_string()).collect();
let visualization_config = VisualizationConfiguration {
visualization_module: "Foo.Bar.Baz".to_string(),
execution_context_id: context_id,
expression,
positional_arguments_expressions,
};
test_request(
|client| {
client.attach_visualization(&visualization_id, &expression_id, &visualization_config)
},
"executionContext/attachVisualization",
json!({
"visualizationId" : "00000000-0000-0000-0000-000000000000",
"expressionId" : "00000000-0000-0000-0000-000000000000",
"visualizationConfig" : {
"executionContextId" : "00000000-0000-0000-0000-000000000000",
"expression" : {
"module" : "[Foo.Bar.Baz]",
"definedOnType" : "[Foo.Bar.Baz]",
"name" : "foo"
},
"visualizationModule" : "Foo.Bar.Baz",
"positionalArgumentsExpressions" : ["1", "2", "3"]
}
}),
unit_json.clone(),
(),
);
test_request(
|client| client.detach_visualization(&context_id, &visualization_id, &expression_id),
"executionContext/detachVisualization",
json!({
"contextId" : "00000000-0000-0000-0000-000000000000",
"visualizationId" : "00000000-0000-0000-0000-000000000000",
"expressionId" : "00000000-0000-0000-0000-000000000000"
}),
unit_json.clone(),
(),
);
let visualization_function = "foo";
let visualization_module = "[Foo.Bar.Baz]";
let expression = MethodPointer {
module: visualization_module.to_string(),
defined_on_type: visualization_module.to_string(),
name: visualization_function.to_string(),
};
let positional_arguments_expressions = vec!["foo"].iter().map(|x| x.to_string()).collect();
let visualization_config = VisualizationConfiguration {
visualization_module: "Foo.Bar.Baz".to_string(),
execution_context_id: context_id,
expression,
positional_arguments_expressions,
};
test_request(
|client| client.modify_visualization(&visualization_id, &visualization_config),
"executionContext/modifyVisualization",
json!({
"visualizationId" : "00000000-0000-0000-0000-000000000000",
"visualizationConfig" : {
"executionContextId" : "00000000-0000-0000-0000-000000000000",
"expression" : {
"module" : "[Foo.Bar.Baz]",
"definedOnType" : "[Foo.Bar.Baz]",
"name" : "foo"
},
"visualizationModule" : "Foo.Bar.Baz",
"positionalArgumentsExpressions" : ["foo"]
}
}),
unit_json.clone(),
(),
);
let content = b"Hello World!";
let current_version = Sha3_224::new(content);
let content = String::from_utf8_lossy(content).to_string();
let method = "text/canEdit".to_string();
let register_options = RegisterOptions::Path { path: main.clone() };
let write_capability = Some(CapabilityRegistration { method, register_options });
let open_text_file_response = response::OpenTextFile {
content,
current_version: current_version.clone(),
write_capability,
};
test_request(
|client| client.open_text_file(&main),
"text/openFile",
json!({
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : ["Main.txt"]
}
}),
json!({
"writeCapability" : {
"method" : "text/canEdit",
"registerOptions": {
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : ["Main.txt"]
}
}
},
"content" : "Hello World!",
"currentVersion" : "716596afadfa17cd1cb35133829a02b03e4eed398ce029ce78a2161d"
}),
open_text_file_response,
);
let start = Position { line: 0, character: 5 };
let end = Position { line: 0, character: 5 };
let range = TextRange { start, end };
let text = ",".to_string();
let text_edit = TextEdit { range, text };
let edits = vec![text_edit];
let old_version = Sha3_224::new(b"Hello world!");
let new_version = Sha3_224::new(b"Hello, world!");
let path = main.clone();
let edit = FileEdit { path, edits, old_version, new_version };
test_request(
|client| client.apply_text_file_edit(&edit, &true),
"text/applyEdit",
json!({
"edit" : {
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : ["Main.txt"]
},
"edits" : [
{
"range" : {
"start" : {
"line" : 0,
"character" : 5
},
"end" : {
"line" : 0,
"character" : 5
}
},
"text" : ","
}
],
"oldVersion" : "d3ee9b1ba1990fecfd794d2f30e0207aaa7be5d37d463073096d86f8",
"newVersion" : "6a33e22f20f16642697e8bd549ff7b759252ad56c05a1b0acc31dc69"
},
"execute": true
}),
unit_json.clone(),
(),
);
test_request(
|client| client.save_text_file(&main, &current_version),
"text/save",
json!({
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : ["Main.txt"]
},
"currentVersion" : "716596afadfa17cd1cb35133829a02b03e4eed398ce029ce78a2161d"
}),
unit_json.clone(),
(),
);
test_request(
|client| client.close_text_file(&main),
"text/closeFile",
json!({
"path" : {
"rootId" : "00000000-0000-0000-0000-000000000000",
"segments" : ["Main.txt"]
}
}),
unit_json,
(),
);
}

File diff suppressed because it is too large.


@ -1,74 +0,0 @@
//! Client side implementation of the Enso Protocol
//!
//! See https://enso.org/docs/developer/enso/language-server/protocol-architecture.html.
// === Features ===
#![feature(associated_type_bounds)]
#![feature(associated_type_defaults)]
#![feature(coerce_unsized)]
#![feature(trait_alias)]
#![feature(type_alias_impl_trait)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
// ==============
// === Export ===
// ==============
pub mod binary;
pub mod common;
pub mod generated;
pub mod handler;
pub mod language_server;
pub mod project_manager;
pub mod types;
#[allow(missing_docs)]
pub mod prelude {
pub use crate::traits::*;
pub use enso_prelude::*;
pub use json_rpc::prelude::*;
/// We always use local futures in our single-threaded environment.
pub use futures::future::LocalBoxFuture as BoxFuture;
pub use futures::FutureExt;
pub use futures::Stream;
pub use futures::StreamExt;
pub use std::future::Future;
pub use uuid::Uuid;
/// We want most of our futures to be static. Otherwise, they would automatically inherit
/// the lifetime of the client, which is not the desired behavior.
pub type StaticBoxFuture<T> = futures::future::LocalBoxFuture<'static, T>;
/// We want all our streams to be static. Otherwise, they would automatically inherit
/// the lifetime of the client, which is not the desired behavior.
pub type StaticBoxStream<T> = futures::stream::LocalBoxStream<'static, T>;
}
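// Illustrative sketch (function name hypothetical): producing a
// `StaticBoxFuture` with `boxed_local` from the re-exported `FutureExt`, so the
// resulting future does not borrow the client that created it.
fn ready_value(x: i32) -> crate::prelude::StaticBoxFuture<i32> {
    use crate::prelude::FutureExt;
    async move { x }.boxed_local()
}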
/// Module gathering all traits which may be used by crate's users.
pub mod traits {
pub use crate::binary::client::API;
pub use crate::binary::serialization::DeserializableRoot;
pub use crate::binary::serialization::DeserializableUnionField;
pub use crate::binary::serialization::SerializableRoot;
pub use crate::binary::serialization::SerializableUnion;
pub use crate::binary::uuid::UuidExt;
pub use crate::language_server::API as TRAIT_LanguageServerAPI;
pub use crate::project_manager::API as TRAIT_ProjectManagerAPI;
}


@ -1,559 +0,0 @@
//! Client library for the JSON-RPC-based Project Manager service.
//!
//! The all methods and types are derived from Engine RPC API described
//! here https://enso.org/docs/developer/enso/language-server/protocol-project-manager.html
//FIXME: We need to review the structures' names in Enso Protocol specification
// https://github.com/enso-org/enso/issues/708
use crate::prelude::*;
use crate::types::UTCDateTime;
use json_rpc::api::Result;
use json_rpc::make_rpc_methods;
use json_rpc::Handler;
use serde::Deserialize;
use serde::Serialize;
use std::future::Future;
use uuid::Uuid;
// =============
// === Event ===
// =============
// Project Manager has no notifications, so we create a dummy Notification type for it.
type Notification = ();
/// Event emitted by the Project Manager `Client`.
pub type Event = json_rpc::handler::Event<Notification>;
// ===================
// === RPC Methods ===
// ===================
make_rpc_methods! {
/// An interface containing all the available project management operations.
trait API {
/// Request the project manager to open a specified project. This operation also
/// includes spawning an instance of the language server opened on the specified project.
///
/// If the opened project uses an Enso version that is not installed yet, the outcome of this
/// method is defined by the `missing_component_action` argument.
#[MethodInput=OpenProjectInput,rpc_name="project/open"]
fn open_project
(&self, project_id:Uuid, missing_component_action:MissingComponentAction)
-> response::OpenProject;
/// Request the project manager to close a specified project. This operation
/// includes shutting down the language server gracefully so that it can persist state to disk
/// as needed.
#[MethodInput=CloseProjectInput,rpc_name="project/close"]
fn close_project(&self, project_id:Uuid) -> ();
/// Request the project manager to list all of the user's projects. The list of projects is
/// sorted by the open time.
#[MethodInput=ListRecentProjectsInput,rpc_name="project/list"]
fn list_projects(&self, number_of_projects:Option<u32>) -> response::ProjectList;
/// Request the creation of a new project.
#[MethodInput=CreateProjectInput,rpc_name="project/create"]
fn create_project
( &self
, name : ProjectName
, project_template : Option<String>
, version : Option<String>
, missing_component_action : MissingComponentAction
) -> response::CreateProject;
/// Request project renaming.
#[MethodInput=RenameProject,rpc_name="project/rename"]
fn rename_project(&self, project_id:Uuid, name:ProjectName) -> ();
/// Request the deletion of a project.
#[MethodInput=DeleteProjectInput,rpc_name="project/delete"]
fn delete_project(&self, project_id:Uuid) -> ();
/// Request a list of sample projects that are available to the user.
#[MethodInput=ListSamplesInput,rpc_name="project/listSample"]
fn list_samples(&self, num_projects:u32) -> response::ProjectList;
}}
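// Illustrative sketch (helper name hypothetical): opening the most recently
// used project with the generated client.
async fn open_most_recent(client: &Client) -> Option<response::OpenProject> {
    // `project/list` results are sorted by open time, so the first entry is
    // the most recent one.
    let list = client.list_projects(&Some(1)).await.ok()?;
    let latest = list.projects.first()?;
    client.open_project(&latest.id, &MissingComponentAction::Install).await.ok()
}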
// =============
// === Types ===
// =============
/// Address consisting of host and port.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct IpWithSocket {
/// Host name.
pub host: String,
/// Port number.
pub port: u16,
}
impl Display for IpWithSocket {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "ws://{}:{}", self.host, self.port)
}
}
/// Project name.
#[derive(Clone, Debug, Deserialize, Display, Eq, From, Hash, PartialEq, Serialize)]
pub struct ProjectName(String);
impl ProjectName {
/// Create a new ProjectName without any validation.
///
/// The caller is responsible for making sure that the provided string is a valid project name.
pub fn new_unchecked(name: impl Str) -> Self {
Self(name.into())
}
}
impl AsRef<str> for ProjectName {
fn as_ref(&self) -> &str {
&self.0
}
}
impl From<ProjectName> for String {
fn from(name: ProjectName) -> Self {
name.0
}
}
impl From<ProjectName> for ImString {
fn from(name: ProjectName) -> Self {
name.0.into()
}
}
/// Project normalized name.
#[derive(Clone, Debug, Deserialize, Display, Eq, From, Hash, PartialEq, Serialize)]
pub struct ProjectNormalizedName(String);
impl ProjectNormalizedName {
/// Create a new ProjectNormalizedName without any validation.
///
/// The caller is responsible for making sure that the provided string is a valid normalized
/// project name (e.g. not empty and starting with a capital letter).
pub fn new_unchecked(name: impl Str) -> Self {
Self(name.into())
}
}
impl AsRef<str> for ProjectNormalizedName {
fn as_ref(&self) -> &str {
&self.0
}
}
impl From<ProjectNormalizedName> for String {
fn from(name: ProjectNormalizedName) -> Self {
name.0
}
}
impl From<ProjectNormalizedName> for ImString {
fn from(name: ProjectNormalizedName) -> Self {
name.0.into()
}
}
/// Project information, such as name, its id and last time it was opened.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct ProjectMetadata {
/// Project's name.
pub name: ProjectName,
/// Project's namespace.
pub namespace: String,
/// Project's uuid.
pub id: Uuid,
/// Last time the project was opened.
pub last_opened: Option<UTCDateTime>,
}
/// This type specifies what action should be taken if an Engine component required to complete
/// a Project Manager operation (like `project/open`) is missing.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq)]
pub enum MissingComponentAction {
/// Will make the operation fail if any components are missing.
Fail,
/// Will try to install any missing components, unless they are marked as broken.
Install,
/// Will try to install all missing components, even if some of them are marked as broken.
ForceInstallBroken,
}
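// Illustrative sketch (test name hypothetical): the derived `Serialize` uses
// the plain variant name on the wire, matching the `"Install"` strings in the
// tests below.
#[test]
fn missing_component_action_wire_format() {
    let json = serde_json::to_string(&MissingComponentAction::Install).unwrap();
    assert_eq!(json, "\"Install\"");
}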
/// Wrappers for RPC method responses.
pub mod response {
use super::*;
/// Response of `list_projects` and `list_samples`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ProjectList {
/// List of projects.
pub projects: Vec<ProjectMetadata>,
}
/// Response of `create_project`.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct CreateProject {
/// Created project uuid.
pub project_id: Uuid,
}
/// Response of `open_project`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct OpenProject {
/// The version of the started language server represented by a semver version string.
pub engine_version: String,
/// Address of the endpoint for JSON-RPC communication.
pub language_server_json_address: IpWithSocket,
/// Address of the endpoint for binary FlatBuffers communication.
pub language_server_binary_address: IpWithSocket,
/// The name of the project as it is opened.
pub project_name: ProjectName,
/// The normalized name of the project.
pub project_normalized_name: ProjectNormalizedName,
/// The namespace of the project.
pub project_namespace: String,
}
}
// ========================
// === MockClient tests ===
// ========================
#[cfg(test)]
mod mock_client_tests {
use super::*;
use chrono::DateTime;
use json_rpc::error::RpcError;
use json_rpc::expect_call;
use json_rpc::messages::Error;
use json_rpc::Result;
use std::future::Future;
use uuid::Uuid;
fn error<T>(message: &str) -> Result<T> {
let code = 1;
let data = None;
let message = message.to_string();
let error = Error { code, data, message };
Err(RpcError::RemoteError(error))
}
fn result<T, F: Future<Output = Result<T>>>(fut: F) -> Result<T> {
let mut fut = Box::pin(fut);
fut.expect_ready()
}
#[test]
fn project_life_cycle() {
let mock_client = MockClient::default();
let expected_uuid = Uuid::default();
let creation_response = response::CreateProject { project_id: expected_uuid };
let host = "localhost".to_string();
let port = 30500;
let language_server_address = IpWithSocket { host, port };
let expected_open_result = response::OpenProject {
engine_version: "0.2.1".to_owned(),
language_server_json_address: language_server_address.clone(),
language_server_binary_address: language_server_address,
project_name: ProjectName::new_unchecked("Test"),
project_normalized_name: ProjectNormalizedName::new_unchecked("Test"),
project_namespace: "local".to_owned(),
};
let open_result = Ok(expected_open_result.clone());
let missing_component_action = MissingComponentAction::Fail;
expect_call!(mock_client.create_project(
name = ProjectName::new_unchecked("HelloWorld"),
project_template = None,
version = None,
missing_component_action = missing_component_action
) => Ok(creation_response));
expect_call!(mock_client.open_project(expected_uuid,missing_component_action) => open_result);
expect_call!(mock_client.close_project(expected_uuid) => error("Project isn't open."));
expect_call!(mock_client.delete_project(expected_uuid) => error("Project doesn't exist."));
let delete_result = mock_client.delete_project(&expected_uuid);
result(delete_result).expect_err("Project shouldn't exist.");
let name = ProjectName::new_unchecked("HelloWorld");
let response = mock_client.create_project(&name, &None, &None, &missing_component_action);
let uuid = result(response).expect("Couldn't create project").project_id;
assert_eq!(uuid, expected_uuid);
let close_result = result(mock_client.close_project(&uuid));
close_result.expect_err("Project shouldn't be open.");
let ip_with_socket = result(mock_client.open_project(&uuid, &missing_component_action));
let ip_with_socket = ip_with_socket.expect("Couldn't open project");
assert_eq!(ip_with_socket, expected_open_result);
expect_call!(mock_client.close_project(expected_uuid) => Ok(()));
result(mock_client.close_project(&uuid)).expect("Couldn't close project.");
expect_call!(mock_client.delete_project(expected_uuid) => Ok(()));
result(mock_client.delete_project(&uuid)).expect("Couldn't delete project.");
}
#[test]
fn list_projects() {
let mock_client = MockClient::default();
let project1 = ProjectMetadata {
name: ProjectName::new_unchecked("project1"),
id: Uuid::default(),
last_opened: Some(DateTime::parse_from_rfc3339("2020-01-07T21:25:26Z").unwrap()),
namespace: "local".to_owned(),
};
let project2 = ProjectMetadata {
name: ProjectName::new_unchecked("project2"),
id: Uuid::default(),
last_opened: Some(DateTime::parse_from_rfc3339("2020-02-02T13:15:20Z").unwrap()),
namespace: "local".to_owned(),
};
let expected_recent_projects = response::ProjectList { projects: vec![project1, project2] };
let sample1 = ProjectMetadata {
name: ProjectName::new_unchecked("sample1"),
id: Uuid::default(),
last_opened: Some(DateTime::parse_from_rfc3339("2019-11-23T05:30:12Z").unwrap()),
namespace: "test".to_owned(),
};
let sample2 = ProjectMetadata {
name: ProjectName::new_unchecked("sample2"),
id: Uuid::default(),
last_opened: Some(DateTime::parse_from_rfc3339("2019-12-25T00:10:58Z").unwrap()),
namespace: "test".to_owned(),
};
let expected_sample_projects = response::ProjectList { projects: vec![sample1, sample2] };
expect_call!(mock_client.list_projects(count=Some(2)) =>
Ok(expected_recent_projects.clone()));
expect_call!(mock_client.list_samples(count=2) => Ok(expected_sample_projects.clone()));
let list_recent_error = "Couldn't get recent projects.";
let list_sample_error = "Couldn't get sample projects.";
let count_limit = Some(2);
let recent_projects = result(mock_client.list_projects(&count_limit));
let recent_projects = recent_projects.expect(list_recent_error);
assert_eq!(recent_projects, expected_recent_projects);
let sample_projects = result(mock_client.list_samples(&2)).expect(list_sample_error);
assert_eq!(sample_projects, expected_sample_projects);
}
}
// ====================
// === Client tests ===
// ====================
#[cfg(test)]
mod remote_client_tests {
use super::*;
use chrono::DateTime;
use futures::task::LocalSpawnExt;
use json_rpc::messages::Message;
use json_rpc::messages::RequestMessage;
use json_rpc::test_util::transport::mock::MockTransport;
use serde_json::json;
use serde_json::Value;
use std::future::Future;
struct Fixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
fn setup_fm() -> Fixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
Fixture { transport, client, executor }
}
/// Tests making a request using the project manager:
/// * creates a PM client and uses `make_request` to make a request,
/// * checks that the request is made for `expected_method`,
/// * checks that the request input is `expected_input`,
/// * mocks receiving a response from the server with `result`,
/// * checks that the client-returned Future yields `expected_output`.
fn test_request<Fun, Fut, T>(
make_request: Fun,
expected_method: &str,
expected_input: &Value,
result: &Value,
expected_output: &T,
) where
Fun: FnOnce(&mut Client) -> Fut,
Fut: Future<Output = Result<T>>,
T: Debug + PartialEq,
{
let mut fixture = setup_fm();
let mut fut = Box::pin(make_request(&mut fixture.client));
let request = fixture.transport.expect_json_message::<RequestMessage<Value>>();
assert_eq!(request.method, *expected_method);
assert_eq!(request.params, *expected_input);
let response = Message::new_success(request.id, result);
fixture.transport.mock_peer_json_message(response);
fixture.executor.run_until_stalled();
let output = fut.expect_ok();
assert_eq!(output, *expected_output);
}
#[test]
fn test_requests() {
let unit_json = json!(null);
let project_id = Uuid::default();
let missing_component_action = MissingComponentAction::Install;
let engine_version = "1.0.0".to_owned();
let engine_version_opt = Some(engine_version);
let create_project_response = response::CreateProject { project_id };
let project_id_json = json!({"projectId":"00000000-0000-0000-0000-000000000000"});
let project_id_and_mca = json!({
"projectId" : "00000000-0000-0000-0000-000000000000",
"missingComponentAction" : "Install"
});
let engine_version = "0.2.1".to_owned();
let language_server_json_address =
IpWithSocket { host: "localhost".to_string(), port: 27015 };
let language_server_binary_address =
IpWithSocket { host: "localhost".to_string(), port: 27016 };
let project_name = ProjectName::new_unchecked("Test");
let project_normalized_name = ProjectNormalizedName::new_unchecked("Test");
let project_namespace = "test_ns".to_owned();
let open_result = response::OpenProject {
engine_version,
language_server_json_address,
language_server_binary_address,
project_name,
project_normalized_name,
project_namespace,
};
let open_result_json = json!({
"engineVersion" : "0.2.1",
"languageServerJsonAddress" : {
"host" : "localhost",
"port" : 27015
},
"languageServerBinaryAddress" : {
"host" : "localhost",
"port" : 27016
},
"projectName" : "Test",
"projectNormalizedName" : "Test",
"projectNamespace" : "test_ns",
});
let project_name = ProjectName::new_unchecked("HelloWorld");
let project_template = Some(String::from("template"));
let project_create_json = json!({
"name" : serde_json::to_value(&project_name).unwrap(),
"projectTemplate" : serde_json::to_value(&project_template).unwrap(),
"missingComponentAction" : "Install",
"version" : "1.0.0",
});
let number_of_projects = 2;
let number_of_projects_json = json!({ "numberOfProjects": number_of_projects });
let num_projects_json = json!({ "numProjects": number_of_projects });
let project1 = ProjectMetadata {
name: ProjectName::new_unchecked("project1"),
id: Uuid::default(),
last_opened: Some(DateTime::parse_from_rfc3339("2020-01-07T21:25:26Z").unwrap()),
namespace: "local".to_owned(),
};
let project2 = ProjectMetadata {
name: ProjectName::new_unchecked("project2"),
id: Uuid::default(),
last_opened: Some(DateTime::parse_from_rfc3339("2020-02-02T13:15:20Z").unwrap()),
namespace: "local".to_owned(),
};
let project_list = response::ProjectList { projects: vec![project1, project2] };
let project_list_json = json!({
"projects" : [
{
"id" : "00000000-0000-0000-0000-000000000000",
"lastOpened" : "2020-01-07T21:25:26+00:00",
"name" : "project1",
"namespace" : "local"
},
{
"id" : "00000000-0000-0000-0000-000000000000",
"lastOpened" : "2020-02-02T13:15:20+00:00",
"name" : "project2",
"namespace" : "local"
}
]
});
test_request(
|client| client.list_projects(&Some(number_of_projects)),
"project/list",
&number_of_projects_json,
&project_list_json,
&project_list,
);
test_request(
|client| client.list_samples(&number_of_projects),
"project/listSample",
&num_projects_json,
&project_list_json,
&project_list,
);
test_request(
|client| client.open_project(&project_id, &missing_component_action),
"project/open",
&project_id_and_mca,
&open_result_json,
&open_result,
);
test_request(
|client| client.close_project(&project_id),
"project/close",
&project_id_json,
&unit_json,
&(),
);
test_request(
|client| client.delete_project(&project_id),
"project/delete",
&project_id_json,
&unit_json,
&(),
);
test_request(
|client| {
client.create_project(
&project_name,
&project_template,
&engine_version_opt,
&missing_component_action,
)
},
"project/create",
&project_create_json,
&project_id_json,
&create_project_response,
);
}
}


@ -1,80 +0,0 @@
//! Common types of JSON-RPC-based Enso services used by both Project Manager and Language Server.
use crate::prelude::*;
use crate::binary;
use serde::Deserialize;
use serde::Serialize;
// ===================
// === UTCDateTime ===
// ===================
/// Time in UTC time zone.
pub type UTCDateTime = chrono::DateTime<chrono::FixedOffset>;
// ================
// === Sha3_224 ===
// ================
/// SHA3-224 hash digest.
#[derive(Hash, Debug, Display, Clone, PartialEq, Eq, Serialize, Deserialize, Deref)]
pub struct Sha3_224(String);
impl Sha3_224 {
/// Create a new SHA3-224 digest of arbitrary `data`.
pub fn new(data: &[u8]) -> Self {
use sha3::Digest;
let mut hasher = sha3::Sha3_224::new();
hasher.input(data);
hasher.into()
}
/// Create a new SHA3-224 digest of arbitrary data split into chunks.
pub fn from_parts<'a>(parts: impl IntoIterator<Item = &'a [u8]>) -> Self {
use sha3::Digest;
let mut hasher = sha3::Sha3_224::new();
for part in parts {
hasher.input(part)
}
hasher.into()
}
}
impl From<sha3::Sha3_224> for Sha3_224 {
fn from(hasher: sha3::Sha3_224) -> Self {
use sha3::Digest;
let result = hasher.result();
let digest = hex::encode(&result[..]);
Self(digest)
}
}
impl From<binary::message::EnsoDigest> for Sha3_224 {
fn from(checksum: binary::message::EnsoDigest) -> Self {
let digest = hex::encode(checksum.bytes);
Self(digest)
}
}
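// Illustrative sketch (test name hypothetical): hashing in chunks with
// `from_parts` yields the same digest as hashing the concatenated buffer.
#[test]
fn chunked_hash_matches_whole() {
    let whole = Sha3_224::new(b"Hello, world!");
    let parts = Sha3_224::from_parts([b"Hello, ".as_slice(), b"world!".as_slice()]);
    assert_eq!(whole, parts);
}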
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn sha3_224() {
let digest = Sha3_224::new(b"abc");
let expected = "e642824c3f8cf24ad09234ee7d3c766fc9a3a5168d0c94ad73b46fdf".to_string();
assert_eq!(digest.to_string(), expected);
}
}


@ -1,16 +0,0 @@
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
let result = 2 + 2;
assert_eq!(result, 4);
}
}


@ -1,209 +0,0 @@
# Next Release
This update contains major performance improvements and exposes new user
privacy settings. We will work towards stabilizing it in the coming weeks so
that these updates can ship in a stable release before the end of the year.
<br/>![New Features](/docs/assets/tags/new_features.svg)
#### Visual Environment
- [You can now launch missiles directly from the GUI][79270]. It was technically
possible since version 3.0.0-alpha.7, but it was never exposed as a button.
- [The graph editor stops shaking and running away when you are pasting
JavaScript code][79271].
#### Runtime
- [The JIT compiler got new optimizations and produces code up to 10x faster
than C++][79272].
- [You do not need to keep your computer in -20°C to prevent the engine from
crashing anymore][79273].
#### Libraries
- [The new JSON library allows you to parse 2Gb files in 50ms][79274].
- [The Regexp library now exposes methods to test the expressions directly on
humans][79275].
<br/>![Bug Fixes](/docs/assets/tags/bug_fixes.svg)
#### Visual Environment
- [You can now launch missiles directly from the GUI][79270]. It was technically
possible since version 3.0.0-alpha.7, but it was never exposed as a button.
- [The graph editor stops shaking and running away when you are pasting
JavaScript code][79271].
#### Runtime
- [The JIT compiler got new optimizations and produces code up to 10x faster
than C++][79272].
- [You do not need to keep your computer in -20°C to prevent the engine from
crashing anymore][79273].
#### Libraries
- [The new JSON library allows you to parse 2Gb files in 50ms][79274].
- [The Regexp library now exposes methods to test the expressions directly on
humans][79275].
<br/>![New Learning Resources](/docs/assets/tags/new_learning_resources.svg)
#### Visual Environment
- [You can now launch missiles directly from the GUI][79270]. It was technically
possible since version 3.0.0-alpha.7, but it was never exposed as a button.
- [The graph editor stops shaking and running away when you are pasting
JavaScript code][79271].
#### Runtime
- [The JIT compiler got new optimizations and produces code up to 10x faster
than C++][79272].
- [You do not need to keep your computer in -20°C to prevent the engine from
crashing anymore][79273].
#### Libraries
- [The new JSON library allows you to parse 2Gb files in 50ms][79274].
- [The Regexp library now exposes methods to test the expressions directly on
humans][79275].
<br/>![Release Notes](/docs/assets/tags/release_notes.svg)
#### Visual Environment
- [You can now launch missiles directly from the GUI][79270]. It was technically
possible since version 3.0.0-alpha.7, but it was never exposed as a button.
- [The graph editor stops shaking and running away when you are pasting
JavaScript code][79271].
#### Runtime
- [The JIT compiler got new optimizations and produces code up to 10x faster
than C++][79272].
- [You do not need to keep your computer in -20°C to prevent the engine from
crashing anymore][79273].
#### Libraries
- [The new JSON library allows you to parse 2Gb files in 50ms][79274].
- [The Regexp library now exposes methods to test the expressions directly on
humans][79275].
[79270]: http://github.com/ticket
[79271]: http://github.com/ticket
[79272]: http://github.com/ticket
[79273]: http://github.com/ticket
[79274]: http://github.com/ticket
[79275]: http://github.com/ticket
<br/>
# Enso 47.0.0-alpha.8 (2049-01-22)
This update contains major performance improvements and exposes new user
privacy settings. We will work towards stabilizing it in the coming weeks so
that these updates can ship in a stable release before the end of the year.
<br/>![New Features](/docs/assets/tags/new_features.svg)
#### Visual Environment
- [You can now launch missiles directly from the GUI][79270]. It was technically
possible since version 3.0.0-alpha.7, but it was never exposed as a button.
- [The graph editor stops shaking and running away when you are pasting
JavaScript code][79271].
#### Runtime
- [The JIT compiler got new optimizations and produces code up to 10x faster
than C++][79272].
- [You do not need to keep your computer in -20°C to prevent the engine from
crashing anymore][79273].
#### Libraries
- [The new JSON library allows you to parse 2Gb files in 50ms][79274].
- [The Regexp library now exposes methods to test the expressions directly on
humans][79275].
<br/>![Bug Fixes](/docs/assets/tags/bug_fixes.svg)
#### Visual Environment
- [You can now launch missiles directly from the GUI][79270]. It was technically
possible since version 3.0.0-alpha.7, but it was never exposed as a button.
- [The graph editor stops shaking and running away when you are pasting
JavaScript code][79271].
#### Runtime
- [The JIT compiler got new optimizations and produces code up to 10x faster
than C++][79272].
- [You do not need to keep your computer in -20°C to prevent the engine from
crashing anymore][79273].
#### Libraries
- [The new JSON library allows you to parse 2Gb files in 50ms][79274].
- [The Regexp library now exposes methods to test the expressions directly on
humans][79275].
<br/>![New Learning Resources](/docs/assets/tags/new_learning_resources.svg)
#### Visual Environment
- [You can now launch missiles directly from the GUI][79270]. It was technically
possible since version 3.0.0-alpha.7, but it was never exposed as a button.
- [The graph editor stops shaking and running away when you are pasting
JavaScript code][79271].
#### Runtime
- [The JiT compiler got new optimizations and produces code up to 10x faster
than C++][79272].
- [You do not need to keep your computer in -20°C to prevent the engine from
crashing anymore][79273].
#### Libraries
- [The new JSON library allows you to parse 2GB files in 50ms][79274].
- [The Regexp library now exposes methods to test the expressions directly on
humans][79275].
<br/>![Release Notes](/docs/assets/tags/release_notes.svg)
#### Visual Environment
- [You can now launch missiles directly from the GUI][79270]. It was technically
possible since version 3.0.0-alpha.7, but it was never exposed as a button.
- [The graph editor stops shaking and running away when you are pasting
JavaScript code][79271].
#### Runtime
- [The JIT compiler got new optimizations and produces code up to 10x faster
than C++][79272].
- [You do not need to keep your computer in -20°C to prevent the engine from
crashing anymore][79273].
#### Libraries
- [The new JSON library allows you to parse 2GB files in 50ms][79274].
- [The Regexp library now exposes methods to test the expressions directly on
humans][79275].
[79270]: http://github.com/ticket
[79271]: http://github.com/ticket
[79272]: http://github.com/ticket
[79273]: http://github.com/ticket
[79274]: http://github.com/ticket
[79275]: http://github.com/ticket
<br/>
# Enso 47.0.0-alpha.7 (2049-01-07)
...

View File

@ -1,332 +0,0 @@
---
layout: developer-doc
title: Development & Contributing Guide
category: summary
tags: [summary, contributing]
---
# Development & Contributing Guide
Thank you for your interest in contributing to the Enso IDE! We believe that
only through community involvement can Enso be the best it can be! There are a
whole host of ways to contribute, and every single one is appreciated.
<br/>
## Reporting Issues
**If you are concerned that your bug publicly presents a security risk to the
users of Enso, please contact [security@enso.org](mailto:security@enso.org).**
While it's never great to find a bug, they are a reality of software and
software development! We can't fix or improve on the things that we don't know
about, so report as many bugs as you can! If you're not sure whether something
is a bug, file it anyway!
Even though GitHub search can sometimes be a bit hard to use, we'd appreciate
it if you could
[search](https://github.com/enso-org/enso/search?q=&type=Issues&utf8=%E2%9C%93)
for your issue before filing a bug, as it's possible that someone else has
already reported it. We know the search isn't the best, and it can be hard to
know what to search for, so we really don't mind if you _do_ submit a
duplicate!
Opening an issue is as easy as following
[this link](https://github.com/enso-org/ide/issues/new?template=bug-report.md)
and filling out the fields. The template is intended to collect all the
information we need to best diagnose the issue, so please take the time to fill
it out accurately.
The reproduction steps are particularly important, as the more easily we can
reproduce it, the faster we can fix the bug! It's also helpful to have the
version of the IDE, as that will let us know whether the bug is
operating-system- or architecture-specific.
<br/>
## Development Environment
The project builds on macOS, Windows, and Linux. Cross-platform targets work
well on all of these platforms; however, the macOS package will lack the right
application icon if built on Linux or Windows, due to non-trivial icon
generation on those platforms. To develop the source code you will need the
following setup:
- **The Rust Toolchain**
This project uses several features available only in the nightly Rust
toolchain. Please use [the Rust toolchain installer](https://rustup.rs) to
manage Rust toolchains. It will automatically download the toolchain needed to
build the project.
In addition, some custom CLI programs need to be installed manually:
```bash
rustup toolchain install stable # Stable toolchain required for the following tools.
cargo +stable install wasm-pack # Install the wasm-pack toolkit.
cargo +stable install cargo-watch # To enable `./run wasm watch` utility
```
Make sure that your `PATH` environment variable is set up correctly, so that
the binaries installed by cargo
([typically to `$HOME/.cargo/bin`](https://doc.rust-lang.org/cargo/commands/cargo-install.html#description))
can be run from the command line; verify this by running
`wasm-pack --version`.
- **Node and Node Package Manager LTS**
To build the web and desktop applications you will need
[the latest LTS version of node and npm](https://nodejs.org/en/download). Even
minor release changes are known to cause serious issues, thus **we provide
support for the latest LTS version only. Please do not report build issues if
you use other versions.** The easiest way to set up the proper version is by
installing
[a Node version manager that automatically picks up the correct version](https://github.com/shadowspawn/node-version-usage#supporting-products),
like [fnm](https://github.com/Schniz/fnm).
- **(Optional) FlatBuffer compiler `flatc`**
This dependency is needed only if you need to update the files generated by
the FlatBuffer compiler from the Engine Services binary protocol description.
Otherwise, relying on the generated files stored in this repository is fine.
`flatc` must be a version _newer than 1.12_ due to
[this bug](https://github.com/google/flatbuffers/issues/5055). As of writing
this text there are no official releases with this issue fixed; however,
current binaries can be obtained from the project's CI
[build artifacts](https://github.com/google/flatbuffers/actions?query=branch%3Amaster).
`flatc` builds from 8 May 2020 onwards have been confirmed to work.
After placing `flatc` in `PATH` you need to define the `ENSO_IDE_ENABLE_FLATC`
environment variable to explicitly enable regeneration of the interface files.
`flatc` is run as part of the `build.rs` script of the `enso-protocol` package.
- **(Optional) The latest version of `wasm-opt`**
The version of `wasm-opt` that comes bundled with `wasm-pack` can be out of
date and slow. We recommend installing a newer version from the
[binaryen releases](https://github.com/WebAssembly/binaryen/releases), or
[compiling the newest version from scratch](https://github.com/WebAssembly/binaryen#building=).
`wasm-pack` will pick up the locally installed version automatically and use
it instead of the bundled one. Binaryen packages are also available through
some system package managers (apt/pacman/brew); check whether the version
there is newer than the one bundled with `wasm-pack` (which is true for all of
them as of wasm-pack 0.10.2, which bundles version 90, released in December
2019).
<br/>
## Working with sources
Please be sure to carefully read the
[Rust style guide 1](contributing/style-guide.md) and the
[Rust style guide 2](https://enso.org/docs/developer/ide/contributing/style-guide.html)
before contributing to the codebase.
We also use [`prettier`](https://prettier.io/) for the JavaScript and
Markdown files in our code base. If you have not installed it already, you can
do so via `npm install prettier`. To use it manually, run `prettier --write`
on the files from the command line. Alternatively, there are plugins for many
IDEs available to do this for you.
### Repository Structure Overview
**Note**: Currently, the Enso repository is going through a process of
refactoring, which is not finished yet: the Engine files are still not in
`app/engine` where they ought to be, but in the root directory instead.
The root directory contains the `Cargo.toml` and `build.sbt` files, allowing
you to open all Rust or Scala code as a single project in your favorite IDE.
There are also `run` and `run.cmd` scripts used for building and running the
Enso IDE (see the next section for details).
The subdirectories of interest are:

- `app`: The actual products delivered in this repository:
  - `gui`: A Rust crate compiled to a WASM library with all the logic of the
    GUI layer. The library is used by both the desktop application and the
    cloud environment. For further documentation see the documentation of the
    crate (at the top of the `src/lib.rs` file).
  - `ide-desktop`: The desktop version of the Enso IDE, implemented as an
    Electron application which spawns the backend services, loads the WASM GUI
    library, and runs the main entry point.
- `engine`: (In the future: see the note at the beginning of this section.)
  The implementation of the language itself: CLI tools like the compiler or
  interpreter, as well as the services used as a backend for the Enso IDE
  (Language Server and Project Manager).
- `lib`: All libraries that are not the main components of our application.
  They are grouped by language. The most prominent are:
  - `rust/prelude`: A library containing the most popular utilities and
    imports. It should be imported in each Rust module - see the Contributing
    guidelines.
  - `rust/ensogl`: The EnsoGL framework for creating efficient GUI
    applications in WASM.
  - `rust/frp`: A library that allows following the Functional Reactive
    Programming paradigm in Rust.
- `build`: The build script that is wrapped by the `run` script.
- `integration-test`: A single crate with all integration tests of our
  applications.
The other directories are the auto-generated `dist` and `target`, or
(currently) the Engine files, which will be moved to `app/engine` soon.
### Development
As this is a multipart project with many complex dependencies, it is equipped
with a build script which both validates your working environment and takes
care of providing the most suitable compilation flags for a particular
development stage. The build script is invoked by running the following script
from the working copy root:

- `./run` in bash-compatible environments, like Linux or macOS;
- `.\run.cmd` on Windows.

For brevity, this guide will use the `./run` form from here onwards.
In general, `./run` should also work on Windows ports of `bash` (like the ones
provided by `git` or MSYS2), though these configurations are not tested.
Run `./run --help` to learn about the available commands and options. Some
subcommands allow passing additional arguments following a `--` argument to the
underlying call. For example, `./run ide build -- FLAG` will pass the `FLAG`
flag to `wasm-pack` (the Rust WASM build tool). The most common options are
presented below:
- **Interactive mode** Run `./run ide watch` to start a local web-server and a
  source-file watch utility which will build the project on every change. Open
  `http://localhost:8080` (the port may vary and will be reported in the
  terminal if `8080` was already in use) to run the application, or
  `http://localhost:8080/?entry` to open the list of example demo scenes.
  Please remember to disable the cache in your browser during development! By
  default, the script disables heavyweight optimizations to provide an
  interactive development experience.
- **Production mode** In order to compile in a production mode (enable all
optimizations, strip WASM debug symbols, minimize the output binaries, etc.),
run `./run gui build`. To create platform-specific packages and installers use
`./run ide build` instead. The final executables will be located at
`dist/ide`.
- **Selective mode** In order to compile only part of the project, and thus
  drastically shorten the incremental compile time, you are advised to use the
  selective compilation mode by passing the `--crate-path` option to the `build`
  or `watch` command, e.g. `./run ide watch --crate-path ensogl/examples` to
  compile only the renderer-related example scenes. Please note that in order
  to run a scene in a web browser, the scene has to be compiled and has to
  expose a public function with a name starting with `entry_point_`. Thus, if
  you compile only a selected crate, you will have access only to the example
  scenes that were defined or re-exported by that crate. In particular, the
  `ide` crate exposes the `entry_point_ide` function, so you have to compile it
  to test your code in the Enso IDE.
### Using IDE as a Library
In case you want to use the IDE as a library, for example to embed it into
another website, you need to first build it using `./run gui build`; the
necessary artifacts are located at `dist/gui`. In particular,
`dist/gui/assets/index.js` defines a function `window.enso.main(cfg)` which you
can use to run the IDE. Currently, the configuration argument can contain the
following options:
- `entry` - the entry point, one of predefined scenes. Set it to empty string to
see the list of possible entry points.
- `project` - the project name to open after loading the IDE.
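For example, a page embedding the IDE might call this function as follows.
This is only a minimal sketch: the project name is a placeholder, and the page
is assumed to have loaded `dist/gui/assets/index.js` beforehand:

```javascript
// Assumes dist/gui/assets/index.js has already been loaded by the page.
window.enso.main({
  // entry: "", // Uncomment and leave empty to list the predefined entry points.
  project: "My_Project", // Placeholder: the project to open after loading.
});
```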
### Testing, Linting, and Validation
After changing the code it's always a good idea to lint and test it. We have
prepared several scripts which automate the process as much as possible:
- **Size Validation** Use `./run wasm build` to check that the size of the
  final binary has not grown too much in comparison to the previous release.
  Watching the resulting binary size is one of the most important
  responsibilities of each contributor, in order to keep the project small and
  suitable for web-based usage. In case the size exceeds the limits:
  - If the PR does not include any new libraries, you are allowed to increase
    the limit by 10KB. In case the limit is exceeded by more than 10KB, check
    which part of the code contributes to it, and talk about it with the code
    owner.
  - If the PR does include new libraries, you are allowed to increase the limit
    by 10KB, but you should also consider whether it is possible to get the
    same results without the new library (even by implementing a few lines of
    code from the library in the sources of the project).
  - If the PR does include new libraries, and the limit is exceeded by more
    than 10KB, check which part of the code contributed to it, and talk about
    it with the code owner.
  - If the PR does include new libraries, and the limit is exceeded by more
    than 50KB, it will probably not be merged. Research possible alternatives
    before talking with the code owner about this case.
- **Testing** For the test suite to run you need a current version of Chrome
  installed. Use `./run wasm test` to run both the unit and web-based visual
  tests.
- **Integration Tests** The integration tests are gathered in the
  `integration-test` crate. You can run them with the
  `./run ide integration-test` command. The script will spawn the required
  Engine process.
  - To run one test suite, add `-- --test <suite-name>` at the end of the
    command options. `<suite-name>` is the name of a file in the
    `integration-test/tests` directory without its extension, for example
    `graph_editor`.
  - The integration tests can create and leave new Enso projects. **Keep this
    in mind when running the script with your own backend (the
    `--external-backend` option)**. The Engine spawned by the script will use a
    dedicated workspace created in a temporary directory, so the user workspace
    will not be affected.
- **Linting** Please be sure to fix all errors reported by `./run lint` before
  creating a pull request to this repository.
### Development Branches
The following branches are used to develop the product:
- **wip/[github_user_name]/[feature]** Feature branches. These are temporary
branches used by the team to develop a particular feature.
- **develop** Contains the most recent changes to the product. After successful
review, the feature branches are merged here. Each commit to this branch will
result in a nightly build of the product accessible as CI artifacts.
- **unstable** Contains only those commits which can be considered unstable
  product releases. Each commit to this branch will result in an unstable
  release of the product and will be published on GitHub as a pre-release. The
  build version and build description will be automatically fetched from the
  newest `CHANGELOG.md` entry, and the build will fail if the version is not of
  the form `[major].[minor].[patch]-[sfx]`, where `[sfx]` is one of
  `alpha.[n]`, `beta.[n]`, or `rc.[n]`, and `[n]` is an unstable build number.
- **stable** Contains only those commits which can be considered stable product
  releases. Each commit to this branch will result in a stable release of the
  product and will be published on GitHub as a release. The build version and
  build description will be automatically fetched from the newest
  `CHANGELOG.md` entry, and the build will fail if the version is not of the
  form `[major].[minor].[patch]`.
### Forcing CI builds
By default, CI does not build artifacts from `wip` and `develop` branches in
order to save time and resources. If you want the artifacts to be built for
your PR, simply add `[ci build]` anywhere in the PR description.
### Skipping CHANGELOG.md change assertions
By default, CI will fail if the `CHANGELOG.md` file is not updated. However,
sometimes a PR does not change anything significant in the final product. In
that case, you can simply add the 'CI: No changelog needed' label to the PR to
skip this assertion.
### Publishing Results
All new changes should be proposed in the form of Pull Requests (PRs) to this
repository. Each PR should contain changes to documentation and `CHANGELOG.md`
if applicable.
## Changelog
Please remember to update `CHANGELOG.md` on every new bug fix or feature
implementation. Please note that `CHANGELOG.md` is used to establish the
current product version (the `run` script extracts it from the newest changelog
entry). Thus, be sure to always increase the newest version in the changelog
after a release, otherwise CI will fail. Please use `docs/CHANGELOG_TEMPLATE.md`
as the template to create new changelog entries. Please note that there is a
special syntax for defining features of the upcoming release: the newest
changelog entry can have the title "Next Release". In such a case, the build
version will be `0.0.0`, and CI will fail when trying to publish it as a
release.

View File

@ -1,13 +0,0 @@
---
layout: docs-index
title: Enso IDE documentation
category: summary
tags: [doc-index]
---
# Enso IDE documentation
- [**Contributing guidelines**](./contributing/README.md) - helpful instructions
for anyone who wants to contribute.
- [**Product specification**](./product/README.md) - a specification from the
user perspective.

View File

@ -1,7 +0,0 @@
<svg version="1.1" width="91" height="36" xmlns="http://www.w3.org/2000/svg">
<style>
.txt { font: 15px sans-serif; fill:white; }
</style>
<rect width="91" height="36" rx="18" fill="#e02424"/>
<text x="12" y="23" class="txt">Bug Fixes</text>
</svg>


View File

@ -1,7 +0,0 @@
<svg version="1.1" width="117" height="36" xmlns="http://www.w3.org/2000/svg">
<style>
.txt { font: 15px sans-serif; fill:white; }
</style>
<rect width="117" height="36" rx="18" fill="#1c64f2"/>
<text x="12" y="23" class="txt">New Features</text>
</svg>


View File

@ -1,7 +0,0 @@
<svg version="1.1" width="192" height="36" xmlns="http://www.w3.org/2000/svg">
<style>
.txt { font: 15px sans-serif; fill:white; }
</style>
<rect width="192" height="36" rx="18" fill="#047481"/>
<text x="12" y="23" class="txt">New Learning Resources</text>
</svg>


View File

@ -1,7 +0,0 @@
<svg version="1.1" width="123" height="36" xmlns="http://www.w3.org/2000/svg">
<style>
.txt { font: 15px sans-serif; fill:white; }
</style>
<rect width="123" height="36" rx="18" fill="#5850ec"/>
<text x="12" y="23" class="txt">Release Notes</text>
</svg>


View File

@ -1,15 +0,0 @@
---
layout: section-summary
title: Contributing
category: contributing
tags: [contributing]
---
# Enso IDE Contributing Documentation
This directory contains helpful resources for anyone who wants to start Enso IDE
development. The main guideline is available [here](../CONTRIBUTING.md).
- [**Enso Team Process**](./process.md) - the development cycle in the core Enso
IDE team.
- [**Style Guide**](./style-guide.md) - Our coding standards.

View File

@ -1,27 +0,0 @@
---
layout: developer-doc
title: Team Process
category: contributing
tags: [contributing]
---
# Enso IDE Team Process
This document specifies our core team workflow, described as the lifecycle of a
task:
- A newly created task should be appropriately described, but not estimated
  yet - it will be estimated during the Backlog Refinement meeting. The task is
  put in the "New Tasks" column.
- At the beginning of the sprint, team leads put into the "To Refine" column
  all tasks which will be refined during the next Backlog Refinement.
- Each team member should read the descriptions of the tasks in the "To Refine"
  column, ask questions, and raise any concerns. All the conversation should be
  recorded in the issue's comments.
- During Backlog Refinement we confirm that the task description is clear and
  estimate it. The estimate is expressed in work days of one person, and should
  include the review process. The task is then moved to the "Backlog" column.
  If it turns out that there is no agreement about the task's scope and
  estimation, it may be postponed to the next Backlog Refinement.
- During the Planning meeting the team decides which tasks are taken into the
  next sprint and assigns them.

View File

@ -1,220 +0,0 @@
---
layout: style-guide
title: Rust Style Guide
category: style-guide
tags: [style-guide, contributing]
---
# Rust style guide.
All of the codebase should be formatted by `rustfmt`. However, the code style
is way more than just formatting. In many cases formatting can be automated.
According to the rustfmt docs: "formatting code is a mostly mechanical task
which takes both time and mental effort. By using an automatic formatting tool,
a programmer is relieved of this task and can concentrate on more important
things." While in many cases this is true, if the code author does not take
the extra effort to make their code pretty by refactoring long lines into
variables or moving code to specific modules or sections, the formatting tool
will produce code that is hard to read and hard to write. Thus, it is important
to take the time to write code in such a way that we can be proud of its
quality. The following document provides you with a detailed guide regarding
the code quality we are looking for.
## Code formatting in macros.
Unfortunately, `rustfmt` does not work inside macros. Thus, such code should
be manually formatted the same way `rustfmt` would format it.
## Submodules and imports.
- **Design your files to be imported as modules.**
Design the names of your libraries, structs, and functions to be imported as
modules. For example, prefer the import `use graph;` and its usage
`graph::Node::new()` over `use graph::new_node`. This design minimizes the
number of imports and allows related modules to bring shorter names into
scope.
- **Don't use relative imports.**
Do not use `super::` or `self::` imports in files (you can use them in locally
defined modules). Use absolute imports or imports from local submodules only.
- **Use Enso Formatter to format your imports**
Run the `build/enso-formatter` script (e.g. by running
`cargo run -p enso-formatter`) to format imports in all files before
contributing your PR.
## Sections.
Source files should be divided into sections. Section headers should be placed
before each new "concept" defined in a file. By "concept" we normally mean a
structure with related implementations. In case related implementations use some
helper structs with very small implementations, these helper structs may be
defined in the same section. Moreover, the code in each section should be
divided into sub-sections, grouping related definitions. At least one section
should be defined in a file (if there is at least one struct definition as
well). For example:
```rust
// =================
// === AxisOrder ===
// =================

/// Defines the order in which particular axis coordinates are processed. Used
/// for example to define the rotation order in `DisplayObject`.
pub enum AxisOrder { XYZ, XZY, YXZ, YZX, ZXY, ZYX }

impl Default for AxisOrder {
    fn default() -> Self { Self::XYZ }
}


// =================
// === Transform ===
// =================

/// Defines the order in which transformations (scale, rotate, translate) are
/// applied to a particular object.
pub enum TransformOrder {
    ScaleRotateTranslate,
    ScaleTranslateRotate,
    RotateScaleTranslate,
    RotateTranslateScale,
    TranslateRotateScale,
    TranslateScaleRotate,
}

impl Default for TransformOrder {
    fn default() -> Self { Self::ScaleRotateTranslate }
}


// =============================
// === HierarchicalTransform ===
// =============================

pub struct HierarchicalTransform<OnChange> {
    transform:        Transform,
    transform_matrix: Matrix4<f32>,
    origin:           Matrix4<f32>,
    matrix:           Matrix4<f32>,
    pub dirty:        dirty::SharedBool<OnChange>,
    pub logger:       Logger,
}

impl<OnChange> HierarchicalTransform<OnChange> {
    pub fn new(logger: Logger, on_change: OnChange) -> Self {
        let logger_dirty     = logger.sub("dirty");
        let transform        = default();
        let transform_matrix = Matrix4::identity();
        let origin           = Matrix4::identity();
        let matrix           = Matrix4::identity();
        let dirty            = dirty::SharedBool::new(logger_dirty, on_change);
        Self { transform, transform_matrix, origin, matrix, dirty, logger }
    }
}


// === Getters ===

impl<OnChange> HierarchicalTransform<OnChange> {
    pub fn position(&self) -> &Vector3<f32> {
        &self.transform.position
    }

    pub fn rotation(&self) -> &Vector3<f32> {
        &self.transform.rotation
    }

    ...
}


// === Setters ===

impl<OnChange: Callback0> HierarchicalTransform<OnChange> {
    pub fn position_mut(&mut self) -> &mut Vector3<f32> {
        self.dirty.set();
        &mut self.transform.position
    }

    pub fn rotation_mut(&mut self) -> &mut Vector3<f32> {
        self.dirty.set();
        &mut self.transform.rotation
    }

    ...
}
```
## Multiline Expressions
Most (preferably all) expressions should be single line. Multiline expressions
are hard to read and introduce noise in the code. Often, it is also an indicator
of code that is not properly refactored. Try to refactor parts of multiline
expressions to well-named variables, and divide them to several single-line
expressions.
Example of poorly formatted code:
```rust
pub fn new() -> Self {
    let shape_dirty = ShapeDirty::new(logger.sub("shape_dirty"),
        on_dirty.clone());
    let dirty_flag = MeshRegistryDirty::new(logger.sub("mesh_registry_dirty"),
        on_dirty);
    Self { shape_dirty, dirty_flag }
}
```
Example of properly formatted code:
```rust
pub fn new() -> Self {
    let sub_logger  = logger.sub("shape_dirty");
    let shape_dirty = ShapeDirty::new(sub_logger, on_dirty.clone());
    let sub_logger  = logger.sub("mesh_registry_dirty");
    let dirty_flag  = MeshRegistryDirty::new(sub_logger, on_dirty);
    Self { shape_dirty, dirty_flag }
}
```
## Getters and Setters
Getters do not have the `get_` prefix, while setters do. If a setter is provided
(method with the `set_` prefix), a `mut` accessor should be provided as well.
The correct way of defining getters and setters is presented below:
```rust
fn field(&self) -> &Type {
    &self.field
}

fn field_mut(&mut self) -> &mut Type {
    &mut self.field
}

fn set_field(&mut self, val: Type) {
    *self.field_mut() = val;
}
```
## Trait exporting
All names should be designed to be used in a qualified fashion. However, this
makes one situation tricky. In order to use methods defined in a trait, it has
to be in scope. Consider a trait `display::Object`. We want to use it as a
function bound like `fn test<T:display::Object>(t:T) {...}`, and we also want
to use methods defined in this trait (so it has to be in scope). In such a
case, `Clippy` warns that `display::Object` is an unnecessary qualification and
could be replaced simply by `Object`, which is not what we want. Thus, in order
to export traits, please always rename them using the following convention:
```rust
/// Common traits.
pub mod traits {
    // Read the Rust Style Guide to learn more about the used naming.
    pub use super::Object as TRAIT_Object;
    pub use super::ObjectOps as TRAIT_ObjectOps;
}
```
Having such a definition, we can import the traits into scope using
`use display::object::traits::*`, and we will no longer get any warnings about
unnecessary qualification.

View File

@ -1,15 +0,0 @@
---
layout: section-summary
title: Product Documentation
tags: [product]
---
# Enso IDE Product Documentation
This section contains a detailed specification of the Enso IDE from the user
perspective. The implementation is documented in the Rust code and in the
crate's `docs` directory.
- [**List of Shortcuts**](./shortcuts.md)
- [**Visualizations**](./visualizations.md)
- [**Searcher in Graph Editor**](./searcher.md)

View File

@ -1,34 +0,0 @@
---
layout: developer-doc
title: Searcher Panel In Graph Editor
category: product
tags: [product]
---
# Searcher Panel In Graph Editor
### Behaviour
The Searcher Panel can be brought to the screen in two different ways:
- when the user starts to edit a node's expression - the node becomes the
  Searcher input and the panel appears below it,
- when the user presses tab with the mouse over the Graph Editor Panel - a new
  Searcher input appears with the Searcher panel below it. Additionally, if
  there is exactly one node selected, a connection is displayed between the
  selected node and the Searcher input.
### Suggestion of Node Expressions
The suggestion list is obtained from the Engine using the `search/completion`
method of the Language Server. The parameters of the call depend on the current
Searcher input, the suggestions picked so far, and whether we are adding a node
connected to the selection. The current implementation can be found in the
`ide::controller::searcher` module.
### Suggestion Database
The `search/completion` method of the Language Server returns a list of keys to
the Suggestion Database instead of whole entries. The database is retrieved by
the IDE when a project is opened, and the IDE keeps it up to date. The database
is implemented in the `ide::model::suggestion_database` module.

View File

@ -1,147 +0,0 @@
---
layout: developer-doc
title: Shortcuts
category: product
tags: [product, ui]
---
## General Assumptions
#### The <kbd>cmd</kbd> key.
The <kbd>cmd</kbd> key was introduced to make the shortcuts consistent across
platforms. It is defined as <kbd>command</kbd> on macOS, and as <kbd>ctrl</kbd>
on Windows and Linux.
#### Keyboard-only Workflow
The GUI and all shortcuts were designed in a way to allow both efficient
mouse-only as well as keyboard-only workflows. In most cases, there is a
relation between mouse and keyboard shortcuts, namely, the `left-mouse-button`
corresponds to `enter`. For example, stepping into a node is done by either
double clicking the node, or just pressing the enter key.
#### Missing / not working shortcuts
Some of the shortcuts presented below are marked with the :warning: icon, which
means that they are planned but not yet implemented. Feel free to contribute
and help us implement them!
Shortcuts marked with the :bangbang: icon should work, but are reported to be
broken and require further investigation.
## Graph Editor
#### General Shortcuts
| Shortcut | Action |
| ------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| <kbd>cmd</kbd>+<kbd>alt</kbd>+<kbd>shift</kbd>+<kbd>t</kbd> | Toggle light/dark application style. Currently doesn't work properly, as the Theme Switcher is not created yet. (https://github.com/enso-org/ide/issues/795) |
| <kbd>ctrl</kbd>+<kbd>`</kbd> | Show Code Editor. Please note that the Code Editor implementation is in a very early stage and you should not use it. Even just opening it can cause errors in the IDE. Do not try to use the graph editor while the code editor tab is open. |
| <kbd>cmd</kbd>+<kbd>o</kbd> | Open project |
| <kbd>cmd</kbd>+<kbd>s</kbd> | Save module |
| <kbd>cmd</kbd>+<kbd>shift</kbd>+<kbd>r</kbd> | Restore module from last save |
| <kbd>cmd</kbd>+<kbd>z</kbd> | Undo last action |
| <kbd>cmd</kbd>+<kbd>y</kbd> or <kbd>cmd</kbd> + <kbd>shift</kbd> + <kbd>z</kbd> | Redo last undone action |
| <kbd>cmd</kbd>+<kbd>q</kbd> | Close the application (macOS) |
| <kbd>ctrl</kbd>+<kbd>q</kbd> | Close the application (Linux) |
| <kbd>alt</kbd>+<kbd>F4</kbd> | Close the application (macOS, Windows, Linux) |
| <kbd>ctrl</kbd>+<kbd>w</kbd> | Close the application (Windows, Linux) |
| :warning: <kbd>ctrl</kbd>+<kbd>p</kbd> | Toggle profiling mode |
| <kbd>escape</kbd> | Cancel current action. For example, drop currently dragged connection. |
| <kbd>cmd</kbd>+<kbd>alt</kbd>+<kbd>t</kbd> | Terminate the program execution |
| <kbd>cmd</kbd>+<kbd>alt</kbd>+<kbd>r</kbd> | Re-execute the program |
| <kbd>cmd</kbd>+<kbd>shift</kbd>+<kbd>k</kbd> | Switch the execution environment to Design. |
| <kbd>cmd</kbd>+<kbd>shift</kbd>+<kbd>l</kbd> | Switch the execution environment to Live. |
| <kbd>cmd</kbd>+<kbd>c</kbd> | Copy the selected nodes to the clipboard. |
| <kbd>cmd</kbd>+<kbd>v</kbd> | Paste a node from the clipboard at the mouse cursor position. |
#### Navigation
| Shortcut | Action |
| ------------------------------------------------ | ------------------------------- |
| Drag gesture (two fingers) | Pan the scene. |
| Pinch gesture (two fingers) | Zoom the scene. |
| <kbd>MMB</kbd> drag | Pan the scene. |
| <kbd>RMB</kbd> drag | Zoom the scene. |
| <kbd>LMB</kbd> double press node name | Step into the node. |
| :warning: <kbd>LMB</kbd> double press background | Step out of the current node. |
| <kbd>cmd</kbd>+<kbd>enter</kbd> | Step into the last selected node. |
| <kbd>alt</kbd>+<kbd>enter</kbd> | Step out of the current node. |
#### Node Layout
| Shortcut | Action |
| ------------------------------------------ | ----------------------------------------------------------------- |
| <kbd>LMB</kbd> drag non-selected node name | Move the node to a new position (dragging does not modify the selection). |
| <kbd>LMB</kbd> drag selected node name | Move all selected nodes to new positions. |
#### Node Selection
| Shortcut | Action |
| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------- |
| <kbd>LMB</kbd> click node name | Deselect all nodes. Select the target node. |
| <kbd>LMB</kbd> click background | Deselect all nodes. |
| :warning: <kbd>LMB</kbd> drag background | Select nodes using selection-box. |
| <kbd>shift</kbd> + <kbd>LMB</kbd> click node name | Add / remove node to the selection group. |
| :warning: <kbd>shift</kbd> + <kbd>LMB</kbd> drag background | Add / remove nodes to the selection group. |
| :warning: <kbd>\*-arrow</kbd> | Select node on the right side of the newest selected node. |
| :warning: <kbd>cmd</kbd> + <kbd>a</kbd> | Select all nodes. |
| :warning: <kbd>escape</kbd> | Deselect all nodes (if not in a mode, like edit mode). |
| <kbd>shift</kbd> + <kbd>ctrl</kbd> + <kbd>LMB</kbd> click node name | Add node to the selection group. |
| :warning: <kbd>shift</kbd> + <kbd>ctrl</kbd> + <kbd>LMB</kbd> drag background | Add nodes to the selection group. |
| <kbd>shift</kbd> + <kbd>alt</kbd> + <kbd>LMB</kbd> click node name | Remove node from the selection group. |
| :warning: <kbd>shift</kbd> + <kbd>alt</kbd> + <kbd>LMB</kbd> drag background | Remove nodes from the selection group. |
| <kbd>shift</kbd> + <kbd>ctrl</kbd> + <kbd>alt</kbd> + <kbd>LMB</kbd> click node name | Invert node selection. |
| :warning: <kbd>shift</kbd> + <kbd>ctrl</kbd> + <kbd>alt</kbd> + <kbd>LMB</kbd> drag background | Invert nodes selection. |
#### Node Editing
| Shortcut | Action |
| ------------------------------------------------ | ----------------------------------------------------------- |
| <kbd>enter</kbd> | Open Component Browser to create a new node. |
| <kbd>backspace</kbd> or <kbd>delete</kbd> | Remove selected nodes. |
| <kbd>cmd</kbd>+<kbd>g</kbd> | Collapse (group) selected nodes. |
| <kbd>cmd</kbd>+<kbd>LMB</kbd> | Start editing node expression. |
| <kbd>cmd</kbd>+<kbd>enter</kbd> | Start editing node expression. |
| <kbd>enter</kbd> or <kbd>LMB</kbd> on suggestion | Pick selected suggestion and commit editing. |
| <kbd>cmd</kbd> + <kbd>enter</kbd> | Accept the current Component Browser expression input as-is |
| <kbd>tab</kbd> | Pick selected suggestion and continue editing. |
#### Visualization
| Shortcut | Action |
| ----------------------------------------- | ------------------------------------------------------------- |
| <kbd>space</kbd> | Toggle visualization visibility of the selected node. |
| <kbd>space</kbd> hold | Preview visualization of the selected node (hide on release). |
| :warning: <kbd>space</kbd> double press | Toggle visualization fullscreen mode |
| <kbd>ctrl</kbd> + <kbd>space</kbd> | Cycle visualizations of the selected node. |
| :bangbang: <kbd>cmd</kbd> + <kbd>\\</kbd> | Toggle documentation view visibility |
#### Visualizations Implementations
| Shortcut | Action |
| ----------------------------- | -------------------------------------------------- |
| <kbd>cmd</kbd> + <kbd>a</kbd> | Show all points if available in visualization. |
| <kbd>cmd</kbd> + <kbd>z</kbd> | Zoom into selection if available in visualization. |
#### Debug
| Shortcut | Action |
| ------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------ |
| <kbd>ctrl</kbd> + <kbd>alt</kbd> + <kbd>,</kbd> | Start the language server profiling. |
| <kbd>ctrl</kbd> + <kbd>alt</kbd> + <kbd>.</kbd> | Stop the language server profiling and save the collected data. |
| <kbd>ctrl</kbd> + <kbd>shift</kbd> + <kbd>x</kbd> | Force reloading file in the backend. May fix some issues with synchronization if they appear. |
| <kbd>ctrl</kbd> + <kbd>shift</kbd> + <kbd>d</kbd> | Toggle Debug Mode. All actions below are only possible when it is activated. |
| <kbd>ctrl</kbd> + <kbd>alt</kbd> + <kbd>shift</kbd> + <kbd>i</kbd> | Open the developer console. |
| <kbd>ctrl</kbd> + <kbd>alt</kbd> + <kbd>shift</kbd> + <kbd>r</kbd> | Reload the visual interface. |
| <kbd>ctrl</kbd> + <kbd>alt</kbd> + <kbd>0 - 10</kbd> | Switch between debug rendering modes (0 is the normal mode). |
| <kbd>ctrl</kbd> + <kbd>alt</kbd> + <kbd>`</kbd> | Toggle profiling monitor (performance, memory usage, etc). |
| <kbd>ctrl</kbd> + <kbd>alt</kbd> + <kbd>p</kbd> | Toggle the visibility of internal components (private API) in the component browser. |
| <kbd>ctrl</kbd> + <kbd>d</kbd> | Send test data to the selected node. |
| <kbd>ctrl</kbd> + <kbd>shift</kbd> + <kbd>enter</kbd> | Push a hardcoded breadcrumb without navigating. |
| <kbd>ctrl</kbd> + <kbd>shift</kbd> + <kbd>arrow up</kbd> | Pop a breadcrumb without navigating. |
| <kbd>cmd</kbd> + <kbd>i</kbd> | Reload visualizations. To see the effect in the currently shown visualizations, you need to switch to another and switch back. |
| <kbd>ctrl</kbd> + <kbd>shift</kbd> + <kbd>b</kbd> | Toggle read-only mode. |
| <kbd>ctrl</kbd> + <kbd>alt</kbd> + <kbd>shift</kbd> + <kbd>x</kbd> | Toggle WebGL Context loss / restoration for testing. |
| <kbd>ctrl</kbd> + <kbd>shift</kbd> + <kbd>u</kbd> | Dump the suggestion database as JSON to the console. Available only in debug mode, and only if the component browser is open. |

View File

@ -1,358 +0,0 @@
---
layout: developer-doc
title: Visualization Workflow
category: product
tags: [product]
---
# Visualization Workflow
## Purpose of visualizations
Visualizations have two main purposes:
- **Display results of nodes** Each node can be assigned one or more
  visualizations. After a node computes its new value, the visualization shows
  it in an understandable way to the user. Please note that a single node can
  be assigned multiple visualizations at the same time. For example, a node
  might want to display a map of locations, and their list at the same time
  next to each other.
- **Provide an interactive way to generate new data** In widget mode (described
  in detail later), visualizations provide users with an interactive GUI to
  define data. For example, a map visualization can both display locations and
  allow the user to pick locations by clicking with a mouse. Similarly, the
  histogram can both display a list of numbers and be manually drawn with the
  mouse, producing such a list. Several numbers can be visualized as a table of
  sliders, which can also be used to interactively generate a table of numbers.
  Image visualizations can behave like an image editor, etc.
## Visualization Display Forms
Visualizations can be displayed in the following ways:
- **Attached to nodes** In this mode, visualizations display the most recent
result of the node. They behave like an integrated part of the node. Whenever
you move the node, the visualization moves as well. This mode can be toggled
by tapping the spacebar.
- **Fullscreen** A visualization attached to a node can grow (animate) to
  occupy the full IDE visual space. This mode can be triggered on the most
  recently selected node (in case many nodes are selected, the last selected
  one will be used) by either keeping the spacebar pressed for longer than
  approx. 0.5s, or by tapping it twice. In the former case, the visualization
  shrinks to its original form whenever we release space; in the latter,
  whenever we press space again.
- **Detached** Visualizations attached to nodes can be detached, scaled, and
placed freely across the visual canvas (we might introduce a special place
where you can put such visualizations). This is useful when defining
dashboards or reports. We also plan to provide a notebook-like experience
where you can write text mixed with visualizations (including widgets for an
interactive experience).
- **Widgets** In this mode visualizations behave like nodes but do not display
  expressions. They have one input and one output port. If the input port is
  connected, the visualization displays its value and passes it to the output
  port. In case it is not connected, the visualization becomes an interactive
  widget allowing the user to specify data. For example, a map visualization
  will allow the user to manually pick locations. After each change, the new
  locations will be sent to the output port. Under the hood, widgets are
  represented as nodes and their code lines are assigned a dedicated
  "visualization" metadata. Visualizations always generate expressions in the
  form `name = data`, where `data` is hardcoded data produced by the
  visualization. For example, when the user clicks the map to define locations,
  the data could be a string literal containing locations encoded in JSON.
### Choosing a Visualization Type
When new data is provided to a visualization, the visualization registry
searches for all visualizations that match it (see the visualization registry
section to learn more). For example, when data of type `[Int]` (list of ints)
is produced, all visualizations which match `[Int]`, like `[Int]`, `[a]`, or
`a`, will be found. Each type can be associated with a default visualization.
For example, `[Int]` might define that its default visualization is a plot. If
no default visualization is defined, a JSON visualization is used. Each
visualization has a drop-down menu allowing the user to switch to another
visualization type.
### Active Visualizations
When visualizations are displayed on the stage, they are not active by default,
which means they do not capture keyboard shortcuts. A visualization becomes
active when the user clicks it. Visualizations are deactivated by clicking in
the background of the node editor. When a visualization is active, all other
elements should be slightly dimmed, or the visualization should get a selection
border (to be defined). Active visualizations capture all keyboard shortcuts
except the space bar presses. Fullscreen visualizations are considered active
by default.
## HTML and Native Visualizations
There are two main types of visualizations - HTML and Native. The latter uses
the BaseGL shape API to draw on the screen. We prefer the latter as it
integrates tightly with our framework and allows for much better performance.
However, there are already many visualizations in HTML/JS and we need to
provide support for them as well. HTML visualizations are required to be
displayed in dedicated div elements. This has several consequences. Firstly,
the browser needs to lay them out, taking into account the current camera view,
etc. This is costly. Refreshing the CSS3D styles of 100 visualizations can
absolutely kill the interactive performance. On the other hand, refreshing the
position of 10k Native visualizations is almost free. Secondly, they need to be
handled by our engine in such a way that we can interact with them. For that
purpose, the current Scene implementation defines three layers - a top HTML
layer, a middle WebGL layer, and a bottom HTML layer. The HTML visualizations
are created and displayed on the bottom layer by default. Whenever an HTML
visualization becomes active, it should be moved to the top layer.
## Visualization Registry
Visualizations are user-defined. Enso ships with a set of predefined
visualizations, but they are in no way different from user-defined ones; they
are just defined for you. Visualizations can be defined either as HTML or
native visualizations, and can be written in JS or WASM (or any language that
compiles to one of these). Visualizations are stored on disk on the server side
and are provided to the GUI by the server. Users can upload their custom
visualizations as well. Each visualization is registered in the visualization
map, which maps an Enso type to the set of visualizations defined for that
type. The type might be very generic, like `[a]` (which in Enso terms means a
list of any elements).
## Defining a Visualization
Currently only JavaScript visualizations can be defined. Support for native
visualizations is planned.
### Defining a JavaScript Visualization
JavaScript visualizations are defined by placing `*.js` files in the
`visualization` subfolder in the project's root directory. As the IDE currently
allows editing only the `Main.enso` file, users have to create the `.js` file
manually and edit it outside the IDE.
## Custom JavaScript Visualization Example
Every visualization must reside in the `visualization` folder of the user's
project. For instance:
```
└─ Project_Name
   ├─ src
   │  └─ Main.enso
   └─ visualization
      └─ bubble.js
```
Visualizations can be defined as a JavaScript function which returns a class of
a shape specified below. Consider the following sample definition:
```javascript
console.log("Hi, this definition is being registered now!");

return class BubbleVisualization extends Visualization {
  static inputType = "Any";

  onDataReceived(data) {
    const xmlns = "http://www.w3.org/2000/svg";
    while (this.dom.firstChild) {
      this.dom.removeChild(this.dom.lastChild);
    }
    const width = this.dom.getAttributeNS(null, "width");
    const height = this.dom.getAttributeNS(null, "height");
    const svgElem = document.createElementNS(xmlns, "svg");
    svgElem.setAttributeNS(null, "id", "vis-svg");
    svgElem.setAttributeNS(null, "viewBox", "0 0 " + width + " " + height);
    svgElem.setAttributeNS(null, "width", "100%");
    svgElem.setAttributeNS(null, "height", "100%");
    this.dom.appendChild(svgElem);
    data.forEach((data) => {
      const bubble = document.createElementNS(xmlns, "circle");
      bubble.setAttributeNS(null, "stroke", "black");
      bubble.setAttributeNS(null, "fill", "red");
      bubble.setAttributeNS(null, "r", data[2]);
      bubble.setAttributeNS(null, "cx", data[0]);
      bubble.setAttributeNS(null, "cy", data[1]);
      svgElem.appendChild(bubble);
    });
  }

  setSize(size) {
    this.dom.setAttributeNS(null, "width", size[0]);
    this.dom.setAttributeNS(null, "height", size[1]);
  }
};
```
In particular:
- ### [Required] Source code
A visualization definition has to be a valid body of a JavaScript function
which returns a class definition. Instances of that class will be considered
separate visualizations. You are allowed to use global variables / global
state across visualizations of the same type, but you are highly advised not
to do so.
- ### [Required] `Visualization` superclass
The class returned by the definition function should extend the predefined
`Visualization` class. Classes which do not extend it will not be registered
as visualizations. The superclass defines a default constructor and a set of
utilities:
- #### Method `setPreprocessor(module,method,...arguments)`
Set an Enso method which will be evaluated on the server side before sending
data to the visualization. Note that `arguments` is a vararg. If not called, a
default unspecified method is used that will provide some JSON
representation of the value. See [Lazy visualizations](#lazy-visualizations)
section for details.
- #### Field `dom`
It is initialized in the constructor to the DOM symbol used to host the
visualization content. Users are free to modify the DOM element, including
adding other elements as its children.
- #### Field `theme`
The IDE's current color theme. Exposes the following methods.
- ##### Method `getColorForType`
Takes a qualified type name and returns the color that is used in the GUI
to represent that type.
- ##### Method `getForegroundColorForType`
Takes a qualified type name and returns the color that should be used for
foreground elements (e.g. text) that are shown on top of the background
color returned by `getColorForType`.
- ##### Method `get`
Takes a style sheet path as string and returns the corresponding value
from the theme. For example, `get("graph_editor.node.error.panic")`
returns the orange color that is used to mark nodes in an error state.
- ### [Optional] Field `label`
  The static field `label` is a user-facing name used to identify the
  visualization. You are not allowed to define several visualizations of the
  same name in the same Enso library. In case the field is missing, the name
  will be inferred from the class name by splitting the camel-case name into
  chunks and converting them to a lowercase string.
- ### [Optional] Field `inputType`
The static field `inputType` is used to determine which Enso data types this
visualization can be used for. Its value should be a valid Enso type, like
`"String | Int"`. In case the field is an empty string or it is missing, it
will default to `"Any"`, which is a type containing all other types. It is a
rare case when you want to define a visualization which is able to work with
just any data type, so you are highly advised to provide the type definition.
- ### [Optional] Field `inputFormat`
  The static field `inputFormat` is used to determine in what format the data
  should be provided to the `onDataReceived` function. Currently, the only
  valid option is "json", but it will be possible to set it to "binary" in the
  future. In the latter case, it is up to the visualization author to manage
  the binary stream received from the server.
- ### [Optional] Constructor
The visualization will be instantiated by providing the constructor with a
configuration object. The shape of the configuration object is not part of the
public API and can change between releases of this library. You have to pass
it unchanged to the superclass constructor.
- ### [Optional] Function `onDataReceived`
The `onDataReceived(data)` method is called on every new data chunk received
from the server. Note that the visualization will receive the "full data" if
you are not using the `setPreprocessor` method.
- ### [Optional] Function `setSize`
The `setSize(size)` method is called on every size change of the
visualization. You should not draw outside of the provided area, however, if
you do so, it will be clipped to the provided area automatically. The `size`
parameter contains two fields `width` and `height` expressed in pixels.
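Putting these requirements together, a minimal skeleton of a definition might
look as follows. This is only a sketch: the class name, label, and the rendered
output are arbitrary, and only the members documented above are assumed:

```javascript
return class SketchVisualization extends Visualization {
  static label = "Sketch"; // Optional user-facing name (arbitrary here).
  static inputType = "Any"; // Optional Enso type filter.
  static inputFormat = "json"; // Optional; "json" is the default.

  constructor(config) {
    super(config); // The configuration object must be passed on unchanged.
  }

  onDataReceived(data) {
    // Render the received data into the DOM element provided by the superclass.
    this.dom.textContent = JSON.stringify(data);
  }

  setSize(size) {
    this.dom.setAttributeNS(null, "width", size[0]);
    this.dom.setAttributeNS(null, "height", size[1]);
  }
};
```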
## Sending Data to Visualizations
### Lazy Visualizations
It is very important to understand how the visualization architecture works to
make visualizations interactive and fast. Whenever new data is computed by the
compiler and a visualization is attached to it, the data is sent to the GUI to
be displayed. However, sending huge chunks of data would kill the performance.
When defining a visualization, the user can define a chunk of Enso code (as a
string). This code is part of the visualization definition and is stored
server-side. Visualizations are allowed to change the code at runtime (in a
JavaScript visualization you may use the `setPreprocessor` method). This code
defines an Enso function, which will be run by the compiler on the data the
visualization is attached to. Only the results of this code will be sent to
the GUI. In the case of the JSON input format, the result of the call should be
a valid JSON string. The code will be evaluated in the context of the module
where the preprocessor method is defined - you may use any symbol defined or
imported in that module.
For example, imagine you want to display a heatmap of 10 million points on a
map, and these points change rapidly. Sending such an amount of information via
WebSocket could be too much, and you (as the visualization author) might decide
that the visualization image should be generated on the server, and your
visualization is meant only to display the resulting image. In such a scenario,
you can define in your visualization an Enso function which will compute the
image on the server!
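As a sketch of how this could look from the JavaScript side: assuming a module
`Project.Visualization` defines a method `heatmap_image` that renders the
points into an image server-side (both names are hypothetical, as is the shape
of the returned JSON), the visualization could request it like this:

```javascript
return class HeatmapImageVisualization extends Visualization {
  static inputType = "Any";

  constructor(config) {
    super(config);
    // Evaluate the (hypothetical) server-side renderer instead of receiving
    // the raw points; only its result travels over the connection.
    this.setPreprocessor("Project.Visualization", "heatmap_image");
  }

  onDataReceived(data) {
    // `data` is now the preprocessor's JSON result, assumed here to contain a
    // base64-encoded image.
    const img = document.createElement("img");
    img.src = "data:image/png;base64," + data.image;
    this.dom.replaceChildren(img);
  }
};
```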
### Binary and Text (JSON) Formats
Each visualization can choose whether it supports binary or JSON input; the
input format defaults to JSON. The data from the server is always sent to the
GUI over a binary channel; however, when the JSON format is selected, it is
first converted to a JSON representation on the server side. We can assume that
all Enso data types have a defined conversion to JSON by default. If the
visualization input is defined as JSON input, the binary stream will be
converted to JSON by the GUI engine before being passed to the visualization.
It is up to the visualization author to handle the textual or binary form.
## Builtin Visualizations
The IDE comes with a set of predefined visualizations, including charts.
### Dataframes Support
Some of the predefined visualizations have special support for `Table` from
the Enso Dataframes library.
#### Histogram
When using the `Histogram` visualization on a `Table` value, it will first look
for a column named `value`. If present, that column will be used as the data
source. Otherwise, `Histogram` will use the first numerical column.
#### Scatter Plot
The `Scatter Plot` visualization has several properties for each point. If a
column with a matching name is present in the `Table`, it will be used.
- `x` — position on the horizontal axis. If not present, the index column will
  be used. If there is no index set, the row indices will be used. If this
  column has a missing value, the point will be omitted.
- `y` — position on the vertical axis. If not present, the first numerical
  column with an unrecognized name will be used; failing that, the first
  numerical column will be used. If this column has a missing value, the point
  will be omitted.
- `color` — color of the point. The default color is `black` and will be used
  if the column is not present, or for its missing values. `color` should be a
  `Text` column with elements in a
  [CSS colors format](https://www.w3schools.com/cssref/css_colors_legal.asp):
- Hexadecimal formats, like `#RGB`, `#RRGGBB` and `#RRGGBBAA`.
- RGB function-like syntax, e.g. `rgb(255,0,128)` or `rgba(255,0,128,0.5)`.
- HSL function-like syntax, e.g. `hsl(120, 100%, 50%)` or
`hsla(120, 100%, 50%, 0.3)`.
- name of one of
[predefined colors](https://www.w3schools.com/colors/colors_names.asp), e.g.
`red` or `SteelBlue`.
- `label` — text to be displayed next to the point.
- `shape` — shape of the point. Supported shapes are:
- `cross`;
- `diamond`;
- `square`;
- `star`;
- `triangle`.
The default shape is a circle.
- `size` — size of the point as a (possibly floating-point) number. The
  default size of a point is `1.0`.

View File

@ -1,14 +0,0 @@
[package]
name = "enso-profiler-enso-data"
version = "0.1.0"
edition = "2021"
authors = ["Enso Team <contact@enso.org>"]

[dependencies]
chrono = { version = "0.4.19", features = ["serde"] }
csv = "1.1"
serde = { workspace = true }
enso-profiler = { path = "../../../lib/rust/profiler" }
enso-profiler-data = { path = "../../../lib/rust/profiler/data" }
ensogl-core = { path = "../../../lib/rust/ensogl/core" }
json-rpc = { path = "../../../lib/rust/json-rpc" }

View File

@ -1,30 +0,0 @@
//! Defines a metadata type for representing backend messages.
// ===============
// === Message ===
// ===============
/// Metadata type for messages between the Language Server and the Compiler.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize, PartialEq, Eq)]
pub struct Message {
/// Identifies whether the logging process is the sender or receiver of the message.
pub direction: Direction,
/// Used to associate requests and responses.
pub request_id: Option<String>,
/// Identifies an RPC method.
pub endpoint: String,
}
// === Direction ===
/// Identifies which process is the sender of a message.
#[derive(Clone, Copy, Debug, serde::Deserialize, serde::Serialize, PartialEq, Eq)]
pub enum Direction {
/// From logging process to other process.
Request,
/// From other process to logging process.
Response,
}
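// A minimal sketch of the wire format: with the derives above, a `Message`
// serializes to a flat JSON object in field-declaration order. This assumes
// `serde_json` is available as a dev-dependency (it is not listed in the
// manifest above) and uses a hypothetical endpoint name.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn message_json_shape() {
        let msg = Message {
            direction:  Direction::Request,
            request_id: Some("1".into()),
            endpoint:   "text/applyEdit".into(),
        };
        let json = serde_json::to_string(&msg).unwrap();
        assert_eq!(json, r#"{"direction":"Request","request_id":"1","endpoint":"text/applyEdit"}"#);
    }
}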


@ -1,238 +0,0 @@
//! A diagram type for illustrating timings of messages between processes.
//!
//! Like a UML timing diagram, this illustrates interactions between processes over time. However,
//! this diagram is simpler (the UML diagram type supports nonspecific times, process state changes,
//! and many other data complexities); as a result, it is better suited to display of large numbers
//! of events spread unevenly over a long time range than any UML renderers I[KW] am aware of.
use crate::backend::Direction;
use crate::Metadata;
/// The data necessary to create a diagram of message timings.
#[derive(Debug, Default)]
pub struct Diagram<'a> {
processes: Vec<&'a str>,
messages: Vec<Message>,
}
impl<'a> Diagram<'a> {
/// Define a new process. Return a handle.
pub fn process<'b: 'a>(&mut self, name: &'b str) -> Process {
let id = self.processes.len();
self.processes.push(name);
Process { id }
}
/// Log a message between two processes.
pub fn message(&mut self, sender: Process, recipient: Process, time: f64, label: String) {
self.messages.push(Message { sender, recipient, time, label });
}
/// Slice of processes in this diagram.
pub fn processes(&'a self) -> &[&'a str] {
&self.processes
}
/// Slice of messages in this diagram.
pub fn messages(&'a self) -> &[Message] {
&self.messages
}
}
impl<'a> Diagram<'a> {
/// Create a diagram from the given profiles.
pub fn from_profiles(profiles: &[&enso_profiler_data::Profile<Metadata>; 2]) -> Self {
let mut metadata0 = vec![];
let mut metadata1 = vec![];
collect_metadata(profiles[0], profiles[0].root_interval_id(), &mut metadata0);
collect_metadata(profiles[1], profiles[1].root_interval_id(), &mut metadata1);
let mut dia = Self::default();
let frontend = dia.process("Ide");
let ls = dia.process("LanguageServer");
let engine = dia.process("Engine");
let offset_required = "Cannot chart profile without `TimeOffset` headers.";
let mut offset0 = profiles[0].headers.time_offset.expect(offset_required).into_ms();
let mut offset1 = profiles[1].headers.time_offset.expect(offset_required).into_ms();
// Use IDE process's time origin as chart's origin.
offset1 -= offset0;
offset0 -= offset0;
for meta in metadata0.into_iter() {
let time = meta.time.into_ms() + offset0;
match meta.data {
Metadata::RpcEvent(message) => dia.message(ls, frontend, time, message),
Metadata::RpcRequest(message) =>
dia.message(frontend, ls, time, message.to_string()),
_ => {}
}
}
for meta in metadata1.into_iter() {
if let Metadata::BackendMessage(message) = meta.data {
let time = meta.time.into_ms() + offset1;
let (p0, p1) = match message.direction {
Direction::Request => (ls, engine),
Direction::Response => (engine, ls),
};
dia.message(p0, p1, time, message.endpoint);
}
}
dia
}
}
fn collect_metadata<M: Clone>(
profile: &enso_profiler_data::Profile<M>,
interval: enso_profiler_data::IntervalId,
metadata: &mut Vec<enso_profiler_data::Timestamped<M>>,
) {
let interval = &profile[interval];
metadata.extend(interval.metadata.iter().cloned());
for &child in &interval.children {
collect_metadata(profile, child, metadata);
}
}
// === Process ===
/// A process that may send and receive messages.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub struct Process {
/// Process id.
pub id: usize,
}
// === Message ===
/// An event of communication between processes.
#[derive(Clone, Debug)]
pub struct Message {
/// Process that sent this message.
pub sender: Process,
/// Process that received this message.
pub recipient: Process,
/// Timestamp of this message.
pub time: f64,
/// Label of this message.
pub label: String,
}
// ====================
// === SVG rendering ===
// ====================
/// Rendering a [`Diagram`] in SVG format.
pub mod svg {
use super::*;
// How far apart the poles should be.
// Unit is the same size as 1 millisecond, but the X-axis is not quantitative.
const POLE_SPACING: f64 = 500.0;
const GRID_INTERVAL_MS: f64 = 100.0;
// How much space to allow for headers, in millisecond-equivalent units.
const HEADER_HEIGHT: f64 = 40.0;
// Where to position the header text.
const HEADER_BASELINE: f64 = HEADER_HEIGHT - 10.0;
const FOOTER_HEIGHT: f64 = 40.0;
// Y-offset of the point of the arrow from the text field's `y` attribute.
// With the current hack of using unicode to get arrows, this must be experimentally
// determined. Its exact value isn't important because it only affects placement
// of everything relative to the grid, not differences between measurements, and the
// grid doesn't need to be lined up to the millisecond.
const Y_FUDGE: f64 = 4.0;
const GRID_COLOR_RGB: &str = "bbbbbb";
/// Write a SVG representation of the given [`Diagram`].
pub fn write_diagram(dia: &Diagram, mut f: impl std::io::Write) -> std::io::Result<()> {
writeln!(f, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>")?;
let xmlns = "xmlns=\"http://www.w3.org/2000/svg\"";
let mut max_time = 0.0;
for message in dia.messages.iter() {
if message.time > max_time {
max_time = message.time;
}
}
let height = max_time;
writeln!(f, "<svg height=\"{}\" {}>", height + HEADER_HEIGHT + FOOTER_HEIGHT, xmlns,)?;
const LEFT: &str = "end";
const MIDDLE: &str = "middle";
const RIGHT: &str = "start";
for (i, p) in dia.processes.iter().enumerate() {
let offset_to_center = POLE_SPACING / 2.0;
writeln!(
f,
"<text y=\"{}\" x=\"{}\" text-anchor=\"{}\">{}</text>",
HEADER_BASELINE,
POLE_SPACING * i as f64 + offset_to_center,
MIDDLE,
p,
)?;
}
for i in 0..((height / GRID_INTERVAL_MS) as usize + 1) {
writeln!(
f,
"<text y=\"{}\" x=\"{}\" text-anchor=\"{}\">{}ms</text>",
HEADER_HEIGHT + GRID_INTERVAL_MS * i as f64,
0,
RIGHT,
GRID_INTERVAL_MS * i as f64,
)?;
let y_pos = HEADER_HEIGHT + GRID_INTERVAL_MS * i as f64;
let x_len = POLE_SPACING * dia.processes.len() as f64;
let path = format!("M0,{y_pos} l{x_len},0");
writeln!(f, "<path fill=\"none\" stroke=\"#{GRID_COLOR_RGB}\" d=\"{path}\"/>")?;
}
for i in 1..dia.processes.len() {
let path = format!("M{},{} l0,{}", POLE_SPACING * i as f64, HEADER_HEIGHT, height);
writeln!(f, "<path fill=\"none\" stroke=\"black\" d=\"{path}\"/>")?;
}
let simple_only = "Drawing messages between non-adjacent processes is not implemented.";
let mut pairs = std::collections::HashMap::new();
struct Pair {
index: usize,
forward: bool,
}
for index in 1..dia.processes.len() {
let i0 = Process { id: index - 1 };
let i1 = Process { id: index };
pairs.insert((i0, i1), Pair { index, forward: true });
pairs.insert((i1, i0), Pair { index, forward: false });
}
for m in &dia.messages {
let pair = (m.sender, m.recipient);
let Pair { index, forward } = *pairs.get(&pair).expect(simple_only);
let x = index as f64 * POLE_SPACING;
if forward {
writeln!(
f,
"<text y=\"{}\" x=\"{}\" text-anchor=\"{}\">{}▶</text>",
HEADER_HEIGHT + m.time + Y_FUDGE,
x,
LEFT,
m.label,
)?;
} else {
writeln!(
f,
"<text y=\"{}\" x=\"{}\" text-anchor=\"{}\">◀{}</text>",
HEADER_HEIGHT + m.time + Y_FUDGE,
x,
RIGHT,
m.label,
)?;
}
}
writeln!(f, "</svg>")?;
Ok(())
}
}
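// A minimal usage sketch of the API above: define two processes, log a pair of
// hypothetical messages, and render the result as SVG into a buffer.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn render_two_process_diagram() {
        let mut dia = Diagram::default();
        let ide = dia.process("Ide");
        let ls = dia.process("LanguageServer");
        dia.message(ide, ls, 10.0, "text/openTextDocument".into());
        dia.message(ls, ide, 42.5, "text/openTextDocument (response)".into());
        let mut out = Vec::new();
        svg::write_diagram(&dia, &mut out).unwrap();
        assert!(String::from_utf8(out).unwrap().starts_with("<?xml"));
    }
}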


@ -1,85 +0,0 @@
//! Tool that translates a backend message log into the [`enso_profiler`] JSON format.
//!
//! # Usage
//!
//! The tool reads a CSV backend message log, and converts it to a
//! [JSON-formatted event log](https://github.com/enso-org/design/blob/main/epics/profiling/implementation.md#file-format)
//! for use with [`enso_profiler`] tools.
//!
//! For example:
//!
//! ```console
//! ~/git/enso/enso_data $ cargo run --bin api_events_to_profile < messages.csv > messages.json
//! ```
// === Features ===
#![feature(test)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
use enso_profiler::format;
use enso_profiler_enso_data::backend;
// ==================================
// === Backend message log format ===
// ==================================
mod api_events {
use super::*;
#[derive(Clone, Debug, serde::Deserialize)]
pub struct Message {
pub timestamp: chrono::DateTime<chrono::offset::Utc>,
pub direction: backend::Direction,
pub request_id: Option<String>,
pub endpoint: String,
}
pub fn parse(r: impl std::io::Read) -> Result<Vec<Message>, Box<dyn std::error::Error>> {
let mut log = Vec::new();
let mut reader = csv::ReaderBuilder::new().has_headers(false).from_reader(r);
for message in reader.deserialize() {
let message: Message = message?;
log.push(message);
}
Ok(log)
}
}
// ============
// === Main ===
// ============
fn main() {
use std::io::Write;
let must_be_csv = "Parse error (Is this a CSV logged by the language server?)";
let backend_messages = api_events::parse(std::io::stdin()).expect(must_be_csv);
let mut backend_profile = format::Builder::new();
backend_profile.time_offset(format::Timestamp::from_ms(0.0));
backend_profile.process("LanguageServer");
for message in backend_messages {
let api_events::Message { timestamp, direction, request_id, endpoint } = message;
let data = backend::Message { direction, request_id, endpoint };
let timestamp = timestamp.timestamp_nanos();
let timestamp = format::Timestamp::from_ms(timestamp as f64 / 1_000_000.0);
backend_profile.metadata(timestamp, "BackendMessage", data);
}
let backend_profile = backend_profile.build_string();
std::io::stdout().write_all(backend_profile.as_bytes()).expect("IO Error writing results");
}
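// A hypothetical sample row for the format above; the field order follows
// `api_events::Message`: timestamp, direction, request_id, endpoint.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_sample_row() {
        let row = "2022-04-01T12:00:00Z,Request,1,text/applyEdit\n";
        let log = api_events::parse(row.as_bytes()).expect("sample row should parse");
        assert_eq!(log.len(), 1);
        assert_eq!(log[0].endpoint, "text/applyEdit");
    }
}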


@ -1,79 +0,0 @@
//! Produce a diagram illustrating timings of messages between Enso processes.
//!
//! # Interface
//!
//! Reads from stdin a multi-process profile file containing information logged by the IDE and by
//! the language server. Writes to stdout an SVG representation of message timings.
//!
//! ```console
//! ~/git/enso/data $ cargo run --bin message_beanpoles < profile.json > out.svg
//! ```
//!
//! # Usage example
//!
//! First, run the application and collect both IDE and language-server profiling data.
//! To enable collecting language-server data, see:
//! https://github.com/enso-org/enso/pull/3392#issue-1201784793
//! To capture a profile, see:
//! https://github.com/enso-org/design/blob/main/epics/profiling/implementation.md#collecting-the-data
//! Save the IDE profile to `~/profile.json`, and the Language Server message log as
//! `~/messages.csv`.
//!
//! Then:
//! ```console
//! # Convert the language server messages to the enso_profiler format.
//! profiler/enso_data $ cargo run --bin api_events_to_profile < ~/messages.csv > ~/messages.json
//! # Merge the IDE profiler and the language server messages profile.
//! profiler/enso_data $ cat ~/profile.json ~/messages.json > ~/fullprofile.json
//! # Render an SVG diagram of the message timings.
//! profiler/enso_data $ cargo run --bin message_beanpoles < ~/fullprofile.json > ~/diagram.svg
//! ```
// === Features ===
#![feature(test)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
use enso_profiler_data as data;
use enso_profiler_enso_data as enso_data;
use enso_profiler_enso_data::beanpole;
// ============
// === Main ===
// ============
fn main() {
use std::io::Read;
let mut profile = String::new();
std::io::stdin().read_to_string(&mut profile).unwrap();
let profiles: Vec<Result<data::Profile<enso_data::Metadata>, data::Error<_>>> =
data::parse_multiprocess_profile(&profile).collect();
let mut profiles_ = Vec::new();
for profile in profiles {
match profile {
Ok(profile) => profiles_.push(profile),
Err(data::Error::RecoverableFormatError { with_missing_data, .. }) =>
profiles_.push(with_missing_data),
Err(e) => panic!("{}", e),
}
}
let profiles = profiles_;
assert_eq!(profiles.len(), 2);
let dia = beanpole::Diagram::from_profiles(&[&profiles[0], &profiles[1]]);
beanpole::svg::write_diagram(&dia, std::io::stdout()).unwrap();
}


@ -1,67 +0,0 @@
//! Types for interpreting profiles containing Enso application data.
// === Features ===
#![feature(test)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
use serde::Serializer;
use std::fmt::Display;
use std::fmt::Formatter;
// ==============
// === Export ===
// ==============
pub mod backend;
pub mod beanpole;
// ================
// === Metadata ===
// ================
/// Metadata that is logged within the Enso core libraries.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub enum Metadata {
/// A message received by the IDE from the Language Server.
RpcEvent(String),
/// A message sent from the IDE to the Language Server.
RpcRequest(json_rpc::log::RpcRequest),
/// A message between the Language Server and the Engine.
BackendMessage(backend::Message),
/// Any other metadata type.
///
/// The types defined above are handled specially by `enso-profiler-enso-data` tools; e.g. the
/// RPC events and `RenderStats` are displayed in different ways by the `profiling_run_graph`
/// entry point.
///
/// Other types are logged purely so that they can be seen in the event logs, e.g. when
/// inspecting a log with the `measurements` tool.
#[serde(other)]
Other,
}
impl Display for Metadata {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Metadata::RpcEvent(name) => f.collect_str(name),
Metadata::RpcRequest(method) => f.collect_str(&method.to_string()),
Metadata::BackendMessage(backend::Message { endpoint, .. }) => f.collect_str(endpoint),
Metadata::Other => f.collect_str("<value>"),
}
}
}


@ -1,22 +0,0 @@
[package]
name = "ast"
version = "0.1.0"
authors = ["Enso Team <contact@enso.org>"]
edition = "2021"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
derive_more = { workspace = true }
failure = { workspace = true }
lazy_static = { workspace = true }
regex = { workspace = true }
serde = { workspace = true }
uuid = { version = "0.8.1", features = ["serde", "v4", "wasm-bindgen"] }
ast-macros = { path = "../macros" }
enso-data-structures = { path = "../../../../../lib/rust/data-structures" }
enso-text = { path = "../../../../../lib/rust/text" }
enso-prelude = { path = "../../../../../lib/rust/prelude" }
enso-shapely = { path = "../../../../../lib/rust/shapely" }
enso-profiler = { path = "../../../../../lib/rust/profiler" }


@ -1,83 +0,0 @@
//! Rules for describing operator associativity.
//!
//! NOTE: They should be kept in sync with enso's implementation at:
//! `enso/Syntax/definition/src/main/scala/org/enso/syntax/text/ast/opr/Assoc.scala`
use crate::prelude::*;
use lazy_static::lazy_static;
use regex::Regex;
/// Operator associativity.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Assoc {
Left,
Right,
}
/// Checks if given operator identifier can be considered "applicative".
/// Examples are: `<*>`, `<*`, `<$>`.
pub fn is_applicative(operator: &str) -> bool {
// We want to cache Regex, as library authors recommend, because compiling it is expensive.
lazy_static! {
// Unwrap is safe, as the input is fixed and covered by tests.
static ref PATTERN:Regex = Regex::new("^<?[+*$]>?$").unwrap();
}
PATTERN.is_match(operator)
}
/// A character's "weight" when calculating associativity. A negative value weighs towards
/// right-associativity, a positive one towards left-associativity.
pub fn char_assoc(c: char) -> i32 {
match c {
'=' => -1,
',' => -1,
'>' => -1,
'<' => 1,
_ => 0,
}
}
impl Assoc {
fn operator_weight(operator: &str) -> i32 {
operator.chars().map(char_assoc).sum::<i32>()
}
/// Obtains associativity of given operator identifier.
pub fn of(operator: &str) -> Assoc {
if is_applicative(operator) || Self::operator_weight(operator) >= 0 {
Assoc::Left
} else {
Assoc::Right
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_assoc() {
use Assoc::Left;
use Assoc::Right;
assert_eq!(Assoc::of("+"), Left);
assert_eq!(Assoc::of("*"), Left);
assert_eq!(Assoc::of(","), Right);
assert_eq!(Assoc::of("*>"), Left);
}
#[test]
fn test_applicative() {
assert!(is_applicative("<$>"));
assert!(is_applicative("<*>"));
assert!(is_applicative("<*"));
assert!(is_applicative("*>"));
assert!(!is_applicative("="));
assert!(!is_applicative("++"));
}
}

File diff suppressed because it is too large


@ -1,107 +0,0 @@
//! A module containing structures describing id-map.
//!
//! The Id Map is a mapping between code spans and some particular id. It's a part of our
//! language's source file: the parser assigns the id of a particular span to the AST node
//! representing that span.
use crate::prelude::*;
use enso_text::index::*;
use crate::Id;
use enso_text::rope::xi_rope;
use enso_text::rope::xi_rope::rope::Utf16CodeUnitsMetric;
use serde::Deserialize;
use serde::Serialize;
use uuid::Uuid;
// =============
// === IdMap ===
// =============
/// A mapping between text position and immutable ID.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct IdMap {
pub vec: Vec<(enso_text::Range<Byte>, Id)>,
}
impl IdMap {
/// Create a new instance.
pub fn new(vec: Vec<(enso_text::Range<Byte>, Id)>) -> IdMap {
IdMap { vec }
}
/// Assigns the given ID to the span.
pub fn insert(&mut self, span: impl Into<enso_text::Range<Byte>>, id: Id) {
self.vec.push((span.into(), id));
}
/// Generate random Uuid for span.
pub fn generate(&mut self, span: impl Into<enso_text::Range<Byte>>) -> Uuid {
let uuid = Uuid::new_v4();
self.vec.push((span.into(), uuid));
uuid
}
}
// =================
// === JsonIdMap ===
// =================
/// Strongly typed index of char.
///
/// Part of json representation of id_map: see [`JsonIdMap`].
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
struct Index {
value: usize,
}
/// A size expressed in chars.
///
/// Part of json representation of id_map: see [`JsonIdMap`].
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
struct Size {
value: usize,
}
/// The index and size of a span of some text.
///
/// Part of json representation of id_map: see [`JsonIdMap`].
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
struct Span {
index: Index,
size: Size,
}
/// Another representation of the id map, which is an exact mirror of the id-map JSON stored in
/// a source file.
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
#[serde(transparent)]
pub struct JsonIdMap {
vec: Vec<(Span, Id)>,
}
impl JsonIdMap {
/// Create from the [`IdMap`] structure.
///
/// The code is needed for transforming byte offsets to codepoint offsets.
pub fn from_id_map(id_map: &IdMap, code: &enso_text::Rope) -> Self {
let mut cursor = xi_rope::Cursor::new(&code.rope, 0);
let char_offsets = iter::once(0).chain(cursor.iter::<Utf16CodeUnitsMetric>()).collect_vec();
let mapped_vec = id_map.vec.iter().map(|(range, id)| {
let byte_start = range.start.value;
let byte_end = range.end.value;
let start = char_offsets.binary_search(&byte_start).unwrap_both();
let end = char_offsets.binary_search(&byte_end).unwrap_both();
let size = end - start;
let span = Span { index: Index { value: start }, size: Size { value: size } };
(span, *id)
});
Self { vec: mapped_vec.collect() }
}
}


@ -1,31 +0,0 @@
//! Functions related to identifiers family of nodes.
use crate::prelude::*;
use crate::Shape;
/// Checks if the given Ast node can be used to represent an identifier that is part of a
/// definition name.
pub fn is_identifier(ast: &Ast) -> bool {
name(ast).is_some()
}
/// Retrieves the identifier's name, if the Ast node is an identifier. Otherwise, returns None.
pub fn name(ast: &Ast) -> Option<&str> {
match ast.shape() {
Shape::Var(val) => Some(&val.name),
Shape::Cons(val) => Some(&val.name),
Shape::SectionSides(val) => name(&val.opr),
Shape::Opr(val) => Some(&val.name),
_ => None,
}
}
/// If the node is a variable (lower-cased) identifier, return the name.
pub fn as_var(ast: &Ast) -> Option<&str> {
match ast.shape() {
Shape::Var(val) => Some(&val.name),
_ => None,
}
}
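// A minimal sketch of the accessors above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn name_accessors() {
        let var = crate::Ast::var("foo");
        assert!(is_identifier(&var));
        assert_eq!(name(&var), Some("foo"));
        assert_eq!(as_var(&var), Some("foo"));
    }
}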


@ -1,19 +0,0 @@
//! Implementation details used by this crate. Not really dangerous, but not intended for users
//! to need.
/// Iterate recursively over a tree-like structure implementing `IntoIterator`.
pub fn iterate_subtree<T>(ast: T) -> impl Iterator<Item = T::Item>
where T: IntoIterator<Item = T> + Copy {
let generator = move || {
let mut nodes: Vec<T> = vec![ast];
while !nodes.is_empty() {
let ast = nodes.pop().unwrap();
nodes.extend(ast.into_iter());
yield ast;
}
};
enso_shapely::GeneratingIterator(generator)
}


@ -1,222 +0,0 @@
//! This module provides the KnownAst<T> wrapper over Ast, which allows expressing that we
//! already know what `Shape` variant is stored within this `Ast` node.
use crate::prelude::*;
use crate::with_shape_variants;
use crate::Ast;
use crate::HasTokens;
use crate::Shape;
use crate::TokenConsumer;
// =================
// === Known AST ===
// =================
/// Wrapper for an AST node of known shape type that we can access.
/// Use `TryFrom<&Ast>` to obtain values.
///
/// Provides `Deref` implementation that allows accessing underlying shape `T` value.
#[derive(CloneRef, Derivative)]
#[derivative(Clone(bound = ""))]
#[derive(Debug, PartialEq, Eq)]
pub struct KnownAst<T> {
ast: Ast,
phantom: ZST<T>,
}
impl<T> KnownAst<T> {
/// Creates a new `KnownAst<T>` from ast node containing shape of variant `T`.
///
/// Note that this API requires the caller to ensure that the Ast stores the proper shape.
/// Violating this rule will lead to panics later.
fn new_unchecked(ast: Ast) -> KnownAst<T> {
KnownAst { ast, phantom: default() }
}
/// Gets AST ID.
pub fn id(&self) -> Option<crate::Id> {
self.ast.id
}
/// Returns a reference to the stored `Ast` with `Shape` of `T`.
pub fn ast(&self) -> &Ast {
&self.ast
}
}
impl<T, E> KnownAst<T>
where for<'t> &'t Shape<Ast>: TryInto<&'t T, Error = E>
{
/// Checks if the shape of given Ast node is compatible with `T`.
/// If yes, returns Ok with Ast node wrapped as KnownAst.
/// Otherwise, returns an error.
pub fn try_new(ast: Ast) -> Result<KnownAst<T>, E> {
if let Some(error_matching) = ast.shape().try_into().err() {
Err(error_matching)
} else {
Ok(KnownAst { ast, phantom: default() })
}
}
/// Returns the AST's shape.
pub fn shape(&self) -> &T
where E: Debug {
self.deref()
}
/// Updates self in place by applying the given function to the stored Shape.
pub fn update_shape<R>(&mut self, f: impl FnOnce(&mut T) -> R) -> R
where
T: Clone + Into<Shape<Ast>>,
E: Debug, {
let mut shape = self.shape().clone();
let ret = f(&mut shape);
self.ast = self.ast.with_shape(shape);
ret
}
/// Create new instance of KnownAst with mapped shape.
pub fn with_shape<S, E1>(&self, f: impl FnOnce(T) -> S) -> KnownAst<S>
where
for<'t> &'t Shape<Ast>: TryInto<&'t S, Error = E1>,
T: Clone + Into<Shape<Ast>>,
S: Clone + Into<Shape<Ast>>,
E: Debug,
E1: Debug, {
let shape = self.shape().clone();
let new_shape = f(shape);
KnownAst::new_unchecked(self.ast.with_shape(new_shape))
}
}
impl<T: Into<Shape<Ast>>> KnownAst<T> {
/// Creates a new `KnownAst<T>` from `shape` with random ID if id=None.
pub fn new(shape: T, id: Option<crate::Id>) -> KnownAst<T> {
let ast = Ast::new(shape, id);
Self::new_unchecked(ast)
}
/// Creates a new `KnownAst<T>` from `shape` with no ID.
/// Should only be used on nodes that can't have an ID because of the Scala AST design.
/// Example: Module, Section.opr, MacroMatchSegment.head
/// Tracking issue: https://github.com/enso-org/ide/issues/434
pub fn new_no_id(shape: T) -> KnownAst<T> {
let ast = Ast::new_no_id(shape);
Self::new_unchecked(ast)
}
}
impl<T, E> Deref for KnownAst<T>
where
for<'t> &'t Shape<Ast>: TryInto<&'t T, Error = E>,
E: Debug,
{
type Target = T;
fn deref(&self) -> &Self::Target {
let result = self.ast.shape().try_into();
// This must never happen, as the only function for constructing values checks
// whether the shape type matches `T`.
result.expect("Internal Error: wrong shape in KnownAst.")
}
}
impl<T> AsRef<Ast> for KnownAst<T> {
fn as_ref(&self) -> &Ast {
&self.ast
}
}
impl<T, E> TryFrom<&Ast> for KnownAst<T>
where for<'t> &'t Shape<Ast>: TryInto<&'t T, Error = E>
{
type Error = E;
fn try_from(ast: &Ast) -> Result<KnownAst<T>, Self::Error> {
KnownAst::try_new(ast.clone())
}
}
impl<T, E> TryFrom<Ast> for KnownAst<T>
where for<'t> &'t Shape<Ast>: TryInto<&'t T, Error = E>
{
type Error = E;
fn try_from(ast: Ast) -> Result<KnownAst<T>, Self::Error> {
KnownAst::try_new(ast)
}
}
/// One can always throw away the knowledge.
impl<T> From<KnownAst<T>> for Ast {
fn from(known_ast: KnownAst<T>) -> Ast {
known_ast.ast
}
}
impl<'a, T> From<&'a KnownAst<T>> for &'a Ast {
fn from(known_ast: &'a KnownAst<T>) -> &'a Ast {
&known_ast.ast
}
}
impl<T> HasTokens for KnownAst<T> {
fn feed_to(&self, consumer: &mut impl TokenConsumer) {
self.ast.feed_to(consumer)
}
}
impl<T> Display for KnownAst<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.ast, f)
}
}
// ===============
// === Aliases ===
// ===============
/// For an input like `[Unrecognized] [Prefix Ast]`, this generates aliases like:
/// ```text
/// pub type Unrecognized = KnownAst<crate::Unrecognized>;
/// pub type Prefix = KnownAst<crate::Prefix<Ast>>;
/// // etc ...
/// ```
macro_rules! generate_alias {
( $([$name:ident $($tp:ty)? ])* ) => {$(
#[allow(missing_docs)]
pub type $name = KnownAst<crate::$name $(<$tp>)? >;
)*};
}
// Generates aliases for each Shape variant.
with_shape_variants!(generate_alias);
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_known_ast() {
let ast_var = crate::Ast::var("foo");
// This is truly a Var, so we can unwrap and directly access its fields.
let known_var = Var::try_from(&ast_var).unwrap();
assert_eq!(known_var.name, "foo");
let known_var: Var = ast_var.clone().try_into().unwrap();
assert_eq!(known_var.name, "foo");
// This is not an Infix, so we won't get a KnownAst object.
let known_infix_opt = Infix::try_from(&ast_var);
assert!(known_infix_opt.is_err());
}
}

File diff suppressed because it is too large


@ -1,195 +0,0 @@
//! Utilities for dealing with macro-related parts of AST and language, including `Match` shape and
//! such constructs as lambda expressions.
use crate::prelude::*;
use crate::crumbs::Located;
use crate::known;
use crate::BlockLine;
// ==============
// === Export ===
// ==============
pub mod skip_and_freeze;
// ==================================
// === Recognized Macros Keywords ===
// ==================================
/// The keyword introducing a qualified import declaration. See:
/// https://enso.org/docs/developer/enso/syntax/imports.html#import-syntax
pub const QUALIFIED_IMPORT_KEYWORD: &str = "import";
/// The keyword introducing an unqualified import declaration.
pub const UNQUALIFIED_IMPORT_KEYWORD: &str = "from";
// ========================
// === Disable Comments ===
// ========================
/// Check if this AST is a disabling comment.
pub fn is_disable_comment(ast: &Ast) -> bool {
if let crate::Shape::Tree(tree) = ast.shape()
&& tree.type_info == crate::TreeType::ExpressionWithComment
&& !tree.span_info.iter().any(|e| matches!(e, crate::SpanSeed::Child(_))) {
true
} else {
false
}
}
// ==============================
// === Documentation Comments ===
// ==============================
// === Ast Description ===
/// Describes the AST of a documentation comment.
#[derive(Clone, Debug)]
pub struct DocumentationCommentAst {
ast: known::Tree,
rendered: ImString,
}
impl DocumentationCommentAst {
/// Interpret given Ast as a documentation comment. Return `None` if it is not recognized.
pub fn new(ast: &Ast) -> Option<Self> {
let ast = crate::known::Tree::try_from(ast).ok()?;
if let crate::TreeType::Documentation { rendered } = &ast.type_info {
let rendered = rendered.clone();
Some(DocumentationCommentAst { ast, rendered })
} else {
None
}
}
}
// === Line Description ===
/// Describes the line with a documentation comment.
#[derive(Clone, Debug)]
pub struct DocumentationCommentLine {
/// Stores the documentation AST and the trailing whitespace length.
line: BlockLine<known::Tree>,
rendered: ImString,
}
impl DocumentationCommentLine {
/// Try constructing from a line. Return `None` if this line has no documentation comment.
pub fn new(line: &BlockLine<&Ast>) -> Option<Self> {
let doc_ast_opt = DocumentationCommentAst::new(line.elem);
doc_ast_opt.map(|doc_ast| Self::from_doc_ast(doc_ast, line.off))
}
/// Treat given documentation AST as the line with a given trailing whitespace.
pub fn from_doc_ast(ast_doc: DocumentationCommentAst, off: usize) -> Self {
Self { line: BlockLine { elem: ast_doc.ast, off }, rendered: ast_doc.rendered }
}
/// Get the line with this comment.
fn line(&self) -> &BlockLine<known::Tree> {
&self.line
}
/// Convenience function that throws away some information to return the line description that
/// is used in AST blocks.
fn block_line(&self) -> BlockLine<Option<Ast>> {
self.line.as_ref().map(|known_ast| Some(known_ast.ast().clone_ref()))
}
}
// === Full Description ===
/// Structure holding the documentation comment AST and related information necessary to deal with
/// them.
#[derive(Clone, Debug)]
pub struct DocumentationCommentInfo {
/// Description of the line with the documentation comment.
pub line: DocumentationCommentLine,
/// The absolute indent of the block that contains the line with documentation comment.
pub block_indent: usize,
}
impl DocumentationCommentInfo {
/// Try to obtain information about a documentation comment line from block with a given indent.
pub fn new(line: &BlockLine<&Ast>, block_indent: usize) -> Option<Self> {
Some(Self { line: DocumentationCommentLine::new(line)?, block_indent })
}
/// Get the line with this comment.
pub fn line(&self) -> &BlockLine<known::Tree> {
self.line.line()
}
/// Get the documentation comment's AST.
pub fn ast(&self) -> known::Tree {
self.line.line.elem.clone_ref()
}
/// Convenience function that throws away some information to return the line description that
/// is used in AST blocks.
pub fn block_line(&self) -> BlockLine<Option<Ast>> {
self.line.block_line()
}
/// Get the documentation text.
///
/// The text is pretty-printed from the UI perspective: leading whitespace is stripped from all
/// lines up to the column following the comment introducer (`##`).
pub fn pretty_text(&self) -> ImString {
self.line.rendered.clone()
}
/// Generates the source code text of the comment line from a pretty text.
pub fn text_to_repr(context_indent: usize, text: &str) -> String {
let indent = " ".repeat(context_indent);
let mut lines = text.lines();
// First line must always exist, even for an empty comment.
let first_line = format!("##{}", lines.next().unwrap_or_default());
let other_lines = lines.map(|line| format!("{indent} {line}"));
let mut out_lines = std::iter::once(first_line).chain(other_lines);
out_lines.join("\n")
}
}
/// Check if given Ast stores a documentation comment.
pub fn is_documentation_comment(ast: &Ast) -> bool {
DocumentationCommentAst::new(ast).is_some()
}
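// A minimal sketch of `text_to_repr`, following directly from the
// implementation above: continuation lines are rendered as the context indent
// plus one space.
#[cfg(test)]
mod comment_repr_tests {
    use super::*;

    #[test]
    fn text_to_repr_layout() {
        let src = DocumentationCommentInfo::text_to_repr(0, "First line\nSecond line");
        assert_eq!(src, "##First line\n Second line");
    }
}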
// ===============
// === Lambdas ===
// ===============
/// Describes the lambda-expression's pieces: the argument and the body.
#[allow(missing_docs)]
#[derive(Clone, Debug)]
pub struct LambdaInfo<'a> {
pub arg: Located<&'a Ast>,
pub body: Located<&'a Ast>,
}
/// Describes the given Ast as a lambda, if this is a matched `->` builtin macro.
pub fn as_lambda(ast: &Ast) -> Option<LambdaInfo> {
if let crate::Shape::Tree(crate::Tree { type_info: crate::TreeType::Lambda, .. }) = ast.shape()
{
let mut iter = ast.iter_subcrumbs().map(|crumb| ast.get_located(crumb).unwrap());
let arg = iter.next().unwrap();
let body = iter.next().unwrap();
Some(LambdaInfo { arg, body })
} else {
None
}
}


@ -1,302 +0,0 @@
//! Helper functions and data structures for [`SKIP`] and [`FREEZE`] macros support.
//!
//! [`SKIP`] and [`FREEZE`] macros change the behavior of the expression they are prepended to.
//!
//! When used together in a single expression, [`SKIP`] macro always takes the priority and is
//! placed at the beginning. For example, `SKIP FREEZE foo`.
use enso_prelude::*;
use crate::known;
use crate::Ast;
use crate::HasRepr;
// =================
// === Constants ===
// =================
/// The text representation of the [`SKIP`] macro's name.
pub const SKIP_MACRO_IDENTIFIER: &str = "SKIP";
/// The text representation of the [`FREEZE`] macro's name.
pub const FREEZE_MACRO_IDENTIFIER: &str = "FREEZE";
// ==================
// === MacrosInfo ===
// ==================
/// The information about macros attached to the expression.
#[derive(Debug, Clone, Copy, Default, PartialEq)]
#[allow(missing_docs)]
pub struct MacrosInfo {
pub skip: bool,
pub freeze: bool,
}
impl MacrosInfo {
/// Check if provided AST contains macros and fill in the info.
pub fn from_ast(ast: &Ast) -> Self {
let is_skip = is_macro_call(ast, SKIP_MACRO_IDENTIFIER);
let is_freeze = if is_skip {
if let Some(body) = maybe_prefix_macro_body(ast) {
is_macro_call(&body, FREEZE_MACRO_IDENTIFIER)
} else {
false
}
} else {
is_macro_call(ast, FREEZE_MACRO_IDENTIFIER)
};
Self { skip: is_skip, freeze: is_freeze }
}
/// `true` if either macro is used.
pub fn has_any_macros(&self) -> bool {
self.skip || self.freeze
}
/// The count of used macros.
pub fn macros_count(&self) -> usize {
self.skip as usize + self.freeze as usize
}
}
// ========================
// === Helper functions ===
// ========================
/// Remove all macros from the AST.
pub fn without_macros(ast: &Ast) -> Ast {
let macros_info = MacrosInfo::from_ast(ast);
if macros_info.skip {
let skip_body = prefix_macro_body(ast);
if macros_info.freeze {
prefix_macro_body(&skip_body)
} else {
skip_body
}
} else if macros_info.freeze {
prefix_macro_body(ast)
} else {
ast.clone()
}
}
/// Execute [`f`], preserving the usage of the [`SKIP`] macro. [`f`] receives the AST without
/// [`SKIP`], and the macro will be preserved in the final result if it existed. Preserves the id
/// of the AST.
pub fn preserving_skip(ast: &mut Ast, f: impl FnOnce(&mut Ast)) -> Ast {
preserving_macro(ast, f, SKIP_MACRO_IDENTIFIER, |info| info.skip)
}
/// Execute [`f`], preserving the usage of the [`FREEZE`] macro. [`f`] receives the AST without
/// [`FREEZE`], and the macro will be preserved in the final result if it existed. Preserves the
/// id of the AST.
pub fn preserving_freeze(ast: &mut Ast, f: impl FnOnce(&mut Ast)) -> Ast {
preserving_macro(ast, f, FREEZE_MACRO_IDENTIFIER, |info| info.freeze)
}
/// A combination of [`preserving_skip`] and [`preserving_freeze`]. Preserves both macros.
pub fn preserving_skip_and_freeze(ast: &mut Ast, f: impl FnOnce(&mut Ast)) -> Ast {
let skip = SKIP_MACRO_IDENTIFIER;
let freeze = FREEZE_MACRO_IDENTIFIER;
let is_skipped = |info: &MacrosInfo| info.skip;
let is_frozen = |info: &MacrosInfo| info.freeze;
let preserve_freeze = move |ast: &mut Ast| *ast = preserving_macro(ast, f, freeze, is_frozen);
preserving_macro(ast, preserve_freeze, skip, is_skipped)
}
/// Helper function for preserving macros in AST.
///
/// [`f`] receives the AST without the given macro, and the macro will be preserved in the final
/// result if it existed. Preserves the id of the AST.
fn preserving_macro(
ast: &mut Ast,
f: impl FnOnce(&mut Ast),
macro_name: &str,
does_contain_macro: impl Fn(&MacrosInfo) -> bool,
) -> Ast {
let macros_info = MacrosInfo::from_ast(ast);
let original_ast_id = ast.id.expect("Node AST must bear an ID");
let is_macro_present = does_contain_macro(&macros_info);
let mut ast = if is_macro_present { prefix_macro_body(ast) } else { ast.clone() };
f(&mut ast);
if is_macro_present {
prepend_with_macro(&mut ast, macro_name);
ast = ast.with_id(original_ast_id);
}
ast
}
/// Check if AST contains a prefix-like macro call with a given name.
pub fn is_macro_call(ast: &Ast, identifier: &str) -> bool {
if let Ok(prefix) = known::Prefix::try_from(ast) {
let name = crate::identifier::name(&prefix.func);
name == Some(identifier)
} else {
// TODO: Check for a [`Tree`] macro (https://github.com/enso-org/enso/issues/5572).
false
}
}
/// Returns a body (argument) of the prefix-like macro. See [`is_macro_call`] docs.
pub fn maybe_prefix_macro_body(ast: &Ast) -> Option<Ast> {
if let Ok(prefix) = known::Prefix::try_from(ast) {
Some(prefix.arg.clone())
} else {
// TODO: Check for a [`Tree`] macro (https://github.com/enso-org/enso/issues/5572).
None
}
}
/// Same as [`maybe_prefix_macro_body`], but logs the error and returns the argument in case of
/// failure.
pub fn prefix_macro_body(ast: &Ast) -> Ast {
if let Some(ast) = maybe_prefix_macro_body(ast) {
ast
} else {
error!("Failed to extract prefix macro body from expression {}.", ast.repr());
ast.clone()
}
}
/// Construct a prefix-like macro call with [`ast`] as a body (argument).
pub fn prepend_with_macro(ast: &mut Ast, macro_identifier: &str) {
let func: Ast = crate::Cons { name: String::from(macro_identifier) }.into();
*ast = crate::Prefix { func, off: 1, arg: ast.clone() }.into();
}
#[cfg(test)]
mod tests {
use super::*;
fn foo() -> Ast {
Ast::var("foo")
}
fn bar() -> Ast {
Ast::var("bar")
}
fn skip_foo() -> Ast {
Ast::prefix(Ast::cons(SKIP_MACRO_IDENTIFIER), foo())
}
fn freeze_foo() -> Ast {
Ast::prefix(Ast::cons(FREEZE_MACRO_IDENTIFIER), foo())
}
fn skip_freeze_foo() -> Ast {
Ast::prefix(Ast::cons(SKIP_MACRO_IDENTIFIER), freeze_foo())
}
#[test]
fn test_macros_info_from_ast() {
// === foo ===
let ast = foo();
let macros_info = MacrosInfo::from_ast(&ast);
assert!(!macros_info.has_any_macros());
assert!(!macros_info.skip);
assert!(!macros_info.freeze);
// === SKIP foo ===
let ast = skip_foo();
assert!(is_macro_call(&ast, SKIP_MACRO_IDENTIFIER));
let macros_info = MacrosInfo::from_ast(&ast);
assert!(macros_info.has_any_macros());
assert!(macros_info.skip);
assert!(!macros_info.freeze);
// === FREEZE foo ===
let ast = freeze_foo();
let macros_info = MacrosInfo::from_ast(&ast);
assert!(macros_info.has_any_macros());
assert!(!macros_info.skip);
assert!(macros_info.freeze);
// === SKIP FREEZE foo ===
let ast = skip_freeze_foo();
let macros_info = MacrosInfo::from_ast(&ast);
assert!(macros_info.has_any_macros());
assert!(macros_info.skip);
assert!(macros_info.freeze);
}
#[test]
fn test_maybe_prefix_macro_body() {
assert!(maybe_prefix_macro_body(&foo()).is_none());
assert!(maybe_prefix_macro_body(&skip_foo()).is_some());
assert!(maybe_prefix_macro_body(&freeze_foo()).is_some());
assert!(maybe_prefix_macro_body(&skip_freeze_foo()).is_some());
}
#[test]
fn test_prepend_with_macro() {
let mut ast = foo();
prepend_with_macro(&mut ast, SKIP_MACRO_IDENTIFIER);
assert_eq!(ast.repr(), skip_foo().repr());
}
#[test]
fn test_without_macros() {
let ast = skip_foo();
assert_eq!(without_macros(&ast).repr(), foo().repr());
let ast = freeze_foo();
assert_eq!(without_macros(&ast).repr(), foo().repr());
let ast = skip_freeze_foo();
assert_eq!(without_macros(&ast).repr(), foo().repr());
let ast = foo();
assert_eq!(without_macros(&ast).repr(), foo().repr());
}
#[test]
fn test_preserving_skip() {
let mut ast = skip_foo();
let original_id = ast.id;
let skip_bar = preserving_skip(&mut ast, |ast| *ast = bar());
assert_eq!(skip_bar.id, original_id);
assert_eq!(skip_bar.repr(), "SKIP bar");
let mut ast = skip_freeze_foo();
assert_eq!(preserving_skip(&mut ast, |ast| *ast = foo()).repr(), "SKIP foo");
let mut ast = foo();
assert_eq!(preserving_skip(&mut ast, |ast| *ast = bar()).repr(), "bar");
}
#[test]
fn test_preserving_freeze() {
let mut ast = freeze_foo();
let original_id = ast.id;
let skip_bar = preserving_freeze(&mut ast, |ast| *ast = bar());
assert_eq!(skip_bar.id, original_id);
assert_eq!(skip_bar.repr(), "FREEZE bar");
let mut ast = skip_freeze_foo();
assert_eq!(preserving_freeze(&mut ast, |ast| *ast = foo()).repr(), "FREEZE foo");
let mut ast = foo();
assert_eq!(preserving_freeze(&mut ast, |ast| *ast = bar()).repr(), "bar");
}
#[test]
fn test_preserving_skip_and_freeze() {
let mut ast = skip_freeze_foo();
let original_id = ast.id;
let with_bar = preserving_skip_and_freeze(&mut ast, |ast| *ast = bar());
assert_eq!(with_bar.id, original_id);
assert_eq!(with_bar.repr(), "SKIP FREEZE bar");
let mut ast = skip_foo();
assert_eq!(preserving_skip_and_freeze(&mut ast, |ast| *ast = bar()).repr(), "SKIP bar");
let mut ast = freeze_foo();
assert_eq!(preserving_skip_and_freeze(&mut ast, |ast| *ast = bar()).repr(), "FREEZE bar");
let mut ast = foo();
assert_eq!(preserving_skip_and_freeze(&mut ast, |ast| *ast = bar()).repr(), "bar");
}
}


@ -1,724 +0,0 @@
//! Utilities for dealing with operators and Ast nodes related to them, like `Infix`, `Section*`.
use crate::prelude::*;
use crate::assoc::Assoc;
use crate::crumbs::Crumb;
use crate::crumbs::InfixCrumb;
use crate::crumbs::Located;
use crate::crumbs::SectionLeftCrumb;
use crate::crumbs::SectionRightCrumb;
use crate::crumbs::SectionSidesCrumb;
use crate::known;
use crate::Ast;
use crate::Id;
use crate::Infix;
use crate::Opr;
use crate::SectionLeft;
use crate::SectionRight;
use crate::SectionSides;
use crate::Shape;
use crate::Var;
// =================
// === Constants ===
// =================
/// Symbols that can appear in operator name, as per
/// https://enso.org/docs/developer/enso/syntax/naming.html#operator-naming
pub const SYMBOLS: [char; 25] = [
'!', '$', '%', '&', '*', '+', '-', '/', '<', '>', '?', '^', '~', '|', ':', '\\', ',', '.', '(',
')', '[', ']', '{', '}', '=',
];
/// Identifiers of operators with special meaning for IDE.
pub mod predefined {
/// Used to create type paths (like `Int.+` or `IO.println`).
pub const ACCESS: &str = ".";
/// Used to create bindings, e.g. `add a b = a + b` or `foo = 5`.
pub const ASSIGNMENT: &str = "=";
/// Used to create lambda expressions, e.g. `a -> b -> a + b`.
pub const ARROW: &str = "->";
/// Used to create right-associative operators, e.g. `a <| b <| c`.
pub const RIGHT_ASSOC: &str = "<|";
}
// ====================
// === AST handling ===
// ====================
/// Checks if the given AST has Opr shape with the name matching given string.
pub fn is_opr_named(ast: &Ast, name: impl Str) -> bool {
let name_ref = name.as_ref();
matches!(ast.shape(), Shape::Opr(Opr { name, .. }) if name == name_ref)
}
/// Checks if given Ast is an assignment operator identifier.
pub fn is_assignment_opr(ast: &Ast) -> bool {
is_opr_named(ast, predefined::ASSIGNMENT)
}
/// Checks if given Ast is an arrow operator identifier.
pub fn is_arrow_opr(ast: &Ast) -> bool {
is_opr_named(ast, predefined::ARROW)
}
/// Checks if given Ast is an access operator identifier.
pub fn is_access_opr(ast: &Ast) -> bool {
is_opr_named(ast, predefined::ACCESS)
}
/// Checks if given Ast is a right-associative operator identifier.
pub fn is_right_assoc_opr(ast: &Ast) -> bool {
is_opr_named(ast, predefined::RIGHT_ASSOC)
}
/// Interpret Ast as accessor chain, like `Int.method`.
///
/// Returns `None` if the parameter is not an access.
pub fn as_access_chain(ast: &Ast) -> Option<Chain> {
Chain::try_new_of(ast, predefined::ACCESS)
}
/// If given Ast is a specific infix operator application, returns it.
pub fn to_specific_infix(ast: &Ast, name: &str) -> Option<known::Infix> {
let infix = known::Infix::try_from(ast).ok()?;
is_opr_named(&infix.opr, name).then_some(infix)
}
/// If given Ast is an assignment infix expression, returns it as Some known::Infix.
pub fn to_assignment(ast: &Ast) -> Option<known::Infix> {
to_specific_infix(ast, predefined::ASSIGNMENT)
}
/// If given Ast is an arrow infix expression, returns it as Some known::Infix.
pub fn to_arrow(ast: &Ast) -> Option<known::Infix> {
to_specific_infix(ast, predefined::ARROW)
}
/// If given Ast is an access infix expression, returns it as Some known::Infix.
pub fn to_access(ast: &Ast) -> Option<known::Infix> {
to_specific_infix(ast, predefined::ACCESS)
}
/// Checks if a given node is an access infix expression.
pub fn is_access(ast: &Ast) -> bool {
matches!(ast.shape(), Shape::Infix(Infix { opr, .. }) if is_access_opr(opr))
}
/// Checks if a given node is an assignment infix expression.
pub fn is_assignment(ast: &Ast) -> bool {
matches!(ast.shape(), Shape::Infix(Infix { opr, .. }) if is_assignment_opr(opr))
}
/// Obtains a new `Opr` with an assignment.
pub fn assignment() -> known::Opr {
// TODO? We could cache and reuse, if we care.
let name = predefined::ASSIGNMENT.into();
let opr = Opr { name, right_assoc: false };
known::Opr::new(opr, None)
}
/// Create a new [`ACCESS`] operator.
pub fn access() -> known::Opr {
let name = predefined::ACCESS.into();
let opr = Opr { name, right_assoc: false };
known::Opr::new(opr, None)
}
/// Create a new [`RIGHT_ASSOC`] operator.
pub fn right_assoc() -> known::Opr {
let name = predefined::RIGHT_ASSOC.into();
let opr = Opr { name, right_assoc: true };
known::Opr::new(opr, None)
}
/// Split qualified name into segments, like `"Int.add"` into `["Int","add"]`.
pub fn name_segments(name: &str) -> impl Iterator<Item = &str> {
name.split(predefined::ACCESS)
}
/// Create a chain of access operators representing a fully qualified name, like `"Int.add"`.
pub fn qualified_name_chain(
mut segments: impl Iterator<Item = impl Into<String>>,
) -> Option<Chain> {
let ast_from_identifier = |ident: &str| -> Ast {
let starts_with_uppercase = |s: &str| s.chars().next().map_or(false, |c| c.is_uppercase());
if starts_with_uppercase(ident) {
known::Cons::new(crate::Cons { name: ident.into() }, None).into()
} else {
known::Var::new(crate::Var { name: ident.into() }, None).into()
}
};
let arg_with_offset = |s: &str| ArgWithOffset { arg: ast_from_identifier(s), offset: 0 };
let target = segments.next()?;
let target = Some(arg_with_offset(target.into().as_str()));
let args = segments
.map(|segment| ChainElement {
operator: access(),
operand: Some(arg_with_offset(segment.into().as_str())),
offset: 0,
infix_id: None,
})
.collect_vec();
let operator = access();
Some(Chain { target, args, operator })
}
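// A minimal sketch of `qualified_name_chain`: two identifier segments chained
// by the access operator; with zero offsets, the chain should print with no
// spaces around the dot.
#[cfg(test)]
mod qualified_name_tests {
    use super::*;
    use crate::HasRepr;

    #[test]
    fn chain_from_segments() {
        let chain = qualified_name_chain(vec!["Int", "add"].into_iter()).unwrap();
        let ast = crate::Ast::from(chain);
        assert_eq!(ast.repr(), "Int.add");
    }
}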
// =======================
// === Named arguments ===
// =======================
/// Matched AST fragments for named argument, flattened into easy to access structure.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct NamedArgumentDef<'a> {
pub id: Option<Id>,
pub name: &'a str,
pub larg: &'a Ast,
pub loff: usize,
pub opr: &'a Ast,
pub roff: usize,
pub rarg: &'a Ast,
}
/// Match AST against named argument pattern. Pack AST fragments into flat `NamedArgumentDef`
/// structure. Does not clone or allocate.
///
/// ```text
/// name=expression - Infix
/// name |- Var
/// = |- Opr ASSIGN
/// expression `- any Ast
/// ```
pub fn match_named_argument(ast: &Ast) -> Option<NamedArgumentDef<'_>> {
let id = ast.id;
match ast.shape() {
Shape::Infix(Infix { larg, loff, opr, roff, rarg }) if is_assignment_opr(opr) =>
match larg.shape() {
Shape::Var(Var { name }) =>
Some(NamedArgumentDef { id, name, larg, loff: *loff, opr, roff: *roff, rarg }),
_ => None,
},
_ => None,
}
}
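// A minimal sketch: matching a `name=expression` named argument built from raw
// shapes, using hypothetical identifier names.
#[cfg(test)]
mod named_argument_tests {
    use super::*;

    #[test]
    fn match_simple_named_argument() {
        let larg = crate::Ast::var("value");
        let rarg = crate::Ast::var("x");
        let opr: crate::Ast = assignment().into();
        let ast: crate::Ast = crate::Infix { larg, loff: 0, opr, roff: 0, rarg }.into();
        let arg = match_named_argument(&ast).expect("should match the named-argument pattern");
        assert_eq!(arg.name, "value");
    }
}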
// ===========================
// === Chain-related types ===
// ===========================
/// A structure which keeps the argument's AST together with information about the offset between
/// it and an operator. We cannot use `Shifted`, because `Shifted` assumes that the offset always
/// comes before the Ast it contains, which is not the case here.
#[allow(missing_docs)]
#[derive(Clone, Debug)]
pub struct ArgWithOffset<T> {
pub arg: T,
pub offset: usize,
}
/// Infix operator operand. Optional, as we deal with Section* nodes as well.
pub type Operand = Option<ArgWithOffset<Ast>>;
/// Infix operator standing between (optional) operands.
pub type Operator = known::Opr;
/// Creates `Operand` from `ast` with offset between it and operator.
pub fn make_operand(arg: Ast, offset: usize) -> Operand {
Some(ArgWithOffset { arg, offset })
}
/// Creates `Operator` from `ast`.
pub fn make_operator(opr: &Ast) -> Option<Operator> {
known::Opr::try_from(opr).ok()
}
/// Describes associativity of the given operator AST.
pub fn assoc(ast: &known::Opr) -> Assoc {
match ast.right_assoc {
true => Assoc::Right,
false => Assoc::Left,
}
}
// ========================
// === GeneralizedInfix ===
// ========================
/// An abstraction over `Infix` and all `SectionSth` nodes. Stores crumb locations for all its ASTs.
#[derive(Clone, Debug)]
pub struct GeneralizedInfix {
/// Left operand, if present.
pub left: Operand,
/// The operator, always present.
pub opr: Operator,
/// Right operand, if present.
pub right: Operand,
/// Infix id.
pub id: Option<Id>,
}
/// A structure used for GeneralizedInfix construction which marks operands as _target_ and
/// _argument_. See `target_operand` and `argument_operand` methods.
pub struct MarkedOperands {
/// The self operand, target of the application.
pub target: Operand,
/// Operand other than self.
pub argument: Operand,
}
impl GeneralizedInfix {
/// Tries to interpret the given AST node as a GeneralizedInfix. Returns None if the Ast is not
/// any kind of application of an infix operator.
pub fn try_new(ast: &Ast) -> Option<GeneralizedInfix> {
let id = ast.id;
match ast.shape().clone() {
Shape::Infix(infix) => Some(GeneralizedInfix {
id,
left: make_operand(infix.larg, infix.loff),
opr: make_operator(&infix.opr)?,
right: make_operand(infix.rarg, infix.roff),
}),
Shape::SectionLeft(left) => Some(GeneralizedInfix {
id,
left: make_operand(left.arg, left.off),
opr: make_operator(&left.opr)?,
right: None,
}),
Shape::SectionRight(right) => Some(GeneralizedInfix {
id,
left: None,
opr: make_operator(&right.opr)?,
right: make_operand(right.arg, right.off),
}),
Shape::SectionSides(sides) => Some(GeneralizedInfix {
id,
left: None,
opr: make_operator(&sides.opr)?,
right: None,
}),
_ => None,
}
}
/// Constructor with operands marked as target and argument.
pub fn new_from_operands(operands: MarkedOperands, opr: Operator, id: Option<Id>) -> Self {
match assoc(&opr) {
Assoc::Left =>
GeneralizedInfix { opr, id, left: operands.target, right: operands.argument },
Assoc::Right =>
GeneralizedInfix { opr, id, left: operands.argument, right: operands.target },
}
}
/// Convert to AST node.
pub fn into_ast(self) -> Ast {
let ast: Ast = match (self.left, self.right) {
(Some(left), Some(right)) => Infix {
larg: left.arg,
loff: left.offset,
opr: self.opr.into(),
roff: right.offset,
rarg: right.arg,
}
.into(),
(Some(left), None) =>
SectionLeft { arg: left.arg, off: left.offset, opr: self.opr.into() }.into(),
(None, Some(right)) =>
SectionRight { opr: self.opr.into(), off: right.offset, arg: right.arg }.into(),
(None, None) => SectionSides { opr: self.opr.into() }.into(),
};
if let Some(id) = self.id {
ast.with_id(id)
} else {
ast
}
}
/// Associativity of the operator used in this infix expression.
pub fn assoc(&self) -> Assoc {
assoc(&self.opr)
}
/// Identifier name of the operator used in this infix expression.
pub fn name(&self) -> &str {
&self.opr.name
}
/// The self operand, target of the application.
pub fn target_operand(&self) -> &Operand {
match self.assoc() {
Assoc::Left => &self.left,
Assoc::Right => &self.right,
}
}
/// Operand other than self.
pub fn argument_operand(&self) -> &Operand {
match self.assoc() {
Assoc::Left => &self.right,
Assoc::Right => &self.left,
}
}
/// Converts a chain of infix applications using the same operator into a `Chain`.
/// Sample inputs are `x,y,x` or `a+b+` or `+5+5+5`. Note that `Section*` nodes
/// are also supported, along with the `Infix` nodes.
pub fn flatten(&self) -> Chain {
self.flatten_with_offset(0)
}
fn flatten_with_offset(&self, offset: usize) -> Chain {
let target = self.target_operand().clone();
let rest = ChainElement {
offset,
operator: self.opr.clone(),
operand: self.argument_operand().clone(),
infix_id: self.id,
};
let rest_offset = rest.operand.as_ref().map_or_default(|op| op.offset);
let target_subtree_infix = target.clone().and_then(|arg| {
let offset = arg.offset;
GeneralizedInfix::try_new(&arg.arg).map(|arg| ArgWithOffset { arg, offset }).filter(
|target_infix| {
// For access operators, do not flatten them if there is a space before the dot.
// For example, `Foo . Bar . Baz` should not be flattened to `Foo.Bar.Baz`, as
// those should be treated as potential separate prefix expressions, allowing
// operator placeholders to be inserted.
rest_offset == 0 || target_infix.arg.name() != predefined::ACCESS
},
)
});
let mut target_subtree_flat = match target_subtree_infix {
Some(target_infix) if target_infix.arg.name() == self.name() =>
target_infix.arg.flatten_with_offset(target_infix.offset),
_ => Chain { target, args: Vec::new(), operator: self.opr.clone() },
};
target_subtree_flat.args.push(rest);
target_subtree_flat
}
}
impl From<GeneralizedInfix> for Ast {
fn from(infix: GeneralizedInfix) -> Self {
infix.into_ast()
}
}
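// A minimal sketch of `flatten`: `a + b + c` flattens to target `a` plus two
// argument elements. The shape-to-Ast conversions are assumed to follow the
// pattern used elsewhere in this module.
#[cfg(test)]
mod flatten_tests {
    use super::*;

    fn plus() -> crate::Ast {
        crate::known::Opr::new(crate::Opr { name: "+".into(), right_assoc: false }, None).into()
    }

    fn infix(larg: crate::Ast, rarg: crate::Ast) -> crate::Ast {
        crate::Infix { larg, loff: 1, opr: plus(), roff: 1, rarg }.into()
    }

    #[test]
    fn flatten_plus_chain() {
        let expr = infix(infix(crate::Ast::var("a"), crate::Ast::var("b")), crate::Ast::var("c"));
        let chain = Chain::try_new(&expr).expect("an infix application");
        assert!(chain.target.is_some());
        assert_eq!(chain.args.len(), 2);
    }
}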
// =============
// === Chain ===
// =============
/// Result of flattening infix operator chain, like `a+b+c` or `Foo.Bar.Baz`.
#[derive(Clone, Debug)]
pub struct Chain {
/// The primary application target (the left- or right-most operand, depending on the
/// operator's associativity).
pub target: Operand,
/// Subsequent operands applied to the `target`.
pub args: Vec<ChainElement>,
/// Operator AST. Generally all operators in the chain should be the same (except for id).
/// It is not specified exactly which operator in the chain this AST belongs to.
pub operator: known::Opr,
}
impl Chain {
/// If this is an infix application, flattens the whole chain and returns the result.
/// Otherwise, returns None.
pub fn try_new(ast: &Ast) -> Option<Chain> {
GeneralizedInfix::try_new(ast).map(|infix| infix.flatten())
}
/// Flattens infix chain if this is infix application of given operator.
pub fn try_new_of(ast: &Ast, operator: &str) -> Option<Chain> {
let infix = GeneralizedInfix::try_new(ast)?;
(infix.name() == operator).as_some_from(|| infix.flatten())
}
/// Iterates over operands beginning with target (this argument) and then subsequent
/// arguments.
pub fn enumerate_operands(
&self,
) -> impl Iterator<Item = Option<Located<&ArgWithOffset<Ast>>>> + '_ {
let rev_args = self.args.iter().rev();
let target_crumbs = rev_args.map(ChainElement::crumb_to_previous).collect_vec();
let target = self.target.as_ref();
let loc_target = std::iter::once(target.map(|opr| Located::new(target_crumbs, opr)));
let args = self.args.iter().enumerate();
let loc_args = args.map(move |(i, elem)| {
elem.operand.as_ref().map(|operand| {
let latter_args = self.args.iter().skip(i + 1);
let to_infix = latter_args.rev().map(ChainElement::crumb_to_previous);
let has_target = self.target.is_some() || i > 0;
let crumbs = to_infix.chain(elem.crumb_to_operand(has_target)).collect_vec();
Located::new(crumbs, operand)
})
});
loc_target.chain(loc_args)
}
/// Iterates over non-empty operands beginning with target (this argument) and then subsequent
/// arguments.
pub fn enumerate_non_empty_operands(
&self,
) -> impl Iterator<Item = Located<&ArgWithOffset<Ast>>> + '_ {
self.enumerate_operands().flatten()
}
/// Iterates over all operator's AST in this chain, starting from target side.
pub fn enumerate_operators(&self) -> impl Iterator<Item = Located<&known::Opr>> + '_ {
self.args.iter().enumerate().map(move |(i, elem)| {
let to_infix = self.args.iter().skip(i + 1).rev().map(ChainElement::crumb_to_previous);
let has_target = self.target.is_some() || i > 0;
let crumbs = to_infix.chain(elem.crumb_to_operator(has_target)).collect_vec();
Located::new(crumbs, &elem.operator)
})
}
/// Insert a new operand at the given index. The target's index is 0, the first argument's index
/// is 1, and so on. Inserting at index 0 will set the new operand as the new target, and the old
/// target will become the first argument.
///
/// Indexing does not skip `None` operands. The function panics if the given index is greater
/// than the operand count.
pub fn insert_operand(&mut self, at_index: usize, operand: ArgWithOffset<Ast>) {
let offset = operand.offset;
let mut operand = Some(operand);
let operator = self.operator.clone_ref();
let before_target = at_index == 0;
let infix_id: Option<Id> = None;
if before_target {
std::mem::swap(&mut operand, &mut self.target);
self.args.insert(0, ChainElement { operator, operand, offset, infix_id })
} else {
self.args.insert(at_index - 1, ChainElement { operator, operand, offset, infix_id })
}
}
/// Add operand as a new last argument.
pub fn push_operand(&mut self, operand: ArgWithOffset<Ast>) {
let last_index = self.args.len() + 1;
self.insert_operand(last_index, operand)
}
/// Erase the current target from the chain, and make the current first operand the new target.
/// Panics if there is no operand besides the target.
pub fn erase_target(&mut self) {
let new_target = self.args.remove(0).operand;
self.target = new_target
}
/// Erase `n` leading arguments from the chain (including the target), and make the next
/// remaining argument the new target. Panics if there are not enough arguments to remove.
pub fn erase_leading_operands(&mut self, n: usize) {
if n == 0 {
return;
}
let last_removed_arg = self.args.drain(0..n).next_back();
self.target = last_removed_arg.expect("Not enough operands to erase").operand;
}
/// The right-most non-empty operand of the chain, if it has one.
pub fn last_operand(&self) -> Option<&ArgWithOffset<Ast>> {
let last_arg = self.args.iter().rev().filter_map(|arg| arg.operand.as_ref()).next();
last_arg.or(self.target.as_ref())
}
/// Replace the target and the first argument with a new target that is a proper Infix or
/// Section AST node. Does nothing if there are no operands besides the target.
pub fn fold_arg(&mut self) {
if let Some(element) = self.args.pop_front() {
let target = std::mem::take(&mut self.target);
let operator = element.operator;
let argument = element.operand;
let operands = MarkedOperands { target, argument };
let id = element.infix_id;
let new_infix = GeneralizedInfix::new_from_operands(operands, operator, id);
let new_with_offset =
ArgWithOffset { arg: new_infix.into_ast(), offset: element.offset };
self.target = Some(new_with_offset)
}
}
/// Consumes the chain and returns the AST node generated from it. The ids of the Infix and
/// Section nodes are not preserved from the AST which was used to generate this chain.
///
/// Panics if called on a chain with a `None` target and an empty argument list.
pub fn into_ast(mut self) -> Ast {
while !self.args.is_empty() {
self.fold_arg()
}
if let Some(target) = self.target {
target.arg
} else {
SectionSides { opr: self.operator.into() }.into()
}
}
/// True if all operands are set, i.e. there are no section shapes in this chain.
pub fn all_operands_set(&self) -> bool {
self.target.is_some() && self.args.iter().all(|arg| arg.operand.is_some())
}
/// Try to convert the chain into a list of qualified name segments. A qualified name consists
/// of identifiers chained by the [`ACCESS`] operator.
pub fn as_qualified_name_segments(&self) -> Option<Vec<ImString>> {
let every_operator_is_access = self
.enumerate_operators()
.all(|opr| opr.item.ast().repr() == crate::opr::predefined::ACCESS);
let name_segments: Option<Vec<_>> = self
.enumerate_operands()
.flatten()
.map(|opr| crate::identifier::name(&opr.item.arg).map(ImString::new))
.collect();
let name_segments = name_segments?;
if every_operator_is_access && !name_segments.is_empty() {
Some(name_segments)
} else {
None
}
}
}
impl From<Chain> for Ast {
fn from(chain: Chain) -> Self {
chain.into_ast()
}
}
// === Chain Element ===
/// Element of the infix application chain, i.e. operator and its operand.
#[derive(Clone, Debug)]
pub struct ChainElement {
#[allow(missing_docs)]
pub operator: Operator,
/// Operand on the opposite side to the `this` argument.
/// Depending on the operator's associativity, it is either on the right (for left-associative
/// operators) or on the left side of the operator.
pub operand: Operand,
/// Offset between this operand and the next operator.
pub offset: usize,
/// Id of infix AST which applies this operand.
pub infix_id: Option<Id>,
}
impl ChainElement {
/// Return the AST crumb to the node that is a chain of previous operands. It assumes that
/// such a node exists.
pub fn crumb_to_previous(&self) -> Crumb {
let has_operand = self.operand.is_some();
match assoc(&self.operator) {
Assoc::Left if has_operand => InfixCrumb::LeftOperand.into(),
Assoc::Left => SectionLeftCrumb::Arg.into(),
Assoc::Right if has_operand => InfixCrumb::RightOperand.into(),
Assoc::Right => SectionRightCrumb::Arg.into(),
}
}
/// Return AST crumb to the operand, assuming that this operand exists.
pub fn crumb_to_operand(&self, has_target: bool) -> Crumb {
match assoc(&self.operator) {
Assoc::Left if has_target => InfixCrumb::RightOperand.into(),
Assoc::Left => SectionRightCrumb::Arg.into(),
Assoc::Right if has_target => InfixCrumb::LeftOperand.into(),
Assoc::Right => SectionLeftCrumb::Arg.into(),
}
}
/// Return AST crumb to the operator.
pub fn crumb_to_operator(&self, has_target: bool) -> Crumb {
let has_operand = self.operand.is_some();
match assoc(&self.operator) {
_ if has_target && has_operand => InfixCrumb::Operator.into(),
Assoc::Left if has_target => SectionLeftCrumb::Opr.into(),
Assoc::Left if has_operand => SectionRightCrumb::Opr.into(),
Assoc::Right if has_target => SectionRightCrumb::Opr.into(),
Assoc::Right if has_operand => SectionLeftCrumb::Opr.into(),
_ => SectionSidesCrumb.into(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn expect_at(operand: &Operand, expected_ast: &Ast) {
assert_eq!(&operand.as_ref().unwrap().arg, expected_ast);
}
fn test_enumerating(chain: &Chain, root_ast: &Ast, expected_asts: &[&Ast]) {
assert_eq!(chain.enumerate_non_empty_operands().count(), expected_asts.len());
for (elem, expected) in chain.enumerate_non_empty_operands().zip(expected_asts) {
assert_eq!(elem.item.arg, **expected);
let ast = root_ast.get_traversing(&elem.crumbs).unwrap();
assert_eq!(ast, *expected);
}
}
#[test]
fn infix_chain_tests() {
let a = Ast::var("a");
let b = Ast::var("b");
let c = Ast::var("c");
let a_plus_b = Ast::infix(a.clone(), "+", b.clone());
let a_plus_b_plus_c = Ast::infix(a_plus_b, "+", c.clone());
let chain = Chain::try_new(&a_plus_b_plus_c).unwrap();
expect_at(&chain.target, &a);
expect_at(&chain.args[0].operand, &b);
expect_at(&chain.args[1].operand, &c);
test_enumerating(&chain, &a_plus_b_plus_c, &[&a, &b, &c]);
}
#[test]
fn infix_section() {
let a = Ast::var("a");
let a_plus = Ast::section_left(a.clone(), "+");
let chain = Chain::try_new(&a_plus).unwrap();
expect_at(&chain.target, &a);
test_enumerating(&chain, &a_plus, &[&a]);
}
#[test]
fn infix_chain_tests_right() {
let a = Ast::var("a");
let b = Ast::var("b");
let c = Ast::var("c");
let b_comma_c = Ast::infix(b.clone(), ",", c.clone());
let a_comma_b_comma_c = Ast::infix(a.clone(), ",", b_comma_c);
let chain = Chain::try_new(&a_comma_b_comma_c).unwrap();
expect_at(&chain.target, &c);
expect_at(&chain.args[0].operand, &b);
expect_at(&chain.args[1].operand, &a);
}
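#[test]
fn as_qualified_name_segments_sketch() {
// A hedged sketch of `as_qualified_name_segments`; `Ast::cons` and the exact behaviour
// of `crate::identifier::name` on constructors are assumptions here.
let foo = Ast::cons("Foo");
let bar = Ast::cons("Bar");
let foo_bar = Ast::infix(foo, ".", bar);
let chain = Chain::try_new(&foo_bar).unwrap();
let segments = chain.as_qualified_name_segments().unwrap();
assert_eq!(segments, vec![ImString::new("Foo"), ImString::new("Bar")]);
}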
#[test]
fn assignment_opr_test() {
let opr = assignment();
assert_eq!(opr.name, "=");
assert_eq!(opr.repr(), "=");
}
// TODO[ao] add tests for modifying chain.
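// A hedged sketch of such a test; it assumes `ArgWithOffset { arg, offset }` construction
// behaves as used in `fold_arg` above.
#[test]
fn chain_modification_sketch() {
let a = Ast::var("a");
let b = Ast::var("b");
let c = Ast::var("c");
let a_plus_b = Ast::infix(a, "+", b.clone());
let mut chain = Chain::try_new(&a_plus_b).unwrap();
// Appending `c` makes it the last argument of the chain.
chain.push_operand(ArgWithOffset { arg: c.clone(), offset: 1 });
expect_at(&chain.args[1].operand, &c);
// Erasing the target promotes the first argument (`b`) to the new target.
chain.erase_target();
expect_at(&chain.target, &b);
}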
}

View File

@ -1,304 +0,0 @@
//! Utilities for dealing with `Prefix` application Ast nodes.
use crate::prelude::*;
use crate::crumbs::Located;
use crate::crumbs::PrefixCrumb;
use crate::known;
use crate::opr;
use crate::Ast;
use crate::HasTokens;
use crate::Id;
use crate::Infix;
use crate::Prefix;
use crate::Shifted;
use crate::Token;
use crate::TokenConsumer;
// ================
// === Argument ===
// ================
/// Struct representing an element of a Prefix Chain: an argument applied over the function.
#[derive(Clone, Debug)]
pub struct Argument {
/// An argument AST with the offset between it and the previous argument or function.
pub sast: Shifted<Ast>,
/// The id of Prefix AST of this argument application.
pub prefix_id: Option<Id>,
}
impl Argument {
/// Make an argument with the specified AST, offset, and prefix id.
pub fn new(expression: Ast, offset: usize, prefix_id: Option<Id>) -> Self {
Self { sast: Shifted::new(offset, expression), prefix_id }
}
/// Make an argument consisting of a single blank placeholder: `_`.
pub fn new_blank(offset: usize, prefix_id: Option<Id>) -> Self {
Self::new(Ast::blank(), offset, prefix_id)
}
/// Convert a non-named argument expression into a named argument.
///
/// Note: This function does not check if the argument is already named. Calling it on a named
/// argument will return an unspecified invalid result.
pub fn into_named(self, name: impl Str) -> Self {
let named_ast = Ast::named_argument(name, self.sast.wrapped);
Self::new(named_ast, self.sast.off, self.prefix_id)
}
}
impl HasTokens for Argument {
fn feed_to(&self, consumer: &mut impl TokenConsumer) {
self.sast.feed_to(consumer)
}
}
// ====================
// === Prefix Chain ===
// ====================
/// Result of flattening a sequence of prefix applications.
#[derive(Clone, Debug)]
pub struct Chain {
/// The function (the initial application target).
pub func: Ast,
/// Subsequent arguments applied over the function.
pub args: Vec<Argument>,
}
impl Chain {
/// Construct a prefix application chain from a function and sequence of arguments.
pub fn new(func: Ast, args: impl IntoIterator<Item = Ast>) -> Self {
let args = args
.into_iter()
.map(|arg| Argument { sast: Shifted::new(1, arg), prefix_id: Some(Id::new_v4()) })
.collect_vec();
Self { func, args }
}
/// Construct a prefix application chain where the function is an AST representing the `this`
/// argument applied on the `func` function.
///
/// For example, calling this with `func` being `foo` and `this` being `bar`, the prefix chain
/// will be represented as `bar.foo <arguments...>`.
pub fn new_with_this(func: Ast, this: Ast, other_args: impl IntoIterator<Item = Ast>) -> Self {
let infix = Infix {
larg: this,
loff: 0,
opr: Ast::opr(opr::predefined::ACCESS),
roff: 0,
rarg: func,
};
let new_func = Ast::new(infix, None);
Self::new(new_func, other_args)
}
/// Translates calls like `a b c`, which generate a nested prefix chain like
/// `App(App(a,b),c)`, into a flat list where the first element is the function and the
/// remaining elements are the arguments: `{func:a, args:[b,c]}`.
pub fn from_prefix(ast: &known::Prefix) -> Chain {
fn run(ast: &known::Prefix, acc: &mut Vec<Argument>) -> Ast {
let func = match known::Prefix::try_from(&ast.func) {
Ok(lhs_app) => run(&lhs_app, acc),
_ => ast.func.clone(),
};
let sast = Shifted { wrapped: ast.arg.clone(), off: ast.off };
let prefix_id = ast.id();
acc.push(Argument { sast, prefix_id });
func
}
let mut args = Vec::new();
let func = run(ast, &mut args);
Chain { func, args }
}
/// Like `new`, but returns `None` if the given Ast is not of a Prefix shape.
pub fn from_ast(ast: &Ast) -> Option<Chain> {
known::Prefix::try_from(ast).as_ref().map(Chain::from_prefix).ok()
}
/// Like `new`, but if the AST is not a prefix, interprets it as a function with an empty
/// argument list.
pub fn from_ast_non_strict(ast: &Ast) -> Chain {
if let Ok(ref prefix) = known::Prefix::try_from(ast) {
// Case like `a b c`
Self::from_prefix(prefix)
} else if let Ok(ref section) = known::SectionRight::try_from(ast) {
// Case like `+ a b`
let func = section.opr.clone();
let right_chain = Chain::from_ast_non_strict(&section.arg);
let sast = Shifted { wrapped: right_chain.func, off: section.off };
let prefix_id = section.id();
let mut args = vec![Argument { sast, prefix_id }];
args.extend(right_chain.args);
Chain { func, args }
} else {
// Case like `a`
let func = ast.clone();
let args = Vec::new();
Chain { func, args }
}
}
/// Crumbs location for the application target (function).
#[allow(trivial_bounds)]
pub fn func_location(&self) -> impl Iterator<Item = PrefixCrumb> {
// Location is always like [Func,Func,…,Func].
std::iter::repeat(PrefixCrumb::Func).take(self.args.len())
}
/// Returns an application target `Ast` reference along with its location.
pub fn located_func(&self) -> Located<&Ast> {
Located::new(self.func_location(), &self.func)
}
/// Iterates over all arguments, left-to-right.
pub fn enumerate_args(&self) -> impl Iterator<Item = Located<&Ast>> + '_ {
// Location is always like [Func,Func,…,Func,Arg].
// We iterate beginning with the most deeply nested args, so we can create the crumbs
// location once and then just pop the initial crumb when traversing arguments.
let arg_once = std::iter::once(PrefixCrumb::Arg);
let func_crumbs = self.func_location().chain(arg_once).collect_vec();
let mut i = 0;
self.args.iter().map(move |arg| {
i += 1;
Located::new(&func_crumbs[i..], &arg.sast.wrapped)
})
}
/// Replace the `func` and the first argument with a new `func` that is a proper Prefix AST
/// node. Does nothing if there are no arguments.
pub fn fold_arg(&mut self) {
if let Some(arg) = self.args.pop_front() {
let new_prefix =
Prefix { arg: arg.sast.wrapped, func: self.func.clone_ref(), off: arg.sast.off };
self.func = Ast::new(new_prefix, arg.prefix_id);
}
}
/// Convert the chain to proper AST node.
pub fn into_ast(mut self) -> Ast {
while !self.args.is_empty() {
self.fold_arg()
}
self.func
}
/// Get the ID of the Ast represented by this chain.
pub fn id(&self) -> Option<Id> {
match self.args.last() {
Some(last_arg) => last_arg.prefix_id,
None => self.func.id,
}
}
/// Insert the argument at the given position in the prefix chain. If the index is out of
/// bounds, additional blank `_` arguments will be inserted first.
pub fn insert_arg(&mut self, index: usize, argument: Argument) {
if let Some(blanks_to_add) = index.checked_sub(self.args.len()) {
let make_blank = || {
let prefix_id = argument.prefix_id.map(|_| Id::new_v4());
Argument::new_blank(argument.sast.off, prefix_id)
};
self.args.extend(std::iter::repeat_with(make_blank).take(blanks_to_add));
}
self.args.insert(index, argument);
}
}
impl HasTokens for Chain {
fn feed_to(&self, consumer: &mut impl TokenConsumer) {
self.func.feed_to(consumer);
for arg in &self.args {
consumer.feed(Token::Off(arg.sast.off));
arg.sast.wrapped.feed_to(consumer);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use uuid::Uuid;
#[test]
fn prefix_chain() {
let a = Ast::var("a");
let b = Ast::var("b");
let c = Ast::var("c");
let a_b = Ast::prefix(a.clone(), b.clone()).with_id(Uuid::new_v4());
let a_b_c = Ast::prefix(a_b.clone(), c.clone()).with_id(Uuid::new_v4());
let chain = Chain::from_ast(&a_b_c).unwrap();
assert_eq!(chain.func, a);
assert_eq!(chain.args[0].sast.wrapped, b);
assert_eq!(chain.args[1].sast.wrapped, c);
assert_eq!(chain.args[0].prefix_id, a_b.id);
assert_eq!(chain.args[1].prefix_id, a_b_c.id);
let (arg1, arg2) = chain.enumerate_args().expect_tuple();
assert_eq!(arg1.item, &b);
assert_eq!(a_b_c.get_traversing(&arg1.crumbs).unwrap(), &b);
assert_eq!(arg2.item, &c);
assert_eq!(a_b_c.get_traversing(&arg2.crumbs).unwrap(), &c);
}
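#[test]
fn prefix_chain_non_strict_sketch() {
// A hedged sketch of `from_ast_non_strict` on a right section; it assumes
// `Ast::section_right(opr, arg)` exists, mirroring `Ast::section_left` used elsewhere.
let a = Ast::var("a");
let plus_a = Ast::section_right("+", a.clone());
let chain = Chain::from_ast_non_strict(&plus_a);
assert_eq!(chain.func.repr(), "+");
assert_eq!(chain.args.len(), 1);
assert_eq!(chain.args[0].sast.wrapped, a);
}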
#[test]
fn prefix_chain_construction() {
let a = Ast::var("a");
let b = Ast::var("b");
let c = Ast::var("c");
let chain = Chain::new(a, vec![b, c]);
assert_eq!(chain.into_ast().repr(), "a b c");
}
#[test]
fn inserting_arg() {
let a = Ast::var("a");
let b = Ast::var("b");
let c = Ast::var("c");
let chain = Chain::new(a, vec![b, c]);
assert_eq!(chain.repr(), "a b c");
let arg =
|text: &str| Argument { prefix_id: None, sast: Shifted::new(1, Ast::var(text)) };
{
let mut chain = chain.clone();
chain.insert_arg(0, arg("arg"));
assert_eq!(chain.repr(), "a arg b c");
}
{
let mut chain = chain.clone();
chain.insert_arg(2, arg("arg"));
assert_eq!(chain.repr(), "a b c arg");
}
{
let mut chain = chain.clone();
chain.insert_arg(3, arg("arg"));
assert_eq!(chain.repr(), "a b c _ arg");
}
{
let mut chain = chain;
chain.insert_arg(4, arg("arg"));
assert_eq!(chain.repr(), "a b c _ _ arg");
}
}
// TODO[ao] add tests for modifying chain.
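// A hedged sketch of such a test, exercising `fold_arg` and `into_ast`.
#[test]
fn folding_args_sketch() {
let a = Ast::var("a");
let b = Ast::var("b");
let c = Ast::var("c");
let mut chain = Chain::new(a, vec![b, c]);
// One fold moves the first argument into the function.
chain.fold_arg();
assert_eq!(chain.func.repr(), "a b");
assert_eq!(chain.args.len(), 1);
// Folding everything reconstructs the full application.
assert_eq!(chain.into_ast().repr(), "a b c");
}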
}

View File

@ -1,174 +0,0 @@
//! Code that supports tokenization of Ast nodes, powering several features such as text
//! generation, obtaining the size of source code, and generating id maps.
use crate::*;
// ======================
// === Token Literals ===
// ======================
/// Token representing blank.
pub const BLANK_TOKEN: char = '_';
/// Symbol appearing after base of the number literal.
pub const NUMBER_BASE_SEPARATOR: char = '_';
/// Suffix to make a modifier from an operator.
pub const MOD_SUFFIX: char = '=';
/// Symbol enclosing formatted Text line.
pub const FMT_QUOTE: &str = "'";
/// Symbol enclosing raw Text line.
pub const RAW_QUOTE: &str = "\"";
/// Symbol used to break lines in Text block.
pub const NEWLINE: char = '\n';
/// Symbol introducing escape segment in the Text.
pub const BACKSLASH: char = '\\';
/// Symbol enclosing expression segment in the formatted Text.
pub const EXPR_QUOTE: char = '`';
/// Symbol that introduces UTF-16 code in the formatted Text segment.
pub const UNICODE16_INTRODUCER: char = 'u';
/// String that opens "UTF-21" code in the formatted Text segment.
pub const UNICODE21_OPENER: &str = "u{";
/// String that closes the "UTF-21" code in the formatted Text segment.
pub const UNICODE21_CLOSER: &str = "}";
/// Symbol that introduces a UTF-32 code in the formatted Text segment.
pub const UNICODE32_INTRODUCER: char = 'U';
/// Quotes opening block of the raw text.
pub const RAW_BLOCK_QUOTES: &str = "\"\"\"";
/// Quotes opening block of the formatted text.
pub const FMT_BLOCK_QUOTES: &str = "'''";
/// A list of possible delimiters of the text literals.
pub const STRING_DELIMITERS: &[&str] = &[RAW_BLOCK_QUOTES, FMT_BLOCK_QUOTES, RAW_QUOTE, FMT_QUOTE];
// =============
// === Block ===
// =============
has_tokens!(BlockLine<T>, self.elem, self.off);
// ===============
// === Shifted ===
// ===============
has_tokens!(Shifted<T>, self.off, self.wrapped);
has_tokens!(ShiftedVec1<T>, self.head, self.tail);
// =============================================================================
// === Shape ===================================================================
// =============================================================================
// ===================
// === Identifiers ===
// ===================
has_tokens!(Blank, BLANK_TOKEN);
has_tokens!(Var, self.name);
has_tokens!(Cons, self.name);
has_tokens!(Opr, self.name);
has_tokens!(Annotation, self.name);
has_tokens!(Mod, self.name, MOD_SUFFIX);
// ==============
// === Number ===
// ==============
/// Helper representing that the optional number base is followed by an additional separator
/// character.
struct NumberBase<T>(T);
has_tokens!(NumberBase<T>, self.0, NUMBER_BASE_SEPARATOR);
has_tokens!(Number, self.base.as_ref().map(NumberBase), self.int);
// ====================
// === Applications ===
// ====================
has_tokens!(Infix<T>, self.larg, self.loff, self.opr, self.roff, self.rarg);
has_tokens!(Prefix<T>, self.func, self.off, self.arg);
has_tokens!(SectionLeft<T>, self.arg, self.off, self.opr);
has_tokens!(SectionRight<T>, self.opr, self.off, self.arg);
has_tokens!(SectionSides<T>, self.opr);
// ==============
// === Module ===
// ==============
// === Module ===
impl<T: HasTokens> HasTokens for Module<T> {
fn feed_to(&self, consumer: &mut impl TokenConsumer) {
let mut iter = self.lines.iter();
if let Some(first_line) = iter.next() {
first_line.feed_to(consumer);
}
for line in iter {
(NEWLINE, line).feed_to(consumer);
}
}
}
// === Block ===
impl<T: HasTokens> HasTokens for Block<T> {
fn feed_to(&self, consumer: &mut impl TokenConsumer) {
NEWLINE.feed_to(consumer);
for empty_line_space in &self.empty_lines {
(empty_line_space, NEWLINE).feed_to(consumer);
}
(self.indent, &self.first_line).feed_to(consumer);
for line in &self.lines {
(NEWLINE, line.elem.as_ref().map(|_| self.indent), line).feed_to(consumer);
}
}
}
// ============
// === Tree ===
// ============
impl<T: HasTokens> HasTokens for Tree<T> {
fn feed_to(&self, consumer: &mut impl TokenConsumer) {
if let Some(str) = &self.leaf_info {
Token::Str(str).feed_to(consumer)
} else {
for element in &self.span_info {
match element {
SpanSeed::Space(SpanSeedSpace { space }) =>
Token::Off(*space).feed_to(consumer),
SpanSeed::Token(SpanSeedToken { token }) => Token::Str(token).feed_to(consumer),
SpanSeed::Child(SpanSeedChild { node }) => node.feed_to(consumer),
}
}
}
if let Some(str) = &self.trailing_token {
Token::Str(str).feed_to(consumer)
}
}
}
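// A hedged usage sketch (not part of the original file): the `has_tokens!` implementations
// above drive text generation, so `repr` should reproduce the token literals. Constructing
// `Number` by struct literal is an assumption about its field visibility.
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn token_literals_in_repr() {
assert_eq!(Ast::blank().repr(), BLANK_TOKEN.to_string());
let number = Number { base: Some("16".into()), int: "ff".into() };
assert_eq!(Ast::new(number, None).repr(), "16_ff");
}
}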

View File

@ -1,55 +0,0 @@
//! Utility code for writing tests dealing with AST types.
use crate::prelude::*;
use crate::Ast;
use crate::HasRepr;
use crate::Module;
use crate::Shape;
/// "Downcasts" given AST's Shape to `T`. Panics if the shape doesn't match.
pub fn expect_shape<'t, T>(ast: &'t Ast) -> &'t T
where &'t Shape<Ast>: TryInto<&'t T> {
match ast.shape().try_into() {
Ok(shape) => shape,
_ => {
let expected_typename = std::any::type_name::<T>();
panic!("failed converting shape into {expected_typename}, got {ast:?}")
}
}
}
/// Takes an Ast that is a module with a single non-empty line and returns that line's AST.
/// Panics if this is not a module or if it does not have exactly one line.
pub fn expect_single_line(ast: &Ast) -> &Ast {
let module: &Module<Ast> = expect_shape(ast);
let (line,) = (module.iter()).expect_tuple();
line
}
/// Checks that all nodes in the subtree have declared spans equal to
/// the spans we calculate.
pub fn validate_spans(ast: &Ast) {
for node in ast.iter_recursive() {
let calculated = node.shape().char_count();
let declared = node.wrapped.wrapped.length;
assert_eq!(calculated, declared, "`{}` part of `{}`", node.repr(), ast.repr());
}
}
/// Panics if duplicated IDs are present in the given AST.
pub fn assert_unique_ids(ast: &Ast) {
let mut ids = HashMap::new();
for node in ast.iter_recursive() {
if let Some(id) = node.id {
if let Some(id2) = ids.insert(id, node) {
panic!(
"Collision for id {id} between `{id2}` and `{node}`.\
\n\nWhole program is:\n{ast}"
)
}
}
}
}
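// A hedged usage sketch (not part of the original file); it assumes that `crate::Var` is the
// shape type produced by `Ast::var`.
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn expect_shape_downcasts() {
let ast = Ast::var("foo");
let var: &crate::Var = expect_shape(&ast);
assert_eq!(var.name, "foo");
validate_spans(&ast);
assert_unique_ids(&ast);
}
}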

View File

@ -1,11 +0,0 @@
//! Common traits defined by this crate.
// ==============
// === Export ===
// ==============
pub use crate::crumbs::Crumbable;
pub use crate::crumbs::TraversableAst;
pub use crate::HasID;
pub use crate::HasRepr;

View File

@ -1,25 +0,0 @@
[package]
name = "ast-macros"
version = "0.1.0"
authors = ["Enso Team <contact@enso.org>"]
edition = "2021"
[lib]
proc-macro = true
[features]
default = []
[dependencies]
proc-macro2 = { workspace = true }
quote = { workspace = true }
Inflector = "0.11.4"
enso-macro-utils = { path = "../../../../../lib/rust/macro-utils" }
[dependencies.syn]
version = "1.0"
features = [
'extra-traits',
'full'
# for syn::File and syn::ItemFn
]

View File

@ -1,284 +0,0 @@
//! Helper macros used when defining AST structures.
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![warn(missing_docs)]
extern crate proc_macro;
mod token;
use crate::token::TokenDescription;
use enso_macro_utils::gather_all_type_reprs;
use enso_macro_utils::repr;
use proc_macro2::Ident;
use proc_macro2::Span;
use proc_macro2::TokenStream;
use quote::quote;
use std::collections::HashSet;
// ==============
// === Macros ===
// ==============
/// A macro that shall be applied to all AST nodes.
///
/// Derives all the traits that are expected to be implemented by AST nodes.
///
/// Implicitly applied by `ast` on target and generated types. Users should not
/// need to use this macro directly.
#[proc_macro_attribute]
pub fn ast_node(
_meta: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let input: TokenStream = input.into();
let output = quote! {
#[derive(Clone,Eq,PartialEq,Debug)]
#[derive(Iterator)]
#input
};
output.into()
}
/// Marks target declaration as `ast_node`. If it is an enumeration, also
/// applies `to_variant_types`.
#[proc_macro_attribute]
pub fn ast(
attrs: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let attrs: TokenStream = attrs.into();
let decl = syn::parse_macro_input!(input as syn::DeriveInput);
let output = match &decl.data {
syn::Data::Enum { .. } => quote! {
#[to_variant_types(#attrs)]
#[ast_node]
#decl
},
_ => quote! {
#[ast_node]
#decl
},
};
output.into()
}
// ==============
// === Macros ===
// ==============
// Note [Expansion Example]
// ~~~~~~~~~~~~~~~~~~~~~~~~
// In order to make the definition easier to read, an example expansion of the
// following definition is provided for each quotation:
//
// #[to_variant_types]
// pub enum Shape<T> {
// Var(Var),
// App(App<T>),
// }
/// Produces the declaration of the structure for the given source enum variant.
fn mk_product_type(
is_flat: bool,
decl: &syn::DeriveInput,
variant: &syn::Variant,
) -> syn::ItemStruct {
use syn::ItemStruct;
let fields = &variant.fields;
let fields = fields.iter();
let types = fields.flat_map(|f| gather_all_type_reprs(&f.ty));
let types = types.collect::<HashSet<_>>();
let ty_vars = decl.generics.params.iter().cloned();
let params = ty_vars.filter(|v| types.contains(&repr(&v))).collect();
let attrs = decl.attrs.clone();
let vis = decl.vis.clone();
let struct_token = syn::token::Struct { span: Span::call_site() };
let ident_flat = variant.ident.clone();
let ident_nested = format!("{}{}", decl.ident, variant.ident);
let ident_nested = Ident::new(&ident_nested, Span::call_site());
let ident = if is_flat { ident_flat } else { ident_nested };
let generics = syn::Generics { params, ..Default::default() };
let mut fields = variant.fields.clone();
let semi_token = None;
fields.iter_mut().for_each(|f| f.vis = vis.clone());
ItemStruct { attrs, vis, struct_token, ident, generics, fields, semi_token }
}
/// Generates the rewritten enumeration declaration.
///
/// Each constructor will be a single-element tuple holding the extracted type.
fn gen_variant_decl(ident: &syn::Ident, variant: &syn::ItemStruct) -> TokenStream {
let variant_ident = &variant.ident;
let params = variant.generics.params.iter();
quote! {
// See note [Expansion Example]
// App(ShapeApp<T>),
// Var(ShapeVar),
#ident(#variant_ident<#(#params),*>)
}
}
/// Generates `From` trait implementations converting each of the extracted types back into
/// the primary enumeration, and `TryFrom` implementations from the primary enumeration into
/// each extracted type.
#[allow(clippy::cognitive_complexity)]
fn gen_from_impls(
ident: &syn::Ident,
decl: &syn::DeriveInput,
variant: &syn::ItemStruct,
) -> TokenStream {
let sum_label = &decl.ident;
let variant_label = &variant.ident;
let variant_name = variant_label.to_string();
let sum_params = &decl.generics.params.iter().cloned().collect::<Vec<_>>();
let variant_params = &variant.generics.params.iter().cloned().collect::<Vec<_>>();
quote! {
// See note [Expansion Example]
// impl<T> From<App<T>> for Shape<T> {
// fn from(t: App<T>) -> Self { Shape::App(t) }
// }
// ...
impl<#(#sum_params),*> From<#variant_label<#(#variant_params),*>>
for #sum_label<#(#sum_params),*> {
fn from(t: #variant_label<#(#variant_params),*>) -> Self {
#sum_label::#ident(t)
}
}
// impl<'t, T> TryFrom<&'t Shape<T>> for &'t Infix<T> {
// type Error = WrongEnum;
// fn try_from(value: &'t Shape<T>) -> Result<Self, Self::Error> {
// match value {
// Shape::Infix(elem) => Ok (elem),
// _ => {
// let error = WrongEnum {
// expected_con : "Infix" };
// Err(error)
// },
// }
// }
// }
impl<'t, #(#sum_params),*> TryFrom<&'t #sum_label<#(#sum_params),*>>
for &'t #variant_label<#(#variant_params),*> {
type Error = WrongEnum;
fn try_from
(value: &'t #sum_label<#(#sum_params),*>)
-> Result<Self, Self::Error> {
match value {
#sum_label::#ident(elem) => Ok(elem),
_ => {
let error = WrongEnum {
expected_con: #variant_name.to_string() };
Err(error)
},
}
}
}
// same as above but for values
impl<#(#sum_params),*> TryFrom<#sum_label<#(#sum_params),*>>
for #variant_label<#(#variant_params),*> {
type Error = WrongEnum;
fn try_from
(value: #sum_label<#(#sum_params),*>)
-> Result<Self, Self::Error> {
match value {
#sum_label::#ident(elem) => Ok(elem),
_ => {
let error = WrongEnum {
expected_con: #variant_name.to_string() };
Err(error)
},
}
}
}
}
}
/// Rewrites an enum definition by creating a new type for each constructor.
///
/// Each nested constructor will be converted to a new `struct` and placed in
/// the parent scope. The created type name will be {EnumName}{ConstructorName}.
/// To name generated types with only their constructor name, use the `flat`
/// attribute: `#[ast(flat)]`.
#[proc_macro_attribute]
pub fn to_variant_types(
attrs: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let attrs: TokenStream = attrs.into();
let decl = syn::parse_macro_input!(input as syn::DeriveInput);
let ident = &decl.ident;
let ty_vars = &decl.generics.params;
let variants = match &decl.data {
syn::Data::Enum(ref data) => data.variants.iter(),
_ => unimplemented!(),
}
.collect::<Vec<_>>();
let is_flat = repr(&attrs) == "flat";
let structs = variants.iter().map(|v| mk_product_type(is_flat, &decl, v));
let structs = structs.collect::<Vec<_>>();
let variant_idents = variants.iter().map(|v| &v.ident).collect::<Vec<_>>();
let variant_decls =
variant_idents.iter().zip(structs.iter()).map(|(i, v)| gen_variant_decl(i, v));
let variant_froms =
variant_idents.iter().zip(structs.iter()).map(|(i, v)| gen_from_impls(i, &decl, v));
// Skip generating structs for single-value, unnamed-field variants, as those wrap types
// created by the user.
let structs = structs.iter().filter(|v| match &v.fields {
syn::Fields::Unnamed(f) => f.unnamed.len() != 1,
_ => true,
});
let decl_attrs = &decl.attrs;
let output = quote! {
#(#decl_attrs)*
pub enum #ident <#ty_vars> {
#(#variant_decls),*
}
#(#structs)*
#(#variant_froms)*
};
output.into()
}
/// Creates a `HasTokens` implementation for a given enum type.
///
/// The given type may only consist of single-element tuple-like variants.
/// The implementation delegates to the underlying `HasTokens` implementations of the
/// stored values.
#[proc_macro_derive(HasTokens)]
pub fn derive_has_tokens(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let decl = syn::parse_macro_input!(input as syn::DeriveInput);
let ret = match decl.data {
syn::Data::Enum(ref e) => token::derive_for_enum(&decl, e),
_ => quote! {},
};
proc_macro::TokenStream::from(ret)
}
/// Provides only a `HasTokens` implementation.
#[proc_macro]
pub fn has_tokens(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let maker = syn::parse::<TokenDescription>(input).unwrap();
maker.has_tokens().into()
}

View File

@ -1,79 +0,0 @@
use enso_macro_utils::path_segment_generic_args;
use proc_macro2::TokenStream;
use quote::quote;
use syn::punctuated::Punctuated;
use syn::Expr;
use syn::GenericArgument;
use syn::PathSegment;
use syn::Token;
/// Inner logic for `derive_has_tokens`.
pub fn derive_for_enum(decl: &syn::DeriveInput, data: &syn::DataEnum) -> TokenStream {
let ident = &decl.ident;
let params = decl.generics.params.iter().collect::<Vec<_>>();
let token_arms = data.variants.iter().map(|v| {
let con_ident = &v.ident;
quote!( #ident::#con_ident (elem) => elem.feed_to(consumer) )
});
let ret = quote! {
impl<#(#params:HasTokens),*> HasTokens for #ident<#(#params),*> {
fn feed_to(&self, consumer:&mut impl TokenConsumer) {
match self { #(#token_arms),* }
}
}
};
ret
}
/// Structure representing input to macros like `has_tokens!`.
///
/// Basically it consists of a type name (with optional generic arguments) and a sequence of
/// expressions that yield the values we use to obtain sub-`HasTokens` implementations.
pub struct TokenDescription {
pub ty: PathSegment,
pub ty_args: Vec<GenericArgument>,
pub exprs: Vec<Expr>,
}
impl syn::parse::Parse for TokenDescription {
/// Parses user-provided input to the macro into our structure.
///
/// First goes the type for which the implementation is to be provided,
/// then an arbitrary sequence of expressions.
/// Panics on invalid input, which is fair for macro code.
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
let ty: PathSegment = input.parse()?;
input.parse::<Option<syn::token::Comma>>()?;
let exprs = Punctuated::<Expr, Token![,]>::parse_terminated(input)?;
let exprs = exprs.iter().cloned().collect::<Vec<_>>();
let ty_args = path_segment_generic_args(&ty);
let ty_args = ty_args.into_iter().cloned().collect(); // get rid of &
Ok(TokenDescription { ty, ty_args, exprs })
}
}
impl TokenDescription {
/// Fills a trait implementation template with given methods.
pub fn make_impl(&self, trait_name: &str, methods: &TokenStream) -> TokenStream {
let trait_name = syn::parse_str::<syn::TypePath>(trait_name).unwrap();
let ty = &self.ty;
let ty_args = &self.ty_args;
quote! {
impl<#(#ty_args:#trait_name),*> #trait_name for #ty {
#methods
}
}
}
/// Generates a `HasTokens` implementation using the user-provided input.
pub fn has_tokens(&self) -> TokenStream {
let exprs = &self.exprs;
self.make_impl("HasTokens", &quote! {
fn feed_to(&self, consumer:&mut impl TokenConsumer) {
#(#exprs.feed_to(consumer);)*
}
})
}
}
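// An illustrative expansion (a hedged sketch, derived from `make_impl` and `has_tokens`
// above) of `has_tokens!(Shifted<T>, self.off, self.wrapped)`:
//
// impl<T: HasTokens> HasTokens for Shifted<T> {
//     fn feed_to(&self, consumer: &mut impl TokenConsumer) {
//         self.off.feed_to(consumer);
//         self.wrapped.feed_to(consumer);
//     }
// }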

View File

@ -1,19 +0,0 @@
[package]
name = "parser"
version = "0.1.0"
authors = ["Enso Team <contact@enso.org>"]
edition = "2021"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
ast = { path = "../ast/impl" }
enso-parser = { path = "../../../../lib/rust/parser" }
enso-prelude = { path = "../../../../lib/rust/prelude" }
enso-profiler = { path = "../../../../lib/rust/profiler" }
serde = { workspace = true }
serde_json = { version = "1.0", features = ["unbounded_depth"] }
enso-text = { path = "../../../../lib/rust/text" }
failure = { version = "0.1" }
uuid = { version = "0.8" }

View File

@ -1,254 +0,0 @@
//! A module containing structures and traits used in parser API.
use enso_prelude::*;
use enso_text::index::*;
use enso_text::unit::*;
use ast::id_map::JsonIdMap;
use ast::HasIdMap;
use ast::HasRepr;
use ast::IdMap;
use enso_text::Range;
/// A parsed file containing source code and attached metadata.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ParsedSourceFile<M> {
/// Ast representation.
pub ast: ast::known::Module,
/// Raw metadata in json.
pub metadata: M,
}
const NEWLINES_BEFORE_TAG: usize = 3;
const METADATA_TAG: &str = "#### METADATA ####";
impl<M: Metadata> ParsedSourceFile<M> {
/// Serialize to the `SourceFile` structure.
pub fn serialize(&self) -> std::result::Result<SourceFile, serde_json::Error> {
fn to_json_single_line(
val: &impl serde::Serialize,
) -> std::result::Result<String, serde_json::Error> {
let json = serde_json::to_string(val)?;
let line = json.chars().filter(|c| *c != '\n' && *c != '\r').collect();
Ok(line)
}
let code = self.ast.repr().into();
let before_tag = "\n".repeat(NEWLINES_BEFORE_TAG);
let before_idmap = "\n";
let json_id_map = JsonIdMap::from_id_map(&self.ast.id_map(), &code);
let id_map = to_json_single_line(&json_id_map)?;
let before_metadata = "\n";
let metadata = to_json_single_line(&self.metadata)?;
let id_map_start =
code.len().value + before_tag.len() + METADATA_TAG.len() + before_idmap.len();
let id_map_start_bytes = Byte::from(id_map_start);
let metadata_start = id_map_start + id_map.len() + before_metadata.len();
let metadata_start_bytes = Byte::from(metadata_start);
let content = format!(
"{code}{before_tag}{METADATA_TAG}{before_idmap}{id_map}{before_metadata}{metadata}"
);
Ok(SourceFile {
content,
code: (0.byte()..code.len().to_byte()).into(),
id_map: (id_map_start_bytes..id_map_start_bytes + ByteDiff::from(id_map.len())).into(),
metadata: (metadata_start_bytes..metadata_start_bytes + ByteDiff::from(metadata.len()))
.into(),
})
}
}
impl<M: Metadata> Display for ParsedSourceFile<M> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.serialize() {
Ok(serialized) => write!(f, "{serialized}"),
Err(_) => write!(f, "[UNREPRESENTABLE SOURCE FILE]"),
}
}
}
// ================
// == SourceFile ==
// ================
// === Metadata ===
/// Remove node IDs not present in the id map from the metadata.
///
/// See [`PruneUnusedIds::prune_unused_ids`] method documentation.
pub trait PruneUnusedIds {
/// Remove node IDs not present in the id map from the metadata.
///
/// The IDE loses track of the IDs stored in the metadata section when the user is editing a
/// project in an external editor. This means that the size of the metadata would constantly
/// grow, and the IDE would never remove obsolete nodes from the metadata. This method is
/// called while deserializing the [`ParsedSourceFile`] structure and allows pruning unused
/// ids from the metadata section.
///
/// As [`ParsedSourceFile`] is parametrized with a generic `Metadata`, this is a separate trait
/// that should be implemented for all `Metadata` types.
fn prune_unused_ids(&mut self, _id_map: &IdMap) {}
}
/// Things that are metadata.
pub trait Metadata:
Default + serde::de::DeserializeOwned + serde::Serialize + PruneUnusedIds {
}
/// Raw metadata.
impl PruneUnusedIds for serde_json::Value {}
impl Metadata for serde_json::Value {}
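// A hedged sketch (not part of the original file) of wiring a custom metadata type into these
// traits; `MetadataSketch` and its field are illustrative assumptions.
#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)]
pub struct MetadataSketch {
/// A free-form project comment.
pub comment: String,
}
impl PruneUnusedIds for MetadataSketch {} // Keeps the default no-op pruning.
impl Metadata for MetadataSketch {}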
// === Source File ===
/// Source File content with information about section placement.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct SourceFile {
/// The whole content of file.
pub content: String,
/// The range in bytes of module's "Code" section.
pub code: Range<Byte>,
/// The range in bytes of module's "Id Map" section.
pub id_map: Range<Byte>,
/// The range in bytes of module's "Metadata" section.
pub metadata: Range<Byte>,
}
impl Display for SourceFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.content)
}
}
impl SourceFile {
/// Describe source file contents. Uses heuristics to locate the metadata section.
///
/// The method investigates the last `METADATA_LINES` lines of the content to check for the
/// metadata tag and whether the id map and metadata look "reasonable enough". If proper
/// metadata is not recognized, the whole content is treated as code.
pub fn new(content: String) -> Self {
pub const METADATA_LINES: usize = 3;
let nl_indices = content.char_indices().filter_map(|(ix, c)| (c == '\n').as_some(ix));
let nl_indices_bytes = nl_indices.map(Byte::from);
let nl_indices_from_end = nl_indices_bytes.rev().take(METADATA_LINES).collect_vec();
match nl_indices_from_end.as_slice() {
&[last, before_last, two_before_last] => {
// The last line should be the metadata, the line before it the id map, and the line
// before that the metadata tag.
// We check that the tag matches and that the trailing lines look like a JSON
// list/object respectively.
let code_length =
two_before_last + 1.byte_diff() - ByteDiff::from(NEWLINES_BEFORE_TAG);
let code_range = 0.byte()..(0.byte() + code_length);
let tag_range = two_before_last + 1.byte_diff()..before_last;
let id_map_range = before_last + 1.byte_diff()..last;
let metadata_range = last + 1.byte_diff()..Byte::from(content.len());
let tag = &content[tag_range.start.value..tag_range.end.value];
let idmap = &content[id_map_range.start.value..id_map_range.end.value];
let metadata = &content[metadata_range.start.value..metadata_range.end.value];
let tag_matching = tag == METADATA_TAG;
let idmap_matching = Self::looks_like_idmap(idmap);
let metadata_matching = Self::looks_like_metadata(metadata);
if tag_matching && idmap_matching && metadata_matching {
SourceFile {
code: code_range.into(),
id_map: id_map_range.into(),
metadata: metadata_range.into(),
content,
}
} else {
Self::new_without_metadata(content)
}
}
_ => Self::new_without_metadata(content),
}
}
/// Create a description of source file consisting only of code, with no metadata.
fn new_without_metadata(content: String) -> Self {
let length = Byte::from(content.len());
Self {
code: (0.byte()..length).into(),
id_map: (length..length).into(),
metadata: (length..length).into(),
content,
}
}
/// Checks if the given line might be an ID map.
pub fn looks_like_idmap(line: &str) -> bool {
line.is_enclosed('[', ']')
}
/// Checks if the given line might be a metadata map.
pub fn looks_like_metadata(line: &str) -> bool {
line.is_enclosed('{', '}')
}
/// Get fragment of serialized string with code.
pub fn code_slice(&self) -> &str {
self.slice(&self.code)
}
/// Get fragment of serialized string with id map.
pub fn id_map_slice(&self) -> &str {
self.slice(&self.id_map)
}
/// Get fragment of serialized string with metadata.
pub fn metadata_slice(&self) -> &str {
self.slice(&self.metadata)
}
fn slice(&self, range: &Range<Byte>) -> &str {
let start = range.start.value;
let end = range.end.value;
&self.content[start..end]
}
}
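// A hedged sketch (not part of the original file) of the heuristics above: a well-formed
// content string is split into its code, id map, and metadata parts.
#[cfg(test)]
mod source_file_tests {
use super::*;
#[test]
fn recognizes_metadata_sections() {
let content = format!("main = 42\n\n\n{METADATA_TAG}\n[]\n{{}}");
let file = SourceFile::new(content);
assert_eq!(file.code_slice(), "main = 42");
assert_eq!(file.id_map_slice(), "[]");
assert_eq!(file.metadata_slice(), "{}");
}
}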
// ===========
// == Error ==
// ===========
/// A result of parsing code.
pub type Result<T> = std::result::Result<T, Error>;
/// An error which may be the result of parsing code.
#[derive(Debug, Fail)]
pub enum Error {
/// Error due to inner workings of the parser.
#[fail(display = "Internal parser error: {:?}.", _0)]
ParsingError(String),
/// Parser returned non-module AST root.
#[fail(display = "Internal parser error: non-module root node.")]
NonModuleRoot,
/// Error related to wrapping, i.e. communication with the parser service.
#[fail(display = "Interop error: {}.", _0)]
InteropError(#[cause] Box<dyn Fail>),
}
/// When trying to parse a line, not a single line was produced.
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Expected a single line, parsed none.")]
pub struct NoLinesProduced;
/// When trying to parse a single line, more were generated.
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Expected just a single line, found more.")]
pub struct TooManyLinesProduced;
/// Wraps an arbitrary `std::error::Error` as an `InteropError`.
pub fn interop_error<T>(error: T) -> Error
where T: Fail {
Error::InteropError(Box::new(error))
}

Some files were not shown because too many files have changed in this diff.