Merging with latest develop

Jaroslav Tulach 2024-11-20 14:36:53 +01:00
commit 0f437fc252
265 changed files with 5366 additions and 3297 deletions

View File

@@ -512,6 +512,7 @@ jobs:
ENSO_TEST_USER: ${{ secrets.ENSO_CLOUD_TEST_ACCOUNT_USERNAME }}
ENSO_TEST_USER_PASSWORD: ${{ secrets.ENSO_CLOUD_TEST_ACCOUNT_PASSWORD }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
continue-on-error: true
- run: rm $HOME/.enso/credentials
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -24,8 +24,11 @@
component.][11452]
- [The new documentation editor provides an improved Markdown editing experience
and paves the way for new documentation features.][11469]
- [You can now add images to the documentation panel][11547] by pasting them
from the clipboard or by dragging and dropping image files.
- [The "Write" button in the component menu allows evaluating a component
separately from the rest of the workflow][11523].
- [The documentation editor can now display tables][11564]
[11151]: https://github.com/enso-org/enso/pull/11151
[11271]: https://github.com/enso-org/enso/pull/11271
@@ -42,7 +45,9 @@
[11448]: https://github.com/enso-org/enso/pull/11448
[11452]: https://github.com/enso-org/enso/pull/11452
[11469]: https://github.com/enso-org/enso/pull/11469
[11547]: https://github.com/enso-org/enso/pull/11547
[11523]: https://github.com/enso-org/enso/pull/11523
[11564]: https://github.com/enso-org/enso/pull/11564
#### Enso Standard Library

View File

@@ -199,3 +199,10 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---
This project includes components that are licensed under the MIT license. The
full text of the MIT license and its copyright notice can be found in the
`app/licenses/` directory.

View File

@@ -804,8 +804,8 @@
"arbitraryFieldInvalid": "This field is invalid",
"arbitraryFieldTooShort": "This field is too short",
"arbitraryFieldTooLong": "This field is too long",
"arbitraryFieldTooSmall": "The value is too small, the minimum is $0",
"arbitraryFieldTooLarge": "The value is too large, the maximum is $0",
"arbitraryFieldTooSmall": "The value must be greater than $0",
"arbitraryFieldTooLarge": "The value must be less than $0",
"arbitraryFieldNotEqual": "This field is not equal to another field",
"arbitraryFieldNotMatch": "This field does not match the pattern",
"arbitraryFieldNotMatchAny": "This field does not match any of the patterns",

View File

@@ -85,6 +85,7 @@ export const componentBrowser = componentLocator('.ComponentBrowser')
export const nodeOutputPort = componentLocator('.outputPortHoverArea')
export const smallPlusButton = componentLocator('.SmallPlusButton')
export const editorRoot = componentLocator('.EditorRoot')
export const nodeComment = componentLocator('.GraphNodeComment div[contentEditable]')
/**
* A not-selected variant of Component Browser Entry.

View File

@@ -33,8 +33,8 @@ test('Copy node with comment', async ({ page }) => {
// Check state before operation.
const originalNodes = await locate.graphNode(page).count()
await expect(page.locator('.GraphNodeComment')).toExist()
const originalNodeComments = await page.locator('.GraphNodeComment').count()
await expect(locate.nodeComment(page)).toExist()
const originalNodeComments = await locate.nodeComment(page).count()
// Select a node.
const nodeToCopy = locate.graphNodeByBinding(page, 'final')
@@ -48,7 +48,7 @@ test('Copy node with comment', async ({ page }) => {
// Node and comment have been copied.
await expect(locate.graphNode(page)).toHaveCount(originalNodes + 1)
await expect(page.locator('.GraphNodeComment')).toHaveCount(originalNodeComments + 1)
await expect(locate.nodeComment(page)).toHaveCount(originalNodeComments + 1)
})
test('Copy multiple nodes', async ({ page }) => {
@@ -56,8 +56,8 @@ test('Copy multiple nodes', async ({ page }) => {
// Check state before operation.
const originalNodes = await locate.graphNode(page).count()
await expect(page.locator('.GraphNodeComment')).toExist()
const originalNodeComments = await page.locator('.GraphNodeComment').count()
await expect(locate.nodeComment(page)).toExist()
const originalNodeComments = await locate.nodeComment(page).count()
// Select some nodes.
const node1 = locate.graphNodeByBinding(page, 'final')
@@ -76,7 +76,7 @@ test('Copy multiple nodes', async ({ page }) => {
// Nodes and comment have been copied.
await expect(locate.graphNode(page)).toHaveCount(originalNodes + 2)
// `final` node has a comment.
await expect(page.locator('.GraphNodeComment')).toHaveCount(originalNodeComments + 1)
await expect(locate.nodeComment(page)).toHaveCount(originalNodeComments + 1)
// Check that the two copied nodes are isolated, i.e. connected to each other rather than to the original nodes.
await expect(locate.graphNodeByBinding(page, 'prod1')).toBeVisible()
await expect(locate.graphNodeByBinding(page, 'final1')).toBeVisible()

View File

@@ -0,0 +1,75 @@
import test from 'playwright/test'
import * as actions from './actions'
import { expect } from './customExpect'
import { CONTROL_KEY } from './keyboard'
import * as locate from './locate'
test('Edit comment by click', async ({ page }) => {
await actions.goToGraph(page)
const nodeComment = locate.nodeComment(locate.graphNodeByBinding(page, 'final'))
await expect(nodeComment).toHaveText('This node can be entered')
await nodeComment.click()
await page.keyboard.press(`${CONTROL_KEY}+A`)
const NEW_COMMENT = 'New comment text'
await nodeComment.fill(NEW_COMMENT)
await page.keyboard.press(`Enter`)
await expect(nodeComment).not.toBeFocused()
await expect(nodeComment).toHaveText(NEW_COMMENT)
})
test('Start editing comment via menu', async ({ page }) => {
await actions.goToGraph(page)
const node = locate.graphNodeByBinding(page, 'final')
await node.click()
await locate.circularMenu(node).getByRole('button', { name: 'More' }).click()
await locate.circularMenu(node).getByRole('button', { name: 'Comment' }).click()
await expect(locate.nodeComment(node)).toBeFocused()
})
test('Add new comment via menu', async ({ page }) => {
await actions.goToGraph(page)
const INITIAL_NODE_COMMENTS = 1
await expect(locate.nodeComment(page)).toHaveCount(INITIAL_NODE_COMMENTS)
const node = locate.graphNodeByBinding(page, 'data')
const nodeComment = locate.nodeComment(node)
await node.click()
await locate.circularMenu(node).getByRole('button', { name: 'More' }).click()
await locate.circularMenu(node).getByRole('button', { name: 'Comment' }).click()
await expect(locate.nodeComment(node)).toBeFocused()
const NEW_COMMENT = 'New comment text'
await nodeComment.fill(NEW_COMMENT)
await page.keyboard.press(`Enter`)
await expect(nodeComment).not.toBeFocused()
await expect(nodeComment).toHaveText(NEW_COMMENT)
await expect(locate.nodeComment(page)).toHaveCount(INITIAL_NODE_COMMENTS + 1)
})
test('Delete comment by clearing text', async ({ page }) => {
await actions.goToGraph(page)
const nodeComment = locate.nodeComment(locate.graphNodeByBinding(page, 'final'))
await expect(nodeComment).toHaveText('This node can be entered')
await nodeComment.click()
await page.keyboard.press(`${CONTROL_KEY}+A`)
await page.keyboard.press(`Delete`)
await page.keyboard.press(`Enter`)
await expect(nodeComment).not.toExist()
})
test('URL added to comment is rendered as link', async ({ page }) => {
await actions.goToGraph(page)
const nodeComment = locate.nodeComment(locate.graphNodeByBinding(page, 'final'))
await expect(nodeComment).toHaveText('This node can be entered')
await expect(nodeComment.locator('a')).not.toExist()
await nodeComment.click()
await page.keyboard.press(`${CONTROL_KEY}+A`)
const NEW_COMMENT = "Here's a URL: https://example.com"
await nodeComment.fill(NEW_COMMENT)
await page.keyboard.press(`Enter`)
await expect(nodeComment).not.toBeFocused()
await expect(nodeComment).toHaveText(NEW_COMMENT)
await expect(nodeComment.locator('a')).toHaveCount(1)
})

View File

@@ -7,13 +7,18 @@ import * as locate from './locate'
test('Main method documentation', async ({ page }) => {
await actions.goToGraph(page)
const rightDock = locate.rightDock(page)
// Documentation panel hotkey opens right-dock.
await expect(locate.rightDock(page)).toBeHidden()
await expect(rightDock).toBeHidden()
await page.keyboard.press(`${CONTROL_KEY}+D`)
await expect(locate.rightDock(page)).toBeVisible()
await expect(rightDock).toBeVisible()
// Right-dock displays main method documentation.
await expect(locate.editorRoot(locate.rightDock(page))).toHaveText('The main method')
await expect(locate.editorRoot(rightDock)).toContainText('The main method')
// All three images are loaded properly
await expect(rightDock.getByAltText('Image')).toHaveCount(3)
for (const img of await rightDock.getByAltText('Image').all())
await expect(img).toHaveJSProperty('naturalWidth', 3)
// Documentation hotkey closes right-dock.
await page.keyboard.press(`${CONTROL_KEY}+D`)

View File

@@ -44,7 +44,8 @@ test('Removing node', async ({ page }) => {
await page.keyboard.press(`${CONTROL_KEY}+Z`)
await expect(locate.graphNode(page)).toHaveCount(nodesCount)
await expect(deletedNode.locator('.WidgetToken')).toHaveText(['Main', '.', 'func1', 'prod'])
await expect(deletedNode.locator('.GraphNodeComment')).toHaveText('This node can be entered')
await expect(locate.nodeComment(deletedNode)).toHaveText('This node can be entered')
const restoredBBox = await deletedNode.boundingBox()
expect(restoredBBox).toEqual(deletedNodeBBox)

View File

@@ -22,7 +22,7 @@
"build-cloud": "cross-env CLOUD_BUILD=true corepack pnpm run build",
"preview": "vite preview",
"//": "max-warnings set to 41 to match the amount of warnings introduced by the new react compiler. Eventual goal is to remove all the warnings.",
"lint": "eslint . --max-warnings=41",
"lint": "eslint . --max-warnings=39",
"format": "prettier --version && prettier --write src/ && eslint . --fix",
"dev:vite": "vite",
"test": "corepack pnpm run /^^^^test:.*/",
@@ -94,7 +94,6 @@
"@lexical/plain-text": "^0.16.0",
"@lexical/utils": "^0.16.0",
"@lezer/common": "^1.1.0",
"@lezer/markdown": "^1.3.1",
"@lezer/highlight": "^1.1.6",
"@noble/hashes": "^1.4.0",
"@vueuse/core": "^10.4.1",
@@ -118,7 +117,6 @@
"veaury": "^2.3.18",
"vue": "^3.5.2",
"vue-component-type-helpers": "^2.0.29",
"y-codemirror.next": "^0.3.2",
"y-protocols": "^1.0.5",
"y-textarea": "^1.0.0",
"y-websocket": "^1.5.0",

View File

@@ -85,7 +85,7 @@ export function useForm<Schema extends types.TSchema, SubmitResult = void>(
errorMap: (issue) => {
switch (issue.code) {
case 'too_small':
if (issue.minimum === 0) {
if (issue.minimum === 1 && issue.type === 'string') {
return {
message: getText('arbitraryFieldRequired'),
}
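For context, a minimal standalone sketch of what this branch does (illustrative only, not taken from this commit; the schema and the literal messages stand in for the app's getText-based ones): zod reports an empty required string as a `too_small` issue with `minimum === 1` and `type === 'string'`, which the error map turns into a "required" message rather than the generic "too small" one.

import { z } from 'zod'

// Hypothetical error map mirroring the condition shown above.
const errorMap: z.ZodErrorMap = (issue, ctx) => {
  if (issue.code === z.ZodIssueCode.too_small && issue.minimum === 1 && issue.type === 'string') {
    // An empty value failing `min(1)` on a string is really a missing required field.
    return { message: 'This field is required' }
  }
  return { message: ctx.defaultError }
}

const nameField = z.string().min(1)
const result = nameField.safeParse('', { errorMap })
if (!result.success) console.log(result.error.issues[0]?.message) // "This field is required"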

View File

@@ -17,8 +17,11 @@ export interface TextProps
readonly elementType?: keyof HTMLElementTagNameMap
readonly lineClamp?: number
readonly tooltip?: React.ReactElement | string | false | null
readonly tooltipTriggerRef?: React.RefObject<HTMLElement>
readonly tooltipDisplay?: visualTooltip.VisualTooltipProps['display']
readonly tooltipPlacement?: aria.Placement
readonly tooltipOffset?: number
readonly tooltipCrossOffset?: number
}
export const TEXT_STYLE = twv.tv({
@@ -134,8 +137,11 @@ export const Text = forwardRef(function Text(props: TextProps, ref: React.Ref<HT
balance,
elementType: ElementType = 'span',
tooltip: tooltipElement = children,
tooltipTriggerRef,
tooltipDisplay = 'whenOverflowing',
tooltipPlacement,
tooltipOffset,
tooltipCrossOffset,
textSelection,
disableLineHeightCompensation = false,
...ariaProps
@@ -176,9 +182,18 @@ export const Text = forwardRef(function Text(props: TextProps, ref: React.Ref<HT
const { tooltip, targetProps } = visualTooltip.useVisualTooltip({
isDisabled: isTooltipDisabled(),
targetRef: textElementRef,
triggerRef: tooltipTriggerRef,
display: tooltipDisplay,
children: tooltipElement,
...(tooltipPlacement ? { overlayPositionProps: { placement: tooltipPlacement } } : {}),
...(tooltipPlacement || tooltipOffset != null ?
{
overlayPositionProps: {
...(tooltipPlacement && { placement: tooltipPlacement }),
...(tooltipOffset != null && { offset: tooltipOffset }),
...(tooltipCrossOffset != null && { crossOffset: tooltipCrossOffset }),
},
}
: {}),
})
return (

View File

@@ -18,10 +18,11 @@ export interface VisualTooltipProps
readonly children: React.ReactNode
readonly className?: string
readonly targetRef: React.RefObject<HTMLElement>
readonly triggerRef?: React.RefObject<HTMLElement> | undefined
readonly isDisabled?: boolean
readonly overlayPositionProps?: Pick<
aria.AriaPositionProps,
'containerPadding' | 'offset' | 'placement'
'containerPadding' | 'crossOffset' | 'offset' | 'placement'
>
/**
* Determines when the tooltip should be displayed.
@@ -56,6 +57,7 @@ export function useVisualTooltip(props: VisualTooltipProps): VisualTooltipReturn
const {
children,
targetRef,
triggerRef = targetRef,
className,
isDisabled = false,
overlayPositionProps = {},
@@ -70,6 +72,7 @@ export function useVisualTooltip(props: VisualTooltipProps): VisualTooltipReturn
const {
containerPadding = 0,
offset = DEFAULT_OFFSET,
crossOffset = 0,
placement = 'bottom',
} = overlayPositionProps
@@ -115,8 +118,9 @@ export function useVisualTooltip(props: VisualTooltipProps): VisualTooltipReturn
const { overlayProps, updatePosition } = aria.useOverlayPosition({
isOpen: state.isOpen,
overlayRef: popoverRef,
targetRef,
targetRef: triggerRef,
offset,
crossOffset,
placement,
containerPadding,
})

View File

@@ -1,6 +1,5 @@
/** @file A select menu with a dropdown. */
import {
useEffect,
useMemo,
useRef,
useState,
@@ -92,22 +91,15 @@ export default function Autocomplete<T>(props: AutocompleteProps<T>) {
const [selectedIndex, setSelectedIndex] = useState<number | null>(null)
const valuesSet = useMemo(() => new Set(values), [values])
const canEditText = setText != null && values.length === 0
// We are only interested in the initial value of `canEditText` in effects.
const canEditTextRef = useRef(canEditText)
const isMultipleAndCustomValue = multiple === true && text != null
const matchingItems = useMemo(
() => (text == null ? items : items.filter((item) => matches(item, text))),
[items, matches, text],
)
useEffect(() => {
if (!canEditTextRef.current) {
setIsDropdownVisible(true)
}
}, [])
const fallbackInputRef = useRef<HTMLFieldSetElement>(null)
const inputRef = rawInputRef ?? fallbackInputRef
const containerRef = useRef<HTMLDivElement>(null)
// This type is a little too wide but it is unavoidable.
/** Set values, while also changing the input text. */
@@ -184,6 +176,7 @@ export default function Autocomplete<T>(props: AutocompleteProps<T>) {
return (
<div className={twJoin('relative isolate h-6 w-full', isDropdownVisible && 'z-1')}>
<div
ref={containerRef}
onKeyDown={onKeyDown}
className={twMerge(
'absolute w-full grow transition-colors',
@@ -259,7 +252,7 @@ export default function Autocomplete<T>(props: AutocompleteProps<T>) {
<div
key={itemToKey(item)}
className={twMerge(
'text relative cursor-pointer whitespace-nowrap px-input-x last:rounded-b-xl hover:bg-hover-bg',
'text relative min-w-max cursor-pointer whitespace-nowrap rounded-full px-input-x last:rounded-b-xl hover:bg-hover-bg',
valuesSet.has(item) && 'bg-hover-bg',
index === selectedIndex && 'bg-black/5',
)}
@@ -271,7 +264,12 @@ export default function Autocomplete<T>(props: AutocompleteProps<T>) {
toggleValue(item)
}}
>
<Text truncate="1" className="w-full" tooltipPlacement="left">
<Text
truncate="1"
className="w-full"
tooltipPlacement="top"
tooltipTriggerRef={containerRef}
>
{children(item)}
</Text>
</div>

View File

@@ -51,8 +51,13 @@ export default function JSONSchemaInput(props: JSONSchemaInputProps) {
schema.format === 'enso-secret'
const { data: secrets } = useBackendQuery(remoteBackend, 'listSecrets', [], { enabled: isSecret })
const autocompleteItems = isSecret ? secrets?.map((secret) => secret.path) ?? null : null
const validityClassName =
isAbsent || getValidator(path)(value) ? 'border-primary/20' : 'border-red-700/60'
const isInvalid = !isAbsent && !getValidator(path)(value)
const validationErrorClassName =
isInvalid && 'border border-danger focus:border-danger focus:outline-danger'
const errors =
isInvalid && 'description' in schema && typeof schema.description === 'string' ?
[<Text className="px-2 text-danger">{schema.description}</Text>]
: []
// NOTE: `enum` schemas omitted for now as they are not yet used.
if ('const' in schema) {
@@ -66,100 +71,120 @@ export default function JSONSchemaInput(props: JSONSchemaInputProps) {
if ('format' in schema && schema.format === 'enso-secret') {
const isValid = typeof value === 'string' && value !== ''
children.push(
<div className={twMerge('w-full rounded-default border-0.5', validityClassName)}>
<Autocomplete
items={autocompleteItems ?? []}
itemToKey={(item) => item}
placeholder={getText('enterSecretPath')}
matches={(item, text) => item.toLowerCase().includes(text.toLowerCase())}
values={isValid ? [value] : []}
setValues={(values) => {
onChange(values[0] ?? '')
}}
text={autocompleteText}
setText={setAutocompleteText}
<div className="flex flex-col">
<div
className={twMerge(
'w-full rounded-default border-0.5 border-primary/20 outline-offset-2 transition-[border-color,outline] duration-200 focus:border-primary/50 focus:outline focus:outline-2 focus:outline-offset-0 focus:outline-primary',
validationErrorClassName,
)}
>
{(item) => item}
</Autocomplete>
<Autocomplete
items={autocompleteItems ?? []}
itemToKey={(item) => item}
placeholder={getText('enterSecretPath')}
matches={(item, text) => item.toLowerCase().includes(text.toLowerCase())}
values={isValid ? [value] : []}
setValues={(values) => {
onChange(values[0] ?? '')
}}
text={autocompleteText}
setText={setAutocompleteText}
>
{(item) => item}
</Autocomplete>
</div>
{...errors}
</div>,
)
} else {
children.push(
<FocusRing>
<Input
type="text"
readOnly={readOnly}
value={typeof value === 'string' ? value : ''}
size={1}
className={twMerge(
'focus-child h-6 w-full grow rounded-input border-0.5 bg-transparent px-2 read-only:read-only',
validityClassName,
)}
placeholder={getText('enterText')}
onChange={(event) => {
const newValue: string = event.currentTarget.value
onChange(newValue)
}}
/>
</FocusRing>,
<div className="flex flex-col">
<FocusRing>
<Input
type="text"
readOnly={readOnly}
value={typeof value === 'string' ? value : ''}
size={1}
className={twMerge(
'focus-child h-6 w-full grow rounded-input border-0.5 border-primary/20 bg-transparent px-2 outline-offset-2 transition-[border-color,outline] duration-200 read-only:read-only focus:border-primary/50 focus:outline focus:outline-2 focus:outline-offset-0 focus:outline-primary',
validationErrorClassName,
)}
placeholder={getText('enterText')}
onChange={(event) => {
const newValue: string = event.currentTarget.value
onChange(newValue)
}}
/>
</FocusRing>
{...errors}
</div>,
)
}
break
}
case 'number': {
children.push(
<FocusRing>
<Input
type="number"
readOnly={readOnly}
value={typeof value === 'number' ? value : ''}
size={1}
className={twMerge(
'focus-child h-6 w-full grow rounded-input border-0.5 bg-transparent px-2 read-only:read-only',
validityClassName,
)}
placeholder={getText('enterNumber')}
onChange={(event) => {
const newValue: number = event.currentTarget.valueAsNumber
if (Number.isFinite(newValue)) {
onChange(newValue)
}
}}
/>
</FocusRing>,
<div className="flex flex-col">
<FocusRing>
<Input
type="number"
readOnly={readOnly}
value={typeof value === 'number' ? value : ''}
size={1}
className={twMerge(
'focus-child h-6 w-full grow rounded-input border-0.5 border-primary/20 bg-transparent px-2 outline-offset-2 transition-[border-color,outline] duration-200 read-only:read-only focus:border-primary/50 focus:outline focus:outline-2 focus:outline-offset-0 focus:outline-primary',
validationErrorClassName,
)}
placeholder={getText('enterNumber')}
onChange={(event) => {
const newValue: number = event.currentTarget.valueAsNumber
if (Number.isFinite(newValue)) {
onChange(newValue)
}
}}
/>
</FocusRing>
{...errors}
</div>,
)
break
}
case 'integer': {
children.push(
<FocusRing>
<Input
type="number"
readOnly={readOnly}
value={typeof value === 'number' ? value : ''}
size={1}
className={twMerge(
'focus-child h-6 w-full grow rounded-input border-0.5 bg-transparent px-2 read-only:read-only',
validityClassName,
)}
placeholder={getText('enterInteger')}
onChange={(event) => {
const newValue: number = Math.floor(event.currentTarget.valueAsNumber)
onChange(newValue)
}}
/>
</FocusRing>,
<div className="flex flex-col">
<FocusRing>
<Input
type="number"
readOnly={readOnly}
value={typeof value === 'number' ? value : ''}
size={1}
className={twMerge(
'focus-child h-6 w-full grow rounded-input border-0.5 border-primary/20 bg-transparent px-2 outline-offset-2 transition-[border-color,outline] duration-200 read-only:read-only focus:border-primary/50 focus:outline focus:outline-2 focus:outline-offset-0 focus:outline-primary',
validationErrorClassName,
)}
placeholder={getText('enterInteger')}
onChange={(event) => {
const newValue: number = Math.floor(event.currentTarget.valueAsNumber)
onChange(newValue)
}}
/>
</FocusRing>
{...errors}
</div>,
)
break
}
case 'boolean': {
children.push(
<Checkbox
name="input"
isReadOnly={readOnly}
isSelected={typeof value === 'boolean' && value}
onChange={onChange}
/>,
<div className="flex flex-col">
<Checkbox
name="input"
isReadOnly={readOnly}
isSelected={typeof value === 'boolean' && value}
onChange={onChange}
/>
{...errors}
</div>,
)
break
}
@@ -186,7 +211,7 @@ export default function JSONSchemaInput(props: JSONSchemaInputProps) {
>
{propertyDefinitions.map((definition) => {
const { key, schema: childSchema } = definition
const isOptional = !requiredProperties.includes(key)
const isOptional = !requiredProperties.includes(key) || isAbsent
const isPresent = !isAbsent && value != null && key in value
return constantValueOfSchema(defs, childSchema).length === 1 ?
null
@@ -250,7 +275,7 @@ export default function JSONSchemaInput(props: JSONSchemaInputProps) {
newValue = unsafeValue!
}
const fullObject =
value ?? constantValueOfSchema(defs, childSchema, true)[0]
value ?? constantValueOfSchema(defs, schema, true)[0]
onChange(
(
typeof fullObject === 'object' &&
@@ -346,6 +371,7 @@ export default function JSONSchemaInput(props: JSONSchemaInputProps) {
path={selectedChildPath}
getValidator={getValidator}
noBorder={noChildBorder}
isAbsent={isAbsent}
value={value}
onChange={onChange}
/>
@@ -364,6 +390,7 @@ export default function JSONSchemaInput(props: JSONSchemaInputProps) {
path={`${path}/allOf/${i}`}
getValidator={getValidator}
noBorder={noChildBorder}
isAbsent={isAbsent}
value={value}
onChange={onChange}
/>

View File

@@ -117,9 +117,9 @@
"libraryName": { "const": "Standard.Base" },
"path": {
"title": "Path",
"description": "Must start with \"enso://<organization-name>/\".",
"description": "Must start with \"enso://Users/<username>/\" or \"enso://Teams/<team name>/\".",
"type": "string",
"pattern": "^enso://.+/.*$",
"pattern": "^enso://(?:Users|Teams)/.*/.*$",
"format": "enso-file"
},
"format": { "title": "Format", "$ref": "#/$defs/Format" }

View File

@@ -21,9 +21,10 @@
--color-frame-selected-bg: rgb(255 255 255 / 0.7);
--color-widget-slight: rgb(255 255 255 / 0.06);
--color-widget: rgb(255 255 255 / 0.12);
--color-widget-focus: rgb(255 255 255 / 0.25);
--color-widget-focus: rgb(255 255 255 / 1);
--color-widget-unfocus: rgb(255 255 255 / 0.6);
--color-widget-selected: rgb(255 255 255 / 0.58);
--color-widget-selection: rgba(255 255 255 / 0.2);
--color-widget-selection: rgba(0 0 0 / 0.2);
--color-port-connected: rgb(255 255 255 / 0.15);
/* colors for specific icons */

View File

@@ -12,6 +12,7 @@ export const codeEditorBindings = defineKeybinds('code-editor', {
export const documentationEditorBindings = defineKeybinds('documentation-editor', {
toggle: ['Mod+D'],
openLink: ['Mod+PointerMain'],
paste: ['Mod+V'],
})
export const interactionBindings = defineKeybinds('current-interaction', {

View File

@@ -1,380 +1,13 @@
<script setup lang="ts">
import type { ChangeSet, Diagnostic, Highlighter } from '@/components/CodeEditor/codemirror'
import EditorRoot from '@/components/EditorRoot.vue'
import { useGraphStore, type NodeId } from '@/stores/graph'
import { useProjectStore } from '@/stores/project'
import { useSuggestionDbStore } from '@/stores/suggestionDatabase'
import { useAutoBlur } from '@/util/autoBlur'
import { unwrap } from '@/util/data/result'
import { qnJoin, tryQualifiedName } from '@/util/qualifiedName'
import { EditorSelection } from '@codemirror/state'
import * as iter from 'enso-common/src/utilities/data/iter'
import { createDebouncer } from 'lib0/eventloop'
import type { ComponentInstance } from 'vue'
import { computed, onMounted, onUnmounted, ref, shallowRef, watch, watchEffect } from 'vue'
import { MutableModule } from 'ydoc-shared/ast'
import { textChangeToEdits, type SourceRangeEdit } from 'ydoc-shared/util/data/text'
import { rangeEncloses, type Origin } from 'ydoc-shared/yjsModel'
import { defineAsyncComponent } from 'vue'
// Use dynamic imports to aid code splitting. The codemirror dependency is quite large.
const {
Annotation,
StateEffect,
StateField,
bracketMatching,
foldGutter,
lintGutter,
highlightSelectionMatches,
minimalSetup,
EditorState,
EditorView,
syntaxHighlighting,
defaultHighlightStyle,
tooltips,
enso,
linter,
forceLinting,
lsDiagnosticsToCMDiagnostics,
hoverTooltip,
textEditToChangeSpec,
} = await import('@/components/CodeEditor/codemirror')
const projectStore = useProjectStore()
const graphStore = useGraphStore()
const suggestionDbStore = useSuggestionDbStore()
const editorRoot = ref<ComponentInstance<typeof EditorRoot>>()
const rootElement = computed(() => editorRoot.value?.rootElement)
useAutoBlur(rootElement)
const executionContextDiagnostics = shallowRef<Diagnostic[]>([])
// Effect that can be applied to the document to invalidate the linter state.
const diagnosticsUpdated = StateEffect.define()
// State value that is perturbed by any `diagnosticsUpdated` effect.
const diagnosticsVersion = StateField.define({
create: (_state) => 0,
update: (value, transaction) => {
for (const effect of transaction.effects) {
if (effect.is(diagnosticsUpdated)) value += 1
}
return value
},
})
const expressionUpdatesDiagnostics = computed(() => {
const updates = projectStore.computedValueRegistry.db
const panics = updates.type.reverseLookup('Panic')
const errors = updates.type.reverseLookup('DataflowError')
const diagnostics: Diagnostic[] = []
for (const externalId of iter.chain(panics, errors)) {
const update = updates.get(externalId)
if (!update) continue
const astId = graphStore.db.idFromExternal(externalId)
if (!astId) continue
const span = graphStore.moduleSource.getSpan(astId)
if (!span) continue
const [from, to] = span
switch (update.payload.type) {
case 'Panic': {
diagnostics.push({ from, to, message: update.payload.message, severity: 'error' })
break
}
case 'DataflowError': {
const error = projectStore.dataflowErrors.lookup(externalId)
if (error?.value?.message) {
diagnostics.push({ from, to, message: error.value.message, severity: 'error' })
}
break
}
}
}
return diagnostics
})
// == CodeMirror editor setup ==
const editorView = new EditorView()
const viewInitialized = ref(false)
watchEffect(() => {
const module = projectStore.module
if (!module) return
editorView.setState(
EditorState.create({
extensions: [
minimalSetup,
updateListener(),
diagnosticsVersion,
syntaxHighlighting(defaultHighlightStyle as Highlighter),
bracketMatching(),
foldGutter(),
lintGutter(),
highlightSelectionMatches(),
tooltips({ position: 'absolute' }),
hoverTooltip((ast, syn) => {
const dom = document.createElement('div')
const astSpan = ast.span()
let foundNode: NodeId | undefined
for (const [id, node] of graphStore.db.nodeIdToNode.entries()) {
const rootSpan = graphStore.moduleSource.getSpan(node.rootExpr.id)
if (rootSpan && rangeEncloses(rootSpan, astSpan)) {
foundNode = id
break
}
}
const expressionInfo = foundNode && graphStore.db.getExpressionInfo(foundNode)
const nodeColor = foundNode && graphStore.db.getNodeColorStyle(foundNode)
if (foundNode != null) {
dom
.appendChild(document.createElement('div'))
.appendChild(document.createTextNode(`AST ID: ${foundNode}`))
}
if (expressionInfo != null) {
dom
.appendChild(document.createElement('div'))
.appendChild(document.createTextNode(`Type: ${expressionInfo.typename ?? 'Unknown'}`))
}
if (expressionInfo?.profilingInfo[0] != null) {
const profile = expressionInfo.profilingInfo[0]
const executionTime = (profile.ExecutionTime.nanoTime / 1_000_000).toFixed(3)
const text = `Execution Time: ${executionTime}ms`
dom
.appendChild(document.createElement('div'))
.appendChild(document.createTextNode(text))
}
dom
.appendChild(document.createElement('div'))
.appendChild(document.createTextNode(`Syntax: ${syn.toString()}`))
const method = expressionInfo?.methodCall?.methodPointer
if (method != null) {
const moduleName = tryQualifiedName(method.module)
const methodName = tryQualifiedName(method.name)
const qualifiedName = qnJoin(unwrap(moduleName), unwrap(methodName))
const [id] = suggestionDbStore.entries.nameToId.lookup(qualifiedName)
const suggestionEntry = id != null ? suggestionDbStore.entries.get(id) : undefined
if (suggestionEntry != null) {
const groupNode = dom.appendChild(document.createElement('div'))
groupNode.appendChild(document.createTextNode('Group: '))
const groupNameNode = groupNode.appendChild(document.createElement('span'))
groupNameNode.appendChild(document.createTextNode(`${method.module}.${method.name}`))
if (nodeColor) {
groupNameNode.style.color = nodeColor
}
}
}
return { dom }
}),
enso(),
linter(
() => [...executionContextDiagnostics.value, ...expressionUpdatesDiagnostics.value],
{
needsRefresh(update) {
return (
update.state.field(diagnosticsVersion) !==
update.startState.field(diagnosticsVersion)
)
},
},
),
],
}),
)
viewInitialized.value = true
})
function changeSetToTextEdits(changes: ChangeSet) {
const textEdits = new Array<SourceRangeEdit>()
changes.iterChanges((from, to, _fromB, _toB, insert) =>
textEdits.push({ range: [from, to], insert: insert.toString() }),
)
return textEdits
}
let pendingChanges: ChangeSet | undefined
let currentModule: MutableModule | undefined
/** Set the editor contents to the current module state, discarding any pending editor-initiated changes. */
function resetView() {
console.info(`Resetting the editor to the module code.`)
pendingChanges = undefined
currentModule = undefined
const viewText = editorView.state.doc.toString()
const code = graphStore.moduleSource.text
editorView.dispatch({
changes: textChangeToEdits(viewText, code).map(textEditToChangeSpec),
annotations: synchronizedModule.of(graphStore.startEdit()),
})
}
/** Apply any pending changes to the currently-synchronized module, clearing the set of pending changes. */
function commitPendingChanges() {
if (!pendingChanges || !currentModule) return
try {
currentModule.applyTextEdits(changeSetToTextEdits(pendingChanges), graphStore.viewModule)
graphStore.commitEdit(currentModule, undefined, 'local:userAction:CodeEditor')
} catch (error) {
console.error(`Code Editor failed to modify module`, error)
resetView()
}
pendingChanges = undefined
}
function updateListener() {
const debouncer = createDebouncer(0)
return EditorView.updateListener.of((update) => {
for (const transaction of update.transactions) {
const newModule = transaction.annotation(synchronizedModule)
if (newModule) {
// Flush the pipeline of edits that were based on the old module.
commitPendingChanges()
currentModule = newModule
} else if (transaction.docChanged && currentModule) {
pendingChanges =
pendingChanges ? pendingChanges.compose(transaction.changes) : transaction.changes
// Defer the update until after pending events have been processed, so that if changes arrive faster than we
// can apply them individually, we coalesce them to keep up.
debouncer(commitPendingChanges)
}
}
})
}
let needResync = false
// Indicates a change updating the text to correspond to the given module state.
const synchronizedModule = Annotation.define<MutableModule>()
watch(
viewInitialized,
(ready) => {
if (ready) graphStore.moduleSource.observe(observeSourceChange)
},
{ immediate: true },
const LazyCodeEditor = defineAsyncComponent(
() => import('@/components/CodeEditor/CodeEditorImpl.vue'),
)
onUnmounted(() => graphStore.moduleSource.unobserve(observeSourceChange))
function observeSourceChange(textEdits: readonly SourceRangeEdit[], origin: Origin | undefined) {
// If we received an update from outside the Code Editor while the editor contained uncommitted changes, we cannot
// proceed incrementally; we wait for the changes to be merged as Y.Js AST updates, and then set the view to the
// resulting code.
if (needResync) {
if (!pendingChanges) {
resetView()
needResync = false
}
return
}
// When we aren't in the `needResync` state, we can ignore updates that originated in the Code Editor.
if (origin === 'local:userAction:CodeEditor') return
if (pendingChanges) {
console.info(`Deferring update (editor dirty).`)
needResync = true
return
}
// If none of the above exit-conditions were reached, the transaction is applicable to our current state.
editorView.dispatch({
changes: textEdits.map(textEditToChangeSpec),
annotations: synchronizedModule.of(graphStore.startEdit()),
})
}
// The LS protocol doesn't identify what version of the file updates are in reference to. When diagnostics are received
// from the LS, we map them to the text assuming that they are applicable to the current version of the module. This
// will be correct if there is no one else editing, and we aren't editing faster than the LS can send updates. Typing
// too quickly can result in incorrect ranges, but at idle it should correct itself when we receive new diagnostics.
watch([viewInitialized, () => projectStore.diagnostics], ([ready, diagnostics]) => {
if (!ready) return
executionContextDiagnostics.value =
graphStore.moduleSource.text ?
lsDiagnosticsToCMDiagnostics(graphStore.moduleSource.text, diagnostics)
: []
})
watch([executionContextDiagnostics, expressionUpdatesDiagnostics], () => {
editorView.dispatch({ effects: diagnosticsUpdated.of(null) })
forceLinting(editorView)
})
onMounted(() => {
editorView.focus()
rootElement.value?.prepend(editorView.dom)
// API for e2e tests.
;(window as any).__codeEditorApi = {
textContent: () => editorView.state.doc.toString(),
textLength: () => editorView.state.doc.length,
indexOf: (substring: string, position?: number) =>
editorView.state.doc.toString().indexOf(substring, position),
placeCursor: (at: number) => {
editorView.dispatch({ selection: EditorSelection.create([EditorSelection.cursor(at)]) })
},
select: (from: number, to: number) => {
editorView.dispatch({ selection: EditorSelection.create([EditorSelection.range(from, to)]) })
},
selectAndReplace: (from: number, to: number, replaceWith: string) => {
editorView.dispatch({ selection: EditorSelection.create([EditorSelection.range(from, to)]) })
editorView.dispatch(editorView.state.update(editorView.state.replaceSelection(replaceWith)))
},
writeText: (text: string, from: number) => {
editorView.dispatch({
changes: [{ from: from, insert: text }],
selection: { anchor: from + text.length },
})
},
}
})
</script>
<template>
<EditorRoot ref="editorRoot" class="CodeEditor" />
<Suspense>
<LazyCodeEditor />
</Suspense>
</template>
<style scoped>
.CodeEditor {
font-family: var(--font-mono);
backdrop-filter: var(--blur-app-bg);
background-color: rgba(255, 255, 255, 0.9);
box-shadow: 0 4px 30px rgba(0, 0, 0, 0.1);
border: 1px solid rgba(255, 255, 255, 0.4);
}
:deep(.cm-scroller) {
font-family: var(--font-mono);
/* Prevent touchpad back gesture, which can be triggered while panning. */
overscroll-behavior: none;
}
:deep(.cm-editor) {
position: relative;
width: 100%;
height: 100%;
opacity: 1;
color: black;
text-shadow: 0 0 2px rgba(255, 255, 255, 0.4);
font-size: 12px;
outline: 1px solid transparent;
transition: outline 0.1s ease-in-out;
}
:deep(.cm-focused) {
outline: 1px solid rgba(0, 0, 0, 0.5);
}
:deep(.cm-tooltip-hover) {
padding: 4px;
border-radius: 4px;
border: 1px solid rgba(0, 0, 0, 0.4);
text-shadow: 0 0 2px rgba(255, 255, 255, 0.4);
&::before {
content: '';
background-color: rgba(255, 255, 255, 0.9);
backdrop-filter: blur(64px);
border-radius: 4px;
}
}
:deep(.cm-gutters) {
border-radius: 3px 0 0 3px;
min-width: 32px;
}
</style>

View File

@@ -0,0 +1,123 @@
<script setup lang="ts">
import { useEnsoDiagnostics } from '@/components/CodeEditor/diagnostics'
import { ensoSyntax } from '@/components/CodeEditor/ensoSyntax'
import { useEnsoSourceSync } from '@/components/CodeEditor/sync'
import { ensoHoverTooltip } from '@/components/CodeEditor/tooltips'
import EditorRoot from '@/components/codemirror/EditorRoot.vue'
import { testSupport } from '@/components/codemirror/testSupport'
import { useGraphStore } from '@/stores/graph'
import { useProjectStore } from '@/stores/project'
import { useSuggestionDbStore } from '@/stores/suggestionDatabase'
import { useAutoBlur } from '@/util/autoBlur'
import {
bracketMatching,
defaultHighlightStyle,
foldGutter,
syntaxHighlighting,
} from '@codemirror/language'
import { lintGutter } from '@codemirror/lint'
import { highlightSelectionMatches } from '@codemirror/search'
import { EditorState } from '@codemirror/state'
import { EditorView } from '@codemirror/view'
import { type Highlighter } from '@lezer/highlight'
import { minimalSetup } from 'codemirror'
import { computed, onMounted, ref, watch, type ComponentInstance } from 'vue'
const projectStore = useProjectStore()
const graphStore = useGraphStore()
const suggestionDbStore = useSuggestionDbStore()
const editorRoot = ref<ComponentInstance<typeof EditorRoot>>()
const rootElement = computed(() => editorRoot.value?.rootElement)
useAutoBlur(rootElement)
const editorView = new EditorView()
;(window as any).__codeEditorApi = testSupport(editorView)
const { updateListener, connectModuleListener } = useEnsoSourceSync(graphStore, editorView)
const ensoDiagnostics = useEnsoDiagnostics(projectStore, graphStore, editorView)
watch(
() => projectStore.module,
(module) => {
if (!module) return
editorView.setState(
EditorState.create({
extensions: [
minimalSetup,
syntaxHighlighting(defaultHighlightStyle as Highlighter),
bracketMatching(),
foldGutter(),
lintGutter(),
highlightSelectionMatches(),
ensoSyntax(),
updateListener,
ensoHoverTooltip(graphStore, suggestionDbStore),
ensoDiagnostics,
],
}),
)
connectModuleListener()
},
{ immediate: true },
)
onMounted(() => {
editorView.focus()
rootElement.value?.prepend(editorView.dom)
})
</script>
<template>
<EditorRoot ref="editorRoot" class="CodeEditor" />
</template>
<style scoped>
.CodeEditor {
font-family: var(--font-mono);
backdrop-filter: var(--blur-app-bg);
background-color: rgba(255, 255, 255, 0.9);
box-shadow: 0 4px 30px rgba(0, 0, 0, 0.1);
border: 1px solid rgba(255, 255, 255, 0.4);
}
:deep(.cm-scroller) {
font-family: var(--font-mono);
/* Prevent touchpad back gesture, which can be triggered while panning. */
overscroll-behavior: none;
}
:deep(.cm-editor) {
position: relative;
width: 100%;
height: 100%;
opacity: 1;
color: black;
text-shadow: 0 0 2px rgba(255, 255, 255, 0.4);
font-size: 12px;
outline: 1px solid transparent;
transition: outline 0.1s ease-in-out;
}
:deep(.cm-focused) {
outline: 1px solid rgba(0, 0, 0, 0.5);
}
:deep(.cm-tooltip-hover) {
padding: 4px;
border-radius: 4px;
border: 1px solid rgba(0, 0, 0, 0.4);
text-shadow: 0 0 2px rgba(255, 255, 255, 0.4);
&::before {
content: '';
background-color: rgba(255, 255, 255, 0.9);
backdrop-filter: blur(64px);
border-radius: 4px;
}
}
:deep(.cm-gutters) {
border-radius: 3px 0 0 3px;
min-width: 32px;
}
</style>

View File

@@ -1,207 +0,0 @@
/**
* @file This module is a collection of codemirror related imports that are intended to be loaded
* asynchronously using a single dynamic import, allowing for code splitting.
*/
export { defaultKeymap } from '@codemirror/commands'
export {
bracketMatching,
defaultHighlightStyle,
foldGutter,
foldNodeProp,
syntaxHighlighting,
} from '@codemirror/language'
export { forceLinting, lintGutter, linter, type Diagnostic } from '@codemirror/lint'
export { highlightSelectionMatches } from '@codemirror/search'
export { Annotation, EditorState, StateEffect, StateField, type ChangeSet } from '@codemirror/state'
export { EditorView, tooltips, type TooltipView } from '@codemirror/view'
export { type Highlighter } from '@lezer/highlight'
export { minimalSetup } from 'codemirror'
export { yCollab } from 'y-codemirror.next'
import { RawAstExtended } from '@/util/ast/extended'
import { RawAst } from '@/util/ast/raw'
import {
Language,
LanguageSupport,
defineLanguageFacet,
foldNodeProp,
languageDataProp,
syntaxTree,
} from '@codemirror/language'
import { type Diagnostic } from '@codemirror/lint'
import type { ChangeSpec } from '@codemirror/state'
import { hoverTooltip as originalHoverTooltip, type TooltipView } from '@codemirror/view'
import {
NodeProp,
NodeSet,
NodeType,
Parser,
Tree,
type Input,
type PartialParse,
type SyntaxNode,
} from '@lezer/common'
import { styleTags, tags } from '@lezer/highlight'
import { EditorView } from 'codemirror'
import * as iter from 'enso-common/src/utilities/data/iter'
import type { Diagnostic as LSDiagnostic } from 'ydoc-shared/languageServerTypes'
import type { SourceRangeEdit } from 'ydoc-shared/util/data/text'
/** TODO: Add docs */
export function lsDiagnosticsToCMDiagnostics(
source: string,
diagnostics: LSDiagnostic[],
): Diagnostic[] {
if (!diagnostics.length) return []
const results: Diagnostic[] = []
let pos = 0
const lineStartIndices = []
for (const line of source.split('\n')) {
lineStartIndices.push(pos)
pos += line.length + 1
}
for (const diagnostic of diagnostics) {
if (!diagnostic.location) continue
const from =
(lineStartIndices[diagnostic.location.start.line] ?? 0) + diagnostic.location.start.character
const to =
(lineStartIndices[diagnostic.location.end.line] ?? 0) + diagnostic.location.end.character
if (to > source.length || from > source.length) {
// Suppress temporary errors if the source is not the version of the document the LS is reporting diagnostics for.
continue
}
const severity =
diagnostic.kind === 'Error' ? 'error'
: diagnostic.kind === 'Warning' ? 'warning'
: 'info'
results.push({ from, to, message: diagnostic.message, severity })
}
return results
}
type AstNode = RawAstExtended<RawAst.Tree | RawAst.Token, false>
const nodeTypes: NodeType[] = [
...RawAst.Tree.typeNames.map((name, id) => NodeType.define({ id, name })),
...RawAst.Token.typeNames.map((name, id) =>
NodeType.define({ id: id + RawAst.Tree.typeNames.length, name: 'Token' + name }),
),
]
const nodeSet = new NodeSet(nodeTypes).extend(
styleTags({
Ident: tags.variableName,
'Private!': tags.variableName,
Number: tags.number,
'Wildcard!': tags.variableName,
'TextLiteral!': tags.string,
OprApp: tags.operator,
TokenOperator: tags.operator,
'Assignment/TokenOperator': tags.definitionOperator,
UnaryOprApp: tags.operator,
'Function/Ident': tags.function(tags.variableName),
ForeignFunction: tags.function(tags.variableName),
'Import/TokenIdent': tags.function(tags.moduleKeyword),
Export: tags.function(tags.moduleKeyword),
Lambda: tags.function(tags.variableName),
Documented: tags.docComment,
ConstructorDefinition: tags.function(tags.variableName),
}),
foldNodeProp.add({
Function: (node) => node.lastChild,
ArgumentBlockApplication: (node) => node,
OperatorBlockApplication: (node) => node,
}),
)
export const astProp = new NodeProp<AstNode>({ perNode: true })
function astToCodeMirrorTree(
nodeSet: NodeSet,
ast: AstNode,
props?: readonly [number | NodeProp<any>, any][] | undefined,
): Tree {
const [start, end] = ast.span()
const children = ast.children()
const childrenToConvert = iter.tryGetSoleValue(children)?.isToken() ? [] : children
const tree = new Tree(
nodeSet.types[ast.inner.type + (ast.isToken() ? RawAst.Tree.typeNames.length : 0)]!,
childrenToConvert.map((child) => astToCodeMirrorTree(nodeSet, child)),
childrenToConvert.map((child) => child.span()[0] - start),
end - start,
[...(props ?? []), [astProp, ast]],
)
return tree
}
const facet = defineLanguageFacet()
class EnsoParser extends Parser {
nodeSet
constructor() {
super()
this.nodeSet = nodeSet
}
cachedCode: string | undefined
cachedTree: Tree | undefined
createParse(input: Input): PartialParse {
return {
parsedPos: input.length,
stopAt: () => {},
stoppedAt: null,
advance: () => {
const code = input.read(0, input.length)
if (code !== this.cachedCode || this.cachedTree == null) {
this.cachedCode = code
const ast = RawAstExtended.parse(code)
this.cachedTree = astToCodeMirrorTree(this.nodeSet, ast, [[languageDataProp, facet]])
}
return this.cachedTree
},
}
}
}
class EnsoLanguage extends Language {
constructor() {
super(facet, new EnsoParser())
}
}
const ensoLanguage = new EnsoLanguage()
/** TODO: Add docs */
export function enso() {
return new LanguageSupport(ensoLanguage)
}
/** TODO: Add docs */
export function hoverTooltip(
create: (
ast: AstNode,
syntax: SyntaxNode,
) => TooltipView | ((view: EditorView) => TooltipView) | null | undefined,
) {
return originalHoverTooltip((view, pos, side) => {
const syntaxNode = syntaxTree(view.state).resolveInner(pos, side)
const astNode = syntaxNode.tree?.prop(astProp)
if (astNode == null) return null
const domOrCreate = create(astNode, syntaxNode)
if (domOrCreate == null) return null
return {
pos: syntaxNode.from,
end: syntaxNode.to,
above: true,
arrow: true,
create: typeof domOrCreate !== 'function' ? () => domOrCreate : domOrCreate,
}
})
}
/** TODO: Add docs */
export function textEditToChangeSpec({ range: [from, to], insert }: SourceRangeEdit): ChangeSpec {
return { from, to, insert }
}

View File

@@ -0,0 +1,139 @@
import { type GraphStore } from '@/stores/graph'
import { type ProjectStore } from '@/stores/project'
import { type Diagnostic, forceLinting, linter } from '@codemirror/lint'
import { type Extension, StateEffect, StateField } from '@codemirror/state'
import { type EditorView } from '@codemirror/view'
import * as iter from 'enso-common/src/utilities/data/iter'
import { computed, shallowRef, watch } from 'vue'
import { type Diagnostic as LSDiagnostic, type Position } from 'ydoc-shared/languageServerTypes'
const executionContextDiagnostics = shallowRef<Diagnostic[]>([])
// Effect that can be applied to the document to invalidate the linter state.
const diagnosticsUpdated = StateEffect.define()
// State value that is perturbed by any `diagnosticsUpdated` effect.
const diagnosticsVersion = StateField.define({
create: (_state) => 0,
update: (value, transaction) => {
for (const effect of transaction.effects) {
if (effect.is(diagnosticsUpdated)) value += 1
}
return value
},
})
/** Given a text, indexes it and returns a function for converting between different ways of identifying positions. */
function stringPosConverter(text: string) {
let pos = 0
const lineStartIndex: number[] = []
for (const line of text.split('\n')) {
lineStartIndex.push(pos)
pos += line.length + 1
}
const length = text.length
function lineColToIndex({
line,
character,
}: {
line: number
character: number
}): number | undefined {
const startIx = lineStartIndex[line]
if (startIx == null) return
const ix = startIx + character
if (ix > length) return
return ix
}
return { lineColToIndex }
}
/** Convert the Language Server's diagnostics to CodeMirror diagnostics. */
function lsDiagnosticsToCMDiagnostics(
diagnostics: LSDiagnostic[],
lineColToIndex: (lineCol: Position) => number | undefined,
) {
const results: Diagnostic[] = []
for (const diagnostic of diagnostics) {
if (!diagnostic.location) continue
const from = lineColToIndex(diagnostic.location.start)
const to = lineColToIndex(diagnostic.location.end)
if (to == null || from == null) {
// Suppress temporary errors if the source is not the version of the document the LS is reporting diagnostics for.
continue
}
const severity =
diagnostic.kind === 'Error' ? 'error'
: diagnostic.kind === 'Warning' ? 'warning'
: 'info'
results.push({ from, to, message: diagnostic.message, severity })
}
return results
}
/**
* CodeMirror extension providing diagnostics for an Enso module. Provides CodeMirror diagnostics based on dataflow
* errors, and diagnostics the LS provided in an `executionStatus` message.
*/
export function useEnsoDiagnostics(
projectStore: Pick<ProjectStore, 'computedValueRegistry' | 'dataflowErrors' | 'diagnostics'>,
graphStore: Pick<GraphStore, 'moduleSource' | 'db'>,
editorView: EditorView,
): Extension {
const expressionUpdatesDiagnostics = computed(() => {
const updates = projectStore.computedValueRegistry.db
const panics = updates.type.reverseLookup('Panic')
const errors = updates.type.reverseLookup('DataflowError')
const diagnostics: Diagnostic[] = []
for (const externalId of iter.chain(panics, errors)) {
const update = updates.get(externalId)
if (!update) continue
const astId = graphStore.db.idFromExternal(externalId)
if (!astId) continue
const span = graphStore.moduleSource.getSpan(astId)
if (!span) continue
const [from, to] = span
switch (update.payload.type) {
case 'Panic': {
diagnostics.push({ from, to, message: update.payload.message, severity: 'error' })
break
}
case 'DataflowError': {
const error = projectStore.dataflowErrors.lookup(externalId)
if (error?.value?.message) {
diagnostics.push({ from, to, message: error.value.message, severity: 'error' })
}
break
}
}
}
return diagnostics
})
watch([executionContextDiagnostics, expressionUpdatesDiagnostics], () => {
editorView.dispatch({ effects: diagnosticsUpdated.of(null) })
forceLinting(editorView)
})
// The LS protocol doesn't identify what version of the file updates are in reference to. When diagnostics are
// received from the LS, we map them to the text assuming that they are applicable to the current version of the
// module. This will be correct if there is no one else editing, and we aren't editing faster than the LS can send
// updates. Typing too quickly can result in incorrect ranges, but at idle it should correct itself when we receive
// new diagnostics.
watch(
() => projectStore.diagnostics,
(diagnostics) => {
const { lineColToIndex } = stringPosConverter(graphStore.moduleSource.text)
executionContextDiagnostics.value = lsDiagnosticsToCMDiagnostics(diagnostics, lineColToIndex)
},
)
return [
diagnosticsVersion,
linter(() => [...executionContextDiagnostics.value, ...expressionUpdatesDiagnostics.value], {
needsRefresh(update) {
return (
update.state.field(diagnosticsVersion) !== update.startState.field(diagnosticsVersion)
)
},
}),
]
}

View File

@@ -0,0 +1,116 @@
import { RawAstExtended } from '@/util/ast/extended'
import { RawAst } from '@/util/ast/raw'
import {
defineLanguageFacet,
foldNodeProp,
Language,
languageDataProp,
LanguageSupport,
} from '@codemirror/language'
import {
type Input,
NodeProp,
NodeSet,
NodeType,
Parser,
type PartialParse,
Tree,
} from '@lezer/common'
import { styleTags, tags } from '@lezer/highlight'
import * as iter from 'enso-common/src/utilities/data/iter'
const nodeTypes: NodeType[] = [
...RawAst.Tree.typeNames.map((name, id) => NodeType.define({ id, name })),
...RawAst.Token.typeNames.map((name, id) =>
NodeType.define({ id: id + RawAst.Tree.typeNames.length, name: 'Token' + name }),
),
]
const nodeSet = new NodeSet(nodeTypes).extend(
styleTags({
Ident: tags.variableName,
'Private!': tags.variableName,
Number: tags.number,
'Wildcard!': tags.variableName,
'TextLiteral!': tags.string,
OprApp: tags.operator,
TokenOperator: tags.operator,
'Assignment/TokenOperator': tags.definitionOperator,
UnaryOprApp: tags.operator,
'Function/Ident': tags.function(tags.variableName),
ForeignFunction: tags.function(tags.variableName),
'Import/TokenIdent': tags.function(tags.moduleKeyword),
Export: tags.function(tags.moduleKeyword),
Lambda: tags.function(tags.variableName),
Documented: tags.docComment,
ConstructorDefinition: tags.function(tags.variableName),
}),
foldNodeProp.add({
Function: (node) => node.lastChild,
ArgumentBlockApplication: (node) => node,
OperatorBlockApplication: (node) => node,
}),
)
type AstNode = RawAstExtended<RawAst.Tree | RawAst.Token, false>
const astProp = new NodeProp<AstNode>({ perNode: true })
function astToCodeMirrorTree(
nodeSet: NodeSet,
ast: AstNode,
props?: readonly [number | NodeProp<any>, any][] | undefined,
): Tree {
const [start, end] = ast.span()
const children = ast.children()
const childrenToConvert = iter.tryGetSoleValue(children)?.isToken() ? [] : children
return new Tree(
nodeSet.types[ast.inner.type + (ast.isToken() ? RawAst.Tree.typeNames.length : 0)]!,
childrenToConvert.map((child) => astToCodeMirrorTree(nodeSet, child)),
childrenToConvert.map((child) => child.span()[0] - start),
end - start,
[...(props ?? []), [astProp, ast]],
)
}
const facet = defineLanguageFacet()
class EnsoParser extends Parser {
nodeSet
constructor() {
super()
this.nodeSet = nodeSet
}
cachedCode: string | undefined
cachedTree: Tree | undefined
createParse(input: Input): PartialParse {
return {
parsedPos: input.length,
stopAt: () => {},
stoppedAt: null,
advance: () => {
const code = input.read(0, input.length)
if (code !== this.cachedCode || this.cachedTree == null) {
this.cachedCode = code
const ast = RawAstExtended.parse(code)
this.cachedTree = astToCodeMirrorTree(this.nodeSet, ast, [[languageDataProp, facet]])
}
return this.cachedTree
},
}
}
}
class EnsoLanguage extends Language {
constructor() {
super(facet, new EnsoParser())
}
}
const ensoLanguage = new EnsoLanguage()
/** TODO: Add docs */
export function ensoSyntax() {
return new LanguageSupport(ensoLanguage)
}

View File

@@ -0,0 +1,123 @@
import type { GraphStore } from '@/stores/graph'
import { Annotation, ChangeSet, type ChangeSpec } from '@codemirror/state'
import { EditorView } from '@codemirror/view'
import { createDebouncer } from 'lib0/eventloop'
import { onUnmounted } from 'vue'
import { MutableModule } from 'ydoc-shared/ast'
import { SourceRangeEdit, textChangeToEdits } from 'ydoc-shared/util/data/text'
import type { Origin } from 'ydoc-shared/yjsModel'
function changeSetToTextEdits(changes: ChangeSet) {
const textEdits = new Array<SourceRangeEdit>()
changes.iterChanges((from, to, _fromB, _toB, insert) =>
textEdits.push({ range: [from, to], insert: insert.toString() }),
)
return textEdits
}
function textEditToChangeSpec({ range: [from, to], insert }: SourceRangeEdit): ChangeSpec {
return { from, to, insert }
}
// Indicates a change updating the text to correspond to the given module state.
const synchronizedModule = Annotation.define<MutableModule>()
/** @returns A CodeMirror Extension that synchronizes the editor state with the AST of an Enso module. */
export function useEnsoSourceSync(
graphStore: Pick<GraphStore, 'moduleSource' | 'viewModule' | 'startEdit' | 'commitEdit'>,
editorView: EditorView,
) {
let pendingChanges: ChangeSet | undefined
let currentModule: MutableModule | undefined
const debounceUpdates = createDebouncer(0)
const updateListener = EditorView.updateListener.of((update) => {
for (const transaction of update.transactions) {
const newModule = transaction.annotation(synchronizedModule)
if (newModule) {
// Flush the pipeline of edits that were based on the old module.
commitPendingChanges()
currentModule = newModule
} else if (transaction.docChanged && currentModule) {
pendingChanges =
pendingChanges ? pendingChanges.compose(transaction.changes) : transaction.changes
// Defer the update until after pending events have been processed, so that if changes arrive faster than we
// can apply them individually, we coalesce them to keep up.
debounceUpdates(commitPendingChanges)
}
}
})
/** Set the editor contents to the current module state, discarding any pending editor-initiated changes. */
function resetView() {
pendingChanges = undefined
currentModule = undefined
const viewText = editorView.state.doc.toString()
const code = graphStore.moduleSource.text
const changes = textChangeToEdits(viewText, code).map(textEditToChangeSpec)
console.info('Resetting the editor to the module code.', changes)
editorView.dispatch({
changes,
annotations: synchronizedModule.of(graphStore.startEdit()),
})
}
function checkSync() {
const code = graphStore.viewModule.root()?.code() ?? ''
const viewText = editorView.state.doc.toString()
const uncommitted = textChangeToEdits(code, viewText).map(textEditToChangeSpec)
if (uncommitted.length > 0) {
console.warn(`Module source was not synced to editor content\n${code}`, uncommitted)
}
}
/** Apply any pending changes to the currently-synchronized module, clearing the set of pending changes. */
function commitPendingChanges() {
if (!pendingChanges || !currentModule) return
const changes = pendingChanges
pendingChanges = undefined
const edits = changeSetToTextEdits(changes)
try {
currentModule.applyTextEdits(edits, graphStore.viewModule)
graphStore.commitEdit(currentModule, undefined, 'local:userAction:CodeEditor')
checkSync()
} catch (error) {
console.error(`Code Editor failed to modify module`, error)
resetView()
}
}
let needResync = false
function observeSourceChange(textEdits: readonly SourceRangeEdit[], origin: Origin | undefined) {
// If we received an update from outside the Code Editor while the editor contained uncommitted changes, we cannot
// proceed incrementally; we wait for the changes to be merged as Yjs AST updates, and then set the view to the
// resulting code.
if (needResync) {
if (!pendingChanges) {
resetView()
needResync = false
}
return
}
// When we aren't in the `needResync` state, we can ignore updates that originated in the Code Editor.
if (origin === 'local:userAction:CodeEditor') {
return
}
if (pendingChanges) {
console.info(`Deferring update (editor dirty).`)
needResync = true
return
}
// If none of the above exit-conditions were reached, the transaction is applicable to our current state.
editorView.dispatch({
changes: textEdits.map(textEditToChangeSpec),
annotations: synchronizedModule.of(graphStore.startEdit()),
})
}
onUnmounted(() => graphStore.moduleSource.unobserve(observeSourceChange))
return {
updateListener,
connectModuleListener: () => graphStore.moduleSource.observe(observeSourceChange),
}
}
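/*
 * Usage sketch (illustrative only, not part of this module): `graphStore` is assumed to come
 * from the surrounding component, and `EditorState` from '@codemirror/state'.
 * ```ts
 * const editorView = new EditorView()
 * const { updateListener, connectModuleListener } = useEnsoSourceSync(graphStore, editorView)
 * editorView.setState(EditorState.create({ extensions: [updateListener] }))
 * connectModuleListener()
 * ```
 */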

View File

@ -0,0 +1,106 @@
import type { GraphStore, NodeId } from '@/stores/graph'
import { type SuggestionDbStore } from '@/stores/suggestionDatabase'
import { type RawAstExtended } from '@/util/ast/extended'
import { RawAst } from '@/util/ast/raw'
import { qnJoin, tryQualifiedName } from '@/util/qualifiedName'
import { syntaxTree } from '@codemirror/language'
import { type Extension } from '@codemirror/state'
import {
type EditorView,
hoverTooltip as originalHoverTooltip,
tooltips,
type TooltipView,
} from '@codemirror/view'
import { NodeProp, type SyntaxNode } from '@lezer/common'
import { unwrap } from 'ydoc-shared/util/data/result'
import { rangeEncloses } from 'ydoc-shared/yjsModel'
type AstNode = RawAstExtended<RawAst.Tree | RawAst.Token, false>
const astProp = new NodeProp<AstNode>({ perNode: true })
/** Create a hover tooltip extension whose content is produced from the Enso AST node and CodeMirror syntax node under the pointer. */
function hoverTooltip(
create: (
ast: AstNode,
syntax: SyntaxNode,
) => TooltipView | ((view: EditorView) => TooltipView) | null | undefined,
): Extension {
return [
tooltips({ position: 'absolute' }),
originalHoverTooltip((view, pos, side) => {
const syntaxNode = syntaxTree(view.state).resolveInner(pos, side)
const astNode = syntaxNode.tree?.prop(astProp)
if (astNode == null) return null
const domOrCreate = create(astNode, syntaxNode)
if (domOrCreate == null) return null
return {
pos: syntaxNode.from,
end: syntaxNode.to,
above: true,
arrow: true,
create: typeof domOrCreate !== 'function' ? () => domOrCreate : domOrCreate,
}
}),
]
}
/** @returns A CodeMirror extension that creates tooltips containing type and syntax information for Enso code. */
export function ensoHoverTooltip(
graphStore: Pick<GraphStore, 'moduleSource' | 'db'>,
suggestionDbStore: Pick<SuggestionDbStore, 'entries'>,
) {
return hoverTooltip((ast, syn) => {
const dom = document.createElement('div')
const astSpan = ast.span()
let foundNode: NodeId | undefined
for (const [id, node] of graphStore.db.nodeIdToNode.entries()) {
const rootSpan = graphStore.moduleSource.getSpan(node.rootExpr.id)
if (rootSpan && rangeEncloses(rootSpan, astSpan)) {
foundNode = id
break
}
}
const expressionInfo = foundNode && graphStore.db.getExpressionInfo(foundNode)
const nodeColor = foundNode && graphStore.db.getNodeColorStyle(foundNode)
if (foundNode != null) {
dom
.appendChild(document.createElement('div'))
.appendChild(document.createTextNode(`AST ID: ${foundNode}`))
}
if (expressionInfo != null) {
dom
.appendChild(document.createElement('div'))
.appendChild(document.createTextNode(`Type: ${expressionInfo.typename ?? 'Unknown'}`))
}
if (expressionInfo?.profilingInfo[0] != null) {
const profile = expressionInfo.profilingInfo[0]
const executionTime = (profile.ExecutionTime.nanoTime / 1_000_000).toFixed(3)
const text = `Execution Time: ${executionTime}ms`
dom.appendChild(document.createElement('div')).appendChild(document.createTextNode(text))
}
dom
.appendChild(document.createElement('div'))
.appendChild(document.createTextNode(`Syntax: ${syn.toString()}`))
const method = expressionInfo?.methodCall?.methodPointer
if (method != null) {
const moduleName = tryQualifiedName(method.module)
const methodName = tryQualifiedName(method.name)
const qualifiedName = qnJoin(unwrap(moduleName), unwrap(methodName))
const [id] = suggestionDbStore.entries.nameToId.lookup(qualifiedName)
const suggestionEntry = id != null ? suggestionDbStore.entries.get(id) : undefined
if (suggestionEntry != null) {
const groupNode = dom.appendChild(document.createElement('div'))
groupNode.appendChild(document.createTextNode('Group: '))
const groupNameNode = groupNode.appendChild(document.createElement('span'))
groupNameNode.appendChild(document.createTextNode(`${method.module}.${method.name}`))
if (nodeColor) {
groupNameNode.style.color = nodeColor
}
}
}
return { dom }
})
}
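/*
 * Usage sketch (illustrative only): the returned extension is intended to be included in the
 * code editor's CodeMirror extension list, e.g. with `EditorState` from '@codemirror/state':
 * ```ts
 * EditorState.create({ extensions: [ensoHoverTooltip(graphStore, suggestionDbStore)] })
 * ```
 */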

View File

@ -41,6 +41,8 @@ const PAN_MARGINS = {
}
const COMPONENT_EDITOR_PADDING = 12
const ICON_WIDTH = 16
// Component editor is larger than a typical node, so the edge should touch it a bit higher.
const EDGE_Y_OFFSET = -6
const cssComponentEditorPadding = `${COMPONENT_EDITOR_PADDING}px`
@ -199,7 +201,9 @@ watchEffect(() => {
return
}
const scenePos = originScenePos.value.add(
new Vec2(COMPONENT_EDITOR_PADDING + ICON_WIDTH / 2, 0).scale(clientToSceneFactor.value),
new Vec2(COMPONENT_EDITOR_PADDING + ICON_WIDTH / 2, 0)
.scale(clientToSceneFactor.value)
.add(new Vec2(0, EDGE_Y_OFFSET)),
)
graphStore.cbEditedEdge = {
source,

View File

@ -42,7 +42,7 @@ defineExpose({
const rootStyle = computed(() => {
return {
'--node-color-primary': props.nodeColor,
'--color-node-primary': props.nodeColor,
'--port-edge-width': `${4 * props.navigator.scale}px`,
}
})
@ -72,7 +72,7 @@ const rootStyle = computed(() => {
<style scoped>
.ComponentEditor {
--node-color-port: color-mix(in oklab, var(--node-color-primary) 85%, white 15%);
--node-color-port: color-mix(in oklab, var(--color-node-primary) 85%, white 15%);
--port-padding: 6px;
--icon-height: 16px;
--icon-text-gap: 6px;
@ -105,17 +105,6 @@ const rootStyle = computed(() => {
isolation: isolate;
}
.iconPort::before {
content: '';
position: absolute;
top: calc(var(--port-padding) - var(--component-editor-padding));
width: var(--port-edge-width);
height: calc(var(--component-editor-padding) - var(--port-padding) + var(--icon-height) / 2);
transform: translate(-50%, 0);
background-color: var(--node-color-port);
z-index: -1;
}
.nodeIcon {
color: white;
width: var(--icon-height);

View File

@ -1,13 +1,17 @@
<script setup lang="ts">
import { documentationEditorBindings } from '@/bindings'
import FullscreenButton from '@/components/FullscreenButton.vue'
import MarkdownEditor from '@/components/MarkdownEditor.vue'
import { fetcherUrlTransformer } from '@/components/MarkdownEditor/imageUrlTransformer'
import WithFullscreenMode from '@/components/WithFullscreenMode.vue'
import { useGraphStore } from '@/stores/graph'
import { useProjectStore } from '@/stores/project'
import { useProjectFiles } from '@/stores/projectFiles'
import { Vec2 } from '@/util/data/vec2'
import type { ToValue } from '@/util/reactivity'
import { ref, toRef, toValue, watch } from 'vue'
import type { Path } from 'ydoc-shared/languageServerTypes'
import { useToast } from '@/util/toast'
import { ComponentInstance, computed, reactive, ref, toRef, toValue, watch } from 'vue'
import type { Path, Uuid } from 'ydoc-shared/languageServerTypes'
import { Err, Ok, mapOk, withContext, type Result } from 'ydoc-shared/util/data/result'
import * as Y from 'yjs'
@ -19,26 +23,42 @@ const emit = defineEmits<{
}>()
const toolbarElement = ref<HTMLElement>()
const markdownEditor = ref<ComponentInstance<typeof MarkdownEditor>>()
const graphStore = useGraphStore()
const projectStore = useProjectStore()
const { transformImageUrl } = useDocumentationImages(
const { transformImageUrl, uploadImage } = useDocumentationImages(
toRef(graphStore, 'modulePath'),
projectStore.readFileBinary,
useProjectFiles(projectStore),
)
const uploadErrorToast = useToast.error()
type UploadedImagePosition = { type: 'selection' } | { type: 'coords'; coords: Vec2 }
/**
 * A project-file management API for the {@link useDocumentationImages} composable.
*/
interface ProjectFilesAPI {
projectRootId: Promise<Uuid | undefined>
readFileBinary(path: Path): Promise<Result<Blob>>
writeFileBinary(path: Path, content: Blob): Promise<Result>
pickUniqueName(path: Path, suggestedName: string): Promise<Result<string>>
ensureDirExists(path: Path): Promise<Result<void>>
}
function useDocumentationImages(
modulePath: ToValue<Path | undefined>,
readFileBinary: (path: Path) => Promise<Result<Blob>>,
projectFiles: ProjectFilesAPI,
) {
async function urlToPath(url: string): Promise<Result<Path> | undefined> {
function urlToPath(url: string): Result<Path> | undefined {
const modulePathValue = toValue(modulePath)
if (!modulePathValue) {
return Err('Current module path is unknown.')
}
const appliedUrl = new URL(url, `file:///${modulePathValue.segments.join('/')}`)
if (appliedUrl.protocol === 'file:') {
const segments = appliedUrl.pathname.split('/')
// The pathname starts with '/', so we drop the leading empty segment.
const segments = decodeURI(appliedUrl.pathname).split('/').slice(1)
return Ok({ rootId: modulePathValue.rootId, segments })
} else {
// Not a relative URL, custom fetching not needed.
@ -54,24 +74,81 @@ function useDocumentationImages(
return pathUniqueId(path)
}
const currentlyUploading = reactive(new Map<string, Promise<Blob>>())
const transformImageUrl = fetcherUrlTransformer(
async (url: string) => {
const path = await urlToPath(url)
if (!path) return
return withContext(
() => `Locating documentation image (${url})`,
() => mapOk(path, (path) => ({ location: path, uniqueId: pathUniqueId(path) })),
() =>
mapOk(path, (path) => {
const id = pathUniqueId(path)
return {
location: path,
uniqueId: id,
uploading: computed(() => currentlyUploading.has(id)),
}
}),
)
},
async (path) => {
return withContext(
() => `Loading documentation image (${pathDebugRepr(path)})`,
async () => await readFileBinary(path),
async () => {
const uploaded = await currentlyUploading.get(pathUniqueId(path))
return uploaded ? Ok(uploaded) : projectFiles.readFileBinary(path)
},
)
},
)
return { transformImageUrl }
async function uploadImage(
name: string,
blobPromise: Promise<Blob>,
position: UploadedImagePosition = { type: 'selection' },
) {
const rootId = await projectFiles.projectRootId
if (!rootId) {
uploadErrorToast.show('Cannot upload image: unknown project file tree root.')
return
}
if (!markdownEditor.value || !markdownEditor.value.loaded) {
console.error('Tried to upload an image while the markdown editor is not yet loaded')
return
}
const dirPath = { rootId, segments: ['images'] }
await projectFiles.ensureDirExists(dirPath)
const filename = await projectFiles.pickUniqueName(dirPath, name)
if (!filename.ok) {
uploadErrorToast.reportError(filename.error)
return
}
const path: Path = { rootId, segments: ['images', filename.value] }
const id = pathUniqueId(path)
currentlyUploading.set(id, blobPromise)
const insertedLink = `\n![Image](/images/${encodeURI(filename.value)})\n`
switch (position.type) {
case 'selection':
markdownEditor.value.putText(insertedLink)
break
case 'coords':
markdownEditor.value.putTextAtCoord(insertedLink, position.coords)
break
}
try {
const blob = await blobPromise
const uploadResult = await projectFiles.writeFileBinary(path, blob)
if (!uploadResult.ok)
uploadErrorToast.reportError(uploadResult.error, 'Failed to upload image')
} finally {
currentlyUploading.delete(id)
}
}
return { transformImageUrl, uploadImage }
}
const fullscreen = ref(false)
@ -81,6 +158,55 @@ watch(
() => fullscreen.value || fullscreenAnimating.value,
(fullscreenOrAnimating) => emit('update:fullscreen', fullscreenOrAnimating),
)
const supportedImageTypes: Record<string, { extension: string }> = {
// List taken from https://developer.mozilla.org/en-US/docs/Web/Media/Formats/Image_types
'image/apng': { extension: 'apng' },
'image/avif': { extension: 'avif' },
'image/gif': { extension: 'gif' },
'image/jpeg': { extension: 'jpg' },
'image/png': { extension: 'png' },
'image/svg+xml': { extension: 'svg' },
'image/webp': { extension: 'webp' },
// Question: do we want to have BMP and ICO here?
}
async function handleFileDrop(event: DragEvent) {
if (!event.dataTransfer?.items) return
for (const item of event.dataTransfer.items) {
if (item.kind !== 'file' || !Object.hasOwn(supportedImageTypes, item.type)) continue
const file = item.getAsFile()
if (!file) continue
const clientPos = new Vec2(event.clientX, event.clientY)
event.stopPropagation()
event.preventDefault()
await uploadImage(file.name, Promise.resolve(file), { type: 'coords', coords: clientPos })
}
}
const handler = documentationEditorBindings.handler({
paste: () => {
window.navigator.clipboard.read().then(async (items) => {
if (markdownEditor.value == null) return
for (const item of items) {
const textType = item.types.find((type) => type === 'text/plain')
if (textType) {
const blob = await item.getType(textType)
markdownEditor.value.putText(await blob.text())
break
}
const imageType = item.types.find((type) => type in supportedImageTypes)
if (imageType) {
const ext = supportedImageTypes[imageType]?.extension ?? ''
uploadImage(`image.${ext}`, item.getType(imageType)).catch((err) =>
uploadErrorToast.show(`Failed to upload image: ${err}`),
)
break
}
}
})
},
})
</script>
<template>
@ -89,9 +215,15 @@ watch(
<div ref="toolbarElement" class="toolbar">
<FullscreenButton v-model="fullscreen" />
</div>
<div class="scrollArea">
<div
class="scrollArea"
@keydown="handler"
@dragover.prevent
@drop.prevent="handleFileDrop($event)"
>
<MarkdownEditor
:yText="yText"
ref="markdownEditor"
:content="yText"
:transformImageUrl="transformImageUrl"
:toolbarContainer="toolbarElement"
/>

View File

@ -664,11 +664,6 @@ async function handleFileDrop(event: DragEvent) {
const MULTIPLE_FILES_GAP = 50
if (!event.dataTransfer?.items) return
const projectRootId = await projectStore.projectRootId
if (projectRootId == null) {
toasts.userActionFailed.show(`Unable to upload file(s): Could not identify project root.`)
return
}
;[...event.dataTransfer.items].forEach(async (item, index) => {
if (item.kind === 'file') {
const file = item.getAsFile()
@ -677,10 +672,7 @@ async function handleFileDrop(event: DragEvent) {
const offset = new Vec2(0, index * -MULTIPLE_FILES_GAP)
const pos = graphNavigator.clientToScenePos(clientPos).add(offset)
const uploader = Uploader.Create(
projectStore.lsRpcConnection,
projectStore.dataConnection,
projectRootId,
projectStore.awareness,
projectStore,
file,
pos,
projectStore.isOnLocalBackend,

View File

@ -73,8 +73,7 @@ const targetPos = computed<Vec2 | undefined>(() => {
if (expr != null && targetNode.value != null && targetNodeRect.value != null) {
const targetRectRelative = graph.getPortRelativeRect(expr)
if (targetRectRelative == null) return
const yAdjustment =
targetIsSelfArgument.value ? -(selfArgumentArrowHeight + selfArgumentArrowYOffset) : 0
const yAdjustment = -(arrowHeight + arrowYOffset)
return targetNodeRect.value.pos.add(new Vec2(targetRectRelative.center().x, yAdjustment))
} else if (mouseAnchorPos.value != null) {
return mouseAnchorPos.value
@ -509,29 +508,18 @@ const backwardEdgeArrowTransform = computed<string | undefined>(() => {
return svgTranslate(origin.add(points[1]))
})
const targetIsSelfArgument = computed(() => {
if ('targetIsSelfArgument' in props.edge && props.edge?.targetIsSelfArgument) return true
if (!targetExpr.value) return
const nodeId = graph.getPortNodeId(targetExpr.value)
if (!nodeId) return
const primarySubject = graph.db.nodeIdToNode.get(nodeId)?.primarySubject
if (!primarySubject) return
return targetExpr.value === primarySubject
})
const selfArgumentArrowHeight = 9
const selfArgumentArrowYOffset = 0
const selfArgumentArrowTransform = computed<string | undefined>(() => {
const selfArgumentArrowTopOffset = 4
const selfArgumentArrowWidth = 12
if (!targetIsSelfArgument.value) return
const arrowHeight = 9
const arrowYOffset = 0
const arrowTransform = computed<string | undefined>(() => {
const arrowTopOffset = 4
const arrowWidth = 12
const target = targetPos.value
if (target == null) return
const pos = target.sub(new Vec2(selfArgumentArrowWidth / 2, selfArgumentArrowTopOffset))
const pos = target.sub(new Vec2(arrowWidth / 2, arrowTopOffset))
return svgTranslate(pos)
})
const selfArgumentArrowPath = [
const arrowPath = [
'M10.9635 1.5547',
'L6.83205 7.75193',
'C6.43623 8.34566 5.56377 8.34566 5.16795 7.75192',
@ -620,9 +608,9 @@ const sourceHoverAnimationStyle = computed(() => {
:data-target-node-id="targetNode"
/>
<path
v-if="selfArgumentArrowTransform"
:transform="selfArgumentArrowTransform"
:d="selfArgumentArrowPath"
v-if="arrowTransform"
:transform="arrowTransform"
:d="arrowPath"
:class="{ arrow: true, visible: true, dimmed: targetEndIsDimmed }"
:style="baseStyle"
/>

View File

@ -462,7 +462,6 @@ function recomputeOnce() {
:nodePosition="nodePosition"
:nodeSize="graphSelectionSize"
:class="{ draggable: true, dragged: isDragged }"
:selected
:color
:externalHovered="nodeHovered"
@visible="selectionVisible = $event"
@ -605,10 +604,21 @@ function recomputeOnce() {
height: var(--node-size-y);
rx: var(--node-border-radius);
fill: var(--node-color-primary);
fill: var(--color-node-background);
transition: fill 0.2s ease;
}
.GraphNode {
--color-node-text: white;
--color-node-primary: var(--node-group-color);
--color-node-background: var(--node-group-color);
}
.GraphNode.selected {
--color-node-background: color-mix(in oklab, var(--color-node-primary) 30%, white 70%);
--color-node-text: color-mix(in oklab, var(--color-node-primary) 70%, black 30%);
}
.GraphNode {
position: absolute;
border-radius: var(--node-border-radius);
@ -617,17 +627,13 @@ function recomputeOnce() {
/** Space between node and component above and below, such as comments and errors. */
--node-vertical-gap: 4px;
--node-color-primary: color-mix(
in oklab,
var(--node-group-color) 100%,
var(--node-group-color) 0%
);
--node-color-port: color-mix(in oklab, var(--node-color-primary) 85%, white 15%);
--color-node-primary: var(--node-group-color);
--node-color-port: color-mix(in oklab, var(--color-node-primary) 85%, white 15%);
--node-color-error: color-mix(in oklab, var(--node-group-color) 30%, rgb(255, 0, 0) 70%);
&.executionState-Unknown,
&.executionState-Pending {
--node-color-primary: color-mix(in oklab, var(--node-group-color) 60%, #aaa 40%);
--color-node-primary: color-mix(in oklab, var(--node-group-color) 60%, #aaa 40%);
}
}

View File

@ -202,7 +202,7 @@ graph.suggestEdgeFromOutput(outputHovered)
z-index: 10;
text-anchor: middle;
opacity: calc(var(--hover-animation) * var(--hover-animation));
fill: var(--node-color-primary);
fill: var(--color-node-primary);
transform: translate(50%, calc(var(--node-size-y) + var(--output-port-max-width) + 16px));
}
</style>

View File

@ -5,7 +5,6 @@ import { computed, ref, watchEffect } from 'vue'
const props = defineProps<{
nodePosition: Vec2
nodeSize: Vec2
selected: boolean
externalHovered: boolean
color: string
}>()
@ -15,7 +14,7 @@ const emit = defineEmits<{
}>()
const hovered = ref(false)
const visible = computed(() => props.selected || props.externalHovered || hovered.value)
const visible = computed(() => props.externalHovered || hovered.value)
watchEffect(() => emit('visible', visible.value))
@ -33,7 +32,7 @@ const rootStyle = computed(() => {
<template>
<div
class="GraphNodeSelection"
:class="{ visible, selected: props.selected }"
:class="{ visible }"
:style="rootStyle"
@pointerenter="hovered = true"
@pointerleave="hovered = false"
@ -52,7 +51,7 @@ const rootStyle = computed(() => {
&:before {
position: absolute;
content: '';
opacity: 0.2;
opacity: 0.3;
display: block;
inset: var(--selected-node-border-width);
box-shadow: 0 0 0 calc(0px - var(--node-border-radius)) var(--selection-color);
@ -67,8 +66,4 @@ const rootStyle = computed(() => {
.GraphNodeSelection.visible::before {
box-shadow: 0 0 0 var(--selected-node-border-width) var(--selection-color);
}
.GraphNodeSelection:not(.selected):hover::before {
opacity: 0.3;
}
</style>

View File

@ -125,7 +125,7 @@ export const ICON_WIDTH = 16
<style scoped>
.NodeWidgetTree {
color: white;
color: var(--color-node-text);
outline: none;
min-height: var(--node-port-height);

View File

@ -1,15 +1,14 @@
import { Awareness } from '@/stores/awareness'
import { ProjectFiles, useProjectFiles } from '@/stores/projectFiles'
import { Vec2 } from '@/util/data/vec2'
import type { DataServer } from '@/util/net/dataServer'
import { Keccak, sha3_224 as SHA3 } from '@noble/hashes/sha3'
import type { Hash } from '@noble/hashes/utils'
import { bytesToHex } from '@noble/hashes/utils'
import { markRaw, toRaw } from 'vue'
import { escapeTextLiteral } from 'ydoc-shared/ast/text'
import type { LanguageServer } from 'ydoc-shared/languageServer'
import { ErrorCode, RemoteRpcError } from 'ydoc-shared/languageServer'
import type { Path, StackItem, Uuid } from 'ydoc-shared/languageServerTypes'
import { Err, Ok, withContext, type Result } from 'ydoc-shared/util/data/result'
import { Err, Ok, type Result } from 'ydoc-shared/util/data/result'
// === Constants ===
@ -47,13 +46,17 @@ export class Uploader {
private checksum: Hash<Keccak>
private uploadedBytes: bigint
private stackItem: StackItem
private awareness: Awareness
private projectFiles: ProjectFiles
private constructor(
private rpc: LanguageServer,
private binary: DataServer,
private awareness: Awareness,
projectStore: {
projectRootId: Promise<Uuid | undefined>
lsRpcConnection: LanguageServer
dataConnection: DataServer
awareness: Awareness
},
private file: File,
private projectRootId: Uuid,
private position: Vec2,
private isOnLocalBackend: boolean,
private disableDirectRead: boolean,
@ -62,14 +65,18 @@ export class Uploader {
this.checksum = SHA3.create()
this.uploadedBytes = BigInt(0)
this.stackItem = markRaw(toRaw(stackItem))
this.awareness = projectStore.awareness
this.projectFiles = useProjectFiles(projectStore)
}
/** Constructor */
static Create(
rpc: LanguageServer,
binary: DataServer,
projectRootId: Uuid,
awareness: Awareness,
projectStore: {
projectRootId: Promise<Uuid | undefined>
lsRpcConnection: LanguageServer
dataConnection: DataServer
awareness: Awareness
},
file: File,
position: Vec2,
isOnLocalBackend: boolean,
@ -77,11 +84,8 @@ export class Uploader {
stackItem: StackItem,
): Uploader {
return new Uploader(
rpc,
binary,
awareness,
projectStore,
file,
projectRootId,
position,
isOnLocalBackend,
disableDirectRead,
@ -100,20 +104,29 @@ export class Uploader {
) {
return Ok({ source: 'FileSystemRoot', name: this.file.path })
}
const dataDirExists = await this.ensureDataDirExists()
const rootId = await this.projectFiles.projectRootId
if (rootId == null) return Err('Could not identify project root.')
const dataDirPath = { rootId, segments: [DATA_DIR_NAME] }
const dataDirExists = await this.projectFiles.ensureDirExists(dataDirPath)
if (!dataDirExists.ok) return dataDirExists
const name = await this.pickUniqueName(this.file.name)
const name = await this.projectFiles.pickUniqueName(dataDirPath, this.file.name)
if (!name.ok) return name
this.awareness.addOrUpdateUpload(name.value, {
sizePercentage: 0,
position: this.position,
stackItem: this.stackItem,
})
const remotePath: Path = { rootId: this.projectRootId, segments: [DATA_DIR_NAME, name.value] }
const remotePath: Path = { rootId, segments: [DATA_DIR_NAME, name.value] }
const cleanup = this.cleanup.bind(this, name.value)
const writableStream = new WritableStream<Uint8Array>({
write: async (chunk: Uint8Array) => {
await this.binary.writeBytes(remotePath, this.uploadedBytes, false, chunk)
const result = await this.projectFiles.writeBytes(
remotePath,
this.uploadedBytes,
false,
chunk,
)
if (!result.ok) throw result.error
this.checksum.update(chunk)
this.uploadedBytes += BigInt(chunk.length)
const bytes = Number(this.uploadedBytes)
@ -127,13 +140,13 @@ export class Uploader {
close: cleanup,
abort: async (reason: string) => {
cleanup()
await this.rpc.deleteFile(remotePath)
await this.projectFiles.deleteFile(remotePath)
throw new Error(`Uploading process aborted. ${reason}`)
},
})
// Disabled until https://github.com/enso-org/enso/issues/6691 is fixed.
// Plus, handle the error here, as it should be displayed to the user.
// uploader.assertChecksum(remotePath)
// this.projectFiles.assertChecksum(remotePath)
await this.file.stream().pipeTo(writableStream)
return Ok({ source: 'Project', name: name.value })
}
@ -141,76 +154,4 @@ export class Uploader {
private cleanup(name: string) {
this.awareness.removeUpload(name)
}
private async assertChecksum(path: Path): Promise<Result<void>> {
const engineChecksum = await this.rpc.fileChecksum(path)
if (!engineChecksum.ok) return engineChecksum
const hexChecksum = bytesToHex(this.checksum.digest())
if (hexChecksum != engineChecksum.value.checksum) {
return Err(
`Uploading file failed, checksum does not match. ${hexChecksum} != ${engineChecksum.value.checksum}`,
)
} else {
return Ok()
}
}
private dataDirPath(): Path {
return { rootId: this.projectRootId, segments: [DATA_DIR_NAME] }
}
private async ensureDataDirExists() {
const exists = await this.dataDirExists()
if (!exists.ok) return exists
if (exists.value) return Ok()
return await withContext(
() => 'When creating directory for uploaded file',
async () => {
return await this.rpc.createFile({
type: 'Directory',
name: DATA_DIR_NAME,
path: { rootId: this.projectRootId, segments: [] },
})
},
)
}
private async dataDirExists(): Promise<Result<boolean>> {
const info = await this.rpc.fileInfo(this.dataDirPath())
if (info.ok) return Ok(info.value.attributes.kind.type == 'Directory')
else if (
info.error.payload.cause instanceof RemoteRpcError &&
(info.error.payload.cause.code === ErrorCode.FILE_NOT_FOUND ||
info.error.payload.cause.code === ErrorCode.CONTENT_ROOT_NOT_FOUND)
) {
return Ok(false)
} else {
return info
}
}
private async pickUniqueName(suggestedName: string): Promise<Result<string>> {
const files = await this.rpc.listFiles(this.dataDirPath())
if (!files.ok) return files
const existingNames = new Set(files.value.paths.map((path) => path.name))
const { stem, extension = '' } = splitFilename(suggestedName)
let candidate = suggestedName
let num = 1
while (existingNames.has(candidate)) {
candidate = `${stem}_${num}.${extension}`
num += 1
}
return Ok(candidate)
}
}
/** Split filename into stem and (optional) extension. */
function splitFilename(fileName: string): { stem: string; extension?: string } {
const dotIndex = fileName.lastIndexOf('.')
if (dotIndex !== -1 && dotIndex !== 0) {
const stem = fileName.substring(0, dotIndex)
const extension = fileName.substring(dotIndex + 1)
return { stem, extension }
}
return { stem: fileName }
}

View File

@ -70,6 +70,6 @@ export const ArgumentNameShownKey: unique symbol = Symbol.for('WidgetInput:Argum
.placeholder,
.name {
color: rgb(255 255 255 / 0.5);
opacity: 0.6;
}
</style>

View File

@ -113,7 +113,7 @@ export const widgetDefinition = defineWidget(
}
.name {
color: rgb(255 255 255 / 0.5);
opacity: 0.5;
margin-right: var(--widget-token-pad-unit);
}
</style>

View File

@ -47,7 +47,7 @@ export const widgetDefinition = defineWidget(
}
.token {
color: rgb(255 255 255 / 0.33);
opacity: 0.33;
user-select: none;
}
</style>

View File

@ -189,7 +189,6 @@ export const widgetDefinition = defineWidget(
enabled,
connected,
isTarget,
isSelfArgument,
widgetRounded: connected,
newToConnect: !hasConnection && isCurrentEdgeHoverTarget,
primary: props.nesting < 2,
@ -215,6 +214,7 @@ export const widgetDefinition = defineWidget(
.WidgetPort.connected {
background-color: var(--node-color-port);
color: white;
}
.GraphEditor.draggingEdge .WidgetPort {
@ -248,16 +248,4 @@ export const widgetDefinition = defineWidget(
right: 0px;
}
}
.WidgetPort.isTarget:not(.isSelfArgument):after {
content: '';
position: absolute;
top: -4px;
left: 50%;
width: 4px;
height: 5px;
transform: translate(-50%, 0);
background-color: var(--node-color-port);
z-index: -1;
}
</style>

View File

@ -471,7 +471,8 @@ declare module '@/providers/widgetRegistry' {
<SizeTransition height :duration="100">
<DropdownWidget
v-if="dropDownInteraction.isActive() && activity == null"
color="var(--node-color-primary)"
color="var(--color-node-text)"
backgroundColor="var(--color-node-background)"
:entries="entries"
@clickEntry="onClick"
/>
@ -517,7 +518,7 @@ svg.arrow {
}
.activityElement {
--background-color: var(--node-color-primary);
--background-color: var(--color-node-primary);
/* Above the circular menu. */
z-index: 26;
}

View File

@ -137,4 +137,12 @@ export const widgetDefinition = defineWidget(
background: var(--color-widget-selection);
}
}
.selected .WidgetText {
background: var(--color-widget-unfocus);
&:has(> :focus) {
outline: none;
background: var(--color-widget-focus);
}
}
</style>

View File

@ -28,12 +28,12 @@ export const widgetDefinition = defineWidget(
display: inline-block;
vertical-align: middle;
white-space: pre;
color: rgb(255 255 255 / 0.33);
opacity: 0.33;
&.Ident,
&.TextSection,
&.Digits {
color: white;
opacity: 1;
}
&.TextSection,

View File

@ -1,21 +1,39 @@
<script setup lang="ts">
import type { UrlTransformer } from '@/components/MarkdownEditor/imageUrlTransformer'
import { defineAsyncComponent } from 'vue'
import {
provideDocumentationImageUrlTransformer,
type UrlTransformer,
} from '@/components/MarkdownEditor/imageUrlTransformer'
import { Vec2 } from '@/util/data/vec2'
import { ComponentInstance, computed, defineAsyncComponent, ref, toRef } from 'vue'
import * as Y from 'yjs'
const props = defineProps<{
yText: Y.Text
content: Y.Text | string
transformImageUrl?: UrlTransformer
toolbarContainer: HTMLElement | undefined
}>()
const inner = ref<ComponentInstance<typeof LazyMarkdownEditor>>()
const LazyMarkdownEditor = defineAsyncComponent(
() => import('@/components/MarkdownEditor/MarkdownEditorImpl.vue'),
)
provideDocumentationImageUrlTransformer(toRef(props, 'transformImageUrl'))
defineExpose({
loaded: computed(() => inner.value != null),
putText: (text: string) => {
inner.value?.putText(text)
},
putTextAtCoord: (text: string, coords: Vec2) => {
inner.value?.putTextAtCoords(text, coords)
},
})
</script>
<template>
<Suspense>
<LazyMarkdownEditor v-bind="props" />
<LazyMarkdownEditor ref="inner" v-bind="props" class="MarkdownEditor" />
</Suspense>
</template>

View File

@ -40,5 +40,10 @@ onUnmounted(() => {
</script>
<template>
<img :src="data?.ok ? data.value.url : ''" :alt="alt" :title="title" />
<img
:src="data?.ok ? data.value.url : ''"
:alt="alt"
:title="title"
:class="{ uploading: data?.ok && data.value.uploading?.value }"
/>
</template>

View File

@ -1,62 +1,88 @@
<script setup lang="ts">
import EditorRoot from '@/components/EditorRoot.vue'
import EditorRoot from '@/components/codemirror/EditorRoot.vue'
import { yCollab } from '@/components/codemirror/yCollab'
import { highlightStyle } from '@/components/MarkdownEditor/highlight'
import {
provideDocumentationImageUrlTransformer,
type UrlTransformer,
} from '@/components/MarkdownEditor/imageUrlTransformer'
import { ensoMarkdown } from '@/components/MarkdownEditor/markdown'
import VueComponentHost from '@/components/VueComponentHost.vue'
import { EditorState } from '@codemirror/state'
import { assert } from '@/util/assert'
import { Vec2 } from '@/util/data/vec2'
import { EditorState, Text } from '@codemirror/state'
import { EditorView } from '@codemirror/view'
import { minimalSetup } from 'codemirror'
import { type ComponentInstance, onMounted, ref, toRef, useCssModule, watch } from 'vue'
import { yCollab } from 'y-codemirror.next'
import * as awarenessProtocol from 'y-protocols/awareness.js'
import { type ComponentInstance, computed, onMounted, ref, toRef, useCssModule, watch } from 'vue'
import { Awareness } from 'y-protocols/awareness.js'
import * as Y from 'yjs'
const editorRoot = ref<ComponentInstance<typeof EditorRoot>>()
const props = defineProps<{
yText: Y.Text
transformImageUrl?: UrlTransformer | undefined
toolbarContainer: HTMLElement | undefined
content: Y.Text | string
toolbarContainer?: HTMLElement | undefined
}>()
const vueHost = ref<ComponentInstance<typeof VueComponentHost>>()
const focused = ref(false)
const readonly = computed(() => typeof props.content === 'string')
const editing = computed(() => !readonly.value && focused.value)
provideDocumentationImageUrlTransformer(toRef(props, 'transformImageUrl'))
const awareness = new awarenessProtocol.Awareness(new Y.Doc())
const awareness = new Awareness(new Y.Doc())
const editorView = new EditorView()
// Disable EditContext API because of https://github.com/codemirror/dev/issues/1458.
;(EditorView as any).EDIT_CONTEXT = false
const constantExtensions = [minimalSetup, highlightStyle(useCssModule()), EditorView.lineWrapping]
watch([vueHost, toRef(props, 'yText')], ([vueHost, yText]) => {
watch([vueHost, toRef(props, 'content')], ([vueHost, content]) => {
if (!vueHost) return
editorView.setState(
EditorState.create({
doc: yText.toString(),
extensions: [...constantExtensions, ensoMarkdown({ vueHost }), yCollab(yText, awareness)],
}),
)
let doc = ''
const extensions = [...constantExtensions, ensoMarkdown({ vueHost })]
if (typeof content === 'string') {
doc = content
} else {
assert(content.doc !== null)
const yTextWithDoc: Y.Text & { doc: Y.Doc } = content as any
doc = content.toString()
extensions.push(yCollab(yTextWithDoc, awareness))
}
editorView.setState(EditorState.create({ doc, extensions }))
})
onMounted(() => {
const content = editorView.dom.getElementsByClassName('cm-content')[0]!
content.addEventListener('focusin', () => (editing.value = true))
// Enable rendering the line containing the current cursor in `editing` mode if focus enters the element *inside*
// the scroll area. If we attached the handler to the editor root instead, clicking the scrollbar would activate
// editing mode.
editorView.dom
.getElementsByClassName('cm-content')[0]!
.addEventListener('focusin', () => (focused.value = true))
editorRoot.value?.rootElement?.prepend(editorView.dom)
})
const editing = ref(false)
/**
 * Replace text in the given document range with `text`, placing the text cursor after the inserted text.
 *
 * If the text contains multiple lines, it should use '\n', not '\r\n', for line endings.
*/
function putTextAt(text: string, from: number, to: number) {
const insert = Text.of(text.split('\n'))
editorView.dispatch({
changes: { from, to, insert },
selection: { anchor: from + insert.length },
})
}
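// For example (illustrative values), putTextAt('![Image](/images/cat.png)', 5, 5) inserts the
// link at offset 5 without replacing anything and leaves the cursor right after the inserted
// text, while passing from < to replaces that range instead.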
defineExpose({
putText: (text: string) => {
const range = editorView.state.selection.main
putTextAt(text, range.from, range.to)
},
putTextAt,
putTextAtCoords: (text: string, coords: Vec2) => {
const pos = editorView.posAtCoords(coords, false)
putTextAt(text, pos, pos)
},
})
</script>
<template>
<EditorRoot
ref="editorRoot"
class="MarkdownEditor"
:class="{ editing }"
@focusout="editing = false"
/>
<EditorRoot ref="editorRoot" v-bind="$attrs" :class="{ editing }" @focusout="focused = false" />
<VueComponentHost ref="vueHost" />
</template>
@ -65,19 +91,14 @@ const editing = ref(false)
font-family: var(--font-sans);
}
:deep(.cm-scroller) {
/* Prevent touchpad back gesture, which can be triggered while panning. */
overscroll-behavior: none;
}
.EditorRoot :deep(.cm-editor) {
position: relative;
width: 100%;
height: 100%;
:deep(.cm-editor) {
opacity: 1;
color: black;
font-size: 12px;
outline: none;
}
:deep(img.uploading) {
opacity: 0.5;
}
</style>

View File

@ -0,0 +1,74 @@
<script setup lang="ts">
import MarkdownEditorImpl from '@/components/MarkdownEditor/MarkdownEditorImpl.vue'
import type { Text } from '@codemirror/state'
import { SyntaxNode, TreeCursor } from '@lezer/common'
import { computed } from 'vue'
const { source, parsed } = defineProps<{
source: Text
parsed: SyntaxNode
}>()
function parseRow(cursor: TreeCursor, output: string[]) {
if (!cursor.firstChild()) return
do {
if (cursor.name === 'TableCell') {
output.push(source.sliceString(cursor.from, cursor.to))
} else if (cursor.name !== 'TableDelimiter') {
console.warn('Unexpected in table row:', cursor.name)
}
} while (cursor.nextSibling())
cursor.parent()
}
const content = computed(() => {
const headers: string[] = []
const rows: string[][] = []
const cursor = parsed.cursor()
if (cursor.firstChild()) {
do {
if (cursor.name === 'TableRow') {
const newRow: string[] = []
parseRow(cursor, newRow)
rows.push(newRow)
} else if (cursor.name === 'TableHeader') {
parseRow(cursor, headers)
} else if (cursor.name !== 'TableDelimiter') {
console.warn('Unexpected at top level of table:', cursor.name)
}
} while (cursor.nextSibling())
}
return { headers, rows }
})
</script>
<template>
<table>
<thead>
<tr>
<th v-for="(cell, c) in content.headers" :key="c" class="cell">
<MarkdownEditorImpl :content="cell" />
</th>
</tr>
</thead>
<tbody class="tableBody">
<tr v-for="(row, r) in content.rows" :key="r" class="row">
<td v-for="(cell, c) in row" :key="c" class="cell">
<MarkdownEditorImpl :content="cell" />
</td>
</tr>
</tbody>
</table>
</template>
<style scoped>
.cell {
border: 1px solid #dddddd;
}
.tableBody .row:nth-of-type(even) {
background-color: #f3f3f3;
}
:deep(.cm-line) {
padding-right: 6px;
}
</style>

View File

@ -104,6 +104,21 @@ test.each([
alt: '',
},
},
{
markdown: '![](<https://www.example.com/The image.avif>)',
image: {
src: 'https://www.example.com/The image.avif',
alt: '',
},
},
{
markdown: '![](<https://www.example.com/The image.avif)',
image: null,
},
{
markdown: '![](https://www.example.com/The image.avif)',
image: null,
},
{
markdown: '![Image](https://www.example.com/image.avif',
image: null,

View File

@ -1,9 +1,22 @@
import { createContextStore } from '@/providers'
import type { ToValue } from '@/util/reactivity'
import { toValue } from 'vue'
import { Ref, toValue } from 'vue'
import { mapOk, Ok, type Result } from 'ydoc-shared/util/data/result'
export type TransformUrlResult = Result<{ url: string; dispose?: () => void }>
/**
* A transformed URL.
*
 * Once the returned URL is no longer used, the `dispose` callback is called, allowing any
 * resources behind that URL to be released.
 *
 * `uploading` is set to true while the image is being uploaded to its target destination
 * (for example, while pasting an image).
*/
export type TransformUrlResult = Result<{
url: string
dispose?: () => void
uploading?: Ref<boolean>
}>
export type UrlTransformer = (url: string) => Promise<TransformUrlResult>
export {
@ -22,11 +35,17 @@ type Url = string
export interface ResourceInfo<T> {
location: T
uniqueId: ResourceId
uploading?: Ref<boolean>
}
export type ResourceLocator<T> = (url: Url) => Promise<Result<ResourceInfo<T>> | undefined>
export type ResourceFetcher<T> = (locator: T) => Promise<Result<Blob>>
/** TODO: Add docs */
/**
 * Create a {@link UrlTransformer} that fetches and caches images, returning URLs created
 * with `URL.createObjectURL`.
 *
 * Useful when an image is not reachable by the browser over plain HTTP.
*/
export function fetcherUrlTransformer<ResourceLocation>(
locateResource: ResourceLocator<ResourceLocation>,
fetchResource: ResourceFetcher<ResourceLocation>,
@ -48,7 +67,7 @@ export function fetcherUrlTransformer<ResourceLocation>(
} else if (!resource.ok) {
return resource
} else {
const { uniqueId, location } = resource.value
const { uniqueId, location, uploading } = resource.value
const result = await (allocatedUrls.get(uniqueId) ?? startFetch(uniqueId, location))
if (!result.ok) {
// Changes to external state may allow a future attempt to succeed.
@ -64,6 +83,7 @@ export function fetcherUrlTransformer<ResourceLocation>(
allocatedUrls.delete(uniqueId)
}
},
uploading,
})
}
}

View File

@ -1,9 +1,98 @@
import { markdownDecorators } from '@/components/MarkdownEditor/markdown/decoration'
import { markdown } from '@/components/MarkdownEditor/markdown/parse'
import type { VueHost } from '@/components/VueComponentHost.vue'
import { markdown as markdownExtension } from '@codemirror/lang-markdown'
import {
defineLanguageFacet,
foldNodeProp,
foldService,
indentNodeProp,
Language,
languageDataProp,
syntaxTree,
} from '@codemirror/language'
import type { Extension } from '@codemirror/state'
import { NodeProp, type NodeType, type Parser, type SyntaxNode } from '@lezer/common'
import { markdownParser } from 'ydoc-shared/ast/ensoMarkdown'
/** Markdown extension, with customizations for Enso. */
/** CodeMirror Extension for the Enso Markdown dialect. */
export function ensoMarkdown({ vueHost }: { vueHost: VueHost }): Extension {
return [markdown(), markdownDecorators({ vueHost })]
return [
markdownExtension({
base: mkLang(
markdownParser.configure([
commonmarkCodemirrorLanguageExtension,
tableCodemirrorLanguageExtension,
]),
),
}),
markdownDecorators({ vueHost }),
]
}
function mkLang(parser: Parser) {
return new Language(data, parser, [headerIndent], 'markdown')
}
const data = defineLanguageFacet({ commentTokens: { block: { open: '<!--', close: '-->' } } })
const headingProp = new NodeProp<number>()
const commonmarkCodemirrorLanguageExtension = {
props: [
foldNodeProp.add((type) => {
return !type.is('Block') || type.is('Document') || isHeading(type) != null || isList(type) ?
undefined
: (tree, state) => ({ from: state.doc.lineAt(tree.from).to, to: tree.to })
}),
headingProp.add(isHeading),
indentNodeProp.add({
Document: () => null,
}),
languageDataProp.add({
Document: data,
}),
],
}
function isHeading(type: NodeType) {
const match = /^(?:ATX|Setext)Heading(\d)$/.exec(type.name)
return match ? +match[1]! : undefined
}
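// For example, with the Lezer markdown node names used here, 'ATXHeading2' and 'SetextHeading2'
// both yield 2, while any non-heading node type yields undefined.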
function isList(type: NodeType) {
return type.name == 'OrderedList' || type.name == 'BulletList'
}
function findSectionEnd(headerNode: SyntaxNode, level: number) {
let last = headerNode
for (;;) {
const next = last.nextSibling
let heading
if (!next || ((heading = isHeading(next.type)) != null && heading <= level)) break
last = next
}
return last.to
}
const headerIndent = foldService.of((state, start, end) => {
for (
let node: SyntaxNode | null = syntaxTree(state).resolveInner(end, -1);
node;
node = node.parent
) {
if (node.from < start) break
const heading = node.type.prop(headingProp)
if (heading == null) continue
const upto = findSectionEnd(node, heading)
if (upto > end) return { from: end, to: upto }
}
return null
})
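// Illustrative example of the folding above: given
//   ## Section
//   body line 1
//   body line 2
//   ## Next
// a fold requested on the "## Section" line covers both body lines and stops before "## Next",
// because findSectionEnd stops at the first following heading whose level number is less than
// or equal to the current one.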
const tableCodemirrorLanguageExtension = {
props: [
foldNodeProp.add({
Table: (tree, state) => ({ from: state.doc.lineAt(tree.from).to, to: tree.to }),
}),
],
}

View File

@ -1,4 +1,5 @@
import DocumentationImage from '@/components/MarkdownEditor/DocumentationImage.vue'
import TableEditor from '@/components/MarkdownEditor/TableEditor.vue'
import type { VueHost } from '@/components/VueComponentHost.vue'
import { syntaxTree } from '@codemirror/language'
import { type EditorSelection, type Extension, RangeSetBuilder, type Text } from '@codemirror/state'
@ -19,6 +20,7 @@ export function markdownDecorators({ vueHost }: { vueHost: VueHost }): Extension
const stateDecorator = new TreeStateDecorator(vueHost, [
decorateImageWithClass,
decorateImageWithRendered,
decorateTable,
])
const stateDecoratorExt = EditorView.decorations.compute(['doc'], (state) =>
stateDecorator.decorate(syntaxTree(state), state.doc),
@ -144,12 +146,8 @@ function parseLinkLike(node: SyntaxNode, doc: Text) {
if (!textOpen) return
const textClose = textOpen.nextSibling // ]
if (!textClose) return
const urlOpen = textClose.nextSibling // (
// The parser accepts partial links such as `[Missing url]`.
if (!urlOpen) return
const urlNode = urlOpen.nextSibling
// If the URL is empty, this will be the closing 'LinkMark'.
if (urlNode?.name !== 'URL') return
const urlNode = findNextSiblingNamed(textClose, 'URL')
if (!urlNode) return
return {
textFrom: textOpen.to,
textTo: textClose.from,
@ -268,3 +266,76 @@ class ImageWidget extends WidgetType {
this.container = undefined
}
}
function findNextSiblingNamed(node: SyntaxNode, name: string) {
for (let sibling = node.nextSibling; sibling != null; sibling = sibling.nextSibling) {
if (sibling.name === name) {
return sibling
}
}
}
// === Tables ===
function decorateTable(
nodeRef: SyntaxNodeRef,
doc: Text,
emitDecoration: (from: number, to: number, deco: Decoration) => void,
vueHost: VueHost,
) {
if (nodeRef.name === 'Table') {
const source = doc //.slice(nodeRef.from, nodeRef.to)
const parsed = nodeRef.node
const widget = new TableWidget({ source, parsed }, vueHost)
emitDecoration(
nodeRef.from,
nodeRef.to,
Decoration.replace({
widget,
// Ensure the cursor is drawn relative to the content before the widget.
// If it is drawn relative to the widget, it will be hidden when the widget is hidden (i.e. during editing).
side: 1,
block: true,
}),
)
}
}
class TableWidget extends WidgetType {
private container: HTMLElement | undefined
private vueHostRegistration: { unregister: () => void } | undefined
constructor(
private readonly props: { source: Text; parsed: SyntaxNode },
private readonly vueHost: VueHost,
) {
super()
}
override get estimatedHeight() {
return -1
}
override toDOM(): HTMLElement {
if (!this.container) {
const container = markRaw(document.createElement('div'))
container.className = 'cm-table-editor'
this.vueHostRegistration = this.vueHost.register(
() =>
h(TableEditor, {
source: this.props.source,
parsed: this.props.parsed,
onEdit: () => console.log('onEdit'),
}),
container,
)
this.container = container
}
return this.container
}
override destroy() {
this.vueHostRegistration?.unregister()
this.container = undefined
}
}

View File

@ -26,4 +26,16 @@ defineExpose({ rootElement })
width: 100%;
height: 100%;
}
:deep(.cm-scroller) {
/* Prevent touchpad back gesture, which can be triggered while panning. */
overscroll-behavior: none;
}
:deep(.cm-editor) {
position: relative;
width: 100%;
height: 100%;
outline: none;
}
</style>

View File

@ -0,0 +1,28 @@
import { EditorSelection } from '@codemirror/state'
import { type EditorView } from '@codemirror/view'
/** Returns an API for the editor content, used by the integration tests. */
export function testSupport(editorView: EditorView) {
return {
textContent: () => editorView.state.doc.toString(),
textLength: () => editorView.state.doc.length,
indexOf: (substring: string, position?: number) =>
editorView.state.doc.toString().indexOf(substring, position),
placeCursor: (at: number) => {
editorView.dispatch({ selection: EditorSelection.create([EditorSelection.cursor(at)]) })
},
select: (from: number, to: number) => {
editorView.dispatch({ selection: EditorSelection.create([EditorSelection.range(from, to)]) })
},
selectAndReplace: (from: number, to: number, replaceWith: string) => {
editorView.dispatch({ selection: EditorSelection.create([EditorSelection.range(from, to)]) })
editorView.dispatch(editorView.state.update(editorView.state.replaceSelection(replaceWith)))
},
writeText: (text: string, from: number) => {
editorView.dispatch({
changes: [{ from: from, insert: text }],
selection: { anchor: from + text.length },
})
},
}
}
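// Usage sketch (illustrative; `editorView` is assumed to be the CodeMirror view driving the
// component under test):
//   const support = testSupport(editorView)
//   support.writeText('hello', 0)
//   support.select(0, 5)
//   support.selectAndReplace(0, 5, 'world')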

View File

@ -0,0 +1,65 @@
/**
* @file CodeMirror extension for synchronizing with a Yjs Text object.
* Based on <https://github.com/yjs/y-codemirror.next>. Initial changes from upstream:
* - Translated from JSDoc-typed JS to Typescript.
* - Refactored for stricter typing.
* - Changes to match project code style.
*/
import * as cmView from '@codemirror/view'
import { type Awareness } from 'y-protocols/awareness.js'
import * as Y from 'yjs'
import { YRange } from './y-range'
import { yRemoteSelections, yRemoteSelectionsTheme } from './y-remote-selections'
import { YSyncConfig, ySync, ySyncAnnotation, ySyncFacet } from './y-sync'
import {
YUndoManagerConfig,
redo,
undo,
yUndoManager,
yUndoManagerFacet,
yUndoManagerKeymap,
} from './y-undomanager'
export {
YRange,
YSyncConfig,
yRemoteSelections,
yRemoteSelectionsTheme,
ySync,
ySyncAnnotation,
ySyncFacet,
yUndoManagerKeymap,
}
/* CodeMirror Extension for synchronizing the editor state with a {@link Y.Text}. */
export const yCollab = (
ytext: Y.Text & { doc: Y.Doc },
awareness: Awareness | null,
{
undoManager = new Y.UndoManager(ytext),
}: {
/** Set to false to disable the undo-redo plugin */
undoManager?: Y.UndoManager | false
} = {},
) => {
const ySyncConfig = new YSyncConfig(ytext, awareness)
const plugins = [ySyncFacet.of(ySyncConfig), ySync]
if (awareness) {
plugins.push(yRemoteSelectionsTheme, yRemoteSelections)
}
if (undoManager !== false) {
// By default, only track changes that are produced by the sync plugin (local edits)
plugins.push(
yUndoManagerFacet.of(new YUndoManagerConfig(undoManager)),
yUndoManager,
cmView.EditorView.domEventHandlers({
beforeinput(e, view) {
if (e.inputType === 'historyUndo') return undo(view)
if (e.inputType === 'historyRedo') return redo(view)
return false
},
}),
)
}
return plugins
}
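/*
 * Usage sketch (illustrative only; `EditorState` and `EditorView` are assumed to come from the
 * usual CodeMirror packages):
 * ```ts
 * const ydoc = new Y.Doc()
 * const ytext = ydoc.getText('content') as Y.Text & { doc: Y.Doc }
 * const view = new EditorView({
 *   state: EditorState.create({ doc: ytext.toString(), extensions: [yCollab(ytext, null)] }),
 * })
 * ```
 */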

View File

@ -0,0 +1,32 @@
import * as Y from 'yjs'
/**
* Defines a range on text using relative positions that can be transformed back to
* absolute positions. (https://docs.yjs.dev/api/relative-positions)
*/
export class YRange {
/** Create a range from the given relative anchor and head positions. */
constructor(
readonly yanchor: Y.RelativePosition,
readonly yhead: Y.RelativePosition,
) {
this.yanchor = yanchor
this.yhead = yhead
}
/** Serialize both relative positions to JSON. */
toJSON() {
return {
yanchor: Y.relativePositionToJSON(this.yanchor),
yhead: Y.relativePositionToJSON(this.yhead),
}
}
/** Reconstruct a {@link YRange} from the JSON produced by {@link YRange.toJSON}. */
static fromJSON(json: { yanchor: unknown; yhead: unknown }) {
return new YRange(
Y.createRelativePositionFromJSON(json.yanchor),
Y.createRelativePositionFromJSON(json.yhead),
)
}
}
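// Round-trip sketch (illustrative): `YRange.fromJSON(range.toJSON())` reconstructs an equivalent
// range, so ranges can be persisted or sent to peers and remain valid across concurrent edits.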

View File

@ -0,0 +1,264 @@
import * as cmState from '@codemirror/state'
import * as cmView from '@codemirror/view'
import * as dom from 'lib0/dom'
import * as math from 'lib0/math'
import * as pair from 'lib0/pair'
import { Awareness } from 'y-protocols/awareness.js'
import { assert } from 'ydoc-shared/util/assert'
import * as Y from 'yjs'
import { type YSyncConfig, ySyncFacet } from './y-sync'
export const yRemoteSelectionsTheme = cmView.EditorView.baseTheme({
'.cm-ySelection': {},
'.cm-yLineSelection': {
padding: 0,
margin: '0px 2px 0px 4px',
},
'.cm-ySelectionCaret': {
position: 'relative',
borderLeft: '1px solid black',
borderRight: '1px solid black',
marginLeft: '-1px',
marginRight: '-1px',
boxSizing: 'border-box',
display: 'inline',
},
'.cm-ySelectionCaretDot': {
borderRadius: '50%',
position: 'absolute',
width: '.4em',
height: '.4em',
top: '-.2em',
left: '-.2em',
backgroundColor: 'inherit',
transition: 'transform .3s ease-in-out',
boxSizing: 'border-box',
},
'.cm-ySelectionCaret:hover > .cm-ySelectionCaretDot': {
transformOrigin: 'bottom center',
transform: 'scale(0)',
},
'.cm-ySelectionInfo': {
position: 'absolute',
top: '-1.05em',
left: '-1px',
fontSize: '.75em',
fontFamily: 'serif',
fontStyle: 'normal',
fontWeight: 'normal',
lineHeight: 'normal',
userSelect: 'none',
color: 'white',
paddingLeft: '2px',
paddingRight: '2px',
zIndex: 101,
transition: 'opacity .3s ease-in-out',
backgroundColor: 'inherit',
// these should be separate
opacity: 0,
transitionDelay: '0s',
whiteSpace: 'nowrap',
},
'.cm-ySelectionCaret:hover > .cm-ySelectionInfo': {
opacity: 1,
transitionDelay: '0s',
},
})
/**
* @todo specify the users that actually changed. Currently, we recalculate positions for every user.
*/
const yRemoteSelectionsAnnotation = cmState.Annotation.define<number[]>()
class YRemoteCaretWidget extends cmView.WidgetType {
constructor(
readonly color: string,
readonly name: string,
) {
super()
}
toDOM() {
return dom.element(
'span',
[
pair.create('class', 'cm-ySelectionCaret'),
pair.create('style', `background-color: ${this.color}; border-color: ${this.color}`),
],
[
dom.text('\u2060'),
dom.element('div', [pair.create('class', 'cm-ySelectionCaretDot')]),
dom.text('\u2060'),
dom.element('div', [pair.create('class', 'cm-ySelectionInfo')], [dom.text(this.name)]),
dom.text('\u2060'),
],
) as HTMLElement
}
override eq(widget: unknown) {
assert(widget instanceof YRemoteCaretWidget)
return widget.color === this.color
}
compare(widget: unknown) {
assert(widget instanceof YRemoteCaretWidget)
return widget.color === this.color
}
override updateDOM() {
return false
}
override get estimatedHeight() {
return -1
}
override ignoreEvent() {
return true
}
}
/** A CodeMirror plugin value rendering the selections and carets of remote peers, based on the shared Awareness state. */
export class YRemoteSelectionsPluginValue {
private readonly conf: YSyncConfig
private readonly _awareness: Awareness
decorations: cmView.DecorationSet
private readonly _listener: ({ added, updated, removed }: any) => void
/** Attach to the given view and subscribe to awareness change events. */
constructor(view: cmView.EditorView) {
this.conf = view.state.facet(ySyncFacet)
assert(this.conf.awareness != null)
this._listener = ({ added, updated, removed }: any) => {
const clients = added.concat(updated).concat(removed)
if (clients.findIndex((id: any) => id !== this._awareness.doc.clientID) >= 0) {
view.dispatch({ annotations: [yRemoteSelectionsAnnotation.of([])] })
}
}
this._awareness = this.conf.awareness
this._awareness.on('change', this._listener)
this.decorations = cmState.RangeSet.of([])
}
/** Stop listening for awareness changes. */
destroy() {
this._awareness.off('change', this._listener)
}
/** Publish the local cursor position to awareness and rebuild the decorations showing remote peers' selections. */
update(update: cmView.ViewUpdate) {
const ytext = this.conf.ytext
const ydoc = ytext.doc
const awareness = this._awareness
const decorations: cmState.Range<cmView.Decoration>[] = []
const localAwarenessState = this._awareness.getLocalState()
// set local awareness state (update cursors)
if (localAwarenessState != null) {
const hasFocus = update.view.hasFocus && update.view.dom.ownerDocument.hasFocus()
const sel = hasFocus ? update.state.selection.main : null
const currentAnchor =
localAwarenessState.cursor == null ?
null
: Y.createRelativePositionFromJSON(localAwarenessState.cursor.anchor)
const currentHead =
localAwarenessState.cursor == null ?
null
: Y.createRelativePositionFromJSON(localAwarenessState.cursor.head)
if (sel != null) {
const anchor = Y.createRelativePositionFromTypeIndex(ytext, sel.anchor)
const head = Y.createRelativePositionFromTypeIndex(ytext, sel.head)
if (
localAwarenessState.cursor == null ||
!Y.compareRelativePositions(currentAnchor, anchor) ||
!Y.compareRelativePositions(currentHead, head)
) {
awareness.setLocalStateField('cursor', {
anchor,
head,
})
}
} else if (localAwarenessState.cursor != null && hasFocus) {
awareness.setLocalStateField('cursor', null)
}
}
// update decorations (remote selections)
awareness.getStates().forEach((state, clientid) => {
if (clientid === awareness.doc.clientID) {
return
}
const cursor = state.cursor
if (cursor == null || cursor.anchor == null || cursor.head == null) {
return
}
const anchor = Y.createAbsolutePositionFromRelativePosition(cursor.anchor, ydoc)
const head = Y.createAbsolutePositionFromRelativePosition(cursor.head, ydoc)
if (anchor == null || head == null || anchor.type !== ytext || head.type !== ytext) {
return
}
const { color = '#30bced', name = 'Anonymous' } = state.user || {}
const colorLight = (state.user && state.user.colorLight) || color + '33'
const start = math.min(anchor.index, head.index)
const end = math.max(anchor.index, head.index)
const startLine = update.view.state.doc.lineAt(start)
const endLine = update.view.state.doc.lineAt(end)
if (startLine.number === endLine.number) {
// selected content in a single line.
decorations.push({
from: start,
to: end,
value: cmView.Decoration.mark({
attributes: { style: `background-color: ${colorLight}` },
class: 'cm-ySelection',
}),
})
} else {
// selected content in multiple lines
// first, render text-selection in the first line
decorations.push({
from: start,
to: startLine.from + startLine.length,
value: cmView.Decoration.mark({
attributes: { style: `background-color: ${colorLight}` },
class: 'cm-ySelection',
}),
})
// render text-selection in the last line
decorations.push({
from: endLine.from,
to: end,
value: cmView.Decoration.mark({
attributes: { style: `background-color: ${colorLight}` },
class: 'cm-ySelection',
}),
})
for (let i = startLine.number + 1; i < endLine.number; i++) {
const linePos = update.view.state.doc.line(i).from
decorations.push({
from: linePos,
to: linePos,
value: cmView.Decoration.line({
attributes: { style: `background-color: ${colorLight}`, class: 'cm-yLineSelection' },
}),
})
}
}
decorations.push({
from: head.index,
to: head.index,
value: cmView.Decoration.widget({
side: head.index - anchor.index > 0 ? -1 : 1, // the local cursor should be rendered outside the remote selection
block: false,
widget: new YRemoteCaretWidget(color, name),
}),
})
})
this.decorations = cmView.Decoration.set(decorations, true)
}
}
export const yRemoteSelections = cmView.ViewPlugin.fromClass(YRemoteSelectionsPluginValue, {
decorations: (v) => v.decorations,
})

View File

@ -0,0 +1,156 @@
import * as cmState from '@codemirror/state'
import * as cmView from '@codemirror/view'
import { type Awareness } from 'y-protocols/awareness.js'
import { assertDefined } from 'ydoc-shared/util/assert'
import * as Y from 'yjs'
import { YRange } from './y-range'
/** Configuration for the Yjs/CodeMirror sync plugins: the synced `Y.Text`, its undo manager, and the (optional) awareness instance. */
export class YSyncConfig {
readonly undoManager: Y.UndoManager
readonly ytext: Y.Text & { doc: Y.Doc }
/** Create a sync config wrapping the given `Y.Text` and awareness instance, and set up an undo manager for it. */
constructor(
ytext: Y.Text & { doc: Y.Doc },
readonly awareness: Awareness | null,
) {
this.ytext = ytext as Y.Text & { doc: Y.Doc }
this.undoManager = new Y.UndoManager(ytext)
}
/**
* Helper function to transform an absolute index position to a Yjs-based relative position
* (https://docs.yjs.dev/api/relative-positions).
*
* A relative position can be transformed back to an absolute position even after the document has changed. The position is
* automatically adapted. This does not require any position transformations. Relative positions are computed based on
* the internal Yjs document model. Peers that share content through Yjs are guaranteed that their positions will always be
* synced up when using relative positions.
*
* ```js
* import { ySyncFacet } from 'y-codemirror'
*
* ..
* const ysync = view.state.facet(ySyncFacet)
* // transform an absolute index position to a ypos
* const ypos = ysync.toYPos(3)
* // transform the ypos back to an absolute position
* ysync.fromYPos(ypos).pos // => 3
* ```
*
* It cannot be guaranteed that absolute index positions can be synced up between peers.
* This might lead to undesired behavior when implementing features that require that all peers see the
* same marked range (e.g. a comment plugin).
*/
toYPos(pos: number, assoc = 0) {
return Y.createRelativePositionFromTypeIndex(this.ytext, pos, assoc)
}
/** Transform a Yjs relative position back to an absolute index position in this document. */
fromYPos(rpos: Y.RelativePosition | object) {
const pos = Y.createAbsolutePositionFromRelativePosition(
Y.createRelativePositionFromJSON(rpos),
this.ytext.doc,
)
if (pos == null || pos.type !== this.ytext) {
throw new Error(
'[y-codemirror] The position you want to retrieve was created by a different document',
)
}
return {
pos: pos.index,
assoc: pos.assoc,
}
}
/** Convert a CodeMirror selection range to a `YRange` of Yjs relative positions. */
toYRange(range: cmState.SelectionRange) {
const assoc = range.assoc
const yanchor = this.toYPos(range.anchor, assoc)
const yhead = this.toYPos(range.head, assoc)
return new YRange(yanchor, yhead)
}
/** Convert a `YRange` of Yjs relative positions back to a CodeMirror selection range. */
fromYRange(yrange: YRange) {
const anchor = this.fromYPos(yrange.yanchor)
const head = this.fromYPos(yrange.yhead)
if (anchor.pos === head.pos) {
return cmState.EditorSelection.cursor(head.pos, head.assoc)
}
return cmState.EditorSelection.range(anchor.pos, head.pos)
}
}
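// For illustration (not part of this commit): a minimal sketch of using the range helpers
// above to keep a selection stable across concurrent edits. `conf` and `view` are assumed
// to come from the surrounding editor setup.
declare const conf: YSyncConfig // e.g. obtained via `view.state.facet(ySyncFacet)` (defined below)
declare const view: cmView.EditorView
// Remember the main selection as Yjs relative positions.
const remembered = conf.toYRange(view.state.selection.main)
// ...remote peers may insert or delete text here; the relative positions adapt automatically...
view.dispatch({ selection: conf.fromYRange(remembered) })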
export const ySyncFacet = cmState.Facet.define<YSyncConfig, YSyncConfig>({
combine(inputs) {
return inputs[inputs.length - 1]!
},
})
export const ySyncAnnotation = cmState.Annotation.define<YSyncConfig>()
class YSyncPluginValue implements cmView.PluginValue {
private readonly _ytext: Y.Text & { doc: Y.Doc }
private readonly conf: YSyncConfig
private readonly _observer: (event: Y.YTextEvent, tr: Y.Transaction) => void
constructor(private readonly view: cmView.EditorView) {
this.conf = view.state.facet(ySyncFacet)
this._observer = (event: Y.YTextEvent, tr: Y.Transaction) => {
if (tr.origin !== this.conf) {
const delta = event.delta
const changes: { from: number; to: number; insert: string }[] = []
let pos = 0
for (const d of delta) {
if (d.insert != null) {
changes.push({ from: pos, to: pos, insert: d.insert as any })
} else if (d.delete != null) {
changes.push({ from: pos, to: pos + d.delete, insert: '' })
pos += d.delete
} else {
assertDefined(d.retain)
pos += d.retain
}
}
view.dispatch({ changes, annotations: [ySyncAnnotation.of(this.conf)] })
}
}
this._ytext = this.conf.ytext
this._ytext.observe(this._observer)
}
update(update: cmView.ViewUpdate) {
if (
!update.docChanged ||
(update.transactions.length > 0 &&
update.transactions[0]!.annotation(ySyncAnnotation) === this.conf)
) {
return
}
const ytext = this.conf.ytext
ytext.doc.transact(() => {
/**
* This variable adjusts the fromA position to the current position in the Y.Text type.
*/
let adj = 0
update.changes.iterChanges((fromA, toA, fromB, toB, insert) => {
const insertText = insert.sliceString(0, insert.length, '\n')
if (fromA !== toA) {
ytext.delete(fromA + adj, toA - fromA)
}
if (insertText.length > 0) {
ytext.insert(fromA + adj, insertText)
}
adj += insertText.length - (toA - fromA)
})
}, this.conf)
}
destroy() {
this._ytext.unobserve(this._observer)
}
}
export const ySync = cmView.ViewPlugin.fromClass(YSyncPluginValue)
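A worked example (illustrative values, not from this commit) of the delta-to-changes mapping performed by the observer above:
// Suppose a remote transaction turns the Y.Text 'abcdef' into 'abcdfoo' via this delta:
//   [{ retain: 4 }, { insert: 'foo' }, { delete: 2 }]
// The loop above walks the delta and emits CodeMirror changes in original-document coordinates:
//   [{ from: 4, to: 4, insert: 'foo' }, { from: 4, to: 6, insert: '' }]
// Dispatching them reproduces 'abcdfoo' locally, annotated so the change is not echoed back to Yjs.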

View File

@ -0,0 +1,138 @@
import { type StackItemEvent } from '@/components/codemirror/yCollab/yjsTypes'
import * as cmState from '@codemirror/state'
import * as cmView from '@codemirror/view'
import { createMutex } from 'lib0/mutex'
import * as Y from 'yjs'
import { type YRange } from './y-range'
import { ySyncAnnotation, type YSyncConfig, ySyncFacet } from './y-sync'
/** Configuration wrapping the `Y.UndoManager` used for editor undo/redo. */
export class YUndoManagerConfig {
/** Create a config for the given undo manager. */
constructor(readonly undoManager: Y.UndoManager) {}
/** Start including changes from the given transaction origin in the undo history. */
addTrackedOrigin(origin: unknown) {
this.undoManager.addTrackedOrigin(origin)
}
/** Stop including changes from the given transaction origin in the undo history. */
removeTrackedOrigin(origin: unknown) {
this.undoManager.removeTrackedOrigin(origin)
}
/**
* @returns Whether a change was undone.
*/
undo(): boolean {
return this.undoManager.undo() != null
}
/**
* @returns Whether a change was redone.
*/
redo(): boolean {
return this.undoManager.redo() != null
}
}
export const yUndoManagerFacet = cmState.Facet.define<YUndoManagerConfig, YUndoManagerConfig>({
combine(inputs) {
return inputs[inputs.length - 1]!
},
})
export const yUndoManagerAnnotation = cmState.Annotation.define<YUndoManagerConfig>()
class YUndoManagerPluginValue implements cmView.PluginValue {
private readonly conf: YUndoManagerConfig
private readonly syncConf: YSyncConfig
private _beforeChangeSelection: null | YRange
private readonly _undoManager: Y.UndoManager
private readonly _mux: (cb: () => void, elseCb?: (() => void) | undefined) => any
private readonly _storeSelection: () => void
private readonly _onStackItemAdded: (event: StackItemEvent) => void
private readonly _onStackItemPopped: (event: StackItemEvent) => void
constructor(readonly view: cmView.EditorView) {
this.conf = view.state.facet(yUndoManagerFacet)
this._undoManager = this.conf.undoManager
this.syncConf = view.state.facet(ySyncFacet)
this._beforeChangeSelection = null
this._mux = createMutex()
this._onStackItemAdded = ({ stackItem, changedParentTypes }: StackItemEvent) => {
// only store metadata if this type was affected
if (
changedParentTypes.has(this.syncConf.ytext as any) &&
this._beforeChangeSelection &&
!stackItem.meta.has(this)
) {
// do not overwrite a previously stored selection
stackItem.meta.set(this, this._beforeChangeSelection)
}
}
this._onStackItemPopped = ({ stackItem }: StackItemEvent) => {
const sel = stackItem.meta.get(this)
if (sel) {
const selection = this.syncConf.fromYRange(sel)
view.dispatch(
view.state.update({
selection,
effects: [cmView.EditorView.scrollIntoView(selection)],
}),
)
this._storeSelection()
}
}
/**
* Do this without mutex, simply use the sync annotation
*/
this._storeSelection = () => {
// store the selection before the change is applied so we can restore it with the undo manager.
this._beforeChangeSelection = this.syncConf.toYRange(this.view.state.selection.main)
}
this._undoManager.on('stack-item-added', this._onStackItemAdded)
this._undoManager.on('stack-item-popped', this._onStackItemPopped)
this._undoManager.addTrackedOrigin(this.syncConf)
}
update(update: cmView.ViewUpdate) {
if (
update.selectionSet &&
(update.transactions.length === 0 ||
update.transactions[0]!.annotation(ySyncAnnotation) !== this.syncConf)
) {
// This only works when YUndoManagerPlugin is included before the sync plugin
this._storeSelection()
}
}
destroy() {
this._undoManager.off('stack-item-added', this._onStackItemAdded)
this._undoManager.off('stack-item-popped', this._onStackItemPopped)
this._undoManager.removeTrackedOrigin(this.syncConf)
}
}
export const yUndoManager = cmView.ViewPlugin.fromClass(YUndoManagerPluginValue)
export const undo: cmState.StateCommand = ({ state }) =>
state.facet(yUndoManagerFacet).undo() || true
export const redo: cmState.StateCommand = ({ state }) =>
state.facet(yUndoManagerFacet).redo() || true
export const undoDepth = (state: cmState.EditorState): number =>
state.facet(yUndoManagerFacet).undoManager.undoStack.length
export const redoDepth = (state: cmState.EditorState): number =>
state.facet(yUndoManagerFacet).undoManager.redoStack.length
/**
* Default key bindings for the undo manager.
*/
export const yUndoManagerKeymap: cmView.KeyBinding[] = [
{ key: 'Mod-z', run: undo, preventDefault: true },
{ key: 'Mod-y', mac: 'Mod-Shift-z', run: redo, preventDefault: true },
{ key: 'Mod-Shift-z', run: redo, preventDefault: true },
]
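A minimal wiring sketch (not from this commit; the module paths, the `'doc'` key, and the awareness setup are assumptions) showing how the sync, undo, and remote-selection pieces defined in these files fit into an editor:
import { EditorState } from '@codemirror/state'
import { EditorView, keymap } from '@codemirror/view'
import { Awareness } from 'y-protocols/awareness.js'
import * as Y from 'yjs'
// Hypothetical relative paths; the actual file names in this directory may differ.
import { yRemoteSelections } from './y-remote-selections'
import { YSyncConfig, ySync, ySyncFacet } from './y-sync'
import { YUndoManagerConfig, yUndoManager, yUndoManagerFacet, yUndoManagerKeymap } from './y-undomanager'
const ydoc = new Y.Doc()
const ytext = ydoc.getText('doc') as Y.Text & { doc: Y.Doc }
const syncConfig = new YSyncConfig(ytext, new Awareness(ydoc))
const undoConfig = new YUndoManagerConfig(syncConfig.undoManager)
new EditorView({
  parent: document.body,
  state: EditorState.create({
    doc: ytext.toString(),
    extensions: [
      keymap.of(yUndoManagerKeymap),
      yUndoManagerFacet.of(undoConfig),
      yUndoManager, // must be included before the sync plugin (see the comment in its `update`)
      ySyncFacet.of(syncConfig),
      ySync,
      yRemoteSelections, // remote carets; assumed to read the same `ySyncFacet`
    ],
  }),
})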

View File

@ -0,0 +1,28 @@
/** @file Types exposed by Yjs APIs, but not exported by name. */
import * as Y from 'yjs'
export interface StackItemEvent {
stackItem: StackItem
origin: unknown
type: 'undo' | 'redo'
changedParentTypes: Map<Y.AbstractType<Y.YEvent<any>>, Y.YEvent<any>[]>
}
export interface StackItem {
insertions: DeleteSet
deletions: DeleteSet
/**
* Use this to save and restore metadata like selection range
*/
meta: Map<any, any>
}
export interface DeleteSet {
clients: Map<number, DeleteItem[]>
}
export interface DeleteItem {
clock: number
len: number
}

View File

@ -19,6 +19,10 @@ const emit = defineEmits<{ 'update:modelValue': [modelValue: boolean] }>()
background: var(--color-widget);
}
.selected .Checkbox {
background: color-mix(in oklab, var(--color-widget-selected) 30%, var(--color-node-primary) 70%);
}
.Checkbox > div {
width: var(--widget-checkbox-inner-size);
height: var(--widget-checkbox-inner-size);

View File

@ -10,7 +10,7 @@ enum SortDirection {
descending = 'descending',
}
const props = defineProps<{ color: string; entries: Entry[] }>()
const props = defineProps<{ color: string; backgroundColor: string; entries: Entry[] }>()
const emit = defineEmits<{ clickEntry: [entry: Entry, keepOpen: boolean] }>()
const sortDirection = ref<SortDirection>(SortDirection.none)
@ -56,7 +56,8 @@ const enableSortButton = ref(false)
const styleVars = computed(() => {
return {
'--dropdown-bg': props.color,
'--dropdown-fg': props.color,
'--dropdown-bg': props.backgroundColor,
// Slightly shift the top border of the drawn dropdown away from the node's top border by a fraction of
// a pixel, to prevent it from poking through and disturbing the node's silhouette.
'--extend-margin': `${0.2 / (graphNavigator?.scale ?? 1)}px`,
@ -112,6 +113,7 @@ export interface DropdownEntry {
padding-top: var(--dropdown-extend);
background-color: var(--dropdown-bg);
border-radius: calc(var(--item-height) / 2 + var(--dropdown-padding));
color: var(--dropdown-fg);
&:before {
content: '';
@ -130,7 +132,6 @@ export interface DropdownEntry {
min-height: 16px;
max-height: calc(var(--visible-items) * var(--item-height) + 2 * var(--dropdown-padding));
list-style-type: none;
color: var(--color-text-light);
scrollbar-width: thin;
padding: var(--dropdown-padding);
position: relative;
@ -146,7 +147,7 @@ export interface DropdownEntry {
overflow: hidden;
&:hover {
background-color: color-mix(in oklab, var(--color-port-connected) 50%, transparent 50%);
background-color: color-mix(in oklab, var(--dropdown-bg) 50%, white 50%);
span {
--text-scroll-max: calc(var(--dropdown-max-width) - 28px);
will-change: transform;
@ -154,10 +155,6 @@ export interface DropdownEntry {
}
}
&:not(.selected):hover {
color: white;
}
&.selected {
background-color: var(--color-port-connected);

View File

@ -132,7 +132,7 @@ const displayedChildren = computed(() => {
const rootNode = ref<HTMLElement>()
const cssPropsToCopy = ['--node-color-primary', '--node-color-port', '--node-border-radius']
const cssPropsToCopy = ['--color-node-primary', '--node-color-port', '--node-border-radius']
function onDragStart(event: DragEvent, index: number) {
if (
@ -490,13 +490,13 @@ div {
}
.item .preview {
background: var(--node-color-primary);
background: var(--color-node-primary);
padding: 4px;
border-radius: var(--node-border-radius);
}
.token {
color: rgb(255 255 255 / 0.33);
opacity: 0.33;
user-select: none;
}
@ -525,21 +525,6 @@ div {
transition: box-shadow 0.2s ease;
pointer-events: none;
cursor: grab;
&:before {
content: '';
opacity: 0;
transition: opacity 0.2s ease;
position: absolute;
display: block;
left: -8px;
right: -16px;
top: -3px;
bottom: -3px;
border-radius: var(--node-border-radius) 0 0 var(--node-border-radius);
background-color: var(--node-color-primary);
z-index: -1;
}
}
.item:hover {
@ -548,16 +533,16 @@ div {
.item:hover .handle {
box-shadow:
2px 0 0 rgb(255 255 255 / 0.5),
-2px 0 0 rgb(255 255 255 / 0.5);
2px 0 0 var(--color-widget-unfocus),
-2px 0 0 var(--color-widget-unfocus);
&:hover {
box-shadow:
2px 0 0 rgb(255 255 255 / 0.8),
-2px 0 0 rgb(255 255 255 / 0.8);
2px 0 0 var(--color-widget-focus),
-2px 0 0 var(--color-widget-focus);
}
background: var(--node-color-primary);
background: var(--color-node-background);
pointer-events: all;
&:before {
@ -605,7 +590,7 @@ div {
left: -5000px;
}
:global(.ListWidget-drag-ghost > div) {
background-color: var(--node-color-primary);
background-color: var(--color-node-primary);
border-radius: var(--node-border-radius);
padding: 4px;
color: white;

View File

@ -164,15 +164,18 @@ defineExpose({
}
}
.NumericInputWidget.slider {
.selected .NumericInputWidget {
background: var(--color-widget-unfocus);
&:focus {
/* Color will be blended with background defined below. */
background-color: var(--color-widget);
background: var(--color-widget-focus);
}
}
.NumericInputWidget.slider {
background: linear-gradient(
to right,
var(--color-widget-focus) 0 calc(var(--slider-width) - 1px),
var(--color-widget-slight) calc(var(--slider-width) - 1px) var(--slider-width),
color-mix(in oklab, var(--color-widget-focus) 30%, var(--color-widget) 70%) 0
var(--slider-width),
var(--color-widget) var(--slider-width) 100%
);
}

View File

@ -60,6 +60,12 @@ func2 a =
r
## The main method
Here we test images:
![Image](/images/image.png)
![Image](../images/image.png)
![Image](</images/image.png>)
main =
five = 5
ten = 10
@ -84,6 +90,16 @@ const fileTree = {
return mainFile
},
},
images: {
get 'image.png'() {
return new Uint16Array([
20617, 18254, 2573, 2586, 0, 3328, 18505, 21060, 0, 768, 0, 768, 772, 0, 41984, 43014, 140,
0, 20501, 21580, 65093, 13106, 11262, 64043, 27756, 24571, 64863, 14906, 12030, 65070,
10023, 29424, 11222, 0, 4352, 17481, 21569, 55048, 28771, 24661, 4960, 24672, 52, 768, 161,
21933, 29603, 124, 0, 18688, 20037, 44612, 24642, 130,
]).buffer
},
},
}
const visualizations = new Map<Uuid, VisualizationConfiguration>()

View File

@ -9,7 +9,6 @@ import {
type Identifier,
} from '@/util/qualifiedName'
import * as array from 'lib0/array'
import * as object from 'lib0/object'
import { ObservableV2 } from 'lib0/observable'
import * as random from 'lib0/random'
import { reactive } from 'vue'

View File

@ -31,13 +31,9 @@ import {
type WatchSource,
type WritableComputedRef,
} from 'vue'
import {
Error as DataError,
OutboundPayload,
VisualizationUpdate,
} from 'ydoc-shared/binaryProtocol'
import { OutboundPayload, VisualizationUpdate } from 'ydoc-shared/binaryProtocol'
import { LanguageServer } from 'ydoc-shared/languageServer'
import type { Diagnostic, ExpressionId, MethodPointer, Path } from 'ydoc-shared/languageServerTypes'
import type { Diagnostic, ExpressionId, MethodPointer } from 'ydoc-shared/languageServerTypes'
import { type AbortScope } from 'ydoc-shared/util/net'
import {
DistributedProject,
@ -130,7 +126,9 @@ export const { provideFn: provideProjectStore, injectFn: useProjectStore } = cre
const clientId = random.uuidv4() as Uuid
const lsUrls = resolveLsUrl(config.value)
const lsRpcConnection = createLsRpcConnection(clientId, lsUrls.rpcUrl, abort)
const contentRoots = lsRpcConnection.contentRoots
const projectRootId = lsRpcConnection.contentRoots.then(
(roots) => roots.find((root) => root.type === 'Project')?.id,
)
const dataConnection = initializeDataConnection(clientId, lsUrls.dataUrl, abort)
const rpcUrl = new URL(lsUrls.rpcUrl)
@ -384,22 +382,6 @@ export const { provideFn: provideProjectStore, injectFn: useProjectStore } = cre
}
})
const projectRootId = contentRoots.then(
(roots) => roots.find((root) => root.type === 'Project')?.id,
)
async function readFileBinary(path: Path): Promise<Result<Blob>> {
const result = await dataConnection.readFile(path)
if (result instanceof DataError) {
return Err(result.message() ?? 'Failed to read file.')
}
const contents = result.contentsArray()
if (contents == null) {
return Err('No file contents received.')
}
return Ok(new Blob([contents]))
}
return proxyRefs({
setObservedFileName(name: string) {
observedFileName.value = name
@ -423,7 +405,6 @@ export const { provideFn: provideProjectStore, injectFn: useProjectStore } = cre
computedValueRegistry: markRaw(computedValueRegistry),
lsRpcConnection: markRaw(lsRpcConnection),
dataConnection: markRaw(dataConnection),
readFileBinary,
useVisualizationData,
isRecordingEnabled,
stopCapturingUndo,

View File

@ -0,0 +1,150 @@
import { DataServer } from '@/util/net/dataServer'
import { bytesToHex, Hash } from '@noble/hashes/utils'
import { Error as DataError } from 'ydoc-shared/binaryProtocol'
import { ErrorCode, LanguageServer, RemoteRpcError } from 'ydoc-shared/languageServer'
import { Path, Uuid } from 'ydoc-shared/languageServerTypes'
import { Err, Ok, Result, withContext } from 'ydoc-shared/util/data/result'
export type ProjectFiles = ReturnType<typeof useProjectFiles>
/**
* A composable providing project file operations.
*/
export function useProjectFiles(projectStore: {
projectRootId: Promise<Uuid | undefined>
lsRpcConnection: LanguageServer
dataConnection: DataServer
}) {
const { projectRootId, lsRpcConnection: lsRpc, dataConnection } = projectStore
async function readFileBinary(path: Path): Promise<Result<Blob>> {
const result = await dataConnection.readFile(path)
if (result instanceof DataError) {
return Err(result.message() ?? 'Failed to read file.')
}
const contents = result.contentsArray()
if (contents == null) {
return Err('No file contents received.')
}
return Ok(new Blob([contents]))
}
async function writeFileBinary(path: Path, content: Blob): Promise<Result> {
const result = await dataConnection.writeFile(path, await content.arrayBuffer())
if (result instanceof DataError) {
return Err(result.message() ?? 'Failed to write file.')
}
return Ok()
}
async function writeBytes(
path: Path,
offset: bigint,
overwriteExisting: boolean,
contents: string | ArrayBuffer | Uint8Array,
): Promise<Result> {
const result = await dataConnection.writeBytes(path, offset, overwriteExisting, contents)
if (result instanceof DataError) {
return Err(result.message() ?? 'Failed to write bytes.')
}
return Ok()
}
async function deleteFile(path: Path) {
return lsRpc.deleteFile(path)
}
/** Check if the directory exists and try to create it if missing. */
async function ensureDirExists(path: Path): Promise<Result<void>> {
const exists = await dirExists(path)
if (!exists.ok) return exists
if (exists.value) return Ok()
const name = path.segments.at(-1)
if (name == null) return Err('Cannot create context root')
return await withContext(
() => 'When creating directory for uploaded file',
async () => {
return await lsRpc.createFile({
type: 'Directory',
name,
path: { rootId: path.rootId, segments: path.segments.slice(0, -1) },
})
},
)
}
/**
* Check if the directory exists. If it does not, or it is a file, `Ok(false)` is returned.
* In case of error, the directory's existence is neither confirmed nor disproved.
*/
async function dirExists(path: Path): Promise<Result<boolean>> {
const info = await lsRpc.fileInfo(path)
if (info.ok) return Ok(info.value.attributes.kind.type == 'Directory')
else if (
info.error.payload.cause instanceof RemoteRpcError &&
(info.error.payload.cause.code === ErrorCode.FILE_NOT_FOUND ||
info.error.payload.cause.code === ErrorCode.CONTENT_ROOT_NOT_FOUND)
) {
return Ok(false)
} else {
return info
}
}
/**
* Return a file name that does not collide with existing files in `path`.
*
* The first choice is `suggestedName`; if that is taken, a numeric suffix is appended to the stem.
*/
async function pickUniqueName(path: Path, suggestedName: string): Promise<Result<string>> {
const files = await lsRpc.listFiles(path)
if (!files.ok) return files
const existingNames = new Set(files.value.paths.map((path) => path.name))
const { stem, extension = '' } = splitFilename(suggestedName)
let candidate = suggestedName
let num = 1
while (existingNames.has(candidate)) {
candidate = `${stem}_${num}.${extension}`
num += 1
}
return Ok(candidate)
}
async function assertChecksum<T extends Hash<T>>(
path: Path,
checksum: Hash<T>,
): Promise<Result<void>> {
const engineChecksum = await lsRpc.fileChecksum(path)
if (!engineChecksum.ok) return engineChecksum
const hexChecksum = bytesToHex(checksum.digest())
if (hexChecksum != engineChecksum.value.checksum) {
return Err(`Checksum does not match. ${hexChecksum} != ${engineChecksum.value.checksum}`)
} else {
return Ok()
}
}
return {
projectRootId,
readFileBinary,
writeFileBinary,
writeBytes,
deleteFile,
ensureDirExists,
pickUniqueName,
assertChecksum,
}
}
/** Split filename into stem and (optional) extension. */
function splitFilename(fileName: string): { stem: string; extension?: string } {
const dotIndex = fileName.lastIndexOf('.')
if (dotIndex !== -1 && dotIndex !== 0) {
const stem = fileName.substring(0, dotIndex)
const extension = fileName.substring(dotIndex + 1)
return { stem, extension }
}
return { stem: fileName }
}
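A consumer-side sketch (not from this commit; the upload scenario and the source of `projectStore` are illustrative) of the composable above:
// Upload a Blob into an `images` directory under the project root, avoiding name clashes.
async function uploadImage(projectStore: Parameters<typeof useProjectFiles>[0], blob: Blob) {
  const files = useProjectFiles(projectStore)
  const rootId = await files.projectRootId
  if (rootId == null) return
  const dir = { rootId, segments: ['images'] }
  if (!(await files.ensureDirExists(dir)).ok) return
  const name = await files.pickUniqueName(dir, 'image.png')
  if (!name.ok) return
  await files.writeFileBinary({ rootId, segments: ['images', name.value] }, blob)
}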

View File

@ -160,10 +160,15 @@ export class DataServer extends ObservableV2<DataServerEvents> {
return initResult.error.payload
}
}
this.websocket.send(builder.finish(rootTable).toArrayBuffer())
const promise = new Promise<T | Error>((resolve) => {
this.resolveCallbacks.set(messageUuid, resolve)
})
try {
this.websocket.send(builder.finish(rootTable).toArrayBuffer())
} catch (e: unknown) {
this.resolveCallbacks.delete(messageUuid)
throw e
}
return promise
}

View File

@ -1,6 +1,6 @@
// We are using `react-toastify`, since we share toast environment with dashboard.
import type { ResultError } from '@/util/data/result'
import { uuidv4 } from 'lib0/random'
// We are using `react-toastify`, since we share toast environment with dashboard.
import { toast, type ToastContent, type ToastOptions, type TypeOptions } from 'react-toastify'
import { onScopeDispose } from 'vue'
@ -15,7 +15,25 @@ export interface UseToastOptions extends ToastOptions {
outliveScope?: boolean
}
/** TODO: Add docs */
/**
* Composable for a new toast - a pop-up message displayed to the user.
*
* ```ts
* // useToast.error() is equivalent to useToast({ type: 'error' }).
* // There's also useToast.info, useToast.warning and useToast.success.
* const toastLspError = useToast.error()
* // Every `useToast` instance displays only one message at a time, so
* // here we create a separate toast for every "topic".
* const toastExecutionFailed = useToast.error()
* const toastUserActionFailed = useToast.error()
* // Toasts are automatically closed after some time. Here we suppress this.
* const toastStartup = useToast.info({ autoClose: false })
* const toastConnectionLost = useToast.error({ autoClose: false })
*
* ```
*
* For details, see the documentation of the `react-toastify` library.
*/
export function useToast(options: UseToastOptions = {}) {
const id = makeToastId()
if (options?.outliveScope !== true) {
@ -23,15 +41,18 @@ export function useToast(options: UseToastOptions = {}) {
}
return {
/** Show or update toast. */
show(content: ToastContent) {
if (toast.isActive(id)) toast.update(id, { ...options, render: content })
else toast(content, { ...options, toastId: id })
},
/** A helper for reporting {@link ResultError} to both toast and console. */
reportError<E>(result: ResultError<E>, preamble?: string) {
const msg = result.message(preamble)
console.error(msg)
this.show(msg)
},
/** Dismiss the displayed toast. */
dismiss() {
toast.dismiss(id)
},
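Two more consumer-side fragments (illustrative; the `useToast` import is omitted and the surrounding logic is hypothetical) showing the methods above:
import type { ResultError } from '@/util/data/result'
const toastStartup = useToast.info({ autoClose: false })
const toastReadFailed = useToast.error()
/** Show a persistent toast while a long operation runs, and dismiss it afterwards. */
async function withStartupToast<T>(operation: Promise<T>): Promise<T> {
  toastStartup.show('Loading project...')
  try {
    return await operation
  } finally {
    toastStartup.dismiss()
  }
}
/** Log a failure and show it to the user in one call. */
function reportReadFailure(error: ResultError<unknown>) {
  toastReadFailed.reportError(error, 'Cannot read the file')
}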

View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2024
- Kevin Jahns <kevin.jahns@protonmail.com>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -35,6 +35,8 @@
},
"dependencies": {
"enso-common": "workspace:*",
"@lezer/common": "^1.1.0",
"@lezer/markdown": "^1.3.1",
"@noble/hashes": "^1.4.0",
"@open-rpc/client-js": "^1.8.1",
"@types/debug": "^4.1.12",

View File

@ -1,3 +1,4 @@
import * as iter from 'enso-common/src/utilities/data/iter'
import { describe, expect, test } from 'vitest'
import { assert } from '../../util/assert'
import { MutableModule } from '../mutableModule'
@ -91,7 +92,7 @@ test('Creating comments: indented', () => {
expect(statement.module.root()?.code()).toBe(`main =\n ## ${docText}\n x = 1`)
})
describe('Markdown documentation', () => {
describe('Function documentation (Markdown)', () => {
const cases = [
{
source: '## My function',
@ -101,6 +102,10 @@ describe('Markdown documentation', () => {
source: '## My function\n\n Second paragraph',
markdown: 'My function\nSecond paragraph',
},
{
source: '## Trailing whitespace \n\n Second paragraph',
markdown: 'Trailing whitespace \nSecond paragraph',
},
{
source: '## My function\n\n\n Second paragraph after extra gap',
markdown: 'My function\n\nSecond paragraph after extra gap',
@ -141,14 +146,23 @@ describe('Markdown documentation', () => {
'the Enso syntax specification which requires line length not to exceed 100 characters.',
].join(' '), // TODO: This should be '\n ' when hard-wrapping is implemented.
},
{
source: '## Table below:\n | a | b |\n |---|---|',
markdown: 'Table below:\n| a | b |\n|---|---|',
},
{
source: '## Table below:\n\n | a | b |\n |---|---|',
markdown: 'Table below:\n\n| a | b |\n|---|---|',
},
]
test.each(cases)('Enso source comments to markdown', ({ source, markdown }) => {
test.each(cases)('Enso source comments to normalized markdown', ({ source, markdown }) => {
const moduleSource = `${source}\nmain =\n x = 1`
const topLevel = parseModule(moduleSource)
topLevel.module.setRoot(topLevel)
const main = [...topLevel.statements()][0]
const main = iter.first(topLevel.statements())
assert(main instanceof MutableFunctionDef)
expect(main.name.code()).toBe('main')
expect(main.mutableDocumentationMarkdown().toJSON()).toBe(markdown)
})
@ -156,7 +170,7 @@ describe('Markdown documentation', () => {
const functionCode = 'main =\n x = 1'
const topLevel = parseModule(functionCode)
topLevel.module.setRoot(topLevel)
const main = [...topLevel.statements()][0]
const main = iter.first(topLevel.statements())
assert(main instanceof MutableFunctionDef)
const markdownYText = main.mutableDocumentationMarkdown()
expect(markdownYText.toJSON()).toBe('')
@ -202,7 +216,7 @@ describe('Markdown documentation', () => {
const topLevel = parseModule(originalSourceWithDocComment)
expect(topLevel.code()).toBe(originalSourceWithDocComment)
const main = [...topLevel.statements()][0]
const main = iter.first(topLevel.statements())
assert(main instanceof MutableFunctionDef)
const markdownYText = main.mutableDocumentationMarkdown()
markdownYText.delete(0, markdownYText.length)

View File

@ -1,4 +1,5 @@
import { LINE_BOUNDARIES } from 'enso-common/src/utilities/data/string'
import { markdownParser } from './ensoMarkdown'
import { xxHash128 } from './ffi'
import type { ConcreteChild, RawConcreteChild } from './print'
import { ensureUnspaced, firstChild, preferUnspaced, unspaced } from './print'
@ -32,6 +33,8 @@ export function* docLineToConcrete(
for (const newline of docLine.newlines) yield preferUnspaced(newline)
}
// === Markdown ===
/**
* Render function documentation to concrete tokens. If the `markdown` content has the same value as when `docLine` was
* parsed (as indicated by `hash`), the `docLine` will be used (preserving concrete formatting). If it is different, the
@ -42,95 +45,161 @@ export function functionDocsToConcrete(
hash: string | undefined,
docLine: DeepReadonly<DocLine> | undefined,
indent: string | null,
): IterableIterator<RawConcreteChild> | undefined {
): Iterable<RawConcreteChild> | undefined {
return (
hash && docLine && xxHash128(markdown) === hash ? docLineToConcrete(docLine, indent)
: markdown ? yTextToTokens(markdown, (indent || '') + ' ')
: markdown ? markdownYTextToTokens(markdown, (indent || '') + ' ')
: undefined
)
}
function markdownYTextToTokens(yText: string, indent: string): Iterable<ConcreteChild<Token>> {
const tokensBuilder = new DocTokensBuilder(indent)
standardizeMarkdown(yText, tokensBuilder)
return tokensBuilder.build()
}
/**
* Given Enso documentation comment tokens, returns a model of their Markdown content. This model abstracts away details
* such as the locations of line breaks that are not paragraph breaks (e.g. lone newlines denoting hard-wrapping of the
* source code).
*/
export function abstractMarkdown(elements: undefined | TextToken<ConcreteRefs>[]) {
let markdown = ''
let newlines = 0
let readingTags = true
let elidedNewline = false
;(elements ?? []).forEach(({ token: { node } }, i) => {
if (node.tokenType_ === TokenType.Newline) {
if (readingTags || newlines > 0) {
markdown += '\n'
elidedNewline = false
} else {
elidedNewline = true
}
newlines += 1
} else {
let nodeCode = node.code()
if (i === 0) nodeCode = nodeCode.trimStart()
if (elidedNewline) markdown += ' '
markdown += nodeCode
newlines = 0
if (readingTags) {
if (!nodeCode.startsWith('ICON ')) {
readingTags = false
}
}
}
})
const { tags, rawMarkdown } = toRawMarkdown(elements)
const markdown = [...tags, normalizeMarkdown(rawMarkdown)].join('\n')
const hash = xxHash128(markdown)
return { markdown, hash }
}
// TODO: Paragraphs should be hard-wrapped to fit within the column limit, but this requires:
// - Recognizing block elements other than paragraphs; we must not split non-paragraph elements.
// - Recognizing inline elements; some cannot be split (e.g. links), while some can be broken into two (e.g. bold).
// If we break inline elements, we must also combine them when encountered during parsing.
const ENABLE_INCOMPLETE_WORD_WRAP_SUPPORT = false
function* yTextToTokens(yText: string, indent: string): IterableIterator<ConcreteChild<Token>> {
yield unspaced(Token.new('##', TokenType.TextStart))
const lines = yText.split(LINE_BOUNDARIES)
let printingTags = true
for (const [i, value] of lines.entries()) {
if (i) {
yield unspaced(Token.new('\n', TokenType.Newline))
if (value && !printingTags) yield unspaced(Token.new('\n', TokenType.Newline))
}
printingTags = printingTags && value.startsWith('ICON ')
let offset = 0
while (offset < value.length) {
if (offset !== 0) yield unspaced(Token.new('\n', TokenType.Newline))
let wrappedLineEnd = value.length
let printableOffset = offset
if (i !== 0) {
while (printableOffset < value.length && value[printableOffset] === ' ')
printableOffset += 1
function toRawMarkdown(elements: undefined | TextToken<ConcreteRefs>[]) {
const tags: string[] = []
let readingTags = true
let rawMarkdown = ''
;(elements ?? []).forEach(({ token: { node } }, i) => {
if (node.tokenType_ === TokenType.Newline) {
if (!readingTags) {
rawMarkdown += '\n'
}
if (ENABLE_INCOMPLETE_WORD_WRAP_SUPPORT && !printingTags) {
const ENSO_SOURCE_MAX_COLUMNS = 100
const MIN_DOC_COLUMNS = 40
const availableWidth = Math.max(
ENSO_SOURCE_MAX_COLUMNS - indent.length - (i === 0 && offset === 0 ? '## '.length : 0),
MIN_DOC_COLUMNS,
)
if (availableWidth < wrappedLineEnd - printableOffset) {
const wrapIndex = value.lastIndexOf(' ', printableOffset + availableWidth)
if (printableOffset < wrapIndex) {
wrappedLineEnd = wrapIndex
}
} else {
let nodeCode = node.code()
if (i === 0) nodeCode = nodeCode.trimStart()
if (readingTags) {
if (nodeCode.startsWith('ICON ')) {
tags.push(nodeCode)
} else {
readingTags = false
}
}
while (printableOffset < value.length && value[printableOffset] === ' ') printableOffset += 1
const whitespace = i === 0 && offset === 0 ? ' ' : indent
const wrappedLine = value.substring(printableOffset, wrappedLineEnd)
yield { whitespace, node: Token.new(wrappedLine, TokenType.TextSection) }
offset = wrappedLineEnd
if (!readingTags) {
rawMarkdown += nodeCode
}
}
}
yield unspaced(Token.new('\n', TokenType.Newline))
})
return { tags, rawMarkdown }
}
/**
* Convert the Markdown input to a format with rendered-style linebreaks: Hard-wrapped lines within a paragraph will be
* joined, and only a single linebreak character is used to separate paragraphs.
*/
function normalizeMarkdown(rawMarkdown: string): string {
let normalized = ''
let prevTo = 0
let prevName: string | undefined = undefined
const cursor = markdownParser.parse(rawMarkdown).cursor()
cursor.firstChild()
do {
if (prevTo < cursor.from) {
const textBetween = rawMarkdown.slice(prevTo, cursor.from)
normalized +=
cursor.name === 'Paragraph' && prevName !== 'Table' ? textBetween.slice(0, -1) : textBetween
}
const text = rawMarkdown.slice(cursor.from, cursor.to)
normalized += cursor.name === 'Paragraph' ? text.replaceAll(/ *\n */g, ' ') : text
prevTo = cursor.to
prevName = cursor.name
} while (cursor.nextSibling())
return normalized
}
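// For illustration, a traced example of the normalization above (consistent with the parser
// test cases in this commit, though not itself part of it):
//   normalizeMarkdown('First line\nwrapped continuation\n\nSecond paragraph')
//     => 'First line wrapped continuation\nSecond paragraph'
// `standardizeMarkdown` below performs the opposite conversion when printing back to Enso source.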
/**
* Convert from "normalized" Markdown to the on-disk representation, with paragraphs hard-wrapped and separated by blank
* lines.
*/
function standardizeMarkdown(normalizedMarkdown: string, textConsumer: TextConsumer) {
let prevTo = 0
let prevName: string | undefined = undefined
let printingTags = true
const cursor = markdownParser.parse(normalizedMarkdown).cursor()
cursor.firstChild()
do {
if (prevTo < cursor.from) {
const betweenText = normalizedMarkdown.slice(prevTo, cursor.from)
for (const _match of betweenText.matchAll(LINE_BOUNDARIES)) {
textConsumer.newline()
}
if (cursor.name === 'Paragraph' && prevName !== 'Table') {
textConsumer.newline()
}
}
const lines = normalizedMarkdown.slice(cursor.from, cursor.to).split(LINE_BOUNDARIES)
if (cursor.name === 'Paragraph') {
let printingNonTags = false
lines.forEach((line, i) => {
if (printingTags) {
if (cursor.name === 'Paragraph' && line.startsWith('ICON ')) {
textConsumer.text(line)
} else {
printingTags = false
}
}
if (!printingTags) {
if (i > 0) {
textConsumer.newline()
if (printingNonTags) textConsumer.newline()
}
textConsumer.wrapText(line)
printingNonTags = true
}
})
} else {
lines.forEach((line, i) => {
if (i > 0) textConsumer.newline()
textConsumer.text(line)
})
printingTags = false
}
prevTo = cursor.to
prevName = cursor.name
} while (cursor.nextSibling())
}
interface TextConsumer {
text: (text: string) => void
wrapText: (text: string) => void
newline: () => void
}
class DocTokensBuilder implements TextConsumer {
private readonly tokens: ConcreteChild<Token>[] = [unspaced(Token.new('##', TokenType.TextStart))]
constructor(private readonly indent: string) {}
text(text: string): void {
const whitespace = this.tokens.length === 1 ? ' ' : this.indent
this.tokens.push({ whitespace, node: Token.new(text, TokenType.TextSection) })
}
wrapText(text: string): void {
this.text(text)
}
newline(): void {
this.tokens.push(unspaced(Token.new('\n', TokenType.Newline)))
}
build(): ConcreteChild<Token>[] {
this.newline()
return this.tokens
}
}

View File

@ -1,31 +1,20 @@
import { markdown as baseMarkdown, markdownLanguage } from '@codemirror/lang-markdown'
import type { Extension } from '@codemirror/state'
import type { Tree } from '@lezer/common'
import type { BlockContext, BlockParser, Line, MarkdownParser, NodeSpec } from '@lezer/markdown'
import { Element } from '@lezer/markdown'
import { TreeCursor } from '@lezer/common'
import type {
BlockContext,
BlockParser,
DelimiterType,
InlineContext,
InlineDelimiter,
InlineParser,
Line,
MarkdownParser,
NodeSpec,
} from '@lezer/markdown'
import { parser as baseParser, Element, Emoji, GFM, Subscript, Superscript } from '@lezer/markdown'
import { assertDefined } from 'ydoc-shared/util/assert'
/**
* Enso Markdown extension. Differences from CodeMirror's base Markdown extension:
* - It defines the flavor of Markdown supported in Enso documentation. Currently, this is mostly CommonMark except we
* don't support setext headings. Planned features include support for some GFM extensions.
* - Many of the parsers differ from the `@lezer/markdown` parsers in their treatment of whitespace, in order to support
* a rendering mode where markup (and some associated spacing) is hidden.
*/
export function markdown(): Extension {
return baseMarkdown({
base: markdownLanguage,
extensions: [
{
parseBlock: [headerParser, bulletList, orderedList, blockquoteParser, disableSetextHeading],
defineNodes: [blockquoteNode],
},
],
})
}
function getType({ parser }: { parser: MarkdownParser }, name: string) {
const ty = parser.nodeSet.types.find((ty) => ty.name === name)
const ty = parser.nodeSet.types.find(ty => ty.name === name)
assertDefined(ty)
return ty.id
}
@ -138,8 +127,8 @@ const blockquoteNode: NodeSpec = {
},
}
function elt(type: number, from: number, to: number): Element {
return new (Element as any)(type, from, to)
function elt(type: number, from: number, to: number, children?: readonly Element[]): Element {
return new (Element as any)(type, from, to, children)
}
function isBlockquote(line: Line) {
@ -196,6 +185,212 @@ function getListIndent(line: Line, pos: number) {
return indented >= indentAfter + 5 ? indentAfter + 1 : indented
}
// === Link ===
const enum Mark {
None = 0,
Open = 1,
Close = 2,
}
const LinkStart: DelimiterType = {}
const ImageStart: DelimiterType = {}
const linkParser: InlineParser = {
name: 'Link',
parse: (cx, next, start) => {
return next == 91 /* '[' */ ? cx.addDelimiter(LinkStart, start, start + 1, true, false) : -1
},
}
const imageParser: InlineParser = {
name: 'Image',
parse: (cx, next, start) => {
return next == 33 /* '!' */ && cx.char(start + 1) == 91 /* '[' */ ?
cx.addDelimiter(ImageStart, start, start + 2, true, false)
: -1
},
}
const linkEndParser: InlineParser = {
name: 'LinkEnd',
parse: (cx, next, start) => {
if (next != 93 /* ']' */) return -1
// Scanning back to the next link/image start marker
const openDelim = cx.findOpeningDelimiter(LinkStart) ?? cx.findOpeningDelimiter(ImageStart)
if (openDelim == null) return -1
const part = cx.parts[openDelim] as InlineDelimiter
// If this one has been set invalid (because it would produce
// a nested link) or there's no valid link here, ignore both.
if (
!part.side ||
(cx.skipSpace(part.to) == start && !/[([]/.test(cx.slice(start + 1, start + 2)))
) {
cx.parts[openDelim] = null
return -1
}
// Finish the content and replace the entire range in
// this.parts with the link/image node.
const content = cx.takeContent(openDelim)
const link = (cx.parts[openDelim] = finishLink(
cx,
content,
part.type == LinkStart ? getType(cx, 'Link') : getType(cx, 'Image'),
part.from,
start + 1,
))
// Set any open-link markers before this link to invalid.
if (part.type == LinkStart)
for (let j = 0; j < openDelim; j++) {
const p = cx.parts[j]
if (p != null && !(p instanceof Element) && p.type == LinkStart) p.side = Mark.None
}
return link.to
},
}
function finishLink(
cx: InlineContext,
content: Element[],
type: number,
start: number,
startPos: number,
) {
const { text } = cx,
next = cx.char(startPos)
let endPos = startPos
const LinkMarkType = getType(cx, 'LinkMark')
const ImageType = getType(cx, 'Image')
content.unshift(elt(LinkMarkType, start, start + (type == ImageType ? 2 : 1)))
content.push(elt(LinkMarkType, startPos - 1, startPos))
if (next == 40 /* '(' */) {
let pos = cx.skipSpace(startPos + 1)
const dest = parseURL(text, pos - cx.offset, cx.offset, getType(cx, 'URL'), LinkMarkType)
let title
if (dest) {
const last = dest.at(-1)!
pos = cx.skipSpace(last.to)
// The destination and title must be separated by whitespace
if (pos != last.to) {
title = parseLinkTitle(text, pos - cx.offset, cx.offset, getType(cx, 'LinkTitle'))
if (title) pos = cx.skipSpace(title.to)
}
}
if (cx.char(pos) == 41 /* ')' */) {
content.push(elt(LinkMarkType, startPos, startPos + 1))
endPos = pos + 1
if (dest) content.push(...dest)
if (title) content.push(title)
content.push(elt(LinkMarkType, pos, endPos))
}
} else if (next == 91 /* '[' */) {
const label = parseLinkLabel(
text,
startPos - cx.offset,
cx.offset,
false,
getType(cx, 'LinkLabelType'),
)
if (label) {
content.push(label)
endPos = label.to
}
}
return elt(type, start, endPos, content)
}
// These return `null` when falling off the end of the input, `false`
// when parsing fails otherwise (for use in the incremental link
// reference parser).
function parseURL(
text: string,
start: number,
offset: number,
urlType: number,
linkMarkType: number,
): null | false | Element[] {
const next = text.charCodeAt(start)
if (next == 60 /* '<' */) {
for (let pos = start + 1; pos < text.length; pos++) {
const ch = text.charCodeAt(pos)
if (ch == 62 /* '>' */)
return [
elt(linkMarkType, start + offset, start + offset + 1),
elt(urlType, start + offset + 1, pos + offset),
elt(linkMarkType, pos + offset, pos + offset + 1),
]
if (ch == 60 || ch == 10 /* '<\n' */) return false
}
return null
} else {
let depth = 0,
pos = start
for (let escaped = false; pos < text.length; pos++) {
const ch = text.charCodeAt(pos)
if (isSpace(ch)) {
break
} else if (escaped) {
escaped = false
} else if (ch == 40 /* '(' */) {
depth++
} else if (ch == 41 /* ')' */) {
if (!depth) break
depth--
} else if (ch == 92 /* '\\' */) {
escaped = true
}
}
return (
pos > start ? [elt(urlType, start + offset, pos + offset)]
: pos == text.length ? null
: false
)
}
}
function parseLinkTitle(
text: string,
start: number,
offset: number,
linkTitleType: number,
): null | false | Element {
const next = text.charCodeAt(start)
if (next != 39 && next != 34 && next != 40 /* '"\'(' */) return false
const end = next == 40 ? 41 : next
for (let pos = start + 1, escaped = false; pos < text.length; pos++) {
const ch = text.charCodeAt(pos)
if (escaped) escaped = false
else if (ch == end) return elt(linkTitleType, start + offset, pos + 1 + offset)
else if (ch == 92 /* '\\' */) escaped = true
}
return null
}
function parseLinkLabel(
text: string,
start: number,
offset: number,
requireNonWS: boolean,
linkLabelType: number,
): null | false | Element {
for (
let escaped = false, pos = start + 1, end = Math.min(text.length, pos + 999);
pos < end;
pos++
) {
const ch = text.charCodeAt(pos)
if (escaped) escaped = false
else if (ch == 93 /* ']' */)
return requireNonWS ? false : elt(linkLabelType, start + offset, pos + 1 + offset)
else {
if (requireNonWS && !isSpace(ch)) requireNonWS = false
if (ch == 91 /* '[' */) return false
else if (ch == 92 /* '\\' */) escaped = true
}
}
return null
}
// === Debugging ===
/** Represents the structure of a {@link Tree} in a JSON-compatible format. */
@ -207,12 +402,12 @@ export interface DebugTree {
// noinspection JSUnusedGlobalSymbols
/** @returns A debug representation of the provided {@link Tree} */
export function debugTree(tree: Tree): DebugTree {
export function debugTree(tree: { cursor: () => TreeCursor }): DebugTree {
const cursor = tree.cursor()
let current: DebugTree[] = []
const stack: DebugTree[][] = []
cursor.iterate(
(node) => {
node => {
const children: DebugTree[] = []
current.push({
name: node.name,
@ -246,3 +441,25 @@ function isAtxHeading(line: Line) {
function isSpace(ch: number) {
return ch == 32 || ch == 9 || ch == 10 || ch == 13
}
const ensoMarkdownLanguageExtension = {
parseBlock: [headerParser, bulletList, orderedList, blockquoteParser, disableSetextHeading],
parseInline: [linkParser, imageParser, linkEndParser],
defineNodes: [blockquoteNode],
}
/**
* Lezer (CodeMirror) parser for the Enso documentation Markdown dialect.
* Differences from CodeMirror's base Markdown language:
* - It defines the flavor of Markdown supported in Enso documentation. Currently, this is mostly CommonMark except we
* don't support setext headings. Planned features include support for some GFM extensions.
* - Many of the parsers differ from the `@lezer/markdown` parsers in their treatment of whitespace, in order to support
* a rendering mode where markup (and some associated spacing) is hidden.
*/
export const markdownParser: MarkdownParser = baseParser.configure([
GFM,
Subscript,
Superscript,
Emoji,
ensoMarkdownLanguageExtension,
])
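A small sketch (not from this commit; the import path is illustrative) of inspecting a parse produced by this dialect with the `debugTree` helper above:
import { debugTree, markdownParser } from './ensoMarkdown'
// Tables come from the GFM extension enabled above; links and images use the custom inline parsers.
const tree = markdownParser.parse('See [docs](https://enso.org)\n\n| a | b |\n|---|---|\n| 1 | 2 |')
console.log(JSON.stringify(debugTree(tree), null, 2))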

View File

@ -32,4 +32,15 @@ declare module '@lezer/markdown' {
writeElements: (elts: readonly Element[], offset?: number) => Buffer
finish: (type: number, length: number) => Tree
}
export interface InlineDelimiter {
readonly type: DelimiterType
readonly from: number
readonly to: number
side: Mark
}
export interface InlineContext {
parts: (Element | InlineDelimiter | null)[]
}
}

View File

@ -534,3 +534,38 @@ export function setExternalIds(edit: MutableModule, spans: SpanMap, ids: IdMap):
}
return astsMatched
}
/**
* Determines the context of `ast`: module root, body block, statement, or expression; parses the given code in the same
* context.
*/
export function parseInSameContext(
module: MutableModule,
code: string,
ast: Ast,
): { root: Owned; spans: SpanMap; toRaw: Map<AstId, RawAst.Tree> } {
const rawParsed = rawParseInContext(code, getParseContext(ast))
return abstract(module, rawParsed, code)
}
type ParseContext = 'module' | 'block' | 'expression' | 'statement'
function getParseContext(ast: Ast): ParseContext {
const astModuleRoot = ast.module.root()
if (ast instanceof BodyBlock) return astModuleRoot && ast.is(astModuleRoot) ? 'module' : 'block'
return ast.isExpression() ? 'expression' : 'statement'
}
function rawParseInContext(code: string, context: ParseContext): RawAst.Tree {
if (context === 'module') return rawParseModule(code)
const block = rawParseBlock(code)
if (context === 'block') return block
const statement = iter.tryGetSoleValue(block.statements)?.expression
if (!statement) return block
if (context === 'statement') return statement
if (context === 'expression')
return statement.type === RawAst.Tree.Type.ExpressionStatement ?
statement.expression
: statement
return context satisfies never
}

View File

@ -1,30 +1,35 @@
import * as iter from 'enso-common/src/utilities/data/iter'
import * as map from 'lib0/map'
import { assert, assertDefined } from '../util/assert'
import type { SourceRangeEdit, SpanTree } from '../util/data/text'
import {
type SourceRangeEdit,
type SpanTree,
applyTextEdits,
applyTextEditsToSpans,
enclosingSpans,
textChangeToEdits,
trimEnd,
} from '../util/data/text'
import type { SourceRange, SourceRangeKey } from '../yjsModel'
import { rangeLength, sourceRangeFromKey, sourceRangeKey } from '../yjsModel'
import {
type SourceRange,
type SourceRangeKey,
rangeLength,
sourceRangeFromKey,
sourceRangeKey,
} from '../yjsModel'
import { xxHash128 } from './ffi'
import * as RawAst from './generated/ast'
import type { NodeKey, NodeSpanMap } from './idMap'
import { newExternalId } from './idMap'
import { type NodeKey, type NodeSpanMap, newExternalId } from './idMap'
import type { Module, MutableModule } from './mutableModule'
import { abstract, rawParseBlock, rawParseModule } from './parse'
import { parseInSameContext } from './parse'
import { printWithSpans } from './print'
import { isTokenId } from './token'
import type { AstId, MutableAst, Owned } from './tree'
import {
Assignment,
Ast,
type AstId,
MutableAssignment,
MutableBodyBlock,
type MutableAst,
type Owned,
rewriteRefs,
syncFields,
syncNodeMetadata,
@ -32,7 +37,6 @@ import {
/**
* Recursion helper for {@link syntaxHash}.
* @internal
*/
function hashSubtreeSyntax(ast: Ast, hashesOut: Map<SyntaxHash, Ast[]>): SyntaxHash {
let content = ''
@ -53,6 +57,7 @@ function hashSubtreeSyntax(ast: Ast, hashesOut: Map<SyntaxHash, Ast[]>): SyntaxH
declare const brandHash: unique symbol
/** See {@link syntaxHash}. */
type SyntaxHash = string & { [brandHash]: never }
/** Applies the syntax-data hashing function to the input, and brands the result as a `SyntaxHash`. */
function hashString(input: string): SyntaxHash {
return xxHash128(input) as SyntaxHash
@ -170,32 +175,18 @@ export function applyTextEditsToAst(
) {
const printed = printWithSpans(ast)
const code = applyTextEdits(printed.code, textEdits)
const astModuleRoot = ast.module.root()
const rawParsedBlock =
ast instanceof MutableBodyBlock && astModuleRoot && ast.is(astModuleRoot) ?
rawParseModule(code)
: rawParseBlock(code)
const rawParsedStatement =
ast instanceof MutableBodyBlock ? undefined : (
iter.tryGetSoleValue(rawParsedBlock.statements)?.expression
ast.module.transact(() => {
const parsed = parseInSameContext(ast.module, code, ast)
const toSync = calculateCorrespondence(
ast,
printed.info.nodes,
parsed.root,
parsed.spans.nodes,
textEdits,
code,
)
const rawParsedExpression =
ast.isExpression() ?
rawParsedStatement?.type === RawAst.Tree.Type.ExpressionStatement ?
rawParsedStatement.expression
: undefined
: undefined
const rawParsed = rawParsedExpression ?? rawParsedStatement ?? rawParsedBlock
const parsed = abstract(ast.module, rawParsed, code)
const toSync = calculateCorrespondence(
ast,
printed.info.nodes,
parsed.root,
parsed.spans.nodes,
textEdits,
code,
)
syncTree(ast, parsed.root, toSync, ast.module, metadataSource)
syncTree(ast, parsed.root, toSync, ast.module, metadataSource)
})
}
/** Replace `target` with `newContent`, reusing nodes according to the correspondence in `toSync`. */

View File

@ -565,8 +565,11 @@ export function syncFields(ast1: MutableAst, ast2: Ast, f: (id: AstId) => AstId
}
function syncYText(target: Y.Text, source: Y.Text) {
target.delete(0, target.length)
target.insert(0, source.toJSON())
const sourceString = source.toJSON()
if (target.toJSON() !== sourceString) {
target.delete(0, target.length)
target.insert(0, sourceString)
}
}
/** TODO: Add docs */

View File

@ -721,6 +721,7 @@ lazy val componentModulesPaths =
(`runtime-instrument-runtime-server` / Compile / exportedModuleBin).value,
(`runtime-language-arrow` / Compile / exportedModuleBin).value,
(`runtime-language-epb` / Compile / exportedModuleBin).value,
(`runtime-version-manager` / Compile / exportedModuleBin).value,
(`persistance` / Compile / exportedModuleBin).value,
(`cli` / Compile / exportedModuleBin).value,
(`json-rpc-server` / Compile / exportedModuleBin).value,
@ -1613,7 +1614,8 @@ lazy val `version-output` = (project in file("lib/scala/version-output"))
defaultDevEnsoVersion = defaultDevEnsoVersion,
ensoVersion = ensoVersion,
scalacVersion = scalacVersion,
graalVersion = graalVersion,
graalVersion = graalMavenPackagesVersion,
javaVersion = graalVersion,
currentEdition = currentEdition
)
}.taskValue
@ -3525,6 +3527,7 @@ lazy val `engine-runner` = project
(`pkg` / Compile / exportedModule).value,
(`engine-runner-common` / Compile / exportedModule).value,
(`runtime-parser` / Compile / exportedModule).value,
(`runtime-version-manager` / Compile / exportedModule).value,
(`version-output` / Compile / exportedModule).value,
(`engine-common` / Compile / exportedModule).value,
(`polyglot-api` / Compile / exportedModule).value,
@ -3700,6 +3703,7 @@ lazy val `engine-runner` = project
.dependsOn(`distribution-manager`)
.dependsOn(`edition-updater`)
.dependsOn(`runtime-parser`)
.dependsOn(`runtime-version-manager`)
.dependsOn(`logging-service`)
.dependsOn(`logging-service-logback` % Runtime)
.dependsOn(`engine-runner-common`)
@ -4337,15 +4341,34 @@ lazy val `connected-lock-manager-server` = project
lazy val `runtime-version-manager` = project
.in(file("lib/scala/runtime-version-manager"))
.enablePlugins(JPMSPlugin)
.configs(Test)
.settings(
frgaalJavaCompilerSetting,
scalaModuleDependencySetting,
mixedJavaScalaProjectSetting,
resolvers += Resolver.bintrayRepo("gn0s1s", "releases"),
libraryDependencies ++= Seq(
"com.typesafe.scala-logging" %% "scala-logging" % scalaLoggingVersion,
"org.apache.commons" % "commons-compress" % commonsCompressVersion,
"org.scalatest" %% "scalatest" % scalatestVersion % Test,
akkaHttp
"org.scalatest" %% "scalatest" % scalatestVersion % Test
),
Compile / moduleDependencies ++= Seq(
"org.apache.commons" % "commons-compress" % commonsCompressVersion,
"org.slf4j" % "slf4j-api" % slf4jVersion
),
Compile / internalModuleDependencies := Seq(
(`cli` / Compile / exportedModule).value,
(`distribution-manager` / Compile / exportedModule).value,
(`downloader` / Compile / exportedModule).value,
(`editions` / Compile / exportedModule).value,
(`edition-updater` / Compile / exportedModule).value,
(`logging-utils` / Compile / exportedModule).value,
(`pkg` / Compile / exportedModule).value,
(`semver` / Compile / exportedModule).value,
(`scala-libs-wrapper` / Compile / exportedModule).value,
(`scala-yaml` / Compile / exportedModule).value,
(`version-output` / Compile / exportedModule).value
)
)
.dependsOn(pkg)

View File

@ -546,13 +546,18 @@ impl JobArchetype for PackageIde {
} else {
shell(TEST_COMMAND)
};
let test_step = test_step
let mut test_step = test_step
.with_env("DEBUG", "pw:browser log:")
.with_secret_exposed_as(secret::ENSO_CLOUD_TEST_ACCOUNT_USERNAME, "ENSO_TEST_USER")
.with_secret_exposed_as(
secret::ENSO_CLOUD_TEST_ACCOUNT_PASSWORD,
"ENSO_TEST_USER_PASSWORD",
);
// Make E2E tests optional on Windows, as we have an ongoing issue with the runner.
// TODO[ib]: remove once the issue is resolved.
if target.0 == OS::Windows {
test_step.continue_on_error = Some(true);
}
steps.push(test_step);
// After the E2E tests run, they create a credentials file in user home directory.

View File

@ -664,6 +664,7 @@ pub async fn runner_sanity_test(
.bin
.join("enso")
.with_executable_extension();
let test_base = Command::new(&enso)
.args(["--run", repo_root.test.join("Base_Tests").as_str()])
.set_env(ENSO_DATA_DIRECTORY, engine_package)?
@ -686,7 +687,25 @@ pub async fn runner_sanity_test(
.run_ok()
.await;
test_base.and(test_internal_base).and(test_geo)
let all_cmds = test_base.and(test_internal_base).and(test_geo);
// The following test does not actually run anything; it just checks that the engine
// accepts the `--jvm` argument and can evaluate something.
if TARGET_OS != OS::Windows {
let test_jvm_arg = Command::new(&enso)
.args([
"--jvm",
"--run",
repo_root.test.join("Base_Tests").as_str(),
"__NON_EXISTING_TEST__",
])
.set_env(ENSO_DATA_DIRECTORY, engine_package)?
.run_ok()
.await;
all_cmds.and(test_jvm_arg)
} else {
all_cmds
}
} else {
Ok(())
}

View File

@ -36,21 +36,6 @@ The license file can be found at `licenses/APACHE2.0`.
Copyright notices related to this dependency can be found in the directory `com.typesafe.akka.akka-actor_2.13-2.6.20`.
'akka-http-core_2.13', licensed under the Apache-2.0, is distributed with the launcher.
The license file can be found at `licenses/APACHE2.0`.
Copyright notices related to this dependency can be found in the directory `com.typesafe.akka.akka-http-core_2.13-10.2.10`.
'akka-http_2.13', licensed under the Apache-2.0, is distributed with the launcher.
The license file can be found at `licenses/APACHE2.0`.
Copyright notices related to this dependency can be found in the directory `com.typesafe.akka.akka-http_2.13-10.2.10`.
'akka-parsing_2.13', licensed under the Apache-2.0, is distributed with the launcher.
The license file can be found at `licenses/APACHE2.0`.
Copyright notices related to this dependency can be found in the directory `com.typesafe.akka.akka-parsing_2.13-10.2.10`.
'akka-slf4j_2.13', licensed under the Apache-2.0, is distributed with the launcher.
The license file can be found at `licenses/APACHE2.0`.
Copyright notices related to this dependency can be found in the directory `com.typesafe.akka.akka-slf4j_2.13-2.6.20`.

View File

@ -1,11 +0,0 @@
Copyright (C) 2008-2017 Bjoern Hoehrmann <bjoern@hoehrmann.de>
Copyright (C) 2009-2017 Mathias Doenitz, Alexander Myltsev
Copyright (C) 2009-2022 Lightbend Inc. <https://www.lightbend.com>
Copyright 2011 Mark Harrah, Eugene Yokota
Copyright 2014 Twitter, Inc.
Copyright 2015 Heiko Seeberger

View File

@ -1 +0,0 @@
Copyright (C) 2009-2020 Lightbend Inc. <http://www.lightbend.com>

View File

@ -1,7 +0,0 @@
Copyright (C) 2009-2017 Mathias Doenitz, Alexander Myltsev
Copyright (C) 2009-2022 Lightbend Inc. <https://www.lightbend.com>
Copyright (c) 2004, Mikael Grev, MiG InfoCom AB. (base64 @ miginfocom . com)
Copyright (c) 2011-13 Miles Sabin

View File

@ -95,6 +95,7 @@ type AWS
Arguments:
- uri: The URI to resolve.
@uri (Text_Input display=..Always)
resolve_region_and_service : URI -> AWS_Region_Service
resolve_region_and_service (uri:URI=(Missing_Argument.throw "uri")) =
region_regex = regex "^(([a-z]{2}-[^.]+?-\d+)|(global))$"

View File

@ -115,9 +115,10 @@ type AWS_Credential
to_display_text self -> Text = self.to_text.to_display_text
## PRIVATE
default_widget (display : Display = ..When_Modified) -> Widget =
default_widget (add_user_password:Boolean=False) (display : Display = ..When_Modified) -> Widget =
default = Option "Default" "..Default"
profile = Option "Profile" "..Profile" [["profile", make_single_choice AWS_Credential.profile_names]]
key = Option "Key" "..Key" [["access_key_id", make_text_secret_selector], ["secret_access_key", make_text_secret_selector]]
user_password = if add_user_password then [Option "Username_And_Password" "..Username_And_Password"] else []
with_config = Option "With_Configuration" "..With_Configuration"
Widget.Single_Choice values=[default, profile, key, with_config] display=display
Widget.Single_Choice values=[default, profile, key, with_config]+user_password display=display

View File

@ -161,6 +161,9 @@ type Redshift_Dialect
prepare_fetch_types_query : SQL_Expression -> Context -> SQL_Statement
prepare_fetch_types_query self expression context =
Base_Generator.default_fetch_types_query self expression context
## PRIVATE
generate_collate self collation_name:Text -> Text = Base_Generator.default_generate_collate collation_name
## PRIVATE
check_aggregate_support : Aggregate_Column -> Boolean ! Unsupported_Database_Operation
@ -194,6 +197,26 @@ type Redshift_Dialect
Dialect_Flag.Supports_Case_Sensitive_Columns -> True
Dialect_Flag.Supports_Infinity -> True
Dialect_Flag.Case_Sensitive_Text_Comparison -> True
Dialect_Flag.Supports_Sort_Digits_As_Numbers -> False
Dialect_Flag.Case_Insensitive_Ordering -> False
Dialect_Flag.Order_By_Unicode_Normalization_By_Default -> True
Dialect_Flag.Allows_Mixed_Type_Comparisons -> False
Dialect_Flag.Supports_Unicode_Normalization -> False
Dialect_Flag.NaN_Non_Comparable -> True
Dialect_Flag.Distinct_Returns_First_Row_From_Group_If_Ordered -> True
Dialect_Flag.Date_Time -> True
Dialect_Flag.Text_Length_Limited_Columns -> False
Dialect_Flag.Fixed_Length_Text_Columns -> False
Dialect_Flag.Length_Restricted_Text_Columns -> True
Dialect_Flag.Removes_Trailing_Whitespace_Casting_From_Char_To_Varchar -> False
Dialect_Flag.Char_Max_Size_After_Substring_Kept -> True
Dialect_Flag.Different_Size_Integer_Types -> True
Dialect_Flag.Supports_8bit_Integer -> False
Dialect_Flag.Supports_Decimal_Type -> True
Dialect_Flag.Supports_Time_Duration -> False
Dialect_Flag.Supports_Nanoseconds_In_Time -> False
Dialect_Flag.Supports_Mixed_Columns -> False
Dialect_Flag.Supports_Date_Time_Without_Timezone -> False
## PRIVATE
The default table types to use when listing tables.

View File

@ -31,7 +31,15 @@ type Redshift_Details
- use_ssl: Whether to use SSL (defaults to `SSL_Mode.Require`).
- client_cert: The client certificate to use or `Nothing` if not needed.
@host (Text_Input display=..Always)
Redshift (host:Text=(Missing_Argument.throw "host")) (port:Integer=5439) (schema:Text='') (db_user:Text='') (credentials:Credentials|AWS_Credential=AWS_Credential.Profile) (use_ssl:SSL_Mode=..Require) (client_cert:Client_Certificate|Nothing=Nothing)
@credentials AWS_Credential.default_widget add_user_password=True
Redshift
host:Text=(Missing_Argument.throw "host")
port:Integer=5439
schema:Text=''
db_user:Text=''
credentials:Credentials|AWS_Credential=..Profile
use_ssl:SSL_Mode=..Require
client_cert:Client_Certificate|Nothing=Nothing
## PRIVATE
Attempt to resolve the constructor.

View File

@ -1,5 +1,6 @@
from Standard.Base import all
import Standard.Base.Errors.Common.Missing_Argument
import Standard.Base.Metadata.Widget.Text_Input
import Standard.Base.Network.HTTP.Response_Body.Response_Body
import Standard.Base.System.File_Format_Metadata.File_Format_Metadata
import Standard.Base.System.Input_Stream.Input_Stream
@ -71,6 +72,7 @@ list_buckets credentials:AWS_Credential=..Default = handle_s3_errors <|
- If the bucket does not exist, an `S3_Bucket_Not_Found` error is thrown.
- If more items are available than the `max_count` parameter, a
`More_Records_Available` warning is attached to the result.
@bucket (Text_Input display=..Always)
@credentials AWS_Credential.default_widget
list_objects : Text -> Text -> AWS_Credential -> Integer -> Vector S3_File ! S3_Error
list_objects (bucket : Text = Missing_Argument.throw "bucket") prefix:Text="" credentials:AWS_Credential=..Default max_count:Integer=1000 =
@ -126,6 +128,7 @@ get_object bucket key credentials:AWS_Credential=AWS_Credential.Default delimite
`AWS_SDK_Error` will be raised.
- If the bucket does not exist, an `S3_Bucket_Not_Found` error is thrown.
- If the object does not exist, an `S3_Key_Not_Found` error is thrown.
@bucket (Text_Input display=..Always)
@credentials AWS_Credential.default_widget
head : Text -> Text -> AWS_Credential -> Dictionary Text Any ! S3_Error
head (bucket : Text = Missing_Argument.throw "bucket") key:Text="" credentials:AWS_Credential=..Default =

View File

@ -82,7 +82,7 @@ from project.System.File_Format import Auto_Detect, File_Format
import Standard.Examples
example_xls_to_table = Data.read Examples.xls (..Sheet 'Dates')
@path Text_Input
@path (Text_Input display=..Always)
@format File_Format.default_widget
read : Text | URI | File -> File_Format -> Problem_Behavior -> Any ! File_Error
read path=(Missing_Argument.throw "path") format=Auto_Detect (on_problems : Problem_Behavior = ..Report_Warning) = case path of
@ -138,7 +138,7 @@ read path=(Missing_Argument.throw "path") format=Auto_Detect (on_problems : Prob
files = Data.list name_filter="*.csv"
example_csv_dir_to_table = Data.read_many files
@paths (Vector_Editor item_editor=Text_Input item_default='""')
@paths (Vector_Editor item_editor=Text_Input item_default='""' display=..Always)
@format File_Format.default_widget
read_many : Many_Files_List -> File_Format -> Return_As -> Problem_Behavior -> Any ! File_Error
read_many (paths : Many_Files_List = Missing_Argument.throw "paths") format=Auto_Detect return=..Vector (on_problems : Problem_Behavior = ..Report_Warning) =
@ -170,7 +170,7 @@ read_many (paths : Many_Files_List = Missing_Argument.throw "paths") format=Auto
import Standard.Examples
example_read = Data.read_text Examples.csv_path
@path Text_Input
@path (Text_Input display=..Always)
@encoding Encoding.default_widget
read_text : (Text | File) -> Encoding -> Problem_Behavior -> Text
read_text path=(Missing_Argument.throw "path") (encoding : Encoding = Encoding.default) (on_problems : Problem_Behavior = ..Report_Warning) =
@ -228,7 +228,7 @@ read_text path=(Missing_Argument.throw "path") (encoding : Encoding = Encoding.d
example_list_files =
Data.list Examples.data_dir name_filter="**.md" recursive=True
@directory Folder_Browse
@directory (Folder_Browse display=..Always)
@name_filter File_Format.name_filter_widget
list : Text | File -> Text -> Boolean -> Vector File
list (directory:(Text | File)=enso_project.root) (name_filter:Text="") recursive:Boolean=False =

View File

@ -18,7 +18,7 @@ import project.Runtime.Ref.Ref
from project.Data.Boolean import Boolean, False, True
from project.Data.Range.Extensions import all
from project.Errors.Common import Unsupported_Argument_Types
from project.Metadata import Choice, Widget
from project.Metadata import Choice, Display, Widget
polyglot java import java.lang.NullPointerException
polyglot java import org.enso.base.CompareException
@ -133,11 +133,10 @@ type Statistic
## PRIVATE
Bulk widget for Statistic.
bulk_widget : Widget
bulk_widget =
bulk_widget display:Display=..When_Modified -> Widget =
options = ["Count", "Minimum", "Maximum", "Sum", "Product", "Mean", "Variance", "Standard_Deviation", "Skew", "Kurtosis", "Covariance", "Pearson", "Spearman", "R_Squared"].map n-> Choice.Option n ".."+n
items = Widget.Single_Choice options
Widget.Vector_Editor items "..Count"
Widget.Vector_Editor items "..Count" display=display
## PRIVATE
Gets the order needed to compute a statistic for a moment based statistic.

View File

@ -18,7 +18,7 @@ Vector.compute self statistic:Statistic=..Count =
Arguments:
- statistics: Set of statistics to calculate.
@statistics Statistic.bulk_widget
@statistics (Statistic.bulk_widget display=..Always)
Vector.compute_bulk : Vector Statistic -> Vector Any
Vector.compute_bulk self statistics:Vector=[..Count, ..Sum] =
Statistic.compute_bulk self statistics
@ -41,7 +41,7 @@ Vector.running self statistic:Statistic=..Count =
Arguments:
- statistics: Set of statistics to calculate.
@statistics Statistic.bulk_widget
@statistics (Statistic.bulk_widget display=..Always)
Vector.running_bulk : Vector Statistic -> Vector Any
Vector.running_bulk self (statistics:(Vector Statistic)=[..Count, ..Sum]) =
Statistic.running_bulk self statistics

View File

@ -1,4 +1,5 @@
from Standard.Base import all
import Standard.Base.Errors.Common.Missing_Argument
import Standard.Base.Metadata.Display
import Standard.Base.Metadata.Widget
from Standard.Base.Metadata.Choice import Option
@ -7,9 +8,11 @@ from Standard.Base.Widget_Helpers import make_text_secret_selector
type Credentials
## Simple username and password type.
@username make_text_secret_selector
@password make_text_secret_selector
Username_And_Password username:(Text|Enso_Secret) password:(Text|Enso_Secret)
@username (make_text_secret_selector display=..Always)
@password (make_text_secret_selector display=..Always)
Username_And_Password
username:(Text|Enso_Secret)=(Missing_Argument.throw "username")
password:(Text|Enso_Secret)=(Missing_Argument.throw "password")
## PRIVATE
Override `to_text` to mask the password field.
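An illustrative construction under the new defaults: since both constructor fields now fall back to `Missing_Argument.throw`, a caller must pass both values explicitly. The values below are placeholders (plain text is shown, though either field may also be an `Enso_Secret`):

    example_credentials = Credentials.Username_And_Password username="alice" password="example-password"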

View File

@ -171,6 +171,12 @@ type Dialect
_ = [expression, context]
Unimplemented.throw "This is an interface only."
## PRIVATE
generate_collate self collation_name:Text -> Text =
_ = collation_name
Unimplemented.throw "This is an interface only."
## PRIVATE
Checks if the given aggregate is supported.
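The new `generate_collate` hook lets each backend decide how a collation is rendered in ORDER BY clauses; the concrete dialects in this diff all delegate to `Base_Generator.default_generate_collate`. A minimal sketch of a hypothetical dialect that emits the collation name unquoted (the type name and syntax are illustrative, not part of this change):

    type Unquoted_Collation_Dialect
        ## PRIVATE
        generate_collate self collation_name:Text -> Text = " COLLATE " + collation_name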

View File

@ -29,3 +29,87 @@ type Dialect_Flag
## PRIVATE
Specifies text comparisons are case sensitive by default.
Case_Sensitive_Text_Comparison
## PRIVATE
Specifies if the backend supports natural ordering operations.
Supports_Sort_Digits_As_Numbers
## PRIVATE
Specifies if the backend supports case insensitive ordering.
Case_Insensitive_Ordering
## PRIVATE
Specifies if the backend supports Unicode normalization in its default ordering.
Order_By_Unicode_Normalization_By_Default
## PRIVATE
Specifies if comparisons between values of mixed types are allowed by a
given backend. Some backends allow such comparisons, either when
mixed-type storage is allowed or by coercing to the target type; others
fail with a type error.
Allows_Mixed_Type_Comparisons
## PRIVATE
Specifies if the backend compares
strings taking Unicode Normalization into account, i.e. whether
's\u0301' is considered equal to 'ś'.
Supports_Unicode_Normalization
## PRIVATE
Specifies if the NaN value is
treated as greater than all numbers. If `False`, `NaN` is expected to
yield `False` for both `<` and `>` comparisons.
NaN_Non_Comparable
## PRIVATE
If `order_by` was
applied before, the distinct operation will return the first row from
each group. Guaranteed in the in-memory backend, but may not be
supported by all databases.
Distinct_Returns_First_Row_From_Group_If_Ordered
## PRIVATE
Specifies if the backend supports date/time operations.
Date_Time
## PRIVATE
Specifies if the backend supports setting
a length limit on text columns.
Text_Length_Limited_Columns
## PRIVATE
Specifies if the backend supports fixed
length text columns.
Fixed_Length_Text_Columns
## PRIVATE
Specifies if the backend supports
length restrictions for text columns.
Length_Restricted_Text_Columns
## PRIVATE
If `SELECT concat('X', CAST(CAST(' ' AS CHAR(3)) AS VARCHAR(3)), 'X')`
returns `XX`, this should be set to `True`.
Removes_Trailing_Whitespace_Casting_From_Char_To_Varchar
## PRIVATE
Specifies how the max size of the char
type behaves after `text_left`/`text_right`.
If `True` the original size is kept; if `False` the size is reset.
Char_Max_Size_After_Substring_Kept
## PRIVATE
Specifies if the backend supports
integer types of various sizes, like 16-bit or 32-bit integers.
Different_Size_Integer_Types
## PRIVATE
Specifies if the backend supports 8-bit
integers.
Supports_8bit_Integer
## PRIVATE
Specifies if the backend supports the `Decimal`
high-precision type.
Supports_Decimal_Type
## PRIVATE
Specifies if the backend supports a
`Duration`/`Period` type.
Supports_Time_Duration
## PRIVATE
Specifies if the backend supports
nanosecond precision in time values.
Supports_Nanoseconds_In_Time
## PRIVATE
Specifies if the backend supports mixed-type
columns.
Supports_Mixed_Columns
## PRIVATE
Specifies if the backend supports
date/time operations without a timezone (true for most Database backends).
Supports_Date_Time_Without_Timezone
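A rough sketch of how downstream code could branch on one of the new capability flags instead of hard-coding per-backend behaviour. The `flagged` accessor name is an assumption here; this diff only shows the per-dialect `Dialect_Flag -> Boolean` mappings, not the lookup method itself:

    # Hypothetical helper: run `action` only when the backend has a Decimal type,
    # otherwise fall back. Both branches are suspended arguments.
    if_supports_decimal dialect ~action ~fallback =
        if dialect.flagged Dialect_Flag.Supports_Decimal_Type then action else fallback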

View File

@ -166,7 +166,7 @@ type SQL_Generator
Nulls_Order.Last -> " NULLS LAST"
collation = case order_descriptor.collation of
Nothing -> ""
collation_name -> ' COLLATE "' + collation_name + '"'
collation_name -> dialect.generate_collate collation_name
base_expression = self.generate_expression dialect order_descriptor.expression
base_expression ++ collation ++ order_suffix ++ nulls_suffix
@ -725,6 +725,9 @@ default_fetch_types_query dialect expression context where_filter_always_false_l
empty_context = context.add_where_filters [SQL_Expression.Literal where_filter_always_false_literal]
dialect.generate_sql (Query.Select [["typed_column", expression]] empty_context)
## PRIVATE
default_generate_collate collation_name:Text -> Text = ' COLLATE "' + collation_name + '"'
## PRIVATE
Helper class for shortening the binder names generated for WITH clauses.

View File

@ -236,6 +236,9 @@ type Postgres_Dialect
prepare_fetch_types_query self expression context =
Base_Generator.default_fetch_types_query self expression context
## PRIVATE
generate_collate self collation_name:Text -> Text = Base_Generator.default_generate_collate collation_name
## PRIVATE
check_aggregate_support : Aggregate_Column -> Boolean ! Unsupported_Database_Operation
check_aggregate_support self aggregate =
@ -266,6 +269,26 @@ type Postgres_Dialect
Dialect_Flag.Supports_Case_Sensitive_Columns -> True
Dialect_Flag.Supports_Infinity -> True
Dialect_Flag.Case_Sensitive_Text_Comparison -> True
Dialect_Flag.Supports_Sort_Digits_As_Numbers -> False
Dialect_Flag.Case_Insensitive_Ordering -> True
Dialect_Flag.Order_By_Unicode_Normalization_By_Default -> True
Dialect_Flag.Allows_Mixed_Type_Comparisons -> False
Dialect_Flag.Supports_Unicode_Normalization -> False
Dialect_Flag.NaN_Non_Comparable -> False
Dialect_Flag.Distinct_Returns_First_Row_From_Group_If_Ordered -> True
Dialect_Flag.Date_Time -> True
Dialect_Flag.Text_Length_Limited_Columns -> True
Dialect_Flag.Fixed_Length_Text_Columns -> True
Dialect_Flag.Length_Restricted_Text_Columns -> True
Dialect_Flag.Removes_Trailing_Whitespace_Casting_From_Char_To_Varchar -> True
Dialect_Flag.Char_Max_Size_After_Substring_Kept -> False
Dialect_Flag.Different_Size_Integer_Types -> True
Dialect_Flag.Supports_8bit_Integer -> False
Dialect_Flag.Supports_Decimal_Type -> True
Dialect_Flag.Supports_Time_Duration -> False
Dialect_Flag.Supports_Nanoseconds_In_Time -> False
Dialect_Flag.Supports_Mixed_Columns -> False
Dialect_Flag.Supports_Date_Time_Without_Timezone -> True
## PRIVATE
The default table types to use when listing tables.
@ -555,7 +578,7 @@ make_order_descriptor internal_column sort_direction text_ordering =
Order_Descriptor.Value internal_column.expression sort_direction nulls_order=nulls collation=Nothing
_ ->
## In the future we can modify this error to suggest using a custom defined collation.
if text_ordering.sort_digits_as_numbers then Error.throw (Unsupported_Database_Operation.Error "Natural ordering") else
if text_ordering.sort_digits_as_numbers then Error.throw (Unsupported_Database_Operation.Error "sort_digits_as_numbers") else
case text_ordering.case_sensitivity of
Case_Sensitivity.Default ->
Order_Descriptor.Value internal_column.expression sort_direction nulls_order=nulls collation=Nothing

View File

@ -117,7 +117,7 @@ type SQLite_Dialect
Nothing ->
Order_Descriptor.Value internal_column.expression sort_direction collation=Nothing
_ ->
if text_ordering.sort_digits_as_numbers then Error.throw (Unsupported_Database_Operation.Error "Natural ordering") else
if text_ordering.sort_digits_as_numbers then Error.throw (Unsupported_Database_Operation.Error "sort_digits_as_numbers") else
case text_ordering.case_sensitivity of
Case_Sensitivity.Default ->
Order_Descriptor.Value internal_column.expression sort_direction collation=Nothing
@ -221,6 +221,9 @@ type SQLite_Dialect
_ = [expression, context]
Panic.throw (Illegal_State.Error "Type inference by asking the Database for the expected types is not supported in SQLite since it tended to give wrong results. This should have never been called - if it was - that is a bug in the Database library.")
## PRIVATE
generate_collate self collation_name:Text -> Text = Base_Generator.default_generate_collate collation_name
## PRIVATE
check_aggregate_support : Aggregate_Column -> Boolean ! Unsupported_Database_Operation
check_aggregate_support self aggregate = case aggregate of
@ -277,6 +280,26 @@ type SQLite_Dialect
Dialect_Flag.Supports_Case_Sensitive_Columns -> False
Dialect_Flag.Supports_Infinity -> True
Dialect_Flag.Case_Sensitive_Text_Comparison -> True
Dialect_Flag.Supports_Sort_Digits_As_Numbers -> False
Dialect_Flag.Case_Insensitive_Ordering -> False
Dialect_Flag.Order_By_Unicode_Normalization_By_Default -> False
Dialect_Flag.Allows_Mixed_Type_Comparisons -> True
Dialect_Flag.Supports_Unicode_Normalization -> False
Dialect_Flag.NaN_Non_Comparable -> True
Dialect_Flag.Distinct_Returns_First_Row_From_Group_If_Ordered -> True
Dialect_Flag.Date_Time -> False
Dialect_Flag.Text_Length_Limited_Columns -> False
Dialect_Flag.Fixed_Length_Text_Columns -> False
Dialect_Flag.Length_Restricted_Text_Columns -> False
Dialect_Flag.Removes_Trailing_Whitespace_Casting_From_Char_To_Varchar -> False
Dialect_Flag.Char_Max_Size_After_Substring_Kept -> False
Dialect_Flag.Different_Size_Integer_Types -> False
Dialect_Flag.Supports_8bit_Integer -> False
Dialect_Flag.Supports_Decimal_Type -> False
Dialect_Flag.Supports_Time_Duration -> False
Dialect_Flag.Supports_Nanoseconds_In_Time -> False
Dialect_Flag.Supports_Mixed_Columns -> False
Dialect_Flag.Supports_Date_Time_Without_Timezone -> False
## PRIVATE
The default table types to use when listing tables.

View File

@ -1,7 +1,7 @@
from Standard.Base import all
import Standard.Base.Data.Array_Proxy.Array_Proxy
import Standard.Base.Errors.Common.Missing_Argument
from Standard.Base.Metadata import make_single_choice, Widget
from Standard.Base.Metadata import Display, make_single_choice, Widget
from Standard.Base.Metadata.Choice import Option
from Standard.Base.Metadata.Widget import Single_Choice, Text_Input, Vector_Editor
@ -34,14 +34,14 @@ type Google_Analytics
- credentials: The Google credentials to use. Default is to use the
Google Application Default Credentials in the environment variables.
See https://cloud.google.com/docs/authentication/application-default-credentials
@credentials (Google_Credential.default_widget display=..Always)
@property Google_Analytics_Property.default_widget
@dimensions _make_dimensions_vector_selector
@metrics _make_metrics_vector_selector
@start_date _make_start_date_widget
@end_date (Date.default_widget include_today=True)
@credentials Google_Credential.default_widget
read : Google_Analytics_Property -> (Vector Text) -> (Vector Text) -> Date -> Date -> Google_Credential -> Table
read property:Google_Analytics_Property=(Missing_Argument.throw "property") dimensions:Vector=['country'] metrics:Vector=['activeUsers'] start_date:Date=(Date.today.previous ..Year) end_date:Date=Date.today credentials:Google_Credential=..Default -> Table =
read : Google_Credential -> Google_Analytics_Property -> (Vector Text) -> (Vector Text) -> Date -> Date -> Table
read credentials:Google_Credential=(Missing_Argument.throw "credentials") property:Google_Analytics_Property=(Missing_Argument.throw "property") dimensions:Vector=['country'] metrics:Vector=['activeUsers'] start_date:Date=(Date.today.previous ..Year) end_date:Date=Date.today -> Table =
case credentials of
Google_Credential.Sample -> _read_sample_data dimensions metrics start_date end_date
_ ->
@ -60,7 +60,7 @@ type Google_Analytics
See https://cloud.google.com/docs/authentication/application-default-credentials
- limit: The maximum number of accounts to read. Default is 1000.
- include_deleted: Whether to include deleted accounts. Default is false.
@credentials Google_Credential.default_widget
@credentials (Google_Credential.default_widget display=..Always)
@limit Rows_To_Read.default_widget
list_accounts : Google_Credential -> Rows_To_Read -> Boolean -> Vector
list_accounts credentials:Google_Credential=..Default (limit : Rows_To_Read = ..First_With_Warning 1000) include_deleted:Boolean=False -> Vector =
@ -82,11 +82,11 @@ type Google_Analytics
See https://cloud.google.com/docs/authentication/application-default-credentials
- limit: The maximum number of accounts to read. Default is 1000.
- include_deleted: Whether to include deleted accounts. Default is false.
@account Google_Analytics_Account_Filter.default_widget
@credentials Google_Credential.default_widget
@account Google_Analytics_Account_Filter.default_widget
@limit Rows_To_Read.default_widget
list_properties : Google_Analytics_Account_Filter -> Google_Credential -> Rows_To_Read -> Boolean -> Vector
list_properties account:Google_Analytics_Account_Filter=..All_Accounts credentials:Google_Credential=..Default (limit : Rows_To_Read = ..First_With_Warning 1000) include_deleted:Boolean=False -> Vector =
list_properties : Google_Credential -> Google_Analytics_Account_Filter -> Rows_To_Read -> Boolean -> Vector
list_properties credentials:Google_Credential=..Default account:Google_Analytics_Account_Filter=..All_Accounts (limit : Rows_To_Read = ..First_With_Warning 1000) include_deleted:Boolean=False -> Vector =
java_credentials = credentials.as_java
to_read = limit.rows_to_read.if_nothing 0
filter = account.as_java
@ -103,10 +103,10 @@ type Google_Analytics
- credentials: The Google credentials to use. Default is to use the
Google Application Default Credentials in the environment variables.
See https://cloud.google.com/docs/authentication/application-default-credentials
@credentials (Google_Credential.default_widget display=..Always)
@property Google_Analytics_Property.default_widget
@credentials Google_Credential.default_widget
list_metrics : Google_Analytics_Property -> Google_Credential -> Vector
list_metrics property:Google_Analytics_Property=(Missing_Argument.throw "property") credentials:Google_Credential=..Default -> Vector =
list_metrics : Google_Credential -> Google_Analytics_Property -> Vector
list_metrics credentials:Google_Credential=(Missing_Argument.throw "credentials") property:Google_Analytics_Property=(Missing_Argument.throw "property") -> Vector =
java_credentials = credentials.as_java
array = _handle_google_error <| GoogleAnalyticsReader.listMetrics java_credentials property.java_record
array.if_not_error <| array.map record-> Google_Analytics_Field.Metric record
@ -119,35 +119,36 @@ type Google_Analytics
- credentials: The Google credentials to use. Default is to use the
Google Application Default Credentials in the environment variables.
See https://cloud.google.com/docs/authentication/application-default-credentials
@credentials (Google_Credential.default_widget display=..Always)
@property Google_Analytics_Property.default_widget
@credentials Google_Credential.default_widget
list_dimensions : Google_Analytics_Property -> Google_Credential -> Vector
list_dimensions property:Google_Analytics_Property=(Missing_Argument.throw "property") credentials:Google_Credential=..Default -> Vector =
list_dimensions : Google_Credential -> Google_Analytics_Property -> Vector
list_dimensions credentials:Google_Credential=(Missing_Argument.throw "credentials") property:Google_Analytics_Property=(Missing_Argument.throw "property") -> Vector =
java_credentials = credentials.as_java
array = _handle_google_error <| GoogleAnalyticsReader.listDimensions java_credentials property.java_record
array.if_not_error <| array.map record-> Google_Analytics_Field.Dimension record
## PRIVATE
private _make_metrics_vector_selector self_arg cache -> Widget =
_ = self_arg
items = Panic.catch Any handler=p->[p.payload.to_display_text] <|
property = cache.if_not_nothing <| cache "property"
property.if_not_nothing <|
credentials = cache.if_not_nothing <| cache "credentials"
Google_Analytics.list_metrics property credentials . map f-> Option (f.apiName + " (" + f.category + ")") f.apiName.pretty
item_editor = make_single_choice (items.if_nothing ['activeUsers', 'bounceRate', 'conversions', 'newUsers', 'sessionsPerUser', 'userConversionRate'])
Vector_Editor item_editor=item_editor item_default=item_editor.values.first.value display=..Always
## PRIVATE
private _make_dimensions_vector_selector self_arg cache -> Widget =
private _make_metrics_vector_selector self_arg cache display:Display=..Always -> Widget =
_ = self_arg
items = Panic.catch Any handler=p->[p.payload.to_display_text] <|
property = cache.if_not_nothing <| cache "property"
property.if_not_nothing <|
credentials = cache.if_not_nothing <| cache "credentials"
Google_Analytics.list_dimensions property credentials . map f-> Option (f.api_name + " (" + f.category + ")") f.api_name.pretty
Google_Analytics.list_metrics (credentials.if_nothing Google_Credential.Default) property . map f-> Option (f.api_name + " (" + f.category + ")") f.api_name.pretty
item_editor = make_single_choice (items.if_nothing ['activeUsers', 'bounceRate', 'conversions', 'newUsers', 'sessionsPerUser', 'userConversionRate'])
Widget.Vector_Editor item_editor=item_editor item_default=item_editor.values.first.value display=display
## PRIVATE
private _make_dimensions_vector_selector self_arg cache display:Display=..Always -> Widget =
_ = self_arg
items = Panic.catch Any handler=p->[p.payload.to_display_text] <|
property = cache.if_not_nothing <| cache "property"
property.if_not_nothing <|
credentials = cache.if_not_nothing <| cache "credentials"
Google_Analytics.list_dimensions (credentials.if_nothing Google_Credential.Default) property . map f-> Option (f.api_name + " (" + f.category + ")") f.api_name.pretty
item_editor = make_single_choice (items.if_nothing ['country', 'year', 'month', 'date', 'userAgeBracket', 'userGender'])
Vector_Editor item_editor=item_editor item_default=item_editor.values.first.value display=..Always
Vector_Editor item_editor=item_editor item_default=item_editor.values.first.value display=display
## PRIVATE
private _make_start_date_widget -> Widget =
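With `credentials` promoted to the first argument of `read`, `list_properties`, `list_metrics` and `list_dimensions`, a call now leads with the credential. A sketch using named arguments (`my_property` is a placeholder for a `Google_Analytics_Property`, e.g. one returned by `list_properties`):

    # Illustrative only; argument values are placeholders.
    example_read my_property =
        Google_Analytics.read credentials=..Default property=my_property dimensions=['country'] metrics=['activeUsers']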

View File

@ -42,7 +42,8 @@ type Google_Analytics_Account
to_display_text : Text
to_display_text self = "GA Account {" + self.name + " (" + self.id + ")}"
## ICON data_input
## GROUP Standard.Base.Metadata
ICON data_input
List of all properties of the account.
Arguments:
@ -53,8 +54,8 @@ type Google_Analytics_Account
@credentials Google_Credential.default_widget
@limit Rows_To_Read.default_widget
properties : Google_Credential -> Rows_To_Read -> Boolean -> Vector
properties self credential:Google_Credential=..Default (limit : Rows_To_Read = ..First_With_Warning 1000) include_deleted:Boolean=False =
Google_Analytics.list_properties self credential limit include_deleted
properties self credentials:Google_Credential=..Default (limit : Rows_To_Read = ..First_With_Warning 1000) include_deleted:Boolean=False =
Google_Analytics.list_properties credentials self limit include_deleted
## Filter for Google Analytics accounts.
type Google_Analytics_Account_Filter

Some files were not shown because too many files have changed in this diff.