1
1
mirror of https://github.com/tweag/nickel.git synced 2024-10-06 08:07:37 +03:00

Type-based completion in the new completer (#1577)

* Collect types into a table

* Type-based completion

This also adds a feature flag for turning off the old completer.

* Remove bogus file

* Explicitly visit type annotations

* Add a test for annotated types

* Only do one linearization traversal

* Fix docs, and avoid some clones

* Combine, and resolve metadata

* Add a test

* Slight improvements to the lsp spec

* Refactor

* Remove commented-out code

* Remove the confusing initial-env thing

* Complete the refactor, and document a little better

* Fix some rebase issues

* Clean up some unused code

* Remove obsolete doc

* Review comments
This commit is contained in:
jneem 2023-09-15 14:19:45 -05:00 committed by GitHub
parent 508c492bfd
commit b2459d3172
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 496 additions and 395 deletions

View File

@ -2,6 +2,7 @@
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use std::{
borrow::Borrow,
fmt::{self, Debug},
hash::Hash,
};
@ -173,6 +174,12 @@ impl Hash for LocIdent {
}
}
impl Borrow<Ident> for LocIdent {
fn borrow(&self) -> &Ident {
&self.ident
}
}
impl fmt::Display for LocIdent {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.label())

View File

@ -16,59 +16,17 @@
//! into a temporary [Linearizer::Building] structure and linearize them into a
//! [Linearizer::Completed] Linearization.
//! Additionally handles registration in different scopes.
//! - [Linearization]: Linearization in a given state.
//! The state holds context while building or the finalized linearization
//! - [StubHost]: The purpose of this is to do nothing. It serves as an implementation used
//! outside the LSP context meaning to cause as little runtime impact as possible.
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use super::UnifType;
use crate::term::RichTerm;
use crate::{identifier::LocIdent, term::record::Field};
/// Holds the state of a linearization, either in progress or finalized
/// Restricts the possible states of a linearization to entities marked
/// as [LinearizationState]
pub struct Linearization<S: LinearizationState> {
state: S,
}
impl<S: LinearizationState> Linearization<S> {
pub fn into_inner(self) -> S {
self.state
}
}
impl<S: LinearizationState> Deref for Linearization<S> {
type Target = S;
fn deref(&self) -> &Self::Target {
&self.state
}
}
impl<S: LinearizationState> DerefMut for Linearization<S> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.state
}
}
/// Constructors for different phases
impl Linearization<Uninit> {
pub fn new<S: LinearizationState>(state: S) -> Linearization<S> {
Linearization { state }
}
}
pub struct Uninit;
/// Marker trait for possible states of the linearization
pub trait LinearizationState {}
impl LinearizationState for () {}
impl LinearizationState for Uninit {}
/// The linearizer trait is what is referred to during typechecking.
/// It is the interface to recording terms (while tracking their scope)
/// and finalizing a linearization using generically defined external information
@ -77,8 +35,8 @@ impl LinearizationState for Uninit {}
/// `L`: The data type available during build
/// `S`: Type of external state passed into the linearization
pub trait Linearizer {
type Building: LinearizationState;
type Completed: LinearizationState + Default;
type Building;
type Completed: Default;
type ItemId: Copy;
type CompletionExtra;
@ -96,7 +54,7 @@ pub trait Linearizer {
/// location. In this case, the LSP currently ignores it.
fn add_term(
&mut self,
_lin: &mut Linearization<Self::Building>,
_lin: &mut Self::Building,
_term: &RichTerm,
_ty: UnifType,
) -> Option<Self::ItemId>;
@ -109,16 +67,10 @@ pub trait Linearizer {
///
/// In practice this mainly includes environment information. Providing this state is the
/// responsibility of [Linearizer::scope]
fn add_field_metadata(&mut self, _lin: &mut Linearization<Self::Building>, _field: &Field) {}
fn add_field_metadata(&mut self, _lin: &mut Self::Building, _field: &Field) {}
/// Allows to amend the type of an ident in scope
fn retype_ident(
&mut self,
_lin: &mut Linearization<Self::Building>,
_ident: &LocIdent,
_new_type: UnifType,
) {
}
fn retype_ident(&mut self, _lin: &mut Self::Building, _ident: &LocIdent, _new_type: UnifType) {}
/// Allows to amend the type of an item.
///
@ -126,7 +78,7 @@ pub trait Linearizer {
/// optional as well. If the item id is `None`, `retype` simply does nothing.
fn retype(
&mut self,
_lin: &mut Linearization<Self::Building>,
_lin: &mut Self::Building,
_item_id: Option<Self::ItemId>,
_new_type: UnifType,
) {
@ -135,17 +87,11 @@ pub trait Linearizer {
/// Defines how to turn a [Self::Building] Linearization of the tracked type into
/// a [Self::Completed] linearization.
/// By default creates an entirely empty [Self::Completed] object
fn complete(
self,
_lin: Linearization<Self::Building>,
_extra: Self::CompletionExtra,
) -> Linearization<Self::Completed>
fn complete(self, _lin: Self::Building, _extra: &Self::CompletionExtra) -> Self::Completed
where
Self: Sized,
{
Linearization {
state: Self::Completed::default(),
}
Self::Completed::default()
}
/// Ensures the scope structure of the source can be represented in the
@ -170,8 +116,8 @@ pub trait Linearizer {
pub struct StubHost<B = (), C = (), E = ()>(PhantomData<B>, PhantomData<C>, PhantomData<E>);
impl<B, C, E> Linearizer for StubHost<B, C, E>
where
B: LinearizationState + Default,
C: LinearizationState + Default,
B: Default,
C: Default,
{
type Building = B;
type Completed = C;
@ -180,7 +126,7 @@ where
fn add_term(
&mut self,
_lin: &mut Linearization<Self::Building>,
_lin: &mut Self::Building,
_term: &RichTerm,
_ty: UnifType,
) -> Option<()> {

View File

@ -77,7 +77,7 @@ use std::{
num::NonZeroU16,
};
use self::linearization::{Linearization, Linearizer, StubHost};
use self::linearization::{Linearizer, StubHost};
mod destructuring;
pub mod error;
@ -1257,14 +1257,8 @@ pub fn type_check(
initial_ctxt: Context,
resolver: &impl ImportResolver,
) -> Result<Wildcards, TypecheckError> {
type_check_linearize(
t,
initial_ctxt,
resolver,
StubHost::<(), (), _>::new(),
Linearization::new(()),
)
.map(|(wildcards, _)| wildcards)
type_check_linearize(t, initial_ctxt, resolver, StubHost::<(), (), _>::new(), ())
.map(|(wildcards, _)| wildcards)
}
/// Typecheck a term and build its linearization. A linearization is a sequential data structure
@ -1277,7 +1271,7 @@ pub fn type_check_linearize<LL>(
initial_ctxt: Context,
resolver: &impl ImportResolver,
mut linearizer: LL,
mut building: Linearization<LL::Building>,
mut building: LL::Building,
) -> Result<(Wildcards, LL::Completed), TypecheckError>
where
LL: Linearizer<CompletionExtra = Extra>,
@ -1309,7 +1303,7 @@ where
names,
wildcards: result.clone(),
};
let lin = linearizer.complete(building, extra).into_inner();
let lin = linearizer.complete(building, &extra);
Ok((result, lin))
}
@ -1319,7 +1313,7 @@ where
fn walk<L: Linearizer>(
state: &mut State,
mut ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
mut linearizer: L,
rt: &RichTerm,
) -> Result<(), TypecheckError> {
@ -1497,7 +1491,7 @@ fn walk<L: Linearizer>(
fn walk_type<L: Linearizer>(
state: &mut State,
ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
mut linearizer: L,
ty: &Type,
) -> Result<(), TypecheckError> {
@ -1529,7 +1523,7 @@ fn walk_type<L: Linearizer>(
fn walk_rrows<L: Linearizer>(
state: &mut State,
ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
mut linearizer: L,
rrows: &RecordRows,
) -> Result<(), TypecheckError> {
@ -1549,7 +1543,7 @@ fn walk_rrows<L: Linearizer>(
fn walk_field<L: Linearizer>(
state: &mut State,
ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
mut linearizer: L,
field: &Field,
) -> Result<(), TypecheckError> {
@ -1568,7 +1562,7 @@ fn walk_field<L: Linearizer>(
fn walk_annotated<L: Linearizer>(
state: &mut State,
ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
linearizer: L,
annot: &TypeAnnotation,
rt: &RichTerm,
@ -1581,7 +1575,7 @@ fn walk_annotated<L: Linearizer>(
fn walk_with_annot<L: Linearizer>(
state: &mut State,
ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
mut linearizer: L,
annot: &TypeAnnotation,
value: Option<&RichTerm>,
@ -1661,7 +1655,7 @@ fn walk_with_annot<L: Linearizer>(
fn check<L: Linearizer>(
state: &mut State,
ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
linearizer: L,
rt: &RichTerm,
ty: UnifType,
@ -1676,7 +1670,7 @@ fn check<L: Linearizer>(
fn check_visited<L: Linearizer>(
state: &mut State,
mut ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
mut linearizer: L,
rt: &RichTerm,
ty: UnifType,
@ -2098,7 +2092,7 @@ pub fn subsumption(
fn check_field<L: Linearizer>(
state: &mut State,
ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
mut linearizer: L,
id: LocIdent,
field: &Field,
@ -2138,7 +2132,7 @@ fn check_field<L: Linearizer>(
fn infer_annotated<L: Linearizer>(
state: &mut State,
ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
linearizer: L,
annot: &TypeAnnotation,
rt: &RichTerm,
@ -2159,7 +2153,7 @@ fn infer_annotated<L: Linearizer>(
fn infer_with_annot<L: Linearizer>(
state: &mut State,
ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
mut linearizer: L,
annot: &TypeAnnotation,
value: Option<&RichTerm>,
@ -2239,7 +2233,7 @@ fn infer_with_annot<L: Linearizer>(
fn infer<L: Linearizer>(
state: &mut State,
ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
linearizer: L,
rt: &RichTerm,
) -> Result<UnifType, TypecheckError> {
@ -2255,7 +2249,7 @@ fn infer<L: Linearizer>(
fn infer_visited<L: Linearizer>(
state: &mut State,
mut ctxt: Context,
lin: &mut Linearization<L::Building>,
lin: &mut L::Building,
mut linearizer: L,
rt: &RichTerm,
item_id: Option<L::ItemId>,

View File

@ -15,8 +15,9 @@ name = "nls"
path = "src/main.rs"
[features]
default = ["format"]
default = ["format", "old-completer"]
format = ["nickel-lang-core/format"]
old-completer = []
[build-dependencies]
lalrpop.workspace = true

View File

@ -7,10 +7,11 @@ use nickel_lang_core::{
cache::{Cache, CacheError, CacheOp, EntryState, SourcePath, TermEntry},
error::{Error, ImportError},
position::RawPos,
typecheck::{self, linearization::Linearization},
typecheck::{self},
};
use crate::linearization::{building::Building, AnalysisHost, Environment, LinRegistry};
use crate::linearization::{CombinedLinearizer, TypeCollector};
pub trait CacheExt {
fn typecheck_with_analysis(
@ -64,16 +65,24 @@ impl CacheExt for Cache {
Ok(CacheOp::Cached(()))
} else if *state >= EntryState::Parsed {
let host = AnalysisHost::new(file_id, initial_env.clone());
let building = Linearization::new(Building {
let types = TypeCollector::default();
let lin = CombinedLinearizer(host, types);
let building = Building {
lin_registry,
linearization: Vec::new(),
import_locations: HashMap::new(),
cache: self,
});
let (_, linearized) =
typecheck::type_check_linearize(term, initial_ctxt.clone(), self, host, building)
.map_err(|err| vec![Error::TypecheckError(err)])?;
lin_registry.insert(file_id, linearized, term);
};
let (_, (linearized, type_lookups)) = typecheck::type_check_linearize(
term,
initial_ctxt.clone(),
self,
lin,
(building, HashMap::new()),
)
.map_err(|err| vec![Error::TypecheckError(err)])?;
lin_registry.insert(file_id, linearized, type_lookups, term);
self.update_state(file_id, EntryState::Typechecked);
Ok(CacheOp::Done(()))
} else {

View File

@ -1,87 +1,93 @@
use std::collections::{hash_map::Entry, HashMap};
use lsp_types::{CompletionItem, CompletionItemKind, Documentation, MarkupContent, MarkupKind};
use nickel_lang_core::{
identifier::Ident,
pretty::ident_quoted,
term::{record::FieldMetadata, BinaryOp, RichTerm, Term, UnaryOp},
typ::TypeF,
term::{
record::{Field, FieldMetadata, RecordData},
BinaryOp, RichTerm, Term, TypeAnnotation, UnaryOp,
},
typ::{RecordRows, RecordRowsIteratorItem, Type, TypeF},
};
use crate::{identifier::LocIdent, server::Server, usage::Environment};
use crate::{identifier::LocIdent, server::Server};
/// A term and a path.
/// A `FieldHaver` is something that... has fields.
///
/// This is morally equivalent to (but a more convenient representation than)
/// `Op1(StaticAccess("field2"), Op1(StaticAccess("field1"), term))`.
/// You can use a [`FieldResolver`] to resolve terms, or terms with paths, to `FieldHaver`s.
#[derive(Clone, Debug, PartialEq)]
pub struct TermAtPath {
pub term: RichTerm,
/// A path of identifiers, in left-to-right order.
///
/// So, for `term.x.y.z`, this will be `vec!["x", "y", "z"]`.
pub path: Vec<Ident>,
pub enum FieldHaver {
RecordTerm(RecordData),
Dict(Type),
RecordType(RecordRows),
}
impl TermAtPath {}
impl FieldHaver {
/// If this `FieldHaver` has a field named `id`, returns its value.
fn get(&self, id: Ident) -> Option<FieldContent> {
match self {
FieldHaver::RecordTerm(data) => data
.fields
.get(&id)
.map(|field| FieldContent::RecordField(field.clone())),
FieldHaver::Dict(ty) => Some(FieldContent::Type(ty.clone())),
FieldHaver::RecordType(rows) => rows.row_find_path(&[id]).map(FieldContent::Type),
}
}
impl From<RichTerm> for TermAtPath {
fn from(term: RichTerm) -> Self {
Self {
term,
path: Vec::new(),
/// Returns all fields in this `FieldHaver`, rendered as LSP completion items.
pub fn completion_items(&self) -> impl Iterator<Item = CompletionItem> + '_ {
match self {
FieldHaver::RecordTerm(data) => {
let iter = data.fields.iter().map(|(id, val)| CompletionItem {
label: ident_quoted(id),
detail: metadata_detail(&val.metadata),
kind: Some(CompletionItemKind::Property),
documentation: metadata_doc(&val.metadata),
..Default::default()
});
Box::new(iter)
}
FieldHaver::Dict(_) => Box::new(std::iter::empty()) as Box<dyn Iterator<Item = _>>,
FieldHaver::RecordType(rows) => {
let iter = rows.iter().filter_map(|r| match r {
RecordRowsIteratorItem::TailDyn => None,
RecordRowsIteratorItem::TailVar(_) => None,
RecordRowsIteratorItem::Row(r) => Some(CompletionItem {
label: ident_quoted(&r.id),
kind: Some(CompletionItemKind::Property),
detail: Some(r.typ.to_string()),
..Default::default()
}),
});
Box::new(iter)
}
}
}
}
/// The position at which a name is defined (possibly also with a value).
/// [`FieldHaver`]s can have fields that are either record fields or types.
#[derive(Clone, Debug, PartialEq)]
pub struct Def {
/// The identifier at the definition site.
pub ident: LocIdent,
/// The value assigned by the definition, if there is one. If the definition
/// was made by a `let` binding, there will be a value; if it was made in a
/// function definition, there will not be a value.
///
/// For example, in `{ foo = 1 }`, this will point at the `1`.
pub value: Option<RichTerm>,
/// Field metadata.
pub metadata: Option<FieldMetadata>,
enum FieldContent {
RecordField(Field),
Type(Type),
}
impl Def {
fn doc(&self) -> Option<Documentation> {
let doc = self.metadata.as_ref()?.doc.as_ref()?;
Some(Documentation::MarkupContent(MarkupContent {
kind: MarkupKind::Markdown,
value: doc.clone(),
}))
}
fn metadata_doc(m: &FieldMetadata) -> Option<Documentation> {
let doc = m.doc.as_ref()?;
Some(Documentation::MarkupContent(MarkupContent {
kind: MarkupKind::Markdown,
value: doc.clone(),
}))
}
// If the field is annotated, returns its type annotation (preferred) or its
// contract annotation (fallback).
fn detail(&self) -> Option<String> {
self.metadata
.as_ref()
.and_then(|FieldMetadata { annotation, .. }| {
annotation
.typ
.as_ref()
.map(|ty| ty.typ.to_string())
.or_else(|| annotation.contracts_to_string())
})
}
/// Creates a completion item from this definition.
pub fn to_completion_item(&self) -> CompletionItem {
CompletionItem {
label: ident_quoted(&self.ident.into()),
detail: self.detail(),
kind: Some(CompletionItemKind::Property),
documentation: self.doc(),
..Default::default()
}
}
// If the field is annotated, returns its type annotation (preferred) or its
// contract annotation (fallback).
fn metadata_detail(m: &FieldMetadata) -> Option<String> {
m.annotation
.typ
.as_ref()
.map(|ty| ty.typ.to_string())
.or_else(|| m.annotation.contracts_to_string())
}
/// A definition whose value might need to be accessed through a path.
@ -101,89 +107,89 @@ impl Def {
pub struct DefWithPath {
/// The identifier at the definition site.
pub ident: LocIdent,
/// The value assigned by the definition, if there is one.
/// The value assigned by the definition, if there is one. If the definition
/// was made by a `let` binding, there will be a value; if it was made in a
/// function definition, there will not be a value.
///
/// For example, in `{ foo = 1 }`, this could point at the `1`.
///
/// Because of pattern bindings, there could also be a path associated with
/// the value. For example, in
///
/// ```text
/// let { a = { b } } = val in ...
/// ```
///
/// the name `b` is bound to the term `val` at the path `[a, b]`.
pub value: Option<TermAtPath>,
/// Field metadata.
/// For example, in `{ foo = 1 }`, this will point at the `1`.
pub value: Option<RichTerm>,
/// The path within the value that this binding refers to.
pub path: Vec<Ident>,
pub metadata: Option<FieldMetadata>,
}
#[cfg(test)]
impl DefWithPath {
fn resolve_terms(&self, env: &Environment, server: &Server) -> Vec<Def> {
match &self.value {
Some(val) if !val.path.is_empty() => {
// Calling `resolve_path` on x with path [foo, bar] returns all
// the fields *in* x.foo.bar. We want the value(s) of x.foo.bar,
// so use `resolve_path` to get x.foo and then find the bars in it.
// unwrap: we just checked the path is non-empty
let (last, path) = val.path.split_last().unwrap();
FieldDefs::resolve_path(&val.term, path, env, server)
.fields
.remove(last)
.unwrap_or_default()
}
_ => {
vec![Def {
ident: self.ident,
value: self.value.as_ref().map(|tp| tp.term.clone()),
metadata: self.metadata.clone(),
}]
}
}
pub fn path(&self) -> &[Ident] {
&self.path
}
pub fn value(&self) -> Option<&RichTerm> {
self.value.as_ref()
}
}
/// A map from identifiers to the defs that they refer to.
#[derive(Clone, Debug, Default)]
pub struct FieldDefs {
fields: HashMap<Ident, Vec<Def>>,
/// Contains the context needed to resolve fields.
#[derive(Clone)]
pub struct FieldResolver<'a> {
server: &'a Server,
}
impl FieldDefs {
/// Resolve a record path iteratively, returning all the fields defined on the final path element.
impl<'a> FieldResolver<'a> {
pub fn new(server: &'a Server) -> Self {
Self { server }
}
/// Resolve a record path iteratively.
///
/// `env` is an environment that only gets used (and even then, only
/// as a fallback) for the first layer of variable resolutions. After
/// that, variables are resolved using the precomputed usage tables. This
/// mechanism allows providing an initial environment for input that doesn't
/// parse, and hence doesn't exist in the precomputed usage tables.
///
/// For example, in `let x = ... in let y = x in [ y.` we will rely on the
/// initial environment to resolve the `y` in `[ y.`, and after that we will
/// use the precomputed tables to resolve the `x`.
pub fn resolve_path<'a>(
rt: &'a RichTerm,
mut path: &'a [Ident],
env: &Environment,
server: &Server,
) -> Self {
let mut fields = FieldDefs::resolve(rt, env, server);
/// Returns all the field-having objects that the final path element refers to.
pub fn resolve_term_path(&self, rt: &RichTerm, mut path: &[Ident]) -> Vec<FieldHaver> {
let mut fields = self.resolve_term(rt);
while let Some((id, tail)) = path.split_first() {
path = tail;
let defs = fields.fields.remove(id).unwrap_or_default();
fields.fields.clear();
let values = fields
.iter()
.filter_map(|haver| haver.get(*id))
.collect::<Vec<_>>();
fields.clear();
for rt in defs.into_iter().filter_map(|d| d.value) {
fields = fields.merge_from(FieldDefs::resolve(&rt, env, server));
for value in values {
match value {
FieldContent::RecordField(field) => {
if let Some(val) = &field.value {
fields.extend_from_slice(&self.resolve_term(val))
}
fields.extend(self.resolve_annot(&field.metadata.annotation));
}
FieldContent::Type(ty) => {
fields.extend_from_slice(&self.resolve_type(&ty));
}
}
}
}
fields
}
pub fn defs(&self) -> impl Iterator<Item = &Def> {
self.fields.values().flat_map(|defs| defs.iter())
fn resolve_def_with_path(&self, def: &DefWithPath) -> Vec<FieldHaver> {
let mut fields = Vec::new();
if let Some(val) = &def.value {
fields.extend_from_slice(&self.resolve_term_path(val, &def.path))
}
if let Some(meta) = &def.metadata {
fields.extend(self.resolve_annot(&meta.annotation));
}
fields
}
fn resolve_annot(&'a self, annot: &'a TypeAnnotation) -> impl Iterator<Item = FieldHaver> + 'a {
annot
.contracts
.iter()
.chain(annot.typ.iter())
.flat_map(|lty| self.resolve_type(&lty.typ).into_iter())
}
/// Find all the fields that are defined on a term.
@ -191,104 +197,62 @@ impl FieldDefs {
/// This is a best-effort thing; it doesn't do full evaluation but it has some reasonable
/// heuristics. For example, it knows that the fields defined on a merge of two records
/// are the fields defined on either record.
///
/// `env` is an environment used only for the initial resolutions; see [`Self::resolve_path`]
fn resolve(rt: &RichTerm, env: &Environment, server: &Server) -> FieldDefs {
match rt.term.as_ref() {
fn resolve_term(&self, rt: &RichTerm) -> Vec<FieldHaver> {
let term_fields = match rt.term.as_ref() {
Term::Record(data) | Term::RecRecord(data, ..) => {
let fields = data
.fields
.iter()
.map(|(&ident, field)| {
(
ident.ident(),
vec![Def {
ident: ident.into(),
value: field.value.clone().map(From::from),
metadata: Some(field.metadata.clone()),
}],
)
})
.collect();
FieldDefs { fields }
vec![FieldHaver::RecordTerm(data.clone())]
}
Term::Var(id) => server
Term::Var(id) => self
.server
.lin_registry
.get_def(&(*id).into())
.or_else(|| env.get(&id.ident()))
.map(|def| {
log::info!("got def {def:?}");
// The definition of this identifier is unlikely to belong to the
// environment we started with, especially because the environment
// mechanism is only used for providing definitions to incompletely
// parsed input.
let env = Environment::new();
let defs = def.resolve_terms(&env, server);
let terms = defs.iter().filter_map(|def| def.value.as_ref());
FieldDefs::resolve_all(terms, &env, server)
self.resolve_def_with_path(def)
})
.unwrap_or_default(),
Term::ResolvedImport(file_id) => {
let env = Environment::new();
server
.cache
.get_ref(*file_id)
.map(|term| FieldDefs::resolve(term, &env, server))
.unwrap_or_default()
}
Term::ResolvedImport(file_id) => self
.server
.cache
.get_ref(*file_id)
.map(|term| self.resolve_term(term))
.unwrap_or_default(),
Term::Op2(BinaryOp::Merge(_), t1, t2) => {
FieldDefs::resolve(t1, env, server).merge_from(FieldDefs::resolve(t2, env, server))
}
Term::Let(_, _, body, _) | Term::LetPattern(_, _, _, body) => {
FieldDefs::resolve(body, env, server)
combine(self.resolve_term(t1), self.resolve_term(t2))
}
Term::Let(_, _, body, _) | Term::LetPattern(_, _, _, body) => self.resolve_term(body),
Term::Op1(UnaryOp::StaticAccess(id), term) => {
FieldDefs::resolve_path(term, &[id.ident()], env, server)
self.resolve_term_path(term, &[id.ident()])
}
Term::Annotated(annot, term) => {
// We only check the annotated contracts, not the annotated type.
// (Type-based information comes from the inferred types, which
// already account for annotated types.)
let terms = annot.contracts.iter().filter_map(|ty| {
// TODO: support Dict and Record types also. For now, this is
// enough for completion on basic contract annotations like
// (x | { foo | Number }).fo
if let TypeF::Flat(rt) = &ty.typ.typ {
Some(rt)
} else {
None
}
});
terms.fold(FieldDefs::resolve(term, env, server), |acc, rt| {
acc.merge_from(FieldDefs::resolve(rt, env, server))
})
let defs = self.resolve_annot(annot);
defs.chain(self.resolve_term(term)).collect()
}
_ => Default::default(),
};
let typ_fields = if let Some(typ) = self.server.lin_registry.get_type(rt) {
log::info!("got inferred type {typ:?}");
self.resolve_type(typ)
} else {
Vec::new()
};
combine(term_fields, typ_fields)
}
fn resolve_type(&self, typ: &Type) -> Vec<FieldHaver> {
match &typ.typ {
TypeF::Record(rows) => vec![FieldHaver::RecordType(rows.clone())],
TypeF::Dict { type_fields, .. } => vec![FieldHaver::Dict(type_fields.as_ref().clone())],
TypeF::Flat(rt) => self.resolve_term(rt),
_ => Default::default(),
}
}
fn merge_from(mut self, other: FieldDefs) -> FieldDefs {
for (ident, defs) in other.fields {
match self.fields.entry(ident) {
Entry::Occupied(oc) => {
oc.into_mut().extend_from_slice(&defs);
}
Entry::Vacant(vac) => {
vac.insert(defs);
}
}
}
self
}
fn resolve_all<'a>(
terms: impl Iterator<Item = &'a RichTerm>,
env: &Environment,
server: &Server,
) -> FieldDefs {
terms.fold(FieldDefs::default(), |acc, term| {
acc.merge_from(FieldDefs::resolve(term, env, server))
})
}
}
fn combine<T>(mut left: Vec<T>, mut right: Vec<T>) -> Vec<T> {
left.append(&mut right);
left
}

View File

@ -15,7 +15,11 @@ use nickel_lang_core::{
transform::import_resolution,
};
use crate::{files::typecheck, server::Server};
use crate::{
files::typecheck,
server::Server,
usage::{Environment, UsageLookup},
};
// Take a bunch of tokens and the end of a possibly-delimited sequence, and return the
// index of the beginning of the possibly-delimited sequence. The sequence might not
@ -118,7 +122,11 @@ fn resolve_imports(rt: RichTerm, server: &mut Server) -> RichTerm {
///
/// For example, if the input is `let foo = bar.something.`, we will return
/// `bar.something` (but parsed, of course).
pub fn parse_path_from_incomplete_input(range: RawSpan, server: &mut Server) -> Option<RichTerm> {
pub fn parse_path_from_incomplete_input(
range: RawSpan,
env: &Environment,
server: &mut Server,
) -> Option<RichTerm> {
let text = server.cache.files().source(range.src_id);
let subtext = &text[range.start.to_usize()..range.end.to_usize()];
@ -152,7 +160,13 @@ pub fn parse_path_from_incomplete_input(range: RawSpan, server: &mut Server) ->
.replace_string(SourcePath::Snippet(path), to_parse);
match server.cache.parse_nocache(file_id) {
Ok((rt, _errors)) => Some(resolve_imports(rt, server)),
Ok((rt, _errors)) => {
server
.lin_registry
.usage_lookups
.insert(file_id, UsageLookup::new_with_env(&rt, env));
Some(resolve_imports(rt, server))
}
Err(_) => None,
}
}

View File

@ -8,7 +8,7 @@ use nickel_lang_core::{
position::TermPos,
term::{record::Field, IndexMap, RichTerm},
typ::TypeF,
typecheck::{linearization::LinearizationState, UnifType},
typecheck::UnifType,
};
use crate::linearization::interface::{TermKind, UsageState};
@ -18,7 +18,6 @@ use super::{
Environment, ItemId, LinRegistry, LinearizationItem,
};
/// A concrete [LinearizationState]
/// Holds any inner datatype that can be used as stable resource
/// while recording terms.
pub struct Building<'a> {
@ -315,5 +314,3 @@ impl<'b> Building<'b> {
self.linearization.len()
}
}
impl<'a> LinearizationState for Building<'a> {}

View File

@ -4,7 +4,6 @@ use codespan::FileId;
use nickel_lang_core::{
position::{RawPos, TermPos},
term::{record::FieldMetadata, SharedTerm, Term},
typecheck::linearization::LinearizationState,
};
use super::{
@ -200,5 +199,3 @@ impl Completed {
(item.ty.to_owned(), extra)
}
}
impl LinearizationState for Completed {}

View File

@ -10,15 +10,12 @@ use nickel_lang_core::{
RichTerm, Term, UnaryOp,
},
typ::TypeF,
typecheck::{
linearization::{Linearization, Linearizer},
reporting::NameReg,
UnifType,
},
typecheck::{linearization::Linearizer, reporting::NameReg, UnifType},
};
use crate::{
field_walker::DefWithPath, identifier::LocIdent, position::PositionLookup, usage::UsageLookup,
field_walker::DefWithPath, identifier::LocIdent, position::PositionLookup, term::RichTermPtr,
usage::UsageLookup,
};
use self::{
@ -46,6 +43,7 @@ pub struct LinRegistry {
// which point we'll rename `LinRegistry` (and probably just have one HashMap<FileId, everything>)
pub position_lookups: HashMap<FileId, PositionLookup>,
pub usage_lookups: HashMap<FileId, UsageLookup>,
pub type_lookups: HashMap<RichTermPtr, Type>,
}
impl LinRegistry {
@ -53,11 +51,19 @@ impl LinRegistry {
Self::default()
}
pub fn insert(&mut self, file_id: FileId, linearization: Completed, term: &RichTerm) {
pub fn insert(
&mut self,
file_id: FileId,
linearization: Completed,
type_lookups: HashMap<RichTermPtr, Type>,
term: &RichTerm,
) {
self.map.insert(file_id, linearization);
self.position_lookups
.insert(file_id, PositionLookup::new(term));
self.usage_lookups.insert(file_id, UsageLookup::new(term));
self.type_lookups.extend(type_lookups);
}
/// Look for the linearization corresponding to an item's id, and return the corresponding item
@ -76,6 +82,10 @@ impl LinRegistry {
let file = rt.pos.as_opt_ref()?.src_id;
self.usage_lookups.get(&file)?.env(rt)
}
pub fn get_type(&self, rt: &RichTerm) -> Option<&Type> {
self.type_lookups.get(&RichTermPtr(rt.clone()))
}
}
#[derive(PartialEq, Copy, Debug, Clone, Eq, Hash)]
@ -149,7 +159,7 @@ impl<'a> AnalysisHost<'a> {
}
}
fn next_id(&self, lin: &Linearization<Building>) -> ItemId {
fn next_id(&self, lin: &Building) -> ItemId {
ItemId {
file_id: self.file,
index: lin.next_id(),
@ -168,7 +178,7 @@ impl<'a> AnalysisHost<'a> {
// Panic if `rt` is neither a let/let pattern nor a fun/fun pattern.
fn setup_decl(
&mut self,
lin: &mut Linearization<Building>,
lin: &mut Building,
rt: &RichTerm,
ty: &UnifType,
pos: TermPos,
@ -208,12 +218,7 @@ impl<'a> Linearizer for AnalysisHost<'a> {
type CompletionExtra = Extra;
type ItemId = ItemId;
fn add_term(
&mut self,
lin: &mut Linearization<Building>,
rt: &RichTerm,
ty: UnifType,
) -> Option<ItemId> {
fn add_term(&mut self, lin: &mut Building, rt: &RichTerm, ty: UnifType) -> Option<ItemId> {
let pos = rt.pos;
let term = rt.term.as_ref();
debug!("adding term: {:?} @ {:?}", term, pos);
@ -532,7 +537,7 @@ impl<'a> Linearizer for AnalysisHost<'a> {
Some(main_id)
}
fn add_field_metadata(&mut self, _lin: &mut Linearization<Building>, field: &Field) {
fn add_field_metadata(&mut self, _lin: &mut Building, field: &Field) {
// Notice 1: No push to lin for the `FieldMetadata` itself
// Notice 2: we discard the encoded value as anything we
// would do with the value will be handled in the following
@ -548,15 +553,15 @@ impl<'a> Linearizer for AnalysisHost<'a> {
/// Additionally, resolves concrete types for all items.
fn complete(
self,
mut lin: Linearization<Building>,
mut lin: Building,
Extra {
table,
names: reported_names,
wildcards,
}: Extra,
) -> Linearization<Completed> {
}: &Extra,
) -> Completed {
debug!("linearizing {:?}", self.file);
let mut name_reg = NameReg::new(reported_names);
let mut name_reg = NameReg::new(reported_names.clone());
// TODO: Storing defers while linearizing?
let mut defers: Vec<_> = lin
@ -578,7 +583,7 @@ impl<'a> Linearizer for AnalysisHost<'a> {
mut linearization,
import_locations,
..
} = lin.into_inner();
} = lin;
linearization.sort_by(
|it1, it2| match (it1.pos.as_opt_ref(), it2.pos.as_opt_ref()) {
@ -619,7 +624,7 @@ impl<'a> Linearizer for AnalysisHost<'a> {
kind,
metadata: meta,
}| LinearizationItem {
ty: name_reg.to_type(&table, ty),
ty: name_reg.to_type(table, ty),
term,
env,
id,
@ -629,11 +634,11 @@ impl<'a> Linearizer for AnalysisHost<'a> {
},
)
.map(|item| LinearizationItem {
ty: transform_wildcard(&wildcards, item.ty),
ty: transform_wildcard(wildcards, item.ty),
..item
})
.collect();
Linearization::new(Completed::new(lin_, id_mapping, import_locations))
Completed::new(lin_, id_mapping, import_locations)
}
fn scope(&mut self) -> Self {
@ -669,7 +674,7 @@ impl<'a> Linearizer for AnalysisHost<'a> {
fn retype_ident(
&mut self,
lin: &mut Linearization<Building>,
lin: &mut Building,
ident: &nickel_lang_core::identifier::LocIdent,
new_type: UnifType,
) {
@ -688,12 +693,7 @@ impl<'a> Linearizer for AnalysisHost<'a> {
}
}
fn retype(
&mut self,
lin: &mut Linearization<Building>,
item_id: Option<ItemId>,
new_type: UnifType,
) {
fn retype(&mut self, lin: &mut Building, item_id: Option<ItemId>, new_type: UnifType) {
let Some(item_id) = item_id else {
return;
};
@ -706,3 +706,128 @@ impl<'a> Linearizer for AnalysisHost<'a> {
}
}
}
/// A [`Linearizer`] that records the inferred type of every term it visits.
///
/// NOTE(review): per the commit description this backs type-based completion
/// in the new completer — the completed table maps terms (by pointer
/// identity, via `RichTermPtr`) to their resolved types.
#[derive(Default)]
pub struct TypeCollector {
// Store a copy of the terms we've added so far. The index in this array is their ItemId.
term_ids: Vec<RichTermPtr>,
}
impl Linearizer for TypeCollector {
// While building, each visited term (keyed by pointer identity) maps to its
// not-yet-resolved unification type.
type Building = HashMap<RichTermPtr, UnifType>;
// On completion, unification types are resolved to concrete `Type`s.
type Completed = HashMap<RichTermPtr, Type>;
type CompletionExtra = Extra;
// An item id is an index into `term_ids`.
type ItemId = usize;
// Child scopes get a fresh collector; the shared `Building` map is passed
// in separately on each call, so no per-scope state needs to be carried over.
fn scope(&mut self) -> Self {
TypeCollector::default()
}
fn scope_meta(&mut self) -> Self {
TypeCollector::default()
}
// Record `rt` and its type, returning the new item's id (its index in
// `term_ids`). Always returns `Some`.
fn add_term(&mut self, lin: &mut Self::Building, rt: &RichTerm, ty: UnifType) -> Option<usize> {
self.term_ids.push(RichTermPtr(rt.clone()));
lin.insert(RichTermPtr(rt.clone()), ty);
Some(self.term_ids.len() - 1)
}
// Resolve every collected `UnifType` into a `Type`: assign names via
// `NameReg`, then substitute away top-level wildcards using the `wildcards`
// table (a wildcard with no entry is left as-is).
fn complete(
self,
lin: Self::Building,
Extra {
table,
names,
wildcards,
}: &Extra,
) -> Self::Completed {
let mut name_reg = NameReg::new(names.clone());
let mut transform_type = |uty: UnifType| -> Type {
let ty = name_reg.to_type(table, uty);
match ty.typ {
// NOTE(review): only a *top-level* wildcard is substituted here;
// wildcards nested inside `ty` are presumably handled elsewhere — confirm.
TypeF::Wildcard(i) => wildcards.get(i).unwrap_or(&ty).clone(),
_ => ty,
}
};
lin.into_iter()
.map(|(rt, uty)| (rt, transform_type(uty)))
.collect()
}
// Overwrite the recorded type of a previously added item. A `None` id is a
// no-op (the corresponding term was never registered with this collector).
fn retype(&mut self, lin: &mut Self::Building, item_id: Option<usize>, new_type: UnifType) {
if let Some(id) = item_id {
lin.insert(self.term_ids[id].clone(), new_type);
}
}
}
pub struct CombinedLinearizer<T, U>(pub T, pub U);
// Every `Linearizer` callback is forwarded to both inner linearizers, with
// the paired `Building` state split component-wise. The `where` clause
// requires both to share one `CompletionExtra` so a single `complete` call
// can drive them both.
impl<T: Linearizer, U: Linearizer> Linearizer for CombinedLinearizer<T, U>
where
T: Linearizer<CompletionExtra = U::CompletionExtra>,
{
type Building = (T::Building, U::Building);
type Completed = (T::Completed, U::Completed);
// Each side may decline to produce an id for a term, hence the `Option`s.
type ItemId = (Option<T::ItemId>, Option<U::ItemId>);
// Maybe this should be (T::CompletionExtra, U::CompletionExtra) but in practice
// CompletionExtra is always Extra anyway.
type CompletionExtra = T::CompletionExtra;
fn scope(&mut self) -> Self {
CombinedLinearizer(self.0.scope(), self.1.scope())
}
fn scope_meta(&mut self) -> Self {
CombinedLinearizer(self.0.scope_meta(), self.1.scope_meta())
}
// Forward to both sides; the combined id is `Some` even if both inner ids
// are `None`, so `retype` below can still dispatch per side.
fn add_term(
&mut self,
lin: &mut Self::Building,
term: &RichTerm,
ty: UnifType,
) -> Option<Self::ItemId> {
let id0 = self.0.add_term(&mut lin.0, term, ty.clone());
let id1 = self.1.add_term(&mut lin.1, term, ty);
Some((id0, id1))
}
fn add_field_metadata(&mut self, lin: &mut Self::Building, field: &Field) {
self.0.add_field_metadata(&mut lin.0, field);
self.1.add_field_metadata(&mut lin.1, field);
}
fn retype_ident(
&mut self,
lin: &mut Self::Building,
ident: &nickel_lang_core::identifier::LocIdent,
new_type: UnifType,
) {
// Clone the type for the first side; the second side takes ownership.
self.0.retype_ident(&mut lin.0, ident, new_type.clone());
self.1.retype_ident(&mut lin.1, ident, new_type);
}
// Complete both halves with the same shared `extra` (borrowed, so it can
// be used twice without cloning).
fn complete(self, lin: Self::Building, extra: &Self::CompletionExtra) -> Self::Completed
where
Self: Sized,
{
(self.0.complete(lin.0, extra), self.1.complete(lin.1, extra))
}
fn retype(
&mut self,
lin: &mut Self::Building,
item_id: Option<Self::ItemId>,
new_type: UnifType,
) {
// Each inner linearizer receives its own (possibly `None`) id.
if let Some((id0, id1)) = item_id {
self.0.retype(&mut lin.0, id0, new_type.clone());
self.1.retype(&mut lin.1, id1, new_type);
}
}
}

View File

@ -1,6 +1,6 @@
use crate::{
cache::CacheExt,
field_walker::{Def, FieldDefs},
field_walker::{FieldHaver, FieldResolver},
incomplete,
linearization::{
completed::Completed,
@ -688,12 +688,17 @@ fn sanitize_term_for_completion(
) -> Option<RichTerm> {
if let (Term::ParseError(_), Some(range)) = (term.term.as_ref(), term.pos.as_opt_ref()) {
let mut range = *range;
let env = server
.lin_registry
.get_env(term)
.cloned()
.unwrap_or_else(Environment::new);
if cursor.index < range.start || cursor.index > range.end || cursor.src_id != range.src_id {
return None;
}
range.end = cursor.index;
incomplete::parse_path_from_incomplete_input(range, server)
incomplete::parse_path_from_incomplete_input(range, &env, server)
} else if let Term::Op1(UnaryOp::StaticAccess(_), parent) = term.term.as_ref() {
// For completing record paths, we discard the last path element: if we're
// completing `foo.bar.bla`, we only look at `foo.bar` to find the completions.
@ -705,16 +710,14 @@ fn sanitize_term_for_completion(
fn term_based_completion(
term: RichTerm,
initial_env: &Environment,
server: &Server,
) -> Result<Vec<CompletionItem>, ResponseError> {
log::info!("term based completion path: {term:?}");
log::info!("initial env: {initial_env:?}");
let (start_term, path) = extract_static_path(term);
let defs = FieldDefs::resolve_path(&start_term, &path, initial_env, server);
Ok(defs.defs().map(Def::to_completion_item).collect())
let defs = FieldResolver::new(server).resolve_term_path(&start_term, &path);
Ok(defs.iter().flat_map(FieldHaver::completion_items).collect())
}
pub fn handle_completion(
@ -756,40 +759,32 @@ pub fn handle_completion(
.as_ref()
.and_then(|rt| sanitize_term_for_completion(rt, cursor, server));
let mut completions = match term.zip(sanitized_term) {
Some((term, sanitized)) => {
let env = if matches!(term.term.as_ref(), Term::ParseError(_)) {
server
.lin_registry
.get_env(&term)
.cloned()
.unwrap_or_default()
} else {
Environment::new()
};
term_based_completion(sanitized, &env, server)?
}
let mut completions = match sanitized_term {
Some(sanitized) => term_based_completion(sanitized, server)?,
None => Vec::new(),
};
log::info!("term-based completion provided {completions:?}");
let linearization = server.lin_cache_get(&pos.src_id)?;
Trace::enrich(&id, linearization);
#[cfg(feature = "old-completer")]
{
let linearization = server.lin_cache_get(&pos.src_id)?;
Trace::enrich(&id, linearization);
let item = linearization.item_at(pos);
let text = server.cache.files().source(pos.src_id);
let start = pos.index.to_usize();
if let Some(item) = item {
debug!("found closest item: {:?}", item);
let item = linearization.item_at(pos);
let text = server.cache.files().source(pos.src_id);
let start = pos.index.to_usize();
if let Some(item) = item {
debug!("found closest item: {:?}", item);
completions.extend_from_slice(&get_completion_identifiers(
&text[..start],
trigger,
linearization,
item,
server,
)?);
};
completions.extend_from_slice(&get_completion_identifiers(
&text[..start],
trigger,
linearization,
item,
server,
)?);
};
}
let completions = remove_duplicates(&completions);
server.reply(Response::new_ok(id.clone(), completions));

View File

@ -7,10 +7,29 @@ use nickel_lang_core::{
term::{record::FieldMetadata, RichTerm, Term, Traverse, TraverseControl},
};
use crate::{
field_walker::{DefWithPath, TermAtPath},
identifier::LocIdent,
};
use crate::{field_walker::DefWithPath, identifier::LocIdent};
/// A term and a path.
///
/// This is morally equivalent to (but a more convenient representation than)
/// `Op1(StaticAccess("field2"), Op1(StaticAccess("field1"), term))`.
#[derive(Clone, Debug, PartialEq)]
pub struct TermAtPath {
pub term: RichTerm,
/// A path of identifiers, in left-to-right order.
///
/// So, for `term.x.y.z`, this will be `vec!["x", "y", "z"]`.
pub path: Vec<Ident>,
}
impl From<RichTerm> for TermAtPath {
fn from(term: RichTerm) -> Self {
Self {
term,
path: Vec::new(),
}
}
}
pub type Environment = GenericEnvironment<Ident, DefWithPath>;
@ -35,12 +54,17 @@ impl EnvExt for Environment {
meta: Option<FieldMetadata>,
) {
let ident = id.into();
let (term, path) = val
.map(Into::into)
.map(|term_at_path| (term_at_path.term, term_at_path.path))
.unzip();
self.insert(
ident.ident,
DefWithPath {
ident,
value: val.map(Into::into),
value: term.map(Into::into),
metadata: meta,
path: path.unwrap_or_default(),
},
);
}
@ -68,8 +92,12 @@ pub struct UsageLookup {
impl UsageLookup {
/// Create a new lookup table by looking for definitions and usages in the tree rooted at `rt`.
pub fn new(rt: &RichTerm) -> Self {
Self::new_with_env(rt, &Environment::new())
}
pub fn new_with_env(rt: &RichTerm, env: &Environment) -> Self {
let mut table = Self::default();
table.fill(rt, &Environment::new());
table.fill(rt, env);
table
}
@ -215,7 +243,7 @@ mod tests {
let def = table.def(&x1).unwrap();
assert_eq!(def.ident, x0);
assert_matches!(def.value.as_ref().unwrap().term.as_ref(), &Term::Num(_));
assert_matches!(def.value().unwrap().term.as_ref(), Term::Num(_));
}
#[test]
@ -235,18 +263,15 @@ mod tests {
let x_def = table.def(&x1).unwrap();
assert_eq!(x_def.ident, x0);
assert!(x_def.value.as_ref().unwrap().path.is_empty());
assert!(x_def.path().is_empty());
let a_def = table.def(&a1).unwrap();
assert_eq!(a_def.ident, a0);
assert_eq!(a_def.value.as_ref().unwrap().path, vec!["foo".into()]);
assert_eq!(a_def.path(), &["foo".into()]);
let baz_def = table.def(&baz1).unwrap();
assert_eq!(baz_def.ident, baz0);
assert_eq!(
baz_def.value.as_ref().unwrap().path,
vec!["foo".into(), "bar".into()]
);
assert_eq!(baz_def.path(), vec!["foo".into(), "bar".into()]);
}
#[test]

View File

@ -1 +0,0 @@
let x = { ab } | { abcde | Number } in x

View File

@ -21,6 +21,12 @@ in
({} | { field = 1 }).fiel
({} | (let x = { field = 1 } in x)).fiel
({} | { field = { subfield = 1 } }.field).subfiel
(
let f | Number -> { foo : Number } = fun x => { foo : x } in
(f 1).foo : _
)
({} | { field : Number }).fiel
{ foo | { field : Number } }.foo.fiel
]
### [[request]]
### type = "Completion"
@ -84,3 +90,18 @@ in
### type = "Completion"
### textDocument.uri = "file:///completion-basic.ncl"
### position = { line = 16, character = 51 }
###
### [[request]]
### type = "Completion"
### textDocument.uri = "file:///completion-basic.ncl"
### position = { line = 19, character = 13 }
###
### [[request]]
### type = "Completion"
### textDocument.uri = "file:///completion-basic.ncl"
### position = { line = 21, character = 32 }
###
### [[request]]
### type = "Completion"
### textDocument.uri = "file:///completion-basic.ncl"
### position = { line = 22, character = 39 }

View File

@ -14,4 +14,7 @@ expression: output
[config, field]
[field]
[config, subfield]
[config, f, foo]
[config, field]
[config, field]

View File

@ -110,7 +110,7 @@ struct DefInfo {
}
```
Then we have a function `field_infos(RichTerm) -> DefInfo` that looks something like:
Then we have a function `field_infos(RichTerm or Type) -> DefInfo` that looks something like:
```text
field_infos(e1 & e2) = field_infos(e1) U field_infos(e2)
@ -120,6 +120,10 @@ field_infos(head x) = field_infos(head)
field_infos(var) = field_infos(goto_definition(var))
field_infos(e1 | C) = field_infos(e1) U field_infos(C)
field_infos(foo.bar) = field_infos(goto_definition(bar in foo.bar))
field_infos(Term::RecRecord) = ... the actual fields defined on the record
field_infos(TypeF::Record) = ... the actual fields defined on the record type
field_infos(TypeF::Dict) = ... every possible identifier as a field (represented
lazily, obviously)
other cases => empty
```