Add custom log file and improve logging

Silas Marvin 2024-08-11 13:52:15 -07:00
parent 8335d5c73c
commit 600fb6f533
12 changed files with 182 additions and 98 deletions
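For orientation (not part of the commit): when the new separate-log-file option is enabled, main.rs below resolves the platform cache directory with the directories crate and writes to lsp-ai/lsp-ai.log inside it, e.g. ~/.cache/lsp-ai/lsp-ai.log on Linux. A minimal sketch of the resulting path, assuming only the directories crate:

use directories::BaseDirs;
use std::path::PathBuf;

// Mirrors the create_log_file change below: <platform cache dir>/lsp-ai/lsp-ai.log.
fn expected_log_path() -> Option<PathBuf> {
    BaseDirs::new().map(|dirs| dirs.cache_dir().join("lsp-ai").join("lsp-ai.log"))
}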

.gitignore

@ -5,3 +5,4 @@ out
dist
lsp-ai.log
.vsix
lsp-ai-chat.md

Cargo.lock

@ -76,9 +76,9 @@ dependencies = [
[[package]]
name = "anstyle"
version = "1.0.6"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc"
checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1"
[[package]]
name = "anstyle-parse"
@ -342,9 +342,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.2"
version = "4.5.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651"
checksum = "c937d4061031a6d0c8da4b9a4f98a172fc2976dfb1c19213a9cf7d0d3c837e36"
dependencies = [
"clap_builder",
"clap_derive",
@ -352,9 +352,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.2"
version = "4.5.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4"
checksum = "85379ba512b21a328adf887e85f7742d12e96eb31f3ef077df4ffc26b506ffed"
dependencies = [
"anstream",
"anstyle",
@ -364,11 +364,11 @@ dependencies = [
[[package]]
name = "clap_derive"
version = "4.5.0"
version = "4.5.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47"
checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0"
dependencies = [
"heck 0.4.1",
"heck 0.5.0",
"proc-macro2",
"quote",
"syn 2.0.52",
@ -1566,12 +1566,13 @@ dependencies = [
[[package]]
name = "lsp-ai"
version = "0.4.0"
version = "0.4.1"
dependencies = [
"anyhow",
"assert_cmd",
"async-trait",
"cc",
"clap",
"directories",
"futures",
"fxhash",


@ -1,6 +1,6 @@
[package]
name = "lsp-ai"
version = "0.4.1"
version = "0.5.0"
description.workspace = true
repository.workspace = true
@ -42,6 +42,7 @@ md5 = "0.7.0"
fxhash = "0.2.1"
ordered-float = "4.2.1"
futures = "0.3"
clap = { version = "4.5.14", features = ["derive"] }
[build-dependencies]
cc="1"


@ -1,17 +1,21 @@
use anyhow::Result;
use clap::Parser;
use directories::BaseDirs;
use lsp_server::{Connection, ExtractError, Message, Notification, Request, RequestId};
use lsp_types::{
request::{CodeActionRequest, CodeActionResolveRequest, Completion},
CodeActionOptions, CompletionOptions, DidChangeTextDocumentParams, DidOpenTextDocumentParams,
RenameFilesParams, ServerCapabilities, TextDocumentSyncKind,
};
use std::sync::Mutex;
use std::{
collections::HashMap,
fs,
path::Path,
sync::{mpsc, Arc},
thread,
};
use tracing::error;
use tracing::{error, info};
use tracing_subscriber::{EnvFilter, FmtSubscriber};
mod config;
@ -54,19 +58,58 @@ where
req.extract(R::METHOD)
}
// LSP-AI parameters
#[derive(Parser)]
#[command(version)]
struct Args {
// Whether to use a custom log file
#[arg(long, default_value_t = false)]
use_seperate_log_file: bool,
}
fn create_log_file(base_path: &Path) -> anyhow::Result<fs::File> {
let dir_path = base_path.join("lsp-ai");
fs::create_dir_all(&dir_path)?;
let file_path = dir_path.join("lsp-ai.log");
Ok(fs::File::create(file_path)?)
}
// Builds a tracing subscriber from the `LSP_AI_LOG` environment variable.
// If the variable's value is malformed or missing, the default log level is set to ERROR.
fn init_logger() {
FmtSubscriber::builder()
.with_writer(std::io::stderr)
.with_ansi(false)
.without_time()
.with_env_filter(EnvFilter::from_env("LSP_AI_LOG"))
.init();
fn init_logger(args: &Args) {
let builder = FmtSubscriber::builder().with_env_filter(EnvFilter::from_env("LSP_AI_LOG"));
let base_dirs = BaseDirs::new();
if args.use_seperate_log_file && base_dirs.is_some() {
let base_dirs = base_dirs.unwrap();
let cache_dir = base_dirs.cache_dir();
// Linux: /home/alice/.cache
// Windows: C:\Users\Alice\AppData\Local
// macOS: /Users/Alice/Library/Caches
match create_log_file(&cache_dir) {
Ok(log_file) => builder.with_writer(Mutex::new(log_file)).init(),
Err(e) => {
eprintln!("creating log file: {e:?} - falling back to stderr");
builder
.with_writer(std::io::stderr)
.without_time()
.with_ansi(false)
.init()
}
}
} else {
builder
.with_writer(std::io::stderr)
.without_time()
.with_ansi(false)
.init()
}
}
fn main() -> Result<()> {
init_logger();
let args = Args::parse();
init_logger(&args);
info!("lsp-ai logger initialized starting server");
let (connection, io_threads) = Connection::stdio();
let server_capabilities = serde_json::to_value(ServerCapabilities {
@ -181,7 +224,7 @@ fn main_loop(connection: Connection, args: serde_json::Value) -> Result<()> {
Err(err) => error!("{err:?}"),
}
} else {
error!("lsp-ai currently only supports textDocument/completion, textDocument/generation and textDocument/generationStream")
error!("Unsupported command - see the wiki for a list of supported commands")
}
}
Message::Notification(not) => {
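
As a usage sketch (not from this commit): assuming the binary is installed as lsp-ai and that clap's default kebab-casing turns the use_seperate_log_file field into a --use-seperate-log-file long flag, an editor could launch the server with the new flag plus an LSP_AI_LOG filter roughly like this:

use std::process::{Command, Stdio};

fn spawn_lsp_ai() -> std::io::Result<std::process::Child> {
    Command::new("lsp-ai")
        // Hypothetical flag spelling, derived from clap's default naming of the field above.
        .arg("--use-seperate-log-file")
        // EnvFilter directive read by init_logger; per-target directives such as "lsp_ai=debug" should also be accepted.
        .env("LSP_AI_LOG", "info")
        // The server speaks LSP over stdio, so both pipes are needed.
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()
}

With the flag set and a usable cache directory, tracing output goes to the Mutex-wrapped log file instead of stderr; otherwise init_logger falls back to the previous stderr writer.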


@ -101,9 +101,7 @@ impl FileStore {
match parse_tree(uri, &contents, None) {
Ok(tree) => Some(tree),
Err(e) => {
error!(
"Failed to parse tree for {uri} with error {e}, falling back to no tree"
);
warn!("Failed to parse tree for {uri} with error {e}, falling back to no tree");
None
}
}


@ -1,5 +1,5 @@
use splitter_tree_sitter::TreeSitterCodeSplitter;
use tracing::error;
use tracing::warn;
use tree_sitter::Tree;
use crate::{config, memory_backends::file_store::File, utils::parse_tree};
@ -43,7 +43,7 @@ impl Splitter for TreeSitter {
match self.split_tree(tree, file.rope().to_string().as_bytes()) {
Ok(chunks) => chunks,
Err(e) => {
error!(
warn!(
"Failed to parse tree for file with error: {e:?}. Falling back to default splitter.",
);
self.text_splitter.split(file)
@ -59,14 +59,14 @@ impl Splitter for TreeSitter {
Ok(tree) => match self.split_tree(&tree, contents.as_bytes()) {
Ok(chunks) => chunks,
Err(e) => {
error!(
warn!(
"Failed to parse tree for file: {uri} with error: {e:?}. Falling back to default splitter.",
);
self.text_splitter.split_file_contents(uri, contents)
}
},
Err(e) => {
error!(
warn!(
"Failed to parse tree for file {uri} with error: {e:?}. Falling back to default splitter.",
);
self.text_splitter.split_file_contents(uri, contents)


@ -3,7 +3,7 @@ use std::collections::HashMap;
use anyhow::Context;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::instrument;
use tracing::{info, instrument};
use crate::{
config::{self, ChatMessage},
@ -80,6 +80,18 @@ impl Anthropic {
"Please set `auth_token_env_var_name` or `auth_token` to use an Anthropic"
);
};
let params = json!({
"model": self.config.model,
"system": system_prompt,
"max_tokens": params.max_tokens,
"top_p": params.top_p,
"temperature": params.temperature,
"messages": messages
});
info!(
"Calling Anthropic compatible API with parameters:\n{}",
serde_json::to_string_pretty(&params).unwrap()
);
let res: AnthropicChatResponse = client
.post(
self.config
@ -91,14 +103,7 @@ impl Anthropic {
.header("anthropic-version", "2023-06-01")
.header("Content-Type", "application/json")
.header("Accept", "application/json")
.json(&json!({
"model": self.config.model,
"system": system_prompt,
"max_tokens": params.max_tokens,
"top_p": params.top_p,
"temperature": params.temperature,
"messages": messages
}))
.json(&params)
.send()
.await?
.json()
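
The remaining HTTP backends below (Gemini, Mistral FIM, Ollama, OpenAI) get the same treatment as the Anthropic change above: the request body is built once as a serde_json::Value, logged at info level, and then handed unchanged to .json(&params). A stand-alone sketch of that pattern; the endpoint, field names, and values here are placeholders rather than lsp-ai configuration:

use serde_json::json;
use tracing::info;

// Build the body once, log it, then send the same value.
async fn call_example_api(
    client: &reqwest::Client,
    prompt: &str,
) -> anyhow::Result<serde_json::Value> {
    let params = json!({
        "model": "example-model",
        "prompt": prompt,
        "max_tokens": 64,
    });
    // to_string_pretty should not fail on a json! value, so the unwrap is considered safe here.
    info!(
        "Calling example API with parameters:\n{}",
        serde_json::to_string_pretty(&params).unwrap()
    );
    let res = client
        .post("https://api.example.com/v1/completions")
        .header("Content-Type", "application/json")
        .json(&params)
        .send()
        .await?
        .json()
        .await?;
    Ok(res)
}

Logging the exact serialized body once makes it easy to reproduce a failing request by hand, which is presumably the motivation for hoisting the json! literal out of the builder chain.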


@ -1,7 +1,7 @@
use anyhow::Context;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use tracing::instrument;
use tracing::{info, instrument};
use super::TransformerBackend;
use crate::{
@ -110,6 +110,15 @@ impl Gemini {
) -> anyhow::Result<String> {
let client = reqwest::Client::new();
let token = self.get_token()?;
let params = json!({
"contents": messages,
"systemInstruction": params.system_instruction,
"generationConfig": params.generation_config,
});
info!(
"Calling Gemini compatible chat API with parameters:\n{}",
serde_json::to_string_pretty(&params).unwrap()
);
let res: serde_json::Value = client
.post(
self.configuration
@ -122,11 +131,7 @@ impl Gemini {
+ token.as_ref(),
)
.header("Content-Type", "application/json")
.json(&json!({
"contents": messages,
"systemInstruction": params.system_instruction,
"generationConfig": params.generation_config,
}))
.json(&params)
.send()
.await?
.json()


@ -9,7 +9,7 @@ use llama_cpp_2::{
};
use once_cell::sync::Lazy;
use std::{num::NonZeroU32, path::PathBuf, time::Duration};
use tracing::{debug, info, instrument};
use tracing::{info, instrument};
use crate::config::{self, ChatMessage};
@ -29,7 +29,10 @@ impl Model {
let model_params = LlamaModelParams::default().with_n_gpu_layers(config.n_gpu_layers);
// Load the model
debug!("Loading model at path: {:?}", model_path);
info!(
"Loading llama.cpp compatible model at path: {:?}",
model_path
);
let model = LlamaModel::load_from_file(&BACKEND, model_path, &model_params)?;
Ok(Model {
@ -40,6 +43,8 @@ impl Model {
#[instrument(skip(self))]
pub fn complete(&self, prompt: &str, params: LLaMACPPRunParams) -> anyhow::Result<String> {
info!("Completing with llama.cpp with prompt:\n{prompt}");
// initialize the context
let ctx_params = LlamaContextParams::default().with_n_ctx(Some(self.n_ctx));


@ -1,7 +1,7 @@
use anyhow::Context;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::instrument;
use tracing::{info, instrument};
use super::{open_ai::OpenAIChatResponse, TransformerBackend};
use crate::{
@ -67,6 +67,21 @@ impl MistralFIM {
) -> anyhow::Result<String> {
let client = reqwest::Client::new();
let token = self.get_token()?;
let params = json!({
"prompt": prompt.prompt,
"suffix": prompt.suffix,
"model": self.config.model,
"max_tokens": params.max_tokens,
"top_p": params.top_p,
"temperature": params.temperature,
"min_tokens": params.min_tokens,
"random_seed": params.random_seed,
"stop": params.stop
});
info!(
"Calling Mistral compatible FIM API with parameters:\n{}",
serde_json::to_string_pretty(&params).unwrap()
);
let res: OpenAIChatResponse = client
.post(
self.config
@ -77,17 +92,7 @@ impl MistralFIM {
.bearer_auth(token)
.header("Content-Type", "application/json")
.header("Accept", "application/json")
.json(&json!({
"prompt": prompt.prompt,
"suffix": prompt.suffix,
"model": self.config.model,
"max_tokens": params.max_tokens,
"top_p": params.top_p,
"temperature": params.temperature,
"min_tokens": params.min_tokens,
"random_seed": params.random_seed,
"stop": params.stop
}))
.json(&params)
.send()
.await?
.json()


@ -1,7 +1,7 @@
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use tracing::instrument;
use tracing::{info, instrument};
use crate::{
config::{self, ChatMessage, FIM},
@ -66,6 +66,18 @@ impl Ollama {
params: OllamaRunParams,
) -> anyhow::Result<String> {
let client = reqwest::Client::new();
let params = json!({
"model": self.configuration.model,
"prompt": prompt,
"options": params.options,
"keep_alive": params.keep_alive,
"raw": true,
"stream": false
});
info!(
"Calling Ollama compatible completion API with parameters:\n{}",
serde_json::to_string_pretty(&params).unwrap()
);
let res: OllamaCompletionsResponse = client
.post(
self.configuration
@ -75,14 +87,7 @@ impl Ollama {
)
.header("Content-Type", "application/json")
.header("Accept", "application/json")
.json(&json!({
"model": self.configuration.model,
"prompt": prompt,
"options": params.options,
"keep_alive": params.keep_alive,
"raw": true,
"stream": false
}))
.json(&params)
.send()
.await?
.json()
@ -105,6 +110,19 @@ impl Ollama {
params: OllamaRunParams,
) -> anyhow::Result<String> {
let client = reqwest::Client::new();
let params = json!({
"model": self.configuration.model,
"system": params.system,
"template": params.template,
"messages": messages,
"options": params.options,
"keep_alive": params.keep_alive,
"stream": false
});
info!(
"Calling Ollama compatible chat API with parameters:\n{}",
serde_json::to_string_pretty(&params).unwrap()
);
let res: OllamaChatResponse = client
.post(
self.configuration
@ -114,15 +132,7 @@ impl Ollama {
)
.header("Content-Type", "application/json")
.header("Accept", "application/json")
.json(&json!({
"model": self.configuration.model,
"system": params.system,
"template": params.template,
"messages": messages,
"options": params.options,
"keep_alive": params.keep_alive,
"stream": false
}))
.json(&params)
.send()
.await?
.json()


@ -3,7 +3,7 @@ use std::collections::HashMap;
use anyhow::Context;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use tracing::instrument;
use tracing::{info, instrument};
use crate::{
config::{self, ChatMessage, FIM},
@ -116,6 +116,21 @@ impl OpenAI {
) -> anyhow::Result<String> {
let client = reqwest::Client::new();
let token = self.get_token()?;
let params = json!({
"model": self.configuration.model,
"max_tokens": params.max_tokens,
"n": 1,
"top_p": params.top_p,
"presence_penalty": params.presence_penalty,
"frequency_penalty": params.frequency_penalty,
"temperature": params.temperature,
"echo": false,
"prompt": prompt
});
info!(
"Calling OpenAI compatible completions API with parameters:\n{}",
serde_json::to_string_pretty(&params).unwrap()
);
let res: OpenAICompletionsResponse = client
.post(
self.configuration
@ -126,17 +141,7 @@ impl OpenAI {
.bearer_auth(token)
.header("Content-Type", "application/json")
.header("Accept", "application/json")
.json(&json!({
"model": self.configuration.model,
"max_tokens": params.max_tokens,
"n": 1,
"top_p": params.top_p,
"presence_penalty": params.presence_penalty,
"frequency_penalty": params.frequency_penalty,
"temperature": params.temperature,
"echo": false,
"prompt": prompt
}))
.json(&params)
.send().await?
.json().await?;
if let Some(error) = res.error {
@ -158,6 +163,20 @@ impl OpenAI {
) -> anyhow::Result<String> {
let client = reqwest::Client::new();
let token = self.get_token()?;
let params = json!({
"model": self.configuration.model,
"max_tokens": params.max_tokens,
"n": 1,
"top_p": params.top_p,
"presence_penalty": params.presence_penalty,
"frequency_penalty": params.frequency_penalty,
"temperature": params.temperature,
"messages": messages
});
info!(
"Calling OpenAI compatible chat API with parameters:\n{}",
serde_json::to_string_pretty(&params).unwrap()
);
let res: OpenAIChatResponse = client
.post(
self.configuration
@ -168,16 +187,7 @@ impl OpenAI {
.bearer_auth(token)
.header("Content-Type", "application/json")
.header("Accept", "application/json")
.json(&json!({
"model": self.configuration.model,
"max_tokens": params.max_tokens,
"n": 1,
"top_p": params.top_p,
"presence_penalty": params.presence_penalty,
"frequency_penalty": params.frequency_penalty,
"temperature": params.temperature,
"messages": messages
}))
.json(&params)
.send()
.await?
.json()