mirror of https://github.com/zed-industries/zed.git (synced 2024-12-26 07:12:03 +03:00)
Checkpoint
This commit is contained in:
parent 89f4718ea1
commit 909fbb9538
38  Cargo.lock  generated
@@ -1449,6 +1449,43 @@ dependencies = [
 "uuid 1.4.1",
]

[[package]]
name = "client2"
version = "0.1.0"
dependencies = [
 "anyhow",
 "async-recursion 0.3.2",
 "async-tungstenite",
 "collections",
 "db",
 "feature_flags",
 "futures 0.3.28",
 "gpui",
 "gpui2",
 "image",
 "lazy_static",
 "log",
 "parking_lot 0.11.2",
 "postage",
 "rand 0.8.5",
 "rpc",
 "schemars",
 "serde",
 "serde_derive",
 "settings",
 "smol",
 "sum_tree",
 "sysinfo",
 "tempfile",
 "text",
 "thiserror",
 "time",
 "tiny_http",
 "url",
 "util",
 "uuid 1.4.1",
]

[[package]]
name = "clock"
version = "0.1.0"

@@ -10379,6 +10416,7 @@ dependencies = [
 "backtrace",
 "chrono",
 "cli",
 "client2",
 "collections",
 "ctor",
 "env_logger 0.9.3",
@@ -10,6 +10,7 @@ members = [
    "crates/channel",
    "crates/cli",
    "crates/client",
    "crates/client2",
    "crates/clock",
    "crates/collab",
    "crates/collab_ui",
52  crates/client2/Cargo.toml  Normal file
@@ -0,0 +1,52 @@
[package]
name = "client2"
version = "0.1.0"
edition = "2021"
publish = false

[lib]
path = "src/client2.rs"
doctest = false

[features]
test-support = ["collections/test-support", "gpui/test-support", "rpc/test-support"]

[dependencies]
collections = { path = "../collections" }
db = { path = "../db" }
gpui2 = { path = "../gpui2" }
util = { path = "../util" }
rpc = { path = "../rpc" }
text = { path = "../text" }
settings = { path = "../settings" }
feature_flags = { path = "../feature_flags" }
sum_tree = { path = "../sum_tree" }

anyhow.workspace = true
async-recursion = "0.3"
async-tungstenite = { version = "0.16", features = ["async-tls"] }
futures.workspace = true
image = "0.23"
lazy_static.workspace = true
log.workspace = true
parking_lot.workspace = true
postage.workspace = true
rand.workspace = true
schemars.workspace = true
serde.workspace = true
serde_derive.workspace = true
smol.workspace = true
sysinfo.workspace = true
tempfile = "3"
thiserror.workspace = true
time.workspace = true
tiny_http = "0.8"
uuid.workspace = true
url = "2.2"

[dev-dependencies]
collections = { path = "../collections", features = ["test-support"] }
gpui2 = { path = "../gpui2", features = ["test-support"] }
rpc = { path = "../rpc", features = ["test-support"] }
settings = { path = "../settings", features = ["test-support"] }
util = { path = "../util", features = ["test-support"] }
1723  crates/client2/src/client2.rs  Normal file
File diff suppressed because it is too large
317  crates/client2/src/telemetry.rs  Normal file
@@ -0,0 +1,317 @@
use crate::{TelemetrySettings, ZED_SECRET_CLIENT_TOKEN, ZED_SERVER_URL};
use gpui::{executor::Background, serde_json, AppContext, Task};
use lazy_static::lazy_static;
use parking_lot::Mutex;
use serde::Serialize;
use std::{env, io::Write, mem, path::PathBuf, sync::Arc, time::Duration};
use sysinfo::{Pid, PidExt, ProcessExt, System, SystemExt};
use tempfile::NamedTempFile;
use util::http::HttpClient;
use util::{channel::ReleaseChannel, TryFutureExt};

pub struct Telemetry {
    http_client: Arc<dyn HttpClient>,
    executor: Arc<Background>,
    state: Mutex<TelemetryState>,
}

#[derive(Default)]
struct TelemetryState {
    metrics_id: Option<Arc<str>>,      // Per logged-in user
    installation_id: Option<Arc<str>>, // Per app installation (different for dev, preview, and stable)
    session_id: Option<Arc<str>>,      // Per app launch
    app_version: Option<Arc<str>>,
    release_channel: Option<&'static str>,
    os_name: &'static str,
    os_version: Option<Arc<str>>,
    architecture: &'static str,
    clickhouse_events_queue: Vec<ClickhouseEventWrapper>,
    flush_clickhouse_events_task: Option<Task<()>>,
    log_file: Option<NamedTempFile>,
    is_staff: Option<bool>,
}

const CLICKHOUSE_EVENTS_URL_PATH: &'static str = "/api/events";

lazy_static! {
    static ref CLICKHOUSE_EVENTS_URL: String =
        format!("{}{}", *ZED_SERVER_URL, CLICKHOUSE_EVENTS_URL_PATH);
}

#[derive(Serialize, Debug)]
struct ClickhouseEventRequestBody {
    token: &'static str,
    installation_id: Option<Arc<str>>,
    session_id: Option<Arc<str>>,
    is_staff: Option<bool>,
    app_version: Option<Arc<str>>,
    os_name: &'static str,
    os_version: Option<Arc<str>>,
    architecture: &'static str,
    release_channel: Option<&'static str>,
    events: Vec<ClickhouseEventWrapper>,
}

#[derive(Serialize, Debug)]
struct ClickhouseEventWrapper {
    signed_in: bool,
    #[serde(flatten)]
    event: ClickhouseEvent,
}

#[derive(Serialize, Debug)]
#[serde(rename_all = "snake_case")]
pub enum AssistantKind {
    Panel,
    Inline,
}

#[derive(Serialize, Debug)]
#[serde(tag = "type")]
pub enum ClickhouseEvent {
    Editor {
        operation: &'static str,
        file_extension: Option<String>,
        vim_mode: bool,
        copilot_enabled: bool,
        copilot_enabled_for_language: bool,
    },
    Copilot {
        suggestion_id: Option<String>,
        suggestion_accepted: bool,
        file_extension: Option<String>,
    },
    Call {
        operation: &'static str,
        room_id: Option<u64>,
        channel_id: Option<u64>,
    },
    Assistant {
        conversation_id: Option<String>,
        kind: AssistantKind,
        model: &'static str,
    },
    Cpu {
        usage_as_percentage: f32,
        core_count: u32,
    },
    Memory {
        memory_in_bytes: u64,
        virtual_memory_in_bytes: u64,
    },
}

#[cfg(debug_assertions)]
const MAX_QUEUE_LEN: usize = 1;

#[cfg(not(debug_assertions))]
const MAX_QUEUE_LEN: usize = 10;

#[cfg(debug_assertions)]
const DEBOUNCE_INTERVAL: Duration = Duration::from_secs(1);

#[cfg(not(debug_assertions))]
const DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30);

impl Telemetry {
    pub fn new(client: Arc<dyn HttpClient>, cx: &AppContext) -> Arc<Self> {
        let platform = cx.platform();
        let release_channel = if cx.has_global::<ReleaseChannel>() {
            Some(cx.global::<ReleaseChannel>().display_name())
        } else {
            None
        };
        // TODO: Replace all hardware stuff with nested SystemSpecs json
        let this = Arc::new(Self {
            http_client: client,
            executor: cx.background().clone(),
            state: Mutex::new(TelemetryState {
                os_name: platform.os_name().into(),
                os_version: platform.os_version().ok().map(|v| v.to_string().into()),
                architecture: env::consts::ARCH,
                app_version: platform.app_version().ok().map(|v| v.to_string().into()),
                release_channel,
                installation_id: None,
                metrics_id: None,
                session_id: None,
                clickhouse_events_queue: Default::default(),
                flush_clickhouse_events_task: Default::default(),
                log_file: None,
                is_staff: None,
            }),
        });

        this
    }

    pub fn log_file_path(&self) -> Option<PathBuf> {
        Some(self.state.lock().log_file.as_ref()?.path().to_path_buf())
    }

    pub fn start(
        self: &Arc<Self>,
        installation_id: Option<String>,
        session_id: String,
        cx: &mut AppContext,
    ) {
        let mut state = self.state.lock();
        state.installation_id = installation_id.map(|id| id.into());
        state.session_id = Some(session_id.into());
        let has_clickhouse_events = !state.clickhouse_events_queue.is_empty();
        drop(state);

        if has_clickhouse_events {
            self.flush_clickhouse_events();
        }

        let this = self.clone();
        cx.spawn(|mut cx| async move {
            let mut system = System::new_all();
            system.refresh_all();

            loop {
                // Waiting some amount of time before the first query is important to get a reasonable value
                // https://docs.rs/sysinfo/0.29.10/sysinfo/trait.ProcessExt.html#tymethod.cpu_usage
                const DURATION_BETWEEN_SYSTEM_EVENTS: Duration = Duration::from_secs(60);
                smol::Timer::after(DURATION_BETWEEN_SYSTEM_EVENTS).await;

                system.refresh_memory();
                system.refresh_processes();

                let current_process = Pid::from_u32(std::process::id());
                let Some(process) = system.processes().get(&current_process) else {
                    let process = current_process;
                    log::error!("Failed to find own process {process:?} in system process table");
                    // TODO: Fire an error telemetry event
                    return;
                };

                let memory_event = ClickhouseEvent::Memory {
                    memory_in_bytes: process.memory(),
                    virtual_memory_in_bytes: process.virtual_memory(),
                };

                let cpu_event = ClickhouseEvent::Cpu {
                    usage_as_percentage: process.cpu_usage(),
                    core_count: system.cpus().len() as u32,
                };

                let telemetry_settings = cx.update(|cx| *settings::get::<TelemetrySettings>(cx));

                this.report_clickhouse_event(memory_event, telemetry_settings);
                this.report_clickhouse_event(cpu_event, telemetry_settings);
            }
        })
        .detach();
    }

    pub fn set_authenticated_user_info(
        self: &Arc<Self>,
        metrics_id: Option<String>,
        is_staff: bool,
        cx: &AppContext,
    ) {
        if !settings::get::<TelemetrySettings>(cx).metrics {
            return;
        }

        let mut state = self.state.lock();
        let metrics_id: Option<Arc<str>> = metrics_id.map(|id| id.into());
        state.metrics_id = metrics_id.clone();
        state.is_staff = Some(is_staff);
        drop(state);
    }

    pub fn report_clickhouse_event(
        self: &Arc<Self>,
        event: ClickhouseEvent,
        telemetry_settings: TelemetrySettings,
    ) {
        if !telemetry_settings.metrics {
            return;
        }

        let mut state = self.state.lock();
        let signed_in = state.metrics_id.is_some();
        state
            .clickhouse_events_queue
            .push(ClickhouseEventWrapper { signed_in, event });

        if state.installation_id.is_some() {
            if state.clickhouse_events_queue.len() >= MAX_QUEUE_LEN {
                drop(state);
                self.flush_clickhouse_events();
            } else {
                let this = self.clone();
                let executor = self.executor.clone();
                state.flush_clickhouse_events_task = Some(self.executor.spawn(async move {
                    executor.timer(DEBOUNCE_INTERVAL).await;
                    this.flush_clickhouse_events();
                }));
            }
        }
    }

    pub fn metrics_id(self: &Arc<Self>) -> Option<Arc<str>> {
        self.state.lock().metrics_id.clone()
    }

    pub fn installation_id(self: &Arc<Self>) -> Option<Arc<str>> {
        self.state.lock().installation_id.clone()
    }

    pub fn is_staff(self: &Arc<Self>) -> Option<bool> {
        self.state.lock().is_staff
    }

    fn flush_clickhouse_events(self: &Arc<Self>) {
        let mut state = self.state.lock();
        let mut events = mem::take(&mut state.clickhouse_events_queue);
        state.flush_clickhouse_events_task.take();
        drop(state);

        let this = self.clone();
        self.executor
            .spawn(
                async move {
                    let mut json_bytes = Vec::new();

                    if let Some(file) = &mut this.state.lock().log_file {
                        let file = file.as_file_mut();
                        for event in &mut events {
                            json_bytes.clear();
                            serde_json::to_writer(&mut json_bytes, event)?;
                            file.write_all(&json_bytes)?;
                            file.write(b"\n")?;
                        }
                    }

                    {
                        let state = this.state.lock();
                        let request_body = ClickhouseEventRequestBody {
                            token: ZED_SECRET_CLIENT_TOKEN,
                            installation_id: state.installation_id.clone(),
                            session_id: state.session_id.clone(),
                            is_staff: state.is_staff.clone(),
                            app_version: state.app_version.clone(),
                            os_name: state.os_name,
                            os_version: state.os_version.clone(),
                            architecture: state.architecture,

                            release_channel: state.release_channel,
                            events,
                        };
                        json_bytes.clear();
                        serde_json::to_writer(&mut json_bytes, &request_body)?;
                    }

                    this.http_client
                        .post_json(CLICKHOUSE_EVENTS_URL.as_str(), json_bytes.into())
                        .await?;
                    anyhow::Ok(())
                }
                .log_err(),
            )
            .detach();
    }
}
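The report/flush logic above implements a bounded, debounced queue: events flush immediately once the queue reaches MAX_QUEUE_LEN, otherwise a pending task (stored in flush_clickhouse_events_task, replacing any earlier one) flushes after DEBOUNCE_INTERVAL. A minimal, framework-free sketch of the same pattern, with illustrative names only (not part of the crate):

use std::mem;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

const MAX_QUEUE_LEN: usize = 10;
const DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30);

struct EventQueue {
    events: Mutex<Vec<String>>,
}

impl EventQueue {
    fn report(self: &Arc<Self>, event: String) {
        let mut events = self.events.lock().unwrap();
        events.push(event);
        if events.len() >= MAX_QUEUE_LEN {
            drop(events); // release the lock before flushing, as the real code does
            self.flush();
        } else {
            // Debounce: flush whatever has accumulated after the interval.
            // (The real code keeps a single replaceable task rather than a thread per event.)
            let this = Arc::clone(self);
            thread::spawn(move || {
                thread::sleep(DEBOUNCE_INTERVAL);
                this.flush();
            });
        }
    }

    fn flush(&self) {
        let events = mem::take(&mut *self.events.lock().unwrap());
        if !events.is_empty() {
            // The real implementation serializes the batch and POSTs it to /api/events.
            println!("flushing {} events", events.len());
        }
    }
}

fn main() {
    let queue = Arc::new(EventQueue { events: Mutex::new(Vec::new()) });
    for i in 0..MAX_QUEUE_LEN {
        queue.report(format!("event {i}")); // the tenth push triggers an immediate flush
    }
}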
215  crates/client2/src/test.rs  Normal file
@@ -0,0 +1,215 @@
use crate::{Client, Connection, Credentials, EstablishConnectionError, UserStore};
use anyhow::{anyhow, Result};
use futures::{stream::BoxStream, StreamExt};
use gpui::{executor, ModelHandle, TestAppContext};
use parking_lot::Mutex;
use rpc::{
    proto::{self, GetPrivateUserInfo, GetPrivateUserInfoResponse},
    ConnectionId, Peer, Receipt, TypedEnvelope,
};
use std::{rc::Rc, sync::Arc};
use util::http::FakeHttpClient;

pub struct FakeServer {
    peer: Arc<Peer>,
    state: Arc<Mutex<FakeServerState>>,
    user_id: u64,
    executor: Rc<executor::Foreground>,
}

#[derive(Default)]
struct FakeServerState {
    incoming: Option<BoxStream<'static, Box<dyn proto::AnyTypedEnvelope>>>,
    connection_id: Option<ConnectionId>,
    forbid_connections: bool,
    auth_count: usize,
    access_token: usize,
}

impl FakeServer {
    pub async fn for_client(
        client_user_id: u64,
        client: &Arc<Client>,
        cx: &TestAppContext,
    ) -> Self {
        let server = Self {
            peer: Peer::new(0),
            state: Default::default(),
            user_id: client_user_id,
            executor: cx.foreground(),
        };

        client
            .override_authenticate({
                let state = Arc::downgrade(&server.state);
                move |cx| {
                    let state = state.clone();
                    cx.spawn(move |_| async move {
                        let state = state.upgrade().ok_or_else(|| anyhow!("server dropped"))?;
                        let mut state = state.lock();
                        state.auth_count += 1;
                        let access_token = state.access_token.to_string();
                        Ok(Credentials {
                            user_id: client_user_id,
                            access_token,
                        })
                    })
                }
            })
            .override_establish_connection({
                let peer = Arc::downgrade(&server.peer);
                let state = Arc::downgrade(&server.state);
                move |credentials, cx| {
                    let peer = peer.clone();
                    let state = state.clone();
                    let credentials = credentials.clone();
                    cx.spawn(move |cx| async move {
                        let state = state.upgrade().ok_or_else(|| anyhow!("server dropped"))?;
                        let peer = peer.upgrade().ok_or_else(|| anyhow!("server dropped"))?;
                        if state.lock().forbid_connections {
                            Err(EstablishConnectionError::Other(anyhow!(
                                "server is forbidding connections"
                            )))?
                        }

                        assert_eq!(credentials.user_id, client_user_id);

                        if credentials.access_token != state.lock().access_token.to_string() {
                            Err(EstablishConnectionError::Unauthorized)?
                        }

                        let (client_conn, server_conn, _) = Connection::in_memory(cx.background());
                        let (connection_id, io, incoming) =
                            peer.add_test_connection(server_conn, cx.background());
                        cx.background().spawn(io).detach();
                        {
                            let mut state = state.lock();
                            state.connection_id = Some(connection_id);
                            state.incoming = Some(incoming);
                        }
                        peer.send(
                            connection_id,
                            proto::Hello {
                                peer_id: Some(connection_id.into()),
                            },
                        )
                        .unwrap();

                        Ok(client_conn)
                    })
                }
            });

        client
            .authenticate_and_connect(false, &cx.to_async())
            .await
            .unwrap();

        server
    }

    pub fn disconnect(&self) {
        if self.state.lock().connection_id.is_some() {
            self.peer.disconnect(self.connection_id());
            let mut state = self.state.lock();
            state.connection_id.take();
            state.incoming.take();
        }
    }

    pub fn auth_count(&self) -> usize {
        self.state.lock().auth_count
    }

    pub fn roll_access_token(&self) {
        self.state.lock().access_token += 1;
    }

    pub fn forbid_connections(&self) {
        self.state.lock().forbid_connections = true;
    }

    pub fn allow_connections(&self) {
        self.state.lock().forbid_connections = false;
    }

    pub fn send<T: proto::EnvelopedMessage>(&self, message: T) {
        self.peer.send(self.connection_id(), message).unwrap();
    }

    #[allow(clippy::await_holding_lock)]
    pub async fn receive<M: proto::EnvelopedMessage>(&self) -> Result<TypedEnvelope<M>> {
        self.executor.start_waiting();

        loop {
            let message = self
                .state
                .lock()
                .incoming
                .as_mut()
                .expect("not connected")
                .next()
                .await
                .ok_or_else(|| anyhow!("other half hung up"))?;
            self.executor.finish_waiting();
            let type_name = message.payload_type_name();
            let message = message.into_any();

            if message.is::<TypedEnvelope<M>>() {
                return Ok(*message.downcast().unwrap());
            }

            if message.is::<TypedEnvelope<GetPrivateUserInfo>>() {
                self.respond(
                    message
                        .downcast::<TypedEnvelope<GetPrivateUserInfo>>()
                        .unwrap()
                        .receipt(),
                    GetPrivateUserInfoResponse {
                        metrics_id: "the-metrics-id".into(),
                        staff: false,
                        flags: Default::default(),
                    },
                );
                continue;
            }

            panic!(
                "fake server received unexpected message type: {:?}",
                type_name
            );
        }
    }

    pub fn respond<T: proto::RequestMessage>(&self, receipt: Receipt<T>, response: T::Response) {
        self.peer.respond(receipt, response).unwrap()
    }

    fn connection_id(&self) -> ConnectionId {
        self.state.lock().connection_id.expect("not connected")
    }

    pub async fn build_user_store(
        &self,
        client: Arc<Client>,
        cx: &mut TestAppContext,
    ) -> ModelHandle<UserStore> {
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client, http_client, cx));
        assert_eq!(
            self.receive::<proto::GetUsers>()
                .await
                .unwrap()
                .payload
                .user_ids,
            &[self.user_id]
        );
        user_store
    }
}

impl Drop for FakeServer {
    fn drop(&mut self) {
        self.disconnect();
    }
}
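FakeServer works by swapping the client's real authentication and connection logic for closures that talk to an in-memory peer, which is why override_authenticate and override_establish_connection can be chained in for_client above. A stripped-down sketch of that override pattern (the struct and names here are illustrative, not the client2 API):

type AuthFn = Box<dyn Fn() -> Result<String, String>>;

struct OverridableClient {
    auth_override: Option<AuthFn>,
}

impl OverridableClient {
    fn override_authenticate(&mut self, f: impl Fn() -> Result<String, String> + 'static) -> &mut Self {
        self.auth_override = Some(Box::new(f));
        self // returning &mut Self lets overrides chain, as in for_client above
    }

    fn authenticate(&self) -> Result<String, String> {
        match &self.auth_override {
            Some(f) => f(),                       // test path: the fake server's closure
            None => Ok("real-token".to_string()), // production path
        }
    }
}

fn main() {
    let mut client = OverridableClient { auth_override: None };
    client.override_authenticate(|| Ok("token-0".to_string()));
    assert_eq!(client.authenticate(), Ok("token-0".to_string()));
}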
737  crates/client2/src/user.rs  Normal file
@@ -0,0 +1,737 @@
use super::{proto, Client, Status, TypedEnvelope};
use anyhow::{anyhow, Context, Result};
use collections::{hash_map::Entry, HashMap, HashSet};
use feature_flags::FeatureFlagAppExt;
use futures::{channel::mpsc, future, AsyncReadExt, Future, StreamExt};
use gpui::{AsyncAppContext, Entity, ImageData, ModelContext, ModelHandle, Task};
use postage::{sink::Sink, watch};
use rpc::proto::{RequestMessage, UsersResponse};
use std::sync::{Arc, Weak};
use text::ReplicaId;
use util::http::HttpClient;
use util::TryFutureExt as _;

pub type UserId = u64;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ParticipantIndex(pub u32);

#[derive(Default, Debug)]
pub struct User {
    pub id: UserId,
    pub github_login: String,
    pub avatar: Option<Arc<ImageData>>,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Collaborator {
    pub peer_id: proto::PeerId,
    pub replica_id: ReplicaId,
    pub user_id: UserId,
}

impl PartialOrd for User {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for User {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.github_login.cmp(&other.github_login)
    }
}

impl PartialEq for User {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id && self.github_login == other.github_login
    }
}

impl Eq for User {}

#[derive(Debug, PartialEq)]
pub struct Contact {
    pub user: Arc<User>,
    pub online: bool,
    pub busy: bool,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ContactRequestStatus {
    None,
    RequestSent,
    RequestReceived,
    RequestAccepted,
}

pub struct UserStore {
    users: HashMap<u64, Arc<User>>,
    participant_indices: HashMap<u64, ParticipantIndex>,
    update_contacts_tx: mpsc::UnboundedSender<UpdateContacts>,
    current_user: watch::Receiver<Option<Arc<User>>>,
    contacts: Vec<Arc<Contact>>,
    incoming_contact_requests: Vec<Arc<User>>,
    outgoing_contact_requests: Vec<Arc<User>>,
    pending_contact_requests: HashMap<u64, usize>,
    invite_info: Option<InviteInfo>,
    client: Weak<Client>,
    http: Arc<dyn HttpClient>,
    _maintain_contacts: Task<()>,
    _maintain_current_user: Task<()>,
}

#[derive(Clone)]
pub struct InviteInfo {
    pub count: u32,
    pub url: Arc<str>,
}

pub enum Event {
    Contact {
        user: Arc<User>,
        kind: ContactEventKind,
    },
    ShowContacts,
    ParticipantIndicesChanged,
}

#[derive(Clone, Copy)]
pub enum ContactEventKind {
    Requested,
    Accepted,
    Cancelled,
}

impl Entity for UserStore {
    type Event = Event;
}

enum UpdateContacts {
    Update(proto::UpdateContacts),
    Wait(postage::barrier::Sender),
    Clear(postage::barrier::Sender),
}

impl UserStore {
    pub fn new(
        client: Arc<Client>,
        http: Arc<dyn HttpClient>,
        cx: &mut ModelContext<Self>,
    ) -> Self {
        let (mut current_user_tx, current_user_rx) = watch::channel();
        let (update_contacts_tx, mut update_contacts_rx) = mpsc::unbounded();
        let rpc_subscriptions = vec![
            client.add_message_handler(cx.handle(), Self::handle_update_contacts),
            client.add_message_handler(cx.handle(), Self::handle_update_invite_info),
            client.add_message_handler(cx.handle(), Self::handle_show_contacts),
        ];
        Self {
            users: Default::default(),
            current_user: current_user_rx,
            contacts: Default::default(),
            incoming_contact_requests: Default::default(),
            participant_indices: Default::default(),
            outgoing_contact_requests: Default::default(),
            invite_info: None,
            client: Arc::downgrade(&client),
            update_contacts_tx,
            http,
            _maintain_contacts: cx.spawn_weak(|this, mut cx| async move {
                let _subscriptions = rpc_subscriptions;
                while let Some(message) = update_contacts_rx.next().await {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| this.update_contacts(message, cx))
                            .log_err()
                            .await;
                    }
                }
            }),
            _maintain_current_user: cx.spawn_weak(|this, mut cx| async move {
                let mut status = client.status();
                while let Some(status) = status.next().await {
                    match status {
                        Status::Connected { .. } => {
                            if let Some((this, user_id)) = this.upgrade(&cx).zip(client.user_id()) {
                                let fetch_user = this
                                    .update(&mut cx, |this, cx| this.get_user(user_id, cx))
                                    .log_err();
                                let fetch_metrics_id =
                                    client.request(proto::GetPrivateUserInfo {}).log_err();
                                let (user, info) = futures::join!(fetch_user, fetch_metrics_id);

                                if let Some(info) = info {
                                    cx.update(|cx| {
                                        cx.update_flags(info.staff, info.flags);
                                        client.telemetry.set_authenticated_user_info(
                                            Some(info.metrics_id.clone()),
                                            info.staff,
                                            cx,
                                        )
                                    });
                                } else {
                                    cx.read(|cx| {
                                        client
                                            .telemetry
                                            .set_authenticated_user_info(None, false, cx)
                                    });
                                }

                                current_user_tx.send(user).await.ok();

                                this.update(&mut cx, |_, cx| {
                                    cx.notify();
                                });
                            }
                        }
                        Status::SignedOut => {
                            current_user_tx.send(None).await.ok();
                            if let Some(this) = this.upgrade(&cx) {
                                this.update(&mut cx, |this, cx| {
                                    cx.notify();
                                    this.clear_contacts()
                                })
                                .await;
                            }
                        }
                        Status::ConnectionLost => {
                            if let Some(this) = this.upgrade(&cx) {
                                this.update(&mut cx, |this, cx| {
                                    cx.notify();
                                    this.clear_contacts()
                                })
                                .await;
                            }
                        }
                        _ => {}
                    }
                }
            }),
            pending_contact_requests: Default::default(),
        }
    }

    #[cfg(feature = "test-support")]
    pub fn clear_cache(&mut self) {
        self.users.clear();
    }

    async fn handle_update_invite_info(
        this: ModelHandle<Self>,
        message: TypedEnvelope<proto::UpdateInviteInfo>,
        _: Arc<Client>,
        mut cx: AsyncAppContext,
    ) -> Result<()> {
        this.update(&mut cx, |this, cx| {
            this.invite_info = Some(InviteInfo {
                url: Arc::from(message.payload.url),
                count: message.payload.count,
            });
            cx.notify();
        });
        Ok(())
    }

    async fn handle_show_contacts(
        this: ModelHandle<Self>,
        _: TypedEnvelope<proto::ShowContacts>,
        _: Arc<Client>,
        mut cx: AsyncAppContext,
    ) -> Result<()> {
        this.update(&mut cx, |_, cx| cx.emit(Event::ShowContacts));
        Ok(())
    }

    pub fn invite_info(&self) -> Option<&InviteInfo> {
        self.invite_info.as_ref()
    }

    async fn handle_update_contacts(
        this: ModelHandle<Self>,
        message: TypedEnvelope<proto::UpdateContacts>,
        _: Arc<Client>,
        mut cx: AsyncAppContext,
    ) -> Result<()> {
        this.update(&mut cx, |this, _| {
            this.update_contacts_tx
                .unbounded_send(UpdateContacts::Update(message.payload))
                .unwrap();
        });
        Ok(())
    }

    fn update_contacts(
        &mut self,
        message: UpdateContacts,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<()>> {
        match message {
            UpdateContacts::Wait(barrier) => {
                drop(barrier);
                Task::ready(Ok(()))
            }
            UpdateContacts::Clear(barrier) => {
                self.contacts.clear();
                self.incoming_contact_requests.clear();
                self.outgoing_contact_requests.clear();
                drop(barrier);
                Task::ready(Ok(()))
            }
            UpdateContacts::Update(message) => {
                let mut user_ids = HashSet::default();
                for contact in &message.contacts {
                    user_ids.insert(contact.user_id);
                }
                user_ids.extend(message.incoming_requests.iter().map(|req| req.requester_id));
                user_ids.extend(message.outgoing_requests.iter());

                let load_users = self.get_users(user_ids.into_iter().collect(), cx);
                cx.spawn(|this, mut cx| async move {
                    load_users.await?;

                    // Users are fetched in parallel above and cached in the call to get_users.
                    // No need to parallelize here.
                    let mut updated_contacts = Vec::new();
                    for contact in message.contacts {
                        let should_notify = contact.should_notify;
                        updated_contacts.push((
                            Arc::new(Contact::from_proto(contact, &this, &mut cx).await?),
                            should_notify,
                        ));
                    }

                    let mut incoming_requests = Vec::new();
                    for request in message.incoming_requests {
                        incoming_requests.push({
                            let user = this
                                .update(&mut cx, |this, cx| this.get_user(request.requester_id, cx))
                                .await?;
                            (user, request.should_notify)
                        });
                    }

                    let mut outgoing_requests = Vec::new();
                    for requested_user_id in message.outgoing_requests {
                        outgoing_requests.push(
                            this.update(&mut cx, |this, cx| this.get_user(requested_user_id, cx))
                                .await?,
                        );
                    }

                    let removed_contacts =
                        HashSet::<u64>::from_iter(message.remove_contacts.iter().copied());
                    let removed_incoming_requests =
                        HashSet::<u64>::from_iter(message.remove_incoming_requests.iter().copied());
                    let removed_outgoing_requests =
                        HashSet::<u64>::from_iter(message.remove_outgoing_requests.iter().copied());

                    this.update(&mut cx, |this, cx| {
                        // Remove contacts
                        this.contacts
                            .retain(|contact| !removed_contacts.contains(&contact.user.id));
                        // Update existing contacts and insert new ones
                        for (updated_contact, should_notify) in updated_contacts {
                            if should_notify {
                                cx.emit(Event::Contact {
                                    user: updated_contact.user.clone(),
                                    kind: ContactEventKind::Accepted,
                                });
                            }
                            match this.contacts.binary_search_by_key(
                                &&updated_contact.user.github_login,
                                |contact| &contact.user.github_login,
                            ) {
                                Ok(ix) => this.contacts[ix] = updated_contact,
                                Err(ix) => this.contacts.insert(ix, updated_contact),
                            }
                        }

                        // Remove incoming contact requests
                        this.incoming_contact_requests.retain(|user| {
                            if removed_incoming_requests.contains(&user.id) {
                                cx.emit(Event::Contact {
                                    user: user.clone(),
                                    kind: ContactEventKind::Cancelled,
                                });
                                false
                            } else {
                                true
                            }
                        });
                        // Update existing incoming requests and insert new ones
                        for (user, should_notify) in incoming_requests {
                            if should_notify {
                                cx.emit(Event::Contact {
                                    user: user.clone(),
                                    kind: ContactEventKind::Requested,
                                });
                            }

                            match this
                                .incoming_contact_requests
                                .binary_search_by_key(&&user.github_login, |contact| {
                                    &contact.github_login
                                }) {
                                Ok(ix) => this.incoming_contact_requests[ix] = user,
                                Err(ix) => this.incoming_contact_requests.insert(ix, user),
                            }
                        }

                        // Remove outgoing contact requests
                        this.outgoing_contact_requests
                            .retain(|user| !removed_outgoing_requests.contains(&user.id));
                        // Update existing outgoing requests and insert new ones
                        for request in outgoing_requests {
                            match this
                                .outgoing_contact_requests
                                .binary_search_by_key(&&request.github_login, |contact| {
                                    &contact.github_login
                                }) {
                                Ok(ix) => this.outgoing_contact_requests[ix] = request,
                                Err(ix) => this.outgoing_contact_requests.insert(ix, request),
                            }
                        }

                        cx.notify();
                    });

                    Ok(())
                })
            }
        }
    }

    pub fn contacts(&self) -> &[Arc<Contact>] {
        &self.contacts
    }

    pub fn has_contact(&self, user: &Arc<User>) -> bool {
        self.contacts
            .binary_search_by_key(&&user.github_login, |contact| &contact.user.github_login)
            .is_ok()
    }

    pub fn incoming_contact_requests(&self) -> &[Arc<User>] {
        &self.incoming_contact_requests
    }

    pub fn outgoing_contact_requests(&self) -> &[Arc<User>] {
        &self.outgoing_contact_requests
    }

    pub fn is_contact_request_pending(&self, user: &User) -> bool {
        self.pending_contact_requests.contains_key(&user.id)
    }

    pub fn contact_request_status(&self, user: &User) -> ContactRequestStatus {
        if self
            .contacts
            .binary_search_by_key(&&user.github_login, |contact| &contact.user.github_login)
            .is_ok()
        {
            ContactRequestStatus::RequestAccepted
        } else if self
            .outgoing_contact_requests
            .binary_search_by_key(&&user.github_login, |user| &user.github_login)
            .is_ok()
        {
            ContactRequestStatus::RequestSent
        } else if self
            .incoming_contact_requests
            .binary_search_by_key(&&user.github_login, |user| &user.github_login)
            .is_ok()
        {
            ContactRequestStatus::RequestReceived
        } else {
            ContactRequestStatus::None
        }
    }

    pub fn request_contact(
        &mut self,
        responder_id: u64,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<()>> {
        self.perform_contact_request(responder_id, proto::RequestContact { responder_id }, cx)
    }

    pub fn remove_contact(
        &mut self,
        user_id: u64,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<()>> {
        self.perform_contact_request(user_id, proto::RemoveContact { user_id }, cx)
    }

    pub fn respond_to_contact_request(
        &mut self,
        requester_id: u64,
        accept: bool,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<()>> {
        self.perform_contact_request(
            requester_id,
            proto::RespondToContactRequest {
                requester_id,
                response: if accept {
                    proto::ContactRequestResponse::Accept
                } else {
                    proto::ContactRequestResponse::Decline
                } as i32,
            },
            cx,
        )
    }

    pub fn dismiss_contact_request(
        &mut self,
        requester_id: u64,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<()>> {
        let client = self.client.upgrade();
        cx.spawn_weak(|_, _| async move {
            client
                .ok_or_else(|| anyhow!("can't upgrade client reference"))?
                .request(proto::RespondToContactRequest {
                    requester_id,
                    response: proto::ContactRequestResponse::Dismiss as i32,
                })
                .await?;
            Ok(())
        })
    }

    fn perform_contact_request<T: RequestMessage>(
        &mut self,
        user_id: u64,
        request: T,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<()>> {
        let client = self.client.upgrade();
        *self.pending_contact_requests.entry(user_id).or_insert(0) += 1;
        cx.notify();

        cx.spawn(|this, mut cx| async move {
            let response = client
                .ok_or_else(|| anyhow!("can't upgrade client reference"))?
                .request(request)
                .await;
            this.update(&mut cx, |this, cx| {
                if let Entry::Occupied(mut request_count) =
                    this.pending_contact_requests.entry(user_id)
                {
                    *request_count.get_mut() -= 1;
                    if *request_count.get() == 0 {
                        request_count.remove();
                    }
                }
                cx.notify();
            });
            response?;
            Ok(())
        })
    }

    pub fn clear_contacts(&mut self) -> impl Future<Output = ()> {
        let (tx, mut rx) = postage::barrier::channel();
        self.update_contacts_tx
            .unbounded_send(UpdateContacts::Clear(tx))
            .unwrap();
        async move {
            rx.next().await;
        }
    }

    pub fn contact_updates_done(&mut self) -> impl Future<Output = ()> {
        let (tx, mut rx) = postage::barrier::channel();
        self.update_contacts_tx
            .unbounded_send(UpdateContacts::Wait(tx))
            .unwrap();
        async move {
            rx.next().await;
        }
    }

    pub fn get_users(
        &mut self,
        user_ids: Vec<u64>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<Vec<Arc<User>>>> {
        let mut user_ids_to_fetch = user_ids.clone();
        user_ids_to_fetch.retain(|id| !self.users.contains_key(id));

        cx.spawn(|this, mut cx| async move {
            if !user_ids_to_fetch.is_empty() {
                this.update(&mut cx, |this, cx| {
                    this.load_users(
                        proto::GetUsers {
                            user_ids: user_ids_to_fetch,
                        },
                        cx,
                    )
                })
                .await?;
            }

            this.read_with(&cx, |this, _| {
                user_ids
                    .iter()
                    .map(|user_id| {
                        this.users
                            .get(user_id)
                            .cloned()
                            .ok_or_else(|| anyhow!("user {} not found", user_id))
                    })
                    .collect()
            })
        })
    }

    pub fn fuzzy_search_users(
        &mut self,
        query: String,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<Vec<Arc<User>>>> {
        self.load_users(proto::FuzzySearchUsers { query }, cx)
    }

    pub fn get_cached_user(&self, user_id: u64) -> Option<Arc<User>> {
        self.users.get(&user_id).cloned()
    }

    pub fn get_user(
        &mut self,
        user_id: u64,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<Arc<User>>> {
        if let Some(user) = self.users.get(&user_id).cloned() {
            return cx.foreground().spawn(async move { Ok(user) });
        }

        let load_users = self.get_users(vec![user_id], cx);
        cx.spawn(|this, mut cx| async move {
            load_users.await?;
            this.update(&mut cx, |this, _| {
                this.users
                    .get(&user_id)
                    .cloned()
                    .ok_or_else(|| anyhow!("server responded with no users"))
            })
        })
    }

    pub fn current_user(&self) -> Option<Arc<User>> {
        self.current_user.borrow().clone()
    }

    pub fn watch_current_user(&self) -> watch::Receiver<Option<Arc<User>>> {
        self.current_user.clone()
    }

    fn load_users(
        &mut self,
        request: impl RequestMessage<Response = UsersResponse>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<Vec<Arc<User>>>> {
        let client = self.client.clone();
        let http = self.http.clone();
        cx.spawn_weak(|this, mut cx| async move {
            if let Some(rpc) = client.upgrade() {
                let response = rpc.request(request).await.context("error loading users")?;
                let users = future::join_all(
                    response
                        .users
                        .into_iter()
                        .map(|user| User::new(user, http.as_ref())),
                )
                .await;

                if let Some(this) = this.upgrade(&cx) {
                    this.update(&mut cx, |this, _| {
                        for user in &users {
                            this.users.insert(user.id, user.clone());
                        }
                    });
                }
                Ok(users)
            } else {
                Ok(Vec::new())
            }
        })
    }

    pub fn set_participant_indices(
        &mut self,
        participant_indices: HashMap<u64, ParticipantIndex>,
        cx: &mut ModelContext<Self>,
    ) {
        if participant_indices != self.participant_indices {
            self.participant_indices = participant_indices;
            cx.emit(Event::ParticipantIndicesChanged);
        }
    }

    pub fn participant_indices(&self) -> &HashMap<u64, ParticipantIndex> {
        &self.participant_indices
    }
}

impl User {
    async fn new(message: proto::User, http: &dyn HttpClient) -> Arc<Self> {
        Arc::new(User {
            id: message.id,
            github_login: message.github_login,
            avatar: fetch_avatar(http, &message.avatar_url).warn_on_err().await,
        })
    }
}

impl Contact {
    async fn from_proto(
        contact: proto::Contact,
        user_store: &ModelHandle<UserStore>,
        cx: &mut AsyncAppContext,
    ) -> Result<Self> {
        let user = user_store
            .update(cx, |user_store, cx| {
                user_store.get_user(contact.user_id, cx)
            })
            .await?;
        Ok(Self {
            user,
            online: contact.online,
            busy: contact.busy,
        })
    }
}

impl Collaborator {
    pub fn from_proto(message: proto::Collaborator) -> Result<Self> {
        Ok(Self {
            peer_id: message.peer_id.ok_or_else(|| anyhow!("invalid peer id"))?,
            replica_id: message.replica_id as ReplicaId,
            user_id: message.user_id as UserId,
        })
    }
}

async fn fetch_avatar(http: &dyn HttpClient, url: &str) -> Result<Arc<ImageData>> {
    let mut response = http
        .get(url, Default::default(), true)
        .await
        .map_err(|e| anyhow!("failed to send user avatar request: {}", e))?;

    if !response.status().is_success() {
        return Err(anyhow!("avatar request failed {:?}", response.status()));
    }

    let mut body = Vec::new();
    response
        .body_mut()
        .read_to_end(&mut body)
        .await
        .map_err(|e| anyhow!("failed to read user avatar response body: {}", e))?;
    let format = image::guess_format(&body)?;
    let image = image::load_from_memory_with_format(&body, format)?.into_bgra8();
    Ok(ImageData::new(image))
}
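UserStore keeps its contact and request lists sorted by github_login, so membership checks are binary searches and updates either overwrite in place (Ok(ix)) or insert at the sort position (Err(ix)). The idiom in isolation, as a standalone sketch:

// Standalone illustration of the sorted-upsert idiom used above.
fn upsert_sorted(contacts: &mut Vec<(String, u64)>, login: &str, id: u64) {
    match contacts.binary_search_by_key(&login, |(login, _)| login.as_str()) {
        Ok(ix) => contacts[ix] = (login.to_string(), id),        // update the existing entry
        Err(ix) => contacts.insert(ix, (login.to_string(), id)), // insert, keeping sort order
    }
}

fn main() {
    let mut contacts = vec![("alice".to_string(), 1), ("carol".to_string(), 3)];
    upsert_sorted(&mut contacts, "bob", 2);
    assert_eq!(contacts[1].0, "bob"); // lands between alice and carol
}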
@@ -11,8 +11,8 @@ use smallvec::SmallVec;
use crate::{
    current_platform, image_cache::ImageCache, Action, AssetSource, Context, DisplayId, Executor,
    FocusEvent, FocusHandle, FocusId, KeyBinding, Keymap, LayoutId, MainThread, MainThreadOnly,
    Platform, SharedString, SubscriberSet, SvgRenderer, Task, TextStyle, TextStyleRefinement,
    TextSystem, View, Window, WindowContext, WindowHandle, WindowId,
    Platform, SemanticVersion, SharedString, SubscriberSet, SvgRenderer, Task, TextStyle,
    TextStyleRefinement, TextSystem, View, Window, WindowContext, WindowHandle, WindowId,
};
use anyhow::{anyhow, Result};
use collections::{HashMap, HashSet, VecDeque};

@@ -125,6 +125,18 @@ impl App {
        self
    }

    pub fn app_version(&self) -> Result<SemanticVersion> {
        self.0.lock().platform.borrow_on_main_thread().app_version()
    }

    pub fn os_name(&self) -> &'static str {
        self.0.lock().platform.borrow_on_main_thread().os_name()
    }

    pub fn os_version(&self) -> Result<SemanticVersion> {
        self.0.lock().platform.borrow_on_main_thread().os_version()
    }

    pub fn executor(&self) -> Executor {
        self.0.lock().executor.clone()
    }

@@ -348,7 +360,7 @@ impl AppContext {
        .ok();
    }

    pub fn apply_refresh(&mut self) {
    fn apply_refresh(&mut self) {
        for window in self.windows.values_mut() {
            if let Some(window) = window.as_mut() {
                window.dirty = true;
@@ -27,7 +27,7 @@ collections = { path = "../collections" }
# command_palette = { path = "../command_palette" }
# component_test = { path = "../component_test" }
# context_menu = { path = "../context_menu" }
# client = { path = "../client" }
client2 = { path = "../client2" }
# clock = { path = "../clock" }
# copilot = { path = "../copilot" }
# copilot_button = { path = "../copilot_button" }
@@ -3,13 +3,15 @@

use crate::open_listener::{OpenListener, OpenRequest};
use anyhow::{anyhow, Context, Result};
use backtrace::Backtrace;
use cli::{
    ipc::{self, IpcSender},
    CliRequest, CliResponse, IpcHandshake, FORCE_CLI_MODE_ENV_VAR_NAME,
};
use fs::RealFs;
use futures::{channel::mpsc, SinkExt, StreamExt};
use gpui2::{App, AssetSource, AsyncAppContext, Task};
use gpui2::{App, AppContext, AssetSource, AsyncAppContext, SemanticVersion, Task};
use isahc::{prelude::Configurable, Request};
use log::LevelFilter;

use parking_lot::Mutex;

@@ -19,17 +21,21 @@ use simplelog::ConfigBuilder;
use smol::process::Command;
use std::{
    env,
    ffi::OsStr,
    fs::OpenOptions,
    io::IsTerminal,
    io::{IsTerminal, Write},
    panic,
    path::Path,
    sync::{
        atomic::{AtomicU32, Ordering},
        Arc,
    },
    thread,
    time::{SystemTime, UNIX_EPOCH},
};
use util::{
    channel::{parse_zed_link, RELEASE_CHANNEL},
    channel::{parse_zed_link, ReleaseChannel, RELEASE_CHANNEL},
    http::HttpClient,
    paths, ResultExt,
};
use zed2::{ensure_only_instance, AppState, Assets, IsOnlyInstance};

@@ -75,8 +81,8 @@ fn main() {

    let (listener, mut open_rx) = OpenListener::new();
    let listener = Arc::new(listener);
    let callback_listener = listener.clone();
    app.on_open_urls(move |urls, _| callback_listener.open_urls(urls));
    let open_listener = listener.clone();
    app.on_open_urls(move |urls, _| open_listener.open_urls(urls));
    app.on_reopen(move |_cx| {
        // todo!("workspace")
        // if cx.has_global::<Weak<AppState>>() {

@@ -394,192 +400,184 @@ struct PanicRequest {
    token: String,
}

static _PANIC_COUNT: AtomicU32 = AtomicU32::new(0);
static PANIC_COUNT: AtomicU32 = AtomicU32::new(0);

// fn init_panic_hook(app: &App, installation_id: Option<String>, session_id: String) {
//     let is_pty = stdout_is_a_pty();
//     let platform = app.platform();
fn init_panic_hook(app: &App, installation_id: Option<String>, session_id: String) {
    let is_pty = stdout_is_a_pty();
    let app_version = app.app_version().ok();
    let os_name = app.os_name();
    let os_version = app.os_version().ok();

    // panic::set_hook(Box::new(move |info| {
    //     let prior_panic_count = PANIC_COUNT.fetch_add(1, Ordering::SeqCst);
    //     if prior_panic_count > 0 {
    //         // Give the panicking thread time to write the panic file
    //         loop {
    //             std::thread::yield_now();
    //         }
    //     }
    panic::set_hook(Box::new(move |info| {
        let prior_panic_count = PANIC_COUNT.fetch_add(1, Ordering::SeqCst);
        if prior_panic_count > 0 {
            // Give the panicking thread time to write the panic file
            loop {
                std::thread::yield_now();
            }
        }

        // let thread = thread::current();
        // let thread_name = thread.name().unwrap_or("<unnamed>");
        let thread = thread::current();
        let thread_name = thread.name().unwrap_or("<unnamed>");

        // let payload = info
        //     .payload()
        //     .downcast_ref::<&str>()
        //     .map(|s| s.to_string())
        //     .or_else(|| info.payload().downcast_ref::<String>().map(|s| s.clone()))
        //     .unwrap_or_else(|| "Box<Any>".to_string());
        let payload = info
            .payload()
            .downcast_ref::<&str>()
            .map(|s| s.to_string())
            .or_else(|| info.payload().downcast_ref::<String>().map(|s| s.clone()))
            .unwrap_or_else(|| "Box<Any>".to_string());

        // if *util::channel::RELEASE_CHANNEL == ReleaseChannel::Dev {
        //     let location = info.location().unwrap();
        //     let backtrace = Backtrace::new();
        //     eprintln!(
        //         "Thread {:?} panicked with {:?} at {}:{}:{}\n{:?}",
        //         thread_name,
        //         payload,
        //         location.file(),
        //         location.line(),
        //         location.column(),
        //         backtrace,
        //     );
        //     std::process::exit(-1);
        // }
        if *util::channel::RELEASE_CHANNEL == ReleaseChannel::Dev {
            let location = info.location().unwrap();
            let backtrace = Backtrace::new();
            eprintln!(
                "Thread {:?} panicked with {:?} at {}:{}:{}\n{:?}",
                thread_name,
                payload,
                location.file(),
                location.line(),
                location.column(),
                backtrace,
            );
            std::process::exit(-1);
        }

        // let app_version = ZED_APP_VERSION
        //     .or_else(|| platform.app_version().ok())
        //     .map_or("dev".to_string(), |v| v.to_string());
        let app_version = client::ZED_APP_VERSION
            .or(app_version)
            .map_or("dev".to_string(), |v| v.to_string());

        // let backtrace = Backtrace::new();
        // let mut backtrace = backtrace
        //     .frames()
        //     .iter()
        //     .filter_map(|frame| Some(format!("{:#}", frame.symbols().first()?.name()?)))
        //     .collect::<Vec<_>>();
        let backtrace = Backtrace::new();
        let mut backtrace = backtrace
            .frames()
            .iter()
            .filter_map(|frame| Some(format!("{:#}", frame.symbols().first()?.name()?)))
            .collect::<Vec<_>>();

        // // Strip out leading stack frames for rust panic-handling.
        // if let Some(ix) = backtrace
        //     .iter()
        //     .position(|name| name == "rust_begin_unwind")
        // {
        //     backtrace.drain(0..=ix);
        // }
        // Strip out leading stack frames for rust panic-handling.
        if let Some(ix) = backtrace
            .iter()
            .position(|name| name == "rust_begin_unwind")
        {
            backtrace.drain(0..=ix);
        }

        // let panic_data = Panic {
        //     thread: thread_name.into(),
        //     payload: payload.into(),
        //     location_data: info.location().map(|location| LocationData {
        //         file: location.file().into(),
        //         line: location.line(),
        //     }),
        //     app_version: app_version.clone(),
        //     release_channel: RELEASE_CHANNEL.display_name().into(),
        //     os_name: platform.os_name().into(),
        //     os_version: platform
        //         .os_version()
        //         .ok()
        //         .map(|os_version| os_version.to_string()),
        //     architecture: env::consts::ARCH.into(),
        //     panicked_on: SystemTime::now()
        //         .duration_since(UNIX_EPOCH)
        //         .unwrap()
        //         .as_millis(),
        //     backtrace,
        //     installation_id: installation_id.clone(),
        //     session_id: session_id.clone(),
        // };
        let panic_data = Panic {
            thread: thread_name.into(),
            payload: payload.into(),
            location_data: info.location().map(|location| LocationData {
                file: location.file().into(),
                line: location.line(),
            }),
            app_version: app_version.clone(),
            release_channel: RELEASE_CHANNEL.display_name().into(),
            os_name: os_name.into(),
            os_version: os_version.as_ref().map(SemanticVersion::to_string),
            architecture: env::consts::ARCH.into(),
            panicked_on: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_millis(),
            backtrace,
            installation_id: installation_id.clone(),
            session_id: session_id.clone(),
        };

        // if let Some(panic_data_json) = serde_json::to_string_pretty(&panic_data).log_err() {
        //     log::error!("{}", panic_data_json);
        // }
        if let Some(panic_data_json) = serde_json::to_string_pretty(&panic_data).log_err() {
            log::error!("{}", panic_data_json);
        }

        // if !is_pty {
        //     if let Some(panic_data_json) = serde_json::to_string(&panic_data).log_err() {
        //         let timestamp = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string();
        //         let panic_file_path = paths::LOGS_DIR.join(format!("zed-{}.panic", timestamp));
        //         let panic_file = std::fs::OpenOptions::new()
        //             .append(true)
        //             .create(true)
        //             .open(&panic_file_path)
        //             .log_err();
        //         if let Some(mut panic_file) = panic_file {
        //             writeln!(&mut panic_file, "{}", panic_data_json).log_err();
        //             panic_file.flush().log_err();
        //         }
        //     }
        // }
        if !is_pty {
            if let Some(panic_data_json) = serde_json::to_string(&panic_data).log_err() {
                let timestamp = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string();
                let panic_file_path = paths::LOGS_DIR.join(format!("zed-{}.panic", timestamp));
                let panic_file = std::fs::OpenOptions::new()
                    .append(true)
                    .create(true)
                    .open(&panic_file_path)
                    .log_err();
                if let Some(mut panic_file) = panic_file {
                    writeln!(&mut panic_file, "{}", panic_data_json).log_err();
                    panic_file.flush().log_err();
                }
            }
        }

    //     std::process::abort();
    // }));
    // }
        std::process::abort();
    }));
}

// fn upload_previous_panics(http: Arc<dyn HttpClient>, cx: &mut AppContext) {
//     let telemetry_settings = *settings::get::<TelemetrySettings>(cx);
fn upload_previous_panics(http: Arc<dyn HttpClient>, cx: &mut AppContext) {
    let telemetry_settings = *settings2::get::<client::TelemetrySettings>(cx);

    // cx.background()
    //     .spawn({
    //         async move {
    //             let panic_report_url = format!("{}/api/panic", &*client::ZED_SERVER_URL);
    //             let mut children = smol::fs::read_dir(&*paths::LOGS_DIR).await?;
    //             while let Some(child) = children.next().await {
    //                 let child = child?;
    //                 let child_path = child.path();
    cx.executor()
        .spawn(async move {
            let panic_report_url = format!("{}/api/panic", &*client::ZED_SERVER_URL);
            let mut children = smol::fs::read_dir(&*paths::LOGS_DIR).await?;
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                // if child_path.extension() != Some(OsStr::new("panic")) {
                //     continue;
                // }
                // let filename = if let Some(filename) = child_path.file_name() {
                //     filename.to_string_lossy()
                // } else {
                //     continue;
                // };
                if child_path.extension() != Some(OsStr::new("panic")) {
                    continue;
                }
                let filename = if let Some(filename) = child_path.file_name() {
                    filename.to_string_lossy()
                } else {
                    continue;
                };

                // if !filename.starts_with("zed") {
                //     continue;
                // }
                if !filename.starts_with("zed") {
                    continue;
                }

                // if telemetry_settings.diagnostics {
                //     let panic_file_content = smol::fs::read_to_string(&child_path)
                //         .await
                //         .context("error reading panic file")?;
                if telemetry_settings.diagnostics {
                    let panic_file_content = smol::fs::read_to_string(&child_path)
                        .await
                        .context("error reading panic file")?;

                    // let panic = serde_json::from_str(&panic_file_content)
                    //     .ok()
                    //     .or_else(|| {
                    //         panic_file_content
                    //             .lines()
                    //             .next()
                    //             .and_then(|line| serde_json::from_str(line).ok())
                    //     })
                    //     .unwrap_or_else(|| {
                    //         log::error!(
                    //             "failed to deserialize panic file {:?}",
                    //             panic_file_content
                    //         );
                    //         None
                    //     });
                    let panic = serde_json::from_str(&panic_file_content)
                        .ok()
                        .or_else(|| {
                            panic_file_content
                                .lines()
                                .next()
                                .and_then(|line| serde_json::from_str(line).ok())
                        })
                        .unwrap_or_else(|| {
                            log::error!(
                                "failed to deserialize panic file {:?}",
                                panic_file_content
                            );
                            None
                        });

                    // if let Some(panic) = panic {
                    //     let body = serde_json::to_string(&PanicRequest {
                    //         panic,
                    //         token: ZED_SECRET_CLIENT_TOKEN.into(),
                    //     })
                    //     .unwrap();
                    if let Some(panic) = panic {
                        let body = serde_json::to_string(&PanicRequest {
                            panic,
                            token: client::ZED_SECRET_CLIENT_TOKEN.into(),
                        })
                        .unwrap();

                        // let request = Request::post(&panic_report_url)
                        //     .redirect_policy(isahc::config::RedirectPolicy::Follow)
                        //     .header("Content-Type", "application/json")
                        //     .body(body.into())?;
                        // let response =
                        //     http.send(request).await.context("error sending panic")?;
                        // if !response.status().is_success() {
                        //     log::error!(
                        //         "Error uploading panic to server: {}",
                        //         response.status()
                        //     );
                        // }
                        //     }
                        // }
                        let request = Request::post(&panic_report_url)
                            .redirect_policy(isahc::config::RedirectPolicy::Follow)
                            .header("Content-Type", "application/json")
                            .body(body.into())?;
                        let response = http.send(request).await.context("error sending panic")?;
                        if !response.status().is_success() {
                            log::error!("Error uploading panic to server: {}", response.status());
                        }
                    }
                }

                // // We've done what we can, delete the file
                // std::fs::remove_file(child_path)
                //     .context("error removing panic")
                //     .log_err();
                //     }
                //     Ok::<_, anyhow::Error>(())
                // }
                // .log_err()
                // })
                // .detach();
                // }
                // We've done what we can, delete the file
                std::fs::remove_file(child_path)
                    .context("error removing panic")
                    .log_err();
            }
            Ok::<_, anyhow::Error>(())
        })
        .detach_and_log_err(cx);
}

async fn load_login_shell_environment() -> Result<()> {
    let marker = "ZED_LOGIN_SHELL_START";
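One detail worth noting in init_panic_hook above: PANIC_COUNT distinguishes the first panicking thread from any that follow, so exactly one thread writes the panic file while later ones spin until the process aborts. The guard in isolation, as a sketch:

use std::sync::atomic::{AtomicU32, Ordering};

static PANIC_COUNT: AtomicU32 = AtomicU32::new(0);

fn on_panic() {
    // fetch_add returns the previous value, so exactly one caller observes 0.
    let prior_panic_count = PANIC_COUNT.fetch_add(1, Ordering::SeqCst);
    if prior_panic_count > 0 {
        // A later panicking thread: park forever so the first one
        // can finish writing the panic file before the process aborts.
        loop {
            std::thread::yield_now();
        }
    }
    // First panicking thread: capture the backtrace, write zed-<timestamp>.panic, then abort.
}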