mirror of https://github.com/ellie/atuin.git synced 2024-08-17 17:00:33 +03:00

Update docs, unify on SQLx, bugfixes (#40)

* Begin moving to sqlx for local too

* Stupid scanners should just have a nice cup of tea

Random internet shit searching for /.env or whatever

* Remove diesel and rusqlite fully
Ellie Huxtable 2021-04-25 18:21:52 +01:00 committed by GitHub
parent 4210e8de5a
commit 156893d774
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
39 changed files with 860 additions and 625 deletions


@ -38,7 +38,7 @@ jobs:
override: true
- name: Run cargo test
run: cargo test
run: cargo test --workspace
clippy:
runs-on: ubuntu-latest

Cargo.lock generated

@ -92,6 +92,7 @@ dependencies = [
"chrono",
"chrono-english",
"cli-table",
"crossbeam-channel",
"directories",
"eyre",
"fork",
@ -100,11 +101,11 @@ dependencies = [
"itertools",
"log",
"pretty_env_logger",
"rusqlite",
"serde 1.0.125",
"serde_derive",
"serde_json",
"structopt",
"tabwriter",
"termion",
"tokio",
"tui",
@ -132,13 +133,13 @@ dependencies = [
"rand 0.8.3",
"reqwest",
"rmp-serde",
"rusqlite",
"rust-crypto",
"serde 1.0.125",
"serde_derive",
"serde_json",
"shellexpand",
"sodiumoxide",
"sqlx",
"tokio",
"urlencoding",
"uuid",
@ -606,18 +607,6 @@ dependencies = [
"once_cell",
]
[[package]]
name = "fallible-iterator"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
[[package]]
name = "fallible-streaming-iterator"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
[[package]]
name = "fern"
version = "0.6.0"
@ -1817,21 +1806,6 @@ dependencies = [
"serde 1.0.125",
]
[[package]]
name = "rusqlite"
version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbc783b7ddae608338003bac1fa00b6786a75a9675fbd8e87243ecfdea3f6ed2"
dependencies = [
"bitflags",
"fallible-iterator",
"fallible-streaming-iterator",
"hashlink",
"libsqlite3-sys",
"memchr",
"smallvec",
]
[[package]]
name = "rust-argon2"
version = "0.8.3"
@ -2116,6 +2090,7 @@ dependencies = [
"hmac",
"itoa",
"libc",
"libsqlite3-sys",
"log",
"md-5",
"memchr",
@ -2234,6 +2209,15 @@ dependencies = [
"unicode-xid",
]
[[package]]
name = "tabwriter"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36205cfc997faadcc4b0b87aaef3fbedafe20d38d4959a7ca6ff803564051111"
dependencies = [
"unicode-width",
]
[[package]]
name = "tap"
version = "1.0.1"


@ -37,7 +37,6 @@ chrono-english = "0.1.4"
cli-table = "0.4"
base64 = "0.13.0"
humantime = "2.1.0"
tabwriter = "1.2.1"
crossbeam-channel = "0.5.1"
[dependencies.rusqlite]
version = "0.25"
features = ["bundled"]


@ -21,7 +21,7 @@ FROM debian:buster-slim as runtime
WORKDIR app
ENV TZ=Etc/UTC
ENV RUST_LOG=info
ENV RUST_LOG=atuin::api=info
ENV ATUIN_CONFIG_DIR=/config
COPY --from=builder /app/target/release/atuin /usr/local/bin

README.md

@ -1,9 +1,7 @@
<h1 align="center">
A'Tuin
Atuin
</h1>
<blockquote align="center">
Through the fathomless deeps of space swims the star turtle Great A'Tuin, bearing on its back the four giant elephants who carry on their shoulders the mass of the Discworld.
</blockquote>
<em align="center">Magical shell history</em>
<p align="center">
<a href="https://github.com/ellie/atuin/actions?query=workflow%3ARust"><img src="https://img.shields.io/github/workflow/status/ellie/atuin/Rust?style=flat-square" /></a>
@ -12,28 +10,42 @@
<a href="https://github.com/ellie/atuin/blob/main/LICENSE"><img src="https://img.shields.io/crates/l/atuin.svg?style=flat-square" /></a>
</p>
A'Tuin manages and synchronizes your shell history! Instead of storing
everything in a text file (such as ~/.history), A'Tuin uses a sqlite database.
While being a little more complex, this allows for more functionality.
- store shell history in a sqlite database
- back up e2e encrypted history to the cloud, and synchronize between machines
- log exit code, cwd, hostname, session, command duration, etc
- smart interactive history search to replace ctrl-r
- calculate statistics such as "most used command"
- old history file is not replaced
As well as the expected command, A'Tuin stores
## Documentation
- duration
- exit code
- working directory
- hostname
- time
- a unique session ID
- [Quickstart](#quickstart)
- [Install](#install)
- [Import](docs/import.md)
- [Configuration](docs/config.md)
- [Searching history](docs/search.md)
- [Cloud history sync](docs/sync.md)
- [History stats](docs/stats.md)
## Supported Shells
- zsh
## Quickstart
```
curl https://github.com/ellie/atuin/blob/main/install.sh | bash
atuin register -u <USERNAME> -e <EMAIL> -p <PASSWORD>
atuin import auto
atuin sync
```
## Install
### AUR
A'Tuin is available on the [AUR](https://aur.archlinux.org/packages/atuin/)
Atuin is available on the [AUR](https://aur.archlinux.org/packages/atuin/)
```
yay -S atuin # or your AUR helper of choice
@ -41,19 +53,16 @@ yay -S atuin # or your AUR helper of choice
### With cargo
`atuin` needs a nightly version of Rust + Cargo! It's best to use
[rustup](https://rustup.rs/) for getting set up there.
It's best to use [rustup](https://rustup.rs/) to get set up with a Rust
toolchain, then you can run:
```
rustup default nightly
cargo install atuin
```
### From source
```
rustup default nightly
git clone https://github.com/ellie/atuin.git
cd atuin
cargo install --path .
@ -67,107 +76,9 @@ Once the binary is installed, the shell plugin requires installing. Add
eval "$(atuin init)"
```
to your `.zshrc`/`.bashrc`/whatever your shell uses.
## Usage
### History search
By default A'Tuin will rebind ctrl-r and the up arrow to search your history.
You can prevent this by putting
```
export ATUIN_BINDKEYS="false"
```
into your shell config.
### Import history
```
atuin import auto # detect shell, then import
or
atuin import zsh # specify shell
```
### List history
List all history
```
atuin history list
```
List history for the current directory
```
atuin history list --cwd
atuin h l -c # alternative, shorter version
```
List history for the current session
```
atuin history list --session
atuin h l -s # similarly short
```
### Stats
A'Tuin can calculate statistics for a single day, and accepts "natural language" style date input, as well as absolute dates:
```
$ atuin stats day last friday
+---------------------+------------+
| Statistic | Value |
+---------------------+------------+
| Most used command | git status |
+---------------------+------------+
| Commands ran | 450 |
+---------------------+------------+
| Unique commands ran | 213 |
+---------------------+------------+
$ atuin stats day 01/01/21 # also accepts absolute dates
```
It can also calculate statistics for all of known history:
```
$ atuin stats all
+---------------------+-------+
| Statistic | Value |
+---------------------+-------+
| Most used command | ls |
+---------------------+-------+
| Commands ran | 8190 |
+---------------------+-------+
| Unique commands ran | 2996 |
+---------------------+-------+
```
## Config
A'Tuin is configurable via TOML. The file lives at ` ~/.config/atuin/config.toml`,
and looks like this:
```
[local]
dialect = "uk" # or us. sets the date format used by stats
server_address = "https://atuin.elliehuxtable.com/" # the server to sync with
[local.db]
path = "~/.local/share/atuin/history.db" # the local database for history
```
to your `.zshrc`
## ...what's with the name?
A'Tuin is named after "The Great A'Tuin", a giant turtle from Terry Pratchett's
Atuin is named after "The Great A'Tuin", a giant turtle from Terry Pratchett's
Discworld series of books.


@ -37,6 +37,6 @@ tokio = { version = "1", features = ["full"] }
async-trait = "0.1.49"
urlencoding = "1.1.1"
humantime = "2.1.0"
rusqlite= { version = "0.25", features = ["bundled"] }
itertools = "0.10.0"
shellexpand = "2"
sqlx = { version = "0.5", features = [ "runtime-tokio-rustls", "uuid", "chrono", "sqlite" ] }


@ -0,0 +1,16 @@
-- Add migration script here
create table if not exists history (
id text primary key,
timestamp text not null,
duration integer not null,
exit integer not null,
command text not null,
cwd text not null,
session text not null,
hostname text not null,
unique(timestamp, cwd, command)
);
create index if not exists idx_history_timestamp on history(timestamp);
create index if not exists idx_history_command on history(command);
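The new migration above is not run by hand; sqlx embeds it and applies it on startup. A minimal sketch of that wiring, assuming the file sits under `./migrations` in the client crate (the in-memory URI is purely illustrative):
```
use sqlx::sqlite::SqlitePoolOptions;

#[tokio::main]
async fn main() -> eyre::Result<()> {
    // Illustrative in-memory database; the client opens a file-backed pool.
    let pool = SqlitePoolOptions::new().connect("sqlite::memory:").await?;

    // sqlx::migrate! embeds everything under ./migrations at compile time and
    // applies any migration not yet recorded in its bookkeeping table.
    sqlx::migrate!("./migrations").run(&pool).await?;

    Ok(())
}
```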


@ -1,44 +1,48 @@
use chrono::prelude::*;
use chrono::Utc;
use std::path::Path;
use std::str::FromStr;
use eyre::{eyre, Result};
use async_trait::async_trait;
use chrono::Utc;
use rusqlite::{params, Connection};
use rusqlite::{Params, Transaction};
use eyre::Result;
use sqlx::sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePool, SqlitePoolOptions};
use super::history::History;
#[async_trait]
pub trait Database {
fn save(&mut self, h: &History) -> Result<()>;
fn save_bulk(&mut self, h: &[History]) -> Result<()>;
async fn save(&mut self, h: &History) -> Result<()>;
async fn save_bulk(&mut self, h: &[History]) -> Result<()>;
fn load(&self, id: &str) -> Result<History>;
fn list(&self, max: Option<usize>, unique: bool) -> Result<Vec<History>>;
fn range(&self, from: chrono::DateTime<Utc>, to: chrono::DateTime<Utc>)
-> Result<Vec<History>>;
async fn load(&self, id: &str) -> Result<History>;
async fn list(&self, max: Option<usize>, unique: bool) -> Result<Vec<History>>;
async fn range(
&self,
from: chrono::DateTime<Utc>,
to: chrono::DateTime<Utc>,
) -> Result<Vec<History>>;
fn query(&self, query: &str, params: impl Params) -> Result<Vec<History>>;
fn update(&self, h: &History) -> Result<()>;
fn history_count(&self) -> Result<i64>;
async fn update(&self, h: &History) -> Result<()>;
async fn history_count(&self) -> Result<i64>;
fn first(&self) -> Result<History>;
fn last(&self) -> Result<History>;
fn before(&self, timestamp: chrono::DateTime<Utc>, count: i64) -> Result<Vec<History>>;
async fn first(&self) -> Result<History>;
async fn last(&self) -> Result<History>;
async fn before(&self, timestamp: chrono::DateTime<Utc>, count: i64) -> Result<Vec<History>>;
fn prefix_search(&self, query: &str) -> Result<Vec<History>>;
async fn search(&self, limit: Option<i64>, query: &str) -> Result<Vec<History>>;
fn search(&self, cwd: Option<String>, exit: Option<i64>, query: &str) -> Result<Vec<History>>;
async fn query_history(&self, query: &str) -> Result<Vec<History>>;
}
// Intended for use on a developer machine and not a sync server.
// TODO: implement IntoIterator
pub struct Sqlite {
conn: Connection,
pool: SqlitePool,
}
impl Sqlite {
pub fn new(path: impl AsRef<Path>) -> Result<Self> {
pub async fn new(path: impl AsRef<Path>) -> Result<Self> {
let path = path.as_ref();
debug!("opening sqlite database at {:?}", path);
@ -49,137 +53,106 @@ impl Sqlite {
}
}
let conn = Connection::open(path)?;
let opts = SqliteConnectOptions::from_str(path.as_os_str().to_str().unwrap())?
.journal_mode(SqliteJournalMode::Wal)
.create_if_missing(true);
Self::setup_db(&conn)?;
let pool = SqlitePoolOptions::new().connect_with(opts).await?;
Ok(Self { conn })
Self::setup_db(&pool).await?;
Ok(Self { pool })
}
fn setup_db(conn: &Connection) -> Result<()> {
async fn setup_db(pool: &SqlitePool) -> Result<()> {
debug!("running sqlite database setup");
conn.execute(
"create table if not exists history (
id text primary key,
timestamp integer not null,
duration integer not null,
exit integer not null,
command text not null,
cwd text not null,
session text not null,
hostname text not null,
unique(timestamp, cwd, command)
)",
[],
)?;
conn.execute(
"create table if not exists history_encrypted (
id text primary key,
data blob not null
)",
[],
)?;
conn.execute(
"create index if not exists idx_history_timestamp on history(timestamp)",
[],
)?;
conn.execute(
"create index if not exists idx_history_command on history(command)",
[],
)?;
sqlx::migrate!("./migrations").run(pool).await?;
Ok(())
}
fn save_raw(tx: &Transaction, h: &History) -> Result<()> {
tx.execute(
"insert or ignore into history (
id,
timestamp,
duration,
exit,
command,
cwd,
session,
hostname
) values (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
params![
h.id,
h.timestamp.timestamp_nanos(),
h.duration,
h.exit,
h.command,
h.cwd,
h.session,
h.hostname
],
)?;
async fn save_raw(tx: &mut sqlx::Transaction<'_, sqlx::Sqlite>, h: &History) -> Result<()> {
sqlx::query(
"insert or ignore into history(id, timestamp, duration, exit, command, cwd, session, hostname)
values(?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
)
.bind(h.id.as_str())
.bind(h.timestamp.to_rfc3339())
.bind(h.duration)
.bind(h.exit)
.bind(h.command.as_str())
.bind(h.cwd.as_str())
.bind(h.session.as_str())
.bind(h.hostname.as_str())
.execute(tx)
.await?;
Ok(())
}
}
#[async_trait]
impl Database for Sqlite {
fn save(&mut self, h: &History) -> Result<()> {
async fn save(&mut self, h: &History) -> Result<()> {
debug!("saving history to sqlite");
let tx = self.conn.transaction()?;
Self::save_raw(&tx, h)?;
tx.commit()?;
let mut tx = self.pool.begin().await?;
Self::save_raw(&mut tx, h).await?;
tx.commit().await?;
Ok(())
}
fn save_bulk(&mut self, h: &[History]) -> Result<()> {
async fn save_bulk(&mut self, h: &[History]) -> Result<()> {
debug!("saving history to sqlite");
let tx = self.conn.transaction()?;
let mut tx = self.pool.begin().await?;
for i in h {
Self::save_raw(&tx, i)?
Self::save_raw(&mut tx, i).await?
}
tx.commit()?;
tx.commit().await?;
Ok(())
}
fn load(&self, id: &str) -> Result<History> {
async fn load(&self, id: &str) -> Result<History> {
debug!("loading history item {}", id);
let history = self.query(
"select id, timestamp, duration, exit, command, cwd, session, hostname from history
where id = ?1 limit 1",
&[id],
)?;
let res = sqlx::query_as::<_, History>("select * from history where id = ?1")
.bind(id)
.fetch_one(&self.pool)
.await?;
if history.is_empty() {
return Err(eyre!("could not find history with id {}", id));
}
let history = history[0].clone();
Ok(history)
Ok(res)
}
fn update(&self, h: &History) -> Result<()> {
async fn update(&self, h: &History) -> Result<()> {
debug!("updating sqlite history");
self.conn.execute(
sqlx::query(
"update history
set timestamp = ?2, duration = ?3, exit = ?4, command = ?5, cwd = ?6, session = ?7, hostname = ?8
where id = ?1",
params![h.id, h.timestamp.timestamp_nanos(), h.duration, h.exit, h.command, h.cwd, h.session, h.hostname],
)?;
)
.bind(h.id.as_str())
.bind(h.timestamp.to_rfc3339())
.bind(h.duration)
.bind(h.exit)
.bind(h.command.as_str())
.bind(h.cwd.as_str())
.bind(h.session.as_str())
.bind(h.hostname.as_str())
.execute(&self.pool)
.await?;
Ok(())
}
// make a unique list, that only shows the *newest* version of things
fn list(&self, max: Option<usize>, unique: bool) -> Result<Vec<History>> {
async fn list(&self, max: Option<usize>, unique: bool) -> Result<Vec<History>> {
debug!("listing history");
// very likely vulnerable to SQL injection
@ -208,144 +181,96 @@ impl Database for Sqlite {
}
);
let history = self.query(query.as_str(), params![])?;
let res = sqlx::query_as::<_, History>(query.as_str())
.fetch_all(&self.pool)
.await?;
Ok(history)
Ok(res)
}
fn range(
async fn range(
&self,
from: chrono::DateTime<Utc>,
to: chrono::DateTime<Utc>,
) -> Result<Vec<History>> {
debug!("listing history from {:?} to {:?}", from, to);
let mut stmt = self.conn.prepare(
"SELECT * FROM history where timestamp >= ?1 and timestamp <= ?2 order by timestamp asc",
)?;
let history_iter = stmt.query_map(
params![from.timestamp_nanos(), to.timestamp_nanos()],
|row| history_from_sqlite_row(None, row),
)?;
Ok(history_iter.filter_map(Result::ok).collect())
}
fn first(&self) -> Result<History> {
let mut stmt = self
.conn
.prepare("SELECT * FROM history order by timestamp asc limit 1")?;
let history = stmt.query_row(params![], |row| history_from_sqlite_row(None, row))?;
Ok(history)
}
fn last(&self) -> Result<History> {
let mut stmt = self
.conn
.prepare("SELECT * FROM history where duration >= 0 order by timestamp desc limit 1")?;
let history = stmt.query_row(params![], |row| history_from_sqlite_row(None, row))?;
Ok(history)
}
fn before(&self, timestamp: chrono::DateTime<Utc>, count: i64) -> Result<Vec<History>> {
let mut stmt = self
.conn
.prepare("SELECT * FROM history where timestamp < ? order by timestamp desc limit ?")?;
let history_iter = stmt.query_map(params![timestamp.timestamp_nanos(), count], |row| {
history_from_sqlite_row(None, row)
})?;
Ok(history_iter.filter_map(Result::ok).collect())
}
fn query(&self, query: &str, params: impl Params) -> Result<Vec<History>> {
let mut stmt = self.conn.prepare(query)?;
let history_iter = stmt.query_map(params, |row| history_from_sqlite_row(None, row))?;
Ok(history_iter.filter_map(Result::ok).collect())
}
fn prefix_search(&self, query: &str) -> Result<Vec<History>> {
let query = query.to_string().replace("*", "%"); // allow wildcard char
self.query(
"select * from history h
where command like ?1 || '%'
and timestamp = (
select max(timestamp) from history
where h.command = history.command
)
order by timestamp desc limit 200",
&[query.as_str()],
let res = sqlx::query_as::<_, History>(
"select * from history where timestamp >= ?1 and timestamp <= ?2 order by timestamp asc",
)
}
fn history_count(&self) -> Result<i64> {
let res: i64 =
self.conn
.query_row_and_then("select count(1) from history;", params![], |row| row.get(0))?;
.bind(from)
.bind(to)
.fetch_all(&self.pool)
.await?;
Ok(res)
}
fn search(&self, cwd: Option<String>, exit: Option<i64>, query: &str) -> Result<Vec<History>> {
match (cwd, exit) {
(Some(cwd), Some(exit)) => self.query(
"select * from history
where command like ?1 || '%'
and cwd = ?2
and exit = ?3
order by timestamp asc limit 1000",
&[query, cwd.as_str(), exit.to_string().as_str()],
),
(Some(cwd), None) => self.query(
"select * from history
where command like ?1 || '%'
and cwd = ?2
order by timestamp asc limit 1000",
&[query, cwd.as_str()],
),
(None, Some(exit)) => self.query(
"select * from history
async fn first(&self) -> Result<History> {
let res = sqlx::query_as::<_, History>(
"select * from history where duration >= 0 order by timestamp asc limit 1",
)
.fetch_one(&self.pool)
.await?;
Ok(res)
}
async fn last(&self) -> Result<History> {
let res = sqlx::query_as::<_, History>(
"select * from history where duration >= 0 order by timestamp desc limit 1",
)
.fetch_one(&self.pool)
.await?;
Ok(res)
}
async fn before(&self, timestamp: chrono::DateTime<Utc>, count: i64) -> Result<Vec<History>> {
let res = sqlx::query_as::<_, History>(
"select * from history where timestamp < ?1 order by timestamp desc limit ?2",
)
.bind(timestamp)
.bind(count)
.fetch_all(&self.pool)
.await?;
Ok(res)
}
async fn history_count(&self) -> Result<i64> {
let res: (i64,) = sqlx::query_as("select count(1) from history")
.fetch_one(&self.pool)
.await?;
Ok(res.0)
}
async fn search(&self, limit: Option<i64>, query: &str) -> Result<Vec<History>> {
let query = query.to_string().replace("*", "%"); // allow wildcard char
let limit = limit.map_or("".to_owned(), |l| format!("limit {}", l));
let res = sqlx::query_as::<_, History>(
format!(
"select * from history
where command like ?1 || '%'
and exit = ?2
order by timestamp asc limit 1000",
&[query, exit.to_string().as_str()],
),
(None, None) => self.query(
"select * from history
where command like ?1 || '%'
order by timestamp asc limit 1000",
&[query],
),
}
order by timestamp desc {}",
limit.clone()
)
.as_str(),
)
.bind(query)
.fetch_all(&self.pool)
.await?;
Ok(res)
}
async fn query_history(&self, query: &str) -> Result<Vec<History>> {
let res = sqlx::query_as::<_, History>(query)
.fetch_all(&self.pool)
.await?;
Ok(res)
}
}
fn history_from_sqlite_row(
id: Option<String>,
row: &rusqlite::Row,
) -> Result<History, rusqlite::Error> {
let id = match id {
Some(id) => id,
None => row.get(0)?,
};
Ok(History {
id,
timestamp: Utc.timestamp_nanos(row.get(1)?),
duration: row.get(2)?,
exit: row.get(3)?,
command: row.get(4)?,
cwd: row.get(5)?,
session: row.get(6)?,
hostname: row.get(7)?,
})
}
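To make the shape of the reworked API concrete, here is a hedged sketch of how a caller might drive the pool-backed `Sqlite` database after this change; the path and query are invented for illustration:
```
use atuin_client::database::{Database, Sqlite};

#[tokio::main]
async fn main() -> eyre::Result<()> {
    // Hypothetical path; the real client resolves this from its settings.
    let db = Sqlite::new("/tmp/atuin-example.db").await?;

    // Every trait method is now async and awaited against the sqlx pool.
    println!("history count: {}", db.history_count().await?);

    for h in db.search(Some(10), "cargo").await? {
        println!("{} {}", h.timestamp, h.command);
    }

    Ok(())
}
```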


@ -98,7 +98,7 @@ pub fn decrypt(encrypted_history: &EncryptedHistory, key: &secretbox::Key) -> Re
mod test {
use sodiumoxide::crypto::secretbox;
use crate::local::history::History;
use crate::history::History;
use super::{decrypt, encrypt};


@ -6,7 +6,7 @@ use chrono::Utc;
use atuin_common::utils::uuid_v4;
// Any new fields MUST be Optional<>!
#[derive(Debug, Clone, Serialize, Deserialize, Ord, PartialOrd)]
#[derive(Debug, Clone, Serialize, Deserialize, Ord, PartialOrd, sqlx::FromRow)]
pub struct History {
pub id: String,
pub timestamp: chrono::DateTime<Utc>,


@ -5,7 +5,6 @@ use std::path::{Path, PathBuf};
use chrono::prelude::*;
use chrono::Utc;
use config::{Config, Environment, File as ConfigFile};
use directories::ProjectDirs;
use eyre::{eyre, Result};
use parse_duration::parse;
@ -28,9 +27,10 @@ pub struct Settings {
impl Settings {
pub fn save_sync_time() -> Result<()> {
let sync_time_path = ProjectDirs::from("com", "elliehuxtable", "atuin")
.ok_or_else(|| eyre!("could not determine key file location"))?;
let sync_time_path = sync_time_path.data_dir().join("last_sync_time");
let data_dir = atuin_common::utils::data_dir();
let data_dir = data_dir.as_path();
let sync_time_path = data_dir.join("last_sync_time");
std::fs::write(sync_time_path, Utc::now().to_rfc3339())?;
@ -38,15 +38,10 @@ impl Settings {
}
pub fn last_sync() -> Result<chrono::DateTime<Utc>> {
let sync_time_path = ProjectDirs::from("com", "elliehuxtable", "atuin");
let data_dir = atuin_common::utils::data_dir();
let data_dir = data_dir.as_path();
if sync_time_path.is_none() {
debug!("failed to load projectdirs, not syncing");
return Err(eyre!("could not load project dirs"));
}
let sync_time_path = sync_time_path.unwrap();
let sync_time_path = sync_time_path.data_dir().join("last_sync_time");
let sync_time_path = data_dir.join("last_sync_time");
if !sync_time_path.exists() {
return Ok(Utc.ymd(1970, 1, 1).and_hms(0, 0, 0));
@ -73,10 +68,14 @@ impl Settings {
}
pub fn new() -> Result<Self> {
let config_dir = ProjectDirs::from("com", "elliehuxtable", "atuin").unwrap();
let config_dir = config_dir.config_dir();
let config_dir = atuin_common::utils::config_dir();
let config_dir = config_dir.as_path();
let data_dir = atuin_common::utils::data_dir();
let data_dir = data_dir.as_path();
create_dir_all(config_dir)?;
create_dir_all(data_dir)?;
let mut config_file = if let Ok(p) = std::env::var("ATUIN_CONFIG_DIR") {
PathBuf::from(p)
@ -90,27 +89,16 @@ impl Settings {
let mut s = Config::new();
let db_path = ProjectDirs::from("com", "elliehuxtable", "atuin")
.ok_or_else(|| eyre!("could not determine db file location"))?
.data_dir()
.join("history.db");
let key_path = ProjectDirs::from("com", "elliehuxtable", "atuin")
.ok_or_else(|| eyre!("could not determine key file location"))?
.data_dir()
.join("key");
let session_path = ProjectDirs::from("com", "elliehuxtable", "atuin")
.ok_or_else(|| eyre!("could not determine session file location"))?
.data_dir()
.join("session");
let db_path = data_dir.join("history.db");
let key_path = data_dir.join("key");
let session_path = data_dir.join("session");
s.set_default("db_path", db_path.to_str())?;
s.set_default("key_path", key_path.to_str())?;
s.set_default("session_path", session_path.to_str())?;
s.set_default("dialect", "us")?;
s.set_default("auto_sync", true)?;
s.set_default("sync_frequency", "5m")?;
s.set_default("sync_frequency", "1h")?;
s.set_default("sync_address", "https://api.atuin.sh")?;
if config_file.exists() {


@ -30,7 +30,7 @@ async fn sync_download(
let remote_count = client.count().await?;
let initial_local = db.history_count()?;
let initial_local = db.history_count().await?;
let mut local_count = initial_local;
let mut last_sync = if force {
@ -48,9 +48,9 @@ async fn sync_download(
.get_history(last_sync, last_timestamp, host.clone())
.await?;
db.save_bulk(&page)?;
db.save_bulk(&page).await?;
local_count = db.history_count()?;
local_count = db.history_count().await?;
if page.len() < HISTORY_PAGE_SIZE.try_into().unwrap() {
break;
@ -87,7 +87,7 @@ async fn sync_upload(
let initial_remote_count = client.count().await?;
let mut remote_count = initial_remote_count;
let local_count = db.history_count()?;
let local_count = db.history_count().await?;
debug!("remote has {}, we have {}", remote_count, local_count);
@ -98,7 +98,7 @@ async fn sync_upload(
let mut cursor = Utc::now();
while local_count > remote_count {
let last = db.before(cursor, HISTORY_PAGE_SIZE)?;
let last = db.before(cursor, HISTORY_PAGE_SIZE).await?;
let mut buffer = Vec::<AddHistoryRequest>::new();
if last.is_empty() {


@ -1,3 +1,5 @@
use std::path::PathBuf;
use crypto::digest::Digest;
use crypto::sha2::Sha256;
use sodiumoxide::crypto::pwhash::argon2id13;
@ -27,3 +29,40 @@ pub fn hash_str(string: &str) -> String {
pub fn uuid_v4() -> String {
Uuid::new_v4().to_simple().to_string()
}
pub fn config_dir() -> PathBuf {
// TODO: more reliable, more tested
// I don't want to use ProjectDirs, it puts config in awkward places on
// mac. Data too. Seems to be more intended for GUI apps.
let home = std::env::var("HOME").expect("$HOME not found");
let home = PathBuf::from(home);
std::env::var("XDG_CONFIG_HOME").map_or_else(
|_| {
let mut config = home.clone();
config.push(".config");
config.push("atuin");
config
},
PathBuf::from,
)
}
pub fn data_dir() -> PathBuf {
// TODO: more reliable, more tested
// I don't want to use ProjectDirs, it puts config in awkward places on
// mac. Data too. Seems to be more intended for GUI apps.
let home = std::env::var("HOME").expect("$HOME not found");
let home = PathBuf::from(home);
std::env::var("XDG_DATA_HOME").map_or_else(
|_| {
let mut data = home.clone();
data.push(".local");
data.push("share");
data.push("atuin");
data
},
PathBuf::from,
)
}
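A small illustrative check of that fallback behaviour (not part of the commit, and it assumes the helpers are reachable as `atuin_common::utils`):
```
use std::env;

fn main() {
    // With XDG_DATA_HOME unset, data_dir() falls back to ~/.local/share/atuin.
    env::remove_var("XDG_DATA_HOME");
    println!("{:?}", atuin_common::utils::data_dir());

    // With it set, the variable's value is used verbatim (PathBuf::from is the
    // success arm of the map_or_else above), so no "atuin" suffix is appended.
    env::set_var("XDG_DATA_HOME", "/tmp/xdg-data");
    println!("{:?}", atuin_common::utils::data_dir());
}
```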


@ -1,5 +1,3 @@
-- Your SQL goes here
-- lower case SQL please, this isn't a shouting match
create table history (
id bigserial primary key,
client_id text not null unique, -- the client-generated ID


@ -1,4 +1,3 @@
-- Your SQL goes here
create table users (
id bigserial primary key, -- also store our own ID
username varchar(32) not null unique, -- being able to contact users is useful


@ -1,4 +1,4 @@
-- Your SQL goes here
-- Add migration script here
create table sessions (
id bigserial primary key,
user_id bigserial,


@ -40,6 +40,8 @@ impl Postgres {
.connect(uri)
.await?;
sqlx::migrate!("./migrations").run(&pool).await?;
Ok(Self { pool })
}
}


@ -1,7 +1,7 @@
use std::convert::Infallible;
use eyre::Result;
use warp::Filter;
use warp::{hyper::StatusCode, Filter};
use atuin_common::api::SyncHistoryRequest;
@ -56,7 +56,7 @@ fn with_user(
pub async fn router(
settings: &Settings,
) -> Result<impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone> {
) -> Result<impl Filter<Extract = impl warp::Reply, Error = Infallible> + Clone> {
let postgres = Postgres::new(settings.db_uri.as_str()).await?;
let index = warp::get().and(warp::path::end()).map(handlers::index);
@ -115,7 +115,8 @@ pub async fn router(
.or(add_history)
.or(user)
.or(register)
.or(login),
.or(login)
.or(warp::any().map(|| warp::reply::with_status("", StatusCode::IM_A_TEAPOT))),
)
.with(warp::filters::log::log("atuin::api"));
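That final teapot route is the "nice cup of tea" from the commit message: any request that misses a real route now gets a 418 instead of a rejection. A standalone sketch of the same pattern, with a hypothetical route and port:
```
use warp::{hyper::StatusCode, Filter};

#[tokio::main]
async fn main() {
    // One real route...
    let index = warp::path::end().map(|| "atuin");

    // ...and a catch-all, so scanner probes for /.env and friends get a teapot.
    let teapot = warp::any().map(|| warp::reply::with_status("", StatusCode::IM_A_TEAPOT));

    let routes = index.or(teapot).with(warp::log("example"));
    warp::serve(routes).run(([127, 0, 0, 1], 8888)).await;
}
```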


@ -3,7 +3,6 @@ use std::io::prelude::*;
use std::path::PathBuf;
use config::{Config, Environment, File as ConfigFile};
use directories::ProjectDirs;
use eyre::{eyre, Result};
pub const HISTORY_PAGE_SIZE: i64 = 100;
@ -18,8 +17,8 @@ pub struct Settings {
impl Settings {
pub fn new() -> Result<Self> {
let config_dir = ProjectDirs::from("com", "elliehuxtable", "atuin").unwrap();
let config_dir = config_dir.config_dir();
let config_dir = atuin_common::utils::config_dir();
let config_dir = config_dir.as_path();
create_dir_all(config_dir)?;

docs/config.md Normal file

@ -0,0 +1,99 @@
# Config
Atuin maintains two configuration files, stored in `~/.config/atuin/`. We store
data in `~/.local/share/atuin` (unless overridden by XDG\_\*).
You can also change the path to the configuration directory by setting
`ATUIN_CONFIG_DIR`. For example
```
export ATUIN_CONFIG_DIR=/home/ellie/.atuin
```
## Client config
```
~/.config/atuin/config.toml
```
The client runs on a user's machine, and unless you're running a server, this
is what you care about.
See [config.toml](../atuin-client/config.toml) for an example
### `dialect`
This configures how the [stats](stats.md) command parses dates. It has two
possible values
```
dialect = "uk"
```
or
```
dialect = "us"
```
and defaults to "us".
### `auto_sync`
Configures whether or not to automatically sync when logged in. Defaults to
true.
```
auto_sync = true/false
```
### `sync_address`
The address of the server to sync with! Defaults to `https://api.atuin.sh`.
```
sync_address = "https://api.atuin.sh"
```
### `sync_frequency`
How often to automatically sync with the server. This can be given in a
"human readable" format. For example, `10s`, `20m`, `1h`, etc. Defaults to `1h`.
If set to `0`, Atuin will sync after every command. Some servers may
rate limit this, but that won't cause any issues.
```
sync_frequency = "1h"
```
### `db_path`
The path to the Atuin SQLite database. Defaults to
`~/.local/share/atuin/history.db`.
```
db_path = "~/.history.db"
```
### `key_path`
The path to the Atuin encryption key. Defaults to
`~/.local/share/atuin/key`.
```
key_path = "~/.atuin-key"
```
### `session_path`
The path to the Atuin server session file. Defaults to
`~/.local/share/atuin/session`. This is essentially just an API token.
```
session_path = "~/.atuin-session"
```
## Server config
`// TODO`

docs/import.md Normal file

@ -0,0 +1,27 @@
# `atuin import`
Atuin can import your history from your "old" history file
`atuin import auto` will attempt to figure out your shell (via \$SHELL) and run
the correct importer
Unfortunately these older files do not store as much information as Atuin does,
so not all features are available with imported data.
# zsh
```
atuin import zsh
```
If you've set HISTFILE, this should be picked up! If not, try
```
HISTFILE=/path/to/history/file atuin import zsh
```
This supports both the simple and extended format
# bash
TODO

docs/list.md Normal file

@ -0,0 +1,11 @@
# Listing history
```
atuin history list
```
| Arg | Description |
| -------------- | ----------------------------------------------------------------------------- |
| `--cwd/-c` | The directory to list history for (default: all dirs) |
| `--session/-s` | Enable listing history for the current session only (default: false) |
| `--human/-h` | Use human-readable formatting for the timestamp and duration (default: false) |

docs/search.md Normal file

@ -0,0 +1,39 @@
# `atuin search`
```
atuin search <query>
```
Atuin search also supports wildcards, with either the `*` or `%` character. By
default, a prefix search is performed (i.e., all queries are automatically
appended with a wildcard).
| Arg | Description |
| ------------------ | ----------------------------------------------------------------------------- |
| `--cwd/-c` | The directory to list history for (default: all dirs) |
| `--exclude-cwd` | Do not include commands that ran in this directory (default: none) |
| `--exit/-e` | Filter by exit code (default: none) |
| `--exclude-exit` | Do not include commands that exited with this value (default: none) |
| `--before`         | Only include commands run before this time (default: none)                    |
| `--after`          | Only include commands run after this time (default: none)                     |
| `--interactive/-i` | Open the interactive search UI (default: false) |
| `--human/-h` | Use human-readable formatting for the timestamp and duration (default: false) |
## Examples
```
# Open the interactive search TUI
atuin search -i
# Open the interactive search TUI preloaded with a query
atuin search -i atuin
# Search for all commands, beginning with cargo, that exited successfully
atuin search --exit 0 cargo
# Search for all commands that failed, from the current dir, and were run before April 1st 2021
atuin search --exclude-exit 0 --before 01/04/2021 --cwd .
# Search for all commands beginning with cargo, that exited successfully, and were run after yesterday at 3pm
atuin search --exit 0 --after "yesterday 3pm" cargo
```

docs/stats.md Normal file

@ -0,0 +1,36 @@
# `atuin stats`
Atuin can also calculate stats based on your history - this is currently a
little basic, but more features are coming.
```
$ atuin stats day last friday
+---------------------+------------+
| Statistic | Value |
+---------------------+------------+
| Most used command | git status |
+---------------------+------------+
| Commands ran | 450 |
+---------------------+------------+
| Unique commands ran | 213 |
+---------------------+------------+
$ atuin stats day 01/01/21 # also accepts absolute dates
```
It can also calculate statistics for all of known history:
```
$ atuin stats all
+---------------------+-------+
| Statistic | Value |
+---------------------+-------+
| Most used command | ls |
+---------------------+-------+
| Commands ran | 8190 |
+---------------------+-------+
| Unique commands ran | 2996 |
+---------------------+-------+
```

docs/sync.md Normal file

@ -0,0 +1,55 @@
# `atuin sync`
Atuin can backup your history to a server, and use this to ensure multiple
machines have the same shell history. This is all encrypted end-to-end, so the
server operator can _never_ see your data!
Anyone can host a server (try `atuin server start`, more docs to follow), but I
host one at https://api.atuin.sh. This is the default server address, which can
be changed in the [config](config.md). Again, I _cannot_ see your data, and
do not want to.
## Sync frequency
Syncing will happen automatically, unless configured otherwise. The sync
frequency is configurable in the [config](config.md).
## Sync
You can manually trigger a sync with `atuin sync`
## Register
Register for a sync account with
```
atuin register -u <USERNAME> -e <EMAIL> -p <PASSWORD>
```
Usernames must be unique, and emails shall only be used for important
notifications (security breaches, changes to service, etc).
Upon success, you are also logged in :) Syncing should happen automatically from
here!
## Key
As all your data is encrypted, Atuin generates a key for you. It's stored in the
Atuin data directory (`~/.local/share/atuin` on Linux).
You can also get this with
```
atuin key
```
Never share this with anyone!
## Login
If you want to login to a new machine, you will require your encryption key
(`atuin key`).
```
atuin login -u <USERNAME> -p <PASSWORD> -k <KEY>
```

install.sh Normal file

@ -0,0 +1 @@
#!/



@ -1,6 +0,0 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
DROP FUNCTION IF EXISTS diesel_set_updated_at();


@ -1,36 +0,0 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
-- Sets up a trigger for the given table to automatically set a column called
-- `updated_at` whenever the row is modified (unless `updated_at` was included
-- in the modified columns)
--
-- # Example
--
-- ```sql
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
--
-- SELECT diesel_manage_updated_at('users');
-- ```
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
BEGIN
EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
BEGIN
IF (
NEW IS DISTINCT FROM OLD AND
NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
) THEN
NEW.updated_at := current_timestamp;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;


@ -1,2 +0,0 @@
-- This file should undo anything in `up.sql`
drop table history;


@ -1,2 +0,0 @@
-- This file should undo anything in `up.sql`
drop table users;


@ -1,2 +0,0 @@
-- This file should undo anything in `up.sql`
drop table sessions;


@ -1,7 +1,7 @@
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
use crossbeam_channel::unbounded;
use termion::event::Key;
use termion::input::TermRead;
@ -13,7 +13,7 @@ pub enum Event<I> {
/// A small event handler that wrap termion input and tick events. Each event
/// type is handled in its own thread and returned to a common `Receiver`
pub struct Events {
rx: mpsc::Receiver<Event<Key>>,
rx: crossbeam_channel::Receiver<Event<Key>>,
}
#[derive(Debug, Clone, Copy)]
@ -37,7 +37,7 @@ impl Events {
}
pub fn with_config(config: Config) -> Events {
let (tx, rx) = mpsc::channel();
let (tx, rx) = unbounded();
{
let tx = tx.clone();
@ -62,7 +62,7 @@ impl Events {
Events { rx }
}
pub fn next(&self) -> Result<Event<Key>, mpsc::RecvError> {
pub fn next(&self) -> Result<Event<Key>, crossbeam_channel::RecvError> {
self.rx.recv()
}
}
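For reference, a minimal sketch of the crossbeam-channel pattern the event handler now uses; the event type and timing here are invented, while the real handler carries termion `Key` events and ticks:
```
use std::{thread, time::Duration};

use crossbeam_channel::unbounded;

#[derive(Debug)]
enum Event {
    Tick,
}

fn main() {
    let (tx, rx) = unbounded();

    // crossbeam senders are cheap to clone, so an input thread and a tick
    // thread can both feed the same receiver, as Events::with_config does.
    thread::spawn(move || loop {
        if tx.send(Event::Tick).is_err() {
            break;
        }
        thread::sleep(Duration::from_millis(250));
    });

    for _ in 0..4 {
        println!("{:?}", rx.recv().expect("event channel closed"));
    }
}
```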


@ -1,7 +1,10 @@
use std::env;
use std::io::Write;
use std::time::Duration;
use eyre::Result;
use structopt::StructOpt;
use tabwriter::TabWriter;
use atuin_client::database::Database;
use atuin_client::history::History;
@ -36,29 +39,65 @@ pub enum Cmd {
#[structopt(long, short)]
session: bool,
},
#[structopt(
about="search for a command",
aliases=&["se", "sea", "sear", "searc"],
)]
Search { query: Vec<String> },
#[structopt(long, short)]
human: bool,
},
#[structopt(
about="get the last command ran",
aliases=&["la", "las"],
)]
Last {},
Last {
#[structopt(long, short)]
human: bool,
},
}
fn print_list(h: &[History]) {
for i in h {
println!("{}", i.command);
#[allow(clippy::clippy::cast_sign_loss)]
pub fn print_list(h: &[History], human: bool) {
let mut writer = TabWriter::new(std::io::stdout()).padding(2);
let lines = h.iter().map(|h| {
if human {
let duration = humantime::format_duration(Duration::from_nanos(std::cmp::max(
h.duration, 0,
) as u64))
.to_string();
let duration: Vec<&str> = duration.split(' ').collect();
let duration = duration[0];
format!(
"{}\t{}\t{}\n",
h.timestamp.format("%Y-%m-%d %H:%M:%S"),
h.command.trim(),
duration,
)
} else {
format!(
"{}\t{}\t{}\n",
h.timestamp.timestamp_nanos(),
h.command.trim(),
h.duration
)
}
});
for i in lines.rev() {
writer
.write_all(i.as_bytes())
.expect("failed to write to tab writer");
}
writer.flush().expect("failed to flush tab writer");
}
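`print_list` relies on the newly added tabwriter dependency for column alignment; a tiny self-contained illustration with invented rows:
```
use std::io::Write;

use tabwriter::TabWriter;

fn main() {
    // Tab-separated cells become elastic, padded columns on flush.
    let mut tw = TabWriter::new(std::io::stdout()).padding(2);
    tw.write_all(b"2021-04-25 18:21:52\tcargo test --workspace\t3s\n")
        .unwrap();
    tw.write_all(b"2021-04-25 18:20:01\tgit status\t500ms\n")
        .unwrap();
    tw.flush().unwrap();
}
```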
impl Cmd {
pub async fn run(&self, settings: &Settings, db: &mut (impl Database + Send)) -> Result<()> {
pub async fn run(
&self,
settings: &Settings,
db: &mut (impl Database + Send + Sync),
) -> Result<()> {
match self {
Self::Start { command: words } => {
let command = words.join(" ");
@ -69,7 +108,7 @@ impl Cmd {
// print the ID
// we use this as the key for calling end
println!("{}", h.id);
db.save(&h)?;
db.save(&h).await?;
Ok(())
}
@ -78,7 +117,7 @@ impl Cmd {
return Ok(());
}
let mut h = db.load(id)?;
let mut h = db.load(id).await?;
if h.duration > 0 {
debug!("cannot end history - already has duration");
@ -90,7 +129,7 @@ impl Cmd {
h.exit = *exit;
h.duration = chrono::Utc::now().timestamp_nanos() - h.timestamp.timestamp_nanos();
db.update(&h)?;
db.update(&h).await?;
if settings.should_sync()? {
debug!("running periodic background sync");
@ -102,41 +141,38 @@ impl Cmd {
Ok(())
}
Self::List { session, cwd, .. } => {
const QUERY_SESSION: &str = "select * from history where session = ?;";
const QUERY_DIR: &str = "select * from history where cwd = ?;";
const QUERY_SESSION_DIR: &str =
"select * from history where cwd = ?1 and session = ?2;";
Self::List {
session,
cwd,
human,
} => {
let params = (session, cwd);
let cwd = env::current_dir()?.display().to_string();
let session = env::var("ATUIN_SESSION")?;
let query_session = format!("select * from history where session = {};", session);
let query_dir = format!("select * from history where cwd = {};", cwd);
let query_session_dir = format!(
"select * from history where cwd = {} and session = {};",
cwd, session
);
let history = match params {
(false, false) => db.list(None, false)?,
(true, false) => db.query(QUERY_SESSION, &[session.as_str()])?,
(false, true) => db.query(QUERY_DIR, &[cwd.as_str()])?,
(true, true) => {
db.query(QUERY_SESSION_DIR, &[cwd.as_str(), session.as_str()])?
}
(false, false) => db.list(None, false).await?,
(true, false) => db.query_history(query_session.as_str()).await?,
(false, true) => db.query_history(query_dir.as_str()).await?,
(true, true) => db.query_history(query_session_dir.as_str()).await?,
};
print_list(&history);
print_list(&history, *human);
Ok(())
}
Self::Search { query } => {
let history = db.prefix_search(&query.join(""))?;
print_list(&history);
Ok(())
}
Self::Last {} => {
let last = db.last()?;
print_list(&[last]);
Self::Last { human } => {
let last = db.last().await?;
print_list(&[last], *human);
Ok(())
}


@ -26,7 +26,7 @@ pub enum Cmd {
}
impl Cmd {
pub fn run(&self, db: &mut impl Database) -> Result<()> {
pub async fn run(&self, db: &mut (impl Database + Send + Sync)) -> Result<()> {
println!(" A'Tuin ");
println!("======================");
println!(" \u{1f30d} ");
@ -41,19 +41,19 @@ impl Cmd {
if shell.ends_with("/zsh") {
println!("Detected ZSH");
import_zsh(db)
import_zsh(db).await
} else {
println!("cannot import {} history", shell);
Ok(())
}
}
Self::Zsh => import_zsh(db),
Self::Zsh => import_zsh(db).await,
}
}
}
fn import_zsh(db: &mut impl Database) -> Result<()> {
async fn import_zsh(db: &mut (impl Database + Send + Sync)) -> Result<()> {
// oh-my-zsh sets HISTFILE=~/.zhistory
// zsh has no default value for this var, but uses ~/.zhistory.
// we could maybe be smarter about this in the future :)
@ -103,7 +103,7 @@ fn import_zsh(db: &mut impl Database) -> Result<()> {
buf.push(i);
if buf.len() == buf_size {
db.save_bulk(&buf)?;
db.save_bulk(&buf).await?;
progress.inc(buf.len() as u64);
buf.clear();
@ -111,7 +111,7 @@ fn import_zsh(db: &mut impl Database) -> Result<()> {
}
if !buf.is_empty() {
db.save_bulk(&buf)?;
db.save_bulk(&buf).await?;
progress.inc(buf.len() as u64);
}


@ -47,12 +47,27 @@ pub enum AtuinCmd {
#[structopt(long, short, about = "filter search result by directory")]
cwd: Option<String>,
#[structopt(long = "exclude-cwd", about = "exclude directory from results")]
exclude_cwd: Option<String>,
#[structopt(long, short, about = "filter search result by exit code")]
exit: Option<i64>,
#[structopt(long = "exclude-exit", about = "exclude results with this exit code")]
exclude_exit: Option<i64>,
#[structopt(long, short, about = "only include results added before this date")]
before: Option<String>,
#[structopt(long, about = "only include results after this date")]
after: Option<String>,
#[structopt(long, short, about = "open interactive search UI")]
interactive: bool,
#[structopt(long, short, about = "use human-readable formatting for time")]
human: bool,
query: Vec<String>,
},
@ -79,20 +94,39 @@ impl AtuinCmd {
let db_path = PathBuf::from(client_settings.db_path.as_str());
let mut db = Sqlite::new(db_path)?;
let mut db = Sqlite::new(db_path).await?;
match self {
Self::History(history) => history.run(&client_settings, &mut db).await,
Self::Import(import) => import.run(&mut db),
Self::Import(import) => import.run(&mut db).await,
Self::Server(server) => server.run(&server_settings).await,
Self::Stats(stats) => stats.run(&mut db, &client_settings),
Self::Stats(stats) => stats.run(&mut db, &client_settings).await,
Self::Init => init::init(),
Self::Search {
cwd,
exit,
interactive,
human,
exclude_exit,
exclude_cwd,
before,
after,
query,
} => search::run(cwd, exit, interactive, &query, &mut db),
} => {
search::run(
cwd,
exit,
interactive,
human,
exclude_exit,
exclude_cwd,
before,
after,
&query,
&mut db,
)
.await
}
Self::Sync { force } => sync::run(&client_settings, force, &mut db).await,
Self::Login(l) => l.run(&client_settings),


@ -1,15 +1,16 @@
use chrono::Utc;
use eyre::Result;
use std::time::Duration;
use std::{io::stdout, ops::Sub};
use termion::{event::Key, input::MouseTerminal, raw::IntoRawMode, screen::AlternateScreen};
use tui::{
backend::TermionBackend,
backend::{Backend, TermionBackend},
layout::{Alignment, Constraint, Corner, Direction, Layout},
style::{Color, Modifier, Style},
text::{Span, Spans, Text},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph},
Terminal,
Frame, Terminal,
};
use unicode_width::UnicodeWidthStr;
@ -28,8 +29,8 @@ struct State {
results_state: ListState,
}
#[allow(clippy::clippy::cast_sign_loss)]
impl State {
#[allow(clippy::clippy::cast_sign_loss)]
fn durations(&self) -> Vec<(String, String)> {
self.results
.iter()
@ -129,24 +130,28 @@ impl State {
}
}
fn query_results(app: &mut State, db: &mut impl Database) {
async fn query_results(app: &mut State, db: &mut (impl Database + Send + Sync)) -> Result<()> {
let results = match app.input.as_str() {
"" => db.list(Some(200), true),
i => db.prefix_search(i),
"" => db.list(Some(200), true).await?,
i => db.search(Some(200), i).await?,
};
if let Ok(results) = results {
app.results = results;
}
app.results = results;
if app.results.is_empty() {
app.results_state.select(None);
} else {
app.results_state.select(Some(0));
}
Ok(())
}
fn key_handler(input: Key, db: &mut impl Database, app: &mut State) -> Option<String> {
async fn key_handler(
input: Key,
db: &mut (impl Database + Send + Sync),
app: &mut State,
) -> Option<String> {
match input {
Key::Esc => return Some(String::from("")),
Key::Char('\n') => {
@ -160,11 +165,11 @@ fn key_handler(input: Key, db: &mut impl Database, app: &mut State) -> Option<St
}
Key::Char(c) => {
app.input.push(c);
query_results(app, db);
query_results(app, db).await.unwrap();
}
Key::Backspace => {
app.input.pop();
query_results(app, db);
query_results(app, db).await.unwrap();
}
Key::Down => {
let i = match app.results_state.selected() {
@ -198,11 +203,82 @@ fn key_handler(input: Key, db: &mut impl Database, app: &mut State) -> Option<St
None
}
#[allow(clippy::clippy::cast_possible_truncation)]
fn draw<T: Backend>(f: &mut Frame<'_, T>, history_count: i64, app: &mut State) {
let chunks = Layout::default()
.direction(Direction::Vertical)
.margin(1)
.constraints(
[
Constraint::Length(2),
Constraint::Min(1),
Constraint::Length(3),
]
.as_ref(),
)
.split(f.size());
let top_chunks = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(chunks[0]);
let top_left_chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(1), Constraint::Length(1)].as_ref())
.split(top_chunks[0]);
let top_right_chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(1), Constraint::Length(1)].as_ref())
.split(top_chunks[1]);
let title = Paragraph::new(Text::from(Span::styled(
format!("A'tuin v{}", VERSION),
Style::default().add_modifier(Modifier::BOLD),
)));
let help = vec![
Span::raw("Press "),
Span::styled("Esc", Style::default().add_modifier(Modifier::BOLD)),
Span::raw(" to exit."),
];
let help = Text::from(Spans::from(help));
let help = Paragraph::new(help);
let input = Paragraph::new(app.input.clone())
.block(Block::default().borders(Borders::ALL).title("Query"));
let stats = Paragraph::new(Text::from(Span::raw(format!(
"history count: {}",
history_count,
))))
.alignment(Alignment::Right);
f.render_widget(title, top_left_chunks[0]);
f.render_widget(help, top_left_chunks[1]);
app.render_results(f, chunks[1]);
f.render_widget(stats, top_right_chunks[0]);
f.render_widget(input, chunks[2]);
f.set_cursor(
// Put cursor past the end of the input text
chunks[2].x + app.input.width() as u16 + 1,
// Move one line down, from the border to the input line
chunks[2].y + 1,
);
}
// this is a big blob of horrible! clean it up!
// for now, it works. But it'd be great if it were more easily readable, and
// modular. I'd like to add some more stats and stuff at some point
#[allow(clippy::clippy::cast_possible_truncation)]
fn select_history(query: &[String], db: &mut impl Database) -> Result<String> {
async fn select_history(
query: &[String],
db: &mut (impl Database + Send + Sync),
) -> Result<String> {
let stdout = stdout().into_raw_mode()?;
let stdout = MouseTerminal::from(stdout);
let stdout = AlternateScreen::from(stdout);
@ -218,91 +294,35 @@ fn select_history(query: &[String], db: &mut impl Database) -> Result<String> {
results_state: ListState::default(),
};
query_results(&mut app, db);
query_results(&mut app, db).await?;
loop {
let history_count = db.history_count().await?;
// Handle input
if let Event::Input(input) = events.next()? {
if let Some(output) = key_handler(input, db, &mut app) {
if let Some(output) = key_handler(input, db, &mut app).await {
return Ok(output);
}
}
terminal.draw(|f| {
let chunks = Layout::default()
.direction(Direction::Vertical)
.margin(1)
.constraints(
[
Constraint::Length(2),
Constraint::Min(1),
Constraint::Length(3),
]
.as_ref(),
)
.split(f.size());
let top_chunks = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(chunks[0]);
let top_left_chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(1), Constraint::Length(1)].as_ref())
.split(top_chunks[0]);
let top_right_chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(1), Constraint::Length(1)].as_ref())
.split(top_chunks[1]);
let title = Paragraph::new(Text::from(Span::styled(
format!("A'tuin v{}", VERSION),
Style::default().add_modifier(Modifier::BOLD),
)));
let help = vec![
Span::raw("Press "),
Span::styled("Esc", Style::default().add_modifier(Modifier::BOLD)),
Span::raw(" to exit."),
];
let help = Text::from(Spans::from(help));
let help = Paragraph::new(help);
let input = Paragraph::new(app.input.clone())
.block(Block::default().borders(Borders::ALL).title("Query"));
let stats = Paragraph::new(Text::from(Span::raw(format!(
"history count: {}",
db.history_count().unwrap()
))))
.alignment(Alignment::Right);
f.render_widget(title, top_left_chunks[0]);
f.render_widget(help, top_left_chunks[1]);
app.render_results(f, chunks[1]);
f.render_widget(stats, top_right_chunks[0]);
f.render_widget(input, chunks[2]);
f.set_cursor(
// Put cursor past the end of the input text
chunks[2].x + app.input.width() as u16 + 1,
// Move one line down, from the border to the input line
chunks[2].y + 1,
);
})?;
terminal.draw(|f| draw(f, history_count, &mut app))?;
}
}
pub fn run(
// This is supposed to more-or-less mirror the command line version, so ofc
// it is going to have a lot of args
#[allow(clippy::clippy::clippy::too_many_arguments)]
pub async fn run(
cwd: Option<String>,
exit: Option<i64>,
interactive: bool,
human: bool,
exclude_exit: Option<i64>,
exclude_cwd: Option<String>,
before: Option<String>,
after: Option<String>,
query: &[String],
db: &mut impl Database,
db: &mut (impl Database + Send + Sync),
) -> Result<()> {
let dir = if let Some(cwd) = cwd {
if cwd == "." {
@ -319,14 +339,70 @@ pub fn run(
};
if interactive {
let item = select_history(query, db)?;
let item = select_history(query, db).await?;
eprintln!("{}", item);
} else {
let results = db.search(dir, exit, query.join(" ").as_str())?;
let results = db.search(None, query.join(" ").as_str()).await?;
for i in &results {
println!("{}", i.command);
}
// TODO: This filtering would be better done in the SQL query, I just
// need a nice way of building queries.
let results: Vec<History> = results
.iter()
.filter(|h| {
if let Some(exit) = exit {
if h.exit != exit {
return false;
}
}
if let Some(exit) = exclude_exit {
if h.exit == exit {
return false;
}
}
if let Some(cwd) = &exclude_cwd {
if h.cwd.as_str() == cwd.as_str() {
return false;
}
}
if let Some(cwd) = &dir {
if h.cwd.as_str() != cwd.as_str() {
return false;
}
}
if let Some(before) = &before {
let before = chrono_english::parse_date_string(
before.as_str(),
Utc::now(),
chrono_english::Dialect::Uk,
);
if before.is_err() || h.timestamp.gt(&before.unwrap()) {
return false;
}
}
if let Some(after) = &after {
let after = chrono_english::parse_date_string(
after.as_str(),
Utc::now(),
chrono_english::Dialect::Uk,
);
if after.is_err() || h.timestamp.lt(&after.unwrap()) {
return false;
}
}
true
})
.map(std::borrow::ToOwned::to_owned)
.collect();
super::history::print_list(&results, human);
}
Ok(())
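The `--before`/`--after` filters above parse their dates with chrono-english; a small hedged sketch of that call in isolation, using example inputs:
```
use chrono::Utc;
use chrono_english::{parse_date_string, Dialect};

fn main() {
    // Accepts "natural language" dates as well as absolute ones.
    for input in ["yesterday 3pm", "last friday", "01/04/2021"] {
        match parse_date_string(input, Utc::now(), Dialect::Uk) {
            Ok(date) => println!("{} -> {}", input, date),
            Err(err) => println!("{} -> parse error: {}", input, err),
        }
    }
}
```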


@ -71,7 +71,11 @@ fn compute_stats(history: &[History]) -> Result<()> {
}
impl Cmd {
pub fn run(&self, db: &mut impl Database, settings: &Settings) -> Result<()> {
pub async fn run(
&self,
db: &mut (impl Database + Send + Sync),
settings: &Settings,
) -> Result<()> {
match self {
Self::Day { words } => {
let words = if words.is_empty() {
@ -86,7 +90,7 @@ impl Cmd {
};
let end = start + Duration::days(1);
let history = db.range(start.into(), end.into())?;
let history = db.range(start.into(), end.into()).await?;
compute_stats(&history)?;
@ -94,7 +98,7 @@ impl Cmd {
}
Self::All => {
let history = db.list(None, false)?;
let history = db.list(None, false).await?;
compute_stats(&history)?;


@ -4,11 +4,15 @@ use atuin_client::database::Database;
use atuin_client::settings::Settings;
use atuin_client::sync;
pub async fn run(settings: &Settings, force: bool, db: &mut (impl Database + Send)) -> Result<()> {
pub async fn run(
settings: &Settings,
force: bool,
db: &mut (impl Database + Send + Sync),
) -> Result<()> {
sync::sync(settings, force, db).await?;
println!(
"Sync complete! {} items in database, force: {}",
db.history_count()?,
db.history_count().await?,
force
);
Ok(())