mononoke: remove diesel from codebase

Reviewed By: StanislavGlebik

Differential Revision: D10512736

fbshipit-source-id: 9cd8d3abdc896a9e7b01aaec63ff69380efc4f0c
This commit is contained in:
Lukas Piatkowski 2018-10-29 05:14:44 -07:00 committed by Facebook Github Bot
parent 248922636a
commit fc352b60fe
11 changed files with 9 additions and 315 deletions

View File

@ -326,7 +326,7 @@ impl BlobRepo {
cachelib::get_pool("filenodes").ok_or(Error::from(ErrorKind::MissingCachePool(
"filenodes".to_string(),
)))?,
"dieselfilenodes",
"sqlfilenodes",
&args.db_address,
);

View File

@ -1,148 +0,0 @@
// Copyright (c) 2018-present, Facebook, Inc.
// All Rights Reserved.
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
// Provide SqliteConnInner and MysqlConnInner logic to share
#![deny(warnings)]
#![feature(try_from, never_type)]
extern crate diesel;
extern crate failure_ext as failure;
extern crate heapsize;
extern crate tokio;
extern crate db;
extern crate lazy_static;
use std::result;
use std::sync::{Arc, Mutex, MutexGuard};
use diesel::{Connection, MysqlConnection, SqliteConnection};
use diesel::connection::SimpleConnection;
use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
use failure::{Error, Result};
use db::{get_connection_params, ConnectionParams, InstanceRequirement, ProxyRequirement};
/// Shared, synchronous handle to a single SQLite connection.
///
/// Cloning is cheap: every clone shares the same underlying connection
/// behind an `Arc<Mutex<..>>`, so all access is serialized.
#[derive(Clone)]
pub struct SqliteConnInner {
// The one connection shared by all clones; callers take the mutex for
// the duration of each operation (see `get_conn` / `get_master_conn`).
connection: Arc<Mutex<SqliteConnection>>,
}
/// Synchronous SQLite connection logic shared by SQL-backed stores.
impl SqliteConnInner {
    /// Open an existing SQLite database at `path`.
    ///
    /// This is synchronous on purpose: the SQLite backend only ever hits
    /// local disk or memory.
    pub fn open<P: AsRef<str>>(path: P) -> Result<Self> {
        let conn = SqliteConnection::establish(path.as_ref())?;
        Ok(Self {
            connection: Arc::new(Mutex::new(conn)),
        })
    }

    /// Run the schema-setup statements in `up_query` on the held connection.
    fn create_tables(&mut self, up_query: &str) -> Result<()> {
        let guard = self.connection.lock().expect("lock poisoned");
        guard.batch_execute(up_query)?;
        Ok(())
    }

    /// Create a new SQLite database at `path` and set up its tables.
    pub fn create<P: AsRef<str>>(path: P, up_query: &str) -> Result<Self> {
        let mut me = Self::open(path)?;
        me.create_tables(up_query)?;
        Ok(me)
    }

    /// Open a SQLite database, creating the tables if they are missing.
    ///
    /// Table-creation errors (e.g. the tables already exist) are
    /// deliberately ignored.
    pub fn open_or_create<P: AsRef<str>>(path: P, up_query: &str) -> Result<Self> {
        let mut me = Self::open(path)?;
        let _ = me.create_tables(up_query);
        Ok(me)
    }

    /// Create a fresh, empty in-memory database. Great for tests.
    pub fn in_memory(up_query: &str) -> Result<Self> {
        Self::create(":memory:", up_query)
    }

    /// Borrow the connection; infallible (the `!` error type is
    /// uninhabited), errors only ever surface as a poisoned-lock panic.
    pub fn get_conn(&self) -> result::Result<MutexGuard<SqliteConnection>, !> {
        Ok(self.connection.lock().expect("lock poisoned"))
    }

    /// Borrow the connection for writes — same single connection as reads.
    pub fn get_master_conn(&self) -> result::Result<MutexGuard<SqliteConnection>, !> {
        Ok(self.connection.lock().expect("lock poisoned"))
    }
}
/// Shared handle to pooled MySQL connections: one pool for reads against
/// the closest instance, one pinned to the master for writes.
#[derive(Clone)]
pub struct MysqlConnInner {
// Read pool, built against the closest instance (see `open`).
pool: Pool<ConnectionManager<MysqlConnection>>,
// Write pool pinned to the master; kept at a single connection.
master_pool: Pool<ConnectionManager<MysqlConnection>>,
}
impl MysqlConnInner {
pub fn open(db_address: &str) -> Result<Self> {
let local_connection_params = get_connection_params(
db_address.to_string(),
InstanceRequirement::Closest,
None,
Some(ProxyRequirement::Forbidden),
)?;
let master_connection_params = get_connection_params(
db_address.to_string(),
InstanceRequirement::Master,
None,
Some(ProxyRequirement::Forbidden),
)?;
Self::open_with_params(&local_connection_params, &master_connection_params)
}
pub fn open_with_params(
local_connection_params: &ConnectionParams,
master_connection_params: &ConnectionParams,
) -> Result<Self> {
let local_url = local_connection_params.to_diesel_url()?;
let master_url = master_connection_params.to_diesel_url()?;
let pool = Pool::builder()
.max_size(10)
.min_idle(Some(1))
.build(ConnectionManager::new(local_url.clone()))?;
let master_pool = Pool::builder()
.max_size(1)
.min_idle(Some(1))
.build(ConnectionManager::new(master_url.clone()))?;
Ok(Self { pool, master_pool })
}
pub fn create_test_db<P: AsRef<str>>(prefix: P, up_query: &str) -> Result<Self> {
let params = db::create_test_db(prefix)?;
Self::create(&params, up_query)
}
pub fn create(params: &ConnectionParams, up_query: &str) -> Result<Self> {
let me = Self::from(MysqlConnInner::open_with_params(params, params)?);
me.get_master_conn()?.batch_execute(up_query)?;
Ok(me)
}
pub fn get_conn(&self) -> Result<PooledConnection<ConnectionManager<MysqlConnection>>> {
self.pool.get().map_err(Error::from)
}
pub fn get_master_conn(&self) -> Result<PooledConnection<ConnectionManager<MysqlConnection>>> {
self.master_pool.get().map_err(Error::from)
}
}

View File

@ -7,7 +7,6 @@
#![feature(duration_as_u128)]
extern crate clap;
extern crate db;
extern crate failure_ext as failure;
extern crate filenodes;
extern crate futures;

View File

@ -8,7 +8,6 @@ license = "GPLv2+"
ascii = "0.8.6"
bincode = "0.9.2"
bytes = "0.4.5"
diesel = "1.3.2"
error-chain = "0.11.0"
futures = "0.1.17"
heapsize = "0.4.1"

View File

@ -43,11 +43,7 @@
//! differences. These are for interfacing at the edges, but are not used within Mononoke's core
//! structures at all.
#![deny(warnings)]
// FIXME T34253207, remove when https://github.com/diesel-rs/diesel/issues/1785 fixed
#![allow(proc_macro_derive_resolution_fallback)]
#![feature(const_fn)]
#![feature(never_type)]
#![feature(try_from)]
extern crate abomonation;
#[macro_use]
@ -60,8 +56,6 @@ extern crate bitflags;
extern crate bytes;
extern crate crypto;
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate failure_ext as failure;
extern crate itertools;
extern crate rust_thrift;

View File

@ -19,7 +19,6 @@ use thrift;
use RepoPath;
use errors::*;
use hash::{self, Sha1};
use sql_types::{HgChangesetIdSql, HgFileNodeIdSql, HgManifestIdSql};
pub const NULL_HASH: HgNodeHash = HgNodeHash(hash::NULL);
pub const NULL_CSID: HgChangesetId = HgChangesetId(NULL_HASH);
@ -190,8 +189,7 @@ impl Arbitrary for HgNodeHash {
}
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[derive(HeapSizeOf, FromSqlRow, AsExpression, Abomonation)]
#[sql_type = "HgChangesetIdSql"]
#[derive(HeapSizeOf, Abomonation)]
pub struct HgChangesetId(HgNodeHash);
impl HgChangesetId {
@ -268,16 +266,10 @@ impl Arbitrary for HgChangesetId {
}
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[derive(HeapSizeOf, FromSqlRow, AsExpression)]
#[sql_type = "HgManifestIdSql"]
#[derive(HeapSizeOf)]
pub struct HgManifestId(HgNodeHash);
impl HgManifestId {
#[inline]
pub(crate) fn as_nodehash(&self) -> &HgNodeHash {
&self.0
}
pub fn into_nodehash(self) -> HgNodeHash {
self.0
}
@ -311,8 +303,7 @@ impl Arbitrary for HgManifestId {
}
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[derive(Abomonation, HeapSizeOf, FromSqlRow, AsExpression)]
#[sql_type = "HgFileNodeIdSql"]
#[derive(Abomonation, HeapSizeOf)]
pub struct HgFileNodeId(HgNodeHash);
impl HgFileNodeId {

View File

@ -4,15 +4,12 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
use diesel::sql_types::Integer;
// XXX RepositoryId might want to be a short string like a Phabricator callsign.
// TODO: (rain1) T31391673 move this to the mononoke-types crate
/// Represents a repository. This ID is used throughout storage.
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[derive(HeapSizeOf, FromSqlRow, AsExpression, Abomonation, Serialize)]
#[sql_type = "Integer"]
#[derive(HeapSizeOf, Abomonation, Serialize)]
pub struct RepositoryId(i32);
impl RepositoryId {

View File

@ -4,96 +4,14 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
// Definitions for interfacing with SQL data stores using the diesel library.
// Definitions for interfacing with SQL data stores
use std::io::Write;
use diesel::backend::Backend;
use diesel::deserialize::{self, FromSql};
use diesel::serialize::{self, IsNull, Output, ToSql};
use diesel::sql_types::{Binary, Integer};
use sql::mysql_async::{FromValueError, Value, prelude::{ConvIr, FromValue}};
use {HgChangesetId, HgFileNodeId, HgManifestId, HgNodeHash, RepositoryId};
use errors::*;
use {HgChangesetId, HgFileNodeId, HgNodeHash, RepositoryId};
type FromValueResult<T> = ::std::result::Result<T, FromValueError>;
#[derive(QueryId, SqlType)]
#[mysql_type = "Blob"]
#[sqlite_type = "Binary"]
pub struct HgChangesetIdSql;
#[derive(QueryId, SqlType)]
#[mysql_type = "Blob"]
#[sqlite_type = "Binary"]
pub struct HgManifestIdSql;
#[derive(QueryId, SqlType)]
#[mysql_type = "Blob"]
#[sqlite_type = "Binary"]
pub struct HgFileNodeIdSql;
impl<DB: Backend> ToSql<HgChangesetIdSql, DB> for HgChangesetId {
fn to_sql<W: Write>(&self, out: &mut Output<W, DB>) -> serialize::Result {
out.write_all(self.as_nodehash().0.as_ref())?;
Ok(IsNull::No)
}
}
impl<DB: Backend> FromSql<HgChangesetIdSql, DB> for HgChangesetId
where
*const [u8]: FromSql<Binary, DB>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
// Using unsafe here saves on a heap allocation. See https://goo.gl/K6hapb.
let raw_bytes: *const [u8] = FromSql::<Binary, DB>::from_sql(bytes)?;
let raw_bytes: &[u8] = unsafe { &*raw_bytes };
let hash = HgNodeHash::from_bytes(raw_bytes).compat()?;
Ok(Self::new(hash))
}
}
impl<DB: Backend> ToSql<HgManifestIdSql, DB> for HgManifestId {
fn to_sql<W: Write>(&self, out: &mut Output<W, DB>) -> serialize::Result {
out.write_all(self.as_nodehash().0.as_ref())?;
Ok(IsNull::No)
}
}
impl<DB: Backend> FromSql<HgManifestIdSql, DB> for HgManifestId
where
*const [u8]: FromSql<Binary, DB>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
// Using unsafe here saves on a heap allocation. See https://goo.gl/K6hapb.
let raw_bytes: *const [u8] = FromSql::<Binary, DB>::from_sql(bytes)?;
let raw_bytes: &[u8] = unsafe { &*raw_bytes };
let hash = HgNodeHash::from_bytes(raw_bytes).compat()?;
Ok(Self::new(hash))
}
}
impl<DB: Backend> ToSql<HgFileNodeIdSql, DB> for HgFileNodeId {
fn to_sql<W: Write>(&self, out: &mut Output<W, DB>) -> serialize::Result {
out.write_all(self.as_nodehash().0.as_ref())?;
Ok(IsNull::No)
}
}
impl<DB: Backend> FromSql<HgFileNodeIdSql, DB> for HgFileNodeId
where
*const [u8]: FromSql<Binary, DB>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
// Using unsafe here saves on a heap allocation. See https://goo.gl/K6hapb.
let raw_bytes: *const [u8] = FromSql::<Binary, DB>::from_sql(bytes)?;
let raw_bytes: &[u8] = unsafe { &*raw_bytes };
let hash = HgNodeHash::from_bytes(raw_bytes).compat()?;
Ok(Self::new(hash))
}
}
impl From<HgFileNodeId> for Value {
fn from(id: HgFileNodeId) -> Self {
Value::Bytes(id.as_nodehash().0.as_ref().into())
@ -149,25 +67,6 @@ impl FromValue for HgChangesetId {
type Intermediate = HgNodeHash;
}
impl<DB: Backend> ToSql<Integer, DB> for RepositoryId
where
i32: ToSql<Integer, DB>,
{
fn to_sql<W: Write>(&self, out: &mut Output<W, DB>) -> serialize::Result {
self.id().to_sql(out)
}
}
impl<DB: Backend> FromSql<Integer, DB> for RepositoryId
where
i32: FromSql<Integer, DB>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
let val = FromSql::<Integer, DB>::from_sql(bytes)?;
Ok(RepositoryId::new(val))
}
}
impl From<RepositoryId> for Value {
fn from(repo_id: RepositoryId) -> Self {
Value::UInt(repo_id.id() as u64)

View File

@ -7,12 +7,9 @@
//! Base types used throughout Mononoke.
#![deny(warnings)]
// FIXME T34253207, remove when https://github.com/diesel-rs/diesel/issues/1785 fixed
#![allow(proc_macro_derive_resolution_fallback)]
// The allow(dead_code) is temporary until Thrift serialization is done.
#![allow(dead_code)]
#![feature(try_from)]
#![feature(const_fn)]
#![feature(const_fn, try_from)]
extern crate abomonation;
#[macro_use]
@ -24,8 +21,6 @@ extern crate blake2;
extern crate bytes;
extern crate chrono;
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate failure_ext as failure;
extern crate heapsize;
#[macro_use]

View File

@ -4,13 +4,6 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
use std::io::Write;
use diesel::backend::Backend;
use diesel::deserialize::{self, FromSql};
use diesel::serialize::{self, IsNull, Output, ToSql};
use diesel::sql_types::Binary;
use failure::ResultExt;
use hash::Blake2;
use sql::mysql_async::{FromValueError, Value, prelude::{ConvIr, FromValue}};
@ -18,29 +11,6 @@ use typed_hash::ChangesetId;
type FromValueResult<T> = ::std::result::Result<T, FromValueError>;
#[derive(QueryId, SqlType)]
#[mysql_type = "Blob"]
#[sqlite_type = "Binary"]
pub struct ChangesetIdSql;
impl<DB: Backend> ToSql<ChangesetIdSql, DB> for ChangesetId {
fn to_sql<W: Write>(&self, out: &mut Output<W, DB>) -> serialize::Result {
out.write_all(self.as_ref())?;
Ok(IsNull::No)
}
}
impl<DB: Backend> FromSql<ChangesetIdSql, DB> for ChangesetId
where
*const [u8]: FromSql<Binary, DB>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
let raw_bytes: *const [u8] = FromSql::<Binary, DB>::from_sql(bytes)?;
let raw_bytes: &[u8] = unsafe { &*raw_bytes };
Ok(ChangesetId::from_bytes(raw_bytes).compat()?)
}
}
impl From<ChangesetId> for Value {
fn from(id: ChangesetId) -> Self {
Value::Bytes(id.as_ref().into())

View File

@ -17,7 +17,6 @@ use bonsai_changeset::BonsaiChangeset;
use errors::*;
use file_contents::FileContents;
use hash::{Blake2, Context};
use sql_types::ChangesetIdSql;
use thrift;
// There is no NULL_HASH for typed hashes. Any places that need a null hash should use an
@ -37,8 +36,7 @@ pub trait MononokeId: Copy + Send + 'static {
/// An identifier for a changeset in Mononoke.
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[derive(Abomonation, HeapSizeOf, FromSqlRow, AsExpression)]
#[sql_type = "ChangesetIdSql"]
#[derive(Abomonation, HeapSizeOf)]
pub struct ChangesetId(Blake2);
/// An identifier for file contents in Mononoke.