mononoke: add log-only mode in redaction

Summary:
Before redacting a file it would be good to check that it is not accessed by
anything. Having a log-only mode helps with that: access to a log-only redacted
file is allowed, but every access is logged.
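
The gist of the change, as a minimal standalone sketch (the RedactedMetadata
struct mirrors the one introduced in store.rs below; the function and its
logging are illustrative, not the real implementation):

    use std::collections::HashMap;

    #[derive(Clone, Debug)]
    pub struct RedactedMetadata {
        pub task: String,
        pub log_only: bool,
    }

    // Decide what to do for a key: a key absent from the map is served
    // silently; a log-only entry is logged but still served; a fully
    // redacted entry is logged and refused.
    fn check_key(redacted: &HashMap<String, RedactedMetadata>, key: &str) -> Result<(), String> {
        match redacted.get(key) {
            None => Ok(()),
            Some(meta) => {
                // Both redaction modes record the access (to scuba, in the real code).
                eprintln!("redacted access: key={} task={}", key, meta.task);
                if meta.log_only {
                    Ok(())
                } else {
                    Err(format!("Censored: {}", meta.task))
                }
            }
        }
    }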

Reviewed By: ikostia

Differential Revision: D23503666

fbshipit-source-id: ae492d4e0e6f2da792d36ee42a73f591e632dfa4
Stanislau Hlebik 2020-09-04 07:35:10 -07:00 committed by Facebook GitHub Bot
parent 0740f99f13
commit 7b323a4fd9
14 changed files with 295 additions and 94 deletions

View File

@ -44,7 +44,7 @@ use mononoke_types::RepositoryId;
use newfilenodes::NewFilenodesBuilder;
use phases::SqlPhasesFactory;
use readonlyblob::ReadOnlyBlobstore;
use redactedblobstore::SqlRedactedContentStore;
use redactedblobstore::{RedactedMetadata, SqlRedactedContentStore};
use repo_blobstore::RepoBlobstoreArgs;
use scuba_ext::{ScubaSampleBuilder, ScubaSampleBuilderExt};
use segmented_changelog::{
@ -289,7 +289,7 @@ pub async fn open_blobrepo_given_datasources(
pub struct TestRepoBuilder {
repo_id: RepositoryId,
blobstore: Arc<dyn Blobstore>,
redacted: Option<HashMap<String, String>>,
redacted: Option<HashMap<String, RedactedMetadata>>,
}
impl TestRepoBuilder {
@ -306,7 +306,7 @@ impl TestRepoBuilder {
self
}
pub fn redacted(mut self, redacted: Option<HashMap<String, String>>) -> Self {
pub fn redacted(mut self, redacted: Option<HashMap<String, RedactedMetadata>>) -> Self {
self.redacted = redacted;
self
}
@ -501,7 +501,7 @@ async fn new_development(
fb: FacebookInit,
sql_factory: &MetadataSqlFactory,
blobstore: Arc<dyn Blobstore>,
redacted_blobs: Option<HashMap<String, String>>,
redacted_blobs: Option<HashMap<String, RedactedMetadata>>,
censored_scuba_params: CensoredScubaParams,
repoid: RepositoryId,
filestore_config: FilestoreConfig,
@ -665,7 +665,7 @@ async fn new_production(
fb: FacebookInit,
sql_factory: &MetadataSqlFactory,
blobstore: Arc<dyn Blobstore>,
redacted_blobs: Option<HashMap<String, String>>,
redacted_blobs: Option<HashMap<String, RedactedMetadata>>,
censored_scuba_params: CensoredScubaParams,
repoid: RepositoryId,
bookmarks_cache_ttl: Option<Duration>,

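The signature changes in this file only thread the richer value type through
the repo builders; a hedged sketch of what a caller now supplies (the key and
task below are hypothetical):

    use std::collections::HashMap;
    // RedactedMetadata is introduced in store.rs later in this diff.
    use redactedblobstore::RedactedMetadata;

    fn example_redacted_map() -> Option<HashMap<String, RedactedMetadata>> {
        let mut redacted = HashMap::new();
        redacted.insert(
            "content.blake2.deadbeef".to_string(), // hypothetical blobstore key
            RedactedMetadata {
                task: "T12345".to_string(), // hypothetical tracking task
                log_only: true,             // observe accesses without blocking them
            },
        );
        Some(redacted)
    }
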
View File

@ -11,7 +11,7 @@ use context::CoreContext;
use futures::future::BoxFuture;
use mononoke_types::{BlobstoreBytes, RepositoryId};
use prefixblob::PrefixBlobstore;
use redactedblobstore::{RedactedBlobstore, RedactedBlobstoreConfig};
use redactedblobstore::{RedactedBlobstore, RedactedBlobstoreConfig, RedactedMetadata};
use scuba_ext::ScubaSampleBuilder;
use std::collections::HashMap;
use std::ops::Deref;
@ -90,7 +90,7 @@ pub struct RepoBlobstoreArgs {
impl RepoBlobstoreArgs {
pub fn new<T: Blobstore + Clone>(
blobstore: T,
redacted_blobs: Option<HashMap<String, String>>,
redacted_blobs: Option<HashMap<String, RedactedMetadata>>,
repoid: RepositoryId,
scuba_builder: ScubaSampleBuilder,
) -> Self {

View File

@ -18,7 +18,6 @@ use futures_old::{future, future::Either, Future, IntoFuture};
use mononoke_types::BlobstoreBytes;
use prefixblob::PrefixBlobstore;
use redactedblobstore::{config::GET_OPERATION, RedactedBlobstore};
use slog::debug;
use stats::prelude::*;
use std::fmt;
use std::sync::Arc;
@ -443,18 +442,7 @@ impl<T: CacheBlobstoreExt + Clone> CacheBlobstoreExt for RedactedBlobstore<T> {
ctx: CoreContext,
key: String,
) -> BoxFuture01<Option<BlobstoreGetData>, Error> {
self.access_blobstore(&key)
.map_err({
cloned!(ctx, key);
move |err| {
debug!(
ctx.logger(),
"Accessing redacted blobstore with key {:?}", key
);
self.to_scuba_redacted_blob_accessed(&ctx, &key, GET_OPERATION);
err
}
})
self.access_blobstore(&ctx, &key, GET_OPERATION)
.map(move |blobstore| blobstore.get_no_cache_fill(ctx, key))
.into_future()
.flatten()
@ -467,18 +455,7 @@ impl<T: CacheBlobstoreExt + Clone> CacheBlobstoreExt for RedactedBlobstore<T> {
ctx: CoreContext,
key: String,
) -> BoxFuture01<Option<BlobstoreGetData>, Error> {
self.access_blobstore(&key)
.map_err({
cloned!(ctx, key);
move |err| {
debug!(
ctx.logger(),
"Accessing redacted blobstore with key {:?}", key
);
self.to_scuba_redacted_blob_accessed(&ctx, &key, GET_OPERATION);
err
}
})
self.access_blobstore(&ctx, &key, GET_OPERATION)
.map(move |blobstore| blobstore.get_cache_only(ctx, key))
.into_future()
.flatten()

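Both rewritten call sites bridge a Result<impl Future> into a single old-style
future; in isolation the idiom looks like this (a sketch against futures 0.1,
imported as futures_old in this codebase, where Result implements IntoFuture):

    use futures_old::{future, Future, IntoFuture};

    fn get_if_allowed(allowed: bool) -> impl Future<Item = u32, Error = ()> {
        let res: Result<_, ()> = if allowed {
            Ok(future::ok(42)) // access granted: run the real fetch future
        } else {
            Err(())            // access denied: fail the future immediately
        };
        // Result -> FutureResult<impl Future, _>, then flatten the nested
        // future into a single one.
        res.into_future().flatten()
    }
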
View File

@ -9,7 +9,9 @@ CREATE TABLE `censored_contents` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
`content_key` VARCHAR(255) NOT NULL,
`task` VARCHAR(64) NOT NULL,
`add_timestamp` BIGINT(20) NOT NULL
`add_timestamp` BIGINT(20) NOT NULL,
`log_only` BIT DEFAULT NULL,
UNIQUE(`content_key`)
);
CREATE INDEX `content_key`

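The new column is deliberately nullable: rows written before this migration
carry no log_only value, so the read path below has to pick a default. A
one-line sketch of the intended decoding (NULL must mean "enforce", never
"log only"):

    fn decode_log_only(raw: Option<bool>) -> bool {
        // Legacy rows with NULL log_only stay fully redacted.
        raw.unwrap_or(false)
    }
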
View File

@ -17,7 +17,6 @@ use slog::debug;
use std::collections::HashMap;
mod errors;
pub use crate::errors::ErrorKind;
use cloned::cloned;
use std::{
ops::Deref,
sync::{
@ -26,7 +25,7 @@ use std::{
},
};
mod store;
pub use crate::store::SqlRedactedContentStore;
pub use crate::store::{RedactedMetadata, SqlRedactedContentStore};
pub mod config {
pub const GET_OPERATION: &str = "GET";
@ -36,7 +35,7 @@ pub mod config {
#[derive(Debug, Clone)]
pub struct RedactedBlobstoreConfigInner {
redacted: Option<HashMap<String, String>>,
redacted: Option<HashMap<String, RedactedMetadata>>,
scuba_builder: ScubaSampleBuilder,
}
@ -55,7 +54,7 @@ impl Deref for RedactedBlobstoreConfig {
impl RedactedBlobstoreConfig {
pub fn new(
redacted: Option<HashMap<String, String>>,
redacted: Option<HashMap<String, RedactedMetadata>>,
scuba_builder: ScubaSampleBuilder,
) -> Self {
Self {
@ -117,10 +116,25 @@ impl<T: Blobstore + Clone> RedactedBlobstoreInner<T> {
}
// Checks for access to this key: the access is logged, and the blobstore is yielded unless the key is fully redacted.
pub fn access_blobstore(&self, key: &str) -> Result<&T, Error> {
pub fn access_blobstore(
&self,
ctx: &CoreContext,
key: &str,
operation: &'static str,
) -> Result<&T, Error> {
match &self.config.redacted {
Some(redacted) => redacted.get(key).map_or(Ok(&self.blobstore), |task| {
Err(ErrorKind::Censored(key.to_string(), task.to_string()).into())
Some(redacted) => redacted.get(key).map_or(Ok(&self.blobstore), |metadata| {
debug!(
ctx.logger(),
"{} operation with redacted blobstore with key {:?}", operation, key
);
self.to_scuba_redacted_blob_accessed(&ctx, &key, operation);
if metadata.log_only {
Ok(&self.blobstore)
} else {
Err(ErrorKind::Censored(key.to_string(), metadata.task.to_string()).into())
}
}),
None => Ok(&self.blobstore),
}
@ -163,18 +177,7 @@ impl<T: Blobstore + Clone> Blobstore for RedactedBlobstoreInner<T> {
key: String,
) -> BoxFuture<'static, Result<Option<BlobstoreGetData>, Error>> {
let get = self
.access_blobstore(&key)
.map_err({
cloned!(ctx, key);
move |err| {
debug!(
ctx.logger(),
"Accessing redacted blobstore with key {:?}", key
);
self.to_scuba_redacted_blob_accessed(&ctx, &key, config::GET_OPERATION);
err
}
})
.access_blobstore(&ctx, &key, config::GET_OPERATION)
.map(move |blobstore| blobstore.get(ctx, key));
async move { get?.await }.boxed()
}
@ -186,19 +189,7 @@ impl<T: Blobstore + Clone> Blobstore for RedactedBlobstoreInner<T> {
value: BlobstoreBytes,
) -> BoxFuture<'static, Result<(), Error>> {
let put = self
.access_blobstore(&key)
.map_err({
cloned!(ctx, key);
move |err| {
debug!(
ctx.logger(),
"Updating redacted blobstore with key {:?}", key
);
self.to_scuba_redacted_blob_accessed(&ctx, &key, config::PUT_OPERATION);
err
}
})
.access_blobstore(&ctx, &key, config::PUT_OPERATION)
.map(move |blobstore| blobstore.put(ctx, key, value));
async move { put?.await }.boxed()
}
@ -275,7 +266,10 @@ mod test {
let inner = EagerMemblob::new();
let redacted_pairs = hashmap! {
redacted_key.clone() => redacted_task.clone(),
redacted_key.clone() => RedactedMetadata {
task: redacted_task.clone(),
log_only: false,
},
};
let blob = RedactedBlobstore::new(
@ -319,4 +313,35 @@ mod test {
let res = blob.get(ctx.clone(), unredacted_key.clone()).await;
assert!(res.is_ok(), "the key should be found and available");
}
#[fbinit::compat_test]
async fn test_log_only_redacted_key(fb: FacebookInit) -> Result<(), Error> {
let redacted_log_only_key = "bar".to_string();
let redacted_task = "bar task".to_string();
let ctx = CoreContext::test_mock(fb);
let inner = EagerMemblob::new();
let redacted_pairs = hashmap! {
redacted_log_only_key.clone() => RedactedMetadata {
task: redacted_task.clone(),
log_only: true,
},
};
let blob = RedactedBlobstore::new(
PrefixBlobstore::new(inner, "prefix"),
RedactedBlobstoreConfig::new(Some(redacted_pairs), ScubaSampleBuilder::with_discard()),
);
// Since this key is redacted in log-only mode, both the put and the get should succeed
let val = BlobstoreBytes::from_bytes("test bar");
blob.put(ctx.clone(), redacted_log_only_key.clone(), val.clone())
.await?;
let actual = blob.get(ctx.clone(), redacted_log_only_key.clone()).await?;
assert_eq!(Some(val), actual.map(|val| val.into_bytes()));
Ok(())
}
}

View File

@ -14,7 +14,6 @@ use sql::{queries, Connection};
use sql_construct::{SqlConstruct, SqlConstructFromMetadataDatabaseConfig};
use sql_ext::SqlConnections;
use std::collections::HashMap;
use std::iter::FromIterator;
#[derive(Clone)]
pub struct SqlRedactedContentStore {
@ -25,14 +24,21 @@ pub struct SqlRedactedContentStore {
queries! {
write InsertRedactedBlobs(
values: (content_key: String, task: String, add_timestamp: Timestamp)
values: (content_key: String, task: String, add_timestamp: Timestamp, log_only: bool)
) {
none,
"INSERT into censored_contents(content_key, task, add_timestamp) VALUES {values}"
mysql(
"INSERT INTO censored_contents(content_key, task, add_timestamp, log_only) VALUES {values}
ON DUPLICATE KEY UPDATE task = VALUES(task), add_timestamp = VALUES(add_timestamp), log_only = VALUES(log_only)
"
)
sqlite(
"REPLACE INTO censored_contents(content_key, task, add_timestamp, log_only) VALUES {values}"
)
}
read GetAllRedactedBlobs() -> (String, String) {
"SELECT content_key, task
read GetAllRedactedBlobs() -> (String, String, Option<bool>) {
"SELECT content_key, task, log_only
FROM censored_contents"
}
@ -58,10 +64,27 @@ impl SqlConstruct for SqlRedactedContentStore {
impl SqlConstructFromMetadataDatabaseConfig for SqlRedactedContentStore {}
#[derive(Clone, Debug)]
pub struct RedactedMetadata {
pub task: String,
pub log_only: bool,
}
impl SqlRedactedContentStore {
pub fn get_all_redacted_blobs(&self) -> BoxFuture<HashMap<String, String>, Error> {
pub fn get_all_redacted_blobs(&self) -> BoxFuture<HashMap<String, RedactedMetadata>, Error> {
GetAllRedactedBlobs::query(&self.read_connection)
.map(HashMap::from_iter)
.map(|redacted_blobs| {
redacted_blobs
.into_iter()
.map(|(key, task, log_only)| {
let redacted_metadata = RedactedMetadata {
task,
log_only: log_only.unwrap_or(false),
};
(key, redacted_metadata)
})
.collect()
})
.boxify()
}
@ -70,10 +93,12 @@ impl SqlRedactedContentStore {
content_keys: &Vec<String>,
task: &String,
add_timestamp: &Timestamp,
log_only: bool,
) -> impl Future<Item = (), Error = Error> {
let log_only = &log_only;
let redacted_inserts: Vec<_> = content_keys
.iter()
.map(move |key| (key, task, add_timestamp))
.map(move |key| (key, task, add_timestamp, log_only))
.collect();
InsertRedactedBlobs::query(&self.write_connection, &redacted_inserts[..])
@ -109,12 +134,12 @@ mod test {
let store = SqlRedactedContentStore::with_sqlite_in_memory().unwrap();
store
.insert_redacted_blobs(&redacted_keys1, &task1, &Timestamp::now())
.insert_redacted_blobs(&redacted_keys1, &task1, &Timestamp::now(), false)
.compat()
.await
.expect("insert failed");
store
.insert_redacted_blobs(&redacted_keys2, &task2, &Timestamp::now())
.insert_redacted_blobs(&redacted_keys2, &task2, &Timestamp::now(), true)
.compat()
.await
.expect("insert failed");
@ -125,6 +150,23 @@ mod test {
.await
.expect("select failed");
assert_eq!(res.len(), 4);
assert!(!res.get(&key_a).unwrap().log_only);
assert!(!res.get(&key_b).unwrap().log_only);
assert!(res.get(&key_c).unwrap().log_only);
assert!(res.get(&key_d).unwrap().log_only);
store
.insert_redacted_blobs(&redacted_keys1, &task1, &Timestamp::now(), true)
.compat()
.await
.expect("insert failed");
let res = store
.get_all_redacted_blobs()
.compat()
.await
.expect("select failed");
assert!(res.get(&key_a).unwrap().log_only);
assert!(res.get(&key_b).unwrap().log_only);
store
.delete_redacted_blobs(&redacted_keys1)

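Because the insert is an upsert (ON DUPLICATE KEY UPDATE on MySQL, REPLACE on
sqlite), re-inserting the same keys flips the mode in place, which matches the
intended rollout: observe first, then enforce. A hedged sketch (store setup,
Timestamp, and the .compat() bridging as in the tests above; the task name is
hypothetical):

    async fn escalate(store: &SqlRedactedContentStore, keys: &Vec<String>) -> Result<(), Error> {
        let task = "T12345".to_string();
        // Phase 1: log-only. Accesses to the keys are logged but still succeed.
        store
            .insert_redacted_blobs(keys, &task, &Timestamp::now(), true)
            .compat()
            .await?;
        // Phase 2: once the logs show no legitimate traffic, enforce.
        store
            .insert_redacted_blobs(keys, &task, &Timestamp::now(), false)
            .compat()
            .await?;
        Ok(())
    }
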
View File

@ -31,7 +31,9 @@ use mercurial_types::{HgChangesetEnvelope, HgFileEnvelope, HgManifestEnvelope};
use metaconfig_types::{BlobConfig, BlobstoreId, Redaction, ScrubAction, StorageConfig};
use mononoke_types::{FileContents, RepositoryId};
use prefixblob::PrefixBlobstore;
use redactedblobstore::{RedactedBlobstore, RedactedBlobstoreConfig, SqlRedactedContentStore};
use redactedblobstore::{
RedactedBlobstore, RedactedBlobstoreConfig, RedactedMetadata, SqlRedactedContentStore,
};
use scuba_ext::{ScubaSampleBuilder, ScubaSampleBuilderExt};
use slog::{info, warn, Logger};
use sql_ext::facebook::MysqlOptions;
@ -273,7 +275,7 @@ fn get_from_sources<T: Blobstore + Clone>(
no_prefix: bool,
key: String,
ctx: CoreContext,
redacted_blobs: Option<HashMap<String, String>>,
redacted_blobs: Option<HashMap<String, RedactedMetadata>>,
scuba_redaction_builder: ScubaSampleBuilder,
repo_id: RepositoryId,
) -> BoxFuture<Option<BlobstoreGetData>, Error> {

View File

@ -38,6 +38,7 @@ pub const REDACTION: &str = "redaction";
const REDACTION_ADD: &str = "add";
const REDACTION_REMOVE: &str = "remove";
const REDACTION_LIST: &str = "list";
const ARG_LOG_ONLY: &str = "log-only";
const ARG_FORCE: &str = "force";
const ARG_INPUT_FILE: &str = "input-file";
const ARG_MAIN_BOOKMARK: &str = "main-bookmark";
@ -75,6 +76,12 @@ pub fn build_subcommand<'a, 'b>() -> App<'a, 'b> {
.takes_value(false)
.help("by default redaction fails if any of the redacted files is in main-bookmark. This flag overrides it.")
)
.arg(
Arg::with_name(ARG_LOG_ONLY)
.long(ARG_LOG_ONLY)
.takes_value(false)
.help("redact file in log-only mode. All accesses to this file will be allowed, but they will all be logged")
)
))
.subcommand(add_path_parameters(
SubCommand::with_name(REDACTION_REMOVE)
@ -313,6 +320,7 @@ async fn redaction_add<'a, 'b>(
.collect();
let force = sub_m.is_present(ARG_FORCE);
let log_only = sub_m.is_present(ARG_LOG_ONLY);
if !force {
let main_bookmark = sub_m
@ -329,7 +337,7 @@ async fn redaction_add<'a, 'b>(
let timestamp = Timestamp::now();
redacted_blobs
.insert_redacted_blobs(&blobstore_keys, &task, &timestamp)
.insert_redacted_blobs(&blobstore_keys, &task, &timestamp, log_only)
.compat()
.await?;
@ -373,10 +381,11 @@ fn redaction_list(
Ok(path_keys
.into_iter()
.filter_map(move |(path, key)| {
redacted_blobs
.get(&key.blobstore_key())
.cloned()
.map(|task| (task, path, key))
redacted_blobs.get(&key.blobstore_key()).cloned().map(
|redacted_meta| {
(redacted_meta.task, path, redacted_meta.log_only)
},
)
})
.collect::<Vec<_>>())
}
@ -389,8 +398,13 @@ fn redaction_list(
info!(logger, "No files are redacted at this commit");
} else {
res.sort();
res.into_iter().for_each(|(task_id, file_path, _)| {
info!(logger, "{:20}: {}", task_id, file_path);
res.into_iter().for_each(|(task_id, file_path, log_only)| {
let log_only_msg =
if log_only { " (log only)" } else { "" };
info!(
logger,
"{:20}: {}{}", task_id, file_path, log_only_msg
);
})
}
}

View File

@ -568,6 +568,7 @@ mod test {
use futures_old::stream as stream_old;
use hyper::Uri;
use mononoke_types_mocks::hash::ONES_SHA256;
use redactedblobstore::RedactedMetadata;
use std::sync::Arc;
use lfs_protocol::Sha256 as LfsSha256;
@ -833,7 +834,10 @@ mod test {
// into it, which has the data (but now it is redacted)!
let repo = TestRepoBuilder::new()
.redacted(Some(
hashmap! { meta.content_id.blobstore_key() => "test".to_string() },
hashmap! { meta.content_id.blobstore_key() => RedactedMetadata {
task: "test".to_string(),
log_only: false,
}},
))
.build()?
.dangerous_override(|_: Arc<dyn Blobstore>| stub_blobstore);

View File

@ -129,6 +129,7 @@ mod test {
use maplit::hashmap;
use mononoke_types::typed_hash::MononokeId;
use mononoke_types_mocks::contentid::ONES_CTID;
use redactedblobstore::RedactedMetadata;
#[fbinit::compat_test]
async fn test_redacted_fetch(fb: FacebookInit) -> Result<(), Error> {
@ -137,7 +138,10 @@ mod test {
let repo = TestRepoBuilder::new()
.redacted(Some(
hashmap! { content_id.blobstore_key() => reason.to_string() },
hashmap! { content_id.blobstore_key() => RedactedMetadata {
task: reason.to_string(),
log_only: false,
}},
))
.build()?;

View File

@ -35,6 +35,7 @@ EPHEMERAL_DB_ALLOWLIST = {
"test-mononoke-hg-sync-job-generate-bundles-loop.t",
"test-blobstore-healer.t",
"test-infinitepush-mutation.t",
"test-redaction-sql.t",
}
# At this time, all tests support the network void script (except when

View File

@ -160,3 +160,20 @@ List redacted files for a commit without any
* Listing redacted files for ChangesetId: HgChangesetId(HgNodeHash(Sha1(ac82d8b1f7c418c61a493ed229ffaa981bda8e90))) (glob)
* Please be patient. (glob)
* No files are redacted at this commit (glob)
Redact a file in log-only mode
$ mononoke_admin redaction add "[TASK]Censor b" 2cc2702dde1d7133c30a1ed763ee82c04befb237 dir/g --log-only --force
* using repo "repo" repoid RepositoryId(0) (glob)
* changeset resolved as: ChangesetId(Blake2(*)) (glob)
$ mononoke_admin redaction list 2cc2702dde1d7133c30a1ed763ee82c04befb237
* using repo "repo" repoid RepositoryId(0) (glob)
* changeset resolved as: ChangesetId(Blake2(*)) (glob)
* Listing redacted files for ChangesetId: HgChangesetId(HgNodeHash(Sha1(*))) (glob)
* Please be patient. (glob)
* [TASK]Censor b : b (glob)
* [TASK]Censor b : dir/g (log only) (glob)
* [TASK]Censor c : dir/c (glob)
$ sqlite3 "$TESTTMP/monsql/sqlite_dbs" 'SELECT * FROM censored_contents;'
1|content.blake2.21c519fe0eb401bc97888f270902935f858d0c5361211f892fd26ed9ce127ff9|[TASK]Censor b|*|0 (glob)
2|content.blake2.096c8cc4a38f793ac05fc3506ed6346deb5b857100642adbf4de6720411b10e2|[TASK]Censor c|*|0 (glob)
6|content.blake2.0991063aafe55b2bcbbfa6b349e76ab5d57a102c89e841abdac8ce3f84d55b8a|[TASK]Censor b|*|1 (glob)

View File

@ -0,0 +1,85 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License found in the LICENSE file in the root
# directory of this source tree.
$ . "${TEST_FIXTURES}/library.sh"
setup configuration
$ REPOTYPE="blob_files"
$ setup_common_config $REPOTYPE
$ cd $TESTTMP
setup hg server repo
$ hginit_treemanifest repo-hg
$ cd repo-hg
$ touch a && hg ci -A -q -m 'add a'
create master bookmark
$ hg bookmark master_bookmark -r tip
$ cd $TESTTMP
setup repo-push
$ hgclone_treemanifest ssh://user@dummy/repo-hg repo-push --noupdate
blobimport
$ blobimport repo-hg/.hg repo
start mononoke
$ mononoke
$ wait_for_mononoke
$ cd repo-push
$ cat >> .hg/hgrc <<EOF
> [extensions]
> pushrebase =
> rebase =
> remotenames =
> EOF
$ cd ../repo-push
$ hgmn up -q 0
Push files
$ echo b > b
$ echo f > f
$ mkdir dir
$ mkdir dir/dirdir
$ echo 'c' > dir/c
$ echo 'd' > dir/d
$ echo 'g' > dir/g
$ echo 'e' > dir/dirdir/e
$ hg ci -A -q -m "add b,c,d and e"
$ hgmn push -q -r . --to master_bookmark
$ tglogpnr
@ 2cc2702dde1d public 'add b,c,d and e' default/master_bookmark
|
o ac82d8b1f7c4 public 'add a' master_bookmark
Censor file (file 'b' in commit '2cc2702dde1d7133c30a1ed763ee82c04befb237')
$ mononoke_admin redaction add "[TASK]Censor b" 2cc2702dde1d7133c30a1ed763ee82c04befb237 b --force --log-only
* using repo "repo" repoid RepositoryId(0) (glob)
* changeset resolved as: ChangesetId(Blake2(*)) (glob)
$ mononoke_admin redaction list 2cc2702dde1d7133c30a1ed763ee82c04befb237
* using repo "repo" repoid RepositoryId(0) (glob)
* changeset resolved as: ChangesetId(Blake2(*)) (glob)
* Listing redacted files for ChangesetId: HgChangesetId(HgNodeHash(Sha1(*))) (glob)
* Please be patient. (glob)
* [TASK]Censor b : b (log only) (glob)
$ mononoke_admin redaction add "[TASK]Censor b" 2cc2702dde1d7133c30a1ed763ee82c04befb237 b --force
* using repo "repo" repoid RepositoryId(0) (glob)
* changeset resolved as: ChangesetId(Blake2(*)) (glob)
$ mononoke_admin redaction list 2cc2702dde1d7133c30a1ed763ee82c04befb237
* using repo "repo" repoid RepositoryId(0) (glob)
* changeset resolved as: ChangesetId(Blake2(*)) (glob)
* Listing redacted files for ChangesetId: HgChangesetId(HgNodeHash(Sha1(*))) (glob)
* Please be patient. (glob)
* [TASK]Censor b : b (glob)

View File

@ -144,11 +144,17 @@ Update redacted blob
$ cd "$TESTTMP/repo-push"
$ echo "testcupdate" > c
$ hg ci -q -m "uncensore c"
$ hgmn push -q -r . --to master_bookmark
$ echo "log-only" > log_only
$ hg add log_only
$ hg ci -q -m "log-only"
$ hgmn push -q -r . --to master_bookmark
$ tglogpnr
@ bbb84cdc8ec0 public 'uncensore c' default/master_bookmark
@ 73f850a22540 public 'log-only' default/master_bookmark
|
o bbb84cdc8ec0 public 'uncensore c'
|
o 064d994d0240 public 'add censored c'
|
@ -158,6 +164,7 @@ Update redacted blob
$ hg log -T '{node}\n'
73f850a225400422723d433ab3ea194c09c2c8c5
bbb84cdc8ec039fe71d78a3adb6f5cf244fafb6a
064d994d0240f9738dba1ef7479f0a4ce8486b05
14961831bd3af3a6331fef7e63367d61cb6c9f6b
@ -167,6 +174,9 @@ Censor the redacted blob (file 'c' in commit '064d994d0240f9738dba1ef7479f0a4ce
$ mononoke_admin redaction add my_task 064d994d0240f9738dba1ef7479f0a4ce8486b05 c --force
* using repo "repo" repoid RepositoryId(0) (glob)
* changeset resolved as: * (glob)
$ mononoke_admin redaction add my_task_2 73f850a225400422723d433ab3ea194c09c2c8c5 log_only --force --log-only
* using repo "repo" repoid RepositoryId(0) (glob)
* changeset resolved as: ChangesetId(Blake2(aac5f17ddfcadf26a410f701b860b2a7c7d5c082cec420b816296014660f7fca)) (glob)
Restart mononoke
$ killandwait $MONONOKE_PID
@ -256,7 +266,9 @@ As of the time of writing, updating redacted files throws an error - artifact of
$ hgmn pull -q
$ tglogpnr
o bbb84cdc8ec0 public 'uncensore c' default/master_bookmark
o 73f850a22540 public 'log-only' default/master_bookmark
|
o bbb84cdc8ec0 public 'uncensore c'
|
@ 064d994d0240 public 'add censored c'
|
@ -273,7 +285,9 @@ Expect success (no blob in this commit is redacted)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ tglogpnr
@ bbb84cdc8ec0 public 'uncensore c' default/master_bookmark
o 73f850a22540 public 'log-only' default/master_bookmark
|
@ bbb84cdc8ec0 public 'uncensore c'
|
o 064d994d0240 public 'add censored c'
|
@ -297,7 +311,9 @@ Test rebasing local commit on top of master_bookmark, when base commit contains
$ hgmn pull -q
$ tglogpnr
o bbb84cdc8ec0 public 'uncensore c' default/master_bookmark
o 73f850a22540 public 'log-only' default/master_bookmark
|
o bbb84cdc8ec0 public 'uncensore c'
|
| @ c6e4e7cae299 draft 'update a'
|/
@ -315,7 +331,9 @@ Should be successful
$ tglogpnr
@ d967612e0cc1 draft 'update a'
|
o bbb84cdc8ec0 public 'uncensore c' default/master_bookmark
| o 73f850a22540 public 'log-only' default/master_bookmark
|/
o bbb84cdc8ec0 public 'uncensore c'
|
o 064d994d0240 public 'add censored c'
|
@ -330,7 +348,9 @@ Should be successful
$ tglogpnr
o d967612e0cc1 draft 'update a'
|
o bbb84cdc8ec0 public 'uncensore c' default/master_bookmark
| o 73f850a22540 public 'log-only' default/master_bookmark
|/
o bbb84cdc8ec0 public 'uncensore c'
|
@ 064d994d0240 public 'add censored c'
|
@ -345,7 +365,9 @@ Updating from a commit that contains a redacted file to another commit should su
$ tglogpnr
o d967612e0cc1 draft 'update a'
|
@ bbb84cdc8ec0 public 'uncensore c' default/master_bookmark
| o 73f850a22540 public 'log-only' default/master_bookmark
|/
@ bbb84cdc8ec0 public 'uncensore c'
|
o 064d994d0240 public 'add censored c'
|
@ -372,8 +394,14 @@ Updating to a commit with censored files works in getpackv2 repo
$ cat c
This version of the file is redacted and you are not allowed to access it. Update or rebase to a newer commit.
Update to log_only commit
$ hgmn up -q 73f850a225400422723d433ab3ea194c09c2c8c5
$ cat log_only
log-only
Check logging
$ cat "$TESTTMP/censored_scuba.json"
{"int":{"time":*},"normal":{"key":"content.blake2.096c8cc4a38f793ac05fc3506ed6346deb5b857100642adbf4de6720411b10e2","operation":"GET","session_uuid":"*","unix_username":"None"}} (glob)
{"int":{"time":*},"normal":{"key":"content.blake2.096c8cc4a38f793ac05fc3506ed6346deb5b857100642adbf4de6720411b10e2","operation":"GET","session_uuid":"*","unix_username":"None"}} (glob)
{"int":{"time":*},"normal":{"key":"content.blake2.096c8cc4a38f793ac05fc3506ed6346deb5b857100642adbf4de6720411b10e2","operation":"GET","session_uuid":"*","unix_username":"None"}} (glob)
{"int":{"time":*},"normal":{"key":"content.blake2.8e86b59a7708c54ab97f95db4a39e27886e5d3775c24a7d0d8106f82b3d38d49","operation":"GET","session_uuid":"*","unix_username":"None"}} (glob)