From 4175942cc458f10ad351f8b033437845f2861861 Mon Sep 17 00:00:00 2001
From: guillaume-be
Date: Thu, 9 Dec 2021 09:33:27 +0100
Subject: [PATCH] Fixed Clippy warnings (#204)

---
 benches/sst2_benchmark.rs           |  1 -
 examples/sentiment_analysis_sst2.rs |  3 +--
 src/pipelines/generation_utils.rs   | 28 +---------------------------
 src/reformer/attention.rs           |  5 -----
 src/reformer/embeddings.rs          |  9 ---------
 src/t5/attention.rs                 |  2 --
 src/xlnet/attention.rs              |  6 ------
 7 files changed, 2 insertions(+), 52 deletions(-)

diff --git a/benches/sst2_benchmark.rs b/benches/sst2_benchmark.rs
index 297f9f8..313b4dd 100644
--- a/benches/sst2_benchmark.rs
+++ b/benches/sst2_benchmark.rs
@@ -46,7 +46,6 @@ fn sst2_forward_pass(iters: u64, model: &SentimentModel, sst2_data: &[String]) -
 #[derive(Debug, Deserialize)]
 struct Record {
     sentence: String,
-    label: i8,
 }
 
 fn ss2_processor(file_path: PathBuf) -> Result<Vec<String>, Box<dyn Error>> {
diff --git a/examples/sentiment_analysis_sst2.rs b/examples/sentiment_analysis_sst2.rs
index 830a369..54c1b9b 100644
--- a/examples/sentiment_analysis_sst2.rs
+++ b/examples/sentiment_analysis_sst2.rs
@@ -22,7 +22,6 @@ use std::{env, fs};
 #[derive(Debug, Deserialize)]
 struct Record {
     sentence: String,
-    label: i8,
 }
 
 fn ss2_processor(file_path: PathBuf) -> Result<Vec<String>, Box<dyn Error>> {
@@ -47,7 +46,7 @@ fn main() -> anyhow::Result<()> {
     let mut sst2_path = PathBuf::from(env::var("SST2_PATH")
         .expect("Please set the \"squad_dataset\" environment variable pointing to the SQuAD dataset folder"));
     sst2_path.push("train.tsv");
-    let inputs = ss2_processor(sst2_path).unwrap();
+    let inputs = &ss2_processor(sst2_path).unwrap()[..100];
 
     // Run model
     let batch_size = 64;
diff --git a/src/pipelines/generation_utils.rs b/src/pipelines/generation_utils.rs
index fc9cf9d..6598820 100644
--- a/src/pipelines/generation_utils.rs
+++ b/src/pipelines/generation_utils.rs
@@ -1427,7 +1427,7 @@ pub struct GeneratedIndicesOutput {
     pub score: Option<f64>,
 }
 
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Default)]
 /// # Generation options for text generation.
 /// When provided to a `generate` method, these options will take priority over the `GenerateConfig` used to create the
 /// `LanguageGenerator`. Some of these options may be left as `None`, options without a value will individually default
@@ -1476,32 +1476,6 @@ pub struct GenerateOptions<'a> {
     pub output_scores: bool,
 }
 
-impl Default for GenerateOptions<'_> {
-    fn default() -> Self {
-        GenerateOptions {
-            min_length: None,
-            max_length: None,
-            max_new_tokens: None,
-            early_stopping: None,
-            num_return_sequences: None,
-            num_beams: None,
-            num_beam_groups: None,
-            do_sample: None,
-            temperature: None,
-            top_k: None,
-            top_p: None,
-            repetition_penalty: None,
-            length_penalty: None,
-            no_repeat_ngram_size: None,
-            diversity_penalty: None,
-            decoder_start_token_id: None,
-            forced_bos_token_id: None,
-            prefix_allowed_tokens_fn: None,
-            bad_word_ids: None,
-            output_scores: false,
-        }
-    }
-}
 macro_rules! unpack_config {
     ($field_name:ident, $generate_options: ident, $generate_config: ident) => {
         $generate_options.map_or($generate_config.$field_name, |opts| {
diff --git a/src/reformer/attention.rs b/src/reformer/attention.rs
index 83e843d..b9b6035 100644
--- a/src/reformer/attention.rs
+++ b/src/reformer/attention.rs
@@ -936,10 +936,8 @@ pub struct LocalSelfAttention {
     num_chunks_after: i64,
     is_decoder: bool,
     dropout: Dropout,
-    pad_token_id: i64,
     num_attention_heads: i64,
     attention_head_size: i64,
-    hidden_size: i64,
     query: nn::Linear,
     key: nn::Linear,
     value: nn::Linear,
@@ -965,7 +963,6 @@ impl LocalSelfAttention {
         let num_chunks_before = config.local_num_chunks_before.unwrap_or(1);
         let num_chunks_after = config.local_num_chunks_after.unwrap_or(0);
         let is_decoder = config.is_decoder;
-        let pad_token_id = config.pad_token_id;
 
         let dropout = Dropout::new(config.hidden_dropout_prob);
 
@@ -994,10 +991,8 @@ impl LocalSelfAttention {
             num_chunks_after,
             is_decoder,
             dropout,
-            pad_token_id,
             num_attention_heads,
             attention_head_size,
-            hidden_size,
             query,
             key,
             value,
diff --git a/src/reformer/embeddings.rs b/src/reformer/embeddings.rs
index 4609a1d..f8d165a 100644
--- a/src/reformer/embeddings.rs
+++ b/src/reformer/embeddings.rs
@@ -13,7 +13,6 @@
 
 use crate::common::dropout::Dropout;
 use crate::common::embeddings::process_ids_embeddings_pair;
-use crate::reformer::attention_utils::get_least_common_mult_chunk_len;
 use crate::reformer::ReformerConfig;
 use crate::RustBertError;
 use std::borrow::Borrow;
@@ -25,7 +24,6 @@ use tch::{nn, Kind, Tensor};
 pub struct AxialPositionEmbeddings {
     weights: Vec<Tensor>,
     axial_pos_shape: Vec<i64>,
-    least_common_mult_chunk_length: i64,
     dropout_prob: f64,
 }
 
@@ -46,12 +44,6 @@ impl AxialPositionEmbeddings {
             )));
         };
 
-        let least_common_mult_chunk_length = get_least_common_mult_chunk_len(
-            &config.attn_layers,
-            config.lsh_attn_chunk_length,
-            config.local_attn_chunk_length,
-        );
-
         let mut weights: Vec<Tensor> = vec![];
         let p_weights = p / "weights";
         for (axis_index, axial_pos_embd_dim) in config.axial_pos_embds_dim.iter().enumerate() {
@@ -64,7 +56,6 @@ impl AxialPositionEmbeddings {
         Ok(AxialPositionEmbeddings {
             weights,
             axial_pos_shape,
-            least_common_mult_chunk_length,
             dropout_prob: config.hidden_dropout_prob,
         })
     }
diff --git a/src/t5/attention.rs b/src/t5/attention.rs
index 9d46497..4ea6a45 100644
--- a/src/t5/attention.rs
+++ b/src/t5/attention.rs
@@ -49,7 +49,6 @@ pub struct T5Attention {
     is_bidirectional: bool,
     has_relative_attention_bias: bool,
     relative_attention_num_buckets: i64,
-    d_model: i64,
     d_kv: i64,
     n_heads: i64,
     dropout: Dropout,
@@ -106,7 +105,6 @@ impl T5Attention {
             is_bidirectional,
             has_relative_attention_bias,
             relative_attention_num_buckets: config.relative_attention_num_buckets,
-            d_model: config.d_model,
             d_kv: config.d_kv,
             n_heads: config.num_heads,
             dropout,
diff --git a/src/xlnet/attention.rs b/src/xlnet/attention.rs
index dde3536..88dddac 100644
--- a/src/xlnet/attention.rs
+++ b/src/xlnet/attention.rs
@@ -42,9 +42,6 @@ impl LayerState {
 
 #[derive(Debug)]
 pub struct XLNetRelativeAttention {
-    num_attention_heads: i64,
-    attention_head_size: i64,
-    hidden_size: i64,
     dropout: Dropout,
     output_attentions: bool,
     query: Tensor,
@@ -135,9 +132,6 @@ impl XLNetRelativeAttention {
         let scale = 1f64 / ((config.d_head as f64).powf(0.5f64));
 
         XLNetRelativeAttention {
-            num_attention_heads: config.n_head,
-            attention_head_size: config.d_head,
-            hidden_size: config.d_model,
             dropout,
             output_attentions,
             query,