Fixed Clippy warnings (#204)

Authored by guillaume-be on 2021-12-09 09:33:27 +01:00; committed by GitHub.
parent b444780c18
commit 4175942cc4
7 changed files with 2 additions and 52 deletions
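
Note: every change below silences a dead_code warning (a struct field that is written but never read), except in the generation pipeline, where a hand-written `Default` implementation is replaced by a derive (likely Clippy's derivable_impls lint). Warnings of this kind typically surface with an invocation along the lines of `cargo clippy --all-targets -- -D warnings`; the exact flags used are not part of this commit.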

File 1 of 7 — SST-2 sentiment benchmark

@@ -46,7 +46,6 @@ fn sst2_forward_pass(iters: u64, model: &SentimentModel, sst2_data: &[String]) -
 #[derive(Debug, Deserialize)]
 struct Record {
     sentence: String,
-    label: i8,
 }
 
 fn ss2_processor(file_path: PathBuf) -> Result<Vec<String>, Box<dyn Error>> {
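
Dropping `label` is safe even though the SST-2 TSV file still contains that column: serde's derived `Deserialize` ignores input columns with no matching field. A minimal sketch of a compatible reader, assuming the `csv` crate (the error handling here is illustrative, not the crate's exact code):

    use serde::Deserialize;
    use std::error::Error;
    use std::path::PathBuf;

    #[derive(Debug, Deserialize)]
    struct Record {
        sentence: String,
        // `label` removed: it was never read, so the dead_code lint fired.
    }

    fn ss2_processor(file_path: PathBuf) -> Result<Vec<String>, Box<dyn Error>> {
        let mut reader = csv::ReaderBuilder::new()
            .delimiter(b'\t') // SST-2 ships as tab-separated values
            .from_path(file_path)?;
        reader
            .deserialize()
            .map(|record: Result<Record, _>| {
                record.map(|r| r.sentence).map_err(Into::into)
            })
            .collect()
    }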

File 2 of 7 — SST-2 sentiment example (`fn main`)

@@ -22,7 +22,6 @@ use std::{env, fs};
 #[derive(Debug, Deserialize)]
 struct Record {
     sentence: String,
-    label: i8,
 }
 
 fn ss2_processor(file_path: PathBuf) -> Result<Vec<String>, Box<dyn Error>> {
@@ -47,7 +46,7 @@ fn main() -> anyhow::Result<()> {
     let mut sst2_path = PathBuf::from(env::var("SST2_PATH")
         .expect("Please set the \"squad_dataset\" environment variable pointing to the SQuAD dataset folder"));
     sst2_path.push("train.tsv");
-    let inputs = ss2_processor(sst2_path).unwrap();
+    let inputs = &ss2_processor(sst2_path).unwrap()[..100];
 
     // Run model
     let batch_size = 64;
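
Slicing with `[..100]` caps the benchmark input at the first 100 sentences, but indexing panics if the file yields fewer. A bounds-safe variant (an illustrative alternative, not what the commit does):

    let all_inputs = ss2_processor(sst2_path).unwrap();
    let inputs = &all_inputs[..all_inputs.len().min(100)];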

File 3 of 7 — generation options (`GenerateOptions`)

@@ -1427,7 +1427,7 @@ pub struct GeneratedIndicesOutput {
     pub score: Option<f64>,
 }
 
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Default)]
 /// # Generation options for text generation.
 /// When provided to a `generate` method, these options will take priority over the `GenerateConfig` used to create the
 /// `LanguageGenerator`. Some of these options may be left as `None`, options without a value will individually default
@@ -1476,32 +1476,6 @@ pub struct GenerateOptions<'a> {
     pub output_scores: bool,
 }
 
-impl Default for GenerateOptions<'_> {
-    fn default() -> Self {
-        GenerateOptions {
-            min_length: None,
-            max_length: None,
-            max_new_tokens: None,
-            early_stopping: None,
-            num_return_sequences: None,
-            num_beams: None,
-            num_beam_groups: None,
-            do_sample: None,
-            temperature: None,
-            top_k: None,
-            top_p: None,
-            repetition_penalty: None,
-            length_penalty: None,
-            no_repeat_ngram_size: None,
-            diversity_penalty: None,
-            decoder_start_token_id: None,
-            forced_bos_token_id: None,
-            prefix_allowed_tokens_fn: None,
-            bad_word_ids: None,
-            output_scores: false,
-        }
-    }
-}
 macro_rules! unpack_config {
     ($field_name:ident, $generate_options: ident, $generate_config: ident) => {
         $generate_options.map_or($generate_config.$field_name, |opts| {
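
The deleted `impl Default` was fully derivable: every `Option` field defaults to `None` and `output_scores` to `false`, which is exactly what `#[derive(Default)]` generates, hence the derive added above. A reduced sketch of the equivalence (illustrative field subset, not the real struct):

    #[derive(Clone, Copy, Default)]
    struct GenerateOptionsSketch {
        min_length: Option<i64>, // derived Default -> None
        num_beams: Option<i64>,  // derived Default -> None
        output_scores: bool,     // derived Default -> false
    }

    fn main() {
        let opts = GenerateOptionsSketch::default();
        assert_eq!(opts.min_length, None);
        assert_eq!(opts.num_beams, None);
        assert!(!opts.output_scores);
    }

The `unpack_config!` macro whose opening lines appear as trailing context implements the precedence described in the doc comment: `map_or` falls back to the `GenerateConfig` value whenever no `GenerateOptions` override is supplied.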

File 4 of 7 — Reformer `LocalSelfAttention`

@@ -936,10 +936,8 @@ pub struct LocalSelfAttention {
     num_chunks_after: i64,
     is_decoder: bool,
     dropout: Dropout,
-    pad_token_id: i64,
     num_attention_heads: i64,
     attention_head_size: i64,
-    hidden_size: i64,
     query: nn::Linear,
     key: nn::Linear,
     value: nn::Linear,
@@ -965,7 +963,6 @@ impl LocalSelfAttention {
         let num_chunks_before = config.local_num_chunks_before.unwrap_or(1);
         let num_chunks_after = config.local_num_chunks_after.unwrap_or(0);
         let is_decoder = config.is_decoder;
-        let pad_token_id = config.pad_token_id;
 
         let dropout = Dropout::new(config.hidden_dropout_prob);
@@ -994,10 +991,8 @@ impl LocalSelfAttention {
             num_chunks_after,
             is_decoder,
             dropout,
-            pad_token_id,
             num_attention_heads,
             attention_head_size,
-            hidden_size,
             query,
             key,
             value,
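
As with the struct fields above, `pad_token_id` was copied out of the config at construction time and never read again; the value stays available on `ReformerConfig` wherever it is actually needed. A minimal reproduction of the warning these removals silence (hypothetical struct, not repository code):

    struct AttentionSketch {
        num_attention_heads: i64,
        hidden_size: i64, // never read below -> `dead_code` warning
    }

    fn main() {
        let attn = AttentionSketch {
            num_attention_heads: 12,
            hidden_size: 768,
        };
        println!("heads = {}", attn.num_attention_heads);
    }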

File 5 of 7 — Reformer `AxialPositionEmbeddings`

@@ -13,7 +13,6 @@
 use crate::common::dropout::Dropout;
 use crate::common::embeddings::process_ids_embeddings_pair;
-use crate::reformer::attention_utils::get_least_common_mult_chunk_len;
 use crate::reformer::ReformerConfig;
 use crate::RustBertError;
 use std::borrow::Borrow;
@@ -25,7 +24,6 @@ use tch::{nn, Kind, Tensor};
 pub struct AxialPositionEmbeddings {
     weights: Vec<Tensor>,
     axial_pos_shape: Vec<i64>,
-    least_common_mult_chunk_length: i64,
     dropout_prob: f64,
 }
@@ -46,12 +44,6 @@ impl AxialPositionEmbeddings {
             )));
         };
 
-        let least_common_mult_chunk_length = get_least_common_mult_chunk_len(
-            &config.attn_layers,
-            config.lsh_attn_chunk_length,
-            config.local_attn_chunk_length,
-        );
-
         let mut weights: Vec<Tensor> = vec![];
         let p_weights = p / "weights";
         for (axis_index, axial_pos_embd_dim) in config.axial_pos_embds_dim.iter().enumerate() {
@@ -64,7 +56,6 @@
         Ok(AxialPositionEmbeddings {
             weights,
             axial_pos_shape,
-            least_common_mult_chunk_length,
             dropout_prob: config.hidden_dropout_prob,
         })
     }
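
With the cached field gone, the `get_least_common_mult_chunk_len` import removed in the previous hunk also becomes unnecessary. Judging by its name, the helper computes the least common multiple of the configured LSH and local attention chunk lengths; the arithmetic it wraps looks roughly like this (generic sketch, not the crate's implementation):

    fn gcd(a: i64, b: i64) -> i64 {
        if b == 0 { a } else { gcd(b, a % b) }
    }

    fn lcm(a: i64, b: i64) -> i64 {
        a / gcd(a, b) * b
    }

    fn main() {
        // e.g. an LSH chunk length of 64 and a local chunk length of 96
        assert_eq!(lcm(64, 96), 192);
    }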

File 6 of 7 — `T5Attention`

@@ -49,7 +49,6 @@ pub struct T5Attention {
     is_bidirectional: bool,
     has_relative_attention_bias: bool,
     relative_attention_num_buckets: i64,
-    d_model: i64,
     d_kv: i64,
     n_heads: i64,
     dropout: Dropout,
@@ -106,7 +105,6 @@ impl T5Attention {
             is_bidirectional,
             has_relative_attention_bias,
             relative_attention_num_buckets: config.relative_attention_num_buckets,
-            d_model: config.d_model,
             d_kv: config.d_kv,
             n_heads: config.num_heads,
             dropout,

File 7 of 7 — `XLNetRelativeAttention`

@@ -42,9 +42,6 @@ impl LayerState {
 #[derive(Debug)]
 pub struct XLNetRelativeAttention {
     num_attention_heads: i64,
-    attention_head_size: i64,
-    hidden_size: i64,
     dropout: Dropout,
     output_attentions: bool,
     query: Tensor,
@@ -135,9 +132,6 @@ impl XLNetRelativeAttention {
         let scale = 1f64 / ((config.d_head as f64).powf(0.5f64));
 
         XLNetRelativeAttention {
             num_attention_heads: config.n_head,
-            attention_head_size: config.d_head,
-            hidden_size: config.d_model,
             dropout,
             output_attentions,
             query,
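
All of the removals in this commit are layout-only: each dropped field was derived from the model config during construction and never consulted afterwards, so model weights and outputs are unaffected. When a genuinely unused field must be kept (say, reserved for a planned feature), the conventional alternative to deletion is a per-field allow (hypothetical example, not used here):

    struct Example {
        #[allow(dead_code)] // opt out of the lint instead of deleting
        reserved_for_future_use: i64,
    }

    fn main() {
        let _e = Example {
            reserved_for_future_use: 0,
        };
    }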