Updated for additional tokenizer settings & configuration validity check

This commit is contained in:
Guillaume B 2020-08-30 09:35:38 +02:00
parent 3200026c0b
commit 50ee1a1e4e
20 changed files with 178 additions and 61 deletions

View File

@@ -1,6 +1,6 @@
[package]
name = "rust-bert"
version = "0.8.1"
version = "0.9.0"
authors = ["Guillaume Becquin <guillaume.becquin@gmail.com>"]
edition = "2018"
description = "Ready-to-use NLP pipelines and transformer-based models (BERT, DistilBERT, GPT2,...)"
@@ -30,7 +30,7 @@ all-tests = []
features = ["doc-only"]
[dependencies]
rust_tokenizers = {version = "~4.1.0", path = "E:/Coding/backup-rust/rust-tokenizers/main"}
rust_tokenizers = {version = "~5.0.0", path = "E:/Coding/backup-rust/rust-tokenizers/main"}
tch = "~0.2.0"
serde_json = "1.0.56"
serde = { version = "1.0.114", features = ["derive"] }

View File

@@ -35,7 +35,8 @@ fn main() -> anyhow::Result<()> {
// Set-up masked LM model
let device = Device::Cpu;
let mut vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let config = BertConfig::from_file(config_path);
let bert_model = BertForMaskedLM::new(&vs.root(), &config);
vs.load(weights_path)?;

View File

@@ -40,7 +40,8 @@ fn main() -> anyhow::Result<()> {
// Set-up masked LM model
let device = Device::Cpu;
let mut vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let config = DistilBertConfig::from_file(config_path);
let distil_bert_model = DistilBertModelMaskedLM::new(&vs.root(), &config);
vs.load(weights_path)?;

View File

@@ -39,7 +39,8 @@ fn main() -> anyhow::Result<()> {
// Set-up masked LM model
let device = Device::Cpu;
let mut vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let config = ElectraConfig::from_file(config_path);
let electra_model = ElectraDiscriminator::new(&vs.root(), &config);
vs.load(weights_path)?;

View File

@@ -39,7 +39,8 @@ fn main() -> anyhow::Result<()> {
// Set-up masked LM model
let device = Device::Cpu;
let mut vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let config = ElectraConfig::from_file(config_path);
let electra_model = ElectraForMaskedLM::new(&vs.root(), &config);
vs.load(weights_path)?;

View File

@@ -31,6 +31,7 @@ fn main() -> anyhow::Result<()> {
None, //merges resource only relevant with ModelType::Roberta
false, //lowercase
false,
None,
);
let qa_model = QuestionAnsweringModel::new(config)?;

View File

@@ -33,6 +33,7 @@ fn main() -> anyhow::Result<()> {
None, //merges resource only relevant with ModelType::Roberta
false, //lowercase
false,
None,
LabelAggregationOption::Mode,
);

View File

@@ -42,7 +42,8 @@
//! let weights_path = download_resource(&weights_resource)?;
//! let device = Device::cuda_if_available();
//! let mut vs = nn::VarStore::new(device);
//! let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
//! let tokenizer: BertTokenizer =
//! BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
//! let config = BertConfig::from_file(config_path);
//! let bert_model = BertForMaskedLM::new(&vs.root(), &config);
//! vs.load(weights_path)?;

View File

@@ -15,6 +15,9 @@ pub enum RustBertError {
#[error("Tokenizer error: {0}")]
TokenizerError(String),
#[error("Invalid configuration error: {0}")]
InvalidConfigurationError(String),
}
impl From<reqwest::Error> for RustBertError {

View File

@@ -44,7 +44,8 @@
//! let weights_path = download_resource(&weights_resource)?;
//! let device = Device::cuda_if_available();
//! let mut vs = nn::VarStore::new(device);
//! let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
//! let tokenizer: BertTokenizer =
//! BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
//! let config = DistilBertConfig::from_file(config_path);
//! let bert_model = DistilBertModelMaskedLM::new(&vs.root(), &config);
//! vs.load(weights_path)?;

View File

@@ -46,7 +46,8 @@
//! let weights_path = download_resource(&weights_resource)?;
//! let device = Device::cuda_if_available();
//! let mut vs = nn::VarStore::new(device);
//! let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
//! let tokenizer: BertTokenizer =
//! BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
//! let config = ElectraConfig::from_file(config_path);
//! let electra_model = ElectraForMaskedLM::new(&vs.root(), &config);
//! vs.load(weights_path)?;

View File

@@ -42,7 +42,7 @@ use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::Path;
#[derive(Clone, Copy, Serialize, Deserialize)]
#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
/// # Identifies the type of model
pub enum ModelType {
Bert,
@@ -131,33 +131,106 @@ impl TokenizerOption {
vocab_path: &str,
merges_path: Option<&str>,
lower_case: bool,
add_prefix_space: bool,
strip_accents: Option<bool>,
add_prefix_space: Option<bool>,
) -> Result<Self, RustBertError> {
Ok(match model_type {
let tokenizer = match model_type {
ModelType::Bert | ModelType::DistilBert | ModelType::Electra => {
TokenizerOption::Bert(BertTokenizer::from_file(vocab_path, lower_case)?)
if add_prefix_space.is_some() {
return Err(RustBertError::InvalidConfigurationError(
format!("Optional input `add_prefix_space` set to value {} but cannot be used by {:?}",
add_prefix_space.unwrap(),
model_type)));
}
TokenizerOption::Bert(BertTokenizer::from_file(
vocab_path,
lower_case,
strip_accents.unwrap_or(lower_case),
)?)
}
ModelType::Roberta => {
if strip_accents.is_some() {
return Err(RustBertError::InvalidConfigurationError(format!(
"Optional input `strip_accents` set to value {} but cannot be used by {:?}",
strip_accents.unwrap(),
model_type
)));
}
TokenizerOption::Roberta(RobertaTokenizer::from_file(
vocab_path,
merges_path.expect("No merges specified!"),
lower_case,
add_prefix_space.unwrap_or(false),
)?)
}
ModelType::Marian => {
if strip_accents.is_some() {
return Err(RustBertError::InvalidConfigurationError(format!(
"Optional input `strip_accents` set to value {} but cannot be used by {:?}",
strip_accents.unwrap(),
model_type
)));
}
if add_prefix_space.is_some() {
return Err(RustBertError::InvalidConfigurationError(
format!("Optional input `add_prefix_space` set to value {} but cannot be used by {:?}",
add_prefix_space.unwrap(),
model_type)));
}
TokenizerOption::Marian(MarianTokenizer::from_files(
vocab_path,
merges_path.expect("No merges specified!"),
lower_case,
)?)
}
ModelType::T5 => {
if strip_accents.is_some() {
return Err(RustBertError::InvalidConfigurationError(format!(
"Optional input `strip_accents` set to value {} but cannot be used by {:?}",
strip_accents.unwrap(),
model_type
)));
}
if add_prefix_space.is_some() {
return Err(RustBertError::InvalidConfigurationError(
format!("Optional input `add_prefix_space` set to value {} but cannot be used by {:?}",
add_prefix_space.unwrap(),
model_type)));
}
TokenizerOption::T5(T5Tokenizer::from_file(vocab_path, lower_case)?)
}
ModelType::Roberta => TokenizerOption::Roberta(RobertaTokenizer::from_file(
vocab_path,
merges_path.expect("No merges specified!"),
lower_case,
add_prefix_space,
)?),
ModelType::Marian => TokenizerOption::Marian(MarianTokenizer::from_files(
vocab_path,
merges_path.expect("No merges specified!"),
lower_case,
)?),
ModelType::T5 => TokenizerOption::T5(T5Tokenizer::from_file(vocab_path, lower_case)?),
ModelType::XLMRoberta => {
if strip_accents.is_some() {
return Err(RustBertError::InvalidConfigurationError(format!(
"Optional input `strip_accents` set to value {} but cannot be used by {:?}",
strip_accents.unwrap(),
model_type
)));
}
if add_prefix_space.is_some() {
return Err(RustBertError::InvalidConfigurationError(
format!("Optional input `add_prefix_space` set to value {} but cannot be used by {:?}",
add_prefix_space.unwrap(),
model_type)));
}
TokenizerOption::XLMRoberta(XLMRobertaTokenizer::from_file(vocab_path, lower_case)?)
}
ModelType::Albert => TokenizerOption::Albert(AlbertTokenizer::from_file(
vocab_path,
lower_case,
!lower_case,
)?),
})
ModelType::Albert => {
if strip_accents.is_some() {
return Err(RustBertError::InvalidConfigurationError(format!(
"Optional input `strip_accents` set to value {} but cannot be used by {:?}",
strip_accents.unwrap(),
model_type
)));
}
TokenizerOption::Albert(AlbertTokenizer::from_file(
vocab_path,
lower_case,
strip_accents.unwrap_or(lower_case),
)?)
}
};
Ok(tokenizer)
}
/// Returns the model type

View File

@@ -191,8 +191,10 @@ pub struct QuestionAnsweringConfig {
pub model_type: ModelType,
/// Flag indicating if the model expects a lower casing of the input
pub lower_case: bool,
/// Flag indicating if the tokenizer should strip accents (normalization). Only used for BERT / ALBERT models
pub strip_accents: Option<bool>,
/// Flag indicating if the tokenizer should add a white space before each tokenized input (needed for some Roberta models)
pub add_prefix_space: bool,
pub add_prefix_space: Option<bool>,
}
impl QuestionAnsweringConfig {
@@ -213,7 +215,8 @@ impl QuestionAnsweringConfig {
vocab_resource: Resource,
merges_resource: Option<Resource>,
lower_case: bool,
add_prefix_space: bool,
strip_accents: impl Into<Option<bool>>,
add_prefix_space: impl Into<Option<bool>>,
) -> QuestionAnsweringConfig {
QuestionAnsweringConfig {
model_type,
@@ -222,7 +225,8 @@ impl QuestionAnsweringConfig {
vocab_resource,
merges_resource,
lower_case,
add_prefix_space,
strip_accents: strip_accents.into(),
add_prefix_space: add_prefix_space.into(),
device: Device::cuda_if_available(),
}
}
@@ -244,7 +248,8 @@ impl Default for QuestionAnsweringConfig {
device: Device::cuda_if_available(),
model_type: ModelType::DistilBert,
lower_case: false,
add_prefix_space: false,
add_prefix_space: None,
strip_accents: None,
}
}
}
@@ -417,6 +422,7 @@ impl QuestionAnsweringModel {
vocab_path.to_str().unwrap(),
merges_path.map(|path| path.to_str().unwrap()),
question_answering_config.lower_case,
question_answering_config.strip_accents,
question_answering_config.add_prefix_space,
)?;
let pad_idx = tokenizer

View File

@@ -28,7 +28,8 @@
//! Resource::Remote(RemoteResource::from_pretrained(DistilBertConfigResources::DISTIL_BERT_SST2)),
//! None, //merges resource only relevant with ModelType::Roberta
//! true, //lowercase
//! false, //add_prefix_space
//! None, //strip_accents
//! None, //add_prefix_space
//! );
//!
//! //Create the model
@@ -104,8 +105,10 @@ pub struct SequenceClassificationConfig {
pub merges_resource: Option<Resource>,
/// Automatically lower case all input upon tokenization (assumes a lower-cased model)
pub lower_case: bool,
/// Flag indicating if the tokenizer should strip accents (normalization). Only used for BERT / ALBERT models
pub strip_accents: Option<bool>,
/// Flag indicating if the tokenizer should add a white space before each tokenized input (needed for some Roberta models)
pub add_prefix_space: bool,
pub add_prefix_space: Option<bool>,
/// Device to place the model on (default: CUDA/GPU when available)
pub device: Device,
}
@@ -128,7 +131,8 @@ impl SequenceClassificationConfig {
vocab_resource: Resource,
merges_resource: Option<Resource>,
lower_case: bool,
add_prefix_space: bool,
strip_accents: impl Into<Option<bool>>,
add_prefix_space: impl Into<Option<bool>>,
) -> SequenceClassificationConfig {
SequenceClassificationConfig {
model_type,
@@ -137,7 +141,8 @@ impl SequenceClassificationConfig {
vocab_resource,
merges_resource,
lower_case,
add_prefix_space,
strip_accents: strip_accents.into(),
add_prefix_space: add_prefix_space.into(),
device: Device::cuda_if_available(),
}
}
@@ -159,7 +164,8 @@ impl Default for SequenceClassificationConfig {
)),
merges_resource: None,
lower_case: true,
add_prefix_space: false,
strip_accents: None,
add_prefix_space: None,
device: Device::cuda_if_available(),
}
}
@@ -361,6 +367,7 @@ impl SequenceClassificationModel {
vocab_path.to_str().unwrap(),
merges_path.map(|path| path.to_str().unwrap()),
config.lower_case,
config.strip_accents,
config.add_prefix_space,
)?;
let mut var_store = VarStore::new(device);

View File

@@ -29,7 +29,8 @@
//! Resource::Remote(RemoteResource::from_pretrained(BertConfigResources::BERT_NER)),
//! None, //merges resource only relevant with ModelType::Roberta
//! false, //lowercase
//! false, //add_prefix_space
//! None, //strip_accents
//! None, //add_prefix_space
//! LabelAggregationOption::Mode
//! );
//!
@@ -212,8 +213,10 @@ pub struct TokenClassificationConfig {
pub merges_resource: Option<Resource>,
/// Automatically lower case all input upon tokenization (assumes a lower-cased model)
pub lower_case: bool,
/// Flag indicating if the tokenizer should strip accents (normalization). Only used for BERT / ALBERT models
pub strip_accents: Option<bool>,
/// Flag indicating if the tokenizer should add a white space before each tokenized input (needed for some Roberta models)
pub add_prefix_space: bool,
pub add_prefix_space: Option<bool>,
/// Device to place the model on (default: CUDA/GPU when available)
pub device: Device,
/// Sub-tokens aggregation method (default: `LabelAggregationOption::First`)
@@ -228,8 +231,8 @@ impl TokenClassificationConfig {
/// * `model_type` - `ModelType` indicating the model type to load (must match with the actual data to be loaded!)
/// * model - The `Resource` pointing to the model to load (e.g. model.ot)
/// * config - The `Resource' pointing to the model configuration to load (e.g. config.json)
/// * vocab - The `Resource' pointing to the tokenizer's vocabulary to load (e.g. vocab.txt/vocab.json)
/// * vocab - An optional `Resource` tuple (`Option<Resource>`) pointing to the tokenizer's merge file to load (e.g. merges.txt), needed only for Roberta.
/// * vocab - The `Resource' pointing to the tokenizers' vocabulary to load (e.g. vocab.txt/vocab.json)
/// * vocab - An optional `Resource` tuple (`Option<Resource>`) pointing to the tokenizers' merge file to load (e.g. merges.txt), needed only for Roberta.
/// * lower_case - A `bool' indicating whether the tokenizer should lower case all input (in case of a lower-cased model)
pub fn new(
model_type: ModelType,
@@ -238,7 +241,8 @@ impl TokenClassificationConfig {
vocab_resource: Resource,
merges_resource: Option<Resource>,
lower_case: bool,
add_prefix_space: bool,
strip_accents: impl Into<Option<bool>>,
add_prefix_space: impl Into<Option<bool>>,
label_aggregation_function: LabelAggregationOption,
) -> TokenClassificationConfig {
TokenClassificationConfig {
@@ -248,7 +252,8 @@ impl TokenClassificationConfig {
vocab_resource,
merges_resource,
lower_case,
add_prefix_space,
strip_accents: strip_accents.into(),
add_prefix_space: add_prefix_space.into(),
device: Device::cuda_if_available(),
label_aggregation_function,
}
@@ -256,7 +261,7 @@ impl TokenClassificationConfig {
}
impl Default for TokenClassificationConfig {
/// Provides a default CONLL-2003 NER model (English)
/// Provides a default CoNLL-2003 NER model (English)
fn default() -> TokenClassificationConfig {
TokenClassificationConfig {
model_type: ModelType::Bert,
@@ -271,7 +276,8 @@ impl Default for TokenClassificationConfig {
)),
merges_resource: None,
lower_case: false,
add_prefix_space: false,
strip_accents: None,
add_prefix_space: None,
device: Device::cuda_if_available(),
label_aggregation_function: LabelAggregationOption::First,
}
@@ -492,6 +498,7 @@ impl TokenClassificationModel {
vocab_path.to_str().unwrap(),
merges_path.map(|path| path.to_str().unwrap()),
config.lower_case,
config.strip_accents,
config.add_prefix_space,
)?;
let mut var_store = VarStore::new(device);

View File

@@ -52,7 +52,7 @@
//! vocab_path.to_str().unwrap(),
//! merges_path.to_str().unwrap(),
//! true,
//! true
//! true,
//! )?;
//! let config = BertConfig::from_file(config_path);
//! let bert_model = RobertaForMaskedLM::new(&vs.root(), &config);

View File

@@ -33,7 +33,8 @@ fn bert_masked_lm() -> anyhow::Result<()> {
// Set-up masked LM model
let device = Device::Cpu;
let mut vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let config = BertConfig::from_file(config_path);
let bert_model = BertForMaskedLM::new(&vs.root(), &config);
vs.load(weights_path)?;
@@ -107,7 +108,8 @@ fn bert_for_sequence_classification() -> anyhow::Result<()> {
// Set-up model
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let mut config = BertConfig::from_file(config_path);
let mut dummy_label_mapping = HashMap::new();
dummy_label_mapping.insert(0, String::from("Positive"));
@@ -171,7 +173,8 @@ fn bert_for_multiple_choice() -> anyhow::Result<()> {
// Set-up model
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let mut config = BertConfig::from_file(config_path);
config.output_attentions = Some(true);
config.output_hidden_states = Some(true);
@@ -232,7 +235,8 @@ fn bert_for_token_classification() -> anyhow::Result<()> {
// Set-up model
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let mut config = BertConfig::from_file(config_path);
let mut dummy_label_mapping = HashMap::new();
dummy_label_mapping.insert(0, String::from("O"));
@@ -297,7 +301,8 @@ fn bert_for_question_answering() -> anyhow::Result<()> {
// Set-up model
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let mut config = BertConfig::from_file(config_path);
config.output_attentions = Some(true);
config.output_hidden_states = Some(true);
@@ -390,8 +395,9 @@ fn bert_question_answering() -> anyhow::Result<()> {
)),
Resource::Remote(RemoteResource::from_pretrained(BertVocabResources::BERT_QA)),
None, //merges resource only relevant with ModelType::Roberta
true, //lowercase
false,
true,
true,
None,
);
let qa_model = QuestionAnsweringModel::new(config)?;

View File

@@ -59,7 +59,8 @@ fn distilbert_masked_lm() -> anyhow::Result<()> {
// Set-up masked LM model
let device = Device::cuda_if_available();
let mut vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let config = DistilBertConfig::from_file(config_path);
let distil_bert_model = DistilBertModelMaskedLM::new(&vs.root(), &config);
vs.load(weights_path)?;
@@ -128,7 +129,8 @@ fn distilbert_for_question_answering() -> anyhow::Result<()> {
// Set-up masked LM model
let device = Device::cuda_if_available();
let vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let mut config = DistilBertConfig::from_file(config_path);
config.output_attentions = Some(true);
config.output_hidden_states = Some(true);
@@ -187,7 +189,8 @@ fn distilbert_for_token_classification() -> anyhow::Result<()> {
// Set-up masked LM model
let device = Device::cuda_if_available();
let vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let mut config = DistilBertConfig::from_file(config_path);
config.output_attentions = Some(true);
config.output_hidden_states = Some(true);

View File

@@ -26,7 +26,8 @@ fn electra_masked_lm() -> anyhow::Result<()> {
// Set-up masked LM model
let device = Device::Cpu;
let mut vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let mut config = ElectraConfig::from_file(config_path);
config.output_attentions = Some(true);
config.output_hidden_states = Some(true);
@@ -99,7 +100,8 @@ fn electra_discriminator() -> anyhow::Result<()> {
// Set-up masked LM model
let device = Device::Cpu;
let mut vs = nn::VarStore::new(device);
let tokenizer: BertTokenizer = BertTokenizer::from_file(vocab_path.to_str().unwrap(), true)?;
let tokenizer: BertTokenizer =
BertTokenizer::from_file(vocab_path.to_str().unwrap(), true, true)?;
let config = ElectraConfig::from_file(config_path);
let electra_model = ElectraDiscriminator::new(&vs.root(), &config);
vs.load(weights_path)?;

View File

@@ -346,6 +346,7 @@ fn roberta_question_answering() -> anyhow::Result<()> {
RobertaMergesResources::ROBERTA_QA,
))), //merges resource only relevant with ModelType::Roberta
true, //lowercase
None,
true,
);