Mirror of https://github.com/guillaume-be/rust-bert.git
* Fixed Clippy warnings
* Revert "Shallow clone optimization (#243)"
This reverts commit ba584653bc
.
* Updated dependencies
* Tryouts
* GPT2 tryouts
* WIP GPT2
* Input mapping
* Cache storage
* Initial GPT2 prototype
* Initial ONNX Config and decoder implementation
* ONNXDecoder first draft
* Use Decoders in example
* Automated tch-ort conversion, decoder implementation
* ONNXCausalDecoder implementation
* Refactored _get_var_store to be optional, added get_device to gen trait
* Updated example
* Added decoder_start_token_id to ConfigOption
* Addition of ONNXModelConfig, made max_position_embeddings optional
* Addition of forward pass function for ONNXModel
* Working ONNX causal decoder
* Simplify tensor conversion
* Refactored translation to facilitate ONNX integration
* Implementation of ONNXEncoder
* Implementation of ONNXConditionalGenerator
* Working ONNXCausalGenerator
* Reworked model resource types for pipelines and generators
* Aligned ONNXConditionalGenerator with other generators to use GenerateConfig for creation
* Moved force_token_id_generation to a common utils function, fixed tests, translation implementation
* Generalized forced_bos and forced_eos token generation
* Aligned the `encode_prompt_text` method across language models
* Fix prompt encoding for causal generation
* Fix prompt encoding for causal generation
* Support for ONNX models for SequenceClassification (see the sketch after this list)
* Support for ONNX models for TokenClassification
* Support for ONNX models for POS and NER pipelines
* Support for ONNX models for ZeroShotClassification pipeline
* Support for ONNX models for QuestionAnswering pipeline
* Support for ONNX models for MaskedLM pipeline
* Added token_type_ids, updated layer cache I/O parsing for ONNX pipelines
* Support for ONNX models for TextGenerationPipeline, updated examples for remote resources
* Remove ONNX zero-shot classification example (lack of correct pretrained model)
* Addition of tests for ONNX pipelines support
* Made onnx feature optional
* Fix device lookup with onnx feature enabled
* Updates from main branch
* Flexible tokenizer creation for M2M100 (NLLB support), made NLLB tests optional due to their size
* Fixed Clippy warnings
* Addition of documentation for ONNX
* Added documentation for ONNX support
* Upcoming tch 1.12 fixes
* Fix merge conflicts
* Fix merge conflicts (2)
* Add download libtorch feature to ONNX tests
* Add download-onnx feature
* Attempt to enable ONNX download
* Add remote resources feature
* ONNX download
* Pin ort version
* Update ort version
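The pipeline support commits above all route through the same ModelResource::ONNX variant shown in the example further down. As a minimal sketch of what that looks like for an encoder-only pipeline - assuming SequenceClassificationConfig exposes the same resource fields as the text generation config, and using placeholder URLs that stand in for any compatible ONNX export - a sequence classification pipeline could be configured as follows:

use rust_bert::pipelines::common::{ModelResource, ModelType, ONNXModelResources};
use rust_bert::pipelines::sequence_classification::{
    SequenceClassificationConfig, SequenceClassificationModel,
};
use rust_bert::resources::RemoteResource;

fn main() -> anyhow::Result<()> {
    let config = SequenceClassificationConfig {
        model_type: ModelType::DistilBert,
        model_resource: ModelResource::ONNX(ONNXModelResources {
            // Encoder-only architectures need only the encoder export;
            // the decoder slots stay empty. The URLs below are placeholders
            // for any compatible ONNX export and its tokenizer files.
            encoder_resource: Some(Box::new(RemoteResource::new(
                "https://example.com/path/to/model.onnx",
                "onnx-sequence-classification",
            ))),
            decoder_resource: None,
            decoder_with_past_resource: None,
        }),
        config_resource: Box::new(RemoteResource::new(
            "https://example.com/path/to/config.json",
            "onnx-sequence-classification",
        )),
        vocab_resource: Box::new(RemoteResource::new(
            "https://example.com/path/to/vocab.txt",
            "onnx-sequence-classification",
        )),
        merges_resource: None,
        ..Default::default()
    };

    let model = SequenceClassificationModel::new(config)?;
    let input = ["ONNX backends make deployment easier."];
    let output = model.predict(&input);
    println!("{:?}", output);
    Ok(())
}

The example tracked in the repository, reproduced below, shows the equivalent end-to-end setup for causal text generation with an ONNX export of GPT2: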
use rust_bert::pipelines::common::{ModelResource, ModelType, ONNXModelResources};
use rust_bert::pipelines::text_generation::{TextGenerationConfig, TextGenerationModel};
use rust_bert::resources::RemoteResource;

fn main() -> anyhow::Result<()> {
    let text_generation_model = TextGenerationModel::new(TextGenerationConfig {
        model_type: ModelType::GPT2,
        model_resource: ModelResource::ONNX(ONNXModelResources {
            // GPT2 is decoder-only: no encoder export is needed.
            encoder_resource: None,
            // Used for the first generation step, before any cached state exists.
            decoder_resource: Some(Box::new(RemoteResource::new(
                "https://huggingface.co/optimum/gpt2/resolve/main/decoder_model.onnx",
                "onnx-gpt2",
            ))),
            // Used for subsequent steps, consuming the cached key/value pairs.
            decoder_with_past_resource: Some(Box::new(RemoteResource::new(
                "https://huggingface.co/optimum/gpt2/resolve/main/decoder_with_past_model.onnx",
                "onnx-gpt2",
            ))),
        }),
        config_resource: Box::new(RemoteResource::new(
            "https://huggingface.co/optimum/gpt2/resolve/main/config.json",
            "onnx-gpt2",
        )),
        vocab_resource: Box::new(RemoteResource::new(
            "https://huggingface.co/gpt2/resolve/main/vocab.json",
            "onnx-gpt2",
        )),
        merges_resource: Some(Box::new(RemoteResource::new(
            "https://huggingface.co/gpt2/resolve/main/merges.txt",
            "onnx-gpt2",
        ))),
        // Greedy decoding: a single beam, no sampling.
        max_length: Some(30),
        do_sample: false,
        num_beams: 1,
        temperature: 1.0,
        num_return_sequences: 1,
        ..Default::default()
    })?;

    let prompts = ["It was a very nice and sunny"];
    let output = text_generation_model.generate(&prompts, None);

    println!("{:?}", output);

    Ok(())
}
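Note that, per the commits above ("Made onnx feature optional", "Add remote resources feature"), both the ONNX runtime integration and the RemoteResource downloads sit behind optional cargo features, so these examples only compile with those features enabled; the exact feature names are defined in the crate's Cargo.toml.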