Hieu Hoang 2017-02-10 13:24:33 +00:00
commit ff7bb00756
3 changed files with 57 additions and 34 deletions


@@ -1,8 +1,10 @@
# AmuNMT
[![Join the chat at https://gitter.im/amunmt/amunmt](https://badges.gitter.im/amunmt/amunmt.svg)](https://gitter.im/amunmt/amunmt?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![CUDA build Status](http://37.247.57.181:8000/job/amunmt_compilation_cuda/badge/icon)](http://37.247.57.181:8000/job/amunmt_compilation_cuda/)
[![CPU Build Status](http://37.247.57.181:8000/job/amunmt_compilation_cpu/badge/icon)](http://37.247.57.181:8000/job/amunmt_compilation_cpu/)
[![CUDA Build Status](http://vali.inf.ed.ac.uk/jenkins/buildStatus/icon?job=amunmt_compilation_cuda)](http://vali.inf.ed.ac.uk/jenkins/job/amunmt_compilation_cuda/)
[![CPU Build Status](http://vali.inf.ed.ac.uk/jenkins/buildStatus/icon?job=amunmt_compilation_cpu)](http://vali.inf.ed.ac.uk/jenkins/job/amunmt_compilation_cpu/)
A C++ inference engine for Neural Machine Translation (NMT) models trained with Theano-based scripts from
Nematus (https://github.com/rsennrich/nematus) or DL4MT (https://github.com/nyu-dl/dl4mt-tutorial)
@@ -92,14 +94,35 @@ AmuNMT has integrated support for [BPE encoding](https://github.com/rsennrich/su
bpe: bpe.codes
debpe: true
## Python Bindings
The Python bindings allow running the AmuNMT decoder from Python scripts. Compiling the bindings requires the `python-dev` package. To compile them, run:
```
make python
```
The Python bindings consist of two functions, `init` and `translate`:
```python
import libamunmt
libamunmt.init('-c config.yml')
print libamunmt.translate(['this is a little test .'])
```
The `init` function initializes the decoder; its argument string uses the same syntax as the command line. The `translate`
function takes a list of sentences to translate. For a real-world example, see the `scripts/amunmt_server.py`
script, which uses the Python bindings to run a REST server.
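Below is a minimal sketch of driving the bindings from a script; it assumes the bindings were built with `make python`, that `config.yml` points to a trained model, and that `translate` returns a list of translated strings (as the example above suggests). The actual `scripts/amunmt_server.py` is organized differently.
```python
# Minimal sketch (assumptions noted above): batch-translate stdin with the bindings.
import sys
import libamunmt

# Initialize the decoder once; the option string uses the same syntax as the CLI.
libamunmt.init('-c config.yml')

# Collect all source sentences and translate them as a single batch.
sentences = [line.rstrip('\n') for line in sys.stdin]
for translation in libamunmt.translate(sentences):
    print(translation)
```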
## Using GPU/CPU threads
AmuNMT can use GPUs, CPUs, or both, to distribute translation of different sentences.
AmuNMT can use GPUs, CPUs, or both, to distribute translation of different sentences. **However, it is unlikely that CPUs used together with GPUs yield any performance improvement. It is probably better to only use the GPU if one or more are available.**
cpu-threads: 8
gpu-threads: 2
devices: [0, 1]
The setting above uses 8 CPU threads and 4 GPU threads (2 GPUs x 2 threads). The `gpu-threads` and `devices` options are only available when AmuNMT has been compiled with CUDA support. Multiple GPU threads can be used to increase GPU saturation, but will likely not result in a large performance boost. By default, `gpu-threads` is set to `1` and `cpu-threads` to `0` if CUDA is available; otherwise `cpu-threads` is set to `1`. To disable the GPU, set `gpu-threads` to `0`. Setting both `gpu-threads` and `cpu-threads` to `0` will result in an exception.
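For example, an illustrative CPU-only configuration (values chosen for illustration) simply disables the GPU threads:
```
cpu-threads: 8
gpu-threads: 0
```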
## Example usage


@@ -64,7 +64,7 @@ cuda_add_executable(
)
if(PYTHONLIBS_FOUND)
cuda_add_library(amunmt SHARED
cuda_add_library(python SHARED
python/amunmt.cpp
# gpu/decoder/ape_penalty.cu
gpu/decoder/encoder_decoder.cu
@@ -80,7 +80,9 @@ cuda_add_library(amunmt SHARED
$<TARGET_OBJECTS:cpumode>
$<TARGET_OBJECTS:libyaml-cpp>
)
set_target_properties("amunmt" PROPERTIES EXCLUDE_FROM_ALL 1)
set_target_properties("python" PROPERTIES EXCLUDE_FROM_ALL 1)
set_target_properties("python" PROPERTIES OUTPUT_NAME "amunmt")
endif(PYTHONLIBS_FOUND)
cuda_add_library(mosesplugin STATIC
@@ -116,7 +118,7 @@ add_executable(
)
if(PYTHONLIBS_FOUND)
add_library(amunmt SHARED
add_library(python SHARED
python/amunmt.cpp
common/loader_factory.cpp
$<TARGET_OBJECTS:libcnpy>
@@ -124,14 +126,15 @@ add_library(amunmt SHARED
$<TARGET_OBJECTS:libcommon>
$<TARGET_OBJECTS:libyaml-cpp>
)
set_target_properties("amunmt" PROPERTIES EXCLUDE_FROM_ALL 1)
set_target_properties("python" PROPERTIES EXCLUDE_FROM_ALL 1)
set_target_properties("python" PROPERTIES OUTPUT_NAME "amunmt")
endif(PYTHONLIBS_FOUND)
endif(CUDA_FOUND)
SET(EXES "amun")
if(PYTHONLIBS_FOUND)
SET(EXES ${EXES} "amunmt")
SET(EXES ${EXES} "python")
endif(PYTHONLIBS_FOUND)
foreach(exec ${EXES})


@@ -22,20 +22,22 @@ using namespace std;
namespace amunmt {
God::God()
:threadIncr_(0)
: threadIncr_(0)
{
}
God::~God() {}
God::~God()
{
}
God& God::Init(const std::string& options) {
std::vector<std::string> args = boost::program_options::split_unix(options);
int argc = args.size() + 1;
char* argv[argc];
argv[0] = const_cast<char*>("bogus");
for(int i = 1; i < argc; i++)
for (int i = 1; i < argc; ++i) {
argv[i] = const_cast<char*>(args[i-1].c_str());
}
return Init(argc, argv);
}
@@ -49,35 +51,35 @@ God& God::Init(int argc, char** argv) {
config_.AddOptions(argc, argv);
config_.LogOptions();
if(Get("source-vocab").IsSequence()) {
for(auto sourceVocabPath : Get<std::vector<std::string>>("source-vocab"))
sourceVocabs_.emplace_back(new Vocab(sourceVocabPath));
}
else {
sourceVocabs_.emplace_back(new Vocab(Get<std::string>("source-vocab")));
if (Get("source-vocab").IsSequence()) {
for (auto sourceVocabPath : Get<std::vector<std::string>>("source-vocab")) {
sourceVocabs_.emplace_back(new Vocab(sourceVocabPath));
}
} else {
sourceVocabs_.emplace_back(new Vocab(Get<std::string>("source-vocab")));
}
targetVocab_.reset(new Vocab(Get<std::string>("target-vocab")));
weights_ = Get<std::map<std::string, float>>("weights");
if(Get<bool>("show-weights")) {
LOG(info) << "Outputting weights and exiting";
for(auto && pair : weights_) {
std::cout << pair.first << "= " << pair.second << std::endl;
}
exit(0);
LOG(info) << "Outputting weights and exiting";
for(auto && pair : weights_) {
std::cout << pair.first << "= " << pair.second << std::endl;
}
exit(0);
}
LoadScorers();
LoadFiltering();
if (Has("input-file")) {
LOG(info) << "Reading from " << Get<std::string>("input-file");
inputStream_.reset(new InputFileStream(Get<std::string>("input-file")));
LOG(info) << "Reading from " << Get<std::string>("input-file");
inputStream_.reset(new InputFileStream(Get<std::string>("input-file")));
}
else {
LOG(info) << "Reading from stdin";
inputStream_.reset(new InputFileStream(std::cin));
LOG(info) << "Reading from stdin";
inputStream_.reset(new InputFileStream(std::cin));
}
LoadPrePostProcessing();
@@ -184,11 +186,9 @@ std::vector<ScorerPtr> God::GetScorers(const DeviceInfo &deviceInfo) const {
std::vector<ScorerPtr> scorers;
if (deviceInfo.deviceType == CPUDevice) {
//cerr << "CPU GetScorers" << endl;
for (auto&& loader : cpuLoaders_ | boost::adaptors::map_values)
scorers.emplace_back(loader->NewScorer(*this, deviceInfo));
} else {
//cerr << "GPU GetScorers" << endl;
for (auto&& loader : gpuLoaders_ | boost::adaptors::map_values)
scorers.emplace_back(loader->NewScorer(*this, deviceInfo));
}
@@ -233,14 +233,12 @@ std::vector<std::string> God::Postprocess(const std::vector<std::string>& input)
}
return processed;
}
// clean up cuda vectors before cuda context goes out of scope
void God::CleanUp() {
for (Loaders::value_type& loader : cpuLoaders_) {
//cerr << "cpu loader=" << loader.first << endl;
loader.second.reset(nullptr);
}
for (Loaders::value_type& loader : gpuLoaders_) {
//cerr << "gpu loader=" << loader.first << endl;
loader.second.reset(nullptr);
}
}
@@ -274,7 +272,6 @@ DeviceInfo God::GetNextDevice() const
++threadIncr_;
//cerr << "GetNextDevice=" << ret << endl;
return ret;
}