From 0eb98df0fe32a8e31188fa107f789afd73d81ccf Mon Sep 17 00:00:00 2001
From: Kenneth Heafield
Date: Mon, 15 Oct 2012 13:58:33 +0100
Subject: [PATCH] Submodules are evil. Undo it.

---
 .gitmodules | 3 -
 Jamroot | 4 +-
 lazy | 1 -
 lm/Jamfile | 12 +
 lm/bhiksha.cc | 95 +++++
 lm/bhiksha.hh | 115 ++++++
 lm/binary_format.cc | 253 +++++++++++++
 lm/binary_format.hh | 108 ++++++
 lm/blank.hh | 43 +++
 lm/build_binary.cc | 255 +++++++++++++
 lm/config.cc | 29 ++
 lm/config.hh | 120 +++++++
 lm/enumerate_vocab.hh | 28 ++
 lm/facade.hh | 64 ++++
 lm/fragment.cc | 37 ++
 lm/left.hh | 212 +++++++++++
 lm/left_test.cc | 397 +++++++++++++++++++++
 lm/lm_exception.cc | 23 ++
 lm/lm_exception.hh | 50 +++
 lm/max_order.cc | 6 +
 lm/max_order.hh | 12 +
 lm/model.cc | 307 ++++++++++++++++
 lm/model.hh | 159 +++++++++
 lm/model_test.cc | 438 +++++++++++++++++++++++
 lm/model_type.hh | 23 ++
 lm/ngram_query.cc | 47 +++
 lm/ngram_query.hh | 72 ++++
 lm/partial.hh | 167 +++++++++
 lm/partial_test.cc | 199 +++++++++++
 lm/quantize.cc | 93 +++++
 lm/quantize.hh | 232 ++++++++++++
 lm/read_arpa.cc | 154 ++++++++
 lm/read_arpa.hh | 90 +++++
 lm/return.hh | 42 +++
 lm/search_hashed.cc | 294 +++++++++++++++
 lm/search_hashed.hh | 201 +++++++++++
 lm/search_trie.cc | 611 ++++++++++++++++++++++++++++++++
 lm/search_trie.hh | 130 +++++++
 lm/state.hh | 125 +++++++
 lm/test.arpa | 124 +++++++
 lm/test_nounk.arpa | 120 +++++++
 lm/trie.cc | 128 +++++++
 lm/trie.hh | 155 ++++++++
 lm/trie_sort.cc | 292 +++++++++++++++
 lm/trie_sort.hh | 115 ++++++
 lm/value.hh | 157 ++++++++
 lm/value_build.cc | 58 +++
 lm/value_build.hh | 97 +++++
 lm/virtual_interface.cc | 19 +
 lm/virtual_interface.hh | 154 ++++++++
 lm/vocab.cc | 239 +++++++++++++
 lm/vocab.hh | 182 ++++++++++
 lm/weights.hh | 22 ++
 lm/word_index.hh | 14 +
 mert/Jamfile | 2 +-
 moses/src/DynSAInclude/Jamfile | 2 +-
 moses/src/Incremental/Jamfile | 2 +-
 moses/src/Jamfile | 2 +-
 moses/src/LM/Jamfile | 2 +-
 search/Jamfile | 5 +
 search/arity.hh | 8 +
 search/config.hh | 25 ++
 search/context.hh | 65 ++++
 search/edge.hh | 31 ++
 search/edge_generator.cc | 120 +++++++
 search/edge_generator.hh | 58 +++
 search/edge_queue.cc | 25 ++
 search/edge_queue.hh | 73 ++++
 search/final.hh | 39 ++
 search/note.hh | 12 +
 search/rule.cc | 43 +++
 search/rule.hh | 20 ++
 search/source.hh | 48 +++
 search/types.hh | 18 +
 search/vertex.cc | 48 +++
 search/vertex.hh | 158 +++++++++
 search/vertex_generator.cc | 83 +++++
 search/vertex_generator.hh | 59 +++
 search/weights.cc | 71 ++++
 search/weights.hh | 52 +++
 search/weights_test.cc | 38 ++
 util/Jamfile | 10 +
 util/bit_packing.cc | 40 +++
 util/bit_packing.hh | 186 ++++++++++
 util/bit_packing_test.cc | 59 +++
 util/check.hh | 21 ++
 util/ersatz_progress.cc | 45 +++
 util/ersatz_progress.hh | 56 +++
 util/exception.cc | 90 +++++
 util/exception.hh | 138 ++++++++
 util/file.cc | 302 ++++++++++++++++
 util/file.hh | 110 ++++++
 util/file_piece.cc | 314 ++++++++++++++++
 util/file_piece.hh | 132 +++++++
 util/file_piece_test.cc | 123 +++++++
 util/getopt.c | 78 ++++
 util/getopt.hh | 33 ++
 util/have.hh | 23 ++
 util/joint_sort.hh | 151 ++++++++
 util/joint_sort_test.cc | 50 +++
 util/mmap.cc | 184 ++++++++++
 util/mmap.hh | 114 ++++++
 util/murmur_hash.cc | 168 +++++++++
 util/murmur_hash.hh | 14 +
 util/probing_hash_table.hh | 144 ++++++++
 util/probing_hash_table_test.cc | 50 +++
 util/proxy_iterator.hh | 96 +++++
 util/scoped.hh | 97 +++++
 util/sized_iterator.hh | 107 ++++++
 util/sorted_uniform.hh | 127 +++++++
 util/sorted_uniform_test.cc | 127 +++++++
 util/string_piece.cc | 192 ++++++++++
 util/string_piece.hh | 288 +++++++++++++++
 util/tokenize_piece.hh | 108 ++++++
 util/tokenize_piece_test.cc | 48 +++
 util/usage.cc | 46 +++
 util/usage.hh | 8 +
 117 files changed, 12104 insertions(+), 11 deletions(-)
 delete mode 160000 lazy
 create mode 100644 lm/Jamfile
 create mode 100644 lm/bhiksha.cc
 create mode 100644 lm/bhiksha.hh
 create mode 100644 lm/binary_format.cc
 create mode 100644 lm/binary_format.hh
 create mode 100644 lm/blank.hh
 create mode 100644 lm/build_binary.cc
 create mode 100644 lm/config.cc
 create mode 100644 lm/config.hh
 create mode 100644 lm/enumerate_vocab.hh
 create mode 100644 lm/facade.hh
 create mode 100644 lm/fragment.cc
 create mode 100644 lm/left.hh
 create mode 100644 lm/left_test.cc
 create mode 100644 lm/lm_exception.cc
 create mode 100644 lm/lm_exception.hh
 create mode 100644 lm/max_order.cc
 create mode 100644 lm/max_order.hh
 create mode 100644 lm/model.cc
 create mode 100644 lm/model.hh
 create mode 100644 lm/model_test.cc
 create mode 100644 lm/model_type.hh
 create mode 100644 lm/ngram_query.cc
 create mode 100644 lm/ngram_query.hh
 create mode 100644 lm/partial.hh
 create mode 100644 lm/partial_test.cc
 create mode 100644 lm/quantize.cc
 create mode 100644 lm/quantize.hh
 create mode 100644 lm/read_arpa.cc
 create mode 100644 lm/read_arpa.hh
 create mode 100644 lm/return.hh
 create mode 100644 lm/search_hashed.cc
 create mode 100644 lm/search_hashed.hh
 create mode 100644 lm/search_trie.cc
 create mode 100644 lm/search_trie.hh
 create mode 100644 lm/state.hh
 create mode 100644 lm/test.arpa
 create mode 100644 lm/test_nounk.arpa
 create mode 100644 lm/trie.cc
 create mode 100644 lm/trie.hh
 create mode 100644 lm/trie_sort.cc
 create mode 100644 lm/trie_sort.hh
 create mode 100644 lm/value.hh
 create mode 100644 lm/value_build.cc
 create mode 100644 lm/value_build.hh
 create mode 100644 lm/virtual_interface.cc
 create mode 100644 lm/virtual_interface.hh
 create mode 100644 lm/vocab.cc
 create mode 100644 lm/vocab.hh
 create mode 100644 lm/weights.hh
 create mode 100644 lm/word_index.hh
 create mode 100644 search/Jamfile
 create mode 100644 search/arity.hh
 create mode 100644 search/config.hh
 create mode 100644 search/context.hh
 create mode 100644 search/edge.hh
 create mode 100644 search/edge_generator.cc
 create mode 100644 search/edge_generator.hh
 create mode 100644 search/edge_queue.cc
 create mode 100644 search/edge_queue.hh
 create mode 100644 search/final.hh
 create mode 100644 search/note.hh
 create mode 100644 search/rule.cc
 create mode 100644 search/rule.hh
 create mode 100644 search/source.hh
 create mode 100644 search/types.hh
 create mode 100644 search/vertex.cc
 create mode 100644 search/vertex.hh
 create mode 100644 search/vertex_generator.cc
 create mode 100644 search/vertex_generator.hh
 create mode 100644 search/weights.cc
 create mode 100644 search/weights.hh
 create mode 100644 search/weights_test.cc
 create mode 100644 util/Jamfile
 create mode 100644 util/bit_packing.cc
 create mode 100644 util/bit_packing.hh
 create mode 100644 util/bit_packing_test.cc
 create mode 100644 util/check.hh
 create mode 100644 util/ersatz_progress.cc
 create mode 100644 util/ersatz_progress.hh
 create mode 100644 util/exception.cc
 create mode 100644 util/exception.hh
 create mode 100644 util/file.cc
 create mode 100644 util/file.hh
 create mode 100644 util/file_piece.cc
 create mode 100644 util/file_piece.hh
 create mode 100644 util/file_piece_test.cc
 create mode 100644 util/getopt.c
 create mode 100644 util/getopt.hh
 create mode 100644 util/have.hh
 create mode 100644 util/joint_sort.hh
 create mode 100644 util/joint_sort_test.cc
 create mode 100644 util/mmap.cc
 create mode 100644 util/mmap.hh
 create mode 100644 util/murmur_hash.cc
 create mode 100644 util/murmur_hash.hh
 create mode 100644 util/probing_hash_table.hh
 create mode 100644 util/probing_hash_table_test.cc
 create mode 100644 util/proxy_iterator.hh
 create mode 100644 util/scoped.hh
 create mode 100644 util/sized_iterator.hh
 create mode 100644 util/sorted_uniform.hh
 create mode 100644 util/sorted_uniform_test.cc
 create mode 100644 util/string_piece.cc
 create mode 100644 util/string_piece.hh
 create mode 100644 util/tokenize_piece.hh
 create mode 100644 util/tokenize_piece_test.cc
 create mode 100644 util/usage.cc
 create mode 100644 util/usage.hh
diff --git a/.gitmodules b/.gitmodules
index 01f80a2c9..19ceb74f6 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,3 @@
 [submodule "regression-testing/tests"]
 	path = regression-testing/tests
 	url = git@github.com:moses-smt/moses-regression-tests.git
-[submodule "lazy"]
-	path = lazy
-	url = git@github.com:kpu/lazy
diff --git a/Jamroot b/Jamroot
index 8bd850847..e6ba3b483 100644
--- a/Jamroot
+++ b/Jamroot
@@ -101,9 +101,9 @@ project : requirements
 ;
 
 #Add directories here if you want their incidental targets too (i.e. tests).
-build-projects lazy mert moses-cmd/src moses-chart-cmd/src scripts regression-testing contrib/relent-filter/src ;
+build-projects lm util search mert moses-cmd/src moses-chart-cmd/src scripts regression-testing contrib/relent-filter/src ;
 
-alias programs : lazy/lm//programs moses-chart-cmd/src//moses_chart moses-cmd/src//programs OnDiskPt//CreateOnDiskPt OnDiskPt//queryOnDiskPt mert//programs contrib/server//mosesserver misc//programs symal phrase-extract phrase-extract//lexical-reordering phrase-extract//extract-ghkm phrase-extract//pcfg-extract phrase-extract//pcfg-score biconcor contrib/relent-filter/src//calcDivergence ;
+alias programs : lm//programs moses-chart-cmd/src//moses_chart moses-cmd/src//programs OnDiskPt//CreateOnDiskPt OnDiskPt//queryOnDiskPt mert//programs contrib/server//mosesserver misc//programs symal phrase-extract phrase-extract//lexical-reordering phrase-extract//extract-ghkm phrase-extract//pcfg-extract phrase-extract//pcfg-score biconcor contrib/relent-filter/src//calcDivergence ;
 
 install-bin-libs programs ;
 install-headers headers-base : [ path.glob-tree biconcor contrib lm mert misc moses-chart-cmd moses-cmd OnDiskPt phrase-extract symal util : *.hh *.h ] : . ;
diff --git a/lazy b/lazy
deleted file mode 160000
index a43576d05..000000000
--- a/lazy
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit a43576d0576613c2587b3c0e012c22a0976ff53f
diff --git a/lm/Jamfile b/lm/Jamfile
new file mode 100644
index 000000000..045dfee92
--- /dev/null
+++ b/lm/Jamfile
@@ -0,0 +1,12 @@
+lib kenlm : bhiksha.cc binary_format.cc config.cc lm_exception.cc model.cc quantize.cc read_arpa.cc search_hashed.cc search_trie.cc trie.cc trie_sort.cc value_build.cc virtual_interface.cc vocab.cc ../util//kenutil : <include>.. : : <include>.. ../util//kenutil ;
+
+import testing ;
+
+run left_test.cc ../util//kenutil kenlm /top//boost_unit_test_framework : : test.arpa ;
+run model_test.cc ../util//kenutil kenlm /top//boost_unit_test_framework : : test.arpa test_nounk.arpa ;
+
+exe query : ngram_query.cc kenlm ../util//kenutil ;
+exe build_binary : build_binary.cc kenlm ../util//kenutil ;
+exe kenlm_max_order : max_order.cc : <include>.. ;
+
+alias programs : query build_binary kenlm_max_order ;
diff --git a/lm/bhiksha.cc b/lm/bhiksha.cc
new file mode 100644
index 000000000..088ea98d4
--- /dev/null
+++ b/lm/bhiksha.cc
@@ -0,0 +1,95 @@
+#include "lm/bhiksha.hh"
+#include "lm/config.hh"
+#include "util/file.hh"
+#include "util/exception.hh"
+
+#include <limits>
+
+namespace lm {
+namespace ngram {
+namespace trie {
+
+DontBhiksha::DontBhiksha(const void * /*base*/, uint64_t /*max_offset*/, uint64_t max_next, const Config &/*config*/) :
+  next_(util::BitsMask::ByMax(max_next)) {}
+
+const uint8_t kArrayBhikshaVersion = 0;
+
+// TODO: put this in binary file header instead when I change the binary file format again.
+void ArrayBhiksha::UpdateConfigFromBinary(int fd, Config &config) {
+  uint8_t version;
+  uint8_t configured_bits;
+  util::ReadOrThrow(fd, &version, 1);
+  util::ReadOrThrow(fd, &configured_bits, 1);
+  if (version != kArrayBhikshaVersion) UTIL_THROW(FormatLoadException, "This file has sorted array compression version " << (unsigned) version << " but the code expects version " << (unsigned)kArrayBhikshaVersion);
+  config.pointer_bhiksha_bits = configured_bits;
+}
+
+namespace {
+
+// Find argmin_{chopped \in [0, RequiredBits(max_next)]} ChoppedDelta(max_offset)
+uint8_t ChopBits(uint64_t max_offset, uint64_t max_next, const Config &config) {
+  uint8_t required = util::RequiredBits(max_next);
+  uint8_t best_chop = 0;
+  int64_t lowest_change = std::numeric_limits<int64_t>::max();
+  // There are probably faster ways but I don't care because this is only done once per order at construction time.
+  for (uint8_t chop = 0; chop <= std::min(required, config.pointer_bhiksha_bits); ++chop) {
+    int64_t change = (max_next >> (required - chop)) * 64 /* table cost in bits */
+      - max_offset * static_cast<int64_t>(chop); /* savings in bits*/
+    if (change < lowest_change) {
+      lowest_change = change;
+      best_chop = chop;
+    }
+  }
+  return best_chop;
+}
+
+std::size_t ArrayCount(uint64_t max_offset, uint64_t max_next, const Config &config) {
+  uint8_t required = util::RequiredBits(max_next);
+  uint8_t chopping = ChopBits(max_offset, max_next, config);
+  return (max_next >> (required - chopping)) + 1 /* we store 0 too */;
+}
+} // namespace
+
+uint64_t ArrayBhiksha::Size(uint64_t max_offset, uint64_t max_next, const Config &config) {
+  return sizeof(uint64_t) * (1 /* header */ + ArrayCount(max_offset, max_next, config)) + 7 /* 8-byte alignment */;
+}
+
+uint8_t ArrayBhiksha::InlineBits(uint64_t max_offset, uint64_t max_next, const Config &config) {
+  return util::RequiredBits(max_next) - ChopBits(max_offset, max_next, config);
+}
+
+namespace {
+
+void *AlignTo8(void *from) {
+  uint8_t *val = reinterpret_cast<uint8_t*>(from);
+  std::size_t remainder = reinterpret_cast<std::size_t>(val) & 7;
+  if (!remainder) return val;
+  return val + 8 - remainder;
+}
+
+} // namespace
+
+ArrayBhiksha::ArrayBhiksha(void *base, uint64_t max_offset, uint64_t max_next, const Config &config)
+  : next_inline_(util::BitsMask::ByBits(InlineBits(max_offset, max_next, config))),
+    offset_begin_(reinterpret_cast<const uint64_t*>(AlignTo8(base)) + 1 /* 8-byte header */),
+    offset_end_(offset_begin_ + ArrayCount(max_offset, max_next, config)),
+    write_to_(reinterpret_cast<uint64_t*>(AlignTo8(base)) + 1 /* 8-byte header */ + 1 /* first entry is 0 */),
+    original_base_(base) {}
+
+void ArrayBhiksha::FinishedLoading(const Config &config) {
+  // *offset_begin_ = 0 but without a const_cast.
+  *(write_to_ - (write_to_ - offset_begin_)) = 0;
+
+  if (write_to_ != offset_end_) UTIL_THROW(util::Exception, "Did not get all the array entries that were expected.");
+
+  uint8_t *head_write = reinterpret_cast<uint8_t*>(original_base_);
+  *(head_write++) = kArrayBhikshaVersion;
+  *(head_write++) = config.pointer_bhiksha_bits;
+}
+
+void ArrayBhiksha::LoadedBinary() {
+}
+
+} // namespace trie
+} // namespace ngram
+} // namespace lm
diff --git a/lm/bhiksha.hh b/lm/bhiksha.hh
new file mode 100644
index 000000000..8ff88654d
--- /dev/null
+++ b/lm/bhiksha.hh
@@ -0,0 +1,115 @@
+/* Simple implementation of
+ * @inproceedings{bhikshacompression,
+ *  author={Bhiksha Raj and Ed Whittaker},
+ *  year={2003},
+ *  title={Lossless Compression of Language Model Structure and Word Identifiers},
+ *  booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing},
+ *  pages={388--391},
+ * }
+ *
+ * Currently only used for next pointers.
+ */
+
+#ifndef LM_BHIKSHA__
+#define LM_BHIKSHA__
+
+#include <stdint.h>
+#include <assert.h>
+
+#include "lm/model_type.hh"
+#include "lm/trie.hh"
+#include "util/bit_packing.hh"
+#include "util/sorted_uniform.hh"
+
+namespace lm {
+namespace ngram {
+struct Config;
+
+namespace trie {
+
+class DontBhiksha {
+  public:
+    static const ModelType kModelTypeAdd = static_cast<ModelType>(0);
+
+    static void UpdateConfigFromBinary(int /*fd*/, Config &/*config*/) {}
+
+    static uint64_t Size(uint64_t /*max_offset*/, uint64_t /*max_next*/, const Config &/*config*/) { return 0; }
+
+    static uint8_t InlineBits(uint64_t /*max_offset*/, uint64_t max_next, const Config &/*config*/) {
+      return util::RequiredBits(max_next);
+    }
+
+    DontBhiksha(const void *base, uint64_t max_offset, uint64_t max_next, const Config &config);
+
+    void ReadNext(const void *base, uint64_t bit_offset, uint64_t /*index*/, uint8_t total_bits, NodeRange &out) const {
+      out.begin = util::ReadInt57(base, bit_offset, next_.bits, next_.mask);
+      out.end = util::ReadInt57(base, bit_offset + total_bits, next_.bits, next_.mask);
+      //assert(out.end >= out.begin);
+    }
+
+    void WriteNext(void *base, uint64_t bit_offset, uint64_t /*index*/, uint64_t value) {
+      util::WriteInt57(base, bit_offset, next_.bits, value);
+    }
+
+    void FinishedLoading(const Config &/*config*/) {}
+
+    void LoadedBinary() {}
+
+    uint8_t InlineBits() const { return next_.bits; }
+
+  private:
+    util::BitsMask next_;
+};
+
+class ArrayBhiksha {
+  public:
+    static const ModelType kModelTypeAdd = kArrayAdd;
+
+    static void UpdateConfigFromBinary(int fd, Config &config);
+
+    static uint64_t Size(uint64_t max_offset, uint64_t max_next, const Config &config);
+
+    static uint8_t InlineBits(uint64_t max_offset, uint64_t max_next, const Config &config);
+
+    ArrayBhiksha(void *base, uint64_t max_offset, uint64_t max_value, const Config &config);
+
+    void ReadNext(const void *base, uint64_t bit_offset, uint64_t index, uint8_t total_bits, NodeRange &out) const {
+      const uint64_t *begin_it = util::BinaryBelow(util::IdentityAccessor<uint64_t>(), offset_begin_, offset_end_, index);
+      const uint64_t *end_it;
+      for (end_it = begin_it; (end_it < offset_end_) && (*end_it <= index + 1); ++end_it) {}
+      --end_it;
+      out.begin = ((begin_it - offset_begin_) << next_inline_.bits) |
+        util::ReadInt57(base, bit_offset, next_inline_.bits, next_inline_.mask);
+      out.end = ((end_it - offset_begin_) << next_inline_.bits) |
+        util::ReadInt57(base, bit_offset + total_bits, next_inline_.bits, next_inline_.mask);
+      //assert(out.end >= out.begin);
+    }
+
+    void WriteNext(void *base, uint64_t bit_offset, uint64_t index, uint64_t value) {
+      uint64_t encode = value >> next_inline_.bits;
+      for (; write_to_ <= offset_begin_ + encode; ++write_to_) *write_to_ = index;
+      util::WriteInt57(base, bit_offset, next_inline_.bits, value & next_inline_.mask);
+    }
+
+    void FinishedLoading(const Config &config);
+
+    void LoadedBinary();
+
+    uint8_t InlineBits() const { return next_inline_.bits; }
+
+  private:
+    const util::BitsMask next_inline_;
+
+    const uint64_t *const offset_begin_;
+    const uint64_t *const offset_end_;
+
+    uint64_t *write_to_;
+
+    void *original_base_;
+};
+
+} // namespace trie
+} // namespace ngram
+} // namespace lm
+
+#endif // LM_BHIKSHA__
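The offset-array trick above is easier to see outside the bit-packed trie. The following is a minimal standalone sketch, not KenLM's API: the hypothetical OffsetArray class stores only the low bits of each (non-decreasing) value inline and recovers the high bits from a table that records, per high-bits bucket, the first entry index that reaches it, mirroring WriteNext and the binary search in ReadNext.

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <stdint.h>
    #include <vector>

    // Values must be non-decreasing, as trie next-pointers are.
    class OffsetArray {
      public:
        OffsetArray(const std::vector<uint64_t> &sorted, unsigned bits) : bits_(bits) {
          uint64_t buckets = (sorted.back() >> bits_) + 1;
          table_.assign(buckets + 1, sorted.size());
          uint64_t next_high = 0;
          for (std::size_t i = 0; i < sorted.size(); ++i) {
            low_.push_back(sorted[i] & ((uint64_t(1) << bits_) - 1)); // inline payload
            // Record the first index whose high bits reach each bucket (cf. WriteNext).
            while (next_high <= (sorted[i] >> bits_)) table_[next_high++] = i;
          }
        }

        uint64_t Get(std::size_t index) const {
          // Largest bucket whose first index is <= index (cf. ReadNext's BinaryBelow).
          uint64_t high = std::upper_bound(table_.begin(), table_.end(), index) - table_.begin() - 1;
          return (high << bits_) | low_[index];
        }

      private:
        unsigned bits_;
        std::vector<uint64_t> low_;      // would be bit-packed at bits_ bits per entry
        std::vector<std::size_t> table_; // one word per high-bits bucket
    };

    int main() {
      const uint64_t raw[] = {3, 9, 17, 17, 40, 41, 100};
      std::vector<uint64_t> values(raw, raw + 7);
      OffsetArray packed(values, 3); // keep 3 bits inline per entry
      for (std::size_t i = 0; i < values.size(); ++i) assert(packed.Get(i) == values[i]);
      return 0;
    }

ChopBits above picks the split by exactly this trade-off: each extra chopped bit saves one bit per entry but doubles the bucket table.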
diff --git a/lm/binary_format.cc b/lm/binary_format.cc
new file mode 100644
index 000000000..efa670561
--- /dev/null
+++ b/lm/binary_format.cc
@@ -0,0 +1,253 @@
+#include "lm/binary_format.hh"
+
+#include "lm/lm_exception.hh"
+#include "util/file.hh"
+#include "util/file_piece.hh"
+
+#include <cstddef>
+#include <cstring>
+#include <limits>
+#include <string>
+
+#include <stdint.h>
+
+namespace lm {
+namespace ngram {
+namespace {
+const char kMagicBeforeVersion[] = "mmap lm http://kheafield.com/code format version";
+const char kMagicBytes[] = "mmap lm http://kheafield.com/code format version 5\n\0";
+// This must be shorter than kMagicBytes and indicates an incomplete binary file (i.e. build failed).
+const char kMagicIncomplete[] = "mmap lm http://kheafield.com/code incomplete\n";
+const long int kMagicVersion = 5;
+
+// Old binary files built on 32-bit machines have this header.
+// TODO: eliminate with next binary release.
+struct OldSanity {
+  char magic[sizeof(kMagicBytes)];
+  float zero_f, one_f, minus_half_f;
+  WordIndex one_word_index, max_word_index;
+  uint64_t one_uint64;
+
+  void SetToReference() {
+    std::memset(this, 0, sizeof(OldSanity));
+    std::memcpy(magic, kMagicBytes, sizeof(magic));
+    zero_f = 0.0; one_f = 1.0; minus_half_f = -0.5;
+    one_word_index = 1;
+    max_word_index = std::numeric_limits<WordIndex>::max();
+    one_uint64 = 1;
+  }
+};
+
+
+// Test values aligned to 8 bytes.
+struct Sanity {
+  char magic[ALIGN8(sizeof(kMagicBytes))];
+  float zero_f, one_f, minus_half_f;
+  WordIndex one_word_index, max_word_index, padding_to_8;
+  uint64_t one_uint64;
+
+  void SetToReference() {
+    std::memset(this, 0, sizeof(Sanity));
+    std::memcpy(magic, kMagicBytes, sizeof(kMagicBytes));
+    zero_f = 0.0; one_f = 1.0; minus_half_f = -0.5;
+    one_word_index = 1;
+    max_word_index = std::numeric_limits<WordIndex>::max();
+    padding_to_8 = 0;
+    one_uint64 = 1;
+  }
+};
+
+const char *kModelNames[6] = {"probing hash tables", "probing hash tables with rest costs", "trie", "trie with quantization", "trie with array-compressed pointers", "trie with quantization and array-compressed pointers"};
+
+std::size_t TotalHeaderSize(unsigned char order) {
+  return ALIGN8(sizeof(Sanity) + sizeof(FixedWidthParameters) + sizeof(uint64_t) * order);
+}
+
+void WriteHeader(void *to, const Parameters &params) {
+  Sanity header = Sanity();
+  header.SetToReference();
+  std::memcpy(to, &header, sizeof(Sanity));
+  char *out = reinterpret_cast<char*>(to) + sizeof(Sanity);
+
+  *reinterpret_cast<FixedWidthParameters*>(out) = params.fixed;
+  out += sizeof(FixedWidthParameters);
+
+  uint64_t *counts = reinterpret_cast<uint64_t*>(out);
+  for (std::size_t i = 0; i < params.counts.size(); ++i) {
+    counts[i] = params.counts[i];
+  }
+}
+
+} // namespace
+
+uint8_t *SetupJustVocab(const Config &config, uint8_t order, std::size_t memory_size, Backing &backing) {
+  if (config.write_mmap) {
+    std::size_t total = TotalHeaderSize(order) + memory_size;
+    backing.file.reset(util::CreateOrThrow(config.write_mmap));
+    if (config.write_method == Config::WRITE_MMAP) {
+      backing.vocab.reset(util::MapZeroedWrite(backing.file.get(), total), total, util::scoped_memory::MMAP_ALLOCATED);
+    } else {
+      util::ResizeOrThrow(backing.file.get(), 0);
+      util::MapAnonymous(total, backing.vocab);
+    }
+    strncpy(reinterpret_cast<char*>(backing.vocab.get()), kMagicIncomplete, TotalHeaderSize(order));
+    return reinterpret_cast<uint8_t*>(backing.vocab.get()) + TotalHeaderSize(order);
+  } else {
+    util::MapAnonymous(memory_size, backing.vocab);
+    return reinterpret_cast<uint8_t*>(backing.vocab.get());
+  }
+}
+
+uint8_t *GrowForSearch(const Config &config, std::size_t vocab_pad, std::size_t memory_size, Backing &backing) {
+  std::size_t adjusted_vocab = backing.vocab.size() + vocab_pad;
+  if (config.write_mmap) {
+    // Grow the file to accommodate the search, using zeros.
+    try {
+      util::ResizeOrThrow(backing.file.get(), adjusted_vocab + memory_size);
+    } catch (util::ErrnoException &e) {
+      e << " for file " << config.write_mmap;
+      throw e;
+    }
+
+    if (config.write_method == Config::WRITE_AFTER) {
+      util::MapAnonymous(memory_size, backing.search);
+      return reinterpret_cast<uint8_t*>(backing.search.get());
+    }
+    // mmap it now.
+    // We're skipping over the header and vocab for the search space mmap.  mmap likes page aligned offsets, so some arithmetic to round the offset down.
+    std::size_t page_size = util::SizePage();
+    std::size_t alignment_cruft = adjusted_vocab % page_size;
+    backing.search.reset(util::MapOrThrow(alignment_cruft + memory_size, true, util::kFileFlags, false, backing.file.get(), adjusted_vocab - alignment_cruft), alignment_cruft + memory_size, util::scoped_memory::MMAP_ALLOCATED);
+    return reinterpret_cast<uint8_t*>(backing.search.get()) + alignment_cruft;
+  } else {
+    util::MapAnonymous(memory_size, backing.search);
+    return reinterpret_cast<uint8_t*>(backing.search.get());
+  }
+}
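The alignment arithmetic in GrowForSearch is a standard POSIX idiom worth seeing in isolation. A sketch with hypothetical names (MapFromOffset is not a KenLM function): mmap requires a page-aligned file offset, so map from the page boundary at or below the target offset and skip the leading cruft bytes in the returned pointer.

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>

    void *MapFromOffset(int fd, std::size_t target_offset, std::size_t size) {
      std::size_t page = sysconf(_SC_PAGESIZE);
      std::size_t cruft = target_offset % page;  // bytes between the page boundary and the target
      void *mapped = mmap(NULL, cruft + size, PROT_READ | PROT_WRITE, MAP_SHARED,
                          fd, target_offset - cruft);  // page-aligned offset, as mmap requires
      if (mapped == MAP_FAILED) return NULL;
      return static_cast<char*>(mapped) + cruft;  // caller's data begins past the cruft
    }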
+void FinishFile(const Config &config, ModelType model_type, unsigned int search_version, const std::vector<uint64_t> &counts, std::size_t vocab_pad, Backing &backing) {
+  if (!config.write_mmap) return;
+  switch (config.write_method) {
+    case Config::WRITE_MMAP:
+      util::SyncOrThrow(backing.vocab.get(), backing.vocab.size());
+      util::SyncOrThrow(backing.search.get(), backing.search.size());
+      break;
+    case Config::WRITE_AFTER:
+      util::SeekOrThrow(backing.file.get(), 0);
+      util::WriteOrThrow(backing.file.get(), backing.vocab.get(), backing.vocab.size());
+      util::SeekOrThrow(backing.file.get(), backing.vocab.size() + vocab_pad);
+      util::WriteOrThrow(backing.file.get(), backing.search.get(), backing.search.size());
+      util::FSyncOrThrow(backing.file.get());
+      break;
+  }
+  // header and vocab share the same mmap.  The header is written here because we know the counts.
+  Parameters params = Parameters();
+  params.counts = counts;
+  params.fixed.order = counts.size();
+  params.fixed.probing_multiplier = config.probing_multiplier;
+  params.fixed.model_type = model_type;
+  params.fixed.has_vocabulary = config.include_vocab;
+  params.fixed.search_version = search_version;
+  WriteHeader(backing.vocab.get(), params);
+  if (config.write_method == Config::WRITE_AFTER) {
+    util::SeekOrThrow(backing.file.get(), 0);
+    util::WriteOrThrow(backing.file.get(), backing.vocab.get(), TotalHeaderSize(counts.size()));
+  }
+}
+
+namespace detail {
+
+bool IsBinaryFormat(int fd) {
+  const uint64_t size = util::SizeFile(fd);
+  if (size == util::kBadSize || (size <= static_cast<uint64_t>(sizeof(Sanity)))) return false;
+  // Try reading the header.
+  util::scoped_memory memory;
+  try {
+    util::MapRead(util::LAZY, fd, 0, sizeof(Sanity), memory);
+  } catch (const util::Exception &e) {
+    return false;
+  }
+  Sanity reference_header = Sanity();
+  reference_header.SetToReference();
+  if (!memcmp(memory.get(), &reference_header, sizeof(Sanity))) return true;
+  if (!memcmp(memory.get(), kMagicIncomplete, strlen(kMagicIncomplete))) {
+    UTIL_THROW(FormatLoadException, "This binary file did not finish building");
+  }
+  if (!memcmp(memory.get(), kMagicBeforeVersion, strlen(kMagicBeforeVersion))) {
+    char *end_ptr;
+    const char *begin_version = static_cast<const char*>(memory.get()) + strlen(kMagicBeforeVersion);
+    long int version = strtol(begin_version, &end_ptr, 10);
+    if ((end_ptr != begin_version) && version != kMagicVersion) {
+      UTIL_THROW(FormatLoadException, "Binary file has version " << version << " but this implementation expects version " << kMagicVersion << " so you'll have to use the ARPA to rebuild your binary");
+    }
+
+    OldSanity old_sanity = OldSanity();
+    old_sanity.SetToReference();
+    UTIL_THROW_IF(!memcmp(memory.get(), &old_sanity, sizeof(OldSanity)), FormatLoadException, "Looks like this is an old 32-bit format.  The old 32-bit format has been removed so that 64-bit and 32-bit files are exchangeable.");
+    UTIL_THROW(FormatLoadException, "File looks like it should be loaded with mmap, but the test values don't match.  Try rebuilding the binary format LM using the same code revision, compiler, and architecture");
+  }
+  return false;
+}
+
+void ReadHeader(int fd, Parameters &out) {
+  util::SeekOrThrow(fd, sizeof(Sanity));
+  util::ReadOrThrow(fd, &out.fixed, sizeof(out.fixed));
+  if (out.fixed.probing_multiplier < 1.0)
+    UTIL_THROW(FormatLoadException, "Binary format claims to have a probing multiplier of " << out.fixed.probing_multiplier << " which is < 1.0.");
+
+  out.counts.resize(static_cast<std::size_t>(out.fixed.order));
+  if (out.fixed.order) util::ReadOrThrow(fd, &*out.counts.begin(), sizeof(uint64_t) * out.fixed.order);
+}
+
+void MatchCheck(ModelType model_type, unsigned int search_version, const Parameters &params) {
+  if (params.fixed.model_type != model_type) {
+    if (static_cast<unsigned int>(params.fixed.model_type) >= (sizeof(kModelNames) / sizeof(const char *)))
+      UTIL_THROW(FormatLoadException, "The binary file claims to be model type " << static_cast<unsigned int>(params.fixed.model_type) << " but this is not implemented in this inference code.");
+    UTIL_THROW(FormatLoadException, "The binary file was built for " << kModelNames[params.fixed.model_type] << " but the inference code is trying to load " << kModelNames[model_type]);
+  }
+  UTIL_THROW_IF(search_version != params.fixed.search_version, FormatLoadException, "The binary file has " << kModelNames[params.fixed.model_type] << " version " << params.fixed.search_version << " but this code expects " << kModelNames[params.fixed.model_type] << " version " << search_version);
+}
+
+void SeekPastHeader(int fd, const Parameters &params) {
+  util::SeekOrThrow(fd, TotalHeaderSize(params.counts.size()));
+}
+
+uint8_t *SetupBinary(const Config &config, const Parameters &params, uint64_t memory_size, Backing &backing) {
+  const uint64_t file_size = util::SizeFile(backing.file.get());
+  // The header is smaller than a page, so we have to map the whole header as well.
+  std::size_t total_map = util::CheckOverflow(TotalHeaderSize(params.counts.size()) + memory_size);
+  if (file_size != util::kBadSize && static_cast<std::size_t>(file_size) < total_map)
+    UTIL_THROW(FormatLoadException, "Binary file has size " << file_size << " but the headers say it should be at least " << total_map);
+
+  util::MapRead(config.load_method, backing.file.get(), 0, total_map, backing.search);
+
+  if (config.enumerate_vocab && !params.fixed.has_vocabulary)
+    UTIL_THROW(FormatLoadException, "The decoder requested all the vocabulary strings, but this binary file does not have them.  You may need to rebuild the binary file with an updated version of build_binary.");
+
+  // Seek to vocabulary words
+  util::SeekOrThrow(backing.file.get(), total_map);
+  return reinterpret_cast<uint8_t*>(backing.search.get()) + TotalHeaderSize(params.counts.size());
+}
+
+void ComplainAboutARPA(const Config &config, ModelType model_type) {
+  if (config.write_mmap || !config.messages) return;
+  if (config.arpa_complain == Config::ALL) {
+    *config.messages << "Loading the LM will be faster if you build a binary file." << std::endl;
+  } else if (config.arpa_complain == Config::EXPENSIVE && model_type == TRIE_SORTED) {
+    *config.messages << "Building " << kModelNames[model_type] << " from ARPA is expensive.  Save time by building a binary format." << std::endl;
+  }
+}
+
+} // namespace detail
+
+bool RecognizeBinary(const char *file, ModelType &recognized) {
+  util::scoped_fd fd(util::OpenReadOrThrow(file));
+  if (!detail::IsBinaryFormat(fd.get())) return false;
+  Parameters params;
+  detail::ReadHeader(fd.get(), params);
+  recognized = params.fixed.model_type;
+  return true;
+}
+
+} // namespace ngram
+} // namespace lm
diff --git a/lm/binary_format.hh b/lm/binary_format.hh
new file mode 100644
index 000000000..bf699d5f4
--- /dev/null
+++ b/lm/binary_format.hh
@@ -0,0 +1,108 @@
+#ifndef LM_BINARY_FORMAT__
+#define LM_BINARY_FORMAT__
+
+#include "lm/config.hh"
+#include "lm/model_type.hh"
+#include "lm/read_arpa.hh"
+
+#include "util/file_piece.hh"
+#include "util/mmap.hh"
+#include "util/scoped.hh"
+
+#include <cstddef>
+#include <vector>
+
+#include <stdint.h>
+
+namespace lm {
+namespace ngram {
+
+/* Inspect a file to determine if it is a binary lm.  If not, return false.
+ * If so, return true and set recognized to the type.  This is the only API in
+ * this header designed for use by decoder authors.
+ */
+bool RecognizeBinary(const char *file, ModelType &recognized);
+
+struct FixedWidthParameters {
+  unsigned char order;
+  float probing_multiplier;
+  // What type of model is this?
+  ModelType model_type;
+  // Does the end of the file have the actual strings in the vocabulary?
+  bool has_vocabulary;
+  unsigned int search_version;
+};
+
+// This is a macro instead of an inline function so constants can be assigned using it.
+#define ALIGN8(a) ((std::ptrdiff_t(((a)-1)/8)+1)*8)
+
+// Parameters stored in the header of a binary file.
+struct Parameters {
+  FixedWidthParameters fixed;
+  std::vector<uint64_t> counts;
+};
+
+struct Backing {
+  // File behind memory, if any.
+  util::scoped_fd file;
+  // Vocabulary lookup table.  Not to be confused with the vocab words themselves.
+  util::scoped_memory vocab;
+  // Raw block of memory backing the language model data structures
+  util::scoped_memory search;
+};
+
+// Create just enough of a binary file to write vocabulary to it.
+uint8_t *SetupJustVocab(const Config &config, uint8_t order, std::size_t memory_size, Backing &backing);
+// Grow the binary file for the search data structure and set backing.search, returning the memory address where the search data structure should begin.
+uint8_t *GrowForSearch(const Config &config, std::size_t vocab_pad, std::size_t memory_size, Backing &backing);
+
+// Write header to binary file.  This is done last to prevent incomplete files
+// from loading.
+void FinishFile(const Config &config, ModelType model_type, unsigned int search_version, const std::vector<uint64_t> &counts, std::size_t vocab_pad, Backing &backing);
+
+namespace detail {
+
+bool IsBinaryFormat(int fd);
+
+void ReadHeader(int fd, Parameters &params);
+
+void MatchCheck(ModelType model_type, unsigned int search_version, const Parameters &params);
+
+void SeekPastHeader(int fd, const Parameters &params);
+
+uint8_t *SetupBinary(const Config &config, const Parameters &params, uint64_t memory_size, Backing &backing);
+
+void ComplainAboutARPA(const Config &config, ModelType model_type);
+
+} // namespace detail
+
+template <class To> void LoadLM(const char *file, const Config &config, To &to) {
+  Backing &backing = to.MutableBacking();
+  backing.file.reset(util::OpenReadOrThrow(file));
+
+  try {
+    if (detail::IsBinaryFormat(backing.file.get())) {
+      Parameters params;
+      detail::ReadHeader(backing.file.get(), params);
+      detail::MatchCheck(To::kModelType, To::kVersion, params);
+      // Replace the run-time configured probing_multiplier with the one in the file.
+      Config new_config(config);
+      new_config.probing_multiplier = params.fixed.probing_multiplier;
+      detail::SeekPastHeader(backing.file.get(), params);
+      To::UpdateConfigFromBinary(backing.file.get(), params.counts, new_config);
+      uint64_t memory_size = To::Size(params.counts, new_config);
+      uint8_t *start = detail::SetupBinary(new_config, params, memory_size, backing);
+      to.InitializeFromBinary(start, params, new_config, backing.file.get());
+    } else {
+      detail::ComplainAboutARPA(config, To::kModelType);
+      to.InitializeFromARPA(file, config);
+    }
+  } catch (util::Exception &e) {
+    e << " File: " << file;
+    throw;
+  }
+}
+
+} // namespace ngram
+} // namespace lm
+#endif // LM_BINARY_FORMAT__
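RecognizeBinary is the piece of this header meant for decoder authors: detect what is in the file, then construct the matching model class, whose constructor calls LoadLM internally. A minimal sketch of that flow, assuming the ProbingModel and TrieModel typedefs and the Order() accessor from lm/model.hh and lm/virtual_interface.hh:

    #include "lm/binary_format.hh"
    #include "lm/model.hh"

    #include <iostream>

    int main(int argc, char *argv[]) {
      if (argc != 2) return 1;
      // Defaults to PROBING when the file is an ARPA file rather than a binary.
      lm::ngram::ModelType type = lm::ngram::PROBING;
      lm::ngram::RecognizeBinary(argv[1], type);
      switch (type) {
        case lm::ngram::PROBING: {
          lm::ngram::ProbingModel model(argv[1]);  // constructor runs LoadLM
          std::cout << "Probing model, order " << (unsigned)model.Order() << '\n';
          break;
        }
        case lm::ngram::TRIE_SORTED: {
          lm::ngram::TrieModel model(argv[1]);
          std::cout << "Trie model, order " << (unsigned)model.Order() << '\n';
          break;
        }
        default:
          std::cerr << "Other model types omitted for brevity.\n";
      }
      return 0;
    }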
diff --git a/lm/blank.hh b/lm/blank.hh
new file mode 100644
index 000000000..4da812096
--- /dev/null
+++ b/lm/blank.hh
@@ -0,0 +1,43 @@
+#ifndef LM_BLANK__
+#define LM_BLANK__
+
+#include <limits>
+
+#include <stdint.h>
+#include <math.h>
+
+namespace lm {
+namespace ngram {
+
+/* Suppose "foo bar" appears with zero backoff but there is no trigram
+ * beginning with these words.  Then, when scoring "foo bar", the model could
+ * return out_state containing "bar" or even null context if "bar" also has no
+ * backoff and is never followed by another word.  Then the backoff is set to
+ * kNoExtensionBackoff.  If the n-gram might be extended, then out_state must
+ * contain the full n-gram, in which case kExtensionBackoff is set.  In any
+ * case, if an n-gram has non-zero backoff, the full state is returned so
+ * backoff can be properly charged.
+ * These differ only in sign bit because the backoff is in fact zero in either
+ * case.
+ */
+const float kNoExtensionBackoff = -0.0;
+const float kExtensionBackoff = 0.0;
+const uint64_t kNoExtensionQuant = 0;
+const uint64_t kExtensionQuant = 1;
+
+inline void SetExtension(float &backoff) {
+  if (backoff == kNoExtensionBackoff) backoff = kExtensionBackoff;
+}
+
+// This compiles down nicely.
+inline bool HasExtension(const float &backoff) {
+  typedef union { float f; uint32_t i; } UnionValue;
+  UnionValue compare, interpret;
+  compare.f = kNoExtensionBackoff;
+  interpret.f = backoff;
+  return compare.i != interpret.i;
+}
+
+} // namespace ngram
+} // namespace lm
+#endif // LM_BLANK__
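The sign-bit trick above works because IEEE 754 makes -0.0 and 0.0 equal in arithmetic yet distinct in storage. A quick standalone demonstration (plain standard C++, nothing KenLM-specific):

    #include <cassert>
    #include <cstring>
    #include <stdint.h>

    int main() {
      float no_ext = -0.0f, ext = 0.0f;
      assert(no_ext == ext);           // scoring arithmetic sees zero backoff either way
      uint32_t a, b;
      std::memcpy(&a, &no_ext, 4);
      std::memcpy(&b, &ext, 4);
      assert(a != b);                  // but the stored bit patterns differ
      assert(a == 0x80000000u && b == 0u);  // only the sign bit is set
      return 0;
    }

So the flag costs no storage: it rides in a bit that is otherwise wasted on a zero value, which is why HasExtension compares bit patterns through a union rather than comparing floats.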
diff --git a/lm/build_binary.cc b/lm/build_binary.cc
new file mode 100644
index 000000000..2b8c9d5b2
--- /dev/null
+++ b/lm/build_binary.cc
@@ -0,0 +1,255 @@
+#include "lm/model.hh"
+#include "util/file_piece.hh"
+
+#include <cstdlib>
+#include <exception>
+#include <iostream>
+#include <iomanip>
+
+#include <math.h>
+#include <stdlib.h>
+
+#ifdef WIN32
+#include "util/getopt.hh"
+#else
+#include <unistd.h>
+#endif
+
+namespace lm {
+namespace ngram {
+namespace {
+
+void Usage(const char *name) {
+  std::cerr << "Usage: " << name << " [-u log10_unknown_probability] [-s] [-i] [-w mmap|after] [-p probing_multiplier] [-t trie_temporary] [-m trie_building_megabytes] [-q bits] [-b bits] [-a bits] [type] input.arpa [output.mmap]\n\n"
+"-u sets the log10 probability for <unk> if the ARPA file does not have one.\n"
+"   Default is -100.  The ARPA file will always take precedence.\n"
+"-s allows models to be built even if they do not have <s> and </s>.\n"
+"-i allows buggy models from IRSTLM by mapping positive log probability to 0.\n"
+"-w mmap|after determines how writing is done.\n"
+"   mmap maps the binary file and writes to it.  Default for trie.\n"
+"   after allocates anonymous memory, builds, and writes.  Default for probing.\n"
+"-r \"order1.arpa order2 order3 order4\" adds lower-order rest costs from these\n"
+"   model files.  order1.arpa must be an ARPA file.  All others may be ARPA or\n"
+"   the same data structure as being built.  All files must have the same\n"
+"   vocabulary.  For probing, the unigrams must be in the same order.\n\n"
+"type is either probing or trie.  Default is probing.\n\n"
+"probing uses a probing hash table.  It is the fastest but uses the most memory.\n"
+"-p sets the space multiplier and must be >1.0.  The default is 1.5.\n\n"
+"trie is a straightforward trie with bit-level packing.  It uses the least\n"
+"memory and is still faster than SRI or IRST.  Building the trie format uses an\n"
+"on-disk sort to save memory.\n"
+"-t is the temporary directory prefix.  Default is the output file name.\n"
+"-m limits memory use for sorting.  Measured in MB.  Default is 1024MB.\n"
+"-q turns quantization on and sets the number of bits (e.g. -q 8).\n"
+"-b sets backoff quantization bits.  Requires -q and defaults to that value.\n"
+"-a compresses pointers using an array of offsets.  The parameter is the\n"
+"   maximum number of bits encoded by the array.  Memory is minimized subject\n"
+"   to the maximum, so pick 255 to minimize memory.\n\n"
+"Get a memory estimate by passing an ARPA file without an output file name.\n";
+  exit(1);
+}
+
+// I could really use boost::lexical_cast right about now.
+float ParseFloat(const char *from) {
+  char *end;
+  float ret = strtod(from, &end);
+  if (*end) throw util::ParseNumberException(from);
+  return ret;
+}
+unsigned long int ParseUInt(const char *from) {
+  char *end;
+  unsigned long int ret = strtoul(from, &end, 10);
+  if (*end) throw util::ParseNumberException(from);
+  return ret;
+}
+
+uint8_t ParseBitCount(const char *from) {
+  unsigned long val = ParseUInt(from);
+  if (val > 25) {
+    util::ParseNumberException e(from);
+    e << " bit counts are limited to 25.";
+    throw e;
+  }
+  return val;
+}
+
+void ParseFileList(const char *from, std::vector<std::string> &to) {
+  to.clear();
+  while (true) {
+    const char *i;
+    for (i = from; *i && *i != ' '; ++i) {}
+    to.push_back(std::string(from, i - from));
+    if (!*i) break;
+    from = i + 1;
+  }
+}
+
+void ShowSizes(const char *file, const lm::ngram::Config &config) {
+  std::vector<uint64_t> counts;
+  util::FilePiece f(file);
+  lm::ReadARPACounts(f, counts);
+  uint64_t sizes[6];
+  sizes[0] = ProbingModel::Size(counts, config);
+  sizes[1] = RestProbingModel::Size(counts, config);
+  sizes[2] = TrieModel::Size(counts, config);
+  sizes[3] = QuantTrieModel::Size(counts, config);
+  sizes[4] = ArrayTrieModel::Size(counts, config);
+  sizes[5] = QuantArrayTrieModel::Size(counts, config);
+  uint64_t max_length = *std::max_element(sizes, sizes + sizeof(sizes) / sizeof(uint64_t));
+  uint64_t min_length = *std::min_element(sizes, sizes + sizeof(sizes) / sizeof(uint64_t));
+  uint64_t divide;
+  char prefix;
+  if (min_length < (1 << 10) * 10) {
+    prefix = ' ';
+    divide = 1;
+  } else if (min_length < (1 << 20) * 10) {
+    prefix = 'k';
+    divide = 1 << 10;
+  } else if (min_length < (1ULL << 30) * 10) {
+    prefix = 'M';
+    divide = 1 << 20;
+  } else {
+    prefix = 'G';
+    divide = 1 << 30;
+  }
+  long int length = std::max<long int>(2, static_cast<long int>(ceil(log10((double) max_length / divide))));
+  std::cout << "Memory estimate:\ntype    ";
+  // right align bytes.
+  for (long int i = 0; i < length - 2; ++i) std::cout << ' ';
+  std::cout << prefix << "B\n"
+    "probing " << std::setw(length) << (sizes[0] / divide) << " assuming -p " << config.probing_multiplier << "\n"
+    "probing " << std::setw(length) << (sizes[1] / divide) << " assuming -r models -p " << config.probing_multiplier << "\n"
+    "trie    " << std::setw(length) << (sizes[2] / divide) << " without quantization\n"
+    "trie    " << std::setw(length) << (sizes[3] / divide) << " assuming -q " << (unsigned)config.prob_bits << " -b " << (unsigned)config.backoff_bits << " quantization\n"
+    "trie    " << std::setw(length) << (sizes[4] / divide) << " assuming -a " << (unsigned)config.pointer_bhiksha_bits << " array pointer compression\n"
+    "trie    " << std::setw(length) << (sizes[5] / divide) << " assuming -a " << (unsigned)config.pointer_bhiksha_bits << " -q " << (unsigned)config.prob_bits << " -b " << (unsigned)config.backoff_bits << " array pointer compression and quantization\n";
+}
+
+void ProbingQuantizationUnsupported() {
+  std::cerr << "Quantization is only implemented in the trie data structure." << std::endl;
+  exit(1);
+}
+
+} // namespace
+} // namespace ngram
+} // namespace lm
+
+int main(int argc, char *argv[]) {
+  using namespace lm::ngram;
+
+  try {
+    bool quantize = false, set_backoff_bits = false, bhiksha = false, set_write_method = false, rest = false;
+    lm::ngram::Config config;
+    int opt;
+    while ((opt = getopt(argc, argv, "q:b:a:u:p:t:m:w:sir:")) != -1) {
+      switch(opt) {
+        case 'q':
+          config.prob_bits = ParseBitCount(optarg);
+          if (!set_backoff_bits) config.backoff_bits = config.prob_bits;
+          quantize = true;
+          break;
+        case 'b':
+          config.backoff_bits = ParseBitCount(optarg);
+          set_backoff_bits = true;
+          break;
+        case 'a':
+          config.pointer_bhiksha_bits = ParseBitCount(optarg);
+          bhiksha = true;
+          break;
+        case 'u':
+          config.unknown_missing_logprob = ParseFloat(optarg);
+          break;
+        case 'p':
+          config.probing_multiplier = ParseFloat(optarg);
+          break;
+        case 't':
+          config.temporary_directory_prefix = optarg;
+          break;
+        case 'm':
+          config.building_memory = ParseUInt(optarg) * 1048576;
+          break;
+        case 'w':
+          set_write_method = true;
+          if (!strcmp(optarg, "mmap")) {
+            config.write_method = Config::WRITE_MMAP;
+          } else if (!strcmp(optarg, "after")) {
+            config.write_method = Config::WRITE_AFTER;
+          } else {
+            Usage(argv[0]);
+          }
+          break;
+        case 's':
+          config.sentence_marker_missing = lm::SILENT;
+          break;
+        case 'i':
+          config.positive_log_probability = lm::SILENT;
+          break;
+        case 'r':
+          rest = true;
+          ParseFileList(optarg, config.rest_lower_files);
+          config.rest_function = Config::REST_LOWER;
+          break;
+        default:
+          Usage(argv[0]);
+      }
+    }
+    if (!quantize && set_backoff_bits) {
+      std::cerr << "You specified backoff quantization (-b) but not probability quantization (-q)" << std::endl;
+      abort();
+    }
+    if (optind + 1 == argc) {
+      ShowSizes(argv[optind], config);
+      return 0;
+    }
+    const char *model_type;
+    const char *from_file;
+
+    if (optind + 2 == argc) {
+      model_type = "probing";
+      from_file = argv[optind];
+      config.write_mmap = argv[optind + 1];
+    } else if (optind + 3 == argc) {
+      model_type = argv[optind];
+      from_file = argv[optind + 1];
+      config.write_mmap = argv[optind + 2];
+    } else {
+      Usage(argv[0]);
+    }
+    if (!strcmp(model_type, "probing")) {
+      if (!set_write_method) config.write_method = Config::WRITE_AFTER;
+      if (quantize || set_backoff_bits) ProbingQuantizationUnsupported();
+      if (rest) {
+        RestProbingModel(from_file, config);
+      } else {
+        ProbingModel(from_file, config);
+      }
+    } else if (!strcmp(model_type, "trie")) {
+      if (rest) {
+        std::cerr << "Rest + trie is not supported yet." << std::endl;
+        return 1;
+      }
+      if (!set_write_method) config.write_method = Config::WRITE_MMAP;
+      if (quantize) {
+        if (bhiksha) {
+          QuantArrayTrieModel(from_file, config);
+        } else {
+          QuantTrieModel(from_file, config);
+        }
+      } else {
+        if (bhiksha) {
+          ArrayTrieModel(from_file, config);
+        } else {
+          TrieModel(from_file, config);
+        }
+      }
+    } else {
+      Usage(argv[0]);
+    }
+  }
+  catch (const std::exception &e) {
+    std::cerr << e.what() << std::endl;
+    std::cerr << "ERROR" << std::endl;
+    return 1;
+  }
+  std::cerr << "SUCCESS" << std::endl;
+  return 0;
+}
diff --git a/lm/config.cc b/lm/config.cc
new file mode 100644
index 000000000..f9d988cab
--- /dev/null
+++ b/lm/config.cc
@@ -0,0 +1,29 @@
+#include "lm/config.hh"
+
+#include <iostream>
+
+namespace lm {
+namespace ngram {
+
+Config::Config() :
+  messages(&std::cerr),
+  enumerate_vocab(NULL),
+  unknown_missing(COMPLAIN),
+  sentence_marker_missing(THROW_UP),
+  positive_log_probability(THROW_UP),
+  unknown_missing_logprob(-100.0),
+  probing_multiplier(1.5),
+  building_memory(1073741824ULL), // 1 GB
+  temporary_directory_prefix(NULL),
+  arpa_complain(ALL),
+  write_mmap(NULL),
+  write_method(WRITE_AFTER),
+  include_vocab(true),
+  rest_function(REST_MAX),
+  prob_bits(8),
+  backoff_bits(8),
+  pointer_bhiksha_bits(22),
+  load_method(util::POPULATE_OR_READ) {}
+
+} // namespace ngram
+} // namespace lm
diff --git a/lm/config.hh b/lm/config.hh
new file mode 100644
index 000000000..739cee9c1
--- /dev/null
+++ b/lm/config.hh
@@ -0,0 +1,120 @@
+#ifndef LM_CONFIG__
+#define LM_CONFIG__
+
+#include "lm/lm_exception.hh"
+#include "util/mmap.hh"
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+/* Configuration for ngram model.  Separate header to reduce pollution. */
+
+namespace lm {
+
+class EnumerateVocab;
+
+namespace ngram {
+
+struct Config {
+  // EFFECTIVE FOR BOTH ARPA AND BINARY READS
+
+  // Where to log messages including the progress bar.  Set to NULL for
+  // silence.
+  std::ostream *messages;
+
+  // This will be called with every string in the vocabulary.  See
+  // enumerate_vocab.hh for more detail.  Config does not take ownership; you
+  // are still responsible for deleting it (or stack allocating).
+  EnumerateVocab *enumerate_vocab;
+
+
+
+  // ONLY EFFECTIVE WHEN READING ARPA
+
+  // What to do when <unk> isn't in the provided model.
+  WarningAction unknown_missing;
+  // What to do when <s> or </s> is missing from the model.
+  // If THROW_UP, the exception will be of type util::SpecialWordMissingException.
+  WarningAction sentence_marker_missing;
+
+  // What to do with a positive log probability.  For COMPLAIN and SILENT, map
+  // to 0.
+  WarningAction positive_log_probability;
+
+  // The probability to substitute for <unk> if it's missing from the model.
+  // No effect if the model has <unk> or unknown_missing == THROW_UP.
+  float unknown_missing_logprob;
+
+  // Size multiplier for probing hash table.  Must be > 1.  Space is linear in
+  // this.  Time is probing_multiplier / (probing_multiplier - 1).  No effect
+  // for sorted variant.
+  // If you find yourself setting this to a low number, consider using the
+  // TrieModel which has lower memory consumption.
+  float probing_multiplier;
+
+  // Amount of memory to use for building.  The actual memory usage will be
+  // higher since this just sets sort buffer size.  Only applies to trie
+  // models.
+  std::size_t building_memory;
+
+  // Template for temporary directory appropriate for passing to mkdtemp.
+  // The characters XXXXXX are appended before passing to mkdtemp.  Only
+  // applies to trie.  If NULL, defaults to write_mmap.  If that's NULL,
+  // defaults to input file name.
+  const char *temporary_directory_prefix;
+
+  // Level of complaining to do when loading from ARPA instead of binary format.
+  enum ARPALoadComplain {ALL, EXPENSIVE, NONE};
+  ARPALoadComplain arpa_complain;
+
+  // While loading an ARPA file, also write out this binary format file.  Set
+  // to NULL to disable.
+  const char *write_mmap;
+
+  enum WriteMethod {
+    WRITE_MMAP, // Map the file directly.
+    WRITE_AFTER // Write after we're done.
+  };
+  WriteMethod write_method;
+
+  // Include the vocab in the binary file?  Only effective if write_mmap != NULL.
+  bool include_vocab;
+
+
+  // Left rest options.  Only used when the model includes rest costs.
+  enum RestFunction {
+    REST_MAX,   // Maximum of any score to the left
+    REST_LOWER, // Use lower-order files given below.
+  };
+  RestFunction rest_function;
+  // Only used for REST_LOWER.
+  std::vector<std::string> rest_lower_files;
+
+
+
+  // Quantization options.  Only effective for QuantTrieModel.  One value is
+  // reserved for each of prob and backoff, so 2^bits - 1 buckets will be used
+  // to quantize (and one of the remaining backoffs will be 0).
+  uint8_t prob_bits, backoff_bits;
+
+  // Bhiksha compression (simple form).  Only works with trie.
+  uint8_t pointer_bhiksha_bits;
+
+
+
+  // ONLY EFFECTIVE WHEN READING BINARY
+
+  // How to get the giant array into memory: lazy mmap, populate, read etc.
+  // See util/mmap.hh for details of MapMethod.
+  util::LoadMethod load_method;
+
+
+
+  // Set defaults.
+  Config();
+};
+
+} /* namespace ngram */ } /* namespace lm */
+
+#endif // LM_CONFIG__
diff --git a/lm/enumerate_vocab.hh b/lm/enumerate_vocab.hh
new file mode 100644
index 000000000..27263621e
--- /dev/null
+++ b/lm/enumerate_vocab.hh
@@ -0,0 +1,28 @@
+#ifndef LM_ENUMERATE_VOCAB__
+#define LM_ENUMERATE_VOCAB__
+
+#include "lm/word_index.hh"
+#include "util/string_piece.hh"
+
+namespace lm {
+
+/* If you need the actual strings in the vocabulary, inherit from this class
+ * and implement Add.  Then put a pointer in Config.enumerate_vocab; it does
+ * not take ownership.  Add is called once per vocab word.  index starts at 0
+ * and increases by 1 each time.  This is only used by the Model constructor;
+ * the pointer is not retained by the class.
+ */
+class EnumerateVocab {
+  public:
+    virtual ~EnumerateVocab() {}
+
+    virtual void Add(WordIndex index, const StringPiece &str) = 0;
+
+  protected:
+    EnumerateVocab() {}
+};
+
+} // namespace lm
+
+#endif // LM_ENUMERATE_VOCAB__
+
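The two headers above wire together as follows. A sketch under the stated contract (the callback fires during model construction and the pointer is not retained; PrintVocab is a hypothetical name):

    #include "lm/enumerate_vocab.hh"
    #include "lm/model.hh"

    #include <iostream>
    #include <string>

    class PrintVocab : public lm::EnumerateVocab {
      public:
        void Add(lm::WordIndex index, const StringPiece &str) {
          std::cout << index << '\t' << std::string(str.data(), str.size()) << '\n';
        }
    };

    int main(int argc, char *argv[]) {
      if (argc != 2) return 1;
      PrintVocab printer;              // stack allocation is fine; see the comment above
      lm::ngram::Config config;
      config.enumerate_vocab = &printer;
      lm::ngram::ProbingModel model(argv[1], config);  // Add is called here, once per word
      std::cout << "Order: " << (unsigned)model.Order() << '\n';
      return 0;
    }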
diff --git a/lm/facade.hh b/lm/facade.hh
new file mode 100644
index 000000000..8b1860176
--- /dev/null
+++ b/lm/facade.hh
@@ -0,0 +1,64 @@
+#ifndef LM_FACADE__
+#define LM_FACADE__
+
+#include "lm/virtual_interface.hh"
+#include "util/string_piece.hh"
+
+#include <cstddef>
+
+namespace lm {
+namespace base {
+
+// Common model interface that depends on knowing the specific classes.
+// Curiously recurring template pattern.
+template <class Child, class StateT, class VocabularyT> class ModelFacade : public Model {
+  public:
+    typedef StateT State;
+    typedef VocabularyT Vocabulary;
+
+    // Default Score function calls FullScore.  Model can override this.
+    float Score(const State &in_state, const WordIndex new_word, State &out_state) const {
+      return static_cast<const Child*>(this)->FullScore(in_state, new_word, out_state).prob;
+    }
+
+    /* Translate from void* to State */
+    FullScoreReturn FullScore(const void *in_state, const WordIndex new_word, void *out_state) const {
+      return static_cast<const Child*>(this)->FullScore(
+          *reinterpret_cast<const State*>(in_state),
+          new_word,
+          *reinterpret_cast<State*>(out_state));
+    }
+    float Score(const void *in_state, const WordIndex new_word, void *out_state) const {
+      return static_cast<const Child*>(this)->Score(
+          *reinterpret_cast<const State*>(in_state),
+          new_word,
+          *reinterpret_cast<State*>(out_state));
+    }
+
+    const State &BeginSentenceState() const { return begin_sentence_; }
+    const State &NullContextState() const { return null_context_; }
+    const Vocabulary &GetVocabulary() const { return *static_cast<const Vocabulary*>(&BaseVocabulary()); }
+
+  protected:
+    ModelFacade() : Model(sizeof(State)) {}
+
+    virtual ~ModelFacade() {}
+
+    // begin_sentence and null_context can disappear after.  vocab should stay.
+    void Init(const State &begin_sentence, const State &null_context, const Vocabulary &vocab, unsigned char order) {
+      begin_sentence_ = begin_sentence;
+      null_context_ = null_context;
+      begin_sentence_memory_ = &begin_sentence_;
+      null_context_memory_ = &null_context_;
+      base_vocab_ = &vocab;
+      order_ = order;
+    }
+
+  private:
+    State begin_sentence_, null_context_;
+};
+
+} // namespace base
+} // namespace lm
+
+#endif // LM_FACADE__
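The curiously recurring template pattern used by ModelFacade is worth isolating. A generic toy illustration, not KenLM code: the base class downcasts `this` to the derived type at compile time, so a shared default method can call the derived class's implementation without a virtual dispatch.

    #include <iostream>

    template <class Child> class Facade {
      public:
        // Default Score is written once here, in terms of the child's FullScore.
        float Score(int word) const {
          return static_cast<const Child*>(this)->FullScore(word);
        }
    };

    class ToyModel : public Facade<ToyModel> {
      public:
        float FullScore(int word) const { return -0.5f * word; }
    };

    int main() {
      ToyModel m;
      std::cout << m.Score(4) << '\n';  // prints -2, dispatched without a vtable
      return 0;
    }

In facade.hh the same idea coexists with the virtual void* interface inherited from Model: the typed overloads are statically dispatched to the child, while the void* overloads satisfy the runtime interface by casting and forwarding.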
diff --git a/lm/fragment.cc b/lm/fragment.cc
new file mode 100644
index 000000000..0267cd4eb
--- /dev/null
+++ b/lm/fragment.cc
@@ -0,0 +1,37 @@
+#include "lm/binary_format.hh"
+#include "lm/model.hh"
+#include "lm/left.hh"
+#include "util/tokenize_piece.hh"
+
+template <class Model> void Query(const char *name) {
+  Model model(name);
+  std::string line;
+  lm::ngram::ChartState ignored;
+  while (getline(std::cin, line)) {
+    lm::ngram::RuleScore<Model> scorer(model, ignored);
+    for (util::TokenIter<util::SingleCharacter> i(line, ' '); i; ++i) {
+      scorer.Terminal(model.GetVocabulary().Index(*i));
+    }
+    std::cout << scorer.Finish() << '\n';
+  }
+}
+
+int main(int argc, char *argv[]) {
+  if (argc != 2) {
+    std::cerr << "Expected model file name." << std::endl;
+    return 1;
+  }
+  const char *name = argv[1];
+  lm::ngram::ModelType model_type = lm::ngram::PROBING;
+  lm::ngram::RecognizeBinary(name, model_type);
+  switch (model_type) {
+    case lm::ngram::PROBING:
+      Query<lm::ngram::ProbingModel>(name);
+      break;
+    case lm::ngram::REST_PROBING:
+      Query<lm::ngram::RestProbingModel>(name);
+      break;
+    default:
+      std::cerr << "Model type not supported yet." << std::endl;
+  }
+}
diff --git a/lm/left.hh b/lm/left.hh
new file mode 100644
index 000000000..8c27232e5
--- /dev/null
+++ b/lm/left.hh
@@ -0,0 +1,212 @@
+/* Efficient left and right language model state for sentence fragments.
+ * Intended usage:
+ * Store ChartState with every chart entry.
+ * To do a rule application:
+ * 1. Make a ChartState object for your new entry.
+ * 2. Construct RuleScore.
+ * 3. Going from left to right, call Terminal or NonTerminal.
+ *    For terminals, just pass the vocab id.
+ *    For non-terminals, pass that non-terminal's ChartState.
+ *    If your decoder expects scores inclusive of subtree scores (i.e. you
+ *    label entries with the highest-scoring path), pass the non-terminal's
+ *    score as prob.
+ *    If your decoder expects relative scores and will walk the chart later,
+ *    pass prob = 0.0.
+ *    In other words, the only effect of prob is that it gets added to the
+ *    returned log probability.
+ * 4. Call Finish.  It returns the log probability.
+ *
+ * There are a couple more details:
+ * Do not pass <s> to Terminal as it is formally not a word in the sentence,
+ * only context.  Instead, call BeginSentence.  If called, it should be the
+ * first call after RuleScore is constructed (since <s> is always the
+ * leftmost).
+ *
+ * If the leftmost RHS is a non-terminal, it's faster to call BeginNonTerminal.
+ *
+ * Hashing and sorting comparison operators are provided.  All state objects
+ * are POD.  If you intend to use memcmp on raw state objects, you must call
+ * ZeroRemaining first, as the value of array entries beyond length is
+ * otherwise undefined.
+ *
+ * Usage is of course not limited to chart decoding.  Anything that generates
+ * sentence fragments missing left context could benefit.  For example, a
+ * phrase-based decoder could pre-score phrases, storing ChartState with each
+ * phrase, even if hypotheses are generated left-to-right.
+ */
+
+#ifndef LM_LEFT__
+#define LM_LEFT__
+
+#include "lm/max_order.hh"
+#include "lm/state.hh"
+#include "lm/return.hh"
+
+#include "util/murmur_hash.hh"
+
+#include <algorithm>
+
+namespace lm {
+namespace ngram {
+
+template <class M> class RuleScore {
+  public:
+    explicit RuleScore(const M &model, ChartState &out) : model_(model), out_(out), left_done_(false), prob_(0.0) {
+      out.left.length = 0;
+      out.right.length = 0;
+    }
+
+    void BeginSentence() {
+      out_.right = model_.BeginSentenceState();
+      // out_.left is empty.
+      left_done_ = true;
+    }
+
+    void Terminal(WordIndex word) {
+      State copy(out_.right);
+      FullScoreReturn ret(model_.FullScore(copy, word, out_.right));
+      if (left_done_) { prob_ += ret.prob; return; }
+      if (ret.independent_left) {
+        prob_ += ret.prob;
+        left_done_ = true;
+        return;
+      }
+      out_.left.pointers[out_.left.length++] = ret.extend_left;
+      prob_ += ret.rest;
+      if (out_.right.length != copy.length + 1)
+        left_done_ = true;
+    }
+
+    // Faster version of NonTerminal for the case where the rule begins with a non-terminal.
+    void BeginNonTerminal(const ChartState &in, float prob = 0.0) {
+      prob_ = prob;
+      out_ = in;
+      left_done_ = in.left.full;
+    }
+
+    void NonTerminal(const ChartState &in, float prob = 0.0) {
+      prob_ += prob;
+
+      if (!in.left.length) {
+        if (in.left.full) {
+          for (const float *i = out_.right.backoff; i < out_.right.backoff + out_.right.length; ++i) prob_ += *i;
+          left_done_ = true;
+          out_.right = in.right;
+        }
+        return;
+      }
+
+      if (!out_.right.length) {
+        out_.right = in.right;
+        if (left_done_) {
+          prob_ += model_.UnRest(in.left.pointers, in.left.pointers + in.left.length, 1);
+          return;
+        }
+        if (out_.left.length) {
+          left_done_ = true;
+        } else {
+          out_.left = in.left;
+          left_done_ = in.left.full;
+        }
+        return;
+      }
+
+      float backoffs[KENLM_MAX_ORDER - 1], backoffs2[KENLM_MAX_ORDER - 1];
+      float *back = backoffs, *back2 = backoffs2;
+      unsigned char next_use = out_.right.length;
+
+      // First word
+      if (ExtendLeft(in, next_use, 1, out_.right.backoff, back)) return;
+
+      // Words after the first, so extending a bigram to begin with
+      for (unsigned char extend_length = 2; extend_length <= in.left.length; ++extend_length) {
+        if (ExtendLeft(in, next_use, extend_length, back, back2)) return;
+        std::swap(back, back2);
+      }
+
+      if (in.left.full) {
+        for (const float *i = back; i != back + next_use; ++i) prob_ += *i;
+        left_done_ = true;
+        out_.right = in.right;
+        return;
+      }
+
+      // Right state was minimized, so it's already independent of the new words to the left.
+      if (in.right.length < in.left.length) {
+        out_.right = in.right;
+        return;
+      }
+
+      // Shift existing words down.
+      for (WordIndex *i = out_.right.words + next_use - 1; i >= out_.right.words; --i) {
+        *(i + in.right.length) = *i;
+      }
+      // Add words from in.right.
+      std::copy(in.right.words, in.right.words + in.right.length, out_.right.words);
+      // Assemble backoff composed on the existing state's backoff followed by the new state's backoff.
+      std::copy(in.right.backoff, in.right.backoff + in.right.length, out_.right.backoff);
+      std::copy(back, back + next_use, out_.right.backoff + in.right.length);
+      out_.right.length = in.right.length + next_use;
+    }
+
+    float Finish() {
+      // An N-1-gram might extend left and right but we should still set full to true because it's an N-1-gram.
+      out_.left.full = left_done_ || (out_.left.length == model_.Order() - 1);
+      return prob_;
+    }
+
+    void Reset() {
+      prob_ = 0.0;
+      left_done_ = false;
+      out_.left.length = 0;
+      out_.right.length = 0;
+    }
+
+  private:
+    bool ExtendLeft(const ChartState &in, unsigned char &next_use, unsigned char extend_length, const float *back_in, float *back_out) {
+      ProcessRet(model_.ExtendLeft(
+            out_.right.words, out_.right.words + next_use, // Words to extend into
+            back_in, // Backoffs to use
+            in.left.pointers[extend_length - 1], extend_length, // Words to be extended
+            back_out, // Backoffs for the next score
+            next_use)); // Length of n-gram to use in next scoring.
+      if (next_use != out_.right.length) {
+        left_done_ = true;
+        if (!next_use) {
+          // Early exit.
+          out_.right = in.right;
+          prob_ += model_.UnRest(in.left.pointers + extend_length, in.left.pointers + in.left.length, extend_length + 1);
+          return true;
+        }
+      }
+      // Continue scoring.
+      return false;
+    }
+
+    void ProcessRet(const FullScoreReturn &ret) {
+      if (left_done_) {
+        prob_ += ret.prob;
+        return;
+      }
+      if (ret.independent_left) {
+        prob_ += ret.prob;
+        left_done_ = true;
+        return;
+      }
+      out_.left.pointers[out_.left.length++] = ret.extend_left;
+      prob_ += ret.rest;
+    }
+
+    const M &model_;
+
+    ChartState &out_;
+
+    bool left_done_;
+
+    float prob_;
+};
+
+} // namespace ngram
+} // namespace lm
+
+#endif // LM_LEFT__
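Following the numbered recipe in the left.hh header comment, a sketch of one rule application for a rule like "X -> the X loin", assuming a loaded ProbingModel and that `antecedent` and its score come from the decoder's chart:

    #include "lm/left.hh"
    #include "lm/model.hh"

    float ScoreRule(const lm::ngram::ProbingModel &model,
                    const lm::ngram::ChartState &antecedent, float antecedent_score,
                    lm::ngram::ChartState &out) {
      // Steps 1-2: a ChartState for the new entry and a RuleScore over it.
      lm::ngram::RuleScore<lm::ngram::ProbingModel> scorer(model, out);
      // Step 3: left to right, vocab ids for terminals, ChartState for non-terminals.
      scorer.Terminal(model.GetVocabulary().Index("the"));
      scorer.NonTerminal(antecedent, antecedent_score);
      scorer.Terminal(model.GetVocabulary().Index("loin"));
      // Step 4: Finish returns the log probability (antecedent_score included).
      return scorer.Finish();
    }

fragment.cc above is the all-terminal special case of the same loop; left_test.cc below exercises the non-terminal paths.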
+#define SLOPPY_CHECK_CLOSE(ref, value, tol) BOOST_CHECK_CLOSE(static_cast(ref), static_cast(value), static_cast(tol)); + +template void Short(const M &m) { + ChartState base; + { + RuleScore score(m, base); + Term("more"); + Term("loin"); + SLOPPY_CHECK_CLOSE(-1.206319 - 0.3561665, score.Finish(), 0.001); + } + BOOST_CHECK(base.left.full); + BOOST_CHECK_EQUAL(2, base.left.length); + BOOST_CHECK_EQUAL(1, base.right.length); + VCheck("loin", base.right.words[0]); + + ChartState more_left; + { + RuleScore score(m, more_left); + Term("little"); + score.NonTerminal(base, -1.206319 - 0.3561665); + // p(little more loin | null context) + SLOPPY_CHECK_CLOSE(-1.56538, score.Finish(), 0.001); + } + BOOST_CHECK_EQUAL(3, more_left.left.length); + BOOST_CHECK_EQUAL(1, more_left.right.length); + VCheck("loin", more_left.right.words[0]); + BOOST_CHECK(more_left.left.full); + + ChartState shorter; + { + RuleScore score(m, shorter); + Term("to"); + score.NonTerminal(base, -1.206319 - 0.3561665); + SLOPPY_CHECK_CLOSE(-0.30103 - 1.687872 - 1.206319 - 0.3561665, score.Finish(), 0.01); + } + BOOST_CHECK_EQUAL(1, shorter.left.length); + BOOST_CHECK_EQUAL(1, shorter.right.length); + VCheck("loin", shorter.right.words[0]); + BOOST_CHECK(shorter.left.full); +} + +template void Charge(const M &m) { + ChartState base; + { + RuleScore score(m, base); + Term("on"); + Term("more"); + SLOPPY_CHECK_CLOSE(-1.509559 -0.4771212 -1.206319, score.Finish(), 0.001); + } + BOOST_CHECK_EQUAL(1, base.left.length); + BOOST_CHECK_EQUAL(1, base.right.length); + VCheck("more", base.right.words[0]); + BOOST_CHECK(base.left.full); + + ChartState extend; + { + RuleScore score(m, extend); + Term("looking"); + score.NonTerminal(base, -1.509559 -0.4771212 -1.206319); + SLOPPY_CHECK_CLOSE(-3.91039, score.Finish(), 0.001); + } + BOOST_CHECK_EQUAL(2, extend.left.length); + BOOST_CHECK_EQUAL(1, extend.right.length); + VCheck("more", extend.right.words[0]); + BOOST_CHECK(extend.left.full); + + ChartState tobos; + { + RuleScore score(m, tobos); + score.BeginSentence(); + score.NonTerminal(extend, -3.91039); + SLOPPY_CHECK_CLOSE(-3.471169, score.Finish(), 0.001); + } + BOOST_CHECK_EQUAL(0, tobos.left.length); + BOOST_CHECK_EQUAL(1, tobos.right.length); +} + +template float LeftToRight(const M &m, const std::vector &words, bool begin_sentence = false) { + float ret = 0.0; + State right = begin_sentence ? 
m.BeginSentenceState() : m.NullContextState(); + for (std::vector::const_iterator i = words.begin(); i != words.end(); ++i) { + State copy(right); + ret += m.Score(copy, *i, right); + } + return ret; +} + +template float RightToLeft(const M &m, const std::vector &words, bool begin_sentence = false) { + float ret = 0.0; + ChartState state; + state.left.length = 0; + state.right.length = 0; + state.left.full = false; + for (std::vector::const_reverse_iterator i = words.rbegin(); i != words.rend(); ++i) { + ChartState copy(state); + RuleScore score(m, state); + score.Terminal(*i); + score.NonTerminal(copy, ret); + ret = score.Finish(); + } + if (begin_sentence) { + ChartState copy(state); + RuleScore score(m, state); + score.BeginSentence(); + score.NonTerminal(copy, ret); + ret = score.Finish(); + } + return ret; +} + +template float TreeMiddle(const M &m, const std::vector &words, bool begin_sentence = false) { + std::vector > states(words.size()); + for (unsigned int i = 0; i < words.size(); ++i) { + RuleScore score(m, states[i].first); + score.Terminal(words[i]); + states[i].second = score.Finish(); + } + while (states.size() > 1) { + std::vector > upper((states.size() + 1) / 2); + for (unsigned int i = 0; i < states.size() / 2; ++i) { + RuleScore score(m, upper[i].first); + score.NonTerminal(states[i*2].first, states[i*2].second); + score.NonTerminal(states[i*2+1].first, states[i*2+1].second); + upper[i].second = score.Finish(); + } + if (states.size() % 2) { + upper.back() = states.back(); + } + std::swap(states, upper); + } + + if (states.empty()) return 0.0; + + if (begin_sentence) { + ChartState ignored; + RuleScore score(m, ignored); + score.BeginSentence(); + score.NonTerminal(states.front().first, states.front().second); + return score.Finish(); + } else { + return states.front().second; + } + +} + +template void LookupVocab(const M &m, const StringPiece &str, std::vector &out) { + out.clear(); + for (util::TokenIter i(str, ' '); i; ++i) { + out.push_back(m.GetVocabulary().Index(*i)); + } +} + +#define TEXT_TEST(str) \ + LookupVocab(m, str, words); \ + expect = LeftToRight(m, words, rest); \ + SLOPPY_CHECK_CLOSE(expect, RightToLeft(m, words, rest), 0.001); \ + SLOPPY_CHECK_CLOSE(expect, TreeMiddle(m, words, rest), 0.001); \ + +// Build sentences, or parts thereof, from right to left. +template void GrowBig(const M &m, bool rest = false) { + std::vector words; + float expect; + TEXT_TEST("in biarritz watching considering looking . on a little more loin also would consider higher to look good unknown the screening foo bar , unknown however unknown "); + TEXT_TEST("on a little more loin also would consider higher to look good unknown the screening foo bar , unknown however unknown "); + TEXT_TEST("on a little more loin also would consider higher to look good"); + TEXT_TEST("more loin also would consider higher to look good"); + TEXT_TEST("more loin also would consider higher to look"); + TEXT_TEST("also would consider higher to look"); + TEXT_TEST("also would consider higher"); + TEXT_TEST("would consider higher to look"); + TEXT_TEST("consider higher to look"); + TEXT_TEST("consider higher to"); + TEXT_TEST("consider higher"); +} + +template void GrowSmall(const M &m, bool rest = false) { + std::vector words; + float expect; + TEXT_TEST("in biarritz watching considering looking . 
"); + TEXT_TEST("in biarritz watching considering looking ."); + TEXT_TEST("in biarritz"); +} + +template void AlsoWouldConsiderHigher(const M &m) { + ChartState also; + { + RuleScore score(m, also); + score.Terminal(m.GetVocabulary().Index("also")); + SLOPPY_CHECK_CLOSE(-1.687872, score.Finish(), 0.001); + } + ChartState would; + { + RuleScore score(m, would); + score.Terminal(m.GetVocabulary().Index("would")); + SLOPPY_CHECK_CLOSE(-1.687872, score.Finish(), 0.001); + } + ChartState combine_also_would; + { + RuleScore score(m, combine_also_would); + score.NonTerminal(also, -1.687872); + score.NonTerminal(would, -1.687872); + SLOPPY_CHECK_CLOSE(-1.687872 - 2.0, score.Finish(), 0.001); + } + BOOST_CHECK_EQUAL(2, combine_also_would.right.length); + + ChartState also_would; + { + RuleScore score(m, also_would); + score.Terminal(m.GetVocabulary().Index("also")); + score.Terminal(m.GetVocabulary().Index("would")); + SLOPPY_CHECK_CLOSE(-1.687872 - 2.0, score.Finish(), 0.001); + } + BOOST_CHECK_EQUAL(2, also_would.right.length); + + ChartState consider; + { + RuleScore score(m, consider); + score.Terminal(m.GetVocabulary().Index("consider")); + SLOPPY_CHECK_CLOSE(-1.687872, score.Finish(), 0.001); + } + BOOST_CHECK_EQUAL(1, consider.left.length); + BOOST_CHECK_EQUAL(1, consider.right.length); + BOOST_CHECK(!consider.left.full); + + ChartState higher; + float higher_score; + { + RuleScore score(m, higher); + score.Terminal(m.GetVocabulary().Index("higher")); + higher_score = score.Finish(); + } + SLOPPY_CHECK_CLOSE(-1.509559, higher_score, 0.001); + BOOST_CHECK_EQUAL(1, higher.left.length); + BOOST_CHECK_EQUAL(1, higher.right.length); + BOOST_CHECK(!higher.left.full); + VCheck("higher", higher.right.words[0]); + SLOPPY_CHECK_CLOSE(-0.30103, higher.right.backoff[0], 0.001); + + ChartState consider_higher; + { + RuleScore score(m, consider_higher); + score.NonTerminal(consider, -1.687872); + score.NonTerminal(higher, higher_score); + SLOPPY_CHECK_CLOSE(-1.509559 - 1.687872 - 0.30103, score.Finish(), 0.001); + } + BOOST_CHECK_EQUAL(2, consider_higher.left.length); + BOOST_CHECK(!consider_higher.left.full); + + ChartState full; + { + RuleScore score(m, full); + score.NonTerminal(combine_also_would, -1.687872 - 2.0); + score.NonTerminal(consider_higher, -1.509559 - 1.687872 - 0.30103); + SLOPPY_CHECK_CLOSE(-10.6879, score.Finish(), 0.001); + } + BOOST_CHECK_EQUAL(4, full.right.length); +} + +#define CHECK_SCORE(str, val) \ +{ \ + float got = val; \ + std::vector indices; \ + LookupVocab(m, str, indices); \ + SLOPPY_CHECK_CLOSE(LeftToRight(m, indices), got, 0.001); \ +} + +template void FullGrow(const M &m) { + std::vector words; + LookupVocab(m, "in biarritz watching considering looking . 
", words); + + ChartState lexical[7]; + float lexical_scores[7]; + for (unsigned int i = 0; i < 7; ++i) { + RuleScore score(m, lexical[i]); + score.Terminal(words[i]); + lexical_scores[i] = score.Finish(); + } + CHECK_SCORE("in", lexical_scores[0]); + CHECK_SCORE("biarritz", lexical_scores[1]); + CHECK_SCORE("watching", lexical_scores[2]); + CHECK_SCORE("", lexical_scores[6]); + + ChartState l1[4]; + float l1_scores[4]; + { + RuleScore score(m, l1[0]); + score.NonTerminal(lexical[0], lexical_scores[0]); + score.NonTerminal(lexical[1], lexical_scores[1]); + CHECK_SCORE("in biarritz", l1_scores[0] = score.Finish()); + } + { + RuleScore score(m, l1[1]); + score.NonTerminal(lexical[2], lexical_scores[2]); + score.NonTerminal(lexical[3], lexical_scores[3]); + CHECK_SCORE("watching considering", l1_scores[1] = score.Finish()); + } + { + RuleScore score(m, l1[2]); + score.NonTerminal(lexical[4], lexical_scores[4]); + score.NonTerminal(lexical[5], lexical_scores[5]); + CHECK_SCORE("looking .", l1_scores[2] = score.Finish()); + } + BOOST_CHECK_EQUAL(l1[2].left.length, 1); + l1[3] = lexical[6]; + l1_scores[3] = lexical_scores[6]; + + ChartState l2[2]; + float l2_scores[2]; + { + RuleScore score(m, l2[0]); + score.NonTerminal(l1[0], l1_scores[0]); + score.NonTerminal(l1[1], l1_scores[1]); + CHECK_SCORE("in biarritz watching considering", l2_scores[0] = score.Finish()); + } + { + RuleScore score(m, l2[1]); + score.NonTerminal(l1[2], l1_scores[2]); + score.NonTerminal(l1[3], l1_scores[3]); + CHECK_SCORE("looking . ", l2_scores[1] = score.Finish()); + } + BOOST_CHECK_EQUAL(l2[1].left.length, 1); + BOOST_CHECK(l2[1].left.full); + + ChartState top; + { + RuleScore score(m, top); + score.NonTerminal(l2[0], l2_scores[0]); + score.NonTerminal(l2[1], l2_scores[1]); + CHECK_SCORE("in biarritz watching considering looking . 
", score.Finish()); + } +} + +const char *FileLocation() { + if (boost::unit_test::framework::master_test_suite().argc < 2) { + return "test.arpa"; + } + return boost::unit_test::framework::master_test_suite().argv[1]; +} + +template void Everything() { + Config config; + config.messages = NULL; + M m(FileLocation(), config); + + Short(m); + Charge(m); + GrowBig(m); + AlsoWouldConsiderHigher(m); + GrowSmall(m); + FullGrow(m); +} + +BOOST_AUTO_TEST_CASE(ProbingAll) { + Everything(); +} +BOOST_AUTO_TEST_CASE(TrieAll) { + Everything(); +} +BOOST_AUTO_TEST_CASE(QuantTrieAll) { + Everything(); +} +BOOST_AUTO_TEST_CASE(ArrayQuantTrieAll) { + Everything(); +} +BOOST_AUTO_TEST_CASE(ArrayTrieAll) { + Everything(); +} + +BOOST_AUTO_TEST_CASE(RestProbing) { + Config config; + config.messages = NULL; + RestProbingModel m(FileLocation(), config); + GrowBig(m, true); +} + +} // namespace +} // namespace ngram +} // namespace lm diff --git a/lm/lm_exception.cc b/lm/lm_exception.cc new file mode 100644 index 000000000..0b572e984 --- /dev/null +++ b/lm/lm_exception.cc @@ -0,0 +1,23 @@ +#include "lm/lm_exception.hh" + +#include +#include + +namespace lm { + +ConfigException::ConfigException() throw() {} +ConfigException::~ConfigException() throw() {} + +LoadException::LoadException() throw() {} +LoadException::~LoadException() throw() {} + +FormatLoadException::FormatLoadException() throw() {} +FormatLoadException::~FormatLoadException() throw() {} + +VocabLoadException::VocabLoadException() throw() {} +VocabLoadException::~VocabLoadException() throw() {} + +SpecialWordMissingException::SpecialWordMissingException() throw() {} +SpecialWordMissingException::~SpecialWordMissingException() throw() {} + +} // namespace lm diff --git a/lm/lm_exception.hh b/lm/lm_exception.hh new file mode 100644 index 000000000..f607ced16 --- /dev/null +++ b/lm/lm_exception.hh @@ -0,0 +1,50 @@ +#ifndef LM_LM_EXCEPTION__ +#define LM_LM_EXCEPTION__ + +// Named to avoid conflict with util/exception.hh. + +#include "util/exception.hh" +#include "util/string_piece.hh" + +#include +#include + +namespace lm { + +typedef enum {THROW_UP, COMPLAIN, SILENT} WarningAction; + +class ConfigException : public util::Exception { + public: + ConfigException() throw(); + ~ConfigException() throw(); +}; + +class LoadException : public util::Exception { + public: + virtual ~LoadException() throw(); + + protected: + LoadException() throw(); +}; + +class FormatLoadException : public LoadException { + public: + FormatLoadException() throw(); + ~FormatLoadException() throw(); +}; + +class VocabLoadException : public LoadException { + public: + virtual ~VocabLoadException() throw(); + VocabLoadException() throw(); +}; + +class SpecialWordMissingException : public VocabLoadException { + public: + explicit SpecialWordMissingException() throw(); + ~SpecialWordMissingException() throw(); +}; + +} // namespace lm + +#endif // LM_LM_EXCEPTION diff --git a/lm/max_order.cc b/lm/max_order.cc new file mode 100644 index 000000000..94221201c --- /dev/null +++ b/lm/max_order.cc @@ -0,0 +1,6 @@ +#include "lm/max_order.hh" +#include + +int main(int argc, char *argv[]) { + std::cerr << "KenLM was compiled with a maximum supported n-gram order set to " << KENLM_MAX_ORDER << "." << std::endl; +} diff --git a/lm/max_order.hh b/lm/max_order.hh new file mode 100644 index 000000000..989f83247 --- /dev/null +++ b/lm/max_order.hh @@ -0,0 +1,12 @@ +/* IF YOUR BUILD SYSTEM PASSES -DKENLM_MAX_ORDER, THEN CHANGE THE BUILD SYSTEM. + * If not, this is the default maximum order. 
+ * Having this limit means that State can be + * (kMaxOrder - 1) * sizeof(float) bytes instead of + * sizeof(float*) + (kMaxOrder - 1) * sizeof(float) + malloc overhead + */ +#ifndef KENLM_MAX_ORDER +#define KENLM_MAX_ORDER 6 +#endif +#ifndef KENLM_ORDER_MESSAGE +#define KENLM_ORDER_MESSAGE "If your build system supports changing KENLM_MAX_ORDER, change it there and recompile. In the KenLM tarball or Moses, use e.g. `bjam --kenlm-max-order=6 -a'. Otherwise, edit lm/max_order.hh." +#endif diff --git a/lm/model.cc b/lm/model.cc new file mode 100644 index 000000000..40af8a637 --- /dev/null +++ b/lm/model.cc @@ -0,0 +1,307 @@ +#include "lm/model.hh" + +#include "lm/blank.hh" +#include "lm/lm_exception.hh" +#include "lm/search_hashed.hh" +#include "lm/search_trie.hh" +#include "lm/read_arpa.hh" +#include "util/have.hh" +#include "util/murmur_hash.hh" + +#include +#include +#include +#include +#include + +namespace lm { +namespace ngram { +namespace detail { + +template const ModelType GenericModel::kModelType = Search::kModelType; + +template uint64_t GenericModel::Size(const std::vector &counts, const Config &config) { + return VocabularyT::Size(counts[0], config) + Search::Size(counts, config); +} + +template void GenericModel::SetupMemory(void *base, const std::vector &counts, const Config &config) { + size_t goal_size = util::CheckOverflow(Size(counts, config)); + uint8_t *start = static_cast(base); + size_t allocated = VocabularyT::Size(counts[0], config); + vocab_.SetupMemory(start, allocated, counts[0], config); + start += allocated; + start = search_.SetupMemory(start, counts, config); + if (static_cast(start - static_cast(base)) != goal_size) UTIL_THROW(FormatLoadException, "The data structures took " << (start - static_cast(base)) << " but Size says they should take " << goal_size); +} + +template GenericModel::GenericModel(const char *file, const Config &config) { + LoadLM(file, config, *this); + + // g++ prints warnings unless these are fully initialized. + State begin_sentence = State(); + begin_sentence.length = 1; + begin_sentence.words[0] = vocab_.BeginSentence(); + typename Search::Node ignored_node; + bool ignored_independent_left; + uint64_t ignored_extend_left; + begin_sentence.backoff[0] = search_.LookupUnigram(begin_sentence.words[0], ignored_node, ignored_independent_left, ignored_extend_left).Backoff(); + State null_context = State(); + null_context.length = 0; + P::Init(begin_sentence, null_context, vocab_, search_.Order()); +} + +namespace { +void CheckCounts(const std::vector &counts) { + UTIL_THROW_IF(counts.size() > KENLM_MAX_ORDER, FormatLoadException, "This model has order " << counts.size() << " but KenLM was compiled to support up to " << KENLM_MAX_ORDER << ". " << KENLM_ORDER_MESSAGE); + if (sizeof(uint64_t) > sizeof(std::size_t)) { + for (std::vector::const_iterator i = counts.begin(); i != counts.end(); ++i) { + UTIL_THROW_IF(*i > static_cast(std::numeric_limits::max()), util::OverflowException, "This model has " << *i << " " << (i - counts.begin() + 1) << "-grams which is too many for 32-bit machines."); + } + } +} +} // namespace + +template void GenericModel::InitializeFromBinary(void *start, const Parameters ¶ms, const Config &config, int fd) { + CheckCounts(params.counts); + SetupMemory(start, params.counts, config); + vocab_.LoadedBinary(params.fixed.has_vocabulary, fd, config.enumerate_vocab); + search_.LoadedBinary(); +} + +template void GenericModel::InitializeFromARPA(const char *file, const Config &config) { + // Backing file is the ARPA. 
Steal it so we can make the backing file the mmap output if any. + util::FilePiece f(backing_.file.release(), file, config.messages); + try { + std::vector counts; + // File counts do not include pruned trigrams that extend to quadgrams etc. These will be fixed by search_. + ReadARPACounts(f, counts); + CheckCounts(counts); + if (counts.size() < 2) UTIL_THROW(FormatLoadException, "This ngram implementation assumes at least a bigram model."); + if (config.probing_multiplier <= 1.0) UTIL_THROW(ConfigException, "probing multiplier must be > 1.0"); + + std::size_t vocab_size = util::CheckOverflow(VocabularyT::Size(counts[0], config)); + // Setup the binary file for writing the vocab lookup table. The search_ is responsible for growing the binary file to its needs. + vocab_.SetupMemory(SetupJustVocab(config, counts.size(), vocab_size, backing_), vocab_size, counts[0], config); + + if (config.write_mmap) { + WriteWordsWrapper wrap(config.enumerate_vocab); + vocab_.ConfigureEnumerate(&wrap, counts[0]); + search_.InitializeFromARPA(file, f, counts, config, vocab_, backing_); + wrap.Write(backing_.file.get()); + } else { + vocab_.ConfigureEnumerate(config.enumerate_vocab, counts[0]); + search_.InitializeFromARPA(file, f, counts, config, vocab_, backing_); + } + + if (!vocab_.SawUnk()) { + assert(config.unknown_missing != THROW_UP); + // Default probabilities for unknown. + search_.UnknownUnigram().backoff = 0.0; + search_.UnknownUnigram().prob = config.unknown_missing_logprob; + } + FinishFile(config, kModelType, kVersion, counts, vocab_.UnkCountChangePadding(), backing_); + } catch (util::Exception &e) { + e << " Byte: " << f.Offset(); + throw; + } +} + +template void GenericModel::UpdateConfigFromBinary(int fd, const std::vector &counts, Config &config) { + util::AdvanceOrThrow(fd, VocabularyT::Size(counts[0], config)); + Search::UpdateConfigFromBinary(fd, counts, config); +} + +template FullScoreReturn GenericModel::FullScore(const State &in_state, const WordIndex new_word, State &out_state) const { + FullScoreReturn ret = ScoreExceptBackoff(in_state.words, in_state.words + in_state.length, new_word, out_state); + for (const float *i = in_state.backoff + ret.ngram_length - 1; i < in_state.backoff + in_state.length; ++i) { + ret.prob += *i; + } + return ret; +} + +template FullScoreReturn GenericModel::FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, State &out_state) const { + context_rend = std::min(context_rend, context_rbegin + P::Order() - 1); + FullScoreReturn ret = ScoreExceptBackoff(context_rbegin, context_rend, new_word, out_state); + + // Add the backoff weights for n-grams of order start to (context_rend - context_rbegin). + unsigned char start = ret.ngram_length; + if (context_rend - context_rbegin < static_cast(start)) return ret; + + bool independent_left; + uint64_t extend_left; + typename Search::Node node; + if (start <= 1) { + ret.prob += search_.LookupUnigram(*context_rbegin, node, independent_left, extend_left).Backoff(); + start = 2; + } else if (!search_.FastMakeNode(context_rbegin, context_rbegin + start - 1, node)) { + return ret; + } + // i is the order of the backoff we're looking for. 
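+  // (LookupMiddle is indexed by n-gram order minus 2, so entry 0 holds bigrams.)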
+  unsigned char order_minus_2 = start - 2;
+  for (const WordIndex *i = context_rbegin + start - 1; i < context_rend; ++i, ++order_minus_2) {
+    typename Search::MiddlePointer p(search_.LookupMiddle(order_minus_2, *i, node, independent_left, extend_left));
+    if (!p.Found()) break;
+    ret.prob += p.Backoff();
+  }
+  return ret;
+}
+
+template <class Search, class VocabularyT> void GenericModel<Search, VocabularyT>::GetState(const WordIndex *context_rbegin, const WordIndex *context_rend, State &out_state) const {
+  // Generate a state from context.
+  context_rend = std::min(context_rend, context_rbegin + P::Order() - 1);
+  if (context_rend == context_rbegin) {
+    out_state.length = 0;
+    return;
+  }
+  typename Search::Node node;
+  bool independent_left;
+  uint64_t extend_left;
+  out_state.backoff[0] = search_.LookupUnigram(*context_rbegin, node, independent_left, extend_left).Backoff();
+  out_state.length = HasExtension(out_state.backoff[0]) ? 1 : 0;
+  float *backoff_out = out_state.backoff + 1;
+  unsigned char order_minus_2 = 0;
+  for (const WordIndex *i = context_rbegin + 1; i < context_rend; ++i, ++backoff_out, ++order_minus_2) {
+    typename Search::MiddlePointer p(search_.LookupMiddle(order_minus_2, *i, node, independent_left, extend_left));
+    if (!p.Found()) {
+      std::copy(context_rbegin, context_rbegin + out_state.length, out_state.words);
+      return;
+    }
+    *backoff_out = p.Backoff();
+    if (HasExtension(*backoff_out)) out_state.length = i - context_rbegin + 1;
+  }
+  std::copy(context_rbegin, context_rbegin + out_state.length, out_state.words);
+}
+
+template <class Search, class VocabularyT> FullScoreReturn GenericModel<Search, VocabularyT>::ExtendLeft(
+    const WordIndex *add_rbegin, const WordIndex *add_rend,
+    const float *backoff_in,
+    uint64_t extend_pointer,
+    unsigned char extend_length,
+    float *backoff_out,
+    unsigned char &next_use) const {
+  FullScoreReturn ret;
+  typename Search::Node node;
+  if (extend_length == 1) {
+    typename Search::UnigramPointer ptr(search_.LookupUnigram(static_cast<WordIndex>(extend_pointer), node, ret.independent_left, ret.extend_left));
+    ret.rest = ptr.Rest();
+    ret.prob = ptr.Prob();
+    assert(!ret.independent_left);
+  } else {
+    typename Search::MiddlePointer ptr(search_.Unpack(extend_pointer, extend_length, node));
+    ret.rest = ptr.Rest();
+    ret.prob = ptr.Prob();
+    ret.extend_left = extend_pointer;
+    // If this function is called, then it does depend on left words.
+    ret.independent_left = false;
+  }
+  float subtract_me = ret.rest;
+  ret.ngram_length = extend_length;
+  next_use = extend_length;
+  ResumeScore(add_rbegin, add_rend, extend_length - 1, node, backoff_out, next_use, ret);
+  next_use -= extend_length;
+  // Charge backoffs.
+  for (const float *b = backoff_in + ret.ngram_length - extend_length; b < backoff_in + (add_rend - add_rbegin); ++b) ret.prob += *b;
+  ret.prob -= subtract_me;
+  ret.rest -= subtract_me;
+  return ret;
+}
+
+namespace {
+// Do a paranoid copy of history, assuming new_word has already been copied
+// (hence the -1).  out_state.length could be zero so I avoided using
+// std::copy.
+void CopyRemainingHistory(const WordIndex *from, State &out_state) {
+  WordIndex *out = out_state.words + 1;
+  const WordIndex *in_end = from + static_cast<ptrdiff_t>(out_state.length) - 1;
+  for (const WordIndex *in = from; in < in_end; ++in, ++out) *out = *in;
+}
+} // namespace
+
+/* Ugly optimized function.  Produce a score excluding backoff.
+ * The search goes in increasing order of ngram length.
+ * Context goes backward, so context_begin is the word immediately preceding
+ * new_word.
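+ * E.g. with context "a b" and new word w: if "a b w" is absent but "b w" is
+ * present, the full score is p(w | b) + b(a b); FullScore adds that b(a b)
+ * term on top of what this function returns (the standard backoff identity,
+ * spelled out here for intuition).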
+ */ +template FullScoreReturn GenericModel::ScoreExceptBackoff( + const WordIndex *const context_rbegin, + const WordIndex *const context_rend, + const WordIndex new_word, + State &out_state) const { + FullScoreReturn ret; + // ret.ngram_length contains the last known non-blank ngram length. + ret.ngram_length = 1; + + typename Search::Node node; + typename Search::UnigramPointer uni(search_.LookupUnigram(new_word, node, ret.independent_left, ret.extend_left)); + out_state.backoff[0] = uni.Backoff(); + ret.prob = uni.Prob(); + ret.rest = uni.Rest(); + + // This is the length of the context that should be used for continuation to the right. + out_state.length = HasExtension(out_state.backoff[0]) ? 1 : 0; + // We'll write the word anyway since it will probably be used and does no harm being there. + out_state.words[0] = new_word; + if (context_rbegin == context_rend) return ret; + + ResumeScore(context_rbegin, context_rend, 0, node, out_state.backoff + 1, out_state.length, ret); + CopyRemainingHistory(context_rbegin, out_state); + return ret; +} + +template void GenericModel::ResumeScore(const WordIndex *hist_iter, const WordIndex *const context_rend, unsigned char order_minus_2, typename Search::Node &node, float *backoff_out, unsigned char &next_use, FullScoreReturn &ret) const { + for (; ; ++order_minus_2, ++hist_iter, ++backoff_out) { + if (hist_iter == context_rend) return; + if (ret.independent_left) return; + if (order_minus_2 == P::Order() - 2) break; + + typename Search::MiddlePointer pointer(search_.LookupMiddle(order_minus_2, *hist_iter, node, ret.independent_left, ret.extend_left)); + if (!pointer.Found()) return; + *backoff_out = pointer.Backoff(); + ret.prob = pointer.Prob(); + ret.rest = pointer.Rest(); + ret.ngram_length = order_minus_2 + 2; + if (HasExtension(*backoff_out)) { + next_use = ret.ngram_length; + } + } + ret.independent_left = true; + typename Search::LongestPointer longest(search_.LookupLongest(*hist_iter, node)); + if (longest.Found()) { + ret.prob = longest.Prob(); + ret.rest = ret.prob; + // There is no blank in longest_. 
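+    // A match of maximal order cannot be extended further left, so rest == prob here.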
+ ret.ngram_length = P::Order(); + } +} + +template float GenericModel::InternalUnRest(const uint64_t *pointers_begin, const uint64_t *pointers_end, unsigned char first_length) const { + float ret; + typename Search::Node node; + if (first_length == 1) { + if (pointers_begin >= pointers_end) return 0.0; + bool independent_left; + uint64_t extend_left; + typename Search::UnigramPointer ptr(search_.LookupUnigram(static_cast(*pointers_begin), node, independent_left, extend_left)); + ret = ptr.Prob() - ptr.Rest(); + ++first_length; + ++pointers_begin; + } else { + ret = 0.0; + } + for (const uint64_t *i = pointers_begin; i < pointers_end; ++i, ++first_length) { + typename Search::MiddlePointer ptr(search_.Unpack(*i, first_length, node)); + ret += ptr.Prob() - ptr.Rest(); + } + return ret; +} + +template class GenericModel, ProbingVocabulary>; +template class GenericModel, ProbingVocabulary>; +template class GenericModel, SortedVocabulary>; +template class GenericModel, SortedVocabulary>; +template class GenericModel, SortedVocabulary>; +template class GenericModel, SortedVocabulary>; + +} // namespace detail +} // namespace ngram +} // namespace lm diff --git a/lm/model.hh b/lm/model.hh new file mode 100644 index 000000000..13ff864e1 --- /dev/null +++ b/lm/model.hh @@ -0,0 +1,159 @@ +#ifndef LM_MODEL__ +#define LM_MODEL__ + +#include "lm/bhiksha.hh" +#include "lm/binary_format.hh" +#include "lm/config.hh" +#include "lm/facade.hh" +#include "lm/quantize.hh" +#include "lm/search_hashed.hh" +#include "lm/search_trie.hh" +#include "lm/state.hh" +#include "lm/value.hh" +#include "lm/vocab.hh" +#include "lm/weights.hh" + +#include "util/murmur_hash.hh" + +#include +#include + +#include + +namespace util { class FilePiece; } + +namespace lm { +namespace ngram { +namespace detail { + +// Should return the same results as SRI. +// ModelFacade typedefs Vocabulary so we use VocabularyT to avoid naming conflicts. +template class GenericModel : public base::ModelFacade, State, VocabularyT> { + private: + typedef base::ModelFacade, State, VocabularyT> P; + public: + // This is the model type returned by RecognizeBinary. + static const ModelType kModelType; + + static const unsigned int kVersion = Search::kVersion; + + /* Get the size of memory that will be mapped given ngram counts. This + * does not include small non-mapped control structures, such as this class + * itself. + */ + static uint64_t Size(const std::vector &counts, const Config &config = Config()); + + /* Load the model from a file. It may be an ARPA or binary file. Binary + * files must have the format expected by this class or you'll get an + * exception. So TrieModel can only load ARPA or binary created by + * TrieModel. To classify binary files, call RecognizeBinary in + * lm/binary_format.hh. + */ + explicit GenericModel(const char *file, const Config &config = Config()); + + /* Score p(new_word | in_state) and incorporate new_word into out_state. + * Note that in_state and out_state must be different references: + * &in_state != &out_state. + */ + FullScoreReturn FullScore(const State &in_state, const WordIndex new_word, State &out_state) const; + + /* Slower call without in_state. Try to remember state, but sometimes it + * would cost too much memory or your decoder isn't setup properly. + * To use this function, make an array of WordIndex containing the context + * vocabulary ids in reverse order. Then, pass the bounds of the array: + * [context_rbegin, context_rend). 
The new_word is not part of the context + * array unless you intend to repeat words. + */ + FullScoreReturn FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, State &out_state) const; + + /* Get the state for a context. Don't use this if you can avoid it. Use + * BeginSentenceState or EmptyContextState and extend from those. If + * you're only going to use this state to call FullScore once, use + * FullScoreForgotState. + * To use this function, make an array of WordIndex containing the context + * vocabulary ids in reverse order. Then, pass the bounds of the array: + * [context_rbegin, context_rend). + */ + void GetState(const WordIndex *context_rbegin, const WordIndex *context_rend, State &out_state) const; + + /* More efficient version of FullScore where a partial n-gram has already + * been scored. + * NOTE: THE RETURNED .rest AND .prob ARE RELATIVE TO THE .rest RETURNED BEFORE. + */ + FullScoreReturn ExtendLeft( + // Additional context in reverse order. This will update add_rend to + const WordIndex *add_rbegin, const WordIndex *add_rend, + // Backoff weights to use. + const float *backoff_in, + // extend_left returned by a previous query. + uint64_t extend_pointer, + // Length of n-gram that the pointer corresponds to. + unsigned char extend_length, + // Where to write additional backoffs for [extend_length + 1, min(Order() - 1, return.ngram_length)] + float *backoff_out, + // Amount of additional content that should be considered by the next call. + unsigned char &next_use) const; + + /* Return probabilities minus rest costs for an array of pointers. The + * first length should be the length of the n-gram to which pointers_begin + * points. + */ + float UnRest(const uint64_t *pointers_begin, const uint64_t *pointers_end, unsigned char first_length) const { + // Compiler should optimize this if away. + return Search::kDifferentRest ? InternalUnRest(pointers_begin, pointers_end, first_length) : 0.0; + } + + private: + friend void lm::ngram::LoadLM<>(const char *file, const Config &config, GenericModel &to); + + static void UpdateConfigFromBinary(int fd, const std::vector &counts, Config &config); + + FullScoreReturn ScoreExceptBackoff(const WordIndex *const context_rbegin, const WordIndex *const context_rend, const WordIndex new_word, State &out_state) const; + + // Score bigrams and above. Do not include backoff. + void ResumeScore(const WordIndex *context_rbegin, const WordIndex *const context_rend, unsigned char starting_order_minus_2, typename Search::Node &node, float *backoff_out, unsigned char &next_use, FullScoreReturn &ret) const; + + // Appears after Size in the cc file. + void SetupMemory(void *start, const std::vector &counts, const Config &config); + + void InitializeFromBinary(void *start, const Parameters ¶ms, const Config &config, int fd); + + void InitializeFromARPA(const char *file, const Config &config); + + float InternalUnRest(const uint64_t *pointers_begin, const uint64_t *pointers_end, unsigned char first_length) const; + + Backing &MutableBacking() { return backing_; } + + Backing backing_; + + VocabularyT vocab_; + + Search search_; +}; + +} // namespace detail + +// Instead of typedef, inherit. This allows the Model etc to be forward declared. +// Oh the joys of C and C++. 
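+// LM_COMMA exists to smuggle a comma through the preprocessor: a template-id
+// like GenericModel<A, B> would otherwise be split into two macro arguments.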
+#define LM_COMMA() , +#define LM_NAME_MODEL(name, from)\ +class name : public from {\ + public:\ + name(const char *file, const Config &config = Config()) : from(file, config) {}\ +}; + +LM_NAME_MODEL(ProbingModel, detail::GenericModel LM_COMMA() ProbingVocabulary>); +LM_NAME_MODEL(RestProbingModel, detail::GenericModel LM_COMMA() ProbingVocabulary>); +LM_NAME_MODEL(TrieModel, detail::GenericModel LM_COMMA() SortedVocabulary>); +LM_NAME_MODEL(ArrayTrieModel, detail::GenericModel LM_COMMA() SortedVocabulary>); +LM_NAME_MODEL(QuantTrieModel, detail::GenericModel LM_COMMA() SortedVocabulary>); +LM_NAME_MODEL(QuantArrayTrieModel, detail::GenericModel LM_COMMA() SortedVocabulary>); + +// Default implementation. No real reason for it to be the default. +typedef ::lm::ngram::ProbingVocabulary Vocabulary; +typedef ProbingModel Model; + +} // namespace ngram +} // namespace lm + +#endif // LM_MODEL__ diff --git a/lm/model_test.cc b/lm/model_test.cc new file mode 100644 index 000000000..32084b5b5 --- /dev/null +++ b/lm/model_test.cc @@ -0,0 +1,438 @@ +#include "lm/model.hh" + +#include + +#define BOOST_TEST_MODULE ModelTest +#include +#include + +// Apparently some Boost versions use templates and are pretty strict about types matching. +#define SLOPPY_CHECK_CLOSE(ref, value, tol) BOOST_CHECK_CLOSE(static_cast(ref), static_cast(value), static_cast(tol)); + +namespace lm { +namespace ngram { + +std::ostream &operator<<(std::ostream &o, const State &state) { + o << "State length " << static_cast(state.length) << ':'; + for (const WordIndex *i = state.words; i < state.words + state.length; ++i) { + o << ' ' << *i; + } + return o; +} + +namespace { + +const char *TestLocation() { + if (boost::unit_test::framework::master_test_suite().argc < 2) { + return "test.arpa"; + } + return boost::unit_test::framework::master_test_suite().argv[1]; +} +const char *TestNoUnkLocation() { + if (boost::unit_test::framework::master_test_suite().argc < 3) { + return "test_nounk.arpa"; + } + return boost::unit_test::framework::master_test_suite().argv[2]; +} + +template State GetState(const Model &model, const char *word, const State &in) { + WordIndex context[in.length + 1]; + context[0] = model.GetVocabulary().Index(word); + std::copy(in.words, in.words + in.length, context + 1); + State ret; + model.GetState(context, context + in.length + 1, ret); + return ret; +} + +#define StartTest(word, ngram, score, indep_left) \ + ret = model.FullScore( \ + state, \ + model.GetVocabulary().Index(word), \ + out);\ + SLOPPY_CHECK_CLOSE(score, ret.prob, 0.001); \ + BOOST_CHECK_EQUAL(static_cast(ngram), ret.ngram_length); \ + BOOST_CHECK_GE(std::min(ngram, 5 - 1), out.length); \ + BOOST_CHECK_EQUAL(indep_left, ret.independent_left); \ + BOOST_CHECK_EQUAL(out, GetState(model, word, state)); + +#define AppendTest(word, ngram, score, indep_left) \ + StartTest(word, ngram, score, indep_left) \ + state = out; + +template void Starters(const M &model) { + FullScoreReturn ret; + Model::State state(model.BeginSentenceState()); + Model::State out; + + StartTest("looking", 2, -0.4846522, true); + + // , probability plus backoff + StartTest(",", 1, -1.383514 + -0.4149733, true); + // probability plus backoff + StartTest("this_is_not_found", 1, -1.995635 + -0.4149733, true); +} + +template void Continuation(const M &model) { + FullScoreReturn ret; + Model::State state(model.BeginSentenceState()); + Model::State out; + + AppendTest("looking", 2, -0.484652, true); + AppendTest("on", 3, -0.348837, true); + AppendTest("a", 4, -0.0155266, true); + 
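// test.arpa is a 5-gram model, so ngram_length grows 2, 3, 4 and caps at 5 below. +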
AppendTest("little", 5, -0.00306122, true); + State preserve = state; + AppendTest("the", 1, -4.04005, true); + AppendTest("biarritz", 1, -1.9889, true); + AppendTest("not_found", 1, -2.29666, true); + AppendTest("more", 1, -1.20632 - 20.0, true); + AppendTest(".", 2, -0.51363, true); + AppendTest("", 3, -0.0191651, true); + BOOST_CHECK_EQUAL(0, state.length); + + state = preserve; + AppendTest("more", 5, -0.00181395, true); + BOOST_CHECK_EQUAL(4, state.length); + AppendTest("loin", 5, -0.0432557, true); + BOOST_CHECK_EQUAL(1, state.length); +} + +template void Blanks(const M &model) { + FullScoreReturn ret; + State state(model.NullContextState()); + State out; + AppendTest("also", 1, -1.687872, false); + AppendTest("would", 2, -2, true); + AppendTest("consider", 3, -3, true); + State preserve = state; + AppendTest("higher", 4, -4, true); + AppendTest("looking", 5, -5, true); + BOOST_CHECK_EQUAL(1, state.length); + + state = preserve; + // also would consider not_found + AppendTest("not_found", 1, -1.995635 - 7.0 - 0.30103, true); + + state = model.NullContextState(); + // higher looking is a blank. + AppendTest("higher", 1, -1.509559, false); + AppendTest("looking", 2, -1.285941 - 0.30103, false); + + State higher_looking = state; + + BOOST_CHECK_EQUAL(1, state.length); + AppendTest("not_found", 1, -1.995635 - 0.4771212, true); + + state = higher_looking; + // higher looking consider + AppendTest("consider", 1, -1.687872 - 0.4771212, true); + + state = model.NullContextState(); + AppendTest("would", 1, -1.687872, false); + BOOST_CHECK_EQUAL(1, state.length); + AppendTest("consider", 2, -1.687872 -0.30103, false); + BOOST_CHECK_EQUAL(2, state.length); + AppendTest("higher", 3, -1.509559 - 0.30103, false); + BOOST_CHECK_EQUAL(3, state.length); + AppendTest("looking", 4, -1.285941 - 0.30103, false); +} + +template void Unknowns(const M &model) { + FullScoreReturn ret; + State state(model.NullContextState()); + State out; + + AppendTest("not_found", 1, -1.995635, false); + State preserve = state; + AppendTest("not_found2", 2, -15.0, true); + AppendTest("not_found3", 2, -15.0 - 2.0, true); + + state = preserve; + AppendTest("however", 2, -4, true); + AppendTest("not_found3", 3, -6, true); +} + +template void MinimalState(const M &model) { + FullScoreReturn ret; + State state(model.NullContextState()); + State out; + + AppendTest("baz", 1, -6.535897, true); + BOOST_CHECK_EQUAL(0, state.length); + state = model.NullContextState(); + AppendTest("foo", 1, -3.141592, true); + BOOST_CHECK_EQUAL(1, state.length); + AppendTest("bar", 2, -6.0, true); + // Has to include the backoff weight. 
+  BOOST_CHECK_EQUAL(1, state.length);
+  AppendTest("bar", 1, -2.718281 + 3.0, true);
+  BOOST_CHECK_EQUAL(1, state.length);
+
+  state = model.NullContextState();
+  AppendTest("to", 1, -1.687872, false);
+  AppendTest("look", 2, -0.2922095, true);
+  BOOST_CHECK_EQUAL(2, state.length);
+  AppendTest("good", 3, -7, true);
+}
+
+template <class M> void ExtendLeftTest(const M &model) {
+  State right;
+  FullScoreReturn little(model.FullScore(model.NullContextState(), model.GetVocabulary().Index("little"), right));
+  const float kLittleProb = -1.285941;
+  SLOPPY_CHECK_CLOSE(kLittleProb, little.prob, 0.001);
+  unsigned char next_use;
+  float backoff_out[4];
+
+  FullScoreReturn extend_none(model.ExtendLeft(NULL, NULL, NULL, little.extend_left, 1, NULL, next_use));
+  BOOST_CHECK_EQUAL(0, next_use);
+  BOOST_CHECK_EQUAL(little.extend_left, extend_none.extend_left);
+  SLOPPY_CHECK_CLOSE(little.prob - little.rest, extend_none.prob, 0.001);
+  BOOST_CHECK_EQUAL(1, extend_none.ngram_length);
+
+  const WordIndex a = model.GetVocabulary().Index("a");
+  float backoff_in = 3.14;
+  // a little
+  FullScoreReturn extend_a(model.ExtendLeft(&a, &a + 1, &backoff_in, little.extend_left, 1, backoff_out, next_use));
+  BOOST_CHECK_EQUAL(1, next_use);
+  SLOPPY_CHECK_CLOSE(-0.69897, backoff_out[0], 0.001);
+  SLOPPY_CHECK_CLOSE(-0.09132547 - little.rest, extend_a.prob, 0.001);
+  BOOST_CHECK_EQUAL(2, extend_a.ngram_length);
+  BOOST_CHECK(!extend_a.independent_left);
+
+  const WordIndex on = model.GetVocabulary().Index("on");
+  FullScoreReturn extend_on(model.ExtendLeft(&on, &on + 1, &backoff_in, extend_a.extend_left, 2, backoff_out, next_use));
+  BOOST_CHECK_EQUAL(1, next_use);
+  SLOPPY_CHECK_CLOSE(-0.4771212, backoff_out[0], 0.001);
+  SLOPPY_CHECK_CLOSE(-0.0283603 - (extend_a.rest + little.rest), extend_on.prob, 0.001);
+  BOOST_CHECK_EQUAL(3, extend_on.ngram_length);
+  BOOST_CHECK(!extend_on.independent_left);
+
+  const WordIndex both[2] = {a, on};
+  float backoff_in_arr[4];
+  FullScoreReturn extend_both(model.ExtendLeft(both, both + 2, backoff_in_arr, little.extend_left, 1, backoff_out, next_use));
+  BOOST_CHECK_EQUAL(2, next_use);
+  SLOPPY_CHECK_CLOSE(-0.69897, backoff_out[0], 0.001);
+  SLOPPY_CHECK_CLOSE(-0.4771212, backoff_out[1], 0.001);
+  SLOPPY_CHECK_CLOSE(-0.0283603 - little.rest, extend_both.prob, 0.001);
+  BOOST_CHECK_EQUAL(3, extend_both.ngram_length);
+  BOOST_CHECK(!extend_both.independent_left);
+  BOOST_CHECK_EQUAL(extend_on.extend_left, extend_both.extend_left);
+}
+
+#define StatelessTest(word, provide, ngram, score) \
+  ret = model.FullScoreForgotState(indices + num_words - word, indices + num_words - word + provide, indices[num_words - word - 1], state); \
+  SLOPPY_CHECK_CLOSE(score, ret.prob, 0.001); \
+  BOOST_CHECK_EQUAL(static_cast<unsigned int>(ngram), ret.ngram_length); \
+  model.GetState(indices + num_words - word, indices + num_words - word + provide, before); \
+  ret = model.FullScore(before, indices[num_words - word - 1], out); \
+  BOOST_CHECK(state == out); \
+  SLOPPY_CHECK_CLOSE(score, ret.prob, 0.001); \
+  BOOST_CHECK_EQUAL(static_cast<unsigned int>(ngram), ret.ngram_length);
+
+template <class M> void Stateless(const M &model) {
+  const char *words[] = {"<s>", "looking", "on", "a", "little", "the", "biarritz", "not_found", "more", ".", "</s>"};
+  const size_t num_words = sizeof(words) / sizeof(const char*);
+  // Silence "array subscript is above array bounds" when extracting end pointer.
+ WordIndex indices[num_words + 1]; + for (unsigned int i = 0; i < num_words; ++i) { + indices[num_words - 1 - i] = model.GetVocabulary().Index(words[i]); + } + FullScoreReturn ret; + State state, out, before; + + ret = model.FullScoreForgotState(indices + num_words - 1, indices + num_words, indices[num_words - 2], state); + SLOPPY_CHECK_CLOSE(-0.484652, ret.prob, 0.001); + StatelessTest(1, 1, 2, -0.484652); + + // looking + StatelessTest(1, 2, 2, -0.484652); + // on + AppendTest("on", 3, -0.348837, true); + StatelessTest(2, 3, 3, -0.348837); + StatelessTest(2, 2, 3, -0.348837); + StatelessTest(2, 1, 2, -0.4638903); + // a + StatelessTest(3, 4, 4, -0.0155266); + // little + AppendTest("little", 5, -0.00306122, true); + StatelessTest(4, 5, 5, -0.00306122); + // the + AppendTest("the", 1, -4.04005, true); + StatelessTest(5, 5, 1, -4.04005); + // No context of the. + StatelessTest(5, 0, 1, -1.687872); + // biarritz + StatelessTest(6, 1, 1, -1.9889); + // not found + StatelessTest(7, 1, 1, -2.29666); + StatelessTest(7, 0, 1, -1.995635); + + WordIndex unk[1]; + unk[0] = 0; + model.GetState(unk, unk + 1, state); + BOOST_CHECK_EQUAL(1, state.length); + BOOST_CHECK_EQUAL(static_cast(0), state.words[0]); +} + +template void NoUnkCheck(const M &model) { + WordIndex unk_index = 0; + State state; + + FullScoreReturn ret = model.FullScoreForgotState(&unk_index, &unk_index + 1, unk_index, state); + SLOPPY_CHECK_CLOSE(-100.0, ret.prob, 0.001); +} + +template void Everything(const M &m) { + Starters(m); + Continuation(m); + Blanks(m); + Unknowns(m); + MinimalState(m); + ExtendLeftTest(m); + Stateless(m); +} + +class ExpectEnumerateVocab : public EnumerateVocab { + public: + ExpectEnumerateVocab() {} + + void Add(WordIndex index, const StringPiece &str) { + BOOST_CHECK_EQUAL(seen.size(), index); + seen.push_back(std::string(str.data(), str.length())); + } + + void Check(const base::Vocabulary &vocab) { + BOOST_CHECK_EQUAL(37ULL, seen.size()); + BOOST_REQUIRE(!seen.empty()); + BOOST_CHECK_EQUAL("", seen[0]); + for (WordIndex i = 0; i < seen.size(); ++i) { + BOOST_CHECK_EQUAL(i, vocab.Index(seen[i])); + } + } + + void Clear() { + seen.clear(); + } + + std::vector seen; +}; + +template void LoadingTest() { + Config config; + config.arpa_complain = Config::NONE; + config.messages = NULL; + config.probing_multiplier = 2.0; + { + ExpectEnumerateVocab enumerate; + config.enumerate_vocab = &enumerate; + ModelT m(TestLocation(), config); + enumerate.Check(m.GetVocabulary()); + BOOST_CHECK_EQUAL((WordIndex)37, m.GetVocabulary().Bound()); + Everything(m); + } + { + ExpectEnumerateVocab enumerate; + config.enumerate_vocab = &enumerate; + ModelT m(TestNoUnkLocation(), config); + enumerate.Check(m.GetVocabulary()); + BOOST_CHECK_EQUAL((WordIndex)37, m.GetVocabulary().Bound()); + NoUnkCheck(m); + } +} + +BOOST_AUTO_TEST_CASE(probing) { + LoadingTest(); +} +BOOST_AUTO_TEST_CASE(trie) { + LoadingTest(); +} +BOOST_AUTO_TEST_CASE(quant_trie) { + LoadingTest(); +} +BOOST_AUTO_TEST_CASE(bhiksha_trie) { + LoadingTest(); +} +BOOST_AUTO_TEST_CASE(quant_bhiksha_trie) { + LoadingTest(); +} + +template void BinaryTest() { + Config config; + config.write_mmap = "test.binary"; + config.messages = NULL; + ExpectEnumerateVocab enumerate; + config.enumerate_vocab = &enumerate; + + { + ModelT copy_model(TestLocation(), config); + enumerate.Check(copy_model.GetVocabulary()); + enumerate.Clear(); + Everything(copy_model); + } + + config.write_mmap = NULL; + + ModelType type; + BOOST_REQUIRE(RecognizeBinary("test.binary", type)); + 
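// RecognizeBinary just reads the binary file's sanity header to report the
+  // ModelType that wrote it; it does not load the model.
+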
BOOST_CHECK_EQUAL(ModelT::kModelType, type); + + { + ModelT binary("test.binary", config); + enumerate.Check(binary.GetVocabulary()); + Everything(binary); + } + unlink("test.binary"); + + // Now test without . + config.write_mmap = "test_nounk.binary"; + config.messages = NULL; + enumerate.Clear(); + { + ModelT copy_model(TestNoUnkLocation(), config); + enumerate.Check(copy_model.GetVocabulary()); + enumerate.Clear(); + NoUnkCheck(copy_model); + } + config.write_mmap = NULL; + { + ModelT binary(TestNoUnkLocation(), config); + enumerate.Check(binary.GetVocabulary()); + NoUnkCheck(binary); + } + unlink("test_nounk.binary"); +} + +BOOST_AUTO_TEST_CASE(write_and_read_probing) { + BinaryTest(); +} +BOOST_AUTO_TEST_CASE(write_and_read_rest_probing) { + BinaryTest(); +} +BOOST_AUTO_TEST_CASE(write_and_read_trie) { + BinaryTest(); +} +BOOST_AUTO_TEST_CASE(write_and_read_quant_trie) { + BinaryTest(); +} +BOOST_AUTO_TEST_CASE(write_and_read_array_trie) { + BinaryTest(); +} +BOOST_AUTO_TEST_CASE(write_and_read_quant_array_trie) { + BinaryTest(); +} + +BOOST_AUTO_TEST_CASE(rest_max) { + Config config; + config.arpa_complain = Config::NONE; + config.messages = NULL; + + RestProbingModel model(TestLocation(), config); + State state, out; + FullScoreReturn ret(model.FullScore(model.NullContextState(), model.GetVocabulary().Index("."), state)); + SLOPPY_CHECK_CLOSE(-0.2705918, ret.rest, 0.001); + SLOPPY_CHECK_CLOSE(-0.01916512, model.FullScore(state, model.GetVocabulary().EndSentence(), out).rest, 0.001); +} + +} // namespace +} // namespace ngram +} // namespace lm diff --git a/lm/model_type.hh b/lm/model_type.hh new file mode 100644 index 000000000..8b35c793a --- /dev/null +++ b/lm/model_type.hh @@ -0,0 +1,23 @@ +#ifndef LM_MODEL_TYPE__ +#define LM_MODEL_TYPE__ + +namespace lm { +namespace ngram { + +/* Not the best numbering system, but it grew this way for historical reasons + * and I want to preserve existing binary files. */ +typedef enum {PROBING=0, REST_PROBING=1, TRIE=2, QUANT_TRIE=3, ARRAY_TRIE=4, QUANT_ARRAY_TRIE=5} ModelType; + +// Historical names. +const ModelType HASH_PROBING = PROBING; +const ModelType TRIE_SORTED = TRIE; +const ModelType QUANT_TRIE_SORTED = QUANT_TRIE; +const ModelType ARRAY_TRIE_SORTED = ARRAY_TRIE; +const ModelType QUANT_ARRAY_TRIE_SORTED = QUANT_ARRAY_TRIE; + +const static ModelType kQuantAdd = static_cast(QUANT_TRIE - TRIE); +const static ModelType kArrayAdd = static_cast(ARRAY_TRIE - TRIE); + +} // namespace ngram +} // namespace lm +#endif // LM_MODEL_TYPE__ diff --git a/lm/ngram_query.cc b/lm/ngram_query.cc new file mode 100644 index 000000000..49757d9aa --- /dev/null +++ b/lm/ngram_query.cc @@ -0,0 +1,47 @@ +#include "lm/ngram_query.hh" + +int main(int argc, char *argv[]) { + if (!(argc == 2 || (argc == 3 && !strcmp(argv[2], "null")))) { + std::cerr << "Usage: " << argv[0] << " lm_file [null]" << std::endl; + std::cerr << "Input is wrapped in and unless null is passed." 
<< std::endl; + return 1; + } + try { + bool sentence_context = (argc == 2); + using namespace lm::ngram; + ModelType model_type; + if (RecognizeBinary(argv[1], model_type)) { + switch(model_type) { + case PROBING: + Query(argv[1], sentence_context, std::cin, std::cout); + break; + case REST_PROBING: + Query(argv[1], sentence_context, std::cin, std::cout); + break; + case TRIE: + Query(argv[1], sentence_context, std::cin, std::cout); + break; + case QUANT_TRIE: + Query(argv[1], sentence_context, std::cin, std::cout); + break; + case ARRAY_TRIE: + Query(argv[1], sentence_context, std::cin, std::cout); + break; + case QUANT_ARRAY_TRIE: + Query(argv[1], sentence_context, std::cin, std::cout); + break; + default: + std::cerr << "Unrecognized kenlm model type " << model_type << std::endl; + abort(); + } + } else { + Query(argv[1], sentence_context, std::cin, std::cout); + } + std::cerr << "Total time including destruction:\n"; + util::PrintUsage(std::cerr); + } catch (const std::exception &e) { + std::cerr << e.what() << std::endl; + return 1; + } + return 0; +} diff --git a/lm/ngram_query.hh b/lm/ngram_query.hh new file mode 100644 index 000000000..dfcda170e --- /dev/null +++ b/lm/ngram_query.hh @@ -0,0 +1,72 @@ +#ifndef LM_NGRAM_QUERY__ +#define LM_NGRAM_QUERY__ + +#include "lm/enumerate_vocab.hh" +#include "lm/model.hh" +#include "util/usage.hh" + +#include +#include +#include +#include +#include + +namespace lm { +namespace ngram { + +template void Query(const Model &model, bool sentence_context, std::istream &in_stream, std::ostream &out_stream) { + std::cerr << "Loading statistics:\n"; + util::PrintUsage(std::cerr); + typename Model::State state, out; + lm::FullScoreReturn ret; + std::string word; + + while (in_stream) { + state = sentence_context ? 
model.BeginSentenceState() : model.NullContextState(); + float total = 0.0; + bool got = false; + unsigned int oov = 0; + while (in_stream >> word) { + got = true; + lm::WordIndex vocab = model.GetVocabulary().Index(word); + if (vocab == 0) ++oov; + ret = model.FullScore(state, vocab, out); + total += ret.prob; + out_stream << word << '=' << vocab << ' ' << static_cast(ret.ngram_length) << ' ' << ret.prob << '\t'; + state = out; + char c; + while (true) { + c = in_stream.get(); + if (!in_stream) break; + if (c == '\n') break; + if (!isspace(c)) { + in_stream.unget(); + break; + } + } + if (c == '\n') break; + } + if (!got && !in_stream) break; + if (sentence_context) { + ret = model.FullScore(state, model.GetVocabulary().EndSentence(), out); + total += ret.prob; + out_stream << "=" << model.GetVocabulary().EndSentence() << ' ' << static_cast(ret.ngram_length) << ' ' << ret.prob << '\t'; + } + out_stream << "Total: " << total << " OOV: " << oov << '\n'; + } + std::cerr << "After queries:\n"; + util::PrintUsage(std::cerr); +} + +template void Query(const char *file, bool sentence_context, std::istream &in_stream, std::ostream &out_stream) { + Config config; + M model(file, config); + Query(model, sentence_context, in_stream, out_stream); +} + +} // namespace ngram +} // namespace lm + +#endif // LM_NGRAM_QUERY__ + + diff --git a/lm/partial.hh b/lm/partial.hh new file mode 100644 index 000000000..1dede359a --- /dev/null +++ b/lm/partial.hh @@ -0,0 +1,167 @@ +#ifndef LM_PARTIAL__ +#define LM_PARTIAL__ + +#include "lm/return.hh" +#include "lm/state.hh" + +#include + +#include + +namespace lm { +namespace ngram { + +struct ExtendReturn { + float adjust; + bool make_full; + unsigned char next_use; +}; + +template ExtendReturn ExtendLoop( + const Model &model, + unsigned char seen, const WordIndex *add_rbegin, const WordIndex *add_rend, const float *backoff_start, + const uint64_t *pointers, const uint64_t *pointers_end, + uint64_t *&pointers_write, + float *backoff_write) { + unsigned char add_length = add_rend - add_rbegin; + + float backoff_buf[2][KENLM_MAX_ORDER - 1]; + float *backoff_in = backoff_buf[0], *backoff_out = backoff_buf[1]; + std::copy(backoff_start, backoff_start + add_length, backoff_in); + + ExtendReturn value; + value.make_full = false; + value.adjust = 0.0; + value.next_use = add_length; + + unsigned char i = 0; + unsigned char length = pointers_end - pointers; + // pointers_write is NULL means that the existing left state is full, so we should use completed probabilities. + if (pointers_write) { + // Using full context, writing to new left state. + for (; i < length; ++i) { + FullScoreReturn ret(model.ExtendLeft( + add_rbegin, add_rbegin + value.next_use, + backoff_in, + pointers[i], i + seen + 1, + backoff_out, + value.next_use)); + std::swap(backoff_in, backoff_out); + if (ret.independent_left) { + value.adjust += ret.prob; + value.make_full = true; + ++i; + break; + } + value.adjust += ret.rest; + *pointers_write++ = ret.extend_left; + if (value.next_use != add_length) { + value.make_full = true; + ++i; + break; + } + } + } + // Using some of the new context. + for (; i < length && value.next_use; ++i) { + FullScoreReturn ret(model.ExtendLeft( + add_rbegin, add_rbegin + value.next_use, + backoff_in, + pointers[i], i + seen + 1, + backoff_out, + value.next_use)); + std::swap(backoff_in, backoff_out); + value.adjust += ret.prob; + } + float unrest = model.UnRest(pointers + i, pointers_end, i + seen + 1); + // Using none of the new context. 
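+  // (Third of three phases: after extensions that wrote new left-state
+  // pointers and extensions scored with completed probabilities, pointers
+  // that saw none of the new context get this rest-to-prob correction.)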
+ value.adjust += unrest; + + std::copy(backoff_in, backoff_in + value.next_use, backoff_write); + return value; +} + +template float RevealBefore(const Model &model, const Right &reveal, const unsigned char seen, bool reveal_full, Left &left, Right &right) { + assert(seen < reveal.length || reveal_full); + uint64_t *pointers_write = reveal_full ? NULL : left.pointers; + float backoff_buffer[KENLM_MAX_ORDER - 1]; + ExtendReturn value(ExtendLoop( + model, + seen, reveal.words + seen, reveal.words + reveal.length, reveal.backoff + seen, + left.pointers, left.pointers + left.length, + pointers_write, + left.full ? backoff_buffer : (right.backoff + right.length))); + if (reveal_full) { + left.length = 0; + value.make_full = true; + } else { + left.length = pointers_write - left.pointers; + value.make_full |= (left.length == model.Order() - 1); + } + if (left.full) { + for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += backoff_buffer[i]; + } else { + // If left wasn't full when it came in, put words into right state. + std::copy(reveal.words + seen, reveal.words + seen + value.next_use, right.words + right.length); + right.length += value.next_use; + left.full = value.make_full || (right.length == model.Order() - 1); + } + return value.adjust; +} + +template float RevealAfter(const Model &model, Left &left, Right &right, const Left &reveal, unsigned char seen) { + assert(seen < reveal.length || reveal.full); + uint64_t *pointers_write = left.full ? NULL : (left.pointers + left.length); + ExtendReturn value(ExtendLoop( + model, + seen, right.words, right.words + right.length, right.backoff, + reveal.pointers + seen, reveal.pointers + reveal.length, + pointers_write, + right.backoff)); + if (reveal.full) { + for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += right.backoff[i]; + right.length = 0; + value.make_full = true; + } else { + right.length = value.next_use; + value.make_full |= (right.length == model.Order() - 1); + } + if (!left.full) { + left.length = pointers_write - left.pointers; + left.full = value.make_full || (left.length == model.Order() - 1); + } + return value.adjust; +} + +template float Subsume(const Model &model, Left &first_left, const Right &first_right, const Left &second_left, Right &second_right, const unsigned int between_length) { + assert(first_right.length < KENLM_MAX_ORDER); + assert(second_left.length < KENLM_MAX_ORDER); + assert(between_length < KENLM_MAX_ORDER - 1); + uint64_t *pointers_write = first_left.full ? NULL : (first_left.pointers + first_left.length); + float backoff_buffer[KENLM_MAX_ORDER - 1]; + ExtendReturn value(ExtendLoop( + model, + between_length, first_right.words, first_right.words + first_right.length, first_right.backoff, + second_left.pointers, second_left.pointers + second_left.length, + pointers_write, + second_left.full ? 
backoff_buffer : (second_right.backoff + second_right.length))); + if (second_left.full) { + for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += backoff_buffer[i]; + } else { + std::copy(first_right.words, first_right.words + value.next_use, second_right.words + second_right.length); + second_right.length += value.next_use; + value.make_full |= (second_right.length == model.Order() - 1); + } + if (!first_left.full) { + first_left.length = pointers_write - first_left.pointers; + first_left.full = value.make_full || second_left.full || (first_left.length == model.Order() - 1); + } + assert(first_left.length < KENLM_MAX_ORDER); + assert(second_right.length < KENLM_MAX_ORDER); + return value.adjust; +} + +} // namespace ngram +} // namespace lm + +#endif // LM_PARTIAL__ diff --git a/lm/partial_test.cc b/lm/partial_test.cc new file mode 100644 index 000000000..8d309c85a --- /dev/null +++ b/lm/partial_test.cc @@ -0,0 +1,199 @@ +#include "lm/partial.hh" + +#include "lm/left.hh" +#include "lm/model.hh" +#include "util/tokenize_piece.hh" + +#define BOOST_TEST_MODULE PartialTest +#include +#include + +namespace lm { +namespace ngram { +namespace { + +const char *TestLocation() { + if (boost::unit_test::framework::master_test_suite().argc < 2) { + return "test.arpa"; + } + return boost::unit_test::framework::master_test_suite().argv[1]; +} + +Config SilentConfig() { + Config config; + config.arpa_complain = Config::NONE; + config.messages = NULL; + return config; +} + +struct ModelFixture { + ModelFixture() : m(TestLocation(), SilentConfig()) {} + + RestProbingModel m; +}; + +BOOST_FIXTURE_TEST_SUITE(suite, ModelFixture) + +BOOST_AUTO_TEST_CASE(SimpleBefore) { + Left left; + left.full = false; + left.length = 0; + Right right; + right.length = 0; + + Right reveal; + reveal.length = 1; + WordIndex period = m.GetVocabulary().Index("."); + reveal.words[0] = period; + reveal.backoff[0] = -0.845098; + + BOOST_CHECK_CLOSE(0.0, RevealBefore(m, reveal, 0, false, left, right), 0.001); + BOOST_CHECK_EQUAL(0, left.length); + BOOST_CHECK(!left.full); + BOOST_CHECK_EQUAL(1, right.length); + BOOST_CHECK_EQUAL(period, right.words[0]); + BOOST_CHECK_CLOSE(-0.845098, right.backoff[0], 0.001); + + WordIndex more = m.GetVocabulary().Index("more"); + reveal.words[1] = more; + reveal.backoff[1] = -0.4771212; + reveal.length = 2; + BOOST_CHECK_CLOSE(0.0, RevealBefore(m, reveal, 1, false, left, right), 0.001); + BOOST_CHECK_EQUAL(0, left.length); + BOOST_CHECK(!left.full); + BOOST_CHECK_EQUAL(2, right.length); + BOOST_CHECK_EQUAL(period, right.words[0]); + BOOST_CHECK_EQUAL(more, right.words[1]); + BOOST_CHECK_CLOSE(-0.845098, right.backoff[0], 0.001); + BOOST_CHECK_CLOSE(-0.4771212, right.backoff[1], 0.001); +} + +BOOST_AUTO_TEST_CASE(AlsoWouldConsider) { + WordIndex would = m.GetVocabulary().Index("would"); + WordIndex consider = m.GetVocabulary().Index("consider"); + + ChartState current; + current.left.length = 1; + current.left.pointers[0] = would; + current.left.full = false; + current.right.length = 1; + current.right.words[0] = would; + current.right.backoff[0] = -0.30103; + + Left after; + after.full = false; + after.length = 1; + after.pointers[0] = consider; + + // adjustment for would consider + BOOST_CHECK_CLOSE(-1.687872 - -0.2922095 - 0.30103, RevealAfter(m, current.left, current.right, after, 0), 0.001); + + BOOST_CHECK_EQUAL(2, current.left.length); + BOOST_CHECK_EQUAL(would, current.left.pointers[0]); + BOOST_CHECK_EQUAL(false, current.left.full); + + WordIndex also = 
m.GetVocabulary().Index("also"); + Right before; + before.length = 1; + before.words[0] = also; + before.backoff[0] = -0.30103; + // r(would) = -0.2922095 [i would], r(would -> consider) = -1.988902 [b(would) + p(consider)] + // p(also -> would) = -2, p(also would -> consider) = -3 + BOOST_CHECK_CLOSE(-2 + 0.2922095 -3 + 1.988902, RevealBefore(m, before, 0, false, current.left, current.right), 0.001); + BOOST_CHECK_EQUAL(0, current.left.length); + BOOST_CHECK(current.left.full); + BOOST_CHECK_EQUAL(2, current.right.length); + BOOST_CHECK_EQUAL(would, current.right.words[0]); + BOOST_CHECK_EQUAL(also, current.right.words[1]); +} + +BOOST_AUTO_TEST_CASE(EndSentence) { + WordIndex loin = m.GetVocabulary().Index("loin"); + WordIndex period = m.GetVocabulary().Index("."); + WordIndex eos = m.GetVocabulary().EndSentence(); + + ChartState between; + between.left.length = 1; + between.left.pointers[0] = eos; + between.left.full = true; + between.right.length = 0; + + Right before; + before.words[0] = period; + before.words[1] = loin; + before.backoff[0] = -0.845098; + before.backoff[1] = 0.0; + + before.length = 1; + BOOST_CHECK_CLOSE(-0.0410707, RevealBefore(m, before, 0, true, between.left, between.right), 0.001); + BOOST_CHECK_EQUAL(0, between.left.length); +} + +float ScoreFragment(const RestProbingModel &model, unsigned int *begin, unsigned int *end, ChartState &out) { + RuleScore scorer(model, out); + for (unsigned int *i = begin; i < end; ++i) { + scorer.Terminal(*i); + } + return scorer.Finish(); +} + +void CheckAdjustment(const RestProbingModel &model, float expect, const Right &before_in, bool before_full, ChartState between, const Left &after_in) { + Right before(before_in); + Left after(after_in); + after.full = false; + float got = 0.0; + for (unsigned int i = 1; i < 5; ++i) { + if (before_in.length >= i) { + before.length = i; + got += RevealBefore(model, before, i - 1, false, between.left, between.right); + } + if (after_in.length >= i) { + after.length = i; + got += RevealAfter(model, between.left, between.right, after, i - 1); + } + } + if (after_in.full) { + after.full = true; + got += RevealAfter(model, between.left, between.right, after, after.length); + } + if (before_full) { + got += RevealBefore(model, before, before.length, true, between.left, between.right); + } + // Sometimes they're zero and BOOST_CHECK_CLOSE fails for this. 
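+ // BOOST_CHECK_CLOSE uses a relative (percentage) tolerance, which is undefined when the expected value is zero, so compare with an absolute tolerance instead.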
+ BOOST_CHECK(fabs(expect - got) < 0.001); +} + +void FullDivide(const RestProbingModel &model, StringPiece str) { + std::vector indices; + for (util::TokenIter i(str, ' '); i; ++i) { + indices.push_back(model.GetVocabulary().Index(*i)); + } + ChartState full_state; + float full = ScoreFragment(model, &indices.front(), &indices.back() + 1, full_state); + + ChartState before_state; + before_state.left.full = false; + RuleScore before_scorer(model, before_state); + float before_score = 0.0; + for (unsigned int before = 0; before < indices.size(); ++before) { + for (unsigned int after = before; after <= indices.size(); ++after) { + ChartState after_state, between_state; + float after_score = ScoreFragment(model, &indices.front() + after, &indices.front() + indices.size(), after_state); + float between_score = ScoreFragment(model, &indices.front() + before, &indices.front() + after, between_state); + CheckAdjustment(model, full - before_score - after_score - between_score, before_state.right, before_state.left.full, between_state, after_state.left); + } + before_scorer.Terminal(indices[before]); + before_score = before_scorer.Finish(); + } +} + +BOOST_AUTO_TEST_CASE(Strings) { + FullDivide(m, "also would consider"); + FullDivide(m, "looking on a little more loin . "); + FullDivide(m, "in biarritz watching considering looking . on a little more loin also would consider higher to look good unknown the screening foo bar , unknown however unknown "); +} + +BOOST_AUTO_TEST_SUITE_END() +} // namespace +} // namespace ngram +} // namespace lm diff --git a/lm/quantize.cc b/lm/quantize.cc new file mode 100644 index 000000000..b58c3f3f6 --- /dev/null +++ b/lm/quantize.cc @@ -0,0 +1,93 @@ +/* Quantize into bins of equal size as described in + * M. Federico and N. Bertoldi. 2006. How many bits are needed + * to store probabilities for phrase-based translation? In Proc. + * of the Workshop on Statistical Machine Translation, pages + * 94–101, New York City, June. Association for Computa- + * tional Linguistics. + */ + +#include "lm/quantize.hh" + +#include "lm/binary_format.hh" +#include "lm/lm_exception.hh" +#include "util/file.hh" + +#include +#include + +namespace lm { +namespace ngram { + +namespace { + +void MakeBins(std::vector &values, float *centers, uint32_t bins) { + std::sort(values.begin(), values.end()); + std::vector::const_iterator start = values.begin(), finish; + for (uint32_t i = 0; i < bins; ++i, ++centers, start = finish) { + finish = values.begin() + ((values.size() * static_cast(i + 1)) / bins); + if (finish == start) { + // zero length bucket. + *centers = i ? *(centers - 1) : -std::numeric_limits::infinity(); + } else { + *centers = std::accumulate(start, finish, 0.0) / static_cast(finish - start); + } + } +} + +const char kSeparatelyQuantizeVersion = 2; + +} // namespace + +void SeparatelyQuantize::UpdateConfigFromBinary(int fd, const std::vector &/*counts*/, Config &config) { + char version; + util::ReadOrThrow(fd, &version, 1); + util::ReadOrThrow(fd, &config.prob_bits, 1); + util::ReadOrThrow(fd, &config.backoff_bits, 1); + if (version != kSeparatelyQuantizeVersion) UTIL_THROW(FormatLoadException, "This file has quantization version " << (unsigned)version << " but the code expects version " << (unsigned)kSeparatelyQuantizeVersion); + util::AdvanceOrThrow(fd, -3); +} + +void SeparatelyQuantize::SetupMemory(void *base, unsigned char order, const Config &config) { + prob_bits_ = config.prob_bits; + backoff_bits_ = config.backoff_bits; + // We need the reserved values. 
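+ // Backoff codes 0 and 1 are reserved for kNoExtensionBackoff and kExtensionBackoff (written first by Train), so each table needs at least one bit.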
+ if (config.prob_bits == 0) UTIL_THROW(ConfigException, "You can't quantize probability to zero");
+ if (config.backoff_bits == 0) UTIL_THROW(ConfigException, "You can't quantize backoff to zero");
+ if (config.prob_bits > 25) UTIL_THROW(ConfigException, "For efficiency reasons, quantizing probability supports at most 25 bits. Currently you have requested " << static_cast<unsigned>(config.prob_bits) << " bits.");
+ if (config.backoff_bits > 25) UTIL_THROW(ConfigException, "For efficiency reasons, quantizing backoff supports at most 25 bits. Currently you have requested " << static_cast<unsigned>(config.backoff_bits) << " bits.");
+ // Reserve 8 byte header for bit counts.
+ actual_base_ = static_cast<uint8_t*>(base);
+ float *start = reinterpret_cast<float*>(actual_base_ + 8);
+ for (unsigned char i = 0; i < order - 2; ++i) {
+ tables_[i][0] = Bins(prob_bits_, start);
+ start += (1ULL << prob_bits_);
+ tables_[i][1] = Bins(backoff_bits_, start);
+ start += (1ULL << backoff_bits_);
+ }
+ longest_ = tables_[order - 2][0] = Bins(prob_bits_, start);
+}
+
+void SeparatelyQuantize::Train(uint8_t order, std::vector<float> &prob, std::vector<float> &backoff) {
+ TrainProb(order, prob);
+
+ // Backoff
+ float *centers = tables_[order - 2][1].Populate();
+ *(centers++) = kNoExtensionBackoff;
+ *(centers++) = kExtensionBackoff;
+ MakeBins(backoff, centers, (1ULL << backoff_bits_) - 2);
+}
+
+void SeparatelyQuantize::TrainProb(uint8_t order, std::vector<float> &prob) {
+ float *centers = tables_[order - 2][0].Populate();
+ MakeBins(prob, centers, (1ULL << prob_bits_));
+}
+
+void SeparatelyQuantize::FinishedLoading(const Config &config) {
+ uint8_t *actual_base = actual_base_;
+ *(actual_base++) = kSeparatelyQuantizeVersion; // version
+ *(actual_base++) = config.prob_bits;
+ *(actual_base++) = config.backoff_bits;
+}
+
+} // namespace ngram
+} // namespace lm
diff --git a/lm/quantize.hh b/lm/quantize.hh
new file mode 100644
index 000000000..8ce2378a7
--- /dev/null
+++ b/lm/quantize.hh
@@ -0,0 +1,232 @@
+#ifndef LM_QUANTIZE_H__
+#define LM_QUANTIZE_H__
+
+#include "lm/blank.hh"
+#include "lm/config.hh"
+#include "lm/max_order.hh"
+#include "lm/model_type.hh"
+#include "util/bit_packing.hh"
+
+#include <algorithm>
+#include <vector>
+
+#include <stdint.h>
+
+#include <iostream>
+
+namespace lm {
+namespace ngram {
+
+struct Config;
+
+/* Store values directly and don't quantize.
*/ +class DontQuantize { + public: + static const ModelType kModelTypeAdd = static_cast(0); + static void UpdateConfigFromBinary(int, const std::vector &, Config &) {} + static uint64_t Size(uint8_t /*order*/, const Config &/*config*/) { return 0; } + static uint8_t MiddleBits(const Config &/*config*/) { return 63; } + static uint8_t LongestBits(const Config &/*config*/) { return 31; } + + class MiddlePointer { + public: + MiddlePointer(const DontQuantize & /*quant*/, unsigned char /*order_minus_2*/, util::BitAddress address) : address_(address) {} + + MiddlePointer() : address_(NULL, 0) {} + + bool Found() const { + return address_.base != NULL; + } + + float Prob() const { + return util::ReadNonPositiveFloat31(address_.base, address_.offset); + } + + float Backoff() const { + return util::ReadFloat32(address_.base, address_.offset + 31); + } + + float Rest() const { return Prob(); } + + void Write(float prob, float backoff) { + util::WriteNonPositiveFloat31(address_.base, address_.offset, prob); + util::WriteFloat32(address_.base, address_.offset + 31, backoff); + } + + private: + util::BitAddress address_; + }; + + class LongestPointer { + public: + explicit LongestPointer(const DontQuantize &/*quant*/, util::BitAddress address) : address_(address) {} + + LongestPointer() : address_(NULL, 0) {} + + bool Found() const { + return address_.base != NULL; + } + + float Prob() const { + return util::ReadNonPositiveFloat31(address_.base, address_.offset); + } + + void Write(float prob) { + util::WriteNonPositiveFloat31(address_.base, address_.offset, prob); + } + + private: + util::BitAddress address_; + }; + + DontQuantize() {} + + void SetupMemory(void * /*start*/, unsigned char /*order*/, const Config & /*config*/) {} + + static const bool kTrain = false; + // These should never be called because kTrain is false. + void Train(uint8_t /*order*/, std::vector &/*prob*/, std::vector &/*backoff*/) {} + void TrainProb(uint8_t, std::vector &/*prob*/) {} + + void FinishedLoading(const Config &) {} +}; + +class SeparatelyQuantize { + private: + class Bins { + public: + // Sigh C++ default constructor + Bins() {} + + Bins(uint8_t bits, float *begin) : begin_(begin), end_(begin_ + (1ULL << bits)), bits_(bits), mask_((1ULL << bits) - 1) {} + + float *Populate() { return begin_; } + + uint64_t EncodeProb(float value) const { + return Encode(value, 0); + } + + uint64_t EncodeBackoff(float value) const { + if (value == 0.0) { + return HasExtension(value) ? 
kExtensionQuant : kNoExtensionQuant; + } + return Encode(value, 2); + } + + float Decode(std::size_t off) const { return begin_[off]; } + + uint8_t Bits() const { return bits_; } + + uint64_t Mask() const { return mask_; } + + private: + uint64_t Encode(float value, size_t reserved) const { + const float *above = std::lower_bound(static_cast(begin_) + reserved, end_, value); + if (above == begin_ + reserved) return reserved; + if (above == end_) return end_ - begin_ - 1; + return above - begin_ - (value - *(above - 1) < *above - value); + } + + float *begin_; + const float *end_; + uint8_t bits_; + uint64_t mask_; + }; + + public: + static const ModelType kModelTypeAdd = kQuantAdd; + + static void UpdateConfigFromBinary(int fd, const std::vector &counts, Config &config); + + static uint64_t Size(uint8_t order, const Config &config) { + uint64_t longest_table = (static_cast(1) << static_cast(config.prob_bits)) * sizeof(float); + uint64_t middle_table = (static_cast(1) << static_cast(config.backoff_bits)) * sizeof(float) + longest_table; + // unigrams are currently not quantized so no need for a table. + return (order - 2) * middle_table + longest_table + /* for the bit counts and alignment padding) */ 8; + } + + static uint8_t MiddleBits(const Config &config) { return config.prob_bits + config.backoff_bits; } + static uint8_t LongestBits(const Config &config) { return config.prob_bits; } + + class MiddlePointer { + public: + MiddlePointer(const SeparatelyQuantize &quant, unsigned char order_minus_2, const util::BitAddress &address) : bins_(quant.GetTables(order_minus_2)), address_(address) {} + + MiddlePointer() : address_(NULL, 0) {} + + bool Found() const { return address_.base != NULL; } + + float Prob() const { + return ProbBins().Decode(util::ReadInt25(address_.base, address_.offset + BackoffBins().Bits(), ProbBins().Bits(), ProbBins().Mask())); + } + + float Backoff() const { + return BackoffBins().Decode(util::ReadInt25(address_.base, address_.offset, BackoffBins().Bits(), BackoffBins().Mask())); + } + + float Rest() const { return Prob(); } + + void Write(float prob, float backoff) const { + util::WriteInt57(address_.base, address_.offset, ProbBins().Bits() + BackoffBins().Bits(), + (ProbBins().EncodeProb(prob) << BackoffBins().Bits()) | BackoffBins().EncodeBackoff(backoff)); + } + + private: + const Bins &ProbBins() const { return bins_[0]; } + const Bins &BackoffBins() const { return bins_[1]; } + const Bins *bins_; + + util::BitAddress address_; + }; + + class LongestPointer { + public: + LongestPointer(const SeparatelyQuantize &quant, const util::BitAddress &address) : table_(&quant.LongestTable()), address_(address) {} + + LongestPointer() : address_(NULL, 0) {} + + bool Found() const { return address_.base != NULL; } + + void Write(float prob) const { + util::WriteInt25(address_.base, address_.offset, table_->Bits(), table_->EncodeProb(prob)); + } + + float Prob() const { + return table_->Decode(util::ReadInt25(address_.base, address_.offset, table_->Bits(), table_->Mask())); + } + + private: + const Bins *table_; + util::BitAddress address_; + }; + + SeparatelyQuantize() {} + + void SetupMemory(void *start, unsigned char order, const Config &config); + + static const bool kTrain = true; + // Assumes 0.0 is removed from backoff. + void Train(uint8_t order, std::vector &prob, std::vector &backoff); + // Train just probabilities (for longest order). 
+ void TrainProb(uint8_t order, std::vector &prob); + + void FinishedLoading(const Config &config); + + const Bins *GetTables(unsigned char order_minus_2) const { return tables_[order_minus_2]; } + + const Bins &LongestTable() const { return longest_; } + + private: + Bins tables_[KENLM_MAX_ORDER - 1][2]; + + Bins longest_; + + uint8_t *actual_base_; + + uint8_t prob_bits_, backoff_bits_; +}; + +} // namespace ngram +} // namespace lm + +#endif // LM_QUANTIZE_H__ diff --git a/lm/read_arpa.cc b/lm/read_arpa.cc new file mode 100644 index 000000000..b709fef94 --- /dev/null +++ b/lm/read_arpa.cc @@ -0,0 +1,154 @@ +#include "lm/read_arpa.hh" + +#include "lm/blank.hh" + +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef WIN32 +#include +#endif + +namespace lm { + +// 1 for '\t', '\n', and ' '. This is stricter than isspace. +const bool kARPASpaces[256] = {0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + +namespace { + +bool IsEntirelyWhiteSpace(const StringPiece &line) { + for (size_t i = 0; i < static_cast(line.size()); ++i) { + if (!isspace(line.data()[i])) return false; + } + return true; +} + +const char kBinaryMagic[] = "mmap lm http://kheafield.com/code"; + +// strtoull isn't portable enough :-( +uint64_t ReadCount(const std::string &from) { + std::stringstream stream(from); + uint64_t ret; + stream >> ret; + UTIL_THROW_IF(!stream, FormatLoadException, "Bad count " << from); + return ret; +} + +} // namespace + +void ReadARPACounts(util::FilePiece &in, std::vector &number) { + number.clear(); + StringPiece line; + while (IsEntirelyWhiteSpace(line = in.ReadLine())) {} + if (line != "\\data\\") { + if ((line.size() >= 2) && (line.data()[0] == 0x1f) && (static_cast(line.data()[1]) == 0x8b)) { + UTIL_THROW(FormatLoadException, "Looks like a gzip file. If this is an ARPA file, pipe " << in.FileName() << " through zcat. If this already in binary format, you need to decompress it because mmap doesn't work on top of gzip."); + } + if (static_cast(line.size()) >= strlen(kBinaryMagic) && StringPiece(line.data(), strlen(kBinaryMagic)) == kBinaryMagic) + UTIL_THROW(FormatLoadException, "This looks like a binary file but got sent to the ARPA parser. Did you compress the binary file or pass a binary file where only ARPA files are accepted?"); + UTIL_THROW_IF(line.size() >= 4 && StringPiece(line.data(), 4) == "blmt", FormatLoadException, "This looks like an IRSTLM binary file. Did you forget to pass --text yes to compile-lm?"); + UTIL_THROW_IF(line == "iARPA", FormatLoadException, "This looks like an IRSTLM iARPA file. You need an ARPA file. Run\n compile-lm --text yes " << in.FileName() << " " << in.FileName() << ".arpa\nfirst."); + UTIL_THROW(FormatLoadException, "first non-empty line was \"" << line << "\" not \\data\\."); + } + while (!IsEntirelyWhiteSpace(line = in.ReadLine())) { + if (line.size() < 6 || strncmp(line.data(), "ngram ", 6)) UTIL_THROW(FormatLoadException, "count line \"" << line << "\"doesn't begin with \"ngram \""); + // So strtol doesn't go off the end of line. 
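+ // line is a StringPiece into the mmapped file, so it is not guaranteed to be NUL-terminated; copy the tail into a std::string first.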
+ std::string remaining(line.data() + 6, line.size() - 6);
+ char *end_ptr;
+ unsigned int length = std::strtol(remaining.c_str(), &end_ptr, 10);
+ if ((end_ptr == remaining.c_str()) || (length - 1 != number.size())) UTIL_THROW(FormatLoadException, "ngram count lengths should be consecutive starting with 1: " << line);
+ if (*end_ptr != '=') UTIL_THROW(FormatLoadException, "Expected = immediately following the first number in the count line " << line);
+ ++end_ptr;
+ number.push_back(ReadCount(end_ptr));
+ }
+}
+
+void ReadNGramHeader(util::FilePiece &in, unsigned int length) {
+ StringPiece line;
+ while (IsEntirelyWhiteSpace(line = in.ReadLine())) {}
+ std::stringstream expected;
+ expected << '\\' << length << "-grams:";
+ if (line != expected.str()) UTIL_THROW(FormatLoadException, "Was expecting n-gram header " << expected.str() << " but got " << line << " instead");
+}
+
+void ReadBackoff(util::FilePiece &in, Prob &/*weights*/) {
+ switch (in.get()) {
+ case '\t':
+ {
+ float got = in.ReadFloat();
+ if (got != 0.0)
+ UTIL_THROW(FormatLoadException, "Non-zero backoff " << got << " provided for an n-gram that should have no backoff");
+ }
+ break;
+ case '\n':
+ break;
+ default:
+ UTIL_THROW(FormatLoadException, "Expected tab or newline for backoff");
+ }
+}
+
+void ReadBackoff(util::FilePiece &in, float &backoff) {
+ // Always make zero negative.
+ // Negative zero means that no (n+1)-gram has this n-gram as context.
+ // Therefore the hypothesis state can be shorter. Of course, many n-grams
+ // are context for (n+1)-grams. An algorithm in the data structure will go
+ // back and set the backoff to positive zero in these cases.
+ switch (in.get()) {
+ case '\t':
+ backoff = in.ReadFloat();
+ if (backoff == ngram::kExtensionBackoff) backoff = ngram::kNoExtensionBackoff;
+ {
+#ifdef WIN32
+ int float_class = _fpclass(backoff);
+ UTIL_THROW_IF(float_class == _FPCLASS_SNAN || float_class == _FPCLASS_QNAN || float_class == _FPCLASS_NINF || float_class == _FPCLASS_PINF, FormatLoadException, "Bad backoff " << backoff);
+#else
+ int float_class = std::fpclassify(backoff);
+ UTIL_THROW_IF(float_class == FP_NAN || float_class == FP_INFINITE, FormatLoadException, "Bad backoff " << backoff);
+#endif
+ }
+ UTIL_THROW_IF(in.get() != '\n', FormatLoadException, "Expected newline after backoff");
+ break;
+ case '\n':
+ backoff = ngram::kNoExtensionBackoff;
+ break;
+ default:
+ UTIL_THROW(FormatLoadException, "Expected tab or newline for backoff");
+ }
+}
+
+void ReadEnd(util::FilePiece &in) {
+ StringPiece line;
+ do {
+ line = in.ReadLine();
+ } while (IsEntirelyWhiteSpace(line));
+ if (line != "\\end\\") UTIL_THROW(FormatLoadException, "Expected \\end\\ but the ARPA file has " << line);
+
+ try {
+ while (true) {
+ line = in.ReadLine();
+ if (!IsEntirelyWhiteSpace(line)) UTIL_THROW(FormatLoadException, "Trailing line " << line);
+ }
+ } catch (const util::EndOfFileException &e) {}
+}
+
+void PositiveProbWarn::Warn(float prob) {
+ switch (action_) {
+ case THROW_UP:
+ UTIL_THROW(FormatLoadException, "Positive log probability " << prob << " in the model. This is a bug in IRSTLM; you can set config.positive_log_probability = SILENT or pass -i to build_binary to substitute 0.0 for the log probability. Error");
+ case COMPLAIN:
+ std::cerr << "There's a positive log probability " << prob << " in the ARPA file, probably because of a bug in IRSTLM. This and subsequent entries will be mapped to 0 log probability."
<< std::endl; + action_ = SILENT; + break; + case SILENT: + break; + } +} + +} // namespace lm diff --git a/lm/read_arpa.hh b/lm/read_arpa.hh new file mode 100644 index 000000000..234d130c2 --- /dev/null +++ b/lm/read_arpa.hh @@ -0,0 +1,90 @@ +#ifndef LM_READ_ARPA__ +#define LM_READ_ARPA__ + +#include "lm/lm_exception.hh" +#include "lm/word_index.hh" +#include "lm/weights.hh" +#include "util/file_piece.hh" + +#include +#include +#include + +namespace lm { + +void ReadARPACounts(util::FilePiece &in, std::vector &number); +void ReadNGramHeader(util::FilePiece &in, unsigned int length); + +void ReadBackoff(util::FilePiece &in, Prob &weights); +void ReadBackoff(util::FilePiece &in, float &backoff); +inline void ReadBackoff(util::FilePiece &in, ProbBackoff &weights) { + ReadBackoff(in, weights.backoff); +} +inline void ReadBackoff(util::FilePiece &in, RestWeights &weights) { + ReadBackoff(in, weights.backoff); +} + +void ReadEnd(util::FilePiece &in); + +extern const bool kARPASpaces[256]; + +// Positive log probability warning. +class PositiveProbWarn { + public: + PositiveProbWarn() : action_(THROW_UP) {} + + explicit PositiveProbWarn(WarningAction action) : action_(action) {} + + void Warn(float prob); + + private: + WarningAction action_; +}; + +template void Read1Gram(util::FilePiece &f, Voc &vocab, Weights *unigrams, PositiveProbWarn &warn) { + try { + float prob = f.ReadFloat(); + if (prob > 0.0) { + warn.Warn(prob); + prob = 0.0; + } + if (f.get() != '\t') UTIL_THROW(FormatLoadException, "Expected tab after probability"); + Weights &value = unigrams[vocab.Insert(f.ReadDelimited(kARPASpaces))]; + value.prob = prob; + ReadBackoff(f, value); + } catch(util::Exception &e) { + e << " in the 1-gram at byte " << f.Offset(); + throw; + } +} + +// Return true if a positive log probability came out. +template void Read1Grams(util::FilePiece &f, std::size_t count, Voc &vocab, Weights *unigrams, PositiveProbWarn &warn) { + ReadNGramHeader(f, 1); + for (std::size_t i = 0; i < count; ++i) { + Read1Gram(f, vocab, unigrams, warn); + } + vocab.FinishedLoading(unigrams); +} + +// Return true if a positive log probability came out. +template void ReadNGram(util::FilePiece &f, const unsigned char n, const Voc &vocab, WordIndex *const reverse_indices, Weights &weights, PositiveProbWarn &warn) { + try { + weights.prob = f.ReadFloat(); + if (weights.prob > 0.0) { + warn.Warn(weights.prob); + weights.prob = 0.0; + } + for (WordIndex *vocab_out = reverse_indices + n - 1; vocab_out >= reverse_indices; --vocab_out) { + *vocab_out = vocab.Index(f.ReadDelimited(kARPASpaces)); + } + ReadBackoff(f, weights); + } catch(util::Exception &e) { + e << " in the " << static_cast(n) << "-gram at byte " << f.Offset(); + throw; + } +} + +} // namespace lm + +#endif // LM_READ_ARPA__ diff --git a/lm/return.hh b/lm/return.hh new file mode 100644 index 000000000..622320ce1 --- /dev/null +++ b/lm/return.hh @@ -0,0 +1,42 @@ +#ifndef LM_RETURN__ +#define LM_RETURN__ + +#include + +namespace lm { +/* Structure returned by scoring routines. */ +struct FullScoreReturn { + // log10 probability + float prob; + + /* The length of n-gram matched. Do not use this for recombination. + * Consider a model containing only the following n-grams: + * -1 foo + * -3.14 bar + * -2.718 baz -5 + * -6 foo bar + * + * If you score ``bar'' then ngram_length is 1 and recombination state is the + * empty string because bar has zero backoff and does not extend to the + * right. 
+ * If you score ``foo'' then ngram_length is 1 and recombination state is + * ``foo''. + * + * Ideally, keep output states around and compare them. Failing that, + * get out_state.ValidLength() and use that length for recombination. + */ + unsigned char ngram_length; + + /* Left extension information. If independent_left is set, then prob is + * independent of words to the left (up to additional backoff). Otherwise, + * extend_left indicates how to efficiently extend further to the left. + */ + bool independent_left; + uint64_t extend_left; // Defined only if independent_left + + // Rest cost for extension to the left. + float rest; +}; + +} // namespace lm +#endif // LM_RETURN__ diff --git a/lm/search_hashed.cc b/lm/search_hashed.cc new file mode 100644 index 000000000..a16238342 --- /dev/null +++ b/lm/search_hashed.cc @@ -0,0 +1,294 @@ +#include "lm/search_hashed.hh" + +#include "lm/binary_format.hh" +#include "lm/blank.hh" +#include "lm/lm_exception.hh" +#include "lm/model.hh" +#include "lm/read_arpa.hh" +#include "lm/value.hh" +#include "lm/vocab.hh" + +#include "util/bit_packing.hh" +#include "util/file_piece.hh" + +#include + +namespace lm { +namespace ngram { + +class ProbingModel; + +namespace { + +/* These are passed to ReadNGrams so that n-grams with zero backoff that appear as context will still be used in state. */ +template class ActivateLowerMiddle { + public: + explicit ActivateLowerMiddle(Middle &middle) : modify_(middle) {} + + void operator()(const WordIndex *vocab_ids, const unsigned int n) { + uint64_t hash = static_cast(vocab_ids[1]); + for (const WordIndex *i = vocab_ids + 2; i < vocab_ids + n; ++i) { + hash = detail::CombineWordHash(hash, *i); + } + typename Middle::MutableIterator i; + // TODO: somehow get text of n-gram for this error message. + if (!modify_.UnsafeMutableFind(hash, i)) + UTIL_THROW(FormatLoadException, "The context of every " << n << "-gram should appear as a " << (n-1) << "-gram"); + SetExtension(i->value.backoff); + } + + private: + Middle &modify_; +}; + +template class ActivateUnigram { + public: + explicit ActivateUnigram(Weights *unigram) : modify_(unigram) {} + + void operator()(const WordIndex *vocab_ids, const unsigned int /*n*/) { + // assert(n == 2); + SetExtension(modify_[vocab_ids[1]].backoff); + } + + private: + Weights *modify_; +}; + +// Find the lower order entry, inserting blanks along the way as necessary. +template void FindLower( + const std::vector &keys, + typename Value::Weights &unigram, + std::vector > &middle, + std::vector &between) { + typename util::ProbingHashTable::MutableIterator iter; + typename Value::ProbingEntry entry; + // Backoff will always be 0.0. We'll get the probability and rest in another pass. + entry.value.backoff = kNoExtensionBackoff; + // Go back and find the longest right-aligned entry, informing it that it extends left. Normally this will match immediately, but sometimes SRI is dumb. + for (int lower = keys.size() - 2; ; --lower) { + if (lower == -1) { + between.push_back(&unigram); + return; + } + entry.key = keys[lower]; + bool found = middle[lower].FindOrInsert(entry, iter); + between.push_back(&iter->value); + if (found) return; + } +} + +// Between usually has single entry, the value to adjust. But sometimes SRI stupidly pruned entries so it has unitialized blank values to be set here. 
+template void AdjustLower( + const Added &added, + const Build &build, + std::vector &between, + const unsigned int n, + const std::vector &vocab_ids, + typename Build::Value::Weights *unigrams, + std::vector > &middle) { + typedef typename Build::Value Value; + if (between.size() == 1) { + build.MarkExtends(*between.front(), added); + return; + } + typedef util::ProbingHashTable Middle; + float prob = -fabs(between.back()->prob); + // Order of the n-gram on which probabilities are based. + unsigned char basis = n - between.size(); + assert(basis != 0); + typename Build::Value::Weights **change = &between.back(); + // Skip the basis. + --change; + if (basis == 1) { + // Hallucinate a bigram based on a unigram's backoff and a unigram probability. + float &backoff = unigrams[vocab_ids[1]].backoff; + SetExtension(backoff); + prob += backoff; + (*change)->prob = prob; + build.SetRest(&*vocab_ids.begin(), 2, **change); + basis = 2; + --change; + } + uint64_t backoff_hash = static_cast(vocab_ids[1]); + for (unsigned char i = 2; i <= basis; ++i) { + backoff_hash = detail::CombineWordHash(backoff_hash, vocab_ids[i]); + } + for (; basis < n - 1; ++basis, --change) { + typename Middle::MutableIterator gotit; + if (middle[basis - 2].UnsafeMutableFind(backoff_hash, gotit)) { + float &backoff = gotit->value.backoff; + SetExtension(backoff); + prob += backoff; + } + (*change)->prob = prob; + build.SetRest(&*vocab_ids.begin(), basis + 1, **change); + backoff_hash = detail::CombineWordHash(backoff_hash, vocab_ids[basis+1]); + } + + typename std::vector::const_iterator i(between.begin()); + build.MarkExtends(**i, added); + const typename Value::Weights *longer = *i; + // Everything has probability but is not marked as extending. + for (++i; i != between.end(); ++i) { + build.MarkExtends(**i, *longer); + longer = *i; + } +} + +// Continue marking lower entries even they know that they extend left. This is used for upper/lower bounds. +template void MarkLower( + const std::vector &keys, + const Build &build, + typename Build::Value::Weights &unigram, + std::vector > &middle, + int start_order, + const typename Build::Value::Weights &longer) { + if (start_order == 0) return; + typename util::ProbingHashTable::MutableIterator iter; + // Hopefully the compiler will realize that if MarkExtends always returns false, it can simplify this code. + for (int even_lower = start_order - 2 /* index in middle */; ; --even_lower) { + if (even_lower == -1) { + build.MarkExtends(unigram, longer); + return; + } + middle[even_lower].UnsafeMutableFind(keys[even_lower], iter); + if (!build.MarkExtends(iter->value, longer)) return; + } +} + +template void ReadNGrams( + util::FilePiece &f, + const unsigned int n, + const size_t count, + const ProbingVocabulary &vocab, + const Build &build, + typename Build::Value::Weights *unigrams, + std::vector > &middle, + Activate activate, + Store &store, + PositiveProbWarn &warn) { + typedef typename Build::Value Value; + typedef util::ProbingHashTable Middle; + assert(n >= 2); + ReadNGramHeader(f, n); + + // Both vocab_ids and keys are non-empty because n >= 2. + // vocab ids of words in reverse order. 
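+ // keys[h] is the combined hash of the first h + 2 reversed words; keys[n-2] keys the full n-gram in its table.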
+ std::vector vocab_ids(n); + std::vector keys(n-1); + typename Store::Entry entry; + std::vector between; + for (size_t i = 0; i < count; ++i) { + ReadNGram(f, n, vocab, &*vocab_ids.begin(), entry.value, warn); + build.SetRest(&*vocab_ids.begin(), n, entry.value); + + keys[0] = detail::CombineWordHash(static_cast(vocab_ids.front()), vocab_ids[1]); + for (unsigned int h = 1; h < n - 1; ++h) { + keys[h] = detail::CombineWordHash(keys[h-1], vocab_ids[h+1]); + } + // Initially the sign bit is on, indicating it does not extend left. Most already have this but there might +0.0. + util::SetSign(entry.value.prob); + entry.key = keys[n-2]; + + store.Insert(entry); + between.clear(); + FindLower(keys, unigrams[vocab_ids.front()], middle, between); + AdjustLower(entry.value, build, between, n, vocab_ids, unigrams, middle); + if (Build::kMarkEvenLower) MarkLower(keys, build, unigrams[vocab_ids.front()], middle, n - between.size() - 1, *between.back()); + activate(&*vocab_ids.begin(), n); + } + + store.FinishedInserting(); +} + +} // namespace +namespace detail { + +template uint8_t *HashedSearch::SetupMemory(uint8_t *start, const std::vector &counts, const Config &config) { + std::size_t allocated = Unigram::Size(counts[0]); + unigram_ = Unigram(start, counts[0], allocated); + start += allocated; + for (unsigned int n = 2; n < counts.size(); ++n) { + allocated = Middle::Size(counts[n - 1], config.probing_multiplier); + middle_.push_back(Middle(start, allocated)); + start += allocated; + } + allocated = Longest::Size(counts.back(), config.probing_multiplier); + longest_ = Longest(start, allocated); + start += allocated; + return start; +} + +template void HashedSearch::InitializeFromARPA(const char * /*file*/, util::FilePiece &f, const std::vector &counts, const Config &config, ProbingVocabulary &vocab, Backing &backing) { + // TODO: fix sorted. 
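+ // Allocate all probing tables up front; their sizes depend only on the counts and config.probing_multiplier.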
+ SetupMemory(GrowForSearch(config, vocab.UnkCountChangePadding(), Size(counts, config), backing), counts, config); + + PositiveProbWarn warn(config.positive_log_probability); + Read1Grams(f, counts[0], vocab, unigram_.Raw(), warn); + CheckSpecials(config, vocab); + DispatchBuild(f, counts, config, vocab, warn); +} + +template <> void HashedSearch::DispatchBuild(util::FilePiece &f, const std::vector &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn) { + NoRestBuild build; + ApplyBuild(f, counts, config, vocab, warn, build); +} + +template <> void HashedSearch::DispatchBuild(util::FilePiece &f, const std::vector &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn) { + switch (config.rest_function) { + case Config::REST_MAX: + { + MaxRestBuild build; + ApplyBuild(f, counts, config, vocab, warn, build); + } + break; + case Config::REST_LOWER: + { + LowerRestBuild build(config, counts.size(), vocab); + ApplyBuild(f, counts, config, vocab, warn, build); + } + break; + } +} + +template template void HashedSearch::ApplyBuild(util::FilePiece &f, const std::vector &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn, const Build &build) { + for (WordIndex i = 0; i < counts[0]; ++i) { + build.SetRest(&i, (unsigned int)1, unigram_.Raw()[i]); + } + + try { + if (counts.size() > 2) { + ReadNGrams, Middle>( + f, 2, counts[1], vocab, build, unigram_.Raw(), middle_, ActivateUnigram(unigram_.Raw()), middle_[0], warn); + } + for (unsigned int n = 3; n < counts.size(); ++n) { + ReadNGrams, Middle>( + f, n, counts[n-1], vocab, build, unigram_.Raw(), middle_, ActivateLowerMiddle(middle_[n-3]), middle_[n-2], warn); + } + if (counts.size() > 2) { + ReadNGrams, Longest>( + f, counts.size(), counts[counts.size() - 1], vocab, build, unigram_.Raw(), middle_, ActivateLowerMiddle(middle_.back()), longest_, warn); + } else { + ReadNGrams, Longest>( + f, counts.size(), counts[counts.size() - 1], vocab, build, unigram_.Raw(), middle_, ActivateUnigram(unigram_.Raw()), longest_, warn); + } + } catch (util::ProbingSizeException &e) { + UTIL_THROW(util::ProbingSizeException, "Avoid pruning n-grams like \"bar baz quux\" when \"foo bar baz quux\" is still in the model. KenLM will work when this pruning happens, but the probing model assumes these events are rare enough that using blank space in the probing hash table will cover all of them. 
Increase probing_multiplier (-p to build_binary) to add more blank spaces.\n"); + } + ReadEnd(f); +} + +template void HashedSearch::LoadedBinary() { + unigram_.LoadedBinary(); + for (typename std::vector::iterator i = middle_.begin(); i != middle_.end(); ++i) { + i->LoadedBinary(); + } + longest_.LoadedBinary(); +} + +template class HashedSearch; +template class HashedSearch; + +} // namespace detail +} // namespace ngram +} // namespace lm diff --git a/lm/search_hashed.hh b/lm/search_hashed.hh new file mode 100644 index 000000000..a52f107ba --- /dev/null +++ b/lm/search_hashed.hh @@ -0,0 +1,201 @@ +#ifndef LM_SEARCH_HASHED__ +#define LM_SEARCH_HASHED__ + +#include "lm/model_type.hh" +#include "lm/config.hh" +#include "lm/read_arpa.hh" +#include "lm/return.hh" +#include "lm/weights.hh" + +#include "util/bit_packing.hh" +#include "util/probing_hash_table.hh" + +#include +#include +#include + +namespace util { class FilePiece; } + +namespace lm { +namespace ngram { +struct Backing; +class ProbingVocabulary; +namespace detail { + +inline uint64_t CombineWordHash(uint64_t current, const WordIndex next) { + uint64_t ret = (current * 8978948897894561157ULL) ^ (static_cast(1 + next) * 17894857484156487943ULL); + return ret; +} + +#pragma pack(push) +#pragma pack(4) +struct ProbEntry { + uint64_t key; + Prob value; + typedef uint64_t Key; + typedef Prob Value; + uint64_t GetKey() const { + return key; + } +}; + +#pragma pack(pop) + +class LongestPointer { + public: + explicit LongestPointer(const float &to) : to_(&to) {} + + LongestPointer() : to_(NULL) {} + + bool Found() const { + return to_ != NULL; + } + + float Prob() const { + return *to_; + } + + private: + const float *to_; +}; + +template class HashedSearch { + public: + typedef uint64_t Node; + + typedef typename Value::ProbingProxy UnigramPointer; + typedef typename Value::ProbingProxy MiddlePointer; + typedef ::lm::ngram::detail::LongestPointer LongestPointer; + + static const ModelType kModelType = Value::kProbingModelType; + static const bool kDifferentRest = Value::kDifferentRest; + static const unsigned int kVersion = 0; + + // TODO: move probing_multiplier here with next binary file format update. 
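+ // Nothing search-specific is stored in the binary file yet, so there is nothing to read back here.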
+ static void UpdateConfigFromBinary(int, const std::vector &, Config &) {} + + static uint64_t Size(const std::vector &counts, const Config &config) { + uint64_t ret = Unigram::Size(counts[0]); + for (unsigned char n = 1; n < counts.size() - 1; ++n) { + ret += Middle::Size(counts[n], config.probing_multiplier); + } + return ret + Longest::Size(counts.back(), config.probing_multiplier); + } + + uint8_t *SetupMemory(uint8_t *start, const std::vector &counts, const Config &config); + + void InitializeFromARPA(const char *file, util::FilePiece &f, const std::vector &counts, const Config &config, ProbingVocabulary &vocab, Backing &backing); + + void LoadedBinary(); + + unsigned char Order() const { + return middle_.size() + 2; + } + + typename Value::Weights &UnknownUnigram() { return unigram_.Unknown(); } + + UnigramPointer LookupUnigram(WordIndex word, Node &next, bool &independent_left, uint64_t &extend_left) const { + extend_left = static_cast(word); + next = extend_left; + UnigramPointer ret(unigram_.Lookup(word)); + independent_left = ret.IndependentLeft(); + return ret; + } + +#pragma GCC diagnostic ignored "-Wuninitialized" + MiddlePointer Unpack(uint64_t extend_pointer, unsigned char extend_length, Node &node) const { + node = extend_pointer; + typename Middle::ConstIterator found; + bool got = middle_[extend_length - 2].Find(extend_pointer, found); + assert(got); + (void)got; + return MiddlePointer(found->value); + } + + MiddlePointer LookupMiddle(unsigned char order_minus_2, WordIndex word, Node &node, bool &independent_left, uint64_t &extend_pointer) const { + node = CombineWordHash(node, word); + typename Middle::ConstIterator found; + if (!middle_[order_minus_2].Find(node, found)) { + independent_left = true; + return MiddlePointer(); + } + extend_pointer = node; + MiddlePointer ret(found->value); + independent_left = ret.IndependentLeft(); + return ret; + } + + LongestPointer LookupLongest(WordIndex word, const Node &node) const { + // Sign bit is always on because longest n-grams do not extend left. + typename Longest::ConstIterator found; + if (!longest_.Find(CombineWordHash(node, word), found)) return LongestPointer(); + return LongestPointer(found->value.prob); + } + + // Generate a node without necessarily checking that it actually exists. + // Optionally return false if it's know to not exist. + bool FastMakeNode(const WordIndex *begin, const WordIndex *end, Node &node) const { + assert(begin != end); + node = static_cast(*begin); + for (const WordIndex *i = begin + 1; i < end; ++i) { + node = CombineWordHash(node, *i); + } + return true; + } + + private: + // Interpret config's rest cost build policy and pass the right template argument to ApplyBuild. 
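+ // The two specializations in search_hashed.cc either use NoRestBuild or pick MaxRestBuild / LowerRestBuild based on config.rest_function.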
+ void DispatchBuild(util::FilePiece &f, const std::vector &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn); + + template void ApplyBuild(util::FilePiece &f, const std::vector &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn, const Build &build); + + class Unigram { + public: + Unigram() {} + + Unigram(void *start, uint64_t count, std::size_t /*allocated*/) : + unigram_(static_cast(start)) +#ifdef DEBUG + , count_(count) +#endif + {} + + static uint64_t Size(uint64_t count) { + return (count + 1) * sizeof(typename Value::Weights); // +1 for hallucinate + } + + const typename Value::Weights &Lookup(WordIndex index) const { +#ifdef DEBUG + assert(index < count_); +#endif + return unigram_[index]; + } + + typename Value::Weights &Unknown() { return unigram_[0]; } + + void LoadedBinary() {} + + // For building. + typename Value::Weights *Raw() { return unigram_; } + + private: + typename Value::Weights *unigram_; +#ifdef DEBUG + uint64_t count_; +#endif + }; + + Unigram unigram_; + + typedef util::ProbingHashTable Middle; + std::vector middle_; + + typedef util::ProbingHashTable Longest; + Longest longest_; +}; + +} // namespace detail +} // namespace ngram +} // namespace lm + +#endif // LM_SEARCH_HASHED__ diff --git a/lm/search_trie.cc b/lm/search_trie.cc new file mode 100644 index 000000000..debcfd077 --- /dev/null +++ b/lm/search_trie.cc @@ -0,0 +1,611 @@ +/* This is where the trie is built. It's on-disk. */ +#include "lm/search_trie.hh" + +#include "lm/bhiksha.hh" +#include "lm/binary_format.hh" +#include "lm/blank.hh" +#include "lm/lm_exception.hh" +#include "lm/max_order.hh" +#include "lm/quantize.hh" +#include "lm/trie.hh" +#include "lm/trie_sort.hh" +#include "lm/vocab.hh" +#include "lm/weights.hh" +#include "lm/word_index.hh" +#include "util/ersatz_progress.hh" +#include "util/mmap.hh" +#include "util/proxy_iterator.hh" +#include "util/scoped.hh" +#include "util/sized_iterator.hh" + +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(_WIN32) || defined(_WIN64) +#include +#endif + +namespace lm { +namespace ngram { +namespace trie { +namespace { + +void ReadOrThrow(FILE *from, void *data, size_t size) { + UTIL_THROW_IF(1 != std::fread(data, size, 1, from), util::ErrnoException, "Short read"); +} + +int Compare(unsigned char order, const void *first_void, const void *second_void) { + const WordIndex *first = reinterpret_cast(first_void), *second = reinterpret_cast(second_void); + const WordIndex *end = first + order; + for (; first != end; ++first, ++second) { + if (*first < *second) return -1; + if (*first > *second) return 1; + } + return 0; +} + +struct ProbPointer { + unsigned char array; + uint64_t index; +}; + +// Array of n-grams and float indices. 
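+ // Each message is a reversed n-gram followed by a ProbPointer naming the float to which that n-gram's backoff should be added once it is found.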
+class BackoffMessages { + public: + void Init(std::size_t entry_size) { + current_ = NULL; + allocated_ = NULL; + entry_size_ = entry_size; + } + + void Add(const WordIndex *to, ProbPointer index) { + while (current_ + entry_size_ > allocated_) { + std::size_t allocated_size = allocated_ - (uint8_t*)backing_.get(); + Resize(std::max(allocated_size * 2, entry_size_)); + } + memcpy(current_, to, entry_size_ - sizeof(ProbPointer)); + *reinterpret_cast(current_ + entry_size_ - sizeof(ProbPointer)) = index; + current_ += entry_size_; + } + + void Apply(float *const *const base, FILE *unigrams) { + FinishedAdding(); + if (current_ == allocated_) return; + rewind(unigrams); + ProbBackoff weights; + WordIndex unigram = 0; + ReadOrThrow(unigrams, &weights, sizeof(weights)); + for (; current_ != allocated_; current_ += entry_size_) { + const WordIndex &cur_word = *reinterpret_cast(current_); + for (; unigram < cur_word; ++unigram) { + ReadOrThrow(unigrams, &weights, sizeof(weights)); + } + if (!HasExtension(weights.backoff)) { + weights.backoff = kExtensionBackoff; + UTIL_THROW_IF(fseek(unigrams, -sizeof(weights), SEEK_CUR), util::ErrnoException, "Seeking backwards to denote unigram extension failed."); + util::WriteOrThrow(unigrams, &weights, sizeof(weights)); + } + const ProbPointer &write_to = *reinterpret_cast(current_ + sizeof(WordIndex)); + base[write_to.array][write_to.index] += weights.backoff; + } + backing_.reset(); + } + + void Apply(float *const *const base, RecordReader &reader) { + FinishedAdding(); + if (current_ == allocated_) return; + // We'll also use the same buffer to record messages to blanks that they extend. + WordIndex *extend_out = reinterpret_cast(current_); + const unsigned char order = (entry_size_ - sizeof(ProbPointer)) / sizeof(WordIndex); + for (reader.Rewind(); reader && (current_ != allocated_); ) { + switch (Compare(order, reader.Data(), current_)) { + case -1: + ++reader; + break; + case 1: + // Message but nobody to receive it. Write it down at the beginning of the buffer so we can inform this blank that it extends. + for (const WordIndex *w = reinterpret_cast(current_); w != reinterpret_cast(current_) + order; ++w, ++extend_out) *extend_out = *w; + current_ += entry_size_; + break; + case 0: + float &backoff = reinterpret_cast((uint8_t*)reader.Data() + order * sizeof(WordIndex))->backoff; + if (!HasExtension(backoff)) { + backoff = kExtensionBackoff; + reader.Overwrite(&backoff, sizeof(float)); + } else { + const ProbPointer &write_to = *reinterpret_cast(current_ + entry_size_ - sizeof(ProbPointer)); + base[write_to.array][write_to.index] += backoff; + } + current_ += entry_size_; + break; + } + } + // Now this is a list of blanks that extend right. + entry_size_ = sizeof(WordIndex) * order; + Resize(sizeof(WordIndex) * (extend_out - (const WordIndex*)backing_.get())); + current_ = (uint8_t*)backing_.get(); + } + + // Call after Apply + bool Extends(unsigned char order, const WordIndex *words) { + if (current_ == allocated_) return false; + assert(order * sizeof(WordIndex) == entry_size_); + while (true) { + switch(Compare(order, words, current_)) { + case 1: + current_ += entry_size_; + if (current_ == allocated_) return false; + break; + case -1: + return false; + case 0: + return true; + } + } + } + + private: + void FinishedAdding() { + Resize(current_ - (uint8_t*)backing_.get()); + // Sort requests in same order as files. 
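+ // The record files are sorted the same way, so Apply can merge the messages against a file in one sequential pass.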
+ std::sort( + util::SizedIterator(util::SizedProxy(backing_.get(), entry_size_)), + util::SizedIterator(util::SizedProxy(current_, entry_size_)), + util::SizedCompare(EntryCompare((entry_size_ - sizeof(ProbPointer)) / sizeof(WordIndex)))); + current_ = (uint8_t*)backing_.get(); + } + + void Resize(std::size_t to) { + std::size_t current = current_ - (uint8_t*)backing_.get(); + backing_.call_realloc(to); + current_ = (uint8_t*)backing_.get() + current; + allocated_ = (uint8_t*)backing_.get() + to; + } + + util::scoped_malloc backing_; + + uint8_t *current_, *allocated_; + + std::size_t entry_size_; +}; + +const float kBadProb = std::numeric_limits::infinity(); + +class SRISucks { + public: + SRISucks() { + for (BackoffMessages *i = messages_; i != messages_ + KENLM_MAX_ORDER - 1; ++i) + i->Init(sizeof(ProbPointer) + sizeof(WordIndex) * (i - messages_ + 1)); + } + + void Send(unsigned char begin, unsigned char order, const WordIndex *to, float prob_basis) { + assert(prob_basis != kBadProb); + ProbPointer pointer; + pointer.array = order - 1; + pointer.index = values_[order - 1].size(); + for (unsigned char i = begin; i < order; ++i) { + messages_[i - 1].Add(to, pointer); + } + values_[order - 1].push_back(prob_basis); + } + + void ObtainBackoffs(unsigned char total_order, FILE *unigram_file, RecordReader *reader) { + for (unsigned char i = 0; i < KENLM_MAX_ORDER - 1; ++i) { + it_[i] = values_[i].empty() ? NULL : &*values_[i].begin(); + } + messages_[0].Apply(it_, unigram_file); + BackoffMessages *messages = messages_ + 1; + const RecordReader *end = reader + total_order - 2 /* exclude unigrams and longest order */; + for (; reader != end; ++messages, ++reader) { + messages->Apply(it_, *reader); + } + } + + ProbBackoff GetBlank(unsigned char total_order, unsigned char order, const WordIndex *indices) { + assert(order > 1); + ProbBackoff ret; + ret.prob = *(it_[order - 1]++); + ret.backoff = ((order != total_order - 1) && messages_[order - 1].Extends(order, indices)) ? kExtensionBackoff : kNoExtensionBackoff; + return ret; + } + + const std::vector &Values(unsigned char order) const { + return values_[order - 1]; + } + + private: + // This used to be one array. Then I needed to separate it by order for quantization to work. + std::vector values_[KENLM_MAX_ORDER - 1]; + BackoffMessages messages_[KENLM_MAX_ORDER - 1]; + + float *it_[KENLM_MAX_ORDER - 1]; +}; + +class FindBlanks { + public: + FindBlanks(unsigned char order, const ProbBackoff *unigrams, SRISucks &messages) + : counts_(order), unigrams_(unigrams), sri_(messages) {} + + float UnigramProb(WordIndex index) const { + return unigrams_[index].prob; + } + + void Unigram(WordIndex /*index*/) { + ++counts_[0]; + } + + void MiddleBlank(const unsigned char order, const WordIndex *indices, unsigned char lower, float prob_basis) { + sri_.Send(lower, order, indices + 1, prob_basis); + ++counts_[order - 1]; + } + + void Middle(const unsigned char order, const void * /*data*/) { + ++counts_[order - 1]; + } + + void Longest(const void * /*data*/) { + ++counts_.back(); + } + + // Unigrams wrote one past. + void Cleanup() { + --counts_[0]; + } + + const std::vector &Counts() const { + return counts_; + } + + private: + std::vector counts_; + + const ProbBackoff *unigrams_; + + SRISucks &sri_; +}; + +// Phase to actually write n-grams to the trie. 
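+ // Second pass over the sorted files: FindBlanks already fixed the counts, so each entry (including hallucinated blanks) can now be written into the bit-packed unigram, middle, and longest tables.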
+template class WriteEntries { + public: + WriteEntries(RecordReader *contexts, const Quant &quant, UnigramValue *unigrams, BitPackedMiddle *middle, BitPackedLongest &longest, unsigned char order, SRISucks &sri) : + contexts_(contexts), + quant_(quant), + unigrams_(unigrams), + middle_(middle), + longest_(longest), + bigram_pack_((order == 2) ? static_cast(longest_) : static_cast(*middle_)), + order_(order), + sri_(sri) {} + + float UnigramProb(WordIndex index) const { return unigrams_[index].weights.prob; } + + void Unigram(WordIndex word) { + unigrams_[word].next = bigram_pack_.InsertIndex(); + } + + void MiddleBlank(const unsigned char order, const WordIndex *indices, unsigned char /*lower*/, float /*prob_base*/) { + ProbBackoff weights = sri_.GetBlank(order_, order, indices); + typename Quant::MiddlePointer(quant_, order - 2, middle_[order - 2].Insert(indices[order - 1])).Write(weights.prob, weights.backoff); + } + + void Middle(const unsigned char order, const void *data) { + RecordReader &context = contexts_[order - 1]; + const WordIndex *words = reinterpret_cast(data); + ProbBackoff weights = *reinterpret_cast(words + order); + if (context && !memcmp(data, context.Data(), sizeof(WordIndex) * order)) { + SetExtension(weights.backoff); + ++context; + } + typename Quant::MiddlePointer(quant_, order - 2, middle_[order - 2].Insert(words[order - 1])).Write(weights.prob, weights.backoff); + } + + void Longest(const void *data) { + const WordIndex *words = reinterpret_cast(data); + typename Quant::LongestPointer(quant_, longest_.Insert(words[order_ - 1])).Write(reinterpret_cast(words + order_)->prob); + } + + void Cleanup() {} + + private: + RecordReader *contexts_; + const Quant &quant_; + UnigramValue *const unigrams_; + BitPackedMiddle *const middle_; + BitPackedLongest &longest_; + BitPacked &bigram_pack_; + const unsigned char order_; + SRISucks &sri_; +}; + +struct Gram { + Gram(const WordIndex *in_begin, unsigned char order) : begin(in_begin), end(in_begin + order) {} + + const WordIndex *begin, *end; + + // For queue, this is the direction we want. + bool operator<(const Gram &other) const { + return std::lexicographical_compare(other.begin, other.end, begin, end); + } +}; + +template class BlankManager { + public: + BlankManager(unsigned char total_order, Doing &doing) : total_order_(total_order), been_length_(0), doing_(doing) { + for (float *i = basis_; i != basis_ + KENLM_MAX_ORDER - 1; ++i) *i = kBadProb; + } + + void Visit(const WordIndex *to, unsigned char length, float prob) { + basis_[length - 1] = prob; + unsigned char overlap = std::min(length - 1, been_length_); + const WordIndex *cur; + WordIndex *pre; + for (cur = to, pre = been_; cur != to + overlap; ++cur, ++pre) { + if (*pre != *cur) break; + } + if (cur == to + length - 1) { + *pre = *cur; + been_length_ = length; + return; + } + // There are blanks to insert starting with order blank. + unsigned char blank = cur - to + 1; + UTIL_THROW_IF(blank == 1, FormatLoadException, "Missing a unigram that appears as context."); + const float *lower_basis; + for (lower_basis = basis_ + blank - 2; *lower_basis == kBadProb; --lower_basis) {} + unsigned char based_on = lower_basis - basis_ + 1; + for (; cur != to + length - 1; ++blank, ++cur, ++pre) { + assert(*lower_basis != kBadProb); + doing_.MiddleBlank(blank, to, based_on, *lower_basis); + *pre = *cur; + // Mark that the probability is a blank so it shouldn't be used as the basis for a later n-gram. 
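+ // The lower_basis search above skips kBadProb entries, so a hallucinated blank never becomes the basis for a longer n-gram.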
+template <class Doing> void RecursiveInsert(const unsigned char total_order, const WordIndex unigram_count, RecordReader *input, std::ostream *progress_out, const char *message, Doing &doing) {
+  util::ErsatzProgress progress(unigram_count + 1, progress_out, message);
+  WordIndex unigram = 0;
+  std::priority_queue<Gram> grams;
+  grams.push(Gram(&unigram, 1));
+  for (unsigned char i = 2; i <= total_order; ++i) {
+    if (input[i-2]) grams.push(Gram(reinterpret_cast<const WordIndex*>(input[i-2].Data()), i));
+  }
+
+  BlankManager<Doing> blank(total_order, doing);
+
+  while (true) {
+    Gram top = grams.top();
+    grams.pop();
+    unsigned char order = top.end - top.begin;
+    if (order == 1) {
+      blank.Visit(&unigram, 1, doing.UnigramProb(unigram));
+      doing.Unigram(unigram);
+      progress.Set(unigram);
+      if (++unigram == unigram_count + 1) break;
+      grams.push(top);
+    } else {
+      if (order == total_order) {
+        blank.Visit(top.begin, order, reinterpret_cast<const Prob*>(top.end)->prob);
+        doing.Longest(top.begin);
+      } else {
+        blank.Visit(top.begin, order, reinterpret_cast<const ProbBackoff*>(top.end)->prob);
+        doing.Middle(order, top.begin);
+      }
+      RecordReader &reader = input[order - 2];
+      if (++reader) grams.push(top);
+    }
+  }
+  assert(grams.empty());
+  doing.Cleanup();
+}
+
+void SanityCheckCounts(const std::vector<uint64_t> &initial, const std::vector<uint64_t> &fixed) {
+  if (fixed[0] != initial[0]) UTIL_THROW(util::Exception, "Unigram count should be constant but initial is " << initial[0] << " and recounted is " << fixed[0]);
+  if (fixed.back() != initial.back()) UTIL_THROW(util::Exception, "Longest count should be constant but it changed from " << initial.back() << " to " << fixed.back());
+  for (unsigned char i = 0; i < initial.size(); ++i) {
+    if (fixed[i] < initial[i]) UTIL_THROW(util::Exception, "Counts came out lower than expected.  This shouldn't happen");
+  }
+}
+
+template <class Quant> void TrainQuantizer(uint8_t order, uint64_t count, const std::vector<float> &additional, RecordReader &reader, util::ErsatzProgress &progress, Quant &quant) {
+  std::vector<float> probs(additional), backoffs;
+  probs.reserve(count + additional.size());
+  backoffs.reserve(count);
+  for (reader.Rewind(); reader; ++reader) {
+    const ProbBackoff &weights = *reinterpret_cast<const ProbBackoff*>(reinterpret_cast<const uint8_t*>(reader.Data()) + sizeof(WordIndex) * order);
+    probs.push_back(weights.prob);
+    if (weights.backoff != 0.0) backoffs.push_back(weights.backoff);
+    ++progress;
+  }
+  quant.Train(order, probs, backoffs);
+}
+
+template <class Quant> void TrainProbQuantizer(uint8_t order, uint64_t count, RecordReader &reader, util::ErsatzProgress &progress, Quant &quant) {
+  std::vector<float> probs, backoffs;
+  probs.reserve(count);
+  for (reader.Rewind(); reader; ++reader) {
+    const Prob &weights = *reinterpret_cast<const Prob*>(reinterpret_cast<const uint8_t*>(reader.Data()) + sizeof(WordIndex) * order);
+    probs.push_back(weights.prob);
+    ++progress;
+  }
+  quant.TrainProb(order, probs);
+}
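+
+// For orientation: each sorted record is [order x WordIndex] followed by
+// ProbBackoff for middle orders or just Prob for the longest order, which is
+// why both trainers skip sizeof(WordIndex) * order bytes to reach the
+// weights.  Backoffs of exactly 0.0 are left out of the backoff bins, and
+// the blank probabilities recomputed by SRISucks arrive via additional.
+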
+void PopulateUnigramWeights(FILE *file, WordIndex unigram_count, RecordReader &contexts, UnigramValue *unigrams) {
+  // Fill unigram probabilities.
+  try {
+    rewind(file);
+    for (WordIndex i = 0; i < unigram_count; ++i) {
+      ReadOrThrow(file, &unigrams[i].weights, sizeof(ProbBackoff));
+      if (contexts && *reinterpret_cast<const WordIndex*>(contexts.Data()) == i) {
+        SetExtension(unigrams[i].weights.backoff);
+        ++contexts;
+      }
+    }
+  } catch (util::Exception &e) {
+    e << " while re-reading unigram probabilities";
+    throw;
+  }
+}
+
+} // namespace
+
+template <class Quant, class Bhiksha> void BuildTrie(SortedFiles &files, std::vector<uint64_t> &counts, const Config &config, TrieSearch<Quant, Bhiksha> &out, Quant &quant, const SortedVocabulary &vocab, Backing &backing) {
+  RecordReader inputs[KENLM_MAX_ORDER - 1];
+  RecordReader contexts[KENLM_MAX_ORDER - 1];
+
+  for (unsigned char i = 2; i <= counts.size(); ++i) {
+    inputs[i-2].Init(files.Full(i), i * sizeof(WordIndex) + (i == counts.size() ? sizeof(Prob) : sizeof(ProbBackoff)));
+    contexts[i-2].Init(files.Context(i), (i-1) * sizeof(WordIndex));
+  }
+
+  SRISucks sri;
+  std::vector<uint64_t> fixed_counts;
+  util::scoped_FILE unigram_file;
+  util::scoped_fd unigram_fd(files.StealUnigram());
+  {
+    util::scoped_memory unigrams;
+    MapRead(util::POPULATE_OR_READ, unigram_fd.get(), 0, counts[0] * sizeof(ProbBackoff), unigrams);
+    FindBlanks finder(counts.size(), reinterpret_cast<const ProbBackoff*>(unigrams.get()), sri);
+    RecursiveInsert(counts.size(), counts[0], inputs, config.messages, "Identifying n-grams omitted by SRI", finder);
+    fixed_counts = finder.Counts();
+  }
+  unigram_file.reset(util::FDOpenOrThrow(unigram_fd));
+  for (const RecordReader *i = inputs; i != inputs + counts.size() - 2; ++i) {
+    if (*i) UTIL_THROW(FormatLoadException, "There's a bug in the trie implementation: the " << (i - inputs + 2) << "-gram table did not complete reading");
+  }
+  SanityCheckCounts(counts, fixed_counts);
+  counts = fixed_counts;
+
+  sri.ObtainBackoffs(counts.size(), unigram_file.get(), inputs);
+
+  out.SetupMemory(GrowForSearch(config, vocab.UnkCountChangePadding(), TrieSearch<Quant, Bhiksha>::Size(fixed_counts, config), backing), fixed_counts, config);
+
+  for (unsigned char i = 2; i <= counts.size(); ++i) {
+    inputs[i-2].Rewind();
+  }
+  if (Quant::kTrain) {
+    util::ErsatzProgress progress(std::accumulate(counts.begin() + 1, counts.end(), 0), config.messages, "Quantizing");
+    for (unsigned char i = 2; i < counts.size(); ++i) {
+      TrainQuantizer(i, counts[i-1], sri.Values(i), inputs[i-2], progress, quant);
+    }
+    TrainProbQuantizer(counts.size(), counts.back(), inputs[counts.size() - 2], progress, quant);
+    quant.FinishedLoading(config);
+  }
+
+  UnigramValue *unigrams = out.unigram_.Raw();
+  PopulateUnigramWeights(unigram_file.get(), counts[0], contexts[0], unigrams);
+  unigram_file.reset();
+
+  for (unsigned char i = 2; i <= counts.size(); ++i) {
+    inputs[i-2].Rewind();
+  }
+  // Fill entries except unigram probabilities.
+  {
+    WriteEntries<Quant, Bhiksha> writer(contexts, quant, unigrams, out.middle_begin_, out.longest_, counts.size(), sri);
+    RecursiveInsert(counts.size(), counts[0], inputs, config.messages, "Writing trie", writer);
+  }
+
+  // Do not disable this error message or else too little state will be returned.  Both WriteEntries::Middle and returning state based on found n-grams will need to be fixed to handle this situation.
+  for (unsigned char order = 2; order <= counts.size(); ++order) {
+    const RecordReader &context = contexts[order - 2];
+    if (context) {
+      FormatLoadException e;
+      e << "A " << static_cast<unsigned int>(order) << "-gram has context";
+      const WordIndex *ctx = reinterpret_cast<const WordIndex*>(context.Data());
+      for (const WordIndex *i = ctx; i != ctx + order - 1; ++i) {
+        e << ' ' << *i;
+      }
+      e << " so this context must appear in the model as a " << static_cast<unsigned int>(order - 1) << "-gram but it does not";
+      throw e;
+    }
+  }
+
+  /* Set ending offsets so the last entry will be sized properly */
+  // Last entry for unigrams was already set.
+  if (out.middle_begin_ != out.middle_end_) {
+    for (typename TrieSearch<Quant, Bhiksha>::Middle *i = out.middle_begin_; i != out.middle_end_ - 1; ++i) {
+      i->FinishedLoading((i+1)->InsertIndex(), config);
+    }
+    (out.middle_end_ - 1)->FinishedLoading(out.longest_.InsertIndex(), config);
+  }
+}
+
+template <class Quant, class Bhiksha> uint8_t *TrieSearch<Quant, Bhiksha>::SetupMemory(uint8_t *start, const std::vector<uint64_t> &counts, const Config &config) {
+  quant_.SetupMemory(start, counts.size(), config);
+  start += Quant::Size(counts.size(), config);
+  unigram_.Init(start);
+  start += Unigram::Size(counts[0]);
+  FreeMiddles();
+  middle_begin_ = static_cast<Middle*>(malloc(sizeof(Middle) * (counts.size() - 2)));
+  middle_end_ = middle_begin_ + (counts.size() - 2);
+  std::vector<uint8_t*> middle_starts(counts.size() - 2);
+  for (unsigned char i = 2; i < counts.size(); ++i) {
+    middle_starts[i-2] = start;
+    start += Middle::Size(Quant::MiddleBits(config), counts[i-1], counts[0], counts[i], config);
+  }
+  // Crazy backwards thing so we initialize using pointers to ones that have already been initialized
+  for (unsigned char i = counts.size() - 1; i >= 2; --i) {
+    new (middle_begin_ + i - 2) Middle(
+        middle_starts[i-2],
+        quant_.MiddleBits(config),
+        counts[i-1],
+        counts[0],
+        counts[i],
+        (i == counts.size() - 1) ? static_cast<const BitPacked&>(longest_) : static_cast<const BitPacked&>(middle_begin_[i-1]),
+        config);
+  }
+  longest_.Init(start, quant_.LongestBits(config), counts[0]);
+  return start + Longest::Size(Quant::LongestBits(config), counts.back(), counts[0]);
+}
+
+template <class Quant, class Bhiksha> void TrieSearch<Quant, Bhiksha>::LoadedBinary() {
+  unigram_.LoadedBinary();
+  for (Middle *i = middle_begin_; i != middle_end_; ++i) {
+    i->LoadedBinary();
+  }
+  longest_.LoadedBinary();
+}
+
+template <class Quant, class Bhiksha> void TrieSearch<Quant, Bhiksha>::InitializeFromARPA(const char *file, util::FilePiece &f, std::vector<uint64_t> &counts, const Config &config, SortedVocabulary &vocab, Backing &backing) {
+  std::string temporary_prefix;
+  if (config.temporary_directory_prefix) {
+    temporary_prefix = config.temporary_directory_prefix;
+  } else if (config.write_mmap) {
+    temporary_prefix = config.write_mmap;
+  } else {
+    temporary_prefix = file;
+  }
+  // At least 1MB sorting memory.
+ SortedFiles sorted(config, f, counts, std::max(config.building_memory, 1048576), temporary_prefix, vocab); + + BuildTrie(sorted, counts, config, *this, quant_, vocab, backing); +} + +template class TrieSearch; +template class TrieSearch; +template class TrieSearch; +template class TrieSearch; + +} // namespace trie +} // namespace ngram +} // namespace lm diff --git a/lm/search_trie.hh b/lm/search_trie.hh new file mode 100644 index 000000000..1264baf5a --- /dev/null +++ b/lm/search_trie.hh @@ -0,0 +1,130 @@ +#ifndef LM_SEARCH_TRIE__ +#define LM_SEARCH_TRIE__ + +#include "lm/config.hh" +#include "lm/model_type.hh" +#include "lm/return.hh" +#include "lm/trie.hh" +#include "lm/weights.hh" + +#include "util/file.hh" +#include "util/file_piece.hh" + +#include + +#include + +namespace lm { +namespace ngram { +struct Backing; +class SortedVocabulary; +namespace trie { + +template class TrieSearch; +class SortedFiles; +template void BuildTrie(SortedFiles &files, std::vector &counts, const Config &config, TrieSearch &out, Quant &quant, const SortedVocabulary &vocab, Backing &backing); + +template class TrieSearch { + public: + typedef NodeRange Node; + + typedef ::lm::ngram::trie::UnigramPointer UnigramPointer; + typedef typename Quant::MiddlePointer MiddlePointer; + typedef typename Quant::LongestPointer LongestPointer; + + static const bool kDifferentRest = false; + + static const ModelType kModelType = static_cast(TRIE_SORTED + Quant::kModelTypeAdd + Bhiksha::kModelTypeAdd); + + static const unsigned int kVersion = 1; + + static void UpdateConfigFromBinary(int fd, const std::vector &counts, Config &config) { + Quant::UpdateConfigFromBinary(fd, counts, config); + util::AdvanceOrThrow(fd, Quant::Size(counts.size(), config) + Unigram::Size(counts[0])); + Bhiksha::UpdateConfigFromBinary(fd, config); + } + + static uint64_t Size(const std::vector &counts, const Config &config) { + uint64_t ret = Quant::Size(counts.size(), config) + Unigram::Size(counts[0]); + for (unsigned char i = 1; i < counts.size() - 1; ++i) { + ret += Middle::Size(Quant::MiddleBits(config), counts[i], counts[0], counts[i+1], config); + } + return ret + Longest::Size(Quant::LongestBits(config), counts.back(), counts[0]); + } + + TrieSearch() : middle_begin_(NULL), middle_end_(NULL) {} + + ~TrieSearch() { FreeMiddles(); } + + uint8_t *SetupMemory(uint8_t *start, const std::vector &counts, const Config &config); + + void LoadedBinary(); + + void InitializeFromARPA(const char *file, util::FilePiece &f, std::vector &counts, const Config &config, SortedVocabulary &vocab, Backing &backing); + + unsigned char Order() const { + return middle_end_ - middle_begin_ + 2; + } + + ProbBackoff &UnknownUnigram() { return unigram_.Unknown(); } + + UnigramPointer LookupUnigram(WordIndex word, Node &next, bool &independent_left, uint64_t &extend_left) const { + extend_left = static_cast(word); + UnigramPointer ret(unigram_.Find(word, next)); + independent_left = (next.begin == next.end); + return ret; + } + + MiddlePointer Unpack(uint64_t extend_pointer, unsigned char extend_length, Node &node) const { + return MiddlePointer(quant_, extend_length - 2, middle_begin_[extend_length - 2].ReadEntry(extend_pointer, node)); + } + + MiddlePointer LookupMiddle(unsigned char order_minus_2, WordIndex word, Node &node, bool &independent_left, uint64_t &extend_left) const { + util::BitAddress address(middle_begin_[order_minus_2].Find(word, node, extend_left)); + independent_left = (address.base == NULL) || (node.begin == node.end); + return 
MiddlePointer(quant_, order_minus_2, address); + } + + LongestPointer LookupLongest(WordIndex word, const Node &node) const { + return LongestPointer(quant_, longest_.Find(word, node)); + } + + bool FastMakeNode(const WordIndex *begin, const WordIndex *end, Node &node) const { + assert(begin != end); + bool independent_left; + uint64_t ignored; + LookupUnigram(*begin, node, independent_left, ignored); + for (const WordIndex *i = begin + 1; i < end; ++i) { + if (independent_left || !LookupMiddle(i - begin - 1, *i, node, independent_left, ignored).Found()) return false; + } + return true; + } + + private: + friend void BuildTrie(SortedFiles &files, std::vector &counts, const Config &config, TrieSearch &out, Quant &quant, const SortedVocabulary &vocab, Backing &backing); + + // Middles are managed manually so we can delay construction and they don't have to be copyable. + void FreeMiddles() { + for (const Middle *i = middle_begin_; i != middle_end_; ++i) { + i->~Middle(); + } + free(middle_begin_); + } + + typedef trie::BitPackedMiddle Middle; + + typedef trie::BitPackedLongest Longest; + Longest longest_; + + Middle *middle_begin_, *middle_end_; + Quant quant_; + + typedef ::lm::ngram::trie::Unigram Unigram; + Unigram unigram_; +}; + +} // namespace trie +} // namespace ngram +} // namespace lm + +#endif // LM_SEARCH_TRIE__ diff --git a/lm/state.hh b/lm/state.hh new file mode 100644 index 000000000..551510a8e --- /dev/null +++ b/lm/state.hh @@ -0,0 +1,125 @@ +#ifndef LM_STATE__ +#define LM_STATE__ + +#include "lm/max_order.hh" +#include "lm/word_index.hh" +#include "util/murmur_hash.hh" + +#include + +namespace lm { +namespace ngram { + +// This is a POD but if you want memcmp to return the same as operator==, call +// ZeroRemaining first. +class State { + public: + bool operator==(const State &other) const { + if (length != other.length) return false; + return !memcmp(words, other.words, length * sizeof(WordIndex)); + } + + // Three way comparison function. + int Compare(const State &other) const { + if (length != other.length) return length < other.length ? -1 : 1; + return memcmp(words, other.words, length * sizeof(WordIndex)); + } + + bool operator<(const State &other) const { + if (length != other.length) return length < other.length; + return memcmp(words, other.words, length * sizeof(WordIndex)) < 0; + } + + // Call this before using raw memcmp. + void ZeroRemaining() { + for (unsigned char i = length; i < KENLM_MAX_ORDER - 1; ++i) { + words[i] = 0; + backoff[i] = 0.0; + } + } + + unsigned char Length() const { return length; } + + // You shouldn't need to touch anything below this line, but the members are public so FullState will qualify as a POD. + // This order minimizes total size of the struct if WordIndex is 64 bit, float is 32 bit, and alignment of 64 bit integers is 64 bit. 
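+  // For example, with 8-byte WordIndex, leading with the 4-byte backoff
+  // floats could force padding before words to restore 8-byte alignment;
+  // words first packs with none.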
+ WordIndex words[KENLM_MAX_ORDER - 1]; + float backoff[KENLM_MAX_ORDER - 1]; + unsigned char length; +}; + +typedef State Right; + +inline uint64_t hash_value(const State &state, uint64_t seed = 0) { + return util::MurmurHashNative(state.words, sizeof(WordIndex) * state.length, seed); +} + +struct Left { + bool operator==(const Left &other) const { + return + (length == other.length) && + pointers[length - 1] == other.pointers[length - 1] && + full == other.full; + } + + int Compare(const Left &other) const { + if (length < other.length) return -1; + if (length > other.length) return 1; + if (pointers[length - 1] > other.pointers[length - 1]) return 1; + if (pointers[length - 1] < other.pointers[length - 1]) return -1; + return (int)full - (int)other.full; + } + + bool operator<(const Left &other) const { + return Compare(other) == -1; + } + + void ZeroRemaining() { + for (uint64_t * i = pointers + length; i < pointers + KENLM_MAX_ORDER - 1; ++i) + *i = 0; + } + + uint64_t pointers[KENLM_MAX_ORDER - 1]; + unsigned char length; + bool full; +}; + +inline uint64_t hash_value(const Left &left) { + unsigned char add[2]; + add[0] = left.length; + add[1] = left.full; + return util::MurmurHashNative(add, 2, left.length ? left.pointers[left.length - 1] : 0); +} + +struct ChartState { + bool operator==(const ChartState &other) { + return (right == other.right) && (left == other.left); + } + + int Compare(const ChartState &other) const { + int lres = left.Compare(other.left); + if (lres) return lres; + return right.Compare(other.right); + } + + bool operator<(const ChartState &other) const { + return Compare(other) == -1; + } + + void ZeroRemaining() { + left.ZeroRemaining(); + right.ZeroRemaining(); + } + + Left left; + State right; +}; + +inline uint64_t hash_value(const ChartState &state) { + return hash_value(state.right, hash_value(state.left)); +} + + +} // namespace ngram +} // namespace lm + +#endif // LM_STATE__ diff --git a/lm/test.arpa b/lm/test.arpa new file mode 100644 index 000000000..ef214eae3 --- /dev/null +++ b/lm/test.arpa @@ -0,0 +1,124 @@ + +\data\ +ngram 1=37 +ngram 2=47 +ngram 3=11 +ngram 4=6 +ngram 5=4 + +\1-grams: +-1.383514 , -0.30103 +-1.139057 . -0.845098 +-1.029493 +-99 -0.4149733 +-1.995635 -20 +-1.285941 a -0.69897 +-1.687872 also -0.30103 +-1.687872 beyond -0.30103 +-1.687872 biarritz -0.30103 +-1.687872 call -0.30103 +-1.687872 concerns -0.30103 +-1.687872 consider -0.30103 +-1.687872 considering -0.30103 +-1.687872 for -0.30103 +-1.509559 higher -0.30103 +-1.687872 however -0.30103 +-1.687872 i -0.30103 +-1.687872 immediate -0.30103 +-1.687872 in -0.30103 +-1.687872 is -0.30103 +-1.285941 little -0.69897 +-1.383514 loin -0.30103 +-1.687872 look -0.30103 +-1.285941 looking -0.4771212 +-1.206319 more -0.544068 +-1.509559 on -0.4771212 +-1.509559 screening -0.4771212 +-1.687872 small -0.30103 +-1.687872 the -0.30103 +-1.687872 to -0.30103 +-1.687872 watch -0.30103 +-1.687872 watching -0.30103 +-1.687872 what -0.30103 +-1.687872 would -0.30103 +-3.141592 foo +-2.718281 bar 3.0 +-6.535897 baz -0.0 + +\2-grams: +-0.6925742 , . +-0.7522095 , however +-0.7522095 , is +-0.0602359 . +-0.4846522 looking -0.4771214 +-1.051485 screening +-1.07153 the +-1.07153 watching +-1.07153 what +-0.09132547 a little -0.69897 +-0.2922095 also call +-0.2922095 beyond immediate +-0.2705918 biarritz . 
+-0.2922095 call for +-0.2922095 concerns in +-0.2922095 consider watch +-0.2922095 considering consider +-0.2834328 for , +-0.5511513 higher more +-0.5845945 higher small +-0.2834328 however , +-0.2922095 i would +-0.2922095 immediate concerns +-0.2922095 in biarritz +-0.2922095 is to +-0.09021038 little more -0.1998621 +-0.7273645 loin , +-0.6925742 loin . +-0.6708385 loin +-0.2922095 look beyond +-0.4638903 looking higher +-0.4638903 looking on -0.4771212 +-0.5136299 more . -0.4771212 +-0.3561665 more loin +-0.1649931 on a -0.4771213 +-0.1649931 screening a -0.4771213 +-0.2705918 small . +-0.287799 the screening +-0.2922095 to look +-0.2622373 watch +-0.2922095 watching considering +-0.2922095 what i +-0.2922095 would also +-2 also would -6 +-15 -2 +-4 however -1 +-6 foo bar + +\3-grams: +-0.01916512 more . +-0.0283603 on a little -0.4771212 +-0.0283603 screening a little -0.4771212 +-0.01660496 a little more -0.09409451 +-0.3488368 looking higher +-0.3488368 looking on -0.4771212 +-0.1892331 little more loin +-0.04835128 looking on a -0.4771212 +-3 also would consider -7 +-6 however -12 +-7 to look good + +\4-grams: +-0.009249173 looking on a little -0.4771212 +-0.005464747 on a little more -0.4771212 +-0.005464747 screening a little more +-0.1453306 a little more loin +-0.01552657 looking on a -0.4771212 +-4 also would consider higher -8 + +\5-grams: +-0.003061223 looking on a little +-0.001813953 looking on a little more +-0.0432557 on a little more loin +-5 also would consider higher looking + +\end\ diff --git a/lm/test_nounk.arpa b/lm/test_nounk.arpa new file mode 100644 index 000000000..060733d98 --- /dev/null +++ b/lm/test_nounk.arpa @@ -0,0 +1,120 @@ + +\data\ +ngram 1=36 +ngram 2=45 +ngram 3=10 +ngram 4=6 +ngram 5=4 + +\1-grams: +-1.383514 , -0.30103 +-1.139057 . -0.845098 +-1.029493 +-99 -0.4149733 +-1.285941 a -0.69897 +-1.687872 also -0.30103 +-1.687872 beyond -0.30103 +-1.687872 biarritz -0.30103 +-1.687872 call -0.30103 +-1.687872 concerns -0.30103 +-1.687872 consider -0.30103 +-1.687872 considering -0.30103 +-1.687872 for -0.30103 +-1.509559 higher -0.30103 +-1.687872 however -0.30103 +-1.687872 i -0.30103 +-1.687872 immediate -0.30103 +-1.687872 in -0.30103 +-1.687872 is -0.30103 +-1.285941 little -0.69897 +-1.383514 loin -0.30103 +-1.687872 look -0.30103 +-1.285941 looking -0.4771212 +-1.206319 more -0.544068 +-1.509559 on -0.4771212 +-1.509559 screening -0.4771212 +-1.687872 small -0.30103 +-1.687872 the -0.30103 +-1.687872 to -0.30103 +-1.687872 watch -0.30103 +-1.687872 watching -0.30103 +-1.687872 what -0.30103 +-1.687872 would -0.30103 +-3.141592 foo +-2.718281 bar 3.0 +-6.535897 baz -0.0 + +\2-grams: +-0.6925742 , . +-0.7522095 , however +-0.7522095 , is +-0.0602359 . +-0.4846522 looking -0.4771214 +-1.051485 screening +-1.07153 the +-1.07153 watching +-1.07153 what +-0.09132547 a little -0.69897 +-0.2922095 also call +-0.2922095 beyond immediate +-0.2705918 biarritz . +-0.2922095 call for +-0.2922095 concerns in +-0.2922095 consider watch +-0.2922095 considering consider +-0.2834328 for , +-0.5511513 higher more +-0.5845945 higher small +-0.2834328 however , +-0.2922095 i would +-0.2922095 immediate concerns +-0.2922095 in biarritz +-0.2922095 is to +-0.09021038 little more -0.1998621 +-0.7273645 loin , +-0.6925742 loin . +-0.6708385 loin +-0.2922095 look beyond +-0.4638903 looking higher +-0.4638903 looking on -0.4771212 +-0.5136299 more . -0.4771212 +-0.3561665 more loin +-0.1649931 on a -0.4771213 +-0.1649931 screening a -0.4771213 +-0.2705918 small . 
+-0.287799 the screening +-0.2922095 to look +-0.2622373 watch +-0.2922095 watching considering +-0.2922095 what i +-0.2922095 would also +-2 also would -6 +-6 foo bar + +\3-grams: +-0.01916512 more . +-0.0283603 on a little -0.4771212 +-0.0283603 screening a little -0.4771212 +-0.01660496 a little more -0.09409451 +-0.3488368 looking higher +-0.3488368 looking on -0.4771212 +-0.1892331 little more loin +-0.04835128 looking on a -0.4771212 +-3 also would consider -7 +-7 to look good + +\4-grams: +-0.009249173 looking on a little -0.4771212 +-0.005464747 on a little more -0.4771212 +-0.005464747 screening a little more +-0.1453306 a little more loin +-0.01552657 looking on a -0.4771212 +-4 also would consider higher -8 + +\5-grams: +-0.003061223 looking on a little +-0.001813953 looking on a little more +-0.0432557 on a little more loin +-5 also would consider higher looking + +\end\ diff --git a/lm/trie.cc b/lm/trie.cc new file mode 100644 index 000000000..d9895f89d --- /dev/null +++ b/lm/trie.cc @@ -0,0 +1,128 @@ +#include "lm/trie.hh" + +#include "lm/bhiksha.hh" +#include "util/bit_packing.hh" +#include "util/exception.hh" +#include "util/sorted_uniform.hh" + +#include + +namespace lm { +namespace ngram { +namespace trie { +namespace { + +class KeyAccessor { + public: + KeyAccessor(const void *base, uint64_t key_mask, uint8_t key_bits, uint8_t total_bits) + : base_(reinterpret_cast(base)), key_mask_(key_mask), key_bits_(key_bits), total_bits_(total_bits) {} + + typedef uint64_t Key; + + Key operator()(uint64_t index) const { + return util::ReadInt57(base_, index * static_cast(total_bits_), key_bits_, key_mask_); + } + + private: + const uint8_t *const base_; + const WordIndex key_mask_; + const uint8_t key_bits_, total_bits_; +}; + +bool FindBitPacked(const void *base, uint64_t key_mask, uint8_t key_bits, uint8_t total_bits, uint64_t begin_index, uint64_t end_index, const uint64_t max_vocab, const uint64_t key, uint64_t &at_index) { + KeyAccessor accessor(base, key_mask, key_bits, total_bits); + if (!util::BoundedSortedUniformFind::T>(accessor, begin_index - 1, (uint64_t)0, end_index, max_vocab, key, at_index)) return false; + return true; +} +} // namespace + +uint64_t BitPacked::BaseSize(uint64_t entries, uint64_t max_vocab, uint8_t remaining_bits) { + uint8_t total_bits = util::RequiredBits(max_vocab) + remaining_bits; + // Extra entry for next pointer at the end. + // +7 then / 8 to round up bits and convert to bytes + // +sizeof(uint64_t) so that ReadInt57 etc don't go segfault. + // Note that this waste is O(order), not O(number of ngrams). + return ((1 + entries) * total_bits + 7) / 8 + sizeof(uint64_t); +} + +void BitPacked::BaseInit(void *base, uint64_t max_vocab, uint8_t remaining_bits) { + util::BitPackingSanity(); + word_bits_ = util::RequiredBits(max_vocab); + word_mask_ = (1ULL << word_bits_) - 1ULL; + if (word_bits_ > 57) UTIL_THROW(util::Exception, "Sorry, word indices more than " << (1ULL << 57) << " are not implemented. 
Edit util/bit_packing.hh and fix the bit packing functions."); + total_bits_ = word_bits_ + remaining_bits; + + base_ = static_cast(base); + insert_index_ = 0; + max_vocab_ = max_vocab; +} + +template uint64_t BitPackedMiddle::Size(uint8_t quant_bits, uint64_t entries, uint64_t max_vocab, uint64_t max_ptr, const Config &config) { + return Bhiksha::Size(entries + 1, max_ptr, config) + BaseSize(entries, max_vocab, quant_bits + Bhiksha::InlineBits(entries + 1, max_ptr, config)); +} + +template BitPackedMiddle::BitPackedMiddle(void *base, uint8_t quant_bits, uint64_t entries, uint64_t max_vocab, uint64_t max_next, const BitPacked &next_source, const Config &config) : + BitPacked(), + quant_bits_(quant_bits), + // If the offset of the method changes, also change TrieSearch::UpdateConfigFromBinary. + bhiksha_(base, entries + 1, max_next, config), + next_source_(&next_source) { + if (entries + 1 >= (1ULL << 57) || (max_next >= (1ULL << 57))) UTIL_THROW(util::Exception, "Sorry, this does not support more than " << (1ULL << 57) << " n-grams of a particular order. Edit util/bit_packing.hh and fix the bit packing functions."); + BaseInit(reinterpret_cast(base) + Bhiksha::Size(entries + 1, max_next, config), max_vocab, quant_bits_ + bhiksha_.InlineBits()); +} + +template util::BitAddress BitPackedMiddle::Insert(WordIndex word) { + assert(word <= word_mask_); + uint64_t at_pointer = insert_index_ * total_bits_; + + util::WriteInt57(base_, at_pointer, word_bits_, word); + at_pointer += word_bits_; + util::BitAddress ret(base_, at_pointer); + at_pointer += quant_bits_; + uint64_t next = next_source_->InsertIndex(); + bhiksha_.WriteNext(base_, at_pointer, insert_index_, next); + ++insert_index_; + return ret; +} + +template util::BitAddress BitPackedMiddle::Find(WordIndex word, NodeRange &range, uint64_t &pointer) const { + uint64_t at_pointer; + if (!FindBitPacked(base_, word_mask_, word_bits_, total_bits_, range.begin, range.end, max_vocab_, word, at_pointer)) { + return util::BitAddress(NULL, 0); + } + pointer = at_pointer; + at_pointer *= total_bits_; + at_pointer += word_bits_; + bhiksha_.ReadNext(base_, at_pointer + quant_bits_, pointer, total_bits_, range); + + return util::BitAddress(base_, at_pointer); +} + +template void BitPackedMiddle::FinishedLoading(uint64_t next_end, const Config &config) { + uint64_t last_next_write = (insert_index_ + 1) * total_bits_ - bhiksha_.InlineBits(); + bhiksha_.WriteNext(base_, last_next_write, insert_index_ + 1, next_end); + bhiksha_.FinishedLoading(config); +} + +util::BitAddress BitPackedLongest::Insert(WordIndex index) { + assert(index <= word_mask_); + uint64_t at_pointer = insert_index_ * total_bits_; + util::WriteInt57(base_, at_pointer, word_bits_, index); + at_pointer += word_bits_; + ++insert_index_; + return util::BitAddress(base_, at_pointer); +} + +util::BitAddress BitPackedLongest::Find(WordIndex word, const NodeRange &range) const { + uint64_t at_pointer; + if (!FindBitPacked(base_, word_mask_, word_bits_, total_bits_, range.begin, range.end, max_vocab_, word, at_pointer)) return util::BitAddress(NULL, 0); + at_pointer = at_pointer * total_bits_ + word_bits_; + return util::BitAddress(base_, at_pointer); +} + +template class BitPackedMiddle; +template class BitPackedMiddle; + +} // namespace trie +} // namespace ngram +} // namespace lm diff --git a/lm/trie.hh b/lm/trie.hh new file mode 100644 index 000000000..9ea3c5466 --- /dev/null +++ b/lm/trie.hh @@ -0,0 +1,155 @@ +#ifndef LM_TRIE__ +#define LM_TRIE__ + +#include "lm/weights.hh" +#include 
"lm/word_index.hh" +#include "util/bit_packing.hh" + +#include + +#include + +namespace lm { +namespace ngram { +struct Config; +namespace trie { + +struct NodeRange { + uint64_t begin, end; +}; + +// TODO: if the number of unigrams is a concern, also bit pack these records. +struct UnigramValue { + ProbBackoff weights; + uint64_t next; + uint64_t Next() const { return next; } +}; + +class UnigramPointer { + public: + explicit UnigramPointer(const ProbBackoff &to) : to_(&to) {} + + UnigramPointer() : to_(NULL) {} + + bool Found() const { return to_ != NULL; } + + float Prob() const { return to_->prob; } + float Backoff() const { return to_->backoff; } + float Rest() const { return Prob(); } + + private: + const ProbBackoff *to_; +}; + +class Unigram { + public: + Unigram() {} + + void Init(void *start) { + unigram_ = static_cast(start); + } + + static uint64_t Size(uint64_t count) { + // +1 in case unknown doesn't appear. +1 for the final next. + return (count + 2) * sizeof(UnigramValue); + } + + const ProbBackoff &Lookup(WordIndex index) const { return unigram_[index].weights; } + + ProbBackoff &Unknown() { return unigram_[0].weights; } + + UnigramValue *Raw() { + return unigram_; + } + + void LoadedBinary() {} + + UnigramPointer Find(WordIndex word, NodeRange &next) const { + UnigramValue *val = unigram_ + word; + next.begin = val->next; + next.end = (val+1)->next; + return UnigramPointer(val->weights); + } + + private: + UnigramValue *unigram_; +}; + +class BitPacked { + public: + BitPacked() {} + + uint64_t InsertIndex() const { + return insert_index_; + } + + protected: + static uint64_t BaseSize(uint64_t entries, uint64_t max_vocab, uint8_t remaining_bits); + + void BaseInit(void *base, uint64_t max_vocab, uint8_t remaining_bits); + + uint8_t word_bits_; + uint8_t total_bits_; + uint64_t word_mask_; + + uint8_t *base_; + + uint64_t insert_index_, max_vocab_; +}; + +template class BitPackedMiddle : public BitPacked { + public: + static uint64_t Size(uint8_t quant_bits, uint64_t entries, uint64_t max_vocab, uint64_t max_next, const Config &config); + + // next_source need not be initialized. 
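+  // The constructor below only stores next_source's address;
+  // next_source_->InsertIndex() is first read inside Insert(), which is what
+  // makes this guarantee possible and lets TrieSearch::SetupMemory construct
+  // the per-order tables back to front.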
+ BitPackedMiddle(void *base, uint8_t quant_bits, uint64_t entries, uint64_t max_vocab, uint64_t max_next, const BitPacked &next_source, const Config &config); + + util::BitAddress Insert(WordIndex word); + + void FinishedLoading(uint64_t next_end, const Config &config); + + void LoadedBinary() { bhiksha_.LoadedBinary(); } + + util::BitAddress Find(WordIndex word, NodeRange &range, uint64_t &pointer) const; + + util::BitAddress ReadEntry(uint64_t pointer, NodeRange &range) { + uint64_t addr = pointer * total_bits_; + addr += word_bits_; + bhiksha_.ReadNext(base_, addr + quant_bits_, pointer, total_bits_, range); + return util::BitAddress(base_, addr); + } + + private: + uint8_t quant_bits_; + Bhiksha bhiksha_; + + const BitPacked *next_source_; +}; + +class BitPackedLongest : public BitPacked { + public: + static uint64_t Size(uint8_t quant_bits, uint64_t entries, uint64_t max_vocab) { + return BaseSize(entries, max_vocab, quant_bits); + } + + BitPackedLongest() {} + + void Init(void *base, uint8_t quant_bits, uint64_t max_vocab) { + BaseInit(base, max_vocab, quant_bits); + } + + void LoadedBinary() {} + + util::BitAddress Insert(WordIndex word); + + util::BitAddress Find(WordIndex word, const NodeRange &node) const; + + private: + uint8_t quant_bits_; +}; + +} // namespace trie +} // namespace ngram +} // namespace lm + +#endif // LM_TRIE__ diff --git a/lm/trie_sort.cc b/lm/trie_sort.cc new file mode 100644 index 000000000..8663e94e1 --- /dev/null +++ b/lm/trie_sort.cc @@ -0,0 +1,292 @@ +#include "lm/trie_sort.hh" + +#include "lm/config.hh" +#include "lm/lm_exception.hh" +#include "lm/read_arpa.hh" +#include "lm/vocab.hh" +#include "lm/weights.hh" +#include "lm/word_index.hh" +#include "util/file_piece.hh" +#include "util/mmap.hh" +#include "util/proxy_iterator.hh" +#include "util/sized_iterator.hh" + +#include +#include +#include +#include +#include +#include +#include + +namespace lm { +namespace ngram { +namespace trie { +namespace { + +typedef util::SizedIterator NGramIter; + +// Proxy for an entry except there is some extra cruft between the entries. This is used to sort (n-1)-grams using the same memory as the sorted n-grams. 
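+// Layout being reused: a sorted order-n record is [w1][w2]...[wn][weights],
+// so its (n-1)-word context [w2]...[wn] is the same buffer viewed at offset
+// sizeof(WordIndex), with the full record as stride and only attention_size
+// bytes taking part in comparison and copying.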
+class PartialViewProxy { + public: + PartialViewProxy() : attention_size_(0), inner_() {} + + PartialViewProxy(void *ptr, std::size_t block_size, std::size_t attention_size) : attention_size_(attention_size), inner_(ptr, block_size) {} + + operator std::string() const { + return std::string(reinterpret_cast(inner_.Data()), attention_size_); + } + + PartialViewProxy &operator=(const PartialViewProxy &from) { + memcpy(inner_.Data(), from.inner_.Data(), attention_size_); + return *this; + } + + PartialViewProxy &operator=(const std::string &from) { + memcpy(inner_.Data(), from.data(), attention_size_); + return *this; + } + + const void *Data() const { return inner_.Data(); } + void *Data() { return inner_.Data(); } + + private: + friend class util::ProxyIterator; + + typedef std::string value_type; + + const std::size_t attention_size_; + + typedef util::SizedInnerIterator InnerIterator; + InnerIterator &Inner() { return inner_; } + const InnerIterator &Inner() const { return inner_; } + InnerIterator inner_; +}; + +typedef util::ProxyIterator PartialIter; + +FILE *DiskFlush(const void *mem_begin, const void *mem_end, const util::TempMaker &maker) { + util::scoped_fd file(maker.Make()); + util::WriteOrThrow(file.get(), mem_begin, (uint8_t*)mem_end - (uint8_t*)mem_begin); + return util::FDOpenOrThrow(file); +} + +FILE *WriteContextFile(uint8_t *begin, uint8_t *end, const util::TempMaker &maker, std::size_t entry_size, unsigned char order) { + const size_t context_size = sizeof(WordIndex) * (order - 1); + // Sort just the contexts using the same memory. + PartialIter context_begin(PartialViewProxy(begin + sizeof(WordIndex), entry_size, context_size)); + PartialIter context_end(PartialViewProxy(end + sizeof(WordIndex), entry_size, context_size)); + +#if defined(_WIN32) || defined(_WIN64) + std::stable_sort +#else + std::sort +#endif + (context_begin, context_end, util::SizedCompare(EntryCompare(order - 1))); + + util::scoped_FILE out(maker.MakeFile()); + + // Write out to file and uniqueify at the same time. Could have used unique_copy if there was an appropriate OutputIterator. + if (context_begin == context_end) return out.release(); + PartialIter i(context_begin); + util::WriteOrThrow(out.get(), i->Data(), context_size); + const void *previous = i->Data(); + ++i; + for (; i != context_end; ++i) { + if (memcmp(previous, i->Data(), context_size)) { + util::WriteOrThrow(out.get(), i->Data(), context_size); + previous = i->Data(); + } + } + return out.release(); +} + +struct ThrowCombine { + void operator()(std::size_t /*entry_size*/, const void * /*first*/, const void * /*second*/, FILE * /*out*/) const { + UTIL_THROW(FormatLoadException, "Duplicate n-gram detected."); + } +}; + +// Useful for context files that just contain records with no value. 
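+// (Full n-gram files, by contrast, must be duplicate-free, which is why
+// ThrowCombine above treats a collision as a format error.)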
+struct FirstCombine { + void operator()(std::size_t entry_size, const void *first, const void * /*second*/, FILE *out) const { + util::WriteOrThrow(out, first, entry_size); + } +}; + +template FILE *MergeSortedFiles(FILE *first_file, FILE *second_file, const util::TempMaker &maker, std::size_t weights_size, unsigned char order, const Combine &combine) { + std::size_t entry_size = sizeof(WordIndex) * order + weights_size; + RecordReader first, second; + first.Init(first_file, entry_size); + second.Init(second_file, entry_size); + util::scoped_FILE out_file(maker.MakeFile()); + EntryCompare less(order); + while (first && second) { + if (less(first.Data(), second.Data())) { + util::WriteOrThrow(out_file.get(), first.Data(), entry_size); + ++first; + } else if (less(second.Data(), first.Data())) { + util::WriteOrThrow(out_file.get(), second.Data(), entry_size); + ++second; + } else { + combine(entry_size, first.Data(), second.Data(), out_file.get()); + ++first; ++second; + } + } + for (RecordReader &remains = (first ? first : second); remains; ++remains) { + util::WriteOrThrow(out_file.get(), remains.Data(), entry_size); + } + return out_file.release(); +} + +} // namespace + +void RecordReader::Init(FILE *file, std::size_t entry_size) { + entry_size_ = entry_size; + data_.reset(malloc(entry_size)); + UTIL_THROW_IF(!data_.get(), util::ErrnoException, "Failed to malloc read buffer"); + file_ = file; + if (file) { + rewind(file); + remains_ = true; + ++*this; + } else { + remains_ = false; + } +} + +void RecordReader::Overwrite(const void *start, std::size_t amount) { + long internal = (uint8_t*)start - (uint8_t*)data_.get(); + UTIL_THROW_IF(fseek(file_, internal - entry_size_, SEEK_CUR), util::ErrnoException, "Couldn't seek backwards for revision"); + util::WriteOrThrow(file_, start, amount); + long forward = entry_size_ - internal - amount; +#if !defined(_WIN32) && !defined(_WIN64) + if (forward) +#endif + UTIL_THROW_IF(fseek(file_, forward, SEEK_CUR), util::ErrnoException, "Couldn't seek forwards past revision"); +} + +void RecordReader::Rewind() { + if (file_) { + rewind(file_); + remains_ = true; + ++*this; + } else { + remains_ = false; + } +} + +SortedFiles::SortedFiles(const Config &config, util::FilePiece &f, std::vector &counts, size_t buffer, const std::string &file_prefix, SortedVocabulary &vocab) { + util::TempMaker maker(file_prefix); + PositiveProbWarn warn(config.positive_log_probability); + unigram_.reset(maker.Make()); + { + // In case appears. + size_t size_out = (counts[0] + 1) * sizeof(ProbBackoff); + util::scoped_mmap unigram_mmap(util::MapZeroedWrite(unigram_.get(), size_out), size_out); + Read1Grams(f, counts[0], vocab, reinterpret_cast(unigram_mmap.get()), warn); + CheckSpecials(config, vocab); + if (!vocab.SawUnk()) ++counts[0]; + } + + // Only use as much buffer as we need. 
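+  // Each order-n record costs n * sizeof(WordIndex) plus two floats (prob
+  // and backoff) for middle orders, or one float for the longest order; e.g.
+  // with 4-byte WordIndex, trigrams in a 5-gram model take 3*4 + 2*4 = 20
+  // bytes of sort buffer apiece.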
+ size_t buffer_use = 0; + for (unsigned int order = 2; order < counts.size(); ++order) { + buffer_use = std::max(buffer_use, static_cast((sizeof(WordIndex) * order + 2 * sizeof(float)) * counts[order - 1])); + } + buffer_use = std::max(buffer_use, static_cast((sizeof(WordIndex) * counts.size() + sizeof(float)) * counts.back())); + buffer = std::min(buffer, buffer_use); + + util::scoped_malloc mem; + mem.reset(malloc(buffer)); + if (!mem.get()) UTIL_THROW(util::ErrnoException, "malloc failed for sort buffer size " << buffer); + + for (unsigned char order = 2; order <= counts.size(); ++order) { + ConvertToSorted(f, vocab, counts, maker, order, warn, mem.get(), buffer); + } + ReadEnd(f); +} + +namespace { +class Closer { + public: + explicit Closer(std::deque &files) : files_(files) {} + + ~Closer() { + for (std::deque::iterator i = files_.begin(); i != files_.end(); ++i) { + util::scoped_FILE deleter(*i); + } + } + + void PopFront() { + util::scoped_FILE deleter(files_.front()); + files_.pop_front(); + } + private: + std::deque &files_; +}; +} // namespace + +void SortedFiles::ConvertToSorted(util::FilePiece &f, const SortedVocabulary &vocab, const std::vector &counts, const util::TempMaker &maker, unsigned char order, PositiveProbWarn &warn, void *mem, std::size_t mem_size) { + ReadNGramHeader(f, order); + const size_t count = counts[order - 1]; + // Size of weights. Does it include backoff? + const size_t words_size = sizeof(WordIndex) * order; + const size_t weights_size = sizeof(float) + ((order == counts.size()) ? 0 : sizeof(float)); + const size_t entry_size = words_size + weights_size; + const size_t batch_size = std::min(count, mem_size / entry_size); + uint8_t *const begin = reinterpret_cast(mem); + + std::deque files, contexts; + Closer files_closer(files), contexts_closer(contexts); + + for (std::size_t batch = 0, done = 0; done < count; ++batch) { + uint8_t *out = begin; + uint8_t *out_end = out + std::min(count - done, batch_size) * entry_size; + if (order == counts.size()) { + for (; out != out_end; out += entry_size) { + ReadNGram(f, order, vocab, reinterpret_cast(out), *reinterpret_cast(out + words_size), warn); + } + } else { + for (; out != out_end; out += entry_size) { + ReadNGram(f, order, vocab, reinterpret_cast(out), *reinterpret_cast(out + words_size), warn); + } + } + // Sort full records by full n-gram. + util::SizedProxy proxy_begin(begin, entry_size), proxy_end(out_end, entry_size); + // parallel_sort uses too much RAM. TODO: figure out why windows sort doesn't like my proxies. +#if defined(_WIN32) || defined(_WIN64) + std::stable_sort +#else + std::sort +#endif + (NGramIter(proxy_begin), NGramIter(proxy_end), util::SizedCompare(EntryCompare(order))); + files.push_back(DiskFlush(begin, out_end, maker)); + contexts.push_back(WriteContextFile(begin, out_end, maker, entry_size, order)); + + done += (out_end - begin) / entry_size; + } + + // All individual files created. Merge them. + + while (files.size() > 1) { + files.push_back(MergeSortedFiles(files[0], files[1], maker, weights_size, order, ThrowCombine())); + files_closer.PopFront(); + files_closer.PopFront(); + contexts.push_back(MergeSortedFiles(contexts[0], contexts[1], maker, 0, order - 1, FirstCombine())); + contexts_closer.PopFront(); + contexts_closer.PopFront(); + } + + if (!files.empty()) { + // Steal from closers. 
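+    // The resets below transfer each FILE* into the scoped_FILE members;
+    // pop_front() then drops the raw pointer from the deque so the Closer
+    // destructors will not fclose handles now owned by SortedFiles.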
+ full_[order - 2].reset(files.front()); + files.pop_front(); + context_[order - 2].reset(contexts.front()); + contexts.pop_front(); + } +} + +} // namespace trie +} // namespace ngram +} // namespace lm diff --git a/lm/trie_sort.hh b/lm/trie_sort.hh new file mode 100644 index 000000000..2197b80ce --- /dev/null +++ b/lm/trie_sort.hh @@ -0,0 +1,115 @@ +// Step of trie builder: create sorted files. + +#ifndef LM_TRIE_SORT__ +#define LM_TRIE_SORT__ + +#include "lm/max_order.hh" +#include "lm/word_index.hh" + +#include "util/file.hh" +#include "util/scoped.hh" + +#include +#include +#include +#include + +#include + +namespace util { +class FilePiece; +class TempMaker; +} // namespace util + +namespace lm { +class PositiveProbWarn; +namespace ngram { +class SortedVocabulary; +struct Config; + +namespace trie { + +class EntryCompare : public std::binary_function { + public: + explicit EntryCompare(unsigned char order) : order_(order) {} + + bool operator()(const void *first_void, const void *second_void) const { + const WordIndex *first = static_cast(first_void); + const WordIndex *second = static_cast(second_void); + const WordIndex *end = first + order_; + for (; first != end; ++first, ++second) { + if (*first < *second) return true; + if (*first > *second) return false; + } + return false; + } + private: + unsigned char order_; +}; + +class RecordReader { + public: + RecordReader() : remains_(true) {} + + void Init(FILE *file, std::size_t entry_size); + + void *Data() { return data_.get(); } + const void *Data() const { return data_.get(); } + + RecordReader &operator++() { + std::size_t ret = fread(data_.get(), entry_size_, 1, file_); + if (!ret) { + UTIL_THROW_IF(!feof(file_), util::ErrnoException, "Error reading temporary file"); + remains_ = false; + } + return *this; + } + + operator bool() const { return remains_; } + + void Rewind(); + + std::size_t EntrySize() const { return entry_size_; } + + void Overwrite(const void *start, std::size_t amount); + + private: + FILE *file_; + + util::scoped_malloc data_; + + bool remains_; + + std::size_t entry_size_; +}; + +class SortedFiles { + public: + // Build from ARPA + SortedFiles(const Config &config, util::FilePiece &f, std::vector &counts, std::size_t buffer, const std::string &file_prefix, SortedVocabulary &vocab); + + int StealUnigram() { + return unigram_.release(); + } + + FILE *Full(unsigned char order) { + return full_[order - 2].get(); + } + + FILE *Context(unsigned char of_order) { + return context_[of_order - 2].get(); + } + + private: + void ConvertToSorted(util::FilePiece &f, const SortedVocabulary &vocab, const std::vector &counts, const util::TempMaker &maker, unsigned char order, PositiveProbWarn &warn, void *mem, std::size_t mem_size); + + util::scoped_fd unigram_; + + util::scoped_FILE full_[KENLM_MAX_ORDER - 1], context_[KENLM_MAX_ORDER - 1]; +}; + +} // namespace trie +} // namespace ngram +} // namespace lm + +#endif // LM_TRIE_SORT__ diff --git a/lm/value.hh b/lm/value.hh new file mode 100644 index 000000000..ba716713a --- /dev/null +++ b/lm/value.hh @@ -0,0 +1,157 @@ +#ifndef LM_VALUE__ +#define LM_VALUE__ + +#include "lm/model_type.hh" +#include "lm/value_build.hh" +#include "lm/weights.hh" +#include "util/bit_packing.hh" + +#include + +namespace lm { +namespace ngram { + +// Template proxy for probing unigrams and middle. 
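+// Sign-bit trick used throughout: log10 probabilities are negative, so the
+// float's sign bit is spare.  MarkExtends (value_build.hh) clears it to
+// record that some longer n-gram extends this entry to the left;
+// IndependentLeft() tests the raw bit, and Prob() ORs util::kSignBit back in
+// to restore the true negative value.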
+template class GenericProbingProxy { + public: + explicit GenericProbingProxy(const Weights &to) : to_(&to) {} + + GenericProbingProxy() : to_(0) {} + + bool Found() const { return to_ != 0; } + + float Prob() const { + util::FloatEnc enc; + enc.f = to_->prob; + enc.i |= util::kSignBit; + return enc.f; + } + + float Backoff() const { return to_->backoff; } + + bool IndependentLeft() const { + util::FloatEnc enc; + enc.f = to_->prob; + return enc.i & util::kSignBit; + } + + protected: + const Weights *to_; +}; + +// Basic proxy for trie unigrams. +template class GenericTrieUnigramProxy { + public: + explicit GenericTrieUnigramProxy(const Weights &to) : to_(&to) {} + + GenericTrieUnigramProxy() : to_(0) {} + + bool Found() const { return to_ != 0; } + float Prob() const { return to_->prob; } + float Backoff() const { return to_->backoff; } + float Rest() const { return Prob(); } + + protected: + const Weights *to_; +}; + +struct BackoffValue { + typedef ProbBackoff Weights; + static const ModelType kProbingModelType = PROBING; + + class ProbingProxy : public GenericProbingProxy { + public: + explicit ProbingProxy(const Weights &to) : GenericProbingProxy(to) {} + ProbingProxy() {} + float Rest() const { return Prob(); } + }; + + class TrieUnigramProxy : public GenericTrieUnigramProxy { + public: + explicit TrieUnigramProxy(const Weights &to) : GenericTrieUnigramProxy(to) {} + TrieUnigramProxy() {} + float Rest() const { return Prob(); } + }; + + struct ProbingEntry { + typedef uint64_t Key; + typedef Weights Value; + uint64_t key; + ProbBackoff value; + uint64_t GetKey() const { return key; } + }; + + struct TrieUnigramValue { + Weights weights; + uint64_t next; + uint64_t Next() const { return next; } + }; + + const static bool kDifferentRest = false; + + template void Callback(const Config &, unsigned int, typename Model::Vocabulary &, C &callback) { + NoRestBuild build; + callback(build); + } +}; + +struct RestValue { + typedef RestWeights Weights; + static const ModelType kProbingModelType = REST_PROBING; + + class ProbingProxy : public GenericProbingProxy { + public: + explicit ProbingProxy(const Weights &to) : GenericProbingProxy(to) {} + ProbingProxy() {} + float Rest() const { return to_->rest; } + }; + + class TrieUnigramProxy : public GenericTrieUnigramProxy { + public: + explicit TrieUnigramProxy(const Weights &to) : GenericTrieUnigramProxy(to) {} + TrieUnigramProxy() {} + float Rest() const { return to_->rest; } + }; + +// gcc 4.1 doesn't properly back dependent types :-(. 
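+// Packing to 4 bytes makes ProbingEntry 8 (key) + 12 (three 4-byte floats)
+// = 20 bytes instead of the 24 that uint64_t's natural 8-byte alignment
+// would give.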
+#pragma pack(push) +#pragma pack(4) + struct ProbingEntry { + typedef uint64_t Key; + typedef Weights Value; + Key key; + Value value; + Key GetKey() const { return key; } + }; + + struct TrieUnigramValue { + Weights weights; + uint64_t next; + uint64_t Next() const { return next; } + }; +#pragma pack(pop) + + const static bool kDifferentRest = true; + + template void Callback(const Config &config, unsigned int order, typename Model::Vocabulary &vocab, C &callback) { + switch (config.rest_function) { + case Config::REST_MAX: + { + MaxRestBuild build; + callback(build); + } + break; + case Config::REST_LOWER: + { + LowerRestBuild build(config, order, vocab); + callback(build); + } + break; + } + } +}; + +} // namespace ngram +} // namespace lm + +#endif // LM_VALUE__ diff --git a/lm/value_build.cc b/lm/value_build.cc new file mode 100644 index 000000000..6124f8da9 --- /dev/null +++ b/lm/value_build.cc @@ -0,0 +1,58 @@ +#include "lm/value_build.hh" + +#include "lm/model.hh" +#include "lm/read_arpa.hh" + +namespace lm { +namespace ngram { + +template LowerRestBuild::LowerRestBuild(const Config &config, unsigned int order, const typename Model::Vocabulary &vocab) { + UTIL_THROW_IF(config.rest_lower_files.size() != order - 1, ConfigException, "This model has order " << order << " so there should be " << (order - 1) << " lower-order models for rest cost purposes."); + Config for_lower = config; + for_lower.rest_lower_files.clear(); + + // Unigram models aren't supported, so this is a custom loader. + // TODO: optimize the unigram loading? + { + util::FilePiece uni(config.rest_lower_files[0].c_str()); + std::vector number; + ReadARPACounts(uni, number); + UTIL_THROW_IF(number.size() != 1, FormatLoadException, "Expected the unigram model to have order 1, not " << number.size()); + ReadNGramHeader(uni, 1); + unigrams_.resize(number[0]); + unigrams_[0] = config.unknown_missing_logprob; + PositiveProbWarn warn; + for (uint64_t i = 0; i < number[0]; ++i) { + WordIndex w; + Prob entry; + ReadNGram(uni, 1, vocab, &w, entry, warn); + unigrams_[w] = entry.prob; + } + } + + try { + for (unsigned int i = 2; i < order; ++i) { + models_.push_back(new Model(config.rest_lower_files[i - 1].c_str(), for_lower)); + UTIL_THROW_IF(models_.back()->Order() != i, FormatLoadException, "Lower order file " << config.rest_lower_files[i-1] << " should have order " << i); + } + } catch (...) { + for (typename std::vector::const_iterator i = models_.begin(); i != models_.end(); ++i) { + delete *i; + } + models_.clear(); + throw; + } + + // TODO: force/check same vocab. 
+} + +template LowerRestBuild::~LowerRestBuild() { + for (typename std::vector::const_iterator i = models_.begin(); i != models_.end(); ++i) { + delete *i; + } +} + +template class LowerRestBuild; + +} // namespace ngram +} // namespace lm diff --git a/lm/value_build.hh b/lm/value_build.hh new file mode 100644 index 000000000..461e6a5c9 --- /dev/null +++ b/lm/value_build.hh @@ -0,0 +1,97 @@ +#ifndef LM_VALUE_BUILD__ +#define LM_VALUE_BUILD__ + +#include "lm/weights.hh" +#include "lm/word_index.hh" +#include "util/bit_packing.hh" + +#include + +namespace lm { +namespace ngram { + +struct Config; +struct BackoffValue; +struct RestValue; + +class NoRestBuild { + public: + typedef BackoffValue Value; + + NoRestBuild() {} + + void SetRest(const WordIndex *, unsigned int, const Prob &/*prob*/) const {} + void SetRest(const WordIndex *, unsigned int, const ProbBackoff &) const {} + + template bool MarkExtends(ProbBackoff &weights, const Second &) const { + util::UnsetSign(weights.prob); + return false; + } + + // Probing doesn't need to go back to unigram. + const static bool kMarkEvenLower = false; +}; + +class MaxRestBuild { + public: + typedef RestValue Value; + + MaxRestBuild() {} + + void SetRest(const WordIndex *, unsigned int, const Prob &/*prob*/) const {} + void SetRest(const WordIndex *, unsigned int, RestWeights &weights) const { + weights.rest = weights.prob; + util::SetSign(weights.rest); + } + + bool MarkExtends(RestWeights &weights, const RestWeights &to) const { + util::UnsetSign(weights.prob); + if (weights.rest >= to.rest) return false; + weights.rest = to.rest; + return true; + } + bool MarkExtends(RestWeights &weights, const Prob &to) const { + util::UnsetSign(weights.prob); + if (weights.rest >= to.prob) return false; + weights.rest = to.prob; + return true; + } + + // Probing does need to go back to unigram. 
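+  // Unlike NoRestBuild, a max-based rest estimate at any order, unigrams
+  // included, can rise when a longer n-gram extends it, so extension marks
+  // have to propagate all the way down.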
+ const static bool kMarkEvenLower = true; +}; + +template class LowerRestBuild { + public: + typedef RestValue Value; + + LowerRestBuild(const Config &config, unsigned int order, const typename Model::Vocabulary &vocab); + + ~LowerRestBuild(); + + void SetRest(const WordIndex *, unsigned int, const Prob &/*prob*/) const {} + void SetRest(const WordIndex *vocab_ids, unsigned int n, RestWeights &weights) const { + typename Model::State ignored; + if (n == 1) { + weights.rest = unigrams_[*vocab_ids]; + } else { + weights.rest = models_[n-2]->FullScoreForgotState(vocab_ids + 1, vocab_ids + n, *vocab_ids, ignored).prob; + } + } + + template bool MarkExtends(RestWeights &weights, const Second &) const { + util::UnsetSign(weights.prob); + return false; + } + + const static bool kMarkEvenLower = false; + + std::vector unigrams_; + + std::vector models_; +}; + +} // namespace ngram +} // namespace lm + +#endif // LM_VALUE_BUILD__ diff --git a/lm/virtual_interface.cc b/lm/virtual_interface.cc new file mode 100644 index 000000000..17a74c3c1 --- /dev/null +++ b/lm/virtual_interface.cc @@ -0,0 +1,19 @@ +#include "lm/virtual_interface.hh" + +#include "lm/lm_exception.hh" + +namespace lm { +namespace base { + +Vocabulary::~Vocabulary() {} + +void Vocabulary::SetSpecial(WordIndex begin_sentence, WordIndex end_sentence, WordIndex not_found) { + begin_sentence_ = begin_sentence; + end_sentence_ = end_sentence; + not_found_ = not_found; +} + +Model::~Model() {} + +} // namespace base +} // namespace lm diff --git a/lm/virtual_interface.hh b/lm/virtual_interface.hh new file mode 100644 index 000000000..6a5a0196f --- /dev/null +++ b/lm/virtual_interface.hh @@ -0,0 +1,154 @@ +#ifndef LM_VIRTUAL_INTERFACE__ +#define LM_VIRTUAL_INTERFACE__ + +#include "lm/return.hh" +#include "lm/word_index.hh" +#include "util/string_piece.hh" + +#include + +namespace lm { +namespace base { + +template class ModelFacade; + +/* Vocabulary interface. Call Index(string) and get a word index for use in + * calling Model. It provides faster convenience functions for , , and + * although you can also find these using Index. + * + * Some models do not load the mapping from index to string. If you need this, + * check if the model Vocabulary class implements such a function and access it + * directly. + * + * The Vocabulary object is always owned by the Model and can be retrieved from + * the Model using BaseVocabulary() for this abstract interface or + * GetVocabulary() for the actual implementation (in which case you'll need the + * actual implementation of the Model too). + */ +class Vocabulary { + public: + virtual ~Vocabulary(); + + WordIndex BeginSentence() const { return begin_sentence_; } + WordIndex EndSentence() const { return end_sentence_; } + WordIndex NotFound() const { return not_found_; } + + /* Most implementations allow StringPiece lookups and need only override + * Index(StringPiece). SRI requires null termination and overrides all + * three methods. + */ + virtual WordIndex Index(const StringPiece &str) const = 0; + virtual WordIndex Index(const std::string &str) const { + return Index(StringPiece(str)); + } + virtual WordIndex Index(const char *str) const { + return Index(StringPiece(str)); + } + + protected: + // Call SetSpecial afterward. 
+ Vocabulary() {} + + Vocabulary(WordIndex begin_sentence, WordIndex end_sentence, WordIndex not_found) { + SetSpecial(begin_sentence, end_sentence, not_found); + } + + void SetSpecial(WordIndex begin_sentence, WordIndex end_sentence, WordIndex not_found); + + WordIndex begin_sentence_, end_sentence_, not_found_; + + private: + // Disable copy constructors. They're private and undefined. + // Ersatz boost::noncopyable. + Vocabulary(const Vocabulary &); + Vocabulary &operator=(const Vocabulary &); +}; + +/* There are two ways to access a Model. + * + * + * OPTION 1: Access the Model directly (e.g. lm::ngram::Model in model.hh). + * + * Every Model implements the scoring function: + * float Score( + * const Model::State &in_state, + * const WordIndex new_word, + * Model::State &out_state) const; + * + * It can also return the length of n-gram matched by the model: + * FullScoreReturn FullScore( + * const Model::State &in_state, + * const WordIndex new_word, + * Model::State &out_state) const; + * + * + * There are also accessor functions: + * const State &BeginSentenceState() const; + * const State &NullContextState() const; + * const Vocabulary &GetVocabulary() const; + * unsigned int Order() const; + * + * NB: In case you're wondering why the model implementation looks like it's + * missing these methods, see facade.hh. + * + * This is the fastest way to use a model and presents a normal State class to + * be included in a hypothesis state structure. + * + * + * OPTION 2: Use the virtual interface below. + * + * The virtual interface allow you to decide which Model to use at runtime + * without templatizing everything on the Model type. However, each Model has + * its own State class, so a single State cannot be efficiently provided (it + * would require using the maximum memory of any Model's State or memory + * allocation with each lookup). This means you become responsible for + * allocating memory with size StateSize() and passing it to the Score or + * FullScore functions provided here. + * + * For example, cdec has a std::string containing the entire state of a + * hypothesis. It can reserve StateSize bytes in this string for the model + * state. + * + * All the State objects are POD, so it's ok to use raw memory for storing + * State. + * in_state and out_state must not have the same address. + */ +class Model { + public: + virtual ~Model(); + + size_t StateSize() const { return state_size_; } + const void *BeginSentenceMemory() const { return begin_sentence_memory_; } + const void *NullContextMemory() const { return null_context_memory_; } + + // Requires in_state != out_state + virtual float Score(const void *in_state, const WordIndex new_word, void *out_state) const = 0; + + // Requires in_state != out_state + virtual FullScoreReturn FullScore(const void *in_state, const WordIndex new_word, void *out_state) const = 0; + + unsigned char Order() const { return order_; } + + const Vocabulary &BaseVocabulary() const { return *base_vocab_; } + + private: + template friend class ModelFacade; + explicit Model(size_t state_size) : state_size_(state_size) {} + + const size_t state_size_; + const void *begin_sentence_memory_, *null_context_memory_; + + const Vocabulary *base_vocab_; + + unsigned char order_; + + // Disable copy constructors. They're private and undefined. + // Ersatz boost::noncopyable. 
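+  // Hand-rolled noncopyable: under C++11 these would be "= delete", but the
+  // code sticks to pre-C++11 idioms.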
+    Model(const Model &);
+    Model &operator=(const Model &);
+};
+
+} // namespace base
+} // namespace lm
+
+#endif // LM_VIRTUAL_INTERFACE__
diff --git a/lm/vocab.cc b/lm/vocab.cc
new file mode 100644
index 000000000..398475bee
--- /dev/null
+++ b/lm/vocab.cc
@@ -0,0 +1,239 @@
+#include "lm/vocab.hh"
+
+#include "lm/binary_format.hh"
+#include "lm/enumerate_vocab.hh"
+#include "lm/lm_exception.hh"
+#include "lm/config.hh"
+#include "lm/weights.hh"
+#include "util/exception.hh"
+#include "util/file.hh"
+#include "util/joint_sort.hh"
+#include "util/murmur_hash.hh"
+#include "util/probing_hash_table.hh"
+
+#include <string>
+
+#include <string.h>
+
+namespace lm {
+namespace ngram {
+
+namespace detail {
+uint64_t HashForVocab(const char *str, std::size_t len) {
+  // This proved faster than Boost's hash in speed trials: total load time Murmur 67090000, Boost 72210000
+  // Chose to use 64A instead of native so binary format will be portable across 64 and 32 bit.
+  return util::MurmurHash64A(str, len, 0);
+}
+} // namespace detail
+
+namespace {
+// Normally static initialization is a bad idea but MurmurHash is pure arithmetic, so this is ok.
+const uint64_t kUnknownHash = detail::HashForVocab("<unk>", 5);
+// Sadly some LMs have <UNK>.
+const uint64_t kUnknownCapHash = detail::HashForVocab("<UNK>", 5);
+
+void ReadWords(int fd, EnumerateVocab *enumerate, WordIndex expected_count) {
+  // Check that we're at the right place by reading <unk> which is always first.
+  char check_unk[6];
+  util::ReadOrThrow(fd, check_unk, 6);
+  UTIL_THROW_IF(
+      memcmp(check_unk, "<unk>", 6),
+      FormatLoadException,
+      "Vocabulary words are in the wrong place. This could be because the binary file was built with stale gcc and old kenlm. Stale gcc, including the gcc distributed with RedHat and OS X, has a bug that ignores pragma pack for template-dependent types. New kenlm works around this, so you'll save memory but have to rebuild any binary files using the probing data structure.");
+  if (!enumerate) return;
+  enumerate->Add(0, "<unk>");
+
+  // Read all the words after unk.
+  const std::size_t kInitialRead = 16384;
+  std::string buf;
+  buf.reserve(kInitialRead + 100);
+  buf.resize(kInitialRead);
+  WordIndex index = 1; // Read <unk> already.
+  while (true) {
+    std::size_t got = util::ReadOrEOF(fd, &buf[0], kInitialRead);
+    if (got == 0) break;
+    buf.resize(got);
+    while (buf[buf.size() - 1]) {
+      char next_char;
+      util::ReadOrThrow(fd, &next_char, 1);
+      buf.push_back(next_char);
+    }
+    // Ok now we have null terminated strings.
+    for (const char *i = buf.data(); i != buf.data() + buf.size();) {
+      std::size_t length = strlen(i);
+      enumerate->Add(index++, StringPiece(i, length));
+      i += length + 1 /* null byte */;
+    }
+  }
+
+  UTIL_THROW_IF(expected_count != index, FormatLoadException, "The binary file has the wrong number of words at the end. This could be caused by a truncated binary file.");
+}
+
+} // namespace
+
+WriteWordsWrapper::WriteWordsWrapper(EnumerateVocab *inner) : inner_(inner) {}
+WriteWordsWrapper::~WriteWordsWrapper() {}
+
+void WriteWordsWrapper::Add(WordIndex index, const StringPiece &str) {
+  if (inner_) inner_->Add(index, str);
+  buffer_.append(str.data(), str.size());
+  buffer_.push_back(0);
+}
+
+void WriteWordsWrapper::Write(int fd) {
+  util::SeekEnd(fd);
+  util::WriteOrThrow(fd, buffer_.data(), buffer_.size());
+}
+
+SortedVocabulary::SortedVocabulary() : begin_(NULL), end_(NULL), enumerate_(NULL) {}
+
+uint64_t SortedVocabulary::Size(uint64_t entries, const Config &/*config*/) {
+  // Lead with the number of entries.
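+  // That is: one uint64_t entry count, then one uint64_t MurmurHash per
+  // word (sorted by FinishedLoading), so the block is 8 * (1 + entries)
+  // bytes.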
+ return sizeof(uint64_t) + sizeof(uint64_t) * entries; +} + +void SortedVocabulary::SetupMemory(void *start, std::size_t allocated, std::size_t entries, const Config &config) { + assert(allocated >= Size(entries, config)); + // Leave space for number of entries. + begin_ = reinterpret_cast(start) + 1; + end_ = begin_; + saw_unk_ = false; +} + +void SortedVocabulary::ConfigureEnumerate(EnumerateVocab *to, std::size_t max_entries) { + enumerate_ = to; + if (enumerate_) { + enumerate_->Add(0, ""); + strings_to_enumerate_.resize(max_entries); + } +} + +WordIndex SortedVocabulary::Insert(const StringPiece &str) { + uint64_t hashed = detail::HashForVocab(str); + if (hashed == kUnknownHash || hashed == kUnknownCapHash) { + saw_unk_ = true; + return 0; + } + *end_ = hashed; + if (enumerate_) { + strings_to_enumerate_[end_ - begin_].assign(str.data(), str.size()); + } + ++end_; + // This is 1 + the offset where it was inserted to make room for unk. + return end_ - begin_; +} + +void SortedVocabulary::FinishedLoading(ProbBackoff *reorder_vocab) { + if (enumerate_) { + if (!strings_to_enumerate_.empty()) { + util::PairedIterator values(reorder_vocab + 1, &*strings_to_enumerate_.begin()); + util::JointSort(begin_, end_, values); + } + for (WordIndex i = 0; i < static_cast(end_ - begin_); ++i) { + // strikes again: +1 here. + enumerate_->Add(i + 1, strings_to_enumerate_[i]); + } + strings_to_enumerate_.clear(); + } else { + util::JointSort(begin_, end_, reorder_vocab + 1); + } + SetSpecial(Index(""), Index(""), 0); + // Save size. Excludes UNK. + *(reinterpret_cast(begin_) - 1) = end_ - begin_; + // Includes UNK. + bound_ = end_ - begin_ + 1; +} + +void SortedVocabulary::LoadedBinary(bool have_words, int fd, EnumerateVocab *to) { + end_ = begin_ + *(reinterpret_cast(begin_) - 1); + SetSpecial(Index(""), Index(""), 0); + bound_ = end_ - begin_ + 1; + if (have_words) ReadWords(fd, to, bound_); +} + +namespace { +const unsigned int kProbingVocabularyVersion = 0; +} // namespace + +namespace detail { +struct ProbingVocabularyHeader { + // Lowest unused vocab id. This is also the number of words, including . + unsigned int version; + WordIndex bound; +}; +} // namespace detail + +ProbingVocabulary::ProbingVocabulary() : enumerate_(NULL) {} + +uint64_t ProbingVocabulary::Size(uint64_t entries, const Config &config) { + return ALIGN8(sizeof(detail::ProbingVocabularyHeader)) + Lookup::Size(entries, config.probing_multiplier); +} + +void ProbingVocabulary::SetupMemory(void *start, std::size_t allocated, std::size_t /*entries*/, const Config &/*config*/) { + header_ = static_cast(start); + lookup_ = Lookup(static_cast(start) + ALIGN8(sizeof(detail::ProbingVocabularyHeader)), allocated); + bound_ = 1; + saw_unk_ = false; +} + +void ProbingVocabulary::ConfigureEnumerate(EnumerateVocab *to, std::size_t /*max_entries*/) { + enumerate_ = to; + if (enumerate_) { + enumerate_->Add(0, ""); + } +} + +WordIndex ProbingVocabulary::Insert(const StringPiece &str) { + uint64_t hashed = detail::HashForVocab(str); + // Prevent unknown from going into the table. 
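+  // Unlike SortedVocabulary, which later reorders IDs by sorted hash,
+  // this table hands out sequential IDs in insertion order (bound_++),
+  // so no JointSort pass is needed; <unk> is still pinned to ID 0 by
+  // the check below.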
+ if (hashed == kUnknownHash || hashed == kUnknownCapHash) { + saw_unk_ = true; + return 0; + } else { + if (enumerate_) enumerate_->Add(bound_, str); + lookup_.Insert(ProbingVocabuaryEntry::Make(hashed, bound_)); + return bound_++; + } +} + +void ProbingVocabulary::InternalFinishedLoading() { + lookup_.FinishedInserting(); + header_->bound = bound_; + header_->version = kProbingVocabularyVersion; + SetSpecial(Index(""), Index(""), 0); +} + +void ProbingVocabulary::LoadedBinary(bool have_words, int fd, EnumerateVocab *to) { + UTIL_THROW_IF(header_->version != kProbingVocabularyVersion, FormatLoadException, "The binary file has probing version " << header_->version << " but the code expects version " << kProbingVocabularyVersion << ". Please rerun build_binary using the same version of the code."); + lookup_.LoadedBinary(); + bound_ = header_->bound; + SetSpecial(Index(""), Index(""), 0); + if (have_words) ReadWords(fd, to, bound_); +} + +void MissingUnknown(const Config &config) throw(SpecialWordMissingException) { + switch(config.unknown_missing) { + case SILENT: + return; + case COMPLAIN: + if (config.messages) *config.messages << "The ARPA file is missing . Substituting log10 probability " << config.unknown_missing_logprob << "." << std::endl; + break; + case THROW_UP: + UTIL_THROW(SpecialWordMissingException, "The ARPA file is missing and the model is configured to throw an exception."); + } +} + +void MissingSentenceMarker(const Config &config, const char *str) throw(SpecialWordMissingException) { + switch (config.sentence_marker_missing) { + case SILENT: + return; + case COMPLAIN: + if (config.messages) *config.messages << "Missing special word " << str << "; will treat it as ."; + break; + case THROW_UP: + UTIL_THROW(SpecialWordMissingException, "The ARPA file is missing " << str << " and the model is configured to reject these models. Run build_binary -s to disable this check."); + } +} + +} // namespace ngram +} // namespace lm diff --git a/lm/vocab.hh b/lm/vocab.hh new file mode 100644 index 000000000..074cd446e --- /dev/null +++ b/lm/vocab.hh @@ -0,0 +1,182 @@ +#ifndef LM_VOCAB__ +#define LM_VOCAB__ + +#include "lm/enumerate_vocab.hh" +#include "lm/lm_exception.hh" +#include "lm/virtual_interface.hh" +#include "util/probing_hash_table.hh" +#include "util/sorted_uniform.hh" +#include "util/string_piece.hh" + +#include +#include +#include + +namespace lm { +struct ProbBackoff; +class EnumerateVocab; + +namespace ngram { +struct Config; + +namespace detail { +uint64_t HashForVocab(const char *str, std::size_t len); +inline uint64_t HashForVocab(const StringPiece &str) { + return HashForVocab(str.data(), str.length()); +} +class ProbingVocabularyHeader; +} // namespace detail + +class WriteWordsWrapper : public EnumerateVocab { + public: + WriteWordsWrapper(EnumerateVocab *inner); + + ~WriteWordsWrapper(); + + void Add(WordIndex index, const StringPiece &str); + + void Write(int fd); + + private: + EnumerateVocab *inner_; + + std::string buffer_; +}; + +// Vocabulary based on sorted uniform find storing only uint64_t values and using their offsets as indices. +class SortedVocabulary : public base::Vocabulary { + public: + SortedVocabulary(); + + WordIndex Index(const StringPiece &str) const { + const uint64_t *found; + if (util::BoundedSortedUniformFind, util::Pivot64>( + util::IdentityAccessor(), + begin_ - 1, 0, + end_, std::numeric_limits::max(), + detail::HashForVocab(str), found)) { + return found - begin_ + 1; // +1 because is 0 and does not appear in the lookup table. 
+      } else {
+        return 0;
+      }
+    }
+
+    // Size for purposes of file writing
+    static uint64_t Size(uint64_t entries, const Config &config);
+
+    // Vocab words are [0, Bound()). Only valid after FinishedLoading/LoadedBinary.
+    WordIndex Bound() const { return bound_; }
+
+    // Everything else is for populating. I'm too lazy to hide and friend these, but you'll only get a const reference anyway.
+    void SetupMemory(void *start, std::size_t allocated, std::size_t entries, const Config &config);
+
+    void ConfigureEnumerate(EnumerateVocab *to, std::size_t max_entries);
+
+    WordIndex Insert(const StringPiece &str);
+
+    // Reorders reorder_vocab so that the IDs are sorted.
+    void FinishedLoading(ProbBackoff *reorder_vocab);
+
+    // Trie stores the correct counts including <unk> in the header. If this was previously sized based on a count excluding <unk>, padding with 8 bytes will make it the correct size based on a count including <unk>.
+    std::size_t UnkCountChangePadding() const { return SawUnk() ? 0 : sizeof(uint64_t); }
+
+    bool SawUnk() const { return saw_unk_; }
+
+    void LoadedBinary(bool have_words, int fd, EnumerateVocab *to);
+
+  private:
+    uint64_t *begin_, *end_;
+
+    WordIndex bound_;
+
+    WordIndex highest_value_;
+
+    bool saw_unk_;
+
+    EnumerateVocab *enumerate_;
+
+    // Actual strings. Used only when loading from ARPA and enumerate_ != NULL
+    std::vector<std::string> strings_to_enumerate_;
+};
+
+#pragma pack(push)
+#pragma pack(4)
+struct ProbingVocabuaryEntry {
+  uint64_t key;
+  WordIndex value;
+
+  typedef uint64_t Key;
+  uint64_t GetKey() const {
+    return key;
+  }
+
+  static ProbingVocabuaryEntry Make(uint64_t key, WordIndex value) {
+    ProbingVocabuaryEntry ret;
+    ret.key = key;
+    ret.value = value;
+    return ret;
+  }
+};
+#pragma pack(pop)
+
+// Vocabulary storing a map from uint64_t to WordIndex.
+class ProbingVocabulary : public base::Vocabulary {
+  public:
+    ProbingVocabulary();
+
+    WordIndex Index(const StringPiece &str) const {
+      Lookup::ConstIterator i;
+      return lookup_.Find(detail::HashForVocab(str), i) ? i->value : 0;
+    }
+
+    static uint64_t Size(uint64_t entries, const Config &config);
+
+    // Vocab words are [0, Bound()).
+    WordIndex Bound() const { return bound_; }
+
+    // Everything else is for populating. I'm too lazy to hide and friend these, but you'll only get a const reference anyway.
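+    // (Sketch, assuming a 4-byte WordIndex:) pragma pack(4) on
+    // ProbingVocabuaryEntry above keeps sizeof at 12 bytes instead of a
+    // padded 16; the stale-gcc warning in vocab.cc is about compilers
+    // that ignore that pragma for template-dependent types.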
+ void SetupMemory(void *start, std::size_t allocated, std::size_t entries, const Config &config); + + void ConfigureEnumerate(EnumerateVocab *to, std::size_t max_entries); + + WordIndex Insert(const StringPiece &str); + + template void FinishedLoading(Weights * /*reorder_vocab*/) { + InternalFinishedLoading(); + } + + std::size_t UnkCountChangePadding() const { return 0; } + + bool SawUnk() const { return saw_unk_; } + + void LoadedBinary(bool have_words, int fd, EnumerateVocab *to); + + private: + void InternalFinishedLoading(); + + typedef util::ProbingHashTable Lookup; + + Lookup lookup_; + + WordIndex bound_; + + bool saw_unk_; + + EnumerateVocab *enumerate_; + + detail::ProbingVocabularyHeader *header_; +}; + +void MissingUnknown(const Config &config) throw(SpecialWordMissingException); +void MissingSentenceMarker(const Config &config, const char *str) throw(SpecialWordMissingException); + +template void CheckSpecials(const Config &config, const Vocab &vocab) throw(SpecialWordMissingException) { + if (!vocab.SawUnk()) MissingUnknown(config); + if (vocab.BeginSentence() == vocab.NotFound()) MissingSentenceMarker(config, ""); + if (vocab.EndSentence() == vocab.NotFound()) MissingSentenceMarker(config, ""); +} + +} // namespace ngram +} // namespace lm + +#endif // LM_VOCAB__ diff --git a/lm/weights.hh b/lm/weights.hh new file mode 100644 index 000000000..bd5d80342 --- /dev/null +++ b/lm/weights.hh @@ -0,0 +1,22 @@ +#ifndef LM_WEIGHTS__ +#define LM_WEIGHTS__ + +// Weights for n-grams. Probability and possibly a backoff. + +namespace lm { +struct Prob { + float prob; +}; +// No inheritance so this will be a POD. +struct ProbBackoff { + float prob; + float backoff; +}; +struct RestWeights { + float prob; + float backoff; + float rest; +}; + +} // namespace lm +#endif // LM_WEIGHTS__ diff --git a/lm/word_index.hh b/lm/word_index.hh new file mode 100644 index 000000000..e09557a7c --- /dev/null +++ b/lm/word_index.hh @@ -0,0 +1,14 @@ +// Separate header because this is used often. +#ifndef LM_WORD_INDEX__ +#define LM_WORD_INDEX__ + +#include + +namespace lm { +typedef unsigned int WordIndex; +const WordIndex kMaxWordIndex = UINT_MAX; +} // namespace lm + +typedef lm::WordIndex LMWordIndex; + +#endif diff --git a/mert/Jamfile b/mert/Jamfile index 3325b37b4..84ff03701 100644 --- a/mert/Jamfile +++ b/mert/Jamfile @@ -49,7 +49,7 @@ SentenceLevelScorer.cpp Permutation.cpp PermutationScorer.cpp StatisticsBasedScorer.cpp -../lazy/util//kenutil m ..//z ; +../util//kenutil m ..//z ; exe mert : mert.cpp mert_lib ../moses/src//ThreadPool ; diff --git a/moses/src/DynSAInclude/Jamfile b/moses/src/DynSAInclude/Jamfile index 23f91ff1d..ff4bd6f76 100644 --- a/moses/src/DynSAInclude/Jamfile +++ b/moses/src/DynSAInclude/Jamfile @@ -1 +1 @@ -lib dynsa : [ glob *.cpp ] ../../../lazy/util//kenutil : : : . ; +lib dynsa : [ glob *.cpp ] ../../../util//kenutil : : : . ; diff --git a/moses/src/Incremental/Jamfile b/moses/src/Incremental/Jamfile index e4433cc9d..5b4c703e9 100644 --- a/moses/src/Incremental/Jamfile +++ b/moses/src/Incremental/Jamfile @@ -1 +1 @@ -lib Incremental : [ glob *.cpp ] ../../../lazy/search//search ..//moses_internal ; +lib Incremental : [ glob *.cpp ] ../../../search//search ..//moses_internal ; diff --git a/moses/src/Jamfile b/moses/src/Jamfile index 61f2b7c5a..43e2b1411 100644 --- a/moses/src/Jamfile +++ b/moses/src/Jamfile @@ -1,4 +1,4 @@ -alias headers : ../../lazy/util//kenutil : : : . ; +alias headers : ../../util//kenutil : : : . 
; alias ThreadPool : ThreadPool.cpp ; diff --git a/moses/src/LM/Jamfile b/moses/src/LM/Jamfile index 7cdd90907..3011a620c 100644 --- a/moses/src/LM/Jamfile +++ b/moses/src/LM/Jamfile @@ -93,4 +93,4 @@ obj Factory.o : Factory.cpp ..//headers $(dependencies) : ../DynSAInclu #Top-level LM library. If you've added a file that doesn't depend on external #libraries, put it here. lib LM : Base.cpp Factory.o Implementation.cpp Joint.cpp Ken.cpp MultiFactor.cpp Remote.cpp SingleFactor.cpp ORLM.o - ../../../lazy/lm//kenlm ..//headers ../Incremental//Incremental $(dependencies) ; + ../../../lm//kenlm ..//headers ../Incremental//Incremental $(dependencies) ; diff --git a/search/Jamfile b/search/Jamfile new file mode 100644 index 000000000..e8b14363d --- /dev/null +++ b/search/Jamfile @@ -0,0 +1,5 @@ +lib search : weights.cc vertex.cc vertex_generator.cc edge_queue.cc edge_generator.cc rule.cc ../lm//kenlm ../util//kenutil /top//boost_system : : : .. ; + +import testing ; + +unit-test weights_test : weights_test.cc search /top//boost_unit_test_framework ; diff --git a/search/arity.hh b/search/arity.hh new file mode 100644 index 000000000..09c2c671d --- /dev/null +++ b/search/arity.hh @@ -0,0 +1,8 @@ +#ifndef SEARCH_ARITY__ +#define SEARCH_ARITY__ +namespace search { + +const unsigned int kMaxArity = 2; + +} // namespace search +#endif // SEARCH_ARITY__ diff --git a/search/config.hh b/search/config.hh new file mode 100644 index 000000000..ef8e2354a --- /dev/null +++ b/search/config.hh @@ -0,0 +1,25 @@ +#ifndef SEARCH_CONFIG__ +#define SEARCH_CONFIG__ + +#include "search/weights.hh" +#include "util/string_piece.hh" + +namespace search { + +class Config { + public: + Config(const Weights &weights, unsigned int pop_limit) : + weights_(weights), pop_limit_(pop_limit) {} + + const Weights &GetWeights() const { return weights_; } + + unsigned int PopLimit() const { return pop_limit_; } + + private: + Weights weights_; + unsigned int pop_limit_; +}; + +} // namespace search + +#endif // SEARCH_CONFIG__ diff --git a/search/context.hh b/search/context.hh new file mode 100644 index 000000000..27940053b --- /dev/null +++ b/search/context.hh @@ -0,0 +1,65 @@ +#ifndef SEARCH_CONTEXT__ +#define SEARCH_CONTEXT__ + +#include "lm/model.hh" +#include "search/config.hh" +#include "search/final.hh" +#include "search/types.hh" +#include "search/vertex.hh" +#include "util/exception.hh" + +#include +#include + +#include + +namespace search { + +class Weights; + +class ContextBase { + public: + explicit ContextBase(const Config &config) : pop_limit_(config.PopLimit()), weights_(config.GetWeights()) {} + + Final *NewFinal() { + Final *ret = final_pool_.construct(); + assert(ret); + return ret; + } + + VertexNode *NewVertexNode() { + VertexNode *ret = vertex_node_pool_.construct(); + assert(ret); + return ret; + } + + void DeleteVertexNode(VertexNode *node) { + vertex_node_pool_.destroy(node); + } + + unsigned int PopLimit() const { return pop_limit_; } + + const Weights &GetWeights() const { return weights_; } + + private: + boost::object_pool final_pool_; + boost::object_pool vertex_node_pool_; + + unsigned int pop_limit_; + + const Weights &weights_; +}; + +template class Context : public ContextBase { + public: + Context(const Config &config, const Model &model) : ContextBase(config), model_(model) {} + + const Model &LanguageModel() const { return model_; } + + private: + const Model &model_; +}; + +} // namespace search + +#endif // SEARCH_CONTEXT__ diff --git a/search/edge.hh b/search/edge.hh new file mode 100644 index 
000000000..77ab0ade6 --- /dev/null +++ b/search/edge.hh @@ -0,0 +1,31 @@ +#ifndef SEARCH_EDGE__ +#define SEARCH_EDGE__ + +#include "lm/state.hh" +#include "search/arity.hh" +#include "search/rule.hh" +#include "search/types.hh" +#include "search/vertex.hh" + +#include + +namespace search { + +struct PartialEdge { + Score score; + // Terminals + lm::ngram::ChartState between[kMaxArity + 1]; + // Non-terminals + PartialVertex nt[kMaxArity]; + + const lm::ngram::ChartState &CompletedState() const { + return between[0]; + } + + bool operator<(const PartialEdge &other) const { + return score < other.score; + } +}; + +} // namespace search +#endif // SEARCH_EDGE__ diff --git a/search/edge_generator.cc b/search/edge_generator.cc new file mode 100644 index 000000000..56239dfbb --- /dev/null +++ b/search/edge_generator.cc @@ -0,0 +1,120 @@ +#include "search/edge_generator.hh" + +#include "lm/left.hh" +#include "lm/partial.hh" +#include "search/context.hh" +#include "search/vertex.hh" +#include "search/vertex_generator.hh" + +#include + +namespace search { + +EdgeGenerator::EdgeGenerator(PartialEdge &root, unsigned char arity, Note note) : arity_(arity), note_(note) { +/* for (unsigned char i = 0; i < edge.Arity(); ++i) { + root.nt[i] = edge.GetVertex(i).RootPartial(); + } + for (unsigned char i = edge.Arity(); i < 2; ++i) { + root.nt[i] = kBlankPartialVertex; + }*/ + generate_.push(&root); + top_score_ = root.score; +} + +namespace { + +template float FastScore(const Context &context, unsigned char victim, unsigned char arity, const PartialEdge &previous, PartialEdge &update) { + memcpy(update.between, previous.between, sizeof(lm::ngram::ChartState) * (arity + 1)); + + float ret = 0.0; + lm::ngram::ChartState *before, *after; + if (victim == 0) { + before = &update.between[0]; + after = &update.between[(arity == 2 && previous.nt[1].Complete()) ? 2 : 1]; + } else { + assert(victim == 1); + assert(arity == 2); + before = &update.between[previous.nt[0].Complete() ? 
0 : 1]; + after = &update.between[2]; + } + const lm::ngram::ChartState &previous_reveal = previous.nt[victim].State(); + const PartialVertex &update_nt = update.nt[victim]; + const lm::ngram::ChartState &update_reveal = update_nt.State(); + float just_after = 0.0; + if ((update_reveal.left.length > previous_reveal.left.length) || (update_reveal.left.full && !previous_reveal.left.full)) { + just_after += lm::ngram::RevealAfter(context.LanguageModel(), before->left, before->right, update_reveal.left, previous_reveal.left.length); + } + if ((update_reveal.right.length > previous_reveal.right.length) || (update_nt.RightFull() && !previous.nt[victim].RightFull())) { + ret += lm::ngram::RevealBefore(context.LanguageModel(), update_reveal.right, previous_reveal.right.length, update_nt.RightFull(), after->left, after->right); + } + if (update_nt.Complete()) { + if (update_reveal.left.full) { + before->left.full = true; + } else { + assert(update_reveal.left.length == update_reveal.right.length); + ret += lm::ngram::Subsume(context.LanguageModel(), before->left, before->right, after->left, after->right, update_reveal.left.length); + } + if (victim == 0) { + update.between[0].right = after->right; + } else { + update.between[2].left = before->left; + } + } + return previous.score + (ret + just_after) * context.GetWeights().LM(); +} + +} // namespace + +template PartialEdge *EdgeGenerator::Pop(Context &context, boost::pool<> &partial_edge_pool) { + assert(!generate_.empty()); + PartialEdge &top = *generate_.top(); + generate_.pop(); + unsigned int victim = 0; + unsigned char lowest_length = 255; + for (unsigned char i = 0; i != arity_; ++i) { + if (!top.nt[i].Complete() && top.nt[i].Length() < lowest_length) { + lowest_length = top.nt[i].Length(); + victim = i; + } + } + if (lowest_length == 255) { + // All states report complete. + top.between[0].right = top.between[arity_].right; + // Now top.between[0] is the full edge state. + top_score_ = generate_.empty() ? -kScoreInf : generate_.top()->score; + return ⊤ + } + + unsigned int stay = !victim; + PartialEdge &continuation = *static_cast(partial_edge_pool.malloc()); + float old_bound = top.nt[victim].Bound(); + // The alternate's score will change because alternate.nt[victim] changes. + bool split = top.nt[victim].Split(continuation.nt[victim]); + // top is now the alternate. + + continuation.nt[stay] = top.nt[stay]; + continuation.score = FastScore(context, victim, arity_, top, continuation); + // TODO: dedupe? + generate_.push(&continuation); + + if (split) { + // We have an alternate. + top.score += top.nt[victim].Bound() - old_bound; + // TODO: dedupe? 
+ generate_.push(&top); + } else { + partial_edge_pool.free(&top); + } + + top_score_ = generate_.top()->score; + return NULL; +} + +template PartialEdge *EdgeGenerator::Pop(Context &context, boost::pool<> &partial_edge_pool); +template PartialEdge *EdgeGenerator::Pop(Context &context, boost::pool<> &partial_edge_pool); +template PartialEdge *EdgeGenerator::Pop(Context &context, boost::pool<> &partial_edge_pool); +template PartialEdge *EdgeGenerator::Pop(Context &context, boost::pool<> &partial_edge_pool); +template PartialEdge *EdgeGenerator::Pop(Context &context, boost::pool<> &partial_edge_pool); +template PartialEdge *EdgeGenerator::Pop(Context &context, boost::pool<> &partial_edge_pool); + +} // namespace search diff --git a/search/edge_generator.hh b/search/edge_generator.hh new file mode 100644 index 000000000..875ccc5ea --- /dev/null +++ b/search/edge_generator.hh @@ -0,0 +1,58 @@ +#ifndef SEARCH_EDGE_GENERATOR__ +#define SEARCH_EDGE_GENERATOR__ + +#include "search/edge.hh" +#include "search/note.hh" + +#include +#include + +#include +#include + +namespace lm { +namespace ngram { +class ChartState; +} // namespace ngram +} // namespace lm + +namespace search { + +template class Context; + +class VertexGenerator; + +struct PartialEdgePointerLess : std::binary_function { + bool operator()(const PartialEdge *first, const PartialEdge *second) const { + return *first < *second; + } +}; + +class EdgeGenerator { + public: + EdgeGenerator(PartialEdge &root, unsigned char arity, Note note); + + Score TopScore() const { + return top_score_; + } + + Note GetNote() const { + return note_; + } + + // Pop. If there's a complete hypothesis, return it. Otherwise return NULL. + template PartialEdge *Pop(Context &context, boost::pool<> &partial_edge_pool); + + private: + Score top_score_; + + unsigned char arity_; + + typedef std::priority_queue, PartialEdgePointerLess> Generate; + Generate generate_; + + Note note_; +}; + +} // namespace search +#endif // SEARCH_EDGE_GENERATOR__ diff --git a/search/edge_queue.cc b/search/edge_queue.cc new file mode 100644 index 000000000..e3ae6ebf7 --- /dev/null +++ b/search/edge_queue.cc @@ -0,0 +1,25 @@ +#include "search/edge_queue.hh" + +#include "lm/left.hh" +#include "search/context.hh" + +#include + +namespace search { + +EdgeQueue::EdgeQueue(unsigned int pop_limit_hint) : partial_edge_pool_(sizeof(PartialEdge), pop_limit_hint * 2) { + take_ = static_cast(partial_edge_pool_.malloc()); +} + +/*void EdgeQueue::AddEdge(PartialEdge &root, unsigned char arity, Note note) { + // Ignore empty edges. 
+ for (unsigned char i = 0; i < edge.Arity(); ++i) { + PartialVertex root(edge.GetVertex(i).RootPartial()); + if (root.Empty()) return; + total_score += root.Bound(); + } + PartialEdge &allocated = *static_cast(partial_edge_pool_.malloc()); + allocated.score = total_score; +}*/ + +} // namespace search diff --git a/search/edge_queue.hh b/search/edge_queue.hh new file mode 100644 index 000000000..187eaed71 --- /dev/null +++ b/search/edge_queue.hh @@ -0,0 +1,73 @@ +#ifndef SEARCH_EDGE_QUEUE__ +#define SEARCH_EDGE_QUEUE__ + +#include "search/edge.hh" +#include "search/edge_generator.hh" +#include "search/note.hh" + +#include +#include + +#include + +namespace search { + +template class Context; + +class EdgeQueue { + public: + explicit EdgeQueue(unsigned int pop_limit_hint); + + PartialEdge &InitializeEdge() { + return *take_; + } + + void AddEdge(unsigned char arity, Note note) { + generate_.push(edge_pool_.construct(*take_, arity, note)); + take_ = static_cast(partial_edge_pool_.malloc()); + } + + bool Empty() const { return generate_.empty(); } + + /* Generate hypotheses and send them to output. Normally, output is a + * VertexGenerator, but the decoder may want to route edges to different + * vertices i.e. if they have different LHS non-terminal labels. + */ + template void Search(Context &context, Output &output) { + int to_pop = context.PopLimit(); + while (to_pop > 0 && !generate_.empty()) { + EdgeGenerator *top = generate_.top(); + generate_.pop(); + PartialEdge *ret = top->Pop(context, partial_edge_pool_); + if (ret) { + output.NewHypothesis(*ret, top->GetNote()); + --to_pop; + if (top->TopScore() != -kScoreInf) { + generate_.push(top); + } + } else { + generate_.push(top); + } + } + output.FinishedSearch(); + } + + private: + boost::object_pool edge_pool_; + + struct LessByTopScore : public std::binary_function { + bool operator()(const EdgeGenerator *first, const EdgeGenerator *second) const { + return first->TopScore() < second->TopScore(); + } + }; + + typedef std::priority_queue, LessByTopScore> Generate; + Generate generate_; + + boost::pool<> partial_edge_pool_; + + PartialEdge *take_; +}; + +} // namespace search +#endif // SEARCH_EDGE_QUEUE__ diff --git a/search/final.hh b/search/final.hh new file mode 100644 index 000000000..1b3092ac4 --- /dev/null +++ b/search/final.hh @@ -0,0 +1,39 @@ +#ifndef SEARCH_FINAL__ +#define SEARCH_FINAL__ + +#include "search/arity.hh" +#include "search/note.hh" +#include "search/types.hh" + +#include + +namespace search { + +class Final { + public: + typedef boost::array ChildArray; + + void Reset(Score bound, Note note, const Final &left, const Final &right) { + bound_ = bound; + note_ = note; + children_[0] = &left; + children_[1] = &right; + } + + const ChildArray &Children() const { return children_; } + + Note GetNote() const { return note_; } + + Score Bound() const { return bound_; } + + private: + Score bound_; + + Note note_; + + ChildArray children_; +}; + +} // namespace search + +#endif // SEARCH_FINAL__ diff --git a/search/note.hh b/search/note.hh new file mode 100644 index 000000000..50bed06ec --- /dev/null +++ b/search/note.hh @@ -0,0 +1,12 @@ +#ifndef SEARCH_NOTE__ +#define SEARCH_NOTE__ + +namespace search { + +union Note { + const void *vp; +}; + +} // namespace search + +#endif // SEARCH_NOTE__ diff --git a/search/rule.cc b/search/rule.cc new file mode 100644 index 000000000..5b00207ef --- /dev/null +++ b/search/rule.cc @@ -0,0 +1,43 @@ +#include "search/rule.hh" + +#include "search/context.hh" +#include "search/final.hh" + 
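+// A sketch of driving the EdgeQueue defined in edge_queue.hh (context,
+// vertex, arity, and note are assumed to exist; filling the PartialEdge
+// from a grammar rule is elided):
+//
+//   search::EdgeQueue queue(config.PopLimit());
+//   PartialEdge &edge = queue.InitializeEdge();
+//   // ... set edge.score, edge.nt, edge.between ...
+//   queue.AddEdge(arity, note);
+//   search::VertexGenerator out(context, vertex);
+//   queue.Search(context, out);  // pops up to PopLimit() hypotheses
+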
+#include + +#include + +namespace search { + +template float ScoreRule(const Context &context, const std::vector &words, bool prepend_bos, lm::ngram::ChartState *writing) { + unsigned int oov_count = 0; + float prob = 0.0; + const Model &model = context.LanguageModel(); + const lm::WordIndex oov = model.GetVocabulary().NotFound(); + for (std::vector::const_iterator word = words.begin(); ; ++word) { + lm::ngram::RuleScore scorer(model, *(writing++)); + // TODO: optimize + if (prepend_bos && (word == words.begin())) { + scorer.BeginSentence(); + } + for (; ; ++word) { + if (word == words.end()) { + prob += scorer.Finish(); + return static_cast(oov_count) * context.GetWeights().OOV() + prob * context.GetWeights().LM(); + } + if (*word == kNonTerminal) break; + if (*word == oov) ++oov_count; + scorer.Terminal(*word); + } + prob += scorer.Finish(); + } +} + +template float ScoreRule(const Context &model, const std::vector &words, bool prepend_bos, lm::ngram::ChartState *writing); +template float ScoreRule(const Context &model, const std::vector &words, bool prepend_bos, lm::ngram::ChartState *writing); +template float ScoreRule(const Context &model, const std::vector &words, bool prepend_bos, lm::ngram::ChartState *writing); +template float ScoreRule(const Context &model, const std::vector &words, bool prepend_bos, lm::ngram::ChartState *writing); +template float ScoreRule(const Context &model, const std::vector &words, bool prepend_bos, lm::ngram::ChartState *writing); +template float ScoreRule(const Context &model, const std::vector &words, bool prepend_bos, lm::ngram::ChartState *writing); + +} // namespace search diff --git a/search/rule.hh b/search/rule.hh new file mode 100644 index 000000000..0ce2794db --- /dev/null +++ b/search/rule.hh @@ -0,0 +1,20 @@ +#ifndef SEARCH_RULE__ +#define SEARCH_RULE__ + +#include "lm/left.hh" +#include "lm/word_index.hh" +#include "search/types.hh" + +#include + +namespace search { + +template class Context; + +const lm::WordIndex kNonTerminal = lm::kMaxWordIndex; + +template float ScoreRule(const Context &context, const std::vector &words, bool prepend_bos, lm::ngram::ChartState *state_out); + +} // namespace search + +#endif // SEARCH_RULE__ diff --git a/search/source.hh b/search/source.hh new file mode 100644 index 000000000..11839f7bc --- /dev/null +++ b/search/source.hh @@ -0,0 +1,48 @@ +#ifndef SEARCH_SOURCE__ +#define SEARCH_SOURCE__ + +#include "search/types.hh" + +#include +#include + +namespace search { + +template class Source { + public: + Source() : bound_(kScoreInf) {} + + Index Size() const { + return final_.size(); + } + + Score Bound() const { + return bound_; + } + + const Final &operator[](Index index) const { + return *final_[index]; + } + + Score ScoreOrBound(Index index) const { + return Size() > index ? final_[index]->Total() : Bound(); + } + + protected: + void AddFinal(const Final &store) { + final_.push_back(&store); + } + + void SetBound(Score to) { + assert(to <= bound_ + 0.001); + bound_ = to; + } + + private: + std::vector final_; + + Score bound_; +}; + +} // namespace search +#endif // SEARCH_SOURCE__ diff --git a/search/types.hh b/search/types.hh new file mode 100644 index 000000000..9726379fb --- /dev/null +++ b/search/types.hh @@ -0,0 +1,18 @@ +#ifndef SEARCH_TYPES__ +#define SEARCH_TYPES__ + +#include + +namespace search { + +typedef float Score; +const Score kScoreInf = INFINITY; + +// This could have been an enum but gcc wants 4 bytes. 
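+// A sketch of calling ScoreRule from rule.cc above (vocab and context
+// come from a loaded model; kNonTerminal marks each non-terminal gap):
+//
+//   std::vector<lm::WordIndex> words;
+//   words.push_back(vocab.Index("the"));
+//   words.push_back(search::kNonTerminal);
+//   words.push_back(vocab.Index("beach"));
+//   lm::ngram::ChartState parts[2];  // one per terminal run: arity + 1
+//   float s = search::ScoreRule(context, words, false, parts);
+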
+typedef bool ExtendDirection; +const ExtendDirection kExtendLeft = 0; +const ExtendDirection kExtendRight = 1; + +} // namespace search + +#endif // SEARCH_TYPES__ diff --git a/search/vertex.cc b/search/vertex.cc new file mode 100644 index 000000000..cc53c0dd5 --- /dev/null +++ b/search/vertex.cc @@ -0,0 +1,48 @@ +#include "search/vertex.hh" + +#include "search/context.hh" + +#include +#include + +#include + +namespace search { + +namespace { + +struct GreaterByBound : public std::binary_function { + bool operator()(const VertexNode *first, const VertexNode *second) const { + return first->Bound() > second->Bound(); + } +}; + +} // namespace + +void VertexNode::SortAndSet(ContextBase &context, VertexNode **parent_ptr) { + if (Complete()) { + assert(end_); + assert(extend_.empty()); + bound_ = end_->Bound(); + return; + } + if (extend_.size() == 1 && parent_ptr) { + *parent_ptr = extend_[0]; + extend_[0]->SortAndSet(context, parent_ptr); + context.DeleteVertexNode(this); + return; + } + for (std::vector::iterator i = extend_.begin(); i != extend_.end(); ++i) { + (*i)->SortAndSet(context, &*i); + } + std::sort(extend_.begin(), extend_.end(), GreaterByBound()); + bound_ = extend_.front()->Bound(); +} + +namespace { +VertexNode kBlankVertexNode; +} // namespace + +PartialVertex kBlankPartialVertex(kBlankVertexNode); + +} // namespace search diff --git a/search/vertex.hh b/search/vertex.hh new file mode 100644 index 000000000..e1a9ad113 --- /dev/null +++ b/search/vertex.hh @@ -0,0 +1,158 @@ +#ifndef SEARCH_VERTEX__ +#define SEARCH_VERTEX__ + +#include "lm/left.hh" +#include "search/final.hh" +#include "search/types.hh" + +#include + +#include +#include + +#include + +namespace search { + +class ContextBase; + +class VertexNode { + public: + VertexNode() : end_(NULL) {} + + void InitRoot() { + extend_.clear(); + state_.left.full = false; + state_.left.length = 0; + state_.right.length = 0; + right_full_ = false; + bound_ = -kScoreInf; + end_ = NULL; + } + + lm::ngram::ChartState &MutableState() { return state_; } + bool &MutableRightFull() { return right_full_; } + + void AddExtend(VertexNode *next) { + extend_.push_back(next); + } + + void SetEnd(Final *end) { end_ = end; } + + Final &MutableEnd() { return *end_; } + + void SortAndSet(ContextBase &context, VertexNode **parent_pointer); + + // Should only happen to a root node when the entire vertex is empty. + bool Empty() const { + return !end_ && extend_.empty(); + } + + bool Complete() const { + return end_; + } + + const lm::ngram::ChartState &State() const { return state_; } + bool RightFull() const { return right_full_; } + + Score Bound() const { + return bound_; + } + + unsigned char Length() const { + return state_.left.length + state_.right.length; + } + + // May be NULL. + const Final *End() const { return end_; } + + const VertexNode &operator[](size_t index) const { + return *extend_[index]; + } + + size_t Size() const { + return extend_.size(); + } + + private: + std::vector extend_; + + lm::ngram::ChartState state_; + bool right_full_; + + Score bound_; + Final *end_; +}; + +class PartialVertex { + public: + PartialVertex() {} + + explicit PartialVertex(const VertexNode &back) : back_(&back), index_(0) {} + + bool Empty() const { return back_->Empty(); } + + bool Complete() const { return back_->Complete(); } + + const lm::ngram::ChartState &State() const { return back_->State(); } + bool RightFull() const { return back_->RightFull(); } + + Score Bound() const { return Complete() ? 
back_->End()->Bound() : (*back_)[index_].Bound(); } + + unsigned char Length() const { return back_->Length(); } + + bool HasAlternative() const { + return index_ + 1 < back_->Size(); + } + + // Split into continuation and alternative, rendering this the alternative. + bool Split(PartialVertex &continuation) { + assert(!Complete()); + continuation.back_ = &((*back_)[index_]); + continuation.index_ = 0; + if (index_ + 1 < back_->Size()) { + ++index_; + return true; + } + return false; + } + + const Final &End() const { + return *back_->End(); + } + + private: + const VertexNode *back_; + unsigned int index_; +}; + +extern PartialVertex kBlankPartialVertex; + +class Vertex { + public: + Vertex() {} + + PartialVertex RootPartial() const { return PartialVertex(root_); } + + const Final *BestChild() const { + PartialVertex top(RootPartial()); + if (top.Empty()) { + return NULL; + } else { + PartialVertex continuation; + while (!top.Complete()) { + top.Split(continuation); + top = continuation; + } + return &top.End(); + } + } + + private: + friend class VertexGenerator; + + VertexNode root_; +}; + +} // namespace search +#endif // SEARCH_VERTEX__ diff --git a/search/vertex_generator.cc b/search/vertex_generator.cc new file mode 100644 index 000000000..d94e6e06e --- /dev/null +++ b/search/vertex_generator.cc @@ -0,0 +1,83 @@ +#include "search/vertex_generator.hh" + +#include "lm/left.hh" +#include "search/context.hh" +#include "search/edge.hh" + +#include + +namespace search { + +VertexGenerator::VertexGenerator(ContextBase &context, Vertex &gen) : context_(context), gen_(gen) { + gen.root_.InitRoot(); + root_.under = &gen.root_; +} + +namespace { +const uint64_t kCompleteAdd = static_cast(-1); +} // namespace + +void VertexGenerator::NewHypothesis(const PartialEdge &partial, Note note) { + const lm::ngram::ChartState &state = partial.CompletedState(); + std::pair got(existing_.insert(std::pair(hash_value(state), NULL))); + if (!got.second) { + // Found it already. 
+ Final &exists = *got.first->second; + if (exists.Bound() < partial.score) { + exists.Reset(partial.score, note, partial.nt[0].End(), partial.nt[1].End()); + } + return; + } + unsigned char left = 0, right = 0; + Trie *node = &root_; + while (true) { + if (left == state.left.length) { + node = &FindOrInsert(*node, kCompleteAdd - state.left.full, state, left, true, right, false); + for (; right < state.right.length; ++right) { + node = &FindOrInsert(*node, state.right.words[right], state, left, true, right + 1, false); + } + break; + } + node = &FindOrInsert(*node, state.left.pointers[left], state, left + 1, false, right, false); + left++; + if (right == state.right.length) { + node = &FindOrInsert(*node, kCompleteAdd - state.left.full, state, left, false, right, true); + for (; left < state.left.length; ++left) { + node = &FindOrInsert(*node, state.left.pointers[left], state, left + 1, false, right, true); + } + break; + } + node = &FindOrInsert(*node, state.right.words[right], state, left, false, right + 1, false); + right++; + } + + node = &FindOrInsert(*node, kCompleteAdd - state.left.full, state, state.left.length, true, state.right.length, true); + got.first->second = CompleteTransition(*node, state, note, partial); +} + +VertexGenerator::Trie &VertexGenerator::FindOrInsert(VertexGenerator::Trie &node, uint64_t added, const lm::ngram::ChartState &state, unsigned char left, bool left_full, unsigned char right, bool right_full) { + VertexGenerator::Trie &next = node.extend[added]; + if (!next.under) { + next.under = context_.NewVertexNode(); + lm::ngram::ChartState &writing = next.under->MutableState(); + writing = state; + writing.left.full &= left_full && state.left.full; + next.under->MutableRightFull() = right_full && state.left.full; + writing.left.length = left; + writing.right.length = right; + node.under->AddExtend(next.under); + } + return next; +} + +Final *VertexGenerator::CompleteTransition(VertexGenerator::Trie &starter, const lm::ngram::ChartState &state, Note note, const PartialEdge &partial) { + VertexNode &node = *starter.under; + assert(node.State().left.full == state.left.full); + assert(!node.End()); + Final *final = context_.NewFinal(); + final->Reset(partial.score, note, partial.nt[0].End(), partial.nt[1].End()); + node.SetEnd(final); + return final; +} + +} // namespace search diff --git a/search/vertex_generator.hh b/search/vertex_generator.hh new file mode 100644 index 000000000..6b98da3e3 --- /dev/null +++ b/search/vertex_generator.hh @@ -0,0 +1,59 @@ +#ifndef SEARCH_VERTEX_GENERATOR__ +#define SEARCH_VERTEX_GENERATOR__ + +#include "search/note.hh" +#include "search/vertex.hh" + +#include + +#include + +namespace lm { +namespace ngram { +class ChartState; +} // namespace ngram +} // namespace lm + +namespace search { + +class ContextBase; +class Final; +struct PartialEdge; + +class VertexGenerator { + public: + VertexGenerator(ContextBase &context, Vertex &gen); + + void NewHypothesis(const PartialEdge &partial, Note note); + + void FinishedSearch() { + root_.under->SortAndSet(context_, NULL); + } + + const Vertex &Generating() const { return gen_; } + + private: + // Parallel structure to VertexNode. 
+ struct Trie { + Trie() : under(NULL) {} + + VertexNode *under; + boost::unordered_map extend; + }; + + Trie &FindOrInsert(Trie &node, uint64_t added, const lm::ngram::ChartState &state, unsigned char left, bool left_full, unsigned char right, bool right_full); + + Final *CompleteTransition(Trie &node, const lm::ngram::ChartState &state, Note note, const PartialEdge &partial); + + ContextBase &context_; + + Vertex &gen_; + + Trie root_; + + typedef boost::unordered_map Existing; + Existing existing_; +}; + +} // namespace search +#endif // SEARCH_VERTEX_GENERATOR__ diff --git a/search/weights.cc b/search/weights.cc new file mode 100644 index 000000000..d65471ad7 --- /dev/null +++ b/search/weights.cc @@ -0,0 +1,71 @@ +#include "search/weights.hh" +#include "util/tokenize_piece.hh" + +#include + +namespace search { + +namespace { +struct Insert { + void operator()(boost::unordered_map &map, StringPiece name, search::Score score) const { + std::string copy(name.data(), name.size()); + map[copy] = score; + } +}; + +struct DotProduct { + search::Score total; + DotProduct() : total(0.0) {} + + void operator()(const boost::unordered_map &map, StringPiece name, search::Score score) { + boost::unordered_map::const_iterator i(FindStringPiece(map, name)); + if (i != map.end()) + total += score * i->second; + } +}; + +template void Parse(StringPiece text, Map &map, Op &op) { + for (util::TokenIter spaces(text, ' '); spaces; ++spaces) { + util::TokenIter equals(*spaces, '='); + UTIL_THROW_IF(!equals, WeightParseException, "Bad weight token " << *spaces); + StringPiece name(*equals); + UTIL_THROW_IF(!++equals, WeightParseException, "Bad weight token " << *spaces); + char *end; + // Assumes proper termination. + double value = std::strtod(equals->data(), &end); + UTIL_THROW_IF(end != equals->data() + equals->size(), WeightParseException, "Failed to parse weight" << *equals); + UTIL_THROW_IF(++equals, WeightParseException, "Too many equals in " << *spaces); + op(map, name, value); + } +} + +} // namespace + +Weights::Weights(StringPiece text) { + Insert op; + Parse(text, map_, op); + lm_ = Steal("LanguageModel"); + oov_ = Steal("OOV"); + word_penalty_ = Steal("WordPenalty"); +} + +Weights::Weights(Score lm, Score oov, Score word_penalty) : lm_(lm), oov_(oov), word_penalty_(word_penalty) {} + +search::Score Weights::DotNoLM(StringPiece text) const { + DotProduct dot; + Parse(text, map_, dot); + return dot.total; +} + +float Weights::Steal(const std::string &str) { + Map::iterator i(map_.find(str)); + if (i == map_.end()) { + return 0.0; + } else { + float ret = i->second; + map_.erase(i); + return ret; + } +} + +} // namespace search diff --git a/search/weights.hh b/search/weights.hh new file mode 100644 index 000000000..df1c419f0 --- /dev/null +++ b/search/weights.hh @@ -0,0 +1,52 @@ +// For now, the individual features are not kept. +#ifndef SEARCH_WEIGHTS__ +#define SEARCH_WEIGHTS__ + +#include "search/types.hh" +#include "util/exception.hh" +#include "util/string_piece.hh" + +#include + +#include + +namespace search { + +class WeightParseException : public util::Exception { + public: + WeightParseException() {} + ~WeightParseException() throw() {} +}; + +class Weights { + public: + // Parses weights, sets lm_weight_, removes it from map_. + explicit Weights(StringPiece text); + + // Just the three scores we care about adding. 
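+    // Parsing sketch: input is space-separated name=value pairs.
+    // LanguageModel, OOV, and WordPenalty are stolen out of the map;
+    // everything else stays behind for DotNoLM. E.g. (made-up values):
+    //
+    //   search::Weights w("LanguageModel=3 OOV=-1 WordPenalty=-0.5 glue=2");
+    //   w.LM();              // 3
+    //   w.DotNoLM("glue=4"); // 2 * 4 = 8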
+ Weights(Score lm, Score oov, Score word_penalty); + + Score DotNoLM(StringPiece text) const; + + Score LM() const { return lm_; } + + Score OOV() const { return oov_; } + + Score WordPenalty() const { return word_penalty_; } + + // Mostly for testing. + const boost::unordered_map &GetMap() const { return map_; } + + private: + float Steal(const std::string &str); + + typedef boost::unordered_map Map; + + Map map_; + + Score lm_, oov_, word_penalty_; +}; + +} // namespace search + +#endif // SEARCH_WEIGHTS__ diff --git a/search/weights_test.cc b/search/weights_test.cc new file mode 100644 index 000000000..4811ff060 --- /dev/null +++ b/search/weights_test.cc @@ -0,0 +1,38 @@ +#include "search/weights.hh" + +#define BOOST_TEST_MODULE WeightTest +#include +#include + +namespace search { +namespace { + +#define CHECK_WEIGHT(value, string) \ + i = parsed.find(string); \ + BOOST_REQUIRE(i != parsed.end()); \ + BOOST_CHECK_CLOSE((value), i->second, 0.001); + +BOOST_AUTO_TEST_CASE(parse) { + // These are not real feature weights. + Weights w("rarity=0 phrase-SGT=0 phrase-TGS=9.45117 lhsGrhs=0 lexical-SGT=2.33833 lexical-TGS=-28.3317 abstract?=0 LanguageModel=3 lexical?=1 glue?=5"); + const boost::unordered_map &parsed = w.GetMap(); + boost::unordered_map::const_iterator i; + CHECK_WEIGHT(0.0, "rarity"); + CHECK_WEIGHT(0.0, "phrase-SGT"); + CHECK_WEIGHT(9.45117, "phrase-TGS"); + CHECK_WEIGHT(2.33833, "lexical-SGT"); + BOOST_CHECK(parsed.end() == parsed.find("lm")); + BOOST_CHECK_CLOSE(3.0, w.LM(), 0.001); + CHECK_WEIGHT(-28.3317, "lexical-TGS"); + CHECK_WEIGHT(5.0, "glue?"); +} + +BOOST_AUTO_TEST_CASE(dot) { + Weights w("rarity=0 phrase-SGT=0 phrase-TGS=9.45117 lhsGrhs=0 lexical-SGT=2.33833 lexical-TGS=-28.3317 abstract?=0 LanguageModel=3 lexical?=1 glue?=5"); + BOOST_CHECK_CLOSE(9.45117 * 3.0, w.DotNoLM("phrase-TGS=3.0"), 0.001); + BOOST_CHECK_CLOSE(9.45117 * 3.0, w.DotNoLM("phrase-TGS=3.0 LanguageModel=10"), 0.001); + BOOST_CHECK_CLOSE(9.45117 * 3.0 + 28.3317 * 17.4, w.DotNoLM("rarity=5 phrase-TGS=3.0 LanguageModel=10 lexical-TGS=-17.4"), 0.001); +} + +} // namespace +} // namespace search diff --git a/util/Jamfile b/util/Jamfile new file mode 100644 index 000000000..2b76b2b1b --- /dev/null +++ b/util/Jamfile @@ -0,0 +1,10 @@ +lib kenutil : bit_packing.cc ersatz_progress.cc exception.cc file.cc file_piece.cc mmap.cc murmur_hash.cc string_piece.cc usage.cc /top//z : .. : : .. ; + +import testing ; + +unit-test bit_packing_test : bit_packing_test.cc kenutil /top//boost_unit_test_framework ; +run file_piece_test.cc kenutil /top//boost_unit_test_framework : : file_piece.cc ; +unit-test joint_sort_test : joint_sort_test.cc kenutil /top//boost_unit_test_framework ; +unit-test probing_hash_table_test : probing_hash_table_test.cc kenutil /top//boost_unit_test_framework ; +unit-test sorted_uniform_test : sorted_uniform_test.cc kenutil /top//boost_unit_test_framework ; +unit-test tokenize_piece_test : tokenize_piece_test.cc kenutil /top//boost_unit_test_framework ; diff --git a/util/bit_packing.cc b/util/bit_packing.cc new file mode 100644 index 000000000..41999b726 --- /dev/null +++ b/util/bit_packing.cc @@ -0,0 +1,40 @@ +#include "util/bit_packing.hh" +#include "util/exception.hh" + +#include + +namespace util { + +namespace { +template struct StaticCheck {}; +template <> struct StaticCheck { typedef bool StaticAssertionPassed; }; + +// If your float isn't 4 bytes, we're hosed. 
+typedef StaticCheck::StaticAssertionPassed FloatSize; + +} // namespace + +uint8_t RequiredBits(uint64_t max_value) { + if (!max_value) return 0; + uint8_t ret = 1; + while (max_value >>= 1) ++ret; + return ret; +} + +void BitPackingSanity() { + const FloatEnc neg1 = { -1.0 }, pos1 = { 1.0 }; + if ((neg1.i ^ pos1.i) != 0x80000000) UTIL_THROW(Exception, "Sign bit is not 0x80000000"); + char mem[57+8]; + memset(mem, 0, sizeof(mem)); + const uint64_t test57 = 0x123456789abcdefULL; + for (uint64_t b = 0; b < 57 * 8; b += 57) { + WriteInt57(mem, b, 57, test57); + } + for (uint64_t b = 0; b < 57 * 8; b += 57) { + if (test57 != ReadInt57(mem, b, 57, (1ULL << 57) - 1)) + UTIL_THROW(Exception, "The bit packing routines are failing for your architecture. Please send a bug report with your architecture, operating system, and compiler."); + } + // TODO: more checks. +} + +} // namespace util diff --git a/util/bit_packing.hh b/util/bit_packing.hh new file mode 100644 index 000000000..dcbd814c3 --- /dev/null +++ b/util/bit_packing.hh @@ -0,0 +1,186 @@ +#ifndef UTIL_BIT_PACKING__ +#define UTIL_BIT_PACKING__ + +/* Bit-level packing routines + * + * WARNING WARNING WARNING: + * The write functions assume that memory is zero initially. This makes them + * faster and is the appropriate case for mmapped language model construction. + * These routines assume that unaligned access to uint64_t is fast. This is + * the case on x86_64. I'm not sure how fast unaligned 64-bit access is on + * x86 but my target audience is large language models for which 64-bit is + * necessary. + * + * Call the BitPackingSanity function to sanity check. Calling once suffices, + * but it may be called multiple times when that's inconvenient. + * + * ARM and MinGW ports contributed by Hideo Okuma and Tomoyuki Yoshimura at + * NICT. + */ + +#include +#ifdef __APPLE__ +#include +#elif __linux__ +#include +#elif !defined(_WIN32) && !defined(_WIN64) +#include +#endif + +#include + +#include + +namespace util { + +// Fun fact: __BYTE_ORDER is wrong on Solaris Sparc, but the version without __ is correct. +#if BYTE_ORDER == LITTLE_ENDIAN +inline uint8_t BitPackShift(uint8_t bit, uint8_t /*length*/) { + return bit; +} +#elif BYTE_ORDER == BIG_ENDIAN +inline uint8_t BitPackShift(uint8_t bit, uint8_t length) { + return 64 - length - bit; +} +#else +#error "Bit packing code isn't written for your byte order." +#endif + +inline uint64_t ReadOff(const void *base, uint64_t bit_off) { +#if defined(__arm) || defined(__arm__) + const uint8_t *base_off = reinterpret_cast(base) + (bit_off >> 3); + uint64_t value64; + memcpy(&value64, base_off, sizeof(value64)); + return value64; +#else + return *reinterpret_cast(reinterpret_cast(base) + (bit_off >> 3)); +#endif +} + +/* Pack integers up to 57 bits using their least significant digits. + * The length is specified using mask: + * Assumes mask == (1 << length) - 1 where length <= 57. + */ +inline uint64_t ReadInt57(const void *base, uint64_t bit_off, uint8_t length, uint64_t mask) { + return (ReadOff(base, bit_off) >> BitPackShift(bit_off & 7, length)) & mask; +} +/* Assumes value < (1 << length) and length <= 57. + * Assumes the memory is zero initially. 
+ */ +inline void WriteInt57(void *base, uint64_t bit_off, uint8_t length, uint64_t value) { +#if defined(__arm) || defined(__arm__) + uint8_t *base_off = reinterpret_cast(base) + (bit_off >> 3); + uint64_t value64; + memcpy(&value64, base_off, sizeof(value64)); + value64 |= (value << BitPackShift(bit_off & 7, length)); + memcpy(base_off, &value64, sizeof(value64)); +#else + *reinterpret_cast(reinterpret_cast(base) + (bit_off >> 3)) |= + (value << BitPackShift(bit_off & 7, length)); +#endif +} + +/* Same caveats as above, but for a 25 bit limit. */ +inline uint32_t ReadInt25(const void *base, uint64_t bit_off, uint8_t length, uint32_t mask) { +#if defined(__arm) || defined(__arm__) + const uint8_t *base_off = reinterpret_cast(base) + (bit_off >> 3); + uint32_t value32; + memcpy(&value32, base_off, sizeof(value32)); + return (value32 >> BitPackShift(bit_off & 7, length)) & mask; +#else + return (*reinterpret_cast(reinterpret_cast(base) + (bit_off >> 3)) >> BitPackShift(bit_off & 7, length)) & mask; +#endif +} + +inline void WriteInt25(void *base, uint64_t bit_off, uint8_t length, uint32_t value) { +#if defined(__arm) || defined(__arm__) + uint8_t *base_off = reinterpret_cast(base) + (bit_off >> 3); + uint32_t value32; + memcpy(&value32, base_off, sizeof(value32)); + value32 |= (value << BitPackShift(bit_off & 7, length)); + memcpy(base_off, &value32, sizeof(value32)); +#else + *reinterpret_cast(reinterpret_cast(base) + (bit_off >> 3)) |= + (value << BitPackShift(bit_off & 7, length)); +#endif +} + +typedef union { float f; uint32_t i; } FloatEnc; + +inline float ReadFloat32(const void *base, uint64_t bit_off) { + FloatEnc encoded; + encoded.i = ReadOff(base, bit_off) >> BitPackShift(bit_off & 7, 32); + return encoded.f; +} +inline void WriteFloat32(void *base, uint64_t bit_off, float value) { + FloatEnc encoded; + encoded.f = value; + WriteInt57(base, bit_off, 32, encoded.i); +} + +const uint32_t kSignBit = 0x80000000; + +inline void SetSign(float &to) { + FloatEnc enc; + enc.f = to; + enc.i |= kSignBit; + to = enc.f; +} + +inline void UnsetSign(float &to) { + FloatEnc enc; + enc.f = to; + enc.i &= ~kSignBit; + to = enc.f; +} + +inline float ReadNonPositiveFloat31(const void *base, uint64_t bit_off) { + FloatEnc encoded; + encoded.i = ReadOff(base, bit_off) >> BitPackShift(bit_off & 7, 31); + // Sign bit set means negative. + encoded.i |= kSignBit; + return encoded.f; +} +inline void WriteNonPositiveFloat31(void *base, uint64_t bit_off, float value) { + FloatEnc encoded; + encoded.f = value; + encoded.i &= ~kSignBit; + WriteInt57(base, bit_off, 31, encoded.i); +} + +void BitPackingSanity(); + +// Return bits required to store integers upto max_value. Not the most +// efficient implementation, but this is only called a few times to size tries. 
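+// A round-trip sketch under the stated assumptions (zeroed memory,
+// length <= 57; BitsMask is declared just below):
+//
+//   char mem[16];
+//   memset(mem, 0, sizeof(mem));
+//   util::BitsMask bm = util::BitsMask::ByMax(100);  // bits = 7
+//   for (uint64_t i = 0; i < 5; ++i)
+//     util::WriteInt57(mem, i * bm.bits, bm.bits, i * 20);
+//   util::ReadInt57(mem, 3 * bm.bits, bm.bits, bm.mask);  // == 60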
+uint8_t RequiredBits(uint64_t max_value); + +struct BitsMask { + static BitsMask ByMax(uint64_t max_value) { + BitsMask ret; + ret.FromMax(max_value); + return ret; + } + static BitsMask ByBits(uint8_t bits) { + BitsMask ret; + ret.bits = bits; + ret.mask = (1ULL << bits) - 1; + return ret; + } + void FromMax(uint64_t max_value) { + bits = RequiredBits(max_value); + mask = (1ULL << bits) - 1; + } + uint8_t bits; + uint64_t mask; +}; + +struct BitAddress { + BitAddress(void *in_base, uint64_t in_offset) : base(in_base), offset(in_offset) {} + + void *base; + uint64_t offset; +}; + +} // namespace util + +#endif // UTIL_BIT_PACKING__ diff --git a/util/bit_packing_test.cc b/util/bit_packing_test.cc new file mode 100644 index 000000000..4edc2004c --- /dev/null +++ b/util/bit_packing_test.cc @@ -0,0 +1,59 @@ +#include "util/bit_packing.hh" + +#define BOOST_TEST_MODULE BitPackingTest +#include + +#include + +namespace util { +namespace { + +const uint64_t test57 = 0x123456789abcdefULL; +const uint32_t test25 = 0x1234567; + +BOOST_AUTO_TEST_CASE(ZeroBit57) { + char mem[16]; + memset(mem, 0, sizeof(mem)); + WriteInt57(mem, 0, 57, test57); + BOOST_CHECK_EQUAL(test57, ReadInt57(mem, 0, 57, (1ULL << 57) - 1)); +} + +BOOST_AUTO_TEST_CASE(EachBit57) { + char mem[16]; + for (uint8_t b = 0; b < 8; ++b) { + memset(mem, 0, sizeof(mem)); + WriteInt57(mem, b, 57, test57); + BOOST_CHECK_EQUAL(test57, ReadInt57(mem, b, 57, (1ULL << 57) - 1)); + } +} + +BOOST_AUTO_TEST_CASE(Consecutive57) { + char mem[57+8]; + memset(mem, 0, sizeof(mem)); + for (uint64_t b = 0; b < 57 * 8; b += 57) { + WriteInt57(mem, b, 57, test57); + BOOST_CHECK_EQUAL(test57, ReadInt57(mem, b, 57, (1ULL << 57) - 1)); + } + for (uint64_t b = 0; b < 57 * 8; b += 57) { + BOOST_CHECK_EQUAL(test57, ReadInt57(mem, b, 57, (1ULL << 57) - 1)); + } +} + +BOOST_AUTO_TEST_CASE(Consecutive25) { + char mem[25+8]; + memset(mem, 0, sizeof(mem)); + for (uint64_t b = 0; b < 25 * 8; b += 25) { + WriteInt25(mem, b, 25, test25); + BOOST_CHECK_EQUAL(test25, ReadInt25(mem, b, 25, (1ULL << 25) - 1)); + } + for (uint64_t b = 0; b < 25 * 8; b += 25) { + BOOST_CHECK_EQUAL(test25, ReadInt25(mem, b, 25, (1ULL << 25) - 1)); + } +} + +BOOST_AUTO_TEST_CASE(Sanity) { + BitPackingSanity(); +} + +} // namespace +} // namespace util diff --git a/util/check.hh b/util/check.hh new file mode 100644 index 000000000..2c63b5630 --- /dev/null +++ b/util/check.hh @@ -0,0 +1,21 @@ +/* People have been abusing assert by assuming it will always execute. To + * rememdy the situation, asserts were replaced with CHECK. These should then + * be manually replaced with assert (when used correctly) or UTIL_THROW (for + * runtime checks). 
diff --git a/util/check.hh b/util/check.hh
new file mode 100644
index 000000000..2c63b5630
--- /dev/null
+++ b/util/check.hh
@@ -0,0 +1,21 @@
+/* People have been abusing assert by assuming it will always execute.  To
+ * remedy the situation, asserts were replaced with CHECK.  These should then
+ * be manually replaced with assert (when used correctly) or UTIL_THROW (for
+ * runtime checks).
+ */
+#ifndef UTIL_CHECK__
+#define UTIL_CHECK__
+
+#include <stdlib.h>
+#include <iostream>
+
+#include <cassert>
+
+#define CHECK(Condition) do { \
+  if (!(Condition)) { \
+    std::cerr << "Check " << #Condition << " failed in " << __FILE__ << ":" << __LINE__ << std::endl; \
+    abort(); \
+  } \
+} while (0) // swallow ;
+
+#endif // UTIL_CHECK__
diff --git a/util/ersatz_progress.cc b/util/ersatz_progress.cc
new file mode 100644
index 000000000..eb635ad8a
--- /dev/null
+++ b/util/ersatz_progress.cc
@@ -0,0 +1,45 @@
+#include "util/ersatz_progress.hh"
+
+#include <algorithm>
+#include <ostream>
+#include <limits>
+#include <string>
+
+namespace util {
+
+namespace { const unsigned char kWidth = 100; }
+
+ErsatzProgress::ErsatzProgress() : current_(0), next_(std::numeric_limits<uint64_t>::max()), complete_(next_), out_(NULL) {}
+
+ErsatzProgress::~ErsatzProgress() {
+  if (out_) Finished();
+}
+
+ErsatzProgress::ErsatzProgress(uint64_t complete, std::ostream *to, const std::string &message)
+  : current_(0), next_(complete / kWidth), complete_(complete), stones_written_(0), out_(to) {
+  if (!out_) {
+    next_ = std::numeric_limits<uint64_t>::max();
+    return;
+  }
+  if (!message.empty()) *out_ << message << '\n';
+  *out_ << "----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n";
+}
+
+void ErsatzProgress::Milestone() {
+  if (!out_) { current_ = 0; return; }
+  if (!complete_) return;
+  unsigned char stone = std::min(static_cast<uint64_t>(kWidth), (current_ * kWidth) / complete_);
+
+  for (; stones_written_ < stone; ++stones_written_) {
+    (*out_) << '*';
+  }
+  if (stone == kWidth) {
+    (*out_) << std::endl;
+    next_ = std::numeric_limits<uint64_t>::max();
+    out_ = NULL;
+  } else {
+    next_ = std::max(next_, (stone * complete_) / kWidth);
+  }
+}
+
+} // namespace util
diff --git a/util/ersatz_progress.hh b/util/ersatz_progress.hh
new file mode 100644
index 000000000..ff4d590ff
--- /dev/null
+++ b/util/ersatz_progress.hh
@@ -0,0 +1,56 @@
+#ifndef UTIL_ERSATZ_PROGRESS__
+#define UTIL_ERSATZ_PROGRESS__
+
+#include <iostream>
+#include <string>
+
+#include <stdint.h>
+
+// Ersatz version of boost::progress so the core language model doesn't depend
+// on boost.  Also adds the option to print nothing.
+
+namespace util {
+class ErsatzProgress {
+  public:
+    // No output.
+    ErsatzProgress();
+
+    // Null means no output.  The null value is useful for passing along the ostream pointer from another caller.
+ explicit ErsatzProgress(uint64_t complete, std::ostream *to = &std::cerr, const std::string &message = ""); + + ~ErsatzProgress(); + + ErsatzProgress &operator++() { + if (++current_ >= next_) Milestone(); + return *this; + } + + ErsatzProgress &operator+=(uint64_t amount) { + if ((current_ += amount) >= next_) Milestone(); + return *this; + } + + void Set(uint64_t to) { + if ((current_ = to) >= next_) Milestone(); + Milestone(); + } + + void Finished() { + Set(complete_); + } + + private: + void Milestone(); + + uint64_t current_, next_, complete_; + unsigned char stones_written_; + std::ostream *out_; + + // noncopyable + ErsatzProgress(const ErsatzProgress &other); + ErsatzProgress &operator=(const ErsatzProgress &other); +}; + +} // namespace util + +#endif // UTIL_ERSATZ_PROGRESS__ diff --git a/util/exception.cc b/util/exception.cc new file mode 100644 index 000000000..3806e6de4 --- /dev/null +++ b/util/exception.cc @@ -0,0 +1,90 @@ +#include "util/exception.hh" + +#ifdef __GXX_RTTI +#include +#endif + +#include +#include + +namespace util { + +Exception::Exception() throw() {} +Exception::~Exception() throw() {} + +Exception::Exception(const Exception &from) : std::exception() { + stream_ << from.stream_.str(); +} + +Exception &Exception::operator=(const Exception &from) { + stream_ << from.stream_.str(); + return *this; +} + +const char *Exception::what() const throw() { + text_ = stream_.str(); + return text_.c_str(); +} + +void Exception::SetLocation(const char *file, unsigned int line, const char *func, const char *child_name, const char *condition) { + /* The child class might have set some text, but we want this to come first. + * Another option would be passing this information to the constructor, but + * then child classes would have to accept constructor arguments and pass + * them down. + */ + text_ = stream_.str(); + stream_.str(""); + stream_ << file << ':' << line; + if (func) stream_ << " in " << func << " threw "; + if (child_name) { + stream_ << child_name; + } else { +#ifdef __GXX_RTTI + stream_ << typeid(this).name(); +#else + stream_ << "an exception"; +#endif + } + if (condition) stream_ << " because `" << condition; + stream_ << "'.\n"; + stream_ << text_; +} + +namespace { +// The XOPEN version. +const char *HandleStrerror(int ret, const char *buf) { + if (!ret) return buf; + return NULL; +} + +// The GNU version. 
+const char *HandleStrerror(const char *ret, const char * /*buf*/) {
+  return ret;
+}
+} // namespace
+
+ErrnoException::ErrnoException() throw() : errno_(errno) {
+  char buf[200];
+  buf[0] = 0;
+#if defined(sun) || defined(_WIN32) || defined(_WIN64)
+  const char *add = strerror(errno);
+#else
+  const char *add = HandleStrerror(strerror_r(errno, buf, 200), buf);
+#endif
+
+  if (add) {
+    *this << add << ' ';
+  }
+}
+
+ErrnoException::~ErrnoException() throw() {}
+
+EndOfFileException::EndOfFileException() throw() {
+  *this << "End of file";
+}
+EndOfFileException::~EndOfFileException() throw() {}
+
+OverflowException::OverflowException() throw() {}
+OverflowException::~OverflowException() throw() {}
+
+} // namespace util
diff --git a/util/exception.hh b/util/exception.hh
new file mode 100644
index 000000000..83f99cd6f
--- /dev/null
+++ b/util/exception.hh
@@ -0,0 +1,138 @@
+#ifndef UTIL_EXCEPTION__
+#define UTIL_EXCEPTION__
+
+#include <exception>
+#include <limits>
+#include <sstream>
+#include <string>
+
+#include <stdint.h>
+
+namespace util {
+
+template <class Except, class Data> typename Except::template ExceptionTag<Except &>::Identity operator<<(Except &e, const Data &data);
+
+class Exception : public std::exception {
+  public:
+    Exception() throw();
+    virtual ~Exception() throw();
+
+    Exception(const Exception &from);
+    Exception &operator=(const Exception &from);
+
+    // Not threadsafe, but probably doesn't matter.  FWIW, Boost's exception guidance implies that what() isn't threadsafe.
+    const char *what() const throw();
+
+    // For use by the UTIL_THROW macros.
+    void SetLocation(
+        const char *file,
+        unsigned int line,
+        const char *func,
+        const char *child_name,
+        const char *condition);
+
+  private:
+    template <class Except, class Data> friend typename Except::template ExceptionTag<Except &>::Identity operator<<(Except &e, const Data &data);
+
+    // This helps restrict operator<< defined below.
+    template <class T> struct ExceptionTag {
+      typedef T Identity;
+    };
+
+    std::stringstream stream_;
+    mutable std::string text_;
+};
+
+/* This implements the normal operator<< for Exception and all its children.
+ * SFINAE means it only applies to Exception.  Think of this as an ersatz
+ * boost::enable_if.
+ */
+template <class Except, class Data> typename Except::template ExceptionTag<Except &>::Identity operator<<(Except &e, const Data &data) {
+  e.stream_ << data;
+  return e;
+}
+
+#ifdef __GNUC__
+#define UTIL_FUNC_NAME __PRETTY_FUNCTION__
+#else
+#ifdef _WIN32
+#define UTIL_FUNC_NAME __FUNCTION__
+#else
+#define UTIL_FUNC_NAME NULL
+#endif
+#endif
+
+#define UTIL_SET_LOCATION(UTIL_e, child, condition) do { \
+  (UTIL_e).SetLocation(__FILE__, __LINE__, UTIL_FUNC_NAME, (child), (condition)); \
+} while (0)
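+
+// A usage sketch (illustrative only): the UTIL_THROW* macros defined below
+// expand to roughly this sequence -- construct, record the location, stream
+// a message through the operator<< above, then throw.
+inline void ExampleThrow(int fd) {
+  Exception e;
+  UTIL_SET_LOCATION(e, "Exception", NULL);
+  e << "Example failure on fd " << fd;
+  throw e;
+}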
+
+/* Create an instance of Exception, add the message Modify, and throw it.
+ * Modify is appended to the what() message and can contain << for ostream
+ * operations.
+ *
+ * do .. while kludge to swallow trailing ; character
+ * http://gcc.gnu.org/onlinedocs/cpp/Swallowing-the-Semicolon.html .
+ */
+#define UTIL_THROW(Exception, Modify) do { \
+  Exception UTIL_e; \
+  UTIL_SET_LOCATION(UTIL_e, #Exception, NULL); \
+  UTIL_e << Modify; \
+  throw UTIL_e; \
+} while (0)
+
+#define UTIL_THROW_VAR(Var, Modify) do { \
+  Exception &UTIL_e = (Var); \
+  UTIL_SET_LOCATION(UTIL_e, NULL, NULL); \
+  UTIL_e << Modify; \
+  throw UTIL_e; \
+} while (0)
+
+#define UTIL_THROW_IF(Condition, Exception, Modify) do { \
+  if (Condition) { \
+    Exception UTIL_e; \
+    UTIL_SET_LOCATION(UTIL_e, #Exception, #Condition); \
+    UTIL_e << Modify; \
+    throw UTIL_e; \
+  } \
+} while (0)
+
+class ErrnoException : public Exception {
+  public:
+    ErrnoException() throw();
+
+    virtual ~ErrnoException() throw();
+
+    int Error() const throw() { return errno_; }
+
+  private:
+    int errno_;
+};
+
+class EndOfFileException : public Exception {
+  public:
+    EndOfFileException() throw();
+    ~EndOfFileException() throw();
+};
+
+class OverflowException : public Exception {
+  public:
+    OverflowException() throw();
+    ~OverflowException() throw();
+};
+
+template <unsigned len> inline std::size_t CheckOverflowInternal(uint64_t value) {
+  UTIL_THROW_IF(value > static_cast<uint64_t>(std::numeric_limits<std::size_t>::max()), OverflowException, "Integer overflow detected.  This model is too big for 32-bit code.");
+  return value;
+}
+
+template <> inline std::size_t CheckOverflowInternal<8>(uint64_t value) {
+  return value;
+}
+
+inline std::size_t CheckOverflow(uint64_t value) {
+  return CheckOverflowInternal<sizeof(std::size_t)>(value);
+}
+
+} // namespace util
+
+#endif // UTIL_EXCEPTION__
diff --git a/util/file.cc b/util/file.cc
new file mode 100644
index 000000000..6bf879ac7
--- /dev/null
+++ b/util/file.cc
@@ -0,0 +1,302 @@
+#include "util/file.hh"
+
+#include "util/exception.hh"
+
+#include <cstdlib>
+#include <cstdio>
+#include <iostream>
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#if defined(_WIN32) || defined(_WIN64)
+#include <windows.h>
+#include <io.h>
+#else
+#include <unistd.h>
+#endif
+
+namespace util {
+
+scoped_fd::~scoped_fd() {
+  if (fd_ != -1 && close(fd_)) {
+    std::cerr << "Could not close file " << fd_ << std::endl;
+    std::abort();
+  }
+}
+
+scoped_FILE::~scoped_FILE() {
+  if (file_ && std::fclose(file_)) {
+    std::cerr << "Could not close file " << std::endl;
+    std::abort();
+  }
+}
+
+int OpenReadOrThrow(const char *name) {
+  int ret;
+#if defined(_WIN32) || defined(_WIN64)
+  UTIL_THROW_IF(-1 == (ret = _open(name, _O_BINARY | _O_RDONLY)), ErrnoException, "while opening " << name);
+#else
+  UTIL_THROW_IF(-1 == (ret = open(name, O_RDONLY)), ErrnoException, "while opening " << name);
+#endif
+  return ret;
+}
+
+int CreateOrThrow(const char *name) {
+  int ret;
+#if defined(_WIN32) || defined(_WIN64)
+  UTIL_THROW_IF(-1 == (ret = _open(name, _O_CREAT | _O_TRUNC | _O_RDWR, _S_IREAD | _S_IWRITE)), ErrnoException, "while creating " << name);
+#else
+  UTIL_THROW_IF(-1 == (ret = open(name, O_CREAT | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)), ErrnoException, "while creating " << name);
+#endif
+  return ret;
+}
+
+uint64_t SizeFile(int fd) {
+#if defined(_WIN32) || defined(_WIN64)
+  __int64 ret = _filelengthi64(fd);
+  return (ret == -1) ?
kBadSize : ret; +#else + struct stat sb; + if (fstat(fd, &sb) == -1 || (!sb.st_size && !S_ISREG(sb.st_mode))) return kBadSize; + return sb.st_size; +#endif +} + +void ResizeOrThrow(int fd, uint64_t to) { +#if defined(_WIN32) || defined(_WIN64) + UTIL_THROW_IF(_chsize_s(fd, to), ErrnoException, "Resizing to " << to << " bytes failed"); +#else + UTIL_THROW_IF(ftruncate(fd, to), ErrnoException, "Resizing to " << to << " bytes failed"); +#endif +} + +#ifdef WIN32 +typedef int ssize_t; +#endif + +void ReadOrThrow(int fd, void *to_void, std::size_t amount) { + uint8_t *to = static_cast(to_void); + while (amount) { + ssize_t ret = read(fd, to, amount); + UTIL_THROW_IF(ret == -1, ErrnoException, "Reading " << amount << " from fd " << fd << " failed."); + UTIL_THROW_IF(ret == 0, EndOfFileException, "Hit EOF in fd " << fd << " but there should be " << amount << " more bytes to read."); + amount -= ret; + to += ret; + } +} + +std::size_t ReadOrEOF(int fd, void *to_void, std::size_t amount) { + uint8_t *to = static_cast(to_void); + std::size_t remaining = amount; + while (remaining) { + ssize_t ret = read(fd, to, remaining); + UTIL_THROW_IF(ret == -1, ErrnoException, "Reading " << remaining << " from fd " << fd << " failed."); + if (!ret) return amount - remaining; + remaining -= ret; + to += ret; + } + return amount; +} + +void WriteOrThrow(int fd, const void *data_void, std::size_t size) { + const uint8_t *data = static_cast(data_void); + while (size) { + ssize_t ret = write(fd, data, size); + if (ret < 1) UTIL_THROW(util::ErrnoException, "Write failed"); + data += ret; + size -= ret; + } +} + +void WriteOrThrow(FILE *to, const void *data, std::size_t size) { + assert(size); + if (1 != std::fwrite(data, size, 1, to)) UTIL_THROW(util::ErrnoException, "Short write; requested size " << size); +} + +void FSyncOrThrow(int fd) { +// Apparently windows doesn't have fsync? +#if !defined(_WIN32) && !defined(_WIN64) + UTIL_THROW_IF(-1 == fsync(fd), ErrnoException, "Sync of " << fd << " failed."); +#endif +} + +namespace { +void InternalSeek(int fd, int64_t off, int whence) { +#if defined(_WIN32) || defined(_WIN64) + UTIL_THROW_IF((__int64)-1 == _lseeki64(fd, off, whence), ErrnoException, "Windows seek failed"); + +#else + UTIL_THROW_IF((off_t)-1 == lseek(fd, off, whence), ErrnoException, "Seek failed"); +#endif +} +} // namespace + +void SeekOrThrow(int fd, uint64_t off) { + InternalSeek(fd, off, SEEK_SET); +} + +void AdvanceOrThrow(int fd, int64_t off) { + InternalSeek(fd, off, SEEK_CUR); +} + +void SeekEnd(int fd) { + InternalSeek(fd, 0, SEEK_END); +} + +std::FILE *FDOpenOrThrow(scoped_fd &file) { + std::FILE *ret = fdopen(file.get(), "r+b"); + if (!ret) UTIL_THROW(util::ErrnoException, "Could not fdopen"); + file.release(); + return ret; +} + +std::FILE *FOpenOrThrow(const char *path, const char *mode) { + std::FILE *ret; + UTIL_THROW_IF(!(ret = fopen(path, mode)), util::ErrnoException, "Could not fopen " << path << " for " << mode); + return ret; +} + +TempMaker::TempMaker(const std::string &prefix) : base_(prefix) { + base_ += "XXXXXX"; +} + +// Sigh. Windows temporary file creation is full of race conditions. +#if defined(_WIN32) || defined(_WIN64) +/* mkstemp extracted from libc/sysdeps/posix/tempname.c. Copyright + (C) 1991-1999, 2000, 2001, 2006 Free Software Foundation, Inc. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. */ + +/* This has been modified from the original version to rename the function and + * set the Windows temporary flag. */ + +static const char letters[] = +"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; + +/* Generate a temporary file name based on TMPL. TMPL must match the + rules for mk[s]temp (i.e. end in "XXXXXX"). The name constructed + does not exist at the time of the call to mkstemp. TMPL is + overwritten with the result. */ +int +mkstemp_and_unlink(char *tmpl) +{ + int len; + char *XXXXXX; + static unsigned long long value; + unsigned long long random_time_bits; + unsigned int count; + int fd = -1; + int save_errno = errno; + + /* A lower bound on the number of temporary files to attempt to + generate. The maximum total number of temporary file names that + can exist for a given template is 62**6. It should never be + necessary to try all these combinations. Instead if a reasonable + number of names is tried (we define reasonable as 62**3) fail to + give the system administrator the chance to remove the problems. */ +#define ATTEMPTS_MIN (62 * 62 * 62) + + /* The number of times to attempt to generate a temporary file. To + conform to POSIX, this must be no smaller than TMP_MAX. */ +#if ATTEMPTS_MIN < TMP_MAX + unsigned int attempts = TMP_MAX; +#else + unsigned int attempts = ATTEMPTS_MIN; +#endif + + len = strlen (tmpl); + if (len < 6 || strcmp (&tmpl[len - 6], "XXXXXX")) + { + errno = EINVAL; + return -1; + } + +/* This is where the Xs start. */ + XXXXXX = &tmpl[len - 6]; + + /* Get some more or less random data. */ + { + SYSTEMTIME stNow; + FILETIME ftNow; + + // get system time + GetSystemTime(&stNow); + stNow.wMilliseconds = 500; + if (!SystemTimeToFileTime(&stNow, &ftNow)) + { + errno = -1; + return -1; + } + + random_time_bits = (((unsigned long long)ftNow.dwHighDateTime << 32) + | (unsigned long long)ftNow.dwLowDateTime); + } + value += random_time_bits ^ (unsigned long long)GetCurrentThreadId (); + + for (count = 0; count < attempts; value += 7777, ++count) + { + unsigned long long v = value; + + /* Fill in the random bits. */ + XXXXXX[0] = letters[v % 62]; + v /= 62; + XXXXXX[1] = letters[v % 62]; + v /= 62; + XXXXXX[2] = letters[v % 62]; + v /= 62; + XXXXXX[3] = letters[v % 62]; + v /= 62; + XXXXXX[4] = letters[v % 62]; + v /= 62; + XXXXXX[5] = letters[v % 62]; + + /* Modified for windows and to unlink */ + // fd = open (tmpl, O_RDWR | O_CREAT | O_EXCL, _S_IREAD | _S_IWRITE); + int flags = _O_RDWR | _O_CREAT | _O_EXCL | _O_BINARY; + flags |= _O_TEMPORARY; + fd = _open (tmpl, flags, _S_IREAD | _S_IWRITE); + if (fd >= 0) + { + errno = save_errno; + return fd; + } + else if (errno != EEXIST) + return -1; + } + + /* We got out of the loop because we ran out of combinations to try. 
*/ + errno = EEXIST; + return -1; +} +#else +int +mkstemp_and_unlink(char *tmpl) { + int ret = mkstemp(tmpl); + if (ret != -1) { + UTIL_THROW_IF(unlink(tmpl), util::ErrnoException, "Failed to delete " << tmpl); + } + return ret; +} +#endif + +int TempMaker::Make() const { + std::string name(base_); + name.push_back(0); + int ret; + UTIL_THROW_IF(-1 == (ret = mkstemp_and_unlink(&name[0])), util::ErrnoException, "Failed to make a temporary based on " << base_); + return ret; +} + +std::FILE *TempMaker::MakeFile() const { + util::scoped_fd file(Make()); + return FDOpenOrThrow(file); +} + +} // namespace util diff --git a/util/file.hh b/util/file.hh new file mode 100644 index 000000000..185cb1f31 --- /dev/null +++ b/util/file.hh @@ -0,0 +1,110 @@ +#ifndef UTIL_FILE__ +#define UTIL_FILE__ + +#include +#include +#include + +#include + +namespace util { + +class scoped_fd { + public: + scoped_fd() : fd_(-1) {} + + explicit scoped_fd(int fd) : fd_(fd) {} + + ~scoped_fd(); + + void reset(int to) { + scoped_fd other(fd_); + fd_ = to; + } + + int get() const { return fd_; } + + int operator*() const { return fd_; } + + int release() { + int ret = fd_; + fd_ = -1; + return ret; + } + + operator bool() { return fd_ != -1; } + + private: + int fd_; + + scoped_fd(const scoped_fd &); + scoped_fd &operator=(const scoped_fd &); +}; + +class scoped_FILE { + public: + explicit scoped_FILE(std::FILE *file = NULL) : file_(file) {} + + ~scoped_FILE(); + + std::FILE *get() { return file_; } + const std::FILE *get() const { return file_; } + + void reset(std::FILE *to = NULL) { + scoped_FILE other(file_); + file_ = to; + } + + std::FILE *release() { + std::FILE *ret = file_; + file_ = NULL; + return ret; + } + + private: + std::FILE *file_; +}; + +// Open for read only. +int OpenReadOrThrow(const char *name); +// Create file if it doesn't exist, truncate if it does. Opened for write. +int CreateOrThrow(const char *name); + +// Return value for SizeFile when it can't size properly. +const uint64_t kBadSize = (uint64_t)-1; +uint64_t SizeFile(int fd); + +void ResizeOrThrow(int fd, uint64_t to); + +void ReadOrThrow(int fd, void *to, std::size_t size); +std::size_t ReadOrEOF(int fd, void *to_void, std::size_t amount); + +void WriteOrThrow(int fd, const void *data_void, std::size_t size); +void WriteOrThrow(FILE *to, const void *data, std::size_t size); + +void FSyncOrThrow(int fd); + +// Seeking +void SeekOrThrow(int fd, uint64_t off); +void AdvanceOrThrow(int fd, int64_t off); +void SeekEnd(int fd); + +std::FILE *FDOpenOrThrow(scoped_fd &file); + +std::FILE *FOpenOrThrow(const char *path, const char *mode); + +class TempMaker { + public: + explicit TempMaker(const std::string &prefix); + + // These will already be unlinked for you. 
+    int Make() const;
+    std::FILE *MakeFile() const;
+
+  private:
+    std::string base_;
+};
+
+} // namespace util
+
+#endif // UTIL_FILE__
diff --git a/util/file_piece.cc b/util/file_piece.cc
new file mode 100644
index 000000000..280f438c3
--- /dev/null
+++ b/util/file_piece.cc
@@ -0,0 +1,314 @@
+#include "util/file_piece.hh"
+
+#include "util/exception.hh"
+#include "util/file.hh"
+#include "util/mmap.hh"
+#ifdef WIN32
+#include <io.h>
+#else
+#include <unistd.h>
+#endif // WIN32
+
+#include <iostream>
+#include <string>
+#include <limits>
+
+#include <assert.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+namespace util {
+
+ParseNumberException::ParseNumberException(StringPiece value) throw() {
+  *this << "Could not parse \"" << value << "\" into a number";
+}
+
+#ifdef HAVE_ZLIB
+GZException::GZException(gzFile file) {
+  int num;
+  *this << gzerror(file, &num) << " from zlib";
+}
+#endif // HAVE_ZLIB
+
+// Sigh this is the only way I could come up with to do a _const_ bool.  It has ' ', '\f', '\n', '\r', '\t', and '\v' (same as isspace on C locale).
+const bool kSpaces[256] = {0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
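+
+// A short usage sketch (illustrative, not part of this file): scan numbers
+// and whitespace-delimited tokens until end of file, the same pattern the
+// ARPA reader uses with the constructors defined below.
+void ExampleScan(const char *name) {
+  FilePiece in(name);
+  try {
+    while (true) {
+      float prob = in.ReadFloat();
+      StringPiece word = in.ReadDelimited();
+      (void)prob; (void)word;  // one record consumed per iteration
+    }
+  } catch (const EndOfFileException &) {}
+}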
+
+FilePiece::FilePiece(const char *name, std::ostream *show_progress, std::size_t min_buffer) :
+  file_(OpenReadOrThrow(name)), total_size_(SizeFile(file_.get())), page_(SizePage()),
+  progress_(total_size_, total_size_ == kBadSize ? NULL : show_progress, std::string("Reading ") + name) {
+  Initialize(name, show_progress, min_buffer);
+}
+
+FilePiece::FilePiece(int fd, const char *name, std::ostream *show_progress, std::size_t min_buffer) :
+  file_(fd), total_size_(SizeFile(file_.get())), page_(SizePage()),
+  progress_(total_size_, total_size_ == kBadSize ? NULL : show_progress, std::string("Reading ") + name) {
+  Initialize(name, show_progress, min_buffer);
+}
+
+FilePiece::~FilePiece() {
+#ifdef HAVE_ZLIB
+  if (gz_file_) {
+    // zlib took ownership
+    file_.release();
+    int ret;
+    if (Z_OK != (ret = gzclose(gz_file_))) {
+      std::cerr << "could not close file " << file_name_ << " using zlib" << std::endl;
+      abort();
+    }
+  }
+#endif
+}
+
+StringPiece FilePiece::ReadLine(char delim) {
+  std::size_t skip = 0;
+  while (true) {
+    for (const char *i = position_ + skip; i < position_end_; ++i) {
+      if (*i == delim) {
+        StringPiece ret(position_, i - position_);
+        position_ = i + 1;
+        return ret;
+      }
+    }
+    if (at_end_) {
+      if (position_ == position_end_) Shift();
+      return Consume(position_end_);
+    }
+    skip = position_end_ - position_;
+    Shift();
+  }
+}
+
+float FilePiece::ReadFloat() {
+  return ReadNumber<float>();
+}
+double FilePiece::ReadDouble() {
+  return ReadNumber<double>();
+}
+long int FilePiece::ReadLong() {
+  return ReadNumber<long int>();
+}
+unsigned long int FilePiece::ReadULong() {
+  return ReadNumber<unsigned long int>();
+}
+
+void FilePiece::Initialize(const char *name, std::ostream *show_progress, std::size_t min_buffer) {
+#ifdef HAVE_ZLIB
+  gz_file_ = NULL;
+#endif
+  file_name_ = name;
+
+  default_map_size_ = page_ * std::max<std::size_t>((min_buffer / page_ + 1), 2);
+  position_ = NULL;
+  position_end_ = NULL;
+  mapped_offset_ = 0;
+  at_end_ = false;
+
+  if (total_size_ == kBadSize) {
+    // So the assertion passes.
+    fallback_to_read_ = false;
+    if (show_progress)
+      *show_progress << "File " << name << " isn't normal.  Using slower read() instead of mmap().  No progress bar." << std::endl;
+    TransitionToRead();
+  } else {
+    fallback_to_read_ = false;
+  }
+  Shift();
+  // gzip detect.
+  if ((position_end_ - position_) > 2 && *position_ == 0x1f && static_cast<unsigned char>(*(position_ + 1)) == 0x8b) {
+#ifndef HAVE_ZLIB
+    UTIL_THROW(GZException, "Looks like a gzip file but support was not compiled in.");
+#endif
+    if (!fallback_to_read_) {
+      at_end_ = false;
+      TransitionToRead();
+    }
+  }
+}
+
+namespace {
+void ParseNumber(const char *begin, char *&end, float &out) {
+#if defined(sun) || defined(WIN32)
+  out = static_cast<float>(strtod(begin, &end));
+#else
+  out = strtof(begin, &end);
+#endif
+}
+void ParseNumber(const char *begin, char *&end, double &out) {
+  out = strtod(begin, &end);
+}
+void ParseNumber(const char *begin, char *&end, long int &out) {
+  out = strtol(begin, &end, 10);
+}
+void ParseNumber(const char *begin, char *&end, unsigned long int &out) {
+  out = strtoul(begin, &end, 10);
+}
+} // namespace
+
+template <class T> T FilePiece::ReadNumber() {
+  SkipSpaces();
+  while (last_space_ < position_) {
+    if (at_end_) {
+      // Hallucinate a null off the end of the file.
+ std::string buffer(position_, position_end_); + char *end; + T ret; + ParseNumber(buffer.c_str(), end, ret); + if (buffer.c_str() == end) throw ParseNumberException(buffer); + position_ += end - buffer.c_str(); + return ret; + } + Shift(); + } + char *end; + T ret; + ParseNumber(position_, end, ret); + if (end == position_) throw ParseNumberException(ReadDelimited()); + position_ = end; + return ret; +} + +const char *FilePiece::FindDelimiterOrEOF(const bool *delim) { + std::size_t skip = 0; + while (true) { + for (const char *i = position_ + skip; i < position_end_; ++i) { + if (delim[static_cast(*i)]) return i; + } + if (at_end_) { + if (position_ == position_end_) Shift(); + return position_end_; + } + skip = position_end_ - position_; + Shift(); + } +} + +void FilePiece::Shift() { + if (at_end_) { + progress_.Finished(); + throw EndOfFileException(); + } + uint64_t desired_begin = position_ - data_.begin() + mapped_offset_; + + if (!fallback_to_read_) MMapShift(desired_begin); + // Notice an mmap failure might set the fallback. + if (fallback_to_read_) ReadShift(); + + for (last_space_ = position_end_ - 1; last_space_ >= position_; --last_space_) { + if (isspace(*last_space_)) break; + } +} + +void FilePiece::MMapShift(uint64_t desired_begin) { + // Use mmap. + uint64_t ignore = desired_begin % page_; + // Duplicate request for Shift means give more data. + if (position_ == data_.begin() + ignore) { + default_map_size_ *= 2; + } + // Local version so that in case of failure it doesn't overwrite the class variable. + uint64_t mapped_offset = desired_begin - ignore; + + uint64_t mapped_size; + if (default_map_size_ >= static_cast(total_size_ - mapped_offset)) { + at_end_ = true; + mapped_size = total_size_ - mapped_offset; + } else { + mapped_size = default_map_size_; + } + + // Forcibly clear the existing mmap first. + data_.reset(); + try { + MapRead(POPULATE_OR_LAZY, *file_, mapped_offset, mapped_size, data_); + } catch (const util::ErrnoException &e) { + if (desired_begin) { + SeekOrThrow(*file_, desired_begin); + } + // The mmap was scheduled to end the file, but now we're going to read it. + at_end_ = false; + TransitionToRead(); + return; + } + mapped_offset_ = mapped_offset; + position_ = data_.begin() + ignore; + position_end_ = data_.begin() + mapped_size; + + progress_.Set(desired_begin); +} + +void FilePiece::TransitionToRead() { + assert(!fallback_to_read_); + fallback_to_read_ = true; + data_.reset(); + data_.reset(malloc(default_map_size_), default_map_size_, scoped_memory::MALLOC_ALLOCATED); + UTIL_THROW_IF(!data_.get(), ErrnoException, "malloc failed for " << default_map_size_); + position_ = data_.begin(); + position_end_ = position_; + +#ifdef HAVE_ZLIB + assert(!gz_file_); + gz_file_ = gzdopen(file_.get(), "r"); + UTIL_THROW_IF(!gz_file_, GZException, "zlib failed to open " << file_name_); +#endif +} + +#ifdef WIN32 +typedef int ssize_t; +#endif + +void FilePiece::ReadShift() { + assert(fallback_to_read_); + // Bytes [data_.begin(), position_) have been consumed. + // Bytes [position_, position_end_) have been read into the buffer. + + // Start at the beginning of the buffer if there's nothing useful in it. + if (position_ == position_end_) { + mapped_offset_ += (position_end_ - data_.begin()); + position_ = data_.begin(); + position_end_ = position_; + } + + std::size_t already_read = position_end_ - data_.begin(); + + if (already_read == default_map_size_) { + if (position_ == data_.begin()) { + // Buffer too small. 
+ std::size_t valid_length = position_end_ - position_; + default_map_size_ *= 2; + data_.call_realloc(default_map_size_); + UTIL_THROW_IF(!data_.get(), ErrnoException, "realloc failed for " << default_map_size_); + position_ = data_.begin(); + position_end_ = position_ + valid_length; + } else { + size_t moving = position_end_ - position_; + memmove(data_.get(), position_, moving); + position_ = data_.begin(); + position_end_ = position_ + moving; + already_read = moving; + } + } + + ssize_t read_return; +#ifdef HAVE_ZLIB + read_return = gzread(gz_file_, static_cast(data_.get()) + already_read, default_map_size_ - already_read); + if (read_return == -1) throw GZException(gz_file_); + if (total_size_ != kBadSize) { + // Just get the position, don't actually seek. Apparently this is how you do it. . . + off_t ret = lseek(file_.get(), 0, SEEK_CUR); + if (ret != -1) progress_.Set(ret); + } +#else + read_return = read(file_.get(), static_cast(data_.get()) + already_read, default_map_size_ - already_read); + UTIL_THROW_IF(read_return == -1, ErrnoException, "read failed"); + progress_.Set(mapped_offset_); +#endif + if (read_return == 0) { + at_end_ = true; + } + position_end_ += read_return; +} + +} // namespace util diff --git a/util/file_piece.hh b/util/file_piece.hh new file mode 100644 index 000000000..af93d8aab --- /dev/null +++ b/util/file_piece.hh @@ -0,0 +1,132 @@ +#ifndef UTIL_FILE_PIECE__ +#define UTIL_FILE_PIECE__ + +#include "util/ersatz_progress.hh" +#include "util/exception.hh" +#include "util/file.hh" +#include "util/have.hh" +#include "util/mmap.hh" +#include "util/string_piece.hh" + +#include +#include + +#include + +#ifdef HAVE_ZLIB +#include +#endif + +namespace util { + +class ParseNumberException : public Exception { + public: + explicit ParseNumberException(StringPiece value) throw(); + ~ParseNumberException() throw() {} +}; + +class GZException : public Exception { + public: +#ifdef HAVE_ZLIB + explicit GZException(gzFile file); +#endif + GZException() throw() {} + ~GZException() throw() {} +}; + +extern const bool kSpaces[256]; + +// Memory backing the returned StringPiece may vanish on the next call. +class FilePiece { + public: + // 32 MB default. + explicit FilePiece(const char *file, std::ostream *show_progress = NULL, std::size_t min_buffer = 33554432); + // Takes ownership of fd. name is used for messages. + explicit FilePiece(int fd, const char *name, std::ostream *show_progress = NULL, std::size_t min_buffer = 33554432); + + ~FilePiece(); + + char get() { + if (position_ == position_end_) { + Shift(); + if (at_end_) throw EndOfFileException(); + } + return *(position_++); + } + + // Leaves the delimiter, if any, to be returned by get(). Delimiters defined by isspace(). + StringPiece ReadDelimited(const bool *delim = kSpaces) { + SkipSpaces(delim); + return Consume(FindDelimiterOrEOF(delim)); + } + + // Unlike ReadDelimited, this includes leading spaces and consumes the delimiter. + // It is similar to getline in that way. + StringPiece ReadLine(char delim = '\n'); + + float ReadFloat(); + double ReadDouble(); + long int ReadLong(); + unsigned long int ReadULong(); + + // Skip spaces defined by isspace. 
+ void SkipSpaces(const bool *delim = kSpaces) { + for (; ; ++position_) { + if (position_ == position_end_) Shift(); + if (!delim[static_cast(*position_)]) return; + } + } + + uint64_t Offset() const { + return position_ - data_.begin() + mapped_offset_; + } + + const std::string &FileName() const { return file_name_; } + + private: + void Initialize(const char *name, std::ostream *show_progress, std::size_t min_buffer); + + template T ReadNumber(); + + StringPiece Consume(const char *to) { + StringPiece ret(position_, to - position_); + position_ = to; + return ret; + } + + const char *FindDelimiterOrEOF(const bool *delim = kSpaces); + + void Shift(); + // Backends to Shift(). + void MMapShift(uint64_t desired_begin); + + void TransitionToRead(); + void ReadShift(); + + const char *position_, *last_space_, *position_end_; + + scoped_fd file_; + const uint64_t total_size_; + const uint64_t page_; + + std::size_t default_map_size_; + uint64_t mapped_offset_; + + // Order matters: file_ should always be destroyed after this. + scoped_memory data_; + + bool at_end_; + bool fallback_to_read_; + + ErsatzProgress progress_; + + std::string file_name_; + +#ifdef HAVE_ZLIB + gzFile gz_file_; +#endif // HAVE_ZLIB +}; + +} // namespace util + +#endif // UTIL_FILE_PIECE__ diff --git a/util/file_piece_test.cc b/util/file_piece_test.cc new file mode 100644 index 000000000..f912e18af --- /dev/null +++ b/util/file_piece_test.cc @@ -0,0 +1,123 @@ +// Tests might fail if you have creative characters in your path. Sue me. +#include "util/file_piece.hh" + +#include "util/scoped.hh" + +#define BOOST_TEST_MODULE FilePieceTest +#include +#include +#include + +#include +#include +#include + +namespace util { +namespace { + +std::string FileLocation() { + if (boost::unit_test::framework::master_test_suite().argc < 2) { + return "file_piece.cc"; + } + std::string ret(boost::unit_test::framework::master_test_suite().argv[1]); + return ret; +} + +/* mmap implementation */ +BOOST_AUTO_TEST_CASE(MMapReadLine) { + std::fstream ref(FileLocation().c_str(), std::ios::in); + FilePiece test(FileLocation().c_str(), NULL, 1); + std::string ref_line; + while (getline(ref, ref_line)) { + StringPiece test_line(test.ReadLine()); + // I submitted a bug report to ICU: http://bugs.icu-project.org/trac/ticket/7924 + if (!test_line.empty() || !ref_line.empty()) { + BOOST_CHECK_EQUAL(ref_line, test_line); + } + } + BOOST_CHECK_THROW(test.get(), EndOfFileException); +} + +#ifndef __APPLE__ +/* Apple isn't happy with the popen, fileno, dup. And I don't want to + * reimplement popen. This is an issue with the test. 
+ */ +/* read() implementation */ +BOOST_AUTO_TEST_CASE(StreamReadLine) { + std::fstream ref(FileLocation().c_str(), std::ios::in); + + std::string popen_args = "cat \""; + popen_args += FileLocation(); + popen_args += '"'; + + FILE *catter = popen(popen_args.c_str(), "r"); + BOOST_REQUIRE(catter); + + FilePiece test(dup(fileno(catter)), "file_piece.cc", NULL, 1); + std::string ref_line; + while (getline(ref, ref_line)) { + StringPiece test_line(test.ReadLine()); + // I submitted a bug report to ICU: http://bugs.icu-project.org/trac/ticket/7924 + if (!test_line.empty() || !ref_line.empty()) { + BOOST_CHECK_EQUAL(ref_line, test_line); + } + } + BOOST_CHECK_THROW(test.get(), EndOfFileException); + BOOST_REQUIRE(!pclose(catter)); +} +#endif // __APPLE__ + +#ifdef HAVE_ZLIB + +// gzip file +BOOST_AUTO_TEST_CASE(PlainZipReadLine) { + std::string location(FileLocation()); + std::fstream ref(location.c_str(), std::ios::in); + + std::string command("gzip <\""); + command += location + "\" >\"" + location + "\".gz"; + + BOOST_REQUIRE_EQUAL(0, system(command.c_str())); + FilePiece test((location + ".gz").c_str(), NULL, 1); + unlink((location + ".gz").c_str()); + std::string ref_line; + while (getline(ref, ref_line)) { + StringPiece test_line(test.ReadLine()); + // I submitted a bug report to ICU: http://bugs.icu-project.org/trac/ticket/7924 + if (!test_line.empty() || !ref_line.empty()) { + BOOST_CHECK_EQUAL(ref_line, test_line); + } + } + BOOST_CHECK_THROW(test.get(), EndOfFileException); +} + +// gzip stream. Apple doesn't like popen, fileno, dup. This is an issue with +// the test. +#ifndef __APPLE__ +BOOST_AUTO_TEST_CASE(StreamZipReadLine) { + std::fstream ref(FileLocation().c_str(), std::ios::in); + + std::string command("gzip <\""); + command += FileLocation() + "\""; + + FILE * catter = popen(command.c_str(), "r"); + BOOST_REQUIRE(catter); + + FilePiece test(dup(fileno(catter)), "file_piece.cc.gz", NULL, 1); + std::string ref_line; + while (getline(ref, ref_line)) { + StringPiece test_line(test.ReadLine()); + // I submitted a bug report to ICU: http://bugs.icu-project.org/trac/ticket/7924 + if (!test_line.empty() || !ref_line.empty()) { + BOOST_CHECK_EQUAL(ref_line, test_line); + } + } + BOOST_CHECK_THROW(test.get(), EndOfFileException); + BOOST_REQUIRE(!pclose(catter)); +} +#endif // __APPLE__ + +#endif // HAVE_ZLIB + +} // namespace +} // namespace util diff --git a/util/getopt.c b/util/getopt.c new file mode 100644 index 000000000..992c96b0c --- /dev/null +++ b/util/getopt.c @@ -0,0 +1,78 @@ +/* +POSIX getopt for Windows + +AT&T Public License + +Code given out at the 1985 UNIFORUM conference in Dallas. 
+*/ + +#ifndef __GNUC__ + +#include "getopt.hh" +#include +#include + +#define NULL 0 +#define EOF (-1) +#define ERR(s, c) if(opterr){\ + char errbuf[2];\ + errbuf[0] = c; errbuf[1] = '\n';\ + fputs(argv[0], stderr);\ + fputs(s, stderr);\ + fputc(c, stderr);} + //(void) write(2, argv[0], (unsigned)strlen(argv[0]));\ + //(void) write(2, s, (unsigned)strlen(s));\ + //(void) write(2, errbuf, 2);} + +int opterr = 1; +int optind = 1; +int optopt; +char *optarg; + +int +getopt(argc, argv, opts) +int argc; +char **argv, *opts; +{ + static int sp = 1; + register int c; + register char *cp; + + if(sp == 1) + if(optind >= argc || + argv[optind][0] != '-' || argv[optind][1] == '\0') + return(EOF); + else if(strcmp(argv[optind], "--") == NULL) { + optind++; + return(EOF); + } + optopt = c = argv[optind][sp]; + if(c == ':' || (cp=strchr(opts, c)) == NULL) { + ERR(": illegal option -- ", c); + if(argv[optind][++sp] == '\0') { + optind++; + sp = 1; + } + return('?'); + } + if(*++cp == ':') { + if(argv[optind][sp+1] != '\0') + optarg = &argv[optind++][sp+1]; + else if(++optind >= argc) { + ERR(": option requires an argument -- ", c); + sp = 1; + return('?'); + } else + optarg = argv[optind++]; + sp = 1; + } else { + if(argv[optind][++sp] == '\0') { + sp = 1; + optind++; + } + optarg = NULL; + } + return(c); +} + +#endif /* __GNUC__ */ diff --git a/util/getopt.hh b/util/getopt.hh new file mode 100644 index 000000000..6ad977324 --- /dev/null +++ b/util/getopt.hh @@ -0,0 +1,33 @@ +/* +POSIX getopt for Windows + +AT&T Public License + +Code given out at the 1985 UNIFORUM conference in Dallas. +*/ + +#ifdef __GNUC__ +#include +#endif +#ifndef __GNUC__ + +#ifndef _WINGETOPT_H_ +#define _WINGETOPT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +extern int opterr; +extern int optind; +extern int optopt; +extern char *optarg; +extern int getopt(int argc, char **argv, char *opts); + +#ifdef __cplusplus +} +#endif + +#endif /* _GETOPT_H_ */ +#endif /* __GNUC__ */ + diff --git a/util/have.hh b/util/have.hh new file mode 100644 index 000000000..b8181e999 --- /dev/null +++ b/util/have.hh @@ -0,0 +1,23 @@ +/* Optional packages. You might want to integrate this with your build system e.g. config.h from ./configure. */ +#ifndef UTIL_HAVE__ +#define UTIL_HAVE__ + +#ifndef HAVE_ZLIB +#if !defined(_WIN32) && !defined(_WIN64) +#define HAVE_ZLIB +#endif +#endif + +#ifndef HAVE_ICU +//#define HAVE_ICU +#endif + +#ifndef HAVE_BOOST +#define HAVE_BOOST +#endif + +#ifndef HAVE_THREADS +//#define HAVE_THREADS +#endif + +#endif // UTIL_HAVE__ diff --git a/util/joint_sort.hh b/util/joint_sort.hh new file mode 100644 index 000000000..cf3d84321 --- /dev/null +++ b/util/joint_sort.hh @@ -0,0 +1,151 @@ +#ifndef UTIL_JOINT_SORT__ +#define UTIL_JOINT_SORT__ + +/* A terrifying amount of C++ to coax std::sort into soring one range while + * also permuting another range the same way. 
+ */
+
+#include "util/proxy_iterator.hh"
+
+#include <algorithm>
+#include <functional>
+#include <iostream>
+
+namespace util {
+
+namespace detail {
+
+template <class KeyIter, class ValueIter> class JointProxy;
+
+template <class KeyIter, class ValueIter> class JointIter {
+  public:
+    JointIter() {}
+
+    JointIter(const KeyIter &key_iter, const ValueIter &value_iter) : key_(key_iter), value_(value_iter) {}
+
+    bool operator==(const JointIter &other) const { return key_ == other.key_; }
+
+    bool operator<(const JointIter &other) const { return (key_ < other.key_); }
+
+    std::ptrdiff_t operator-(const JointIter &other) const { return key_ - other.key_; }
+
+    JointIter &operator+=(std::ptrdiff_t amount) {
+      key_ += amount;
+      value_ += amount;
+      return *this;
+    }
+
+    void swap(const JointIter &other) {
+      std::swap(key_, other.key_);
+      std::swap(value_, other.value_);
+    }
+
+  private:
+    friend class JointProxy<KeyIter, ValueIter>;
+    KeyIter key_;
+    ValueIter value_;
+};
+
+template <class KeyIter, class ValueIter> class JointProxy {
+  private:
+    typedef JointIter<KeyIter, ValueIter> InnerIterator;
+
+  public:
+    typedef struct {
+      typename std::iterator_traits<KeyIter>::value_type key;
+      typename std::iterator_traits<ValueIter>::value_type value;
+      const typename std::iterator_traits<KeyIter>::value_type &GetKey() const { return key; }
+    } value_type;
+
+    JointProxy(const KeyIter &key_iter, const ValueIter &value_iter) : inner_(key_iter, value_iter) {}
+    JointProxy(const JointProxy &other) : inner_(other.inner_) {}
+
+    operator const value_type() const {
+      value_type ret;
+      ret.key = *inner_.key_;
+      ret.value = *inner_.value_;
+      return ret;
+    }
+
+    JointProxy &operator=(const JointProxy &other) {
+      *inner_.key_ = *other.inner_.key_;
+      *inner_.value_ = *other.inner_.value_;
+      return *this;
+    }
+
+    JointProxy &operator=(const value_type &other) {
+      *inner_.key_ = other.key;
+      *inner_.value_ = other.value;
+      return *this;
+    }
+
+    typename std::iterator_traits<KeyIter>::reference GetKey() const {
+      return *(inner_.key_);
+    }
+
+    void swap(JointProxy &other) {
+      std::swap(*inner_.key_, *other.inner_.key_);
+      std::swap(*inner_.value_, *other.inner_.value_);
+    }
+
+  private:
+    friend class ProxyIterator<JointProxy<KeyIter, ValueIter> >;
+
+    InnerIterator &Inner() { return inner_; }
+    const InnerIterator &Inner() const { return inner_; }
+    InnerIterator inner_;
+};
+
+template <class Proxy, class Less> class LessWrapper : public std::binary_function<const typename Proxy::value_type &, const typename Proxy::value_type &, bool> {
+  public:
+    explicit LessWrapper(const Less &less) : less_(less) {}
+
+    bool operator()(const Proxy &left, const Proxy &right) const {
+      return less_(left.GetKey(), right.GetKey());
+    }
+    bool operator()(const Proxy &left, const typename Proxy::value_type &right) const {
+      return less_(left.GetKey(), right.GetKey());
+    }
+    bool operator()(const typename Proxy::value_type &left, const Proxy &right) const {
+      return less_(left.GetKey(), right.GetKey());
+    }
+    bool operator()(const typename Proxy::value_type &left, const typename Proxy::value_type &right) const {
+      return less_(left.GetKey(), right.GetKey());
+    }
+
+  private:
+    const Less less_;
+};
+
+} // namespace detail
+
+template <class KeyIter, class ValueIter> class PairedIterator : public ProxyIterator<detail::JointProxy<KeyIter, ValueIter> > {
+  public:
+    PairedIterator(const KeyIter &key, const ValueIter &value) :
+      ProxyIterator<detail::JointProxy<KeyIter, ValueIter> >(detail::JointProxy<KeyIter, ValueIter>(key, value)) {}
+};
+
+template <class KeyIter, class ValueIter, class Less> void JointSort(const KeyIter &key_begin, const KeyIter &key_end, const ValueIter &value_begin, const Less &less) {
+  ProxyIterator<detail::JointProxy<KeyIter, ValueIter> > full_begin(detail::JointProxy<KeyIter, ValueIter>(key_begin, value_begin));
+  detail::LessWrapper<detail::JointProxy<KeyIter, ValueIter>, Less> less_wrap(less);
+  std::sort(full_begin, full_begin + (key_end - key_begin), less_wrap);
+}
+
+template <class KeyIter, class ValueIter> void JointSort(const KeyIter &key_begin, const KeyIter &key_end, const ValueIter &value_begin) {
+  JointSort(key_begin, key_end, value_begin, std::less<typename std::iterator_traits<KeyIter>::value_type>());
+}
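+
+// A minimal usage sketch (illustrative only; see joint_sort_test.cc below
+// for the real tests): sort word ids while carrying their scores along.
+inline void ExampleJointSort(unsigned *ids, float *scores, std::size_t n) {
+  JointSort(ids, ids + n, scores);  // scores are permuted with the ids
+}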
+
+} // namespace util
+
+namespace std {
+template <class KeyIter, class ValueIter> void swap(util::detail::JointIter<KeyIter, ValueIter> &left, util::detail::JointIter<KeyIter, ValueIter> &right) {
+  left.swap(right);
+}
+
+template <class KeyIter, class ValueIter> void swap(util::detail::JointProxy<KeyIter, ValueIter> &left, util::detail::JointProxy<KeyIter, ValueIter> &right) {
+  left.swap(right);
+}
+} // namespace std
+
+#endif // UTIL_JOINT_SORT__
diff --git a/util/joint_sort_test.cc b/util/joint_sort_test.cc
new file mode 100644
index 000000000..4dc859164
--- /dev/null
+++ b/util/joint_sort_test.cc
@@ -0,0 +1,50 @@
+#include "util/joint_sort.hh"
+
+#define BOOST_TEST_MODULE JointSortTest
+#include <boost/test/unit_test.hpp>
+
+namespace util { namespace {
+
+BOOST_AUTO_TEST_CASE(just_flip) {
+  char keys[2];
+  int values[2];
+  keys[0] = 1; values[0] = 327;
+  keys[1] = 0; values[1] = 87897;
+  JointSort(keys + 0, keys + 2, values + 0);
+  BOOST_CHECK_EQUAL(0, keys[0]);
+  BOOST_CHECK_EQUAL(87897, values[0]);
+  BOOST_CHECK_EQUAL(1, keys[1]);
+  BOOST_CHECK_EQUAL(327, values[1]);
+}
+
+BOOST_AUTO_TEST_CASE(three) {
+  char keys[3];
+  int values[3];
+  keys[0] = 1; values[0] = 327;
+  keys[1] = 2; values[1] = 87897;
+  keys[2] = 0; values[2] = 10;
+  JointSort(keys + 0, keys + 3, values + 0);
+  BOOST_CHECK_EQUAL(0, keys[0]);
+  BOOST_CHECK_EQUAL(1, keys[1]);
+  BOOST_CHECK_EQUAL(2, keys[2]);
+}
+
+BOOST_AUTO_TEST_CASE(char_int) {
+  char keys[4];
+  int values[4];
+  keys[0] = 3; values[0] = 327;
+  keys[1] = 1; values[1] = 87897;
+  keys[2] = 2; values[2] = 10;
+  keys[3] = 0; values[3] = 24347;
+  JointSort(keys + 0, keys + 4, values + 0);
+  BOOST_CHECK_EQUAL(0, keys[0]);
+  BOOST_CHECK_EQUAL(24347, values[0]);
+  BOOST_CHECK_EQUAL(1, keys[1]);
+  BOOST_CHECK_EQUAL(87897, values[1]);
+  BOOST_CHECK_EQUAL(2, keys[2]);
+  BOOST_CHECK_EQUAL(10, values[2]);
+  BOOST_CHECK_EQUAL(3, keys[3]);
+  BOOST_CHECK_EQUAL(327, values[3]);
+}
+
+}} // namespace anonymous util
diff --git a/util/mmap.cc b/util/mmap.cc
new file mode 100644
index 000000000..bc9e3f815
--- /dev/null
+++ b/util/mmap.cc
@@ -0,0 +1,184 @@
+/* Memory mapping wrappers.
+ * ARM and MinGW ports contributed by Hideo Okuma and Tomoyuki Yoshimura at
+ * NICT.
+ */
+#include "util/mmap.hh"
+
+#include "util/exception.hh"
+#include "util/file.hh"
+
+#include <iostream>
+
+#include <assert.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+
+#if defined(_WIN32) || defined(_WIN64)
+#include <windows.h>
+#include <io.h>
+#else
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
+namespace util {
+
+long SizePage() {
+#if defined(_WIN32) || defined(_WIN64)
+  SYSTEM_INFO si;
+  GetSystemInfo(&si);
+  return si.dwAllocationGranularity;
+#else
+  return sysconf(_SC_PAGE_SIZE);
+#endif
+}
+
+void SyncOrThrow(void *start, size_t length) {
+#if defined(_WIN32) || defined(_WIN64)
+  UTIL_THROW_IF(!::FlushViewOfFile(start, length), ErrnoException, "Failed to sync mmap");
+#else
+  UTIL_THROW_IF(msync(start, length, MS_SYNC), ErrnoException, "Failed to sync mmap");
+#endif
+}
+
+void UnmapOrThrow(void *start, size_t length) {
+#if defined(_WIN32) || defined(_WIN64)
+  UTIL_THROW_IF(!::UnmapViewOfFile(start), ErrnoException, "Failed to unmap a file");
+#else
+  UTIL_THROW_IF(munmap(start, length), ErrnoException, "munmap failed");
+#endif
+}
+
+scoped_mmap::~scoped_mmap() {
+  if (data_ != (void*)-1) {
+    try {
+      // Thanks Denis Filimonov for pointing out NFS likes msync first.
+ SyncOrThrow(data_, size_); + UnmapOrThrow(data_, size_); + } catch (const util::ErrnoException &e) { + std::cerr << e.what(); + abort(); + } + } +} + +void scoped_memory::reset(void *data, std::size_t size, Alloc source) { + switch(source_) { + case MMAP_ALLOCATED: + scoped_mmap(data_, size_); + break; + case ARRAY_ALLOCATED: + delete [] reinterpret_cast(data_); + break; + case MALLOC_ALLOCATED: + free(data_); + break; + case NONE_ALLOCATED: + break; + } + data_ = data; + size_ = size; + source_ = source; +} + +void scoped_memory::call_realloc(std::size_t size) { + assert(source_ == MALLOC_ALLOCATED || source_ == NONE_ALLOCATED); + void *new_data = realloc(data_, size); + if (!new_data) { + reset(); + } else { + reset(new_data, size, MALLOC_ALLOCATED); + } +} + +void *MapOrThrow(std::size_t size, bool for_write, int flags, bool prefault, int fd, uint64_t offset) { +#ifdef MAP_POPULATE // Linux specific + if (prefault) { + flags |= MAP_POPULATE; + } +#endif +#if defined(_WIN32) || defined(_WIN64) + int protectC = for_write ? PAGE_READWRITE : PAGE_READONLY; + int protectM = for_write ? FILE_MAP_WRITE : FILE_MAP_READ; + uint64_t total_size = size + offset; + HANDLE hMapping = CreateFileMapping((HANDLE)_get_osfhandle(fd), NULL, protectC, total_size >> 32, static_cast(total_size), NULL); + UTIL_THROW_IF(!hMapping, ErrnoException, "CreateFileMapping failed"); + LPVOID ret = MapViewOfFile(hMapping, protectM, offset >> 32, offset, size); + CloseHandle(hMapping); + UTIL_THROW_IF(!ret, ErrnoException, "MapViewOfFile failed"); +#else + int protect = for_write ? (PROT_READ | PROT_WRITE) : PROT_READ; + void *ret = mmap(NULL, size, protect, flags, fd, offset); + UTIL_THROW_IF(ret == MAP_FAILED, ErrnoException, "mmap failed for size " << size << " at offset " << offset); +#endif + return ret; +} + +const int kFileFlags = +#if defined(_WIN32) || defined(_WIN64) + 0 // MapOrThrow ignores flags on windows +#elif defined(MAP_FILE) + MAP_FILE | MAP_SHARED +#else + MAP_SHARED +#endif + ; + +void MapRead(LoadMethod method, int fd, uint64_t offset, std::size_t size, scoped_memory &out) { + switch (method) { + case LAZY: + out.reset(MapOrThrow(size, false, kFileFlags, false, fd, offset), size, scoped_memory::MMAP_ALLOCATED); + break; + case POPULATE_OR_LAZY: +#ifdef MAP_POPULATE + case POPULATE_OR_READ: +#endif + out.reset(MapOrThrow(size, false, kFileFlags, true, fd, offset), size, scoped_memory::MMAP_ALLOCATED); + break; +#ifndef MAP_POPULATE + case POPULATE_OR_READ: +#endif + case READ: + out.reset(malloc(size), size, scoped_memory::MALLOC_ALLOCATED); + if (!out.get()) UTIL_THROW(util::ErrnoException, "Allocating " << size << " bytes with malloc"); + SeekOrThrow(fd, offset); + ReadOrThrow(fd, out.get(), size); + break; + } +} + +// Allocates zeroed memory in to. 
+void MapAnonymous(std::size_t size, util::scoped_memory &to) { + to.reset(); +#if defined(_WIN32) || defined(_WIN64) + to.reset(calloc(1, size), size, scoped_memory::MALLOC_ALLOCATED); +#else + to.reset(MapOrThrow(size, true, +# if defined(MAP_ANONYMOUS) + MAP_ANONYMOUS | MAP_PRIVATE // Linux +# else + MAP_ANON | MAP_PRIVATE // BSD +# endif + , false, -1, 0), size, scoped_memory::MMAP_ALLOCATED); +#endif +} + +void *MapZeroedWrite(int fd, std::size_t size) { + ResizeOrThrow(fd, 0); + ResizeOrThrow(fd, size); + return MapOrThrow(size, true, kFileFlags, false, fd, 0); +} + +void *MapZeroedWrite(const char *name, std::size_t size, scoped_fd &file) { + file.reset(CreateOrThrow(name)); + try { + return MapZeroedWrite(file.get(), size); + } catch (ErrnoException &e) { + e << " in file " << name; + throw; + } +} + +} // namespace util diff --git a/util/mmap.hh b/util/mmap.hh new file mode 100644 index 000000000..b218c4d19 --- /dev/null +++ b/util/mmap.hh @@ -0,0 +1,114 @@ +#ifndef UTIL_MMAP__ +#define UTIL_MMAP__ +// Utilities for mmaped files. + +#include + +#include +#include + +namespace util { + +class scoped_fd; + +long SizePage(); + +// (void*)-1 is MAP_FAILED; this is done to avoid including the mmap header here. +class scoped_mmap { + public: + scoped_mmap() : data_((void*)-1), size_(0) {} + scoped_mmap(void *data, std::size_t size) : data_(data), size_(size) {} + ~scoped_mmap(); + + void *get() const { return data_; } + + const uint8_t *begin() const { return reinterpret_cast(data_); } + const uint8_t *end() const { return reinterpret_cast(data_) + size_; } + std::size_t size() const { return size_; } + + void reset(void *data, std::size_t size) { + scoped_mmap other(data_, size_); + data_ = data; + size_ = size; + } + + void reset() { + reset((void*)-1, 0); + } + + private: + void *data_; + std::size_t size_; + + scoped_mmap(const scoped_mmap &); + scoped_mmap &operator=(const scoped_mmap &); +}; + +/* For when the memory might come from mmap, new char[], or malloc. Uses NULL + * and 0 for blanks even though mmap signals errors with (void*)-1). The reset + * function checks that blank for mmap. + */ +class scoped_memory { + public: + typedef enum {MMAP_ALLOCATED, ARRAY_ALLOCATED, MALLOC_ALLOCATED, NONE_ALLOCATED} Alloc; + + scoped_memory() : data_(NULL), size_(0), source_(NONE_ALLOCATED) {} + + ~scoped_memory() { reset(); } + + void *get() const { return data_; } + const char *begin() const { return reinterpret_cast(data_); } + const char *end() const { return reinterpret_cast(data_) + size_; } + std::size_t size() const { return size_; } + + Alloc source() const { return source_; } + + void reset() { reset(NULL, 0, NONE_ALLOCATED); } + + void reset(void *data, std::size_t size, Alloc from); + + // realloc allows the current data to escape hence the need for this call + // If realloc fails, destroys the original too and get() returns NULL. + void call_realloc(std::size_t to); + + private: + + void *data_; + std::size_t size_; + + Alloc source_; + + scoped_memory(const scoped_memory &); + scoped_memory &operator=(const scoped_memory &); +}; + +typedef enum { + // mmap with no prepopulate + LAZY, + // On linux, pass MAP_POPULATE to mmap. + POPULATE_OR_LAZY, + // Populate on Linux. malloc and read on non-Linux. + POPULATE_OR_READ, + // malloc and read. + READ +} LoadMethod; + +extern const int kFileFlags; + +// Wrapper around mmap to check it worked and hide some platform macros. 
+void *MapOrThrow(std::size_t size, bool for_write, int flags, bool prefault, int fd, uint64_t offset = 0); + +void MapRead(LoadMethod method, int fd, uint64_t offset, std::size_t size, scoped_memory &out); + +void MapAnonymous(std::size_t size, scoped_memory &to); + +// Open file name with mmap of size bytes, all of which are initially zero. +void *MapZeroedWrite(int fd, std::size_t size); +void *MapZeroedWrite(const char *name, std::size_t size, scoped_fd &file); + +// msync wrapper +void SyncOrThrow(void *start, size_t length); + +} // namespace util + +#endif // UTIL_MMAP__ diff --git a/util/murmur_hash.cc b/util/murmur_hash.cc new file mode 100644 index 000000000..4f519312d --- /dev/null +++ b/util/murmur_hash.cc @@ -0,0 +1,168 @@ +/* Downloaded from http://sites.google.com/site/murmurhash/ which says "All + * code is released to the public domain. For business purposes, Murmurhash is + * under the MIT license." + * This is modified from the original: + * ULL tag on 0xc6a4a7935bd1e995 so this will compile on 32-bit. + * length changed to unsigned int. + * placed in namespace util + * add MurmurHashNative + * default option = 0 for seed + * ARM port from NICT + */ + +#include "util/murmur_hash.hh" +#include + +namespace util { + +//----------------------------------------------------------------------------- +// MurmurHash2, 64-bit versions, by Austin Appleby + +// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment +// and endian-ness issues if used across multiple platforms. + +// 64-bit hash for 64-bit platforms + +uint64_t MurmurHash64A ( const void * key, std::size_t len, uint64_t seed ) +{ + const uint64_t m = 0xc6a4a7935bd1e995ULL; + const int r = 47; + + uint64_t h = seed ^ (len * m); + +#if defined(__arm) || defined(__arm__) + const size_t ksize = sizeof(uint64_t); + const unsigned char * data = (const unsigned char *)key; + const unsigned char * end = data + (std::size_t)(len/8) * ksize; +#else + const uint64_t * data = (const uint64_t *)key; + const uint64_t * end = data + (len/8); +#endif + + while(data != end) + { +#if defined(__arm) || defined(__arm__) + uint64_t k; + memcpy(&k, data, ksize); + data += ksize; +#else + uint64_t k = *data++; +#endif + + k *= m; + k ^= k >> r; + k *= m; + + h ^= k; + h *= m; + } + + const unsigned char * data2 = (const unsigned char*)data; + + switch(len & 7) + { + case 7: h ^= uint64_t(data2[6]) << 48; + case 6: h ^= uint64_t(data2[5]) << 40; + case 5: h ^= uint64_t(data2[4]) << 32; + case 4: h ^= uint64_t(data2[3]) << 24; + case 3: h ^= uint64_t(data2[2]) << 16; + case 2: h ^= uint64_t(data2[1]) << 8; + case 1: h ^= uint64_t(data2[0]); + h *= m; + }; + + h ^= h >> r; + h *= m; + h ^= h >> r; + + return h; +} + + +// 64-bit hash for 32-bit platforms + +uint64_t MurmurHash64B ( const void * key, std::size_t len, uint64_t seed ) +{ + const unsigned int m = 0x5bd1e995; + const int r = 24; + + unsigned int h1 = seed ^ len; + unsigned int h2 = 0; + +#if defined(__arm) || defined(__arm__) + size_t ksize = sizeof(unsigned int); + const unsigned char * data = (const unsigned char *)key; +#else + const unsigned int * data = (const unsigned int *)key; +#endif + + unsigned int k1, k2; + while(len >= 8) + { +#if defined(__arm) || defined(__arm__) + memcpy(&k1, data, ksize); + data += ksize; + memcpy(&k2, data, ksize); + data += ksize; +#else + k1 = *data++; + k2 = *data++; +#endif + + k1 *= m; k1 ^= k1 >> r; k1 *= m; + h1 *= m; h1 ^= k1; + len -= 4; + + k2 *= m; k2 ^= k2 >> r; k2 *= m; + h2 *= m; h2 ^= k2; + len -= 4; + } + + if(len 
>= 4)
+  {
+#if defined(__arm) || defined(__arm__)
+    memcpy(&k1, data, ksize);
+    data += ksize;
+#else
+    k1 = *data++;
+#endif
+    k1 *= m; k1 ^= k1 >> r; k1 *= m;
+    h1 *= m; h1 ^= k1;
+    len -= 4;
+  }
+
+  switch(len)
+  {
+  case 3: h2 ^= ((unsigned char*)data)[2] << 16;
+  case 2: h2 ^= ((unsigned char*)data)[1] << 8;
+  case 1: h2 ^= ((unsigned char*)data)[0];
+      h2 *= m;
+  };
+
+  h1 ^= h2 >> 18; h1 *= m;
+  h2 ^= h1 >> 22; h2 *= m;
+  h1 ^= h2 >> 17; h1 *= m;
+  h2 ^= h1 >> 19; h2 *= m;
+
+  uint64_t h = h1;
+
+  h = (h << 32) | h2;
+
+  return h;
+}
+
+// Trick to test for 64-bit architecture at compile time.
+namespace {
+template <unsigned> inline uint64_t MurmurHashNativeBackend(const void * key, std::size_t len, uint64_t seed) {
+  return MurmurHash64A(key, len, seed);
+}
+template <> inline uint64_t MurmurHashNativeBackend<4>(const void * key, std::size_t len, uint64_t seed) {
+  return MurmurHash64B(key, len, seed);
+}
+} // namespace
+
+uint64_t MurmurHashNative(const void * key, std::size_t len, uint64_t seed) {
+  return MurmurHashNativeBackend<sizeof(void*)>(key, len, seed);
+}
+
+} // namespace util
diff --git a/util/murmur_hash.hh b/util/murmur_hash.hh
new file mode 100644
index 000000000..ae7e88dec
--- /dev/null
+++ b/util/murmur_hash.hh
@@ -0,0 +1,14 @@
+#ifndef UTIL_MURMUR_HASH__
+#define UTIL_MURMUR_HASH__
+#include <cstddef>
+#include <stdint.h>
+
+namespace util {
+
+uint64_t MurmurHash64A(const void * key, std::size_t len, uint64_t seed = 0);
+uint64_t MurmurHash64B(const void * key, std::size_t len, uint64_t seed = 0);
+uint64_t MurmurHashNative(const void * key, std::size_t len, uint64_t seed = 0);
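+
+// A small usage sketch (illustrative only): hash a token, e.g. for a
+// probing hash table key.  MurmurHashNative selects 64A or 64B by pointer
+// width, so hash values differ across architectures.
+inline uint64_t ExampleHashToken(const char *token, std::size_t length) {
+  return MurmurHashNative(token, length);
+}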
diff --git a/util/probing_hash_table.hh b/util/probing_hash_table.hh
new file mode 100644
index 000000000..770faa7e3
--- /dev/null
+++ b/util/probing_hash_table.hh
@@ -0,0 +1,144 @@
+#ifndef UTIL_PROBING_HASH_TABLE__
+#define UTIL_PROBING_HASH_TABLE__
+
+#include "util/exception.hh"
+
+#include <algorithm>
+#include <cstddef>
+#include <functional>
+
+#include <assert.h>
+#include <stdint.h>
+
+namespace util {
+
+/* Thrown when table grows too large */
+class ProbingSizeException : public Exception {
+  public:
+    ProbingSizeException() throw() {}
+    ~ProbingSizeException() throw() {}
+};
+
+// std::identity is an SGI extension :-(
+struct IdentityHash {
+  template <class T> T operator()(T arg) const { return arg; }
+};
+
+/* Non-standard hash table
+ * Buckets must be set at the beginning and must be greater than maximum number
+ * of elements, else it throws ProbingSizeException.
+ * Memory management and initialization is externalized to make it easier to
+ * serialize these to disk and load them quickly.
+ * Uses linear probing to find value.
+ * Only insert and lookup operations.
+ */
+template <class EntryT, class HashT, class EqualT = std::equal_to<typename EntryT::Key> > class ProbingHashTable {
+  public:
+    typedef EntryT Entry;
+    typedef typename Entry::Key Key;
+    typedef const Entry *ConstIterator;
+    typedef Entry *MutableIterator;
+    typedef HashT Hash;
+    typedef EqualT Equal;
+
+  public:
+    static uint64_t Size(uint64_t entries, float multiplier) {
+      uint64_t buckets = std::max(entries + 1, static_cast<uint64_t>(multiplier * static_cast<float>(entries)));
+      return buckets * sizeof(Entry);
+    }
+
+    // Must be assigned to later.
+    ProbingHashTable() : entries_(0)
+#ifdef DEBUG
+      , initialized_(false)
+#endif
+    {}
+
+    ProbingHashTable(void *start, std::size_t allocated, const Key &invalid = Key(), const Hash &hash_func = Hash(), const Equal &equal_func = Equal())
+      : begin_(reinterpret_cast<MutableIterator>(start)),
+        buckets_(allocated / sizeof(Entry)),
+        end_(begin_ + buckets_),
+        invalid_(invalid),
+        hash_(hash_func),
+        equal_(equal_func),
+        entries_(0)
+#ifdef DEBUG
+      , initialized_(true)
+#endif
+    {}
+
+    template <class T> MutableIterator Insert(const T &t) {
+#ifdef DEBUG
+      assert(initialized_);
+#endif
+      UTIL_THROW_IF(++entries_ >= buckets_, ProbingSizeException, "Hash table with " << buckets_ << " buckets is full.");
+      for (MutableIterator i(begin_ + (hash_(t.GetKey()) % buckets_));;) {
+        if (equal_(i->GetKey(), invalid_)) { *i = t; return i; }
+        if (++i == end_) { i = begin_; }
+      }
+    }
+
+    // Return true if the value was found (and not inserted).  This is consistent with Find but the opposite of hash_map!
+    template <class T> bool FindOrInsert(const T &t, MutableIterator &out) {
+#ifdef DEBUG
+      assert(initialized_);
+#endif
+      for (MutableIterator i(begin_ + (hash_(t.GetKey()) % buckets_));;) {
+        Key got(i->GetKey());
+        if (equal_(got, t.GetKey())) { out = i; return true; }
+        if (equal_(got, invalid_)) {
+          UTIL_THROW_IF(++entries_ >= buckets_, ProbingSizeException, "Hash table with " << buckets_ << " buckets is full.");
+          *i = t;
+          out = i;
+          return false;
+        }
+        if (++i == end_) i = begin_;
+      }
+    }
+
+    void FinishedInserting() {}
+
+    void LoadedBinary() {}
+
+    // Don't change anything related to GetKey,
+    template <class Key> bool UnsafeMutableFind(const Key key, MutableIterator &out) {
+#ifdef DEBUG
+      assert(initialized_);
+#endif
+      for (MutableIterator i(begin_ + (hash_(key) % buckets_));;) {
+        Key got(i->GetKey());
+        if (equal_(got, key)) { out = i; return true; }
+        if (equal_(got, invalid_)) return false;
+        if (++i == end_) i = begin_;
+      }
+    }
+
+    template <class Key> bool Find(const Key key, ConstIterator &out) const {
+#ifdef DEBUG
+      assert(initialized_);
+#endif
+      for (ConstIterator i(begin_ + (hash_(key) % buckets_));;) {
+        Key got(i->GetKey());
+        if (equal_(got, key)) { out = i; return true; }
+        if (equal_(got, invalid_)) return false;
+        if (++i == end_) i = begin_;
+      }
+    }
+
+  private:
+    MutableIterator begin_;
+    std::size_t buckets_;
+    MutableIterator end_;
+    Key invalid_;
+    Hash hash_;
+    Equal equal_;
+    std::size_t entries_;
+#ifdef DEBUG
+    bool initialized_;
+#endif
+};
+
+} // namespace util
+
+#endif // UTIL_PROBING_HASH_TABLE__
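To show the intended calling pattern (a sketch, not part of the patch; Entry, the sizes, and the keys are made up): the caller owns the memory, allocates Size() bytes, and zeroes them so every bucket starts out holding the invalid key, which defaults to Key().

    #include "util/probing_hash_table.hh"

    #include <vector>

    struct Entry {
      typedef uint64_t Key;
      uint64_t key;
      unsigned value;
      uint64_t GetKey() const { return key; }
    };

    typedef util::ProbingHashTable<Entry, util::IdentityHash> Table;

    int main() {
      std::size_t size = static_cast<std::size_t>(Table::Size(100, 1.5));
      std::vector<char> backing(size, 0);  // zeroed => all buckets invalid
      Table table(&backing[0], size);

      Entry e;
      e.key = 42;
      e.value = 7;
      table.Insert(e);

      Table::ConstIterator found;
      if (table.Find(static_cast<uint64_t>(42), found)) {
        // found->value == 7 here.
      }
      return 0;
    }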
diff --git a/util/probing_hash_table_test.cc b/util/probing_hash_table_test.cc
new file mode 100644
index 000000000..be0fa8597
--- /dev/null
+++ b/util/probing_hash_table_test.cc
@@ -0,0 +1,50 @@
+#include "util/probing_hash_table.hh"
+
+#define BOOST_TEST_MODULE ProbingHashTableTest
+#include <boost/test/unit_test.hpp>
+#include <boost/scoped_array.hpp>
+#include <boost/functional/hash.hpp>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+
+namespace util {
+namespace {
+
+struct Entry {
+  unsigned char key;
+  typedef unsigned char Key;
+
+  unsigned char GetKey() const {
+    return key;
+  }
+
+  uint64_t GetValue() const {
+    return value;
+  }
+
+  uint64_t value;
+};
+
+typedef ProbingHashTable<Entry, boost::hash<unsigned char> > Table;
+
+BOOST_AUTO_TEST_CASE(simple) {
+  size_t size = Table::Size(10, 1.2);
+  boost::scoped_array<char> mem(new char[size]);
+  memset(mem.get(), 0, size);
+
+  Table table(mem.get(), size);
+  const Entry *i = NULL;
+  BOOST_CHECK(!table.Find(2, i));
+  Entry to_ins;
+  to_ins.key = 3;
+  to_ins.value = 328920;
+  table.Insert(to_ins);
+  BOOST_REQUIRE(table.Find(3, i));
+  BOOST_CHECK_EQUAL(3, i->GetKey());
+  BOOST_CHECK_EQUAL(static_cast<uint64_t>(328920), i->GetValue());
+  BOOST_CHECK(!table.Find(2, i));
+}
+
+} // namespace
+} // namespace util
diff --git a/util/proxy_iterator.hh b/util/proxy_iterator.hh
new file mode 100644
index 000000000..121a45fa3
--- /dev/null
+++ b/util/proxy_iterator.hh
@@ -0,0 +1,96 @@
+#ifndef UTIL_PROXY_ITERATOR__
+#define UTIL_PROXY_ITERATOR__
+
+#include <cstddef>
+#include <iterator>
+
+/* This is a RandomAccessIterator that uses a proxy to access the underlying
+ * data.  Useful for packing data at bit offsets but still using STL
+ * algorithms.
+ *
+ * Normally I would use boost::iterator_facade but some people are too lazy to
+ * install boost and still want to use my language model.  It's amazing how
+ * many operators an iterator has.
+ *
+ * The Proxy needs to provide:
+ *   class InnerIterator;
+ *   InnerIterator &Inner();
+ *   const InnerIterator &Inner() const;
+ *
+ * InnerIterator has to implement:
+ *   operator==(InnerIterator)
+ *   operator<(InnerIterator)
+ *   operator+=(std::ptrdiff_t)
+ *   operator-(InnerIterator)
+ * and of course whatever Proxy needs to dereference it.
+ *
+ * It's also a good idea to specialize std::swap for Proxy.
+ */
+
+namespace util {
+template <class Proxy> class ProxyIterator {
+  private:
+    // Self.
+    typedef ProxyIterator<Proxy> S;
+    typedef typename Proxy::InnerIterator InnerIterator;
+
+  public:
+    typedef std::random_access_iterator_tag iterator_category;
+    typedef typename Proxy::value_type value_type;
+    typedef std::ptrdiff_t difference_type;
+    typedef Proxy reference;
+    typedef Proxy * pointer;
+
+    ProxyIterator() {}
+
+    // For cast from non const to const.
+    template <class AlternateProxy> ProxyIterator(const ProxyIterator<AlternateProxy> &in) : p_(*in) {}
+    explicit ProxyIterator(const Proxy &p) : p_(p) {}
+
+    // p_'s operator= does value copying, but here we want iterator copying.
+    S &operator=(const S &other) {
+      I() = other.I();
+      return *this;
+    }
+
+    bool operator==(const S &other) const { return I() == other.I(); }
+    bool operator!=(const S &other) const { return !(*this == other); }
+    bool operator<(const S &other) const { return I() < other.I(); }
+    bool operator>(const S &other) const { return other < *this; }
+    bool operator<=(const S &other) const { return !(*this > other); }
+    bool operator>=(const S &other) const { return !(*this < other); }
+
+    S &operator++() { return *this += 1; }
+    S operator++(int) { S ret(*this); ++*this; return ret; }
+    S &operator+=(std::ptrdiff_t amount) { I() += amount; return *this; }
+    S operator+(std::ptrdiff_t amount) const { S ret(*this); ret += amount; return ret; }
+
+    S &operator--() { return *this -= 1; }
+    S operator--(int) { S ret(*this); --*this; return ret; }
+    S &operator-=(std::ptrdiff_t amount) { I() += (-amount); return *this; }
+    S operator-(std::ptrdiff_t amount) const { S ret(*this); ret -= amount; return ret; }
+
+    std::ptrdiff_t operator-(const S &other) const { return I() - other.I(); }
+
+    Proxy operator*() { return p_; }
+    const Proxy operator*() const { return p_; }
+    Proxy *operator->() { return &p_; }
+    const Proxy *operator->() const { return &p_; }
+    Proxy operator[](std::ptrdiff_t amount) const { return *(*this + amount); }
+
+    const InnerIterator &Inner() { return p_.Inner(); }
+
+  private:
+    InnerIterator &I() { return p_.Inner(); }
+    const InnerIterator &I() const { return p_.Inner(); }
+
+    Proxy p_;
+};
+
+template <class Proxy> ProxyIterator<Proxy> operator+(std::ptrdiff_t amount, const ProxyIterator<Proxy> &it) {
+  return it + amount;
+}
+
+} // namespace util
+
+#endif // UTIL_PROXY_ITERATOR__
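The header comment above lists what a Proxy must provide. As a concrete illustration (hypothetical; IntProxy is made up for this sketch, and SizedProxy in util/sized_iterator.hh below is the real in-tree consumer), here is about the smallest Proxy that satisfies the interface, wrapping a plain int array:

    #include "util/proxy_iterator.hh"

    class IntProxy {
      public:
        typedef int *InnerIterator;
        typedef int value_type;

        explicit IntProxy(int *p) : p_(p) {}

        // Reads and writes go through the proxied location.
        operator int() const { return *p_; }
        IntProxy &operator=(int v) { *p_ = v; return *this; }

        InnerIterator &Inner() { return p_; }
        const InnerIterator &Inner() const { return p_; }

      private:
        int *p_;
    };

    typedef util::ProxyIterator<IntProxy> IntIterator;

    std::ptrdiff_t CountElements(int *begin, int *end) {
      // Difference is delegated to the inner int* iterators.
      return IntIterator(IntProxy(end)) - IntIterator(IntProxy(begin));
    }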
diff --git a/util/scoped.hh b/util/scoped.hh
new file mode 100644
index 000000000..93e2e8176
--- /dev/null
+++ b/util/scoped.hh
@@ -0,0 +1,97 @@
+#ifndef UTIL_SCOPED__
+#define UTIL_SCOPED__
+
+#include "util/exception.hh"
+
+/* Other scoped objects in the style of scoped_ptr. */
+#include <cstddef>
+#include <cstdlib>
+
+namespace util {
+
+template <class T, void (*Free)(T*)> class scoped_thing {
+  public:
+    explicit scoped_thing(T *c = static_cast<T*>(0)) : c_(c) {}
+
+    ~scoped_thing() { if (c_) Free(c_); }
+
+    void reset(T *c) {
+      if (c_) Free(c_);
+      c_ = c;
+    }
+
+    T &operator*() { return *c_; }
+    const T&operator*() const { return *c_; }
+    T &operator->() { return *c_; }
+    const T&operator->() const { return *c_; }
+
+    T *get() { return c_; }
+    const T *get() const { return c_; }
+
+  private:
+    T *c_;
+
+    scoped_thing(const scoped_thing &);
+    scoped_thing &operator=(const scoped_thing &);
+};
+
+class scoped_malloc {
+  public:
+    scoped_malloc() : p_(NULL) {}
+
+    scoped_malloc(void *p) : p_(p) {}
+
+    ~scoped_malloc() { std::free(p_); }
+
+    void reset(void *p = NULL) {
+      scoped_malloc other(p_);
+      p_ = p;
+    }
+
+    void call_realloc(std::size_t to) {
+      void *ret;
+      UTIL_THROW_IF(!(ret = std::realloc(p_, to)) && to, util::ErrnoException, "realloc to " << to << " bytes failed.");
+      p_ = ret;
+    }
+
+    void *get() { return p_; }
+    const void *get() const { return p_; }
+
+  private:
+    void *p_;
+
+    scoped_malloc(const scoped_malloc &);
+    scoped_malloc &operator=(const scoped_malloc &);
+};
+
+// Hat tip to boost.
+template <class T> class scoped_array {
+  public:
+    explicit scoped_array(T *content = NULL) : c_(content) {}
+
+    ~scoped_array() { delete [] c_; }
+
+    T *get() { return c_; }
+    const T* get() const { return c_; }
+
+    T &operator*() { return *c_; }
+    const T&operator*() const { return *c_; }
+
+    T &operator->() { return *c_; }
+    const T&operator->() const { return *c_; }
+
+    T &operator[](std::size_t idx) { return c_[idx]; }
+    const T &operator[](std::size_t idx) const { return c_[idx]; }
+
+    void reset(T *to = NULL) {
+      scoped_array other(c_);
+      c_ = to;
+    }
+
+  private:
+    T *c_;
+};
+
+} // namespace util
+
+#endif // UTIL_SCOPED__
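A quick sketch of scoped_malloc in use (illustrative only; the main() harness is made up): it takes ownership of a malloc'd block, and call_realloc grows it, throwing util::ErrnoException on failure instead of returning NULL.

    #include "util/scoped.hh"

    #include <stdlib.h>

    int main() {
      util::scoped_malloc buf(malloc(1024));
      buf.call_realloc(4096);  // throws on failure; buf keeps ownership
      char *data = static_cast<char*>(buf.get());
      data[0] = 'x';
      return 0;
    }  // std::free runs here.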
diff --git a/util/sized_iterator.hh b/util/sized_iterator.hh
new file mode 100644
index 000000000..aabcc5319
--- /dev/null
+++ b/util/sized_iterator.hh
@@ -0,0 +1,107 @@
+#ifndef UTIL_SIZED_ITERATOR__
+#define UTIL_SIZED_ITERATOR__
+
+#include "util/proxy_iterator.hh"
+
+#include <functional>
+#include <string>
+
+#include <stdint.h>
+#include <string.h>
+
+namespace util {
+
+class SizedInnerIterator {
+  public:
+    SizedInnerIterator() {}
+
+    SizedInnerIterator(void *ptr, std::size_t size) : ptr_(static_cast<uint8_t*>(ptr)), size_(size) {}
+
+    bool operator==(const SizedInnerIterator &other) const {
+      return ptr_ == other.ptr_;
+    }
+    bool operator<(const SizedInnerIterator &other) const {
+      return ptr_ < other.ptr_;
+    }
+    SizedInnerIterator &operator+=(std::ptrdiff_t amount) {
+      ptr_ += amount * size_;
+      return *this;
+    }
+    std::ptrdiff_t operator-(const SizedInnerIterator &other) const {
+      return (ptr_ - other.ptr_) / size_;
+    }
+
+    const void *Data() const { return ptr_; }
+    void *Data() { return ptr_; }
+    std::size_t EntrySize() const { return size_; }
+
+  private:
+    uint8_t *ptr_;
+    std::size_t size_;
+};
+
+class SizedProxy {
+  public:
+    SizedProxy() {}
+
+    SizedProxy(void *ptr, std::size_t size) : inner_(ptr, size) {}
+
+    operator std::string() const {
+      return std::string(reinterpret_cast<const char*>(inner_.Data()), inner_.EntrySize());
+    }
+
+    SizedProxy &operator=(const SizedProxy &from) {
+      memcpy(inner_.Data(), from.inner_.Data(), inner_.EntrySize());
+      return *this;
+    }
+
+    SizedProxy &operator=(const std::string &from) {
+      memcpy(inner_.Data(), from.data(), inner_.EntrySize());
+      return *this;
+    }
+
+    const void *Data() const { return inner_.Data(); }
+    void *Data() { return inner_.Data(); }
+
+  private:
+    friend class util::ProxyIterator<SizedProxy>;
+
+    typedef std::string value_type;
+
+    typedef SizedInnerIterator InnerIterator;
+
+    InnerIterator &Inner() { return inner_; }
+    const InnerIterator &Inner() const { return inner_; }
+    InnerIterator inner_;
+};
+
+typedef ProxyIterator<SizedProxy> SizedIterator;
+
+inline SizedIterator SizedIt(void *ptr, std::size_t size) { return SizedIterator(SizedProxy(ptr, size)); }
+
+// Useful wrapper for a comparison function i.e. sort.
+template <class Delegate, class Proxy = SizedProxy> class SizedCompare : public std::binary_function<const Proxy, const Proxy, bool> {
+  public:
+    explicit SizedCompare(const Delegate &delegate = Delegate()) : delegate_(delegate) {}
+
+    bool operator()(const Proxy &first, const Proxy &second) const {
+      return delegate_(first.Data(), second.Data());
+    }
+    bool operator()(const Proxy &first, const std::string &second) const {
+      return delegate_(first.Data(), second.data());
+    }
+    bool operator()(const std::string &first, const Proxy &second) const {
+      return delegate_(first.data(), second.Data());
+    }
+    bool operator()(const std::string &first, const std::string &second) const {
+      return delegate_(first.data(), second.data());
+    }
+
+    const Delegate &GetDelegate() const { return delegate_; }
+
+  private:
+    const Delegate delegate_;
+};
+
+} // namespace util
+#endif // UTIL_SIZED_ITERATOR__
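This is the piece that lets STL sort operate on packed, fixed-width records. A sketch of sorting records in place through SizedIterator (illustrative only; CompareFirstByte and SortRecords are made up; this assumes entry_size stays constant across the range):

    #include "util/sized_iterator.hh"

    #include <algorithm>
    #include <cstddef>

    // Hypothetical delegate: order fixed-width records by their first byte.
    struct CompareFirstByte {
      bool operator()(const void *a, const void *b) const {
        return *static_cast<const unsigned char*>(a) < *static_cast<const unsigned char*>(b);
      }
    };

    void SortRecords(void *base, std::size_t count, std::size_t entry_size) {
      std::sort(util::SizedIt(base, entry_size),
                util::SizedIt(static_cast<char*>(base) + count * entry_size, entry_size),
                util::SizedCompare<CompareFirstByte>());
    }

The std::string overloads on SizedCompare exist because std::sort copies a record out through the proxy's string conversion and then compares that temporary against in-place records.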
diff --git a/util/sorted_uniform.hh b/util/sorted_uniform.hh
new file mode 100644
index 000000000..7700d9e64
--- /dev/null
+++ b/util/sorted_uniform.hh
@@ -0,0 +1,127 @@
+#ifndef UTIL_SORTED_UNIFORM__
+#define UTIL_SORTED_UNIFORM__
+
+#include <algorithm>
+#include <cstddef>
+
+#include <assert.h>
+#include <stdint.h>
+
+namespace util {
+
+template <class T> class IdentityAccessor {
+  public:
+    typedef T Key;
+    T operator()(const T *in) const { return *in; }
+};
+
+struct Pivot64 {
+  static inline std::size_t Calc(uint64_t off, uint64_t range, std::size_t width) {
+    std::size_t ret = static_cast<std::size_t>(static_cast<float>(off) / static_cast<float>(range) * static_cast<float>(width));
+    // Cap for floating point rounding
+    return (ret < width) ? ret : width - 1;
+  }
+};
+
+// Use when off * width is <2^64.  This is guaranteed when each of them is actually a 32-bit value.
+struct Pivot32 {
+  static inline std::size_t Calc(uint64_t off, uint64_t range, uint64_t width) {
+    return static_cast<std::size_t>((off * width) / (range + 1));
+  }
+};
+
+// Usage: PivotSelect<sizeof(DataType)>::T
+template <unsigned> struct PivotSelect;
+template <> struct PivotSelect<8> { typedef Pivot64 T; };
+template <> struct PivotSelect<4> { typedef Pivot32 T; };
+template <> struct PivotSelect<2> { typedef Pivot32 T; };
+
+/* Binary search. */
+template <class Iterator, class Accessor> bool BinaryFind(
+    const Accessor &accessor,
+    Iterator begin,
+    Iterator end,
+    const typename Accessor::Key key, Iterator &out) {
+  while (end > begin) {
+    Iterator pivot(begin + (end - begin) / 2);
+    typename Accessor::Key mid(accessor(pivot));
+    if (mid < key) {
+      begin = pivot + 1;
+    } else if (mid > key) {
+      end = pivot;
+    } else {
+      out = pivot;
+      return true;
+    }
+  }
+  return false;
+}
+
+// Search the range [before_it + 1, after_it - 1] for key.
+// Preconditions:
+// before_v <= key <= after_v
+// before_v <= all values in the range [before_it + 1, after_it - 1] <= after_v
+// range is sorted.
+template <class Iterator, class Accessor, class Pivot> bool BoundedSortedUniformFind(
+    const Accessor &accessor,
+    Iterator before_it, typename Accessor::Key before_v,
+    Iterator after_it, typename Accessor::Key after_v,
+    const typename Accessor::Key key, Iterator &out) {
+  while (after_it - before_it > 1) {
+    Iterator pivot(before_it + (1 + Pivot::Calc(key - before_v, after_v - before_v, after_it - before_it - 1)));
+    typename Accessor::Key mid(accessor(pivot));
+    if (mid < key) {
+      before_it = pivot;
+      before_v = mid;
+    } else if (mid > key) {
+      after_it = pivot;
+      after_v = mid;
+    } else {
+      out = pivot;
+      return true;
+    }
+  }
+  return false;
+}
+
+template <class Iterator, class Accessor, class Pivot> bool SortedUniformFind(const Accessor &accessor, Iterator begin, Iterator end, const typename Accessor::Key key, Iterator &out) {
+  if (begin == end) return false;
+  typename Accessor::Key below(accessor(begin));
+  if (key <= below) {
+    if (key == below) { out = begin; return true; }
+    return false;
+  }
+  // Make the range [begin, end].
+  --end;
+  typename Accessor::Key above(accessor(end));
+  if (key >= above) {
+    if (key == above) { out = end; return true; }
+    return false;
+  }
+  return BoundedSortedUniformFind<Iterator, Accessor, Pivot>(accessor, begin, below, end, above, key, out);
+}
+
+// May return begin - 1.
+template <class Iterator, class Accessor> Iterator BinaryBelow(
+    const Accessor &accessor,
+    Iterator begin,
+    Iterator end,
+    const typename Accessor::Key key) {
+  while (end > begin) {
+    Iterator pivot(begin + (end - begin) / 2);
+    typename Accessor::Key mid(accessor(pivot));
+    if (mid < key) {
+      begin = pivot + 1;
+    } else if (mid > key) {
+      end = pivot;
+    } else {
+      for (++pivot; (pivot < end) && accessor(pivot) == mid; ++pivot) {}
+      return pivot - 1;
+    }
+  }
+  return begin - 1;
+}
+
+} // namespace util
+
+#endif // UTIL_SORTED_UNIFORM__
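SortedUniformFind is an interpolation search: instead of always bisecting, the Pivot guesses a position from the key's relative offset within [before_v, after_v], which pays off when keys are roughly uniformly distributed, as hashed vocabulary ids are. A sketch of a lookup over a plain sorted uint64_t array (illustrative only; Lookup is a made-up wrapper):

    #include "util/sorted_uniform.hh"

    bool Lookup(const uint64_t *begin, const uint64_t *end, uint64_t key, const uint64_t *&out) {
      return util::SortedUniformFind<const uint64_t*, util::IdentityAccessor<uint64_t>, util::Pivot64>(
          util::IdentityAccessor<uint64_t>(), begin, end, key, out);
    }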
diff --git a/util/sorted_uniform_test.cc b/util/sorted_uniform_test.cc
new file mode 100644
index 000000000..d9f6fad1e
--- /dev/null
+++ b/util/sorted_uniform_test.cc
@@ -0,0 +1,127 @@
+#include "util/sorted_uniform.hh"
+
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/uniform_int.hpp>
+#include <boost/random/variate_generator.hpp>
+#include <boost/scoped_array.hpp>
+#include <boost/unordered_map.hpp>
+
+#define BOOST_TEST_MODULE SortedUniformTest
+#include <boost/test/unit_test.hpp>
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+namespace util {
+namespace {
+
+template <class KeyT, class ValueT> struct Entry {
+  typedef KeyT Key;
+  typedef ValueT Value;
+
+  Key key;
+  Value value;
+
+  Key GetKey() const {
+    return key;
+  }
+
+  Value GetValue() const {
+    return value;
+  }
+
+  bool operator<(const Entry &other) const {
+    return key < other.key;
+  }
+};
+
+template <class KeyT> struct Accessor {
+  typedef KeyT Key;
+  template <class Value> Key operator()(const Entry<Key, Value> *entry) const {
+    return entry->GetKey();
+  }
+};
+
+template <class Key> void Check(const Entry<Key, unsigned char> *begin, const Entry<Key, unsigned char> *end, const boost::unordered_map<Key, unsigned char> &reference, const Key key) {
+  typename boost::unordered_map<Key, unsigned char>::const_iterator ref = reference.find(key);
+  typedef const Entry<Key, unsigned char> *It;
+  // g++ can't tell that require will crash and burn.
+  It i = NULL;
+  bool ret = SortedUniformFind<It, Accessor<Key>, Pivot64>(Accessor<Key>(), begin, end, key, i);
+  if (ref == reference.end()) {
+    BOOST_CHECK(!ret);
+  } else {
+    BOOST_REQUIRE(ret);
+    BOOST_CHECK_EQUAL(ref->second, i->GetValue());
+  }
+}
+
+BOOST_AUTO_TEST_CASE(empty) {
+  typedef const Entry<uint64_t, unsigned char> T;
+  const T *i;
+  bool ret = SortedUniformFind<const T*, Accessor<uint64_t>, Pivot64>(Accessor<uint64_t>(), (const T*)NULL, (const T*)NULL, (uint64_t)10, i);
+  BOOST_CHECK(!ret);
+}
+
+template <class Key> void RandomTest(Key upper, size_t entries, size_t queries) {
+  typedef unsigned char Value;
+  boost::mt19937 rng;
+  boost::uniform_int<Key> range_key(0, upper);
+  boost::uniform_int<Value> range_value(0, 255);
+  boost::variate_generator<boost::mt19937&, boost::uniform_int<Key> > gen_key(rng, range_key);
+  boost::variate_generator<boost::mt19937&, boost::uniform_int<Value> > gen_value(rng, range_value);
+
+  typedef Entry<Key, Value> Ent;
+  std::vector<Ent> backing;
+  boost::unordered_map<Key, unsigned char> reference;
+  Ent ent;
+  for (size_t i = 0; i < entries; ++i) {
+    Key key = gen_key();
+    unsigned char value = gen_value();
+    if (reference.insert(std::make_pair(key, value)).second) {
+      ent.key = key;
+      ent.value = value;
+      backing.push_back(ent);
+    }
+  }
+  std::sort(backing.begin(), backing.end());
+
+  // Random queries.
+  for (size_t i = 0; i < queries; ++i) {
+    const Key key = gen_key();
+    Check(&*backing.begin(), &*backing.end(), reference, key);
+  }
+
+  // Queries of keys known to be present.
+  typename boost::unordered_map<Key, unsigned char>::const_iterator it = reference.begin();
+  for (size_t i = 0; (i < queries) && (it != reference.end()); ++i, ++it) {
+    Check(&*backing.begin(), &*backing.end(), reference, it->first);
+  }
+}
+
+BOOST_AUTO_TEST_CASE(basic) {
+  RandomTest<uint8_t>(11, 10, 200);
+}
+
+BOOST_AUTO_TEST_CASE(tiny_dense_random) {
+  RandomTest<uint8_t>(11, 50, 200);
+}
+
+BOOST_AUTO_TEST_CASE(small_dense_random) {
+  RandomTest<uint8_t>(100, 100, 200);
+}
+
+BOOST_AUTO_TEST_CASE(small_sparse_random) {
+  RandomTest<uint8_t>(200, 15, 200);
+}
+
+BOOST_AUTO_TEST_CASE(medium_sparse_random) {
+  RandomTest<uint16_t>(32000, 1000, 2000);
+}
+
+BOOST_AUTO_TEST_CASE(sparse_random) {
+  RandomTest<uint64_t>(std::numeric_limits<uint64_t>::max(), 100000, 2000);
+}
+
+} // namespace
+} // namespace util
diff --git a/util/string_piece.cc b/util/string_piece.cc
new file mode 100644
index 000000000..b422cefc9
--- /dev/null
+++ b/util/string_piece.cc
@@ -0,0 +1,192 @@
+// Copyright 2004 The RE2 Authors.  All Rights Reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in string_piece.hh.
+
+#include "util/string_piece.hh"
+
+#include <algorithm>
+
+#include <limits.h>
+
+#ifndef HAVE_ICU
+
+typedef StringPiece::size_type size_type;
+
+void StringPiece::CopyToString(std::string* target) const {
+  target->assign(ptr_, length_);
+}
+
+size_type StringPiece::find(const StringPiece& s, size_type pos) const {
+  if (length_ < 0 || pos > static_cast<size_type>(length_))
+    return npos;
+
+  const char* result = std::search(ptr_ + pos, ptr_ + length_,
+                                   s.ptr_, s.ptr_ + s.length_);
+  const size_type xpos = result - ptr_;
+  return xpos + s.length_ <= length_ ? xpos : npos;
+}
+
+size_type StringPiece::find(char c, size_type pos) const {
+  if (length_ <= 0 || pos >= static_cast<size_type>(length_)) {
+    return npos;
+  }
+  const char* result = std::find(ptr_ + pos, ptr_ + length_, c);
+  return result != ptr_ + length_ ? result - ptr_ : npos;
+}
+
+size_type StringPiece::rfind(const StringPiece& s, size_type pos) const {
+  if (length_ < s.length_) return npos;
+  const size_t ulen = length_;
+  if (s.length_ == 0) return std::min(ulen, pos);
+
+  const char* last = ptr_ + std::min(ulen - s.length_, pos) + s.length_;
+  const char* result = std::find_end(ptr_, last, s.ptr_, s.ptr_ + s.length_);
+  return result != last ? result - ptr_ : npos;
+}
+
+size_type StringPiece::rfind(char c, size_type pos) const {
+  if (length_ <= 0) return npos;
+  for (int i = std::min(pos, static_cast<size_type>(length_ - 1));
+       i >= 0; --i) {
+    if (ptr_[i] == c) {
+      return i;
+    }
+  }
+  return npos;
+}
+
+// For each character in characters_wanted, sets the index corresponding
+// to the ASCII code of that character to 1 in table.  This is used by
+// the find_.*_of methods below to tell whether or not a character is in
+// the lookup table in constant time.
+// The argument `table' must be an array that is large enough to hold all
+// the possible values of an unsigned char.  Thus it should be be declared
+// as follows:
+//   bool table[UCHAR_MAX + 1]
+static inline void BuildLookupTable(const StringPiece& characters_wanted,
+                                    bool* table) {
+  const size_type length = characters_wanted.length();
+  const char* const data = characters_wanted.data();
+  for (size_type i = 0; i < length; ++i) {
+    table[static_cast<unsigned char>(data[i])] = true;
+  }
+}
+
+size_type StringPiece::find_first_of(const StringPiece& s,
+                                     size_type pos) const {
+  if (length_ == 0 || s.length_ == 0)
+    return npos;
+
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.length_ == 1)
+    return find_first_of(s.ptr_[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (size_type i = pos; i < length_; ++i) {
+    if (lookup[static_cast<unsigned char>(ptr_[i])]) {
+      return i;
+    }
+  }
+  return npos;
+}
+
+size_type StringPiece::find_first_not_of(const StringPiece& s,
+                                         size_type pos) const {
+  if (length_ == 0)
+    return npos;
+
+  if (s.length_ == 0)
+    return 0;
+
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.length_ == 1)
+    return find_first_not_of(s.ptr_[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (size_type i = pos; i < length_; ++i) {
+    if (!lookup[static_cast<unsigned char>(ptr_[i])]) {
+      return i;
+    }
+  }
+  return npos;
+}
+
+size_type StringPiece::find_first_not_of(char c, size_type pos) const {
+  if (length_ == 0)
+    return npos;
+
+  for (; pos < length_; ++pos) {
+    if (ptr_[pos] != c) {
+      return pos;
+    }
+  }
+  return npos;
+}
+
+size_type StringPiece::find_last_of(const StringPiece& s, size_type pos) const {
+  if (length_ == 0 || s.length_ == 0)
+    return npos;
+
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.length_ == 1)
+    return find_last_of(s.ptr_[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (size_type i = std::min(pos, length_ - 1); ; --i) {
+    if (lookup[static_cast<unsigned char>(ptr_[i])])
+      return i;
+    if (i == 0)
+      break;
+  }
+  return npos;
+}
+
+size_type StringPiece::find_last_not_of(const StringPiece& s,
+                                        size_type pos) const {
+  if (length_ == 0)
+    return npos;
+
+  size_type i = std::min(pos, length_ - 1);
+  if (s.length_ == 0)
+    return i;
+
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.length_ == 1)
+    return find_last_not_of(s.ptr_[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (; ; --i) {
+    if (!lookup[static_cast<unsigned char>(ptr_[i])])
+      return i;
+    if (i == 0)
+      break;
+  }
+  return npos;
+}
+
+size_type StringPiece::find_last_not_of(char c, size_type pos) const {
+  if (length_ == 0)
+    return npos;
+
+  for (size_type i = std::min(pos, length_ - 1); ; --i) {
+    if (ptr_[i] != c)
+      return i;
+    if (i == 0)
+      break;
+  }
+  return npos;
+}
+
+StringPiece StringPiece::substr(size_type pos, size_type n) const {
+  if (pos > length_) pos = length_;
+  if (n > length_ - pos) n = length_ - pos;
+  return StringPiece(ptr_ + pos, n);
+}
+
+const size_type StringPiece::npos = size_type(-1);
+
+#endif // !HAVE_ICU
diff --git a/util/string_piece.hh b/util/string_piece.hh
new file mode 100644
index 000000000..be6a643d0
--- /dev/null
+++ b/util/string_piece.hh
@@ -0,0 +1,288 @@
+/* If you use ICU in your program, then compile with -DHAVE_ICU -licui18n.  If
+ * you don't use ICU, then this will use the Google implementation from Chrome.
+ * This has been modified from the original version to let you choose.
+ */
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copied from strings/stringpiece.h with modifications
+//
+// A string-like object that points to a sized piece of memory.
+//
+// Functions or methods may use const StringPiece& parameters to accept either
+// a "const char*" or a "string" value that will be implicitly converted to
+// a StringPiece.  The implicit conversion means that it is often appropriate
+// to include this .h file in other files rather than forward-declaring
+// StringPiece as would be appropriate for most other Google classes.
+//
+// Systematic usage of StringPiece is encouraged as it will reduce unnecessary
+// conversions from "const char*" to "string" and back again.
+//
+
+#ifndef BASE_STRING_PIECE_H__
+#define BASE_STRING_PIECE_H__
+
+#include "util/have.hh"
+
+#ifdef HAVE_BOOST
+#include <boost/functional/hash/hash.hpp>
+#endif // HAVE_BOOST
+
+#include <cstring>
+#include <iosfwd>
+#include <ostream>
+
+#ifdef HAVE_ICU
+#include <unicode/stringpiece.h>
+#include <unicode/uversion.h>
+
+// Old versions of ICU don't define operator== and operator!=.
+#if (U_ICU_VERSION_MAJOR_NUM < 4) || ((U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM < 4))
+#warning You are using an old version of ICU.  Consider upgrading to ICU >= 4.6.
+inline bool operator==(const StringPiece& x, const StringPiece& y) {
+  if (x.size() != y.size())
+    return false;
+
+  return std::memcmp(x.data(), y.data(), x.size()) == 0;
+}
+
+inline bool operator!=(const StringPiece& x, const StringPiece& y) {
+  return !(x == y);
+}
+#endif // old version of ICU
+
+U_NAMESPACE_BEGIN
+#else
+
+#include <algorithm>
+#include <cstddef>
+#include <string>
+#include <string.h>
+
+#ifdef WIN32
+#undef max
+#undef min
+#endif
+
+class StringPiece {
+ public:
+  typedef size_t size_type;
+
+ private:
+  const char*   ptr_;
+  size_type     length_;
+
+ public:
+  // We provide non-explicit singleton constructors so users can pass
+  // in a "const char*" or a "string" wherever a "StringPiece" is
+  // expected.
+  StringPiece() : ptr_(NULL), length_(0) { }
+  StringPiece(const char* str)
+    : ptr_(str), length_((str == NULL) ? 0 : strlen(str)) { }
+  StringPiece(const std::string& str)
+    : ptr_(str.data()), length_(str.size()) { }
+  StringPiece(const char* offset, size_type len)
+    : ptr_(offset), length_(len) { }
+
+  // data() may return a pointer to a buffer with embedded NULs, and the
+  // returned buffer may or may not be null terminated.  Therefore it is
+  // typically a mistake to pass data() to a routine that expects a NUL
+  // terminated string.
+  const char* data() const { return ptr_; }
+  size_type size() const { return length_; }
+  size_type length() const { return length_; }
+  bool empty() const { return length_ == 0; }
+
+  void clear() { ptr_ = NULL; length_ = 0; }
+  void set(const char* data, size_type len) { ptr_ = data; length_ = len; }
+  void set(const char* str) {
+    ptr_ = str;
+    length_ = str ? strlen(str) : 0;
+  }
+  void set(const void* data, size_type len) {
+    ptr_ = reinterpret_cast<const char*>(data);
+    length_ = len;
+  }
+
+  char operator[](size_type i) const { return ptr_[i]; }
+
+  void remove_prefix(size_type n) {
+    ptr_ += n;
+    length_ -= n;
+  }
+
+  void remove_suffix(size_type n) {
+    length_ -= n;
+  }
+
+  int compare(const StringPiece& x) const {
+    int r = wordmemcmp(ptr_, x.ptr_, std::min(length_, x.length_));
+    if (r == 0) {
+      if (length_ < x.length_) r = -1;
+      else if (length_ > x.length_) r = +1;
+    }
+    return r;
+  }
+
+  std::string as_string() const {
+    // std::string doesn't like to take a NULL pointer even with a 0 size.
+    return std::string(!empty() ? data() : "", size());
+  }
+
+  void CopyToString(std::string* target) const;
+  void AppendToString(std::string* target) const;
+
+  // Does "this" start with "x"
+  bool starts_with(const StringPiece& x) const {
+    return ((length_ >= x.length_) &&
+            (wordmemcmp(ptr_, x.ptr_, x.length_) == 0));
+  }
+
+  // Does "this" end with "x"
+  bool ends_with(const StringPiece& x) const {
+    return ((length_ >= x.length_) &&
+            (wordmemcmp(ptr_ + (length_-x.length_), x.ptr_, x.length_) == 0));
+  }
+
+  // standard STL container boilerplate
+  typedef char value_type;
+  typedef const char* pointer;
+  typedef const char& reference;
+  typedef const char& const_reference;
+  typedef ptrdiff_t difference_type;
+  static const size_type npos;
+  typedef const char* const_iterator;
+  typedef const char* iterator;
+  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+  typedef std::reverse_iterator<iterator> reverse_iterator;
+  iterator begin() const { return ptr_; }
+  iterator end() const { return ptr_ + length_; }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(ptr_ + length_);
+  }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(ptr_);
+  }
+
+  size_type max_size() const { return length_; }
+  size_type capacity() const { return length_; }
+
+  size_type copy(char* buf, size_type n, size_type pos = 0) const;
+
+  size_type find(const StringPiece& s, size_type pos = 0) const;
+  size_type find(char c, size_type pos = 0) const;
+  size_type rfind(const StringPiece& s, size_type pos = npos) const;
+  size_type rfind(char c, size_type pos = npos) const;
+
+  size_type find_first_of(const StringPiece& s, size_type pos = 0) const;
+  size_type find_first_of(char c, size_type pos = 0) const {
+    return find(c, pos);
+  }
+  size_type find_first_not_of(const StringPiece& s, size_type pos = 0) const;
+  size_type find_first_not_of(char c, size_type pos = 0) const;
+  size_type find_last_of(const StringPiece& s, size_type pos = npos) const;
+  size_type find_last_of(char c, size_type pos = npos) const {
+    return rfind(c, pos);
+  }
+  size_type find_last_not_of(const StringPiece& s, size_type pos = npos) const;
+  size_type find_last_not_of(char c, size_type pos = npos) const;
+
+  StringPiece substr(size_type pos, size_type n = npos) const;
+
+  static int wordmemcmp(const char* p, const char* p2, size_type N) {
+    return memcmp(p, p2, N);
+  }
+};
+
+inline bool operator==(const StringPiece& x, const StringPiece& y) {
+  if (x.size() != y.size())
+    return false;
+
+  return std::memcmp(x.data(), y.data(), x.size()) == 0;
+}
+
+inline bool operator!=(const StringPiece& x, const StringPiece& y) {
+  return !(x == y);
+}
+
+#endif // HAVE_ICU undefined
+
+inline bool operator<(const StringPiece& x, const StringPiece& y) {
+  const int r = std::memcmp(x.data(), y.data(),
+                            std::min(x.size(), y.size()));
+  return ((r < 0) || ((r == 0) && (x.size() < y.size())));
+}
+
+inline bool operator>(const StringPiece& x, const StringPiece& y) {
+  return y < x;
+}
+
+inline bool operator<=(const StringPiece& x, const StringPiece& y) {
+  return !(x > y);
+}
+
+inline bool operator>=(const StringPiece& x, const StringPiece& y) {
+  return !(x < y);
+}
+
+// allow StringPiece to be logged (needed for unit testing).
+inline std::ostream& operator<<(std::ostream& o, const StringPiece& piece) {
+  return o.write(piece.data(), static_cast<std::streamsize>(piece.size()));
+}
+
+#ifdef HAVE_BOOST
+inline size_t hash_value(const StringPiece &str) {
+  return boost::hash_range(str.data(), str.data() + str.length());
+}
+
+/* Support for lookup of StringPiece in boost::unordered_map */
+struct StringPieceCompatibleHash : public std::unary_function<const StringPiece &, size_t> {
+  size_t operator()(const StringPiece &str) const {
+    return hash_value(str);
+  }
+};
+
+struct StringPieceCompatibleEquals : public std::binary_function<const StringPiece &, const StringPiece &, bool> {
+  bool operator()(const StringPiece &first, const StringPiece &second) const {
+    return first == second;
+  }
+};
+template <class T> typename T::const_iterator FindStringPiece(const T &t, const StringPiece &key) {
+  return t.find(key, StringPieceCompatibleHash(), StringPieceCompatibleEquals());
+}
+template <class T> typename T::iterator FindStringPiece(T &t, const StringPiece &key) {
+  return t.find(key, StringPieceCompatibleHash(), StringPieceCompatibleEquals());
+}
+#endif
+
+#ifdef HAVE_ICU
+U_NAMESPACE_END
+#endif
+
+#endif // BASE_STRING_PIECE_H__
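A sketch of the two things StringPiece buys you (illustrative only; the main() harness is made up): aliasing existing bytes without a copy, and, with Boost, probing an unordered_map keyed by std::string using a StringPiece through the compatible hash and equality above.

    #include "util/string_piece.hh"

    #include <boost/unordered_map.hpp>

    #include <string>

    int main() {
      std::string line("the quick brown fox");
      StringPiece word(line.data(), 3);  // points at "the"; no allocation

      boost::unordered_map<std::string, int> counts;
      counts["the"] = 1;

      // Looks up with a StringPiece key, avoiding a std::string per probe.
      boost::unordered_map<std::string, int>::const_iterator it =
          FindStringPiece(counts, word);
      return it == counts.end();
    }

This relies on boost::hash<std::string> and the StringPiece hash both being hash_range over the bytes, so the two hash functions are compatible.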
diff --git a/util/tokenize_piece.hh b/util/tokenize_piece.hh
new file mode 100644
index 000000000..c7e1c8633
--- /dev/null
+++ b/util/tokenize_piece.hh
@@ -0,0 +1,108 @@
+#ifndef UTIL_TOKENIZE_PIECE__
+#define UTIL_TOKENIZE_PIECE__
+
+#include "util/exception.hh"
+#include "util/string_piece.hh"
+
+#include <boost/iterator/iterator_facade.hpp>
+
+#include <algorithm>
+#include <iostream>
+
+namespace util {
+
+// Thrown on dereference when out of tokens to parse
+class OutOfTokens : public Exception {
+  public:
+    OutOfTokens() throw() {}
+    ~OutOfTokens() throw() {}
+};
+
+class SingleCharacter {
+  public:
+    explicit SingleCharacter(char delim) : delim_(delim) {}
+
+    StringPiece Find(const StringPiece &in) const {
+      return StringPiece(std::find(in.data(), in.data() + in.size(), delim_), 1);
+    }
+
+  private:
+    char delim_;
+};
+
+class MultiCharacter {
+  public:
+    explicit MultiCharacter(const StringPiece &delimiter) : delimiter_(delimiter) {}
+
+    StringPiece Find(const StringPiece &in) const {
+      return StringPiece(std::search(in.data(), in.data() + in.size(), delimiter_.data(), delimiter_.data() + delimiter_.size()), delimiter_.size());
+    }
+
+  private:
+    StringPiece delimiter_;
+};
+
+class AnyCharacter {
+  public:
+    explicit AnyCharacter(const StringPiece &chars) : chars_(chars) {}
+
+    StringPiece Find(const StringPiece &in) const {
+      return StringPiece(std::find_first_of(in.data(), in.data() + in.size(), chars_.data(), chars_.data() + chars_.size()), 1);
+    }
+
+  private:
+    StringPiece chars_;
+};
+
+template <class Find, bool SkipEmpty = false> class TokenIter : public boost::iterator_facade<TokenIter<Find, SkipEmpty>, const StringPiece, boost::forward_traversal_tag> {
+  public:
+    TokenIter() {}
+
+    template <class Construct> TokenIter(const StringPiece &str, const Construct &construct) : after_(str), finder_(construct) {
+      increment();
+    }
+
+    bool operator!() const {
+      return current_.data() == 0;
+    }
+    operator bool() const {
+      return current_.data() != 0;
+    }
+
+    static TokenIter end() {
+      return TokenIter();
+    }
+
+  private:
+    friend class boost::iterator_core_access;
+
+    void increment() {
+      do {
+        StringPiece found(finder_.Find(after_));
+        current_ = StringPiece(after_.data(), found.data() - after_.data());
+        if (found.data() == after_.data() + after_.size()) {
+          after_ = StringPiece(NULL, 0);
+        } else {
+          after_ = StringPiece(found.data() + found.size(), after_.data() - found.data() + after_.size() - found.size());
+        }
+      } while (SkipEmpty && current_.data() && current_.empty()); // Compiler should optimize this away if SkipEmpty is false.
+    }
+
+    bool equal(const TokenIter &other) const {
+      return after_.data() == other.after_.data();
+    }
+
+    const StringPiece &dereference() const {
+      UTIL_THROW_IF(!current_.data(), OutOfTokens, "Ran out of tokens");
+      return current_;
+    }
+
+    StringPiece current_;
+    StringPiece after_;
+
+    Find finder_;
+};
+
+} // namespace util
+
+#endif // UTIL_TOKENIZE_PIECE__
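A usage sketch (illustrative only; the main() harness is made up): splitting a Moses-style rule on "|||". The iterator converts to bool while tokens remain, and the second template argument controls whether empty fields are skipped.

    #include "util/tokenize_piece.hh"

    #include <iostream>

    int main() {
      StringPiece line("a b ||| c d ||| 0.5");
      for (util::TokenIter<util::MultiCharacter> it(line, util::MultiCharacter("|||")); it; ++it) {
        std::cout << "field: '" << *it << "'\n";
      }
      return 0;
    }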
diff --git a/util/tokenize_piece_test.cc b/util/tokenize_piece_test.cc
new file mode 100644
index 000000000..d856018fb
--- /dev/null
+++ b/util/tokenize_piece_test.cc
@@ -0,0 +1,48 @@
+#include "util/tokenize_piece.hh"
+#include "util/string_piece.hh"
+
+#define BOOST_TEST_MODULE TokenIteratorTest
+#include <boost/test/unit_test.hpp>
+
+#include <iostream>
+
+namespace util {
+namespace {
+
+BOOST_AUTO_TEST_CASE(pipe_pipe_none) {
+  const char str[] = "nodelimit at all";
+  TokenIter<MultiCharacter> it(str, MultiCharacter("|||"));
+  BOOST_REQUIRE(it);
+  BOOST_CHECK_EQUAL(StringPiece(str), *it);
+  ++it;
+  BOOST_CHECK(!it);
+}
+BOOST_AUTO_TEST_CASE(pipe_pipe_two) {
+  const char str[] = "|||";
+  TokenIter<MultiCharacter> it(str, MultiCharacter("|||"));
+  BOOST_REQUIRE(it);
+  BOOST_CHECK_EQUAL(StringPiece(), *it);
+  ++it;
+  BOOST_REQUIRE(it);
+  BOOST_CHECK_EQUAL(StringPiece(), *it);
+  ++it;
+  BOOST_CHECK(!it);
+}
+
+BOOST_AUTO_TEST_CASE(remove_empty) {
+  const char str[] = "|||";
+  TokenIter<MultiCharacter, true> it(str, MultiCharacter("|||"));
+  BOOST_CHECK(!it);
+}
+
+BOOST_AUTO_TEST_CASE(remove_empty_keep) {
+  const char str[] = " |||";
+  TokenIter<MultiCharacter, true> it(str, MultiCharacter("|||"));
+  BOOST_REQUIRE(it);
+  BOOST_CHECK_EQUAL(StringPiece(" "), *it);
+  ++it;
+  BOOST_CHECK(!it);
+}
+
+} // namespace
+} // namespace util
diff --git a/util/usage.cc b/util/usage.cc
new file mode 100644
index 000000000..e5cf76f05
--- /dev/null
+++ b/util/usage.cc
@@ -0,0 +1,46 @@
+#include "util/usage.hh"
+
+#include <fstream>
+#include <ostream>
+
+#include <string>
+#include <string.h>
+#if !defined(_WIN32) && !defined(_WIN64)
+#include <sys/resource.h>
+#include <sys/time.h>
+#endif
+
+namespace util {
+
+namespace {
+#if !defined(_WIN32) && !defined(_WIN64)
+float FloatSec(const struct timeval &tv) {
+  return static_cast<float>(tv.tv_sec) + (static_cast<float>(tv.tv_usec) / 1000000.0);
+}
+#endif
+} // namespace
+
+void PrintUsage(std::ostream &out) {
+#if !defined(_WIN32) && !defined(_WIN64)
+  struct rusage usage;
+  if (getrusage(RUSAGE_SELF, &usage)) {
+    perror("getrusage");
+    return;
+  }
+  out << "user\t" << FloatSec(usage.ru_utime) << "\nsys\t" << FloatSec(usage.ru_stime) << '\n';
+
+  // Linux doesn't set memory usage :-(.
+  std::ifstream status("/proc/self/status", std::ios::in);
+  std::string line;
+  while (getline(status, line)) {
+    if (!strncmp(line.c_str(), "VmRSS:\t", 7)) {
+      out << "VmRSS: " << (line.c_str() + 7) << '\n';
+      break;
+    } else if (!strncmp(line.c_str(), "VmPeak:\t", 8)) {
+      out << "VmPeak: " << (line.c_str() + 8) << '\n';
+    }
+  }
+#endif
+}
+
+} // namespace util
diff --git a/util/usage.hh b/util/usage.hh
new file mode 100644
index 000000000..d331ff74c
--- /dev/null
+++ b/util/usage.hh
@@ -0,0 +1,8 @@
+#ifndef UTIL_USAGE__
+#define UTIL_USAGE__
+#include <iosfwd>
+
+namespace util {
+void PrintUsage(std::ostream &to);
+} // namespace util
+#endif // UTIL_USAGE__
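Finally, PrintUsage is the resource-reporting hook the tools call at exit; a minimal sketch of its use (illustrative only; the main() harness is made up):

    #include "util/usage.hh"

    #include <iostream>

    int main() {
      // ... do the real work ...
      util::PrintUsage(std::cerr);  // user/sys CPU time, plus VmRSS/VmPeak on Linux
      return 0;
    }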