diff --git a/moses/ContextScope.h b/moses/ContextScope.h index 7d2d6dc3b..4e1583a78 100644 --- a/moses/ContextScope.h +++ b/moses/ContextScope.h @@ -125,25 +125,25 @@ public: return m_context_weights; } #endif - + bool SetContextWeights(std::string const& spec) { if (m_context_weights) return false; boost::unique_lock<boost::shared_mutex> lock(m_lock); SPTR<std::map<std::string,float> > M(new std::map<std::string,float>); - + // TO DO; This needs to be done with StringPiece.find, not Tokenize // PRIORITY: low std::vector<std::string> tokens = Tokenize(spec,":"); - for (std::vector<std::string>::iterator it = tokens.begin(); - it != tokens.end(); it++) { + for (std::vector<std::string>::iterator it = tokens.begin(); + it != tokens.end(); it++) { std::vector<std::string> key_and_value = Tokenize(*it, ","); (*M)[key_and_value[0]] = atof(key_and_value[1].c_str()); } m_context_weights = M; return true; } - + bool SetContextWeights(SPTR<std::map<std::string,float> const> const& w) { if (m_context_weights) return false; diff --git a/moses/ExportInterface.cpp b/moses/ExportInterface.cpp index 2372d5c1c..5acb90a92 100644 --- a/moses/ExportInterface.cpp +++ b/moses/ExportInterface.cpp @@ -228,10 +228,10 @@ batch_run() if (context_window) task->SetContextWindow(context_window); - + if (context_weights != "" && !task->GetScope()->GetContextWeights()) task->GetScope()->SetContextWeights(context_weights); - + // Allow for (sentence-)context-specific processing prior to // decoding. This can be used, for example, for context-sensitive // phrase lookup. 
diff --git a/moses/LM/DALMWrapper.cpp b/moses/LM/DALMWrapper.cpp index 746a2741b..60eee0250 100644 --- a/moses/LM/DALMWrapper.cpp +++ b/moses/LM/DALMWrapper.cpp @@ -39,16 +39,16 @@ void read_ini(const char *inifile, string &model, string &words, string &wordstx namespace Moses { -class Murmur: public DALM::State::HashFunction +class Murmur: public DALM::State::HashFunction { public: - Murmur(std::size_t seed=0): seed(seed){ - } - virtual std::size_t operator()(const DALM::VocabId *words, std::size_t size) const{ - return util::MurmurHashNative(words, sizeof(DALM::VocabId) * size, seed); - } + Murmur(std::size_t seed=0): seed(seed) { + } + virtual std::size_t operator()(const DALM::VocabId *words, std::size_t size) const { + return util::MurmurHashNative(words, sizeof(DALM::VocabId) * size, seed); + } private: - std::size_t seed; + std::size_t seed; }; class DALMState : public FFState @@ -173,13 +173,13 @@ public: virtual bool operator==(const FFState& other) const { const DALMChartState &o = static_cast<const DALMChartState&>(other); - + // check left state. if(prefixLength != o.prefixLength) return false; const DALM::Fragment &f = prefixFragments[prefixLength-1]; const DALM::Fragment &of = o.prefixFragments[prefixLength-1]; if(DALM::compare_fragments(f, of) != 0) return false; - + // check right state. 
if(rightContext.get_count() != o.rightContext.get_count()) return false; return rightContext.compare(o.rightContext) == 0; @@ -301,7 +301,7 @@ void LanguageModelDALM::CalcScore(const Phrase &phrase, float &fullScore, float } currPos++; - if (currPos >= m_ContextSize){ + if (currPos >= m_ContextSize) { break; } } @@ -564,7 +564,7 @@ void LanguageModelDALM::EvaluateTerminal( } else { hypoScore += score; prefixLength++; - if(state.get_count() < std::min(prevLen+1, (int)m_ContextSize)){ + if(state.get_count() < std::min(prevLen+1, (int)m_ContextSize)) { newState->SetAsLarge(); } if(prefixLength >= m_ContextSize) newState->SetAsLarge(); @@ -626,8 +626,8 @@ void LanguageModelDALM::EvaluateNonTerminal( state = prevState->GetRightContext(); return; } else if(state.get_count() <= prefixPos+1) { - if(state.get_count() == prefixPos+1 && !gap.is_finalized()){ - prefixLength++; + if(state.get_count() == prefixPos+1 && !gap.is_finalized()) { + prefixLength++; } newState->SetAsLarge(); state = prevState->GetRightContext(); @@ -636,10 +636,10 @@ void LanguageModelDALM::EvaluateNonTerminal( newState->SetAsLarge(); } else { prefixLength++; - if(state.get_count() < std::min(prevLen+1, (int)m_ContextSize)){ + if(state.get_count() < std::min(prevLen+1, (int)m_ContextSize)) { newState->SetAsLarge(); } - + if(prefixLength >= m_ContextSize) newState->SetAsLarge(); } } @@ -651,7 +651,7 @@ void LanguageModelDALM::EvaluateNonTerminal( if (prevState->LargeEnough()) { newState->SetAsLarge(); //if(prevPrefixLength < prevState->GetHypoSize()) { - hypoScore += m_lm->sum_bows(state, prevPrefixLength, state.get_count()); + hypoScore += m_lm->sum_bows(state, prevPrefixLength, state.get_count()); //} // copy language model state state = prevState->GetRightContext(); diff --git a/moses/Phrase.h b/moses/Phrase.h index 8fd3911d3..fa9487e33 100644 --- a/moses/Phrase.h +++ b/moses/Phrase.h @@ -69,11 +69,11 @@ public: virtual bool HasScope() const { return false; } - + virtual SPTR<ContextScope> GetScope() const { 
return SPTR<ContextScope>(); } - + /** No longer does anything as not using mem pool for Phrase class anymore */ static void InitializeMemPool(); diff --git a/moses/TargetPhrase.cpp b/moses/TargetPhrase.cpp index c6bab251b..fe114f164 100644 --- a/moses/TargetPhrase.cpp +++ b/moses/TargetPhrase.cpp @@ -68,7 +68,7 @@ TargetPhrase::TargetPhrase(ttasksptr& ttask, std::string out_string, const Phras , m_container(pt) { if (ttask) m_scope = ttask->GetScope(); - + //ACAT const StaticData &staticData = StaticData::Instance(); // XXX should this really be InputFactorOrder??? diff --git a/moses/TranslationTask.cpp b/moses/TranslationTask.cpp index 5eab55c82..95035ae9c 100644 --- a/moses/TranslationTask.cpp +++ b/moses/TranslationTask.cpp @@ -167,7 +167,7 @@ interpret_dlt() m_scope->SetContextWeights(j->second); } } - + void TranslationTask::Run() {