Mirror of https://github.com/moses-smt/mosesdecoder.git, synced 2024-12-26 05:14:36 +03:00
Replace assert with CHECK until people learn how to use assert properly
This commit is contained in:
parent 1192e6f2b0
commit bf78f7a1ac
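
Background for the change: `assert` is compiled out entirely when `NDEBUG` is defined, so optimized release builds silently skip every one of these checks — including, as several hunks below show, checks whose condition is itself a file read or write. The `CHECK` macro from util/check.hh is meant to stay active in every build configuration. As a rough sketch of such an always-on macro (assuming a minimal abort-on-failure design; not necessarily the exact contents of util/check.hh):

    // Minimal sketch of an always-evaluated CHECK in the spirit of util/check.hh.
    // Unlike assert, it does not depend on NDEBUG, so the condition is always
    // evaluated and a failure aborts even in optimized release builds.
    #include <cstdlib>
    #include <iostream>

    #define CHECK(Condition) \
      do { \
        if (!(Condition)) { \
          std::cerr << "Check " << #Condition << " failed at " \
                    << __FILE__ << ":" << __LINE__ << std::endl; \
          std::abort(); \
        } \
      } while (0)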
@@ -21,7 +21,7 @@
 #include <direct.h>
 #endif
 #include <sys/stat.h>
-#include <cassert>
+#include "util/check.hh"
 #include <string>
 #include "OnDiskWrapper.h"

@@ -56,19 +56,19 @@ bool OnDiskWrapper::BeginLoad(const std::string &filePath)
 bool OnDiskWrapper::OpenForLoad(const std::string &filePath)
 {
 m_fileSource.open((filePath + "/Source.dat").c_str(), ios::in | ios::binary);
-assert(m_fileSource.is_open());
+CHECK(m_fileSource.is_open());

 m_fileTargetInd.open((filePath + "/TargetInd.dat").c_str(), ios::in | ios::binary);
-assert(m_fileTargetInd.is_open());
+CHECK(m_fileTargetInd.is_open());

 m_fileTargetColl.open((filePath + "/TargetColl.dat").c_str(), ios::in | ios::binary);
-assert(m_fileTargetColl.is_open());
+CHECK(m_fileTargetColl.is_open());

 m_fileVocab.open((filePath + "/Vocab.dat").c_str(), ios::in);
-assert(m_fileVocab.is_open());
+CHECK(m_fileVocab.is_open());

 m_fileMisc.open((filePath + "/Misc.dat").c_str(), ios::in);
-assert(m_fileMisc.is_open());
+CHECK(m_fileMisc.is_open());

 // set up root node
 LoadMisc();
@@ -86,7 +86,7 @@ bool OnDiskWrapper::LoadMisc()
 while(m_fileMisc.getline(line, 100000)) {
 vector<string> tokens;
 Moses::Tokenize(tokens, line);
-assert(tokens.size() == 2);
+CHECK(tokens.size() == 2);
 const string &key = tokens[0];
 m_miscInfo[key] = Moses::Scan<UINT64>(tokens[1]);
 }
@@ -109,33 +109,33 @@ bool OnDiskWrapper::BeginSave(const std::string &filePath
 #endif

 m_fileSource.open((filePath + "/Source.dat").c_str(), ios::out | ios::in | ios::binary | ios::ate | ios::trunc);
-assert(m_fileSource.is_open());
+CHECK(m_fileSource.is_open());

 m_fileTargetInd.open((filePath + "/TargetInd.dat").c_str(), ios::out | ios::binary | ios::ate | ios::trunc);
-assert(m_fileTargetInd.is_open());
+CHECK(m_fileTargetInd.is_open());

 m_fileTargetColl.open((filePath + "/TargetColl.dat").c_str(), ios::out | ios::binary | ios::ate | ios::trunc);
-assert(m_fileTargetColl.is_open());
+CHECK(m_fileTargetColl.is_open());

 m_fileVocab.open((filePath + "/Vocab.dat").c_str(), ios::out | ios::ate | ios::trunc);
-assert(m_fileVocab.is_open());
+CHECK(m_fileVocab.is_open());

 m_fileMisc.open((filePath + "/Misc.dat").c_str(), ios::out | ios::ate | ios::trunc);
-assert(m_fileMisc.is_open());
+CHECK(m_fileMisc.is_open());

 // offset by 1. 0 offset is reserved
 char c = 0xff;
 m_fileSource.write(&c, 1);
-assert(1 == m_fileSource.tellp());
+CHECK(1 == m_fileSource.tellp());

 m_fileTargetInd.write(&c, 1);
-assert(1 == m_fileTargetInd.tellp());
+CHECK(1 == m_fileTargetInd.tellp());

 m_fileTargetColl.write(&c, 1);
-assert(1 == m_fileTargetColl.tellp());
+CHECK(1 == m_fileTargetColl.tellp());

 // set up root node
-assert(GetNumCounts() == 1);
+CHECK(GetNumCounts() == 1);
 vector<float> counts(GetNumCounts());
 counts[0] = DEFAULT_COUNT;
 m_rootSourceNode = new PhraseNode();
@@ -147,7 +147,7 @@ bool OnDiskWrapper::BeginSave(const std::string &filePath
 void OnDiskWrapper::EndSave()
 {
 bool ret = m_rootSourceNode->Saved();
-assert(ret);
+CHECK(ret);

 GetVocab().Save(*this);

@@ -184,7 +184,7 @@ UINT64 OnDiskWrapper::GetMisc(const std::string &key) const
 {
 std::map<std::string, UINT64>::const_iterator iter;
 iter = m_miscInfo.find(key);
-assert(iter != m_miscInfo.end());
+CHECK(iter != m_miscInfo.end());

 return iter->second;
 }
@@ -205,7 +205,7 @@ Word *OnDiskWrapper::ConvertFromMoses(Moses::FactorDirection /* direction */
 size_t factorType = factorsVec[ind];

 const Moses::Factor *factor = origWord.GetFactor(factorType);
-assert(factor);
+CHECK(factor);

 string str = factor->GetString();
 if (isNonTerminal) {
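
Why the file-open checks above matter: compiled with -DNDEBUG, the assert form performs no check at all, so a missing Source.dat would only surface later as reads from a bad stream. A hypothetical stand-alone demonstration (file name reused from the hunk purely for illustration):

    // Hypothetical demo: build once normally and once with -DNDEBUG.
    // Under NDEBUG the assert below disappears and the bad stream is used anyway.
    #include <cassert>
    #include <fstream>
    #include <iostream>

    int main() {
      std::ifstream fileSource("Source.dat");  // suppose the file is missing
      assert(fileSource.is_open());            // compiled away under -DNDEBUG
      // A CHECK(fileSource.is_open()) would still abort a release build here.
      std::cout << "stream good: " << fileSource.good() << std::endl;
      return 0;
    }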
@@ -18,7 +18,7 @@
 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 ***********************************************************************/
 #include <iostream>
-#include <cassert>
+#include "util/check.hh"
 #include "../../moses/src/Util.h"
 #include "Phrase.h"

@@ -49,7 +49,7 @@ void Phrase::AddWord(Word *word)

 void Phrase::AddWord(Word *word, size_t pos)
 {
-assert(pos < m_words.size());
+CHECK(pos < m_words.size());
 m_words.insert(m_words.begin() + pos + 1, word);
 }

@@ -73,7 +73,7 @@ int Phrase::Compare(const Phrase &compare) const
 }

 if (ret == 0) {
-assert(compare.GetSize() >= GetSize());
+CHECK(compare.GetSize() >= GetSize());
 ret = (compare.GetSize() > GetSize()) ? 1 : 0;
 }
 return ret;
@@ -17,7 +17,7 @@
 License along with this library; if not, write to the Free Software
 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 ***********************************************************************/
-#include <cassert>
+#include "util/check.hh"
 #include "PhraseNode.h"
 #include "OnDiskWrapper.h"
 #include "TargetPhraseCollection.h"
@@ -55,7 +55,7 @@ PhraseNode::PhraseNode(UINT64 filePos, OnDiskWrapper &onDiskWrapper)

 std::fstream &file = onDiskWrapper.GetFileSource();
 file.seekg(filePos);
-assert(filePos == file.tellg());
+CHECK(filePos == file.tellg());

 file.read((char*) &m_numChildrenLoad, sizeof(UINT64));

@@ -64,11 +64,11 @@ PhraseNode::PhraseNode(UINT64 filePos, OnDiskWrapper &onDiskWrapper)

 // go to start of node again
 file.seekg(filePos);
-assert(filePos == file.tellg());
+CHECK(filePos == file.tellg());

 // read everything into memory
 file.read(m_memLoad, memAlloc);
-assert(filePos + memAlloc == file.tellg());
+CHECK(filePos + memAlloc == file.tellg());

 // get value
 m_value = ((UINT64*)m_memLoad)[1];
@@ -76,7 +76,7 @@ PhraseNode::PhraseNode(UINT64 filePos, OnDiskWrapper &onDiskWrapper)
 // get counts
 float *memFloat = (float*) (m_memLoad + sizeof(UINT64) * 2);

-assert(countSize == 1);
+CHECK(countSize == 1);
 m_counts[0] = memFloat[0];

 m_memLoadLast = m_memLoad + memAlloc;
@@ -85,7 +85,7 @@ PhraseNode::PhraseNode(UINT64 filePos, OnDiskWrapper &onDiskWrapper)
 PhraseNode::~PhraseNode()
 {
 free(m_memLoad);
-//assert(m_saved);
+//CHECK(m_saved);
 }

 float PhraseNode::GetCount(size_t ind) const
@@ -95,7 +95,7 @@ float PhraseNode::GetCount(size_t ind) const

 void PhraseNode::Save(OnDiskWrapper &onDiskWrapper, size_t pos, size_t tableLimit)
 {
-assert(!m_saved);
+CHECK(!m_saved);

 // save this node
 m_targetPhraseColl.Sort(tableLimit);
@@ -116,7 +116,7 @@ void PhraseNode::Save(OnDiskWrapper &onDiskWrapper, size_t pos, size_t tableLimi

 // count info
 float *memFloat = (float*) (mem + memUsed);
-assert(numCounts == 1);
+CHECK(numCounts == 1);
 memFloat[0] = (m_counts.size() == 0) ? DEFAULT_COUNT : m_counts[0]; // if count = 0, put in very large num to make sure its still used. HACK
 memUsed += sizeof(float) * numCounts;

@@ -142,7 +142,7 @@ void PhraseNode::Save(OnDiskWrapper &onDiskWrapper, size_t pos, size_t tableLimi

 // save this node
 //Moses::DebugMem(mem, memAlloc);
-assert(memUsed == memAlloc);
+CHECK(memUsed == memAlloc);

 std::fstream &file = onDiskWrapper.GetFileSource();
 m_filePos = file.tellp();
@@ -150,7 +150,7 @@ void PhraseNode::Save(OnDiskWrapper &onDiskWrapper, size_t pos, size_t tableLimi
 file.write(mem, memUsed);

 UINT64 endPos = file.tellp();
-assert(m_filePos + memUsed == endPos);
+CHECK(m_filePos + memUsed == endPos);

 free(mem);

@@ -234,7 +234,7 @@ void PhraseNode::GetChild(Word &wordFound, UINT64 &childFilePos, size_t ind, OnD
 + childSize * ind;

 size_t memRead = ReadChild(wordFound, childFilePos, currMem, numFactors);
-assert(memRead == childSize);
+CHECK(memRead == childSize);
 }

 size_t PhraseNode::ReadChild(Word &wordFound, UINT64 &childFilePos, const char *mem, size_t numFactors) const
@@ -17,7 +17,7 @@
 License along with this library; if not, write to the Free Software
 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 ***********************************************************************/
-#include <cassert>
+#include "util/check.hh"
 #include "SourcePhrase.h"

 namespace OnDiskPt
@@ -57,13 +57,13 @@ void TargetPhrase::Create1AlignFromString(const std::string &align1Str)
 {
 vector<size_t> alignPoints;
 Moses::Tokenize<size_t>(alignPoints, align1Str, "-");
-assert(alignPoints.size() == 2);
+CHECK(alignPoints.size() == 2);
 m_align.push_back(pair<size_t, size_t>(alignPoints[0], alignPoints[1]) );
 }

 void TargetPhrase::SetScore(float score, size_t ind)
 {
-assert(ind < m_scores.size());
+CHECK(ind < m_scores.size());
 m_scores[ind] = score;
 }

@@ -101,7 +101,7 @@ char *TargetPhrase::WriteToMemory(OnDiskWrapper &onDiskWrapper, size_t &memUsed)
 memUsed += word.WriteToMemory((char*) currPtr);
 }

-assert(memUsed == memNeeded);
+CHECK(memUsed == memNeeded);
 return (char *) mem;
 }

@@ -119,7 +119,7 @@ void TargetPhrase::Save(OnDiskWrapper &onDiskWrapper)
 file.write(mem, memUsed);

 UINT64 endPos = file.tellp();
-assert(startPos + memUsed == endPos);
+CHECK(startPos + memUsed == endPos);

 m_filePos = startPos;
 free(mem);
@@ -151,7 +151,7 @@ char *TargetPhrase::WriteOtherInfoToMemory(OnDiskWrapper &onDiskWrapper, size_t
 memUsed += WriteScoresToMemory(mem + memUsed);

 //DebugMem(mem, memNeeded);
-assert(memNeeded == memUsed);
+CHECK(memNeeded == memUsed);
 return mem;
 }

@@ -203,7 +203,7 @@ Moses::TargetPhrase *TargetPhrase::ConvertToMoses(const std::vector<Moses::Facto

 // words
 size_t phraseSize = GetSize();
-assert(phraseSize > 0); // last word is lhs
+CHECK(phraseSize > 0); // last word is lhs
 --phraseSize;

 for (size_t pos = 0; pos < phraseSize; ++pos) {
@@ -232,18 +232,18 @@ Moses::TargetPhrase *TargetPhrase::ConvertToMoses(const std::vector<Moses::Facto

 UINT64 TargetPhrase::ReadOtherInfoFromFile(UINT64 filePos, std::fstream &fileTPColl)
 {
-assert(filePos == fileTPColl.tellg());
+CHECK(filePos == fileTPColl.tellg());

 UINT64 memUsed = 0;
 fileTPColl.read((char*) &m_filePos, sizeof(UINT64));
 memUsed += sizeof(UINT64);
-assert(m_filePos != 0);
+CHECK(m_filePos != 0);

 memUsed += ReadAlignFromFile(fileTPColl);
-assert((memUsed + filePos) == fileTPColl.tellg());
+CHECK((memUsed + filePos) == fileTPColl.tellg());

 memUsed += ReadScoresFromFile(fileTPColl);
-assert((memUsed + filePos) == fileTPColl.tellg());
+CHECK((memUsed + filePos) == fileTPColl.tellg());

 return memUsed;
 }
@@ -289,7 +289,7 @@ UINT64 TargetPhrase::ReadAlignFromFile(std::fstream &fileTPColl)

 UINT64 TargetPhrase::ReadScoresFromFile(std::fstream &fileTPColl)
 {
-assert(m_scores.size() > 0);
+CHECK(m_scores.size() > 0);

 UINT64 bytesRead = 0;

@@ -107,7 +107,7 @@ void TargetPhraseCollection::Save(OnDiskWrapper &onDiskWrapper)
 free(mem);

 UINT64 endPos = file.tellp();
-assert(startPos + memUsed == endPos);
+CHECK(startPos + memUsed == endPos);

 m_filePos = startPos;

@@ -36,7 +36,7 @@ bool Vocab::Load(OnDiskWrapper &onDiskWrapper)
 while(getline(file, line)) {
 vector<string> tokens;
 Moses::Tokenize(tokens, line);
-assert(tokens.size() == 2);
+CHECK(tokens.size() == 2);
 const string &key = tokens[0];
 m_vocabColl[key] = Moses::Scan<UINT64>(tokens[1]);
 }
@@ -93,7 +93,7 @@ size_t Word::ReadFromFile(std::fstream &file, size_t numFactors)
 file.read(mem, memAlloc);

 size_t memUsed = ReadFromMemory(mem, numFactors);
-assert(memAlloc == memUsed);
+CHECK(memAlloc == memUsed);
 free(mem);

 return memUsed;
@@ -7,7 +7,7 @@
 */

 #include <algorithm>
-#include <cassert>
+#include "util/check.hh"
 #include <cmath>
 #include <fstream>

@@ -149,12 +149,12 @@ void Data::mergeSparseFeatures() {
 void Data::createShards(size_t shard_count, float shard_size, const string& scorerconfig,
 std::vector<Data>& shards)
 {
-assert(shard_count);
-assert(shard_size >= 0);
-assert(shard_size <= 1);
+CHECK(shard_count);
+CHECK(shard_size >= 0);
+CHECK(shard_size <= 1);

 size_t data_size = scoredata->size();
-assert(data_size == featdata->size());
+CHECK(data_size == featdata->size());

 shard_size *= data_size;

@@ -1,7 +1,7 @@
 #include "Optimizer.h"

 #include <cmath>
-#include <cassert>
+#include "util/check.hh"
 #include <vector>
 #include <limits>
 #include <map>
@@ -49,7 +49,7 @@ Optimizer::Optimizer(unsigned Pd, vector<unsigned> i2O, vector<parameter_t> star
 // Warning: the init vector is a full set of parameters, of dimension pdim!
 Point::pdim = Pd;

-assert(start.size() == Pd);
+CHECK(start.size() == Pd);
 Point::dim = i2O.size();
 Point::optindices = i2O;
 if (Point::pdim > Point::dim) {
@@ -90,7 +90,7 @@ map<float,diff_t >::iterator AddThreshold(map<float,diff_t >& thresholdmap, floa
 } else {
 // normal case
 pair<map<float,diff_t>::iterator, bool> ins = thresholdmap.insert(threshold(newt, diff_t(1, newdiff)));
-assert(ins.second); // we really inserted something
+CHECK(ins.second); // we really inserted something
 it = ins.first;
 }
 return it;
@@ -174,7 +174,7 @@ statscore_t Optimizer::LineOptimize(const Point& origin, const Point& direction,
 // The rightmost bestindex is the one with the highest slope.

 // They should be equal but there might be.
-assert(abs(leftmost->first-gradient.rbegin()->first) < 0.0001);
+CHECK(abs(leftmost->first-gradient.rbegin()->first) < 0.0001);
 // A small difference due to rounding error
 break;
 }
@@ -195,7 +195,7 @@ statscore_t Optimizer::LineOptimize(const Point& origin, const Point& direction,
 map<float,diff_t>::iterator tit = thresholdmap.find(leftmostx);
 if (tit == previnserted) {
 // The threshold is the same as before can happen if 2 candidates are the same for example.
-assert(previnserted->second.back().first == newd.first);
+CHECK(previnserted->second.back().first == newd.first);
 previnserted->second.back()=newd; // just replace the 1 best for sentence S
 // previnsert doesn't change
 } else {
@@ -209,14 +209,14 @@ statscore_t Optimizer::LineOptimize(const Point& origin, const Point& direction,
 } else {
 // We append the diffs in previnsert to tit before destroying previnsert.
 tit->second.insert(tit->second.end(),previnserted->second.begin(),previnserted->second.end());
-assert(tit->second.back().first == newd.first);
+CHECK(tit->second.back().first == newd.first);
 tit->second.back()=newd; // change diff for sentence S
 thresholdmap.erase(previnserted); // erase old previnsert
 previnserted = tit; // point previnsert to the new threshold
 }
 }

-assert(previnserted != thresholdmap.end());
+CHECK(previnserted != thresholdmap.end());
 } else { //normal insertion process
 previnserted = AddThreshold(thresholdmap, leftmostx, newd);
 }
@@ -252,7 +252,7 @@ statscore_t Optimizer::LineOptimize(const Point& origin, const Point& direction,
 float bestx = MIN_FLOAT;

 // We skipped the first el of thresholdlist but GetIncStatScore return 1 more for first1best.
-assert(scores.size() == thresholdmap.size());
+CHECK(scores.size() == thresholdmap.size());
 for (unsigned int sc = 0; sc != scores.size(); sc++) {
 //cerr << "x=" << thrit->first << " => " << scores[sc] << endl;
 if (scores[sc] > bestscore) {
@@ -309,7 +309,7 @@ statscore_t Optimizer::LineOptimize(const Point& origin, const Point& direction,

 void Optimizer::Get1bests(const Point& P, vector<unsigned>& bests) const
 {
-assert(FData);
+CHECK(FData);
 bests.clear();
 bests.resize(size());

@@ -362,7 +362,7 @@ statscore_t Optimizer::Run(Point& P) const

 vector<statscore_t> Optimizer::GetIncStatScore(vector<unsigned> thefirst, vector<vector <pair<unsigned,unsigned> > > thediffs) const
 {
-assert(scorer);
+CHECK(scorer);

 vector<statscore_t> theres;

@@ -2,7 +2,7 @@

 #include <cmath>
 #include <cstdlib>
-#include <cassert>
+#include "util/check.hh"
 #include <limits>
 #include "FeatureStats.h"

@@ -37,7 +37,7 @@ Point::Point(const vector<parameter_t>& init,
 m_max[i] = max[i];
 }
 } else {
-assert(init.size()==pdim);
+CHECK(init.size()==pdim);
 for (unsigned int i=0; i<Point::dim; i++) {
 operator[](i)=init[optindices[i]];
 m_min[i] = min[optindices[i]];
@@ -50,8 +50,8 @@ Point::~Point() {}

 void Point::Randomize()
 {
-assert(m_min.size()==Point::dim);
-assert(m_max.size()==Point::dim);
+CHECK(m_min.size()==Point::dim);
+CHECK(m_max.size()==Point::dim);
 for (unsigned int i=0; i<size(); i++) {
 operator[](i) = m_min[i] +
 (float)random()/(float)RAND_MAX * (float)(m_max[i]-m_min[i]);
@@ -76,7 +76,7 @@ double Point::operator*(const FeatureStats& F) const

 Point Point::operator+(const Point& p2) const
 {
-assert(p2.size() == size());
+CHECK(p2.size() == size());
 Point Res(*this);
 for (unsigned i = 0; i < size(); i++) {
 Res[i] += p2[i];
@@ -88,7 +88,7 @@ Point Point::operator+(const Point& p2) const

 void Point::operator+=(const Point& p2)
 {
-assert(p2.size() == size());
+CHECK(p2.size() == size());
 for (unsigned i = 0; i < size(); i++) {
 operator[](i) += p2[i];
 }
@@ -110,7 +110,7 @@ struct GCData {
 const std::vector<std::vector<float> >& b)
 : pdicts(a),weights(b),totalTuples(0),distinctTuples(0) {

-assert(pdicts.size()==weights.size());
+CHECK(pdicts.size()==weights.size());
 std::set<FactorType> distinctOutFset;
 inF.resize(pdicts.size());
 outF.resize(pdicts.size());
@@ -152,7 +152,7 @@ void GeneratePerFactorTgtList(size_t factorType,PPtr pptr,GCData& data,Len2Cands
 data.pdicts[factorType]->GetTargetCandidates(pptr,cands);

 for(std::vector<FactorTgtCand>::const_iterator cand=cands.begin(); cand!=cands.end(); ++cand) {
-assert(data.weights[factorType].size()==cand->second.size());
+CHECK(data.weights[factorType].size()==cand->second.size());
 float costs=std::inner_product(data.weights[factorType].begin(),
 data.weights[factorType].end(),
 cand->second.begin(),
@@ -176,7 +176,7 @@ void GenerateTupleTgtCands(OutputFactor2TgtCandList& tCand,E2Costs& e2costs,GCDa

 if(gotCands) {
 // enumerate tuples
-assert(data.DistinctOutFactors()==tCand.size());
+CHECK(data.DistinctOutFactors()==tCand.size());
 std::vector<unsigned> radix(data.DistinctOutFactors());
 for(size_t i=0; i<tCand.size(); ++i) radix[i]=tCand[i].size();

@@ -189,7 +189,7 @@ void GenerateTupleTgtCands(OutputFactor2TgtCandList& tCand,E2Costs& e2costs,GCDa
 mPhrase e(radix.size());
 float costs=0.0;
 for(size_t j=0; j<radix.size(); ++j) {
-assert(tuples[radix.size()*i+j]<tCand[j].size());
+CHECK(tuples[radix.size()*i+j]<tCand[j].size());
 std::pair<float,vFactor> const& mycand=tCand[j][tuples[radix.size()*i+j]];
 e[j]=mycand.second;
 costs+=mycand.first;
@@ -198,7 +198,7 @@ void GenerateTupleTgtCands(OutputFactor2TgtCandList& tCand,E2Costs& e2costs,GCDa
 bool mismatch=0;
 for(size_t j=1; !mismatch && j<e.size(); ++j)
 if(e[j].size()!=e[j-1].size()) mismatch=1;
-assert(mismatch==0);
+CHECK(mismatch==0);
 #endif
 std::pair<E2Costs::iterator,bool> p=e2costs.insert(std::make_pair(e,costs));
 if(p.second) ++data.distinctTuples;
@@ -244,7 +244,7 @@ void GenerateCandidates(const ConfusionNet& src,

 //std::cerr<<"processing state "<<curr<<" stack size: "<<stack.size()<<"\n";

-assert(curr.end()<src.GetSize());
+CHECK(curr.end()<src.GetSize());
 const ConfusionNet::Column &currCol=src[curr.end()];
 for(size_t colidx=0; colidx<currCol.size(); ++colidx) {
 const Word& w=currCol[colidx].first;
@@ -152,7 +152,7 @@ InputType*IOWrapper::GetInput(InputType* inputType)
 */
 void OutputSurface(std::ostream &out, const Phrase &phrase, const std::vector<FactorType> &outputFactorOrder, bool reportAllFactors)
 {
-assert(outputFactorOrder.size() > 0);
+CHECK(outputFactorOrder.size() > 0);
 if (reportAllFactors == true) {
 out << phrase;
 } else {
@@ -257,7 +257,7 @@ void IOWrapper::OutputDetailedTranslationReport(
 }
 std::ostringstream out;
 OutputTranslationOptions(out, hypo, translationId);
-assert(m_detailOutputCollector);
+CHECK(m_detailOutputCollector);
 m_detailOutputCollector->Write(translationId, out.str());
 }

@@ -283,7 +283,7 @@ void IOWrapper::OutputBestHypo(const ChartHypothesis *hypo, long translationId,
 hypo->CreateOutputPhrase(outPhrase);

 // delete 1st & last
-assert(outPhrase.GetSize() >= 2);
+CHECK(outPhrase.GetSize() >= 2);
 outPhrase.RemoveWord(0);
 outPhrase.RemoveWord(outPhrase.GetSize() - 1);

@@ -337,7 +337,7 @@ void IOWrapper::OutputNBestList(const ChartTrellisPathList &nBestList, const Cha
 Moses::Phrase outputPhrase = path.GetOutputPhrase();

 // delete 1st & last
-assert(outputPhrase.GetSize() >= 2);
+CHECK(outputPhrase.GetSize() >= 2);
 outputPhrase.RemoveWord(0);
 outputPhrase.RemoveWord(outputPhrase.GetSize() - 1);

@@ -438,7 +438,7 @@ void IOWrapper::OutputNBestList(const ChartTrellisPathList &nBestList, const Cha

 out <<std::flush;

-assert(m_nBestOutputCollector);
+CHECK(m_nBestOutputCollector);
 m_nBestOutputCollector->Write(translationId, out.str());
 }

@@ -92,7 +92,7 @@ public:
 ChartManager manager(*m_source, &system);
 manager.ProcessSentence();

-assert(!staticData.UseMBR());
+CHECK(!staticData.UseMBR());

 // 1-best
 const ChartHypothesis *bestHypo = manager.GetBestHypothesis();
@@ -123,7 +123,7 @@ public:
 std::ostringstream out;
 manager.GetSearchGraph(lineNumber, out);
 OutputCollector *oc = m_ioWrapper.GetSearchGraphOutputCollector();
-assert(oc);
+CHECK(oc);
 oc->Write(lineNumber, out.str());
 }

@@ -226,7 +226,7 @@ int main(int argc, char* argv[])
 exit(0);
 }

-assert(staticData.GetSearchAlgorithm() == ChartDecoding);
+CHECK(staticData.GetSearchAlgorithm() == ChartDecoding);

 // set up read/writing class
 IOWrapper *ioWrapper = GetIODevice(staticData);
@@ -158,13 +158,13 @@ void IOWrapper::Initialization(const std::vector<FactorType> &/*inputFactorOrder
 if (staticData.IsDetailedTranslationReportingEnabled()) {
 const std::string &path = staticData.GetDetailedTranslationReportingFilePath();
 m_detailedTranslationReportingStream = new std::ofstream(path.c_str());
-assert(m_detailedTranslationReportingStream->good());
+CHECK(m_detailedTranslationReportingStream->good());
 }

 // sentence alignment output
 if (! staticData.GetAlignmentOutputFile().empty()) {
 m_alignmentOutputStream = new ofstream(staticData.GetAlignmentOutputFile().c_str());
-assert(m_alignmentOutputStream->good());
+CHECK(m_alignmentOutputStream->good());
 }

 }
@@ -188,7 +188,7 @@ InputType*IOWrapper::GetInput(InputType* inputType)
 */
 void OutputSurface(std::ostream &out, const Phrase &phrase, const std::vector<FactorType> &outputFactorOrder, bool reportAllFactors)
 {
-assert(outputFactorOrder.size() > 0);
+CHECK(outputFactorOrder.size() > 0);
 if (reportAllFactors == true) {
 out << phrase;
 } else {
@@ -64,7 +64,7 @@ public:
 /** Add a parameter with key, command line argument, and default value */
 void addParam(gridkey key, const string& arg, float defaultValue) {
 m_args[arg] = key;
-assert(m_grid.find(key) == m_grid.end());
+CHECK(m_grid.find(key) == m_grid.end());
 m_grid[key].push_back(defaultValue);
 }

@@ -17,7 +17,7 @@
 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 ***********************************************************************/
 #include <algorithm>
-#include <cassert>
+#include "util/check.hh"
 #include "AlignmentInfo.h"
 #include "TypeDef.h"
 #include "StaticData.h"
@@ -76,7 +76,7 @@ std::vector< const std::pair<size_t,size_t>* > AlignmentInfo::GetSortedAlignment
 break;

 default:
-assert(false);
+CHECK(false);
 }

 return ret;
@@ -49,7 +49,7 @@ bool BilingualDynSuffixArray::Load(
 LoadCorpus(sourceStrme, m_inputFactors, Input, *m_srcCorpus, m_srcSntBreaks, m_srcVocab);
 cerr << "Loading target corpus...\n";
 LoadCorpus(targetStrme, m_outputFactors, Output, *m_trgCorpus, m_trgSntBreaks, m_trgVocab);
-assert(m_srcSntBreaks.size() == m_trgSntBreaks.size());
+CHECK(m_srcSntBreaks.size() == m_trgSntBreaks.size());

 // build suffix arrays and auxilliary arrays
 cerr << "Building Source Suffix Array...\n";
@@ -76,7 +76,7 @@ int BilingualDynSuffixArray::LoadRawAlignments(InputFileStream& align)
 std::vector<int> vtmp;
 while(getline(align, line)) {
 Utils::splitToInt(line, vtmp, "- ");
-assert(vtmp.size() % 2 == 0);
+CHECK(vtmp.size() % 2 == 0);
 std::vector<short> vAlgn; // store as short ints for memory
 for (std::vector<int>::const_iterator itr = vtmp.begin();
 itr != vtmp.end(); ++itr) {
@@ -90,7 +90,7 @@ int BilingualDynSuffixArray::LoadRawAlignments(string& align) {
 // stores the alignments in the raw file format
 vector<int> vtmp;
 Utils::splitToInt(align, vtmp, "- ");
-assert(vtmp.size() % 2 == 0);
+CHECK(vtmp.size() % 2 == 0);
 vector<short> vAlgn; // store as short ints for memory
 for (std::vector<int>::const_iterator itr = vtmp.begin();
 itr != vtmp.end(); ++itr) {
@@ -108,7 +108,7 @@ int BilingualDynSuffixArray::LoadAlignments(InputFileStream& align)

 while(getline(align, line)) {
 Utils::splitToInt(line, vtmp, "- ");
-assert(vtmp.size() % 2 == 0);
+CHECK(vtmp.size() % 2 == 0);

 int sourceSize = GetSourceSentenceSize(sntIndex);
 int targetSize = GetTargetSentenceSize(sntIndex);
@@ -117,8 +117,8 @@ int BilingualDynSuffixArray::LoadAlignments(InputFileStream& align)
 for(int i=0; i < (int)vtmp.size(); i+=2) {
 int sourcePos = vtmp[i];
 int targetPos = vtmp[i+1];
-assert(sourcePos < sourceSize);
-assert(targetPos < targetSize);
+CHECK(sourcePos < sourceSize);
+CHECK(targetPos < targetSize);

 curSnt.alignedList[sourcePos].push_back(targetPos); // list of target nodes for each source word
 curSnt.numberAligned[targetPos]++; // cnt of how many source words connect to this target word
@@ -240,7 +240,7 @@ pair<float, float> BilingualDynSuffixArray::GetLexicalWeight(const PhrasePair& p
 CacheWordProbs(srcWord);
 itrCache = m_wordPairCache.find(wordpair); // search cache again
 }
-assert(itrCache != m_wordPairCache.end());
+CHECK(itrCache != m_wordPairCache.end());
 srcSumPairProbs += itrCache->second.first;
 targetProbs[wordpair] = itrCache->second.second;
 }
@@ -255,7 +255,7 @@ pair<float, float> BilingualDynSuffixArray::GetLexicalWeight(const PhrasePair& p
 CacheWordProbs(srcWord);
 itrCache = m_wordPairCache.find(wordpair); // search cache again
 }
-assert(itrCache != m_wordPairCache.end());
+CHECK(itrCache != m_wordPairCache.end());
 srcSumPairProbs += itrCache->second.first;
 targetProbs[wordpair] = itrCache->second.second;
 }
@@ -306,13 +306,13 @@ void BilingualDynSuffixArray::CacheWordProbs(wordID_t srcWord) const
 std::map<wordID_t, int> counts;
 std::vector<wordID_t> sword(1, srcWord), wrdIndices;
 bool ret = m_srcSA->GetCorpusIndex(&sword, &wrdIndices);
-assert(ret);
+CHECK(ret);
 std::vector<int> sntIndexes = GetSntIndexes(wrdIndices, 1, m_srcSntBreaks);
 float denom(0);
 // for each occurrence of this word
 for(size_t snt = 0; snt < sntIndexes.size(); ++snt) {
 int sntIdx = sntIndexes.at(snt); // get corpus index for sentence
-assert(sntIdx != -1);
+CHECK(sntIdx != -1);
 int srcWrdSntIdx = wrdIndices.at(snt) - m_srcSntBreaks.at(sntIdx); // get word index in sentence
 const std::vector<int> srcAlg = GetSentenceAlignment(sntIdx).alignedList.at(srcWrdSntIdx); // list of target words for this source word
 if(srcAlg.size() == 0) {
@@ -356,7 +356,7 @@ TargetPhrase* BilingualDynSuffixArray::GetMosesFactorIDs(const SAPhrase& phrase)
 TargetPhrase* targetPhrase = new TargetPhrase(Output);
 for(size_t i=0; i < phrase.words.size(); ++i) { // look up trg words
 Word& word = m_trgVocab->GetWord( phrase.words[i]);
-assert(word != m_trgVocab->GetkOOVWord());
+CHECK(word != m_trgVocab->GetkOOVWord());
 targetPhrase->AddWord(word);
 }
 // scoring
@@ -409,7 +409,7 @@ void BilingualDynSuffixArray::GetTargetPhrasesByLexicalWeight(const Phrase& src,
 for(iterPhrases = phraseCounts.begin(); iterPhrases != phraseCounts.end(); ++iterPhrases) {
 float trg2SrcMLE = float(iterPhrases->second) / totalTrgPhrases;
 itrLexW = lexicalWeights.find(iterPhrases->first);
-assert(itrLexW != lexicalWeights.end());
+CHECK(itrLexW != lexicalWeights.end());
 Scores scoreVector(3);
 scoreVector[0] = trg2SrcMLE;
 scoreVector[1] = itrLexW->second.first;
@@ -22,7 +22,7 @@ public:

 void SetId(size_t pos, wordID_t id)
 {
-assert(pos < words.size());
+CHECK(pos < words.size());
 words[pos] = id;
 }
 bool operator<(const SAPhrase& phr2) const
@@ -59,7 +59,7 @@ public:
 const TranslationSystem* m_system;

 bool operator()(const Hypothesis* hypoA, const Hypothesis* hypoB) const {
-assert (m_transOptRange != NULL);
+CHECK(m_transOptRange != NULL);

 const float weightDistortion = m_system->GetWeightDistortion();
 const DistortionScoreProducer *dsp = m_system->GetDistortionProducer();
@@ -149,11 +149,11 @@ BackwardsEdge::BackwardsEdge(const BitmapContainer &prevBitmapContainer
 }

 if (m_translations.size() > 1) {
-assert(m_translations.Get(0)->GetFutureScore() >= m_translations.Get(1)->GetFutureScore());
+CHECK(m_translations.Get(0)->GetFutureScore() >= m_translations.Get(1)->GetFutureScore());
 }

 if (m_hypotheses.size() > 1) {
-assert(m_hypotheses[0]->GetTotalScore() >= m_hypotheses[1]->GetTotalScore());
+CHECK(m_hypotheses[0]->GetTotalScore() >= m_hypotheses[1]->GetTotalScore());
 }

 HypothesisScoreOrdererWithDistortion orderer (&transOptRange, system);
@@ -202,8 +202,8 @@ BackwardsEdge::SeenPosition(const size_t x, const size_t y)
 void
 BackwardsEdge::SetSeenPosition(const size_t x, const size_t y)
 {
-assert(x < (1<<17));
-assert(y < (1<<17));
+CHECK(x < (1<<17));
+CHECK(y < (1<<17));

 m_seenPosition.insert((x<<16) + y);
 }
@@ -367,7 +367,7 @@ BitmapContainer::AddHypothesis(Hypothesis *hypothesis)

 ++iter;
 }
-assert(itemExists == false);
+CHECK(itemExists == false);
 m_hypotheses.push_back(hypothesis);
 }

@@ -410,12 +410,12 @@ BitmapContainer::ProcessBestHypothesis()
 HypothesisQueueItem *item = Dequeue();

 // If the priority queue is exhausted, we are done and should have exited
-assert(item != NULL);
+CHECK(item != NULL);

 // check we are pulling things off of priority queue in right order
 if (!Empty()) {
 HypothesisQueueItem *check = Dequeue(true);
-assert(item->GetHypothesis()->GetTotalScore() >= check->GetHypothesis()->GetTotalScore());
+CHECK(item->GetHypothesis()->GetTotalScore() >= check->GetHypothesis()->GetTotalScore());
 }

 // Logging for the criminally insane
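
A side note on SetSeenPosition in the hunk above: the two indices are folded into a single integer key so that one set lookup answers "has this (x, y) pair been expanded?". A sketch of the assumed packing scheme (helper names hypothetical):

    #include <set>

    std::set<unsigned int> seenPosition;

    // x is shifted into the high bits and y occupies the low 16 bits, so each
    // (x, y) pair maps to a distinct key as long as y stays below 2^16.
    void SetSeen(unsigned int x, unsigned int y) {
      seenPosition.insert((x << 16) + y);
    }

    bool Seen(unsigned int x, unsigned int y) {
      return seenPosition.count((x << 16) + y) != 0;
    }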
@@ -62,7 +62,7 @@ const HypoList &ChartCell::GetSortedHypotheses(const Word &constituentLabel) con
 {
 std::map<Word, ChartHypothesisCollection>::const_iterator
 iter = m_hypoColl.find(constituentLabel);
-assert(iter != m_hypoColl.end());
+CHECK(iter != m_hypoColl.end());
 return iter->second.GetSortedHypotheses();
 }

@@ -117,7 +117,7 @@ void ChartCell::ProcessSentence(const ChartTranslationOptionList &transOptList
 void ChartCell::SortHypotheses()
 {
 // sort each mini cells & fill up target lhs list
-assert(m_targetLabelSet.Empty());
+CHECK(m_targetLabelSet.Empty());
 std::map<Word, ChartHypothesisCollection>::iterator iter;
 for (iter = m_hypoColl.begin(); iter != m_hypoColl.end(); ++iter) {
 ChartHypothesisCollection &coll = iter->second;
@@ -135,7 +135,7 @@ const ChartHypothesis *ChartCell::GetBestHypothesis() const
 std::map<Word, ChartHypothesisCollection>::const_iterator iter;
 for (iter = m_hypoColl.begin(); iter != m_hypoColl.end(); ++iter) {
 const HypoList &sortedList = iter->second.GetSortedHypotheses();
-assert(sortedList.size() > 0);
+CHECK(sortedList.size() > 0);

 const ChartHypothesis *hypo = sortedList[0];
 if (hypo->GetTotalScore() > bestScore) {
@@ -72,7 +72,7 @@ public:
 const ChartHypothesis *GetBestHypothesis() const;

 const ChartCellLabel &GetSourceWordLabel() const {
-assert(m_coverage.GetNumWordsCovered() == 1);
+CHECK(m_coverage.GetNumWordsCovered() == 1);
 return *m_sourceWordLabel;
 }

@@ -73,7 +73,7 @@ bool ChartHypothesisCollection::AddHypothesis(ChartHypothesis *hypo, ChartManage
 // equiv hypo exists, recombine with other hypo
 HCType::iterator &iterExisting = addRet.first;
 ChartHypothesis *hypoExisting = *iterExisting;
-assert(iterExisting != m_hypos.end());
+CHECK(iterExisting != m_hypos.end());

 //StaticData::Instance().GetSentenceStats().AddRecombination(*hypo, **iterExisting);

@@ -225,7 +225,7 @@ void ChartHypothesisCollection::PruneToSize(ChartManager &manager)
 for (iter = hyposOrdered.begin() + (m_maxHypoStackSize * 2); iter != hyposOrdered.end(); ++iter) {
 ChartHypothesis *hypo = *iter;
 HCType::iterator iterFindHypo = m_hypos.find(hypo);
-assert(iterFindHypo != m_hypos.end());
+CHECK(iterFindHypo != m_hypos.end());
 Remove(iterFindHypo);
 }
 }
@@ -234,7 +234,7 @@ void ChartHypothesisCollection::PruneToSize(ChartManager &manager)

 void ChartHypothesisCollection::SortHypotheses()
 {
-assert(m_hyposOrdered.empty());
+CHECK(m_hyposOrdered.empty());
 if (!m_hypos.empty()) {
 // done everything for this cell.
 // sort
@@ -44,10 +44,10 @@ public:
 // assert in same cell
 const WordsRange &rangeA = hypoA->GetCurrSourceRange()
 , &rangeB = hypoB->GetCurrSourceRange();
-assert(rangeA == rangeB);
+CHECK(rangeA == rangeB);

 // shouldn't be mixing hypos with different lhs
-assert(hypoA->GetTargetLHS() == hypoB->GetTargetLHS());
+CHECK(hypoA->GetTargetLHS() == hypoB->GetTargetLHS());

 int ret = hypoA->RecombineCompare(*hypoB);
 if (ret != 0)
@@ -198,7 +198,7 @@ void ChartManager::CalcNBest(size_t count, ChartTrellisPathList &ret,bool onlyDi
 ++i) {
 // Get the best detour from the queue.
 std::auto_ptr<const ChartTrellisDetour> detour(contenders.Pop());
-assert(detour.get());
+CHECK(detour.get());

 // Create a full base path from the chosen detour.
 basePath.reset(new ChartTrellisPath(*detour));
@@ -206,7 +206,7 @@ void ChartManager::CalcNBest(size_t count, ChartTrellisPathList &ret,bool onlyDi
 // Generate new detours from this base path and add them to the queue of
 // contenders. The new detours deviate from the base path by a single
 // replacement along the previous detour sub-path.
-assert(basePath->GetDeviationPoint());
+CHECK(basePath->GetDeviationPoint());
 CreateDeviantPaths(basePath, *(basePath->GetDeviationPoint()), contenders);

 // If the n-best list is allowed to contain duplicate translations (at the
@@ -38,7 +38,7 @@ ChartRuleLookupManagerMemory::ChartRuleLookupManagerMemory(
 : ChartRuleLookupManager(src, cellColl)
 , m_ruleTable(ruleTable)
 {
-assert(m_dottedRuleColls.size() == 0);
+CHECK(m_dottedRuleColls.size() == 0);
 size_t sourceSize = src.GetSize();
 m_dottedRuleColls.resize(sourceSize);

@@ -53,7 +53,7 @@ ChartRuleLookupManagerOnDisk::ChartRuleLookupManagerOnDisk(
 , m_weight(weight)
 , m_filePath(filePath)
 {
-assert(m_expandableDottedRuleListVec.size() == 0);
+CHECK(m_expandableDottedRuleListVec.size() == 0);
 size_t sourceSize = sentence.GetSize();
 m_expandableDottedRuleListVec.resize(sourceSize);

@@ -258,7 +258,7 @@ void ChartRuleLookupManagerOnDisk::GetChartRuleCollection(
 targetPhraseCollection = iterCache->second;
 }

-assert(targetPhraseCollection);
+CHECK(targetPhraseCollection);
 if (!targetPhraseCollection->IsEmpty()) {
 outColl.Add(*targetPhraseCollection, prevDottedRule,
 GetCellCollection(), adhereTableLimit, rulesLimit);
@@ -43,7 +43,7 @@ void ChartTranslationOption::CalcEstimateOfBestScore(
 // add the score of the best underlying hypothesis
 const ChartCellLabel &cellLabel = rule->GetChartCellLabel();
 const ChartHypothesisCollection *hypoColl = cellLabel.GetStack();
-assert(hypoColl);
+CHECK(hypoColl);
 m_estimateOfBestScore += hypoColl->GetBestScore();
 }
 rule = rule->GetPrev();
@@ -24,7 +24,7 @@
 #include "TargetPhraseCollection.h"
 #include "WordsRange.h"

-#include <cassert>
+#include "util/check.hh"
 #include <vector>

 namespace Moses
@@ -19,7 +19,7 @@
 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 ***********************************************************************/

-#include <cassert>
+#include "util/check.hh"
 #include "ChartTranslationOptionCollection.h"
 #include "ChartCellCollection.h"
 #include "InputType.h"
@@ -77,12 +77,12 @@ void ChartTranslationOptionCollection::CreateTranslationOptionsForRange(
 ChartTranslationOptionList &chartRuleColl = GetTranslationOptionList(startPos, endPos);
 const WordsRange &wordsRange = chartRuleColl.GetSourceRange();

-assert(m_decodeGraphList.size() == m_ruleLookupManagers.size());
+CHECK(m_decodeGraphList.size() == m_ruleLookupManagers.size());
 std::vector <DecodeGraph*>::const_iterator iterDecodeGraph;
 std::vector <ChartRuleLookupManager*>::const_iterator iterRuleLookupManagers = m_ruleLookupManagers.begin();
 for (iterDecodeGraph = m_decodeGraphList.begin(); iterDecodeGraph != m_decodeGraphList.end(); ++iterDecodeGraph, ++iterRuleLookupManagers) {
 const DecodeGraph &decodeGraph = **iterDecodeGraph;
-assert(decodeGraph.GetSize() == 1);
+CHECK(decodeGraph.GetSize() == 1);
 ChartRuleLookupManager &ruleLookupManager = **iterRuleLookupManagers;
 size_t maxSpan = decodeGraph.GetMaxChartSpan();
 if (maxSpan == 0 || (endPos-startPos+1) <= maxSpan) {
@@ -125,7 +125,7 @@ void ChartTranslationOptionCollection::ProcessUnknownWord(size_t startPos, size_
 ruleLookupManager.GetChartRuleCollection(wordsRange, false, fullList);
 }
 }
-assert(iterRuleLookupManagers == m_ruleLookupManagers.end());
+CHECK(iterRuleLookupManagers == m_ruleLookupManagers.end());

 bool alwaysCreateDirectTranslationOption = StaticData::Instance().IsAlwaysCreateDirectTranslationOption();
 // create unknown words for 1 word coverage where we don't have any trans options
@@ -137,13 +137,13 @@ void ChartTranslationOptionCollection::ProcessUnknownWord(size_t startPos, size_
 ChartTranslationOptionList &ChartTranslationOptionCollection::GetTranslationOptionList(size_t startPos, size_t endPos)
 {
 size_t sizeVec = m_collection[startPos].size();
-assert(endPos-startPos < sizeVec);
+CHECK(endPos-startPos < sizeVec);
 return m_collection[startPos][endPos - startPos];
 }
 const ChartTranslationOptionList &ChartTranslationOptionCollection::GetTranslationOptionList(size_t startPos, size_t endPos) const
 {
 size_t sizeVec = m_collection[startPos].size();
-assert(endPos-startPos < sizeVec);
+CHECK(endPos-startPos < sizeVec);
 return m_collection[startPos][endPos - startPos];
 }

@@ -221,7 +221,7 @@ void ChartTranslationOptionCollection::ProcessOneUnknownWord(const Word &sourceW
 Word targetLHS(true);

 targetLHS.CreateFromString(Output, staticData.GetOutputFactorOrder(), targetLHSStr, true);
-assert(targetLHS.GetFactor(0) != NULL);
+CHECK(targetLHS.GetFactor(0) != NULL);

 // add to dictionary
 TargetPhrase *targetPhrase = new TargetPhrase(Output);
@@ -264,7 +264,7 @@ void ChartTranslationOptionCollection::ProcessOneUnknownWord(const Word &sourceW

 Word targetLHS(true);
 targetLHS.CreateFromString(Output, staticData.GetOutputFactorOrder(), targetLHSStr, true);
-assert(targetLHS.GetFactor(0) != NULL);
+CHECK(targetLHS.GetFactor(0) != NULL);

 m_cacheTargetPhraseCollection.push_back(tpc);
 targetPhrase->SetSourcePhrase(m_unksrc);
@@ -102,7 +102,7 @@ void ChartTranslationOptionList::Add(const TargetPhraseCollection &targetPhraseC

 void ChartTranslationOptionList::Add(ChartTranslationOption *transOpt)
 {
-assert(transOpt);
+CHECK(transOpt);
 m_collection.push_back(transOpt);
 }

@@ -103,7 +103,7 @@ Phrase ChartTrellisNode::GetOutputPhrase() const

 void ChartTrellisNode::CreateChildren()
 {
-assert(m_children.empty());
+CHECK(m_children.empty());
 const std::vector<const ChartHypothesis*> &prevHypos = m_hypo.GetPrevHypos();
 m_children.reserve(prevHypos.size());
 for (size_t ind = 0; ind < prevHypos.size(); ++ind) {
@@ -118,7 +118,7 @@ void ChartTrellisNode::CreateChildren(const ChartTrellisNode &rootNode,
 const ChartHypothesis &replacementHypo,
 ChartTrellisNode *&deviationPoint)
 {
-assert(m_children.empty());
+CHECK(m_children.empty());
 const NodeChildren &children = rootNode.GetChildren();
 m_children.reserve(children.size());
 for (size_t ind = 0; ind < children.size(); ++ind) {
@@ -42,7 +42,7 @@ ChartTrellisPath::ChartTrellisPath(const ChartTrellisDetour &detour)
 , m_scoreBreakdown(detour.GetBasePath().m_scoreBreakdown)
 , m_totalScore(0)
 {
-assert(m_deviationPoint);
+CHECK(m_deviationPoint);
 ScoreComponentCollection scoreChange;
 scoreChange = detour.GetReplacementHypo().GetScoreBreakdown();
 scoreChange.MinusEquals(detour.GetSubstitutedNode().GetHypothesis().GetScoreBreakdown());
@@ -247,7 +247,7 @@ ConfusionNet::CreateTranslationOptionCollection(const TranslationSystem* system)
 size_t maxNoTransOptPerCoverage = StaticData::Instance().GetMaxNoTransOptPerCoverage();
 float translationOptionThreshold = StaticData::Instance().GetTranslationOptionThreshold();
 TranslationOptionCollection *rv= new TranslationOptionCollectionConfusionNet(system, *this, maxNoTransOptPerCoverage, translationOptionThreshold);
-assert(rv);
+CHECK(rv);
 return rv;
 }

@@ -39,7 +39,7 @@ public:
 }

 const Column& GetColumn(size_t i) const {
-assert(i<data.size());
+CHECK(i<data.size());
 return data[i];
 }
 const Column& operator[](size_t i) const {
@@ -69,7 +69,7 @@ public:
 TranslationOptionCollection* CreateTranslationOptionCollection(const TranslationSystem* system) const;

 const NonTerminalSet &GetLabelSet(size_t /*startPos*/, size_t /*endPos*/) const {
-assert(false);
+CHECK(false);
 return *(new NonTerminalSet());
 }

@@ -23,7 +23,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 #ifndef moses_DecodeGraph_h
 #define moses_DecodeGraph_h

-#include <cassert>
+#include "util/check.hh"
 #include <list>
 #include <iterator>
 #include "TypeDef.h"
@@ -78,7 +78,7 @@ public:
 }

 size_t GetMaxChartSpan() const {
-assert(m_maxChartSpan != NOT_FOUND);
+CHECK(m_maxChartSpan != NOT_FOUND);
 return m_maxChartSpan;
 }

@@ -22,7 +22,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 #ifndef moses_DecodeStep_h
 #define moses_DecodeStep_h

-#include <cassert>
+#include "util/check.hh"
 #include "TypeDef.h"
 #include "Dictionary.h"

@@ -26,7 +26,7 @@
 #include "DotChart.h"
 #include "PhraseDictionaryNodeSCFG.h"

-#include <cassert>
+#include "util/check.hh"
 #include <vector>

 namespace Moses
@@ -96,7 +96,7 @@ public:
 }

 void Add(size_t pos, const DottedRuleInMemory *dottedRule) {
-assert(dottedRule);
+CHECK(dottedRule);
 m_coll[pos].push_back(dottedRule);
 if (!dottedRule->GetLastNode().IsLeaf()) {
 m_expandableDottedRuleList.push_back(dottedRule);
@@ -20,7 +20,7 @@
 #pragma once

 #include <vector>
-#include <cassert>
+#include "util/check.hh"

 #include "DotChart.h"

@@ -108,7 +108,7 @@ class SavedNodeOnDisk
 public:
 SavedNodeOnDisk(const DottedRuleOnDisk *dottedRule)
 :m_dottedRule(dottedRule) {
-assert(m_dottedRule);
+CHECK(m_dottedRule);
 }

 ~SavedNodeOnDisk() {
@@ -164,7 +164,7 @@ public:
 }

 void Add(size_t pos, const DottedRuleOnDisk *dottedRule) {
-assert(dottedRule);
+CHECK(dottedRule);

 m_coll[pos]->Add(dottedRule);
 m_savedNode.push_back(new SavedNodeOnDisk(dottedRule));
@@ -1,6 +1,6 @@
 // $Id$

-#include <cassert>
+#include "util/check.hh"
 #include "FFState.h"
 #include "StaticData.h"
 #include "DummyScoreProducers.h"
@@ -36,7 +36,7 @@ public:
 const ChartHypothesis&,
 int /* featureID */,
 ScoreComponentCollection*) const {
-assert(0); // feature function not valid in chart decoder
+CHECK(0); // feature function not valid in chart decoder
 return NULL;
 }
 };
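
The randlm filter hunks that follow are the sharpest illustration of the misuse the commit title alludes to: the condition inside `assert` is itself an I/O call, so defining NDEBUG would remove the reads and writes themselves, not just the error checking. A hypothetical reduction of the failure mode (function name invented for illustration):

    // Compiled with -DNDEBUG the assert's argument is never evaluated, so
    // nothing is written and the function still reports success; the CHECK
    // form keeps the write and validates it.
    #include <cassert>
    #include <fstream>

    bool save_header(std::ofstream &out, unsigned int cells) {
      assert(out.write(reinterpret_cast<const char *>(&cells), sizeof(cells)));
      return true;  // in an NDEBUG build we reach here without writing a byte
    }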
@ -37,15 +37,15 @@ namespace randlm {
|
||||
// number of bits in T
|
||||
cell_width_ = sizeof(T) << 3;
|
||||
// current implementation has following constraints
|
||||
assert(cell_width_ > 0 && cell_width_ <= 64 && cell_width_ >= width);
|
||||
CHECK(cell_width_ > 0 && cell_width_ <= 64 && cell_width_ >= width);
|
||||
// used for >> division
|
||||
log_cell_width_ = static_cast<int>(floor(log(cell_width_)/log(2) + 0.000001));
|
||||
// size of underlying data in Ts
|
||||
cells_ = ((addresses * width) + cell_width_ - 1) >> log_cell_width_;
|
||||
// instantiate underlying data
|
||||
data_ = new T[cells_];
|
||||
assert(data_ != NULL);
|
||||
assert(reset());
|
||||
CHECK(data_ != NULL);
|
||||
CHECK(reset());
|
||||
// 'first_bit' marks the first bit used by 'address' (left padded with zeros).
|
||||
first_bit_ = (width % cell_width_ == 0) ? 0 : cell_width_ - (width % cell_width_);
|
||||
// mask for full cell
|
||||
@ -54,9 +54,9 @@ namespace randlm {
|
||||
address_mask_ = full_mask_ >> first_bit_;
|
||||
}
|
||||
Filter(FileHandler* fin, bool loaddata = true) : data_(NULL) {
|
||||
assert(loadHeader(fin));
|
||||
CHECK(loadHeader(fin));
|
||||
if (loaddata)
|
||||
assert(loadData(fin));
|
||||
CHECK(loadData(fin));
|
||||
}
|
||||
virtual ~Filter() {
|
||||
delete[] data_;
|
||||
@ -72,7 +72,7 @@ namespace randlm {
|
||||
}
|
||||
// read / write functions
|
||||
inline bool read(uint64_t address, T* value) {
|
||||
assert(address <= addresses_);
|
||||
CHECK(address <= addresses_);
|
||||
// copy address to 'value'
|
||||
uint64_t data_bit = address * width_;
|
||||
uint32_t data_cell = (data_bit >> log_cell_width_); // % cells_;
|
||||
@ -94,7 +94,7 @@ namespace randlm {
|
||||
return true;
|
||||
}
|
||||
inline T read(uint64_t address) {
|
||||
assert(address <= addresses_);
|
||||
CHECK(address <= addresses_);
|
||||
// return value at address
|
||||
T value = 0;
|
||||
uint64_t data_bit = address * width_;
|
||||
@ -116,8 +116,8 @@ namespace randlm {
|
||||
return value;
|
||||
}
|
||||
inline bool write(uint64_t address, T value) {
|
||||
assert(address <= addresses_);
|
||||
assert(log2(value) <= width_);
|
||||
CHECK(address <= addresses_);
|
||||
CHECK(log2(value) <= width_);
|
||||
// write 'value' to address
|
||||
uint64_t data_bit = address * width_;
|
||||
uint32_t data_cell = (data_bit >> log_cell_width_); // % cells_;
|
||||
@ -207,50 +207,50 @@ namespace randlm {
|
||||
int getCellWidth() { return cell_width_; }
|
||||
uint32_t getCells() { return cells_; }
|
||||
virtual bool save(FileHandler* out) {
|
||||
assert(out != NULL);
|
||||
assert(out->write((char*)&cells_, sizeof(cells_)));
|
||||
assert(out->write((char*)&cell_width_, sizeof(cell_width_)));
|
||||
assert(out->write((char*)&log_cell_width_, sizeof(log_cell_width_)));
|
||||
assert(out->write((char*)&addresses_, sizeof(addresses_)));
|
||||
assert(out->write((char*)&width_, sizeof(width_)));
|
||||
assert(out->write((char*)&first_bit_, sizeof(first_bit_)));
|
||||
assert(out->write((char*)&full_mask_, sizeof(full_mask_)));
|
||||
assert(out->write((char*)&address_mask_, sizeof(address_mask_)));
//assert(out->write((char*)data_, cells_ * sizeof(T)));
CHECK(out != NULL);
CHECK(out->write((char*)&cells_, sizeof(cells_)));
CHECK(out->write((char*)&cell_width_, sizeof(cell_width_)));
CHECK(out->write((char*)&log_cell_width_, sizeof(log_cell_width_)));
CHECK(out->write((char*)&addresses_, sizeof(addresses_)));
CHECK(out->write((char*)&width_, sizeof(width_)));
CHECK(out->write((char*)&first_bit_, sizeof(first_bit_)));
CHECK(out->write((char*)&full_mask_, sizeof(full_mask_)));
CHECK(out->write((char*)&address_mask_, sizeof(address_mask_)));
//CHECK(out->write((char*)data_, cells_ * sizeof(T)));
const uint64_t jump = 524288032ul; //(uint64_t)pow(2, 29);
if((width_ == 1) || cells_ < jump)
assert(out->write((char*)data_, cells_ * sizeof(T)));
CHECK(out->write((char*)data_, cells_ * sizeof(T)));
else {
uint64_t idx(0);
while(idx + jump < cells_) {
assert(out->write((char*)&data_[idx], jump * sizeof(T)));
CHECK(out->write((char*)&data_[idx], jump * sizeof(T)));
idx += jump;
}
assert(out->write((char*)&data_[idx], (cells_ - idx) * sizeof(T)));
CHECK(out->write((char*)&data_[idx], (cells_ - idx) * sizeof(T)));
}
return true;
}
protected:
bool loadHeader(FileHandler* fin) {
assert(fin != NULL);
assert(fin->read((char*)&cells_, sizeof(cells_)));
assert(fin->read((char*)&cell_width_, sizeof(cell_width_)));
assert(cell_width_ == sizeof(T) << 3); // make sure correct underlying data type
assert(fin->read((char*)&log_cell_width_, sizeof(log_cell_width_)));
assert(fin->read((char*)&addresses_, sizeof(addresses_)));
assert(fin->read((char*)&width_, sizeof(width_)));
assert(fin->read((char*)&first_bit_, sizeof(first_bit_)));
assert(fin->read((char*)&full_mask_, sizeof(full_mask_)));
assert(fin->read((char*)&address_mask_, sizeof(address_mask_)));
CHECK(fin != NULL);
CHECK(fin->read((char*)&cells_, sizeof(cells_)));
CHECK(fin->read((char*)&cell_width_, sizeof(cell_width_)));
CHECK(cell_width_ == sizeof(T) << 3); // make sure correct underlying data type
CHECK(fin->read((char*)&log_cell_width_, sizeof(log_cell_width_)));
CHECK(fin->read((char*)&addresses_, sizeof(addresses_)));
CHECK(fin->read((char*)&width_, sizeof(width_)));
CHECK(fin->read((char*)&first_bit_, sizeof(first_bit_)));
CHECK(fin->read((char*)&full_mask_, sizeof(full_mask_)));
CHECK(fin->read((char*)&address_mask_, sizeof(address_mask_)));
return true;
}
bool loadData(FileHandler* fin) {
// instantiate underlying array
data_ = new T[cells_];
assert(data_ != NULL);
assert(fin->read((char*)data_, cells_ * sizeof(T)));
//assert(fin->read((char*)&data_[0], ceil(float(cells_) / 2.0) * sizeof(T)));
//assert(fin->read((char*)&data_[cells_ / 2], (cells_ / 2) * sizeof(T)));
CHECK(data_ != NULL);
CHECK(fin->read((char*)data_, cells_ * sizeof(T)));
//CHECK(fin->read((char*)&data_[0], ceil(float(cells_) / 2.0) * sizeof(T)));
//CHECK(fin->read((char*)&data_[cells_ / 2], (cells_ / 2) * sizeof(T)));
return true;
}
uint64_t cells_; // number T making up 'data_'
@ -271,7 +271,7 @@ namespace randlm {
BitFilter(FileHandler* fin, bool loaddata = true)
: Filter<uint8_t>(fin, loaddata) {
if (loaddata)
assert(load(fin));
CHECK(load(fin));
}
// TODO: overload operator[]
virtual bool testBit(uint64_t location) {
@ -289,7 +289,7 @@ namespace randlm {
return true;
}
bool save(FileHandler* fout) {
assert(Filter<uint8_t>::save(fout));
CHECK(Filter<uint8_t>::save(fout));
std::cerr << "Saved BitFilter. Rho = " << rho() << "." << std::endl;;
return true;
}
@ -316,10 +316,10 @@ namespace randlm {
class ResizedBitFilter : public BitFilter {
public:
ResizedBitFilter(FileHandler* fin) : BitFilter(fin) {
assert(load(fin));
CHECK(load(fin));
}
ResizedBitFilter(FileHandler* fin, uint64_t newsize) : BitFilter(newsize) {
assert(resizeFromFile(fin, newsize));
CHECK(resizeFromFile(fin, newsize));
}
bool resizeFromFile(FileHandler* oldin, uint64_t newsize);
virtual bool testBit(uint64_t location) {
@ -332,18 +332,18 @@ namespace randlm {
}
bool save(FileHandler* fout) {
// re-hashing parameters
assert(BitFilter::save(fout));
CHECK(BitFilter::save(fout));
std::cerr << "Saved ResizedBitFilter. Rho = " << rho() << "." << std::endl;
assert(fout->write((char*)&old_addresses_, sizeof(old_addresses_)));
assert(fout->write((char*)&a_, sizeof(a_)));
CHECK(fout->write((char*)&old_addresses_, sizeof(old_addresses_)));
CHECK(fout->write((char*)&a_, sizeof(a_)));
return fout->write((char*)&b_, sizeof(b_));
}
protected:
bool load(FileHandler* fin) {
// re-hashing parameters
std::cerr << "Loaded ResizedBitFilter. Rho = " << rho() << "." << std::endl;
assert(fin->read((char*)&old_addresses_, sizeof(old_addresses_)));
assert(fin->read((char*)&a_, sizeof(a_)));
CHECK(fin->read((char*)&old_addresses_, sizeof(old_addresses_)));
CHECK(fin->read((char*)&a_, sizeof(a_)));
return fin->read((char*)&b_, sizeof(b_));
}
// member data
@ -360,7 +360,7 @@ namespace randlm {
CountingFilter(uint64_t addresses, int width, bool wrap_around = true) :
Filter<T>(addresses, width), wrap_around_(wrap_around) {}
CountingFilter(FileHandler* fin) : Filter<T>(fin, true) {
assert(load(fin));
CHECK(load(fin));
}
~CountingFilter() {}
// increment this address by one
@ -384,11 +384,11 @@ namespace randlm {
return true;
// wrapped round so check whether need to reset to max count
if (!wrap_around_)
assert(this->write(address, this->address_mask_));
CHECK(this->write(address, this->address_mask_));
return false; // false to indicate that overflowed
}
bool save(FileHandler* fout) {
assert(Filter<T>::save(fout));
CHECK(Filter<T>::save(fout));
return fout->write((char*)&wrap_around_, sizeof(wrap_around_));
}
private:

@ -24,7 +24,7 @@ FileHandler::FileHandler(const std::string & path, std::ios_base::openmode flags
exit(EXIT_FAILURE);
} else {
bool ret = setStreamBuffer(flags & std::ios::in);
assert(ret);
CHECK(ret);
}
this->precision(32);
}
@ -59,11 +59,11 @@ bool FileHandler::setStreamBuffer(bool checkExists)
{
// redirect stdin or stdout if necesary
if (path_ == FileHandler::kStdInDescriptor) {
assert(flags_ & std::ios::in);
CHECK(flags_ & std::ios::in);
std::streambuf* sb = std::cin.rdbuf();
buffer_ = sb;
} else if (path_ == FileHandler::kStdOutDescriptor) {
assert(flags_ & std::ios::out);
CHECK(flags_ & std::ios::out);
std::streambuf* sb = std::cout.rdbuf();
buffer_ = sb;
} else {

@ -7,7 +7,7 @@
#include <cstdlib>
#include <sys/stat.h>
#include <string>
#include <cassert>
#include "util/check.hh"
#include "fdstream.h"
#include "utils.h"

@ -1,7 +1,7 @@
#ifndef INC_ALLHASHFUNCS_H
#define INC_ALLHASHFUNCS_H

#include <cassert>
#include "util/check.hh"
#include <cmath>
#include "types.h"
#include "utils.h"
@ -28,12 +28,12 @@ class HashBase {
virtual T hash(const wordID_t* id, const int len, count_t h)=0; // vocab mapped hashing
count_t size() { return H_;}
virtual void save(FileHandler* fout) {
assert(fout != 0);
CHECK(fout != 0);
fout->write((char*)&m_, sizeof(m_));
fout->write((char*)&H_, sizeof(H_));
}
virtual void load(FileHandler* fin) {
assert(fin != 0);
CHECK(fin != 0);
fin->read((char*)&m_, sizeof(m_));
fin->read((char*)&H_, sizeof(H_));
}
@ -43,7 +43,7 @@ class UnivHash_linear: public HashBase<T> {
public:
UnivHash_linear(float m, count_t H, P pr):
HashBase<T>(m, H), pr_(pr) {
//assert(isPrime(pr_));
//CHECK(isPrime(pr_));
initSeeds();
}
UnivHash_linear(FileHandler* fin):
@ -177,7 +177,7 @@ T UnivHash_tableXOR<T>::hash(const char* s, count_t h = 0) {
unsigned char c;
while((c = *s++) && (++pos < MAX_STR_LEN))
value ^= table_[h][idx += c];
assert(value < this->m_);
CHECK(value < this->m_);
return value;
}

@ -265,7 +265,7 @@ void UnivHash_linear<T>::freeSeeds() {
template <typename T>
inline T UnivHash_linear<T>::hash(const wordID_t* id, const int len,
count_t h=0) {
assert(h < this->H_);
CHECK(h < this->H_);
T value = 0;
int pos(0);
while(pos < len) {
@ -277,7 +277,7 @@ inline T UnivHash_linear<T>::hash(const wordID_t* id, const int len,
template <typename T>
inline T UnivHash_linear<T>::hash(const wordID_t id, const count_t pos,
const T prevValue, count_t h=0) {
assert(h < this->H_);
CHECK(h < this->H_);
T value = prevValue + ((a_[h][pos] * id) + b_[h][pos]); // % pr_;
return value % this->m_;
}
@ -315,7 +315,7 @@ void UnivHash_linear<T>::load(FileHandler* fin) {
/*
template <typename T>
T UnivHash_linear<T>::hash(const char* s, count_t h=0) {
assert(h < this->H_);
CHECK(h < this->H_);
T value = 0;
int pos(0);
unsigned char c;

@ -21,7 +21,7 @@ public:
OnlineRLM(uint16_t MBs, int width, int bucketRange, count_t order,
Vocab* v, float qBase = 8): PerfectHash<T>(MBs, width, bucketRange, qBase),
vocab_(v), bAdapting_(false), order_(order), corpusSize_(0), alpha_(0) {
assert(vocab_ != 0);
CHECK(vocab_ != 0);
//instantiate quantizer class here
cache_ = new Cache<float>(8888.8888, 9999.9999); // unknown_value, null_value
alpha_ = new float[order_ + 1];
@ -137,7 +137,7 @@ int OnlineRLM<T>::query(const wordID_t* IDs, int len) {
value -= ((value & this->hitMask_) != 0) ? this->hitMask_ : 0; // check for previous hit marks
}
else {
assert(filterIdx < this->cells_);
CHECK(filterIdx < this->cells_);
//markQueried(filterIdx);
}
}
@ -158,12 +158,12 @@ bool OnlineRLM<T>::markPrefix(const wordID_t* IDs, const int len, bool bSet) {
return false;
}
if(filterIndex != this->cells_ + 1) {
assert(hpdItr == this->dict_.end());
CHECK(hpdItr == this->dict_.end());
if(bSet) bPrefix_->setBit(filterIndex); // mark index
else bPrefix_->clearBit(filterIndex); // unset index
}
else {
assert(filterIndex == this->cells_ + 1);
CHECK(filterIndex == this->cells_ + 1);
//how to handle hpd prefixes?
}
if(pfCache.nodes() > 10000) pfCache.clear();
@ -289,14 +289,14 @@ float OnlineRLM<T>::getProb(const wordID_t* ngram, int len,
logprob = alpha_[len] + oovprob;
break;
case 1: // unigram found only
assert(in[len - 1] > 0);
CHECK(in[len - 1] > 0);
logprob = alpha_[len - 1] + (corpusSize_ > 0 ?
log10(static_cast<float>(in[len - 1]) / static_cast<float>(corpusSize_)) : 0);
//logprob = alpha_[len - 1] +
//log10(static_cast<float>(in[len - 1]) / static_cast<float>(corpusSize_));
break;
default:
assert(den_val > 0);
CHECK(den_val > 0);
//if(subgram == in[len - found]) ++subgram; // avoid returning zero probs????
logprob = alpha_[len - num_fnd] +
log10(static_cast<float>(in[len - num_fnd]) / static_cast<float>(den_val));
@ -313,7 +313,7 @@ template<typename T>
const void* OnlineRLM<T>::getContext(const wordID_t* ngram, int len) {
int dummy(0);
float* addresses[len]; // only interested in addresses of cache
assert(cache_->getCache2(ngram, len, &addresses[0], &dummy) == len);
CHECK(cache_->getCache2(ngram, len, &addresses[0], &dummy) == len);
// return address of cache node
return (const void*)addresses[0];
}
@ -391,7 +391,7 @@ void OnlineRLM<T>::load(FileHandler* fin) {
cerr << "Loading ORLM...\n";
// load vocab first
vocab_ = new Vocab(fin);
assert(vocab_ != 0);
CHECK(vocab_ != 0);
fin->read((char*)&corpusSize_, sizeof(corpusSize_));
cerr << "\tCorpus size = " << corpusSize_ << endl;
fin->read((char*)&order_, sizeof(order_));

@ -59,9 +59,9 @@ bool Parameters::loadParams(int argc, char ** argv) {
//if the parameter is of type booL no corresponding value
if( getValueType(param) == kBoolValue ) {
jumpBy = 1;
assert(setParamValue(param, kTrueValue));
CHECK(setParamValue(param, kTrueValue));
} else { //not of type bool so must have corresponding value
assert(i+1 < argc);
CHECK(i+1 < argc);
jumpBy = 2;
std::string val = argv[i+1];
Utils::trim(val);

@ -5,7 +5,7 @@
#include <map>
#include <set>
#include <vector>
#include <cassert>
#include "util/check.hh"
#include "file.h"
#include "utils.h"
#include "types.h"

@ -22,7 +22,7 @@ class PerfectHash {
public:
PerfectHash(uint16_t MBs, int width, int bucketRange, float qBase);
PerfectHash(FileHandler* fin) {
assert(fin != 0);
CHECK(fin != 0);
}
virtual ~PerfectHash();
void analyze();
@ -116,7 +116,7 @@ uint64_t PerfectHash<T>::insert(const wordID_t* IDs, const int len,
}
++index;
}
assert((emptyidx < index) && (filter_->read(emptyidx) == 0)); // should have found empty index if it gets here
CHECK((emptyidx < index) && (filter_->read(emptyidx) == 0)); // should have found empty index if it gets here
T code = (T)qtizer_->code(value);
filter_->write(emptyidx, fp); // insert the fprint
values_->write(emptyidx, code);
@ -214,8 +214,8 @@ void PerfectHash<T>::remove(const wordID_t* IDs, const int len) {
}
template<typename T> // clear filter index
void PerfectHash<T>::remove(uint64_t index) {
assert(index < cells_);
assert(filter_->read(index) != 0); // slow
CHECK(index < cells_);
CHECK(filter_->read(index) != 0); // slow
filter_->write(index, 0);
values_->write(index, 0);
//reduce bucket size
@ -255,7 +255,7 @@ count_t PerfectHash<T>::bucketsMemUse() {
}
template<typename T>
void PerfectHash<T>::save(FileHandler* fout) {
assert(fout != 0);
CHECK(fout != 0);
cerr << "\tSaving perfect hash parameters...\n";
fout->write((char*)&hitMask_, sizeof(hitMask_));
fout->write((char*)&memBound_, sizeof(memBound_));
@ -280,7 +280,7 @@ void PerfectHash<T>::save(FileHandler* fout) {
}
template<typename T>
void PerfectHash<T>::load(FileHandler* fin) {
assert(fin != 0);
CHECK(fin != 0);
cerr << "\tLoading perfect hash parameters...\n";
fin->read((char*)&hitMask_, sizeof(hitMask_));
fin->read((char*)&memBound_, sizeof(memBound_));

@ -3,7 +3,7 @@

#include <vector>
#include <cmath>
#include <cassert>
#include "util/check.hh"
#include <algorithm>
#include "types.h"

@ -11,7 +11,7 @@ static const float kFloatErr = 0.00001f;
class LogQtizer {
public:
LogQtizer(float i): base_(pow(2, 1 / i)) {
assert(base_ > 1);
CHECK(base_ > 1);
max_code_ = 0;
float value = 1; // code = 1 -> value = 1 for any base
std::vector<float> code_to_value_vec;
@ -34,12 +34,12 @@ public:
std::cerr << "Initialized quantization (size = " << max_code_ + 1 << ")" << std::endl;
}
LogQtizer(FileHandler* fin) {
assert(fin != NULL);
CHECK(fin != NULL);
load(fin);
}
int code(float value) {
// should just be: return log_b(value)
assert(!(value < min_value_ || value > max_value_));
CHECK(!(value < min_value_ || value > max_value_));
// but binary search removes errors due to floor operator above
int code = static_cast<int>(std::lower_bound(code_to_value_, code_to_value_+ max_code_,
value) - code_to_value_);

@ -118,7 +118,7 @@ bool Vocab::Load(FileHandler* vcbin, const FactorDirection& direction,
wordID_t id;

void *ret = getline(*vcbin, line);
assert(ret);
CHECK(ret);
std::istringstream first(line.c_str());
uint32_t vcbsize(0);
first >> vcbsize;
@ -132,7 +132,7 @@ bool Vocab::Load(FileHandler* vcbin, const FactorDirection& direction,
// may be no id (i.e. file may just be a word list)
if (id == 0 && word != GetkOOVWord())
id = m_ids2words.size() + 1; // assign ids sequentially starting from 1
assert(m_ids2words.count(id) == 0 && m_words2ids.count(word) == 0);
CHECK(m_ids2words.count(id) == 0 && m_words2ids.count(word) == 0);
m_ids2words[id] = word;
m_words2ids[word] = id;
}

@ -39,7 +39,7 @@ public:
m_kBOSWordID(1) {
InitSpecialWords();
bool ret = Load(vocab_path, direction, factors, closed);
assert(ret);
CHECK(ret);
}
Vocab(FileHandler * fin, const FactorDirection& direction,
const FactorList& factors, bool closed = true):
@ -47,7 +47,7 @@ public:
m_kBOSWordID(1) {
InitSpecialWords();
bool ret = Load(fin, direction, factors, closed);
assert(ret);
CHECK(ret);
}
Vocab(FileHandler *fin):
m_kOOVWordID(0),

@ -109,7 +109,7 @@ void DynSuffixArray::Insert(vuint_t* newSent, unsigned newIndex)
//stage 3...all words of new sentence are inserted backwards
// stage 2: k=ISA[newIndex], tmp= L[k], L[k] = newChar
//PrintAuxArrays();
assert(newIndex <= m_SA->size());
CHECK(newIndex <= m_SA->size());
int k(-1), kprime(-1);
k = (newIndex < m_SA->size() ? m_ISA->at(newIndex) : m_ISA->at(0)); // k is now index of the cycle that starts at newindex
int true_pos = LastFirstFunc(k); // track cycle shift (newIndex - 1)
@ -161,7 +161,7 @@ void DynSuffixArray::Reorder(unsigned j, unsigned jprime)
//cerr << "j=" << j << "\tj'=" << jprime << endl;
int isaIdx(-1);
int new_j = LastFirstFunc(j);
assert(j <= jprime);
CHECK(j <= jprime);
// for SA and L, the element at pos j is moved to pos j'
m_L->insert(m_L->begin() + jprime + 1, m_L->at(j));
m_L->erase(m_L->begin() + j);

@ -1,7 +1,7 @@
#ifndef moses_FFState_h
#define moses_FFState_h

#include <cassert>
#include "util/check.hh"
#include <vector>

@ -1,6 +1,6 @@
#include "FeatureFunction.h"

#include <cassert>
#include "util/check.hh"

namespace Moses
{
@ -19,7 +19,7 @@ void StatelessFeatureFunction::Evaluate(
const TargetPhrase& /* cur_hypo */,
ScoreComponentCollection* /* accumulator */) const
{
assert(!"Please implement Evaluate or set ComputeValueInTranslationOption to true");
CHECK(!"Please implement Evaluate or set ComputeValueInTranslationOption to true");
}

bool StatefulFeatureFunction::IsStateless() const

@ -10,7 +10,7 @@
#include <cstdio>
#include <iostream>
#include <vector>
#include <cassert>
#include "util/check.hh"
#include "UserMessage.h"
#include "TypeDef.h"
#include "Util.h"
@ -161,7 +161,7 @@ inline FILE* fOpen(const char* fn,const char* m)
return f;
else {
UserMessage::Add(std::string("ERROR: could not open file ") + fn + " with mode " + m + "\n");
assert(false);
CHECK(false);
return NULL;
}
}

@ -1,4 +1,4 @@
#include <cassert>
#include "util/check.hh"
#include <climits>
#include <vector>

@ -11,7 +11,7 @@ using namespace std;
// All-pairs shortest path algorithm
void floyd_warshall(const std::vector<std::vector<bool> >& edges, std::vector<std::vector<int> >& dist)
{
assert(edges.size() == edges.front().size());
CHECK(edges.size() == edges.front().size());
dist.clear();
dist.resize(edges.size(), std::vector<int>(edges.size(), 0));

@ -19,7 +19,7 @@ License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/

#include <cassert>
#include "util/check.hh"
#include <iostream>
#include <limits>
#include <vector>
@ -101,7 +101,7 @@ Hypothesis::Hypothesis(const Hypothesis &prevHypo, const TranslationOption &tran
{
// assert that we are not extending our hypothesis by retranslating something
// that this hypothesis has already translated!
assert(!m_sourceCompleted.Overlap(m_currSourceWordsRange));
CHECK(!m_sourceCompleted.Overlap(m_currSourceWordsRange));

//_hash_computed = false;
m_sourceCompleted.SetValue(m_currSourceWordsRange.GetStartPos(), m_currSourceWordsRange.GetEndPos(), true);
@ -318,7 +318,7 @@ float Hypothesis::CalcExpectedScore( const SquareMatrix &futureScore )
t = clock(); // track time excluding LM
}

assert(!"Need to add code to get the distortion scores");
CHECK(!"Need to add code to get the distortion scores");
//CalcDistortionScore();

// LANGUAGE MODEL ESTIMATE (includes word penalty cost)
@ -342,7 +342,7 @@ void Hypothesis::CalcRemainingScore()
clock_t t=0; // used to track time

// LANGUAGE MODEL COST
assert(!"Need to add code to get the LM score(s)");
CHECK(!"Need to add code to get the LM score(s)");
//CalcLMScore(staticData.GetAllLM());

IFVERBOSE(2) {

@ -186,7 +186,7 @@ public:
const Hypothesis *hypo = this;
while (pos < hypo->GetCurrTargetWordsRange().GetStartPos()) {
hypo = hypo->GetPrevHypo();
assert(hypo != NULL);
CHECK(hypo != NULL);
}
return hypo->GetCurrWord(pos - hypo->GetCurrTargetWordsRange().GetStartPos());
}

@ -99,7 +99,7 @@ bool HypothesisStackCubePruning::AddPrune(Hypothesis *hypo)
// equiv hypo exists, recombine with other hypo
iterator &iterExisting = addRet.first;
Hypothesis *hypoExisting = *iterExisting;
assert(iterExisting != m_hypos.end());
CHECK(iterExisting != m_hypos.end());

m_manager.GetSentenceStats().AddRecombination(*hypo, **iterExisting);

@ -119,7 +119,7 @@ bool HypothesisStackCubePruning::AddPrune(Hypothesis *hypo)
if (!added) {
iterExisting = m_hypos.find(hypo);
TRACE_ERR("Offending hypo = " << **iterExisting << endl);
assert(false);
CHECK(false);
}
return false;
} else {
@ -137,7 +137,7 @@ bool HypothesisStackCubePruning::AddPrune(Hypothesis *hypo)
void HypothesisStackCubePruning::AddInitial(Hypothesis *hypo)
{
std::pair<iterator, bool> addRet = Add(hypo);
assert (addRet.second);
CHECK(addRet.second);

const WordsBitmap &bitmap = hypo->GetWordsBitmap();
m_bitmapAccessor[bitmap] = new BitmapContainer(bitmap, *this);

@ -109,7 +109,7 @@ bool HypothesisStackNormal::AddPrune(Hypothesis *hypo)
// equiv hypo exists, recombine with other hypo
iterator &iterExisting = addRet.first;
Hypothesis *hypoExisting = *iterExisting;
assert(iterExisting != m_hypos.end());
CHECK(iterExisting != m_hypos.end());

m_manager.GetSentenceStats().AddRecombination(*hypo, **iterExisting);

@ -19,7 +19,6 @@ License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/

#include <cassert>
#include <limits>
#include <iostream>
#include <fstream>

@ -19,7 +19,7 @@ License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/

#include <cassert>
#include "util/check.hh"
#include <limits>
#include <iostream>
#include <memory>
@ -95,7 +95,7 @@ void LanguageModelImplementation::CalcScore(const Phrase &phrase, float &fullSco
}
} else {
ShiftOrPush(contextFactor, word);
assert(contextFactor.size() <= GetNGramOrder());
CHECK(contextFactor.size() <= GetNGramOrder());

if (word == GetSentenceStartArray()) {
// do nothing, don't include prob for <s> unigram
@ -264,7 +264,7 @@ private:
*/
size_t CalcSuffix(const ChartHypothesis &hypo, int featureID, Phrase &ret, size_t size) const
{
assert(m_contextPrefix.GetSize() <= m_numTargetTerminals);
CHECK(m_contextPrefix.GetSize() <= m_numTargetTerminals);

// special handling for small hypotheses
// does the prefix match the entire hypothesis string? -> just copy prefix
@ -407,7 +407,7 @@ FFState* LanguageModelImplementation::EvaluateChart(const ChartHypothesis& hypo,
// beginning of sentence symbol <s>? -> just update state
if (word == GetSentenceStartArray())
{
assert(phrasePos == 0);
CHECK(phrasePos == 0);
delete lmState;
lmState = NewState( GetBeginSentenceState() );
}

@ -19,7 +19,6 @@ License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/

#include <cassert>
#include <cstring>
#include <iostream>
#include <memory>

@ -1,4 +1,3 @@
#include <cassert>
#include <limits>
#include <iostream>
#include <fstream>
@ -22,7 +21,6 @@ bool LanguageModelORLM::Load(const std::string &filePath, FactorType factorType,
m_lm = new OnlineRLM<T>(&fLmIn, m_nGramOrder);
fLmIn.close();
//m_lm = new MultiOnlineRLM<T>(m_filePath, m_nGramOrder);
assert(m_lm != NULL);
// get special word ids
m_oov_id = m_lm->vocab_->GetWordID("<unk>");
CreateFactors();

@ -67,7 +67,6 @@ bool LanguageModelParallelBackoff::Load(const std::string &filePath, const std::
cerr << "Factored stats\n";

FNgram* fngramLM = new FNgram(*m_srilmVocab,*fnSpecs);
assert(fngramLM != 0);

cerr << "FNgram object created\n";

@ -17,7 +17,7 @@ License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/

#include <cassert>
#include "util/check.hh"
#include <limits>
#include <iostream>
#include <fstream>
@ -28,6 +28,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#include "InputFileStream.h"
#include "StaticData.h"

namespace Moses
{
using namespace std;
@ -42,7 +43,7 @@ bool LanguageModelRandLM::Load(const std::string &filePath, FactorType factorTyp
m_nGramOrder = nGramOrder;
int cache_MB = 50; // increase cache size
m_lm = randlm::RandLM::initRandLM(filePath, nGramOrder, cache_MB);
assert(m_lm != NULL);
CHECK(m_lm != NULL);
// get special word ids
m_oov_id = m_lm->getWordID(m_lm->getOOV());
CreateFactors(factorCollection);

@ -19,7 +19,7 @@ License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/

#include <cassert>
#include "util/check.hh"
#include <limits>
#include <iostream>
#include <fstream>
@ -149,7 +149,7 @@ LMResult LanguageModelSRI::GetValue(const vector<const Word*> &contextFactor, St
}
ngram[count] = Vocab_None;

assert((*contextFactor[count-1])[factorType] != NULL);
CHECK((*contextFactor[count-1])[factorType] != NULL);
// call sri lm fn
VocabIndex lmId = GetLmID((*contextFactor[count-1])[factorType]);
ret = GetValue(lmId, ngram+1);

@ -19,7 +19,6 @@ License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/

#include <cassert>
#include <limits>
#include <iostream>
#include <sstream>

@ -37,11 +37,11 @@ public:
std::pair<typename M::iterator,bool> p
=m.insert(std::make_pair(k,data.size()));
if(p.second) data.push_back(k);
assert(static_cast<size_t>(p.first->second)<data.size());
CHECK(static_cast<size_t>(p.first->second)<data.size());
return p.first->second;
}
Key const& symbol(LabelId i) const {
assert(static_cast<size_t>(i)<data.size());
CHECK(static_cast<size_t>(i)<data.size());
return data[i];
}

@ -43,7 +43,7 @@ public:
virtual FFState* EvaluateChart(const ChartHypothesis&,
int /* featureID */,
ScoreComponentCollection*) const {
assert(0); // not valid for chart decoder
CHECK(0); // not valid for chart decoder
return NULL;
}

@ -1,7 +1,7 @@

#include <vector>
#include <string>
#include <cassert>
#include "util/check.hh"

#include "FFState.h"
#include "Hypothesis.h"
@ -121,7 +121,7 @@ LexicalReorderingState *LexicalReorderingConfiguration::CreateLexicalReorderingS
void LexicalReorderingState::CopyScores(Scores& scores, const TranslationOption &topt, ReorderingType reoType) const
{
// don't call this on a bidirectional object
assert(m_direction == LexicalReorderingConfiguration::Backward || m_direction == LexicalReorderingConfiguration::Forward);
CHECK(m_direction == LexicalReorderingConfiguration::Backward || m_direction == LexicalReorderingConfiguration::Forward);
const Scores *cachedScores = (m_direction == LexicalReorderingConfiguration::Backward) ?
topt.GetCachedScores(m_configuration.GetScoreProducer()) : m_prevScore;

@ -183,7 +183,7 @@ int PhraseBasedReorderingState::Compare(const FFState& o) const
return 0;

const PhraseBasedReorderingState* other = dynamic_cast<const PhraseBasedReorderingState*>(&o);
assert(other != NULL);
CHECK(other != NULL);
if (m_prevRange == other->m_prevRange) {
if (m_direction == LexicalReorderingConfiguration::Forward) {
return ComparePrevScores(other->m_prevScore);
@ -400,7 +400,7 @@ int HierarchicalReorderingForwardState::Compare(const FFState& o) const
return 0;

const HierarchicalReorderingForwardState* other = dynamic_cast<const HierarchicalReorderingForwardState*>(&o);
assert(other != NULL);
CHECK(other != NULL);
if (m_prevRange == other->m_prevRange) {
return ComparePrevScores(other->m_prevScore);
} else if (m_prevRange < other->m_prevRange) {

@ -258,7 +258,7 @@ Scores LexicalReorderingTableTree::GetScore(const Phrase& f, const Phrase& e, co
}

if(m_FactorsC.empty()) {
assert(1 == cands.size());
CHECK(1 == cands.size());
return cands[0].GetScore(0);
} else {
score = auxFindScoreForContext(cands, c);
@ -273,7 +273,7 @@ Scores LexicalReorderingTableTree::GetScore(const Phrase& f, const Phrase& e, co
Scores LexicalReorderingTableTree::auxFindScoreForContext(const Candidates& cands, const Phrase& context)
{
if(m_FactorsC.empty()) {
assert(cands.size() <= 1);
CHECK(cands.size() <= 1);
return (1 == cands.size())?(cands[0].GetScore(0)):(Scores());
} else {
std::vector<std::string> cvec;
@ -380,7 +380,7 @@ bool LexicalReorderingTableTree::Create(std::istream& inFile,
}
} else {
//sanity check ALL lines must have same number of tokens
assert(numTokens == tokens.size());
CHECK(numTokens == tokens.size());
}
int phrase = 0;
for(; phrase < numKeyTokens; ++phrase) {
@ -424,7 +424,7 @@ bool LexicalReorderingTableTree::Create(std::istream& inFile,
if(currKey.empty()) {
currKey = key;
//insert key into tree
assert(psa);
CHECK(psa);
PSA::Data& d = psa->insert(key);
if(d == InvalidOffT) {
d = fTell(ot);
@ -454,7 +454,7 @@ bool LexicalReorderingTableTree::Create(std::istream& inFile,
currFirstWord = key[0];
}
//c) insert key into tree
assert(psa);
CHECK(psa);
PSA::Data& d = psa->insert(key);
if(d == InvalidOffT) {
d = fTell(ot);

@ -232,7 +232,7 @@ void Manager::CalcNBest(size_t count, TrellisPathList &ret,bool onlyDistinct) co
for (size_t iteration = 0 ; (onlyDistinct ? distinctHyps.size() : ret.GetSize()) < count && contenders.GetSize() > 0 && (iteration < count * nBestFactor) ; iteration++) {
// get next best from list of contenders
TrellisPath *path = contenders.pop();
assert(path);
CHECK(path);
// create deviations from current best
path->CreateDeviantPaths(contenders);
if(onlyDistinct) {
@ -304,11 +304,11 @@ void Manager::CalcLatticeSamples(size_t count, TrellisPathList &ret) const {
//forward from current
if (i->forward >= 0) {
map<int,const Hypothesis*>::const_iterator idToHypIter = idToHyp.find(i->forward);
assert(idToHypIter != idToHyp.end());
CHECK(idToHypIter != idToHyp.end());
const Hypothesis* nextHypo = idToHypIter->second;
outgoingHyps[hypo].insert(nextHypo);
map<int,float>::const_iterator fscoreIter = fscores.find(nextHypo->GetId());
assert(fscoreIter != fscores.end());
CHECK(fscoreIter != fscores.end());
edgeScores[Edge(hypo->GetId(),nextHypo->GetId())] =
i->fscore - fscoreIter->second;
}
@ -325,15 +325,15 @@ void Manager::CalcLatticeSamples(size_t count, TrellisPathList &ret) const {
map<const Hypothesis*, set<const Hypothesis*> >::const_iterator outIter =
outgoingHyps.find(i->hypo);

assert(outIter != outgoingHyps.end());
CHECK(outIter != outgoingHyps.end());
float sigma = 0;
for (set<const Hypothesis*>::const_iterator j = outIter->second.begin();
j != outIter->second.end(); ++j) {
map<const Hypothesis*, float>::const_iterator succIter = sigmas.find(*j);
assert(succIter != sigmas.end());
CHECK(succIter != sigmas.end());
map<Edge,float>::const_iterator edgeScoreIter =
edgeScores.find(Edge(i->hypo->GetId(),(*j)->GetId()));
assert(edgeScoreIter != edgeScores.end());
CHECK(edgeScoreIter != edgeScores.end());
float term = edgeScoreIter->second + succIter->second; // Add sigma(*j)
if (sigma == 0) {
sigma = term;
@ -347,7 +347,7 @@ void Manager::CalcLatticeSamples(size_t count, TrellisPathList &ret) const {

//The actual sampling!
const Hypothesis* startHypo = searchGraph.back().hypo;
assert(startHypo->GetId() == 0);
CHECK(startHypo->GetId() == 0);
for (size_t i = 0; i < count; ++i) {
vector<const Hypothesis*> path;
path.push_back(startHypo);
@ -365,9 +365,9 @@ void Manager::CalcLatticeSamples(size_t count, TrellisPathList &ret) const {
for (set<const Hypothesis*>::const_iterator j = outIter->second.begin();
j != outIter->second.end(); ++j) {
candidates.push_back(*j);
assert(sigmas.find(*j) != sigmas.end());
CHECK(sigmas.find(*j) != sigmas.end());
Edge edge(path.back()->GetId(),(*j)->GetId());
assert(edgeScores.find(edge) != edgeScores.end());
CHECK(edgeScores.find(edge) != edgeScores.end());
candidateScores.push_back(sigmas[*j] + edgeScores[edge]);
if (scoreTotal == 0) {
scoreTotal = candidateScores.back();
@ -875,7 +875,7 @@ void Manager::SerializeSearchGraphPB(
ArcList::const_iterator iterArcList;
for (iterArcList = arcList->begin() ; iterArcList != arcList->end() ; ++iterArcList) {
const Hypothesis *loserHypo = *iterArcList;
assert(connected[loserHypo->GetId()]);
CHECK(connected[loserHypo->GetId()]);
Hypergraph_Edge* edge = hg.add_edges();
SerializeEdgeInfo(loserHypo, edge);
edge->set_head_node(headNodeIdx);

@ -100,7 +100,7 @@ public:
}

void CleanUp() {
assert(m_dict);
CHECK(m_dict);
m_dict->FreeMemory();
for(size_t i=0; i<m_tgtColls.size(); ++i) delete m_tgtColls[i];
m_tgtColls.clear();
@ -111,7 +111,7 @@ public:

void AddEquivPhrase(const Phrase &source, const TargetPhrase &targetPhrase) {
std::cerr << "AddEquivPhrase(const Phrase &source, const TargetPhrase &targetPhrase)" << std::endl;
assert(GetTargetPhraseCollection(source)==0);
CHECK(GetTargetPhraseCollection(source)==0);

VERBOSE(2, "adding unk source phrase "<<source<<"\n");
std::pair<MapSrc2Tgt::iterator,bool> p
@ -127,7 +127,7 @@ public:
TargetPhraseCollection const*
GetTargetPhraseCollection(Phrase const &src) const {

assert(m_dict);
CHECK(m_dict);
if(src.GetSize()==0) return 0;

std::pair<MapSrc2Tgt::iterator,bool> piter;
@ -281,7 +281,7 @@ public:

for(size_t k=0; k<factorStrings.size(); ++k) {
std::vector<std::string> factors=TokenizeMultiCharSeparator(*factorStrings[k],StaticData::Instance().GetFactorDelimiter());
assert(factors.size()==m_output.size());
CHECK(factors.size()==m_output.size());
Word& w=targetPhrase.AddWord();
for(size_t l=0; l<m_output.size(); ++l) {
w[m_output[l]]= factorCollection.AddFactor(Output, m_output[l], factors[l]);
@ -326,7 +326,7 @@ public:
};

void CacheSource(ConfusionNet const& src) {
assert(m_dict);
CHECK(m_dict);
const size_t srcSize=src.GetSize();

std::vector<size_t> exploredPaths(srcSize+1,0);
@ -373,7 +373,7 @@ public:
State curr(stack.back());
stack.pop_back();

assert(curr.end()<srcSize);
CHECK(curr.end()<srcSize);
const ConfusionNet::Column &currCol=src[curr.end()];
// in a given column, loop over all possibilities
for(size_t colidx=0; colidx<currCol.size(); ++colidx) {
@ -383,7 +383,7 @@ public:
bool isEpsilon=(s=="" || s==EPSILON);

//assert that we have the right number of link params in this CN option
assert(currCol[colidx].second.size() >= m_numInputScores);
CHECK(currCol[colidx].second.size() >= m_numInputScores);

// do not start with epsilon (except at first position)
if(isEpsilon && curr.begin()==curr.end() && curr.begin()>0) continue;
@ -444,7 +444,7 @@ public:
//put in phrase table scores, logging as we insert
std::transform(tcands[i].second.begin(),tcands[i].second.end(),nscores.begin() + m_numInputScores,TransformScore);

assert(nscores.size()==m_weights.size());
CHECK(nscores.size()==m_weights.size());

//tally up
float score=std::inner_product(nscores.begin(), nscores.end(), m_weights.begin(), 0.0f);
@ -485,10 +485,10 @@ public:
m_rangeCache.resize(src.GetSize(),vTPC(src.GetSize(),0));

for(std::map<Range,E2Costs>::const_iterator i=cov2cand.begin(); i!=cov2cand.end(); ++i) {
assert(i->first.first<m_rangeCache.size());
assert(i->first.second>0);
assert(static_cast<size_t>(i->first.second-1)<m_rangeCache[i->first.first].size());
assert(m_rangeCache[i->first.first][i->first.second-1]==0);
CHECK(i->first.first<m_rangeCache.size());
CHECK(i->first.second>0);
CHECK(static_cast<size_t>(i->first.second-1)<m_rangeCache[i->first.first].size());
CHECK(m_rangeCache[i->first.first][i->first.second-1]==0);

std::vector<TargetPhrase> tCands;
tCands.reserve(i->second.size());

@ -20,7 +20,7 @@ License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/

#include <cassert>
#include "util/check.hh"
#include <algorithm>
#include <sstream>
#include <string>
@ -74,7 +74,7 @@ Phrase::~Phrase()

void Phrase::MergeFactors(const Phrase &copy)
{
assert(GetSize() == copy.GetSize());
CHECK(GetSize() == copy.GetSize());
size_t size = GetSize();
const size_t maxNumFactors = StaticData::Instance().GetMaxNumFactors(this->GetDirection());
for (size_t currPos = 0 ; currPos < size ; currPos++) {
@ -89,14 +89,14 @@ void Phrase::MergeFactors(const Phrase &copy)

void Phrase::MergeFactors(const Phrase &copy, FactorType factorType)
{
assert(GetSize() == copy.GetSize());
CHECK(GetSize() == copy.GetSize());
for (size_t currPos = 0 ; currPos < GetSize() ; currPos++)
SetFactor(currPos, factorType, copy.GetFactor(currPos, factorType));
}

void Phrase::MergeFactors(const Phrase &copy, const std::vector<FactorType>& factorVec)
{
assert(GetSize() == copy.GetSize());
CHECK(GetSize() == copy.GetSize());
for (size_t currPos = 0 ; currPos < GetSize() ; currPos++)
for (std::vector<FactorType>::const_iterator i = factorVec.begin();
i != factorVec.end(); ++i) {
@ -199,7 +199,7 @@ void Phrase::CreateFromStringNewFormat(FactorDirection direction
isNonTerminal = true;

size_t nextPos = annotatedWord.find("[", 1);
assert(nextPos != string::npos);
CHECK(nextPos != string::npos);

if (direction == Input)
annotatedWord = annotatedWord.substr(1, nextPos - 2);
@ -216,11 +216,11 @@ void Phrase::CreateFromStringNewFormat(FactorDirection direction

// lhs
string &annotatedWord = annotatedWordVector.back();
assert(annotatedWord.substr(0, 1) == "[" && annotatedWord.substr(annotatedWord.size()-1, 1) == "]");
CHECK(annotatedWord.substr(0, 1) == "[" && annotatedWord.substr(annotatedWord.size()-1, 1) == "]");
annotatedWord = annotatedWord.substr(1, annotatedWord.size() - 2);

lhs.CreateFromString(direction, factorOrder, annotatedWord, true);
assert(lhs.IsNonTerminal());
CHECK(lhs.IsNonTerminal());
}

int Phrase::Compare(const Phrase &other) const

@ -145,7 +145,7 @@ public:
}

void RemoveWord(size_t pos) {
assert(pos < m_words.size());
CHECK(pos < m_words.size());
m_words.erase(m_words.begin() + pos);
}

@ -88,7 +88,7 @@ PhraseDictionary* PhraseDictionaryFeature::LoadPhraseTable(const TranslationSyst
}
if (staticData.GetInputType() != SentenceInput) {
UserMessage::Add("Must use binary phrase table for this input type");
assert(false);
CHECK(false);
}

PhraseDictionaryMemory* pdm = new PhraseDictionaryMemory(m_numScoreComponent,this);
@ -98,7 +98,7 @@ PhraseDictionary* PhraseDictionaryFeature::LoadPhraseTable(const TranslationSyst
, m_tableLimit
, system->GetLanguageModels()
, system->GetWeightWordPenalty());
assert(ret);
CHECK(ret);
return pdm;
} else if (m_implementation == Binary) {
PhraseDictionaryTreeAdaptor* pdta = new PhraseDictionaryTreeAdaptor(m_numScoreComponent, m_numInputScores,this);
@ -109,7 +109,7 @@ PhraseDictionary* PhraseDictionaryFeature::LoadPhraseTable(const TranslationSyst
, m_tableLimit
, system->GetLanguageModels()
, system->GetWeightWordPenalty());
assert(ret);
CHECK(ret);
return pdta;
} else if (m_implementation == SCFG) {
// memory phrase table
@ -127,7 +127,7 @@ PhraseDictionary* PhraseDictionaryFeature::LoadPhraseTable(const TranslationSyst
, m_tableLimit
, system->GetLanguageModels()
, system->GetWordPenaltyProducer());
assert(ret);
CHECK(ret);
return pdm;
} else if (m_implementation == Hiero) {
// memory phrase table
@ -145,7 +145,7 @@ PhraseDictionary* PhraseDictionaryFeature::LoadPhraseTable(const TranslationSyst
, m_tableLimit
, system->GetLanguageModels()
, system->GetWordPenaltyProducer());
assert(ret);
CHECK(ret);
return pdm;
} else if (m_implementation == ALSuffixArray) {
// memory phrase table
@ -163,7 +163,7 @@ PhraseDictionary* PhraseDictionaryFeature::LoadPhraseTable(const TranslationSyst
, m_tableLimit
, system->GetLanguageModels()
, system->GetWordPenaltyProducer());
assert(ret);
CHECK(ret);
return pdm;
} else if (m_implementation == OnDisk) {

@ -175,7 +175,7 @@ PhraseDictionary* PhraseDictionaryFeature::LoadPhraseTable(const TranslationSyst
, m_tableLimit
, system->GetLanguageModels()
, system->GetWordPenaltyProducer());
assert(ret);
CHECK(ret);
return pdta;
} else if (m_implementation == SuffixArray) {
#ifndef WIN32
@ -196,11 +196,11 @@ PhraseDictionary* PhraseDictionaryFeature::LoadPhraseTable(const TranslationSyst
std::cerr << "Suffix array phrase table loaded" << std::endl;
return pd;
#else
assert(false);
CHECK(false);
#endif
} else {
std::cerr << "Unknown phrase table type " << m_implementation << endl;
assert(false);
CHECK(false);
}
}

@ -231,7 +231,7 @@ void PhraseDictionaryFeature::InitDictionary(const TranslationSystem* system, co
}
dict = m_threadUnsafePhraseDictionary.get();
}
assert(dict);
CHECK(dict);
dict->InitializeForInput(source);
}

@ -243,7 +243,7 @@ const PhraseDictionary* PhraseDictionaryFeature::GetDictionary() const
} else {
dict = m_threadUnsafePhraseDictionary.get();
}
assert(dict);
CHECK(dict);
return dict;
}

@ -58,7 +58,7 @@ void PhraseDictionaryALSuffixArray::InitializeForInput(InputType const& source)
bool ret = loader->Load(*m_input, *m_output, inFile, *m_weight, m_tableLimit,
*m_languageModels, m_wpProducer, *this);

assert(ret);
CHECK(ret);
}

}

@ -40,7 +40,7 @@ bool PhraseDictionaryDynSuffixArray::Load(const std::vector<FactorType>& input,

void PhraseDictionaryDynSuffixArray::InitializeForInput(const InputType& input)
{
assert(&input == &input);
CHECK(&input == &input);
}

void PhraseDictionaryDynSuffixArray::CleanUp()
@ -81,7 +81,7 @@ void PhraseDictionaryDynSuffixArray::deleteSnt(unsigned /* idx */, unsigned /* n

ChartRuleLookupManager *PhraseDictionaryDynSuffixArray::CreateRuleLookupManager(const InputType&, const ChartCellCollection&)
{
assert(false);
CHECK(false);
return 0;
}

@ -67,7 +67,7 @@ public:
virtual ChartRuleLookupManager *CreateRuleLookupManager(
const InputType &,
const ChartCellCollection &) {
assert(false);
CHECK(false);
return 0;
}

@ -64,7 +64,7 @@ void PhraseDictionaryNodeSCFG::Sort(size_t tableLimit)

PhraseDictionaryNodeSCFG *PhraseDictionaryNodeSCFG::GetOrCreateChild(const Word &sourceTerm)
{
//assert(!sourceTerm.IsNonTerminal());
//CHECK(!sourceTerm.IsNonTerminal());

std::pair <TerminalMap::iterator,bool> insResult;
insResult = m_sourceTermMap.insert( std::make_pair(sourceTerm, PhraseDictionaryNodeSCFG()) );
@ -75,8 +75,8 @@ PhraseDictionaryNodeSCFG *PhraseDictionaryNodeSCFG::GetOrCreateChild(const Word

PhraseDictionaryNodeSCFG *PhraseDictionaryNodeSCFG::GetOrCreateChild(const Word &sourceNonTerm, const Word &targetNonTerm)
{
assert(sourceNonTerm.IsNonTerminal());
assert(targetNonTerm.IsNonTerminal());
CHECK(sourceNonTerm.IsNonTerminal());
CHECK(targetNonTerm.IsNonTerminal());

NonTerminalMapKey key(sourceNonTerm, targetNonTerm);
std::pair <NonTerminalMap::iterator,bool> insResult;
@ -88,7 +88,7 @@ PhraseDictionaryNodeSCFG *PhraseDictionaryNodeSCFG::GetOrCreateChild(const Word

const PhraseDictionaryNodeSCFG *PhraseDictionaryNodeSCFG::GetChild(const Word &sourceTerm) const
{
assert(!sourceTerm.IsNonTerminal());
CHECK(!sourceTerm.IsNonTerminal());

TerminalMap::const_iterator p = m_sourceTermMap.find(sourceTerm);
return (p == m_sourceTermMap.end()) ? NULL : &p->second;
@ -96,8 +96,8 @@ const PhraseDictionaryNodeSCFG *PhraseDictionaryNodeSCFG::GetChild(const Word &s

const PhraseDictionaryNodeSCFG *PhraseDictionaryNodeSCFG::GetChild(const Word &sourceNonTerm, const Word &targetNonTerm) const
{
assert(sourceNonTerm.IsNonTerminal());
assert(targetNonTerm.IsNonTerminal());
CHECK(sourceNonTerm.IsNonTerminal());
CHECK(targetNonTerm.IsNonTerminal());

NonTerminalMapKey key(sourceNonTerm, targetNonTerm);
NonTerminalMap::const_iterator p = m_nonTermMap.find(key);

@ -58,10 +58,10 @@ bool PhraseDictionaryOnDisk::Load(const std::vector<FactorType> &input
if (!m_dbWrapper.BeginLoad(filePath))
return false;

assert(m_dbWrapper.GetMisc("Version") == 3);
assert(m_dbWrapper.GetMisc("NumSourceFactors") == input.size());
assert(m_dbWrapper.GetMisc("NumTargetFactors") == output.size());
assert(m_dbWrapper.GetMisc("NumScores") == weight.size());
CHECK(m_dbWrapper.GetMisc("Version") == 3);
CHECK(m_dbWrapper.GetMisc("NumSourceFactors") == input.size());
CHECK(m_dbWrapper.GetMisc("NumTargetFactors") == output.size());
CHECK(m_dbWrapper.GetMisc("NumScores") == weight.size());

return true;
}
@ -69,13 +69,13 @@ bool PhraseDictionaryOnDisk::Load(const std::vector<FactorType> &input
//! find list of translations that can translates src. Only for phrase input
const TargetPhraseCollection *PhraseDictionaryOnDisk::GetTargetPhraseCollection(const Phrase& /* src */) const
{
assert(false);
CHECK(false);
return NULL;
}

void PhraseDictionaryOnDisk::AddEquivPhrase(const Phrase& /* source */, const TargetPhrase& /*targetPhrase */)
{
assert(false); // TODO
CHECK(false); // TODO
}

@ -89,8 +89,8 @@ PhraseDictionaryNodeSCFG &PhraseDictionarySCFG::GetOrCreateNode(const Phrase &so
// indexed by source label 1st
const Word &sourceNonTerm = word;

assert(iterAlign != target.GetAlignmentInfo().end());
assert(iterAlign->first == pos);
CHECK(iterAlign != target.GetAlignmentInfo().end());
CHECK(iterAlign->first == pos);
size_t targetNonTermInd = iterAlign->second;
++iterAlign;
const Word &targetNonTerm = target.GetWord(targetNonTermInd);
@ -100,12 +100,12 @@ PhraseDictionaryNodeSCFG &PhraseDictionarySCFG::GetOrCreateNode(const Phrase &so
currNode = currNode->GetOrCreateChild(word);
}

assert(currNode != NULL);
CHECK(currNode != NULL);
}

// finally, the source LHS
//currNode = currNode->GetOrCreateChild(sourceLHS);
//assert(currNode != NULL);
//CHECK(currNode != NULL);

return *currNode;

@ -56,14 +56,14 @@ class PhraseDictionarySCFG : public PhraseDictionary
// Required by PhraseDictionary.
const TargetPhraseCollection *GetTargetPhraseCollection(const Phrase &) const
{
assert(false);
CHECK(false);
return NULL;
}

// Required by PhraseDictionary.
void AddEquivPhrase(const Phrase &, const TargetPhrase &)
{
assert(false);
CHECK(false);
}

void InitializeForInput(const InputType& i);

@ -2,7 +2,7 @@
// vim:tabstop=2
#include "PhraseDictionaryTree.h"
#include <map>
#include <cassert>
#include "util/check.hh"
#include <sstream>
#include <iostream>
#include <fstream>
@ -190,7 +190,7 @@ public:
if(f.empty()) return;
if(f[0]>=data.size()) return;
if(!data[f[0]]) return;
assert(data[f[0]]->findKey(f[0])<data[f[0]]->size());
CHECK(data[f[0]]->findKey(f[0])<data[f[0]]->size());
OFF_T tCandOffset=data[f[0]]->find(f);
if(tCandOffset==InvalidOffT) return;
fSeek(ot,tCandOffset);
@ -202,7 +202,7 @@ public:
typedef PhraseDictionaryTree::PrefixPtr PPtr;

void GetTargetCandidates(PPtr p,TgtCands& tgtCands) {
assert(p);
CHECK(p);
if(p.imp->isRoot()) return;
OFF_T tCandOffset=p.imp->ptr()->getData(p.imp->idx);
if(tCandOffset==InvalidOffT) return;
@ -245,7 +245,7 @@ public:
}

PPtr Extend(PPtr p,const std::string& w) {
assert(p);
CHECK(p);
if(w.empty() || w==EPSILON) return p;

LabelId wi=sv->index(w);

@ -254,7 +254,7 @@ public:
else if(p.imp->isRoot()) {
if(wi<data.size() && data[wi]) {
const void* ptr = data[wi]->findKeyPtr(wi);
assert(ptr);
CHECK(ptr);
return PPtr(pPool.get(PPimp(data[wi],data[wi]->findKey(wi),0)));
}
} else if(PTF const* nextP=p.imp->ptr()->getPtr(p.imp->idx)) {
@ -478,7 +478,7 @@ int PhraseDictionaryTree::Create(std::istream& inFile,const std::string& out)
if (numElement == NOT_FOUND) {
// init numElement
numElement = tokens.size();
assert(numElement >= 3);
CHECK(numElement >= 3);
}

if (tokens.size() != numElement) {
@ -522,7 +522,7 @@ int PhraseDictionaryTree::Create(std::istream& inFile,const std::string& out)
++count;
currF=f;
// insert src phrase in prefix tree
assert(psa);
CHECK(psa);
PSA::Data& d=psa->insert(f);
if(d==InvalidOffT) d=fTell(ot);
else {
@ -560,7 +560,7 @@ int PhraseDictionaryTree::Create(std::istream& inFile,const std::string& out)
}

// insert src phrase in prefix tree
assert(psa);
CHECK(psa);
PSA::Data& d=psa->insert(f);
if(d==InvalidOffT) d=fTell(ot);
else {
@ -570,7 +570,7 @@ int PhraseDictionaryTree::Create(std::istream& inFile,const std::string& out)
}
}
tgtCands.push_back(TgtCand(e,sc, alignmentString));
assert(currFirstWord!=InvalidLabelId);
CHECK(currFirstWord!=InvalidLabelId);
}
if (PrintWordAlignment())
tgtCands.writeBinWithAlignment(ot);

|
||||
#define moses_PhraseDictionaryTreeAdaptor_h
|
||||
|
||||
#include <vector>
|
||||
#include <cassert>
|
||||
#include "util/check.hh"
|
||||
#include "TypeDef.h"
|
||||
#include "PhraseDictionaryMemory.h"
|
||||
#include "TargetPhraseCollection.h"
|
||||
@ -69,7 +69,7 @@ public:
|
||||
virtual ChartRuleLookupManager *CreateRuleLookupManager(
|
||||
const InputType &,
|
||||
const ChartCellCollection &) {
|
||||
assert(false);
|
||||
CHECK(false);
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
@ -9,7 +9,7 @@

#include <vector>
#include <algorithm>
#include <cassert>
#include "util/check.hh"
#include <deque>
#include "Util.h"
#include "FilePtr.h"

@ -143,7 +143,7 @@ void PrefixTreeMap::GetCandidates(const IPhrase& key, Candidates* cands)
if(key.empty() || key[0] >= m_Data.size() || !m_Data[key[0]]) {
return;
}
assert(m_Data[key[0]]->findKey(key[0])<m_Data[key[0]]->size());
CHECK(m_Data[key[0]]->findKey(key[0])<m_Data[key[0]]->size());

OFF_T candOffset = m_Data[key[0]]->find(key);
if(candOffset == InvalidOffT) {
@ -155,7 +155,7 @@ void PrefixTreeMap::GetCandidates(const IPhrase& key, Candidates* cands)

void PrefixTreeMap::GetCandidates(const PPimp& p, Candidates* cands)
{
assert(p.isValid());
CHECK(p.isValid());
if(p.isRoot()) {
return;
};
@ -169,7 +169,7 @@ void PrefixTreeMap::GetCandidates(const PPimp& p, Candidates* cands)

std::vector< std::string const * > PrefixTreeMap::ConvertPhrase(const IPhrase& p, unsigned int voc) const
{
assert(voc < m_Voc.size() && m_Voc[voc] != 0);
CHECK(voc < m_Voc.size() && m_Voc[voc] != 0);
std::vector< std::string const * > result;
result.reserve(p.size());
for(IPhrase::const_iterator i = p.begin(); i != p.end(); ++i) {
@ -180,7 +180,7 @@ std::vector< std::string const * > PrefixTreeMap::ConvertPhrase(const IPhrase& p

IPhrase PrefixTreeMap::ConvertPhrase(const std::vector< std::string >& p, unsigned int voc) const
{
assert(voc < m_Voc.size() && m_Voc[voc] != 0);
CHECK(voc < m_Voc.size() && m_Voc[voc] != 0);
IPhrase result;
result.reserve(p.size());
for(size_t i = 0; i < p.size(); ++i) {
@ -191,13 +191,13 @@ IPhrase PrefixTreeMap::ConvertPhrase(const std::vector< std::string >& p, unsign

LabelId PrefixTreeMap::ConvertWord(const std::string& w, unsigned int voc) const
{
assert(voc < m_Voc.size() && m_Voc[voc] != 0);
CHECK(voc < m_Voc.size() && m_Voc[voc] != 0);
return m_Voc[voc]->index(w);
}

std::string PrefixTreeMap::ConvertWord(LabelId w, unsigned int voc) const
{
assert(voc < m_Voc.size() && m_Voc[voc] != 0);
CHECK(voc < m_Voc.size() && m_Voc[voc] != 0);
if(w == PrefixTreeMap::MagicWord) {
return "|||";
} else if (w == InvalidLabelId) {
@ -214,7 +214,7 @@ PPimp* PrefixTreeMap::GetRoot()

PPimp* PrefixTreeMap::Extend(PPimp* p, LabelId wi)
{
assert(p->isValid());
CHECK(p->isValid());

if(wi == InvalidLabelId) {
return 0; // unknown word, return invalid pointer
@ -222,7 +222,7 @@ PPimp* PrefixTreeMap::Extend(PPimp* p, LabelId wi)
} else if(p->isRoot()) {
if(wi < m_Data.size() && m_Data[wi]) {
const void* ptr = m_Data[wi]->findKeyPtr(wi);
assert(ptr);
CHECK(ptr);
return m_PtrPool.get(PPimp(m_Data[wi],m_Data[wi]->findKey(wi),0));
}
} else if(PTF const* nextP = p->ptr()->getPtr(p->idx)) {

@ -31,7 +31,7 @@
#include <boost/unordered_set.hpp>
#include <boost/version.hpp>

#include <cassert>
#include "util/check.hh"
#include <queue>
#include <set>
#include <vector>
@ -93,7 +93,7 @@ class RuleCube
~RuleCube();

float GetTopScore() const {
assert(!m_queue.empty());
CHECK(!m_queue.empty());
RuleCubeItem *item = m_queue.top();
return item->GetScore();
}

@ -86,7 +86,7 @@ void RuleCubeItem::CreateHypothesis(const ChartTranslationOption &transOpt,

ChartHypothesis *RuleCubeItem::ReleaseHypothesis()
{
assert(m_hypothesis);
CHECK(m_hypothesis);
ChartHypothesis *hypo = m_hypothesis;
m_hypothesis = 0;
return hypo;
@ -98,7 +98,7 @@ void RuleCubeItem::CreateHypothesisDimensions(
const DottedRule &dottedRule,
const ChartCellCollection &allChartCells)
{
assert(!dottedRule.IsRoot());
CHECK(!dottedRule.IsRoot());

const DottedRule *prev = dottedRule.GetPrev();
if (!prev->IsRoot()) {
@ -110,12 +110,12 @@ void RuleCubeItem::CreateHypothesisDimensions(
// get a sorted list of the underlying hypotheses
const ChartCellLabel &cellLabel = dottedRule.GetChartCellLabel();
const ChartHypothesisCollection *hypoColl = cellLabel.GetStack();
assert(hypoColl);
CHECK(hypoColl);
const HypoList &hypoList = hypoColl->GetSortedHypotheses();

// there have to be hypotheses with the desired non-terminal
// (otherwise the rule would not be considered)
assert(!hypoList.empty());
CHECK(!hypoList.empty());

// create a list of hypotheses that match the non-terminal
HypothesisDimension dimension(0, hypoList);

@ -70,7 +70,7 @@ void ReformatHieroRule(int sourceTarget, string &phrase, map<size_t, pair<size_t
if (tok.substr(0, 1) == "[" && tok.substr(tokLen - 1, 1) == "]")
{ // no-term
vector<string> split = Tokenize(tok, ",");
assert(split.size() == 2);
CHECK(split.size() == 2);

tok = "[X]" + split[0] + "]";
size_t coIndex = Scan<size_t>(split[1]);
@ -202,7 +202,7 @@ bool RuleTableLoaderStandard::Load(FormatType format
UserMessage::Add(strme.str());
abort();
}
assert(scoreVector.size() == numScoreComponents);
CHECK(scoreVector.size() == numScoreComponents);

// parse source & find pt node

@ -23,7 +23,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#define moses_ScoreComponentCollection_h

#include <numeric>
#include <cassert>
#include "util/check.hh"

#include "LMList.h"
#include "ScoreProducer.h"
@ -85,7 +85,7 @@ public:

//! add the score in rhs
void PlusEquals(const ScoreComponentCollection& rhs) {
assert(m_scores.size() >= rhs.m_scores.size());
CHECK(m_scores.size() >= rhs.m_scores.size());
const size_t l = rhs.m_scores.size();
for (size_t i=0; i<l; i++) {
m_scores[i] += rhs.m_scores[i];
@ -94,7 +94,7 @@ public:

//! subtract the score in rhs
void MinusEquals(const ScoreComponentCollection& rhs) {
assert(m_scores.size() >= rhs.m_scores.size());
CHECK(m_scores.size() >= rhs.m_scores.size());
const size_t l = rhs.m_scores.size();
for (size_t i=0; i<l; i++) {
m_scores[i] -= rhs.m_scores[i];
@ -105,7 +105,7 @@ public:
//! The length of scores must be equal to the number of score components
//! produced by sp
void PlusEquals(const ScoreProducer* sp, const std::vector<float>& scores) {
assert(scores.size() == sp->GetNumScoreComponents());
CHECK(scores.size() == sp->GetNumScoreComponents());
size_t i = m_sim->GetBeginIndex(sp->GetScoreBookkeepingID());
for (std::vector<float>::const_iterator vi = scores.begin();
vi != scores.end(); ++vi) {
@ -128,13 +128,13 @@ public:
//! to add the score from a single ScoreProducer that produces
//! a single value
void PlusEquals(const ScoreProducer* sp, float score) {
assert(1 == sp->GetNumScoreComponents());
CHECK(1 == sp->GetNumScoreComponents());
const size_t i = m_sim->GetBeginIndex(sp->GetScoreBookkeepingID());
m_scores[i] += score;
}

void Assign(const ScoreProducer* sp, const std::vector<float>& scores) {
assert(scores.size() == sp->GetNumScoreComponents());
CHECK(scores.size() == sp->GetNumScoreComponents());
size_t i = m_sim->GetBeginIndex(sp->GetScoreBookkeepingID());
for (std::vector<float>::const_iterator vi = scores.begin();
vi != scores.end(); ++vi) {
@ -150,7 +150,7 @@ public:
//! to add the score from a single ScoreProducer that produces
//! a single value
void Assign(const ScoreProducer* sp, float score) {
assert(1 == sp->GetNumScoreComponents());
CHECK(1 == sp->GetNumScoreComponents());
const size_t i = m_sim->GetBeginIndex(sp->GetScoreBookkeepingID());
m_scores[i] = score;
}
@ -163,7 +163,7 @@ public:

float PartialInnerProduct(const ScoreProducer* sp, const std::vector<float>& rhs) const {
std::vector<float> lhs = GetScoresForProducer(sp);
assert(lhs.size() == rhs.size());
CHECK(lhs.size() == rhs.size());
return std::inner_product(lhs.begin(), lhs.end(), rhs.begin(), 0.0f);
}

@ -187,7 +187,7 @@ public:
const size_t begin = m_sim->GetBeginIndex(id);
#ifndef NDEBUG
const size_t end = m_sim->GetEndIndex(id);
assert(end-begin == 1);
CHECK(end-begin == 1);
#endif
return m_scores[begin];
}

@ -4,7 +4,7 @@
#include <iomanip>
#include <string>
#include <cstdio>
#include <cassert>
#include "util/check.hh"
#include "Util.h"
#include "StaticData.h"
#include "ScoreIndexManager.h"
@ -19,13 +19,13 @@ void ScoreIndexManager::AddScoreProducer(const ScoreProducer* sp)
{
// Producers must be inserted in the order they are created
const_cast<ScoreProducer*>(sp)->CreateScoreBookkeepingID();
assert(m_begins.size() == (sp->GetScoreBookkeepingID()));
CHECK(m_begins.size() == (sp->GetScoreBookkeepingID()));

m_producers.push_back(sp);

m_begins.push_back(m_last);
size_t numScoreCompsProduced = sp->GetNumScoreComponents();
assert(numScoreCompsProduced > 0);
CHECK(numScoreCompsProduced > 0);
m_last += numScoreCompsProduced;
m_ends.push_back(m_last);
VERBOSE(3,"Added ScoreProducer(" << sp->GetScoreBookkeepingID()
@ -42,7 +42,7 @@ void ScoreIndexManager::PrintLabeledScores(std::ostream& os, const ScoreComponen

void ScoreIndexManager::PrintLabeledWeightedScores(std::ostream& os, const ScoreComponentCollection& scores, const std::vector<float>& weights) const
{
assert(m_featureShortNames.size() == weights.size());
CHECK(m_featureShortNames.size() == weights.size());
string lastName = "";
for (size_t i = 0; i < m_featureShortNames.size(); ++i) {
if (i>0) {
@ -102,9 +102,9 @@ void ScoreIndexManager::SerializeFeatureNamesToPB(hgmert::Hypergraph* hg) const

void ScoreIndexManager::InitWeightVectorFromFile(const std::string& fnam, vector<float>* m_allWeights) const
{
assert(m_allWeights->size() == m_featureNames.size());
CHECK(m_allWeights->size() == m_featureNames.size());
ifstream in(fnam.c_str());
assert(in.good());
CHECK(in.good());
char buf[2000];
map<string, double> name2val;
while (!in.eof()) {
@ -116,10 +116,10 @@ void ScoreIndexManager::InitWeightVectorFromFile(const std::string& fnam, vector
double val;
is >> fname >> val;
map<string, double>::iterator i = name2val.find(fname);
assert(i == name2val.end()); // duplicate weight name
CHECK(i == name2val.end()); // duplicate weight name
name2val[fname] = val;
}
assert(m_allWeights->size() == m_featureNames.size());
CHECK(m_allWeights->size() == m_featureNames.size());
for (size_t i = 0; i < m_featureNames.size(); ++i) {
map<string, double>::iterator iter = name2val.find(m_featureNames[i]);
if (iter == name2val.end()) {

@ -37,7 +37,7 @@ Sentence::Sentence(FactorDirection direction)
: Phrase(direction, 0)
, InputType()
{
assert(direction == Input);
CHECK(direction == Input);
const StaticData& staticData = StaticData::Instance();
if (staticData.GetSearchAlgorithm() == ChartDecoding) {
m_defaultLabelSet.insert(StaticData::Instance().GetInputDefaultNonTerminal());
@ -176,7 +176,7 @@ Sentence::CreateTranslationOptionCollection(const TranslationSystem* system) con
size_t maxNoTransOptPerCoverage = StaticData::Instance().GetMaxNoTransOptPerCoverage();
float transOptThreshold = StaticData::Instance().GetTranslationOptionThreshold();
TranslationOptionCollection *rv= new TranslationOptionCollectionText(system, *this, maxNoTransOptPerCoverage, transOptThreshold);
assert(rv);
CHECK(rv);
return rv;
}
void Sentence::Print(std::ostream& out) const

@ -21,7 +21,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/

#include <string>
#include <cassert>
#include "util/check.hh"
#include "PhraseDictionaryMemory.h"
#include "DecodeStepTranslation.h"
#include "DecodeStepGeneration.h"
@ -465,7 +465,7 @@ bool StaticData::LoadData(Parameter *parameter)
phrase.CreateFromString(GetOutputFactorOrder(), vecStr[1], GetFactorDelimiter());
m_constraints.insert(make_pair(sentenceID,phrase));
} else {
assert(false);
CHECK(false);
}
}
}
@ -916,13 +916,13 @@ bool StaticData::LoadGenerationTables()
VERBOSE(1, filePath << endl);

m_generationDictionary.push_back(new GenerationDictionary(numFeatures, m_scoreIndexManager, input,output));
assert(m_generationDictionary.back() && "could not create GenerationDictionary");
CHECK(m_generationDictionary.back() && "could not create GenerationDictionary");
if (!m_generationDictionary.back()->Load(filePath, Output)) {
delete m_generationDictionary.back();
return false;
}
for(size_t i = 0; i < numFeatures; i++) {
assert(currWeightNum < weight.size());
CHECK(currWeightNum < weight.size());
m_allWeights.push_back(weight[currWeightNum++]);
}
}
@ -940,7 +940,7 @@ bool StaticData::LoadPhraseTables()
VERBOSE(2,"Creating phrase table features" << endl);

// language models must be loaded prior to loading phrase tables
assert(m_fLMsLoaded);
CHECK(m_fLMsLoaded);
// load phrase translation tables
if (m_parameter->GetParam("ttable-file").size() > 0) {
// weights
@ -987,7 +987,7 @@ bool StaticData::LoadPhraseTables()
} else
implementation = (PhraseTableImplementation) Scan<int>(token[0]);

assert(token.size() >= 5);
CHECK(token.size() >= 5);
//characteristics of the phrase table

vector<FactorType> input = Tokenize<FactorType>(token[1], ",")
@ -998,7 +998,7 @@ bool StaticData::LoadPhraseTables()
size_t numScoreComponent = Scan<size_t>(token[3]);
string filePath= token[4];

assert(weightAll.size() >= weightAllOffset + numScoreComponent);
CHECK(weightAll.size() >= weightAllOffset + numScoreComponent);

// weights for this phrase dictionary
// first InputScores (if any), then translation scores
@ -1063,7 +1063,7 @@ bool StaticData::LoadPhraseTables()
alignmentsFile= token[6];
}

assert(numScoreComponent==weight.size());
CHECK(numScoreComponent==weight.size());

std::copy(weight.begin(),weight.end(),std::back_inserter(m_allWeights));

@ -1131,7 +1131,7 @@ void StaticData::LoadNonTerminals()
string line;
while(getline(inStream, line)) {
vector<string> tokens = Tokenize(line);
assert(tokens.size() == 2);
CHECK(tokens.size() == 2);
UnknownLHSEntry entry(tokens[0], Scan<float>(tokens[1]));
m_unknownLHS.push_back(entry);
}
@ -1191,7 +1191,7 @@ bool StaticData::LoadDecodeGraphs()
// For specifying multiple translation model
decodeGraphInd = Scan<size_t>(token[0]);
//the vectorList index can only increment by one
assert(decodeGraphInd == prevDecodeGraphInd || decodeGraphInd == prevDecodeGraphInd + 1);
CHECK(decodeGraphInd == prevDecodeGraphInd || decodeGraphInd == prevDecodeGraphInd + 1);
if (decodeGraphInd > prevDecodeGraphInd) {
prev = NULL;
}
@ -1199,7 +1199,7 @@ bool StaticData::LoadDecodeGraphs()
index = Scan<size_t>(token[2]);
} else {
UserMessage::Add("Malformed mapping!");
assert(false);
CHECK(false);
}

DecodeStep* decodeStep = NULL;
@ -1210,7 +1210,7 @@ bool StaticData::LoadDecodeGraphs()
strme << "No phrase dictionary with index "
<< index << " available!";
UserMessage::Add(strme.str());
assert(false);
CHECK(false);
}
decodeStep = new DecodeStepTranslation(m_phraseDictionary[index], prev);
break;
@ -1220,16 +1220,16 @@ bool StaticData::LoadDecodeGraphs()
strme << "No generation dictionary with index "
<< index << " available!";
UserMessage::Add(strme.str());
assert(false);
CHECK(false);
}
decodeStep = new DecodeStepGeneration(m_generationDictionary[index], prev);
break;
case InsertNullFertilityWord:
assert(!"Please implement NullFertilityInsertion.");
CHECK(!"Please implement NullFertilityInsertion.");
break;
}

assert(decodeStep);
CHECK(decodeStep);
if (m_decodeGraphs.size() < decodeGraphInd + 1) {
DecodeGraph *decodeGraph;
if (m_searchAlgorithm == ChartDecoding) {
@ -1267,7 +1267,7 @@ void StaticData::SetWeightsForScoreProducer(const ScoreProducer* sp, const std::
const size_t id = sp->GetScoreBookkeepingID();
const size_t begin = m_scoreIndexManager.GetBeginIndex(id);
const size_t end = m_scoreIndexManager.GetEndIndex(id);
assert(end - begin == weights.size());
CHECK(end - begin == weights.size());
if (m_allWeights.size() < end)
m_allWeights.resize(end);
std::vector<float>::const_iterator weightIter = weights.begin();

@ -37,7 +37,7 @@ template <class MH, class MO>
std::cerr << "Reading syntactic language model files...\n";
// For each model file...
for ( int a=0, n=filePaths.size(); a<n; a++ ) { // read models
FILE* pf = fopen(filePaths[a].c_str(),"r"); //assert(pf); // Read model file
FILE* pf = fopen(filePaths[a].c_str(),"r"); //CHECK(pf); // Read model file
if(!pf){
std::cerr << "Error loading model file " << filePaths[a] << std::endl;
return;

@ -19,7 +19,7 @@ License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/

#include <cassert>
#include "util/check.hh"
#include <algorithm>
#include <boost/lexical_cast.hpp>
#include "util/tokenize_piece.hh"
@ -140,7 +140,7 @@ void TargetPhrase::SetScore(const TranslationSystem* system, const Scores &score
std::copy(allWeights.begin() +beginIndex, allWeights.begin() + endIndex,std::back_inserter(weights));

//expand the input weight vector
assert(scoreVector.size() <= prod->GetNumScoreComponents());
CHECK(scoreVector.size() <= prod->GetNumScoreComponents());
Scores sizedScoreVector = scoreVector;
sizedScoreVector.resize(prod->GetNumScoreComponents(),0.0f);

@ -152,7 +152,7 @@ void TargetPhrase::SetScore(const ScoreProducer* translationScoreProducer,
const vector<float> &weightT,
float weightWP, const LMList &languageModels)
{
assert(weightT.size() == scoreVector.size());
CHECK(weightT.size() == scoreVector.size());
// calc average score if non-best

m_transScore = std::inner_product(scoreVector.begin(), scoreVector.end(), weightT.begin(), 0.0f);
@ -205,7 +205,7 @@ void TargetPhrase::SetScoreChart(const ScoreProducer* translationScoreProducer,
,const WordPenaltyProducer* wpProducer)
{

assert(weightT.size() == scoreVector.size());
CHECK(weightT.size() == scoreVector.size());

// calc average score if non-best
m_transScore = std::inner_product(scoreVector.begin(), scoreVector.end(), weightT.begin(), 0.0f);
@ -266,7 +266,7 @@ void TargetPhrase::SetScore(const ScoreProducer* producer, const Scores &scoreVe
void TargetPhrase::SetWeights(const ScoreProducer* translationScoreProducer, const vector<float> &weightT)
{
// calling this function in case of confusion net input is undefined
assert(StaticData::Instance().GetInputType()==SentenceInput);
CHECK(StaticData::Instance().GetInputType()==SentenceInput);

/* one way to fix this, you have to make sure the weightT contains (in
addition to the usual phrase translation scaling factors) the input

@ -103,7 +103,7 @@ TranslationOption::TranslationOption(const TranslationOption &copy, const WordsR

void TranslationOption::MergeNewFeatures(const Phrase& phrase, const ScoreComponentCollection& score, const std::vector<FactorType>& featuresToAdd)
{
assert(phrase.GetSize() == m_targetPhrase.GetSize());
CHECK(phrase.GetSize() == m_targetPhrase.GetSize());
if (featuresToAdd.size() == 1) {
m_targetPhrase.MergeFactors(phrase, featuresToAdd[0]);
} else if (featuresToAdd.empty()) {

@ -565,7 +565,7 @@ void TranslationOptionCollection::CreateXmlOptionsForRange(size_t, size_t)
void TranslationOptionCollection::Add(TranslationOption *translationOption)
{
const WordsRange &coverage = translationOption->GetSourceWordsRange();
assert(coverage.GetEndPos() - coverage.GetStartPos() < m_collection[coverage.GetStartPos()].size());
CHECK(coverage.GetEndPos() - coverage.GetStartPos() < m_collection[coverage.GetStartPos()].size());
m_collection[coverage.GetStartPos()][coverage.GetEndPos() - coverage.GetStartPos()].Add(translationOption);
}

@ -636,7 +636,7 @@ TranslationOptionList &TranslationOptionCollection::GetTranslationOptionList(siz
size_t maxSizePhrase = StaticData::Instance().GetMaxPhraseLength();
maxSize = std::min(maxSize, maxSizePhrase);

assert(maxSize < m_collection[startPos].size());
CHECK(maxSize < m_collection[startPos].size());
return m_collection[startPos][maxSize];
}
const TranslationOptionList &TranslationOptionCollection::GetTranslationOptionList(size_t startPos, size_t endPos) const
@ -645,7 +645,7 @@ const TranslationOptionList &TranslationOptionCollectionLi
size_t maxSizePhrase = StaticData::Instance().GetMaxPhraseLength();
maxSize = std::min(maxSize, maxSizePhrase);

assert(maxSize < m_collection[startPos].size());
CHECK(maxSize < m_collection[startPos].size());
return m_collection[startPos][maxSize];
}
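
For reference, here is a minimal sketch of the kind of always-on check that a CHECK macro like the one used throughout this diff typically expands to. This is an assumption for illustration only; the real definition lives in util/check.hh and may differ. The key property is that, unlike assert, which compiles to a no-op when NDEBUG is defined, a macro written this way keeps firing in release builds:

// Hypothetical stand-in for util/check.hh, shown for illustration only:
// an assertion that survives NDEBUG builds and aborts with context.
#include <cstdio>
#include <cstdlib>

#define CHECK(Condition) \
  do { \
    if (!(Condition)) { \
      std::fprintf(stderr, "Check failed: %s (%s:%d)\n", \
                   #Condition, __FILE__, __LINE__); \
      std::abort(); \
    } \
  } while (0)

With an expansion like this, a call such as CHECK(m_fileSource.is_open()); still aborts with a diagnostic in an NDEBUG build, whereas the assert it replaces would have been compiled away along with its side effects.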