Merge branch 'master' of git://github.com/moses-smt/mosesdecoder

This commit is contained in:
phikoehn 2012-10-18 02:20:45 +01:00
commit 98dafc0301
344 changed files with 23422 additions and 9562 deletions

8
.gitignore vendored
View File

@ -2,8 +2,13 @@
*.lo
*.o
*.so
*.lo
*.o
*.la
*.a
*.swp
*.save
*.cmd
*~
*.gch
dist*
@ -21,6 +26,9 @@ mert/kbmira
misc/processLexicalTable
misc/processPhraseTable
misc/queryLexicalTable
mira/mira
mira/Makefile
mira/Makefile.in
misc/queryPhraseTable
moses-chart-cmd/src/moses_chart
moses-cmd/src/checkplf

3
.gitmodules vendored
View File

@ -1,3 +0,0 @@
[submodule "regression-testing/tests"]
path = regression-testing/tests
url = git@github.com:moses-smt/moses-regression-tests.git

17
Jamroot
View File

@ -50,11 +50,11 @@
#
# debug-symbols=on|off include (default) or exclude debugging
# information also known as -g
#
# --notrace compiles without TRACE macros
#
# --enable-boost-pool uses Boost pools for the memory SCFG table
#
# --enable-mpi switch on mpi
# --without-libsegfault does not link with libSegFault
#
# --max-kenlm-order maximum ngram order that kenlm can process (default 6)
@ -78,6 +78,17 @@ if [ option.get "with-tcmalloc" : : "yes" ] {
requirements += <library>tcmalloc ;
}
if [ option.get "enable-mpi" : : "yes" ] {
import mpi ;
using mpi ;
external-lib boost_mpi ;
external-lib boost_serialization ;
requirements += <define>MPI_ENABLE ;
requirements += <library>mpi ;
requirements += <library>boost_mpi ;
requirements += <library>boost_serialization ;
}
requirements += [ option.get "notrace" : <define>TRACE_ENABLE=1 ] ;
requirements += [ option.get "enable-boost-pool" : : <define>USE_BOOST_POOL ] ;
@ -101,9 +112,9 @@ project : requirements
;
#Add directories here if you want their incidental targets too (i.e. tests).
build-projects util lm mert moses-cmd/src moses-chart-cmd/src scripts regression-testing contrib/relent-filter/src ;
build-projects lm util search moses/src mert moses-cmd/src moses-chart-cmd/src mira scripts regression-testing ;
alias programs : lm//query lm//build_binary lm//kenlm_max_order moses-chart-cmd/src//moses_chart moses-cmd/src//programs OnDiskPt//CreateOnDiskPt OnDiskPt//queryOnDiskPt mert//programs contrib/server//mosesserver misc//programs symal phrase-extract phrase-extract//lexical-reordering phrase-extract//extract-ghkm phrase-extract//pcfg-extract phrase-extract//pcfg-score biconcor contrib/relent-filter/src//calcDivergence ;
alias programs : lm//programs moses-chart-cmd/src//moses_chart moses-cmd/src//programs OnDiskPt//CreateOnDiskPt OnDiskPt//queryOnDiskPt mert//programs contrib/server//mosesserver misc//programs symal phrase-extract phrase-extract//lexical-reordering phrase-extract//extract-ghkm phrase-extract//pcfg-extract phrase-extract//pcfg-score biconcor mira//mira ;
install-bin-libs programs ;
install-headers headers-base : [ path.glob-tree biconcor contrib lm mert misc moses-chart-cmd moses-cmd OnDiskPt phrase-extract symal util : *.hh *.h ] : . ;

View File

@ -44,7 +44,10 @@ int main (int argc, char * const argv[])
Moses::ResetUserTime();
Moses::PrintUserTime("Starting");
assert(argc == 8);
if (argc != 8) {
std::cerr << "Usage: " << argv[0] << " numSourceFactors numTargetFactors numScores tableLimit inputPath outputPath" << std::endl;
return 1;
}
int numSourceFactors = Moses::Scan<int>(argv[1])
, numTargetFactors = Moses::Scan<int>(argv[2])
@ -56,7 +59,6 @@ int main (int argc, char * const argv[])
const string filePath = argv[6]
,destPath = argv[7];
Moses::InputFileStream inStream(filePath);
OnDiskWrapper onDiskWrapper;
@ -78,10 +80,10 @@ int main (int argc, char * const argv[])
std::vector<float> misc(1);
SourcePhrase sourcePhrase;
TargetPhrase *targetPhrase = new TargetPhrase(numScores);
Tokenize(sourcePhrase, *targetPhrase, line, onDiskWrapper, numScores, misc);
OnDiskPt::PhrasePtr spShort = Tokenize(sourcePhrase, *targetPhrase, line, onDiskWrapper, numScores, misc);
assert(misc.size() == onDiskWrapper.GetNumCounts());
rootNode.AddTargetPhrase(sourcePhrase, targetPhrase, onDiskWrapper, tableLimit, misc);
rootNode.AddTargetPhrase(sourcePhrase, targetPhrase, onDiskWrapper, tableLimit, misc, spShort);
}
rootNode.Save(onDiskWrapper, 0, tableLimit);
@ -106,7 +108,7 @@ bool Flush(const OnDiskPt::SourcePhrase *prevSourcePhrase, const OnDiskPt::Sourc
return ret;
}
void Tokenize(SourcePhrase &sourcePhrase, TargetPhrase &targetPhrase, char *line, OnDiskWrapper &onDiskWrapper, int numScores, vector<float> &misc)
OnDiskPt::PhrasePtr Tokenize(SourcePhrase &sourcePhrase, TargetPhrase &targetPhrase, char *line, OnDiskWrapper &onDiskWrapper, int numScores, vector<float> &misc)
{
size_t scoreInd = 0;
@ -119,13 +121,17 @@ void Tokenize(SourcePhrase &sourcePhrase, TargetPhrase &targetPhrase, char *line
4 = count
*/
char *tok = strtok (line," ");
OnDiskPt::PhrasePtr out(new Phrase());
while (tok != NULL) {
if (0 == strcmp(tok, "|||")) {
++stage;
} else {
switch (stage) {
case 0: {
Tokenize(sourcePhrase, tok, true, true, onDiskWrapper);
WordPtr w = Tokenize(sourcePhrase, tok, true, true, onDiskWrapper);
if (w != NULL)
out->AddWord(w);
break;
}
case 1: {
@ -139,19 +145,34 @@ void Tokenize(SourcePhrase &sourcePhrase, TargetPhrase &targetPhrase, char *line
break;
}
case 3: {
targetPhrase.Create1AlignFromString(tok);
//targetPhrase.Create1AlignFromString(tok);
targetPhrase.CreateAlignFromString(tok);
break;
}
case 4:
++stage;
break;
case 5: {
/* case 5: {
// count info. Only store the 2nd one
float val = Moses::Scan<float>(tok);
misc[0] = val;
++stage;
break;
}*/
case 5: {
// count info. Only store the 2nd one
//float val = Moses::Scan<float>(tok);
//misc[0] = val;
++stage;
break;
}
case 6: {
// store only the 3rd one (rule count)
float val = Moses::Scan<float>(tok);
misc[0] = val;
++stage;
break;
}
default:
assert(false);
break;
@ -163,10 +184,10 @@ void Tokenize(SourcePhrase &sourcePhrase, TargetPhrase &targetPhrase, char *line
assert(scoreInd == numScores);
targetPhrase.SortAlign();
return out;
} // Tokenize()
void Tokenize(OnDiskPt::Phrase &phrase
OnDiskPt::WordPtr Tokenize(OnDiskPt::Phrase &phrase
, const std::string &token, bool addSourceNonTerm, bool addTargetNonTerm
, OnDiskPt::OnDiskWrapper &onDiskWrapper)
{
@ -180,6 +201,7 @@ void Tokenize(OnDiskPt::Phrase &phrase
nonTerm = comStr == 0;
}
OnDiskPt::WordPtr out;
if (nonTerm) {
// non-term
size_t splitPos = token.find_first_of("[", 2);
@ -187,31 +209,35 @@ void Tokenize(OnDiskPt::Phrase &phrase
if (splitPos == string::npos) {
// lhs - only 1 word
Word *word = new Word();
WordPtr word(new Word());
word->CreateFromString(wordStr, onDiskWrapper.GetVocab());
phrase.AddWord(word);
} else {
// source & target non-terms
if (addSourceNonTerm) {
Word *word = new Word();
WordPtr word(new Word());
word->CreateFromString(wordStr, onDiskWrapper.GetVocab());
phrase.AddWord(word);
phrase.AddWord(word);
}
wordStr = token.substr(splitPos, tokSize - splitPos);
if (addTargetNonTerm) {
Word *word = new Word();
WordPtr word(new Word());
word->CreateFromString(wordStr, onDiskWrapper.GetVocab());
phrase.AddWord(word);
out = word;
}
}
} else {
// term
Word *word = new Word();
WordPtr word(new Word());
word->CreateFromString(token, onDiskWrapper.GetVocab());
phrase.AddWord(word);
out = word;
}
return out;
}
void InsertTargetNonTerminals(std::vector<std::string> &sourceToks, const std::vector<std::string> &targetToks, const ::AlignType &alignments)

View File

@ -25,10 +25,10 @@
typedef std::pair<size_t, size_t> AlignPair;
typedef std::vector<AlignPair> AlignType;
void Tokenize(OnDiskPt::Phrase &phrase
OnDiskPt::WordPtr Tokenize(OnDiskPt::Phrase &phrase
, const std::string &token, bool addSourceNonTerm, bool addTargetNonTerm
, OnDiskPt::OnDiskWrapper &onDiskWrapper);
void Tokenize(OnDiskPt::SourcePhrase &sourcePhrase, OnDiskPt::TargetPhrase &targetPhrase
OnDiskPt::PhrasePtr Tokenize(OnDiskPt::SourcePhrase &sourcePhrase, OnDiskPt::TargetPhrase &targetPhrase
, char *line, OnDiskPt::OnDiskWrapper &onDiskWrapper
, int numScores
, std::vector<float> &misc);

View File

@ -30,6 +30,8 @@ using namespace std;
namespace OnDiskPt
{
int OnDiskWrapper::VERSION_NUM = 5;
OnDiskWrapper::OnDiskWrapper()
{
}
@ -163,7 +165,7 @@ void OnDiskWrapper::EndSave()
void OnDiskWrapper::SaveMisc()
{
m_fileMisc << "Version 4" << endl;
m_fileMisc << "Version " << VERSION_NUM << endl;
m_fileMisc << "NumSourceFactors " << m_numSourceFactors << endl;
m_fileMisc << "NumTargetFactors " << m_numTargetFactors << endl;
m_fileMisc << "NumScores " << m_numScores << endl;

View File

@ -50,6 +50,8 @@ protected:
bool LoadMisc();
public:
static int VERSION_NUM;
OnDiskWrapper();
~OnDiskWrapper();

View File

@ -27,27 +27,13 @@ using namespace std;
namespace OnDiskPt
{
Phrase::Phrase(const Phrase &copy)
:m_words(copy.GetSize())
{
for (size_t pos = 0; pos < copy.GetSize(); ++pos) {
const Word &oldWord = copy.GetWord(pos);
Word *newWord = new Word(oldWord);
m_words[pos] = newWord;
}
}
Phrase::~Phrase()
{
Moses::RemoveAllInColl(m_words);
}
void Phrase::AddWord(Word *word)
void Phrase::AddWord(WordPtr word)
{
m_words.push_back(word);
}
void Phrase::AddWord(Word *word, size_t pos)
void Phrase::AddWord(WordPtr word, size_t pos)
{
CHECK(pos < m_words.size());
m_words.insert(m_words.begin() + pos + 1, word);

View File

@ -20,12 +20,14 @@
***********************************************************************/
#include <vector>
#include <iostream>
#include <boost/shared_ptr.hpp>
#include "Word.h"
namespace OnDiskPt
{
class Vocab;
/** A contiguous phrase. SourcePhrase & TargetPhrase inherit from this and add the on-disk functionality
*/
class Phrase
@ -33,16 +35,14 @@ class Phrase
friend std::ostream& operator<<(std::ostream&, const Phrase&);
protected:
std::vector<Word*> m_words;
std::vector<WordPtr> m_words;
public:
Phrase()
{}
Phrase(const Phrase &copy);
virtual ~Phrase();
void AddWord(Word *word);
void AddWord(Word *word, size_t pos);
void AddWord(WordPtr word);
void AddWord(WordPtr word, size_t pos);
const Word &GetWord(size_t pos) const {
return *m_words[pos];
@ -59,4 +59,6 @@ public:
bool operator==(const Phrase &compare) const;
};
typedef boost::shared_ptr<Phrase> PhrasePtr;
}

View File

@ -58,7 +58,7 @@ PhraseNode::PhraseNode(UINT64 filePos, OnDiskWrapper &onDiskWrapper)
CHECK(filePos == (UINT64)file.tellg());
file.read((char*) &m_numChildrenLoad, sizeof(UINT64));
size_t memAlloc = GetNodeSize(m_numChildrenLoad, onDiskWrapper.GetSourceWordSize(), countSize);
m_memLoad = (char*) malloc(memAlloc);
@ -160,15 +160,15 @@ void PhraseNode::Save(OnDiskWrapper &onDiskWrapper, size_t pos, size_t tableLimi
void PhraseNode::AddTargetPhrase(const SourcePhrase &sourcePhrase, TargetPhrase *targetPhrase
, OnDiskWrapper &onDiskWrapper, size_t tableLimit
, const std::vector<float> &counts)
, const std::vector<float> &counts, OnDiskPt::PhrasePtr spShort)
{
AddTargetPhrase(0, sourcePhrase, targetPhrase, onDiskWrapper, tableLimit, counts);
AddTargetPhrase(0, sourcePhrase, targetPhrase, onDiskWrapper, tableLimit, counts, spShort);
}
void PhraseNode::AddTargetPhrase(size_t pos, const SourcePhrase &sourcePhrase
, TargetPhrase *targetPhrase, OnDiskWrapper &onDiskWrapper
, size_t tableLimit, const std::vector<float> &counts)
{
, size_t tableLimit, const std::vector<float> &counts, OnDiskPt::PhrasePtr spShort)
{
size_t phraseSize = sourcePhrase.GetSize();
if (pos < phraseSize) {
const Word &word = sourcePhrase.GetWord(pos);
@ -185,10 +185,12 @@ void PhraseNode::AddTargetPhrase(size_t pos, const SourcePhrase &sourcePhrase
m_currChild = &node;
}
node.AddTargetPhrase(pos + 1, sourcePhrase, targetPhrase, onDiskWrapper, tableLimit, counts);
// keep searching for target phrase node..
node.AddTargetPhrase(pos + 1, sourcePhrase, targetPhrase, onDiskWrapper, tableLimit, counts, spShort);
} else {
// drilled down to the right node
m_counts = counts;
targetPhrase->SetSourcePhrase(spShort);
m_targetPhraseColl.AddTargetPhrase(targetPhrase);
}
}

View File

@ -23,6 +23,7 @@
#include <map>
#include "Word.h"
#include "TargetPhraseCollection.h"
#include "Phrase.h"
namespace OnDiskPt
{
@ -51,8 +52,8 @@ protected:
void AddTargetPhrase(size_t pos, const SourcePhrase &sourcePhrase
, TargetPhrase *targetPhrase, OnDiskWrapper &onDiskWrapper
, size_t tableLimit, const std::vector<float> &counts);
size_t ReadChild(Word &wordFound, UINT64 &childFilePos, const char *mem) const;
, size_t tableLimit, const std::vector<float> &counts, OnDiskPt::PhrasePtr spShort);
size_t ReadChild(Word &wordFound, UINT64 &childFilePos, const char *mem) const;
void GetChild(Word &wordFound, UINT64 &childFilePos, size_t ind, OnDiskWrapper &onDiskWrapper) const;
public:
@ -67,7 +68,7 @@ public:
void AddTargetPhrase(const SourcePhrase &sourcePhrase, TargetPhrase *targetPhrase
, OnDiskWrapper &onDiskWrapper, size_t tableLimit
, const std::vector<float> &counts);
, const std::vector<float> &counts, OnDiskPt::PhrasePtr spShort);
UINT64 GetFilePos() const {
return m_filePos;

View File

@ -34,4 +34,5 @@ protected:
public:
};
}

View File

@ -27,6 +27,8 @@
#include "TargetPhrase.h"
#include "OnDiskWrapper.h"
#include <boost/algorithm/string.hpp>
using namespace std;
namespace OnDiskPt
@ -48,7 +50,7 @@ TargetPhrase::~TargetPhrase()
{
}
void TargetPhrase::SetLHS(Word *lhs)
void TargetPhrase::SetLHS(WordPtr lhs)
{
AddWord(lhs);
}
@ -61,6 +63,18 @@ void TargetPhrase::Create1AlignFromString(const std::string &align1Str)
m_align.push_back(pair<size_t, size_t>(alignPoints[0], alignPoints[1]) );
}
void TargetPhrase::CreateAlignFromString(const std::string &alignStr)
{
vector<std::string> alignPairs;
boost::split(alignPairs, alignStr, boost::is_any_of("\t "));
for (size_t i = 0; i < alignPairs.size(); ++i) {
vector<size_t> alignPoints;
Moses::Tokenize<size_t>(alignPoints, alignPairs[i], "-");
m_align.push_back(pair<size_t, size_t>(alignPoints[0], alignPoints[1]) );
}
}
void TargetPhrase::SetScore(float score, size_t ind)
{
CHECK(ind < m_scores.size());
@ -84,9 +98,16 @@ char *TargetPhrase::WriteToMemory(OnDiskWrapper &onDiskWrapper, size_t &memUsed)
{
size_t phraseSize = GetSize();
size_t targetWordSize = onDiskWrapper.GetTargetWordSize();
const PhrasePtr sp = GetSourcePhrase();
size_t spSize = sp->GetSize();
size_t sourceWordSize = onDiskWrapper.GetSourceWordSize();
size_t memNeeded = sizeof(UINT64) // num of words
+ targetWordSize * phraseSize; // actual words. lhs as last words
+ targetWordSize * phraseSize // actual words. lhs as last words
+ sizeof(UINT64) // num source words
+ sourceWordSize * spSize; // actual source words
memUsed = 0;
UINT64 *mem = (UINT64*) malloc(memNeeded);
@ -101,6 +122,17 @@ char *TargetPhrase::WriteToMemory(OnDiskWrapper &onDiskWrapper, size_t &memUsed)
memUsed += word.WriteToMemory((char*) currPtr);
}
// write size of source phrase and all source words
char *currPtr = (char*)mem + memUsed;
UINT64 *memTmp = (UINT64*) currPtr;
memTmp[0] = spSize;
memUsed += sizeof(UINT64);
for (size_t pos = 0; pos < spSize; ++pos) {
const Word &word = sp->GetWord(pos);
char *currPtr = (char*)mem + memUsed;
memUsed += word.WriteToMemory((char*) currPtr);
}
CHECK(memUsed == memNeeded);
return (char *) mem;
}
@ -143,9 +175,10 @@ char *TargetPhrase::WriteOtherInfoToMemory(OnDiskWrapper &onDiskWrapper, size_t
// phrase id
memcpy(mem, &m_filePos, sizeof(UINT64));
memUsed += sizeof(UINT64);
// align
memUsed += WriteAlignToMemory(mem + memUsed);
size_t tmp = WriteAlignToMemory(mem + memUsed);
memUsed += tmp;
// scores
memUsed += WriteScoresToMemory(mem + memUsed);
@ -191,7 +224,7 @@ size_t TargetPhrase::WriteScoresToMemory(char *mem) const
}
Moses::TargetPhrase *TargetPhrase::ConvertToMoses(const std::vector<Moses::FactorType> & /*inputFactors */
Moses::TargetPhrase *TargetPhrase::ConvertToMoses(const std::vector<Moses::FactorType> & inputFactors
, const std::vector<Moses::FactorType> &outputFactors
, const Vocab &vocab
, const Moses::PhraseDictionary &phraseDict
@ -214,15 +247,27 @@ Moses::TargetPhrase *TargetPhrase::ConvertToMoses(const std::vector<Moses::Facto
ret->SetScoreChart(phraseDict.GetFeature(), m_scores, weightT, lmList, wpProducer);
// alignments
int indicator[m_align.size()];
int index = 0;
std::set<std::pair<size_t, size_t> > alignmentInfo;
const PhrasePtr sp = GetSourcePhrase();
for (size_t ind = 0; ind < m_align.size(); ++ind) {
const std::pair<size_t, size_t> &entry = m_align[ind];
alignmentInfo.insert(entry);
size_t sourcePos = entry.first;
indicator[index++] = sp->GetWord(sourcePos).IsNonTerminal() ? 1: 0;
}
ret->SetAlignmentInfo(alignmentInfo);
ret->SetAlignmentInfo(alignmentInfo, indicator);
GetWord(GetSize() - 1).ConvertToMoses(outputFactors, vocab, ret->MutableTargetLHS());
// set source phrase
Moses::Phrase mosesSP(Moses::Input);
for (size_t pos = 0; pos < sp->GetSize(); ++pos) {
sp->GetWord(pos).ConvertToMoses(inputFactors, vocab, mosesSP.AddWord());
}
ret->SetSourcePhrase(mosesSP);
return ret;
}
@ -255,10 +300,23 @@ UINT64 TargetPhrase::ReadFromFile(std::fstream &fileTP)
bytesRead += sizeof(UINT64);
for (size_t ind = 0; ind < numWords; ++ind) {
Word *word = new Word();
WordPtr word(new Word());
bytesRead += word->ReadFromFile(fileTP);
AddWord(word);
}
// read source words
UINT64 numSourceWords;
fileTP.read((char*) &numSourceWords, sizeof(UINT64));
bytesRead += sizeof(UINT64);
PhrasePtr sp(new SourcePhrase());
for (size_t ind = 0; ind < numSourceWords; ++ind) {
WordPtr word( new Word());
bytesRead += word->ReadFromFile(fileTP);
sp->AddWord(word);
}
SetSourcePhrase(sp);
return bytesRead;
}

View File

@ -24,6 +24,7 @@
#include <vector>
#include "Word.h"
#include "Phrase.h"
#include "SourcePhrase.h"
namespace Moses
{
@ -50,6 +51,7 @@ class TargetPhrase: public Phrase
friend std::ostream& operator<<(std::ostream&, const TargetPhrase&);
protected:
AlignType m_align;
PhrasePtr m_sourcePhrase;
std::vector<float> m_scores;
UINT64 m_filePos;
@ -65,9 +67,17 @@ public:
TargetPhrase(const TargetPhrase &copy);
virtual ~TargetPhrase();
void SetLHS(Word *lhs);
void SetSourcePhrase(PhrasePtr p) {
m_sourcePhrase = p;
}
const PhrasePtr GetSourcePhrase() const {
return m_sourcePhrase;
}
void SetLHS(WordPtr lhs);
void Create1AlignFromString(const std::string &align1Str);
void CreateAlignFromString(const std::string &align1Str);
void SetScore(float score, size_t ind);
const AlignType &GetAlign() const {

View File

@ -82,7 +82,7 @@ void TargetPhraseCollection::Save(OnDiskWrapper &onDiskWrapper)
CollType::iterator iter;
for (iter = m_coll.begin(); iter != m_coll.end(); ++iter) {
// save phrase
TargetPhrase &targetPhrase = **iter;
TargetPhrase &targetPhrase = **iter;
targetPhrase.Save(onDiskWrapper);
// save coll
@ -154,8 +154,9 @@ void TargetPhraseCollection::ReadFromFile(size_t tableLimit, UINT64 filePos, OnD
{
fstream &fileTPColl = onDiskWrapper.GetFileTargetColl();
fstream &fileTP = onDiskWrapper.GetFileTargetInd();
size_t numScores = onDiskWrapper.GetNumScores();
UINT64 numPhrases;
@ -167,9 +168,9 @@ void TargetPhraseCollection::ReadFromFile(size_t tableLimit, UINT64 filePos, OnD
numPhrases = std::min(numPhrases, (UINT64) tableLimit);
currFilePos += sizeof(UINT64);
for (size_t ind = 0; ind < numPhrases; ++ind) {
TargetPhrase *tp = new TargetPhrase(numScores);
TargetPhrase *tp = new TargetPhrase(numScores);
UINT64 sizeOtherInfo = tp->ReadOtherInfoFromFile(currFilePos, fileTPColl);
tp->ReadFromFile(fileTP);

View File

@ -43,7 +43,7 @@ bool Vocab::Load(OnDiskWrapper &onDiskWrapper)
// create lookup
// assume contiguous vocab id
m_lookup.resize(m_vocabColl.size() + 1);
m_nextId = m_lookup.size();
m_nextId = m_lookup.size();
CollType::const_iterator iter;
for (iter = m_vocabColl.begin(); iter != m_vocabColl.end(); ++iter) {

View File

@ -86,15 +86,14 @@ size_t Word::ReadFromMemory(const char *mem)
size_t Word::ReadFromFile(std::fstream &file)
{
size_t memAlloc = sizeof(UINT64) + sizeof(char);
char *mem = (char*) malloc(memAlloc);
const size_t memAlloc = sizeof(UINT64) + sizeof(char);
char mem[sizeof(UINT64) + sizeof(char)];
file.read(mem, memAlloc);
size_t memUsed = ReadFromMemory(mem);
CHECK(memAlloc == memUsed);
free(mem);
return memUsed;
return memAlloc;
}
void Word::ConvertToMoses(

View File

@ -22,6 +22,7 @@
#include <vector>
#include <iostream>
#include <fstream>
#include <boost/shared_ptr.hpp>
#include "Vocab.h"
namespace Moses
@ -82,5 +83,7 @@ public:
bool operator==(const Word &compare) const;
};
typedef boost::shared_ptr<Word> WordPtr;
}

View File

@ -38,20 +38,20 @@ void Tokenize(OnDiskPt::Phrase &phrase
if (splitPos == string::npos) {
// lhs - only 1 word
Word *word = new Word();
WordPtr word (new Word());
word->CreateFromString(wordStr, onDiskWrapper.GetVocab());
phrase.AddWord(word);
} else {
// source & target non-terms
if (addSourceNonTerm) {
Word *word = new Word();
WordPtr word( new Word());
word->CreateFromString(wordStr, onDiskWrapper.GetVocab());
phrase.AddWord(word);
}
wordStr = token.substr(splitPos, tokSize - splitPos);
if (addTargetNonTerm) {
Word *word = new Word();
WordPtr word(new Word());
word->CreateFromString(wordStr, onDiskWrapper.GetVocab());
phrase.AddWord(word);
}
@ -59,7 +59,7 @@ void Tokenize(OnDiskPt::Phrase &phrase
}
} else {
// term
Word *word = new Word();
WordPtr word(new Word());
word->CreateFromString(token, onDiskWrapper.GetVocab());
phrase.AddWord(word);
}

View File

@ -1,16 +0,0 @@
all: suffix-test fuzzy-match fuzzy-match2

clean:
	rm -f *.o

.cpp.o:
	g++ -O6 -g -c $<

suffix-test: Vocabulary.o SuffixArray.o suffix-test.o
	g++ Vocabulary.o SuffixArray.o suffix-test.o -o suffix-test

# BUGFIX: the prerequisite was old/fuzzy-match.o while the link command
# uses fuzzy-match.o, so the object actually linked was never declared as
# a dependency (and the one built was never used). Align them.
fuzzy-match: Vocabulary.o SuffixArray.o fuzzy-match.o
	g++ Vocabulary.o SuffixArray.o fuzzy-match.o -o fuzzy-match

fuzzy-match2: Vocabulary.o SuffixArray.o fuzzy-match2.o Util.o
	g++ Vocabulary.o SuffixArray.o fuzzy-match2.o Util.o -o fuzzy-match2

View File

@ -1,29 +0,0 @@
//
// Match.h
// fuzzy-match
//
// Created by Hieu Hoang on 25/07/2012.
// Copyright 2012 __MyCompanyName__. All rights reserved.
//
#ifndef fuzzy_match_Match_h
#define fuzzy_match_Match_h
/* data structure for n-gram match between input and corpus */
/* Records one n-gram match between the input sentence and a translation-
 * memory sentence: the matched span on each side plus the edit-cost
 * bounds associated with that match. */
class Match {
public:
  int input_start;   // first matched position in the input
  int input_end;     // last matched position in the input
  int tm_start;      // first matched position in the TM sentence
  int tm_end;        // last matched position in the TM sentence
  int min_cost;      // lower bound on the edit cost
  int max_cost;      // upper bound on the edit cost
  int internal_cost; // cost incurred inside the matched span

  Match( int inputFrom, int inputTo, int tmFrom, int tmTo,
         int costLow, int costHigh, int costInternal )
  {
    input_start   = inputFrom;
    input_end     = inputTo;
    tm_start      = tmFrom;
    tm_end        = tmTo;
    min_cost      = costLow;
    max_cost      = costHigh;
    internal_cost = costInternal;
  }
};
#endif

View File

@ -1,48 +0,0 @@
//
// SentenceAlignment.h
// fuzzy-match
//
// Created by Hieu Hoang on 25/07/2012.
// Copyright 2012 __MyCompanyName__. All rights reserved.
//
#ifndef fuzzy_match_SentenceAlignment_h
#define fuzzy_match_SentenceAlignment_h
#include <sstream>
#include "Vocabulary.h"
extern Vocabulary vocabulary;
/* One target-side sentence from the translation memory together with its
 * word alignment and its occurrence count. Surface forms are recovered
 * through the global `vocabulary`. */
struct SentenceAlignment
{
  int count;                          // how often this target/alignment pair was observed
  vector< WORD_ID > target;           // target sentence as vocabulary ids
  vector< pair<int,int> > alignment;  // (source, target) alignment points

  SentenceAlignment()
  {}

  // Render the target sentence as surface words, each followed by a
  // single space (trailing space included, as callers expect).
  string getTargetString() const
  {
    stringstream out;
    for (size_t pos = 0; pos < target.size(); ++pos) {
      out << vocabulary.GetWord(target[pos]) << " ";
    }
    return out.str();
  }

  // Render the alignment points as "src-tgt " tokens (trailing space
  // included).
  string getAlignmentString() const
  {
    stringstream out;
    for (size_t pos = 0; pos < alignment.size(); ++pos) {
      out << alignment[pos].first << "-" << alignment[pos].second << " ";
    }
    return out.str();
  }
};
#endif

View File

@ -1,244 +0,0 @@
#include "SuffixArray.h"
#include <string>
#include <stdlib.h>
#include <cstring>
using namespace std;
// Build a suffix array over the tokenized corpus in `fileName`.
// Pass 1 counts words (plus one end-of-sentence marker per line) so the
// arrays can be sized; pass 2 re-reads the file and fills them; finally
// the suffix index is sorted.
SuffixArray::SuffixArray( string fileName )
{
  m_vcb.StoreIfNew( "<uNk>" );                  // reserve an id for unknown words
  m_endOfSentence = m_vcb.StoreIfNew( "<s>" );  // sentence-boundary marker

  ifstream extractFile;
  char line[LINE_MAX_LENGTH];

  // count the number of words first;
  extractFile.open(fileName.c_str());
  istream *fileP = &extractFile;
  m_size = 0;
  size_t sentenceCount = 0;
  while(!fileP->eof()) {
    SAFE_GETLINE((*fileP), line, LINE_MAX_LENGTH, '\n');
    if (fileP->eof()) break;
    vector< WORD_ID > words = m_vcb.Tokenize( line );
    m_size += words.size() + 1;   // +1 for the <s> terminator of this sentence
    sentenceCount++;
  }
  extractFile.close();
  cerr << m_size << " words (incl. sentence boundaries)" << endl;

  // allocate memory (calloc zero-fills, so untouched slots read as 0)
  m_array = (WORD_ID*) calloc( sizeof( WORD_ID ), m_size );
  m_index = (INDEX*) calloc( sizeof( INDEX ), m_size );
  m_wordInSentence = (char*) calloc( sizeof( char ), m_size );
  m_sentence = (size_t*) calloc( sizeof( size_t ), m_size );
  m_sentenceLength = (char*) calloc( sizeof( char ), sentenceCount );

  // fill the array
  int wordIndex = 0;
  int sentenceId = 0;
  extractFile.open(fileName.c_str());
  fileP = &extractFile;
  while(!fileP->eof()) {
    SAFE_GETLINE((*fileP), line, LINE_MAX_LENGTH, '\n');
    if (fileP->eof()) break;
    vector< WORD_ID > words = m_vcb.Tokenize( line );
    vector< WORD_ID >::const_iterator i;

    for( i=words.begin(); i!=words.end(); i++)
    {
      m_index[ wordIndex ] = wordIndex;   // identity permutation; Sort() reorders it
      m_sentence[ wordIndex ] = sentenceId;
      m_wordInSentence[ wordIndex ] = i-words.begin();
      m_array[ wordIndex++ ] = *i;
    }

    // close the sentence with the <s> marker.
    // NOTE(review): m_sentence / m_wordInSentence are not set for this
    // slot — they stay at the calloc'd 0; confirm callers never read them
    // for terminator positions.
    m_index[ wordIndex ] = wordIndex;
    m_array[ wordIndex++ ] = m_endOfSentence;

    m_sentenceLength[ sentenceId++ ] = words.size();
  }
  extractFile.close();
  cerr << "done reading " << wordIndex << " words, " << sentenceId << " sentences." << endl;
  // List(0,9);

  // sort
  m_buffer = (INDEX*) calloc( sizeof( INDEX ), m_size );  // scratch for the merge step
  Sort( 0, m_size-1 );
  free( m_buffer );
  cerr << "done sorting" << endl;
}
// Sort m_index[start..end] so suffixes appear in lexicographic order.
// (The original comment said "quick sort", but this is a top-down merge
// sort: split at the midpoint, sort both halves recursively, then merge
// through m_buffer and copy the merged run back into m_index.)
void SuffixArray::Sort(INDEX start, INDEX end) {
  if (start == end) return;
  INDEX mid = (start+end+1)/2;
  Sort( start, mid-1 );
  Sort( mid, end );

  // merge
  int i = start;            // cursor into the left run [start, mid)
  int j = mid;              // cursor into the right run [mid, end]
  int k = 0;                // cursor into the scratch buffer
  int length = end-start+1;
  while( k<length )
  {
    if (i == mid )          // left run exhausted: drain the right run
    {
      m_buffer[ k++ ] = m_index[ j++ ];
    }
    else if (j > end )      // right run exhausted: drain the left run
    {
      m_buffer[ k++ ] = m_index[ i++ ];
    }
    else {
      if (CompareIndex( m_index[i], m_index[j] ) < 0)
      {
        m_buffer[ k++ ] = m_index[ i++ ];
      }
      else
      {
        m_buffer[ k++ ] = m_index[ j++ ];
      }
    }
  }

  // copy the merged run back over m_index[start..end]
  memcpy( ((char*)m_index) + sizeof( INDEX ) * start,
          ((char*)m_buffer), sizeof( INDEX ) * (end-start+1) );
}
// Release every array allocated in the constructor.
// BUGFIX: the original freed only m_index and m_array, leaking
// m_wordInSentence, m_sentence and m_sentenceLength (all calloc'd in the
// constructor alongside the two that were freed).
SuffixArray::~SuffixArray()
{
  free(m_index);
  free(m_array);
  free(m_wordInSentence);
  free(m_sentence);
  free(m_sentenceLength);
}
// Lexicographically compare the suffixes starting at corpus positions a
// and b. The suffix that runs off the end of the corpus first compares
// smaller; otherwise the first differing word decides.
int SuffixArray::CompareIndex( INDEX a, INDEX b ) const
{
  INDEX i = a;
  INDEX j = b;

  // advance both cursors past the common prefix
  while (i < m_size && j < m_size && m_array[i] == m_array[j]) {
    ++i;
    ++j;
  }

  if (i == m_size) return -1;  // suffix a exhausted first => a < b
  if (j == m_size) return 1;   // suffix b exhausted first => a > b

  return CompareWord( m_array[i], m_array[j] );
}
// Order two vocabulary ids by their surface strings, with
// std::string::compare semantics (<0, 0, >0).
inline int SuffixArray::CompareWord( WORD_ID a, WORD_ID b ) const
{
  // cerr << "c(" << m_vcb.GetWord(a) << ":" << m_vcb.GetWord(b) << ")=" << m_vcb.GetWord(a).compare( m_vcb.GetWord(b) ) << endl;
  return m_vcb.GetWord(a).compare( m_vcb.GetWord(b) );
}
// Total number of occurrences of `phrase` in the whole corpus.
int SuffixArray::Count( const vector< WORD > &phrase )
{
  INDEX dummy;
  // min = m_size disables the early-out, so the full match block is counted
  return LimitedCount( phrase, m_size, dummy, dummy, 0, m_size-1 );
}
// True iff `phrase` occurs at least `min` times in the corpus.
bool SuffixArray::MinCount( const vector< WORD > &phrase, INDEX min )
{
  INDEX dummy;
  return LimitedCount( phrase, min, dummy, dummy, 0, m_size-1 ) >= min;
}
// True iff `phrase` occurs at least once in the corpus.
bool SuffixArray::Exists( const vector< WORD > &phrase )
{
  INDEX dummy;
  // LimitedCount returns 1 as soon as a single match is confirmed
  return LimitedCount( phrase, 1, dummy, dummy, 0, m_size-1 ) == 1;
}
// Count all matches of `phrase` within [search_start, search_end] of the
// sorted index, reporting the boundaries of the match block via
// firstMatch / lastMatch.
int SuffixArray::FindMatches( const vector< WORD > &phrase, INDEX &firstMatch, INDEX &lastMatch, INDEX search_start, INDEX search_end )
{
  return LimitedCount( phrase, m_size, firstMatch, lastMatch, search_start, search_end );
}
// Core lookup: count occurrences of `phrase` in the index range
// [search_start, search_end], stopping early once `min` matches are
// confirmed. On success firstMatch/lastMatch receive the boundaries of
// the contiguous match block. Returns 0 when the phrase does not occur.
// A search_end of -1 (all-ones, since INDEX is unsigned) means
// "to the end of the corpus".
int SuffixArray::LimitedCount( const vector< WORD > &phrase, INDEX min, INDEX &firstMatch, INDEX &lastMatch, INDEX search_start, INDEX search_end )
{
  // cerr << "FindFirst\n";
  INDEX start = search_start;
  INDEX end = (search_end == -1) ? (m_size-1) : search_end;
  INDEX mid = FindFirst( phrase, start, end );   // any one matching position
  // cerr << "done\n";
  if (mid == m_size) return 0; // no matches
  if (min == 1) return 1; // only existence check

  int matchCount = 1;

  //cerr << "before...\n";
  // bisect downwards from mid for the first index of the match block
  firstMatch = FindLast( phrase, mid, start, -1 );
  matchCount += mid - firstMatch;

  //cerr << "after...\n";
  // bisect upwards from mid for the last index of the match block
  lastMatch = FindLast( phrase, mid, end, 1 );
  matchCount += lastMatch - mid;

  return matchCount;
}
// From a known matching index `start`, bisect towards `end` in the given
// direction (+1 upwards, -1 downwards) for the outermost index that still
// matches `phrase`. Precondition: `start` itself matches. Since all
// matches form one contiguous block in the sorted index, the boundary is
// the match whose neighbour (in `direction`) no longer matches.
SuffixArray::INDEX SuffixArray::FindLast( const vector< WORD > &phrase, INDEX start, INDEX end, int direction )
{
  end += direction;   // widen by one so the boundary test can look past the block
  while(true)
  {
    INDEX mid = ( start + end + (direction>0 ? 0 : 1) )/2;

    int match = Match( phrase, mid );
    int matchNext = Match( phrase, mid+direction );
    //cerr << "\t" << start << ";" << mid << ";" << end << " -> " << match << "," << matchNext << endl;
    if (match == 0 && matchNext != 0) return mid;   // mid matches, neighbour doesn't: boundary

    if (match == 0) // mid point is a match
      start = mid;  // boundary lies further out
    else
      end = mid;    // overshot the block; pull back
  }
}
// Binary search [start, end] of the sorted index for any one position
// whose suffix matches `phrase`; start/end are narrowed in place so the
// caller can reuse them. Returns m_size when there is no match at all.
SuffixArray::INDEX SuffixArray::FindFirst( const vector< WORD > &phrase, INDEX &start, INDEX &end )
{
  while(true)
  {
    INDEX mid = ( start + end + 1 )/2;
    //cerr << "FindFirst(" << start << ";" << mid << ";" << end << ")\n";
    int match = Match( phrase, mid );

    if (match == 0) return mid;                      // found a matching suffix
    if (start >= end && match != 0 ) return m_size;  // range empty: no match exists

    if (match > 0)
      start = mid+1;   // phrase sorts after mid's suffix: search upper half
    else
      end = mid-1;     // phrase sorts before mid's suffix: search lower half
  }
}
// Compare `phrase` against the suffix at sorted-index rank `index`.
// Returns 0 when the phrase is a prefix of that suffix, otherwise the
// sign of the first differing word comparison.
// NOTE(review): when the suffix is shorter than the phrase the loop ends
// on the i+pos<m_size guard and 0 is returned, i.e. a phrase overlapping
// the corpus end still counts as a match — confirm this is intended.
int SuffixArray::Match( const vector< WORD > &phrase, INDEX index )
{
  INDEX pos = m_index[ index ];   // translate rank -> corpus position
  for(INDEX i=0; i<phrase.size() && i+pos<m_size; i++)
  {
    int match = CompareWord( m_vcb.GetWordID( phrase[i] ), m_array[ pos+i ] );
    // cerr << "{" << index << "+" << i << "," << pos+i << ":" << match << "}" << endl;
    if (match != 0)
      return match;
  }
  return 0;
}
// Debug helper: for each index rank in [start, end], print up to the
// first five words of that suffix, each prefixed with a space, to stdout.
void SuffixArray::List(INDEX start, INDEX end)
{
  for (INDEX rank = start; rank <= end; ++rank)
  {
    const INDEX pos = m_index[ rank ];   // corpus position of this suffix
    for (int offset = 0; offset < 5; ++offset)
    {
      if (offset + pos >= m_size)
        break;                           // suffix ends before five words
      cout << " " << m_vcb.GetWord( m_array[ pos + offset ] );
    }
  }
}

View File

@ -1,45 +0,0 @@
#include "Vocabulary.h"
#pragma once
#define LINE_MAX_LENGTH 10000
// Suffix array over a tokenized corpus, used for fast phrase lookup.
// Positions are word offsets into the concatenated corpus (sentences are
// separated by an end-of-sentence marker); m_index holds the
// lexicographically sorted permutation of all suffix start positions.
class SuffixArray
{
public:
  typedef unsigned int INDEX;

private:
  WORD_ID *m_array;        // corpus as vocabulary ids, one entry per word
  INDEX *m_index;          // sorted suffix permutation
  INDEX *m_buffer;         // scratch space used during sorting
  char *m_wordInSentence;  // position of each word within its sentence
  size_t *m_sentence;      // sentence id of each word
  char *m_sentenceLength;  // length (in words) of each sentence
  WORD_ID m_endOfSentence; // vocabulary id of the sentence separator
  Vocabulary m_vcb;
  INDEX m_size;            // total entries (words + sentence separators)

public:
  SuffixArray( string fileName );
  ~SuffixArray();

  void Sort(INDEX start, INDEX end);
  int CompareIndex( INDEX a, INDEX b ) const;
  inline int CompareWord( WORD_ID a, WORD_ID b ) const;
  // Total occurrences of `phrase` in the corpus.
  int Count( const vector< WORD > &phrase );
  // True iff `phrase` occurs at least `min` times (early-out counting).
  bool MinCount( const vector< WORD > &phrase, INDEX min );
  // True iff `phrase` occurs at least once.
  bool Exists( const vector< WORD > &phrase );
  int FindMatches( const vector< WORD > &phrase, INDEX &firstMatch, INDEX &lastMatch, INDEX search_start = 0, INDEX search_end = -1 );
  // BUGFIX: the defaults were swapped (search_start = -1, search_end = 0),
  // which disagreed both with FindMatches above and with the
  // implementation's convention that search_end == -1 means "to the end
  // of the corpus"; a call relying on the old defaults searched a
  // nonsensical range.
  int LimitedCount( const vector< WORD > &phrase, INDEX min, INDEX &firstMatch, INDEX &lastMatch, INDEX search_start = 0, INDEX search_end = -1 );
  INDEX FindFirst( const vector< WORD > &phrase, INDEX &start, INDEX &end );
  INDEX FindLast( const vector< WORD > &phrase, INDEX start, INDEX end, int direction );
  int Match( const vector< WORD > &phrase, INDEX index );
  void List( INDEX start, INDEX end );

  inline INDEX GetPosition( INDEX index ) { return m_index[ index ]; }
  inline size_t GetSentence( INDEX position ) { return m_sentence[position]; }
  inline char GetWordInSentence( INDEX position ) { return m_wordInSentence[position]; }
  inline char GetSentenceLength( size_t sentenceId ) { return m_sentenceLength[sentenceId]; }
  inline INDEX GetSize() { return m_size; }
};

View File

@ -1,147 +0,0 @@
//
// Util.cpp
// fuzzy-match
//
// Created by Hieu Hoang on 26/07/2012.
// Copyright 2012 __MyCompanyName__. All rights reserved.
//
#include <iostream>
#include <stdio.h>
#include "Util.h"
#include "SentenceAlignment.h"
#include "SuffixArray.h"
// Read `fileName` line by line and append each sentence — tokenized into
// vocabulary ids via the global `vocabulary` — to `corpus`.
// Exits the process with status 1 if the file cannot be opened.
void load_corpus( const char* fileName, vector< vector< WORD_ID > > &corpus )
{ // source
  ifstream fileStream;
  fileStream.open(fileName);
  if (!fileStream) {
    cerr << "file not found: " << fileName << endl;
    exit(1);
  }
  cerr << "loading " << fileName << endl;

  istream *fileStreamP = &fileStream;

  char line[LINE_MAX_LENGTH];
  while(true)
  {
    SAFE_GETLINE((*fileStreamP), line, LINE_MAX_LENGTH, '\n');
    if (fileStreamP->eof()) break;
    corpus.push_back( vocabulary.Tokenize( line ) );
  }
}
// Parse the target side of the translation memory. Each input line holds
// one or more target sentences separated by "|||"; every sentence starts
// with its occurrence count, followed by its words. One
// vector<SentenceAlignment> is appended to `corpus` per input line.
// Exits the process with status 1 if the file cannot be opened.
void load_target( const char* fileName, vector< vector< SentenceAlignment > > &corpus)
{
  ifstream fileStream;
  fileStream.open(fileName);
  if (!fileStream) {
    cerr << "file not found: " << fileName << endl;
    exit(1);
  }
  cerr << "loading " << fileName << endl;

  istream *fileStreamP = &fileStream;

  WORD_ID delimiter = vocabulary.StoreIfNew("|||");

  int lineNum = 0;
  char line[LINE_MAX_LENGTH];
  while(true)
  {
    SAFE_GETLINE((*fileStreamP), line, LINE_MAX_LENGTH, '\n');
    if (fileStreamP->eof()) break;

    vector<WORD_ID> toks = vocabulary.Tokenize( line );

    // start a fresh sentence group for this line
    corpus.push_back(vector< SentenceAlignment >());
    vector< SentenceAlignment > &vec = corpus.back();

    vec.push_back(SentenceAlignment());
    SentenceAlignment *sentence = &vec.back();

    // first token of each sentence is its count
    const WORD &countStr = vocabulary.GetWord(toks[0]);
    sentence->count = atoi(countStr.c_str());

    for (size_t i = 1; i < toks.size(); ++i) {
      WORD_ID wordId = toks[i];

      if (wordId == delimiter) {
        // target and alignments can have multiple sentences.
        vec.push_back(SentenceAlignment());
        sentence = &vec.back();

        // count (token immediately after the delimiter)
        ++i;

        const WORD &countStr = vocabulary.GetWord(toks[i]);
        sentence->count = atoi(countStr.c_str());
      }
      else {
        // just a normal word, add
        sentence->target.push_back(wordId);
      }
    }

    ++lineNum;
  }
}
// Load word alignments for the target sentences produced by load_target().
// Each line mirrors the target file's structure: alignment points "s-t"
// for the first target sentence, then "||| count" sections for further ones.
// `corpus` must already contain one entry per line (filled by load_target).
// Aborts the program on missing file or structural mismatch.
void load_alignment( const char* fileName, vector< vector< SentenceAlignment > > &corpus )
{
  ifstream fileStream;
  fileStream.open(fileName);
  if (!fileStream) {
    cerr << "file not found: " << fileName << endl;
    exit(1);
  }
  cerr << "loading " << fileName << endl;
  istream *fileStreamP = &fileStream;

  string delimiter = "|||";
  int lineNum = 0;
  char line[LINE_MAX_LENGTH];
  while(true)
  {
    SAFE_GETLINE((*fileStreamP), line, LINE_MAX_LENGTH, '\n');
    if (fileStreamP->eof()) break;

    // robustness fix: previously indexed corpus[lineNum] unchecked, which is
    // undefined behaviour when the alignment file is longer than the target file
    if (lineNum >= (int)corpus.size()) {
      cerr << "alignment file " << fileName << " has more lines than the target file" << endl;
      exit(1);
    }
    vector< SentenceAlignment > &vec = corpus[lineNum];
    vector<string> toks = Tokenize(line);

    // robustness fix: an empty target entry has nothing to attach alignments to
    if (vec.empty()) {
      if (!toks.empty()) {
        cerr << "line " << lineNum << ": alignments given but no target sentence" << endl;
        exit(1);
      }
      ++lineNum;
      continue;
    }

    size_t targetInd = 0;
    SentenceAlignment *sentence = &vec[targetInd];

    for (size_t i = 0; i < toks.size(); ++i) {
      string &tok = toks[i];

      if (tok == delimiter) {
        // target and alignments can have multiple sentences.
        ++targetInd;
        // robustness fix: guard against more "|||" sections than target sentences
        if (targetInd >= vec.size()) {
          cerr << "line " << lineNum << ": more alignment sections than target sentences" << endl;
          exit(1);
        }
        sentence = &vec[targetInd];
        ++i; // skip the per-sentence count token that follows the delimiter
      }
      else {
        // just a normal alignment, add
        vector<int> alignPoint = Tokenize<int>(tok, "-");
        assert(alignPoint.size() == 2);
        sentence->alignment.push_back(pair<int,int>(alignPoint[0], alignPoint[1]));
      }
    }
    ++lineNum;
  }
}

View File

@ -1,87 +0,0 @@
//
// Util.h
// fuzzy-match
//
// Created by Hieu Hoang on 25/07/2012.
// Copyright 2012 __MyCompanyName__. All rights reserved.
//
#ifndef fuzzy_match_Util_h
#define fuzzy_match_Util_h
#include <vector>
#include <sstream>
#include "Vocabulary.h"
class SentenceAlignment;

// Loaders for the translation-memory data files (defined in Util.cpp).
// Each appends one entry to `corpus` per line of the named file;
// load_alignment fills in the structure previously built by load_target.
void load_corpus( const char* fileName, std::vector< std::vector< WORD_ID > > &corpus );
void load_target( const char* fileName, std::vector< std::vector< SentenceAlignment > > &corpus);
void load_alignment( const char* fileName, std::vector< std::vector< SentenceAlignment > > &corpus );
/**
 * Concatenate the elements of `items` into one string, separated by
 * `delimiter`.  An empty vector yields the empty string.
 */
template <typename T>
std::string Join(const std::string& delimiter, const std::vector<T>& items)
{
  if (items.empty())
    return "";
  std::ostringstream joined;
  joined << items[0];
  for (std::size_t idx = 1; idx < items.size(); ++idx) {
    joined << delimiter << items[idx];
  }
  return joined.str();
}
//! convert string to variable of type T. Used to reading floats, int etc from files
template<typename T>
inline T Scan(const std::string &input)
{
  T parsed;
  std::stringstream reader(input);
  reader >> parsed;
  return parsed;
}

//! convert vectors of string to vectors of type T variables
template<typename T>
inline std::vector<T> Scan(const std::vector< std::string > &input)
{
  std::vector<T> converted;
  converted.reserve(input.size());
  for (std::size_t idx = 0; idx < input.size(); ++idx) {
    converted.push_back( Scan<T>( input[idx] ) );
  }
  return converted;
}
// Split `str` on any of the characters in `delimiters`.
// Runs of delimiters are collapsed, so no empty tokens are produced.
inline std::vector<std::string> Tokenize(const std::string& str,
                                         const std::string& delimiters = " \t")
{
  std::vector<std::string> pieces;
  // position of the first character of the current token
  std::string::size_type tokenStart = str.find_first_not_of(delimiters, 0);
  // position of the delimiter that ends the current token
  std::string::size_type tokenEnd = str.find_first_of(delimiters, tokenStart);

  while (tokenStart != std::string::npos || tokenEnd != std::string::npos) {
    pieces.push_back(str.substr(tokenStart, tokenEnd - tokenStart));
    tokenStart = str.find_first_not_of(delimiters, tokenEnd);
    tokenEnd = str.find_first_of(delimiters, tokenStart);
  }
  return pieces;
}
// Tokenize `input` on `delimiters`, then convert every token to type T,
// e.g. Tokenize<int>("3-7", "-") -> {3, 7}.
template<typename T>
inline std::vector<T> Tokenize( const std::string &input
, const std::string& delimiters = " \t")
{
std::vector<std::string> stringVector = Tokenize(input, delimiters);
return Scan<T>( stringVector );
}
#endif

View File

@ -1,45 +0,0 @@
// $Id: Vocabulary.cpp 1565 2008-02-22 14:42:01Z bojar $
#include "Vocabulary.h"
// as in beamdecoder/tables.cpp
// Split a NUL-terminated C string on spaces and tabs, intern each token
// via StoreIfNew, and return the resulting sequence of word ids.
vector<WORD_ID> Vocabulary::Tokenize( const char input[] ) {
  vector< WORD_ID > ids;
  int pos = 0;
  int wordBegin = -1; // -1 while scanning whitespace, else start of the word
  for (; input[pos] != '\0'; pos++) {
    const bool isSpace = (input[pos] == ' ' || input[pos] == '\t');
    if (isSpace) {
      if (wordBegin >= 0) {
        // a word just ended: intern it
        ids.push_back( StoreIfNew( string( input + wordBegin, pos - wordBegin ) ) );
        wordBegin = -1;
      }
    }
    else if (wordBegin < 0) {
      wordBegin = pos; // a new word starts here
    }
  }
  // flush a word that runs to the end of the string
  if (wordBegin >= 0)
    ids.push_back( StoreIfNew( string( input + wordBegin, pos - wordBegin ) ) );
  return ids;
}
// Return the id of `word`, assigning the next free id on first sight.
WORD_ID Vocabulary::StoreIfNew( const WORD& word ) {
  map<WORD, WORD_ID>::iterator found = lookup.find( word );
  if (found != lookup.end())
    return found->second;

  // unseen word: its id is the next slot in the surface-form table
  const WORD_ID freshId = vocab.size();
  vocab.push_back( word );
  lookup[ word ] = freshId;
  return freshId;
}
// Look up the id of `word`; unknown words map to id 0.
WORD_ID Vocabulary::GetWordID( const WORD &word ) {
  map<WORD, WORD_ID>::const_iterator found = lookup.find( word );
  if (found == lookup.end())
    return 0;
  return found->second;
}

View File

@ -1,40 +0,0 @@
// $Id: tables-core.h 1470 2007-10-02 21:43:54Z redpony $
#pragma once
#include <iostream>
#include <fstream>
#include <assert.h>
#include <stdlib.h>
#include <string>
#include <queue>
#include <map>
#include <cmath>
using namespace std;
// Upper bound (in chars) on a single line read via SAFE_GETLINE.
#define MAX_LENGTH 10000

// Read one line of at most _SIZE-1 chars from stream _IS into buffer _LINE,
// stopping at _DELIM.  A recoverable stream failure is cleared; a line that
// fills the whole buffer aborts the program with an error message.
#define SAFE_GETLINE(_IS, _LINE, _SIZE, _DELIM) { \
_IS.getline(_LINE, _SIZE, _DELIM); \
if(_IS.fail() && !_IS.bad() && !_IS.eof()) _IS.clear(); \
if (_IS.gcount() == _SIZE-1) { \
cerr << "Line too long! Buffer overflow. Delete lines >=" \
<< _SIZE << " chars or raise MAX_LENGTH in phrase-extract/tables-core.cpp" \
<< endl; \
exit(1); \
} \
}

// A surface word form and its integer id in the vocabulary.
typedef string WORD;
typedef unsigned int WORD_ID;
// Bidirectional mapping between surface word forms and integer ids.
class Vocabulary {
public:
map<WORD, WORD_ID> lookup; // surface form -> id
vector< WORD > vocab; // id -> surface form
WORD_ID StoreIfNew( const WORD& ); // intern a word, assigning a fresh id if unseen
WORD_ID GetWordID( const WORD& ); // id of a word, 0 if unknown
vector<WORD_ID> Tokenize( const char[] ); // split on space/tab and intern each token
// id -> surface form; casts away const so callers can bind a non-const reference
inline WORD &GetWord( WORD_ID id ) const { WORD &i = (WORD&) vocab[ id ]; return i; }
};

View File

@ -1,460 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <map>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <cstring>
#include <time.h>
#include <fstream>
#include "SentenceAlignment.h"
#include "fuzzy-match2.h"
#include "SuffixArray.h"
/** This implementation is explained in
Koehn and Senellart: "Fast Approximate String Matching
with Suffix Arrays and A* Parsing" (AMTA 2010) ***/
using namespace std;
// Command-line driver: for every input sentence, find the closest
// translation-memory sentence(s) under string edit distance, using a
// suffix array over the TM source side to propose candidate matches,
// then write match reports to stdout and extract records via create_extract.
int main(int argc, char* argv[])
{
vector< vector< WORD_ID > > source, input;
vector< vector< SentenceAlignment > > targetAndAlignment;

// parse command-line options into the global flag variables
while(1) {
static struct option long_options[] = {
{"basic", no_argument, &basic_flag, 1},
{"word", no_argument, &lsed_flag, 0},
{"unrefined", no_argument, &refined_flag, 0},
{"nolengthfilter", no_argument, &length_filter_flag, 0},
{"noparse", no_argument, &parse_flag, 0},
{"multiple", no_argument, &multiple_flag, 1},
{"minmatch", required_argument, 0, 'm'},
{0, 0, 0, 0}
};
int option_index = 0;
int c = getopt_long (argc, argv, "m:", long_options, &option_index);
if (c == -1) break;
switch (c) {
case 0:
//		if (long_options[option_index].flag != 0)
//			break;
//		printf ("option %s", long_options[option_index].name);
//		if (optarg)
//			printf (" with arg %s", optarg);
//		printf ("\n");
break;
case 'm':
min_match = atoi(optarg);
if (min_match < 1 || min_match > 100) {
cerr << "error: --minmatch must have value in range 1..100\n";
exit(1);
}
cerr << "setting min match to " << min_match << endl;
break;
default:
cerr << "usage: syntax: ./fuzzy-match input corpus [--basic] [--word] [--minmatch 1..100]\n";
exit(1);
}
}
if (lsed_flag) { cerr << "lsed\n"; }
if (basic_flag) { cerr << "basic\n"; }
if (refined_flag) { cerr << "refined\n"; }
if (length_filter_flag) { cerr << "length filter\n"; }
if (parse_flag) { cerr << "parse\n"; }
//	exit(1);

// exactly four positional arguments: input, TM source, TM target, alignment
if (optind+4 != argc) {
cerr << "syntax: ./fuzzy-match input source target alignment [--basic] [--word] [--minmatch 1..100]\n";
exit(1);
}

load_corpus(argv[optind], input);
load_corpus(argv[optind+1], source);
load_target(argv[optind+2], targetAndAlignment);
load_alignment(argv[optind+3], targetAndAlignment);

// ./fuzzy-match input corpus [-basic]

//	load_corpus("../corpus/tm.truecased.4.en", source);
//	load_corpus("../corpus/tm.truecased.4.it", target);
//	load_corpus("../evaluation/test.input.tc.4", input);

//	load_corpus("../../acquis-truecase/corpus/acquis.truecased.190.en", source);
//	load_corpus("../../acquis-truecase/evaluation/ac-test.input.tc.190", input);

//	load_corpus("../corpus/tm.truecased.16.en", source);
//	load_corpus("../evaluation/test.input.tc.16", input);

// optional brute-force baseline (no suffix array); exits when done
if (basic_flag) {
cerr << "using basic method\n";
clock_t start_main_clock2 = clock();
basic_fuzzy_match( source, input );
cerr << "total: " << (1000 * (clock()-start_main_clock2) / CLOCKS_PER_SEC) << endl;
exit(1);
}

cerr << "number of input sentences " << input.size() << endl;

cerr << "creating suffix array...\n";
//	SuffixArray suffixArray( "../corpus/tm.truecased.4.en" );
//	SuffixArray suffixArray( "../../acquis-truecase/corpus/acquis.truecased.190.en" );
SuffixArray suffixArray( argv[optind+1] );

clock_t start_main_clock = clock();

// looping through all input sentences...
cerr << "looping...\n";
for(unsigned int sentenceInd = 0; sentenceInd < input.size(); sentenceInd++)
{
clock_t start_clock = clock();
// if (i % 10 == 0) cerr << ".";

// establish some basic statistics

// int input_length = compute_length( input[i] );
int input_length = input[sentenceInd].size();
int best_cost = input_length * (100-min_match) / 100 + 1;

int match_count = 0; // how many substring matches to be considered
//cerr << endl << "sentence " << i << ", length " << input_length << ", best_cost " << best_cost << endl;

// find match ranges in suffix array
// match_range[start][len-1] = suffix-array entry range for the n-gram
// of the input that starts at `start` and has length `len`
vector< vector< pair< SuffixArray::INDEX, SuffixArray::INDEX > > > match_range;
for(size_t start=0;start<input[sentenceInd].size();start++)
{
SuffixArray::INDEX prior_first_match = 0;
SuffixArray::INDEX prior_last_match = suffixArray.GetSize()-1;
vector< string > substring;
bool stillMatched = true;
vector< pair< SuffixArray::INDEX, SuffixArray::INDEX > > matchedAtThisStart;
//cerr << "start: " << start;
for(int word=start; stillMatched && word<input[sentenceInd].size(); word++)
{
substring.push_back( vocabulary.GetWord( input[sentenceInd][word] ) );

// only look up, if needed (i.e. no unnecessary short gram lookups)
//			if (! word-start+1 <= short_match_max_length( input_length ) )
//			{
SuffixArray::INDEX first_match, last_match;
stillMatched = false;
if (suffixArray.FindMatches( substring, first_match, last_match, prior_first_match, prior_last_match ) )
{
stillMatched = true;
matchedAtThisStart.push_back( make_pair( first_match, last_match ) );
//cerr << " (" << first_match << "," << last_match << ")";
//cerr << " " << ( last_match - first_match + 1 );
prior_first_match = first_match;
prior_last_match = last_match;
}
//}
}
//cerr << endl;
match_range.push_back( matchedAtThisStart );
}

clock_t clock_range = clock();

map< int, vector< Match > > sentence_match;
map< int, int > sentence_match_word_count;

// go through all matches, longest first
for(int length = input[sentenceInd].size(); length >= 1; length--)
{
// do not create matches, if these are handled by the short match function
if (length <= short_match_max_length( input_length ) )
{
continue;
}

unsigned int count = 0;
for(int start = 0; start <= input[sentenceInd].size() - length; start++)
{
if (match_range[start].size() >= length)
{
pair< SuffixArray::INDEX, SuffixArray::INDEX > &range = match_range[start][length-1];
// cerr << " (" << range.first << "," << range.second << ")";
count += range.second - range.first + 1;

for(SuffixArray::INDEX i=range.first; i<=range.second; i++)
{
int position = suffixArray.GetPosition( i );

// sentence length mismatch
size_t sentence_id = suffixArray.GetSentence( position );
int sentence_length = suffixArray.GetSentenceLength( sentence_id );
int diff = abs( (int)sentence_length - (int)input_length );
// cerr << endl << i << "\tsentence " << sentence_id << ", length " << sentence_length;
//if (length <= 2 && input_length>=5 &&
//    sentence_match.find( sentence_id ) == sentence_match.end())
//	continue;

if (diff > best_cost)
continue;

// compute minimal cost
// a match fixes the words inside it; everything outside still
// needs at least |difference in word counts| edits on each side
int start_pos = suffixArray.GetWordInSentence( position );
int end_pos = start_pos + length-1;
// cerr << endl << "\t" << start_pos << "-" << end_pos << " (" << sentence_length << ") vs. "
// << start << "-" << (start+length-1) << " (" << input_length << ")";
// different number of prior words -> cost is at least diff
int min_cost = abs( start - start_pos );

// same number of words, but not sent. start -> cost is at least 1
if (start == start_pos && start>0)
min_cost++;

// different number of remaining words -> cost is at least diff
min_cost += abs( ( sentence_length-1 - end_pos ) -
( input_length-1 - (start+length-1) ) );

// same number of words, but not sent. end -> cost is at least 1
if ( sentence_length-1 - end_pos ==
input_length-1 - (start+length-1)
&& end_pos != sentence_length-1 )
min_cost++;

// cerr << " -> min_cost " << min_cost;
if (min_cost > best_cost)
continue;

// valid match
match_count++;

// compute maximal cost
// upper bound: replace everything outside the match
int max_cost = max( start, start_pos )
+ max( sentence_length-1 - end_pos,
input_length-1 - (start+length-1) );
// cerr << ", max_cost " << max_cost;

Match m = Match( start, start+length-1,
start_pos, start_pos+length-1,
min_cost, max_cost, 0);
sentence_match[ sentence_id ].push_back( m );
sentence_match_word_count[ sentence_id ] += length;

if (max_cost < best_cost)
{
best_cost = max_cost;
if (best_cost == 0) break;
}
//if (match_count >= MAX_MATCH_COUNT) break;
}
}
// cerr << endl;
if (best_cost == 0) break;
//if (match_count >= MAX_MATCH_COUNT) break;
}
// cerr << count << " matches at length " << length << " in " << sentence_match.size() << " tm." << endl;
if (best_cost == 0) break;
//if (match_count >= MAX_MATCH_COUNT) break;
}
cerr << match_count << " matches in " << sentence_match.size() << " sentences." << endl;

clock_t clock_matches = clock();

// consider each sentence for which we have matches
int old_best_cost = best_cost;
int tm_count_word_match = 0;
int tm_count_word_match2 = 0;
int pruned_match_count = 0;
if (short_match_max_length( input_length ))
{
init_short_matches( input[sentenceInd] );
}
vector< int > best_tm;
typedef map< int, vector< Match > >::iterator I;

clock_t clock_validation_sum = 0;

for(I tm=sentence_match.begin(); tm!=sentence_match.end(); tm++)
{
int tmID = tm->first;
int tm_length = suffixArray.GetSentenceLength(tmID);
vector< Match > &match = tm->second;
add_short_matches( match, source[tmID], input_length, best_cost );

//cerr << "match in sentence " << tmID << ": " << match.size() << " [" << tm_length << "]" << endl;

// quick look: how many words are matched
int words_matched = 0;
for(int m=0;m<match.size();m++) {

if (match[m].min_cost <= best_cost) // makes no difference
words_matched += match[m].input_end - match[m].input_start + 1;
}
if (max(input_length,tm_length) - words_matched > best_cost)
{
if (length_filter_flag) continue;
}
tm_count_word_match++;

// prune, check again how many words are matched
vector< Match > pruned = prune_matches( match, best_cost );
words_matched = 0;
for(int p=0;p<pruned.size();p++) {
words_matched += pruned[p].input_end - pruned[p].input_start + 1;
}
if (max(input_length,tm_length) - words_matched > best_cost)
{
if (length_filter_flag) continue;
}
tm_count_word_match2++;

pruned_match_count += pruned.size();
int prior_best_cost = best_cost;
int cost;

clock_t clock_validation_start = clock();
if (! parse_flag ||
pruned.size()>=10) // to prevent worst cases
{
string path;
cost = sed( input[sentenceInd], source[tmID], path, false );
if (cost <  best_cost)
{
best_cost = cost;
}
}

else
{
cost = parse_matches( pruned, input_length, tm_length, best_cost );
if (prior_best_cost != best_cost)
{
best_tm.clear();
}
}
clock_validation_sum += clock() - clock_validation_start;
if (cost == best_cost)
{
best_tm.push_back( tmID );
}
}
cerr << "reduced best cost from " << old_best_cost << " to " << best_cost << endl;
cerr << "tm considered: " << sentence_match.size()
<< " word-matched: " << tm_count_word_match
<< " word-matched2: " << tm_count_word_match2
<< " best: " << best_tm.size() << endl;

cerr << "pruned matches: " << ((float)pruned_match_count/(float)tm_count_word_match2) << endl;

// create xml and extract files
string inputStr, sourceStr;
for (size_t pos = 0; pos < input_length; ++pos) {
inputStr += vocabulary.GetWord(input[sentenceInd][pos]) + " ";
}

// do not try to find the best ... report multiple matches
if (multiple_flag) {
int input_letter_length = compute_length( input[sentenceInd] );
for(int si=0; si<best_tm.size(); si++) {
int s = best_tm[si];
string path;
unsigned int letter_cost = sed( input[sentenceInd], source[s], path, true );
// do not report multiple identical sentences, but just their count
cout << sentenceInd << " "; // sentence number
cout << letter_cost << "/" << input_letter_length << " ";
cout << "(" << best_cost <<"/" << input_length <<") ";
cout << "||| " << s << " ||| " << path << endl;

vector<WORD_ID> &sourceSentence = source[s];
vector<SentenceAlignment> &targets = targetAndAlignment[s];
create_extract(sentenceInd, best_cost, sourceSentence, targets, inputStr, path);

}
} // if (multiple_flag)
else {

// find the best matches according to letter sed
string best_path = "";
int best_match = -1;
int best_letter_cost;
if (lsed_flag) {
best_letter_cost = compute_length( input[sentenceInd] ) * min_match / 100 + 1;
for(int si=0; si<best_tm.size(); si++)
{
int s = best_tm[si];
string path;
unsigned int letter_cost = sed( input[sentenceInd], source[s], path, true );
if (letter_cost < best_letter_cost)
{
best_letter_cost = letter_cost;
best_path = path;
best_match = s;
}
}
}
// if letter sed turned off, just compute path for first match
else {
if (best_tm.size() > 0) {
string path;
sed( input[sentenceInd], source[best_tm[0]], path, false );
best_path = path;
best_match = best_tm[0];
}
}
cerr << "elapsed: " << (1000 * (clock()-start_clock) / CLOCKS_PER_SEC)
<< " ( range: " << (1000 * (clock_range-start_clock) / CLOCKS_PER_SEC)
<< " match: " << (1000 * (clock_matches-clock_range) / CLOCKS_PER_SEC)
<< " tm: " << (1000 * (clock()-clock_matches) / CLOCKS_PER_SEC)
<< " (validation: " << (1000 * (clock_validation_sum) / CLOCKS_PER_SEC) << ")"
<< " )" << endl;
if (lsed_flag) {
cout << best_letter_cost << "/" << compute_length( input[sentenceInd] ) << " (";
}
cout << best_cost <<"/" << input_length;
if (lsed_flag) 	cout << ")";
cout << " ||| " << best_match << " ||| " << best_path << endl;

// create xml & extracts
vector<WORD_ID> &sourceSentence = source[best_match];
vector<SentenceAlignment> &targets = targetAndAlignment[best_match];
create_extract(sentenceInd, best_cost, sourceSentence, targets, inputStr, best_path);

} // else if (multiple_flag)

}
cerr << "total: " << (1000 * (clock()-start_main_clock) / CLOCKS_PER_SEC) << endl;
}
// Write one extract record per target sentence of the matched TM entry to a
// temporary file (sentence number, cost, source, input, target, alignment,
// edit path, count — one field per line), then log the perl command that
// would turn that file into XML.
// NOTE(review): the command is only printed to cerr, never executed here —
// confirm whether a system() call was intended.
// NOTE(review): tmpnam() is insecure/racy (prefer mkstemp), and the temp
// file is never removed.
void create_extract(int sentenceInd, int cost, const vector< WORD_ID > &sourceSentence, const vector<SentenceAlignment> &targets, const string &inputStr, const string &path)
{
// rebuild the surface form of the TM source sentence
string sourceStr;
for (size_t pos = 0; pos < sourceSentence.size(); ++pos) {
WORD_ID wordId = sourceSentence[pos];
sourceStr += vocabulary.GetWord(wordId) + " ";
}

char *inputFileName = tmpnam(NULL);
ofstream inputFile(inputFileName);

for (size_t targetInd = 0; targetInd < targets.size(); ++targetInd) {
const SentenceAlignment &sentenceAlignment = targets[targetInd];
string targetStr = sentenceAlignment.getTargetString();
string alignStr = sentenceAlignment.getAlignmentString();

inputFile
<< sentenceInd << endl
<< cost << endl
<< sourceStr << endl
<< inputStr << endl
<< targetStr << endl
<< alignStr << endl
<< path << endl
<< sentenceAlignment.count << endl;
}

string cmd = string("perl create_xml.perl < ") + inputFileName;
cerr << cmd << endl;
inputFile.close();
}

View File

@ -1,561 +0,0 @@
//
// fuzzy-match2.h
// fuzzy-match
//
// Created by Hieu Hoang on 25/07/2012.
// Copyright 2012 __MyCompanyName__. All rights reserved.
//
#ifndef fuzzy_match_fuzzy_match2_h
#define fuzzy_match_fuzzy_match2_h
#include <string>
#include <sstream>
#include <vector>
#include "Vocabulary.h"
#include "SuffixArray.h"
#include "Util.h"
#include "Match.h"
#define MAX_MATCH_COUNT 10000000 // cap on matches per sentence (referenced only in commented-out code)

Vocabulary vocabulary; // global word <-> id mapping shared by all components

int basic_flag = false; // use the brute-force matcher instead of the suffix array
int lsed_flag = true; // rank final candidates by letter-based string edit distance
int refined_flag = true; // enable the short-match (1-gram) refinement
int length_filter_flag = true; // skip TM sentences whose length differs too much
int parse_flag = true; // use A* parsing of matches instead of full edit distance
int min_match = 70; // minimum fuzzy match score (percent)
int multiple_flag = false; // report all best matches instead of a single one
int multiple_slack = 0; // NOTE(review): not used in the visible code
int multiple_max = 100; // NOTE(review): not used in the visible code
map< WORD_ID,vector< int > > single_word_index; // input word id -> its positions in the current input sentence
// global cache for word pairs
map< pair< WORD_ID, WORD_ID >, unsigned int > lsed; // (word id, word id) -> letter edit distance

void create_extract(int sentenceInd, int cost, const vector< WORD_ID > &sourceSentence, const vector<SentenceAlignment> &targets, const string &inputStr, const string &path);
/* Letter string edit distance between two vocabulary words,
   e.g. sub 'their' to 'there' costs 2.  Results are memoised in the
   global `lsed` cache. */
unsigned int letter_sed( WORD_ID aIdx, WORD_ID bIdx )
{
  // serve repeated queries from the cache
  const pair< WORD_ID, WORD_ID > cacheKey = make_pair( aIdx, bIdx );
  map< pair< WORD_ID, WORD_ID >, unsigned int >::const_iterator hit = lsed.find( cacheKey );
  if (hit != lsed.end())
    return hit->second;

  // surface strings of the two words
  const string &a = vocabulary.GetWord( aIdx );
  const string &b = vocabulary.GetWord( bIdx );

  // dist[i][j] = edit distance between a[0..i) and b[0..j)
  vector< vector< unsigned int > > dist( a.size()+1, vector< unsigned int >( b.size()+1, 0 ) );
  for( unsigned int i=0; i<=a.size(); i++ )
    dist[i][0] = i;
  for( unsigned int j=0; j<=b.size(); j++ )
    dist[0][j] = j;

  // standard dynamic-programming recurrence
  for( unsigned int i=1; i<=a.size(); i++ ) {
    for( unsigned int j=1; j<=b.size(); j++ ) {
      unsigned int ins = dist[i-1][j] + 1;
      unsigned int del = dist[i][j-1] + 1;
      bool same = ( a[i-1] == b[j-1] );
      unsigned int diag = dist[i-1][j-1] + (same ? 0 : 1);
      unsigned int best = (ins < del) ? ins : del;
      if (diag < best)
        best = diag;
      dist[i][j] = best;
    }
  }

  // cache and return result
  unsigned int result = dist[a.size()][b.size()];
  lsed[ cacheKey ] = result;
  return result;
}
/* string edit distance implementation */
// Edit distance between the token sequences a and b.  When use_letter_sed
// is set, insertion/deletion cost the word's length in letters and
// substitution costs letter_sed(); otherwise every edit costs 1.
// The edit script is reconstructed into best_path, one char per step:
// 'I' and 'D' consume a token of a resp. b, 'S' = substitution, 'M' = match.
unsigned int sed( const vector< WORD_ID > &a, const vector< WORD_ID > &b, string &best_path, bool use_letter_sed ) {

// initialize cost and path matrices
unsigned int **cost  = (unsigned int**) calloc( sizeof( unsigned int* ), a.size()+1 );
char **path = (char**) calloc( sizeof( char* ), a.size()+1 );

// first column: delete all leading words of a
for( unsigned int i=0; i<=a.size(); i++ ) {
cost[i] = (unsigned int*) calloc( sizeof(unsigned int), b.size()+1 );
path[i] = (char*) calloc( sizeof(char), b.size()+1 );
if (i>0)
{
cost[i][0] = cost[i-1][0];
if (use_letter_sed)
{
cost[i][0] += vocabulary.GetWord( a[i-1] ).size();
}
else
{
cost[i][0]++;
}
}
else
{
cost[i][0] = 0;
}
path[i][0] = 'I';
}

// first row: insert all leading words of b
for( unsigned int j=0; j<=b.size(); j++ ) {
if (j>0)
{
cost[0][j] = cost[0][j-1];
if (use_letter_sed)
{
cost[0][j] +=	vocabulary.GetWord( b[j-1] ).size();
}
else
{
cost[0][j]++;
}
}
else
{
cost[0][j] = 0;
}
path[0][j] = 'D';
}

// core string edit distance algorithm
for( unsigned int i=1; i<=a.size(); i++ ) {
for( unsigned int j=1; j<=b.size(); j++ ) {
unsigned int ins = cost[i-1][j];
unsigned int del = cost[i][j-1];
unsigned int match;
if (use_letter_sed)
{
ins += vocabulary.GetWord( a[i-1] ).size();
del += vocabulary.GetWord( b[j-1] ).size();
match = letter_sed( a[i-1], b[j-1] );
}
else
{
ins++;
del++;
match = ( a[i-1] == b[j-1] ) ? 0 : 1;
}
unsigned int diag = cost[i-1][j-1] + match;

// keep the cheapest of the three moves and remember which it was
char action = (ins < del) ? 'I' : 'D';
unsigned int min = (ins < del) ? ins : del;
if (diag < min)
{
action = (match>0) ? 'S' : 'M';
min = diag;
}

cost[i][j] = min;
path[i][j] = action;
}
}

// construct string for best path by backtracking from the bottom-right cell
unsigned int i = a.size();
unsigned int j = b.size();
best_path = "";
while( i>0 || j>0 )
{
best_path = path[i][j] + best_path;
if (path[i][j] == 'I')
{
i--;
}
else if (path[i][j] == 'D')
{
j--;
}
else
{
i--;
j--;
}
}


// clear out memory
unsigned int final = cost[a.size()][b.size()];

for( unsigned int i=0; i<=a.size(); i++ ) {
free( cost[i] );
free( path[i] );
}
free( cost );
free( path );

// return result
return final;
}
/* utility function: compute length of sentence in characters
   (spaces do not count) */
unsigned int compute_length( const vector< WORD_ID > &sentence )
{
  unsigned int total = 0;
  for( unsigned int w = 0; w < sentence.size(); w++ )
    total += vocabulary.GetWord( sentence[w] ).size();
  return total;
}
/* brute force method: compare input to all corpus sentences */
int basic_fuzzy_match( vector< vector< WORD_ID > > source,
vector< vector< WORD_ID > > input )
{
// go through input set...
for(unsigned int i=0;i<input.size();i++)
{
bool use_letter_sed = false;
// compute sentence length and worst allowed cost
unsigned int input_length;
if (use_letter_sed)
{
input_length = compute_length( input[i] );
}
else
{
input_length = input[i].size();
}
unsigned int best_cost = input_length * (100-min_match) / 100 + 2;
string best_path = "";
int best_match = -1;
// go through all corpus sentences
for(unsigned int s=0;s<source.size();s++)
{
int source_length;
if (use_letter_sed)
{
source_length = compute_length( source[s] );
}
else
{
source_length = source[s].size();
}
int diff = abs((int)source_length - (int)input_length);
if (length_filter_flag && (diff >= best_cost))
{
continue;
}
// compute string edit distance
string path;
unsigned int cost = sed( input[i], source[s], path, use_letter_sed );
// update if new best
if (cost < best_cost)
{
best_cost = cost;
best_path = path;
best_match = s;
}
}
cout << best_cost << " ||| " << best_match << " ||| " << best_path << endl;
}
}
/* definition of short matches
   very short n-gram matches (1-grams) will not be looked up in
   the suffix array, since there are too many matches
   and for longer sentences, at least one 2-gram match must occur */
inline int short_match_max_length( int input_length )
{
  // 1-grams are handled separately only when refinement is on
  // and the sentence is long enough
  return ( refined_flag && input_length >= 5 ) ? 1 : 0;
}
/* if we have non-short matches in a sentence, we need to
take a closer look at it.
this function creates a hash map for all input words and their positions
(to be used by the next function)
(done here, because this has be done only once for an input sentence) */
void init_short_matches( const vector< WORD_ID > &input )
{
int max_length = short_match_max_length( input.size() );
if (max_length == 0)
return;
single_word_index.clear();
// store input words and their positions in hash map
for(int i=0; i<input.size(); i++)
{
if (single_word_index.find( input[i] ) == single_word_index.end())
{
vector< int > position_vector;
single_word_index[ input[i] ] = position_vector;
}
single_word_index[ input[i] ].push_back( i );
}
}
/* add all short matches to list of matches for a sentence */
// Scan the TM sentence for occurrences of input words (recorded by
// init_short_matches) and append a 1-word Match for each occurrence whose
// cost lower bound does not exceed best_cost.
void add_short_matches( vector< Match > &match, const vector< WORD_ID > &tm, int input_length, int best_cost )
{
int max_length = short_match_max_length( input_length );
if (max_length == 0)
return;

int tm_length = tm.size();
map< WORD_ID,vector< int > >::iterator input_word_hit;
for(int t_pos=0; t_pos<tm.size(); t_pos++)
{
input_word_hit = single_word_index.find( tm[t_pos] );
if (input_word_hit != single_word_index.end())
{
// the TM word occurs in the input; add a match for each input position
vector< int > &position_vector = input_word_hit->second;
for(int j=0; j<position_vector.size(); j++)
{
int &i_pos = position_vector[j];

// before match
// lower bound: difference in number of preceding words;
// upper bound: replace all preceding words
int max_cost = max( i_pos , t_pos );
int min_cost = abs( i_pos - t_pos );
if ( i_pos>0 && i_pos == t_pos )
min_cost++;

// after match
// same bounds for the words following the match
max_cost += max( (input_length-i_pos) , (tm_length-t_pos));
min_cost += abs( (input_length-i_pos) - (tm_length-t_pos));
if ( i_pos != input_length-1 && (input_length-i_pos) == (tm_length-t_pos))
min_cost++;

if (min_cost <= best_cost)
{
Match new_match( i_pos,i_pos, t_pos,t_pos, min_cost,max_cost,0 );
match.push_back( new_match );
}
}
}
}
}
/* remove matches that are subsumed by a larger match */
// Keeps only matches that (a) are not subsumed by another match of at least
// the same length sharing a start or end point in both input and tm, and
// (b) have a cost lower bound within best_cost.  Preserves the original
// (reverse) emission order.
vector< Match > prune_matches( const vector< Match > &match, int best_cost )
{
  vector< Match > kept;
  for(int i = match.size()-1; i >= 0; i--)
  {
    const Match &cand = match[i];
    bool subsumed = false;
    for(int j = match.size()-1; j >= 0; j--)
    {
      if (i == j) // do not compare match with itself
        continue;
      const Match &other = match[j];
      if ( ( cand.input_end - cand.input_start <=
             other.input_end - other.input_start ) // cand shorter than other
           && (( cand.input_start == other.input_start &&
                 cand.tm_start == other.tm_start ) ||
               ( cand.input_end == other.input_end &&
                 cand.tm_end == other.tm_end )) )
      {
        subsumed = true;
      }
    }
    if (!subsumed && cand.min_cost <= best_cost)
    {
      kept.push_back( cand );
    }
  }
  return kept;
}
/* A* parsing method to compute string edit distance */
// Combines non-overlapping matches bottom-up: multi_match[k] holds
// combinations of k+1 elementary matches; pairs of lower levels are merged
// into the next level while their cost lower bound stays within best_cost.
// Returns the tightest max-cost bound found; may also lower the caller's
// best_cost.
int parse_matches( vector< Match > &match, int input_length, int tm_length, int &best_cost )
{
// cerr << "sentence has " << match.size() << " matches, best cost: " << best_cost << ", lengths input: " << input_length << " tm: " << tm_length << endl;

// trivial cases: one match, or nothing matched at all
if (match.size() == 1)
return match[0].max_cost;
if (match.size() == 0)
return input_length+tm_length;

int this_best_cost = input_length + tm_length;
for(int i=0;i<match.size();i++)
{
this_best_cost = min( this_best_cost, match[i].max_cost );
}
// cerr << "\tthis best cost: " << this_best_cost << endl;

// bottom up combination of spans
vector< vector< Match > > multi_match;
multi_match.push_back( match );

int match_level = 1;
while(multi_match[ match_level-1 ].size()>0)
{
// init vector
vector< Match > empty;
multi_match.push_back( empty );

// each new combination pairs one span from first_level with one from
// second_level so that their sizes sum to the new level
for(int first_level = 0; first_level <= (match_level-1)/2; first_level++)
{
int second_level = match_level - first_level -1;
//cerr << "\tcombining level " << first_level << " and " << second_level << endl;

vector< Match > &first_match  = multi_match[ first_level ];
vector< Match > &second_match = multi_match[ second_level ];

for(int i1 = 0; i1 < first_match.size(); i1++) {
for(int i2 = 0; i2 < second_match.size(); i2++) {

// do not combine the same pair twice
if (first_level == second_level && i2 <= i1)
{
continue;
}

// get sorted matches (first is before second)
Match *first, *second;
if (first_match[i1].input_start < second_match[i2].input_start )
{
first = &first_match[i1];
second = &second_match[i2];
}
else
{
second = &first_match[i1];
first = &second_match[i2];
}

//cerr << "\tcombining "
//<< "(" << first->input_start << "," << first->input_end << "), "
//<< first->tm_start << " [" << first->internal_cost << "]"
//<< " with "
//<< "(" << second->input_start << "," << second->input_end << "), "
//<< second->tm_start<< " [" << second->internal_cost << "]"
//<< endl;

// do not process overlapping matches
if (first->input_end >= second->input_start)
{
continue;
}

// no overlap / mismatch in tm
if (first->tm_end >= second->tm_start)
{
continue;
}

// compute cost
int min_cost = 0;
int max_cost = 0;

// initial
// words before the first match: lower bound is the count
// difference, upper bound replaces them all
min_cost += abs( first->input_start - first->tm_start );
max_cost += max( first->input_start, first->tm_start );

// same number of words, but not sent. start -> cost is at least 1
if (first->input_start == first->tm_start && first->input_start > 0)
{
min_cost++;
}

// in-between
// words skipped between the two matches on either side
int skipped_words = second->input_start - first->input_end -1;
int skipped_words_tm = second->tm_start - first->tm_end -1;
int internal_cost = max( skipped_words, skipped_words_tm );
internal_cost += first->internal_cost + second->internal_cost;
min_cost += internal_cost;
max_cost += internal_cost;

// final
// words after the second match, analogous to the initial part
min_cost += abs( (tm_length-1 - second->tm_end) -
(input_length-1 - second->input_end) );
max_cost += max( (tm_length-1 - second->tm_end),
(input_length-1 - second->input_end) );

// same number of words, but not sent. end -> cost is at least 1
if ( ( input_length-1 - second->input_end
== tm_length-1 - second->tm_end )
&& input_length-1 != second->input_end )
{
min_cost++;
}

// cerr << "\tcost: " << min_cost << "-" << max_cost << endl;

// if worst than best cost, forget it
if (min_cost > best_cost)
{
continue;
}

// add match
Match new_match( first->input_start,
second->input_end,
first->tm_start,
second->tm_end,
min_cost,
max_cost,
internal_cost);
multi_match[ match_level ].push_back( new_match );
// cerr << "\tstored\n";

// possibly updating this_best_cost
if (max_cost < this_best_cost)
{
// cerr << "\tupdating this best cost to " << max_cost << "\n";
this_best_cost = max_cost;

// possibly updating best_cost
if (max_cost < best_cost)
{
// cerr << "\tupdating best cost to " << max_cost << "\n";
best_cost = max_cost;
}
}
}
}
}
match_level++;
}
return this_best_cost;
}
#endif

View File

@ -1,214 +0,0 @@
#!/usr/bin/perl -w
use strict;

# Build an XML-frame input file for Moses from fuzzy-match output:
# for each input sentence, look up its best translation-memory match
# (sentence id + edit path) and emit either an XML frame that reuses
# the matched target fragments, or the plain input if no match exists.
my $DEBUG = 1;

# Hard-coded experiment paths for the uniq'ed Acquis "4" setup; the
# commented alternatives below are earlier experiment configurations.
my $match_file = "tm/BEST.acquis-xml-escaped.4.uniq";
my $source_file = "data/acquis.truecased.4.en.uniq";
my $target_file = "data/acquis.truecased.4.fr.uniq.most-frequent";
my $alignment_file = "data/acquis.truecased.4.align.uniq.most-frequent";
my $out_file = "data/ac-test.input.xml.4.uniq";
my $in_file = "evaluation/ac-test.input.tc.4";
#my $match_file = "tm/BEST.acquis-xml-escaped.4";
#my $source_file = "corpus/acquis.truecased.4.en";
#my $target_file = "corpus/acquis.truecased.4.fr";
#my $alignment_file = "model/aligned.4.grow-diag-final-and";
#my $out_file = "data/ac-test.input.xml.4";
#my $in_file = "evaluation/ac-test.input.tc.4";
#my $match_file = "tm/BEST.acquis.with";
#my $source_file = "../acquis-truecase/corpus/acquis.truecased.190.en";
#my $target_file = "../acquis-truecase/corpus/acquis.truecased.190.fr";
#my $alignment_file = "../acquis-truecase/model/aligned.190.grow-diag-final-and";
#my $out_file = "data/ac-test.input.xml";
#my $in_file = "evaluation/ac-test.input.tc.1";

# slurp all supporting files into memory; chop strips trailing newlines
my @INPUT = `cat $in_file`; chop(@INPUT);
my @SOURCE = `cat $source_file`; chop(@SOURCE);
my @TARGET = `cat $target_file`; chop(@TARGET);
my @ALIGNMENT = `cat $alignment_file`; chop(@ALIGNMENT);
open(MATCH,$match_file);
open(FRAME,">$out_file");
# NOTE(review): the sentence count 4107 is hard-coded -- presumably
# the size of $in_file; confirm it matches the data set in use.
for(my $i=0;$i<4107;$i++) {
# get match data: "score ||| tm-sentence-id ||| edit-path"
my $match = <MATCH>;
chop($match);
my ($score,$sentence,$path) = split(/ \|\|\| /,$match);
# construct frame (the id range check filters the no-match sentinel)
if ($sentence < 1e9 && $sentence >= 0) {
my $frame = &create_xml($SOURCE[$sentence],
$INPUT[$i],
$TARGET[$sentence],
$ALIGNMENT[$sentence],
$path);
print FRAME $frame."\n";
}
# no frame -> output source
else {
print FRAME $INPUT[$i]."\n";
}
}
close(FRAME);
close(MATCH);
# Build an XML frame for one sentence.
#   $source    - TM source sentence (space-tokenized)
#   $input     - current input sentence
#   $target    - TM target sentence
#   $alignment - word alignment "s-t s-t ..." between source and target
#   $path      - edit path between input and source: M/S/I/D per step
# Returns a string where reusable TM target fragments are wrapped in
# <xml translation="..."> markup and mismatched input words are
# inserted at the appropriate target positions.
sub create_xml {
my ($source,$input,$target,$alignment,$path) = @_;
my @INPUT = split(/ /,$input);
my @SOURCE = split(/ /,$source);
my @TARGET = split(/ /,$target);
my %ALIGN = &create_alignment($alignment);
# %FRAME_INPUT maps a target position -> input words to insert there;
# @TARGET_BITMAP marks target words still usable (1) vs invalidated (0)
my %FRAME_INPUT;
my @TARGET_BITMAP;
foreach (@TARGET) { push @TARGET_BITMAP,1 }
### STEP 1: FIND MISMATCHES
# $s/$i track current source/input position while walking the path
my ($s,$i) = (0,0);
my $currently_matching = 0;
my ($start_s,$start_i) = (0,0);
$path .= "X"; # indicate end
print "$input\n$source\n$target\n$path\n";
for(my $p=0;$p<length($path);$p++) {
my $action = substr($path,$p,1);
# beginning of a mismatch: remember where it started
if ($currently_matching && $action ne "M" && $action ne "X") {
$start_i = $i;
$start_s = $s;
$currently_matching = 0;
}
# end of a mismatch
elsif (!$currently_matching &&
($action eq "M" || $action eq "X")) {
# remove use of affected target words
for(my $ss = $start_s; $ss<$s; $ss++) {
foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
$TARGET_BITMAP[$tt] = 0;
}
# also remove enclosed unaligned words?
}
# are there input words that need to be inserted ?
print "($start_i<$i)?\n";
if ($start_i<$i) {
# take note of input words to be inserted
my $insertion = "";
for(my $ii = $start_i; $ii<$i; $ii++) {
$insertion .= $INPUT[$ii]." ";
}
# find position for inserted input words
# find first removed target word (1000 = "none found" sentinel)
my $start_t = 1000;
for(my $ss = $start_s; $ss<$s; $ss++) {
foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
$start_t = $tt if $tt < $start_t;
}
}
# end of sentence? add to end
if ($start_t == 1000 && $i > $#INPUT) {
$start_t = $#TARGET;
}
# backtrack to previous words if unaligned
if ($start_t == 1000) {
$start_t = -1;
for(my $ss = $s-1; $start_t==-1 && $ss>=0; $ss--) {
foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
$start_t = $tt if $tt > $start_t;
}
}
}
$FRAME_INPUT{$start_t} .= $insertion;
}
$currently_matching = 1;
}
print "$action $s $i ($start_s $start_i) $currently_matching";
if ($action ne "I") {
print " ->";
foreach my $tt (keys %{${$ALIGN{'s'}}[$s]}) {
print " ".$tt;
}
}
print "\n";
# I consumes only input, D consumes only source; M/S consume both
$s++ unless $action eq "I";
$i++ unless $action eq "D";
}
print $target."\n";
foreach (@TARGET_BITMAP) { print $_; } print "\n";
foreach (sort keys %FRAME_INPUT) {
print "$_: $FRAME_INPUT{$_}\n";
}
### STEP 2: BUILD FRAME
# modify frame: walk the target bitmap, wrapping maximal runs of
# usable target words in <xml> and splicing in pending insertions
my $frame = "";
$frame = $FRAME_INPUT{-1} if defined $FRAME_INPUT{-1};
my $currently_included = 0;
my $start_t = -1;
push @TARGET_BITMAP,0; # indicate end
for(my $t=0;$t<=scalar(@TARGET);$t++) {
# beginning of tm target inclusion
if (!$currently_included && $TARGET_BITMAP[$t]) {
$start_t = $t;
$currently_included = 1;
}
# end of tm target inclusion (not included word or inserted input)
elsif ($currently_included &&
(!$TARGET_BITMAP[$t] || defined($FRAME_INPUT{$t}))) {
# add xml (unless change is at the beginning of the sentence
if ($start_t >= 0) {
my $target = "";
print "for(tt=$start_t;tt<$t+$TARGET_BITMAP[$t]);\n";
for(my $tt=$start_t;$tt<$t+$TARGET_BITMAP[$t];$tt++) {
$target .= $TARGET[$tt] . " ";
}
chop($target);
$frame .= "<xml translation=\"$target\"> x </xml> ";
}
$currently_included = 0;
}
$frame .= $FRAME_INPUT{$t} if defined $FRAME_INPUT{$t};
print "$TARGET_BITMAP[$t] $t ($start_t) $currently_included\n";
}
print $frame."\n-------------------------------------\n";
return $frame;
}
# Parse a word-alignment string ("s1-t1 s2-t2 ...") into two lookup
# structures: for each source position the set of aligned target
# positions, and vice versa. Returns a hash with keys 's' and 't'
# holding references to those arrays of hashes.
sub create_alignment {
    my $alignment_string = shift;
    my (@by_source, @by_target);
    for my $point (split / /, $alignment_string) {
        my ($src, $tgt) = split /\-/, $point;
        $by_source[$src]{$tgt}++;
        $by_target[$tgt]{$src}++;
    }
    return ( 's' => \@by_source, 't' => \@by_target );
}

View File

@ -1,982 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <vector>
#include <map>
#include <string>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <cstring>
#include <time.h>
#include "Vocabulary.h"
#include "SuffixArray.h"
/** This implementation is explained in
Koehn and Senellart: "Fast Approximate String Matching
with Suffix Arrays and A* Parsing" (AMTA 2010) ***/
using namespace std;
// Global word <-> id mapping shared by all routines in this file.
Vocabulary vocabulary;

// Command-line switches. Declared int (not bool) because getopt_long
// writes into them through int* flag pointers (see long_options in main).
int basic_flag = false;         // --basic: brute-force SED against every corpus sentence
int lsed_flag = true;           // letter-based SED for final ranking (--word turns it off)
int refined_flag = true;        // handle short 1-gram matches (--unrefined turns it off)
int length_filter_flag = true;  // --nolengthfilter turns off length-difference pruning
int parse_flag = true;          // --noparse: skip A* parsing, use full SED instead
int min_match = 70;             // --minmatch 1..100: minimum fuzzy match score in percent
int multiple_flag = false;      // --multiple: report all best matches, not just one
int multiple_slack = 0;         // appears unused in the visible code -- confirm before removing
int multiple_max = 100;         // appears unused in the visible code -- confirm before removing
/* Read a tokenized corpus file (one sentence per line) and append each
   line, converted to word ids via the global vocabulary, to `corpus`.
   Exits the program if the file cannot be opened.
   NOTE(review): lines are read into a fixed LINE_MAX_LENGTH buffer via
   the project's SAFE_GETLINE macro -- presumably it truncates overlong
   lines; confirm against its definition in the project headers. */
void load_corpus( char* fileName, vector< vector< WORD_ID > > &corpus )
{
ifstream fileStream;
fileStream.open(fileName);
if (!fileStream) {
cerr << "file not found: " << fileName << endl;
exit(1);
}
istream *fileStreamP = &fileStream;
char line[LINE_MAX_LENGTH];
while(true)
{
SAFE_GETLINE((*fileStreamP), line, LINE_MAX_LENGTH, '\n');
// eof is only detectable after the failed read, so test it here
if (fileStreamP->eof()) break;
corpus.push_back( vocabulary.Tokenize( line ) );
}
}
/* Letter string edit distance, e.g. sub 'their' to 'there' costs 2 */
// global cache for word pairs
map< pair< WORD_ID, WORD_ID >, unsigned int > lsed;

/** Letter-level edit distance between the surface forms of two words.
    @param aIdx  vocabulary id of the first word
    @param bIdx  vocabulary id of the second word
    @return number of character insertions/deletions/substitutions
    Results are memoized in the global `lsed` cache, keyed on the
    (ordered) word-id pair. */
unsigned int letter_sed( WORD_ID aIdx, WORD_ID bIdx )
{
	// check if already computed -> lookup in cache
	pair< WORD_ID, WORD_ID > pIdx = make_pair( aIdx, bIdx );
	map< pair< WORD_ID, WORD_ID >, unsigned int >::const_iterator lookup = lsed.find( pIdx );
	if (lookup != lsed.end())
	{
		return lookup->second;
	}

	// get surface strings for word indices
	const string &a = vocabulary.GetWord( aIdx );
	const string &b = vocabulary.GetWord( bIdx );

	// cost[i][j] = edit distance between a[0..i) and b[0..j).
	// RAII vectors replace the original calloc/free pairs, so the
	// matrix is released on every exit path.
	vector< vector< unsigned int > > cost( a.size()+1, vector< unsigned int >( b.size()+1, 0 ) );
	for( unsigned int i=0; i<=a.size(); i++ ) {
		cost[i][0] = i;	// delete i characters of a
	}
	for( unsigned int j=0; j<=b.size(); j++ ) {
		cost[0][j] = j;	// insert j characters of b
	}

	// core string edit distance loop
	for( unsigned int i=1; i<=a.size(); i++ ) {
		for( unsigned int j=1; j<=b.size(); j++ ) {
			unsigned int ins = cost[i-1][j] + 1;
			unsigned int del = cost[i][j-1] + 1;
			// direct char comparison (the original built two
			// one-character substrings for the same test)
			bool match = ( a[i-1] == b[j-1] );
			unsigned int diag = cost[i-1][j-1] + (match ? 0 : 1);
			cost[i][j] = min( diag, min( ins, del ) );
		}
	}

	// cache and return result
	unsigned int final_cost = cost[a.size()][b.size()];
	lsed[ pIdx ] = final_cost;
	return final_cost;
}
/* string edit distance implementation */
unsigned int sed( const vector< WORD_ID > &a, const vector< WORD_ID > &b, string &best_path, bool use_letter_sed ) {
// initialize cost and path matrices
unsigned int **cost = (unsigned int**) calloc( sizeof( unsigned int* ), a.size()+1 );
char **path = (char**) calloc( sizeof( char* ), a.size()+1 );
for( unsigned int i=0; i<=a.size(); i++ ) {
cost[i] = (unsigned int*) calloc( sizeof(unsigned int), b.size()+1 );
path[i] = (char*) calloc( sizeof(char), b.size()+1 );
if (i>0)
{
cost[i][0] = cost[i-1][0];
if (use_letter_sed)
{
cost[i][0] += vocabulary.GetWord( a[i-1] ).size();
}
else
{
cost[i][0]++;
}
}
else
{
cost[i][0] = 0;
}
path[i][0] = 'I';
}
for( unsigned int j=0; j<=b.size(); j++ ) {
if (j>0)
{
cost[0][j] = cost[0][j-1];
if (use_letter_sed)
{
cost[0][j] += vocabulary.GetWord( b[j-1] ).size();
}
else
{
cost[0][j]++;
}
}
else
{
cost[0][j] = 0;
}
path[0][j] = 'D';
}
// core string edit distance algorithm
for( unsigned int i=1; i<=a.size(); i++ ) {
for( unsigned int j=1; j<=b.size(); j++ ) {
unsigned int ins = cost[i-1][j];
unsigned int del = cost[i][j-1];
unsigned int match;
if (use_letter_sed)
{
ins += vocabulary.GetWord( a[i-1] ).size();
del += vocabulary.GetWord( b[j-1] ).size();
match = letter_sed( a[i-1], b[j-1] );
}
else
{
ins++;
del++;
match = ( a[i-1] == b[j-1] ) ? 0 : 1;
}
unsigned int diag = cost[i-1][j-1] + match;
char action = (ins < del) ? 'I' : 'D';
unsigned int min = (ins < del) ? ins : del;
if (diag < min)
{
action = (match>0) ? 'S' : 'M';
min = diag;
}
cost[i][j] = min;
path[i][j] = action;
}
}
// construct string for best path
unsigned int i = a.size();
unsigned int j = b.size();
best_path = "";
while( i>0 || j>0 )
{
best_path = path[i][j] + best_path;
if (path[i][j] == 'I')
{
i--;
}
else if (path[i][j] == 'D')
{
j--;
}
else
{
i--;
j--;
}
}
// clear out memory
unsigned int final = cost[a.size()][b.size()];
for( unsigned int i=0; i<=a.size(); i++ ) {
free( cost[i] );
free( path[i] );
}
free( cost );
free( path );
// return result
return final;
}
/* utlility function: compute length of sentence in characters
(spaces do not count) */
unsigned int compute_length( const vector< WORD_ID > &sentence )
{
unsigned int length = 0; for( unsigned int i=0; i<sentence.size(); i++ )
{
length += vocabulary.GetWord( sentence[i] ).size();
}
return length;
}
/* brute force method: compare input to all corpus sentences */
int basic_fuzzy_match( vector< vector< WORD_ID > > source,
vector< vector< WORD_ID > > input )
{
// go through input set...
for(unsigned int i=0;i<input.size();i++)
{
bool use_letter_sed = false;
// compute sentence length and worst allowed cost
unsigned int input_length;
if (use_letter_sed)
{
input_length = compute_length( input[i] );
}
else
{
input_length = input[i].size();
}
unsigned int best_cost = input_length * (100-min_match) / 100 + 2;
string best_path = "";
int best_match = -1;
// go through all corpus sentences
for(unsigned int s=0;s<source.size();s++)
{
int source_length;
if (use_letter_sed)
{
source_length = compute_length( source[s] );
}
else
{
source_length = source[s].size();
}
int diff = abs((int)source_length - (int)input_length);
if (length_filter_flag && (diff >= best_cost))
{
continue;
}
// compute string edit distance
string path;
unsigned int cost = sed( input[i], source[s], path, use_letter_sed );
// update if new best
if (cost < best_cost)
{
best_cost = cost;
best_path = path;
best_match = s;
}
}
cout << best_cost << " ||| " << best_match << " ||| " << best_path << endl;
}
}
// Safety valve for the number of substring matches considered per
// sentence (checked only in commented-out code in main()).
#define MAX_MATCH_COUNT 10000000

/* data structure for n-gram match between input and corpus */
// Records that input words [input_start..input_end] matched corpus
// (translation memory) words [tm_start..tm_end], together with bounds
// on the edit cost of any full-sentence alignment using this match:
//   min_cost      - lower bound on total edit cost
//   max_cost      - upper bound on total edit cost
//   internal_cost - cost accumulated between combined sub-matches
class Match {
public:
int input_start;
int input_end;
int tm_start;
int tm_end;
int min_cost;
int max_cost;
int internal_cost;
// trivial value constructor; Match is a passive record
Match( int is, int ie, int ts, int te, int min, int max, int i )
:input_start(is), input_end(ie), tm_start(ts), tm_end(te), min_cost(min), max_cost(max), internal_cost(i)
{}
};
// Positions of each word of the current input sentence, rebuilt per
// sentence by init_short_matches() and read by add_short_matches().
map< WORD_ID,vector< int > > single_word_index;

/* Definition of "short" matches: very short n-gram matches (1-grams)
   are not looked up in the suffix array, since there are too many of
   them -- and for longer sentences, at least one 2-gram match must
   occur anyway. Returns the maximum n-gram length handled by the
   short-match machinery instead (0 = feature disabled). */
inline int short_match_max_length( int input_length )
{
	// 1-grams count as short matches only in refined mode and only
	// for sentences of at least 5 words
	return ( refined_flag && input_length >= 5 ) ? 1 : 0;
}
/* if we have non-short matches in a sentence, we need to
take a closer look at it.
this function creates a hash map for all input words and their positions
(to be used by the next function)
(done here, because this has be done only once for an input sentence) */
void init_short_matches( const vector< WORD_ID > &input )
{
int max_length = short_match_max_length( input.size() );
if (max_length == 0)
return;
single_word_index.clear();
// store input words and their positions in hash map
for(int i=0; i<input.size(); i++)
{
if (single_word_index.find( input[i] ) == single_word_index.end())
{
vector< int > position_vector;
single_word_index[ input[i] ] = position_vector;
}
single_word_index[ input[i] ].push_back( i );
}
}
/* add all short matches to list of matches for a sentence */
// For every corpus (tm) word that also occurs in the current input
// sentence (per single_word_index, built by init_short_matches), add a
// 1-word Match with lower/upper edit-cost bounds derived from the two
// positions -- unless the lower bound already exceeds best_cost.
void add_short_matches( vector< Match > &match, const vector< WORD_ID > &tm, int input_length, int best_cost )
{
int max_length = short_match_max_length( input_length );
if (max_length == 0)
return;
int tm_length = tm.size();
map< WORD_ID,vector< int > >::iterator input_word_hit;
for(int t_pos=0; t_pos<tm.size(); t_pos++)
{
input_word_hit = single_word_index.find( tm[t_pos] );
if (input_word_hit != single_word_index.end())
{
vector< int > &position_vector = input_word_hit->second;
for(int j=0; j<position_vector.size(); j++)
{
int &i_pos = position_vector[j];
// before match
// worst case: rewrite the longer prefix entirely; best case:
// only the prefix-length difference needs editing
int max_cost = max( i_pos , t_pos );
int min_cost = abs( i_pos - t_pos );
// equal-length prefixes that are not at sentence start still
// need at least one edit (otherwise the words before would match)
if ( i_pos>0 && i_pos == t_pos )
min_cost++;
// after match
// same reasoning applied to the suffixes after the matched word
max_cost += max( (input_length-i_pos) , (tm_length-t_pos));
min_cost += abs( (input_length-i_pos) - (tm_length-t_pos));
if ( i_pos != input_length-1 && (input_length-i_pos) == (tm_length-t_pos))
min_cost++;
if (min_cost <= best_cost)
{
Match new_match( i_pos,i_pos, t_pos,t_pos, min_cost,max_cost,0 );
match.push_back( new_match );
}
}
}
}
}
/* remove matches that are subsumed by a larger match */
/** A match i is subsumed by a match j that spans at least as many
    input words and shares either its start point or its end point (in
    both input and tm coordinates). Matches whose minimal cost already
    exceeds best_cost are dropped as well.
    Note: the result is in reverse order of the input (iteration runs
    back to front), exactly as in the original implementation.
    Fixes vs. original: the subsumption scan now stops at the first
    subsuming match, and matches that are over budget skip the O(n)
    scan entirely -- the returned vector is unchanged. */
vector< Match > prune_matches( const vector< Match > &match, int best_cost )
{
	vector< Match > pruned;
	for(int i=match.size()-1; i>=0; i--)
	{
		// over budget -> can never be kept, so skip the subsumption scan
		if (match[i].min_cost > best_cost)
			continue;

		bool subsumed = false;
		for(int j=match.size()-1; j>=0; j--)
		{
			if (i!=j // do not compare match with itself
			    && ( match[i].input_end - match[i].input_start <=
			         match[j].input_end - match[j].input_start ) // i shorter than j
			    && ((match[i].input_start == match[j].input_start &&
			         match[i].tm_start == match[j].tm_start ) ||
			        (match[i].input_end == match[j].input_end &&
			         match[i].tm_end == match[j].tm_end) ) )
			{
				subsumed = true;
				break; // one subsuming match is enough
			}
		}

		if (! subsumed)
		{
			pruned.push_back( match[i] );
		}
	}
	return pruned;
}
/* A* parsing method to compute string edit distance */
// Combines the n-gram matches of one tm sentence bottom-up into ever
// larger non-overlapping spans, tightening the lower/upper bounds on
// the full-sentence edit cost as it goes.
//   match        - pruned matches for this tm sentence
//   input_length - input sentence length (words)
//   tm_length    - tm sentence length (words)
//   best_cost    - in/out: best upper bound over all tm sentences so
//                  far; updated whenever this sentence improves it
// Returns the best (lowest) upper bound achievable for this sentence.
int parse_matches( vector< Match > &match, int input_length, int tm_length, int &best_cost )
{
// cerr << "sentence has " << match.size() << " matches, best cost: " << best_cost << ", lengths input: " << input_length << " tm: " << tm_length << endl;
if (match.size() == 1)
return match[0].max_cost;
if (match.size() == 0)
return input_length+tm_length;
// start from the cheapest single-match upper bound
int this_best_cost = input_length + tm_length;
for(int i=0;i<match.size();i++)
{
this_best_cost = min( this_best_cost, match[i].max_cost );
}
// cerr << "\tthis best cost: " << this_best_cost << endl;
// bottom up combination of spans
// multi_match[k] holds combinations of k+1 original matches; level k
// is built by pairing levels f and k-f-1, CKY-style
vector< vector< Match > > multi_match;
multi_match.push_back( match );
int match_level = 1;
while(multi_match[ match_level-1 ].size()>0)
{
// init vector
vector< Match > empty;
multi_match.push_back( empty );
for(int first_level = 0; first_level <= (match_level-1)/2; first_level++)
{
int second_level = match_level - first_level -1;
//cerr << "\tcombining level " << first_level << " and " << second_level << endl;
vector< Match > &first_match = multi_match[ first_level ];
vector< Match > &second_match = multi_match[ second_level ];
for(int i1 = 0; i1 < first_match.size(); i1++) {
for(int i2 = 0; i2 < second_match.size(); i2++) {
// do not combine the same pair twice
if (first_level == second_level && i2 <= i1)
{
continue;
}
// get sorted matches (first is before second)
Match *first, *second;
if (first_match[i1].input_start < second_match[i2].input_start )
{
first = &first_match[i1];
second = &second_match[i2];
}
else
{
second = &first_match[i1];
first = &second_match[i2];
}
//cerr << "\tcombining "
// << "(" << first->input_start << "," << first->input_end << "), "
// << first->tm_start << " [" << first->internal_cost << "]"
// << " with "
// << "(" << second->input_start << "," << second->input_end << "), "
// << second->tm_start<< " [" << second->internal_cost << "]"
// << endl;
// do not process overlapping matches
if (first->input_end >= second->input_start)
{
continue;
}
// no overlap / mismatch in tm
if (first->tm_end >= second->tm_start)
{
continue;
}
// compute cost
// bounds are: prefix before first + gap between the two + suffix
// after second, mirroring the per-match bound computation in
// add_short_matches
int min_cost = 0;
int max_cost = 0;
// initial
min_cost += abs( first->input_start - first->tm_start );
max_cost += max( first->input_start, first->tm_start );
// same number of words, but not sent. start -> cost is at least 1
if (first->input_start == first->tm_start && first->input_start > 0)
{
min_cost++;
}
// in-between
int skipped_words = second->input_start - first->input_end -1;
int skipped_words_tm = second->tm_start - first->tm_end -1;
int internal_cost = max( skipped_words, skipped_words_tm );
internal_cost += first->internal_cost + second->internal_cost;
min_cost += internal_cost;
max_cost += internal_cost;
// final
min_cost += abs( (tm_length-1 - second->tm_end) -
(input_length-1 - second->input_end) );
max_cost += max( (tm_length-1 - second->tm_end),
(input_length-1 - second->input_end) );
// same number of words, but not sent. end -> cost is at least 1
if ( ( input_length-1 - second->input_end
== tm_length-1 - second->tm_end )
&& input_length-1 != second->input_end )
{
min_cost++;
}
// cerr << "\tcost: " << min_cost << "-" << max_cost << endl;
// if worst than best cost, forget it
if (min_cost > best_cost)
{
continue;
}
// add match
Match new_match( first->input_start,
second->input_end,
first->tm_start,
second->tm_end,
min_cost,
max_cost,
internal_cost);
multi_match[ match_level ].push_back( new_match );
// cerr << "\tstored\n";
// possibly updating this_best_cost
if (max_cost < this_best_cost)
{
// cerr << "\tupdating this best cost to " << max_cost << "\n";
this_best_cost = max_cost;
// possibly updating best_cost
if (max_cost < best_cost)
{
// cerr << "\tupdating best cost to " << max_cost << "\n";
best_cost = max_cost;
}
}
}
}
}
match_level++;
}
return this_best_cost;
}
// Driver: parse options, load input and corpus, then for each input
// sentence find the closest corpus sentence by (bounded) edit
// distance using suffix-array n-gram matches and A* parsing.
// Usage: ./fuzzy-match input corpus [--basic] [--word] [--minmatch 1..100] ...
int main(int argc, char* argv[])
{
vector< vector< WORD_ID > > source, input;
// ---- option parsing (getopt_long writes flag options directly
// ---- into the globals declared at the top of this file)
while(1) {
static struct option long_options[] = {
{"basic", no_argument, &basic_flag, 1},
{"word", no_argument, &lsed_flag, 0},
{"unrefined", no_argument, &refined_flag, 0},
{"nolengthfilter", no_argument, &length_filter_flag, 0},
{"noparse", no_argument, &parse_flag, 0},
{"multiple", no_argument, &multiple_flag, 1},
{"minmatch", required_argument, 0, 'm'},
{0, 0, 0, 0}
};
int option_index = 0;
int c = getopt_long (argc, argv, "m:", long_options, &option_index);
if (c == -1) break;
switch (c) {
case 0:
// flag options are handled by getopt_long itself
// if (long_options[option_index].flag != 0)
// break;
// printf ("option %s", long_options[option_index].name);
// if (optarg)
// printf (" with arg %s", optarg);
// printf ("\n");
break;
case 'm':
min_match = atoi(optarg);
if (min_match < 1 || min_match > 100) {
cerr << "error: --minmatch must have value in range 1..100\n";
exit(1);
}
cerr << "setting min match to " << min_match << endl;
break;
default:
cerr << "usage: syntax: ./fuzzy-match input corpus [--basic] [--word] [--minmatch 1..100]\n";
exit(1);
}
}
// echo the active configuration
if (lsed_flag) { cerr << "lsed\n"; }
if (basic_flag) { cerr << "basic\n"; }
if (refined_flag) { cerr << "refined\n"; }
if (length_filter_flag) { cerr << "length filter\n"; }
if (parse_flag) { cerr << "parse\n"; }
// exit(1);
// ---- load data: argv[optind] = input file, argv[optind+1] = corpus
if (optind+2 != argc) {
cerr << "syntax: ./fuzzy-match input corpus [--basic] [--word] [--minmatch 1..100]\n";
exit(1);
}
cerr << "loading corpus...\n";
load_corpus(argv[optind], input);
load_corpus(argv[optind+1], source);
// ./fuzzy-match input corpus [-basic]
// load_corpus("../corpus/tm.truecased.4.en", source);
// load_corpus("../corpus/tm.truecased.4.it", target);
// load_corpus("../evaluation/test.input.tc.4", input);
// load_corpus("../../acquis-truecase/corpus/acquis.truecased.190.en", source);
// load_corpus("../../acquis-truecase/evaluation/ac-test.input.tc.190", input);
// load_corpus("../corpus/tm.truecased.16.en", source);
// load_corpus("../evaluation/test.input.tc.16", input);
// ---- brute-force baseline mode: full SED against every sentence
if (basic_flag) {
cerr << "using basic method\n";
clock_t start_main_clock2 = clock();
basic_fuzzy_match( source, input );
cerr << "total: " << (1000 * (clock()-start_main_clock2) / CLOCKS_PER_SEC) << endl;
exit(1);
}
cerr << "number of input sentences " << input.size() << endl;
cerr << "creating suffix array...\n";
// SuffixArray suffixArray( "../corpus/tm.truecased.4.en" );
// SuffixArray suffixArray( "../../acquis-truecase/corpus/acquis.truecased.190.en" );
SuffixArray suffixArray( argv[optind+1] );
clock_t start_main_clock = clock();
// looping through all input sentences...
cerr << "looping...\n";
for(unsigned int i=0;i<input.size();i++)
{
clock_t start_clock = clock();
// if (i % 10 == 0) cerr << ".";
int input_id = i; // clean up this mess!
// establish some basic statistics
// int input_length = compute_length( input[i] );
int input_length = input[i].size();
// any match must beat this cost to reach the --minmatch score
int best_cost = input_length * (100-min_match) / 100 + 1;
int match_count = 0; // how many substring matches to be considered
//cerr << endl << "sentence " << i << ", length " << input_length << ", best_cost " << best_cost << endl;
// ---- phase 1: find, for every input start position, the suffix
// ---- array ranges of all substrings that occur in the corpus
// match_range[start][len-1] = (first,last) suffix array indices
vector< vector< pair< SuffixArray::INDEX, SuffixArray::INDEX > > > match_range;
for(size_t start=0;start<input[i].size();start++)
{
SuffixArray::INDEX prior_first_match = 0;
SuffixArray::INDEX prior_last_match = suffixArray.GetSize()-1;
vector< string > substring;
bool stillMatched = true;
vector< pair< SuffixArray::INDEX, SuffixArray::INDEX > > matchedAtThisStart;
//cerr << "start: " << start;
// extend the substring one word at a time; each extension can
// only narrow the previous (first,last) range
for(int word=start; stillMatched && word<input[i].size(); word++)
{
substring.push_back( vocabulary.GetWord( input[i][word] ) );
// only look up, if needed (i.e. no unnecessary short gram lookups)
// if (! word-start+1 <= short_match_max_length( input_length ) )
// {
SuffixArray::INDEX first_match, last_match;
stillMatched = false;
if (suffixArray.FindMatches( substring, first_match, last_match, prior_first_match, prior_last_match ) )
{
stillMatched = true;
matchedAtThisStart.push_back( make_pair( first_match, last_match ) );
//cerr << " (" << first_match << "," << last_match << ")";
//cerr << " " << ( last_match - first_match + 1 );
prior_first_match = first_match;
prior_last_match = last_match;
}
//}
}
//cerr << endl;
match_range.push_back( matchedAtThisStart );
}
clock_t clock_range = clock();
map< int, vector< Match > > sentence_match;
map< int, int > sentence_match_word_count;
// ---- phase 2: turn suffix array ranges into per-tm-sentence Match
// ---- records, longest n-grams first, pruning by cost bounds
// go through all matches, longest first
for(int length = input[i].size(); length >= 1; length--)
{
// do not create matches, if these are handled by the short match function
if (length <= short_match_max_length( input_length ) )
{
continue;
}
unsigned int count = 0;
for(int start = 0; start <= input[i].size() - length; start++)
{
if (match_range[start].size() >= length)
{
pair< SuffixArray::INDEX, SuffixArray::INDEX > &range = match_range[start][length-1];
// cerr << " (" << range.first << "," << range.second << ")";
count += range.second - range.first + 1;
for(SuffixArray::INDEX i=range.first; i<=range.second; i++)
{
int position = suffixArray.GetPosition( i );
// sentence length mismatch
size_t sentence_id = suffixArray.GetSentence( position );
int sentence_length = suffixArray.GetSentenceLength( sentence_id );
int diff = abs( (int)sentence_length - (int)input_length );
// cerr << endl << i << "\tsentence " << sentence_id << ", length " << sentence_length;
//if (length <= 2 && input_length>=5 &&
// sentence_match.find( sentence_id ) == sentence_match.end())
// continue;
if (diff > best_cost)
continue;
// compute minimal cost
int start_pos = suffixArray.GetWordInSentence( position );
int end_pos = start_pos + length-1;
// cerr << endl << "\t" << start_pos << "-" << end_pos << " (" << sentence_length << ") vs. "
// << start << "-" << (start+length-1) << " (" << input_length << ")";
// different number of prior words -> cost is at least diff
int min_cost = abs( start - start_pos );
// same number of words, but not sent. start -> cost is at least 1
if (start == start_pos && start>0)
min_cost++;
// different number of remaining words -> cost is at least diff
min_cost += abs( ( sentence_length-1 - end_pos ) -
( input_length-1 - (start+length-1) ) );
// same number of words, but not sent. end -> cost is at least 1
if ( sentence_length-1 - end_pos ==
input_length-1 - (start+length-1)
&& end_pos != sentence_length-1 )
min_cost++;
// cerr << " -> min_cost " << min_cost;
if (min_cost > best_cost)
continue;
// valid match
match_count++;
// compute maximal cost
int max_cost = max( start, start_pos )
+ max( sentence_length-1 - end_pos,
input_length-1 - (start+length-1) );
// cerr << ", max_cost " << max_cost;
Match m = Match( start, start+length-1,
start_pos, start_pos+length-1,
min_cost, max_cost, 0);
sentence_match[ sentence_id ].push_back( m );
sentence_match_word_count[ sentence_id ] += length;
// a new upper bound tightens all subsequent pruning
if (max_cost < best_cost)
{
best_cost = max_cost;
if (best_cost == 0) break;
}
//if (match_count >= MAX_MATCH_COUNT) break;
}
}
// cerr << endl;
if (best_cost == 0) break;
//if (match_count >= MAX_MATCH_COUNT) break;
}
// cerr << count << " matches at length " << length << " in " << sentence_match.size() << " tm." << endl;
if (best_cost == 0) break;
//if (match_count >= MAX_MATCH_COUNT) break;
}
cerr << match_count << " matches in " << sentence_match.size() << " sentences." << endl;
clock_t clock_matches = clock();
// ---- phase 3: validate each candidate tm sentence, either by A*
// ---- parsing of its matches or by full SED as fallback
// consider each sentence for which we have matches
int old_best_cost = best_cost;
int tm_count_word_match = 0;
int tm_count_word_match2 = 0;
int pruned_match_count = 0;
if (short_match_max_length( input_length ))
{
init_short_matches( input[i] );
}
vector< int > best_tm;
typedef map< int, vector< Match > >::iterator I;
clock_t clock_validation_sum = 0;
for(I tm=sentence_match.begin(); tm!=sentence_match.end(); tm++)
{
int tmID = tm->first;
int tm_length = suffixArray.GetSentenceLength(tmID);
vector< Match > &match = tm->second;
add_short_matches( match, source[tmID], input_length, best_cost );
//cerr << "match in sentence " << tmID << ": " << match.size() << " [" << tm_length << "]" << endl;
// quick look: how many words are matched
int words_matched = 0;
for(int m=0;m<match.size();m++) {
if (match[m].min_cost <= best_cost) // makes no difference
words_matched += match[m].input_end - match[m].input_start + 1;
}
// unmatched words are a lower bound on the edit cost
if (max(input_length,tm_length) - words_matched > best_cost)
{
if (length_filter_flag) continue;
}
tm_count_word_match++;
// prune, check again how many words are matched
vector< Match > pruned = prune_matches( match, best_cost );
words_matched = 0;
for(int p=0;p<pruned.size();p++) {
words_matched += pruned[p].input_end - pruned[p].input_start + 1;
}
if (max(input_length,tm_length) - words_matched > best_cost)
{
if (length_filter_flag) continue;
}
tm_count_word_match2++;
pruned_match_count += pruned.size();
int prior_best_cost = best_cost;
int cost;
clock_t clock_validation_start = clock();
if (! parse_flag ||
pruned.size()>=10) // to prevent worst cases
{
string path;
cost = sed( input[input_id], source[tmID], path, false );
if (cost < best_cost)
{
best_cost = cost;
}
}
else
{
cost = parse_matches( pruned, input_length, tm_length, best_cost );
// parse_matches improved the global bound -> earlier "best"
// sentences are no longer best
if (prior_best_cost != best_cost)
{
best_tm.clear();
}
}
clock_validation_sum += clock() - clock_validation_start;
if (cost == best_cost)
{
best_tm.push_back( tmID );
}
}
cerr << "reduced best cost from " << old_best_cost << " to " << best_cost << endl;
cerr << "tm considered: " << sentence_match.size()
<< " word-matched: " << tm_count_word_match
<< " word-matched2: " << tm_count_word_match2
<< " best: " << best_tm.size() << endl;
cerr << "pruned matches: " << ((float)pruned_match_count/(float)tm_count_word_match2) << endl;
// ---- phase 4: report results
// do not try to find the best ... report multiple matches
if (multiple_flag) {
int input_letter_length = compute_length( input[input_id] );
for(int si=0; si<best_tm.size(); si++) {
int s = best_tm[si];
string path;
unsigned int letter_cost = sed( input[input_id], source[s], path, true );
// do not report multiple identical sentences, but just their count
cout << i << " "; // sentence number
cout << letter_cost << "/" << input_letter_length << " ";
cout << "(" << best_cost <<"/" << input_length <<") ";
cout << "||| " << s << " ||| " << path << endl;
}
continue;
}
// find the best matches according to letter sed
string best_path = "";
int best_match = -1;
int best_letter_cost;
if (lsed_flag) {
best_letter_cost = compute_length( input[input_id] ) * min_match / 100 + 1;
for(int si=0; si<best_tm.size(); si++)
{
int s = best_tm[si];
string path;
unsigned int letter_cost = sed( input[input_id], source[s], path, true );
if (letter_cost < best_letter_cost)
{
best_letter_cost = letter_cost;
best_path = path;
best_match = s;
}
}
}
// if letter sed turned off, just compute path for first match
else {
if (best_tm.size() > 0) {
string path;
sed( input[input_id], source[best_tm[0]], path, false );
best_path = path;
best_match = best_tm[0];
}
}
// per-sentence timing breakdown (milliseconds)
cerr << "elapsed: " << (1000 * (clock()-start_clock) / CLOCKS_PER_SEC)
<< " ( range: " << (1000 * (clock_range-start_clock) / CLOCKS_PER_SEC)
<< " match: " << (1000 * (clock_matches-clock_range) / CLOCKS_PER_SEC)
<< " tm: " << (1000 * (clock()-clock_matches) / CLOCKS_PER_SEC)
<< " (validation: " << (1000 * (clock_validation_sum) / CLOCKS_PER_SEC) << ")"
<< " )" << endl;
// final result line: "[letter_cost/letter_len (]cost/len[)] ||| id ||| path"
if (lsed_flag) {
cout << best_letter_cost << "/" << compute_length( input[input_id] ) << " (";
}
cout << best_cost <<"/" << input_length;
if (lsed_flag) cout << ")";
cout << " ||| " << best_match << " ||| " << best_path << endl;
}
cerr << "total: " << (1000 * (clock()-start_main_clock) / CLOCKS_PER_SEC) << endl;
}

View File

@ -1,58 +0,0 @@
#!/usr/bin/perl -w
use strict;

# Deduplicate a parallel corpus by source sentence. For each unique
# source sentence, write all its target translations (count-prefixed,
# "|||"-separated) plus their alignments, and separately the single
# most frequent target translation and its alignment.
my $src_in = "corpus/acquis.truecased.4.en";
my $tgt_in = "corpus/acquis.truecased.4.fr";
my $align_in = "model/aligned.4.grow-diag-final-and";
my $src_out = "data/acquis.truecased.4.en.uniq";
my $tgt_out = "data/acquis.truecased.4.fr.uniq";
my $tgt_mf = "data/acquis.truecased.4.fr.uniq.most-frequent";
my $align_out = "data/acquis.truecased.4.align.uniq";
my $align_mf = "data/acquis.truecased.4.align.uniq.most-frequent";

# TRANS{src}{tgt} = count; ALIGN{src}{tgt} = alignment string.
# Note: $src keeps its trailing newline (only $tgt/$align are chopped),
# which is why it is printed below without adding "\n".
my (%TRANS,%ALIGN);
open(SRC,$src_in);
open(TGT,$tgt_in);
open(ALIGN,$align_in);
while(my $src = <SRC>) {
my $tgt = <TGT>;
my $align = <ALIGN>;
chop($tgt);
chop($align);
$TRANS{$src}{$tgt}++;
$ALIGN{$src}{$tgt} = $align;
}
close(SRC);
close(TGT);

open(SRC_OUT,">$src_out");
open(TGT_OUT,">$tgt_out");
open(TGT_MF, ">$tgt_mf");
open(ALIGN_OUT,">$align_out");
open(ALIGN_MF, ">$align_mf");
foreach my $src (keys %TRANS) {
print SRC_OUT $src;
my $first = 1;
# $best tracks the most frequent target; counts are >= 1, so the
# first iteration always sets it
my ($max,$best) = (0);
foreach my $tgt (keys %{$TRANS{$src}}) {
print TGT_OUT " ||| " unless $first;
print TGT_OUT $TRANS{$src}{$tgt}." ".$tgt;
print ALIGN_OUT " ||| " unless $first;
print ALIGN_OUT $ALIGN{$src}{$tgt};
if ($TRANS{$src}{$tgt} > $max) {
$max = $TRANS{$src}{$tgt};
$best = $tgt;
}
$first = 0;
}
print TGT_OUT "\n";
print ALIGN_OUT "\n";
print TGT_MF $best."\n";
print ALIGN_MF $ALIGN{$src}{$best}."\n";
}
# NOTE(review): ALIGN, TGT_MF, ALIGN_OUT and ALIGN_MF are closed
# implicitly at exit; only these two are closed explicitly.
close(SRC_OUT);
close(TGT_OUT);
View File

@ -1,308 +0,0 @@
#!/usr/bin/perl -w

# Build a fuzzy-match-based phrase table: run the fuzzy-match binary to find
# translation-memory matches for each input sentence, turn every match into
# an XML frame for the decoder and (optionally) hierarchical rules, then feed
# the rules to train-model.perl to create a phrase table.
#
# Usage: $0 <input> <tm-source> <tm-target> <tm-alignment> <lex> <pt-out>

use strict;
use FindBin qw($RealBin);
use File::Basename;

my $DEBUG = 1;
my $OUTPUT_RULES = 1;

#my $data_root = "/Users/hieuhoang/workspace/experiment/data/tm-mt-integration/";
die "usage: $0 input tm-source tm-target tm-alignment lex pt-out\n"
  unless scalar(@ARGV) >= 6;
my $in_file = $ARGV[0]; #"$data_root/in/ac-test.input.tc.4";
my $source_file = $ARGV[1]; #"$data_root/in/acquis.truecased.4.en.uniq";
my $target_file = $ARGV[2]; #"$data_root/in/acquis.truecased.4.fr.uniq";
my $alignment_file = $ARGV[3]; #"$data_root/in/acquis.truecased.4.align.uniq";
my $lex_file = $ARGV[4]; #$data_root/in/lex.4;
my $pt_file = $ARGV[5]; #"$data_root/out/pt";

my $cmd;

# all intermediate files go into a process-specific temp directory
my $TMPDIR=dirname($pt_file) ."/tmp.$$";
$cmd = "mkdir -p $TMPDIR";
`$cmd`;

my $match_file = "$TMPDIR/match";

# suffix array creation and extraction
$cmd = "$RealBin/fuzzy-match --multiple $in_file $source_file > $match_file";
print STDERR "$cmd \n";
`$cmd`;

# make into xml and pt
my $out_file = "$TMPDIR/ac-test.input.xml.4.uniq.multi.tuning";

# slurp all corpora into memory, one sentence per line
my @INPUT = `cat $in_file`; chop(@INPUT);
my @ALL_SOURCE = `cat $source_file`; chop(@ALL_SOURCE);
my @ALL_TARGET = `cat $target_file`; chop(@ALL_TARGET);
my @ALL_ALIGNMENT = `cat $alignment_file`; chop(@ALL_ALIGNMENT);

open(MATCH,$match_file) or die "cannot open $match_file: $!";
open(FRAME,">$out_file") or die "cannot write $out_file: $!";
open(RULE,">$out_file.extract") or die "cannot write $out_file.extract: $!" if $OUTPUT_RULES;
open(RULE_INV,">$out_file.extract.inv") or die "cannot write $out_file.extract.inv: $!" if $OUTPUT_RULES;
open(INFO,">$out_file.info") or die "cannot write $out_file.info: $!";

# each match line: "<input-id> <score> ||| <tm-sentence-id> ||| <edit-path>"
while( my $match = <MATCH> ) {
  chop($match);
  my ($score,$sentence,$path) = split(/ \|\|\| /,$match);
  $score =~ /^(\d+) (.+)/ || die "bad match line: $match";
  my ($i,$match_score) = ($1,$2);
  print STDERR "i=$i match_score=$match_score\n";

  # construct frame
  if ($sentence < 1e9 && $sentence >= 0) {
    my $SOURCE = $ALL_SOURCE[$sentence];
    my @ALIGNMENT = split(/ \|\|\| /,$ALL_ALIGNMENT[$sentence]);
    my @TARGET = split(/ \|\|\| /,$ALL_TARGET[$sentence]);
    # one frame/rule per alternative target translation ("<count> <words>")
    for(my $j=0;$j<scalar(@TARGET);$j++) {
      $TARGET[$j] =~ /^(\d+) (.+)$/ || die "bad target entry: $TARGET[$j]";
      my ($target_count,$target) = ($1,$2);
      my ($frame,$rule_s,$rule_t,$rule_alignment,$rule_alignment_inv) =
        &create_xml($SOURCE,
                    $INPUT[$i],
                    $target,
                    $ALIGNMENT[$j],
                    $path);
      print FRAME $frame."\n";
      print RULE "$rule_s [X] ||| $rule_t [X] ||| $rule_alignment ||| $target_count\n" if $OUTPUT_RULES;
      print RULE_INV "$rule_t [X] ||| $rule_s [X] ||| $rule_alignment_inv ||| $target_count\n" if $OUTPUT_RULES;
      print INFO "$i ||| $match_score ||| $target_count\n";
    }
  }
}
close(FRAME);
close(MATCH);
close(RULE) if $OUTPUT_RULES;
close(RULE_INV) if $OUTPUT_RULES;
close(INFO);   # was missing: flush before the extract files are post-processed

`LC_ALL=C sort $out_file.extract | gzip -c > $out_file.extract.sorted.gz`;
`LC_ALL=C sort $out_file.extract.inv | gzip -c > $out_file.extract.inv.sorted.gz`;

if ($OUTPUT_RULES)
{
  $cmd = "$RealBin/../../scripts/training/train-model.perl -dont-zip -first-step 6 -last-step 6 -f en -e fr -hierarchical -extract-file $out_file.extract -lexical-file $lex_file -phrase-translation-table $pt_file";
  print STDERR "Executing: $cmd \n";
  `$cmd`;
}

#$cmd = "rm -rf $TMPDIR";
#`$cmd`;

#######################################################
# create_xml: merge a fuzzy-matched translation-memory pair with the new
# input sentence.
#
# Parameters (all space-tokenized strings):
#   $source    - TM source sentence that fuzzy-matched the input
#   $input     - input sentence to be translated
#   $target    - TM target sentence (translation of $source)
#   $alignment - "s-t" word-alignment points between $source and $target
#   $path      - character-per-step edit path from $input to $source:
#                M(atch), S(ubstitution), I(nsertion), D(eletion)
#
# Returns ($frame, $rule_s, $rule_t, $rule_alignment, $rule_alignment_inv):
#   $frame              - $target with mismatched regions replaced by input
#                         words, matched regions wrapped in
#                         <xml translation="..."> markup for the decoder
#   $rule_s, $rule_t    - source/target sides of a hierarchical rule where
#                         each mismatched region becomes an [X][X] gap
#   $rule_alignment     - alignment between rule source/target positions
#   $rule_alignment_inv - the same points with order inverted ("t-s")
sub create_xml {
  my ($source,$input,$target,$alignment,$path) = @_;
  # debug dump of all inputs
  print STDERR " HIEU \n $source \n $input \n $target \n $alignment \n $path \n";
  my @INPUT = split(/ /,$input);
  my @SOURCE = split(/ /,$source);
  my @TARGET = split(/ /,$target);
  my %ALIGN = &create_alignment($alignment);
  # $FRAME_INPUT{$t}: input words to be inserted after target position $t
  my %FRAME_INPUT;
  # @NT: one record per mismatch, linking its input/target start positions
  # @INPUT_BITMAP / @TARGET_BITMAP: 1 = word kept from the TM match
  # %ALIGNMENT_I_TO_S: input position -> source position along the edit path
  my (@NT,@INPUT_BITMAP,@TARGET_BITMAP,%ALIGNMENT_I_TO_S);
  # every target word starts as "kept"; words aligned to mismatched source
  # words are knocked out below
  foreach (@TARGET) { push @TARGET_BITMAP,1 }

  ### STEP 1: FIND MISMATCHES
  my ($s,$i) = (0,0);               # current source / input position
  my $currently_matching = 0;
  my ($start_s,$start_i) = (0,0);   # where the current mismatch began
  $path .= "X"; # indicate end
  print STDERR "$input\n$source\n$target\n$path\n";
  for(my $p=0;$p<length($path);$p++) {
    my $action = substr($path,$p,1);
    # beginning of a mismatch
    if ($currently_matching && $action ne "M" && $action ne "X") {
      $start_i = $i;
      $start_s = $s;
      $currently_matching = 0;
    }
    # end of a mismatch
    elsif (!$currently_matching &&
           ($action eq "M" || $action eq "X")) {
      # remove use of affected target words
      for(my $ss = $start_s; $ss<$s; $ss++) {
        foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
          $TARGET_BITMAP[$tt] = 0;
        }
        # also remove enclosed unaligned words?
      }
      # are there input words that need to be inserted ?
      print STDERR "($start_i<$i)?\n";
      if ($start_i<$i) {
        # take note of input words to be inserted
        my $insertion = "";
        for(my $ii = $start_i; $ii<$i; $ii++) {
          $insertion .= $INPUT[$ii]." ";
        }
        # find position for inserted input words
        # find first removed target word
        my $start_t = 1000;   # sentinel: "no aligned target word found"
        for(my $ss = $start_s; $ss<$s; $ss++) {
          foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
            $start_t = $tt if $tt < $start_t;
          }
        }
        # end of sentence? add to end
        if ($start_t == 1000 && $i > $#INPUT) {
          $start_t = $#TARGET;
        }
        # backtrack to previous words if unaligned
        if ($start_t == 1000) {
          $start_t = -1;
          for(my $ss = $s-1; $start_t==-1 && $ss>=0; $ss--) {
            foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
              $start_t = $tt if $tt > $start_t;
            }
          }
        }
        $FRAME_INPUT{$start_t} .= $insertion;
        my %NT = ("start_t" => $start_t,
                  "start_i" => $start_i );
        push @NT,\%NT;
      }
      $currently_matching = 1;
    }
    # trace this step plus the target words aligned to the current source word
    print STDERR "$action $s $i ($start_s $start_i) $currently_matching";
    if ($action ne "I") {
      print STDERR " ->";
      foreach my $tt (keys %{${$ALIGN{'s'}}[$s]}) {
        print STDERR " ".$tt;
      }
    }
    print STDERR "\n";
    # advance: insertions consume no source word, deletions no input word
    $s++ unless $action eq "I";
    $i++ unless $action eq "D";
    # NOTE(review): this records the mapping *after* the increments, i.e. for
    # the next input position, so position 0 never gets an entry. Looks like
    # an off-by-one -- confirm against the use in STEP 2 below.
    $ALIGNMENT_I_TO_S{$i} = $s unless $action eq "D";
    push @INPUT_BITMAP, 1 if $action eq "M";
    push @INPUT_BITMAP, 0 if $action eq "I" || $action eq "S";
  }
  # debug: surviving target words and pending insertions
  print STDERR $target."\n";
  foreach (@TARGET_BITMAP) { print STDERR $_; } print STDERR "\n";
  foreach (sort keys %FRAME_INPUT) {
    print STDERR "$_: $FRAME_INPUT{$_}\n";
  }

  ### STEP 2: BUILD RULE AND FRAME
  # hierarchical rule
  # source side: kept input words plus one [X][X] gap per mismatch
  my $rule_s = "";
  my $rule_pos_s = 0;
  my %RULE_ALIGNMENT_S;   # source sentence position -> rule position
  for(my $i=0;$i<scalar(@INPUT_BITMAP);$i++) {
    if ($INPUT_BITMAP[$i]) {
      $rule_s .= $INPUT[$i]." ";
      $RULE_ALIGNMENT_S{$ALIGNMENT_I_TO_S{$i}} = $rule_pos_s++;
    }
    foreach my $NT (@NT) {
      if ($i == $$NT{"start_i"}) {
        $rule_s .= "[X][X] ";
        $$NT{"rule_pos_s"} = $rule_pos_s++;
      }
    }
  }
  # target side: kept target words plus the matching [X][X] gaps
  # ($t starts at -1 so a gap anchored before the first word is emitted)
  my $rule_t = "";
  my $rule_pos_t = 0;
  my %RULE_ALIGNMENT_T;   # target sentence position -> rule position
  for(my $t=-1;$t<scalar(@TARGET_BITMAP);$t++) {
    if ($t>=0 && $TARGET_BITMAP[$t]) {
      $rule_t .= $TARGET[$t]." ";
      $RULE_ALIGNMENT_T{$t} = $rule_pos_t++;
    }
    foreach my $NT (@NT) {
      if ($t == $$NT{"start_t"}) {
        $rule_t .= "[X][X] ";
        $$NT{"rule_pos_t"} = $rule_pos_t++;
      }
    }
  }
  # alignment between rule positions (only words kept on both sides)
  my $rule_alignment = "";
  foreach my $s (sort { $a <=> $b} keys %RULE_ALIGNMENT_S) {
    foreach my $t (keys %{$ALIGN{"s"}[$s]}) {
      next unless defined($RULE_ALIGNMENT_T{$t});
      $rule_alignment .= $RULE_ALIGNMENT_S{$s}."-".$RULE_ALIGNMENT_T{$t}." ";
    }
  }
  # each non-terminal gap is aligned to its counterpart
  foreach my $NT (@NT) {
    $rule_alignment .= $$NT{"rule_pos_s"}."-".$$NT{"rule_pos_t"}." ";
  }
  # strip trailing separator spaces
  chop($rule_s);
  chop($rule_t);
  chop($rule_alignment);
  # inverted alignment ("t-s") for the .inv extract file
  my $rule_alignment_inv = "";
  foreach (split(/ /,$rule_alignment)) {
    /^(\d+)\-(\d+)$/;
    $rule_alignment_inv .= "$2-$1 ";
  }
  chop($rule_alignment_inv);

  # frame
  # walk the target sentence: contiguous kept regions become
  # <xml translation="..."> spans, input insertions are spliced in between
  my $frame = "";
  $frame = $FRAME_INPUT{-1} if defined $FRAME_INPUT{-1};   # insertions before word 0
  my $currently_included = 0;
  my $start_t = -1;
  push @TARGET_BITMAP,0; # indicate end
  for(my $t=0;$t<=scalar(@TARGET);$t++) {
    # beginning of tm target inclusion
    if (!$currently_included && $TARGET_BITMAP[$t]) {
      $start_t = $t;
      $currently_included = 1;
    }
    # end of tm target inclusion (not included word or inserted input)
    elsif ($currently_included &&
           (!$TARGET_BITMAP[$t] || defined($FRAME_INPUT{$t}))) {
      # add xml (unless change is at the beginning of the sentence)
      if ($start_t >= 0) {
        my $target = "";
        print STDERR "for(tt=$start_t;tt<$t+$TARGET_BITMAP[$t]);\n";
        for(my $tt=$start_t;$tt<$t+$TARGET_BITMAP[$t];$tt++) {
          $target .= $TARGET[$tt] . " ";
        }
        chop($target);
        $frame .= "<xml translation=\"$target\"> x </xml> ";
      }
      $currently_included = 0;
    }
    $frame .= $FRAME_INPUT{$t} if defined $FRAME_INPUT{$t};
    print STDERR "$TARGET_BITMAP[$t] $t ($start_t) $currently_included\n";
  }

  print STDERR $frame."\n-------------------------------------\n";
  return ($frame,$rule_s,$rule_t,$rule_alignment,$rule_alignment_inv);
}
# Parse a word-alignment string (space-separated "s-t" index pairs) into two
# lookup tables: key 's' maps each source index to the set of target indices
# it is aligned to, key 't' holds the reverse mapping. Returned as a flat
# key/value list, so callers can assign it straight into a hash.
sub create_alignment {
  my ($alignment_string) = @_;
  my (@by_source, @by_target);
  for my $link (split(/ /, $alignment_string)) {
    my ($src_pos, $tgt_pos) = split(/\-/, $link);
    $by_source[$src_pos]{$tgt_pos}++;
    $by_target[$tgt_pos]{$src_pos}++;
  }
  return ( 's' => \@by_source, 't' => \@by_target );
}

View File

@ -1,300 +0,0 @@
#!/usr/bin/perl -w
# (the "-d" debugger switch was removed from the shebang: it dropped every
# run into the interactive Perl debugger)

# Build a fuzzy-match-based phrase table: run the fuzzy-match binary against
# a translation memory, convert each match into an XML decoder frame and
# (optionally) hierarchical rules, then call train-model.perl on the rules.
#
# Usage: $0 <input> <tm-source> <tm-target> <tm-alignment> <lex> <pt-out>

use strict;
use FindBin qw($RealBin);
use File::Basename;

my $DEBUG = 1;
my $OUTPUT_RULES = 1;

#my $data_root = "/Users/hieuhoang/workspace/experiment/data/tm-mt-integration/";
my $in_file = $ARGV[0]; #"$data_root/in/ac-test.input.tc.4";
my $source_file = $ARGV[1]; #"$data_root/in/acquis.truecased.4.en.uniq";
my $target_file = $ARGV[2]; #"$data_root/in/acquis.truecased.4.fr.uniq";
my $alignment_file = $ARGV[3]; #"$data_root/in/acquis.truecased.4.align.uniq";
my $lex_file = $ARGV[4]; #$data_root/in/lex.4;
my $pt_file = $ARGV[5]; #"$data_root/out/pt";

my $cmd;

# process-specific scratch directory
my $TMPDIR= "/tmp/tmp.$$";
$cmd = "mkdir -p $TMPDIR";
`$cmd`;
# BUG FIX: removed a leftover debugging line that overwrote $TMPDIR with a
# hard-coded path on one developer's machine.

my $match_file = "$TMPDIR/match";

# suffix array creation and extraction
$cmd = "$RealBin/fuzzy-match --multiple $in_file $source_file > $match_file";
print STDERR "$cmd \n";
`$cmd`;

# make into xml and pt
my $out_file = "$TMPDIR/ac-test.input.xml.4.uniq.multi.tuning";

# BUG FIX: the corpora were never loaded -- @INPUT, @ALL_SOURCE, @ALL_TARGET
# and @ALL_ALIGNMENT were used below without ever being declared, which is
# fatal under "use strict".
my @INPUT = `cat $in_file`; chop(@INPUT);
my @ALL_SOURCE = `cat $source_file`; chop(@ALL_SOURCE);
my @ALL_TARGET = `cat $target_file`; chop(@ALL_TARGET);
my @ALL_ALIGNMENT = `cat $alignment_file`; chop(@ALL_ALIGNMENT);

open(MATCH,$match_file) or die "cannot open $match_file: $!";
open(FRAME,">$out_file") or die "cannot write $out_file: $!";
open(RULE,">$out_file.extract") or die "cannot write $out_file.extract: $!" if $OUTPUT_RULES;
open(RULE_INV,">$out_file.extract.inv") or die "cannot write $out_file.extract.inv: $!" if $OUTPUT_RULES;
open(INFO,">$out_file.info") or die "cannot write $out_file.info: $!";

# each match line: "<input-id> <score> ||| <tm-sentence-id> ||| <edit-path>"
while( my $match = <MATCH> ) {
  chop($match);
  my ($score,$sentence,$path) = split(/ \|\|\| /,$match);
  $score =~ /^(\d+) (.+)/ || die "bad match line: $match";
  my ($i,$match_score) = ($1,$2);

  # construct frame
  if ($sentence < 1e9 && $sentence >= 0) {
    my $SOURCE = $ALL_SOURCE[$sentence];
    my @ALIGNMENT = split(/ \|\|\| /,$ALL_ALIGNMENT[$sentence]);
    my @TARGET = split(/ \|\|\| /,$ALL_TARGET[$sentence]);
    # one frame/rule per alternative target translation ("<count> <words>")
    for(my $j=0;$j<scalar(@TARGET);$j++) {
      $TARGET[$j] =~ /^(\d+) (.+)$/ || die "bad target entry: $TARGET[$j]";
      my ($target_count,$target) = ($1,$2);
      my ($frame,$rule_s,$rule_t,$rule_alignment,$rule_alignment_inv) =
        &create_xml($SOURCE,
                    $INPUT[$i],
                    $target,
                    $ALIGNMENT[$j],
                    $path);
      print FRAME $frame."\n";
      print RULE "$rule_s [X] ||| $rule_t [X] ||| $rule_alignment ||| $target_count\n" if $OUTPUT_RULES;
      print RULE_INV "$rule_t [X] ||| $rule_s [X] ||| $rule_alignment_inv ||| $target_count\n" if $OUTPUT_RULES;
      print INFO "$i ||| $match_score ||| $target_count\n";
    }
  }
}
close(FRAME);
close(MATCH);
close(RULE) if $OUTPUT_RULES;
close(RULE_INV) if $OUTPUT_RULES;
close(INFO);   # was missing: flush before the extract files are post-processed

`LC_ALL=C sort $out_file.extract | gzip -c > $out_file.extract.sorted.gz`;
`LC_ALL=C sort $out_file.extract.inv | gzip -c > $out_file.extract.inv.sorted.gz`;

if ($OUTPUT_RULES)
{
  $cmd = "$RealBin/../../scripts/training/train-model.perl -dont-zip -first-step 6 -last-step 6 -f en -e fr -hierarchical -extract-file $out_file.extract -lexical-file $lex_file -phrase-translation-table $pt_file";
  print STDERR "Executing: $cmd \n";
  `$cmd`;
}

#$cmd = "rm -rf $TMPDIR";
#`$cmd`;

#######################################################
# create_xml: merge a fuzzy-matched translation-memory pair with the new
# input sentence.
#
# Parameters (all space-tokenized strings):
#   $source    - TM source sentence that fuzzy-matched the input
#   $input     - input sentence to be translated
#   $target    - TM target sentence (translation of $source)
#   $alignment - "s-t" word-alignment points between $source and $target
#   $path      - character-per-step edit path from $input to $source:
#                M(atch), S(ubstitution), I(nsertion), D(eletion)
#
# Returns ($frame, $rule_s, $rule_t, $rule_alignment, $rule_alignment_inv):
#   $frame              - $target with mismatched regions replaced by input
#                         words, matched regions wrapped in
#                         <xml translation="..."> markup for the decoder
#   $rule_s, $rule_t    - source/target sides of a hierarchical rule where
#                         each mismatched region becomes an [X][X] gap
#   $rule_alignment     - alignment between rule source/target positions
#   $rule_alignment_inv - the same points with order inverted ("t-s")
sub create_xml {
  my ($source,$input,$target,$alignment,$path) = @_;
  my @INPUT = split(/ /,$input);
  my @SOURCE = split(/ /,$source);
  my @TARGET = split(/ /,$target);
  my %ALIGN = &create_alignment($alignment);
  # $FRAME_INPUT{$t}: input words to be inserted after target position $t
  my %FRAME_INPUT;
  # @NT: one record per mismatch; bitmaps mark words kept from the TM match;
  # %ALIGNMENT_I_TO_S: input position -> source position along the edit path
  my (@NT,@INPUT_BITMAP,@TARGET_BITMAP,%ALIGNMENT_I_TO_S);
  # every target word starts as "kept" and is knocked out when it aligns to a
  # mismatched source word
  foreach (@TARGET) { push @TARGET_BITMAP,1 }

  ### STEP 1: FIND MISMATCHES
  my ($s,$i) = (0,0);               # current source / input position
  my $currently_matching = 0;
  my ($start_s,$start_i) = (0,0);   # where the current mismatch began
  $path .= "X"; # indicate end
  print STDERR "$input\n$source\n$target\n$path\n";
  for(my $p=0;$p<length($path);$p++) {
    my $action = substr($path,$p,1);
    # beginning of a mismatch
    if ($currently_matching && $action ne "M" && $action ne "X") {
      $start_i = $i;
      $start_s = $s;
      $currently_matching = 0;
    }
    # end of a mismatch
    elsif (!$currently_matching &&
           ($action eq "M" || $action eq "X")) {
      # remove use of affected target words
      for(my $ss = $start_s; $ss<$s; $ss++) {
        foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
          $TARGET_BITMAP[$tt] = 0;
        }
        # also remove enclosed unaligned words?
      }
      # are there input words that need to be inserted ?
      print STDERR "($start_i<$i)?\n";
      if ($start_i<$i) {
        # take note of input words to be inserted
        my $insertion = "";
        for(my $ii = $start_i; $ii<$i; $ii++) {
          $insertion .= $INPUT[$ii]." ";
        }
        # find position for inserted input words
        # find first removed target word
        my $start_t = 1000;   # sentinel: "no aligned target word found"
        for(my $ss = $start_s; $ss<$s; $ss++) {
          foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
            $start_t = $tt if $tt < $start_t;
          }
        }
        # end of sentence? add to end
        if ($start_t == 1000 && $i > $#INPUT) {
          $start_t = $#TARGET;
        }
        # backtrack to previous words if unaligned
        if ($start_t == 1000) {
          $start_t = -1;
          for(my $ss = $s-1; $start_t==-1 && $ss>=0; $ss--) {
            foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
              $start_t = $tt if $tt > $start_t;
            }
          }
        }
        $FRAME_INPUT{$start_t} .= $insertion;
        my %NT = ("start_t" => $start_t,
                  "start_i" => $start_i );
        push @NT,\%NT;
      }
      $currently_matching = 1;
    }
    # trace this step plus the target words aligned to the current source word
    print STDERR "$action $s $i ($start_s $start_i) $currently_matching";
    if ($action ne "I") {
      print STDERR " ->";
      foreach my $tt (keys %{${$ALIGN{'s'}}[$s]}) {
        print STDERR " ".$tt;
      }
    }
    print STDERR "\n";
    # advance: insertions consume no source word, deletions no input word
    $s++ unless $action eq "I";
    $i++ unless $action eq "D";
    # NOTE(review): recorded *after* the increments, i.e. for the next input
    # position; position 0 never gets an entry. Possible off-by-one --
    # confirm against the use in STEP 2 below.
    $ALIGNMENT_I_TO_S{$i} = $s unless $action eq "D";
    push @INPUT_BITMAP, 1 if $action eq "M";
    push @INPUT_BITMAP, 0 if $action eq "I" || $action eq "S";
  }
  # debug: surviving target words and pending insertions
  print STDERR $target."\n";
  foreach (@TARGET_BITMAP) { print STDERR $_; } print STDERR "\n";
  foreach (sort keys %FRAME_INPUT) {
    print STDERR "$_: $FRAME_INPUT{$_}\n";
  }

  ### STEP 2: BUILD RULE AND FRAME
  # hierarchical rule
  # source side: kept input words plus one [X][X] gap per mismatch
  my $rule_s = "";
  my $rule_pos_s = 0;
  my %RULE_ALIGNMENT_S;   # source sentence position -> rule position
  for(my $i=0;$i<scalar(@INPUT_BITMAP);$i++) {
    if ($INPUT_BITMAP[$i]) {
      $rule_s .= $INPUT[$i]." ";
      $RULE_ALIGNMENT_S{$ALIGNMENT_I_TO_S{$i}} = $rule_pos_s++;
    }
    foreach my $NT (@NT) {
      if ($i == $$NT{"start_i"}) {
        $rule_s .= "[X][X] ";
        $$NT{"rule_pos_s"} = $rule_pos_s++;
      }
    }
  }
  # target side: kept target words plus the matching [X][X] gaps
  # ($t starts at -1 so a gap anchored before the first word is emitted)
  my $rule_t = "";
  my $rule_pos_t = 0;
  my %RULE_ALIGNMENT_T;   # target sentence position -> rule position
  for(my $t=-1;$t<scalar(@TARGET_BITMAP);$t++) {
    if ($t>=0 && $TARGET_BITMAP[$t]) {
      $rule_t .= $TARGET[$t]." ";
      $RULE_ALIGNMENT_T{$t} = $rule_pos_t++;
    }
    foreach my $NT (@NT) {
      if ($t == $$NT{"start_t"}) {
        $rule_t .= "[X][X] ";
        $$NT{"rule_pos_t"} = $rule_pos_t++;
      }
    }
  }
  # alignment between rule positions (only words kept on both sides)
  my $rule_alignment = "";
  foreach my $s (sort { $a <=> $b} keys %RULE_ALIGNMENT_S) {
    foreach my $t (keys %{$ALIGN{"s"}[$s]}) {
      next unless defined($RULE_ALIGNMENT_T{$t});
      $rule_alignment .= $RULE_ALIGNMENT_S{$s}."-".$RULE_ALIGNMENT_T{$t}." ";
    }
  }
  # each non-terminal gap is aligned to its counterpart
  foreach my $NT (@NT) {
    $rule_alignment .= $$NT{"rule_pos_s"}."-".$$NT{"rule_pos_t"}." ";
  }
  # strip trailing separator spaces
  chop($rule_s);
  chop($rule_t);
  chop($rule_alignment);
  # inverted alignment ("t-s") for the .inv extract file
  my $rule_alignment_inv = "";
  foreach (split(/ /,$rule_alignment)) {
    /^(\d+)\-(\d+)$/;
    $rule_alignment_inv .= "$2-$1 ";
  }
  chop($rule_alignment_inv);

  # frame
  # walk the target sentence: contiguous kept regions become
  # <xml translation="..."> spans, input insertions are spliced in between
  my $frame = "";
  $frame = $FRAME_INPUT{-1} if defined $FRAME_INPUT{-1};   # insertions before word 0
  my $currently_included = 0;
  my $start_t = -1;
  push @TARGET_BITMAP,0; # indicate end
  for(my $t=0;$t<=scalar(@TARGET);$t++) {
    # beginning of tm target inclusion
    if (!$currently_included && $TARGET_BITMAP[$t]) {
      $start_t = $t;
      $currently_included = 1;
    }
    # end of tm target inclusion (not included word or inserted input)
    elsif ($currently_included &&
           (!$TARGET_BITMAP[$t] || defined($FRAME_INPUT{$t}))) {
      # add xml (unless change is at the beginning of the sentence)
      if ($start_t >= 0) {
        my $target = "";
        print STDERR "for(tt=$start_t;tt<$t+$TARGET_BITMAP[$t]);\n";
        for(my $tt=$start_t;$tt<$t+$TARGET_BITMAP[$t];$tt++) {
          $target .= $TARGET[$tt] . " ";
        }
        chop($target);
        $frame .= "<xml translation=\"$target\"> x </xml> ";
      }
      $currently_included = 0;
    }
    $frame .= $FRAME_INPUT{$t} if defined $FRAME_INPUT{$t};
    print STDERR "$TARGET_BITMAP[$t] $t ($start_t) $currently_included\n";
  }

  print STDERR $frame."\n-------------------------------------\n";
  return ($frame,$rule_s,$rule_t,$rule_alignment,$rule_alignment_inv);
}
# Parse a word-alignment string (space-separated "s-t" index pairs) into two
# lookup tables: key 's' maps each source index to the set of target indices
# it is aligned to, key 't' holds the reverse mapping. Returned as a flat
# key/value list, so callers can assign it straight into a hash.
sub create_alignment {
  my ($alignment_string) = @_;
  my (@by_source, @by_target);
  for my $link (split(/ /, $alignment_string)) {
    my ($src_pos, $tgt_pos) = split(/\-/, $link);
    $by_source[$src_pos]{$tgt_pos}++;
    $by_target[$tgt_pos]{$src_pos}++;
  }
  return ( 's' => \@by_source, 't' => \@by_target );
}

View File

@ -1,288 +0,0 @@
#!/usr/bin/perl -w

# Convert precomputed fuzzy-match results (the BEST file) into XML decoder
# frames and (optionally) hierarchical rules, then run train-model.perl on
# the extracted rules to build a phrase table.
#
# NOTE(review): all paths below are hard-coded to one developer's checkout;
# confirm/parameterize before reusing this script.

use strict;

my $DEBUG = 1;
my $OUTPUT_RULES = 1;

my $scripts_root_dir = "/Users/hieuhoang/workspace/github/hieuhoang/scripts";
my $data_root = "/Users/hieuhoang/workspace/experiment/data/tm-mt-integration/";

#my $match_file = "$data_root/in/BEST.acquis-xml-escaped.4.uniq.multi.tuning";
my $match_file = "$data_root/out/BEST";
my $source_file = "$data_root/in/acquis.truecased.4.en.uniq";
my $target_file = "$data_root/in/acquis.truecased.4.fr.uniq";
my $alignment_file = "$data_root/in/acquis.truecased.4.align.uniq";
my $out_file = "$data_root/out/ac-test.input.xml.4.uniq.multi.tuning";
my $in_file = "$data_root/in/ac-test.input.tc.4";

#my $match_file = "tm/BEST.acquis-xml-escaped.4.uniq.multi";
#my $source_file = "data/acquis.truecased.4.en.uniq";
#my $target_file = "data/acquis.truecased.4.fr.uniq";
#my $alignment_file = "data/acquis.truecased.4.align.uniq";
#my $out_file = "data/ac-test.input.xml.4.uniq.multi.xxx";
#my $in_file = "evaluation/ac-test.input.tc.4";

# slurp all corpora into memory, one sentence per line
my @INPUT = `cat $in_file`; chop(@INPUT);
my @ALL_SOURCE = `cat $source_file`; chop(@ALL_SOURCE);
my @ALL_TARGET = `cat $target_file`; chop(@ALL_TARGET);
my @ALL_ALIGNMENT = `cat $alignment_file`; chop(@ALL_ALIGNMENT);

open(MATCH,$match_file) or die "cannot open $match_file: $!";
open(FRAME,">$out_file") or die "cannot write $out_file: $!";
open(RULE,">$out_file.extract") or die "cannot write $out_file.extract: $!" if $OUTPUT_RULES;
open(RULE_INV,">$out_file.extract.inv") or die "cannot write $out_file.extract.inv: $!" if $OUTPUT_RULES;
open(INFO,">$out_file.info") or die "cannot write $out_file.info: $!";

# each match line: "<input-id> <score> ||| <tm-sentence-id> ||| <edit-path>"
while( my $match = <MATCH> ) {
  chop($match);
  my ($score,$sentence,$path) = split(/ \|\|\| /,$match);
  $score =~ /^(\d+) (.+)/ || die "bad match line: $match";
  my ($i,$match_score) = ($1,$2);

  # construct frame
  if ($sentence < 1e9 && $sentence >= 0) {
    my $SOURCE = $ALL_SOURCE[$sentence];
    my @ALIGNMENT = split(/ \|\|\| /,$ALL_ALIGNMENT[$sentence]);
    my @TARGET = split(/ \|\|\| /,$ALL_TARGET[$sentence]);
    # one frame/rule per alternative target translation ("<count> <words>")
    for(my $j=0;$j<scalar(@TARGET);$j++) {
      $TARGET[$j] =~ /^(\d+) (.+)$/ || die "bad target entry: $TARGET[$j]";
      my ($target_count,$target) = ($1,$2);
      my ($frame,$rule_s,$rule_t,$rule_alignment,$rule_alignment_inv) =
        &create_xml($SOURCE,
                    $INPUT[$i],
                    $target,
                    $ALIGNMENT[$j],
                    $path);
      print FRAME $frame."\n";
      print RULE "$rule_s [X] ||| $rule_t [X] ||| $rule_alignment ||| $target_count\n" if $OUTPUT_RULES;
      print RULE_INV "$rule_t [X] ||| $rule_s [X] ||| $rule_alignment_inv ||| $target_count\n" if $OUTPUT_RULES;
      print INFO "$i ||| $match_score ||| $target_count\n";
    }
  }
}
close(FRAME);
close(MATCH);
close(RULE) if $OUTPUT_RULES;
close(RULE_INV) if $OUTPUT_RULES;
close(INFO);   # was missing: flush before the extract files are post-processed

`LC_ALL=C sort $out_file.extract | gzip -c > $out_file.extract.sorted.gz`;
`LC_ALL=C sort $out_file.extract.inv | gzip -c > $out_file.extract.inv.sorted.gz`;

`$scripts_root_dir/training/train-model.perl -dont-zip -first-step 6 -last-step 6 -f en -e fr -hierarchical -extract-file $out_file.extract -lexical-file $data_root/in/lex.4 -phrase-translation-table $out_file.phrase-table` if $OUTPUT_RULES;
# create_xml: merge a fuzzy-matched translation-memory pair with the new
# input sentence. (This variant traces to STDOUT rather than STDERR.)
#
# Parameters (all space-tokenized strings):
#   $source    - TM source sentence that fuzzy-matched the input
#   $input     - input sentence to be translated
#   $target    - TM target sentence (translation of $source)
#   $alignment - "s-t" word-alignment points between $source and $target
#   $path      - character-per-step edit path from $input to $source:
#                M(atch), S(ubstitution), I(nsertion), D(eletion)
#
# Returns ($frame, $rule_s, $rule_t, $rule_alignment, $rule_alignment_inv):
#   $frame              - $target with mismatched regions replaced by input
#                         words, matched regions wrapped in
#                         <xml translation="..."> markup for the decoder
#   $rule_s, $rule_t    - source/target sides of a hierarchical rule where
#                         each mismatched region becomes an [X][X] gap
#   $rule_alignment     - alignment between rule source/target positions
#   $rule_alignment_inv - the same points with order inverted ("t-s")
sub create_xml {
  my ($source,$input,$target,$alignment,$path) = @_;
  my @INPUT = split(/ /,$input);
  my @SOURCE = split(/ /,$source);
  my @TARGET = split(/ /,$target);
  my %ALIGN = &create_alignment($alignment);
  # $FRAME_INPUT{$t}: input words to be inserted after target position $t
  my %FRAME_INPUT;
  # @NT: one record per mismatch; bitmaps mark words kept from the TM match;
  # %ALIGNMENT_I_TO_S: input position -> source position along the edit path
  my (@NT,@INPUT_BITMAP,@TARGET_BITMAP,%ALIGNMENT_I_TO_S);
  # every target word starts as "kept" and is knocked out when it aligns to a
  # mismatched source word
  foreach (@TARGET) { push @TARGET_BITMAP,1 }

  ### STEP 1: FIND MISMATCHES
  my ($s,$i) = (0,0);               # current source / input position
  my $currently_matching = 0;
  my ($start_s,$start_i) = (0,0);   # where the current mismatch began
  $path .= "X"; # indicate end
  print "$input\n$source\n$target\n$path\n";
  for(my $p=0;$p<length($path);$p++) {
    my $action = substr($path,$p,1);
    # beginning of a mismatch
    if ($currently_matching && $action ne "M" && $action ne "X") {
      $start_i = $i;
      $start_s = $s;
      $currently_matching = 0;
    }
    # end of a mismatch
    elsif (!$currently_matching &&
           ($action eq "M" || $action eq "X")) {
      # remove use of affected target words
      for(my $ss = $start_s; $ss<$s; $ss++) {
        foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
          $TARGET_BITMAP[$tt] = 0;
        }
        # also remove enclosed unaligned words?
      }
      # are there input words that need to be inserted ?
      print "($start_i<$i)?\n";
      if ($start_i<$i) {
        # take note of input words to be inserted
        my $insertion = "";
        for(my $ii = $start_i; $ii<$i; $ii++) {
          $insertion .= $INPUT[$ii]." ";
        }
        # find position for inserted input words
        # find first removed target word
        my $start_t = 1000;   # sentinel: "no aligned target word found"
        for(my $ss = $start_s; $ss<$s; $ss++) {
          foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
            $start_t = $tt if $tt < $start_t;
          }
        }
        # end of sentence? add to end
        if ($start_t == 1000 && $i > $#INPUT) {
          $start_t = $#TARGET;
        }
        # backtrack to previous words if unaligned
        if ($start_t == 1000) {
          $start_t = -1;
          for(my $ss = $s-1; $start_t==-1 && $ss>=0; $ss--) {
            foreach my $tt (keys %{${$ALIGN{'s'}}[$ss]}) {
              $start_t = $tt if $tt > $start_t;
            }
          }
        }
        $FRAME_INPUT{$start_t} .= $insertion;
        my %NT = ("start_t" => $start_t,
                  "start_i" => $start_i );
        push @NT,\%NT;
      }
      $currently_matching = 1;
    }
    # trace this step plus the target words aligned to the current source word
    print "$action $s $i ($start_s $start_i) $currently_matching";
    if ($action ne "I") {
      print " ->";
      foreach my $tt (keys %{${$ALIGN{'s'}}[$s]}) {
        print " ".$tt;
      }
    }
    print "\n";
    # advance: insertions consume no source word, deletions no input word
    $s++ unless $action eq "I";
    $i++ unless $action eq "D";
    # NOTE(review): recorded *after* the increments, i.e. for the next input
    # position; position 0 never gets an entry. Possible off-by-one --
    # confirm against the use in STEP 2 below.
    $ALIGNMENT_I_TO_S{$i} = $s unless $action eq "D";
    push @INPUT_BITMAP, 1 if $action eq "M";
    push @INPUT_BITMAP, 0 if $action eq "I" || $action eq "S";
  }
  # debug: surviving target words and pending insertions
  print $target."\n";
  foreach (@TARGET_BITMAP) { print $_; } print "\n";
  foreach (sort keys %FRAME_INPUT) {
    print "$_: $FRAME_INPUT{$_}\n";
  }

  ### STEP 2: BUILD RULE AND FRAME
  # hierarchical rule
  # source side: kept input words plus one [X][X] gap per mismatch
  my $rule_s = "";
  my $rule_pos_s = 0;
  my %RULE_ALIGNMENT_S;   # source sentence position -> rule position
  for(my $i=0;$i<scalar(@INPUT_BITMAP);$i++) {
    if ($INPUT_BITMAP[$i]) {
      $rule_s .= $INPUT[$i]." ";
      $RULE_ALIGNMENT_S{$ALIGNMENT_I_TO_S{$i}} = $rule_pos_s++;
    }
    foreach my $NT (@NT) {
      if ($i == $$NT{"start_i"}) {
        $rule_s .= "[X][X] ";
        $$NT{"rule_pos_s"} = $rule_pos_s++;
      }
    }
  }
  # target side: kept target words plus the matching [X][X] gaps
  # ($t starts at -1 so a gap anchored before the first word is emitted)
  my $rule_t = "";
  my $rule_pos_t = 0;
  my %RULE_ALIGNMENT_T;   # target sentence position -> rule position
  for(my $t=-1;$t<scalar(@TARGET_BITMAP);$t++) {
    if ($t>=0 && $TARGET_BITMAP[$t]) {
      $rule_t .= $TARGET[$t]." ";
      $RULE_ALIGNMENT_T{$t} = $rule_pos_t++;
    }
    foreach my $NT (@NT) {
      if ($t == $$NT{"start_t"}) {
        $rule_t .= "[X][X] ";
        $$NT{"rule_pos_t"} = $rule_pos_t++;
      }
    }
  }
  # alignment between rule positions (only words kept on both sides)
  my $rule_alignment = "";
  foreach my $s (sort { $a <=> $b} keys %RULE_ALIGNMENT_S) {
    foreach my $t (keys %{$ALIGN{"s"}[$s]}) {
      next unless defined($RULE_ALIGNMENT_T{$t});
      $rule_alignment .= $RULE_ALIGNMENT_S{$s}."-".$RULE_ALIGNMENT_T{$t}." ";
    }
  }
  # each non-terminal gap is aligned to its counterpart
  foreach my $NT (@NT) {
    $rule_alignment .= $$NT{"rule_pos_s"}."-".$$NT{"rule_pos_t"}." ";
  }
  # strip trailing separator spaces
  chop($rule_s);
  chop($rule_t);
  chop($rule_alignment);
  # inverted alignment ("t-s") for the .inv extract file
  my $rule_alignment_inv = "";
  foreach (split(/ /,$rule_alignment)) {
    /^(\d+)\-(\d+)$/;
    $rule_alignment_inv .= "$2-$1 ";
  }
  chop($rule_alignment_inv);

  # frame
  # walk the target sentence: contiguous kept regions become
  # <xml translation="..."> spans, input insertions are spliced in between
  my $frame = "";
  $frame = $FRAME_INPUT{-1} if defined $FRAME_INPUT{-1};   # insertions before word 0
  my $currently_included = 0;
  my $start_t = -1;
  push @TARGET_BITMAP,0; # indicate end
  for(my $t=0;$t<=scalar(@TARGET);$t++) {
    # beginning of tm target inclusion
    if (!$currently_included && $TARGET_BITMAP[$t]) {
      $start_t = $t;
      $currently_included = 1;
    }
    # end of tm target inclusion (not included word or inserted input)
    elsif ($currently_included &&
           (!$TARGET_BITMAP[$t] || defined($FRAME_INPUT{$t}))) {
      # add xml (unless change is at the beginning of the sentence)
      if ($start_t >= 0) {
        my $target = "";
        print "for(tt=$start_t;tt<$t+$TARGET_BITMAP[$t]);\n";
        for(my $tt=$start_t;$tt<$t+$TARGET_BITMAP[$t];$tt++) {
          $target .= $TARGET[$tt] . " ";
        }
        chop($target);
        $frame .= "<xml translation=\"$target\"> x </xml> ";
      }
      $currently_included = 0;
    }
    $frame .= $FRAME_INPUT{$t} if defined $FRAME_INPUT{$t};
    print "$TARGET_BITMAP[$t] $t ($start_t) $currently_included\n";
  }

  print $frame."\n-------------------------------------\n";
  return ($frame,$rule_s,$rule_t,$rule_alignment,$rule_alignment_inv);
}
# Parse a word-alignment string (space-separated "s-t" index pairs) into two
# lookup tables: key 's' maps each source index to the set of target indices
# it is aligned to, key 't' holds the reverse mapping. Returned as a flat
# key/value list, so callers can assign it straight into a hash.
sub create_alignment {
  my ($alignment_string) = @_;
  my (@by_source, @by_target);
  for my $link (split(/ /, $alignment_string)) {
    my ($src_pos, $tgt_pos) = split(/\-/, $link);
    $by_source[$src_pos]{$tgt_pos}++;
    $by_target[$tgt_pos]{$src_pos}++;
  }
  return ( 's' => \@by_source, 't' => \@by_target );
}

View File

@ -1,27 +0,0 @@
#include "SuffixArray.h"
using namespace std;
int main(int argc, char* argv[])
{
SuffixArray suffixArray( "/home/pkoehn/syntax/grammars/wmt09-de-en/corpus.1k.de" );
//suffixArray.List(10,20);
vector< string > der;
der.push_back("der");
vector< string > inDer;
inDer.push_back("in");
inDer.push_back("der");
vector< string > zzz;
zzz.push_back("zzz");
vector< string > derDer;
derDer.push_back("der");
derDer.push_back("der");
cout << "count of 'der' " << suffixArray.Count( der ) << endl;
cout << "limited count of 'der' " << suffixArray.MinCount( der, 2 ) << endl;
cout << "count of 'in der' " << suffixArray.Count( inDer ) << endl;
cout << "count of 'der der' " << suffixArray.Count( derDer ) << endl;
cout << "limited count of 'der der' " << suffixArray.MinCount( derDer, 1 ) << endl;
// cout << "count of 'zzz' " << suffixArray.Count( zzz ) << endl;
// cout << "limited count of 'zzz' " << suffixArray.LimitedCount( zzz, 1 ) << endl;
}

View File

@ -226,6 +226,7 @@
GCC_OPTIMIZATION_LEVEL = 0;
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
HEADER_SEARCH_PATHS = /usr/local/include;
ONLY_ACTIVE_ARCH = YES;
PREBINDING = NO;
SDKROOT = macosx10.6;
@ -239,6 +240,7 @@
GCC_C_LANGUAGE_STANDARD = gnu99;
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
HEADER_SEARCH_PATHS = /usr/local/include;
ONLY_ACTIVE_ARCH = YES;
PREBINDING = NO;
SDKROOT = macosx10.6;

View File

@ -47,6 +47,7 @@
</option>
<option id="gnu.cpp.compiler.option.preprocessor.def.1052680347" name="Defined symbols (-D)" superClass="gnu.cpp.compiler.option.preprocessor.def" valueType="definedSymbols">
<listOptionValue builtIn="false" value="TRACE_ENABLE"/>
<listOptionValue builtIn="false" value="WITH_THREADS"/>
</option>
<inputType id="cdt.managedbuild.tool.gnu.cpp.compiler.input.1930757481" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.input"/>
</tool>

View File

@ -83,6 +83,8 @@
1EBA459F14B97E92003CC0EA /* string_piece.hh in Headers */ = {isa = PBXBuildFile; fileRef = 1EBA455914B97E92003CC0EA /* string_piece.hh */; };
1EBA45A014B97E92003CC0EA /* tokenize_piece_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 1EBA455A14B97E92003CC0EA /* tokenize_piece_test.cc */; };
1EBA45A114B97E92003CC0EA /* tokenize_piece.hh in Headers */ = {isa = PBXBuildFile; fileRef = 1EBA455B14B97E92003CC0EA /* tokenize_piece.hh */; };
1EC2B30916233A8C00614D71 /* usage.cc in Sources */ = {isa = PBXBuildFile; fileRef = 1EC2B30716233A8C00614D71 /* usage.cc */; };
1EC2B30A16233A8C00614D71 /* usage.hh in Headers */ = {isa = PBXBuildFile; fileRef = 1EC2B30816233A8C00614D71 /* usage.hh */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
@ -185,6 +187,8 @@
1EBA455A14B97E92003CC0EA /* tokenize_piece_test.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = tokenize_piece_test.cc; path = ../../util/tokenize_piece_test.cc; sourceTree = "<group>"; };
1EBA455B14B97E92003CC0EA /* tokenize_piece.hh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = tokenize_piece.hh; path = ../../util/tokenize_piece.hh; sourceTree = "<group>"; };
1EBA455C14B97E92003CC0EA /* util.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = util.xcodeproj; path = ../../util/util.xcodeproj; sourceTree = "<group>"; };
1EC2B30716233A8C00614D71 /* usage.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = usage.cc; path = ../../util/usage.cc; sourceTree = "<group>"; };
1EC2B30816233A8C00614D71 /* usage.hh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = usage.hh; path = ../../util/usage.hh; sourceTree = "<group>"; };
1EE8C2E91476A48E002496F2 /* liblm.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = liblm.a; sourceTree = BUILT_PRODUCTS_DIR; };
/* End PBXFileReference section */
@ -261,6 +265,8 @@
1EBA44FC14B97E81003CC0EA /* util */ = {
isa = PBXGroup;
children = (
1EC2B30716233A8C00614D71 /* usage.cc */,
1EC2B30816233A8C00614D71 /* usage.hh */,
1EBA453614B97E92003CC0EA /* bit_packing_test.cc */,
1EBA453714B97E92003CC0EA /* bit_packing.cc */,
1EBA453814B97E92003CC0EA /* bit_packing.hh */,
@ -377,6 +383,7 @@
1EBA45A114B97E92003CC0EA /* tokenize_piece.hh in Headers */,
1E890C72159D1B260031F9F3 /* value_build.hh in Headers */,
1E890C73159D1B260031F9F3 /* value.hh in Headers */,
1EC2B30A16233A8C00614D71 /* usage.hh in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@ -479,6 +486,7 @@
1EBA459D14B97E92003CC0EA /* sorted_uniform_test.cc in Sources */,
1EBA45A014B97E92003CC0EA /* tokenize_piece_test.cc in Sources */,
1E890C71159D1B260031F9F3 /* value_build.cc in Sources */,
1EC2B30916233A8C00614D71 /* usage.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};

View File

@ -0,0 +1,147 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?fileVersion 4.0.0?>
<cproject storage_type_id="org.eclipse.cdt.core.XmlProjectDescriptionStorage">
<storageModule moduleId="org.eclipse.cdt.core.settings">
<cconfiguration id="cdt.managedbuild.config.gnu.exe.debug.162355801">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.config.gnu.exe.debug.162355801" moduleId="org.eclipse.cdt.core.settings" name="Debug">
<externalSettings/>
<extensions>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GASErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GLDErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug,org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe" cleanCommand="rm -rf" description="" id="cdt.managedbuild.config.gnu.exe.debug.162355801" name="Debug" parent="cdt.managedbuild.config.gnu.exe.debug">
<folderInfo id="cdt.managedbuild.config.gnu.exe.debug.162355801." name="/" resourcePath="">
<toolChain id="cdt.managedbuild.toolchain.gnu.exe.debug.1633424067" name="Linux GCC" superClass="cdt.managedbuild.toolchain.gnu.exe.debug">
<targetPlatform id="cdt.managedbuild.target.gnu.platform.exe.debug.1437309068" name="Debug Platform" superClass="cdt.managedbuild.target.gnu.platform.exe.debug"/>
<builder buildPath="${workspace_loc:/moses-chart-cmd/Debug}" id="cdt.managedbuild.target.gnu.builder.exe.debug.1495140314" keepEnvironmentInBuildfile="false" managedBuildOn="true" name="Gnu Make Builder" superClass="cdt.managedbuild.target.gnu.builder.exe.debug"/>
<tool id="cdt.managedbuild.tool.gnu.archiver.base.1247128100" name="GCC Archiver" superClass="cdt.managedbuild.tool.gnu.archiver.base"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug.1087697480" name="GCC C++ Compiler" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug">
<option id="gnu.cpp.compiler.exe.debug.option.optimization.level.1163099464" name="Optimization Level" superClass="gnu.cpp.compiler.exe.debug.option.optimization.level" value="gnu.cpp.compiler.optimization.level.none" valueType="enumerated"/>
<option id="gnu.cpp.compiler.exe.debug.option.debugging.level.1584931166" name="Debug Level" superClass="gnu.cpp.compiler.exe.debug.option.debugging.level" value="gnu.cpp.compiler.debugging.level.max" valueType="enumerated"/>
<option id="gnu.cpp.compiler.option.include.paths.65842083" name="Include paths (-I)" superClass="gnu.cpp.compiler.option.include.paths" valueType="includePath">
<listOptionValue builtIn="false" value="&quot;${workspace_loc}/../../moses/src&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc}/../..&quot;"/>
</option>
<option id="gnu.cpp.compiler.option.preprocessor.def.1785368241" name="Defined symbols (-D)" superClass="gnu.cpp.compiler.option.preprocessor.def" valueType="definedSymbols">
<listOptionValue builtIn="false" value="WITH_THREADS"/>
</option>
<inputType id="cdt.managedbuild.tool.gnu.cpp.compiler.input.1402496521" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.compiler.exe.debug.827478809" name="GCC C Compiler" superClass="cdt.managedbuild.tool.gnu.c.compiler.exe.debug">
<option defaultValue="gnu.c.optimization.level.none" id="gnu.c.compiler.exe.debug.option.optimization.level.1840610682" name="Optimization Level" superClass="gnu.c.compiler.exe.debug.option.optimization.level" valueType="enumerated"/>
<option id="gnu.c.compiler.exe.debug.option.debugging.level.1437095112" name="Debug Level" superClass="gnu.c.compiler.exe.debug.option.debugging.level" value="gnu.c.debugging.level.max" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.c.compiler.input.128236233" superClass="cdt.managedbuild.tool.gnu.c.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.linker.exe.debug.755343734" name="GCC C Linker" superClass="cdt.managedbuild.tool.gnu.c.linker.exe.debug"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.linker.exe.debug.816413868" name="GCC C++ Linker" superClass="cdt.managedbuild.tool.gnu.cpp.linker.exe.debug">
<option id="gnu.cpp.link.option.paths.330225535" name="Library search path (-L)" superClass="gnu.cpp.link.option.paths" valueType="libPaths">
<listOptionValue builtIn="false" value="&quot;${workspace_loc:}/../../boost/lib&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:}/../../irstlm/lib&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:/moses}/Debug&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:/lm}/Debug&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:/OnDiskPt}/Debug&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:/util}/Debug&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:/search}/Debug&quot;"/>
</option>
<option id="gnu.cpp.link.option.libs.1177721357" name="Libraries (-l)" superClass="gnu.cpp.link.option.libs" valueType="libs">
<listOptionValue builtIn="false" value="moses"/>
<listOptionValue builtIn="false" value="search"/>
<listOptionValue builtIn="false" value="irstlm"/>
<listOptionValue builtIn="false" value="OnDiskPt"/>
<listOptionValue builtIn="false" value="z"/>
<listOptionValue builtIn="false" value="rt"/>
<listOptionValue builtIn="false" value="boost_system"/>
<listOptionValue builtIn="false" value="boost_thread"/>
<listOptionValue builtIn="false" value="boost_filesystem"/>
<listOptionValue builtIn="false" value="lm"/>
<listOptionValue builtIn="false" value="util"/>
</option>
<inputType id="cdt.managedbuild.tool.gnu.cpp.linker.input.128214028" superClass="cdt.managedbuild.tool.gnu.cpp.linker.input">
<additionalInput kind="additionalinputdependency" paths="$(USER_OBJS)"/>
<additionalInput kind="additionalinput" paths="$(LIBS)"/>
</inputType>
</tool>
<tool id="cdt.managedbuild.tool.gnu.assembler.exe.debug.1267270542" name="GCC Assembler" superClass="cdt.managedbuild.tool.gnu.assembler.exe.debug">
<inputType id="cdt.managedbuild.tool.gnu.assembler.input.612723114" superClass="cdt.managedbuild.tool.gnu.assembler.input"/>
</tool>
</toolChain>
</folderInfo>
</configuration>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.externalSettings"/>
</cconfiguration>
<cconfiguration id="cdt.managedbuild.config.gnu.exe.release.516628324">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.config.gnu.exe.release.516628324" moduleId="org.eclipse.cdt.core.settings" name="Release">
<externalSettings/>
<extensions>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GASErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GLDErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.release,org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe" cleanCommand="rm -rf" description="" id="cdt.managedbuild.config.gnu.exe.release.516628324" name="Release" parent="cdt.managedbuild.config.gnu.exe.release">
<folderInfo id="cdt.managedbuild.config.gnu.exe.release.516628324." name="/" resourcePath="">
<toolChain id="cdt.managedbuild.toolchain.gnu.exe.release.1782680519" name="Linux GCC" superClass="cdt.managedbuild.toolchain.gnu.exe.release">
<targetPlatform id="cdt.managedbuild.target.gnu.platform.exe.release.587667692" name="Debug Platform" superClass="cdt.managedbuild.target.gnu.platform.exe.release"/>
<builder buildPath="${workspace_loc:/moses-chart-cmd/Release}" id="cdt.managedbuild.target.gnu.builder.exe.release.330540300" keepEnvironmentInBuildfile="false" managedBuildOn="true" name="Gnu Make Builder" superClass="cdt.managedbuild.target.gnu.builder.exe.release"/>
<tool id="cdt.managedbuild.tool.gnu.archiver.base.1062976385" name="GCC Archiver" superClass="cdt.managedbuild.tool.gnu.archiver.base"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.compiler.exe.release.1344864210" name="GCC C++ Compiler" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.exe.release">
<option id="gnu.cpp.compiler.exe.release.option.optimization.level.1422341509" name="Optimization Level" superClass="gnu.cpp.compiler.exe.release.option.optimization.level" value="gnu.cpp.compiler.optimization.level.most" valueType="enumerated"/>
<option id="gnu.cpp.compiler.exe.release.option.debugging.level.1573362644" name="Debug Level" superClass="gnu.cpp.compiler.exe.release.option.debugging.level" value="gnu.cpp.compiler.debugging.level.none" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.cpp.compiler.input.1937178483" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.compiler.exe.release.1116405938" name="GCC C Compiler" superClass="cdt.managedbuild.tool.gnu.c.compiler.exe.release">
<option defaultValue="gnu.c.optimization.level.most" id="gnu.c.compiler.exe.release.option.optimization.level.32856289" name="Optimization Level" superClass="gnu.c.compiler.exe.release.option.optimization.level" valueType="enumerated"/>
<option id="gnu.c.compiler.exe.release.option.debugging.level.1235489953" name="Debug Level" superClass="gnu.c.compiler.exe.release.option.debugging.level" value="gnu.c.debugging.level.none" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.c.compiler.input.1583852187" superClass="cdt.managedbuild.tool.gnu.c.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.linker.exe.release.1007421110" name="GCC C Linker" superClass="cdt.managedbuild.tool.gnu.c.linker.exe.release"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.linker.exe.release.195880914" name="GCC C++ Linker" superClass="cdt.managedbuild.tool.gnu.cpp.linker.exe.release">
<inputType id="cdt.managedbuild.tool.gnu.cpp.linker.input.518921609" superClass="cdt.managedbuild.tool.gnu.cpp.linker.input">
<additionalInput kind="additionalinputdependency" paths="$(USER_OBJS)"/>
<additionalInput kind="additionalinput" paths="$(LIBS)"/>
</inputType>
</tool>
<tool id="cdt.managedbuild.tool.gnu.assembler.exe.release.330494310" name="GCC Assembler" superClass="cdt.managedbuild.tool.gnu.assembler.exe.release">
<inputType id="cdt.managedbuild.tool.gnu.assembler.input.1407747418" superClass="cdt.managedbuild.tool.gnu.assembler.input"/>
</tool>
</toolChain>
</folderInfo>
</configuration>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.externalSettings"/>
</cconfiguration>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<project id="moses-chart-cmd.cdt.managedbuild.target.gnu.exe.532411209" name="Executable" projectType="cdt.managedbuild.target.gnu.exe"/>
</storageModule>
<storageModule moduleId="scannerConfiguration">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId=""/>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.release.516628324;cdt.managedbuild.config.gnu.exe.release.516628324.;cdt.managedbuild.tool.gnu.c.compiler.exe.release.1116405938;cdt.managedbuild.tool.gnu.c.compiler.input.1583852187">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileC"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.debug.162355801;cdt.managedbuild.config.gnu.exe.debug.162355801.;cdt.managedbuild.tool.gnu.c.compiler.exe.debug.827478809;cdt.managedbuild.tool.gnu.c.compiler.input.128236233">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileC"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.debug.162355801;cdt.managedbuild.config.gnu.exe.debug.162355801.;cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug.1087697480;cdt.managedbuild.tool.gnu.cpp.compiler.input.1402496521">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileCPP"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.release.516628324;cdt.managedbuild.config.gnu.exe.release.516628324.;cdt.managedbuild.tool.gnu.cpp.compiler.exe.release.1344864210;cdt.managedbuild.tool.gnu.cpp.compiler.input.1937178483">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileCPP"/>
</scannerConfigBuildInfo>
</storageModule>
<storageModule moduleId="refreshScope" versionNumber="1">
<resource resourceType="PROJECT" workspacePath="/moses-chart-cmd"/>
</storageModule>
</cproject>

View File

@ -0,0 +1,199 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>moses-chart-cmd</name>
<comment></comment>
<projects>
<project>lm</project>
<project>moses</project>
<project>OnDiskPt</project>
<project>util</project>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
<triggers>clean,full,incremental,</triggers>
<arguments>
<dictionary>
<key>?name?</key>
<value></value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.append_environment</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.autoBuildTarget</key>
<value>all</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.buildArguments</key>
<value></value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.buildCommand</key>
<value>make</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.buildLocation</key>
<value>${workspace_loc:/moses-chart-cmd/Debug}</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.cleanBuildTarget</key>
<value>clean</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.contents</key>
<value>org.eclipse.cdt.make.core.activeConfigSettings</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.enableAutoBuild</key>
<value>false</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.enableCleanBuild</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.enableFullBuild</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.fullBuildTarget</key>
<value>all</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.stopOnError</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.useDefaultBuildCmd</key>
<value>true</value>
</dictionary>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
<triggers>full,incremental,</triggers>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.cdt.core.cnature</nature>
<nature>org.eclipse.cdt.core.ccnature</nature>
<nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
<nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
</natures>
<linkedResources>
<link>
<name>IOWrapper.cpp</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/IOWrapper.cpp</locationURI>
</link>
<link>
<name>IOWrapper.h</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/IOWrapper.h</locationURI>
</link>
<link>
<name>Jamfile</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/Jamfile</locationURI>
</link>
<link>
<name>Main.cpp</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/Main.cpp</locationURI>
</link>
<link>
<name>Main.h</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/Main.h</locationURI>
</link>
<link>
<name>TranslationAnalysis.cpp</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/TranslationAnalysis.cpp</locationURI>
</link>
<link>
<name>TranslationAnalysis.h</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/TranslationAnalysis.h</locationURI>
</link>
<link>
<name>bin</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>mbr.cpp</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/mbr.cpp</locationURI>
</link>
<link>
<name>mbr.h</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/mbr.h</locationURI>
</link>
<link>
<name>moses_chart</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/moses_chart</locationURI>
</link>
<link>
<name>bin/gcc-4.6</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/IOWrapper.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/IOWrapper.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/Main.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/Main.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/PhraseDictionary.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/PhraseDictionary.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/TranslationAnalysis.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/TranslationAnalysis.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/mbr.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/mbr.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/moses_chart</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-chart-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/moses_chart</locationURI>
</link>
</linkedResources>
</projectDescription>

View File

@ -3,11 +3,11 @@
<cproject storage_type_id="org.eclipse.cdt.core.XmlProjectDescriptionStorage">
<storageModule moduleId="org.eclipse.cdt.core.settings">
<cconfiguration id="cdt.managedbuild.config.gnu.macosx.exe.debug.341255150">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.config.gnu.macosx.exe.debug.341255150" moduleId="org.eclipse.cdt.core.settings" name="Debug">
<cconfiguration id="cdt.managedbuild.config.gnu.exe.debug.461114338">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.config.gnu.exe.debug.461114338" moduleId="org.eclipse.cdt.core.settings" name="Debug">
<externalSettings/>
<extensions>
<extension id="org.eclipse.cdt.core.MachO64" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
@ -16,78 +16,71 @@
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug,org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe" cleanCommand="rm -rf" description="" id="cdt.managedbuild.config.gnu.macosx.exe.debug.341255150" name="Debug" parent="cdt.managedbuild.config.gnu.macosx.exe.debug">
<folderInfo id="cdt.managedbuild.config.gnu.macosx.exe.debug.341255150." name="/" resourcePath="">
<toolChain id="cdt.managedbuild.toolchain.gnu.macosx.exe.debug.1679946908" name="MacOSX GCC" superClass="cdt.managedbuild.toolchain.gnu.macosx.exe.debug">
<targetPlatform id="cdt.managedbuild.target.gnu.platform.macosx.exe.debug.451172468" name="Debug Platform" superClass="cdt.managedbuild.target.gnu.platform.macosx.exe.debug"/>
<builder buildPath="${workspace_loc:/moses-cmd/Debug}" id="cdt.managedbuild.target.gnu.builder.macosx.exe.debug.1382407954" keepEnvironmentInBuildfile="false" managedBuildOn="true" name="Gnu Make Builder" superClass="cdt.managedbuild.target.gnu.builder.macosx.exe.debug"/>
<tool id="cdt.managedbuild.tool.macosx.c.linker.macosx.exe.debug.2118670613" name="MacOS X C Linker" superClass="cdt.managedbuild.tool.macosx.c.linker.macosx.exe.debug"/>
<tool id="cdt.managedbuild.tool.macosx.cpp.linker.macosx.exe.debug.84059290" name="MacOS X C++ Linker" superClass="cdt.managedbuild.tool.macosx.cpp.linker.macosx.exe.debug">
<option id="macosx.cpp.link.option.libs.1641794848" name="Libraries (-l)" superClass="macosx.cpp.link.option.libs" valueType="libs">
<configuration artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug,org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe" cleanCommand="rm -rf" description="" id="cdt.managedbuild.config.gnu.exe.debug.461114338" name="Debug" parent="cdt.managedbuild.config.gnu.exe.debug">
<folderInfo id="cdt.managedbuild.config.gnu.exe.debug.461114338." name="/" resourcePath="">
<toolChain id="cdt.managedbuild.toolchain.gnu.exe.debug.1896491482" name="Linux GCC" superClass="cdt.managedbuild.toolchain.gnu.exe.debug">
<targetPlatform id="cdt.managedbuild.target.gnu.platform.exe.debug.2144309834" name="Debug Platform" superClass="cdt.managedbuild.target.gnu.platform.exe.debug"/>
<builder buildPath="${workspace_loc:/moses-cmd/Debug}" id="cdt.managedbuild.target.gnu.builder.exe.debug.56664170" keepEnvironmentInBuildfile="false" managedBuildOn="true" name="Gnu Make Builder" superClass="cdt.managedbuild.target.gnu.builder.exe.debug"/>
<tool id="cdt.managedbuild.tool.gnu.archiver.base.1278274354" name="GCC Archiver" superClass="cdt.managedbuild.tool.gnu.archiver.base"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug.626095182" name="GCC C++ Compiler" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug">
<option id="gnu.cpp.compiler.exe.debug.option.optimization.level.2084031389" name="Optimization Level" superClass="gnu.cpp.compiler.exe.debug.option.optimization.level" value="gnu.cpp.compiler.optimization.level.none" valueType="enumerated"/>
<option id="gnu.cpp.compiler.exe.debug.option.debugging.level.811344734" name="Debug Level" superClass="gnu.cpp.compiler.exe.debug.option.debugging.level" value="gnu.cpp.compiler.debugging.level.max" valueType="enumerated"/>
<option id="gnu.cpp.compiler.option.include.paths.2118465683" name="Include paths (-I)" superClass="gnu.cpp.compiler.option.include.paths" valueType="includePath">
<listOptionValue builtIn="false" value="&quot;${workspace_loc}/../../moses/src&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc}/../..&quot;"/>
</option>
<option id="gnu.cpp.compiler.option.preprocessor.def.849384962" name="Defined symbols (-D)" superClass="gnu.cpp.compiler.option.preprocessor.def" valueType="definedSymbols">
<listOptionValue builtIn="false" value="WITH_THREADS"/>
</option>
<inputType id="cdt.managedbuild.tool.gnu.cpp.compiler.input.363379373" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.compiler.exe.debug.504208780" name="GCC C Compiler" superClass="cdt.managedbuild.tool.gnu.c.compiler.exe.debug">
<option defaultValue="gnu.c.optimization.level.none" id="gnu.c.compiler.exe.debug.option.optimization.level.782785840" name="Optimization Level" superClass="gnu.c.compiler.exe.debug.option.optimization.level" valueType="enumerated"/>
<option id="gnu.c.compiler.exe.debug.option.debugging.level.1722468661" name="Debug Level" superClass="gnu.c.compiler.exe.debug.option.debugging.level" value="gnu.c.debugging.level.max" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.c.compiler.input.860636318" superClass="cdt.managedbuild.tool.gnu.c.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.linker.exe.debug.2096997198" name="GCC C Linker" superClass="cdt.managedbuild.tool.gnu.c.linker.exe.debug"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.linker.exe.debug.1546774818" name="GCC C++ Linker" superClass="cdt.managedbuild.tool.gnu.cpp.linker.exe.debug">
<option id="gnu.cpp.link.option.paths.523170942" name="Library search path (-L)" superClass="gnu.cpp.link.option.paths" valueType="libPaths">
<listOptionValue builtIn="false" value="&quot;${workspace_loc:}/../../boost/lib&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:}/../../irstlm/lib&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:/moses}/Debug&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:/lm}/Debug&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:/OnDiskPt}/Debug&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:/util}/Debug&quot;"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc:/search}/Debug&quot;"/>
</option>
<option id="gnu.cpp.link.option.libs.998577284" name="Libraries (-l)" superClass="gnu.cpp.link.option.libs" valueType="libs">
<listOptionValue builtIn="false" value="moses"/>
<listOptionValue builtIn="false" value="rt"/>
<listOptionValue builtIn="false" value="misc"/>
<listOptionValue builtIn="false" value="dstruct"/>
<listOptionValue builtIn="false" value="oolm"/>
<listOptionValue builtIn="false" value="flm"/>
<listOptionValue builtIn="false" value="lattice"/>
<listOptionValue builtIn="false" value="search"/>
<listOptionValue builtIn="false" value="irstlm"/>
<listOptionValue builtIn="false" value="OnDiskPt"/>
<listOptionValue builtIn="false" value="z"/>
<listOptionValue builtIn="false" value="rt"/>
<listOptionValue builtIn="false" value="boost_system"/>
<listOptionValue builtIn="false" value="boost_thread"/>
<listOptionValue builtIn="false" value="lm"/>
<listOptionValue builtIn="false" value="util"/>
<listOptionValue builtIn="false" value="irstlm"/>
<listOptionValue builtIn="false" value="z"/>
<listOptionValue builtIn="false" value="boost_system"/>
<listOptionValue builtIn="false" value="boost_filesystem"/>
</option>
<option id="macosx.cpp.link.option.paths.1615268628" name="Library search path (-L)" superClass="macosx.cpp.link.option.paths" valueType="libPaths">
<listOptionValue builtIn="false" value="${workspace_loc:/moses}/Debug"/>
<listOptionValue builtIn="false" value="${workspace_loc:}/../../srilm/lib/i686-m64"/>
<listOptionValue builtIn="false" value="${workspace_loc:/OnDiskPt}/Debug"/>
<listOptionValue builtIn="false" value="${workspace_loc:/lm}/Debug"/>
<listOptionValue builtIn="false" value="${workspace_loc:/util}/Debug"/>
<listOptionValue builtIn="false" value="${workspace_loc:}/../../irstlm/lib"/>
</option>
<inputType id="cdt.managedbuild.tool.macosx.cpp.linker.input.412058804" superClass="cdt.managedbuild.tool.macosx.cpp.linker.input">
<inputType id="cdt.managedbuild.tool.gnu.cpp.linker.input.983725033" superClass="cdt.managedbuild.tool.gnu.cpp.linker.input">
<additionalInput kind="additionalinputdependency" paths="$(USER_OBJS)"/>
<additionalInput kind="additionalinput" paths="$(LIBS)"/>
</inputType>
</tool>
<tool id="cdt.managedbuild.tool.gnu.assembler.macosx.exe.debug.896987906" name="GCC Assembler" superClass="cdt.managedbuild.tool.gnu.assembler.macosx.exe.debug">
<inputType id="cdt.managedbuild.tool.gnu.assembler.input.187427846" superClass="cdt.managedbuild.tool.gnu.assembler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.archiver.macosx.base.2033983602" name="GCC Archiver" superClass="cdt.managedbuild.tool.gnu.archiver.macosx.base"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.compiler.macosx.exe.debug.1808603697" name="GCC C++ Compiler" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.macosx.exe.debug">
<option id="gnu.cpp.compilermacosx.exe.debug.option.optimization.level.2018824611" name="Optimization Level" superClass="gnu.cpp.compilermacosx.exe.debug.option.optimization.level" value="gnu.cpp.compiler.optimization.level.none" valueType="enumerated"/>
<option id="gnu.cpp.compiler.macosx.exe.debug.option.debugging.level.1176009559" name="Debug Level" superClass="gnu.cpp.compiler.macosx.exe.debug.option.debugging.level" value="gnu.cpp.compiler.debugging.level.max" valueType="enumerated"/>
<option id="gnu.cpp.compiler.option.include.paths.1024398579" name="Include paths (-I)" superClass="gnu.cpp.compiler.option.include.paths" valueType="includePath">
<listOptionValue builtIn="false" value="/opt/local/include"/>
<listOptionValue builtIn="false" value="${workspace_loc}/../../moses/src"/>
<listOptionValue builtIn="false" value="${workspace_loc}/../../"/>
</option>
<option id="gnu.cpp.compiler.option.preprocessor.def.491464216" name="Defined symbols (-D)" superClass="gnu.cpp.compiler.option.preprocessor.def" valueType="definedSymbols">
<listOptionValue builtIn="false" value="TRACE_ENABLE"/>
</option>
<inputType id="cdt.managedbuild.tool.gnu.cpp.compiler.input.240921565" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.compiler.macosx.exe.debug.1201400609" name="GCC C Compiler" superClass="cdt.managedbuild.tool.gnu.c.compiler.macosx.exe.debug">
<option defaultValue="gnu.c.optimization.level.none" id="gnu.c.compiler.macosx.exe.debug.option.optimization.level.748558048" name="Optimization Level" superClass="gnu.c.compiler.macosx.exe.debug.option.optimization.level" valueType="enumerated"/>
<option id="gnu.c.compiler.macosx.exe.debug.option.debugging.level.1014626120" name="Debug Level" superClass="gnu.c.compiler.macosx.exe.debug.option.debugging.level" value="gnu.c.debugging.level.max" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.c.compiler.input.2031799877" superClass="cdt.managedbuild.tool.gnu.c.compiler.input"/>
<tool id="cdt.managedbuild.tool.gnu.assembler.exe.debug.1646579979" name="GCC Assembler" superClass="cdt.managedbuild.tool.gnu.assembler.exe.debug">
<inputType id="cdt.managedbuild.tool.gnu.assembler.input.1206872262" superClass="cdt.managedbuild.tool.gnu.assembler.input"/>
</tool>
</toolChain>
</folderInfo>
<sourceEntries>
<entry excluding="LatticeMBRGrid.cpp" flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>
</sourceEntries>
</configuration>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.externalSettings"/>
</cconfiguration>
<cconfiguration id="cdt.managedbuild.config.macosx.exe.release.1916112479">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.config.macosx.exe.release.1916112479" moduleId="org.eclipse.cdt.core.settings" name="Release">
<cconfiguration id="cdt.managedbuild.config.gnu.exe.release.2121690436">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.config.gnu.exe.release.2121690436" moduleId="org.eclipse.cdt.core.settings" name="Release">
<externalSettings/>
<extensions>
<extension id="org.eclipse.cdt.core.MachO64" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
@ -96,31 +89,31 @@
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.release,org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe" cleanCommand="rm -rf" description="" id="cdt.managedbuild.config.macosx.exe.release.1916112479" name="Release" parent="cdt.managedbuild.config.macosx.exe.release">
<folderInfo id="cdt.managedbuild.config.macosx.exe.release.1916112479." name="/" resourcePath="">
<toolChain id="cdt.managedbuild.toolchain.gnu.macosx.exe.release.1528572752" name="MacOSX GCC" superClass="cdt.managedbuild.toolchain.gnu.macosx.exe.release">
<targetPlatform id="cdt.managedbuild.target.gnu.platform.macosx.exe.release.1976002706" name="Debug Platform" superClass="cdt.managedbuild.target.gnu.platform.macosx.exe.release"/>
<builder buildPath="${workspace_loc:/moses-cmd/Release}" id="cdt.managedbuild.target.gnu.builder.macosx.exe.release.1470455063" keepEnvironmentInBuildfile="false" managedBuildOn="true" name="Gnu Make Builder" superClass="cdt.managedbuild.target.gnu.builder.macosx.exe.release"/>
<tool id="cdt.managedbuild.tool.macosx.c.linker.macosx.exe.release.335066624" name="MacOS X C Linker" superClass="cdt.managedbuild.tool.macosx.c.linker.macosx.exe.release"/>
<tool id="cdt.managedbuild.tool.macosx.cpp.linker.macosx.exe.release.1173017253" name="MacOS X C++ Linker" superClass="cdt.managedbuild.tool.macosx.cpp.linker.macosx.exe.release">
<inputType id="cdt.managedbuild.tool.macosx.cpp.linker.input.675070011" superClass="cdt.managedbuild.tool.macosx.cpp.linker.input">
<configuration artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.release,org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe" cleanCommand="rm -rf" description="" id="cdt.managedbuild.config.gnu.exe.release.2121690436" name="Release" parent="cdt.managedbuild.config.gnu.exe.release">
<folderInfo id="cdt.managedbuild.config.gnu.exe.release.2121690436." name="/" resourcePath="">
<toolChain id="cdt.managedbuild.toolchain.gnu.exe.release.1577734572" name="Linux GCC" superClass="cdt.managedbuild.toolchain.gnu.exe.release">
<targetPlatform id="cdt.managedbuild.target.gnu.platform.exe.release.1535487925" name="Debug Platform" superClass="cdt.managedbuild.target.gnu.platform.exe.release"/>
<builder buildPath="${workspace_loc:/moses-cmd/Release}" id="cdt.managedbuild.target.gnu.builder.exe.release.2122426151" keepEnvironmentInBuildfile="false" managedBuildOn="true" name="Gnu Make Builder" superClass="cdt.managedbuild.target.gnu.builder.exe.release"/>
<tool id="cdt.managedbuild.tool.gnu.archiver.base.441254004" name="GCC Archiver" superClass="cdt.managedbuild.tool.gnu.archiver.base"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.compiler.exe.release.376987001" name="GCC C++ Compiler" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.exe.release">
<option id="gnu.cpp.compiler.exe.release.option.optimization.level.1276092407" name="Optimization Level" superClass="gnu.cpp.compiler.exe.release.option.optimization.level" value="gnu.cpp.compiler.optimization.level.most" valueType="enumerated"/>
<option id="gnu.cpp.compiler.exe.release.option.debugging.level.1794377625" name="Debug Level" superClass="gnu.cpp.compiler.exe.release.option.debugging.level" value="gnu.cpp.compiler.debugging.level.none" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.cpp.compiler.input.93276909" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.compiler.exe.release.1553350132" name="GCC C Compiler" superClass="cdt.managedbuild.tool.gnu.c.compiler.exe.release">
<option defaultValue="gnu.c.optimization.level.most" id="gnu.c.compiler.exe.release.option.optimization.level.93522212" name="Optimization Level" superClass="gnu.c.compiler.exe.release.option.optimization.level" valueType="enumerated"/>
<option id="gnu.c.compiler.exe.release.option.debugging.level.1860716465" name="Debug Level" superClass="gnu.c.compiler.exe.release.option.debugging.level" value="gnu.c.debugging.level.none" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.c.compiler.input.1508465135" superClass="cdt.managedbuild.tool.gnu.c.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.linker.exe.release.1658143889" name="GCC C Linker" superClass="cdt.managedbuild.tool.gnu.c.linker.exe.release"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.linker.exe.release.378727798" name="GCC C++ Linker" superClass="cdt.managedbuild.tool.gnu.cpp.linker.exe.release">
<inputType id="cdt.managedbuild.tool.gnu.cpp.linker.input.1701769819" superClass="cdt.managedbuild.tool.gnu.cpp.linker.input">
<additionalInput kind="additionalinputdependency" paths="$(USER_OBJS)"/>
<additionalInput kind="additionalinput" paths="$(LIBS)"/>
</inputType>
</tool>
<tool id="cdt.managedbuild.tool.gnu.assembler.macosx.exe.release.174060449" name="GCC Assembler" superClass="cdt.managedbuild.tool.gnu.assembler.macosx.exe.release">
<inputType id="cdt.managedbuild.tool.gnu.assembler.input.1018665338" superClass="cdt.managedbuild.tool.gnu.assembler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.archiver.macosx.base.440711813" name="GCC Archiver" superClass="cdt.managedbuild.tool.gnu.archiver.macosx.base"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.compiler.macosx.exe.release.1219375865" name="GCC C++ Compiler" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.macosx.exe.release">
<option id="gnu.cpp.compiler.macosx.exe.release.option.optimization.level.1940339824" name="Optimization Level" superClass="gnu.cpp.compiler.macosx.exe.release.option.optimization.level" value="gnu.cpp.compiler.optimization.level.most" valueType="enumerated"/>
<option id="gnu.cpp.compiler.macosx.exe.release.option.debugging.level.1648308879" name="Debug Level" superClass="gnu.cpp.compiler.macosx.exe.release.option.debugging.level" value="gnu.cpp.compiler.debugging.level.none" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.cpp.compiler.input.604224475" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.compiler.macosx.exe.release.759110223" name="GCC C Compiler" superClass="cdt.managedbuild.tool.gnu.c.compiler.macosx.exe.release">
<option defaultValue="gnu.c.optimization.level.most" id="gnu.c.compiler.macosx.exe.release.option.optimization.level.2105388501" name="Optimization Level" superClass="gnu.c.compiler.macosx.exe.release.option.optimization.level" valueType="enumerated"/>
<option id="gnu.c.compiler.macosx.exe.release.option.debugging.level.1692046412" name="Debug Level" superClass="gnu.c.compiler.macosx.exe.release.option.debugging.level" value="gnu.c.debugging.level.none" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.c.compiler.input.1452105399" superClass="cdt.managedbuild.tool.gnu.c.compiler.input"/>
<tool id="cdt.managedbuild.tool.gnu.assembler.exe.release.1550193619" name="GCC Assembler" superClass="cdt.managedbuild.tool.gnu.assembler.exe.release">
<inputType id="cdt.managedbuild.tool.gnu.assembler.input.1296687303" superClass="cdt.managedbuild.tool.gnu.assembler.input"/>
</tool>
</toolChain>
</folderInfo>
@ -130,25 +123,24 @@
</cconfiguration>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<project id="moses-cmd.cdt.managedbuild.target.macosx.exe.1016275955" name="Executable" projectType="cdt.managedbuild.target.macosx.exe"/>
<project id="moses-cmd.cdt.managedbuild.target.gnu.exe.1380109162" name="Executable" projectType="cdt.managedbuild.target.gnu.exe"/>
</storageModule>
<storageModule moduleId="scannerConfiguration">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId=""/>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.release.2121690436;cdt.managedbuild.config.gnu.exe.release.2121690436.;cdt.managedbuild.tool.gnu.c.compiler.exe.release.1553350132;cdt.managedbuild.tool.gnu.c.compiler.input.1508465135">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileC"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.debug.461114338;cdt.managedbuild.config.gnu.exe.debug.461114338.;cdt.managedbuild.tool.gnu.c.compiler.exe.debug.504208780;cdt.managedbuild.tool.gnu.c.compiler.input.860636318">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileC"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.debug.461114338;cdt.managedbuild.config.gnu.exe.debug.461114338.;cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug.626095182;cdt.managedbuild.tool.gnu.cpp.compiler.input.363379373">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileCPP"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.release.2121690436;cdt.managedbuild.config.gnu.exe.release.2121690436.;cdt.managedbuild.tool.gnu.cpp.compiler.exe.release.376987001;cdt.managedbuild.tool.gnu.cpp.compiler.input.93276909">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileCPP"/>
</scannerConfigBuildInfo>
</storageModule>
<storageModule moduleId="refreshScope" versionNumber="1">
<resource resourceType="PROJECT" workspacePath="/moses-cmd"/>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.make.core.buildtargets"/>
<storageModule moduleId="scannerConfiguration">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId=""/>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.macosx.exe.release.1916112479;cdt.managedbuild.config.macosx.exe.release.1916112479.;cdt.managedbuild.tool.gnu.c.compiler.macosx.exe.release.759110223;cdt.managedbuild.tool.gnu.c.compiler.input.1452105399">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileC"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.macosx.exe.debug.341255150;cdt.managedbuild.config.gnu.macosx.exe.debug.341255150.;cdt.managedbuild.tool.gnu.c.compiler.macosx.exe.debug.1201400609;cdt.managedbuild.tool.gnu.c.compiler.input.2031799877">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileC"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.macosx.exe.release.1916112479;cdt.managedbuild.config.macosx.exe.release.1916112479.;cdt.managedbuild.tool.gnu.cpp.compiler.macosx.exe.release.1219375865;cdt.managedbuild.tool.gnu.cpp.compiler.input.604224475">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileCPP"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.macosx.exe.debug.341255150;cdt.managedbuild.config.gnu.macosx.exe.debug.341255150.;cdt.managedbuild.tool.gnu.cpp.compiler.macosx.exe.debug.1808603697;cdt.managedbuild.tool.gnu.cpp.compiler.input.240921565">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileCPP"/>
</scannerConfigBuildInfo>
</storageModule>
</cproject>

View File

@ -95,11 +95,6 @@
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/IOWrapper.h</locationURI>
</link>
<link>
<name>IOWrapper.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/IOWrapper.o</locationURI>
</link>
<link>
<name>Jamfile</name>
<type>1</type>
@ -115,21 +110,6 @@
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/LatticeMBR.h</locationURI>
</link>
<link>
<name>LatticeMBR.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/LatticeMBR.o</locationURI>
</link>
<link>
<name>LatticeMBRGrid.cpp</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/LatticeMBRGrid.cpp</locationURI>
</link>
<link>
<name>LatticeMBRGrid.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/LatticeMBRGrid.o</locationURI>
</link>
<link>
<name>Main.cpp</name>
<type>1</type>
@ -140,11 +120,6 @@
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/Main.h</locationURI>
</link>
<link>
<name>Main.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/Main.o</locationURI>
</link>
<link>
<name>TranslationAnalysis.cpp</name>
<type>1</type>
@ -156,19 +131,9 @@
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/TranslationAnalysis.h</locationURI>
</link>
<link>
<name>TranslationAnalysis.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/TranslationAnalysis.o</locationURI>
</link>
<link>
<name>libkenlm.dylib</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/libkenlm.dylib</locationURI>
</link>
<link>
<name>libkenutil.dylib</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/libkenutil.dylib</locationURI>
<name>bin</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>lmbrgrid</name>
@ -185,15 +150,80 @@
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/mbr.h</locationURI>
</link>
<link>
<name>mbr.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/mbr.o</locationURI>
</link>
<link>
<name>moses</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/moses</locationURI>
</link>
<link>
<name>bin/gcc-4.6</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/IOWrapper.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/IOWrapper.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/LatticeMBR.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/LatticeMBR.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/LatticeMBRGrid.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/LatticeMBRGrid.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/Main.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/Main.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/PhraseDictionary.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/PhraseDictionary.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/TranslationAnalysis.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/TranslationAnalysis.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/lmbrgrid</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/lmbrgrid</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/mbr.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/mbr.o</locationURI>
</link>
<link>
<name>bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/moses</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/moses-cmd/src/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/moses</locationURI>
</link>
</linkedResources>
</projectDescription>

View File

@ -31,13 +31,15 @@
<option id="gnu.cpp.compiler.exe.debug.option.optimization.level.1759650532" name="Optimization Level" superClass="gnu.cpp.compiler.exe.debug.option.optimization.level" value="gnu.cpp.compiler.optimization.level.none" valueType="enumerated"/>
<option id="gnu.cpp.compiler.exe.debug.option.debugging.level.2123672332" name="Debug Level" superClass="gnu.cpp.compiler.exe.debug.option.debugging.level" value="gnu.cpp.compiler.debugging.level.max" valueType="enumerated"/>
<option id="gnu.cpp.compiler.option.include.paths.57896781" name="Include paths (-I)" superClass="gnu.cpp.compiler.option.include.paths" valueType="includePath">
<listOptionValue builtIn="false" value="/opt/local/include/"/>
<listOptionValue builtIn="false" value="&quot;${workspace_loc}/../../boost/include&quot;"/>
<listOptionValue builtIn="false" value="${workspace_loc}/../../irstlm/include"/>
<listOptionValue builtIn="false" value="${workspace_loc}/../../srilm/include"/>
<listOptionValue builtIn="false" value="${workspace_loc}/../../moses/src"/>
<listOptionValue builtIn="false" value="${workspace_loc}/../../"/>
</option>
<option id="gnu.cpp.compiler.option.preprocessor.def.752586397" name="Defined symbols (-D)" superClass="gnu.cpp.compiler.option.preprocessor.def" valueType="definedSymbols">
<listOptionValue builtIn="false" value="IS_ECLIPSE"/>
<listOptionValue builtIn="false" value="WITH_THREADS"/>
<listOptionValue builtIn="false" value="KENLM_MAX_ORDER=7"/>
<listOptionValue builtIn="false" value="TRACE_ENABLE"/>
<listOptionValue builtIn="false" value="LM_IRST"/>
@ -63,18 +65,8 @@
</tool>
</toolChain>
</folderInfo>
<fileInfo id="cdt.managedbuild.config.gnu.exe.debug.656913512.558758254" name="SyntacticLanguageModelState.h" rcbsApplicability="disable" resourcePath="SyntacticLanguageModelState.h" toolsToInvoke=""/>
<fileInfo id="cdt.managedbuild.config.gnu.exe.debug.656913512.1930327037" name="SyntacticLanguageModelFiles.h" rcbsApplicability="disable" resourcePath="SyntacticLanguageModelFiles.h" toolsToInvoke=""/>
<fileInfo id="cdt.managedbuild.config.gnu.exe.debug.656913512.1751563578" name="PhraseTableCreator.cpp" rcbsApplicability="disable" resourcePath="CompactPT/PhraseTableCreator.cpp" toolsToInvoke="cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug.1774992327.1652631861">
<tool id="cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug.1774992327.1652631861" name="GCC C++ Compiler" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug.1774992327"/>
</fileInfo>
<fileInfo id="cdt.managedbuild.config.gnu.exe.debug.656913512.1174630266" name="Rand.h" rcbsApplicability="disable" resourcePath="LM/Rand.h" toolsToInvoke=""/>
<fileInfo id="cdt.managedbuild.config.gnu.exe.debug.656913512.707830535" name="SRI.h" rcbsApplicability="disable" resourcePath="LM/SRI.h" toolsToInvoke=""/>
<fileInfo id="cdt.managedbuild.config.gnu.exe.debug.656913512.160366559" name="LDHT.h" rcbsApplicability="disable" resourcePath="LM/LDHT.h" toolsToInvoke=""/>
<fileInfo id="cdt.managedbuild.config.gnu.exe.debug.656913512.622077510" name="ParallelBackoff.h" rcbsApplicability="disable" resourcePath="LM/ParallelBackoff.h" toolsToInvoke=""/>
<fileInfo id="cdt.managedbuild.config.gnu.exe.debug.656913512.1084194539" name="SyntacticLanguageModel.h" rcbsApplicability="disable" resourcePath="SyntacticLanguageModel.h" toolsToInvoke=""/>
<sourceEntries>
<entry excluding="CompactPT/PhraseTableCreator.cpp|CompactPT/LexicalReorderingTableCreator.cpp|LM/SRI.h|LM/SRI.cpp|SyntacticLanguageModelState.h|SyntacticLanguageModelFiles.h|SyntacticLanguageModel.h|SyntacticLanguageModel.cpp|LM/ParallelBackoff.h|LM/ParallelBackoff.cpp|LM/Rand.h|LM/Rand.cpp|LM/LDHT.h|LM/LDHT.cpp" flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>
<entry excluding="src/SyntacticLanguageModelState.h|src/SyntacticLanguageModelFiles.h|src/SyntacticLanguageModel.h|src/SyntacticLanguageModel.cpp|src/LM/SRI.h|src/LM/SRI.cpp|src/LM/Rand.h|src/LM/Rand.cpp|src/LM/LDHT.h|src/LM/LDHT.cpp|LM/SRI.h|LM/SRI.cpp|SyntacticLanguageModelState.h|SyntacticLanguageModelFiles.h|SyntacticLanguageModel.h|SyntacticLanguageModel.cpp|LM/ParallelBackoff.h|LM/ParallelBackoff.cpp|LM/Rand.h|LM/Rand.cpp|LM/LDHT.h|LM/LDHT.cpp" flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>
</sourceEntries>
</configuration>
</storageModule>

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,124 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?fileVersion 4.0.0?>
<cproject storage_type_id="org.eclipse.cdt.core.XmlProjectDescriptionStorage">
<storageModule moduleId="org.eclipse.cdt.core.settings">
<cconfiguration id="cdt.managedbuild.config.gnu.exe.debug.722547278">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.config.gnu.exe.debug.722547278" moduleId="org.eclipse.cdt.core.settings" name="Debug">
<externalSettings>
<externalSetting>
<entry flags="VALUE_WORKSPACE_PATH" kind="includePath" name="/search"/>
<entry flags="VALUE_WORKSPACE_PATH" kind="libraryPath" name="/search/Debug"/>
<entry flags="RESOLVED" kind="libraryFile" name="search"/>
</externalSetting>
</externalSettings>
<extensions>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GASErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactExtension="a" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.staticLib" buildProperties="org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug,org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.staticLib" cleanCommand="rm -rf" description="" id="cdt.managedbuild.config.gnu.exe.debug.722547278" name="Debug" parent="cdt.managedbuild.config.gnu.exe.debug">
<folderInfo id="cdt.managedbuild.config.gnu.exe.debug.722547278." name="/" resourcePath="">
<toolChain id="cdt.managedbuild.toolchain.gnu.exe.debug.1512691763" name="Linux GCC" superClass="cdt.managedbuild.toolchain.gnu.exe.debug">
<targetPlatform id="cdt.managedbuild.target.gnu.platform.exe.debug.633526059" name="Debug Platform" superClass="cdt.managedbuild.target.gnu.platform.exe.debug"/>
<builder buildPath="${workspace_loc:/search/Debug}" id="cdt.managedbuild.target.gnu.builder.exe.debug.164367197" keepEnvironmentInBuildfile="false" managedBuildOn="true" name="Gnu Make Builder" superClass="cdt.managedbuild.target.gnu.builder.exe.debug"/>
<tool id="cdt.managedbuild.tool.gnu.archiver.base.854512708" name="GCC Archiver" superClass="cdt.managedbuild.tool.gnu.archiver.base"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug.1096845166" name="GCC C++ Compiler" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug">
<option id="gnu.cpp.compiler.exe.debug.option.optimization.level.240381177" name="Optimization Level" superClass="gnu.cpp.compiler.exe.debug.option.optimization.level" value="gnu.cpp.compiler.optimization.level.none" valueType="enumerated"/>
<option id="gnu.cpp.compiler.exe.debug.option.debugging.level.275467568" name="Debug Level" superClass="gnu.cpp.compiler.exe.debug.option.debugging.level" value="gnu.cpp.compiler.debugging.level.max" valueType="enumerated"/>
<option id="gnu.cpp.compiler.option.include.paths.1356228283" superClass="gnu.cpp.compiler.option.include.paths" valueType="includePath">
<listOptionValue builtIn="false" value="&quot;${workspace_loc}/../../&quot;"/>
</option>
<inputType id="cdt.managedbuild.tool.gnu.cpp.compiler.input.1099209487" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.compiler.exe.debug.1160060999" name="GCC C Compiler" superClass="cdt.managedbuild.tool.gnu.c.compiler.exe.debug">
<option defaultValue="gnu.c.optimization.level.none" id="gnu.c.compiler.exe.debug.option.optimization.level.1646958507" name="Optimization Level" superClass="gnu.c.compiler.exe.debug.option.optimization.level" valueType="enumerated"/>
<option id="gnu.c.compiler.exe.debug.option.debugging.level.770533945" name="Debug Level" superClass="gnu.c.compiler.exe.debug.option.debugging.level" value="gnu.c.debugging.level.max" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.c.compiler.input.448849586" superClass="cdt.managedbuild.tool.gnu.c.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.linker.exe.debug.772519271" name="GCC C Linker" superClass="cdt.managedbuild.tool.gnu.c.linker.exe.debug"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.linker.exe.debug.216274333" name="GCC C++ Linker" superClass="cdt.managedbuild.tool.gnu.cpp.linker.exe.debug">
<inputType id="cdt.managedbuild.tool.gnu.cpp.linker.input.1503482972" superClass="cdt.managedbuild.tool.gnu.cpp.linker.input">
<additionalInput kind="additionalinputdependency" paths="$(USER_OBJS)"/>
<additionalInput kind="additionalinput" paths="$(LIBS)"/>
</inputType>
</tool>
<tool id="cdt.managedbuild.tool.gnu.assembler.exe.debug.462899337" name="GCC Assembler" superClass="cdt.managedbuild.tool.gnu.assembler.exe.debug">
<inputType id="cdt.managedbuild.tool.gnu.assembler.input.855395816" superClass="cdt.managedbuild.tool.gnu.assembler.input"/>
</tool>
</toolChain>
</folderInfo>
</configuration>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.externalSettings"/>
</cconfiguration>
<cconfiguration id="cdt.managedbuild.config.gnu.exe.release.443554127">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.config.gnu.exe.release.443554127" moduleId="org.eclipse.cdt.core.settings" name="Release">
<externalSettings/>
<extensions>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GASErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GLDErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.release,org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe" cleanCommand="rm -rf" description="" id="cdt.managedbuild.config.gnu.exe.release.443554127" name="Release" parent="cdt.managedbuild.config.gnu.exe.release">
<folderInfo id="cdt.managedbuild.config.gnu.exe.release.443554127." name="/" resourcePath="">
<toolChain id="cdt.managedbuild.toolchain.gnu.exe.release.2087651883" name="Linux GCC" superClass="cdt.managedbuild.toolchain.gnu.exe.release">
<targetPlatform id="cdt.managedbuild.target.gnu.platform.exe.release.1177425262" name="Debug Platform" superClass="cdt.managedbuild.target.gnu.platform.exe.release"/>
<builder buildPath="${workspace_loc:/search/Release}" id="cdt.managedbuild.target.gnu.builder.exe.release.1508486313" keepEnvironmentInBuildfile="false" managedBuildOn="true" name="Gnu Make Builder" superClass="cdt.managedbuild.target.gnu.builder.exe.release"/>
<tool id="cdt.managedbuild.tool.gnu.archiver.base.1570343986" name="GCC Archiver" superClass="cdt.managedbuild.tool.gnu.archiver.base"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.compiler.exe.release.156371039" name="GCC C++ Compiler" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.exe.release">
<option id="gnu.cpp.compiler.exe.release.option.optimization.level.659087940" name="Optimization Level" superClass="gnu.cpp.compiler.exe.release.option.optimization.level" value="gnu.cpp.compiler.optimization.level.most" valueType="enumerated"/>
<option id="gnu.cpp.compiler.exe.release.option.debugging.level.1733942639" name="Debug Level" superClass="gnu.cpp.compiler.exe.release.option.debugging.level" value="gnu.cpp.compiler.debugging.level.none" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.cpp.compiler.input.1227769637" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.compiler.exe.release.453047218" name="GCC C Compiler" superClass="cdt.managedbuild.tool.gnu.c.compiler.exe.release">
<option defaultValue="gnu.c.optimization.level.most" id="gnu.c.compiler.exe.release.option.optimization.level.316944308" name="Optimization Level" superClass="gnu.c.compiler.exe.release.option.optimization.level" valueType="enumerated"/>
<option id="gnu.c.compiler.exe.release.option.debugging.level.1549298576" name="Debug Level" superClass="gnu.c.compiler.exe.release.option.debugging.level" value="gnu.c.debugging.level.none" valueType="enumerated"/>
<inputType id="cdt.managedbuild.tool.gnu.c.compiler.input.1350942207" superClass="cdt.managedbuild.tool.gnu.c.compiler.input"/>
</tool>
<tool id="cdt.managedbuild.tool.gnu.c.linker.exe.release.592800732" name="GCC C Linker" superClass="cdt.managedbuild.tool.gnu.c.linker.exe.release"/>
<tool id="cdt.managedbuild.tool.gnu.cpp.linker.exe.release.418411249" name="GCC C++ Linker" superClass="cdt.managedbuild.tool.gnu.cpp.linker.exe.release">
<inputType id="cdt.managedbuild.tool.gnu.cpp.linker.input.280173206" superClass="cdt.managedbuild.tool.gnu.cpp.linker.input">
<additionalInput kind="additionalinputdependency" paths="$(USER_OBJS)"/>
<additionalInput kind="additionalinput" paths="$(LIBS)"/>
</inputType>
</tool>
<tool id="cdt.managedbuild.tool.gnu.assembler.exe.release.1614982995" name="GCC Assembler" superClass="cdt.managedbuild.tool.gnu.assembler.exe.release">
<inputType id="cdt.managedbuild.tool.gnu.assembler.input.1687870954" superClass="cdt.managedbuild.tool.gnu.assembler.input"/>
</tool>
</toolChain>
</folderInfo>
</configuration>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.externalSettings"/>
</cconfiguration>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<project id="search.cdt.managedbuild.target.gnu.exe.775449486" name="Executable" projectType="cdt.managedbuild.target.gnu.exe"/>
</storageModule>
<storageModule moduleId="scannerConfiguration">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId=""/>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.release.443554127;cdt.managedbuild.config.gnu.exe.release.443554127.;cdt.managedbuild.tool.gnu.cpp.compiler.exe.release.156371039;cdt.managedbuild.tool.gnu.cpp.compiler.input.1227769637">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileCPP"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.release.443554127;cdt.managedbuild.config.gnu.exe.release.443554127.;cdt.managedbuild.tool.gnu.c.compiler.exe.release.453047218;cdt.managedbuild.tool.gnu.c.compiler.input.1350942207">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileC"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.debug.722547278;cdt.managedbuild.config.gnu.exe.debug.722547278.;cdt.managedbuild.tool.gnu.cpp.compiler.exe.debug.1096845166;cdt.managedbuild.tool.gnu.cpp.compiler.input.1099209487">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileCPP"/>
</scannerConfigBuildInfo>
<scannerConfigBuildInfo instanceId="cdt.managedbuild.config.gnu.exe.debug.722547278;cdt.managedbuild.config.gnu.exe.debug.722547278.;cdt.managedbuild.tool.gnu.c.compiler.exe.debug.1160060999;cdt.managedbuild.tool.gnu.c.compiler.input.448849586">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId="org.eclipse.cdt.managedbuilder.core.GCCManagedMakePerProjectProfileC"/>
</scannerConfigBuildInfo>
</storageModule>
<storageModule moduleId="refreshScope"/>
</cproject>

View File

@ -0,0 +1,325 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>search</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
<triggers>clean,full,incremental,</triggers>
<arguments>
<dictionary>
<key>?name?</key>
<value></value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.append_environment</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.autoBuildTarget</key>
<value>all</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.buildArguments</key>
<value></value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.buildCommand</key>
<value>make</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.buildLocation</key>
<value>${workspace_loc:/search/Debug}</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.cleanBuildTarget</key>
<value>clean</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.contents</key>
<value>org.eclipse.cdt.make.core.activeConfigSettings</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.enableAutoBuild</key>
<value>false</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.enableCleanBuild</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.enableFullBuild</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.fullBuildTarget</key>
<value>all</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.stopOnError</key>
<value>true</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.useDefaultBuildCmd</key>
<value>true</value>
</dictionary>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
<triggers>full,incremental,</triggers>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.cdt.core.cnature</nature>
<nature>org.eclipse.cdt.core.ccnature</nature>
<nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
<nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
</natures>
<linkedResources>
<link>
<name>search</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>search/Jamfile</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/Jamfile</locationURI>
</link>
<link>
<name>search/arity.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/arity.hh</locationURI>
</link>
<link>
<name>search/bin</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>search/config.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/config.hh</locationURI>
</link>
<link>
<name>search/context.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/context.hh</locationURI>
</link>
<link>
<name>search/edge.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/edge.hh</locationURI>
</link>
<link>
<name>search/edge_generator.cc</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/edge_generator.cc</locationURI>
</link>
<link>
<name>search/edge_generator.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/edge_generator.hh</locationURI>
</link>
<link>
<name>search/edge_queue.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/edge_queue.hh</locationURI>
</link>
<link>
<name>search/final.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/final.hh</locationURI>
</link>
<link>
<name>search/note.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/note.hh</locationURI>
</link>
<link>
<name>search/rule.cc</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/rule.cc</locationURI>
</link>
<link>
<name>search/rule.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/rule.hh</locationURI>
</link>
<link>
<name>search/source.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/source.hh</locationURI>
</link>
<link>
<name>search/types.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/types.hh</locationURI>
</link>
<link>
<name>search/vertex.cc</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/vertex.cc</locationURI>
</link>
<link>
<name>search/vertex.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/vertex.hh</locationURI>
</link>
<link>
<name>search/vertex_generator.cc</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/vertex_generator.cc</locationURI>
</link>
<link>
<name>search/vertex_generator.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/vertex_generator.hh</locationURI>
</link>
<link>
<name>search/weights.cc</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/weights.cc</locationURI>
</link>
<link>
<name>search/weights.hh</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/weights.hh</locationURI>
</link>
<link>
<name>search/weights_test.cc</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/weights_test.cc</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/edge_generator.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/edge_generator.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/edge_queue.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/edge_queue.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/libsearch.a</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/libsearch.a</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/rule.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/rule.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi</name>
<type>2</type>
<locationURI>virtual:/virtual</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/vertex.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/vertex.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/vertex_generator.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/vertex_generator.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/weights.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/weights.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/weights_test</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/weights_test</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/weights_test.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/weights_test.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/weights_test.passed</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/weights_test.passed</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/edge_generator.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/edge_generator.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/edge_queue.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/edge_queue.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/libsearch.a</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/libsearch.a</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/rule.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/rule.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/vertex.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/vertex.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/vertex_generator.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/vertex_generator.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/weights.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/weights.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/weights_test</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/weights_test</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/weights_test.o</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/weights_test.o</locationURI>
</link>
<link>
<name>search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/weights_test.passed</name>
<type>1</type>
<locationURI>PARENT-3-PROJECT_LOC/search/bin/gcc-4.6/release/debug-symbols-on/link-static/threading-multi/weights_test.passed</locationURI>
</link>
</linkedResources>
</projectDescription>

View File

@ -46,6 +46,7 @@
</option>
<option id="gnu.cpp.compiler.option.preprocessor.def.1952961175" name="Defined symbols (-D)" superClass="gnu.cpp.compiler.option.preprocessor.def" valueType="definedSymbols">
<listOptionValue builtIn="false" value="TRACE_ENABLE"/>
<listOptionValue builtIn="false" value="WITH_THREADS"/>
</option>
<inputType id="cdt.managedbuild.tool.gnu.cpp.compiler.input.1420621104" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.input"/>
</tool>

View File

@ -175,8 +175,7 @@ public:
stringstream out, graphInfo, transCollOpts;
map<string, xmlrpc_c::value> retData;
SearchAlgorithm searchAlgorithm = staticData.GetSearchAlgorithm();
if (searchAlgorithm == ChartDecoding) {
if (staticData.IsChart()) {
TreeInput tinput;
const vector<FactorType> &inputFactorOrder =
staticData.GetInputFactorOrder();

View File

@ -0,0 +1,74 @@
#!/usr/bin/perl -w
# XML-RPC client for a running Moses server: reads one source sentence per
# line from STDIN, pushes a fixed weight vector to the server, requests a
# translation with its search graph, and writes
#   translations<suffix>.out  - one translation per input line
#   searchGraph<suffix>.out   - search-graph nodes, one per line
# where <suffix> is the optional first command-line argument.
use strict;
use Frontier::Client;
# Optional suffix distinguishes output files of concurrent runs.
my $output_suffix = $ARGV[0];
$output_suffix = "" if (not $output_suffix);
# Moses server is assumed to listen on localhost at this fixed port.
my $port = "50015";
my $url = "http://localhost:".$port."/RPC2";
my $server = Frontier::Client->new('url' => $url, 'encoding' => 'UTF-8');
# Set to 1 to print per-sentence timing/progress messages to STDOUT.
my $verbose=0;
my $translations="translations$output_suffix.out";
open TR, ">:utf8", $translations;
my $sg_out="searchGraph$output_suffix.out";
open SG, ">:utf8", $sg_out;
# $i is the running sentence id, also passed to the server per request.
my $i=0;
while (my $text = <STDIN>)
{
my $date = `date`;
chop($date);
print "[$date] sentence $i: translate\n" if $verbose;
# update weights: the same hard-coded core weight vector is re-sent to the
# server before every sentence via the "setWeights" RPC method.
my $core_weights = "0.0314787,-0.138354,1,0.0867223,0.0349965,0.104774,0.0607203,0.0516889,0.113694,0.0947218,0.0642702,0.0385324,0.0560749,0.0434684,0.0805031";
#my $core_weights = "0.0314787,-0.138354,1,0.0867223,0.0349965,0.104774,0.0607203,0.0516889,0.113694,0.0947218,0.0642702,0.0385324,0.0560749,0.0434684,0.0805031,0";
#my $sparse_weights = "pp_dummy~dummy=0.001";
my $sparse_weights = "";
my %param = ("core-weights" => $core_weights, "sparse-weights" => $sparse_weights);
$server->call("setWeights",(\%param));
# Alternative (commented out): additively update weights via "addWeights".
#my $core_weight_update = "0.1,0.1,0,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1";
#my $sparse_weight_update = "pp_dummy~dummy=0.1";
#my %param_update = ("core-weights" => $core_weight_update, "sparse-weights" => $sparse_weight_update);
#$server->call("addWeights",(\%param_update));
# translate: "sg" => "true" asks the server to return the search graph
# alongside the translation text.
#my %param = ("text" => $server->string($SENTENCE[$i]) , "sg" => "true");
%param = ("text" => $text, "id" => $i, "sg" => "true");
my $result = $server->call("translate",(\%param));
$date = `date`;
chop($date);
print "[$date] sentence $i: process translation\n" if $verbose;
# process translation
my $mt_output = $result->{'text'};
$mt_output =~ s/\|\S+//g; # no multiple factors, only first
print "sentence $i >> $translations \n";
print TR $mt_output."\n";
# print out search graph: one line per node, prefixed with the sentence id.
print "sentence $i >> $sg_out \n";
my $sg_ref = $result->{'sg'};
foreach my $sgn (@$sg_ref) {
# print out in extended format
if ($sgn->{hyp} eq 0) {
# Node 0 is the initial hypothesis: it carries only the forward
# link and estimated future score, no back pointer or phrases.
print SG "$i hyp=$sgn->{'hyp'} stack=$sgn->{'stack'} forward=$sgn->{'forward'} fscore=$sgn->{'fscore'} \n";
}
else {
# Regular node: back pointer, scores, coverage span and the
# source/target phrase pair; "recombined" is only present when
# the hypothesis was merged into an equivalent one.
print SG "$i hyp=$sgn->{'hyp'} stack=$sgn->{'stack'} back=$sgn->{'back'} score=$sgn->{'score'} transition=$sgn->{'transition'} ";
if ($sgn->{"recombined"}) {
print SG "recombined=$sgn->{'recombined'} ";
}
print SG "forward=$sgn->{'forward'} fscore=$sgn->{'fscore'} covered=$sgn->{'cover-start'}-$sgn->{'cover-end'} ";
print SG "scores=\"$sgn->{'scores'}\" src-phrase=\"$sgn->{'src-phrase'}\" tgt-phrase=\"$sgn->{'tgt-phrase'}\" \n";
}
}
++$i;
}
close(SG);

0
contrib/web/bin/detokenizer.perl Normal file → Executable file
View File

0
contrib/web/bin/start-daemon-cluster.pl Normal file → Executable file
View File

0
contrib/web/bin/tokenizer.perl Normal file → Executable file
View File

View File

@ -33,10 +33,10 @@ toy-data = $moses-script-dir/ems/example/data
### basic tools
#
# moses decoder
decoder = $moses-src-dir/dist/bin/moses
decoder = $moses-src-dir/bin/moses
# conversion of phrase table into binary on-disk format
ttable-binarizer = $moses-src-dir/dist/bin/processPhraseTable
ttable-binarizer = $moses-src-dir/bin/processPhraseTable
# conversion of rule table into binary on-disk format
#ttable-binarizer = "$moses-src-dir/CreateOnDisk/src/CreateOnDiskPt 1 1 5 100 2"

0
jam-files/engine/bump_version.py Normal file → Executable file
View File

View File

@ -29,7 +29,7 @@ ldflags = [ os.environ "LDFLAGS" ] ;
#Run g++ with empty main and these arguments to see if it passes.
rule test_flags ( flags * ) {
flags = $(cxxflags) $(ldflags) $(flags) ;
local cmd = "bash -c \"g++ "$(flags:J=" ")" -x c++ - <<<'int main() {}' -o /dev/null >/dev/null 2>/dev/null\"" ;
local cmd = "bash -c \"g++ "$(flags:J=" ")" -x c++ - <<<'int main() {}' -o $(TOP)/dummy >/dev/null 2>/dev/null && rm $(TOP)/dummy 2>/dev/null\"" ;
local ret = [ SHELL $(cmd) : exit-status ] ;
if --debug-configuration in [ modules.peek : ARGV ] {
echo $(cmd) ;
@ -63,7 +63,7 @@ requirements = ;
FORCE-STATIC = [ option.get "static" : : "yes" ] ;
if $(FORCE-STATIC) {
requirements += <runtime-link>static ;
requirements += <link>static <runtime-link>static ;
}
#Determine if a library can be compiled statically.
@ -260,3 +260,5 @@ if [ option.get "sanity-test" : : "yes" ] {
EXIT "Bad" : 1 ;
}
}
use-project /top : . ;

View File

@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

View File

@ -1,165 +0,0 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

View File

@ -1,20 +1,12 @@
# If you need higher order, change this option
# Having this limit means that State can be
# (KENLM_MAX_ORDER - 1) * sizeof(float) bytes instead of
# sizeof(float*) + (KENLM_MAX_ORDER - 1) * sizeof(float) + malloc overhead
max-order = [ option.get "max-kenlm-order" : 6 : 6 ] ;
if ( $(max-order) != 6 ) {
echo "Setting KenLM maximum n-gram order to $(max-order)" ;
}
max-order = <define>KENLM_MAX_ORDER=$(max-order) ;
lib kenlm : bhiksha.cc binary_format.cc config.cc lm_exception.cc model.cc quantize.cc read_arpa.cc search_hashed.cc search_trie.cc trie.cc trie_sort.cc value_build.cc virtual_interface.cc vocab.cc ../util//kenutil : <include>.. $(max-order) : : <include>.. <library>../util//kenutil $(max-order) ;
lib kenlm : bhiksha.cc binary_format.cc config.cc lm_exception.cc model.cc quantize.cc read_arpa.cc search_hashed.cc search_trie.cc trie.cc trie_sort.cc value_build.cc virtual_interface.cc vocab.cc ../util//kenutil : <include>.. : : <include>.. <library>../util//kenutil ;
import testing ;
run left_test.cc ../util//kenutil kenlm ..//boost_unit_test_framework : : test.arpa ;
run model_test.cc ../util//kenutil kenlm ..//boost_unit_test_framework : : test.arpa test_nounk.arpa ;
run left_test.cc ../util//kenutil kenlm /top//boost_unit_test_framework : : test.arpa ;
run model_test.cc ../util//kenutil kenlm /top//boost_unit_test_framework : : test.arpa test_nounk.arpa ;
exe query : ngram_query.cc kenlm ../util//kenutil ;
exe build_binary : build_binary.cc kenlm ../util//kenutil ;
exe kenlm_max_order : max_order.cc : <include>.. $(max-order) ;
exe kenlm_max_order : max_order.cc : <include>.. ;
alias programs : query build_binary kenlm_max_order ;

View File

@ -1,12 +0,0 @@
Avenue code is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Avenue code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with Avenue code. If not, see <http://www.gnu.org/licenses/>.

View File

@ -1,44 +0,0 @@
Language model inference code by Kenneth Heafield <kenlm at kheafield.com>
THE GIT REPOSITORY https://github.com/kpu/kenlm IS WHERE ACTIVE DEVELOPMENT HAPPENS. IT MAY RETURN SILENTLY WRONG ANSWERS OR BE SILENTLY BINARY-INCOMPATIBLE WITH STABLE RELEASES.
The website http://kheafield.com/code/kenlm/ has more documentation. If you're a decoder developer, please download the latest version from there instead of copying from another decoder.
Two data structures are supported: probing and trie. Probing is a probing hash table with keys that are 64-bit hashes of n-grams and floats as values. Trie is a fairly standard trie but with bit-level packing so it uses the minimum number of bits to store word indices and pointers. The trie node entries are sorted by word index. Probing is the fastest and uses the most memory. Trie uses the least memory and is a bit slower.
With trie, resident memory is 58% of IRST's smallest version and 21% of SRI's compact version. Simultaneously, trie CPU's use is 81% of IRST's fastest version and 84% of SRI's fast version. KenLM's probing hash table implementation goes even faster at the expense of using more memory. See http://kheafield.com/code/kenlm/benchmark/.
Binary format via mmap is supported. Run ./build_binary to make one then pass the binary file name to the appropriate Model constructor.
PLATFORMS
murmur_hash.cc and bit_packing.hh perform unaligned reads and writes that make the code architecture-dependent.
It has been successfully tested on x86_64, x86, and PPC64.
ARM support is reportedly working, at least on the iPhone, but I cannot test this.
Runs on Linux, OS X, Cygwin, and MinGW.
Hideo Okuma and Tomoyuki Yoshimura from NICT contributed ports to ARM and MinGW. Hieu Hoang is working on a native Windows port.
DECODER DEVELOPERS
- I recommend copying the code and distributing it with your decoder. However, please send improvements upstream as indicated in CONTRIBUTORS.
- It does not depend on Boost or ICU. If you use ICU, define HAVE_ICU in util/have.hh (uncomment the line) to avoid a name conflict. Defining HAVE_BOOST will let you hash StringPiece.
- Most people have zlib. If you don't want to depend on that, comment out #define HAVE_ZLIB in util/have.hh. This will disable loading gzipped ARPA files.
- There are two build systems: compile.sh and Jamroot+Jamfile. They're pretty simple and are intended to be reimplemented in your build system.
- Use either the interface in lm/model.hh or lm/virtual_interface.hh. Interface documentation is in comments of lm/virtual_interface.hh and lm/model.hh.
- There are several possible data structures in model.hh. Use RecognizeBinary in binary_format.hh to determine which one a user has provided. You probably already implement feature functions as an abstract virtual base class with several children. I suggest you co-opt this existing virtual dispatch by templatizing the language model feature implementation on the KenLM model identified by RecognizeBinary. This is the strategy used in Moses and cdec.
- See lm/config.hh for tuning options.
CONTRIBUTORS
Contributions to KenLM are welcome. Please base your contributions on https://github.com/kpu/kenlm and send pull requests (or I might give you commit access). Downstream copies in Moses and cdec are maintained by overwriting them so do not make changes there.
The name was Hieu Hoang's idea, not mine.

View File

@ -83,7 +83,13 @@ void WriteHeader(void *to, const Parameters &params) {
uint8_t *SetupJustVocab(const Config &config, uint8_t order, std::size_t memory_size, Backing &backing) {
if (config.write_mmap) {
std::size_t total = TotalHeaderSize(order) + memory_size;
backing.vocab.reset(util::MapZeroedWrite(config.write_mmap, total, backing.file), total, util::scoped_memory::MMAP_ALLOCATED);
backing.file.reset(util::CreateOrThrow(config.write_mmap));
if (config.write_method == Config::WRITE_MMAP) {
backing.vocab.reset(util::MapZeroedWrite(backing.file.get(), total), total, util::scoped_memory::MMAP_ALLOCATED);
} else {
util::ResizeOrThrow(backing.file.get(), 0);
util::MapAnonymous(total, backing.vocab);
}
strncpy(reinterpret_cast<char*>(backing.vocab.get()), kMagicIncomplete, TotalHeaderSize(order));
return reinterpret_cast<uint8_t*>(backing.vocab.get()) + TotalHeaderSize(order);
} else {
@ -121,12 +127,14 @@ uint8_t *GrowForSearch(const Config &config, std::size_t vocab_pad, std::size_t
void FinishFile(const Config &config, ModelType model_type, unsigned int search_version, const std::vector<uint64_t> &counts, std::size_t vocab_pad, Backing &backing) {
if (!config.write_mmap) return;
util::SyncOrThrow(backing.vocab.get(), backing.vocab.size());
switch (config.write_method) {
case Config::WRITE_MMAP:
util::SyncOrThrow(backing.vocab.get(), backing.vocab.size());
util::SyncOrThrow(backing.search.get(), backing.search.size());
break;
case Config::WRITE_AFTER:
util::SeekOrThrow(backing.file.get(), 0);
util::WriteOrThrow(backing.file.get(), backing.vocab.get(), backing.vocab.size());
util::SeekOrThrow(backing.file.get(), backing.vocab.size() + vocab_pad);
util::WriteOrThrow(backing.file.get(), backing.search.get(), backing.search.size());
util::FSyncOrThrow(backing.file.get());
@ -141,6 +149,10 @@ void FinishFile(const Config &config, ModelType model_type, unsigned int search_
params.fixed.has_vocabulary = config.include_vocab;
params.fixed.search_version = search_version;
WriteHeader(backing.vocab.get(), params);
if (config.write_method == Config::WRITE_AFTER) {
util::SeekOrThrow(backing.file.get(), 0);
util::WriteOrThrow(backing.file.get(), backing.vocab.get(), TotalHeaderSize(counts.size()));
}
}
namespace detail {

View File

@ -1,3 +0,0 @@
#!/bin/bash
cd "$(dirname "$0")/.."
rm -rf {lm,util}/*.o lm/query lm/build_binary {lm,util}/*_test lm/test.binary* lm/test.arpa?????? util/file_piece.cc.gz

View File

@ -1,16 +0,0 @@
#!/bin/bash
#This is just an example compilation. You should integrate these files into your build system. I can provide boost jam if you want.
#If your code uses ICU, edit util/string_piece.hh and uncomment #define USE_ICU
#I use zlib by default. If you don't want to depend on zlib, remove #define USE_ZLIB from util/file_piece.hh
#don't need to use if compiling with moses Makefiles already
cd "$(dirname "$0")/.."
set -e
for i in util/{bit_packing,ersatz_progress,exception,file_piece,murmur_hash,file,mmap} lm/{bhiksha,binary_format,config,lm_exception,model,quantize,read_arpa,search_hashed,search_trie,trie,trie_sort,virtual_interface,vocab}; do
g++ -I. -O3 -DNDEBUG $CXXFLAGS -c $i.cc -o $i.o
done
g++ -I. -O3 -DNDEBUG $CXXFLAGS lm/build_binary.cc {lm,util}/*.o -lz -o lm/build_binary
g++ -I. -O3 -DNDEBUG $CXXFLAGS lm/ngram_query.cc {lm,util}/*.o -lz -o lm/query

37
lm/fragment.cc Normal file
View File

@ -0,0 +1,37 @@
#include "lm/binary_format.hh"
#include "lm/model.hh"
#include "lm/left.hh"
#include "util/tokenize_piece.hh"
template <class Model> void Query(const char *name) {
Model model(name);
std::string line;
lm::ngram::ChartState ignored;
while (getline(std::cin, line)) {
lm::ngram::RuleScore<Model> scorer(model, ignored);
for (util::TokenIter<util::SingleCharacter, true> i(line, ' '); i; ++i) {
scorer.Terminal(model.GetVocabulary().Index(*i));
}
std::cout << scorer.Finish() << '\n';
}
}
// Usage: fragment <model file>
// Scores each line of stdin as a rule fragment with the recognized model type.
// Returns 0 on success, 1 on a usage error or an unsupported model type.
int main(int argc, char *argv[]) {
  if (argc != 2) {
    std::cerr << "Expected model file name." << std::endl;
    return 1;
  }
  const char *name = argv[1];
  // Default to PROBING; RecognizeBinary overwrites model_type for binary files.
  lm::ngram::ModelType model_type = lm::ngram::PROBING;
  lm::ngram::RecognizeBinary(name, model_type);
  switch (model_type) {
    case lm::ngram::PROBING:
      Query<lm::ngram::ProbingModel>(name);
      break;
    case lm::ngram::REST_PROBING:
      Query<lm::ngram::RestProbingModel>(name);
      break;
    default:
      std::cerr << "Model type not supported yet." << std::endl;
      // Fix: previously fell through and returned 0 (success) on this error path.
      return 1;
  }
  return 0;
}

View File

@ -8,5 +8,5 @@
#define KENLM_MAX_ORDER 6
#endif
#ifndef KENLM_ORDER_MESSAGE
#define KENLM_ORDER_MESSAGE "Recompile with e.g. `bjam --kenlm-max-order=6 -a' to change the maximum order."
#define KENLM_ORDER_MESSAGE "If your build system supports changing KENLM_MAX_ORDER, change it there and recompile. In the KenLM tarball or Moses, use e.g. `bjam --kenlm-max-order=6 -a'. Otherwise, edit lm/max_order.hh."
#endif

167
lm/partial.hh Normal file
View File

@ -0,0 +1,167 @@
#ifndef LM_PARTIAL__
#define LM_PARTIAL__
#include "lm/return.hh"
#include "lm/state.hh"
#include <algorithm>
#include <assert.h>
namespace lm {
namespace ngram {
// Result of ExtendLoop (below).
struct ExtendReturn {
  // Accumulated score adjustment from the extensions performed.
  float adjust;
  // True if the left state should be treated as full after this extension.
  bool make_full;
  // Number of context words the model left usable for further extension.
  unsigned char next_use;
};
// Shared inner loop of RevealBefore/RevealAfter/Subsume: extends the n-gram
// pointers in [pointers, pointers_end) leftward through the context words in
// [add_rbegin, add_rend), accumulating a score adjustment.
//
// seen           - number of context words already accounted for.
// backoff_start  - backoff weights parallel to [add_rbegin, add_rend).
// pointers_write - receives surviving extend-left pointers; NULL signals that
//                  the existing left state is full, so completed probabilities
//                  (not rest costs) are used.
// backoff_write  - receives the surviving backoff weights (next_use entries).
template <class Model> ExtendReturn ExtendLoop(
    const Model &model,
    unsigned char seen, const WordIndex *add_rbegin, const WordIndex *add_rend, const float *backoff_start,
    const uint64_t *pointers, const uint64_t *pointers_end,
    uint64_t *&pointers_write,
    float *backoff_write) {
  unsigned char add_length = add_rend - add_rbegin;

  // Double-buffer the backoffs: ExtendLeft consumes one array and fills the other.
  float backoff_buf[2][KENLM_MAX_ORDER - 1];
  float *backoff_in = backoff_buf[0], *backoff_out = backoff_buf[1];
  std::copy(backoff_start, backoff_start + add_length, backoff_in);

  ExtendReturn value;
  value.make_full = false;
  value.adjust = 0.0;
  value.next_use = add_length;

  unsigned char i = 0;
  unsigned char length = pointers_end - pointers;
  // pointers_write is NULL means that the existing left state is full, so we should use completed probabilities.
  if (pointers_write) {
    // Using full context, writing to new left state.
    for (; i < length; ++i) {
      FullScoreReturn ret(model.ExtendLeft(
            add_rbegin, add_rbegin + value.next_use,
            backoff_in,
            pointers[i], i + seen + 1,
            backoff_out,
            value.next_use));
      std::swap(backoff_in, backoff_out);
      if (ret.independent_left) {
        // Extension can no longer change this score: charge the completed
        // probability and stop writing pointers.
        value.adjust += ret.prob;
        value.make_full = true;
        ++i;
        break;
      }
      value.adjust += ret.rest;
      *pointers_write++ = ret.extend_left;
      if (value.next_use != add_length) {
        // The model consumed some of the added context, so the state is full.
        value.make_full = true;
        ++i;
        break;
      }
    }
  }
  // Using some of the new context.
  for (; i < length && value.next_use; ++i) {
    FullScoreReturn ret(model.ExtendLeft(
          add_rbegin, add_rbegin + value.next_use,
          backoff_in,
          pointers[i], i + seen + 1,
          backoff_out,
          value.next_use));
    std::swap(backoff_in, backoff_out);
    value.adjust += ret.prob;
  }
  // Remove rest costs for the pointers that were never extended.
  float unrest = model.UnRest(pointers + i, pointers_end, i + seen + 1);
  // Using none of the new context.
  value.adjust += unrest;

  std::copy(backoff_in, backoff_in + value.next_use, backoff_write);
  return value;
}
// Reveal additional left-side context (reveal) to an existing (left, right)
// state pair; `seen` words of reveal were already incorporated and reveal_full
// marks the revealed state as full.  Returns the score adjustment; updates
// left and right in place.
template <class Model> float RevealBefore(const Model &model, const Right &reveal, const unsigned char seen, bool reveal_full, Left &left, Right &right) {
  assert(seen < reveal.length || reveal_full);
  // NULL write pointer => use completed probabilities (see ExtendLoop).
  uint64_t *pointers_write = reveal_full ? NULL : left.pointers;
  float backoff_buffer[KENLM_MAX_ORDER - 1];
  ExtendReturn value(ExtendLoop(
        model,
        seen, reveal.words + seen, reveal.words + reveal.length, reveal.backoff + seen,
        left.pointers, left.pointers + left.length,
        pointers_write,
        left.full ? backoff_buffer : (right.backoff + right.length)));
  if (reveal_full) {
    left.length = 0;
    value.make_full = true;
  } else {
    left.length = pointers_write - left.pointers;
    value.make_full |= (left.length == model.Order() - 1);
  }
  if (left.full) {
    // Left was already full: backoffs were written to a scratch buffer and
    // charged directly to the adjustment.
    for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += backoff_buffer[i];
  } else {
    // If left wasn't full when it came in, put words into right state.
    std::copy(reveal.words + seen, reveal.words + seen + value.next_use, right.words + right.length);
    right.length += value.next_use;
    left.full = value.make_full || (right.length == model.Order() - 1);
  }
  return value.adjust;
}
// Reveal additional right-side context (reveal) to an existing (left, right)
// state pair; `seen` entries of reveal were already incorporated.  Returns the
// score adjustment; updates left and right in place.
template <class Model> float RevealAfter(const Model &model, Left &left, Right &right, const Left &reveal, unsigned char seen) {
  assert(seen < reveal.length || reveal.full);
  // NULL write pointer => use completed probabilities (see ExtendLoop).
  uint64_t *pointers_write = left.full ? NULL : (left.pointers + left.length);
  ExtendReturn value(ExtendLoop(
        model,
        seen, right.words, right.words + right.length, right.backoff,
        reveal.pointers + seen, reveal.pointers + reveal.length,
        pointers_write,
        right.backoff));
  if (reveal.full) {
    // Revealed state is full: charge remaining backoffs and clear right.
    for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += right.backoff[i];
    right.length = 0;
    value.make_full = true;
  } else {
    right.length = value.next_use;
    value.make_full |= (right.length == model.Order() - 1);
  }
  if (!left.full) {
    left.length = pointers_write - left.pointers;
    left.full = value.make_full || (left.length == model.Order() - 1);
  }
  return value.adjust;
}
// Merge two adjacent state pairs: extend second_left through first_right's
// context (with between_length words already seen between them), folding the
// result into first_left / second_right.  Returns the score adjustment.
template <class Model> float Subsume(const Model &model, Left &first_left, const Right &first_right, const Left &second_left, Right &second_right, const unsigned int between_length) {
  assert(first_right.length < KENLM_MAX_ORDER);
  assert(second_left.length < KENLM_MAX_ORDER);
  assert(between_length < KENLM_MAX_ORDER - 1);
  // NULL write pointer => use completed probabilities (see ExtendLoop).
  uint64_t *pointers_write = first_left.full ? NULL : (first_left.pointers + first_left.length);
  float backoff_buffer[KENLM_MAX_ORDER - 1];
  ExtendReturn value(ExtendLoop(
        model,
        between_length, first_right.words, first_right.words + first_right.length, first_right.backoff,
        second_left.pointers, second_left.pointers + second_left.length,
        pointers_write,
        second_left.full ? backoff_buffer : (second_right.backoff + second_right.length)));
  if (second_left.full) {
    // Second left state was full: backoffs go to scratch and are charged here.
    for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += backoff_buffer[i];
  } else {
    std::copy(first_right.words, first_right.words + value.next_use, second_right.words + second_right.length);
    second_right.length += value.next_use;
    value.make_full |= (second_right.length == model.Order() - 1);
  }
  if (!first_left.full) {
    first_left.length = pointers_write - first_left.pointers;
    first_left.full = value.make_full || second_left.full || (first_left.length == model.Order() - 1);
  }
  assert(first_left.length < KENLM_MAX_ORDER);
  assert(second_right.length < KENLM_MAX_ORDER);
  return value.adjust;
}
} // namespace ngram
} // namespace lm
#endif // LM_PARTIAL__

199
lm/partial_test.cc Normal file
View File

@ -0,0 +1,199 @@
#include "lm/partial.hh"
#include "lm/left.hh"
#include "lm/model.hh"
#include "util/tokenize_piece.hh"
#define BOOST_TEST_MODULE PartialTest
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
namespace lm {
namespace ngram {
namespace {
// Location of the test ARPA file: the first command-line argument when one is
// supplied, otherwise "test.arpa" in the working directory.
const char *TestLocation() {
  if (boost::unit_test::framework::master_test_suite().argc >= 2) {
    return boost::unit_test::framework::master_test_suite().argv[1];
  }
  return "test.arpa";
}
// Build a Config that suppresses ARPA complaints and status messages.
Config SilentConfig() {
  Config quiet;
  quiet.messages = NULL;
  quiet.arpa_complain = Config::NONE;
  return quiet;
}
// Fixture loading the test model quietly; `m` is shared by every test case in
// the suite below.
struct ModelFixture {
  ModelFixture() : m(TestLocation(), SilentConfig()) {}
  RestProbingModel m;
};
BOOST_FIXTURE_TEST_SUITE(suite, ModelFixture)
// Reveal words one at a time before an empty state: the adjustment stays zero
// and the revealed words/backoffs accumulate in the right state.  The numeric
// constants are log10 backoffs taken from the test ARPA file.
BOOST_AUTO_TEST_CASE(SimpleBefore) {
  // Start from an empty hypothesis state.
  Left left;
  left.full = false;
  left.length = 0;
  Right right;
  right.length = 0;

  Right reveal;
  reveal.length = 1;
  WordIndex period = m.GetVocabulary().Index(".");
  reveal.words[0] = period;
  reveal.backoff[0] = -0.845098;

  // Revealing "." alone should not change the score.
  BOOST_CHECK_CLOSE(0.0, RevealBefore(m, reveal, 0, false, left, right), 0.001);
  BOOST_CHECK_EQUAL(0, left.length);
  BOOST_CHECK(!left.full);
  BOOST_CHECK_EQUAL(1, right.length);
  BOOST_CHECK_EQUAL(period, right.words[0]);
  BOOST_CHECK_CLOSE(-0.845098, right.backoff[0], 0.001);

  // Reveal one more word ("more"); the first word counts as already seen.
  WordIndex more = m.GetVocabulary().Index("more");
  reveal.words[1] = more;
  reveal.backoff[1] = -0.4771212;
  reveal.length = 2;
  BOOST_CHECK_CLOSE(0.0, RevealBefore(m, reveal, 1, false, left, right), 0.001);
  BOOST_CHECK_EQUAL(0, left.length);
  BOOST_CHECK(!left.full);
  BOOST_CHECK_EQUAL(2, right.length);
  BOOST_CHECK_EQUAL(period, right.words[0]);
  BOOST_CHECK_EQUAL(more, right.words[1]);
  BOOST_CHECK_CLOSE(-0.845098, right.backoff[0], 0.001);
  BOOST_CHECK_CLOSE(-0.4771212, right.backoff[1], 0.001);
}
// Build a state for "would", then reveal "consider" after it and "also"
// before it, checking each adjustment against precomputed log10 values from
// the test ARPA file.
BOOST_AUTO_TEST_CASE(AlsoWouldConsider) {
  WordIndex would = m.GetVocabulary().Index("would");
  WordIndex consider = m.GetVocabulary().Index("consider");

  // State representing just the word "would".
  ChartState current;
  current.left.length = 1;
  current.left.pointers[0] = would;
  current.left.full = false;
  current.right.length = 1;
  current.right.words[0] = would;
  current.right.backoff[0] = -0.30103;

  Left after;
  after.full = false;
  after.length = 1;
  after.pointers[0] = consider;

  // adjustment for would consider
  BOOST_CHECK_CLOSE(-1.687872 - -0.2922095 - 0.30103, RevealAfter(m, current.left, current.right, after, 0), 0.001);

  BOOST_CHECK_EQUAL(2, current.left.length);
  BOOST_CHECK_EQUAL(would, current.left.pointers[0]);
  BOOST_CHECK_EQUAL(false, current.left.full);

  WordIndex also = m.GetVocabulary().Index("also");
  Right before;
  before.length = 1;
  before.words[0] = also;
  before.backoff[0] = -0.30103;
  // r(would) = -0.2922095 [i would], r(would -> consider) = -1.988902 [b(would) + p(consider)]
  // p(also -> would) = -2, p(also would -> consider) = -3
  BOOST_CHECK_CLOSE(-2 + 0.2922095 -3 + 1.988902, RevealBefore(m, before, 0, false, current.left, current.right), 0.001);

  // Once the left state fills, its pointer list empties and the right state
  // carries both words.
  BOOST_CHECK_EQUAL(0, current.left.length);
  BOOST_CHECK(current.left.full);
  BOOST_CHECK_EQUAL(2, current.right.length);
  BOOST_CHECK_EQUAL(would, current.right.words[0]);
  BOOST_CHECK_EQUAL(also, current.right.words[1]);
}
// Reveal full context (". loin") before an </s>-only state; the adjustment is
// a precomputed value from the test ARPA file and the left state empties.
BOOST_AUTO_TEST_CASE(EndSentence) {
  WordIndex loin = m.GetVocabulary().Index("loin");
  WordIndex period = m.GetVocabulary().Index(".");
  WordIndex eos = m.GetVocabulary().EndSentence();

  // State containing only the end-of-sentence marker.
  ChartState between;
  between.left.length = 1;
  between.left.pointers[0] = eos;
  between.left.full = true;
  between.right.length = 0;

  Right before;
  before.words[0] = period;
  before.words[1] = loin;
  before.backoff[0] = -0.845098;
  before.backoff[1] = 0.0;
  before.length = 1;
  // reveal_full = true: the revealed context is complete.
  BOOST_CHECK_CLOSE(-0.0410707, RevealBefore(m, before, 0, true, between.left, between.right), 0.001);
  BOOST_CHECK_EQUAL(0, between.left.length);
}
// Score the vocabulary-index range [begin, end) as a single rule, leaving the
// resulting chart state in out.
float ScoreFragment(const RestProbingModel &model, unsigned int *begin, unsigned int *end, ChartState &out) {
  RuleScore<RestProbingModel> scorer(model, out);
  for (unsigned int *cur = begin; cur != end; ++cur) {
    scorer.Terminal(*cur);
  }
  return scorer.Finish();
}
// Incrementally reveal before/after context one word at a time and check that
// the summed adjustments match `expect`.  The reveal order interleaves before
// and after, then applies fullness last, exercising every partial-reveal path.
void CheckAdjustment(const RestProbingModel &model, float expect, const Right &before_in, bool before_full, ChartState between, const Left &after_in) {
  // Work on copies so truncated lengths don't affect the caller's state.
  Right before(before_in);
  Left after(after_in);
  after.full = false;
  float got = 0.0;
  for (unsigned int i = 1; i < 5; ++i) {
    if (before_in.length >= i) {
      before.length = i;
      got += RevealBefore(model, before, i - 1, false, between.left, between.right);
    }
    if (after_in.length >= i) {
      after.length = i;
      got += RevealAfter(model, between.left, between.right, after, i - 1);
    }
  }
  if (after_in.full) {
    after.full = true;
    got += RevealAfter(model, between.left, between.right, after, after.length);
  }
  if (before_full) {
    got += RevealBefore(model, before, before.length, true, between.left, between.right);
  }
  // Sometimes they're zero and BOOST_CHECK_CLOSE fails for this.
  BOOST_CHECK(fabs(expect - got) < 0.001);
}
// For every (before, between, after) split of str, verify that scoring the
// pieces separately plus the reveal adjustments reproduces the score of the
// whole string: adjustment == full - before - after - between.
void FullDivide(const RestProbingModel &model, StringPiece str) {
  std::vector<WordIndex> indices;
  for (util::TokenIter<util::SingleCharacter, true> i(str, ' '); i; ++i) {
    indices.push_back(model.GetVocabulary().Index(*i));
  }
  // Score of the whole string in one pass.
  ChartState full_state;
  float full = ScoreFragment(model, &indices.front(), &indices.back() + 1, full_state);

  ChartState before_state;
  before_state.left.full = false;
  RuleScore<RestProbingModel> before_scorer(model, before_state);
  float before_score = 0.0;
  for (unsigned int before = 0; before < indices.size(); ++before) {
    for (unsigned int after = before; after <= indices.size(); ++after) {
      // Score the suffix and the middle independently, then check that the
      // reveal adjustments account for the difference.
      ChartState after_state, between_state;
      float after_score = ScoreFragment(model, &indices.front() + after, &indices.front() + indices.size(), after_state);
      float between_score = ScoreFragment(model, &indices.front() + before, &indices.front() + after, between_state);
      CheckAdjustment(model, full - before_score - after_score - between_score, before_state.right, before_state.left.full, between_state, after_state.left);
    }
    // Extend the prefix by one word for the next outer iteration.
    before_scorer.Terminal(indices[before]);
    before_score = before_scorer.Finish();
  }
}
// Exhaustively test state splitting on sentences of increasing length.
BOOST_AUTO_TEST_CASE(Strings) {
  const char *sentences[] = {
    "also would consider",
    "looking on a little more loin . </s>",
    "in biarritz watching considering looking . on a little more loin also would consider higher to look good unknown the screening foo bar , unknown however unknown </s>"
  };
  for (unsigned int s = 0; s < sizeof(sentences) / sizeof(sentences[0]); ++s) {
    FullDivide(m, sentences[s]);
  }
}
BOOST_AUTO_TEST_SUITE_END()
} // namespace
} // namespace ngram
} // namespace lm

View File

@ -234,7 +234,7 @@ template <> void HashedSearch<BackoffValue>::DispatchBuild(util::FilePiece &f, c
ApplyBuild(f, counts, config, vocab, warn, build);
}
template <> void HashedSearch<RestValue>::DispatchBuild(util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn) {
template <> void HashedSearch<RestValue>::DispatchBuild(util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn) {
switch (config.rest_function) {
case Config::REST_MAX:
{

View File

@ -161,7 +161,7 @@ template <class Value> class HashedSearch {
{}
static uint64_t Size(uint64_t count) {
return (count + 1) * sizeof(ProbBackoff); // +1 for hallucinate <unk>
return (count + 1) * sizeof(typename Value::Weights); // +1 for hallucinate <unk>
}
const typename Value::Weights &Lookup(WordIndex index) const {

View File

@ -47,6 +47,8 @@ class State {
unsigned char length;
};
typedef State Right;
inline uint64_t hash_value(const State &state, uint64_t seed = 0) {
return util::MurmurHashNative(state.words, sizeof(WordIndex) * state.length, seed);
}

View File

@ -1,10 +0,0 @@
#!/bin/bash
#Run tests. Requires Boost.
cd "$(dirname "$0")/.."
set -e
lm/compile.sh
for i in util/{bit_packing,file_piece,joint_sort,key_value_packing,probing_hash_table,sorted_uniform,tokenize_piece}_test lm/{model,left}_test; do
g++ -I. -O3 $CXXFLAGS $i.cc {lm,util}/*.o -lboost_test_exec_monitor -lz -o $i
pushd $(dirname $i) >/dev/null && ./$(basename $i) || echo "$i failed"; popd >/dev/null
done

View File

@ -2,8 +2,11 @@
#ifndef LM_WORD_INDEX__
#define LM_WORD_INDEX__
#include <limits.h>
namespace lm {
typedef unsigned int WordIndex;
const WordIndex kMaxWordIndex = UINT_MAX;
} // namespace lm
typedef lm::WordIndex LMWordIndex;

View File

@ -12,6 +12,8 @@
#include "Ngram.h"
#include "Reference.h"
#include "Util.h"
#include "ScoreDataIterator.h"
#include "FeatureDataIterator.h"
#include "Vocabulary.h"
using namespace std;
@ -278,5 +280,66 @@ float unsmoothedBleu(const std::vector<float>& stats) {
return exp(logbleu);
}
// Compute the smoothed sentence-level BLEU (BLEU+1) of every hypothesis in an
// n-best list, reading a parallel score file and feature file.  Exits the
// process on malformed or mismatched input.
vector<float> BleuScorer::ScoreNbestList(string scoreFile, string featureFile) {
  // Wrapped in vectors to mirror the multi-file code path, even though only
  // one file of each kind is passed in here.
  vector<string> scoreFiles;
  vector<string> featureFiles;
  scoreFiles.push_back(scoreFile);
  featureFiles.push_back(featureFile);

  vector<FeatureDataIterator> featureDataIters;
  vector<ScoreDataIterator> scoreDataIters;
  for (size_t i = 0; i < featureFiles.size(); ++i) {
    featureDataIters.push_back(FeatureDataIterator(featureFiles[i]));
    scoreDataIters.push_back(ScoreDataIterator(scoreFiles[i]));
  }

  // (file index, hypothesis index) pairs for every hypothesis found.
  vector<pair<size_t,size_t> > hypotheses;
  if (featureDataIters[0] == FeatureDataIterator::end()) {
    cerr << "Error: at the end of feature data iterator" << endl;
    exit(1);
  }
  for (size_t i = 0; i < featureFiles.size(); ++i) {
    if (featureDataIters[i] == FeatureDataIterator::end()) {
      cerr << "Error: Feature file " << i << " ended prematurely" << endl;
      exit(1);
    }
    if (scoreDataIters[i] == ScoreDataIterator::end()) {
      cerr << "Error: Score file " << i << " ended prematurely" << endl;
      exit(1);
    }
    if (featureDataIters[i]->size() != scoreDataIters[i]->size()) {
      cerr << "Error: features and scores have different size" << endl;
      exit(1);
    }
    for (size_t j = 0; j < featureDataIters[i]->size(); ++j) {
      hypotheses.push_back(pair<size_t,size_t>(i,j));
    }
  }

  // score the nbest list
  vector<float> bleuScores;
  for (size_t i=0; i < hypotheses.size(); ++i) {
    pair<size_t,size_t> translation = hypotheses[i];
    float bleu = sentenceLevelBleuPlusOne(scoreDataIters[translation.first]->operator[](translation.second));
    bleuScores.push_back(bleu);
  }
  return bleuScores;
}
// Smoothed (add-one) sentence-level BLEU.  stats is laid out as
// (match, total) pairs for n-gram orders 1..4, with one more entry at index
// 2*bleu_order — presumably the reference length used for the brevity
// penalty; confirm against the stats produced by prepareStats.
float BleuScorer::sentenceLevelBleuPlusOne(const vector<float>& stats) {
  float logbleu = 0.0;
  const unsigned int bleu_order = 4;
  for (unsigned int j=0; j<bleu_order; j++) {
    //cerr << (stats.get(2*j)+1) << "/" << (stats.get(2*j+1)+1) << " ";
    // Add-one smoothing on both matched and total n-gram counts.
    logbleu += log(stats[2*j]+1) - log(stats[2*j+1]+1);
  }
  logbleu /= bleu_order;
  // Brevity penalty; stats[1] is the hypothesis 1-gram total (its length).
  float brevity = 1.0 - (float)stats[(bleu_order*2)]/stats[1];
  if (brevity < 0.0) {
    logbleu += brevity;
  }
  //cerr << brevity << " -> " << exp(logbleu) << endl;
  return exp(logbleu);
}
}

View File

@ -18,6 +18,8 @@ const int kBleuNgramOrder = 4;
class NgramCounts;
class Reference;
using namespace std;
/**
* Bleu scoring
*/
@ -32,6 +34,9 @@ public:
explicit BleuScorer(const std::string& config = "");
~BleuScorer();
static vector<float> ScoreNbestList(string scoreFile, string featureFile);
static float sentenceLevelBleuPlusOne(const vector<float>& stats);
virtual void setReferenceFiles(const std::vector<std::string>& referenceFiles);
virtual void prepareStats(std::size_t sid, const std::string& text, ScoreStats& entry);

View File

@ -21,25 +21,24 @@ using namespace std;
namespace MosesTuning
{
Data::Data()
: m_scorer(NULL),
m_num_scores(0),
m_sparse_flag(false),
m_score_data(),
m_feature_data() {}
Data::Data(Scorer* scorer)
Data::Data(Scorer* scorer, const string& sparse_weights_file)
: m_scorer(scorer),
m_score_type(m_scorer->getName()),
m_num_scores(0),
m_sparse_flag(false),
m_score_data(new ScoreData(m_scorer)),
m_feature_data(new FeatureData)
{
TRACE_ERR("Data::m_score_type " << m_score_type << endl);
TRACE_ERR("Data::Scorer type from Scorer: " << m_scorer->getName() << endl);
if (sparse_weights_file.size()) {
m_sparse_weights.load(sparse_weights_file);
ostringstream msg;
msg << "Data::sparse_weights {";
m_sparse_weights.write(msg,"=");
msg << "}";
TRACE_ERR(msg.str() << std::endl);
}
}
//ADDED BY TS
@ -126,10 +125,8 @@ void Data::removeDuplicates() {
//END_ADDED
void Data::load(const std::string &featfile, const std::string &scorefile) {
m_feature_data->load(featfile);
m_feature_data->load(featfile, m_sparse_weights);
m_score_data->load(scorefile);
if (m_feature_data->hasSparseFeatures())
m_sparse_flag = true;
}
void Data::loadNBest(const string &file)
@ -236,18 +233,11 @@ void Data::AddFeatures(const string& str,
string name = substr;
getNextPound(buf, substr);
feature_entry.addSparse(name, atof(substr.c_str()));
m_sparse_flag = true;
}
}
m_feature_data->add(feature_entry, sentence_index);
}
// TODO
void Data::mergeSparseFeatures() {
cerr << "ERROR: sparse features can only be trained with pairwise ranked optimizer (PRO), not traditional MERT\n";
exit(1);
}
void Data::createShards(size_t shard_count, float shard_size, const string& scorerconfig,
vector<Data>& shards)
{
@ -282,7 +272,6 @@ void Data::createShards(size_t shard_count, float shard_size, const string& scor
shards.push_back(Data(scorer));
shards.back().m_score_type = m_score_type;
shards.back().m_num_scores = m_num_scores;
shards.back().m_sparse_flag = m_sparse_flag;
for (size_t i = 0; i < shard_contents.size(); ++i) {
shards.back().m_feature_data->add(m_feature_data->get(shard_contents[i]));
shards.back().m_score_data->add(m_score_data->get(shard_contents[i]));

View File

@ -32,13 +32,12 @@ private:
Scorer* m_scorer;
std::string m_score_type;
std::size_t m_num_scores;
bool m_sparse_flag;
ScoreDataHandle m_score_data;
FeatureDataHandle m_feature_data;
SparseVector m_sparse_weights;
public:
explicit Data(Scorer* scorer);
Data();
explicit Data(Scorer* scorer, const std::string& sparseweightsfile="");
void clear() {
m_score_data->clear();
@ -55,14 +54,9 @@ public:
return m_feature_data->NumberOfFeatures();
}
void NumberOfFeatures(std::size_t v) { m_feature_data->NumberOfFeatures(v); }
std::string Features() const { return m_feature_data->Features(); }
void Features(const std::string &f) { m_feature_data->Features(f); }
bool hasSparseFeatures() const { return m_sparse_flag; }
void mergeSparseFeatures();
void loadNBest(const std::string &file);
void load(const std::string &featfile, const std::string &scorefile);

View File

@ -19,7 +19,7 @@ namespace MosesTuning
FeatureArray::FeatureArray()
: m_index(""), m_num_features(0), m_sparse_flag(false) {}
: m_index(""), m_num_features(0){}
FeatureArray::~FeatureArray() {}
@ -81,19 +81,17 @@ void FeatureArray::loadbin(istream* is, size_t n)
}
}
void FeatureArray::loadtxt(istream* is, size_t n)
void FeatureArray::loadtxt(istream* is, const SparseVector& sparseWeights, size_t n)
{
FeatureStats entry(m_num_features);
for (size_t i = 0; i < n; i++) {
entry.loadtxt(is);
for (size_t i=0 ; i < n; i++) {
entry.loadtxt(is, sparseWeights);
add(entry);
if (entry.getSparse().size()>0)
m_sparse_flag = true;
}
}
void FeatureArray::load(istream* is)
void FeatureArray::load(istream* is, const SparseVector& sparseWeights)
{
size_t number_of_entries = 0;
bool binmode = false;
@ -128,7 +126,7 @@ void FeatureArray::load(istream* is)
if (binmode) {
loadbin(is, number_of_entries);
} else {
loadtxt(is, number_of_entries);
loadtxt(is, sparseWeights, number_of_entries);
}
getline(*is, stringBuf);
@ -141,15 +139,6 @@ void FeatureArray::load(istream* is)
}
}
void FeatureArray::load(const string &file)
{
TRACE_ERR("loading data from " << file << endl);
inputfilestream input_stream(file); // matches a stream with a file. Opens the file
istream* is = &input_stream;
load(is);
input_stream.close();
}
void FeatureArray::merge(FeatureArray& e)
{
//dummy implementation

View File

@ -27,11 +27,11 @@ class FeatureArray
private:
// idx to identify the utterance. It can differ from
// the index inside the vector.
std::string m_index;
featarray_t m_array;
std::size_t m_num_features;
std::string m_features;
bool m_sparse_flag;
public:
FeatureArray();
@ -39,7 +39,6 @@ public:
void clear() { m_array.clear(); }
bool hasSparseFeatures() const { return m_sparse_flag; }
std::string getIndex() const { return m_index; }
void setIndex(const std::string& value) { m_index = value; }
@ -75,10 +74,9 @@ public:
void save(const std::string &file, bool bin=false);
void save(bool bin=false);
void loadtxt(std::istream* is, std::size_t n);
void loadtxt(std::istream* is, const SparseVector& sparseWeights, std::size_t n);
void loadbin(std::istream* is, std::size_t n);
void load(std::istream* is);
void load(const std::string &file);
void load(std::istream* is, const SparseVector& sparseWeights);
bool check_consistency() const;
};

View File

@ -18,12 +18,9 @@ namespace MosesTuning
{
static const float MIN_FLOAT = -1.0 * numeric_limits<float>::max();
static const float MAX_FLOAT = numeric_limits<float>::max();
FeatureData::FeatureData()
: m_num_features(0),
m_sparse_flag(false) {}
: m_num_features(0) {}
void FeatureData::save(ostream* os, bool bin)
{
@ -45,7 +42,7 @@ void FeatureData::save(bool bin) {
save(&cout, bin);
}
void FeatureData::load(istream* is)
void FeatureData::load(istream* is, const SparseVector& sparseWeights)
{
FeatureArray entry;
@ -56,7 +53,7 @@ void FeatureData::load(istream* is)
}
entry.clear();
entry.load(is);
entry.load(is, sparseWeights);
if (entry.size() == 0)
break;
@ -64,15 +61,12 @@ void FeatureData::load(istream* is)
if (size() == 0)
setFeatureMap(entry.Features());
if (entry.hasSparseFeatures())
m_sparse_flag = true;
add(entry);
}
}
void FeatureData::load(const string &file)
void FeatureData::load(const string &file, const SparseVector& sparseWeights)
{
TRACE_ERR("loading feature data from " << file << endl);
inputfilestream input_stream(file); // matches a stream with a file. Opens the file
@ -80,7 +74,7 @@ void FeatureData::load(const string &file)
throw runtime_error("Unable to open feature file: " + file);
}
istream* is = &input_stream;
load(is);
load(is, sparseWeights);
input_stream.close();
}
@ -157,13 +151,7 @@ string FeatureData::ToString() const {
{
stringstream ss;
ss << "number of features: " << m_num_features
<< ", features: " << m_features
<< ", sparse flag: ";
if (m_sparse_flag) {
ss << "yes, ";
} else {
ss << "no, ";
}
<< ", features: " << m_features;
res.append(ss.str());
}

View File

@ -23,7 +23,6 @@ class FeatureData
private:
std::size_t m_num_features;
std::string m_features;
bool m_sparse_flag;
std::map<std::string, std::size_t> m_feature_name_to_index; // map from name to index of features
std::map<std::size_t, std::string> m_index_to_feature_name; // map from index to name of features
featdata_t m_array;
@ -36,14 +35,15 @@ public:
void clear() { m_array.clear(); }
bool hasSparseFeatures() const { return m_sparse_flag; }
FeatureArray get(const std::string& idx) {
return m_array.at(getIndex(idx));
}
FeatureArray& get(std::size_t idx) { return m_array.at(idx); }
const FeatureArray& get(std::size_t idx) const { return m_array.at(idx); }
FeatureArray& get(size_t idx) {
return m_array.at(idx);
}
const FeatureArray& get(size_t idx) const {
return m_array.at(idx);
}
inline bool exists(const std::string& sent_idx) const {
return exists(getIndex(sent_idx));
@ -76,8 +76,8 @@ public:
void save(std::ostream* os, bool bin=false);
void save(bool bin=false);
void load(std::istream* is);
void load(const std::string &file);
void load(std::istream* is, const SparseVector& sparseWeights);
void load(const std::string &file, const SparseVector& sparseWeights);
bool check_consistency() const;

View File

@ -86,7 +86,7 @@ void FeatureDataIterator::readNext() {
StringPiece line = m_in->ReadLine();
m_next.push_back(FeatureDataItem());
for (TokenIter<AnyCharacter, true> token(line, AnyCharacter(" \t")); token; ++token) {
TokenIter<AnyCharacter,false> value(*token,AnyCharacter(":"));
TokenIter<AnyCharacterLast,false> value(*token,AnyCharacterLast(":"));
if (!value) throw FileFormatException(m_in->FileName(), line.as_string());
StringPiece first = *value;
++value;

View File

@ -10,6 +10,8 @@
#include <fstream>
#include <cmath>
#include <stdexcept>
#include <boost/functional/hash.hpp>
#include "Util.h"
@ -65,28 +67,55 @@ void SparseVector::clear() {
m_fvector.clear();
}
SparseVector& SparseVector::operator-=(const SparseVector& rhs) {
//All the elements that have values in *this
for (fvector_t::iterator i = m_fvector.begin(); i != m_fvector.end(); ++i) {
m_fvector[i->first] = i->second - rhs.get(i->first);
// Load sparse feature weights from a file containing one "name value" pair
// per line; lines beginning with '#' are treated as comments.
// Throws runtime_error if the file cannot be opened.
void SparseVector::load(const string& file) {
  ifstream in(file.c_str());
  if (!in) {
    throw runtime_error("Failed to open sparse weights file: " + file);
  }
  string line;
  while (getline(in, line)) {
    // Fix: guard empty lines — indexing line[0] on an empty string was
    // out of range (undefined behavior before C++11).
    if (line.empty() || line[0] == '#') continue;
    istringstream linestream(line);
    string name;
    float value;
    // Fix: skip malformed lines instead of calling set() with an
    // uninitialized value when extraction fails.
    if (!(linestream >> name >> value)) continue;
    set(name, value);
  }
}
// Elementwise subtraction: for every key stored in rhs, replace this entry
// with get(key) - rhs's value.  Keys present only in *this are untouched,
// since the loop iterates over rhs alone.
SparseVector& SparseVector::operator-=(const SparseVector& rhs) {
  for (fvector_t::const_iterator i = rhs.m_fvector.begin();
      i != rhs.m_fvector.end(); ++i) {
    // get() also covers keys absent from *this.
    m_fvector[i->first] = get(i->first) - (i->second);
  }
  return *this;
}
// Dot product of this vector with rhs, accumulated over this vector's
// stored entries.
FeatureStatsType SparseVector::inner_product(const SparseVector& rhs) const {
  FeatureStatsType total = 0.0;
  fvector_t::const_iterator entry = m_fvector.begin();
  for (; entry != m_fvector.end(); ++entry) {
    total += entry->second * rhs.get(entry->first);
  }
  return total;
}
// Elementwise difference of two sparse vectors, built on operator-=.
SparseVector operator-(const SparseVector& lhs, const SparseVector& rhs) {
  SparseVector result(lhs);
  result -= rhs;
  return result;
}
// Symmetric inner product: delegate to the member function on the operand
// with fewer stored entries, so the loop runs over the smaller vector.
FeatureStatsType inner_product(const SparseVector& lhs, const SparseVector& rhs) {
  const bool lhs_smaller = lhs.size() < rhs.size();
  const SparseVector &smaller = lhs_smaller ? lhs : rhs;
  const SparseVector &larger = lhs_smaller ? rhs : lhs;
  return smaller.inner_product(larger);
}
std::vector<std::size_t> SparseVector::feats() const {
std::vector<std::size_t> toRet;
for(fvector_t::const_iterator iter = m_fvector.begin();
@ -123,7 +152,6 @@ std::size_t hash_value(SparseVector const& item) {
return hasher(item.m_fvector);
}
FeatureStats::FeatureStats()
: m_available_size(kAvailableSize), m_entries(0),
m_array(new FeatureStatsType[m_available_size]) {}
@ -135,12 +163,6 @@ FeatureStats::FeatureStats(const size_t size)
memset(m_array, 0, GetArraySizeWithBytes());
}
FeatureStats::FeatureStats(string &theString)
: m_available_size(0), m_entries(0), m_array(NULL)
{
set(theString);
}
FeatureStats::~FeatureStats()
{
if (m_array) {
@ -190,7 +212,7 @@ void FeatureStats::addSparse(const string& name, FeatureStatsType v)
m_map.set(name,v);
}
void FeatureStats::set(string &theString)
void FeatureStats::set(string &theString, const SparseVector& sparseWeights )
{
string substring, stringBuf;
reset();
@ -207,6 +229,26 @@ void FeatureStats::set(string &theString)
addSparse(substring.substr(0,separator), atof(substring.substr(separator+1).c_str()) );
}
}
if (sparseWeights.size()) {
//Merge the sparse features
FeatureStatsType merged = inner_product(sparseWeights, m_map);
add(merged);
/*
cerr << "Merged ";
sparseWeights.write(cerr,"=");
cerr << " and ";
map_.write(cerr,"=");
cerr << " to give " << merged << endl;
*/
m_map.clear();
}
/*
cerr << "FS: ";
for (size_t i = 0; i < entries_; ++i) {
cerr << array_[i] << " ";
}
cerr << endl;*/
}
void FeatureStats::loadbin(istream* is)
@ -215,22 +257,11 @@ void FeatureStats::loadbin(istream* is)
static_cast<streamsize>(GetArraySizeWithBytes()));
}
void FeatureStats::loadtxt(istream* is)
void FeatureStats::loadtxt(istream* is, const SparseVector& sparseWeights)
{
string line;
getline(*is, line);
set(line);
}
void FeatureStats::loadtxt(const string &file)
{
ifstream ifs(file.c_str(), ios::in);
if (!ifs) {
cerr << "Failed to open " << file << endl;
exit(1);
}
istream* is = &ifs;
loadtxt(is);
set(line, sparseWeights);
}
void FeatureStats::savetxt(const string &file)

View File

@ -31,11 +31,13 @@ public:
FeatureStatsType get(std::size_t id) const;
void set(const std::string& name, FeatureStatsType value);
void clear();
void load(const std::string& file);
std::size_t size() const { return m_fvector.size(); }
void write(std::ostream& out, const std::string& sep = " ") const;
SparseVector& operator-=(const SparseVector& rhs);
FeatureStatsType inner_product(const SparseVector& rhs) const;
// Added by cherryc
std::vector<std::size_t> feats() const;
@ -52,6 +54,7 @@ private:
};
SparseVector operator-(const SparseVector& lhs, const SparseVector& rhs);
FeatureStatsType inner_product(const SparseVector& lhs, const SparseVector& rhs);
class FeatureStats
{
@ -66,7 +69,6 @@ private:
public:
FeatureStats();
explicit FeatureStats(const std::size_t size);
explicit FeatureStats(std::string &theString);
~FeatureStats();
@ -97,7 +99,7 @@ public:
const SparseVector& getSparse() const { return m_map; }
void set(std::string &theString);
void set(std::string &theString, const SparseVector& sparseWeights);
inline std::size_t bytes() const { return GetArraySizeWithBytes(); }
@ -114,8 +116,7 @@ public:
void savebin(std::ostream* os);
void savetxt();
void loadtxt(const std::string &file);
void loadtxt(std::istream* is);
void loadtxt(std::istream* is, const SparseVector& sparseWeights);
void loadbin(std::istream* is);
/**

View File

@ -51,13 +51,13 @@ PermutationScorer.cpp
StatisticsBasedScorer.cpp
../util//kenutil m ..//z ;
exe mert : mert.cpp mert_lib ../moses/src//ThreadPool ;
exe mert : mert.cpp mert_lib bleu_lib ../moses/src//ThreadPool ;
exe extractor : extractor.cpp mert_lib ;
exe extractor : extractor.cpp mert_lib bleu_lib ;
exe evaluator : evaluator.cpp mert_lib ;
exe evaluator : evaluator.cpp mert_lib bleu_lib ;
exe pro : pro.cpp mert_lib ..//boost_program_options ;
exe pro : pro.cpp mert_lib bleu_lib ..//boost_program_options ;
exe kbmira : kbmira.cpp mert_lib ..//boost_program_options ;
@ -75,3 +75,7 @@ unit-test timer_test : TimerTest.cpp mert_lib ..//boost_unit_test_framework ;
unit-test util_test : UtilTest.cpp mert_lib ..//boost_unit_test_framework ;
unit-test vocabulary_test : VocabularyTest.cpp mert_lib ..//boost_unit_test_framework ;
install legacy : programs : <location>. ;
lib bleu_lib : BleuScorer.cpp mert_lib : : : <include>. ;

View File

@ -225,12 +225,12 @@ void PermutationScorer::prepareStats(size_t sid, const string& text, ScoreStats&
//SCOREROUT eg: 0.04546
distanceValue*=SCORE_MULTFACT; //SCOREROUT eg: 4546 to transform float into integer
ostringstream tempStream;
tempStream.precision(SCORE_PRECISION);
tempStream << distanceValue << " 1"; //use for final normalization over the amount of test sentences
tempStream.precision(0); // decimal precision not needed as score was multiplied per SCORE_MULTFACT
tempStream << std::fixed << distanceValue << " 1"; //use for final normalization over the amount of test sentences
string str = tempStream.str();
entry.set(str);
//cout << tempStream.str();
//cout << distanceValue << "=" << distanceValue << " (str:" << tempStream.str() << ")" << endl;
}
//Will just be final score

View File

@ -212,7 +212,7 @@ int main(int argc, char** argv)
if (referenceFiles.size() > 0)
scorer->setReferenceFiles(referenceFiles);
PrintUserTime("References loaded");
// PrintUserTime("References loaded");
Data data(scorer.get());
@ -221,14 +221,14 @@ int main(int argc, char** argv)
data.load(prevFeatureDataFiles.at(i), prevScoreDataFiles.at(i));
}
PrintUserTime("Previous data loaded");
// PrintUserTime("Previous data loaded");
// computing score statistics of each nbest file
for (size_t i = 0; i < nbestFiles.size(); i++) {
data.loadNBest(nbestFiles.at(i));
}
PrintUserTime("Nbest entries loaded and scored");
// PrintUserTime("Nbest entries loaded and scored");
//ADDED_BY_TS
if (!option.allowDuplicates) {

View File

@ -38,6 +38,7 @@ const char kDefaultScorerFile[] = "statscore.data";
const char kDefaultFeatureFile[] = "features.data";
const char kDefaultInitFile[] = "init.opt";
const char kDefaultPositiveString[] = "";
const char kDefaultSparseWeightsFile[] = "";
// Used when saving optimized weights.
const char kOutputFile[] = "weights.txt";
@ -99,49 +100,48 @@ bool WriteFinalWeights(const char* filename, const Point& point) {
void usage(int ret)
{
cerr << "usage: mert -d <dimensions> (mandatory)" << endl;
cerr << "[-n] retry ntimes (default 1)" << endl;
cerr << "[-m] number of random directions in powell (default 0)"<< endl;
cerr << "[-o] the indexes to optimize(default all)" << endl;
cerr << "[-t] the optimizer(default " << kDefaultOptimizer << ")" << endl;
cerr << "[-r] the random seed (defaults to system clock)" << endl;
cerr << "[--sctype|-s] the scorer type (default " << kDefaultScorer << ")" << endl;
cerr << "[--scconfig|-c] configuration string passed to scorer" << endl;
cerr << "[--scfile|-S] comma separated list of scorer data files (default " << kDefaultScorerFile << ")" << endl;
cerr << "[--ffile|-F] comma separated list of feature data files (default " << kDefaultFeatureFile << ")" << endl;
cerr << "[--ifile|-i] the starting point data file (default " << kDefaultInitFile << ")" << endl;
cerr << "[--positive|-P] indexes with positive weights (default none)"<<endl;
cerr<<"usage: mert -d <dimensions> (mandatory )"<<endl;
cerr<<"[-n] retry ntimes (default 1)"<<endl;
cerr<<"[-m] number of random directions in powell (default 0)"<<endl;
cerr<<"[-o] the indexes to optimize(default all)"<<endl;
cerr<<"[-t] the optimizer(default powell)"<<endl;
cerr<<"[-r] the random seed (defaults to system clock)"<<endl;
cerr<<"[--sctype|-s] the scorer type (default BLEU)"<<endl;
cerr<<"[--scconfig|-c] configuration string passed to scorer"<<endl;
cerr<<"[--scfile|-S] comma separated list of scorer data files (default score.data)"<<endl;
cerr<<"[--ffile|-F] comma separated list of feature data files (default feature.data)"<<endl;
cerr<<"[--ifile|-i] the starting point data file (default init.opt)"<<endl;
cerr<<"[--sparse-weights|-p] required for merging sparse features"<<endl;
#ifdef WITH_THREADS
cerr << "[--threads|-T] use multiple threads (default 1)" << endl;
cerr<<"[--threads|-T] use multiple threads (default 1)"<<endl;
#endif
cerr << "[--shard-count] Split data into shards, optimize for each shard and average" << endl;
cerr << "[--shard-size] Shard size as proportion of data. If 0, use non-overlapping shards" << endl;
cerr << "[-v] verbose level" << endl;
cerr << "[--help|-h] print this message and exit" << endl;
cerr<<"[--shard-count] Split data into shards, optimize for each shard and average"<<endl;
cerr<<"[--shard-size] Shard size as proportion of data. If 0, use non-overlapping shards"<<endl;
cerr<<"[-v] verbose level"<<endl;
cerr<<"[--help|-h] print this message and exit"<<endl;
exit(ret);
}
static struct option long_options[] = {
{"pdim", 1, 0, 'd'},
{"ntry", 1, 0, 'n'},
{"nrandom", 1, 0, 'm'},
{"rseed", required_argument, 0, 'r'},
{"optimize", 1, 0, 'o'},
{"pro", required_argument, 0, 'p'},
{"positive",1,0,'P'},
{"type", 1, 0, 't'},
{"sctype", 1, 0, 's'},
{"scconfig", required_argument, 0, 'c'},
{"scfile", 1, 0, 'S'},
{"ffile", 1, 0, 'F'},
{"ifile", 1, 0, 'i'},
{"ntry",1,0,'n'},
{"nrandom",1,0,'m'},
{"rseed",required_argument,0,'r'},
{"optimize",1,0,'o'},
{"type",1,0,'t'},
{"sctype",1,0,'s'},
{"scconfig",required_argument,0,'c'},
{"scfile",1,0,'S'},
{"ffile",1,0,'F'},
{"ifile",1,0,'i'},
{"sparse-weights",required_argument,0,'p'},
#ifdef WITH_THREADS
{"threads", required_argument, 0, 'T'},
{"threads", required_argument,0,'T'},
#endif
{"shard-count", required_argument, 0, 'a'},
{"shard-size", required_argument, 0, 'b'},
{"verbose", 1, 0, 'v'},
{"help", no_argument, 0, 'h'},
{"verbose",1,0,'v'},
{"help",no_argument,0,'h'},
{0, 0, 0, 0}
};
@ -159,6 +159,7 @@ struct ProgramOption {
string feature_file;
string init_file;
string positive_string;
string sparse_weights_file;
size_t num_threads;
float shard_size;
size_t shard_count;
@ -177,6 +178,7 @@ struct ProgramOption {
feature_file(kDefaultFeatureFile),
init_file(kDefaultInitFile),
positive_string(kDefaultPositiveString),
sparse_weights_file(kDefaultSparseWeightsFile),
num_threads(1),
shard_size(0),
shard_count(0) { }
@ -222,6 +224,9 @@ void ParseCommandOptions(int argc, char** argv, ProgramOption* opt) {
case 'i':
opt->init_file = string(optarg);
break;
case 'p':
opt->sparse_weights_file=string(optarg);
break;
case 'v':
setverboselevel(strtol(optarg, NULL, 10));
break;
@ -286,6 +291,8 @@ int main(int argc, char **argv)
srandom(time(NULL));
}
if (option.sparse_weights_file.size()) ++option.pdim;
// read in starting points
string onefile;
while (!option.init_file.empty()) {
@ -349,7 +356,7 @@ int main(int argc, char **argv)
ScorerFactory::getScorer(option.scorer_type, option.scorer_config));
//load data
Data data(scorer.get());
Data data(scorer.get(), option.sparse_weights_file);
for (size_t i = 0; i < ScoreDataFiles.size(); i++) {
cerr<<"Loading Data from: "<< ScoreDataFiles.at(i) << " and " << FeatureDataFiles.at(i) << endl;
@ -419,12 +426,6 @@ int main(int argc, char **argv)
}
}
// treat sparse features just like regular features
if (data.hasSparseFeatures()) {
data.mergeSparseFeatures();
}
#ifdef WITH_THREADS
cerr << "Creating a pool of " << option.num_threads << " threads" << endl;
Moses::ThreadPool pool(option.num_threads);

View File

@ -41,6 +41,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#include "BleuScorer.h"
#include "FeatureDataIterator.h"
#include "ScoreDataIterator.h"
#include "BleuScorer.h"
using namespace std;
using namespace MosesTuning;

394
mira/Decoder.cpp Normal file
View File

@ -0,0 +1,394 @@
/***********************************************************************
Moses - factored phrase-based language decoder
Copyright (C) 2009 University of Edinburgh
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/
#include "Decoder.h"
#include "Manager.h"
#include "ChartManager.h"
#include "Sentence.h"
#include "InputType.h"
#include "TranslationSystem.h"
#include "Phrase.h"
#include "TrellisPathList.h"
#include "ChartTrellisPathList.h"
#include "ChartTrellisPath.h"
#include "IOWrapper.h"
using namespace std;
using namespace Moses;
namespace Mira {
/**
* Allocates a char* and copies string into it.
**/
/**
 * Allocates a char buffer (with new[]) and copies the string, including
 * the terminating NUL, into it. Caller owns the result and must delete[].
 **/
static char* strToChar(const string& s) {
  const size_t bytes = s.size() + 1;   // +1 for the trailing '\0'
  char* buffer = new char[bytes];
  memcpy(buffer, s.c_str(), bytes);    // c_str() is NUL-terminated
  return buffer;
}
/**
 * Boot the Moses decoder from an ini file.
 * Builds a synthetic argv ("-f <inifile> -v <debuglevel>" plus switches
 * disabling the persistent cache), appends any extra decoder_params,
 * and loads StaticData. Exits the process if parameter loading fails.
 */
MosesDecoder::MosesDecoder(const string& inifile, int debuglevel, int argc, vector<string> decoder_params)
: m_manager(NULL) {
// number of synthetic argv entries built below (mosesargv[0..7])
static int BASE_ARGC = 8;
Parameter* params = new Parameter();
char ** mosesargv = new char*[BASE_ARGC + argc];
mosesargv[0] = strToChar("-f");
mosesargv[1] = strToChar(inifile);
mosesargv[2] = strToChar("-v");
stringstream dbgin;
dbgin << debuglevel;
mosesargv[3] = strToChar(dbgin.str());
// disable the persistent translation-option cache for training
mosesargv[4] = strToChar("-use-persistent-cache");
mosesargv[5] = strToChar("0");
mosesargv[6] = strToChar("-persistent-cache-size");
mosesargv[7] = strToChar("0");
for (int i = 0; i < argc; ++i) {
// extra argv entries alias the by-value decoder_params strings, which
// stay alive for the duration of this constructor only
char *cstr = &(decoder_params[i])[0];
mosesargv[BASE_ARGC + i] = cstr;
}
if (!params->LoadParam(BASE_ARGC + argc,mosesargv)) {
cerr << "Loading static data failed, exit." << endl;
exit(1);
}
StaticData::LoadDataStatic(params, "mira");
// only the first BASE_ARGC entries were allocated by strToChar; the
// rest are owned by decoder_params and must not be deleted here
for (int i = 0; i < BASE_ARGC; ++i) {
delete[] mosesargv[i];
}
delete[] mosesargv;
// NOTE(review): 'params' appears to be handed over to StaticData via
// LoadDataStatic — confirm ownership, otherwise it leaks.
const StaticData &staticData = StaticData::Instance();
m_bleuScoreFeature = staticData.GetBleuScoreFeature();
}
// Free the per-sentence objects created during decoding.
// NOTE(review): m_sentence is not deleted on the chart path, and none of
// the pointers are reset to NULL here — confirm callers invoke cleanup()
// exactly once per decoded sentence, or m_manager risks a double delete.
void MosesDecoder::cleanup(bool chartDecoding) {
delete m_manager;
if (chartDecoding)
delete m_chartManager;
else
delete m_sentence;
}
/**
 * Decode one source sentence and return up to numReturnedTranslations
 * target-word sequences from the n-best list.
 * Dispatches to the chart or phrase-based decoder depending on the
 * globally configured search algorithm; featureValues, bleuScores and
 * modelScores are filled with one entry per n-best hypothesis.
 */
vector< vector<const Word*> > MosesDecoder::getNBest(const std::string& source,
size_t sentenceid,
size_t nBestSize,
float bleuObjectiveWeight,
float bleuScoreWeight,
vector< ScoreComponentCollection>& featureValues,
vector< float>& bleuScores,
vector< float>& modelScores,
size_t numReturnedTranslations,
bool realBleu,
bool distinct,
bool avgRefLength,
size_t rank,
size_t epoch,
string filename)
{
StaticData &staticData = StaticData::InstanceNonConst();
bool chartDecoding = (staticData.GetSearchAlgorithm() == ChartDecoding);
// per-sentence setup: parse the input and configure the BLEU feature
initialize(staticData, source, sentenceid, bleuObjectiveWeight, bleuScoreWeight, avgRefLength, chartDecoding);
const TranslationSystem& system = staticData.GetTranslationSystem(TranslationSystem::DEFAULT);
// run the decoder
if (chartDecoding) {
return runChartDecoder(source, sentenceid, nBestSize, bleuObjectiveWeight, bleuScoreWeight,
featureValues, bleuScores, modelScores, numReturnedTranslations, realBleu, distinct, rank, epoch,
system);
}
else {
SearchAlgorithm search = staticData.GetSearchAlgorithm();
return runDecoder(source, sentenceid, nBestSize, bleuObjectiveWeight, bleuScoreWeight,
featureValues, bleuScores, modelScores, numReturnedTranslations, realBleu, distinct, rank, epoch,
search, system, filename);
}
}
/**
 * Phrase-based decoding path: builds a Manager for the current sentence
 * (initialize() must have been called), extracts an n-best list, and for
 * each hypothesis records feature values, a BLEU score (dynamic or real),
 * and the model score with the BLEU contribution removed.
 * Returns deep copies of the first numReturnedTranslations hypotheses;
 * the caller owns the returned Word pointers.
 */
vector< vector<const Word*> > MosesDecoder::runDecoder(const std::string& source,
size_t sentenceid,
size_t nBestSize,
float bleuObjectiveWeight,
float bleuScoreWeight,
vector< ScoreComponentCollection>& featureValues,
vector< float>& bleuScores,
vector< float>& modelScores,
size_t numReturnedTranslations,
bool realBleu,
bool distinct,
size_t rank,
size_t epoch,
SearchAlgorithm& search,
const TranslationSystem& system,
string filename) {
// run the decoder
m_manager = new Moses::Manager(0,*m_sentence, search, &system);
m_manager->ProcessSentence();
TrellisPathList nBestList;
m_manager->CalcNBest(nBestSize, nBestList, distinct);
// optionally print nbest to file (to extract scores and features.. currently just for sentence bleu scoring)
// NOTE(review): the actual OutputNBest call is commented out below, so a
// non-empty filename currently produces an empty file.
if (filename != "") {
ofstream out(filename.c_str());
if (!out) {
ostringstream msg;
msg << "Unable to open " << filename;
throw runtime_error(msg.str());
}
// TODO: handle sentence id (for now always 0)
//OutputNBest(out, nBestList, StaticData::Instance().GetOutputFactorOrder(),m_manager->GetTranslationSystem(), 0, false);
out.close();
}
// read off the feature values and bleu scores for each sentence in the nbest list
Moses::TrellisPathList::const_iterator iter;
for (iter = nBestList.begin() ; iter != nBestList.end() ; ++iter) {
const Moses::TrellisPath &path = **iter;
featureValues.push_back(path.GetScoreBreakdown());
// NOTE(review): only one of dynBleuScore/realBleuScore is assigned per
// branch, yet both are printed below when realBleu is set — the d-bleu
// value printed in that case is uninitialized. Confirm intent.
float bleuScore, dynBleuScore, realBleuScore;
if (realBleu) realBleuScore = m_bleuScoreFeature->CalculateBleu(path.GetTargetPhrase());
else dynBleuScore = getBleuScore(featureValues.back());
bleuScore = realBleu ? realBleuScore : dynBleuScore;
bleuScores.push_back(bleuScore);
//std::cout << "Score breakdown: " << path.GetScoreBreakdown() << endl;
// model score = total decoder score minus the weighted BLEU component
float scoreWithoutBleu = path.GetTotalScore() - (bleuObjectiveWeight * bleuScoreWeight * bleuScore);
modelScores.push_back(scoreWithoutBleu);
if (iter != nBestList.begin())
cerr << endl;
cerr << "Rank " << rank << ", epoch " << epoch << ", \"" << path.GetTargetPhrase() << "\", score: "
<< scoreWithoutBleu << ", Bleu: " << bleuScore << ", total: " << path.GetTotalScore();
if (m_bleuScoreFeature->Enabled() && realBleu)
cerr << " (d-bleu: " << dynBleuScore << ", r-bleu: " << realBleuScore << ") ";
// set bleu score to zero in the feature vector since we do not want to optimise its weight
setBleuScore(featureValues.back(), 0);
}
// prepare translations to return
vector< vector<const Word*> > translations;
for (size_t i=0; i < numReturnedTranslations && i < nBestList.GetSize(); ++i) {
const TrellisPath &path = nBestList.at(i);
Phrase phrase = path.GetTargetPhrase();
vector<const Word*> translation;
for (size_t pos = 0; pos < phrase.GetSize(); ++pos) {
const Word &word = phrase.GetWord(pos);
// deep copy: each returned Word is heap-allocated and caller-owned
Word *newWord = new Word(word);
translation.push_back(newWord);
}
translations.push_back(translation);
}
return translations;
}
/**
 * Chart (hierarchical) decoding path: mirrors runDecoder() but uses
 * ChartManager/ChartTrellisPathList. Fills featureValues, bleuScores and
 * modelScores with one entry per n-best hypothesis and returns deep
 * copies of the hypotheses; the caller owns the returned Word pointers.
 * NOTE(review): unlike runDecoder(), this returns ALL n-best entries —
 * numReturnedTranslations is ignored here. Confirm whether intentional.
 */
vector< vector<const Word*> > MosesDecoder::runChartDecoder(const std::string& source,
size_t sentenceid,
size_t nBestSize,
float bleuObjectiveWeight,
float bleuScoreWeight,
vector< ScoreComponentCollection>& featureValues,
vector< float>& bleuScores,
vector< float>& modelScores,
size_t numReturnedTranslations,
bool realBleu,
bool distinct,
size_t rank,
size_t epoch,
const TranslationSystem& system) {
// run the decoder
m_chartManager = new ChartManager(*m_sentence, &system);
m_chartManager->ProcessSentence();
ChartTrellisPathList nBestList;
m_chartManager->CalcNBest(nBestSize, nBestList, distinct);
// read off the feature values and bleu scores for each sentence in the nbest list
ChartTrellisPathList::const_iterator iter;
for (iter = nBestList.begin() ; iter != nBestList.end() ; ++iter) {
const Moses::ChartTrellisPath &path = **iter;
featureValues.push_back(path.GetScoreBreakdown());
// both BLEU variants are computed here (unlike runDecoder)
float bleuScore, dynBleuScore, realBleuScore;
dynBleuScore = getBleuScore(featureValues.back());
realBleuScore = m_bleuScoreFeature->CalculateBleu(path.GetOutputPhrase());
bleuScore = realBleu ? realBleuScore : dynBleuScore;
bleuScores.push_back(bleuScore);
//std::cout << "Score breakdown: " << path.GetScoreBreakdown() << endl;
// model score = total decoder score minus the weighted BLEU component
float scoreWithoutBleu = path.GetTotalScore() - (bleuObjectiveWeight * bleuScoreWeight * bleuScore);
modelScores.push_back(scoreWithoutBleu);
if (iter != nBestList.begin())
cerr << endl;
cerr << "Rank " << rank << ", epoch " << epoch << ", \"" << path.GetOutputPhrase() << "\", score: "
<< scoreWithoutBleu << ", Bleu: " << bleuScore << ", total: " << path.GetTotalScore();
if (m_bleuScoreFeature->Enabled() && realBleu)
cerr << " (d-bleu: " << dynBleuScore << ", r-bleu: " << realBleuScore << ") ";
// set bleu score to zero in the feature vector since we do not want to optimise its weight
setBleuScore(featureValues.back(), 0);
}
// prepare translations to return
vector< vector<const Word*> > translations;
for (iter = nBestList.begin() ; iter != nBestList.end() ; ++iter) {
const ChartTrellisPath &path = **iter;
Phrase phrase = path.GetOutputPhrase();
vector<const Word*> translation;
for (size_t pos = 0; pos < phrase.GetSize(); ++pos) {
const Word &word = phrase.GetWord(pos);
// deep copy: each returned Word is heap-allocated and caller-owned
Word *newWord = new Word(word);
translation.push_back(newWord);
}
translations.push_back(translation);
}
return translations;
}
/**
 * Decode one sentence and emit its n-best list, either to the named file
 * (if filename is non-empty) or to streamOut.
 * NOTE(review): the chart branch prints "not implemented" and calls
 * exit(1) before any output — everything after that exit is dead code.
 * In the phrase-based branch the OutputNBest calls are commented out, so
 * no n-best entries are actually written at present.
 */
void MosesDecoder::outputNBestList(const std::string& source, size_t sentenceid,
size_t nBestSize, float bleuObjectiveWeight, float bleuScoreWeight,
bool distinctNbest, bool avgRefLength, string filename, ofstream& streamOut) {
StaticData &staticData = StaticData::InstanceNonConst();
bool chartDecoding = (staticData.GetSearchAlgorithm() == ChartDecoding);
// per-sentence setup: parse the input and configure the BLEU feature
initialize(staticData, source, sentenceid, bleuObjectiveWeight, bleuScoreWeight, avgRefLength, chartDecoding);
const TranslationSystem& system = staticData.GetTranslationSystem(TranslationSystem::DEFAULT);
if (chartDecoding) {
m_chartManager = new ChartManager(*m_sentence, &system);
m_chartManager->ProcessSentence();
ChartTrellisPathList nBestList;
m_chartManager->CalcNBest(nBestSize, nBestList, distinctNbest);
cerr << "generate nbest list " << filename << endl;
cerr << "not implemented.." << endl;
exit(1);
// unreachable from here to the end of this branch (see NOTE above)
if (filename != "") {
ofstream out(filename.c_str());
if (!out) {
ostringstream msg;
msg << "Unable to open " << filename;
throw runtime_error(msg.str());
}
// TODO: handle sentence id (for now always 0)
// OutputNBestList(const ChartTrellisPathList &nBestList, const ChartHypothesis *bestHypo, const TranslationSystem* system, long translationId, false)
// OutputNBest(out, nBestList, StaticData::Instance().GetOutputFactorOrder(),m_manager->GetTranslationSystem(), 0, false);
out.close();
}
else {
// OutputNBest(streamOut, nBestList, StaticData::Instance().GetOutputFactorOrder(),m_manager->GetTranslationSystem(), sentenceid, false);
}
}
else {
// run the decoder
m_manager = new Moses::Manager(0,*m_sentence, staticData.GetSearchAlgorithm(), &system);
m_manager->ProcessSentence();
TrellisPathList nBestList;
m_manager->CalcNBest(nBestSize, nBestList, distinctNbest);
if (filename != "") {
ofstream out(filename.c_str());
if (!out) {
ostringstream msg;
msg << "Unable to open " << filename;
throw runtime_error(msg.str());
}
// TODO: handle sentence id (for now always 0)
//OutputNBest(out, nBestList, StaticData::Instance().GetOutputFactorOrder(),m_manager->GetTranslationSystem(), 0, false);
out.close();
}
else {
//OutputNBest(streamOut, nBestList, StaticData::Instance().GetOutputFactorOrder(),m_manager->GetTranslationSystem(), sentenceid, false);
streamOut.flush();
}
}
}
/**
 * Per-sentence setup shared by all decoding entry points: parses the raw
 * source into m_sentence (caller must later free it via cleanup()) and
 * configures the BLEU feature for this sentence — weight, source lengths,
 * reference length statistic, and reference n-grams.
 */
void MosesDecoder::initialize(StaticData& staticData, const std::string& source, size_t sentenceid,
float bleuObjectiveWeight, float bleuScoreWeight, bool avgRefLength, bool chartDecoding) {
m_sentence = new Sentence();
stringstream in(source + "\n");
const std::vector<FactorType> &inputFactorOrder = staticData.GetInputFactorOrder();
m_sentence->Read(in,inputFactorOrder);
// set weight of BleuScoreFeature
//cerr << "Reload Bleu feature weight: " << bleuObjectiveWeight*bleuScoreWeight << " (" << bleuObjectiveWeight << "*" << bleuScoreWeight << ")" << endl;
staticData.ReLoadBleuScoreFeatureParameter(bleuObjectiveWeight*bleuScoreWeight);
m_bleuScoreFeature->SetCurrSourceLength((*m_sentence).GetSize());
// chart input carries <s> and </s> markers, hence the -2 normalisation
if (chartDecoding)
m_bleuScoreFeature->SetCurrNormSourceLength((*m_sentence).GetSize()-2);
else
m_bleuScoreFeature->SetCurrNormSourceLength((*m_sentence).GetSize());
// choose which reference-length statistic BLEU will be computed against
if (avgRefLength)
m_bleuScoreFeature->SetCurrAvgRefLength(sentenceid);
else
m_bleuScoreFeature->SetCurrShortestRefLength(sentenceid);
m_bleuScoreFeature->SetCurrReferenceNgrams(sentenceid);
}
// Read the (dynamic) BLEU component out of a feature-value vector.
float MosesDecoder::getBleuScore(const ScoreComponentCollection& scores) {
  const float bleuComponent = scores.GetScoreForProducer(m_bleuScoreFeature);
  return bleuComponent;
}
// Overwrite the BLEU component of a feature-value vector.
void MosesDecoder::setBleuScore(ScoreComponentCollection& scores, float bleu) {
  BleuScoreFeature* bleuFeature = m_bleuScoreFeature;
  scores.Assign(bleuFeature, bleu);
}
// Snapshot of all current feature weights held by the global StaticData.
ScoreComponentCollection MosesDecoder::getWeights() {
  const StaticData& staticData = StaticData::Instance();
  return staticData.GetAllWeights();
}
void MosesDecoder::setWeights(const ScoreComponentCollection& weights) {
StaticData::InstanceNonConst().SetAllWeights(weights);
}
void MosesDecoder::updateHistory(const vector<const Word*>& words) {
m_bleuScoreFeature->UpdateHistory(words);
}
void MosesDecoder::updateHistory(const vector< vector< const Word*> >& words, vector<size_t>& sourceLengths, vector<size_t>& ref_ids, size_t rank, size_t epoch) {
m_bleuScoreFeature->UpdateHistory(words, sourceLengths, ref_ids, rank, epoch);
}
// Dump the BLEU feature's accumulated history to the given stream.
void MosesDecoder::printBleuFeatureHistory(std::ostream& out) {
  BleuScoreFeature* bleuFeature = m_bleuScoreFeature;
  bleuFeature->PrintHistory(out);
}
// Length of the reference closest to hypoLength for the given sentence.
size_t MosesDecoder::getClosestReferenceLength(size_t ref_id, int hypoLength) {
  const size_t closest = m_bleuScoreFeature->GetClosestRefLength(ref_id, hypoLength);
  return closest;
}
// Index of the shortest reference for the given sentence.
size_t MosesDecoder::getShortestReferenceIndex(size_t ref_id) {
  const size_t shortest = m_bleuScoreFeature->GetShortestRefIndex(ref_id);
  return shortest;
}
void MosesDecoder::setBleuParameters(bool disable, bool sentenceBleu, bool scaleByInputLength, bool scaleByAvgInputLength,
bool scaleByInverseLength, bool scaleByAvgInverseLength,
float scaleByX, float historySmoothing, size_t scheme, bool simpleHistoryBleu) {
m_bleuScoreFeature->SetBleuParameters(disable, sentenceBleu, scaleByInputLength, scaleByAvgInputLength,
scaleByInverseLength, scaleByAvgInverseLength,
scaleByX, historySmoothing, scheme, simpleHistoryBleu);
}
}

140
mira/Decoder.h Normal file
View File

@ -0,0 +1,140 @@
/***********************************************************************
Moses - factored phrase-based language decoder
Copyright (C) 2010 University of Edinburgh
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/
#ifndef _MIRA_DECODER_H_
#define _MIRA_DECODER_H_
#include <iostream>
#include <cstring>
#include <sstream>
#include "BleuScoreFeature.h"
#include "ChartTrellisPathList.h"
#include "Hypothesis.h"
#include "Parameter.h"
#include "SearchNormal.h"
#include "Sentence.h"
#include "StaticData.h"
//
// Wrapper functions and objects for the decoder.
//
namespace Mira {
/**
* Wraps moses decoder.
**/
// Wrapper around the Moses decoder used by the MIRA trainer: owns the
// per-sentence decoding state and exposes n-best extraction plus access
// to the BLEU pseudo-document feature.
class MosesDecoder {
public:
/**
* Initialise moses (including StaticData) using the given ini file and debuglevel, passing through any
* other command line arguments.
**/
MosesDecoder(const std::string& inifile, int debuglevel, int argc, std::vector<std::string> decoder_params);
//returns the best sentence
// Decode and return up to numReturnedTranslations hypotheses; fills
// featureValues/bleuScores/modelScores with one entry per n-best item.
std::vector< std::vector<const Moses::Word*> > getNBest(const std::string& source,
size_t sentenceid,
size_t nbestSize,
float bleuObjectiveweight, //weight of bleu in objective
float bleuScoreWeight, //weight of bleu in score
std::vector< Moses::ScoreComponentCollection>& featureValues,
std::vector< float>& bleuScores,
std::vector< float>& modelScores,
size_t numReturnedTranslations,
bool realBleu,
bool distinct,
bool avgRefLength,
size_t rank,
size_t epoch,
std::string filename);
// Phrase-based decoding path used by getNBest().
// NOTE(review): the parameter name "seach" is a typo for "search".
std::vector< std::vector<const Moses::Word*> > runDecoder(const std::string& source,
size_t sentenceid,
size_t nbestSize,
float bleuObjectiveweight, //weight of bleu in objective
float bleuScoreWeight, //weight of bleu in score
std::vector< Moses::ScoreComponentCollection>& featureValues,
std::vector< float>& bleuScores,
std::vector< float>& modelScores,
size_t numReturnedTranslations,
bool realBleu,
bool distinct,
size_t rank,
size_t epoch,
Moses::SearchAlgorithm& seach,
const Moses::TranslationSystem& system,
std::string filename);
// Chart (hierarchical) decoding path used by getNBest().
std::vector< std::vector<const Moses::Word*> > runChartDecoder(const std::string& source,
size_t sentenceid,
size_t nbestSize,
float bleuObjectiveweight, //weight of bleu in objective
float bleuScoreWeight, //weight of bleu in score
std::vector< Moses::ScoreComponentCollection>& featureValues,
std::vector< float>& bleuScores,
std::vector< float>& modelScores,
size_t numReturnedTranslations,
bool realBleu,
bool distinct,
size_t rank,
size_t epoch,
const Moses::TranslationSystem& system);
// Decode and emit the n-best list to filename (if non-empty) or streamOut.
void outputNBestList(const std::string& source,
size_t sentenceid,
size_t nBestSize,
float bleuObjectiveWeight,
float bleuScoreWeight,
bool distinctNbest,
bool avgRefLength,
std::string filename,
std::ofstream& streamOut);
// Per-sentence setup: parse the source and configure the BLEU feature.
void initialize(Moses::StaticData& staticData, const std::string& source, size_t sentenceid,
float bleuObjectiveWeight, float bleuScoreWeight, bool avgRefLength, bool chartDecoding);
void updateHistory(const std::vector<const Moses::Word*>& words);
void updateHistory(const std::vector< std::vector< const Moses::Word*> >& words, std::vector<size_t>& sourceLengths, std::vector<size_t>& ref_ids, size_t rank, size_t epoch);
void printBleuFeatureHistory(std::ostream& out);
void printReferenceLength(const std::vector<size_t>& ref_ids);
size_t getReferenceLength(size_t ref_id);
size_t getClosestReferenceLength(size_t ref_id, int hypoLength);
size_t getShortestReferenceIndex(size_t ref_id);
void setBleuParameters(bool disable, bool sentenceBleu, bool scaleByInputLength, bool scaleByAvgInputLength,
bool scaleByInverseLength, bool scaleByAvgInverseLength,
float scaleByX, float historySmoothing, size_t scheme, bool simpleHistoryBleu);
void setAvgInputLength (float l) { m_bleuScoreFeature->SetAvgInputLength(l); }
Moses::ScoreComponentCollection getWeights();
void setWeights(const Moses::ScoreComponentCollection& weights);
// Free per-sentence objects created during decoding (see Decoder.cpp).
void cleanup(bool chartDecoding);
float getSourceLengthHistory() { return m_bleuScoreFeature->GetSourceLengthHistory(); }
float getTargetLengthHistory() { return m_bleuScoreFeature->GetTargetLengthHistory(); }
float getAverageInputLength() { return m_bleuScoreFeature->GetAverageInputLength(); }
private:
float getBleuScore(const Moses::ScoreComponentCollection& scores);
void setBleuScore(Moses::ScoreComponentCollection& scores, float bleu);
// Per-sentence decoding state; presumably freed by cleanup() — the BLEU
// feature pointer is owned by StaticData, not by this class. TODO confirm.
Moses::Manager *m_manager;
Moses::ChartManager *m_chartManager;
Moses::Sentence *m_sentence;
Moses::BleuScoreFeature *m_bleuScoreFeature;
};
} //namespace
#endif

188
mira/Hildreth.cpp Normal file
View File

@ -0,0 +1,188 @@
#include "Hildreth.h"

#include <cmath>
#include <vector>
using namespace Moses;
using namespace std;
namespace Mira {
/**
 * Hildreth's algorithm for the dual of the MIRA quadratic program:
 * iteratively takes a coordinate step on the most violated KKT condition,
 * subject to alpha_i >= 0 (no upper bound in this overload).
 * @param a constraint (feature-difference) vectors
 * @param b loss values, one per constraint
 * @return the optimal dual variables alpha
 */
vector<float> Hildreth::optimise (const vector<ScoreComponentCollection>& a, const vector<float>& b) {

  size_t i;
  int max_iter = 10000;        // safety cap on coordinate steps
  float eps = 0.00000001;      // KKT violation tolerance
  float zero = 0.000000000001; // numerical zero

  vector<float> alpha ( b.size() );
  vector<float> F ( b.size() );
  vector<float> kkt ( b.size() );

  float max_kkt = -1e100;

  size_t K = b.size();

  // Gram matrix A[i][j] = <a_i, a_j>; off-diagonal columns are filled
  // lazily on first use. std::vector replaces the original
  // variable-length arrays, which are not standard C++.
  vector<vector<float> > A(K, vector<float>(K, 0.0f));
  vector<bool> is_computed(K, false);
  for ( i = 0; i < K; i++ )
  {
    A[i][i] = a[i].InnerProduct(a[i]);
  }

  int max_kkt_i = -1;

  for ( i = 0; i < b.size(); i++ )
  {
    F[i] = b[i];
    kkt[i] = F[i];
    if ( kkt[i] > max_kkt )
    {
      max_kkt = kkt[i];
      max_kkt_i = i;
    }
  }

  int iter = 0;
  float diff_alpha;
  float try_alpha;
  float add_alpha;

  while ( max_kkt >= eps && iter < max_iter )
  {
    // Unconstrained coordinate step on the most violated index.
    diff_alpha = A[max_kkt_i][max_kkt_i] <= zero ? 0.0 : F[max_kkt_i]/A[max_kkt_i][max_kkt_i];
    try_alpha = alpha[max_kkt_i] + diff_alpha;
    add_alpha = 0.0;

    // Project back onto the feasible region alpha >= 0.
    if ( try_alpha < 0.0 )
      add_alpha = -1.0 * alpha[max_kkt_i];
    else
      add_alpha = diff_alpha;

    alpha[max_kkt_i] = alpha[max_kkt_i] + add_alpha;

    // Fill this Gram-matrix column on first use.
    if ( !is_computed[max_kkt_i] )
    {
      for ( i = 0; i < K; i++ )
      {
        A[i][max_kkt_i] = a[i].InnerProduct(a[max_kkt_i]);
      }
      is_computed[max_kkt_i] = true;
    }

    for ( i = 0; i < F.size(); i++ )
    {
      F[i] -= add_alpha * A[i][max_kkt_i];
      kkt[i] = F[i];
      if ( alpha[i] > zero )
        // fabs, not abs: plain abs() binds to the integer overload here
        // and silently truncates the float violation to an int.
        kkt[i] = fabs ( F[i] );
    }

    // Locate the next most violated KKT condition.
    max_kkt = -1e100;
    max_kkt_i = -1;
    for ( i = 0; i < F.size(); i++ )
      if ( kkt[i] > max_kkt )
      {
        max_kkt = kkt[i];
        max_kkt_i = i;
      }

    iter++;
  }

  return alpha;
}
vector<float> Hildreth::optimise (const vector<ScoreComponentCollection>& a, const vector<float>& b, float C) {
  // Slack-bounded variant of the solver above: identical greedy coordinate
  // ascent, but each dual variable is clipped to the box 0 <= alpha[i] <= C.
  //
  // a: gradient (feature-difference) vector of each constraint
  // b: right-hand side of each constraint
  // C: upper bound (slack constant) on every alpha

  int max_iter = 10000;        // hard cap on update steps
  float eps = 0.00000001;      // convergence threshold on the max KKT violation
  float zero = 0.000000000001; // numerical-zero guard

  vector<float> alpha ( b.size() );
  vector<float> F ( b.size() );   // residuals b - A*alpha
  vector<float> kkt ( b.size() ); // per-constraint KKT violation

  float max_kkt = -1e100;

  size_t K = b.size();

  // Lazily-filled Gram matrix; heap-allocated instead of the original
  // non-standard variable-length array (stack-overflow risk for large K).
  vector< vector<float> > A ( K, vector<float> ( K, 0.0f ) );
  vector<char> is_computed ( K, 0 );
  for ( size_t i = 0; i < K; i++ )
  {
    A[i][i] = a[i].InnerProduct(a[i]);
  }

  // Initially alpha = 0, so F = b and each constraint's violation is b[i].
  int max_kkt_i = -1;
  for ( size_t i = 0; i < b.size(); i++ )
  {
    F[i] = b[i];
    kkt[i] = F[i];
    if ( kkt[i] > max_kkt )
    {
      max_kkt = kkt[i];
      max_kkt_i = i;
    }
  }

  int iter = 0;
  float diff_alpha;
  float try_alpha;
  float add_alpha;

  while ( max_kkt >= eps && iter < max_iter )
  {
    diff_alpha = A[max_kkt_i][max_kkt_i] <= zero ? 0.0 : F[max_kkt_i]/A[max_kkt_i][max_kkt_i];
    try_alpha = alpha[max_kkt_i] + diff_alpha;
    add_alpha = 0.0;

    // Clip the step so the updated alpha stays inside [0, C].
    if ( try_alpha < 0.0 )
      add_alpha = -1.0 * alpha[max_kkt_i];
    else if (try_alpha > C)
      add_alpha = C - alpha[max_kkt_i];
    else
      add_alpha = diff_alpha;

    alpha[max_kkt_i] = alpha[max_kkt_i] + add_alpha;

    // Fill in this constraint's Gram-matrix column on first use.
    if ( !is_computed[max_kkt_i] )
    {
      for ( size_t i = 0; i < K; i++ )
      {
        A[i][max_kkt_i] = a[i].InnerProduct(a[max_kkt_i] );
      }
      is_computed[max_kkt_i] = 1; // hoisted: was redundantly re-set K times inside the loop
    }

    // Residual/violation update.  At the upper bound (alpha ~ C) the KKT
    // conditions only require F[i] <= 0, hence the sign flip; strictly inside
    // the box they require F[i] == 0, hence the absolute value.  std::fabs
    // replaces the original unqualified abs(), which can resolve to the
    // integer overload (truncating the float residual) depending on
    // transitive includes.
    for ( size_t i = 0; i < F.size(); i++ )
    {
      F[i] -= add_alpha * A[i][max_kkt_i];
      kkt[i] = F[i];
      if (alpha[i] > C - zero)
        kkt[i]=-kkt[i];
      else if (alpha[i] > zero)
        kkt[i] = std::fabs(F[i]);
    }

    // Select the next most-violated constraint.
    max_kkt = -1e100;
    max_kkt_i = -1;
    for ( size_t i = 0; i < F.size(); i++ )
      if ( kkt[i] > max_kkt )
      {
        max_kkt = kkt[i];
        max_kkt_i = i;
      }

    iter++;
  }

  return alpha;
}
}

11
mira/Hildreth.h Normal file
View File

@ -0,0 +1,11 @@
#pragma once
// Guard added: the original header had no include guard or #pragma once,
// so double inclusion caused a class redefinition error.

#include <vector>

#include "FeatureVector.h"
#include "ScoreComponentCollection.h"

namespace Mira {

  // Hildreth's algorithm for the quadratic programs arising in MIRA-style
  // margin updates.  Each overload returns one update weight (alpha) per
  // constraint, given constraint gradients a and right-hand sides b.
  class Hildreth {
    public :
      // Duals constrained only to alpha[i] >= 0.
      static std::vector<float> optimise (const std::vector<Moses::ScoreComponentCollection>& a, const std::vector<float>& b );
      // Slack-bounded duals: 0 <= alpha[i] <= C.
      static std::vector<float> optimise (const std::vector<Moses::ScoreComponentCollection>& a, const std::vector<float>& b, float C);
  };
}

784
mira/HildrethTest.cpp Normal file
View File

@ -0,0 +1,784 @@
/***********************************************************************
Moses - factored phrase-based language decoder
Copyright (C) 2010 University of Edinburgh
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/
#include <cstdio>
#include <cstdlib>
#include <string>
#include <boost/test/unit_test.hpp>
#include "Hildreth.h"
#include "Optimiser.h"
#include "ScoreComponentCollection.h"
using namespace std;
using namespace Moses;
using namespace Mira;
namespace MosesTest
{
// Stateless mock feature with a single dense score component, used only to
// populate ScoreComponentCollections in the tests below.
class MockSingleFeature : public StatelessFeatureFunction {
public:
MockSingleFeature(): StatelessFeatureFunction("MockSingle",1) {}
std::string GetScoreProducerWeightShortName(unsigned) const {return "sf";}
};
// Stateless mock feature with five dense score components -- the feature
// used by every test case in this file (all test vectors have 5 entries).
class MockMultiFeature : public StatelessFeatureFunction {
public:
MockMultiFeature(): StatelessFeatureFunction("MockMulti",5) {}
std::string GetScoreProducerWeightShortName(unsigned) const {return "mf";}
};
// Stateless mock feature with an unlimited (sparse) number of components.
// Declared for completeness; the cases below only use the multi feature.
class MockSparseFeature : public StatelessFeatureFunction {
public:
MockSparseFeature(): StatelessFeatureFunction("MockSparse", ScoreProducer::unlimited) {}
std::string GetScoreProducerWeightShortName(unsigned) const {return "sf";}
};
// Fixture bundling the three mock features; BOOST_FIXTURE_TEST_CASE makes
// its members (single, multi, sparse) directly visible in each test body.
struct MockProducers {
MockProducers() {}
MockSingleFeature single;
MockMultiFeature multi;
MockSparseFeature sparse;
};
BOOST_AUTO_TEST_SUITE(hildreth_test)
// Feasible 3-constraint problem: solves for the dual alphas both without a
// slack bound and with slack C=0.01, applies each resulting update to the
// weights, and prints the new model-score differences next to the losses.
// Diagnostic only -- this case prints to cerr and makes no BOOST_CHECKs.
BOOST_FIXTURE_TEST_CASE(test_hildreth_1, MockProducers)
{
// Feasible example with 3 constraints
cerr << "\n>>>>>Hildreth test, without slack and with 0.01 slack" << endl << endl;
vector< ScoreComponentCollection> featureValueDiffs;
vector< float> lossMinusModelScoreDiff;
// initial weights
float w[] = { 1, 1, 1, 1, 0 };
vector<float> vec(w,w+5);
ScoreComponentCollection weights;
weights.PlusEquals(&multi, vec);
// feature values (second is oracle)
//float arr1[] = {0, -5, -27.0908, -1.83258, 0 };
//float arr2[] = {0, -5, -29.158, -1.83258, 0 };
//float arr3[] = {0, -5, -27.0908, -1.83258, 0 };
// feature value differences (to oracle)
ScoreComponentCollection s1, s2, s3;
float arr1[] = { 0, 0, -2.0672, 0, 0 };
float arr2[] = { 0, 0, 0, 0, 0 };
float arr3[] = { 0, 0, -2.0672, 0, 0 };
float loss1 = 2.34085;
float loss2 = 0;
float loss3 = 2.34085;
vector<float> vec1(arr1,arr1+5);
vector<float> vec2(arr2,arr2+5);
vector<float> vec3(arr3,arr3+5);
s1.PlusEquals(&multi,vec1);
s2.PlusEquals(&multi,vec2);
s3.PlusEquals(&multi,vec3);
featureValueDiffs.push_back(s1);
featureValueDiffs.push_back(s2);
featureValueDiffs.push_back(s3);
cerr << "feature value diff: " << featureValueDiffs[0] << endl;
cerr << "feature value diff: " << featureValueDiffs[1] << endl;
cerr << "feature value diff: " << featureValueDiffs[2] << endl << endl;
// b_i for the QP: loss_i minus the current model-score margin.
float oldModelScoreDiff1 = featureValueDiffs[0].InnerProduct(weights);
float oldModelScoreDiff2 = featureValueDiffs[1].InnerProduct(weights);
float oldModelScoreDiff3 = featureValueDiffs[2].InnerProduct(weights);
cerr << "model score diff: " << oldModelScoreDiff1 << ", loss: " << loss1 << endl;
cerr << "model score diff: " << oldModelScoreDiff2 << ", loss: " << loss2 << endl;
cerr << "model score diff: " << oldModelScoreDiff3 << ", loss: " << loss3 << endl << endl;
lossMinusModelScoreDiff.push_back(loss1 - oldModelScoreDiff1);
lossMinusModelScoreDiff.push_back(loss2 - oldModelScoreDiff2);
lossMinusModelScoreDiff.push_back(loss3 - oldModelScoreDiff3);
// Solve the same QP twice: unbounded alphas and alphas capped at 0.01.
vector< float> alphas1 = Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiff);
vector< float> alphas2 = Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiff, 0.01);
cerr << "\nalphas without slack:" << endl;
for (size_t i = 0; i < alphas1.size(); ++i) {
cerr << "alpha " << i << ": " << alphas1[i] << endl;
}
cerr << endl;
cerr << "partial updates:" << endl;
// Scale each constraint's feature difference by its alpha and accumulate
// the total weight update.
vector< ScoreComponentCollection> featureValueDiffs1(featureValueDiffs);
FVector totalUpdate1 = ScoreComponentCollection::CreateFVector();
for (size_t k = 0; k < featureValueDiffs1.size(); ++k) {
featureValueDiffs1[k].MultiplyEquals(alphas1[k]);
cerr << k << ": " << featureValueDiffs1[k].GetScoresVector() << endl;
FVector update = featureValueDiffs1[k].GetScoresVector();
totalUpdate1 += update;
}
cerr << endl;
cerr << "total update: " << totalUpdate1 << endl << endl;
ScoreComponentCollection weightsUpdate1(weights);
weightsUpdate1.PlusEquals(totalUpdate1);
cerr << "new weights: " << weightsUpdate1 << endl << endl;
// Margins under the updated weights, printed next to the losses they
// should now (approximately) meet.
float newModelScoreDiff1 = featureValueDiffs[0].InnerProduct(weightsUpdate1);
float newModelScoreDiff2 = featureValueDiffs[1].InnerProduct(weightsUpdate1);
float newModelScoreDiff3 = featureValueDiffs[2].InnerProduct(weightsUpdate1);
cerr << "new model score diff: " << newModelScoreDiff1 << ", loss: " << loss1 << endl;
cerr << "new model score diff: " << newModelScoreDiff2 << ", loss: " << loss2 << endl;
cerr << "new model score diff: " << newModelScoreDiff3 << ", loss: " << loss3 << endl;
// Same procedure for the slack-bounded solution.
cerr << "\n\nalphas with slack 0.01:" << endl;
for (size_t i = 0; i < alphas2.size(); ++i) {
cerr << "alpha " << i << ": " << alphas2[i] << endl;
}
cerr << endl;
cerr << "partial updates:" << endl;
vector< ScoreComponentCollection> featureValueDiffs2(featureValueDiffs);
FVector totalUpdate2 = ScoreComponentCollection::CreateFVector();
for (size_t k = 0; k < featureValueDiffs2.size(); ++k) {
featureValueDiffs2[k].MultiplyEquals(alphas2[k]);
cerr << k << ": " << featureValueDiffs2[k].GetScoresVector() << endl;
FVector update = featureValueDiffs2[k].GetScoresVector();
totalUpdate2 += update;
}
cerr << endl;
cerr << "total update: " << totalUpdate2 << endl << endl;
ScoreComponentCollection weightsUpdate2(weights);
weightsUpdate2.PlusEquals(totalUpdate2);
cerr << "new weights: " << weightsUpdate2 << endl << endl;
float newModelScoreDiff4 = featureValueDiffs[0].InnerProduct(weightsUpdate2);
float newModelScoreDiff5 = featureValueDiffs[1].InnerProduct(weightsUpdate2);
float newModelScoreDiff6 = featureValueDiffs[2].InnerProduct(weightsUpdate2);
cerr << "new model score diff: " << newModelScoreDiff4 << ", loss: " << loss1 << endl;
cerr << "new model score diff: " << newModelScoreDiff5 << ", loss: " << loss2 << endl;
cerr << "new model score diff: " << newModelScoreDiff6 << ", loss: " << loss3 << endl;
}
// Infeasible 21-constraint problem (constraints conflict, so no weight
// vector satisfies all margins).  Solves with and without slack C=0.01 and
// prints how close each updated margin gets to its loss.  Diagnostic only --
// no BOOST_CHECK assertions.
BOOST_FIXTURE_TEST_CASE(test_hildreth_3, MockProducers)
{
// Unfeasible example with 21 constraints
cerr << "\n>>>>>Hildreth test, without slack and with 0.01 slack" << endl << endl;
vector< ScoreComponentCollection> featureValueDiffs;
vector< float> lossMinusModelScoreDiff;
// initial weights
float w[] = { 1, 1, 0.638672, 1, 0 };
vector<float> vec(w,w+5);
ScoreComponentCollection weights;
weights.PlusEquals(&multi, vec);
int numberOfConstraints = 21;
// feature value differences (to oracle)
// NOTE: these feature values are only approximations
ScoreComponentCollection s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, s16, s17, s18, s19, s20, s21;
float arr1[] = { 0, 0, -2.0672, 0, 0 };
float arr2[] = { 0, 0, 0, 0, 0 };
float arr3[] = { 0, 0, -2.08436, 1.38629, 0 };
float arr4[] = { 0, 0, -0.0171661, 1.38629, 0 };
float arr5[] = { 0, 0, 4.4283, 0, 0 };
float arr6[] = { 0, 0, 3.84829, 1.38629, 0 };
float arr7[] = { 0, 0, 6.83689, 0, 0 };
float arr8[] = { 0, 0, 0, 0, 0 };
float arr9[] = { 0, 0, -2.0672, 0, 0 };
float arr10[] = { 0, 0, -0.0171661, 1.38629, 0 };
float arr11[] = { 0, 0, -2.08436, 1.38629, 0 };
float arr12[] = { 0, 0, 4.4283, 0, 0 };
float arr13[] = { 3, 0, 2.41089, 0, 0 };
float arr14[] = { 3, 0, 2.32709, 0, 0 };
float arr15[] = { 0, 0, -2.0672, 0, 0 };
float arr16[] = { 0, 0, -2.08436, 1.38629, 0 };
float arr17[] = { 0, 0, 4.4283, 0, 0 };
float arr18[] = { 0, 0, 3.84829, 1.38629, 0 };
float arr19[] = { 0, 0, -0.0171661, 1.38629, 0 };
float arr20[] = { 0, 0, 0, 0, 0 };
float arr21[] = { 0, 0, 6.83689, 0, 0 };
// One loss per constraint, in the same order as the arr* vectors.
vector<float> losses;
losses.push_back(2.73485);
losses.push_back(0);
losses.push_back(3.64118);
losses.push_back(1.47347);
losses.push_back(3.64118);
losses.push_back(4.16278);
losses.push_back(3.13952);
losses.push_back(0);
losses.push_back(2.73485);
losses.push_back(1.47347);
losses.push_back(3.64118);
losses.push_back(3.64118);
losses.push_back(2.51662);
losses.push_back(2.73485);
losses.push_back(2.73485);
losses.push_back(3.64118);
losses.push_back(3.64118);
losses.push_back(4.16278);
losses.push_back(1.47347);
losses.push_back(0);
losses.push_back(3.13952);
vector<float> vec1(arr1,arr1+5);
vector<float> vec2(arr2,arr2+5);
vector<float> vec3(arr3,arr3+5);
vector<float> vec4(arr4,arr4+5);
vector<float> vec5(arr5,arr5+5);
vector<float> vec6(arr6,arr6+5);
vector<float> vec7(arr7,arr7+5);
vector<float> vec8(arr8,arr8+5);
vector<float> vec9(arr9,arr9+5);
vector<float> vec10(arr10,arr10+5);
vector<float> vec11(arr11,arr11+5);
vector<float> vec12(arr12,arr12+5);
vector<float> vec13(arr13,arr13+5);
vector<float> vec14(arr14,arr14+5);
vector<float> vec15(arr15,arr15+5);
vector<float> vec16(arr16,arr16+5);
vector<float> vec17(arr17,arr17+5);
vector<float> vec18(arr18,arr18+5);
vector<float> vec19(arr19,arr19+5);
vector<float> vec20(arr20,arr20+5);
vector<float> vec21(arr21,arr21+5);
s1.PlusEquals(&multi,vec1);
s2.PlusEquals(&multi,vec2);
s3.PlusEquals(&multi,vec3);
s4.PlusEquals(&multi,vec4);
s5.PlusEquals(&multi,vec5);
s6.PlusEquals(&multi,vec6);
s7.PlusEquals(&multi,vec7);
s8.PlusEquals(&multi,vec8);
s9.PlusEquals(&multi,vec9);
s10.PlusEquals(&multi,vec10);
s11.PlusEquals(&multi,vec11);
s12.PlusEquals(&multi,vec12);
s13.PlusEquals(&multi,vec13);
s14.PlusEquals(&multi,vec14);
s15.PlusEquals(&multi,vec15);
s16.PlusEquals(&multi,vec16);
s17.PlusEquals(&multi,vec17);
s18.PlusEquals(&multi,vec18);
s19.PlusEquals(&multi,vec19);
s20.PlusEquals(&multi,vec20);
s21.PlusEquals(&multi,vec21);
featureValueDiffs.push_back(s1);
featureValueDiffs.push_back(s2);
featureValueDiffs.push_back(s3);
featureValueDiffs.push_back(s4);
featureValueDiffs.push_back(s5);
featureValueDiffs.push_back(s6);
featureValueDiffs.push_back(s7);
featureValueDiffs.push_back(s8);
featureValueDiffs.push_back(s9);
featureValueDiffs.push_back(s10);
featureValueDiffs.push_back(s11);
featureValueDiffs.push_back(s12);
featureValueDiffs.push_back(s13);
featureValueDiffs.push_back(s14);
featureValueDiffs.push_back(s15);
featureValueDiffs.push_back(s16);
featureValueDiffs.push_back(s17);
featureValueDiffs.push_back(s18);
featureValueDiffs.push_back(s19);
featureValueDiffs.push_back(s20);
featureValueDiffs.push_back(s21);
// b_i for the QP: loss_i minus the current margin; the trailing 1/0 flag
// in the printout marks constraints already satisfied.
vector<float> oldModelScoreDiff;
for (int i = 0; i < numberOfConstraints; ++i) {
oldModelScoreDiff.push_back(featureValueDiffs[i].InnerProduct(weights));
}
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "old model score diff: " << oldModelScoreDiff[i] << ", loss: " << losses[i] << "\t" << (oldModelScoreDiff[i] >= losses[i] ? 1 : 0) << endl;
}
for (int i = 0; i < numberOfConstraints; ++i) {
lossMinusModelScoreDiff.push_back(losses[i] - oldModelScoreDiff[i]);
}
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "A: " << featureValueDiffs[i] << ", b: " << lossMinusModelScoreDiff[i] << endl;
}
// Solve without slack and with slack C=0.01.
vector< float> alphas1 = Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiff);
vector< float> alphas2 = Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiff, 0.01);
cerr << "\nalphas without slack:" << endl;
for (size_t i = 0; i < alphas1.size(); ++i) {
cerr << "alpha " << i << ": " << alphas1[i] << endl;
}
cerr << endl;
cerr << "partial updates:" << endl;
// Accumulate alpha-weighted feature differences into the weight update.
vector< ScoreComponentCollection> featureValueDiffs1(featureValueDiffs);
FVector totalUpdate1 = ScoreComponentCollection::CreateFVector();
for (size_t k = 0; k < featureValueDiffs1.size(); ++k) {
featureValueDiffs1[k].MultiplyEquals(alphas1[k]);
cerr << k << ": " << featureValueDiffs1[k].GetScoresVector() << endl;
FVector update = featureValueDiffs1[k].GetScoresVector();
totalUpdate1 += update;
}
cerr << endl;
cerr << "total update: " << totalUpdate1 << endl << endl;
ScoreComponentCollection weightsUpdate1(weights);
weightsUpdate1.PlusEquals(totalUpdate1);
cerr << "old weights: " << weights << endl;
cerr << "new weights: " << weightsUpdate1 << endl << endl;
vector<float> newModelScoreDiff;
for (int i = 0; i < numberOfConstraints; ++i) {
newModelScoreDiff.push_back(featureValueDiffs[i].InnerProduct(weightsUpdate1));
}
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "new model score diff: " << newModelScoreDiff[i] << ", loss: " << losses[i] << "\t" << (newModelScoreDiff[i] >= losses[i] ? 1 : 0) << endl;
}
// Same procedure for the slack-bounded solution.
cerr << "\n\nalphas with slack 0.01:" << endl;
for (size_t i = 0; i < alphas2.size(); ++i) {
cerr << "alpha " << i << ": " << alphas2[i] << endl;
}
cerr << endl;
cerr << "partial updates:" << endl;
vector< ScoreComponentCollection> featureValueDiffs2(featureValueDiffs);
FVector totalUpdate2 = ScoreComponentCollection::CreateFVector();
for (size_t k = 0; k < featureValueDiffs2.size(); ++k) {
featureValueDiffs2[k].MultiplyEquals(alphas2[k]);
cerr << k << ": " << featureValueDiffs2[k].GetScoresVector() << endl;
FVector update = featureValueDiffs2[k].GetScoresVector();
totalUpdate2 += update;
}
cerr << endl;
cerr << "total update: " << totalUpdate2 << endl << endl;
ScoreComponentCollection weightsUpdate2(weights);
weightsUpdate2.PlusEquals(totalUpdate2);
cerr << "old weights: " << weights << endl;
cerr << "new weights: " << weightsUpdate2 << endl << endl;
newModelScoreDiff.clear();
for (int i = 0; i < numberOfConstraints; ++i) {
newModelScoreDiff.push_back(featureValueDiffs[i].InnerProduct(weightsUpdate2));
}
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "new model score diff: " << newModelScoreDiff[i] << ", loss: " << losses[i] << endl;
}
}
// Feasible 8-constraint problem: the same data as test_hildreth_3 with the
// conflicting constraints commented out (the commented lines record which
// ones were dropped).  Solves with and without slack C=0.01 and prints the
// resulting margins.  Diagnostic only -- no BOOST_CHECK assertions.
BOOST_FIXTURE_TEST_CASE(test_hildreth_4, MockProducers)
{
// Feasible example with 8 constraints
cerr << "\n>>>>>Hildreth test, without slack and with 0.01 slack" << endl << endl;
vector< ScoreComponentCollection> featureValueDiffs;
vector< float> lossMinusModelScoreDiff;
// initial weights
float w[] = { 1, 1, 0.638672, 1, 0 };
vector<float> vec(w,w+5);
ScoreComponentCollection weights;
weights.PlusEquals(&multi, vec);
int numberOfConstraints = 8;
// feature value differences (to oracle)
// NOTE: these feature values are only approximations
ScoreComponentCollection s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, s16, s17, s18, s19, s20, s21;
float arr1[] = { 0, 0, -2.0672, 0, 0 };
float arr2[] = { 0, 0, 0, 0, 0 };
float arr3[] = { 0, 0, -2.08436, 1.38629, 0 };
float arr4[] = { 0, 0, -0.0171661, 1.38629, 0 };
// float arr5[] = { 0, 0, 4.4283, 0, 0 };
// float arr6[] = { 0, 0, 3.84829, 1.38629, 0 };
// float arr7[] = { 0, 0, 6.83689, 0, 0 };
float arr8[] = { 0, 0, 0, 0, 0 };
float arr9[] = { 0, 0, -2.0672, 0, 0 };
// float arr10[] = { 0, 0, -0.0171661, 1.38629, 0 };
// float arr11[] = { 0, 0, -2.08436, 1.38629, 0 };
// float arr12[] = { 0, 0, 4.4283, 0, 0 };
// float arr13[] = { 3, 0, 2.41089, 0, 0 };
// float arr14[] = { 3, 0, 2.32709, 0, 0 };
float arr15[] = { 0, 0, -2.0672, 0, 0 };
float arr16[] = { 0, 0, -2.08436, 1.38629, 0 };
// float arr17[] = { 0, 0, 4.4283, 0, 0 };
// float arr18[] = { 0, 0, 3.84829, 1.38629, 0 };
// float arr19[] = { 0, 0, -0.0171661, 1.38629, 0 };
// float arr20[] = { 0, 0, 0, 0, 0 };
// float arr21[] = { 0, 0, 6.83689, 0, 0 };
// One loss per remaining constraint, in the same order as the arr* vectors.
vector<float> losses;
losses.push_back(2.73485);
losses.push_back(0);
losses.push_back(3.64118);
losses.push_back(1.47347);
// losses.push_back(3.64118);
// losses.push_back(4.16278);
// losses.push_back(3.13952);
losses.push_back(0);
losses.push_back(2.73485);
// losses.push_back(1.47347);
// losses.push_back(3.64118);
// losses.push_back(3.64118);
// losses.push_back(2.51662);
// losses.push_back(2.73485);
losses.push_back(2.73485);
losses.push_back(3.64118);
// losses.push_back(3.64118);
// losses.push_back(4.16278);
// losses.push_back(1.47347);
// losses.push_back(0);
// losses.push_back(3.13952);
vector<float> vec1(arr1,arr1+5);
vector<float> vec2(arr2,arr2+5);
vector<float> vec3(arr3,arr3+5);
vector<float> vec4(arr4,arr4+5);
// vector<float> vec5(arr5,arr5+5);
// vector<float> vec6(arr6,arr6+5);
// vector<float> vec7(arr7,arr7+5);
vector<float> vec8(arr8,arr8+5);
vector<float> vec9(arr9,arr9+5);
// vector<float> vec10(arr10,arr10+5);
// vector<float> vec11(arr11,arr11+5);
// vector<float> vec12(arr12,arr12+5);
// vector<float> vec13(arr13,arr13+5);
// vector<float> vec14(arr14,arr14+5);
vector<float> vec15(arr15,arr15+5);
vector<float> vec16(arr16,arr16+5);
// vector<float> vec17(arr17,arr17+5);
// vector<float> vec18(arr18,arr18+5);
// vector<float> vec19(arr19,arr19+5);
// vector<float> vec20(arr20,arr20+5);
// vector<float> vec21(arr21,arr21+5);
s1.PlusEquals(&multi,vec1);
s2.PlusEquals(&multi,vec2);
s3.PlusEquals(&multi,vec3);
s4.PlusEquals(&multi,vec4);
// s5.PlusEquals(&multi,vec5);
// s6.PlusEquals(&multi,vec6);
// s7.PlusEquals(&multi,vec7);
s8.PlusEquals(&multi,vec8);
s9.PlusEquals(&multi,vec9);
// s10.PlusEquals(&multi,vec10);
// s11.PlusEquals(&multi,vec11);
// s12.PlusEquals(&multi,vec12);
// s13.PlusEquals(&multi,vec13);
// s14.PlusEquals(&multi,vec14);
s15.PlusEquals(&multi,vec15);
s16.PlusEquals(&multi,vec16);
// s17.PlusEquals(&multi,vec17);
// s18.PlusEquals(&multi,vec18);
// s19.PlusEquals(&multi,vec19);
// s20.PlusEquals(&multi,vec20);
// s21.PlusEquals(&multi,vec21);
featureValueDiffs.push_back(s1);
featureValueDiffs.push_back(s2);
featureValueDiffs.push_back(s3);
featureValueDiffs.push_back(s4);
// featureValueDiffs.push_back(s5);
// featureValueDiffs.push_back(s6);
// featureValueDiffs.push_back(s7);
featureValueDiffs.push_back(s8);
featureValueDiffs.push_back(s9);
// featureValueDiffs.push_back(s10);
// featureValueDiffs.push_back(s11);
// featureValueDiffs.push_back(s12);
// featureValueDiffs.push_back(s13);
// featureValueDiffs.push_back(s14);
featureValueDiffs.push_back(s15);
featureValueDiffs.push_back(s16);
// featureValueDiffs.push_back(s17);
// featureValueDiffs.push_back(s18);
// featureValueDiffs.push_back(s19);
// featureValueDiffs.push_back(s20);
// featureValueDiffs.push_back(s21);
// b_i for the QP: loss_i minus the current margin; the trailing 1/0 flag
// marks constraints already satisfied.
vector<float> oldModelScoreDiff;
for (int i = 0; i < numberOfConstraints; ++i) {
oldModelScoreDiff.push_back(featureValueDiffs[i].InnerProduct(weights));
}
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "old model score diff: " << oldModelScoreDiff[i] << ", loss: " << losses[i] << "\t" << (oldModelScoreDiff[i] >= losses[i] ? 1 : 0) << endl;
}
for (int i = 0; i < numberOfConstraints; ++i) {
lossMinusModelScoreDiff.push_back(losses[i] - oldModelScoreDiff[i]);
}
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "A: " << featureValueDiffs[i] << ", b: " << lossMinusModelScoreDiff[i] << endl;
}
// Solve without slack and with slack C=0.01.
vector< float> alphas1 = Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiff);
vector< float> alphas2 = Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiff, 0.01);
cerr << "\nalphas without slack:" << endl;
for (size_t i = 0; i < alphas1.size(); ++i) {
cerr << "alpha " << i << ": " << alphas1[i] << endl;
}
cerr << endl;
cerr << "partial updates:" << endl;
// Accumulate alpha-weighted feature differences into the weight update.
vector< ScoreComponentCollection> featureValueDiffs1(featureValueDiffs);
FVector totalUpdate1 = ScoreComponentCollection::CreateFVector();
for (size_t k = 0; k < featureValueDiffs1.size(); ++k) {
featureValueDiffs1[k].MultiplyEquals(alphas1[k]);
cerr << k << ": " << featureValueDiffs1[k].GetScoresVector() << endl;
FVector update = featureValueDiffs1[k].GetScoresVector();
totalUpdate1 += update;
}
cerr << endl;
cerr << "total update: " << totalUpdate1 << endl << endl;
ScoreComponentCollection weightsUpdate1(weights);
weightsUpdate1.PlusEquals(totalUpdate1);
cerr << "old weights: " << weights << endl;
cerr << "new weights: " << weightsUpdate1 << endl << endl;
vector<float> newModelScoreDiff;
for (int i = 0; i < numberOfConstraints; ++i) {
newModelScoreDiff.push_back(featureValueDiffs[i].InnerProduct(weightsUpdate1));
}
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "new model score diff: " << newModelScoreDiff[i] << ", loss: " << losses[i] << "\t" << (newModelScoreDiff[i] >= losses[i] ? 1 : 0) << endl;
}
// Same procedure for the slack-bounded solution.
cerr << "\n\nalphas with slack 0.01:" << endl;
for (size_t i = 0; i < alphas2.size(); ++i) {
cerr << "alpha " << i << ": " << alphas2[i] << endl;
}
cerr << endl;
cerr << "partial updates:" << endl;
vector< ScoreComponentCollection> featureValueDiffs2(featureValueDiffs);
FVector totalUpdate2 = ScoreComponentCollection::CreateFVector();
for (size_t k = 0; k < featureValueDiffs2.size(); ++k) {
featureValueDiffs2[k].MultiplyEquals(alphas2[k]);
cerr << k << ": " << featureValueDiffs2[k].GetScoresVector() << endl;
FVector update = featureValueDiffs2[k].GetScoresVector();
totalUpdate2 += update;
}
cerr << endl;
cerr << "total update: " << totalUpdate2 << endl << endl;
ScoreComponentCollection weightsUpdate2(weights);
weightsUpdate2.PlusEquals(totalUpdate2);
cerr << "old weights: " << weights << endl;
cerr << "new weights: " << weightsUpdate2 << endl << endl;
newModelScoreDiff.clear();
for (int i = 0; i < numberOfConstraints; ++i) {
newModelScoreDiff.push_back(featureValueDiffs[i].InnerProduct(weightsUpdate2));
}
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "new model score diff: " << newModelScoreDiff[i] << ", loss: " << losses[i] << endl;
}
}
// Infeasible 2-constraint problem: the two feature differences pull the same
// weight component in opposite directions, so both margins cannot be met.
// Solves without slack and with C=0.01 and C=0.1, and prints the summed
// constraint error before and after each update to show the trade-off.
// Diagnostic only -- no BOOST_CHECK assertions.
BOOST_FIXTURE_TEST_CASE(test_hildreth_5, MockProducers)
{
// Unfeasible example with 2 constraints
cerr << "\n>>>>>Hildreth test, without slack and with 0.01 slack" << endl << endl;
vector< ScoreComponentCollection> featureValueDiffs;
vector< float> lossMinusModelScoreDiff;
// initial weights
float w[] = { 1, 1, 0.638672, 1, 0 };
vector<float> vec(w,w+5);
ScoreComponentCollection weights;
weights.PlusEquals(&multi, vec);
int numberOfConstraints = 2;
// feature value differences (to oracle)
// NOTE: these feature values are only approximations
ScoreComponentCollection s1, s17;
float arr1[] = { 0, 0, -2.0672, 0, 0 };
float arr17[] = { 0, 0, 4.4283, 0, 0 };
vector<float> losses;
losses.push_back(2.73485);
losses.push_back(3.64118);
vector<float> vec1(arr1,arr1+5);
vector<float> vec17(arr17,arr17+5);
s1.PlusEquals(&multi,vec1);
s17.PlusEquals(&multi,vec17);
featureValueDiffs.push_back(s1);
featureValueDiffs.push_back(s17);
// b_i for the QP: loss_i minus the current margin; also track the total
// error so the three slack settings can be compared.
vector<float> oldModelScoreDiff;
for (int i = 0; i < numberOfConstraints; ++i) {
oldModelScoreDiff.push_back(featureValueDiffs[i].InnerProduct(weights));
}
float sumOfOldError = 0;
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "old model score diff: " << oldModelScoreDiff[i] << ", loss: " << losses[i] << "\t" << (oldModelScoreDiff[i] >= losses[i] ? 1 : 0) << endl;
sumOfOldError += (losses[i] - oldModelScoreDiff[i]);
}
cerr << "sum of old error: " << sumOfOldError << endl;
for (int i = 0; i < numberOfConstraints; ++i) {
lossMinusModelScoreDiff.push_back(losses[i] - oldModelScoreDiff[i]);
}
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "A: " << featureValueDiffs[i] << ", b: " << lossMinusModelScoreDiff[i] << endl;
}
// Three solver configurations: unbounded, C=0.01, C=0.1.
vector< float> alphas1 = Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiff);
vector< float> alphas2 = Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiff, 0.01);
vector< float> alphas3 = Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiff, 0.1);
cerr << "\nalphas without slack:" << endl;
for (size_t i = 0; i < alphas1.size(); ++i) {
cerr << "alpha " << i << ": " << alphas1[i] << endl;
}
cerr << endl;
cerr << "partial updates:" << endl;
// Accumulate alpha-weighted feature differences into the weight update.
vector< ScoreComponentCollection> featureValueDiffs1(featureValueDiffs);
FVector totalUpdate1 = ScoreComponentCollection::CreateFVector();
for (size_t k = 0; k < featureValueDiffs1.size(); ++k) {
featureValueDiffs1[k].MultiplyEquals(alphas1[k]);
cerr << k << ": " << featureValueDiffs1[k].GetScoresVector() << endl;
FVector update = featureValueDiffs1[k].GetScoresVector();
totalUpdate1 += update;
}
cerr << endl;
cerr << "total update: " << totalUpdate1 << endl << endl;
ScoreComponentCollection weightsUpdate1(weights);
weightsUpdate1.PlusEquals(totalUpdate1);
cerr << "old weights: " << weights << endl;
cerr << "new weights: " << weightsUpdate1 << endl << endl;
vector<float> newModelScoreDiff;
for (int i = 0; i < numberOfConstraints; ++i) {
newModelScoreDiff.push_back(featureValueDiffs[i].InnerProduct(weightsUpdate1));
}
float sumOfNewError = 0;
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "new model score diff: " << newModelScoreDiff[i] << ", loss: " << losses[i] << "\t" << (newModelScoreDiff[i] >= losses[i] ? 1 : 0) << endl;
sumOfNewError += (losses[i] - newModelScoreDiff[i]);
}
cerr << "sum of new error: " << sumOfNewError << endl;
// Same procedure with slack C=0.01.
cerr << "\n\nalphas with slack 0.01:" << endl;
for (size_t i = 0; i < alphas2.size(); ++i) {
cerr << "alpha " << i << ": " << alphas2[i] << endl;
}
cerr << endl;
cerr << "partial updates:" << endl;
vector< ScoreComponentCollection> featureValueDiffs2(featureValueDiffs);
FVector totalUpdate2 = ScoreComponentCollection::CreateFVector();
for (size_t k = 0; k < featureValueDiffs2.size(); ++k) {
featureValueDiffs2[k].MultiplyEquals(alphas2[k]);
cerr << k << ": " << featureValueDiffs2[k].GetScoresVector() << endl;
FVector update = featureValueDiffs2[k].GetScoresVector();
totalUpdate2 += update;
}
cerr << endl;
cerr << "total update: " << totalUpdate2 << endl << endl;
ScoreComponentCollection weightsUpdate2(weights);
weightsUpdate2.PlusEquals(totalUpdate2);
cerr << "old weights: " << weights << endl;
cerr << "new weights: " << weightsUpdate2 << endl << endl;
newModelScoreDiff.clear();
for (int i = 0; i < numberOfConstraints; ++i) {
newModelScoreDiff.push_back(featureValueDiffs[i].InnerProduct(weightsUpdate2));
}
sumOfNewError = 0;
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "new model score diff: " << newModelScoreDiff[i] << ", loss: " << losses[i] << "\t" << (newModelScoreDiff[i] >= losses[i] ? 1 : 0) << endl;
sumOfNewError += (losses[i] - newModelScoreDiff[i]);
}
cerr << "sum of new error: " << sumOfNewError << endl;
// Same procedure with slack C=0.1.
cerr << "\n\nalphas with slack 0.1:" << endl;
for (size_t i = 0; i < alphas3.size(); ++i) {
cerr << "alpha " << i << ": " << alphas3[i] << endl;
}
cerr << endl;
cerr << "partial updates:" << endl;
vector< ScoreComponentCollection> featureValueDiffs3(featureValueDiffs);
FVector totalUpdate3 = ScoreComponentCollection::CreateFVector();
for (size_t k = 0; k < featureValueDiffs3.size(); ++k) {
featureValueDiffs3[k].MultiplyEquals(alphas3[k]);
cerr << k << ": " << featureValueDiffs3[k].GetScoresVector() << endl;
FVector update = featureValueDiffs3[k].GetScoresVector();
totalUpdate3 += update;
}
cerr << endl;
cerr << "total update: " << totalUpdate3 << endl << endl;
ScoreComponentCollection weightsUpdate3(weights);
weightsUpdate3.PlusEquals(totalUpdate3);
cerr << "old weights: " << weights << endl;
cerr << "new weights: " << weightsUpdate3 << endl << endl;
newModelScoreDiff.clear();
for (int i = 0; i < numberOfConstraints; ++i) {
newModelScoreDiff.push_back(featureValueDiffs[i].InnerProduct(weightsUpdate3));
}
sumOfNewError = 0;
for (int i = 0; i < numberOfConstraints; ++i) {
cerr << "new model score diff: " << newModelScoreDiff[i] << ", loss: " << losses[i] << "\t" << (newModelScoreDiff[i] >= losses[i] ? 1 : 0) << endl;
sumOfNewError += (losses[i] - newModelScoreDiff[i]);
}
cerr << "sum of new error: " << sumOfNewError << endl;
}
BOOST_AUTO_TEST_SUITE_END()
}

62
mira/HypothesisQueue.cpp Normal file
View File

@ -0,0 +1,62 @@
/***********************************************************************
Moses - statistical machine translation system
Copyright (C) 2006-2011 University of Edinburgh
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/
#include <iostream>
#include "HypothesisQueue.h"
using namespace std;
namespace Moses {
// Empties the queue.  BleuIndexPair is a value type (pair<float, size_t>),
// so there is nothing to delete per item; the explicit clear() is redundant
// with the container's own destructor but harmless.
HypothesisQueue::~HypothesisQueue() {
m_queue.clear();
}
// Inserts hypo into the bounded queue.  With unlimited capacity (0) or spare
// room the pair is stored directly; at capacity it displaces the last-ordered
// entry only when its BLEU score beats that entry's, otherwise it is dropped.
void HypothesisQueue::Push(BleuIndexPair hypo) {
  const bool unbounded = (m_capacity == 0);
  if (unbounded || m_queue.size() < m_capacity) {
    m_queue.insert(hypo);
    return;
  }

  if (hypo.first > m_queue.rbegin()->first) {
    // Add the new entry first, then drop the entry that now orders last.
    // A multiset insert cannot fail on duplicates, so no success check is
    // needed before erasing.
    m_queue.insert(hypo);
    HypoQueueType::iterator last = m_queue.end();
    --last;
    m_queue.erase(last);
  }
  // Otherwise the queue is full and hypo scores no better than the worst
  // item already held, so it is discarded.
}
// Remove and return the best-scoring pair; the ordering places it at the
// front of the multiset.
BleuIndexPair HypothesisQueue::Pop() {
  BleuIndexPair best = *m_queue.begin();
  m_queue.erase(m_queue.begin());
  return best;
}
} // namespace Moses

65
mira/HypothesisQueue.h Normal file
View File

@ -0,0 +1,65 @@
/***********************************************************************
Moses - statistical machine translation system
Copyright (C) 2006-2011 University of Edinburgh
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/
#pragma once
#include <set>
namespace Moses {
// Pair of (BLEU score, hypothesis index).
typedef std::pair<float, size_t> BleuIndexPair;

// A bounded priority queue of BleuIndexPairs, ordered by BLEU score; the top
// item is the best-scoring hypothesis. Items are stored by value (no
// ownership management is involved). When the queue is full, pushing a
// strictly better item evicts the current worst one.
class HypothesisQueue {
 public:
  // Create an empty queue with fixed capacity c. Capacity 0 means unbounded.
  HypothesisQueue(size_t c) : m_capacity(c) {}

  ~HypothesisQueue();

  bool Empty() { return m_queue.empty(); }

  // Add hypo to the queue, or discard it if the queue is full and hypo's
  // score is no better than the queue's current worst score.
  void Push(BleuIndexPair hypo);

  // Remove the best-scoring pair from the queue and return it.
  BleuIndexPair Pop();

 private:
  // Orders pairs so that higher BLEU scores come first in the container.
  struct HypothesisOrderer {
    bool operator()(BleuIndexPair a, BleuIndexPair b) {
      return (a.first > b.first);
    }
  };

  // A multiset (rather than a set) is used so that hypotheses with equal
  // BLEU scores are all retained.
  typedef std::multiset<BleuIndexPair, HypothesisOrderer> HypoQueueType;
  //typedef std::set<BleuIndexPair, HypothesisOrderer> HypoQueueType;

  HypoQueueType m_queue;
  const size_t m_capacity;
};
} // namespace Moses

13
mira/Jamfile Normal file
View File

@ -0,0 +1,13 @@
# Library with every mira source except the executable entry point
# (Main.cpp) and the unit tests (*Test.cpp).
lib mira_lib :
[ glob *.cpp : *Test.cpp Main.cpp ]
../moses-cmd/src//IOWrapper_lib ../mert//bleu_lib ../moses/src//moses ../OnDiskPt//OnDiskPt ..//boost_program_options ;
# The mira trainer executable: just Main.cpp linked against the library.
exe mira : Main.cpp mira_lib ;
alias programs : mira ;
# Unit tests are built separately and only on request (explicit target).
import testing ;
unit-test mira_test : [ glob *Test.cpp ] mira_lib ..//boost_unit_test_framework ;
explicit mira_test ;

1992
mira/Main.cpp Normal file

File diff suppressed because it is too large Load Diff

58
mira/Main.h Normal file
View File

@ -0,0 +1,58 @@
/***********************************************************************
Moses - factored phrase-based language decoder
Copyright (C) 2010 University of Edinburgh
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/
#ifndef MAIN_H_
#define MAIN_H_
#include <vector>
#include "ScoreComponentCollection.h"
#include "Word.h"
#include "ScoreProducer.h"
#include "Decoder.h"
// Maps each feature-score producer to its vector of weights (one weight per
// score component the producer contributes).
typedef std::map<const Moses::ScoreProducer*, std::vector< float > > ProducerWeightMap;
// A single (producer, weights) entry of a ProducerWeightMap.
typedef std::pair<const Moses::ScoreProducer*, std::vector< float > > ProducerWeightPair;
// Parse s into t using the stream manipulator f (e.g. std::dec, std::hex).
// Returns true iff the extraction succeeded.
template <class T> bool from_string(T& t, const std::string& s, std::ios_base& (*f)(std::ios_base&))
{
  std::istringstream input(s);
  input >> f >> t;
  return !input.fail();
}
// Functor returning a pseudo-random index in [0, max).
// Bug fix: the original called srand(time(0)) on EVERY invocation, which
// reseeds the generator with the current second — all calls made within the
// same wall-clock second therefore returned the identical "random" index.
// Seed once per process instead.
struct RandomIndex {
  ptrdiff_t operator()(ptrdiff_t max) {
    // One-time seeding; subsequent calls advance the rand() sequence.
    static bool seeded = false;
    if (!seeded) {
      srand(time(0));
      seeded = true;
    }
    return static_cast<ptrdiff_t>(rand() % max);
  }
};
//void OutputNBestList(const MosesChart::TrellisPathList &nBestList, const TranslationSystem* system, long translationId);
// Read the corpus at 'filename' into 'sentences', presumably one sentence
// per line; returns success. (Defined in Main.cpp — confirm semantics there.)
bool loadSentences(const std::string& filename, std::vector<std::string>& sentences);
// NOTE(review): looks like a modulo test deciding whether to mix/dump weights
// at this shard position — confirm against the definition in Main.cpp.
bool evaluateModulo(size_t shard_position, size_t mix_or_dump_base, size_t actual_batch_size);
// Debug helper: print the nested feature-value collections.
void printFeatureValues(std::vector<std::vector<Moses::ScoreComponentCollection> > &featureValues);
// Presumably zeroes/ignores the core (dense) features listed in coreWeightMap.
void ignoreCoreFeatures(std::vector<std::vector<Moses::ScoreComponentCollection> > &featureValues, ProducerWeightMap &coreWeightMap);
void takeLogs(std::vector<std::vector<Moses::ScoreComponentCollection> > &featureValues, size_t base);
// Free the Word pointers held in 'translations'.
void deleteTranslations(std::vector<std::vector<const Moses::Word*> > &translations);
// Decode hope or fear translations (rank/size suggest MPI sharding — verify).
void decodeHopeOrFear(size_t rank, size_t size, size_t decode, std::string decode_filename, std::vector<std::string> &inputSentences, Mira::MosesDecoder* decoder, size_t n, float bleuWeight);
// Scale feature values by per-group learning rates (core vs. sparse).
void applyLearningRates(std::vector<std::vector<Moses::ScoreComponentCollection> > &featureValues, float core_r0, float sparse_r0);
void applyPerFeatureLearningRates(std::vector<std::vector<Moses::ScoreComponentCollection> > &featureValues, Moses::ScoreComponentCollection featureLearningRates, float sparse_r0);
// Scale the score(s) of one producer by scaling_factor (singular/plural
// variants — see Main.cpp for the difference between the two).
void scaleFeatureScore(Moses::ScoreProducer *sp, float scaling_factor, std::vector<std::vector<Moses::ScoreComponentCollection> > &featureValues, size_t rank, size_t epoch);
void scaleFeatureScores(Moses::ScoreProducer *sp, float scaling_factor, std::vector<std::vector<Moses::ScoreComponentCollection> > &featureValues, size_t rank, size_t epoch);
#endif /* MAIN_H_ */

Some files were not shown because too many files have changed in this diff Show More