/***********************************************************************
Moses - factored phrase-based language decoder
Copyright (C) 2006 University of Edinburgh

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/
2013-05-02 14:14:17 +04:00
# include "util/exception.hh"
2013-04-22 15:21:59 +04:00
# include "moses/TranslationModel/PhraseDictionaryMultiModelCounts.h"
# define LINE_MAX_LENGTH 100000
# include "phrase-extract/SafeGetline.h" // for SAFE_GETLINE()
using namespace std ;
2013-05-15 17:34:31 +04:00
// Debug helper: write the elements of a vector to stderr, space-separated
// and flushed after each element, terminated by a newline.
template<typename T>
void OutputVec(const std::vector<T> &vec)
{
  typedef typename std::vector<T>::const_iterator VecIter;
  for (VecIter it = vec.begin(); it != vec.end(); ++it) {
    std::cerr << *it << " " << std::flush;
  }
  std::cerr << std::endl;
}
2013-04-22 15:21:59 +04:00
// from phrase-extract/tables-core.cpp
// Split a C string on spaces and tabs; runs of separators produce no empty
// tokens, and leading/trailing separators are ignored.
std::vector<std::string> tokenize(const char* input)
{
  std::vector<std::string> tokens;
  size_t pos = 0;
  while (input[pos] != '\0') {
    // skip a run of separators
    while (input[pos] == ' ' || input[pos] == '\t') ++pos;
    if (input[pos] == '\0') break;
    // scan one token
    const size_t begin = pos;
    while (input[pos] != '\0' && input[pos] != ' ' && input[pos] != '\t') ++pos;
    tokens.push_back(std::string(input + begin, pos - begin));
  }
  return tokens;
}
namespace Moses
{
2013-05-13 20:20:14 +04:00
// Construct from a moses.ini feature line. Defaults to instance weighting;
// "mode", "lex-e2f", "lex-f2e" and "target-table" are consumed later in
// SetParameter(), invoked via ReadParameters().
PhraseDictionaryMultiModelCounts::PhraseDictionaryMultiModelCounts(const std::string &line)
  : PhraseDictionaryMultiModel("PhraseDictionaryMultiModelCounts", line)
{
  // Defaults; may be overridden by the "mode" key in SetParameter().
  m_mode = "instance_weighting";
  m_combineFunction = InstanceWeighting;
  cerr << "m_args=" << m_args.size() << endl;
  ReadParameters();

  // Exactly one inverse ("target") table must be configured per component model.
  CHECK(m_targetTable.size() == m_pdStr.size());
}
2013-06-20 16:25:02 +04:00
// Consume one key/value pair from the feature line.
// Recognized keys: "mode" (combination strategy), "lex-e2f"/"lex-f2e"
// (comma-separated lexical table paths, one per component model) and
// "target-table" (comma-separated inverse phrase tables). Anything else is
// delegated to the base class.
void PhraseDictionaryMultiModelCounts::SetParameter(const std::string& key, const std::string& value)
{
  if (key == "mode") {
    m_mode = value;
    // Select the count-combination function matching the requested mode.
    if (m_mode == "instance_weighting")
      m_combineFunction = InstanceWeighting;
    else if (m_mode == "interpolate")
      m_combineFunction = LinearInterpolationFromCounts;
    else {
      ostringstream msg;
      msg << "combination mode unknown: " << m_mode;
      throw runtime_error(msg.str());
    }
  } else if (key == "lex-e2f") {
    m_lexE2FStr = Tokenize(value, ",");
    // one lexical table per component model
    CHECK(m_lexE2FStr.size() == m_pdStr.size());
  } else if (key == "lex-f2e") {
    m_lexF2EStr = Tokenize(value, ",");
    CHECK(m_lexF2EStr.size() == m_pdStr.size());
  } else if (key == "target-table") {
    m_targetTable = Tokenize(value, ",");
  } else {
    PhraseDictionaryMultiModel::SetParameter(key, value);
  }
}
2013-04-22 15:21:59 +04:00
// The lexical tables are owned by this object (allocated in Load());
// delete every element of both collections.
PhraseDictionaryMultiModelCounts::~PhraseDictionaryMultiModelCounts()
{
  RemoveAllInColl(m_lexTable_e2f);
  RemoveAllInColl(m_lexTable_f2e);
}
2013-05-14 16:11:55 +04:00
2013-05-31 23:21:02 +04:00
// Resolve the configured component models and load the lexical tables.
// Populates four parallel vectors indexed by model: m_pd (forward phrase
// tables), m_inverse_pd (target-to-source tables, used for target counts),
// m_lexTable_e2f and m_lexTable_f2e (owned; freed in the destructor).
void PhraseDictionaryMultiModelCounts::Load()
{
  SetFeaturesToApply();

  for (size_t i = 0; i < m_numModels; ++i) {

    // forward phrase table, looked up by its feature name
    const string &ptName = m_pdStr[i];

    PhraseDictionary *pt;
    pt = FindPhraseDictionary(ptName);
    CHECK(pt);
    m_pd.push_back(pt);

    // inverse (target-to-source) phrase table
    const string &target_table = m_targetTable[i];
    pt = FindPhraseDictionary(target_table);
    CHECK(pt);
    m_inverse_pd.push_back(pt);

    // lexical count tables in both directions; ownership stays with this object
    string lex_e2f = m_lexE2FStr[i];
    string lex_f2e = m_lexF2EStr[i];
    lexicalTable* e2f = new lexicalTable;
    LoadLexicalTable(lex_e2f, e2f);
    lexicalTable* f2e = new lexicalTable;
    LoadLexicalTable(lex_f2e, f2e);

    m_lexTable_e2f.push_back(e2f);
    m_lexTable_f2e.push_back(f2e);
  }
}
2013-08-24 00:02:03 +04:00
// Build the combined target phrase collection for a source phrase by
// collecting count statistics from all component models and merging them
// with the configured combination function.
const TargetPhraseCollection *PhraseDictionaryMultiModelCounts::GetTargetPhraseCollectionLEGACY(const Phrase& src) const
{
  vector<vector<float> > multimodelweights;
  bool normalize;
  // interpolation requires weights that sum to one; instance weighting does not
  normalize = (m_mode == "interpolate") ? true : false;
  // one weight vector per feature (4 features: two phrase, two lexical)
  multimodelweights = getWeights(4, normalize);

  //source phrase frequency is shared among all phrase pairs
  vector<float> fs(m_numModels);

  // allStats ownership passes to CreateTargetPhraseCollectionCounts, which
  // deletes it together with its values
  map<string, multiModelCountsStatistics*>* allStats = new(map<string, multiModelCountsStatistics*>);
  CollectSufficientStatistics(src, fs, allStats);
  TargetPhraseCollection *ret = CreateTargetPhraseCollectionCounts(src, fs, allStats, multimodelweights);

  ret->NthElement(m_tableLimit); // sort the phrases for pruning later
  // remember the collection so it can be freed after sentence processing
  const_cast<PhraseDictionaryMultiModelCounts*>(this)->CacheForCleanup(ret);
  return ret;
}
// Gather per-model count statistics for every target phrase any component
// model proposes for src. On return, fs[i] holds the source phrase count of
// model i, and allStats maps each target string to per-model joint counts
// (fst) and target counts (ft). Count tables expose counts as the first three
// scores: presumably [0]=joint f(s,t), [1]=target f(t), [2]=source f(s) —
// matches the fst/ft/fs member names; confirm against the count-table format.
void PhraseDictionaryMultiModelCounts::CollectSufficientStatistics(const Phrase& src, vector<float> &fs, map<string, multiModelCountsStatistics*>* allStats) const
//fill fs and allStats with statistics from models
{
  for (size_t i = 0; i < m_numModels; ++i) {
    const PhraseDictionary &pd = *m_pd[i];

    TargetPhraseCollection *ret_raw = (TargetPhraseCollection*) pd.GetTargetPhraseCollectionLEGACY(src);

    if (ret_raw != NULL) {
      TargetPhraseCollection::iterator iterTargetPhrase;
      for (iterTargetPhrase = ret_raw->begin(); iterTargetPhrase != ret_raw->end(); ++iterTargetPhrase) {
        const TargetPhrase * targetPhrase = *iterTargetPhrase;
        // raw count-table scores of the component model for this phrase pair
        vector<float> raw_scores = targetPhrase->GetScoreBreakdown().GetScoresForProducer(&pd);

        string targetString = targetPhrase->GetStringRep(m_output);
        // first model to propose this target phrase creates the entry
        if (allStats->find(targetString) == allStats->end()) {

          multiModelCountsStatistics * statistics = new multiModelCountsStatistics;
          statistics->targetPhrase = new TargetPhrase(*targetPhrase); //make a copy so that we don't overwrite the original phrase table info

          //correct future cost estimates and total score
          statistics->targetPhrase->GetScoreBreakdown().InvertDenseFeatures(&pd);
          vector<FeatureFunction*> pd_feature;
          pd_feature.push_back(m_pd[i]);
          const vector<FeatureFunction*> pd_feature_const(pd_feature);
          statistics->targetPhrase->Evaluate(src, pd_feature_const);
          // zero out scores from original phrase table
          statistics->targetPhrase->GetScoreBreakdown().ZeroDenseFeatures(&pd);

          // one slot per component model; models that never propose this
          // pair keep a 0 joint count
          statistics->fst.resize(m_numModels);
          statistics->ft.resize(m_numModels);

          (*allStats)[targetString] = statistics;
        }
        multiModelCountsStatistics * statistics = (*allStats)[targetString];

        // counts are stored transformed (log) in the score breakdown
        statistics->fst[i] = UntransformScore(raw_scores[0]);
        statistics->ft[i] = UntransformScore(raw_scores[1]);
        fs[i] = UntransformScore(raw_scores[2]);
        (*allStats)[targetString] = statistics;
      }
    }
  }

  // get target phrase frequency for models which have not seen the phrase pair
  for (map<string, multiModelCountsStatistics*>::const_iterator iter = allStats->begin(); iter != allStats->end(); ++iter) {
    multiModelCountsStatistics * statistics = iter->second;

    for (size_t i = 0; i < m_numModels; ++i) {
      if (!statistics->ft[i]) {
        // look the target phrase up in model i's inverse table
        statistics->ft[i] = GetTargetCount(static_cast<const Phrase&>(*statistics->targetPhrase), i);
      }
    }
  }
}
// Turn the collected per-model statistics into a scored target phrase
// collection. Consumes allStats: both the map and its values are freed here.
// Phrase pairs whose alignment is inconsistent are silently dropped
// (AlignmentException). Emits the usual 5 phrase scores: presumably
// [0] p(s|t), [1] lex e2f, [2] p(t|s), [3] lex f2e, [4] constant phrase
// penalty exp(1) — confirm against the feature definition in the header.
TargetPhraseCollection* PhraseDictionaryMultiModelCounts::CreateTargetPhraseCollectionCounts(const Phrase &src, vector<float> &fs, map<string, multiModelCountsStatistics*>* allStats, vector<vector<float> > &multimodelweights) const
{
  TargetPhraseCollection *ret = new TargetPhraseCollection();
  for (map<string, multiModelCountsStatistics*>::const_iterator iter = allStats->begin(); iter != allStats->end(); ++iter) {

    multiModelCountsStatistics * statistics = iter->second;

    // lexical weights cannot be computed without word alignment
    if (statistics->targetPhrase->GetAlignTerm().GetSize() == 0) {
      UTIL_THROW(util::Exception, "alignment information empty\ncount-tables need to include alignment information for computation of lexical weights.\nUse --phrase-word-alignment during training; for on-disk tables, also set -alignment-info when creating on-disk tables.");
    }

    try {
      pair<vector< set<size_t> >, vector< set<size_t> > > alignment = GetAlignmentsForLexWeights(src, static_cast<const Phrase&>(*statistics->targetPhrase), statistics->targetPhrase->GetAlignTerm());
      vector< set<size_t> > alignedToT = alignment.first;
      vector< set<size_t> > alignedToS = alignment.second;

      // weighted lexical translation probabilities in both directions
      double lexst = ComputeWeightedLexicalTranslation(static_cast<const Phrase&>(*statistics->targetPhrase), src, alignedToS, m_lexTable_e2f, multimodelweights[1], false);
      double lexts = ComputeWeightedLexicalTranslation(src, static_cast<const Phrase&>(*statistics->targetPhrase), alignedToT, m_lexTable_f2e, multimodelweights[3], true);

      Scores scoreVector(5);
      scoreVector[0] = FloorScore(TransformScore(m_combineFunction(statistics->fst, statistics->ft, multimodelweights[0])));
      scoreVector[1] = FloorScore(TransformScore(lexst));
      scoreVector[2] = FloorScore(TransformScore(m_combineFunction(statistics->fst, fs, multimodelweights[2])));
      scoreVector[3] = FloorScore(TransformScore(lexts));
      scoreVector[4] = FloorScore(TransformScore(2.718)); // constant penalty, log e = 1

      statistics->targetPhrase->GetScoreBreakdown().Assign(this, scoreVector);

      //correct future cost estimates and total score
      vector<FeatureFunction*> pd_feature;
      pd_feature.push_back(const_cast<PhraseDictionaryMultiModelCounts*>(this));
      const vector<FeatureFunction*> pd_feature_const(pd_feature);
      statistics->targetPhrase->Evaluate(src, pd_feature_const);
    } catch (AlignmentException& e) {
      // inconsistent alignment: drop this phrase pair
      continue;
    }

    ret->Add(new TargetPhrase(*statistics->targetPhrase));
  }

  // consume allStats: free the per-target statistics and the map itself
  RemoveAllInMap(*allStats);
  delete allStats;

  return ret;
}
2013-05-29 21:16:15 +04:00
float PhraseDictionaryMultiModelCounts : : GetTargetCount ( const Phrase & target , size_t modelIndex ) const
{
2013-04-22 15:21:59 +04:00
2013-05-29 21:16:15 +04:00
const PhraseDictionary & pd = * m_inverse_pd [ modelIndex ] ;
2013-08-24 00:02:03 +04:00
const TargetPhraseCollection * ret_raw = pd . GetTargetPhraseCollectionLEGACY ( target ) ;
2013-04-22 15:21:59 +04:00
2013-05-29 21:16:15 +04:00
// in inverse mode, we want the first score of the first phrase pair (note: if we were to work with truly symmetric models, it would be the third score)
2013-07-16 19:30:54 +04:00
if ( ret_raw & & ret_raw - > GetSize ( ) > 0 ) {
const TargetPhrase * targetPhrase = * ( ret_raw - > begin ( ) ) ;
2013-05-29 21:16:15 +04:00
return UntransformScore ( targetPhrase - > GetScoreBreakdown ( ) . GetScoresForProducer ( & pd ) [ 0 ] ) ;
}
2013-04-22 15:21:59 +04:00
2013-05-29 21:16:15 +04:00
// target phrase unknown
else return 0 ;
2013-04-22 15:21:59 +04:00
}
2013-05-29 21:16:15 +04:00
// Convert an alignment-point set into two directional views: for every target
// position, the set of aligned source positions (alignedToT), and vice versa
// (alignedToS). Throws AlignmentException on out-of-range alignment points so
// the caller can discard the corrupt phrase pair.
pair<PhraseDictionaryMultiModelCounts::AlignVector, PhraseDictionaryMultiModelCounts::AlignVector> PhraseDictionaryMultiModelCounts::GetAlignmentsForLexWeights(const Phrase &phraseS, const Phrase &phraseT, const AlignmentInfo &alignment) const
{
  const size_t targetLen = phraseT.GetSize();
  const size_t sourceLen = phraseS.GetSize();
  AlignVector alignedToT(targetLen);
  AlignVector alignedToS(sourceLen);

  for (AlignmentInfo::const_iterator iter = alignment.begin(); iter != alignment.end(); ++iter) {
    const size_t s = iter->first;
    const size_t t = iter->second;
    // alignment points outside the phrase pair indicate corrupt data
    if (s >= sourceLen || t >= targetLen) {
      cerr << "Error: inconsistent alignment for phrase pair: " << phraseS << " - " << phraseT << endl;
      cerr << "phrase pair will be discarded" << endl;
      throw AlignmentException();
    }
    alignedToT[t].insert(s);
    alignedToS[s].insert(t);
  }
  return make_pair(alignedToT, alignedToS);
}
2013-07-30 17:03:25 +04:00
// Weighted lexical translation probability of phraseT given phraseS:
// product over target words of the (averaged over aligned source words, or
// NULL-aligned) per-word probability, each combined across component models
// with the configured weighting function. is_input selects whether the NULL
// word is built over input or output factors.
double PhraseDictionaryMultiModelCounts::ComputeWeightedLexicalTranslation(const Phrase &phraseS, const Phrase &phraseT, AlignVector &alignment, const vector<lexicalTable*> &tables, vector<float> &multimodelweights, bool is_input) const
{
  // lexical translation probability

  double lexScore = 1.0;
  Word null;
  if (is_input) {
    null.CreateFromString(Input, m_input, "NULL", false);
  } else {
    null.CreateFromString(Output, m_output, "NULL", false);
  }

  // all target words have to be explained
  for (size_t ti = 0; ti < alignment.size(); ti++) {
    const set<size_t> & srcIndices = alignment[ti];
    Word t_word = phraseT.GetWord(ti);

    if (srcIndices.empty()) {
      // explain unaligned word by NULL
      lexScore *= GetLexicalProbability(null, t_word, tables, multimodelweights);
    } else {
      // go through all the aligned words to compute average
      double thisWordScore = 0;
      for (set<size_t>::const_iterator si(srcIndices.begin()); si != srcIndices.end(); ++si) {
        Word s_word = phraseS.GetWord(*si);
        thisWordScore += GetLexicalProbability(s_word, t_word, tables, multimodelweights);
      }
      lexScore *= thisWordScore / srcIndices.size();
    }
  }
  return lexScore;
}
2013-07-30 17:03:25 +04:00
// Precompute, for every target word, the per-model joint and marginal counts
// of each aligned source word (or of NULL for unaligned target words), so
// that repeated weight evaluations during optimization only need to combine
// cached counts (see ComputeWeightedLexicalTranslationFromCache) instead of
// doing table lookups. No weights are applied here.
lexicalCache PhraseDictionaryMultiModelCounts::CacheLexicalStatistics(const Phrase &phraseS, const Phrase &phraseT, AlignVector &alignment, const vector<lexicalTable*> &tables, bool is_input)
{
  //do all the necessary lexical table lookups and get counts, but don't apply weights yet

  Word null;
  if (is_input) {
    null.CreateFromString(Input, m_input, "NULL", false);
  } else {
    null.CreateFromString(Output, m_output, "NULL", false);
  }

  lexicalCache ret;

  // all target words have to be explained
  for (size_t ti = 0; ti < alignment.size(); ti++) {
    const set<size_t> & srcIndices = alignment[ti];
    Word t_word = phraseT.GetWord(ti);

    vector<lexicalPair> ti_vector;
    if (srcIndices.empty()) {
      // explain unaligned word by NULL
      vector<float> joint_count(m_numModels);
      vector<float> marginals(m_numModels);

      FillLexicalCountsJoint(null, t_word, joint_count, tables);
      FillLexicalCountsMarginal(null, marginals, tables);

      ti_vector.push_back(make_pair(joint_count, marginals));
    } else {
      // one (joint, marginal) pair per aligned source word
      for (set<size_t>::const_iterator si(srcIndices.begin()); si != srcIndices.end(); ++si) {
        Word s_word = phraseS.GetWord(*si);
        vector<float> joint_count(m_numModels);
        vector<float> marginals(m_numModels);

        FillLexicalCountsJoint(s_word, t_word, joint_count, tables);
        FillLexicalCountsMarginal(s_word, marginals, tables);

        ti_vector.push_back(make_pair(joint_count, marginals));
      }
    }
    ret.push_back(ti_vector);
  }
  return ret;
}
2013-05-29 21:16:15 +04:00
double PhraseDictionaryMultiModelCounts : : ComputeWeightedLexicalTranslationFromCache ( lexicalCache & cache , vector < float > & weights ) const
{
2013-04-22 15:21:59 +04:00
// lexical translation probability
double lexScore = 1.0 ;
for ( lexicalCache : : const_iterator iter = cache . begin ( ) ; iter ! = cache . end ( ) ; + + iter ) {
2013-05-29 21:16:15 +04:00
vector < lexicalPair > t_vector = * iter ;
double thisWordScore = 0 ;
for ( vector < lexicalPair > : : const_iterator iter2 = t_vector . begin ( ) ; iter2 ! = t_vector . end ( ) ; + + iter2 ) {
vector < float > joint_count = iter2 - > first ;
vector < float > marginal = iter2 - > second ;
thisWordScore + = m_combineFunction ( joint_count , marginal , weights ) ;
}
lexScore * = thisWordScore / t_vector . size ( ) ;
2013-04-22 15:21:59 +04:00
}
return lexScore ;
}
// get lexical probability for single word alignment pair
2013-07-30 17:03:25 +04:00
double PhraseDictionaryMultiModelCounts : : GetLexicalProbability ( Word & wordS , Word & wordT , const vector < lexicalTable * > & tables , vector < float > & multimodelweights ) const
2013-05-29 21:16:15 +04:00
{
vector < float > joint_count ( m_numModels ) ;
vector < float > marginals ( m_numModels ) ;
2013-04-22 15:21:59 +04:00
2013-05-29 21:16:15 +04:00
FillLexicalCountsJoint ( wordS , wordT , joint_count , tables ) ;
FillLexicalCountsMarginal ( wordS , marginals , tables ) ;
2013-04-22 15:21:59 +04:00
2013-05-29 21:16:15 +04:00
double lexProb = m_combineFunction ( joint_count , marginals , multimodelweights ) ;
2013-04-22 15:21:59 +04:00
return lexProb ;
}
2013-07-30 17:03:25 +04:00
void PhraseDictionaryMultiModelCounts : : FillLexicalCountsJoint ( Word & wordS , Word & wordT , vector < float > & count , const vector < lexicalTable * > & tables ) const
2013-05-29 21:16:15 +04:00
{
for ( size_t i = 0 ; i < m_numModels ; i + + ) {
lexicalMapJoint : : iterator joint_s = tables [ i ] - > joint . find ( wordS ) ;
if ( joint_s = = tables [ i ] - > joint . end ( ) ) count [ i ] = 0.0 ;
else {
lexicalMap : : iterator joint_t = joint_s - > second . find ( wordT ) ;
if ( joint_t = = joint_s - > second . end ( ) ) count [ i ] = 0.0 ;
else count [ i ] = joint_t - > second ;
2013-04-22 15:21:59 +04:00
}
2013-05-29 21:16:15 +04:00
}
2013-04-22 15:21:59 +04:00
}
2013-07-30 17:03:25 +04:00
void PhraseDictionaryMultiModelCounts : : FillLexicalCountsMarginal ( Word & wordS , vector < float > & count , const vector < lexicalTable * > & tables ) const
2013-05-29 21:16:15 +04:00
{
for ( size_t i = 0 ; i < m_numModels ; i + + ) {
lexicalMap : : iterator marginal_s = tables [ i ] - > marginal . find ( wordS ) ;
if ( marginal_s = = tables [ i ] - > marginal . end ( ) ) count [ i ] = 0.0 ;
else count [ i ] = marginal_s - > second ;
}
2013-04-22 15:21:59 +04:00
}
2013-05-29 21:16:15 +04:00
void PhraseDictionaryMultiModelCounts : : LoadLexicalTable ( string & fileName , lexicalTable * ltable )
{
2013-04-22 15:21:59 +04:00
cerr < < " Loading lexical translation table from " < < fileName ;
ifstream inFile ;
inFile . open ( fileName . c_str ( ) ) ;
if ( inFile . fail ( ) ) {
cerr < < " - ERROR: could not open file \n " ;
exit ( 1 ) ;
}
istream * inFileP = & inFile ;
char line [ LINE_MAX_LENGTH ] ;
int i = 0 ;
while ( true ) {
i + + ;
if ( i % 100000 = = 0 ) cerr < < " . " < < flush ;
SAFE_GETLINE ( ( * inFileP ) , line , LINE_MAX_LENGTH , ' \n ' , __FILE__ ) ;
if ( inFileP - > eof ( ) ) break ;
vector < string > token = tokenize ( line ) ;
if ( token . size ( ) ! = 4 ) {
cerr < < " line " < < i < < " in " < < fileName
< < " has wrong number of tokens, skipping: \n "
< < token . size ( ) < < " " < < token [ 0 ] < < " " < < line < < endl ;
continue ;
}
double joint = atof ( token [ 2 ] . c_str ( ) ) ;
double marginal = atof ( token [ 3 ] . c_str ( ) ) ;
2013-07-30 17:03:25 +04:00
Word wordT , wordS ;
wordT . CreateFromString ( Output , m_output , token [ 0 ] , false ) ;
wordS . CreateFromString ( Input , m_input , token [ 1 ] , false ) ;
2013-04-22 15:21:59 +04:00
ltable - > joint [ wordS ] [ wordT ] = joint ;
ltable - > marginal [ wordS ] = marginal ;
}
cerr < < endl ;
}
# ifdef WITH_DLIB
2013-05-29 21:16:15 +04:00
// Tune the multimodel weights on a list of (source, target) phrase pairs by
// minimizing cross-entropy with dlib. Returns m_numModels*4 weights, laid
// out feature-major: [feature0 model0..N-1, feature1 model0..N-1, ...].
// Interpolation weights are normalized to sum to 1; instance weights are
// scaled so the first model's weight is 1.
vector<float> PhraseDictionaryMultiModelCounts::MinimizePerplexity(vector<pair<string, string> > &phrase_pair_vector)
{
  const StaticData &staticData = StaticData::Instance();
  const string& factorDelimiter = staticData.GetFactorDelimiter();

  // count how often each distinct phrase pair occurs in the tuning set
  map<pair<string, string>, size_t> phrase_pair_map;

  for (vector<pair<string, string> >::const_iterator iter = phrase_pair_vector.begin(); iter != phrase_pair_vector.end(); ++iter) {
    phrase_pair_map[*iter] += 1;
  }

  vector<multiModelCountsStatisticsOptimization*> optimizerStats;

  // collect count statistics and lexical caches once per distinct phrase pair
  for (map<pair<string, string>, size_t>::iterator iter = phrase_pair_map.begin(); iter != phrase_pair_map.end(); ++iter) {

    pair<string, string> phrase_pair = iter->first;
    string source_string = phrase_pair.first;
    string target_string = phrase_pair.second;

    vector<float> fs(m_numModels);
    map<string, multiModelCountsStatistics*>* allStats = new(map<string, multiModelCountsStatistics*>);

    Phrase sourcePhrase(0);
    sourcePhrase.CreateFromString(Input, m_input, source_string, factorDelimiter, NULL);

    CollectSufficientStatistics(sourcePhrase, fs, allStats); //optimization potential: only call this once per source phrase

    //phrase pair not found; leave cache empty
    if (allStats->find(target_string) == allStats->end()) {
      RemoveAllInMap(*allStats);
      delete allStats;
      continue;
    }

    // copy out the statistics we need; allStats is freed below
    multiModelCountsStatisticsOptimization * targetStatistics = new multiModelCountsStatisticsOptimization();
    targetStatistics->targetPhrase = new TargetPhrase(*(*allStats)[target_string]->targetPhrase);
    targetStatistics->fs = fs;
    targetStatistics->fst = (*allStats)[target_string]->fst;
    targetStatistics->ft = (*allStats)[target_string]->ft;
    targetStatistics->f = iter->second; // frequency of the pair in the tuning set

    try {
      pair<vector< set<size_t> >, vector< set<size_t> > > alignment = GetAlignmentsForLexWeights(sourcePhrase, static_cast<const Phrase&>(*targetStatistics->targetPhrase), targetStatistics->targetPhrase->GetAlignTerm());
      targetStatistics->lexCachee2f = CacheLexicalStatistics(static_cast<const Phrase&>(*targetStatistics->targetPhrase), sourcePhrase, alignment.second, m_lexTable_e2f, false);
      targetStatistics->lexCachef2e = CacheLexicalStatistics(sourcePhrase, static_cast<const Phrase&>(*targetStatistics->targetPhrase), alignment.first, m_lexTable_f2e, true);

      optimizerStats.push_back(targetStatistics);
    } catch (AlignmentException& e) {} // inconsistent alignment: skip the pair

    RemoveAllInMap(*allStats);
    delete allStats;
  }

  Sentence sentence;
  CleanUpAfterSentenceProcessing(sentence); // free memory used by compact phrase tables

  // optimize each of the four features independently
  vector<float> ret(m_numModels*4);
  for (size_t iFeature = 0; iFeature < 4; iFeature++) {

    CrossEntropyCounts * ObjectiveFunction = new CrossEntropyCounts(optimizerStats, this, iFeature);

    vector<float> weight_vector = Optimize(ObjectiveFunction, m_numModels);

    if (m_mode == "interpolate") {
      // interpolation weights must sum to 1
      weight_vector = normalizeWeights(weight_vector);
    } else if (m_mode == "instance_weighting") {
      // instance weights are scale-invariant: normalize to first model = 1
      float first_value = weight_vector[0];
      for (size_t i = 0; i < m_numModels; i++) {
        weight_vector[i] = weight_vector[i]/first_value;
      }
    }
    cerr << "Weight vector for feature " << iFeature << ": ";
    for (size_t i = 0; i < m_numModels; i++) {
      ret[(iFeature*m_numModels)+i] = weight_vector[i];
      cerr << weight_vector[i] << " ";
    }
    cerr << endl;
    delete ObjectiveFunction;
  }

  RemoveAllInColl(optimizerStats);
  return ret;
}
// dlib objective: cross-entropy (in bits, hence the division by
// TransformScore(2) = log 2) of the tuning phrase pairs under the current
// weight vector, for the single feature m_iFeature, weighted by each pair's
// tuning-set frequency f. Lower is better.
double CrossEntropyCounts::operator() (const dlib::matrix<double,0,1>& arg) const
{
  double total = 0.0;
  double n = 0.0;
  std::vector<float> weight_vector(m_model->m_numModels);

  // copy the dlib column vector into a plain weight vector
  for (int i=0; i < arg.nr(); i++) {
    weight_vector[i] = arg(i);
  }
  if (m_model->m_mode == "interpolate") {
    weight_vector = m_model->normalizeWeights(weight_vector);
  }

  for ( std::vector<multiModelCountsStatisticsOptimization*>::const_iterator iter = m_optimizerStats.begin(); iter != m_optimizerStats.end(); ++iter ) {
    multiModelCountsStatisticsOptimization* statistics = *iter;
    size_t f = statistics->f;

    // probability of this pair under the current weights, for the feature
    // being optimized (0: p(s|t), 1: lex e2f, 2: p(t|s), 3: lex f2e)
    double score;
    if (m_iFeature == 0) {
      score = m_model->m_combineFunction(statistics->fst, statistics->ft, weight_vector);
    } else if (m_iFeature == 1) {
      score = m_model->ComputeWeightedLexicalTranslationFromCache(statistics->lexCachee2f, weight_vector);
    } else if (m_iFeature == 2) {
      score = m_model->m_combineFunction(statistics->fst, statistics->fs, weight_vector);
    } else if (m_iFeature == 3) {
      score = m_model->ComputeWeightedLexicalTranslationFromCache(statistics->lexCachef2e, weight_vector);
    } else {
      score = 0;
      UTIL_THROW(util::Exception, "Trying to optimize feature that I don't know. Aborting");
    }
    total -= (FloorScore(TransformScore(score))/TransformScore(2))*f;
    n += f;
  }
  return total/n;
}
# endif
// calculate weighted probability based on instance weighting of joint counts and marginal counts
// p = (sum_i w_i * joint_i) / (sum_i w_i * marginal_i); 0 if the weighted
// marginal is 0 (event unseen in every weighted model).
double InstanceWeighting(std::vector<float> &joint_counts, std::vector<float> &marginals, std::vector<float> &multimodelweights)
{
  const double weighted_joint =
    std::inner_product(joint_counts.begin(), joint_counts.end(), multimodelweights.begin(), 0.0);
  const double weighted_marginal =
    std::inner_product(marginals.begin(), marginals.end(), multimodelweights.begin(), 0.0);

  // guard against division by zero
  return (weighted_marginal == 0) ? 0 : weighted_joint / weighted_marginal;
}
// calculate linear interpolation of relative frequency estimates based on joint count and marginal counts
//unused for now; enable in config?
2013-05-29 21:16:15 +04:00
double LinearInterpolationFromCounts ( vector < float > & joint_counts , vector < float > & marginals , vector < float > & multimodelweights )
{
2013-04-22 15:21:59 +04:00
2013-05-29 21:16:15 +04:00
vector < float > p ( marginals . size ( ) ) ;
2013-04-22 15:21:59 +04:00
2013-05-29 21:16:15 +04:00
for ( size_t i = 0 ; i < marginals . size ( ) ; i + + ) {
if ( marginals [ i ] ! = 0 ) {
p [ i ] = joint_counts [ i ] / marginals [ i ] ;
2013-04-22 15:21:59 +04:00
}
2013-05-29 21:16:15 +04:00
}
2013-04-22 15:21:59 +04:00
2013-05-29 21:16:15 +04:00
double p_weighted = inner_product ( p . begin ( ) , p . end ( ) , multimodelweights . begin ( ) , 0.0 ) ;
2013-04-22 15:21:59 +04:00
2013-05-29 21:16:15 +04:00
return p_weighted ;
2013-04-22 15:21:59 +04:00
}
2013-05-13 20:20:14 +04:00
} //namespace