// $Id$
/***********************************************************************
Moses - factored phrase-based language decoder
Copyright (C) 2006 University of Edinburgh

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/
#include <ctime>
#include <iostream>
#include <iterator>
#include <fstream>
#include <sstream>
#include <algorithm>
#include "Parameter.h"
#include "Util.h"
#include "InputFileStream.h"
#include "StaticData.h"
#include "UserMessage.h"

using namespace std;

namespace Moses
{

/** define allowed parameters */
Parameter::Parameter()
{
  AddParam("beam-threshold", "b", "threshold for threshold pruning");
  AddParam("config", "f", "location of the configuration file");
  AddParam("continue-partial-translation", "cpt", "start from nonempty hypothesis");
  AddParam("decoding-graph-backoff", "dpb", "only use subsequent decoding paths for unknown spans of given length");
  AddParam("dlm-model", "Order, factor and vocabulary file for discriminative LM. Use * for filename to indicate unlimited vocabulary.");
  AddParam("drop-unknown", "du", "drop unknown words instead of copying them");
  AddParam("disable-discarding", "dd", "disable hypothesis discarding");
  AddParam("factor-delimiter", "fd", "specify a different factor delimiter than the default");
  AddParam("generation-file", "location and properties of the generation table");
  AddParam("global-lexical-file", "gl", "discriminatively trained global lexical translation model file");
  AddParam("glm-feature", "discriminatively trained global lexical translation feature, sparse producer");
  AddParam("input-factors", "list of factors in the input");
  AddParam("input-file", "i", "location of the input file to be translated");
  AddParam("inputtype", "text (0), confusion network (1), word lattice (2) (default = 0)");
  AddParam("labeled-n-best-list", "print out labels for each weight type in n-best list. default is true");
  AddParam("lmodel-file", "location and properties of the language models");
  AddParam("lmodel-dub", "dictionary upper bounds of language models");
  AddParam("lmodel-oov-feature", "add language model oov feature, one per model");
  AddParam("mapping", "description of decoding steps");
  AddParam("max-partial-trans-opt", "maximum number of partial translation options per input span (during mapping steps)");
  AddParam("max-trans-opt-per-coverage", "maximum number of translation options per input span (after applying mapping steps)");
  AddParam("max-phrase-length", "maximum phrase length (default 20)");
  AddParam("n-best-list", "file and size of n-best-list to be generated; specify - as the file in order to write to STDOUT");
  AddParam("lattice-samples", "generate samples from lattice, in same format as nbest list. Uses the file and size arguments, as in n-best-list");
  AddParam("n-best-factor", "factor to compute the maximum number of contenders (=factor*nbest-size). value 0 means infinity, i.e. no threshold. default is 0");
  AddParam("print-all-derivations", "to print all derivations in search graph");
  AddParam("output-factors", "list of factors in the output");
  AddParam("phrase-drop-allowed", "da", "if present, allow dropping of source words"); //da = drop any (word); see -du for comparison
  AddParam("report-all-factors", "report all factors in output, not just first");
  AddParam("report-all-factors-in-n-best", "Report all factors in n-best-lists. Default is false");
#ifdef HAVE_SYNLM
  AddParam("slmodel-file", "location of the syntactic language model file(s)");
  AddParam("slmodel-factor", "factor to use with syntactic language model");
  AddParam("slmodel-beam", "beam width to use with syntactic language model's parser");
#endif
  AddParam("stack", "s", "maximum stack size for histogram pruning");
  AddParam("stack-diversity", "sd", "minimum number of hypotheses of each coverage in stack (default 0)");
  AddParam("threads", "th", "number of threads to use in decoding (defaults to single-threaded)");
  AddParam("translation-details", "T", "for each best hypothesis, report translation details to the given file");
  AddParam("ttable-file", "location and properties of the translation tables");
  AddParam("translation-option-threshold", "tot", "threshold for translation options relative to best for input phrase");
  AddParam("early-discarding-threshold", "edt", "threshold for constructing hypotheses based on estimate cost");
  AddParam("verbose", "v", "verbosity level of the logging");
  AddParam("references", "Reference file(s) - used for bleu score feature");
  AddParam("output-factors", "list of factors in the output");
  AddParam("cache-path", "?");
  AddParam("distortion-limit", "dl", "distortion (reordering) limit in maximum number of words (0 = monotone, -1 = unlimited)");
  AddParam("monotone-at-punctuation", "mp", "do not reorder over punctuation");
  AddParam("distortion-file", "source factors (0 if table independent of source), target factors, location of the factorized/lexicalized reordering tables");
  AddParam("distortion", "configurations for each factorized/lexicalized reordering model.");
  AddParam("early-distortion-cost", "edc", "include estimate of distortion cost yet to be incurred in the score [Moore & Quirk 2007]. Default is no");
  AddParam("xml-input", "xi", "allows markup of input with desired translations and probabilities. values can be 'pass-through' (default), 'inclusive', 'exclusive', 'ignore'");
  AddParam("xml-brackets", "xb", "specify strings to be used as xml tags opening and closing, e.g. \"{{ }}\" (default \"< >\"). Avoid square brackets because of configuration file format. Valid only with text input mode");
  AddParam("minimum-bayes-risk", "mbr", "use minimum Bayes risk to determine best translation");
  AddParam("lminimum-bayes-risk", "lmbr", "use lattice minimum Bayes risk to determine best translation");
  AddParam("mira", "do mira training");
  AddParam("consensus-decoding", "con", "use consensus decoding (De Nero et. al. 2009)");
  AddParam("mbr-size", "number of translation candidates considered in MBR decoding (default 200)");
  AddParam("mbr-scale", "scaling factor to convert log linear score probability in MBR decoding (default 1.0)");
  AddParam("lmbr-thetas", "theta(s) for lattice mbr calculation");
  AddParam("lmbr-pruning-factor", "average number of nodes/word wanted in pruned lattice");
  AddParam("lmbr-p", "unigram precision value for lattice mbr");
  AddParam("lmbr-r", "ngram precision decay value for lattice mbr");
  AddParam("lmbr-map-weight", "weight given to map solution when doing lattice MBR (default 0)");
  AddParam("lattice-hypo-set", "to use lattice as hypo set during lattice MBR");
  AddParam("clean-lm-cache", "clean language model caches after N translations (default N=1)");
  AddParam("use-persistent-cache", "cache translation options across sentences (default true)");
  AddParam("persistent-cache-size", "maximum size of cache for translation options (default 10,000 input phrases)");
  AddParam("recover-input-path", "r", "(conf net/word lattice only) - recover input path corresponding to the best translation");
  AddParam("output-word-graph", "owg", "Output stack info as word graph. Takes filename, 0=only hypos in stack, 1=stack + nbest hypos");
  AddParam("time-out", "seconds after which translation is interrupted (-1=no time-out, default is -1)");
  AddParam("output-search-graph", "osg", "Output connected hypotheses of search into specified filename");
  AddParam("output-search-graph-extended", "osgx", "Output connected hypotheses of search into specified filename, in extended format");
  AddParam("unpruned-search-graph", "usg", "When outputting chart search graph, do not exclude dead ends. Note: stack pruning may have eliminated some hypotheses");
  AddParam("output-search-graph-slf", "slf", "Output connected hypotheses of search into specified directory, one file per sentence, in HTK standard lattice format (SLF)");
  AddParam("output-search-graph-hypergraph", "Output connected hypotheses of search into specified directory, one file per sentence, in a hypergraph format (see Kenneth Heafield's lazy hypergraph decoder)");
  AddParam("include-lhs-in-search-graph", "lhssg", "When outputting chart search graph, include the label of the LHS of the rule (useful when using syntax)");
#ifdef HAVE_PROTOBUF
  AddParam("output-search-graph-pb", "pb", "Write phrase lattice to protocol buffer objects in the specified path.");
#endif
  AddParam("cube-pruning-pop-limit", "cbp", "How many hypotheses should be popped for each stack. (default = 1000)");
  AddParam("cube-pruning-diversity", "cbd", "How many hypotheses should be created for each coverage. (default = 0)");
  AddParam("search-algorithm", "Which search algorithm to use. 0=normal stack, 1=cube pruning, 2=cube growing. (default = 0)");
  AddParam("constraint", "Location of the file with target sentences to produce constraining the search");
  AddParam("description", "Source language, target language, description");
  AddParam("max-chart-span", "maximum num. of source word chart rules can consume (default 10)");
  AddParam("non-terminals", "list of non-term symbols, space separated");
  AddParam("rule-limit", "a little like table limit. But for chart decoding rules. Default is DEFAULT_MAX_TRANS_OPT_SIZE");
  AddParam("source-label-overlap", "What happens if a span already has a label. 0=add more. 1=replace. 2=discard. Default is 0");
  AddParam("output-hypo-score", "Output the hypo score to stdout with the output string. For search error analysis. Default is false");
  AddParam("unknown-lhs", "file containing target lhs of unknown words. 1 per line: LHS prob");
  AddParam("phrase-pair-feature", "Source and target factors for phrase pair feature");
  AddParam("phrase-boundary-source-feature", "Source factors for phrase boundary feature");
  AddParam("phrase-boundary-target-feature", "Target factors for phrase boundary feature");
  AddParam("phrase-length-feature", "Count features for source length, target length, both of each phrase");
  AddParam("target-word-insertion-feature", "Count feature for each unaligned target word");
  AddParam("source-word-deletion-feature", "Count feature for each unaligned source word");
  AddParam("word-translation-feature", "Count feature for word translation according to word alignment");
  AddParam("cube-pruning-lazy-scoring", "cbls", "Don't fully score a hypothesis until it is popped");
  AddParam("parsing-algorithm", "Which parsing algorithm to use. 0=CYK+, 1=scope-3. (default = 0)");
  AddParam("search-algorithm", "Which search algorithm to use. 0=normal stack, 1=cube pruning, 2=cube growing, 4=stack with batched lm requests (default = 0)");
  AddParam("constraint", "Location of the file with target sentences to produce constraining the search");
  AddParam("link-param-count", "Number of parameters on word links when using confusion networks or lattices (default = 1)");
  AddParam("description", "Source language, target language, description");
  AddParam("max-chart-span", "maximum num. of source word chart rules can consume (default 10)");
  AddParam("non-terminals", "list of non-term symbols, space separated");
  AddParam("rule-limit", "a little like table limit. But for chart decoding rules. Default is DEFAULT_MAX_TRANS_OPT_SIZE");
  AddParam("source-label-overlap", "What happens if a span already has a label. 0=add more. 1=replace. 2=discard. Default is 0");
  AddParam("output-hypo-score", "Output the hypo score to stdout with the output string. For search error analysis. Default is false");
  AddParam("unknown-lhs", "file containing target lhs of unknown words. 1 per line: LHS prob");
  AddParam("show-weights", "print feature weights and exit");
  AddParam("start-translation-id", "Id of 1st input. Default = 0");
  AddParam("output-unknowns", "Output the unknown (OOV) words to the given file, one line per sentence");
  // Compact phrase table and reordering table.
  AddParam("minlexr-memory", "Load lexical reordering table in minlexr format into memory");
  AddParam("minphr-memory", "Load phrase table in minphr format into memory");

  AddParam("print-alignment-info", "Output word-to-word alignment to standard out, separated from translation by |||. Word-to-word alignments are taken from the phrase table if any. Default is false");
  AddParam("include-segmentation-in-n-best", "include phrasal segmentation in the n-best list. default is false");
  AddParam("print-alignment-info-in-n-best", "Include word-to-word alignment in the n-best list. Word-to-word alignments are taken from the phrase table if any. Default is false");
  AddParam("alignment-output-file", "print output word alignments into given file");
  AddParam("sort-word-alignment", "Sort word alignments for more consistent display. 0=no sort (default), 1=target order");
  AddParam("report-segmentation", "t", "report phrase segmentation in the output");

  AddParam("link-param-count", "DEPRECATED. DO NOT USE. Number of parameters on word links when using confusion networks or lattices (default = 1)");
  AddParam("weight-slm", "slm", "DEPRECATED. DO NOT USE. weight(s) for syntactic language model");
  AddParam("weight-bl", "bl", "DEPRECATED. DO NOT USE. weight for bleu score feature");
  AddParam("weight-d", "d", "DEPRECATED. DO NOT USE. weight(s) for distortion (reordering components)");
  AddParam("weight-dlm", "dlm", "DEPRECATED. DO NOT USE. weight for discriminative LM feature function (on top of sparse weights)");
  AddParam("weight-lr", "lr", "DEPRECATED. DO NOT USE. weight(s) for lexicalized reordering, if not included in weight-d");
  AddParam("weight-generation", "g", "DEPRECATED. DO NOT USE. weight(s) for generation components");
  AddParam("weight-i", "I", "DEPRECATED. DO NOT USE. weight(s) for word insertion - used for parameters from confusion network and lattice input links");
  AddParam("weight-l", "lm", "DEPRECATED. DO NOT USE. weight(s) for language models");
  AddParam("weight-lex", "lex", "DEPRECATED. DO NOT USE. weight for global lexical model");
  AddParam("weight-glm", "glm", "DEPRECATED. DO NOT USE. weight for global lexical feature, sparse producer");
  AddParam("weight-wt", "wt", "DEPRECATED. DO NOT USE. weight for word translation feature");
  AddParam("weight-pp", "pp", "DEPRECATED. DO NOT USE. weight for phrase pair feature");
  AddParam("weight-pb", "pb", "DEPRECATED. DO NOT USE. weight for phrase boundary feature");
  AddParam("weight-t", "tm", "DEPRECATED. DO NOT USE. weights for translation model components");
  AddParam("weight-w", "w", "DEPRECATED. DO NOT USE. weight for word penalty");
  AddParam("weight-u", "u", "DEPRECATED. DO NOT USE. weight for unknown word penalty");
  AddParam("weight-e", "e", "DEPRECATED. DO NOT USE. weight for word deletion");
  AddParam("text-type", "DEPRECATED. DO NOT USE. should be one of dev/devtest/test, used for domain adaptation features");
  AddParam("input-scores", "DEPRECATED. DO NOT USE. 2 numbers on 2 lines - [1] Number of scores on each edge of a confusion network or lattice input (default=1). [2] Number of 'real' word scores (0 or 1. default=0)");

  AddParam("weight-file", "wf", "feature weights file. Do *not* put weights for 'core' features in here - they go in moses.ini");
  AddParam("weight", "weights for ALL models, 1 per line 'WeightName value'. Weight names can be repeated");
  AddParam("weight-overwrite", "special parameter for mert. All on 1 line. Overrides weights specified in 'weights' argument");
  AddParam("feature-overwrite", "Override arguments in a particular feature function with a particular key");
  AddParam("feature", "");
  AddParam("print-id", "prefix translations with id. Default is false");
  AddParam("alternate-weight-setting", "aws", "alternate set of weights to use per xml specification");
  AddParam("placeholder-factor", "Which factor to use to store the original text for placeholders");
}

Parameter::~Parameter()
{
}

/** initialize a parameter, sub of constructor */
void Parameter::AddParam(const string &paramName, const string &description)
{
  m_valid[paramName] = true;
  m_description[paramName] = description;
}

/** initialize a parameter (including abbreviation), sub of constructor */
void Parameter::AddParam(const string &paramName, const string &abbrevName, const string &description)
{
  m_valid[paramName] = true;
  m_valid[abbrevName] = true;
  m_abbreviation[paramName] = abbrevName;
  m_fullname[abbrevName] = paramName;
  m_description[paramName] = description;
}

/** print descriptions of all parameters */
void Parameter::Explain()
{
  cerr << "Usage:" << endl;
  for (PARAM_STRING::const_iterator iterParam = m_description.begin(); iterParam != m_description.end(); iterParam++) {
    const string paramName = iterParam->first;
    const string paramDescription = iterParam->second;

    cerr << "\t-" << paramName;
    PARAM_STRING::const_iterator iterAbbr = m_abbreviation.find(paramName);
    if (iterAbbr != m_abbreviation.end())
      cerr << " (" << iterAbbr->second << ")";
    cerr << ": " << paramDescription << endl;
  }
}

/** check whether an item on the command line is a switch or a value
 * \param token token on the command line to be checked
 */
bool Parameter::isOption(const char* token)
{
  if (!token) return false;
  std::string tokenString(token);
  size_t length = tokenString.size();
  if (length > 0 && tokenString.substr(0,1) != "-") return false;
  if (length > 1 && tokenString.substr(1,1).find_first_not_of("0123456789") == 0) return true;
  return false;
}
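
// Example of the rule above: "-config" and "-f" count as switches, while "-3"
// or "-0.5" do not, because the character after the leading dash is a digit.
// This lets negative numbers be read as values of the preceding switch.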

/** load all parameters from the configuration file and the command line switches */
bool Parameter::LoadParam(const string &filePath)
{
  const char *argv[] = {"executable", "-f", filePath.c_str()};
  return LoadParam(3, (char**) argv);
}

/** load all parameters from the configuration file and the command line switches */
bool Parameter::LoadParam(int argc, char* argv[])
{
  // config file (-f) arg mandatory
  string configPath;
  if ( (configPath = FindParam("-f", argc, argv)) == ""
       && (configPath = FindParam("-config", argc, argv)) == "") {
    PrintCredit();
    Explain();
    cerr << endl;
    UserMessage::Add("No configuration file was specified. Use -config or -f");
    cerr << endl;
    return false;
  } else {
    if (!ReadConfigFile(configPath)) {
      UserMessage::Add("Could not read " + configPath);
      return false;
    }
  }

  // overwrite parameters with values from switches
  for (PARAM_STRING::const_iterator iterParam = m_description.begin();
       iterParam != m_description.end(); iterParam++) {
    const string paramName = iterParam->first;
    OverwriteParam("-" + paramName, paramName, argc, argv);
  }

  // ... also shortcuts
  for (PARAM_STRING::const_iterator iterParam = m_abbreviation.begin();
       iterParam != m_abbreviation.end(); iterParam++) {
    const string paramName = iterParam->first;
    const string paramShortName = iterParam->second;
    OverwriteParam("-" + paramShortName, paramName, argc, argv);
  }

  // logging of parameters that were set in either config or switch
  int verbose = 1;
  if (m_setting.find("verbose") != m_setting.end() &&
      m_setting["verbose"].size() > 0)
    verbose = Scan<int>(m_setting["verbose"][0]);
  if (verbose >= 1) { // only if verbose
    TRACE_ERR("Defined parameters (per moses.ini or switch):" << endl);
    for (PARAM_MAP::const_iterator iterParam = m_setting.begin();
         iterParam != m_setting.end(); iterParam++) {
      TRACE_ERR("\t" << iterParam->first << ": ");
      for (size_t i = 0; i < iterParam->second.size(); i++)
        TRACE_ERR(iterParam->second[i] << " ");
      TRACE_ERR(endl);
    }
  }

  // convert old weights args to new format
  // WHAT IS GOING ON HERE??? - UG
  if (!isParamSpecified("feature")) // UG
    ConvertWeightArgs();
  CreateWeightsMap();
  WeightOverwrite();

  // check for illegal parameters
  bool noErrorFlag = true;
  for (int i = 0; i < argc; i++) {
    if (isOption(argv[i])) {
      string paramSwitch = (string) argv[i];
      string paramName = paramSwitch.substr(1);
      if (m_valid.find(paramName) == m_valid.end()) {
        UserMessage::Add("illegal switch: " + paramSwitch);
        noErrorFlag = false;
      }
    }
  }

  //Save("/tmp/moses.ini.new");

  // check if parameters make sense
  return Validate() && noErrorFlag;
}

std::vector<float> &Parameter::GetWeights(const std::string &name)
{
  std::vector<float> &ret = m_weights[name];

  // cerr << "WEIGHT " << name << "=";
  // for (size_t i = 0; i < ret.size(); ++i) {
  //   cerr << ret[i] << ",";
  // }
  // cerr << endl;
  return ret;
}

void Parameter::SetWeight(const std::string &name, size_t ind, float weight)
{
  PARAM_VEC &newWeights = m_setting["weight"];
  string line = name + SPrint(ind) + "= " + SPrint(weight);
  newWeights.push_back(line);
}

void Parameter::SetWeight(const std::string &name, size_t ind, const vector<float> &weights)
{
  PARAM_VEC &newWeights = m_setting["weight"];
  string line = name + SPrint(ind) + "=";
  for (size_t i = 0; i < weights.size(); ++i) {
    line += " " + SPrint(weights[i]);
  }
  newWeights.push_back(line);
}
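
// Example of the line format produced above (hypothetical feature name and
// values): SetWeight("LexicalReordering", 0, {0.3, 0.2}) appends the entry
// "LexicalReordering0= 0.3 0.2" to m_setting["weight"], i.e. the same
// 'WeightName value' format that CreateWeightsMap() later parses.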

void
Parameter::
AddWeight(const std::string &name, size_t ind,
          const std::vector<float> &weights)
{
  PARAM_VEC &newWeights = m_setting["weight"];
  string sought = name + SPrint(ind) + "= ";
  for (size_t i = 0; i < newWeights.size(); ++i) {
    string &line = newWeights[i];
    if (line.find(sought) == 0) {
      // found existing weight, most likely to be input weights. Append to this line
      for (size_t i = 0; i < weights.size(); ++i) {
        line += " " + SPrint(weights[i]);
      }
      return;
    }
  }

  // nothing found. Just set
  SetWeight(name, ind, weights);
}

void Parameter::ConvertWeightArgsSingleWeight(const string &oldWeightName, const string &newWeightName)
{
  size_t ind = 0;
  PARAM_MAP::iterator iterMap;

  iterMap = m_setting.find(oldWeightName);
  if (iterMap != m_setting.end()) {
    const PARAM_VEC &weights = iterMap->second;
    for (size_t i = 0; i < weights.size(); ++i) {
      SetWeight(newWeightName, ind, Scan<float>(weights[i]));
    }

    m_setting.erase(iterMap);
  }
}

void Parameter::ConvertWeightArgsPhraseModel(const string &oldWeightName)
{
  // process input weights 1st
  if (isParamSpecified("weight-i")) {
    vector<float> inputWeights = Scan<float>(m_setting["weight-i"]);
    PARAM_VEC &numInputScores = m_setting["input-scores"];
    if (inputWeights.size() == 1) {
      CHECK(numInputScores.size() == 0);
      numInputScores.push_back("1");
      numInputScores.push_back("0");
    } else if (inputWeights.size() == 2) {
      CHECK(numInputScores.size() == 0);
      numInputScores.push_back("1");
      numInputScores.push_back("1");
    }

    SetWeight("PhraseDictionaryBinary", 0, inputWeights);
  }

  // convert the actual phrase-table feature
  VERBOSE(2, "Creating phrase table features" << endl);

  size_t numInputScores = 0;
  size_t numRealWordsInInput = 0;
  map<string, size_t> ptIndices;

  if (GetParam("input-scores").size()) {
    numInputScores = Scan<size_t>(GetParam("input-scores")[0]);
  }

  if (GetParam("input-scores").size() > 1) {
    numRealWordsInInput = Scan<size_t>(GetParam("input-scores")[1]);
  }

  // load phrase translation tables
  if (GetParam("ttable-file").size() > 0) {
    // weights
    const vector<string> &translationVector = GetParam("ttable-file");
    vector<size_t> maxTargetPhrase = Scan<size_t>(GetParam("ttable-limit"));

    if (maxTargetPhrase.size() == 1 && translationVector.size() > 1) {
      VERBOSE(1, "Using uniform ttable-limit of " << maxTargetPhrase[0] << " for all translation tables." << endl);
      for (size_t i = 1; i < translationVector.size(); i++)
        maxTargetPhrase.push_back(maxTargetPhrase[0]);
    } else if (maxTargetPhrase.size() != 1 && maxTargetPhrase.size() < translationVector.size()) {
      stringstream strme;
      strme << "You specified " << translationVector.size() << " translation tables, but only " << maxTargetPhrase.size() << " ttable-limits.";
      UserMessage::Add(strme.str());
      return;
    }

    // MAIN LOOP
    const PARAM_VEC &oldWeights = m_setting[oldWeightName];

    size_t currOldInd = 0;
    for (size_t currDict = 0; currDict < translationVector.size(); currDict++) {
      stringstream ptLine;

      vector<string> token = Tokenize(translationVector[currDict]);

      if (currDict == 0 && token.size() == 4) {
        UserMessage::Add("Phrase table specification in old 4-field format. No longer supported");
        return;
      }
      CHECK(token.size() >= 5);

      PhraseTableImplementation implementation = (PhraseTableImplementation) Scan<int>(token[0]);

      string ptType;
      switch (implementation) {
      case Memory:
        ptType = "PhraseDictionaryMemory";
        break;
      case Binary:
        ptType = "PhraseDictionaryBinary";
        break;
      case OnDisk:
        ptType = "PhraseDictionaryOnDisk";
        break;
      case SCFG:
        ptType = "PhraseDictionaryMemory";
        break;
      case Compact:
        ptType = "PhraseDictionaryCompact";
        break;
      case SuffixArray:
        ptType = "PhraseDictionarySuffixArray";
        break;
      case DSuffixArray:
        ptType = "PhraseDictionaryDynSuffixArray";
        break;
      default:
        break;
      }

      size_t ptInd;
      if (ptIndices.find(ptType) == ptIndices.end()) {
        ptIndices[ptType] = 0;
        ptInd = 0;
      } else {
        ptInd = ++ptIndices[ptType];
      }

      // weights
      size_t numFFInd = (token.size() == 4) ? 2 : 3;
      size_t numFF = Scan<size_t>(token[numFFInd]);

      vector<float> weights(numFF);
      for (size_t currFF = 0; currFF < numFF; ++currFF) {
        CHECK(currOldInd < oldWeights.size());
        float weight = Scan<float>(oldWeights[currOldInd]);
        weights[currFF] = weight;

        ++currOldInd;
      }

      // cerr << weights.size() << " PHRASE TABLE WEIGHTS "
      // << __FILE__ << ":" << __LINE__ << endl;
      AddWeight(ptType, ptInd, weights);

      // actual pt
      ptLine << ptType << " ";
      ptLine << "input-factor=" << token[1] << " ";
      ptLine << "output-factor=" << token[2] << " ";
      ptLine << "path=" << token[4] << " ";

      //characteristics of the phrase table
      vector<FactorType> input = Tokenize<FactorType>(token[1], ",")
                                 , output = Tokenize<FactorType>(token[2], ",");
      size_t numScoreComponent = Scan<size_t>(token[3]);
      string filePath = token[4];

      if (currDict == 0) {
        // only the 1st pt gets the input scores. This is a hack.
        // TODO: find out what assumptions the confusion network makes about
        // phrase table output that make it work only with the binary format.
        numScoreComponent += numInputScores + numRealWordsInInput;
      }

      ptLine << "num-features=" << numScoreComponent << " ";
      ptLine << "table-limit=" << maxTargetPhrase[currDict] << " ";

      if (implementation == SuffixArray || implementation == DSuffixArray) {
        ptLine << "target-path=" << token[5] << " ";
        ptLine << "alignment-path=" << token[6] << " ";
      }

      AddFeature(ptLine.str());
    } // for(size_t currDict = 0 ; currDict < translationVector.size(); currDict++) {
  } // if (GetParam("ttable-file").size() > 0) {

  m_setting.erase("weight-i");
  m_setting.erase(oldWeightName);
  m_setting.erase("ttable-file");
  m_setting.erase("ttable-limit");
}
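
// Example of the phrase-table conversion above (hypothetical path and weights):
// an old-style entry
//   [ttable-file]
//   0 0 0 5 /path/to/phrase-table.gz
// with five values under [weight-t] is rewritten as a feature line roughly like
//   PhraseDictionaryMemory input-factor=0 output-factor=0 path=/path/to/phrase-table.gz num-features=5 table-limit=<ttable-limit>
// plus a "PhraseDictionaryMemory0= ..." weight line holding those five values.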

void Parameter::AddFeature(const std::string &line)
{
  PARAM_VEC &features = m_setting["feature"];
  features.push_back(line);
}

void Parameter::ConvertWeightArgsDistortion()
{
  const string oldWeightName = "weight-d";
  const string oldLexReordingName = "distortion-file";

  // distortion / lex distortion
  const PARAM_VEC &oldWeights = GetParam(oldWeightName);

  if (oldWeights.size() > 0) {
    if (!isParamSpecified("search-algorithm") ||
        (GetParam("search-algorithm").size() > 0
         && (Trim(GetParam("search-algorithm")[0]) == "0"
             || Trim(GetParam("search-algorithm")[0]) == "1"
            )
        )
       ) {
      // phrase-based. Add distance distortion to list of features
      AddFeature("Distortion");
      SetWeight("Distortion", 0, Scan<float>(oldWeights[0]));
    }

    // everything but the first weight belongs to the lex reordering models
    size_t currOldInd = 1;
    const PARAM_VEC &lextable = GetParam(oldLexReordingName);

    for (size_t indTable = 0; indTable < lextable.size(); ++indTable) {
      const string &line = lextable[indTable];
      vector<string> toks = Tokenize(line);

      size_t numFF = Scan<size_t>(toks[2]);
      vector<float> weights(numFF);
      for (size_t currFF = 0; currFF < numFF; ++currFF) {
        CHECK(currOldInd < oldWeights.size());
        float weight = Scan<float>(oldWeights[currOldInd]);
        weights[currFF] = weight;

        ++currOldInd;
      }
      SetWeight("LexicalReordering", indTable, weights);

      stringstream strme;
      strme << "LexicalReordering "
            << "type=" << toks[1] << " ";

      vector<FactorType> factors = Tokenize<FactorType>(toks[0], "-");
      CHECK(factors.size() == 2);
      strme << "input-factor=" << factors[0]
            << " output-factor=" << factors[1] << " ";

      strme << "num-features=" << toks[2] << " ";
      strme << "path=" << toks[3];

      AddFeature(strme.str());
    }
  }

  m_setting.erase(oldWeightName);
  m_setting.erase(oldLexReordingName);
}

void Parameter::ConvertWeightArgsLM()
{
  const string oldWeightName = "weight-l";
  const string oldFeatureName = "lmodel-file";

  bool isChartDecoding = true;
  if (!isParamSpecified("search-algorithm") ||
      (GetParam("search-algorithm").size() > 0
       && (Trim(GetParam("search-algorithm")[0]) == "0"
           || Trim(GetParam("search-algorithm")[0]) == "1"
          )
      )
     ) {
    isChartDecoding = false;
  }

  vector<int> oovWeights;
  if (isParamSpecified("lmodel-oov-feature")) {
    oovWeights = Scan<int>(m_setting["lmodel-oov-feature"]);
  }

  PARAM_MAP::iterator iterMap;

  iterMap = m_setting.find(oldWeightName);
  if (iterMap != m_setting.end()) {

    size_t currOldInd = 0;
    const PARAM_VEC &weights = iterMap->second;
    const PARAM_VEC &models = m_setting[oldFeatureName];
    for (size_t lmIndex = 0; lmIndex < models.size(); ++lmIndex) {
      const string &line = models[lmIndex];
      vector<string> modelToks = Tokenize(line);

      int lmType = Scan<int>(modelToks[0]);

      string newFeatureName;
      switch (lmType) {
      case 0:
        newFeatureName = "SRILM";
        break;
      case 1:
        newFeatureName = "IRSTLM";
        break;
      case 8:
      case 9:
        newFeatureName = "KENLM";
        break;
      default:
        abort();
      }

      size_t numFF = 1;
      if (oovWeights.size() > lmIndex)
        numFF += oovWeights[lmIndex];

      vector<float> weightsLM(numFF);
      for (size_t currFF = 0; currFF < numFF; ++currFF) {
        CHECK(currOldInd < weights.size());
        weightsLM[currFF] = Scan<float>(weights[currOldInd]);
        if (isChartDecoding) {
          weightsLM[currFF] = UntransformLMScore(weightsLM[currFF]);
        }

        ++currOldInd;
      }

      SetWeight(newFeatureName, lmIndex, weightsLM);

      string featureLine = newFeatureName + " "
                           + "factor=" + modelToks[1] + " "  // factor
                           + "order=" + modelToks[2] + " "   // order
                           + "num-features=" + SPrint(numFF) + " ";
      if (lmType == 9) {
        featureLine += "lazyken=1 ";
      } else if (lmType == 8) {
        featureLine += "lazyken=0 ";
      }

      featureLine += "path=" + modelToks[3]; // file

      AddFeature(featureLine);
    } // for (size_t lmIndex = 0; lmIndex < models.size(); ++lmIndex) {

    m_setting.erase(iterMap);
  }

  m_setting.erase(oldFeatureName);
}
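
// Example of the LM conversion above (hypothetical path and weight): an old-style
// entry
//   [lmodel-file]
//   8 0 5 /path/to/lm.binary
// with one value under [weight-l] is rewritten roughly as
//   KENLM factor=0 order=5 num-features=1 lazyken=0 path=/path/to/lm.binary
// plus a "KENLM0= ..." weight line (the weight is rescaled via UntransformLMScore
// when chart decoding is in use).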

void Parameter::ConvertWeightArgsGeneration(const std::string &oldWeightName, const std::string &newWeightName)
{
  string oldFeatureName = "generation-file";

  // generation tables
  PARAM_VEC &oldWeights = m_setting[oldWeightName];

  if (oldWeights.size() > 0) {
    size_t currOldInd = 0;
    PARAM_VEC &models = m_setting[oldFeatureName];
    for (size_t indTable = 0; indTable < models.size(); ++indTable) {
      string &line = models[indTable];
      vector<string> modelToks = Tokenize(line);

      size_t numFF = Scan<size_t>(modelToks[2]);

      vector<float> weights(numFF);
      for (size_t currFF = 0; currFF < numFF; ++currFF) {
        CHECK(currOldInd < oldWeights.size());
        float weight = Scan<float>(oldWeights[currOldInd]);
        weights[currFF] = weight;

        ++currOldInd;
      }
      SetWeight(newWeightName, indTable, weights);

      stringstream strme;
      strme << "Generation "
            << "input-factor=" << modelToks[0] << " "
            << "output-factor=" << modelToks[1] << " "
            << "num-features=" << modelToks[2] << " "
            << "path=" << modelToks[3];

      AddFeature(strme.str());
    }
  }

  m_setting.erase(oldWeightName);
  m_setting.erase(oldFeatureName);
}

void Parameter::ConvertWeightArgsWordPenalty()
{
  const std::string oldWeightName = "weight-w";
  const std::string newWeightName = "WordPenalty";

  bool isChartDecoding = true;
  if (!isParamSpecified("search-algorithm") ||
      (GetParam("search-algorithm").size() > 0
       && (Trim(GetParam("search-algorithm")[0]) == "0"
           || Trim(GetParam("search-algorithm")[0]) == "1"
          )
      )
     ) {
    isChartDecoding = false;
  }

  PARAM_MAP::iterator iterMap;

  iterMap = m_setting.find(oldWeightName);
  if (iterMap != m_setting.end()) {
    const PARAM_VEC &weights = iterMap->second;
    for (size_t i = 0; i < weights.size(); ++i) {
      float weight = Scan<float>(weights[i]);
      if (isChartDecoding) {
        weight *= 0.434294482; // 0.434294482 = 1/ln(10), a log-base conversion factor
      }
      SetWeight(newWeightName, i, weight);
    }

    m_setting.erase(iterMap);
  }
}

void Parameter::ConvertWeightArgs()
{
  // can't handle discr LM. must do it manually 'cos of bigram/n-gram split
  CHECK(m_setting.count("weight-dlm") == 0);

  // check that old & new format aren't mixed
  if (m_setting.count("weight") &&
      (m_setting.count("weight-i") || m_setting.count("weight-t") || m_setting.count("weight-w") ||
       m_setting.count("weight-l") || m_setting.count("weight-u") || m_setting.count("weight-lex") ||
       m_setting.count("weight-generation") || m_setting.count("weight-lr") || m_setting.count("weight-d")
      )) {
    cerr << "Do not mix old and new formats for specifying weights";
  }

  ConvertWeightArgsWordPenalty();
  ConvertWeightArgsLM();
  ConvertWeightArgsSingleWeight("weight-slm", "SyntacticLM");
  ConvertWeightArgsSingleWeight("weight-u", "UnknownWordPenalty");
  ConvertWeightArgsGeneration("weight-generation", "Generation");
  ConvertWeightArgsDistortion();

  // don't know or can't be bothered converting these weights
  ConvertWeightArgsSingleWeight("weight-lr", "LexicalReordering");
  ConvertWeightArgsSingleWeight("weight-bl", "BleuScoreFeature");
  ConvertWeightArgsSingleWeight("weight-glm", "GlobalLexicalModel");
  ConvertWeightArgsSingleWeight("weight-wt", "WordTranslationFeature");
  ConvertWeightArgsSingleWeight("weight-pp", "PhrasePairFeature");
  ConvertWeightArgsSingleWeight("weight-pb", "PhraseBoundaryFeature");
  ConvertWeightArgsSingleWeight("weight-e", "WordDeletion"); // TODO Can't find real name
  ConvertWeightArgsSingleWeight("weight-lex", "GlobalLexicalReordering"); // TODO Can't find real name

  AddFeature("WordPenalty");
  AddFeature("UnknownWordPenalty");

  ConvertWeightArgsPhraseModel("weight-t");
}
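
// Net effect of ConvertWeightArgs() (hypothetical old-style config): a moses.ini
// that still uses [weight-w], [weight-l], [weight-t] etc. is rewritten in memory
// into one [feature] line per feature function plus matching "Name<index>= ..."
// entries under [weight], so the remaining setup code only sees the new format.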

void Parameter::CreateWeightsMap()
{
  PARAM_VEC &vec = m_setting["weight"];
  for (size_t i = 0; i < vec.size(); ++i) {
    const string &line = vec[i];
    vector<string> toks = Tokenize(line);
    CHECK(toks.size() >= 2);

    string name = toks[0];
    name = name.substr(0, name.size() - 1);

    vector<float> weights(toks.size() - 1);
    for (size_t i = 1; i < toks.size(); ++i) {
      float weight = Scan<float>(toks[i]);
      weights[i - 1] = weight;
    }
    m_weights[name] = weights;
  }
}

void Parameter::WeightOverwrite()
{
  PARAM_VEC &vec = m_setting["weight-overwrite"];

  if (vec.size() == 0)
    return;

  // should only be 1 line
  CHECK(vec.size() == 1);

  string name("");
  vector<float> weights;
  vector<string> toks = Tokenize(vec[0]);
  for (size_t i = 0; i < toks.size(); ++i) {
    const string &tok = toks[i];

    if (tok.substr(tok.size() - 1, 1) == "=") {
      // start of new feature

      if (name != "") {
        // save previous ff
        m_weights[name] = weights;
        weights.clear();
      }

      name = tok.substr(0, tok.size() - 1);
    } else {
      // a weight for curr ff
      float weight = Scan<float>(toks[i]);
      weights.push_back(weight);
    }
  }

  m_weights[name] = weights;
}
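
// Example of the single weight-overwrite line parsed above (hypothetical names
// and values): "Distortion0= 0.1 LexicalReordering0= 0.2 0.3 0.4" replaces the
// weight vectors stored in m_weights for Distortion0 and LexicalReordering0.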

/** check that parameter settings make sense */
bool Parameter::Validate()
{
  bool noErrorFlag = true;

  PARAM_MAP::const_iterator iterParams;
  for (iterParams = m_setting.begin(); iterParams != m_setting.end(); ++iterParams) {
    const std::string &key = iterParams->first;

    if (m_valid.find(key) == m_valid.end()) {
      UserMessage::Add("Unknown parameter " + key);
      noErrorFlag = false;
    }
  }

  if (m_setting["lmodel-dub"].size() > 0) {
    if (m_setting["lmodel-file"].size() != m_setting["lmodel-dub"].size()) {
      stringstream errorMsg("");
      errorMsg << "Config and parameters specify "
               << static_cast<int>(m_setting["lmodel-file"].size())
               << " language model files (lmodel-file), but "
               << static_cast<int>(m_setting["lmodel-dub"].size())
               << " LM upperbounds (lmodel-dub)"
               << endl;
      UserMessage::Add(errorMsg.str());
      noErrorFlag = false;
    }
  }

  /*
  const vector<float> &lmWeights = GetWeights("LM");
  if (m_setting["lmodel-file"].size() * (m_setting.find("lmodel-oov-feature") != m_setting.end() ? 2 : 1)
      != lmWeights.size()) {
    stringstream errorMsg("");
    errorMsg << "Config and parameters specify "
             << static_cast<int>(m_setting["lmodel-file"].size())
             << " language model files (lmodel-file), but "
             << static_cast<int>(lmWeights.size())
             << " weights (weight-l)";
    errorMsg << endl << "You might be giving '-lmodel-file TYPE FACTOR ORDER FILENAME' but you should be giving these four as a single argument, i.e. '-lmodel-file \"TYPE FACTOR ORDER FILENAME\"'";
    errorMsg << endl << "You should also remember that each language model requires 2 weights, if and only if lmodel-oov-feature is on.";
    UserMessage::Add(errorMsg.str());
    noErrorFlag = false;
  }
  */

  // do files exist?

  // input file
  if (noErrorFlag && m_setting["input-file"].size() == 1) {
    noErrorFlag = FileExists(m_setting["input-file"][0]);
    if (!noErrorFlag) {
      stringstream errorMsg("");
      errorMsg << endl << "Input file " << m_setting["input-file"][0] << " does not exist";
      UserMessage::Add(errorMsg.str());
    }
  }
  // generation tables
  if (noErrorFlag) {
    std::vector<std::string> ext;
    //raw tables in either uncompressed or compressed form
    ext.push_back("");
    ext.push_back(".gz");
    noErrorFlag = FilesExist("generation-file", 3, ext);
  }
  // distortion
  if (noErrorFlag) {
    std::vector<std::string> ext;
    //raw tables in either uncompressed or compressed form
    ext.push_back("");
    ext.push_back(".gz");
    //prefix tree format
    ext.push_back(".binlexr.idx");
    //prefix tree format
    ext.push_back(".minlexr");
    noErrorFlag = FilesExist("distortion-file", 3, ext);
  }
  return noErrorFlag;
}

/** check whether a file exists */
bool Parameter::FilesExist(const string &paramName, int fieldNo, std::vector<std::string> const &extensions)
{
  typedef std::vector<std::string> StringVec;
  StringVec::const_iterator iter;

  PARAM_MAP::const_iterator iterParam = m_setting.find(paramName);
  if (iterParam == m_setting.end()) {
    // no param. therefore nothing to check
    return true;
  }
  const StringVec &pathVec = (*iterParam).second;
  for (iter = pathVec.begin(); iter != pathVec.end(); ++iter) {
    StringVec vec = Tokenize(*iter);

    size_t tokenizeIndex;
    if (fieldNo == -1)
      tokenizeIndex = vec.size() - 1;
    else
      tokenizeIndex = static_cast<size_t>(fieldNo);

    if (tokenizeIndex >= vec.size()) {
      stringstream errorMsg("");
      errorMsg << "Expected at least " << (tokenizeIndex + 1) << " tokens per entry in '"
               << paramName << "', but only found "
               << vec.size();
      UserMessage::Add(errorMsg.str());
      return false;
    }
    const string &pathStr = vec[tokenizeIndex];

    bool fileFound = 0;
    for (size_t i = 0; i < extensions.size() && !fileFound; ++i) {
      fileFound |= FileExists(pathStr + extensions[i]);
    }
    if (!fileFound) {
      stringstream errorMsg("");
      errorMsg << "File " << pathStr << " does not exist";
      UserMessage::Add(errorMsg.str());
      return false;
    }
  }
  return true;
}

/** look for a switch in arg, update parameter */
// TODO arg parsing like this does not belong in the library, it belongs
// in moses-cmd
string Parameter::FindParam(const string &paramSwitch, int argc, char* argv[])
{
  for (int i = 0; i < argc; i++) {
    if (string(argv[i]) == paramSwitch) {
      if (i + 1 < argc) {
        return argv[i + 1];
      } else {
        stringstream errorMsg("");
        errorMsg << "Option " << paramSwitch << " requires a parameter!";
        UserMessage::Add(errorMsg.str());
        // TODO return some sort of error, not the empty string
      }
    }
  }
  return "";
}

/** update parameter settings with command line switches
 * \param paramSwitch (potentially short) name of switch
 * \param paramName full name of parameter
 * \param argc number of arguments on command line
 * \param argv values of parameters on command line */
void Parameter::OverwriteParam(const string &paramSwitch, const string &paramName, int argc, char* argv[])
{
  int startPos = -1;
  for (int i = 0; i < argc; i++) {
    if (string(argv[i]) == paramSwitch) {
      startPos = i + 1;
      break;
    }
  }
  if (startPos < 0)
    return;

  int index = 0;
  m_setting[paramName]; // defines the parameter, important for boolean switches
  while (startPos < argc && (!isOption(argv[startPos]))) {
    if (m_setting[paramName].size() > (size_t) index)
      m_setting[paramName][index] = argv[startPos];
    else
      m_setting[paramName].push_back(argv[startPos]);
    index++;
    startPos++;
  }
}
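
// Example (hypothetical command line): given "moses -f moses.ini -stack 200",
// OverwriteParam("-stack", "stack", argc, argv) overrides whatever [stack] value
// came from the configuration file with "200"; values are consumed until the
// next token that isOption() recognises as a switch.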

/** read parameters from a configuration file */
bool Parameter::ReadConfigFile(const string &filePath)
{
  InputFileStream inFile(filePath);
  string line, paramName;
  while (getline(inFile, line)) {
    // comments
    size_t comPos = line.find_first_of("#");
    if (comPos != string::npos)
      line = line.substr(0, comPos);
    // trim leading and trailing spaces/tabs
    line = Trim(line);

    if (line.size() == 0) {
      // blank line. do nothing.
    } else if (line[0] == '[') {
      // new parameter
      for (size_t currPos = 0; currPos < line.size(); currPos++) {
        if (line[currPos] == ']') {
          paramName = line.substr(1, currPos - 1);
          break;
        }
      }
    } else {
      // add value to parameter
      m_setting[paramName].push_back(line);
    }
  }
  return true;
}
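
// Example of the format parsed above (hypothetical values):
//   # anything after '#' is ignored
//   [input-factors]
//   0
//   [stack]
//   100
// Each non-blank, non-bracketed line is appended to the most recent [section].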

struct Credit {
  string name, contact, currentPursuits, areaResponsibility;
  int sortId;

  Credit(string name, string contact, string currentPursuits, string areaResponsibility) {
    this->name = name;
    this->contact = contact;
    this->currentPursuits = currentPursuits;
    this->areaResponsibility = areaResponsibility;
    this->sortId = rand() % 1000;
  }

  bool operator<(const Credit &other) const {
    /*
    if (areaResponsibility.size() != 0 && other.areaResponsibility.size() == 0)
      return true;
    if (areaResponsibility.size() == 0 && other.areaResponsibility.size() != 0)
      return false;

    return name < other.name;
    */
    return sortId < other.sortId;
  }

};

std::ostream& operator<<(std::ostream &os, const Credit &credit)
{
  os << credit.name;
  if (credit.contact != "")
    os << "\t contact: " << credit.contact;
  if (credit.currentPursuits != "")
    os << " " << credit.currentPursuits;
  if (credit.areaResponsibility != "")
    os << " I'll answer questions on: " << credit.areaResponsibility;
  return os;
}

void Parameter::PrintCredit()
{
  vector<Credit> everyone;
  srand(time(NULL));

  everyone.push_back(Credit("Nicola Bertoldi"
                            , "911"
                            , ""
                            , "scripts & other stuff"));
  everyone.push_back(Credit("Ondrej Bojar"
                            , ""
                            , "czech this out!"
                            , ""));
  everyone.push_back(Credit("Chris Callison-Burch"
                            , "anytime, anywhere"
                            , "international playboy"
                            , ""));
  everyone.push_back(Credit("Alexandra Constantin"
                            , ""
                            , "eu sunt varza"
                            , ""));
  everyone.push_back(Credit("Brooke Cowan"
                            , "brooke@csail.mit.edu"
                            , "if you're going to san francisco, be sure to wear a flower in your hair"
                            , ""));
  everyone.push_back(Credit("Chris Dyer"
                            , "can't. i'll be out driving my mustang"
                            , "driving my mustang"
                            , ""));
  everyone.push_back(Credit("Marcello Federico"
                            , "federico at itc at it"
                            , "Researcher at ITC-irst, Trento, Italy"
                            , "IRST language model"));
  everyone.push_back(Credit("Evan Herbst"
                            , "Small college in upstate New York"
                            , ""
                            , ""));
  everyone.push_back(Credit("Philipp Koehn"
                            , "only between 2 and 4am"
                            , ""
                            , "Nothing fazes this dude"));
  everyone.push_back(Credit("Christine Moran"
                            , "weird building at MIT"
                            , ""
                            , ""));
  everyone.push_back(Credit("Wade Shen"
                            , "via morse code"
                            , "buying another laptop"
                            , ""));
  everyone.push_back(Credit("Richard Zens"
                            , "richard at aachen dot de"
                            , ""
                            , "ambiguous source input, confusion networks, confusing source code"));
  everyone.push_back(Credit("Hieu Hoang", "http://www.hoang.co.uk/hieu/"
                            , "phd student at Edinburgh Uni. Original Moses developer"
                            , "general queries/ flames on Moses."));

  sort(everyone.begin(), everyone.end());

  cerr << "Moses - A beam search decoder for phrase-based statistical machine translation models" << endl
       << "Copyright (C) 2006 University of Edinburgh" << endl << endl

       << "This library is free software; you can redistribute it and/or" << endl
       << "modify it under the terms of the GNU Lesser General Public" << endl
       << "License as published by the Free Software Foundation; either" << endl
       << "version 2.1 of the License, or (at your option) any later version." << endl << endl

       << "This library is distributed in the hope that it will be useful," << endl
       << "but WITHOUT ANY WARRANTY; without even the implied warranty of" << endl
       << "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU" << endl
       << "Lesser General Public License for more details." << endl << endl

       << "You should have received a copy of the GNU Lesser General Public" << endl
       << "License along with this library; if not, write to the Free Software" << endl
       << "Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA" << endl << endl

       << "***********************************************************************" << endl << endl
       << "Built on " << __DATE__ << " at " __TIME__ << endl << endl

       << "WHO'S FAULT IS THIS GODDAM SOFTWARE:" << endl;

  ostream_iterator<Credit> out(cerr, "\n");
  copy(everyone.begin(), everyone.end(), out);
  cerr << endl << endl;
}

/** update parameter settings with command line switches
 * \param paramName full name of parameter
 * \param values new values for paramName */
void Parameter::OverwriteParam(const string &paramName, PARAM_VEC values)
{
  VERBOSE(2, "Overwriting parameter " << paramName);

  m_setting[paramName]; // defines the parameter, important for boolean switches
  if (m_setting[paramName].size() > 1) {
    VERBOSE(2, " (the parameter had " << m_setting[paramName].size() << " previous values)");
    CHECK(m_setting[paramName].size() == values.size());
  } else {
    VERBOSE(2, " (the parameter does not have previous values)");
    m_setting[paramName].resize(values.size());
  }
  VERBOSE(2, " with the following values:");
  int i = 0;
  for (PARAM_VEC::iterator iter = values.begin(); iter != values.end(); iter++, i++) {
    m_setting[paramName][i] = *iter;
    VERBOSE(2, " " << *iter);
  }
  VERBOSE(2, std::endl);
}

std::set<std::string> Parameter::GetWeightNames() const
{
  std::set<std::string> ret;
  std::map<std::string, std::vector<float> >::const_iterator iter;
  for (iter = m_weights.begin(); iter != m_weights.end(); ++iter) {
    const string &key = iter->first;
    ret.insert(key);
  }
  return ret;
}

void Parameter::Save(const std::string path)
{
  ofstream file;
  file.open(path.c_str());

  PARAM_MAP::const_iterator iterOuter;
  for (iterOuter = m_setting.begin(); iterOuter != m_setting.end(); ++iterOuter) {
    const std::string &sectionName = iterOuter->first;
    file << "[" << sectionName << "]" << endl;

    const PARAM_VEC &values = iterOuter->second;
    PARAM_VEC::const_iterator iterInner;
    for (iterInner = values.begin(); iterInner != values.end(); ++iterInner) {
      const std::string &value = *iterInner;
      file << value << endl;
    }

    file << endl;
  }

  file.close();
}

}