Support tokenize(const std::string &) as well.

Convenience wrapper: the underlying function takes a const char[], but many
call sites have a std::string and had to call its c_str() first.
Jeroen Vermeulen 2015-04-22 10:35:18 +07:00
parent 10a0a7b05a
commit 32722ab5b1
12 changed files with 23 additions and 14 deletions
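
For orientation, a minimal before/after sketch of a typical call site affected by this commit; the variable name `line` is illustrative:

    // Before: callers converted explicitly before tokenizing.
    const std::vector<std::string> token = util::tokenize(line.c_str());

    // After: the new overload accepts the std::string directly.
    const std::vector<std::string> token = util::tokenize(line);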


@@ -442,7 +442,7 @@ void PhraseDictionaryMultiModelCounts::LoadLexicalTable( string &fileName, lexic
i++;
if (i%100000 == 0) cerr << "." << flush;
-const vector<string> token = util::tokenize( line.c_str() );
+const vector<string> token = util::tokenize( line );
if (token.size() != 4) {
cerr << "line " << i << " in " << fileName
<< " has wrong number of tokens, skipping:\n"


@@ -18,7 +18,7 @@ void Domain::load( const std::string &domainFileName )
string line;
while(getline(*fileP, line)) {
// read
-const vector< string > domainSpecLine = util::tokenize( line.c_str() );
+const vector< string > domainSpecLine = util::tokenize( line );
int lineNumber;
if (domainSpecLine.size() != 2 ||
! sscanf(domainSpecLine[0].c_str(), "%d", &lineNumber)) {


@@ -50,7 +50,7 @@ bool SentenceAlignmentWithSyntax::processTargetSentence(const char * targetStrin
<< sentenceID << ": " << e.getMsg() << std::endl;
return false;
}
-target = util::tokenize(targetStringCPP.c_str());
+target = util::tokenize(targetStringCPP);
return true;
}
@@ -71,7 +71,7 @@ bool SentenceAlignmentWithSyntax::processSourceSentence(const char * sourceStrin
<< sentenceID << ": " << e.getMsg() << std::endl;
return false;
}
-source = util::tokenize(sourceStringCPP.c_str());
+source = util::tokenize(sourceStringCPP);
return true;
}


@@ -108,7 +108,7 @@ int main(int argc, char* argv[])
if (! getLine(fileDirectP, itemDirect ))
break;
-const vector< string > count = util::tokenize( itemDirect[4].c_str() );
+const vector< string > count = util::tokenize( itemDirect[4] );
float countEF = atof(count[0].c_str());
float countF = atof(count[1].c_str());
float prob = countF/countEF;


@@ -166,8 +166,8 @@ void processFiles( char* fileNameDirect, char* fileNameIndirect, char* fileNameC
fileConsolidated << " ||| " << reverseAlignment(itemDirect[3]);
// counts, for debugging
-const vector<string> directCounts = util::tokenize(itemDirect[4].c_str());
-const vector<string> indirectCounts = util::tokenize(itemIndirect[4].c_str());
+const vector<string> directCounts = util::tokenize(itemDirect[4]);
+const vector<string> indirectCounts = util::tokenize(itemIndirect[4]);
fileConsolidated << "||| " << directCounts[0] << " " << indirectCounts[0];
// output rule count if present in either file
if (indirectCounts.size() > 1) {
@@ -223,7 +223,7 @@ string reverseAlignment(const string &alignments)
{
stringstream ret("");
-const vector<string> alignToks = util::tokenize(alignments.c_str());
+const vector<string> alignToks = util::tokenize(alignments);
for (size_t i = 0; i < alignToks.size(); ++i) {
const string &alignPair = alignToks[i];


@@ -57,7 +57,7 @@ std::auto_ptr<ParseTree> XmlTreeParser::Parse(const std::string &line)
m_tree.ConnectNodes();
SyntaxNode *root = m_tree.GetTop();
assert(root);
-m_words = util::tokenize(m_line.c_str());
+m_words = util::tokenize(m_line);
return ConvertTree(*root, m_words);
}


@@ -52,7 +52,7 @@ std::auto_ptr<PcfgTree> XmlTreeParser::Parse(const std::string &line) {
// There is no XML tree.
return std::auto_ptr<PcfgTree>();
}
-m_words = util::tokenize(m_line.c_str());
+m_words = util::tokenize(m_line);
return ConvertTree(*root, m_words);
}


@@ -45,7 +45,7 @@ int main(int argc, char* argv[])
map< string, int > topLabelCollection; // count of top labels, not used
SyntaxTree tree;
ProcessAndStripXMLTags( inBufferString, tree, labelCollection, topLabelCollection, false );
-const vector< string > inWords = util::tokenize( inBufferString.c_str() );
+const vector< string > inWords = util::tokenize( inBufferString );
// output tree
// cerr << "BEFORE:" << endl << tree;


@@ -322,7 +322,7 @@ void LexicalTable::load( const string &filePath )
i++;
if (i%100000 == 0) cerr << "." << flush;
-const vector<string> token = util::tokenize( line.c_str() );
+const vector<string> token = util::tokenize( line );
if (token.size() != 3) {
cerr << "line " << i << " in " << filePath << " has wrong number of tokens, skipping:\n" <<
token.size() << " " << token[0] << " " << line << endl;


@@ -25,7 +25,7 @@ StringTree *XmlTreeParser::Parse(const std::string &line) {
tree_.ConnectNodes();
SyntaxNode *root = tree_.GetTop();
assert(root);
-words_ = util::tokenize(line_.c_str());
+words_ = util::tokenize(line_);
return ConvertTree(*root, words_);
}


@@ -85,7 +85,7 @@ void DTable::load( const string& fileName )
abort();
}
-const vector<string> token = util::tokenize(line.c_str());
+const vector<string> token = util::tokenize(line);
if (token.size() < 2) {
cerr << "line " << i << " in " << fileName << " too short, skipping\n";
continue;


@@ -37,6 +37,15 @@ inline std::vector<std::string> tokenize(const char input[])
return token;
}
+/** Split input string into a series of tokens.
+ *
+ * Like tokenize(const char[]), but takes a std::string.
+ */
+inline std::vector<std::string> tokenize(const std::string &input)
+{
+  return tokenize(input.c_str());
+}
} // namespace util
#endif
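
For completeness, a small usage sketch of the new overload. It assumes util::tokenize splits on whitespace and that the header is reachable as "tokenize.h"; both are assumptions, since neither appears in this diff:

    #include <iostream>
    #include <string>
    #include <vector>
    #include "tokenize.h"  // assumed header name for util::tokenize

    int main()
    {
      const std::string line = "a b c";
      // Resolves to the std::string overload, which delegates to the
      // const char[] version via c_str().
      const std::vector<std::string> tokens = util::tokenize(line);
      std::cout << tokens.size() << std::endl;  // expected: 3, given whitespace splitting
      return 0;
    }

Delegating through c_str() keeps a single tokenization implementation: c_str() returns a pointer to the string's NUL-terminated buffer without copying, so the wrapper adds no overhead beyond the call itself.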