
I am trying to use Boost Spirit to parse the following grammar:

sentence: noun verb | sentence conjunction sentence

conjunction: "and"

noun: "birds" | "cats"

verb: "fly" | "meow"

When the grammar contains only the noun >> verb rule, parsing succeeds. When the grammar is modified to also include the sentence >> conjunction >> sentence rule and I provide an invalid input, e.g. "birds fly" instead of "birdsfly", I get an unhandled exception at runtime.

Here is the code, modified from an example in the Boost documentation:

#define BOOST_VARIANT_MINIMIZE_SIZE
#include <boost/config/warning_disable.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/lex_lexertl.hpp>
#include <boost/spirit/include/phoenix_operator.hpp>
#include <boost/spirit/include/phoenix_statement.hpp>
#include <boost/spirit/include/phoenix_container.hpp>
#include <iostream>
#include <string>

using namespace boost::spirit;
using namespace boost::spirit::ascii;

template <typename Lexer>
struct token_list : lex::lexer<Lexer>
{
    token_list()
    {
        noun = "birds|cats";    
        verb =  "fly|meow";
        conjunction = "and";

        this->self.add
            (noun)         
            (verb) 
            (conjunction)
        ;
    }
    lex::token_def<std::string> noun, verb, conjunction;
};

template <typename Iterator>
struct Grammar : qi::grammar<Iterator>
{
    template <typename TokenDef>
    Grammar(TokenDef const& tok)
      : Grammar::base_type(sentence)
    {
        sentence = (tok.noun >> tok.verb)
                 | (sentence >> tok.conjunction >> sentence) >> eoi
                 ;
    }
    qi::rule<Iterator> sentence;
};

int main()
{
    typedef lex::lexertl::token<char const*, boost::mpl::vector<std::string>> token_type;
    typedef lex::lexertl::lexer<token_type> lexer_type;
    typedef token_list<lexer_type>::iterator_type iterator_type;

    token_list<lexer_type> word_count;
    Grammar<iterator_type> g(word_count);

    std::string str = "birdsfly";
    //std::string str = "birds fly"; // this input caused the unhandled exception

    char const* first = str.c_str();
    char const* last  = &first[str.size()];

    bool r = lex::tokenize_and_parse(first, last, word_count, g);

    if (r) {
        std::cout << "Parsing passed" << "\n";
    }
    else {
        std::string rest(first, last);
        std::cerr << "Parsing failed\n" << "stopped at: \"" << rest << "\"\n";
    }
    system("PAUSE");
    return 0;
}

1 Answer


You have left recursion on sentence in the second branch of your rule:

sentence = sentence >> ....

always recurses into sentence first, so you get a stack overflow (that is the unhandled exception you are seeing).

Since Spirit generates recursive-descent parsers, left recursion has to be rewritten as iteration. I would suggest writing the rule, e.g.:

sentence = 
      (tok.noun >> tok.verb) 
  >> *(tok.conjunction >> sentence) 
  >> qi::eoi
  ;

Now the result shows:

g++ -Wall -pedantic -std=c++0x -g -O0 test.cpp -o test
Parsing failed
stopped at: " fly"

(And, of course, the inevitable "sh: PAUSE: command not found"...)
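By the way, the parse now stops at " fly" because the lexer has no token that matches whitespace, so tokenization gives up at the space. If you actually want whitespace-separated input such as "birds fly and cats meow" to be accepted, one possible approach is to let the lexer discard whitespace. The sketch below is mine, not part of the code above: it adds a whitespace token with a pass_ignore action, which requires switching to lex::lexertl::actor_lexer.

#define BOOST_VARIANT_MINIMIZE_SIZE
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/lex_lexertl.hpp>
#include <boost/spirit/include/phoenix_operator.hpp> // needed for the lex::_pass assignment
#include <iostream>
#include <string>

namespace qi  = boost::spirit::qi;
namespace lex = boost::spirit::lex;

template <typename Lexer>
struct token_list : lex::lexer<Lexer>
{
    token_list()
    {
        noun        = "birds|cats";
        verb        = "fly|meow";
        conjunction = "and";
        whitespace  = "[ \\t\\n]+";

        this->self
            =   noun
            |   verb
            |   conjunction
                // discard whitespace instead of handing it to the parser
            |   whitespace [ lex::_pass = lex::pass_flags::pass_ignore ]
            ;
    }

    lex::token_def<std::string> noun, verb, conjunction;
    lex::token_def<>            whitespace; // no attribute needed
};

template <typename Iterator>
struct Grammar : qi::grammar<Iterator>
{
    template <typename TokenDef>
    Grammar(TokenDef const& tok) : Grammar::base_type(sentence)
    {
        sentence =
              (tok.noun >> tok.verb)
          >> *(tok.conjunction >> sentence)
          >> qi::eoi
          ;
    }
    qi::rule<Iterator> sentence;
};

int main()
{
    typedef std::string::const_iterator It;
    typedef lex::lexertl::token<It, boost::mpl::vector<std::string>> token_type;
    // actor_lexer is required because the whitespace token uses a semantic action
    typedef lex::lexertl::actor_lexer<token_type> lexer_type;
    typedef token_list<lexer_type>::iterator_type iterator_type;

    token_list<lexer_type> tokens;
    Grammar<iterator_type> g(tokens);

    const std::string str = "birds fly and cats meow";

    It first = str.begin();
    It last  = str.end();

    bool r = lex::tokenize_and_parse(first, last, tokens, g);

    std::cout << (r ? "Parsing passed" : "Parsing failed") << "\n";
}

An alternative would be to keep whitespace in a separate lexer state and skip it with qi::in_state("WS") via lex::tokenize_and_phrase_parse; the pass_ignore route shown here just keeps the grammar and the parse call unchanged.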

PS. Please, no using namespace. Instead:

namespace qi  = boost::spirit::qi;
namespace lex = boost::spirit::lex;

Here is a cleaned-up version with some other things removed/fixed: http://coliru.stacked-crooked.com/view?id=1fb26ca3e8c207979eaaf4592c319316-e223fd4a885a77b520bbfe69dda8fb91

#define BOOST_VARIANT_MINIMIZE_SIZE
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/lex_lexertl.hpp>
// #include <boost/spirit/include/phoenix.hpp>
#include <iostream>
#include <string>

namespace qi  = boost::spirit::qi;
namespace lex = boost::spirit::lex;

template <typename Lexer>
struct token_list : lex::lexer<Lexer>
{
    token_list()
    {
        noun        = "birds|cats";    
        verb        = "fly|meow";
        conjunction = "and";

        this->self.add
            (noun)         
            (verb) 
            (conjunction)
        ;
    }

    lex::token_def<std::string> noun, verb, conjunction;
};

template <typename Iterator>
struct Grammar : qi::grammar<Iterator>
{
    template <typename TokenDef>
    Grammar(TokenDef const& tok) : Grammar::base_type(sentence)
    {
        sentence = 
              (tok.noun >> tok.verb) 
          >> *(tok.conjunction >> sentence) 
          >> qi::eoi
          ;
    }
    qi::rule<Iterator> sentence;
};

int main()
{
    typedef std::string::const_iterator It;
    typedef lex::lexertl::token<It, boost::mpl::vector<std::string>> token_type;
    typedef lex::lexertl::lexer<token_type> lexer_type;
    typedef token_list<lexer_type>::iterator_type iterator_type;

    token_list<lexer_type> word_count;         
    Grammar<iterator_type> g(word_count); 

    //std::string str = "birdsfly"; 
    const std::string str = "birds fly";

    It first = str.begin();
    It last  = str.end();

    bool r = lex::tokenize_and_parse(first, last, word_count, g);

    if (r) {
        std::cout << "Parsing passed"<< "\n";
    }
    else {
        std::string rest(first, last);
        std::cerr << "Parsing failed\n" << "stopped at: \"" << rest << "\"\n";
    }
}
answered 2013-07-30T10:21:03.873