How to get rid of escape character in a token with spirit::lex?

I want to tokenize my own extension of SQL syntax. This involves recognizing an escaped double quote inside a double-quoted string. For example, in MySQL these two string tokens are equivalent: """" (the second double quote acts as an escape character) and '"'. I have tried different things, but I am stuck on how to replace the value of a token.

#include <boost/spirit/include/lex_lexertl.hpp>
namespace lex = boost::spirit::lex;
template <typename Lexer>
struct sql_tokens : lex::lexer<Lexer>
{
  sql_tokens()
  {
    string_quote_double = "\\\"";    // '"'
    this->self("INITIAL")
      = string_quote_double [ lex::_state = "STRING_DOUBLE" ] // how to also ignore + ctx.more()?
      | ...
      ;
    this->self("STRING_DOUBLE") 
      = lex::token_def<>("[^\"]*") // action: ignore + ctx.more()
      | lex::token_def<>("\"\"") // how to set token value to '"' ?
      | lex::token_def<>("\"") [ lex::_state = "INITIAL" ]
      ;
  }
  lex::token_def<> string_quote_double, ...;
};

So how do I set the value of the token to " when "" is found?

Apart from that, I have the following question: I can write a functor for the semantic action that calls ctx.more() and ignores the token at the same time (thereby combining the "low-level" tokens into a "high-level" string token). But how can I elegantly combine that with lex::_state = ".."?

EDITED in response to the comments, see "UPDATE" below


I would suggest not trying to solve this in the lexer. Just let the lexer yield the raw strings:

template <typename Lexer>
    struct mylexer_t : lex::lexer<Lexer>
{
    mylexer_t()
    {
        string_quote_double = "\\\"([^\"]|\\\"\\\")*\\\""; // a quoted string, with "" as an escaped quote
        this->self("INITIAL")
            = string_quote_double
            | lex::token_def<>("[ trn]") [ lex::_pass = lex::pass_flags::pass_ignore ]
            ;
    }
    lex::token_def<std::string> string_quote_double;
};

Note: exposing the token attribute like this requires a modified token typedef:

typedef lex::lexertl::token<char const*, boost::mpl::vector<char, std::string> > token_type;
typedef lex::lexertl::actor_lexer<token_type> lexer_type;

The post-processing then happens in the parser:

template <typename Iterator> struct mygrammar_t
    : public qi::grammar<Iterator, std::vector<std::string>()>
{
    typedef mygrammar_t<Iterator> This;
    template <typename TokenDef>
        mygrammar_t(TokenDef const& tok) : mygrammar_t::base_type(start)
    {
        using namespace qi;
        string_quote_double %= tok.string_quote_double [ undoublequote ];
        start = *string_quote_double;
        BOOST_SPIRIT_DEBUG_NODES((start)(string_quote_double));
    }
  private:
    qi::rule<Iterator, std::vector<std::string>()> start;
    qi::rule<Iterator, std::string()> string_quote_double;
};
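Note the %= in string_quote_double: attaching a semantic action with a plain = would suppress Qi's automatic attribute propagation, so %= is used to keep the token's std::string attribute flowing into the rule while the action rewrites it in place.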

As you can see, undoublequote can be any Phoenix actor that satisfies the criteria for Spirit semantic actions. A brain-dead example implementation would be:

static bool undoublequote(std::string& val)
{
    auto outidx = 0;
    for(auto in = val.begin(); in!=val.end(); ++in) {
        switch(*in) {
            case '"': 
                if (++in == val.end()) { // eat the escape
                    // end of input reached
                    val.resize(outidx); // resize to effective chars
                    return true;
                }
                // fall through
            default:
                val[outidx++] = *in; // append the character
        }
    }
    return false; // not ended with double quote as expected
}
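For example, fed the raw token text with its surrounding quotes, the function strips the delimiters and collapses each "" into a single " in place. A minimal sanity check (not from the original answer, just illustrating the behaviour):

#include <cassert>
#include <string>
// assumes the undoublequote() shown above is in scope
int main()
{
    std::string s = "\"bla\"\"blo\"";   // raw token text: "bla""blo"
    assert(undoublequote(s));           // true: the input ended on the closing quote
    assert(s == "bla\"blo");            // delimiters stripped, "" collapsed to "
}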

But I suggest you write a "proper" de-escaper (because I am pretty sure MySQL will also allow \t, \r, \u001e or even more archaic things).
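A sketch of what such a de-escaper could look like; the escape set (the doubled quote plus the backslash escapes \t, \r, \n) and the name unescape_sql_string are assumptions for illustration, not something the answer prescribes:

#include <cstddef>
#include <string>

// Hypothetical, more complete de-escaper: handles "" as well as a few
// backslash escapes; returns false if the token is not a well-formed
// double-quoted string. Extend the switch for whatever the dialect allows.
static bool unescape_sql_string(std::string& val)
{
    if (val.size() < 2 || val.front() != '"' || val.back() != '"')
        return false;

    std::string out;
    out.reserve(val.size());
    for (std::size_t i = 1; i < val.size() - 1; ++i) {
        char c = val[i];
        if (c == '"') {                              // must be the first of a doubled quote
            if (i + 1 >= val.size() - 1 || val[i + 1] != '"')
                return false;
            out += '"'; ++i;                         // "" -> "
        } else if (c == '\\' && i + 1 < val.size() - 1) {
            switch (val[++i]) {                      // translate the escaped character
                case 't': out += '\t'; break;
                case 'r': out += '\r'; break;
                case 'n': out += '\n'; break;
                default:  out += val[i];             // \" -> ", \\ -> \, unknown kept as-is
            }
        } else {
            out += c;
        }
    }
    val.swap(out);
    return true;
}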

I have some more complete samples from older answers here:

  • TODO
  • A search page for answers using Spirit, with many related answers

UPDATE

Indeed, as you point out, it is fairly easy to integrate the normalization of the attribute value into the lexer itself:

template <typename Lexer>
    struct mylexer_t : lex::lexer<Lexer>
{
    struct undoublequote_lex_type {
        template <typename, typename, typename, typename> struct result { typedef void type; };
        template <typename It, typename IdType, typename pass_flag, typename Ctx>
            void operator()(It& f, It& l, pass_flag& pass, IdType& id, Ctx& ctx) const {
                std::string raw(f,l);
                if (undoublequote(raw))
                    ctx.set_value(raw);
                else
                    pass = lex::pass_flags::pass_fail;
            }
    } undoublequote_lex;
    mylexer_t()
    {
        string_quote_double = "\\\"([^\"]|\\\"\\\")*\\\"";
        const static undoublequote_lex_type undoublequote_lex;
        this->self("INITIAL")
            = string_quote_double [ undoublequote_lex ]
            | lex::token_def<>("[ trn]") [ lex::_pass = lex::pass_flags::pass_ignore ]
            ;
    }
    lex::token_def<std::string> string_quote_double;
};

This reuses the same undoublequote function shown above, but wraps it in a Deferred Callable Object (or "polymorphic functor"), undoublequote_lex_type, that satisfies the criteria for Lexer semantic actions.
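With the lexer doing the normalization, the parser-side semantic action presumably becomes redundant and the Qi rule can shrink to plain attribute propagation (the proof of concept below still uses the parser-side variant):

string_quote_double %= tok.string_quote_double; // value already de-escaped by the lexer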


Here is a complete proof of concept:

//#include <boost/config/warning_disable.hpp>
//#define BOOST_SPIRIT_DEBUG_PRINT_SOME 80
//#define BOOST_SPIRIT_DEBUG // before including Spirit
#include <boost/spirit/include/lex_lexertl.hpp>
#include <boost/spirit/include/qi.hpp>
#include <fstream>
#ifdef MEMORY_MAPPED
#   include <boost/iostreams/device/mapped_file.hpp>
#endif
//#include <boost/spirit/include/lex_generate_static_lexertl.hpp>
namespace /*anon*/
{
    namespace phx=boost::phoenix;
    namespace qi =boost::spirit::qi;
    namespace lex=boost::spirit::lex;
    template <typename Lexer>
        struct mylexer_t : lex::lexer<Lexer>
    {
        mylexer_t()
        {
            string_quote_double = "\\\"([^\"]|\\\"\\\")*\\\"";
            this->self("INITIAL")
                = string_quote_double
                | lex::token_def<>("[ trn]") [ lex::_pass = lex::pass_flags::pass_ignore ]
                ;
        }
        lex::token_def<std::string> string_quote_double;
    };
    static bool undoublequote(std::string& val)
    {
        auto outidx = 0;
        for(auto in = val.begin(); in!=val.end(); ++in) {
            switch(*in) {
                case '"': 
                    if (++in == val.end()) { // eat the escape
                        // end of input reached
                        val.resize(outidx); // resize to effective chars
                        return true;
                    }
                    // fall through
                default:
                    val[outidx++] = *in; // append the character
            }
        }
        return false; // not ended with double quote as expected
    }
    template <typename Iterator> struct mygrammar_t
        : public qi::grammar<Iterator, std::vector<std::string>()>
    {
        typedef mygrammar_t<Iterator> This;
        template <typename TokenDef>
            mygrammar_t(TokenDef const& tok) : mygrammar_t::base_type(start)
        {
            using namespace qi;
            string_quote_double %= tok.string_quote_double [ undoublequote ];
            start = *string_quote_double;
            BOOST_SPIRIT_DEBUG_NODES((start)(string_quote_double));
        }
      private:
        qi::rule<Iterator, std::vector<std::string>()> start;
        qi::rule<Iterator, std::string()> string_quote_double;
    };
}
std::vector<std::string> do_test_parse(const std::string& v)
{
    char const *first = &v[0];
    char const *last = first+v.size();
    typedef lex::lexertl::token<char const*, boost::mpl::vector<char, std::string> > token_type;
    typedef lex::lexertl::actor_lexer<token_type> lexer_type;
    typedef mylexer_t<lexer_type>::iterator_type iterator_type;
    const static mylexer_t<lexer_type> mylexer;
    const static mygrammar_t<iterator_type> parser(mylexer);
    auto iter = mylexer.begin(first, last);
    auto end = mylexer.end();
    std::vector<std::string> data;
    bool r = qi::parse(iter, end, parser, data);
    r = r && (iter == end);
    if (!r)
        std::cerr << "parsing (" << iter->state() << ") failed at: '" << std::string(first, last) << "'\n";
    return data;
}
int main(int argc, const char *argv[])
{
    for (auto&& s : do_test_parse("\"bla\"\"blo\"")) // raw input: "bla""blo"
        std::cout << s << std::endl;
}
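The test input is the raw text "bla""blo", so the program should print a single line, bla"blo, with the surrounding quotes stripped and the doubled quote collapsed to one.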

I suggest solving this and similar tasks in the lexer itself, rather than having the lexer return something intermediate and parsing it with extra code afterwards. Double quotes are not the only complication in strings; there can be other escapes as well, and it is better to describe the string parsing process clearly in one place and let the lexer do all the work.

Here is a solution to the problem in the question that uses only the lexer:

using namespace boost::spirit;
namespace px = boost::phoenix;
template <typename Lexer>
struct sql_tokens : public lex::lexer<Lexer>
{
  sql_tokens()
  {
    string = '"';
    this->self +=
      lex::token_def<>('"')
      [
        lex::_state = "STRING",
        lex::_pass = lex::pass_flags::pass_ignore,
        px::ref(curString) = std::string()
      ];
    std::string& (std::string::*append)(std::string::iterator,
                                        std::string::iterator)
    { &std::string::append<std::string::iterator> };
    this->self("STRING") =
      lex::token_def<>("[^"]*")
      [
        lex::_pass = lex::pass_flags::pass_ignore,
        px::bind(append, curString, lex::_start, lex::_end)
      ] |
      lex::token_def<>("\"\"")
      [
        lex::_pass = lex::pass_flags::pass_ignore,
        px::ref(curString) += px::val("\"")
      ] |
      string
      [
        lex::_val = px::ref(curString),
        lex::_state = "INITIAL"
      ];
    this->self("WS") = lex::token_def<>("[ \t\n]+");
  }
  std::string curString;
  lex::token_def<std::string> string;
};
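For completeness, a possible driver for this lexer-only variant; a sketch under a few assumptions (the token and lexer typedefs mirror the proof of concept above, token_is_valid guards the iteration, and boost::get reads back the std::string value that lex::_val stored in the token):

#include <iostream>
#include <string>
// assumes the sql_tokens definition above is in scope,
// together with the Spirit.Lex includes from the proof of concept

int main()
{
    typedef lex::lexertl::token<char const*, boost::mpl::vector<std::string> > token_type;
    typedef lex::lexertl::actor_lexer<token_type> lexer_type;
    typedef sql_tokens<lexer_type>::iterator_type iterator_type;

    sql_tokens<lexer_type> lexer;
    std::string const input = "\"bla\"\"blo\"";   // raw input: "bla""blo"
    char const* first = input.c_str();
    char const* last  = first + input.size();

    // All intermediate tokens are pass_ignore'd; only the closing-quote token
    // comes through, carrying the accumulated string in its value.
    for (iterator_type it = lexer.begin(first, last), end = lexer.end();
         it != end && token_is_valid(*it); ++it)
    {
        std::cout << boost::get<std::string>(it->value()) << std::endl; // prints: bla"blo
    }
}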