Let's consider the following code:
#include <boost/phoenix.hpp>
#include <boost/spirit/include/lex_lexertl.hpp>
#include <boost/spirit/include/qi.hpp>

#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

namespace lex = boost::spirit::lex;
namespace qi = boost::spirit::qi;
namespace phoenix = boost::phoenix;

// Operator kinds used as token attributes by the lexer.
struct operation
{
    enum type
    {
        add,
        sub,
        mul,
        div
    };
};

// Lexer: defines the tokens (with attributes) via regular expressions.
template<typename Lexer>
class expression_lexer
    : public lex::lexer<Lexer>
{
public:
    typedef lex::token_def<operation::type> operator_token_type;
    typedef lex::token_def<double> value_token_type;
    typedef lex::token_def<std::string> variable_token_type;
    typedef lex::token_def<lex::omit> parenthesis_token_type;
    typedef std::pair<parenthesis_token_type, parenthesis_token_type> parenthesis_token_pair_type;
    typedef lex::token_def<lex::omit> whitespace_token_type;

    expression_lexer()
        : operator_add('+'),
          operator_sub('-'),
          operator_mul("[x*]"),
          operator_div("[:/]"),
          value("\\d+(\\.\\d+)?"),
          variable("%(\\w+)"),
          parenthesis({
              std::make_pair(parenthesis_token_type('('), parenthesis_token_type(')')),
              std::make_pair(parenthesis_token_type('['), parenthesis_token_type(']'))
          }),
          whitespace("[ \\t]+")
    {
        this->self
            += operator_add [lex::_val = operation::add]
            |  operator_sub [lex::_val = operation::sub]
            |  operator_mul [lex::_val = operation::mul]
            |  operator_div [lex::_val = operation::div]
            |  value
            |  variable [lex::_val = phoenix::construct<std::string>(lex::_start + 1, lex::_end)]
            |  whitespace [lex::_pass = lex::pass_flags::pass_ignore]
            ;

        std::for_each(parenthesis.cbegin(), parenthesis.cend(),
            [&](parenthesis_token_pair_type const& token_pair)
            {
                this->self += token_pair.first | token_pair.second;
            }
        );
    }

    operator_token_type operator_add;
    operator_token_type operator_sub;
    operator_token_type operator_mul;
    operator_token_type operator_div;
    value_token_type value;
    variable_token_type variable;
    std::vector<parenthesis_token_pair_type> parenthesis;
    whitespace_token_type whitespace;
};

// Grammar: parses the token stream produced by expression_lexer.
template<typename Iterator>
class expression_grammar
    : public qi::grammar<Iterator>
{
public:
    template<typename Tokens>
    explicit expression_grammar(Tokens const& tokens)
        : expression_grammar::base_type(start)
    {
        start %= expression >> qi::eoi;

        expression %= sum_operand >> -(sum_operator >> expression);
        sum_operator %= tokens.operator_add | tokens.operator_sub;

        sum_operand %= fac_operand >> -(fac_operator >> sum_operand);
        fac_operator %= tokens.operator_mul | tokens.operator_div;

        if(!tokens.parenthesis.empty())
            fac_operand %= parenthesised | terminal;
        else
            fac_operand %= terminal;

        terminal %= tokens.value | tokens.variable;

        if(!tokens.parenthesis.empty())
        {
            parenthesised %= tokens.parenthesis.front().first >> expression >> tokens.parenthesis.front().second;
            std::for_each(tokens.parenthesis.cbegin() + 1, tokens.parenthesis.cend(),
                [&](typename Tokens::parenthesis_token_pair_type const& token_pair)
                {
                    parenthesised %= parenthesised.copy() | (token_pair.first >> expression >> token_pair.second);
                }
            );
        }
    }

private:
    qi::rule<Iterator> start;
    qi::rule<Iterator> expression;
    qi::rule<Iterator> sum_operand;
    qi::rule<Iterator> sum_operator;
    qi::rule<Iterator> fac_operand;
    qi::rule<Iterator> fac_operator;
    qi::rule<Iterator> terminal;
    qi::rule<Iterator> parenthesised;
};

int main()
{
    typedef lex::lexertl::token<std::string::const_iterator, boost::mpl::vector<operation::type, double, std::string>> token_type;
    typedef expression_lexer<lex::lexertl::actor_lexer<token_type>> expression_lexer_type;
    typedef expression_lexer_type::iterator_type expression_lexer_iterator_type;
    typedef expression_grammar<expression_lexer_iterator_type> expression_grammar_type;

    expression_lexer_type lexer;
    expression_grammar_type grammar(lexer);

    while(std::cin)
    {
        std::string line;
        std::getline(std::cin, line);

        std::string::const_iterator first = line.begin();
        std::string::const_iterator const last = line.end();

        bool const result = lex::tokenize_and_parse(first, last, lexer, grammar);
        if(!result)
            std::cout << "Parsing failed! Remainder: >" << std::string(first, last) << "<" << std::endl;
        else
        {
            if(first != last)
                std::cout << "Parsing succeeded! Remainder: >" << std::string(first, last) << "<" << std::endl;
            else
                std::cout << "Parsing succeeded!" << std::endl;
        }
    }
}
It is a simple parser for arithmetic expressions with values and variables. It is built using expression_lexer for extracting tokens and then expression_grammar for parsing those tokens.

Use of a lexer for such a small case might seem an overkill and probably is one. But that is the cost of a simplified example. Also note that using the lexer makes it easy to define tokens with regular expressions, which in turn makes it easy to define them from external code (and user-provided configuration in particular). With the example provided it would be no issue at all to read the token definitions from an external config file and, for example, allow the user to change variables from %name to $name.
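To be clear, this is not part of the code above, just a minimal sketch of what I mean by that configurability (the file name and format are made up): the token patterns are ordinary runtime strings, so they could just as well come from a file.

#include <fstream>
#include <string>

// Hypothetical sketch only: load the pattern for the variable token from a
// config file, e.g. a file whose single line is \$(\w+) instead of %(\w+).
std::string load_variable_pattern()
{
    std::ifstream config("tokens.cfg");   // made-up file name
    std::string pattern;
    std::getline(config, pattern);
    return pattern;
}
// In expression_lexer's constructor one could then write, for example,
// variable(load_variable_pattern()) instead of variable("%(\\w+)").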
The code seems to be working fine (checked on Visual Studio 2013 with Boost 1.61).
The expression_lexer has attributes attached to the tokens. I guess they work since the code compiles, but I don't really know how to check.
Ultimately I would like the grammar to build me an std::vector with the reverse Polish notation of the expression (where every element would be a boost::variant over either operation::type, double, or std::string).
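Just to illustrate the target I have in mind (this is not yet in the code above, only what I would like to end up with):

#include <boost/variant.hpp>
#include <vector>

// The element type I would like every RPN entry to have:
typedef boost::variant<operation::type, double, std::string> rpn_element_type;
typedef std::vector<rpn_element_type> rpn_type;
// and eventually the start rule would be declared roughly as
// qi::rule<Iterator, rpn_type()> start;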
The problem, however, is that I failed to use the token attributes in my expression_grammar. For example, if you try to change sum_operator in the following way:

qi::rule<Iterator, operation::type ()> sum_operator;

you will get a compilation error. I expected this to work since operation::type is the attribute of both operator_add and operator_sub and so also of their alternative. And yet it doesn't compile. Judging from the error in assign_to_attribute_from_iterators, it seems the parser tries to build the attribute value directly from the input stream range. Which means it ignores the [lex::_val = operation::add] I specified in my lexer.
Changing that to
qi::rule<Iterator, operation::type (operation::type)> sum_operator;
didn't help either.
I also tried changing the definition to

sum_operator %= (tokens.operator_add | tokens.operator_sub) [qi::_val = qi::_1];

but that didn't help either.
How can I work around that? I know I could use symbols from Qi. But I want to keep the lexer so that it stays easy to configure the regexes for the tokens. I could also extend assign_to_attribute_from_iterators as described in the documentation, but that roughly doubles the work. I guess I could also skip the attributes on the lexer and have them only in the grammar. But that again doesn't work well with the flexibility of the variable token (in my actual case there is slightly more logic there, so that it is also configurable which part of the token forms the actual name of the variable, while here it is fixed to just skip the first character). Anything else?
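For comparison, the qi::symbols alternative I mention would look roughly like the sketch below; it matches fixed strings directly on the character input, which is exactly why it does not give me the regex-based, externally configurable tokens the lexer does:

// Rough sketch of the qi::symbols approach (works on the plain character
// stream, i.e. without the lexer):
qi::symbols<char, operation::type> sum_op;
sum_op.add("+", operation::add)("-", operation::sub);
// sum_operator %= sum_op;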
Also a side question, in case anyone knows: is there a way to access the capture groups of the token's regular expression from the token action? So that instead of

variable [lex::_val = phoenix::construct<std::string>(lex::_start + 1, lex::_end)]

I would be able to build the string from the capture group and thus easily handle formats like $var$.
Edit: I have improved the whitespace skipping following the conclusions from Whitespace skipper when using Boost.Spirit Qi and Lex. It is a simplification that does not affect the questions asked here.