// usql/lexer.cpp — tokenizer for the usql SQL dialect.
#include "lexer.h"
#include "exception.h"
#include <algorithm>
namespace usql {
// Build a token from its raw text and classified type.
Token::Token(const std::string &token_str, TokenType typ)
    : token_string(token_str), type(typ) {}
Lexer::Lexer() {
// Master tokenizer pattern. Alternation order is significant: the regex engine
// takes the first alternative that matches, so doubles come before ints,
// identifiers before single chars, and two-char operators (!=, <>, ==, >=, <=,
// ~=) before their one-char prefixes (>, <, =, ~). Also matches newlines
// (\n, \r, \r\n), single-quoted literals with '' escapes, double-quoted
// literals (non-greedy, no escapes), and %-to-end-of-line comments.
k_words_regex =
"[-+]?[0-9]+\\.[0-9]+|[-+]?[0-9]+|[A-Za-z]+[A-Za-z0-9_#]*|[\\(\\)\\[\\]\\{\\}]|[-\\+\\*/"
",;:\?]|!=|<>|==|>=|<=|~=|>|<|=|;|~|\\||\n|\r|\r\n|'([^']|'')*'|\".*?\"|%.*?\n";
// Full-match patterns used by type() to classify lexemes.
k_int_regex = "[-+]?[0-9]+";
// NOTE(review): k_words_regex has no alternative containing '_' in digit runs,
// so underscored integers likely never reach type() — confirm before relying on it.
k_int_underscored_regex = "[-+]?[0-9][0-9_]+[0-9]";
k_double_regex = "[-+]?[0-9]+\\.[0-9]+";
k_identifier_regex = "[A-Za-z]+[A-Za-z0-9_#]*";
}
void Lexer::parse(const std::string &code) {
if (code.empty())
throw Exception("Lexer.parse empty code");
m_tokens.clear();
m_tokens.reserve(64);
m_code_str = code;
if (!m_code_str.empty() && m_code_str.back() != '\n') {
m_code_str.append("\n"); // temp solution to prevent possible situation when last line is a comment
}
auto words_begin = std::sregex_iterator(m_code_str.begin(), m_code_str.end(), k_words_regex);
auto words_end = std::sregex_iterator();
for (std::sregex_iterator i = words_begin; i != words_end; ++i) {
std::smatch match = *i;
std::string match_str = match.str();
TokenType token_type = type(match_str);
if (token_type == TokenType::undef)
throw Exception("Lexer.parse unknown token type: " + match_str);
if (token_type == TokenType::string_literal)
match_str = stringLiteral(match_str);
if (token_type != TokenType::newline)
m_tokens.emplace_back(match_str, token_type);
}
// DEBUG IT
// debugTokens();
m_index = 0;
}
void Lexer::debugTokens() {
int i = 0;
for (auto & m_token : m_tokens) {
std::cerr << i << "\t" << m_token.token_string << std::endl;
i++;
}
}
// Return the token at the cursor. When the cursor is past the last token
// (a state tokenType() treats as eof), return a synthetic eof token instead
// of indexing out of range, which was undefined behavior.
Token Lexer::currentToken() {
  if (m_index < m_tokens.size())
    return m_tokens[m_index];
  return Token("", TokenType::eof);
}
// Return the token at the cursor and advance past it. At end of stream,
// return a synthetic eof token (previously this indexed one past the end —
// undefined behavior).
Token Lexer::consumeToken() {
  if (m_index >= m_tokens.size())
    return Token("", TokenType::eof);
  Token token = m_tokens[m_index];
  nextToken();
  return token;
}
// Consume the current token, requiring it to have the given type; throws
// (via skipToken) on mismatch. Only TokenType::eof can "match" past the last
// token, so guard that case with a synthetic eof token instead of indexing
// out of range (undefined behavior before).
Token Lexer::consumeToken(TokenType type) {
  int i = m_index;
  skipToken(type); // throws Exception on mismatch
  if (i >= static_cast<int>(m_tokens.size()))
    return Token("", TokenType::eof);
  return m_tokens[i];
}
// Advance the cursor one token, saturating at one-past-the-last token.
void Lexer::nextToken() {
  if (m_index >= m_tokens.size())
    return;
  ++m_index;
}
// Require the current token to be of the given type and advance past it;
// otherwise throw with an error naming the found and expected tokens.
void Lexer::skipToken(TokenType type) {
  if (tokenType() == type) {
    nextToken();
    return;
  }
  // Build the message without indexing past the end of m_tokens: when the
  // expected token is missing at end of input, the old code's consumeToken()
  // read m_tokens[size()] — undefined behavior. Report "eof" instead.
  const std::string found = (m_index < m_tokens.size())
                                ? consumeToken().token_string
                                : typeToString(TokenType::eof);
  throw Exception("ERROR unexpected token " + found + ", instead of " + typeToString(type));
}
// Advance past the current token only when it matches `type`; never throws.
void Lexer::skipTokenOptional(TokenType type) {
  if (tokenType() != type)
    return;
  nextToken();
}
// Type of the token at the cursor, or eof once the cursor is past the end.
TokenType Lexer::tokenType() {
  if (m_index < m_tokens.size())
    return currentToken().type;
  return TokenType::eof;
}
// Type of the token one past the cursor (lookahead), or eof if none.
// Fixed: the old guard `m_index < m_tokens.size() - 1` underflowed when
// m_tokens was empty (size() is unsigned, 0 - 1 wraps to SIZE_MAX), letting
// the code index m_tokens[1] on an empty vector — undefined behavior. An
// empty token vector is reachable: input of only newlines is filtered out.
TokenType Lexer::nextTokenType() {
  return m_index + 1 < m_tokens.size() ? m_tokens[m_index + 1].type : TokenType::eof;
}
// True for comparison operators: =, !=/<> , >, >=, <, <=, and `is`.
bool Lexer::isRelationalOperator(TokenType token_type) {
  switch (token_type) {
  case TokenType::equal:
  case TokenType::not_equal:
  case TokenType::greater:
  case TokenType::greater_equal:
  case TokenType::lesser:
  case TokenType::lesser_equal:
  case TokenType::is:
    return true;
  default:
    return false;
  }
}
// True for the boolean connectives `and` / `or`.
bool Lexer::isLogicalOperator(TokenType token_type) {
  switch (token_type) {
  case TokenType::logical_and:
  case TokenType::logical_or:
    return true;
  default:
    return false;
  }
}
// True for the arithmetic operators +, -, *, /.
bool Lexer::isArithmeticalOperator(TokenType token_type) {
  switch (token_type) {
  case TokenType::plus:
  case TokenType::minus:
  case TokenType::multiply:
  case TokenType::divide:
    return true;
  default:
    return false;
  }
}
// Classify a raw lexeme. Checks run in order: punctuation/operators, keywords,
// newlines, comments, quoted literals, then numeric/identifier patterns;
// anything left over is TokenType::undef (which makes parse() throw).
// Keywords are matched case-sensitively (lowercase only).
TokenType Lexer::type(const std::string &token) {
  // --- punctuation and operators ---
  if (token == ";") return TokenType::semicolon;
  if (token == "+") return TokenType::plus;
  if (token == "-") return TokenType::minus;
  if (token == "*") return TokenType::multiply;
  if (token == "/") return TokenType::divide;
  if (token == "(") return TokenType::open_paren;
  if (token == ")") return TokenType::close_paren;
  // Accept "==" as well as "=": k_words_regex tokenizes "==" and
  // typeToString() prints TokenType::equal as "==", but "==" previously fell
  // through to undef and made parse() reject it as an unknown token.
  if (token == "=" || token == "==") return TokenType::equal;
  if (token == "!=" || token == "<>") return TokenType::not_equal;
  if (token == ">") return TokenType::greater;
  if (token == ">=") return TokenType::greater_equal;
  if (token == "<") return TokenType::lesser;
  if (token == "<=") return TokenType::lesser_equal;
  // --- keywords ---
  if (token == "is") return TokenType::is;
  if (token == "as") return TokenType::keyword_as;
  if (token == "create") return TokenType::keyword_create;
  if (token == "drop") return TokenType::keyword_drop;
  if (token == "where") return TokenType::keyword_where;
  if (token == "order") return TokenType::keyword_order;
  if (token == "by") return TokenType::keyword_by;
  if (token == "offset") return TokenType::keyword_offset;
  if (token == "limit") return TokenType::keyword_limit;
  if (token == "asc") return TokenType::keyword_asc;
  if (token == "desc") return TokenType::keyword_desc;
  if (token == "from") return TokenType::keyword_from;
  if (token == "delete") return TokenType::keyword_delete;
  if (token == "table") return TokenType::keyword_table;
  if (token == "index") return TokenType::keyword_index;
  if (token == "on") return TokenType::keyword_on;
  if (token == "insert") return TokenType::keyword_insert;
  if (token == "into") return TokenType::keyword_into;
  if (token == "values") return TokenType::keyword_values;
  if (token == "select") return TokenType::keyword_select;
  if (token == "set") return TokenType::keyword_set;
  if (token == "copy") return TokenType::keyword_copy;
  if (token == "update") return TokenType::keyword_update;
  if (token == "load") return TokenType::keyword_load;
  if (token == "save") return TokenType::keyword_save;
  if (token == "not") return TokenType::keyword_not;
  if (token == "null") return TokenType::keyword_null;
  if (token == "integer") return TokenType::keyword_integer;
  if (token == "float" || token == "double") return TokenType::keyword_float;
  if (token == "varchar") return TokenType::keyword_varchar;
  if (token == "date") return TokenType::keyword_date;
  if (token == "boolean") return TokenType::keyword_bool;
  if (token == "true") return TokenType::keyword_true;
  if (token == "false") return TokenType::keyword_false;
  if (token == "distinct") return TokenType::keyword_distinct;
  if (token == "show") return TokenType::keyword_show;
  if (token == "or") return TokenType::logical_or;
  if (token == "and") return TokenType::logical_and;
  if (token == ",") return TokenType::comma;
  // --- newlines and comments ---
  if (token == "\n" || token == "\r\n" || token == "\r") return TokenType::newline;
  if (token.length() > 1 && token.at(0) == '%' && (token.at(token.length() - 1) == '\n' || token.at(token.length() - 1) == '\r'))
    return TokenType::comment;
  // --- quoted string literals (must start and end with the same quote) ---
  if (token.length() >= 2 && token.at(0) == '"')
    return (token.at(token.length() - 1) == '"') ? TokenType::string_literal : TokenType::undef;
  if (token.length() >= 2 && token.at(0) == '\'')
    return (token.at(token.length() - 1) == '\'') ? TokenType::string_literal : TokenType::undef;
  // --- numbers and identifiers (full-match regexes) ---
  if (std::regex_match(token, k_int_regex)) return TokenType::int_number;
  // NOTE(review): k_words_regex never emits lexemes with '_' in digit runs,
  // so this underscored-int branch is likely unreachable — confirm.
  if (std::regex_match(token, k_int_underscored_regex)) return TokenType::int_number;
  if (std::regex_match(token, k_double_regex)) return TokenType::double_number;
  if (std::regex_match(token, k_identifier_regex)) return TokenType::identifier;
  return TokenType::undef;
}
// Strip the surrounding quotes from a string-literal lexeme. Double-quoted
// literals are returned verbatim (no escape processing). Single-quoted
// literals additionally expand '' -> ' and the backslash escapes \n and \t;
// any other backslash (or a quote/backslash as the final character) is
// copied through unchanged.
std::string Lexer::stringLiteral(std::string token) {
  const bool single_quoted = token[0] == '\'' && token[token.size() - 1] == '\'';
  std::string inner = token.substr(1, token.size() - 2);
  if (!single_quoted)
    return inner;
  std::string result;
  result.reserve(inner.size());
  std::string::size_type pos = 0;
  while (pos < inner.size()) {
    const char c = inner[pos];
    const bool has_next = pos + 1 < inner.size();
    if (c == '\'' && has_next && inner[pos + 1] == '\'') {
      result.push_back('\''); // doubled quote -> single quote
      pos += 2;
      continue;
    }
    if (c == '\\' && has_next) {
      const char next = inner[pos + 1];
      if (next == 'n') {
        result.push_back('\n');
        pos += 2;
        continue;
      }
      if (next == 't') {
        result.push_back('\t');
        pos += 2;
        continue;
      }
    }
    result.push_back(c);
    ++pos;
  }
  return result;
}
// Human-readable name for a token type, used in error messages (skipToken)
// and debugging. Operator types print as their symbol, keywords as their
// word, and compound types (numbers, literals) as a descriptive phrase.
std::string Lexer::typeToString(TokenType token_type) {
switch (token_type) {
case TokenType::undef: return "undef";
case TokenType::identifier: return "identifier";
case TokenType::plus: return "+";
case TokenType::minus: return "-";
case TokenType::multiply: return "*";
case TokenType::divide: return "/";
// NOTE(review): equal prints as "==" although type() maps "=" to
// TokenType::equal — verify which spelling error messages should show.
case TokenType::equal: return "==";
case TokenType::not_equal: return "!=";
case TokenType::greater: return ">";
case TokenType::greater_equal: return ">=";
case TokenType::lesser: return "<";
case TokenType::lesser_equal: return "<=";
case TokenType::is: return "is";
case TokenType::keyword_as: return "as";
case TokenType::keyword_create: return "create";
case TokenType::keyword_drop: return "drop";
case TokenType::keyword_where: return "where";
case TokenType::keyword_order: return "order";
case TokenType::keyword_by: return "by";
case TokenType::keyword_offset: return "offset";
case TokenType::keyword_limit: return "limit";
case TokenType::keyword_asc: return "asc";
case TokenType::keyword_desc: return "desc";
case TokenType::keyword_table: return "table";
case TokenType::keyword_index: return "index";
case TokenType::keyword_on: return "on";
case TokenType::keyword_into: return "into";
case TokenType::keyword_values: return "values";
case TokenType::keyword_select: return "select";
case TokenType::keyword_set: return "set";
case TokenType::keyword_copy: return "copy";
case TokenType::keyword_update: return "update";
case TokenType::keyword_load: return "load";
case TokenType::keyword_save: return "save";
case TokenType::keyword_not: return "not";
case TokenType::keyword_null: return "null";
case TokenType::keyword_integer: return "integer";
case TokenType::keyword_float: return "float";
case TokenType::keyword_varchar: return "varchar";
case TokenType::keyword_date: return "date";
case TokenType::keyword_bool: return "boolean";
case TokenType::keyword_true: return "true";
case TokenType::keyword_false: return "false";
case TokenType::keyword_distinct: return "distinct";
case TokenType::keyword_show: return "show";
case TokenType::int_number: return "int number";
case TokenType::double_number: return "double number";
case TokenType::string_literal: return "string literal";
case TokenType::open_paren: return "(";
case TokenType::close_paren: return ")";
case TokenType::logical_and: return "and";
case TokenType::logical_or: return "or";
case TokenType::semicolon: return ";";
case TokenType::comma: return ",";
case TokenType::newline: return "newline";
case TokenType::comment: return "comment";
case TokenType::eof: return "eof";
// Fallback for types not covered above (e.g. keyword_from, keyword_delete
// have no case here) — flags the gap rather than crashing.
default:
return "FIXME, unknown token type";
}
}
} // namespace usql