use more consistent types
This commit is contained in:
parent 3ce2eb0557
commit 9ebcac9b45
@@ -93,7 +93,7 @@ std::string string_substring(const std::string & str, long pos, long count) {
     return str.substr(start_pos, count);
 }
 
-size_t string_find_substr(const std::string & str, const std::string & pattern, long pos) {
+size_t string_find_substr(const std::string & str, const std::string & pattern, size_t pos) {
     if (pos >= str.size()) {
         throw std::invalid_argument("Invalid parameter(s) for string-find.");
     }
@@ -19,4 +19,4 @@ std::string string_padd(const std::string & str, int pad_len, char fill_char, bo
 std::string string_substring(const std::string & str, long pos, long count);
 
-size_t string_find_substr(const std::string & str, const std::string & pattern, long pos);
+size_t string_find_substr(const std::string & str, const std::string & pattern, size_t pos);
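Taking the position as size_t lines the parameter up with std::string::size() and std::string::npos, so the pos >= str.size() guard compares two values of the same signedness and a negative offset can no longer be passed in. A minimal standalone illustration of the difference, using plain std::string calls only (none of this is code from the repository):

#include <iostream>
#include <string>

int main() {
    std::string str = "select col_a from table_x";

    long signed_pos = -1;        // a caller bug that a long parameter would accept
    size_t unsigned_pos = 7;     // same type as str.size() and str.find()

    // With a signed position the bounds check needs a second condition,
    // otherwise -1 silently converts to a huge unsigned value inside find().
    if (signed_pos < 0 || static_cast<size_t>(signed_pos) >= str.size())
        std::cout << "signed position rejected\n";

    // With size_t a single comparison is enough: negative values cannot exist.
    if (unsigned_pos < str.size())
        std::cout << "found at " << str.find("from", unsigned_pos) << '\n';
}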
22 lexer.h
@@ -7,7 +7,7 @@
 
 namespace usql {
 
     enum class TokenType {
         undef,
         identifier,
         plus,
@@ -69,17 +69,17 @@ namespace usql {
         newline,
         comment,
         eof
     };
 
     struct Token {
         std::string token_string;
         TokenType type;
 
         Token(const std::string &token_str, TokenType typ);
     };
 
     class Lexer {
     public:
         Lexer();
 
         void parse(const std::string &code);
@@ -108,7 +108,7 @@ namespace usql {
 
         static bool isArithmeticalOperator(TokenType token_type);
 
     private:
         TokenType type(const std::string &token);
 
         static std::string stringLiteral(std::string token);
@@ -116,16 +116,16 @@ namespace usql {
         static std::string typeToString(TokenType token_type);
 
 
     private:
         std::string m_code_str;
         std::vector<Token> m_tokens;
-        int m_index = 0;
+        size_t m_index = 0;
 
         std::regex k_words_regex;
         std::regex k_int_regex;
         std::regex k_int_underscored_regex;
         std::regex k_double_regex;
         std::regex k_identifier_regex;
     };
 
-}
+} // namespace
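The m_index cursor indexes into m_code_str and m_tokens, both of which report their size as size_t, so a signed cursor is exactly the pattern that produces -Wsign-compare warnings whenever it is tested against size(). A small generic sketch of the warning this kind of change removes (not the Lexer itself):

#include <string>
#include <vector>

size_t count_tokens(const std::vector<std::string> &tokens) {
    int signed_i = 0;
    while (signed_i < tokens.size())    // -Wsign-compare: int vs std::vector::size_type
        ++signed_i;

    size_t unsigned_i = 0;
    while (unsigned_i < tokens.size())  // clean: both operands are the same unsigned type
        ++unsigned_i;

    return unsigned_i;
}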
88 parser.cpp
@@ -318,58 +318,58 @@ namespace usql {
     }
 
     std::vector<ColOrderNode> Parser::parse_order_by_clause() {
        std::vector<ColOrderNode> order_cols;
 
        if (m_lexer.tokenType() == TokenType::keyword_order) {
            m_lexer.skipToken(TokenType::keyword_order);
            m_lexer.skipToken(TokenType::keyword_by);
 
            do {
                bool asc = true;
 
                auto cspec_token_type = m_lexer.tokenType();
                std::string cspec_token = m_lexer.consumeToken().token_string;
 
                if (m_lexer.tokenType() == TokenType::keyword_asc) {
                    m_lexer.skipToken(TokenType::keyword_asc);
                } else if (m_lexer.tokenType() == TokenType::keyword_desc) {
                    m_lexer.skipToken(TokenType::keyword_desc);
                    asc = false;
                }
 
                switch (cspec_token_type) {
                    case TokenType::int_number:
                        order_cols.emplace_back(std::stoi(cspec_token), asc);
                        break;
                    case TokenType::identifier:
                        order_cols.emplace_back(cspec_token, asc);
                        break;
                    default:
                        throw Exception("order by column can be either column m_index or identifier");
                }
 
                m_lexer.skipTokenOptional(TokenType::comma);
            } while (m_lexer.tokenType() != TokenType::eof && m_lexer.tokenType() != TokenType::keyword_offset && m_lexer.tokenType() != TokenType::keyword_limit);
        }
 
        return order_cols;
     }
 
     OffsetLimitNode Parser::parse_offset_limit_clause() {
-        int offset = 0;
-        int limit = 999999999;
+        size_t offset = 0;
+        size_t limit = 999999999;
 
        if (m_lexer.tokenType() == TokenType::keyword_offset) {
            m_lexer.skipToken(TokenType::keyword_offset);
            offset = std::stoi(m_lexer.consumeToken(TokenType::int_number).token_string);
        }
 
        if (m_lexer.tokenType() == TokenType::keyword_limit) {
            m_lexer.skipToken(TokenType::keyword_limit);
            limit = std::stoi(m_lexer.consumeToken(TokenType::int_number).token_string);
        }
 
        return OffsetLimitNode{offset, limit};
     }
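offset and limit are now size_t, matching the OffsetLimitNode definition in parser.h below and the row counts they are later compared against. One detail worth noting: std::stoi still returns int, so the parsed value passes through an int before the implicit conversion to size_t, and a token that happened to parse to a negative int would wrap to a very large unsigned value. A standalone illustration of that conversion (not code from this repository):

#include <iostream>
#include <string>

int main() {
    // The usual case: a non-negative token converts cleanly.
    size_t limit = std::stoi("42");      // 42

    // The edge case: a negative int wraps when assigned to size_t.
    size_t offset = std::stoi("-1");     // 18446744073709551615 on 64-bit platforms

    std::cout << limit << ' ' << offset << '\n';
}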
10 parser.h
@@ -79,10 +79,10 @@ struct ColOrderNode : Node {
 
 
 struct OffsetLimitNode : Node {
-    int offset;
-    int limit;
+    size_t offset;
+    size_t limit;
 
-    OffsetLimitNode(int off, int lim) : Node(NodeType::offset_limit), offset(off), limit(lim) {}
+    OffsetLimitNode(size_t off, size_t lim) : Node(NodeType::offset_limit), offset(off), limit(lim) {}
 
     void dump() const override {
         std::cout << "type: OffsetLimitNode, offset: " << offset << ", limit: " << limit << std::endl;
@@ -107,10 +107,10 @@ struct ColDefNode : Node {
     std::string name;
     ColumnType type;
     int order;
-    int length;
+    size_t length;
     bool null;
 
-    ColDefNode(std::string col_name, ColumnType col_type, int col_order, int col_len, bool nullable) :
+    ColDefNode(std::string col_name, ColumnType col_type, int col_order, size_t col_len, bool nullable) :
         Node(NodeType::column_def), name(std::move(col_name)), type(col_type), order(col_order), length(col_len),
         null(nullable) {}
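With the members, the constructor parameters, and the parser-side locals all declared as size_t, a parsed offset, limit, or length keeps one type from the point it is read to the point it is stored, and brace-initialization never has to narrow. A simplified, hypothetical mirror of the idea (struct and variable names are invented for the example, they are not the project's types):

#include <cstddef>

struct LimitsOld { int offset;    int limit; };      // shape before the change
struct LimitsNew { size_t offset; size_t limit; };   // shape after the change

int main() {
    size_t off = 10, lim = 20;     // the parser now produces size_t values
    // LimitsOld bad{off, lim};    // ill-formed: list-initialization would narrow size_t to int
    LimitsNew ok{off, lim};        // fine: the value keeps the same type end to end
    return ok.offset == 10 && ok.limit == 20 ? 0 : 1;
}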
6 row.cpp
@@ -53,7 +53,7 @@ int ColBooleanValue::compare(ColValue &other) const {
 }
 
 Row::Row(const Row &other) : m_visible(other.m_visible), m_columns(other.m_columns.size()) {
-    for (int i = 0; i < other.m_columns.size(); i++) {
+    for (size_t i = 0; i < other.m_columns.size(); i++) {
         if (other[i].isNull())
             continue; // for null NOP
@@ -156,7 +156,7 @@ void Row::setColumnValue(ColDefNode *col_def, ValueNode *col_value) {
 }
 
 int Row::compare(const Row &other) const {
-    for (int ci = 0; ci < m_columns.size(); ci++) {
+    for (size_t ci = 0; ci < m_columns.size(); ci++) {
         int cmp = this->operator[](ci).compare(other[ci]);
         if (cmp != 0) return cmp;
     }
@@ -166,7 +166,7 @@ int Row::compare(const Row &other) const {
 void Row::print(const std::vector<ColDefNode> &col_defs) {
     std::string out{"| "};
 
-    for (int ci = 0; ci < m_columns.size(); ci++) {
+    for (size_t ci = 0; ci < m_columns.size(); ci++) {
         auto & col_def = col_defs[ci];
         int col_size = print_get_column_size(col_def);
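All of these loops count upward, where int and size_t behave identically; the only place an unsigned counter needs care is a descending loop, because size_t can never go below zero. A generic reminder, not code from this commit:

#include <cstddef>
#include <iostream>

int main() {
    // Ascending: identical behaviour with int or size_t.
    for (size_t i = 0; i < 3; i++) std::cout << i << ' ';

    // Descending written the "int way" would never terminate:
    // for (size_t i = 2; i >= 0; i--) {}   // i >= 0 is always true for unsigned types

    // A correct descending form tests before decrementing.
    for (size_t i = 3; i-- > 0; ) std::cout << i << ' ';
    std::cout << '\n';
}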
@@ -85,7 +85,7 @@ std::string Table::csv_string() {
     out_string.reserve(m_rows.size() * k_row_size_est);
 
     // header
-    for(int i = 0; i < m_col_defs.size(); i++) {
+    for(size_t i = 0; i < m_col_defs.size(); i++) {
         if (i > 0) out_string += ',';
         out_string += m_col_defs[i].name;
     }
@@ -96,7 +96,7 @@ std::string Table::csv_string() {
         std::string csv_line{"\n"};
         csv_line.reserve(k_row_size_est);
 
-        for (int i = 0; i < m_col_defs.size(); i++) {
+        for (size_t i = 0; i < m_col_defs.size(); i++) {
             if (i > 0) csv_line += ',';
 
             auto &col = row[i];
@@ -193,7 +193,7 @@ void Table::commit_row(Row &row) {
 void Table::commit_copy_of_row(Row &row) {
     Row& new_row = create_empty_row();
 
-    for(int i = 0; i < m_col_defs.size(); i++) {
+    for(size_t i = 0; i < m_col_defs.size(); i++) {
         ColValue &ct = row[i];
 
         if (ct.isNull()) {
@@ -236,7 +236,7 @@ void Table::validate_column(const ColDefNode *col_def, ColValue &col_val) {
 }
 
 void Table::validate_row(Row &row) {
-    for(int i = 0; i < m_col_defs.size(); i++) {
+    for(size_t i = 0; i < m_col_defs.size(); i++) {
         ColDefNode col_def = m_col_defs[i];
         ColValue &col_val = row[i];
@@ -114,8 +114,8 @@ void USql::select_row(SelectFromTableNode &where_node,
 }
 
 bool USql::check_for_aggregate_only_functions(SelectFromTableNode &node, size_t result_cols_cnt) {
-    int aggregate_funcs = 0;
-    for (int i = 0; i < node.cols_names->size(); i++) {
+    size_t aggregate_funcs = 0;
+    for (size_t i = 0; i < node.cols_names->size(); i++) {
         SelectColNode * col_node = &node.cols_names->operator[](i);
         if (col_node->value->node_type == NodeType::function) {
             auto func_node = static_cast<FunctionNode *>(col_node->value.get());
@@ -244,7 +244,7 @@ std::tuple<int, ColDefNode> USql::get_node_definition(Table *table, Node * node,
         return std::make_tuple(-1, col_def);
     } else if (func_node->function == "min" || func_node->function == "max") {
         auto col_type= ColumnType::float_type;
-        int col_len = 1;
+        size_t col_len = 1;
         auto & v = func_node->params[0];
         if (v->node_type == NodeType::database_value) {
             ColDefNode src_col_def = get_db_column_definition(table, v.get());
@@ -387,7 +387,7 @@ std::unique_ptr<Table> USql::execute_select(SelectFromTableNode &node) const {
     // create result table
     std::vector<ColDefNode> result_tbl_col_defs{};
     std::vector<int> source_table_col_index{};
-    for (int i = 0; i < node.cols_names->size(); i++) {
+    for (size_t i = 0; i < node.cols_names->size(); i++) {
         SelectColNode *col_node = &node.cols_names->operator[](i);
         auto [src_tbl_col_index, rst_tbl_col_def] = get_column_definition(table, col_node, i);
 
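source_table_col_index stays a std::vector<int> even though its loop counter is now size_t: the surrounding code uses -1 as a "no source column" sentinel (visible in the return std::make_tuple(-1, col_def) above), and an unsigned index cannot carry that sentinel. A hypothetical alternative that keeps the index unsigned is std::optional<size_t>, sketched below; this is illustrative only, not a change made by this commit:

#include <cstddef>
#include <optional>
#include <vector>

int main() {
    // -1 sentinel: the index has to stay signed.
    std::vector<int> col_index_signed = {0, 2, -1, 3};

    // std::optional<size_t>: "no source column" without a magic value.
    std::vector<std::optional<size_t>> col_index_optional = {0u, 2u, std::nullopt, 3u};

    return col_index_optional[2].has_value() ? 1 : 0;   // 0: the third entry has no source column
}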