refactor: 重构前端代码并添加日志功能

- 重命名和重构了多个文件,包括 lexer、parser 和 AST 相关代码
- 添加了日志功能,使用 LOG_* 宏替代原有的 error 和 warn 函数
- 优化了错误处理和内存分配方式
- 调整了代码结构,提高了模块化和可读性
This commit is contained in:
ZZY
2025-03-19 12:22:55 +08:00
parent 172d72b0a0
commit 05c637e594
76 changed files with 1479 additions and 310 deletions

View File

@@ -26,8 +26,8 @@ the distribution and installation instructions.
Chris Fraser / cwf@aya.yale.edu
David Hanson / drh@drhanson.net
*/
#define FRONTEND_IMPLEMENTATION
#include "../frontend.h"
#include <lib/core.h>
#include "lexer_log.h"
#include "token.h"
#include "lexer.h"
@@ -74,8 +74,9 @@ static inline int keyword_cmp(const char* name, int len) {
return -1; // Not a keyword.
}
void init_lexer(lexer_t* lexer, const char* file_name, void* stream, lexer_sread_fn sread)
{
void init_lexer(lexer_t* lexer, const char* file_name, void* stream, lexer_sread_fn sread) {
init_lib_core();
lexer->cur_ptr = lexer->end_ptr = (unsigned char*)&(lexer->buffer);
lexer->index = 1;
lexer->line = 1;
@@ -96,10 +97,10 @@ static void flush_buffer(lexer_t* lexer) {
lexer->cur_ptr = (unsigned char*)lexer->buffer;
int read_size = LEXER_BUFFER_SIZE - num;
// TODO size_t to int maybe lose precision
// TODO rt_size_t to int maybe lose precision
int got_size = lexer->sread(lexer->buffer + num, read_size, 1, read_size, lexer->stream);
if (got_size < 0) {
error("lexer read error");
LEX_ERROR("lexer read error");
} else if (got_size < read_size) {
lexer->end_ptr += got_size;
lexer->end_ptr[0] = '\0'; // EOF
@@ -107,7 +108,7 @@ static void flush_buffer(lexer_t* lexer) {
} else if (got_size == read_size) {
lexer->end_ptr += got_size;
} else {
error("lexer read error imposible got_size > read_size maybe overflow?");
LEX_ERROR("lexer read error imposible got_size > read_size maybe overflow?");
}
}
@@ -153,8 +154,10 @@ static char got_slash(unsigned char* peek) {
case 'r': return '\r';
case 't': return '\t';
case 'v': return '\v';
default: error("Unknown escape character");
default: break;
}
LEX_ERROR("Unknown escape character");
return -1;
}
static void parse_char_literal(lexer_t* lexer, tok_t* token) {
@@ -168,7 +171,7 @@ static void parse_char_literal(lexer_t* lexer, tok_t* token) {
val = *peek++;
}
if (*peek++ != '\'') error("Unclosed character literal");
if (*peek++ != '\'') LEX_ERROR("Unclosed character literal");
token->val.ch = val;
lexer->cur_ptr = peek;
token->val.have = 1;
@@ -178,7 +181,7 @@ static void parse_char_literal(lexer_t* lexer, tok_t* token) {
static void parse_string_literal(lexer_t* lexer, tok_t* token) {
unsigned char* peek = lexer->cur_ptr + 1;
// TODO string literal size check
char* dest = token->val.str = xmalloc(LEXER_MAX_TOKEN_SIZE + 1);
char* dest = token->val.str = rt._malloc(LEXER_MAX_TOKEN_SIZE + 1);
int len = 0;
while (*peek != '"') {
@@ -189,7 +192,7 @@ static void parse_string_literal(lexer_t* lexer, tok_t* token) {
*peek = got_slash(peek);
}
if (len >= LEXER_MAX_TOKEN_SIZE) error("String too long");
if (len >= LEXER_MAX_TOKEN_SIZE) LEX_ERROR("String too long");
dest[len++] = *peek++;
}
dest[len] = '\0';
@@ -431,7 +434,7 @@ void get_token(lexer_t* lexer, tok_t* token) {
lexer->line++;
tok = TOKEN_FLUSH; break;
case '#':
warn("TODO: #define\n");
LEX_WARN("Marroc does not support in lexer rather in preprocessor, it will be ignored");
goto_newline(lexer);
tok = TOKEN_FLUSH;
goto END;
@@ -458,14 +461,14 @@ void get_token(lexer_t* lexer, tok_t* token) {
case '_':
// TOKEN_IDENT
if ((*peek == 'L' && *peek == '\'') || (*peek == 'L' && *peek == '"')) {
error("unsupport wide-character char literal by `L` format");
LEX_ERROR("unsupport wide-character char literal by `L` format");
}
while (1) {
if (peek == lexer->end_ptr) {
error("unsupport outof 64 length identifier");
LEX_ERROR("unsupport outof 64 length identifier");
}
if ((*peek >= 'a' && *peek <= 'z') || (*peek >= 'A' && *peek <= 'Z') ||
(*peek == '_') || (*peek >= '0' && *peek <= '9')) {
(*peek == '_') || (*peek >= '0' && *peek <= '9')) {
peek++;
continue;
}
@@ -475,7 +478,7 @@ void get_token(lexer_t* lexer, tok_t* token) {
int res = keyword_cmp((const char*)lexer->cur_ptr, peek - (lexer->cur_ptr));
if (res == -1) {
int strlen = peek - lexer->cur_ptr;
unsigned char* str = xmalloc(strlen + 1);
unsigned char* str = rt._malloc(strlen + 1);
constant.have = 1;
constant.str = (char*)str;
for (int i = 0; i < strlen; i++) {
@@ -489,7 +492,7 @@ void get_token(lexer_t* lexer, tok_t* token) {
tok = keywords[res].tok; break;
}
default:
error("unsupport char in sourse code `%c`", *(lexer->cur_ptr));
LEX_ERROR("unsupport char in sourse code `%c`", *(lexer->cur_ptr));
break;
}
@@ -497,6 +500,7 @@ void get_token(lexer_t* lexer, tok_t* token) {
END:
token->val = constant;
token->type = tok;
LEX_DEBUG("get token `%s` (ch: %c, int: %d)", get_tok_name(token->type), token->val.ch, token->val.i);
}
// get_token may return an invalid/transient token (e.g. TOKEN_FLUSH or comments); the parser should use get_valid_token to skip them.
@@ -507,4 +511,3 @@ void get_valid_token(lexer_t* lexer, tok_t* token) {
type = token->type;
} while (type == TOKEN_FLUSH || type == TOKEN_LINE_COMMENT || type == TOKEN_BLOCK_COMMENT);
}