diff --git a/Makefile b/Makefile
index a5dd923..a843c75 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,8 @@
 LEXER_C = src/lexer/lex.yy.c
 LEXER_H = src/lexer/lex.yy.h
 CFILES = $(shell find src -name '*.c')
-BINARY = bin/cargon
+CFLAGS = -lm -lcjson -Wall -Wextra -Wno-unused-function
+BINARY = bin/argon
 
 all: $(BINARY)
 
@@ -12,11 +13,11 @@ $(LEXER_C) $(LEXER_H): $(LEXER_SRC)
 
 $(BINARY): $(CFILES) $(LEXER_C) $(LEXER_H)
 	mkdir -p bin
-	gcc -O3 -o $(BINARY) $(CFILES) -lm -Wall -Wextra -Wno-unused-function
+	gcc -O3 -s -o $(BINARY) $(CFILES) $(CFLAGS)
 
 debug: $(CFILES) $(LEXER_C) $(LEXER_H)
 	mkdir -p bin
-	gcc -g -O0 -o $(BINARY) $(CFILES) $(CFLAGS)
+	gcc -g -O0 -o $(BINARY) $(CFILES) $(CFLAGS)
 
 clean:
 	rm -rf bin
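For context: the recipe under `$(LEXER_C) $(LEXER_H): $(LEXER_SRC)` sits outside this hunk, but a scanner of this shape is typically generated with something along these lines (the exact invocation is an assumption, not shown in the patch):

    flex --header-file=src/lexer/lex.yy.h -o src/lexer/lex.yy.c src/lexer/lex.l

The lex.l below relies on yyget_extra(), which additionally requires `%option reentrant` (and ideally `%option extra-type="LexerState *"` instead of the manual cast in GET_STATE) in the definitions section this diff does not touch.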
+"if" { GET_STATE; add_token(TOKENS,TOKEN_IF, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"else" { GET_STATE; add_token(TOKENS,TOKEN_ELSE, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"while" { GET_STATE; add_token(TOKENS,TOKEN_WHILE, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"forever" { GET_STATE; add_token(TOKENS,TOKEN_FOREVER, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"for" { GET_STATE; add_token(TOKENS,TOKEN_FOR, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"break" { GET_STATE; add_token(TOKENS,TOKEN_BREAK, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"continue" { GET_STATE; add_token(TOKENS,TOKEN_CONTINUE, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"return" { GET_STATE; add_token(TOKENS,TOKEN_RETURN, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"let" { GET_STATE; add_token(TOKENS,TOKEN_LET, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"import" { GET_STATE; add_token(TOKENS,TOKEN_IMPORT, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"from" { GET_STATE; add_token(TOKENS,TOKEN_FROM, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"do" { GET_STATE; add_token(TOKENS,TOKEN_DO, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"true" { GET_STATE; add_token(TOKENS,TOKEN_TRUE, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"false" { GET_STATE; add_token(TOKENS,TOKEN_FALSE, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"null" { GET_STATE; add_token(TOKENS,TOKEN_NULL, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"delete" { GET_STATE; add_token(TOKENS,TOKEN_DELETE, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"not" { GET_STATE; add_token(TOKENS,TOKEN_NOT, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"in" { GET_STATE; add_token(TOKENS,TOKEN_IN, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"try" { GET_STATE; add_token(TOKENS,TOKEN_TRY, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"catch" { GET_STATE; add_token(TOKENS,TOKEN_CATCH, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } + +"(" { GET_STATE; add_token(TOKENS,TOKEN_LPAREN, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +")" { GET_STATE; add_token(TOKENS,TOKEN_RPAREN, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"[" { GET_STATE; add_token(TOKENS,TOKEN_LBRACKET, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"]" { GET_STATE; add_token(TOKENS,TOKEN_RBRACKET, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"{" { GET_STATE; add_token(TOKENS,TOKEN_LBRACE, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } +"}" { GET_STATE; add_token(TOKENS,TOKEN_RBRACE, yytext, LINE_NO, COLUMN_NO); ADD_TO_COLUMN; } + [a-zA-Z_][a-zA-Z0-9_]* { GET_STATE - add_token(TOKEN_IDENTIFIER, yytext, LINE_NO, COLUMN_NO); - GET_ADD_COLUMN + add_token(TOKENS,TOKEN_IDENTIFIER, yytext, LINE_NO, COLUMN_NO); + ADD_TO_COLUMN } -"." { - GET_STATE - add_token(TOKEN_DOT, yytext, LINE_NO, COLUMN_NO); - GET_ADD_COLUMN -} +"." {GET_STATE;add_token(TOKENS,TOKEN_DOT, yytext, LINE_NO, COLUMN_NO);ADD_TO_COLUMN} +"," {GET_STATE;add_token(TOKENS,TOKEN_COMMA, yytext, LINE_NO, COLUMN_NO);ADD_TO_COLUMN} +":" {GET_STATE;add_token(TOKENS,TOKEN_COLON, yytext, LINE_NO, COLUMN_NO);ADD_TO_COLUMN} \n { GET_STATE - add_token(TOKEN_NEW_LINE, yytext, LINE_NO, COLUMN_NO); + add_token(TOKENS,TOKEN_NEW_LINE, yytext, LINE_NO, COLUMN_NO); COLUMN_NO = 1; } [ \t]+ { GET_STATE - GET_ADD_COLUMN // Advance column for whitespace + if (COLUMN_NO == 1){ + add_token(TOKENS,TOKEN_INDENT, yytext, LINE_NO, COLUMN_NO); + } + ADD_TO_COLUMN // Advance column for whitespace } . 
diff --git a/src/lexer/lexer.c b/src/lexer/lexer.c
index 58eef98..bd8b8a1 100644
--- a/src/lexer/lexer.c
+++ b/src/lexer/lexer.c
@@ -1,31 +1,26 @@
 #include "lex.yy.h"
-#include "token.h"
 #include "lexer.h"
+#include "../string/string.h"
+#include <stdlib.h>
 
-int lexer() {
+void lexer(LexerState state) {
     yyscan_t scanner;
-    LexerState state = { "file1.src", 1 };
-    const char *input = "let x = 10";
+    // Debug output: print the input unescaped when it parses as a quoted string.
+    char *unquoted = unquote(state.content);
+    if (unquoted) {
+        printf("%s\n", unquoted);
+        free(unquoted);
+    }
 
     yylex_init(&scanner);
-    // Set the extra data *before* scanning
     yyset_extra(&state, scanner);
 
-    void* buffer = yy_scan_string(input, scanner);
+    void* buffer = yy_scan_string(state.content, scanner);
     yy_switch_to_buffer(buffer, scanner);
 
-    yylex(scanner); // This fills the token array
+    yylex(scanner);
 
     yy_delete_buffer(buffer, scanner);
     yylex_destroy(scanner);
-
-    // print tokens etc.
-    for (int i = 0; i < token_count; i++) {
-        printf("Token(type=%d, value='%s')\n", tokens[i].type, tokens[i].value);
-    }
-
-    free_tokens();
-    return 0;
 }
\ No newline at end of file
diff --git a/src/lexer/lexer.h b/src/lexer/lexer.h
index 5646f0e..ec42dda 100644
--- a/src/lexer/lexer.h
+++ b/src/lexer/lexer.h
@@ -1,7 +1,11 @@
+#include "token.h"
+
 typedef struct {
-    const char *filename;
+    const char *path;
+    const char *content;
     int current_column;
+    TokenStruct* tokens;
     // add more fields as needed
 } LexerState;
 
-int lexer();
+void lexer(LexerState state);
diff --git a/src/lexer/token.c b/src/lexer/token.c
index 2e1a207..41b3d5b 100644
--- a/src/lexer/token.c
+++ b/src/lexer/token.c
@@ -4,32 +4,44 @@
 
 #define INITIAL_CAPACITY 64
 
-Token* tokens = NULL;
-int token_count = 0;
-static int token_capacity = 0;
-
-void add_token(TokenType type, const char* value, int line, int column) {
-    if (tokens == NULL) {
-        token_capacity = INITIAL_CAPACITY;
-        tokens = malloc(sizeof(Token) * token_capacity);
-    } else if (token_count >= token_capacity) {
-        token_capacity *= 2;
-        tokens = realloc(tokens, sizeof(Token) * token_capacity);
+TokenStruct* init_token(void) {
+    TokenStruct *tokenStruct = malloc(sizeof(TokenStruct));
+    if (tokenStruct == NULL) {
+        // handle malloc failure
+        return NULL;
     }
-
-    tokens[token_count].type = type;
-    tokens[token_count].value = strdup(value);
-    tokens[token_count].line = line;
-    tokens[token_count].column = column;
-    token_count++;
+    tokenStruct->count = 0;
+    tokenStruct->capacity = INITIAL_CAPACITY;
+    tokenStruct->tokens = malloc(sizeof(Token) * INITIAL_CAPACITY);
+    if (tokenStruct->tokens == NULL) {
+        // handle malloc failure
+        free(tokenStruct);
+        return NULL;
+    }
+    return tokenStruct;
 }
 
-void free_tokens() {
-    for (int i = 0; i < token_count; ++i) {
-        free(tokens[i].value);
+void add_token(TokenStruct* token, TokenType type, const char* value, int line, int column) {
+    if (token->count >= token->capacity) {
+        int new_capacity = token->capacity * 2;
+        Token* grown = realloc(token->tokens, sizeof(Token) * new_capacity);
+        if (grown == NULL) return; // out of memory: drop the token rather than crash
+        token->tokens = grown;
+        token->capacity = new_capacity;
     }
-    free(tokens);
-    tokens = NULL;
-    token_count = 0;
-    token_capacity = 0;
+
+    token->tokens[token->count].type = type;
+    token->tokens[token->count].value = strdup(value);
+    token->tokens[token->count].line = line;
+    token->tokens[token->count].column = column;
+    token->count++;
+}
+
+void free_tokens(TokenStruct* token) {
+    for (int i = 0; i < token->count; ++i) {
+        free(token->tokens[i].value);
+    }
+    free(token->tokens);
+    token->tokens = NULL;
+    token->count = 0;
+    token->capacity = 0;
+    free(token);
 }
\ No newline at end of file
diff --git a/src/lexer/token.h b/src/lexer/token.h
index b2d6131..cb0edb2 100644
--- a/src/lexer/token.h
+++ b/src/lexer/token.h
@@ -4,26 +4,83 @@
 typedef enum {
     TOKEN_STRING,
     TOKEN_NUMBER,
+    TOKEN_FRACTION,
     TOKEN_IDENTIFIER,
     TOKEN_KEYWORD,
-    TOKEN_DOT,
     TOKEN_NEW_LINE,
+    TOKEN_INDENT,
+
+    // Operators
+    TOKEN_AND,       // &&
+    TOKEN_OR,        // ||
+    TOKEN_NOT_IN,    // not in
+    TOKEN_LE,        // <=
+    TOKEN_GE,        // >=
+    TOKEN_LT,        // <
+    TOKEN_GT,        // >
+    TOKEN_NE,        // !=
+    TOKEN_EQ,        // ==
+    TOKEN_ASSIGN,    // =
+    TOKEN_PLUS,      // +
+    TOKEN_MINUS,     // -
+    TOKEN_MODULO,    // %
+    TOKEN_STAR,      // *
+    TOKEN_FLOORDIV,  // //
+    TOKEN_SLASH,     // /
+    TOKEN_CARET,     // ^
+
+    // Keywords
+    TOKEN_IF,
+    TOKEN_ELSE,
+    TOKEN_WHILE,
+    TOKEN_FOREVER,
+    TOKEN_FOR,
+    TOKEN_BREAK,
+    TOKEN_CONTINUE,
+    TOKEN_RETURN,
+    TOKEN_LET,
+    TOKEN_IMPORT,
+    TOKEN_FROM,
+    TOKEN_DO,
+    TOKEN_TRUE,
+    TOKEN_FALSE,
+    TOKEN_NULL,
+    TOKEN_DELETE,
+    TOKEN_NOT,
+    TOKEN_IN,
+    TOKEN_TRY,
+    TOKEN_CATCH,
+
+    // Parentheses, brackets, and braces
+    TOKEN_LPAREN,    // (
+    TOKEN_RPAREN,    // )
+    TOKEN_LBRACKET,  // [
+    TOKEN_RBRACKET,  // ]
+    TOKEN_LBRACE,    // {
+    TOKEN_RBRACE,    // }
+
+    TOKEN_DOT,
+    TOKEN_COMMA,
+    TOKEN_COLON,
 } TokenType;
 
 typedef struct {
     TokenType type;
-    char* value;
     int line;
     int column;
+    char* value;
 } Token;
 
-extern int token_count;
+typedef struct {
+    int count;
+    int capacity;
+    Token* tokens;
+} TokenStruct;
 
-extern Token* tokens;
+TokenStruct* init_token(void);
+void add_token(TokenStruct* token, TokenType type, const char* value, int line, int column);
 
-void add_token(TokenType type, const char* value, int line, int column);
-
-void free_tokens();
+void free_tokens(TokenStruct* token);
 
 #endif
\ No newline at end of file
diff --git a/src/main.c b/src/main.c
index 4705d7e..41973c9 100644
--- a/src/main.c
+++ b/src/main.c
@@ -1,15 +1,54 @@
-#include "number/number.h"
 #include "lexer/lexer.h"
 
-void initialize() {
-    initNumber();
-}
+#include <stdio.h>
+#include <stdlib.h>
 
-void cleanup() {
-    cleanupNumber();
+char* read_file_as_text(const char* filename) {
+    FILE *file = fopen(filename, "r");
+    if (!file) {
+        perror("Failed to open file");
+        return NULL;
+    }
+
+    // Seek to the end to find the file size
+    fseek(file, 0, SEEK_END);
+    long length = ftell(file);
+    if (length < 0) {
+        perror("Failed to determine file size");
+        fclose(file);
+        return NULL;
+    }
+    rewind(file); // Go back to the beginning
+
+    // Allocate buffer (+1 for null terminator)
+    char *buffer = malloc(length + 1);
+    if (!buffer) {
+        perror("Failed to allocate memory");
+        fclose(file);
+        return NULL;
+    }
+
+    // Read the whole file into the buffer
+    size_t read_size = fread(buffer, 1, length, file);
+    buffer[read_size] = '\0'; // Null-terminate
+
+    fclose(file);
+    return buffer;
 }
 
 int main() {
-    lexer();
+    const char *path = "test.ar";
+
+    char *content = read_file_as_text(path);
+    if (!content) return 1;
+
+    TokenStruct* tokenStruct = init_token();
+    if (!tokenStruct) {
+        free(content);
+        return 1;
+    }
+
+    LexerState state = {
+        path,
+        content,
+        1,
+        tokenStruct
+    };
+    lexer(state);
+    free(content);
+    for (int i = 0; i < tokenStruct->count; i++) {
+        printf("%d\n", tokenStruct->tokens[i].type);
+    }
+    free_tokens(tokenStruct);
     return 0;
 }
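The token list is now a heap-allocated TokenStruct that owns strdup'd copies of every lexeme, so the lifecycle is init → add → free. A standalone usage sketch (the include path and sample values are illustrative):

    #include "src/lexer/token.h"
    #include <stdio.h>

    int main(void) {
        TokenStruct *list = init_token();
        if (list == NULL) return 1;

        add_token(list, TOKEN_LET, "let", 1, 1);       /* value is copied internally */
        add_token(list, TOKEN_IDENTIFIER, "x", 1, 5);
        add_token(list, TOKEN_ASSIGN, "=", 1, 7);
        add_token(list, TOKEN_NUMBER, "10", 1, 9);

        for (int i = 0; i < list->count; i++)
            printf("%d '%s'\n", list->tokens[i].type, list->tokens[i].value);

        free_tokens(list);  /* releases the values, the array, and the struct itself */
        return 0;
    }

The doubling growth in add_token() gives amortized O(1) appends, which suits a lexer that does not know the token count up front.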
diff --git a/src/number/number.c b/src/number/number.c
deleted file mode 100644
index 071473b..0000000
--- a/src/number/number.c
+++ /dev/null
@@ -1,108 +0,0 @@
-#include "number.h"
-#include "../string/string.h"
-
-#include <regex.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <math.h>
-
-regex_t numberCompile;
-
-void initNumber()
-{
-    int compileError;
-    compileError = regcomp(&numberCompile, "^( *)(-)?(((([0-9]+(\\.[0-9]+)?)|(\\.[0-9]+))(e((\\-|\\+)?([0-9]+)))?))( *)$", REG_EXTENDED);
-    if (compileError)
-    {
-        char errorBuffer[1024];
-        regerror(compileError, &numberCompile, errorBuffer, sizeof(errorBuffer));
-        fprintf(stderr, "Error compiling regex: %s\n", errorBuffer);
-        exit(1);
-    }
-}
-
-void cleanupNumber()
-{
-    regfree(&numberCompile);
-}
-
-int gcd(int64_t a, int64_t b)
-{
-    while (b != 0)
-    {
-        int temp = b;
-        b = a % b;
-        a = temp;
-    }
-    return a;
-}
-
-void simplifyFraction(int64_t *numerator, int64_t *denominator)
-{
-    int common_divisor = gcd(*numerator, *denominator);
-    *numerator /= common_divisor;
-    *denominator /= common_divisor;
-}
-
-void doubleToFraction(double num, int64_t *numerator, uint64_t *denominator) {
-    int currentSign = (num < 0) ? -1 : 1;
-    num = fabs(num);
-
-    long double tolerance = 1.0e-10;
-    long double h1 = 1, h2 = 0, k1 = 0, k2 = 1;
-    long double b = num;
-    do {
-        long double a = floor(b);
-        long double aux = h1;
-        h1 = a * h1 + h2;
-        h2 = aux;
-        aux = k1;
-        k1 = a * k1 + k2;
-        k2 = aux;
-        b = 1 / (b - a);
-    } while (fabsl(num - h1 / k1) > num * tolerance);
-
-    *numerator = (int64_t)(h1 * currentSign);
-    *denominator = (uint64_t)k1;
-}
-
-struct number translateNumber(char *code)
-{
-    char *codeClone = cloneString(code);
-    stripString(codeClone, WHITE_SPACE);
-    int reti = regexec(&numberCompile, codeClone, 0, NULL, 0);
-    if (reti == REG_NOMATCH)
-    {
-        return (struct number){
-            .numerator = 0,
-            .denominator = 0
-        };
-    }
-    struct number num;
-    num.numerator = 0;
-    num.denominator = 1;
-
-    double coefficient = 0;
-    int exponent = 0;
-
-    char *e = strchr(codeClone, 'e');
-    if (e) {
-        *e = '\0';
-        e++;
-        if (*e == '+') e++;
-        exponent = atoi(e);
-    }
-
-    coefficient = atof(codeClone);
-
-    doubleToFraction(coefficient, &num.numerator, &num.denominator);
-
-    if (exponent > 0) {
-        num.numerator *= (int64_t)pow(10, exponent);
-    } else if (exponent < 0) {
-        num.denominator *= (int64_t)pow(10, -exponent);
-    }
-
-    return num;
-}
diff --git a/src/number/number.h b/src/number/number.h
deleted file mode 100644
index 8fff0b1..0000000
--- a/src/number/number.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef NUMBER_H
-#define CLONESTRING_HNUMBER_H
-
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdio.h>
-
-struct number {
-    int64_t numerator;
-    uint64_t denominator;
-};
-
-struct number translateNumber(char *code);
-
-void initNumber();
-void cleanupNumber();
-
-#endif // NUMBER_H
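Deleting src/number removes the regex-driven literal parser: translateNumber() validated a decimal with a POSIX regex, split off the exponent, and ran a continued-fraction expansion (doubleToFraction) to recover a rational, so 0.75 came back as 3/4. The lexer now carries the same information itself — the decimal/exponent pattern feeds TOKEN_NUMBER and the explicit [0-9]+/[0-9]+ pattern feeds TOKEN_FRACTION — presumably leaving rational conversion to a later phase.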
diff --git a/src/string/string.c b/src/string/string.c
index c0c8f0b..1a969d7 100644
--- a/src/string/string.c
+++ b/src/string/string.c
@@ -3,6 +3,65 @@
 #include <string.h>
 #include <stdlib.h>
 #include <ctype.h>
+#include <cjson/cJSON.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+char *swap_quotes(const char *input) {
+    size_t len = strlen(input);
+    char *result = malloc(len + 1);
+    if (!result) return NULL;
+
+    for (size_t i = 0; i < len; ++i) {
+        if (input[i] == '"') result[i] = '\'';
+        else if (input[i] == '\'') result[i] = '"';
+        else result[i] = input[i];
+    }
+    result[len] = '\0';
+    return result;
+}
+
+char *unquote(const char *str) {
+    if (*str == '\0') return NULL;
+
+    char quote = str[0];
+    char *swapped = NULL;
+    char *unescaped = NULL;
+
+    // cJSON only understands double-quoted strings, so temporarily swap
+    // quote characters for single-quoted input.
+    if (quote == '\'') {
+        swapped = swap_quotes(str);
+        if (!swapped) return NULL;
+        str = swapped;
+    }
+
+    cJSON *json = cJSON_Parse(str);
+    if (!json || !cJSON_IsString(json)) {
+        cJSON_Delete(json); // safe on NULL
+        if (swapped) free(swapped);
+        return NULL;
+    }
+
+    // Copy unescaped string before freeing JSON object
+    const char *decoded = cJSON_GetStringValue(json);
+    if (!decoded) {
+        cJSON_Delete(json);
+        if (swapped) free(swapped);
+        return NULL;
+    }
+
+    unescaped = strdup(decoded);
+    cJSON_Delete(json);
+    if (swapped) free(swapped);
+    if (!unescaped) return NULL;
+
+    // If input was single-quoted, swap quotes back in the output
+    if (quote == '\'') {
+        char *final = swap_quotes(unescaped);
+        free(unescaped);
+        return final;
+    }
+
+    return unescaped;
+}
 
 const char *WHITE_SPACE = " \t\n\r\f\v";
diff --git a/src/string/string.h b/src/string/string.h
index bd3f07e..bd89403 100644
--- a/src/string/string.h
+++ b/src/string/string.h
@@ -7,4 +7,9 @@
 char* cloneString(char* str);
 void stripString(char* str, const char* chars);
 
+char *swap_quotes(const char *input);
+
+char *unquote(const char *str);
+
+
 #endif // CLONESTRING_H
diff --git a/src/translate/ArObject/ArObject.c b/src/translate/ArObject/ArObject.c
deleted file mode 100644
index c14ca5d..0000000
--- a/src/translate/ArObject/ArObject.c
+++ /dev/null
@@ -1,6 +0,0 @@
-
-struct ArObject
-{
-
-};
-
diff --git a/src/translate/ArObject/ArObject.h b/src/translate/ArObject/ArObject.h
deleted file mode 100644
index e69de29..0000000
diff --git a/src/translate/ArString/ArString.c b/src/translate/ArString/ArString.c
deleted file mode 100644
index e69de29..0000000
diff --git a/test.ar b/test.ar
new file mode 100644
index 0000000..d6dac33
--- /dev/null
+++ b/test.ar
@@ -0,0 +1,54 @@
+import "url.ar" as url
+
+let __makeFile(name, type, data) = do
+    let File = {name: name, type: type, data: data}
+    let save(path) = do
+        let file = file.write(path)
+        file.buffer(data)
+        file.close()
+    File.save = save
+    return File
+
+let __multipart(req, res) = do
+    let boundary = buffer().from(req.headers["content-type"].splitN("boundary=", 2)[1])
+    let newLineSplit = buffer().from("\r\n\r\n")
+    let parts = req.buffer.body.split(boundary)
+    for (i from 0 to parts.length) do
+        let str = parts[i].to("string")
+        if (str == "" || str == "--" || str == "--\r\n") continue
+        str = null
+        let headers = {}
+        let lines = parts[i].splitN(newLineSplit, 2)
+        let headerLines = lines[0].to("string").split("\r\n")
+        for (j from 0 to headerLines.length) do
+            let header = headerLines[j].splitN(": ", 2)
+            if (header.length != 2) continue
+            headers[header[0].lower()] = header[1]
+        if (lines.length != 2) continue
+        let body = lines[1]
+        if (i != parts.length-1) do
+            body = body.slice(0, body.length-4)
+        if ("content-disposition" in headers) do
+            let disposition = headers["content-disposition"].split("; ")
+            if (disposition[0] == "form-data") do
+                let name = json.parse(disposition[1].splitN("=", 2)[1])
+                if (disposition.length >= 3) do
+                    let filename = json.parse(disposition[2].splitN("=", 2)[1])
+                    req.files[name] = __makeFile(filename, headers["content-type"], body)
+                else do
+                    req.formdata[name] = body.to("string")
+    res.next()
+
+
+let formdata(req, res) = do
+    req.formdata = {}
+    req.files = {}
+
+    if (req.method != "POST") return res.next()
+    if ("content-type" not in req.headers) return res.next()
+    let loweredContentType = req.headers["content-type"].lower()
+    if (loweredContentType.startswith("multipart/form-data")) return __multipart(req, res)
+    else if (loweredContentType.startswith("application/x-www-form-urlencoded")) req.formdata = url.decodeURLQuery(req.buffer.body.to("string"))
+    else if (loweredContentType.startswith("application/json")) req.formdata = json.parse(req.buffer.body.to("string"))
+    else req.files.file = __makeFile("file", req.headers["content-type"], req.buffer.body)
+    res.next()
\ No newline at end of file
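unquote() leans on cJSON for escape handling rather than a hand-rolled decoder: double-quoted input is already a valid JSON string, and single-quoted input is made one by swapping quote characters before parsing and swapping them back afterwards. A small usage sketch — the include path and inputs are illustrative; link with -lcjson as the Makefile does:

    #include "src/string/string.h"
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        /* Double-quoted: parsed directly as a JSON string; \n becomes a real newline. */
        char *a = unquote("\"hello\\nworld\"");
        /* Single-quoted: quotes swapped so cJSON can parse it, then swapped back. */
        char *b = unquote("'she said \"hi\"'");
        if (a) { printf("%s\n", a); free(a); }   /* hello / world on two lines */
        if (b) { printf("%s\n", b); free(b); }   /* she said "hi" */
        return 0;
    }

Callers own the returned buffer and must free() it; NULL signals either a parse failure or out-of-memory.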