new code manipulation experiments, fix to relex rule
parent 9e151fd823
commit 7a12c71a95

File diff suppressed because one or more lines are too long
@@ -2338,9 +2338,7 @@ CUSTOM_COMMAND_SIG(replace_in_range){

     if (!query_user_string(app, &with)) return;

-    String r, w;
-    r = replace.string;
-    w = with.string;
+    String r = replace.string, w = with.string;

     uint32_t access = AccessOpen;
     View_Summary view = app->get_active_view(app, access);
@@ -224,15 +224,15 @@ to it's file or loading the buffer from the file both act as sync points.) */
 ENUM(uint32_t, Dirty_State){
     /* DOC(DirtyState_UpToDate indicates that there are no unsaved changes and
     the underlying system file still agrees with the buffer's state.) */
-    DirtyState_UpToDate,
+    DirtyState_UpToDate = 0,

     /* DOC(DirtyState_UnsavedChanges indicates that there have been changes in the
     buffer since the last sync point.) */
-    DirtyState_UnsavedChanges,
+    DirtyState_UnsavedChanges = 1,

     /* DOC(DirtyState_UnsavedChanges indicates that the underlying file has been
     edited since the last sync point with the buffer.) */
-    DirtyState_UnloadedChanges
+    DirtyState_UnloadedChanges = 2
 };

 /* DOC(A Seek_Boundary_Flag field specifies a set of "boundary" types used in seeks for the
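The explicit initializers in Dirty_State (and in the lexer enums further down) only pin down the values the enumerators already received implicitly, so the numbering does not change for any existing code. A minimal standalone check of that claim, not taken from the 4coder sources and with the ENUM macro replaced by a plain typed enum:

// Standalone sketch: the explicit values match what implicit enumeration
// would have assigned, so stored values keep their meaning.
#include <cstdint>

enum Dirty_State_Sketch : uint32_t{
    DirtyState_UpToDate = 0,
    DirtyState_UnsavedChanges = 1,
    DirtyState_UnloadedChanges = 2
};

static_assert(DirtyState_UpToDate == 0, "matches old implicit value");
static_assert(DirtyState_UnsavedChanges == 1, "matches old implicit value");
static_assert(DirtyState_UnloadedChanges == 2, "matches old implicit value");

int main(){ return 0; }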
@@ -276,9 +276,6 @@ cpp_pp_directive_to_state(Cpp_Token_Type type){
     token_array_out->count = token_i; \
     *S_ptr = S; S_ptr->__pc__ = -1; return(n); }

-enum Lex_Result{
-};
-
 FCPP_INTERNAL Cpp_Lex_Result
 cpp_lex_nonalloc_null_end_no_limit(Cpp_Lex_Data *S_ptr, char *chunk, int32_t size,
                                    Cpp_Token_Array *token_array_out){
@@ -20,216 +20,218 @@
 actually output by the lexer, but exist because parsers will also make use of token
 types in their own output.) */
 ENUM(uint32_t, Cpp_Token_Type){
-    CPP_TOKEN_JUNK,
-    CPP_TOKEN_COMMENT,
+    // IGNORE THIS
+    CPP_TOKEN_JUNK = 0,
+    CPP_TOKEN_COMMENT = 1,

-    CPP_PP_INCLUDE,
-    CPP_PP_DEFINE,
-    CPP_PP_UNDEF,
-    CPP_PP_IF,
-    CPP_PP_IFDEF,
-    CPP_PP_IFNDEF,
-    CPP_PP_ELSE,
-    CPP_PP_ELIF,
-    CPP_PP_ENDIF,
-    CPP_PP_ERROR,
-    CPP_PP_IMPORT,
-    CPP_PP_USING,
-    CPP_PP_LINE,
-    CPP_PP_PRAGMA,
-    CPP_PP_STRINGIFY,
-    CPP_PP_CONCAT,
-    CPP_PP_UNKNOWN,
+    CPP_PP_INCLUDE = 2,
+    CPP_PP_DEFINE = 3,
+    CPP_PP_UNDEF = 4,
+    CPP_PP_IF = 5,
+    CPP_PP_IFDEF = 6,
+    CPP_PP_IFNDEF = 7,
+    CPP_PP_ELSE = 8,
+    CPP_PP_ELIF = 9,
+    CPP_PP_ENDIF = 10,
+    CPP_PP_ERROR = 11,
+    CPP_PP_IMPORT = 12,
+    CPP_PP_USING = 13,
+    CPP_PP_LINE = 14,
+    CPP_PP_PRAGMA = 15,
+    CPP_PP_STRINGIFY = 16,
+    CPP_PP_CONCAT = 17,
+    CPP_PP_UNKNOWN = 18,

-    CPP_PP_DEFINED,
-    CPP_PP_INCLUDE_FILE,
-    CPP_PP_ERROR_MESSAGE,
+    CPP_PP_DEFINED = 19,
+    CPP_PP_INCLUDE_FILE = 20,
+    CPP_PP_ERROR_MESSAGE = 21,

-    CPP_TOKEN_KEY_TYPE,
-    CPP_TOKEN_KEY_MODIFIER,
-    CPP_TOKEN_KEY_QUALIFIER,
+    CPP_TOKEN_KEY_TYPE = 22,
+    CPP_TOKEN_KEY_MODIFIER = 23,
+    CPP_TOKEN_KEY_QUALIFIER = 24,
     /* DOC(This type is not stored in token output from the lexer.) */
-    CPP_TOKEN_KEY_OPERATOR,
-    CPP_TOKEN_KEY_CONTROL_FLOW,
-    CPP_TOKEN_KEY_CAST,
-    CPP_TOKEN_KEY_TYPE_DECLARATION,
-    CPP_TOKEN_KEY_ACCESS,
-    CPP_TOKEN_KEY_LINKAGE,
-    CPP_TOKEN_KEY_OTHER,
+    CPP_TOKEN_KEY_OPERATOR = 25,
+    CPP_TOKEN_KEY_CONTROL_FLOW = 26,
+    CPP_TOKEN_KEY_CAST = 27,
+    CPP_TOKEN_KEY_TYPE_DECLARATION = 28,
+    CPP_TOKEN_KEY_ACCESS = 29,
+    CPP_TOKEN_KEY_LINKAGE = 30,
+    CPP_TOKEN_KEY_OTHER = 31,

-    CPP_TOKEN_IDENTIFIER,
-    CPP_TOKEN_INTEGER_CONSTANT,
-    CPP_TOKEN_CHARACTER_CONSTANT,
-    CPP_TOKEN_FLOATING_CONSTANT,
-    CPP_TOKEN_STRING_CONSTANT,
-    CPP_TOKEN_BOOLEAN_CONSTANT,
+    CPP_TOKEN_IDENTIFIER = 32,
+    CPP_TOKEN_INTEGER_CONSTANT = 33,
+    CPP_TOKEN_CHARACTER_CONSTANT = 34,
+    CPP_TOKEN_FLOATING_CONSTANT = 35,
+    CPP_TOKEN_STRING_CONSTANT = 36,
+    CPP_TOKEN_BOOLEAN_CONSTANT = 37,

-    CPP_TOKEN_STATIC_ASSERT,
+    CPP_TOKEN_STATIC_ASSERT = 38,

-    CPP_TOKEN_BRACKET_OPEN,
-    CPP_TOKEN_BRACKET_CLOSE,
-    CPP_TOKEN_PARENTHESE_OPEN,
-    CPP_TOKEN_PARENTHESE_CLOSE,
-    CPP_TOKEN_BRACE_OPEN,
-    CPP_TOKEN_BRACE_CLOSE,
-    CPP_TOKEN_SEMICOLON,
-    CPP_TOKEN_ELLIPSIS,
+    CPP_TOKEN_BRACKET_OPEN = 39,
+    CPP_TOKEN_BRACKET_CLOSE = 40,
+    CPP_TOKEN_PARENTHESE_OPEN = 41,
+    CPP_TOKEN_PARENTHESE_CLOSE = 42,
+    CPP_TOKEN_BRACE_OPEN = 43,
+    CPP_TOKEN_BRACE_CLOSE = 44,
+    CPP_TOKEN_SEMICOLON = 45,
+    CPP_TOKEN_ELLIPSIS = 46,

     /* DOC(This is an 'ambiguous' token type because it requires
     parsing to determine the full nature of the token.) */
-    CPP_TOKEN_STAR,
+    CPP_TOKEN_STAR = 47,

     /* DOC(This is an 'ambiguous' token type because it requires
     parsing to determine the full nature of the token.) */
-    CPP_TOKEN_AMPERSAND,
+    CPP_TOKEN_AMPERSAND = 48,

     /* DOC(This is an 'ambiguous' token type because it requires
     parsing to determine the full nature of the token.) */
-    CPP_TOKEN_TILDE,
+    CPP_TOKEN_TILDE = 49,

     /* DOC(This is an 'ambiguous' token type because it requires
     parsing to determine the full nature of the token.) */
-    CPP_TOKEN_PLUS,
+    CPP_TOKEN_PLUS = 50,

     /* DOC(This is an 'ambiguous' token type because it requires
     parsing to determine the full nature of the token.) */
-    CPP_TOKEN_MINUS,
+    CPP_TOKEN_MINUS = 51,

     /* DOC(This is an 'ambiguous' token type because it requires
     parsing to determine the full nature of the token.) */
-    CPP_TOKEN_INCREMENT,
+    CPP_TOKEN_INCREMENT = 52,

     /* DOC(This is an 'ambiguous' token type because it requires
     parsing to determine the full nature of the token.) */
-    CPP_TOKEN_DECREMENT,
+    CPP_TOKEN_DECREMENT = 53,

     // NOTE(allen): Precedence 1, LtoR
-    CPP_TOKEN_SCOPE,
+    CPP_TOKEN_SCOPE = 54,

     // NOTE(allen): Precedence 2, LtoR
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_POSTINC,
+    CPP_TOKEN_POSTINC = 55,
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_POSTDEC,
+    CPP_TOKEN_POSTDEC = 56,
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_FUNC_STYLE_CAST,
-    CPP_TOKEN_CPP_STYLE_CAST,
+    CPP_TOKEN_FUNC_STYLE_CAST = 57,
+    CPP_TOKEN_CPP_STYLE_CAST = 58,
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_CALL,
+    CPP_TOKEN_CALL = 59,
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_INDEX,
-    CPP_TOKEN_DOT,
-    CPP_TOKEN_ARROW,
+    CPP_TOKEN_INDEX = 60,
+    CPP_TOKEN_DOT = 61,
+    CPP_TOKEN_ARROW = 62,

     // NOTE(allen): Precedence 3, RtoL

     /* DOC(This token is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_PREINC,
+    CPP_TOKEN_PREINC = 63,
     /* DOC(This token is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_PREDEC,
+    CPP_TOKEN_PREDEC = 64,
     /* DOC(This token is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_POSITIVE,
+    CPP_TOKEN_POSITIVE = 65,
     /* DOC(This token is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_NEGAITVE,
-    CPP_TOKEN_NOT,
+    CPP_TOKEN_NEGAITVE = 66,
+    CPP_TOKEN_NOT = 67,

     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_BIT_NOT,
+    CPP_TOKEN_BIT_NOT = 68,

     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_CAST,
+    CPP_TOKEN_CAST = 69,
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_DEREF,
+    CPP_TOKEN_DEREF = 70,
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_TYPE_PTR,
+    CPP_TOKEN_TYPE_PTR = 71,
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_ADDRESS,
+    CPP_TOKEN_ADDRESS = 72,
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_TYPE_REF,
-    CPP_TOKEN_SIZEOF,
-    CPP_TOKEN_ALIGNOF,
-    CPP_TOKEN_DECLTYPE,
-    CPP_TOKEN_TYPEID,
-    CPP_TOKEN_NEW,
-    CPP_TOKEN_DELETE,
+    CPP_TOKEN_TYPE_REF = 73,
+    CPP_TOKEN_SIZEOF = 74,
+    CPP_TOKEN_ALIGNOF = 75,
+    CPP_TOKEN_DECLTYPE = 76,
+    CPP_TOKEN_TYPEID = 77,
+    CPP_TOKEN_NEW = 78,
+    CPP_TOKEN_DELETE = 79,
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_NEW_ARRAY,
+    CPP_TOKEN_NEW_ARRAY = 80,
     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_DELETE_ARRAY,
+    CPP_TOKEN_DELETE_ARRAY = 81,

     // NOTE(allen): Precedence 4, LtoR
-    CPP_TOKEN_PTRDOT,
-    CPP_TOKEN_PTRARROW,
+    CPP_TOKEN_PTRDOT = 82,
+    CPP_TOKEN_PTRARROW = 83,

     // NOTE(allen): Precedence 5, LtoR

     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_MUL,
-    CPP_TOKEN_DIV,
-    CPP_TOKEN_MOD,
+    CPP_TOKEN_MUL = 84,
+    CPP_TOKEN_DIV = 85,
+    CPP_TOKEN_MOD = 86,

     // NOTE(allen): Precedence 6, LtoR

     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_ADD,
+    CPP_TOKEN_ADD = 87,

     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_SUB,
+    CPP_TOKEN_SUB = 88,

     // NOTE(allen): Precedence 7, LtoR
-    CPP_TOKEN_LSHIFT,
-    CPP_TOKEN_RSHIFT,
+    CPP_TOKEN_LSHIFT = 89,
+    CPP_TOKEN_RSHIFT = 90,

     // NOTE(allen): Precedence 8, LtoR
-    CPP_TOKEN_LESS,
-    CPP_TOKEN_GRTR,
-    CPP_TOKEN_GRTREQ,
-    CPP_TOKEN_LESSEQ,
+    CPP_TOKEN_LESS = 91,
+    CPP_TOKEN_GRTR = 92,
+    CPP_TOKEN_GRTREQ = 93,
+    CPP_TOKEN_LESSEQ = 94,

     // NOTE(allen): Precedence 9, LtoR
-    CPP_TOKEN_EQEQ,
-    CPP_TOKEN_NOTEQ,
+    CPP_TOKEN_EQEQ = 95,
+    CPP_TOKEN_NOTEQ = 96,

     // NOTE(allen): Precedence 10, LtoR

     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_BIT_AND,
+    CPP_TOKEN_BIT_AND = 97,

     // NOTE(allen): Precedence 11, LtoR
-    CPP_TOKEN_BIT_XOR,
+    CPP_TOKEN_BIT_XOR = 98,

     // NOTE(allen): Precedence 12, LtoR
-    CPP_TOKEN_BIT_OR,
+    CPP_TOKEN_BIT_OR = 99,

     // NOTE(allen): Precedence 13, LtoR
-    CPP_TOKEN_AND,
+    CPP_TOKEN_AND = 100,

     // NOTE(allen): Precedence 14, LtoR
-    CPP_TOKEN_OR,
+    CPP_TOKEN_OR = 101,

     // NOTE(allen): Precedence 15, RtoL
-    CPP_TOKEN_TERNARY_QMARK,
-    CPP_TOKEN_COLON,
-    CPP_TOKEN_THROW,
-    CPP_TOKEN_EQ,
-    CPP_TOKEN_ADDEQ,
-    CPP_TOKEN_SUBEQ,
-    CPP_TOKEN_MULEQ,
-    CPP_TOKEN_DIVEQ,
-    CPP_TOKEN_MODEQ,
-    CPP_TOKEN_LSHIFTEQ,
-    CPP_TOKEN_RSHIFTEQ,
-    CPP_TOKEN_ANDEQ,
-    CPP_TOKEN_OREQ,
-    CPP_TOKEN_XOREQ,
+    CPP_TOKEN_TERNARY_QMARK = 102,
+    CPP_TOKEN_COLON = 103,
+    CPP_TOKEN_THROW = 104,
+    CPP_TOKEN_EQ = 105,
+    CPP_TOKEN_ADDEQ = 106,
+    CPP_TOKEN_SUBEQ = 107,
+    CPP_TOKEN_MULEQ = 108,
+    CPP_TOKEN_DIVEQ = 109,
+    CPP_TOKEN_MODEQ = 110,
+    CPP_TOKEN_LSHIFTEQ = 111,
+    CPP_TOKEN_RSHIFTEQ = 112,
+    CPP_TOKEN_ANDEQ = 113,
+    CPP_TOKEN_OREQ = 114,
+    CPP_TOKEN_XOREQ = 115,

     // NOTE(allen): Precedence 16, LtoR
-    CPP_TOKEN_COMMA,
+    CPP_TOKEN_COMMA = 116,

     /* DOC(This type is for parser use, it is not output by the lexer.) */
-    CPP_TOKEN_EOF,
+    CPP_TOKEN_EOF = 117,

-    CPP_TOKEN_TYPE_COUNT
+    CPP_TOKEN_TYPE_COUNT = 118
 };

 /* DOC(Cpp_Token represents a single lexed token.
@@ -354,18 +356,18 @@ struct Cpp_Lex_Data{
 /* DOC(Cpp_Lex_Result is returned from the lexing engine to indicate why it stopped lexing.) */
 ENUM(int32_t, Cpp_Lex_Result){
     /* DOC(This indicates that the system got to the end of the file and will not accept more input.) */
-    LexResult_Finished,
+    LexResult_Finished = 0,

     /* DOC(This indicates that the system got to the end of an input chunk and is ready to receive the
     next input chunk.) */
-    LexResult_NeedChunk,
+    LexResult_NeedChunk = 1,

     /* DOC(This indicates that the output array ran out of space to store tokens and needs to be
     replaced or expanded before continuing.) */
-    LexResult_NeedTokenMemory,
+    LexResult_NeedTokenMemory = 2,

     /* DOC(This indicates that the maximum number of output tokens as specified by the user was hit.) */
-    LexResult_HitTokenLimit,
+    LexResult_HitTokenLimit = 3,
 };

 ENUM_INTERNAL(uint16_t, Cpp_Preprocessor_State){
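The DOC text above describes a pull-style protocol: the caller keeps stepping the lexer, feeding a new chunk on LexResult_NeedChunk and growing the token array on LexResult_NeedTokenMemory, until LexResult_Finished or the caller's own token limit is reached. A standalone sketch of that driver loop, where lex_step, next_chunk, and grow_token_memory are hypothetical stand-ins (not the 4cpp lexer entry points) that only exist so the sketch runs:

// Standalone sketch of the result-code protocol; only the handling of the
// four codes reflects the DOC text above, everything else is a stub.
#include <cstdint>
#include <cstdio>

enum Lex_Result_Sketch : int32_t{
    LexSketch_Finished = 0,
    LexSketch_NeedChunk = 1,
    LexSketch_NeedTokenMemory = 2,
    LexSketch_HitTokenLimit = 3
};

static int32_t calls = 0;
static Lex_Result_Sketch lex_step(const char*, int32_t){
    // Pretend to need one more chunk, then more token memory, then finish.
    if (calls == 0){ ++calls; return LexSketch_NeedChunk; }
    if (calls == 1){ ++calls; return LexSketch_NeedTokenMemory; }
    return LexSketch_Finished;
}
static const char *next_chunk(int32_t *size_out){ *size_out = 0; return ""; }
static void grow_token_memory(){}

int main(){
    int32_t size = 0;
    const char *chunk = next_chunk(&size);
    for (;;){
        Lex_Result_Sketch r = lex_step(chunk, size);
        if (r == LexSketch_Finished || r == LexSketch_HitTokenLimit){
            printf("done\n");
            break;
        }
        if (r == LexSketch_NeedChunk){ chunk = next_chunk(&size); }   // feed the next piece of the file
        if (r == LexSketch_NeedTokenMemory){ grow_token_memory(); }   // expand or swap the output array
    }
    return 0;
}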
@@ -1941,7 +1941,7 @@ internal void
 file_edit_cursor_fix(System_Functions *system,
                      Partition *part, General_Memory *general,
                      Editing_File *file, Editing_Layout *layout,
-                     Cursor_Fix_Descriptor desc){
+                     Cursor_Fix_Descriptor desc, i32 *shift_out){

     Temp_Memory cursor_temp = begin_temp_memory(part);
     i32 cursor_max = layout->panel_max_count * 2;
@@ -1967,13 +1967,20 @@ file_edit_cursor_fix(System_Functions *system,
     if (cursor_count > 0){
         buffer_sort_cursors(cursors, cursor_count);
         if (desc.is_batch){
-            buffer_batch_edit_update_cursors(cursors, cursor_count,
-                                             desc.batch, desc.batch_size);
+            i32 shift_total =
+                buffer_batch_edit_update_cursors(cursors, cursor_count,
+                                                 desc.batch, desc.batch_size);
+            if (shift_out){
+                *shift_out = shift_total;
+            }
         }
         else{
             buffer_update_cursors(cursors, cursor_count,
                                   desc.start, desc.end,
                                   desc.shift_amount + (desc.end - desc.start));
+            if (shift_out){
+                *shift_out = desc.shift_amount;
+            }
         }
         buffer_unsort_cursors(cursors, cursor_count);
@@ -2088,7 +2095,7 @@ file_do_single_edit(System_Functions *system,
     desc.end = end;
     desc.shift_amount = shift_amount;

-    file_edit_cursor_fix(system, part, general, file, layout, desc);
+    file_edit_cursor_fix(system, part, general, file, layout, desc, 0);

 #if BUFFER_EXPERIMENT_SCALPEL <= 0
     // NOTE(allen): token fixing
@@ -2132,7 +2139,9 @@ file_do_batch_edit(System_Functions *system, Models *models, Editing_File *file,
             new_data = general_memory_allocate(general, request_amount);
         }
         void *old_data = buffer_edit_provide_memory(&file->state.buffer, new_data, request_amount);
-        if (old_data) general_memory_free(general, old_data);
+        if (old_data){
+            general_memory_free(general, old_data);
+        }
     }

     // NOTE(allen): meta data
@@ -2141,18 +2150,21 @@ file_do_batch_edit(System_Functions *system, Models *models, Editing_File *file,
         i16 font_id = file->settings.font_id;
         Render_Font *font = get_font_info(models->font_set, font_id)->font;
         float *advance_data = 0;
-        if (font) advance_data = font->advance_data;
+        if (font){
+            advance_data = font->advance_data;
+        }
         buffer_measure_starts_widths(&state, &file->state.buffer, advance_data);
     }

     // NOTE(allen): cursor fixing
+    i32 shift_total = 0;
     {
         Cursor_Fix_Descriptor desc = {};
         desc.is_batch = 1;
         desc.batch = batch;
         desc.batch_size = batch_size;

-        file_edit_cursor_fix(system, part, general, file, layout, desc);
+        file_edit_cursor_fix(system, part, general, file, layout, desc, &shift_total);
     }

     // NOTE(allen): token fixing
@@ -2160,15 +2172,11 @@ file_do_batch_edit(System_Functions *system, Models *models, Editing_File *file,
         case BatchEdit_Normal:
         {
             if (file->settings.tokens_exist){
-                Buffer_Edit *edit = batch;
-                for (i32 i = 0; i < batch_size; ++i, ++edit){
-                    i32 start = edit->start;
-                    i32 end = edit->end;
-                    i32 shift_amount = edit->len - (end - start);
-                    if (!file_relex_parallel(system, mem, file, start, end, shift_amount)){
-                        break;
-                    }
-                }
+                // TODO(allen): Write a smart fast one here someday.
+                Buffer_Edit *first_edit = batch;
+                Buffer_Edit *last_edit = batch + batch_size - 1;
+                file_relex_parallel(system, mem, file, first_edit->start, last_edit->end, shift_total);
             }
         }break;

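The relex rule fix replaces the per-edit loop, whose start/end positions go stale as soon as earlier edits in the batch have shifted the buffer, with a single relex over the span touched by the whole batch plus the batch's net shift reported by the cursor-fix pass. A standalone sketch of that range-and-shift decision, using a simplified Edit struct rather than the 4coder Buffer_Edit/lexer API:

// Standalone sketch: pick one relex window for a whole batch of sorted edits
// instead of relexing per edit with positions that no longer match the buffer.
#include <cstdint>
#include <cstdio>

struct Edit{ int32_t start; int32_t end; int32_t len; };

struct Relex_Range{ int32_t start; int32_t end; int32_t shift; };

static Relex_Range relex_range_for_batch(const Edit *sorted_edits, int32_t count){
    Relex_Range r = {};
    r.start = sorted_edits[0].start;          // first touched position
    r.end = sorted_edits[count - 1].end;      // last touched position, in pre-edit coordinates
    for (int32_t i = 0; i < count; ++i){
        r.shift += sorted_edits[i].len - (sorted_edits[i].end - sorted_edits[i].start);
    }
    return(r);
}

int main(){
    Edit batch[] = {{10, 14, 6}, {40, 45, 2}, {90, 90, 3}};
    Relex_Range r = relex_range_for_batch(batch, 3);
    printf("relex [%d,%d) with shift %d\n", r.start, r.end, r.shift);
    return 0;
}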
@@ -208,7 +208,7 @@ buffer_batch_edit_max_shift(Buffer_Edit *sorted_edits, int edit_count){
     return(shift_max);
 }

-internal_4tech void
+internal_4tech int
 buffer_batch_edit_update_cursors(Cursor_With_Index *sorted_positions, int count, Buffer_Edit *sorted_edits, int edit_count){
     Cursor_With_Index *position, *end_position;
     Buffer_Edit *edit, *end_edit;
@@ -241,6 +241,12 @@ buffer_batch_edit_update_cursors(Cursor_With_Index *sorted_positions, int count,
     for (; position < end_position; ++position){
         position->pos += shift_amount;
     }
+    
+    for (; edit < end_edit; ++edit){
+        shift_amount += (edit->len - (edit->end - edit->start));
+    }
+    
+    return(shift_amount);
 }

 internal_4tech int
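buffer_batch_edit_update_cursors now returns the accumulated shift of the entire batch instead of void, which is the value file_do_batch_edit forwards to the relexer. A simplified standalone sketch of the same idea with plain ints instead of Cursor_With_Index (and ignoring cursors that land inside an edited range, which the real routine also has to deal with):

// Standalone sketch: walk sorted cursors and sorted edits together, shifting
// each cursor by the net size change of all edits that end before it, then
// fold in the remaining edits so the caller gets the whole batch's shift.
#include <cstdint>
#include <cstdio>

struct Edit{ int32_t start; int32_t end; int32_t len; };

static int32_t update_cursors(int32_t *cursors, int32_t cursor_count,
                              const Edit *edits, int32_t edit_count){
    int32_t shift = 0;
    int32_t e = 0;
    for (int32_t c = 0; c < cursor_count; ++c){
        for (; e < edit_count && edits[e].end <= cursors[c]; ++e){
            shift += edits[e].len - (edits[e].end - edits[e].start);
        }
        cursors[c] += shift;
    }
    for (; e < edit_count; ++e){
        shift += edits[e].len - (edits[e].end - edits[e].start);
    }
    return(shift);
}

int main(){
    int32_t cursors[] = {5, 20, 100};
    Edit edits[] = {{10, 14, 6}, {40, 45, 2}};
    int32_t total = update_cursors(cursors, 3, edits, 2);
    printf("cursors: %d %d %d, total shift: %d\n", cursors[0], cursors[1], cursors[2], total);
    return 0;
}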
@@ -309,6 +309,257 @@ CUSTOM_COMMAND_SIG(cursor_to_surrounding_scope){
     }
 }

+// NOTE(allen): Some basic code manipulation ideas.
+
+CUSTOM_COMMAND_SIG(rename_parameter){
+    uint32_t access = AccessOpen;
+    View_Summary view = app->get_active_view(app, access);
+    Buffer_Summary buffer = app->get_buffer(app, view.buffer_id, access);
+
+    Partition *part = &global_part;
+
+    Temp_Memory temp = begin_temp_memory(part);
+    Cpp_Token_Array array = buffer_get_all_tokens(app, part, &buffer);
+    Cpp_Get_Token_Result result = cpp_get_token(&array, view.cursor.pos);
+    if (!result.in_whitespace && result.token_index < array.count){
+        Cpp_Token *token_ptr = &array.tokens[result.token_index];
+        Cpp_Token *token_end = array.tokens + array.count - 1;
+        if (token_ptr->type == CPP_TOKEN_IDENTIFIER){
+            char old_lexeme_base[128];
+            String old_lexeme = make_fixed_width_string(old_lexeme_base);
+            if (token_ptr->size < sizeof(old_lexeme_base)){
+
+                Cpp_Token original_token = *token_ptr;
+                old_lexeme.size = token_ptr->size;
+                app->buffer_read_range(app, &buffer, token_ptr->start,
+                                       token_ptr->start+token_ptr->size,
+                                       old_lexeme.str);
+
+                int32_t proc_body_found = 0;
+                for (++token_ptr; token_ptr < token_end; ++token_ptr){
+                    switch (token_ptr->type){
+                        case CPP_TOKEN_BRACE_OPEN:
+                        {
+                            proc_body_found = 1;
+                            goto doublebreak;
+                        }break;
+
+                        case CPP_TOKEN_BRACE_CLOSE:
+                        case CPP_TOKEN_PARENTHESE_OPEN:
+                        {
+                            goto doublebreak;
+                        }break;
+                    }
+                }
+                doublebreak:;
+
+                if (proc_body_found){
+
+                    Query_Bar with;
+                    char with_space[1024];
+                    with.prompt = make_lit_string("New Name: ");
+                    with.string = make_fixed_width_string(with_space);
+                    if (!query_user_string(app, &with)) return;
+
+                    String replace_string = with.string;
+
+                    Cpp_Token *token_start_ptr = token_ptr+1;
+
+                    Buffer_Edit *edits = (Buffer_Edit*)partition_current(part);
+                    int32_t edit_max = (partition_remaining(part))/sizeof(Buffer_Edit);
+                    int32_t edit_count = 0;
+
+                    if (edit_max >= 1){
+                        Buffer_Edit edit;
+                        edit.str_start = 0;
+                        edit.len = replace_string.size;
+                        edit.start = original_token.start;
+                        edit.end = original_token.start + original_token.size;
+
+                        edits[edit_count] = edit;
+                        ++edit_count;
+                    }
+
+                    int32_t nesting_level = 0;
+                    int32_t closed_correctly = 0;
+                    token_ptr = token_start_ptr;
+                    for (; token_ptr < token_end; ++token_ptr){
+                        switch (token_ptr->type){
+                            case CPP_TOKEN_IDENTIFIER:
+                            {
+                                if (token_ptr->size == old_lexeme.size){
+                                    char other_lexeme_base[128];
+                                    String other_lexeme = make_fixed_width_string(other_lexeme_base);
+                                    other_lexeme.size = old_lexeme.size;
+                                    app->buffer_read_range(app, &buffer, token_ptr->start,
+                                                           token_ptr->start+token_ptr->size,
+                                                           other_lexeme.str);
+
+                                    if (match(old_lexeme, other_lexeme)){
+                                        Buffer_Edit edit;
+                                        edit.str_start = 0;
+                                        edit.len = replace_string.size;
+                                        edit.start = token_ptr->start;
+                                        edit.end = token_ptr->start + token_ptr->size;
+
+                                        if (edit_count < edit_max){
+                                            edits[edit_count] = edit;
+                                            ++edit_count;
+                                        }
+                                        else{
+                                            goto doublebreak2;
+                                        }
+                                    }
+                                }
+                            }break;
+
+                            case CPP_TOKEN_BRACE_OPEN:
+                            {
+                                ++nesting_level;
+                            }break;
+
+                            case CPP_TOKEN_BRACE_CLOSE:
+                            {
+                                if (nesting_level == 0){
+                                    closed_correctly = 1;
+                                    goto doublebreak2;
+                                }
+                                else{
+                                    --nesting_level;
+                                }
+                            }break;
+                        }
+                    }
+                    doublebreak2:;
+
+                    if (closed_correctly){
+                        app->buffer_batch_edit(app, &buffer, replace_string.str, replace_string.size,
+                                               edits, edit_count, BatchEdit_Normal);
+                    }
+                }
+            }
+        }
+    }
+    end_temp_memory(temp);
+}
+
+CUSTOM_COMMAND_SIG(write_explicit_enum_values){
+    uint32_t access = AccessOpen;
+    View_Summary view = app->get_active_view(app, access);
+    Buffer_Summary buffer = app->get_buffer(app, view.buffer_id, access);
+
+    Partition *part = &global_part;
+
+    Temp_Memory temp = begin_temp_memory(part);
+    Cpp_Token_Array array = buffer_get_all_tokens(app, part, &buffer);
+    Cpp_Get_Token_Result result = cpp_get_token(&array, view.cursor.pos);
+    if (!result.in_whitespace && result.token_index < array.count){
+        Cpp_Token *token_ptr = &array.tokens[result.token_index];
+        Cpp_Token *token_end = array.tokens + array.count;
+        if (token_ptr->type == CPP_TOKEN_BRACE_OPEN){
+
+            ++token_ptr;
+
+            int32_t closed_correctly = 0;
+            Cpp_Token *token_seeker = token_ptr;
+            for (; token_seeker < token_end; ++token_seeker){
+                switch (token_seeker->type){
+                    case CPP_TOKEN_BRACE_CLOSE:
+                    closed_correctly = 1;
+                    goto finished_seek;
+
+                    case CPP_TOKEN_BRACE_OPEN:
+                    goto finished_seek;
+                }
+            }
+            finished_seek:;
+
+            if (closed_correctly){
+                int32_t count_estimate = 1 + (int32_t)(token_seeker - token_ptr)/2;
+
+                Buffer_Edit *edits = push_array(part, Buffer_Edit, count_estimate);
+                int32_t edit_count = 0;
+
+                char *string_base = (char*)partition_current(part);
+                String string = make_string(string_base, 0, partition_remaining(part));
+
+                int32_t value = 0;
+                closed_correctly = 0;
+                for (;token_ptr < token_end; ++token_ptr){
+                    switch (token_ptr->type){
+                        case CPP_TOKEN_IDENTIFIER:
+                        {
+                            int32_t edit_start = token_ptr->start + token_ptr->size;
+                            int32_t edit_stop = edit_start;
+
+                            int32_t edit_is_good = 0;
+                            for (++token_ptr; token_ptr < token_end; ++token_ptr){
+                                switch (token_ptr->type){
+                                    case CPP_TOKEN_COMMA:
+                                    {
+                                        edit_stop = token_ptr->start;
+                                        edit_is_good = 1;
+                                        goto good_edit;
+                                    }break;
+
+                                    case CPP_TOKEN_BRACE_CLOSE:
+                                    {
+                                        edit_stop = token_ptr->start;
+                                        closed_correctly = 1;
+                                        edit_is_good = 1;
+                                        goto good_edit;
+                                    }break;
+                                }
+                            }
+
+                            good_edit:;
+                            if (edit_is_good){
+                                int32_t str_pos = string.size;
+
+                                append(&string, " = ");
+                                append_int_to_str(&string, value);
+                                if (closed_correctly){
+                                    append(&string, "\n");
+                                }
+                                ++value;
+
+                                int32_t str_size = string.size - str_pos;
+
+                                Buffer_Edit edit;
+                                edit.str_start = str_pos;
+                                edit.len = str_size;
+                                edit.start = edit_start;
+                                edit.end = edit_stop;
+
+                                assert(edit_count < count_estimate);
+                                edits[edit_count] = edit;
+                                ++edit_count;
+                            }
+                            if (!edit_is_good || closed_correctly){
+                                goto finished;
+                            }
+                        }break;
+
+                        case CPP_TOKEN_BRACE_CLOSE:
+                        {
+                            closed_correctly = 1;
+                            goto finished;
+                        }break;
+                    }
+                }
+
+                finished:;
+                if (closed_correctly){
+                    app->buffer_batch_edit(app, &buffer, string_base, string.size,
+                                           edits, edit_count, BatchEdit_Normal);
+                }
+            }
+        }
+    }
+
+    end_temp_memory(temp);
+}
+
 // TODO(allen): Query theme settings
 #if 0
 CUSTOM_COMMAND_SIG(save_theme_settings){
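write_explicit_enum_values tokenizes the buffer, scans the enum body under the cursor, and batch-inserts " = N" after each enumerator counting from 0; the rewritten Cpp_Token_Type and Cpp_Lex_Result enums earlier in the commit appear to be its output. rename_parameter follows the same pattern, collecting matching identifier tokens within the surrounding scope into a Buffer_Edit array and applying them with buffer_batch_edit. A standalone sketch of the numbering idea as plain string processing, not the 4coder token/edit API, and assuming a bare comma-separated body with no existing initializers or comments:

// Standalone sketch: append " = N" to each enumerator of a simple,
// single-level enum body, numbering from 0.
#include <cctype>
#include <cstdio>
#include <string>

static std::string write_explicit_values(const std::string &body){
    std::string out;
    std::string name;
    int value = 0;
    for (char c : body){
        if (isalnum((unsigned char)c) || c == '_'){
            name.push_back(c);
        }
        else{
            if (!name.empty()){
                out += name + " = " + std::to_string(value++);
                name.clear();
            }
            out.push_back(c);
        }
    }
    if (!name.empty()){
        out += name + " = " + std::to_string(value++);
    }
    return out;
}

int main(){
    std::string body = "DirtyState_UpToDate, DirtyState_UnsavedChanges, DirtyState_UnloadedChanges";
    printf("%s\n", write_explicit_values(body).c_str());
    return 0;
}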
@@ -402,8 +653,10 @@ get_bindings(void *data, int size){
     end_map(context);

     begin_map(context, my_code_map);
-    bind(context, ']', MDFR_ALT, mark_matching_brace);
     bind(context, '[', MDFR_ALT, cursor_to_surrounding_scope);
+    bind(context, ']', MDFR_ALT, mark_matching_brace);
+    bind(context, key_insert, MDFR_CTRL, write_explicit_enum_values);
+    bind(context, 'p', MDFR_ALT, rename_parameter);
     end_map(context);

     BIND_4CODER_TESTS(context);