diff --git a/ChangeLog.txt b/ChangeLog.txt
index 33779caf9bd..9d0c1906d6c 100644
--- a/ChangeLog.txt
+++ b/ChangeLog.txt
@@ -7,6 +7,10 @@ Entries may not always be in chronological/commit order.
 See license at the end of file. */
 
+2023-11-07 03:40 UTC+0100 Phil Krylov (phil a t krylov.eu)
+    * src/3rd/pcre
+    ! Updated pcre to 8.45.
+
 2023-11-07 03:30 UTC+0100 Phil Krylov (phil a t krylov.eu)
     * src/3rd/tiff
     ! Updated tiff to 4.6.0. Removed C99-isms.
diff --git a/src/3rd/pcre/LICENCE b/src/3rd/pcre/LICENCE
index 3aff6a62c00..803b4119e50 100644
--- a/src/3rd/pcre/LICENCE
+++ b/src/3rd/pcre/LICENCE
@@ -6,7 +6,8 @@ and semantics are as close as possible to those of the Perl 5 language.
 
 Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
 specified below. The documentation for PCRE, supplied in the "doc"
-directory, is distributed under the same terms as the software itself.
+directory, is distributed under the same terms as the software itself. The data
+in the testdata directory is not copyrighted and is in the public domain.
 
 The basic library functions are written in C and are freestanding. Also
 included in the distribution is a set of C++ wrapper functions, and a
@@ -18,13 +19,13 @@ THE BASIC LIBRARY FUNCTIONS
 ---------------------------
 
 Written by:       Philip Hazel
-Email local part: ph10
-Email domain:     cam.ac.uk
+Email local part: Philip.Hazel
+Email domain:     gmail.com
 
 University of Cambridge Computing Service,
 Cambridge, England.
 
-Copyright (c) 1997-2013 University of Cambridge
+Copyright (c) 1997-2021 University of Cambridge
 All rights reserved.
 
 
@@ -33,9 +34,9 @@ PCRE JUST-IN-TIME COMPILATION SUPPORT
 
 Written by:       Zoltan Herczeg
 Email local part: hzmester
-Emain domain:     freemail.hu
+Email domain:     freemail.hu
 
-Copyright(c) 2010-2013 Zoltan Herczeg
+Copyright(c) 2010-2021 Zoltan Herczeg
 All rights reserved.
 
 
@@ -44,9 +45,9 @@ STACK-LESS JUST-IN-TIME COMPILER
 
 Written by:       Zoltan Herczeg
 Email local part: hzmester
-Emain domain:     freemail.hu
+Email domain:     freemail.hu
 
-Copyright(c) 2009-2013 Zoltan Herczeg
+Copyright(c) 2009-2021 Zoltan Herczeg
 All rights reserved.
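A quick sanity check for an update like this — a minimal sketch, not part of the patch, assuming the vendored header is reachable as "pcre.h" — is to compare the compile-time version macros against what the linked library reports at run time:

#include <stdio.h>
#include <string.h>
#include "pcre.h"

int main(void)
{
/* PCRE_MAJOR/PCRE_MINOR come from the patched pcre.h (8.45);
pcre_version() reports the version of the library actually linked in. */
printf("headers: %d.%d\n", PCRE_MAJOR, PCRE_MINOR);
printf("library: %s\n", pcre_version());
return strncmp(pcre_version(), "8.45", 4) != 0;   /* 0 when in sync */
}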
diff --git a/src/3rd/pcre/Makefile b/src/3rd/pcre/Makefile
index a6ba555cce2..28882cf8de4 100644
--- a/src/3rd/pcre/Makefile
+++ b/src/3rd/pcre/Makefile
@@ -77,9 +77,9 @@ else
 include $(TOP)$(ROOT)config/none.mk
 endif
 
-# ORIGIN http://www.pcre.org/
-# VER 8.34
-# URL ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.34.tar.gz
+# ORIGIN https://www.pcre.org/
+# VER 8.45
+# URL https://sourceforge.net/projects/pcre/files/pcre/8.45/pcre-8.45.tar.gz/download
 # DIFF pcre.diff
 #
 # MAP LICENCE
@@ -113,9 +113,11 @@ endif
 # MAP sljit/sljitExecAllocator.c sjexeca.c
 # MAP sljit/sljitLir.c sjlir.c
 # MAP sljit/sljitLir.h sjlir.h
-# MAP sljit/sljitNativeARM_Thumb2.c sjarmth2.c
-# MAP sljit/sljitNativeARM_v5.c sjarmv5.c
+# MAP sljit/sljitNativeARM_32.c sjarm32.c
+# MAP sljit/sljitNativeARM_64.c sjarm64.c
+# MAP sljit/sljitNativeARM_T2_32.c sjarmth2.c
 # MAP sljit/sljitNativeMIPS_32.c sjmips32.c
+# MAP sljit/sljitNativeMIPS_64.c sjmips64.c
 # MAP sljit/sljitNativeMIPS_common.c sjmipsc.c
 # MAP sljit/sljitNativePPC_32.c sjppc32.c
 # MAP sljit/sljitNativePPC_64.c sjppc64.c
@@ -123,4 +125,6 @@ endif
 # MAP sljit/sljitNativeX86_32.c sjx8632.c
 # MAP sljit/sljitNativeX86_64.c sjx8664.c
 # MAP sljit/sljitNativeX86_common.c sjx86c.c
+# MAP sljit/sljitNativeSPARC_32.c sjsprc32.c
+# MAP sljit/sljitNativeSPARC_common.c sjsparcc.c
 # MAP sljit/sljitUtils.c sjutils.c
diff --git a/src/3rd/pcre/config.h b/src/3rd/pcre/config.h
index 58fb5a214f6..b0d46f71a3e 100644
--- a/src/3rd/pcre/config.h
+++ b/src/3rd/pcre/config.h
@@ -84,9 +84,6 @@ sure both macros are undefined; an emulation function will then be used. */
 /* Define to 1 if you have the `memmove' function. */
 /* #undef HAVE_MEMMOVE */
 
-/* Define to 1 if you have the <memory.h> header file. */
-/* #undef HAVE_MEMORY_H */
-
 /* Define if you have POSIX threads libraries and header files. */
 /* #undef HAVE_PTHREAD */
@@ -102,6 +99,9 @@ sure both macros are undefined; an emulation function will then be used. */
 /* Define to 1 if you have the <stdint.h> header file. */
 /* #undef HAVE_STDINT_H */
 
+/* Define to 1 if you have the <stdio.h> header file. */
+/* #undef HAVE_STDIO_H */
+
 /* Define to 1 if you have the <stdlib.h> header file. */
 /* #undef HAVE_STDLIB_H */
@@ -162,8 +162,7 @@ sure both macros are undefined; an emulation function will then be used. */
 #define LINK_SIZE 2
 #endif
 
-/* Define to the sub-directory in which libtool stores uninstalled libraries.
- */
+/* Define to the sub-directory where libtool stores uninstalled libraries. */
 /* This is ignored unless you are using libtool. */
 #ifndef LT_OBJDIR
 #define LT_OBJDIR ".libs/"
@@ -236,7 +235,7 @@ sure both macros are undefined; an emulation function will then be used. */
 #define PACKAGE_NAME "PCRE"
 
 /* Define to the full name and version of this package. */
-#define PACKAGE_STRING "PCRE 8.34"
+#define PACKAGE_STRING "PCRE 8.45"
 
 /* Define to the one symbol short name of this package. */
 #define PACKAGE_TARNAME "pcre"
@@ -245,7 +244,7 @@ sure both macros are undefined; an emulation function will then be used. */
 #define PACKAGE_URL ""
 
 /* Define to the version of this package. */
-#define PACKAGE_VERSION "8.34"
+#define PACKAGE_VERSION "8.45"
 
 /* The value of PARENS_NEST_LIMIT specifies the maximum depth of nested
 parentheses (of any kind) in a pattern. This limits the amount of system
@@ -292,7 +291,9 @@ sure both macros are undefined; an emulation function will then be used. */
 your system. */
 /* #undef PTHREAD_CREATE_JOINABLE */
 
-/* Define to 1 if you have the ANSI C header files. */
+/* Define to 1 if all of the C90 standard headers exist (not just the ones
+   required in a freestanding environment).  This macro is provided for
+   backward compatibility; new code need not use it. */
 /* #undef STDC_HEADERS */
 
 /* Define to any value to enable support for Just-In-Time compiling. */
@@ -337,7 +338,7 @@ sure both macros are undefined; an emulation function will then be used. */
 /* #undef SUPPORT_VALGRIND */
 
 /* Version number of package */
-#define VERSION "8.34"
+#define VERSION "8.45"
 
 /* Define to empty if `const' does not conform to ANSI C. */
 /* #undef const */
diff --git a/src/3rd/pcre/pcre.diff b/src/3rd/pcre/pcre.diff
index 8cfaf9aee36..164291d0d51 100644
--- a/src/3rd/pcre/pcre.diff
+++ b/src/3rd/pcre/pcre.diff
@@ -1,6 +1,6 @@
-diff -urN pcre.orig/pcrejitc.c pcre/pcrejitc.c
---- pcre.orig/pcrejitc.c	2014-02-03 09:05:44.496039042 +0100
-+++ pcre/pcrejitc.c	2014-02-03 09:05:44.496039042 +0100
+diff --strip-trailing-cr -urN pcre.orig/pcrejitc.c pcre/pcrejitc.c
+--- pcre.orig/pcrejitc.c	2020-02-11 17:36:46.000000000 +0000
++++ pcre/pcrejitc.c	2023-11-07 01:09:40.000000000 +0000
 @@ -59,7 +59,7 @@
  #define SLJIT_VERBOSE 0
  #define SLJIT_DEBUG 0
@@ -10,19 +10,20 @@ diff -urN pcre.orig/pcrejitc.c pcre/pcrejitc.c
 
  #if defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED
  #error Unsupported architecture
-diff -urN pcre.orig/sjconfi.h pcre/sjconfi.h
---- pcre.orig/sjconfi.h	2014-02-03 09:05:44.536039042 +0100
-+++ pcre/sjconfi.h	2014-02-03 09:05:44.536039042 +0100
-@@ -323,8 +323,12 @@
+diff --strip-trailing-cr -urN pcre.orig/sjconfi.h pcre/sjconfi.h
+--- pcre.orig/sjconfi.h	2019-11-19 16:21:54.000000000 +0000
++++ pcre/sjconfi.h	2023-11-07 01:15:20.000000000 +0000
+@@ -505,8 +505,13 @@
  #if defined(__GNUC__) && !defined(__APPLE__)
  
 +#if ( __GNUC__ > 3 ) || ( ( __GNUC__ == 3 ) && ( __GNUC_MINOR__ >= 4 ) )
  #define SLJIT_FUNC __attribute__ ((fastcall))
  #define SLJIT_X86_32_FASTCALL 1
 +#else
 +#define SLJIT_FUNC
 +#endif
++
  
  #elif defined(_MSC_VER)
diff --git a/src/3rd/pcre/pcre.h b/src/3rd/pcre/pcre.h
index c85f36b6bc6..bee1fe6ffd6 100644
--- a/src/3rd/pcre/pcre.h
+++ b/src/3rd/pcre/pcre.h
@@ -5,7 +5,7 @@
 /* This is the public header file for the PCRE library, to be #included by
 applications that call the PCRE functions.
 
-           Copyright (c) 1997-2013 University of Cambridge
+           Copyright (c) 1997-2014 University of Cambridge
 
 -----------------------------------------------------------------------------
 Redistribution and use in source and binary forms, with or without
@@ -42,9 +42,9 @@ POSSIBILITY OF SUCH DAMAGE.
 /* The current PCRE version information. */
 
 #define PCRE_MAJOR          8
-#define PCRE_MINOR          34
+#define PCRE_MINOR          45
 #define PCRE_PRERELEASE
-#define PCRE_DATE           2013-12-15
+#define PCRE_DATE           2021-06-15
 
 /* When an application links to a PCRE DLL in Windows, the symbols that are
 imported have to be identified as such. When building PCRE, the appropriate
@@ -321,11 +321,11 @@ these bits, just add new ones on the end, in order to remain compatible.
 */
 
 /* Types */
 
-struct real_pcre;                 /* declaration; the definition is private */
-typedef struct real_pcre pcre;
+struct real_pcre8_or_16;          /* declaration; the definition is private */
+typedef struct real_pcre8_or_16 pcre;
 
-struct real_pcre16;               /* declaration; the definition is private */
-typedef struct real_pcre16 pcre16;
+struct real_pcre8_or_16;          /* declaration; the definition is private */
+typedef struct real_pcre8_or_16 pcre16;
 
 struct real_pcre32;               /* declaration; the definition is private */
 typedef struct real_pcre32 pcre32;
@@ -491,36 +491,42 @@ PCRE_EXP_DECL void  (*pcre_free)(void *);
 PCRE_EXP_DECL void *(*pcre_stack_malloc)(size_t);
 PCRE_EXP_DECL void  (*pcre_stack_free)(void *);
 PCRE_EXP_DECL int   (*pcre_callout)(pcre_callout_block *);
+PCRE_EXP_DECL int   (*pcre_stack_guard)(void);
 
 PCRE_EXP_DECL void *(*pcre16_malloc)(size_t);
 PCRE_EXP_DECL void  (*pcre16_free)(void *);
 PCRE_EXP_DECL void *(*pcre16_stack_malloc)(size_t);
 PCRE_EXP_DECL void  (*pcre16_stack_free)(void *);
 PCRE_EXP_DECL int   (*pcre16_callout)(pcre16_callout_block *);
+PCRE_EXP_DECL int   (*pcre16_stack_guard)(void);
 
 PCRE_EXP_DECL void *(*pcre32_malloc)(size_t);
 PCRE_EXP_DECL void  (*pcre32_free)(void *);
 PCRE_EXP_DECL void *(*pcre32_stack_malloc)(size_t);
 PCRE_EXP_DECL void  (*pcre32_stack_free)(void *);
 PCRE_EXP_DECL int   (*pcre32_callout)(pcre32_callout_block *);
+PCRE_EXP_DECL int   (*pcre32_stack_guard)(void);
 #else   /* VPCOMPAT */
 PCRE_EXP_DECL void *pcre_malloc(size_t);
 PCRE_EXP_DECL void  pcre_free(void *);
 PCRE_EXP_DECL void *pcre_stack_malloc(size_t);
 PCRE_EXP_DECL void  pcre_stack_free(void *);
 PCRE_EXP_DECL int   pcre_callout(pcre_callout_block *);
+PCRE_EXP_DECL int   pcre_stack_guard(void);
 PCRE_EXP_DECL void *pcre16_malloc(size_t);
 PCRE_EXP_DECL void  pcre16_free(void *);
 PCRE_EXP_DECL void *pcre16_stack_malloc(size_t);
 PCRE_EXP_DECL void  pcre16_stack_free(void *);
 PCRE_EXP_DECL int   pcre16_callout(pcre16_callout_block *);
+PCRE_EXP_DECL int   pcre16_stack_guard(void);
 PCRE_EXP_DECL void *pcre32_malloc(size_t);
 PCRE_EXP_DECL void  pcre32_free(void *);
 PCRE_EXP_DECL void *pcre32_stack_malloc(size_t);
 PCRE_EXP_DECL void  pcre32_stack_free(void *);
 PCRE_EXP_DECL int   pcre32_callout(pcre32_callout_block *);
+PCRE_EXP_DECL int   pcre32_stack_guard(void);
 #endif  /* VPCOMPAT */
 
 /* User defined callback which provides a stack just before the match starts. */
diff --git a/src/3rd/pcre/pcrebyte.c b/src/3rd/pcre/pcrebyte.c
index 4dfbaae82e8..1f98199fca8 100644
--- a/src/3rd/pcre/pcrebyte.c
+++ b/src/3rd/pcre/pcrebyte.c
@@ -6,7 +6,7 @@
 and semantics are as close as possible to those of the Perl 5 language.
 
                        Written by Philip Hazel
-           Copyright (c) 1997-2013 University of Cambridge
+           Copyright (c) 1997-2014 University of Cambridge
 
 -----------------------------------------------------------------------------
 Redistribution and use in source and binary forms, with or without
@@ -311,9 +311,9 @@ while(TRUE)
   ptr++;
   }
 /* Control should never reach here in 16/32 bit mode. */
-#endif  /* !COMPILE_PCRE8 */
-
+#else
 /* In 8-bit mode, the pattern does not need to be processed. */
 return 0;
+#endif  /* !COMPILE_PCRE8 */
 }
 
 /* End of pcre_byte_order.c */
diff --git a/src/3rd/pcre/pcrecomp.c b/src/3rd/pcre/pcrecomp.c
index 6a75a6cf03d..652dc88a154 100644
--- a/src/3rd/pcre/pcrecomp.c
+++ b/src/3rd/pcre/pcrecomp.c
@@ -6,7 +6,7 @@
 and semantics are as close as possible to those of the Perl 5 language.
 
                        Written by Philip Hazel
-           Copyright (c) 1997-2013 University of Cambridge
+           Copyright (c) 1997-2021 University of Cambridge
 
 -----------------------------------------------------------------------------
 Redistribution and use in source and binary forms, with or without
@@ -47,8 +47,8 @@ supporting internal functions that are not used by other modules. */
 #endif
 
 #define NLBLOCK cd             /* Block containing newline information */
-#define PSSTART start_pattern  /* Field containing processed string start */
-#define PSEND   end_pattern    /* Field containing processed string end */
+#define PSSTART start_pattern  /* Field containing pattern start */
+#define PSEND   end_pattern    /* Field containing pattern end */
 
 #include "pcreinal.h"
@@ -68,7 +68,7 @@ COMPILE_PCREx macro will already be appropriately set. */
 
 /* Macro for setting individual bits in class bitmaps. */
 
-#define SETBIT(a,b) a[(b)/8] |= (1 << ((b)&7))
+#define SETBIT(a,b) a[(b)/8] |= (1U << ((b)&7))
 
 /* Maximum length value to check against when making sure that the integer that
 holds the compiled pattern length does not overflow. We make it a bit less than
@@ -129,8 +129,8 @@ overrun before it actually does run off the end of the data block. */
 
 /* Private flags added to firstchar and reqchar. */
 
-#define REQ_CASELESS    (1 << 0)        /* Indicates caselessness */
-#define REQ_VARY        (1 << 1)        /* Reqchar followed non-literal item */
+#define REQ_CASELESS    (1U << 0)       /* Indicates caselessness */
+#define REQ_VARY        (1U << 1)       /* Reqchar followed non-literal item */
 /* Negative values for the firstchar and reqchar flags */
 #define REQ_UNSET       (-2)
 #define REQ_NONE        (-1)
@@ -174,7 +174,7 @@ static const short int escapes[] = {
      -ESC_Z,                  CHAR_LEFT_SQUARE_BRACKET,
      CHAR_BACKSLASH,          CHAR_RIGHT_SQUARE_BRACKET,
      CHAR_CIRCUMFLEX_ACCENT,  CHAR_UNDERSCORE,
-     CHAR_GRAVE_ACCENT,       7,
+     CHAR_GRAVE_ACCENT,       ESC_a,
      -ESC_b,                  0,
      -ESC_d,                  ESC_e,
      ESC_f,                   0,
@@ -202,9 +202,9 @@ static const short int escapes[] = {
/*  68 */     0,     0,    '|',   ',',   '%',   '_',   '>',   '?',
/*  70 */     0,     0,      0,     0,     0,     0,     0,     0,
/*  78 */     0,   '`',    ':',   '#',   '@',  '\'',   '=',   '"',
-/*  80 */     0,     7, -ESC_b,     0, -ESC_d, ESC_e, ESC_f,   0,
+/*  80 */     0, ESC_a, -ESC_b,     0, -ESC_d, ESC_e, ESC_f,   0,
/*  88 */-ESC_h,     0,      0,   '{',     0,     0,     0,     0,
-/*  90 */     0,     0, -ESC_k,   'l',     0, ESC_n,     0, -ESC_p,
+/*  90 */     0,     0, -ESC_k,     0,     0, ESC_n,     0, -ESC_p,
/*  98 */     0, ESC_r,      0,   '}',     0,     0,     0,     0,
/*  A0 */     0,   '~', -ESC_s, ESC_tee,  0, -ESC_v, -ESC_w,   0,
/*  A8 */     0, -ESC_z,     0,     0,     0,   '[',     0,     0,
@@ -219,6 +219,12 @@ static const short int escapes[] = {
/*  F0 */     0,     0,      0,     0,     0,     0,     0,     0,
/*  F8 */     0,     0,      0,     0,     0,     0,     0,     0
 };
+
+/* We also need a table of characters that may follow \c in an EBCDIC
+environment for characters 0-31. */
+
+static unsigned char ebcdic_escape_c[] = "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_";
+
 #endif
@@ -458,7 +464,7 @@ static const char error_texts[] =
   "range out of order in character class\0"
   "nothing to repeat\0"
   /* 10 */
-  "operand of unlimited repeat could match the empty string\0"  /** DEAD **/
+  "internal error: invalid forward reference offset\0"
   "internal error: unexpected repeat\0"
  "unrecognized character after (? or (?-\0"
  "POSIX named classes are supported only within a class\0"
@@ -479,7 +485,7 @@ static const char error_texts[] =
  "lookbehind assertion is not fixed length\0"
  "malformed number or name after (?(\0"
  "conditional group contains more than two branches\0"
-  "assertion expected after (?(\0"
+  "assertion expected after (?( or (?(?C)\0"
  "(?R or (?[+-]digits must be followed by )\0"
  /* 30 */
  "unknown POSIX class name\0"
@@ -527,7 +533,11 @@ static const char error_texts[] =
  "different names for subpatterns of the same number are not allowed\0"
  "(*MARK) must have an argument\0"
  "this version of PCRE is not compiled with Unicode property support\0"
+#ifndef EBCDIC
  "\\c must be followed by an ASCII character\0"
+#else
+  "\\c must be followed by a letter or one of [\\]^_?\0"
+#endif
  "\\k is not followed by a braced, angle-bracketed, or quoted name\0"
  /* 70 */
  "internal error: unknown opcode in find_fixedlength()\0"
@@ -547,6 +557,10 @@ static const char error_texts[] =
  "parentheses are too deeply nested\0"
  "invalid range in character class\0"
  "group name must start with a non-digit\0"
+  /* 85 */
+  "parentheses are too deeply nested (stack check)\0"
+  "digits missing in \\x{} or \\o{}\0"
+  "regular expression is too complicated\0"
  ;
 
 /* Table to identify digits and hex digits. This is used when compiling
@@ -1257,6 +1271,7 @@ else
     case CHAR_o:
     if (ptr[1] != CHAR_LEFT_CURLY_BRACKET) *errorcodeptr = ERR81; else
+      if (ptr[2] == CHAR_RIGHT_CURLY_BRACKET) *errorcodeptr = ERR86; else
       {
       ptr += 2;
       c = 0;
@@ -1326,6 +1341,11 @@ else
     if (ptr[1] == CHAR_LEFT_CURLY_BRACKET)
       {
       ptr += 2;
+      if (*ptr == CHAR_RIGHT_CURLY_BRACKET)
+        {
+        *errorcodeptr = ERR86;
+        break;
+        }
       c = 0;
       overflow = FALSE;
       while (MAX_255(*ptr) && (digitab[*ptr] & ctype_xdigit) != 0)
@@ -1416,7 +1436,16 @@ else
     c ^= 0x40;
 #else             /* EBCDIC coding */
     if (c >= CHAR_a && c <= CHAR_z) c += 64;
-    c ^= 0xC0;
+    if (c == CHAR_QUESTION_MARK)
+      c = ('\\' == 188 && '`' == 74)? 0x5f : 0xff;
+    else
+      {
+      for (i = 0; i < 32; i++)
+        {
+        if (c == ebcdic_escape_c[i]) break;
+        }
+      if (i < 32) c = i; else *errorcodeptr = ERR68;
+      }
 #endif
     break;
@@ -1581,29 +1610,29 @@ read_repeat_counts(const pcre_uchar *p, int *minp, int *maxp, int *errorcodeptr)
 int min = 0;
 int max = -1;
 
-/* Read the minimum value and do a paranoid check: a negative value indicates
-an integer overflow. */
-
-while (IS_DIGIT(*p)) min = min * 10 + (int)(*p++ - CHAR_0);
-if (min < 0 || min > 65535)
+while (IS_DIGIT(*p))
   {
-  *errorcodeptr = ERR5;
-  return p;
+  min = min * 10 + (int)(*p++ - CHAR_0);
+  if (min > 65535)
+    {
+    *errorcodeptr = ERR5;
+    return p;
+    }
   }
 
-/* Read the maximum value if there is one, and again do a paranoid on its size.
-Also, max must not be less than min. */
-
 if (*p == CHAR_RIGHT_CURLY_BRACKET) max = min; else
   {
   if (*(++p) != CHAR_RIGHT_CURLY_BRACKET)
     {
     max = 0;
-    while(IS_DIGIT(*p)) max = max * 10 + (int)(*p++ - CHAR_0);
-    if (max < 0 || max > 65535)
+    while(IS_DIGIT(*p))
       {
-      *errorcodeptr = ERR5;
-      return p;
+      max = max * 10 + (int)(*p++ - CHAR_0);
+      if (max > 65535)
+        {
+        *errorcodeptr = ERR5;
+        return p;
+        }
       }
     if (max < min)
      {
@@ -1613,9 +1642,6 @@ if (*p == CHAR_RIGHT_CURLY_BRACKET) max = min; else
      }
    }
  }
 
-/* Fill in the required variables, and pass back the pointer to the terminating
-'}'. */
-
 *minp = min;
 *maxp = max;
 return p;
@@ -1698,6 +1724,7 @@ and doing the check at the end; a flag specifies which mode we are running in.
   utf      TRUE in UTF-8 / UTF-16 / UTF-32 mode
   atend    TRUE if called when the pattern is complete
   cd       the "compile data" structure
+  recurses chain of recurse_check to catch mutual recursion
 
 Returns:   the fixed length,
              or -1 if there is no fixed length,
@@ -1707,10 +1734,11 @@ Returns:   the fixed length,
 */
 
 static int
-find_fixedlength(pcre_uchar *code, BOOL utf, BOOL atend, compile_data *cd)
+find_fixedlength(pcre_uchar *code, BOOL utf, BOOL atend, compile_data *cd,
+  recurse_check *recurses)
 {
 int length = -1;
-
+recurse_check this_recurse;
 register int branchlength = 0;
 register pcre_uchar *cc = code + 1 + LINK_SIZE;
@@ -1735,7 +1763,8 @@ for (;;)
     case OP_ONCE:
     case OP_ONCE_NC:
     case OP_COND:
-    d = find_fixedlength(cc + ((op == OP_CBRA)? IMM2_SIZE : 0), utf, atend, cd);
+    d = find_fixedlength(cc + ((op == OP_CBRA)? IMM2_SIZE : 0), utf, atend, cd,
+      recurses);
     if (d < 0) return d;
     branchlength += d;
     do cc += GET(cc, 1); while (*cc == OP_ALT);
@@ -1769,7 +1798,15 @@ for (;;)
     cs = ce = (pcre_uchar *)cd->start_code + GET(cc, 1);  /* Start subpattern */
     do ce += GET(ce, 1); while (*ce == OP_ALT);           /* End subpattern */
     if (cc > cs && cc < ce) return -1;                    /* Recursion */
-    d = find_fixedlength(cs + IMM2_SIZE, utf, atend, cd);
+    else   /* Check for mutual recursion */
+      {
+      recurse_check *r = recurses;
+      for (r = recurses; r != NULL; r = r->prev) if (r->group == cs) break;
+      if (r != NULL) return -1;   /* Mutual recursion */
+      }
+    this_recurse.prev = recurses;
+    this_recurse.group = cs;
+    d = find_fixedlength(cs + IMM2_SIZE, utf, atend, cd, &this_recurse);
     if (d < 0) return d;
     branchlength += d;
     cc += 1 + LINK_SIZE;
@@ -1782,7 +1819,7 @@ for (;;)
     case OP_ASSERTBACK:
     case OP_ASSERTBACK_NOT:
     do cc += GET(cc, 1); while (*cc == OP_ALT);
-    cc += PRIV(OP_lengths)[*cc];
+    cc += 1 + LINK_SIZE;
     break;
 
     /* Skip over things that don't match chars */
@@ -2123,32 +2160,60 @@ for (;;)
    {
    case OP_CHAR:
    case OP_CHARI:
+   case OP_NOT:
+   case OP_NOTI:
    case OP_EXACT:
    case OP_EXACTI:
+   case OP_NOTEXACT:
+   case OP_NOTEXACTI:
    case OP_UPTO:
    case OP_UPTOI:
+   case OP_NOTUPTO:
+   case OP_NOTUPTOI:
    case OP_MINUPTO:
    case OP_MINUPTOI:
+   case OP_NOTMINUPTO:
+   case OP_NOTMINUPTOI:
    case OP_POSUPTO:
    case OP_POSUPTOI:
+   case OP_NOTPOSUPTO:
+   case OP_NOTPOSUPTOI:
    case OP_STAR:
    case OP_STARI:
+   case OP_NOTSTAR:
+   case OP_NOTSTARI:
    case OP_MINSTAR:
    case OP_MINSTARI:
+   case OP_NOTMINSTAR:
+   case OP_NOTMINSTARI:
    case OP_POSSTAR:
    case OP_POSSTARI:
+   case OP_NOTPOSSTAR:
+   case OP_NOTPOSSTARI:
    case OP_PLUS:
    case OP_PLUSI:
+   case OP_NOTPLUS:
+   case OP_NOTPLUSI:
    case OP_MINPLUS:
    case OP_MINPLUSI:
+   case OP_NOTMINPLUS:
+   case OP_NOTMINPLUSI:
    case OP_POSPLUS:
    case OP_POSPLUSI:
+   case OP_NOTPOSPLUS:
+   case OP_NOTPOSPLUSI:
    case OP_QUERY:
    case OP_QUERYI:
+   case OP_NOTQUERY:
+   case OP_NOTQUERYI:
    case OP_MINQUERY:
    case OP_MINQUERYI:
+   case OP_NOTMINQUERY:
+   case OP_NOTMINQUERYI:
    case OP_POSQUERY:
    case OP_POSQUERYI:
+   case OP_NOTPOSQUERY:
+   case OP_NOTPOSQUERYI:
    if (HAS_EXTRALEN(code[-1])) code += GET_EXTRALEN(code[-1]);
    break;
    }
@@ -2328,11 +2393,6 @@ bracket whose current branch will already have been scanned.
 Returns:     TRUE if what is matched could be empty
Returns: TRUE if what is matched could be empty */ -typedef struct recurse_check { - struct recurse_check *prev; - const pcre_uchar *group; -} recurse_check; - static BOOL could_be_empty_branch(const pcre_uchar *code, const pcre_uchar *endcode, BOOL utf, compile_data *cd, recurse_check *recurses) @@ -2368,6 +2428,7 @@ for (code = first_significant_code(code + PRIV(OP_lengths)[*code], TRUE); if (c == OP_RECURSE) { const pcre_uchar *scode = cd->start_code + GET(code, 1); + const pcre_uchar *endgroup = scode; BOOL empty_branch; /* Test for forward reference or uncompleted reference. This is disabled @@ -2382,20 +2443,16 @@ for (code = first_significant_code(code + PRIV(OP_lengths)[*code], TRUE); if (GET(scode, 1) == 0) return TRUE; /* Unclosed */ } - /* If we are scanning a completed pattern, there are no forward references - and all groups are complete. We need to detect whether this is a recursive - call, as otherwise there will be an infinite loop. If it is a recursion, - just skip over it. Simple recursions are easily detected. For mutual - recursions we keep a chain on the stack. */ + /* If the reference is to a completed group, we need to detect whether this + is a recursive call, as otherwise there will be an infinite loop. If it is + a recursion, just skip over it. Simple recursions are easily detected. For + mutual recursions we keep a chain on the stack. */ + do endgroup += GET(endgroup, 1); while (*endgroup == OP_ALT); + if (code >= scode && code <= endgroup) continue; /* Simple recursion */ else { recurse_check *r = recurses; - const pcre_uchar *endgroup = scode; - - do endgroup += GET(endgroup, 1); while (*endgroup == OP_ALT); - if (code >= scode && code <= endgroup) continue; /* Simple recursion */ - for (r = recurses; r != NULL; r = r->prev) if (r->group == scode) break; if (r != NULL) continue; /* Mutual recursion */ @@ -2450,7 +2507,7 @@ for (code = first_significant_code(code + PRIV(OP_lengths)[*code], TRUE); if (c == OP_BRA || c == OP_BRAPOS || c == OP_CBRA || c == OP_CBRAPOS || c == OP_ONCE || c == OP_ONCE_NC || - c == OP_COND) + c == OP_COND || c == OP_SCOND) { BOOL empty_branch; if (GET(code, 1) == 0) return TRUE; /* Hit unclosed bracket */ @@ -2466,8 +2523,8 @@ for (code = first_significant_code(code + PRIV(OP_lengths)[*code], TRUE); empty_branch = FALSE; do { - if (!empty_branch && could_be_empty_branch(code, endcode, utf, cd, NULL)) - empty_branch = TRUE; + if (!empty_branch && could_be_empty_branch(code, endcode, utf, cd, + recurses)) empty_branch = TRUE; code += GET(code, 1); } while (*code == OP_ALT); @@ -3036,7 +3093,7 @@ switch(c) end += 1 + 2 * IMM2_SIZE; break; } - list[2] = end - code; + list[2] = (pcre_uint32)(end - code); return end; } return NULL; /* Opcode not accepted */ @@ -3062,7 +3119,7 @@ Returns: TRUE if the auto-possessification is possible static BOOL compare_opcodes(const pcre_uchar *code, BOOL utf, const compile_data *cd, - const pcre_uint32 *base_list, const pcre_uchar *base_end) + const pcre_uint32 *base_list, const pcre_uchar *base_end, int *rec_limit) { pcre_uchar c; pcre_uint32 list[8]; @@ -3070,10 +3127,17 @@ const pcre_uint32 *chr_ptr; const pcre_uint32 *ochr_ptr; const pcre_uint32 *list_ptr; const pcre_uchar *next_code; +#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 +const pcre_uchar *xclass_flags; +#endif const pcre_uint8 *class_bitset; -const pcre_uint32 *set1, *set2, *set_end; +const pcre_uint8 *set1, *set2, *set_end; pcre_uint32 chr; BOOL accepted, invert_bits; +BOOL entered_a_group = FALSE; + +if (*rec_limit == 0) return FALSE; 
+--(*rec_limit);
 
 /* Note: the base_list[1] contains whether the current opcode has greedy
 (represented by a non-zero value) quantifier. This is different from
@@ -3127,8 +3191,10 @@ for(;;)
     case OP_ONCE:
     case OP_ONCE_NC:
     /* Atomic sub-patterns and assertions can always auto-possessify their
-    last iterator. */
-    return TRUE;
+    last iterator. However, if the group was entered as a result of checking
+    a previous iterator, this is not possible. */
+
+    return !entered_a_group;
     }
 
   code += PRIV(OP_lengths)[c];
@@ -3143,10 +3209,13 @@ for(;;)
 
     while (*next_code == OP_ALT)
       {
-      if (!compare_opcodes(code, utf, cd, base_list, base_end)) return FALSE;
+      if (!compare_opcodes(code, utf, cd, base_list, base_end, rec_limit))
+        return FALSE;
       code = next_code + 1 + LINK_SIZE;
      next_code += GET(next_code, 1);
      }
+
+    entered_a_group = TRUE;
    continue;
 
    case OP_BRAZERO:
@@ -3161,11 +3230,14 @@ for(;;)
    /* The bracket content will be checked by the OP_BRA/OP_CBRA case above. */
    next_code += 1 + LINK_SIZE;
-    if (!compare_opcodes(next_code, utf, cd, base_list, base_end))
+    if (!compare_opcodes(next_code, utf, cd, base_list, base_end, rec_limit))
      return FALSE;
 
    code += PRIV(OP_lengths)[c];
    continue;
+
+    default:
+    break;
    }
 
  /* Check for a supported opcode, and load its properties. */
@@ -3202,12 +3274,12 @@ for(;;)
  if (base_list[0] == OP_CLASS)
 #endif
    {
-    set1 = (pcre_uint32 *)(base_end - base_list[2]);
+    set1 = (pcre_uint8 *)(base_end - base_list[2]);
    list_ptr = list;
    }
  else
    {
-    set1 = (pcre_uint32 *)(code - list[2]);
+    set1 = (pcre_uint8 *)(code - list[2]);
    list_ptr = base_list;
    }
@@ -3216,41 +3288,53 @@ for(;;)
    {
    case OP_CLASS:
    case OP_NCLASS:
-    set2 = (pcre_uint32 *)
+    set2 = (pcre_uint8 *)
      ((list_ptr == list ? code : base_end) - list_ptr[2]);
    break;
 
-    /* OP_XCLASS cannot be supported here, because its bitset
-    is not necessarily complete. E.g: [a-\0x{200}] is stored
-    as a character range, and the appropriate bits are not set. */
+#if defined SUPPORT_UTF || !defined COMPILE_PCRE8
+    case OP_XCLASS:
+    xclass_flags = (list_ptr == list ? code : base_end) - list_ptr[2] + LINK_SIZE;
+    if ((*xclass_flags & XCL_HASPROP) != 0) return FALSE;
+    if ((*xclass_flags & XCL_MAP) == 0)
+      {
+      /* No bits are set for characters < 256. */
+      if (list[1] == 0) return (*xclass_flags & XCL_NOT) == 0;
+      /* Might be an empty repeat. */
+      continue;
+      }
+    set2 = (pcre_uint8 *)(xclass_flags + 1);
+    break;
+#endif
 
    case OP_NOT_DIGIT:
-      invert_bits = TRUE;
-      /* Fall through */
+    invert_bits = TRUE;
+    /* Fall through */
    case OP_DIGIT:
-      set2 = (pcre_uint32 *)(cd->cbits + cbit_digit);
-      break;
+    set2 = (pcre_uint8 *)(cd->cbits + cbit_digit);
+    break;
 
    case OP_NOT_WHITESPACE:
-      invert_bits = TRUE;
-      /* Fall through */
+    invert_bits = TRUE;
+    /* Fall through */
    case OP_WHITESPACE:
-      set2 = (pcre_uint32 *)(cd->cbits + cbit_space);
-      break;
+    set2 = (pcre_uint8 *)(cd->cbits + cbit_space);
+    break;
 
    case OP_NOT_WORDCHAR:
-      invert_bits = TRUE;
-      /* Fall through */
+    invert_bits = TRUE;
+    /* Fall through */
    case OP_WORDCHAR:
-      set2 = (pcre_uint32 *)(cd->cbits + cbit_word);
-      break;
+    set2 = (pcre_uint8 *)(cd->cbits + cbit_word);
+    break;
 
    default:
    return FALSE;
    }
 
-  /* Compare 4 bytes to improve speed. */
-  set_end = set1 + (32 / 4);
+  /* Because the sets are unaligned, we need
+  to perform byte comparison here. */
+  set_end = set1 + 32;
+
  if (invert_bits)
    {
    do
@@ -3392,8 +3476,7 @@ for(;;)
      rightop >= FIRST_AUTOTAB_OP && rightop <= LAST_AUTOTAB_RIGHT_OP &&
      autoposstab[leftop - FIRST_AUTOTAB_OP][rightop - FIRST_AUTOTAB_OP];
 
-    if (!accepted)
-      return FALSE;
+    if (!accepted) return FALSE;
 
    if (list[1] == 0) return TRUE;
    /* Might be an empty repeat. */
@@ -3528,7 +3611,7 @@ for(;;)
      if (chr > 255) break;
      class_bitset = (pcre_uint8 *)
        ((list_ptr == list ? code : base_end) - list_ptr[2]);
-      if ((class_bitset[chr >> 3] & (1 << (chr & 7))) != 0) return FALSE;
+      if ((class_bitset[chr >> 3] & (1U << (chr & 7))) != 0) return FALSE;
      break;
 
 #if defined SUPPORT_UTF || !defined COMPILE_PCRE8
@@ -3551,7 +3634,9 @@ for(;;)
  if (list[1] == 0) return TRUE;
  }
 
-return FALSE;
+/* Control never reaches here. There used to be a fail-safe return FALSE; here,
+but some compilers complain about an unreachable statement. */
+
 }
@@ -3578,11 +3663,20 @@ register pcre_uchar c;
 const pcre_uchar *end;
 pcre_uchar *repeat_opcode;
 pcre_uint32 list[8];
+int rec_limit;
 
 for (;;)
   {
   c = *code;
 
+  /* When a pattern with bad UTF-8 encoding is compiled with NO_UTF_CHECK,
+  it may compile without complaining, but may get into a loop here if the code
+  pointer points to a bad value. This is, of course, a documented possibility
+  when NO_UTF_CHECK is set, so it isn't a bug, but we can detect this case and
+  just give up on this optimization. */
+
+  if (c >= OP_TABLE_LENGTH) return;
+
   if (c >= OP_STAR && c <= OP_TYPEPOSUPTO)
     {
     c -= get_repeat_base(c) - OP_STAR;
@@ -3590,7 +3684,8 @@ for (;;)
      get_chr_property_list(code, utf, cd->fcc, list) : NULL;
    list[1] = c == OP_STAR || c == OP_PLUS || c == OP_QUERY || c == OP_UPTO;
 
-    if (end != NULL && compare_opcodes(end, utf, cd, list, end))
+    rec_limit = 1000;
+    if (end != NULL && compare_opcodes(end, utf, cd, list, end, &rec_limit))
      {
      switch(c)
        {
@@ -3623,7 +3718,7 @@ for (;;)
        break;
 
        case OP_MINUPTO:
-        *code += OP_MINUPTO - OP_UPTO;
+        *code += OP_POSUPTO - OP_MINUPTO;
        break;
        }
      }
@@ -3646,7 +3741,8 @@ for (;;)
 
    list[1] = (c & 1) == 0;
 
-    if (compare_opcodes(end, utf, cd, list, end))
+    rec_limit = 1000;
+    if (compare_opcodes(end, utf, cd, list, end, &rec_limit))
      {
      switch (c)
        {
@@ -3810,11 +3906,11 @@ didn't consider this to be a POSIX class. Likewise for [:1234:].
 
 The problem in trying to be exactly like Perl is in the handling of escapes. We
 have to be sure that [abc[:x\]pqr] is *not* treated as containing a POSIX
 class, but [abc[:x\]pqr:]] is (so that an error can be generated). The code
-below handles the special case of \], but does not try to do any other escape
-processing. This makes it different from Perl for cases such as [:l\ower:]
-where Perl recognizes it as the POSIX class "lower" but PCRE does not recognize
-"l\ower". This is a lesser evil than not diagnosing bad classes when Perl does,
-I think.
+below handles the special cases \\ and \], but does not try to do any other
+escape processing. This makes it different from Perl for cases such as
+[:l\ower:] where Perl recognizes it as the POSIX class "lower" but PCRE does
+not recognize "l\ower". This is a lesser evil than not diagnosing bad classes
+when Perl does, I think.
 
 A user pointed out that PCRE was rejecting [:a[:digit:]] whereas Perl was not.
 It seems that the appearance of a nested POSIX class supersedes an apparent
@@ -3841,21 +3937,16 @@ pcre_uchar terminator;      /* Don't combine these lines; the Solaris cc */
 terminator = *(++ptr);      /* compiler warns about "non-constant" initializer. */
 for (++ptr; *ptr != CHAR_NULL; ptr++)
   {
-  if (*ptr == CHAR_BACKSLASH && ptr[1] == CHAR_RIGHT_SQUARE_BRACKET)
+  if (*ptr == CHAR_BACKSLASH &&
+      (ptr[1] == CHAR_RIGHT_SQUARE_BRACKET ||
+       ptr[1] == CHAR_BACKSLASH))
     ptr++;
-  else if (*ptr == CHAR_RIGHT_SQUARE_BRACKET) return FALSE;
-  else
+  else if ((*ptr == CHAR_LEFT_SQUARE_BRACKET && ptr[1] == terminator) ||
+            *ptr == CHAR_RIGHT_SQUARE_BRACKET) return FALSE;
+  else if (*ptr == terminator && ptr[1] == CHAR_RIGHT_SQUARE_BRACKET)
     {
-    if (*ptr == terminator && ptr[1] == CHAR_RIGHT_SQUARE_BRACKET)
-      {
-      *endptr = ptr;
-      return TRUE;
-      }
-    if (*ptr == CHAR_LEFT_SQUARE_BRACKET &&
-        (ptr[1] == CHAR_COLON || ptr[1] == CHAR_DOT ||
-         ptr[1] == CHAR_EQUALS_SIGN) &&
-        check_posix_syntax(ptr, endptr))
-      return FALSE;
+    *endptr = ptr;
+    return TRUE;
    }
  }
 return FALSE;
@@ -3909,48 +4000,42 @@ have their offsets adjusted. That is one of the jobs of this function. Before
 it is called, the partially compiled regex must be temporarily terminated with
 OP_END.
 
-This function has been extended with the possibility of forward references for
-recursions and subroutine calls. It must also check the list of such references
-for the group we are dealing with. If it finds that one of the recursions in
-the current group is on this list, it adjusts the offset in the list, not the
-value in the reference (which is a group number).
+This function has been extended to cope with forward references for recursions
+and subroutine calls. It must check the list of such references for the
+group we are dealing with. If it finds that one of the recursions in the
+current group is on this list, it does not adjust the value in the reference
+(which is a group number). After the group has been scanned, all the offsets in
+the forward reference list for the group are adjusted.
 
 Arguments:
   group      points to the start of the group
   adjust     the amount by which the group is to be moved
   utf        TRUE in UTF-8 / UTF-16 / UTF-32 mode
   cd         contains pointers to tables etc.
-  save_hwm   the hwm forward reference pointer at the start of the group
+  save_hwm_offset   the hwm forward reference offset at the start of the group
 
 Returns:     nothing
 */
 
 static void
 adjust_recurse(pcre_uchar *group, int adjust, BOOL utf, compile_data *cd,
-  pcre_uchar *save_hwm)
+  size_t save_hwm_offset)
 {
+int offset;
+pcre_uchar *hc;
 pcre_uchar *ptr = group;
 
 while ((ptr = (pcre_uchar *)find_recurse(ptr, utf)) != NULL)
   {
-  int offset;
-  pcre_uchar *hc;
-
-  /* See if this recursion is on the forward reference list. If so, adjust the
-  reference. */
-
-  for (hc = save_hwm; hc < cd->hwm; hc += LINK_SIZE)
+  for (hc = (pcre_uchar *)cd->start_workspace + save_hwm_offset; hc < cd->hwm;
+       hc += LINK_SIZE)
     {
    offset = (int)GET(hc, 0);
-    if (cd->start_code + offset == ptr + 1)
-      {
-      PUT(hc, 0, offset + adjust);
-      break;
-      }
+    if (cd->start_code + offset == ptr + 1) break;
    }
 
-  /* Otherwise, adjust the recursion offset if it's after the start of this
-  group. */
+  /* If we have not found this recursion on the forward reference list, adjust
+  the recursion's offset if it's after the start of this group. */
 
  if (hc >= cd->hwm)
    {
@@ -3960,6 +4045,15 @@ while ((ptr = (pcre_uchar *)find_recurse(ptr, utf)) != NULL)
 
  ptr += 1 + LINK_SIZE;
  }
+
+/* Now adjust all forward reference offsets for the group. */
+
+for (hc = (pcre_uchar *)cd->start_workspace + save_hwm_offset; hc < cd->hwm;
+     hc += LINK_SIZE)
+  {
+  offset = (int)GET(hc, 0);
+  PUT(hc, 0, offset + adjust);
+  }
 }
@@ -4062,12 +4156,16 @@ for (c = *cptr; c <= d; c++)
 
 if (c > d) return -1;  /* Reached end of range */
 
+/* Found a character that has a single other case. Search for the end of the
+range, which is either the end of the input range, or a character that has zero
+or more than one other cases. */
+
 *ocptr = othercase;
 next = othercase + 1;
 
 for (++c; c <= d; c++)
   {
-  if (UCD_OTHERCASE(c) != next) break;
+  if ((co = UCD_CASESET(c)) != 0 || UCD_OTHERCASE(c) != next) break;
   next++;
   }
@@ -4105,6 +4203,7 @@ add_to_class(pcre_uint8 *classbits, pcre_uchar **uchardptr, int options,
   compile_data *cd, pcre_uint32 start, pcre_uint32 end)
 {
 pcre_uint32 c;
+pcre_uint32 classbits_end = (end <= 0xff ? end : 0xff);
 int n8 = 0;
 
 /* If caseless matching is required, scan the range and process alternate
@@ -4139,7 +4238,11 @@ if ((options & PCRE_CASELESS) != 0)
      range. Otherwise, use a recursive call to add the additional range. */
 
      else if (oc < start && od >= start - 1) start = oc; /* Extend downwards */
-      else if (od > end && oc <= end + 1) end = od;       /* Extend upwards */
+      else if (od > end && oc <= end + 1)
+        {
+        end = od;       /* Extend upwards */
+        if (end > classbits_end) classbits_end = (end <= 0xff ? end : 0xff);
+        }
      else n8 += add_to_class(classbits, uchardptr, options, cd, oc, od);
      }
    }
@@ -4148,7 +4251,7 @@ if ((options & PCRE_CASELESS) != 0)
 
 /* Not UTF-mode, or no UCP */
 
-for (c = start; c <= end && c < 256; c++)
+for (c = start; c <= classbits_end; c++)
  {
  SETBIT(classbits, cd->fcc[c]);
  n8++;
@@ -4173,22 +4276,21 @@ in all cases. */
 
 #endif /* COMPILE_PCRE[8|16] */
 
-/* If all characters are less than 256, use the bit map. Otherwise use extra
-data. */
+/* Use the bitmap for characters < 256. Otherwise use extra data. */
 
-if (end < 0x100)
+for (c = start; c <= classbits_end; c++)
  {
-  for (c = start; c <= end; c++)
-    {
-    n8++;
-    SETBIT(classbits, c);
-    }
+  /* Regardless of start, c will always be <= 255. */
+  SETBIT(classbits, c);
+  n8++;
  }
-else
+
+#if defined SUPPORT_UTF || !defined COMPILE_PCRE8
+if (start <= 0xff) start = 0xff + 1;
+
+if (end >= start)
  {
  pcre_uchar *uchardata = *uchardptr;
-
 #ifdef SUPPORT_UTF
  if ((options & PCRE_UTF8) != 0)   /* All UTFs use the same flag bit */
    {
@@ -4228,6 +4330,7 @@ else
 
 *uchardptr = uchardata;   /* Update extra data pointer */
 }
+#endif /* SUPPORT_UTF || !COMPILE_PCRE8 */
 
 return n8;    /* Number of 8-bit characters */
 }
@@ -4379,7 +4482,7 @@ const pcre_uchar *tempptr;
 const pcre_uchar *nestptr = NULL;
 pcre_uchar *previous = NULL;
 pcre_uchar *previous_callout = NULL;
-pcre_uchar *save_hwm = NULL;
+size_t item_hwm_offset = 0;
 pcre_uint8 classbits[32];
 
 /* We can fish out the UTF-8 setting once and for all into a BOOL, but we
@@ -4449,6 +4552,9 @@ for (;; ptr++)
   BOOL reset_bracount;
   int class_has_8bitchar;
   int class_one_char;
+#if defined SUPPORT_UTF || !defined COMPILE_PCRE8
+  BOOL xclass_has_prop;
+#endif
   int newoptions;
   int recno;
   int refsign;
@@ -4461,6 +4567,10 @@ for (;; ptr++)
   pcre_uint32 ec;
   pcre_uchar mcbuffer[8];
 
+  /* Come here to restart the loop without advancing the pointer. */
+
+  REDO_LOOP:
+
   /* Get next character in the pattern */
 
   c = *ptr;
@@ -4486,7 +4596,8 @@ for (;; ptr++)
    if (code > cd->start_workspace + cd->workspace_size -
        WORK_SIZE_SAFETY_MARGIN)                     /* Check for overrun */
      {
-      *errorcodeptr = ERR52;
+      *errorcodeptr = (code >= cd->start_workspace + cd->workspace_size)?
+        ERR52 : ERR87;
      goto FAILED;
      }
@@ -4534,16 +4645,16 @@ for (;; ptr++)
    /* In the real compile phase, just check the workspace used by the forward
    reference list. */
 
-    else if (cd->hwm > cd->start_workspace + cd->workspace_size -
-             WORK_SIZE_SAFETY_MARGIN)
+    else if (cd->hwm > cd->start_workspace + cd->workspace_size)
      {
      *errorcodeptr = ERR52;
      goto FAILED;
      }
 
-  /* If in \Q...\E, check for the end; if not, we have a literal */
+  /* If in \Q...\E, check for the end; if not, we have a literal. Otherwise an
+  isolated \E is ignored. */
 
-  if (inescq && c != CHAR_NULL)
+  if (c != CHAR_NULL)
    {
    if (c == CHAR_BACKSLASH && ptr[1] == CHAR_E)
      {
@@ -4551,7 +4662,7 @@ for (;; ptr++)
      ptr++;
      continue;
      }
-    else
+    else if (inescq)
      {
      if (previous_callout != NULL)
        {
@@ -4566,18 +4677,27 @@ for (;; ptr++)
        }
      goto NORMAL_CHAR;
      }
-    /* Control does not reach here. */
+
+    /* Check for the start of a \Q...\E sequence. We must do this here rather
+    than later in case it is immediately followed by \E, which turns it into a
+    "do nothing" sequence. */
+
+    if (c == CHAR_BACKSLASH && ptr[1] == CHAR_Q)
+      {
+      inescq = TRUE;
+      ptr++;
+      continue;
+      }
    }
 
-  /* In extended mode, skip white space and comments. We need a loop in order
-  to check for more white space and more comments after a comment. */
+  /* In extended mode, skip white space and comments. */
 
  if ((options & PCRE_EXTENDED) != 0)
    {
-    for (;;)
+    const pcre_uchar *wscptr = ptr;
+    while (MAX_255(c) && (cd->ctypes[c] & ctype_space) != 0) c = *(++ptr);
+
+    if (c == CHAR_NUMBER_SIGN)
      {
-      while (MAX_255(c) && (cd->ctypes[c] & ctype_space) != 0) c = *(++ptr);
-      if (c != CHAR_NUMBER_SIGN) break;
      ptr++;
      while (*ptr != CHAR_NULL)
        {
@@ -4591,8 +4711,29 @@ for (;; ptr++)
        if (utf) FORWARDCHAR(ptr);
 #endif
        }
-      c = *ptr;     /* Either NULL or the char after a newline */
      }
+
+    /* If we skipped any characters, restart the loop. Otherwise, we didn't see
+    a comment. */
+
+    if (ptr > wscptr) goto REDO_LOOP;
+    }
+
+  /* Skip over (?# comments. We need to do this here because we want to know if
+  the next thing is a quantifier, and these comments may come between an item
+  and its quantifier. */
+
+  if (c == CHAR_LEFT_PARENTHESIS && ptr[1] == CHAR_QUESTION_MARK &&
+      ptr[2] == CHAR_NUMBER_SIGN)
+    {
+    ptr += 3;
+    while (*ptr != CHAR_NULL && *ptr != CHAR_RIGHT_PARENTHESIS) ptr++;
+    if (*ptr == CHAR_NULL)
+      {
+      *errorcodeptr = ERR18;
+      goto FAILED;
+      }
+    continue;
+    }
 
  /* See if the next thing is a quantifier. */
@@ -4656,7 +4797,8 @@ for (;; ptr++)
    previous = NULL;
    if ((options & PCRE_MULTILINE) != 0)
      {
-      if (firstcharflags == REQ_UNSET) firstcharflags = REQ_NONE;
+      if (firstcharflags == REQ_UNSET)
+        zerofirstcharflags = firstcharflags = REQ_NONE;
      *code++ = OP_CIRCM;
      }
    else *code++ = OP_CIRC;
@@ -4677,6 +4819,7 @@ for (;; ptr++)
    zeroreqchar = reqchar;
    zeroreqcharflags = reqcharflags;
    previous = code;
+    item_hwm_offset = cd->hwm - cd->start_workspace;
    *code++ = ((options & PCRE_DOTALL) != 0)? OP_ALLANY: OP_ANY;
    break;
@@ -4714,20 +4857,21 @@ for (;; ptr++)
      if (STRNCMP_UC_C8(ptr+1, STRING_WEIRD_STARTWORD, 6) == 0)
        {
        nestptr = ptr + 7;
-        ptr = sub_start_of_word - 1;
-        continue;
+        ptr = sub_start_of_word;
+        goto REDO_LOOP;
        }
 
      if (STRNCMP_UC_C8(ptr+1, STRING_WEIRD_ENDWORD, 6) == 0)
        {
        nestptr = ptr + 7;
-        ptr = sub_end_of_word - 1;
-        continue;
+        ptr = sub_end_of_word;
+        goto REDO_LOOP;
        }
      }
 
    /* Handle a real character class. */
 
    previous = code;
+    item_hwm_offset = cd->hwm - cd->start_workspace;
 
    /* PCRE supports POSIX class stuff inside a class. Perl gives an error if
    they are encountered at the top level, so we'll do that too. */
@@ -4783,13 +4927,26 @@ for (;; ptr++)
 
    should_flip_negation = FALSE;
 
+    /* Extended class (xclass) will be used when characters > 255
+    might match. */
+
+#if defined SUPPORT_UTF || !defined COMPILE_PCRE8
+    xclass = FALSE;
+    class_uchardata = code + LINK_SIZE + 2;   /* For XCLASS items */
+    class_uchardata_base = class_uchardata;   /* Save the start */
+#endif
+
    /* For optimization purposes, we track some properties of the class:
    class_has_8bitchar will be non-zero if the class contains at least one <
    256 character; class_one_char will be 1 if the class contains just one
-    character. */
+    character; xclass_has_prop will be TRUE if unicode property checks
+    are present in the class. */
 
    class_has_8bitchar = 0;
    class_one_char = 0;
+#if defined SUPPORT_UTF || !defined COMPILE_PCRE8
+    xclass_has_prop = FALSE;
+#endif
 
    /* Initialize the 32-char bit map to all zeros. We build the map in a
    temporary bit of memory, in case the class contains fewer than two
@@ -4798,12 +4955,6 @@ for (;; ptr++)
 
    memset(classbits, 0, 32 * sizeof(pcre_uint8));
 
-#if defined SUPPORT_UTF || !defined COMPILE_PCRE8
-    xclass = FALSE;
-    class_uchardata = code + LINK_SIZE + 2;   /* For XCLASS items */
-    class_uchardata_base = class_uchardata;   /* Save the start */
-#endif
-
    /* Process characters until ] is reached. By writing this as a "do" it
    means that an initial ] is taken as a data character. At the start of the
    loop, c contains the first byte of the character. */
@@ -4826,10 +4977,11 @@ for (;; ptr++)
      (which is on the stack). We have to remember that there was XCLASS data,
      however. */
 
+      if (class_uchardata > class_uchardata_base) xclass = TRUE;
+
      if (lengthptr != NULL && class_uchardata > class_uchardata_base)
        {
-        xclass = TRUE;
-        *lengthptr += class_uchardata - class_uchardata_base;
+        *lengthptr += (int)(class_uchardata - class_uchardata_base);
        class_uchardata = class_uchardata_base;
        }
 #endif
@@ -4927,13 +5079,32 @@ for (;; ptr++)
          *class_uchardata++ = local_negate? XCL_NOTPROP : XCL_PROP;
          *class_uchardata++ = ptype;
          *class_uchardata++ = 0;
+          xclass_has_prop = TRUE;
          ptr = tempptr + 1;
          continue;
 
-          /* For all other POSIX classes, no special action is taken in UCP
-          mode. Fall through to the non_UCP case. */
+          /* For the other POSIX classes (ascii, cntrl, xdigit) we are going
+          to fall through to the non-UCP case and build a bit map for
+          characters with code points less than 256. If we are in a negated
+          POSIX class, characters with code points greater than 255 must
+          either all match or all not match. In the special case where we
+          have not yet generated any xclass data, and this is the final item
+          in the overall class, we need do nothing: later on, the opcode
+          OP_NCLASS will be used to indicate that characters greater than 255
+          are acceptable. If we have already seen an xclass item or one may
+          follow (we have to assume that it might if this is not the end of
+          the class), explicitly list all wide codepoints, which will then
+          either not match or match, depending on whether the class is or is
+          not negated. */
 
          default:
+          if (local_negate &&
+              (xclass || tempptr[2] != CHAR_RIGHT_SQUARE_BRACKET))
+            {
+            *class_uchardata++ = XCL_RANGE;
+            class_uchardata += PRIV(ord2utf)(0x100, class_uchardata);
+            class_uchardata += PRIV(ord2utf)(0x10ffff, class_uchardata);
+            }
          break;
          }
        }
@@ -5097,9 +5268,9 @@ for (;; ptr++)
            cd, PRIV(vspace_list));
          continue;
 
-#ifdef SUPPORT_UCP
          case ESC_p:
          case ESC_P:
+#ifdef SUPPORT_UCP
            {
            BOOL negated;
            unsigned int ptype = 0, pdata = 0;
@@ -5109,9 +5280,13 @@ for (;; ptr++)
              XCL_PROP : XCL_NOTPROP;
            *class_uchardata++ = ptype;
            *class_uchardata++ = pdata;
+            xclass_has_prop = TRUE;
            class_has_8bitchar--;                /* Undo! */
            continue;
            }
+#else
+          *errorcodeptr = ERR45;
+          goto FAILED;
 #endif
          /* Unrecognized escapes are faulted if PCRE is running in its
          strict mode. By default, for compatibility with Perl, they are
@@ -5268,16 +5443,20 @@ for (;; ptr++)
      CLASS_SINGLE_CHARACTER:
      if (class_one_char < 2) class_one_char++;
 
-      /* If class_one_char is 1, we have the first single character in the
-      class, and there have been no prior ranges, or XCLASS items generated by
-      escapes. If this is the final character in the class, we can optimize by
-      turning the item into a 1-character OP_CHAR[I] if it's positive, or
-      OP_NOT[I] if it's negative. In the positive case, it can cause firstchar
-      to be set. Otherwise, there can be no first char if this item is first,
-      whatever repeat count may follow. In the case of reqchar, save the
-      previous value for reinstating. */
+      /* If xclass_has_prop is false and class_one_char is 1, we have the first
+      single character in the class, and there have been no prior ranges, or
+      XCLASS items generated by escapes. If this is the final character in the
+      class, we can optimize by turning the item into a 1-character OP_CHAR[I]
+      if it's positive, or OP_NOT[I] if it's negative. In the positive case, it
+      can cause firstchar to be set. Otherwise, there can be no first char if
+      this item is first, whatever repeat count may follow. In the case of
+      reqchar, save the previous value for reinstating. */
 
-      if (class_one_char == 1 && ptr[1] == CHAR_RIGHT_SQUARE_BRACKET)
+      if (!inescq &&
+#ifdef SUPPORT_UCP
+          !xclass_has_prop &&
+#endif
+          class_one_char == 1 && ptr[1] == CHAR_RIGHT_SQUARE_BRACKET)
        {
        ptr++;
        zeroreqchar = reqchar;
@@ -5393,16 +5572,46 @@ for (;; ptr++)
    actual compiled code. */
 
 #ifdef SUPPORT_UTF
-    if (xclass && (!should_flip_negation || (options & PCRE_UCP) != 0))
+    if (xclass && (xclass_has_prop || !should_flip_negation ||
+        (options & PCRE_UCP) != 0))
 #elif !defined COMPILE_PCRE8
-    if (xclass && !should_flip_negation)
+    if (xclass && (xclass_has_prop || !should_flip_negation))
 #endif
 #if defined SUPPORT_UTF || !defined COMPILE_PCRE8
      {
+      /* For non-UCP wide characters, in a non-negative class containing \S or
+      similar (should_flip_negation is set), all characters greater than 255
+      must be in the class. */
+
+      if (
+#if defined COMPILE_PCRE8
+          utf &&
+#endif
+          should_flip_negation && !negate_class && (options & PCRE_UCP) == 0)
+        {
+        *class_uchardata++ = XCL_RANGE;
+        if (utf)   /* Will always be utf in the 8-bit library */
+          {
+          class_uchardata += PRIV(ord2utf)(0x100, class_uchardata);
+          class_uchardata += PRIV(ord2utf)(0x10ffff, class_uchardata);
+          }
+        else       /* Can only happen for the 16-bit & 32-bit libraries */
+          {
+#if defined COMPILE_PCRE16
+          *class_uchardata++ = 0x100;
+          *class_uchardata++ = 0xffffu;
+#elif defined COMPILE_PCRE32
+          *class_uchardata++ = 0x100;
+          *class_uchardata++ = 0xffffffffu;
+#endif
+          }
+        }
+
      *class_uchardata++ = XCL_END;    /* Marks the end of extra data */
      *code++ = OP_XCLASS;
      code += LINK_SIZE;
      *code = negate_class? XCL_NOT:0;
+      if (xclass_has_prop) *code |= XCL_HASPROP;
 
      /* If the map is required, move up the extra data to make room for it;
      otherwise just move the code pointer to the end of the extra data. */
@@ -5412,6 +5621,8 @@ for (;; ptr++)
        *code++ |= XCL_MAP;
        memmove(code + (32 / sizeof(pcre_uchar)), code,
          IN_UCHARS(class_uchardata - code));
+        if (negate_class && !xclass_has_prop)
+          for (c = 0; c < 32; c++) classbits[c] = ~classbits[c];
        memcpy(code, classbits, 32);
        code = class_uchardata + (32 / sizeof(pcre_uchar));
        }
@@ -5422,6 +5633,12 @@ for (;; ptr++)
      PUT(previous, 1, (int)(code - previous));
      break;   /* End of class handling */
      }
+
+    /* Even though any XCLASS list is now discarded, we must allow for
+    its memory. */
+
+    if (lengthptr != NULL)
+      *lengthptr += (int)(class_uchardata - class_uchardata_base);
 #endif
 
    /* If there are no characters > 255, or they are all to be included or
@@ -5522,6 +5739,21 @@ for (;; ptr++)
      ptr = p - 1;      /* Character before the next significant one. */
      }
 
+    /* We also need to skip over (?# comments, which are not dependent on
+    extended mode. */
+
+    if (ptr[1] == CHAR_LEFT_PARENTHESIS && ptr[2] == CHAR_QUESTION_MARK &&
+        ptr[3] == CHAR_NUMBER_SIGN)
+      {
+      ptr += 4;
+      while (*ptr != CHAR_NULL && *ptr != CHAR_RIGHT_PARENTHESIS) ptr++;
+      if (*ptr == CHAR_NULL)
+        {
+        *errorcodeptr = ERR18;
+        goto FAILED;
+        }
+      }
+
    /* If the next character is '+', we have a possessive quantifier. This
    implies greediness, whatever the setting of the PCRE_UNGREEDY option.
    If the next character is '?'
    this is a minimizing repeat, by default,
@@ -5822,6 +6054,7 @@ for (;; ptr++)
      {
      register int i;
      int len = (int)(code - previous);
+      size_t base_hwm_offset = item_hwm_offset;
      pcre_uchar *bralink = NULL;
      pcre_uchar *brazeroptr = NULL;
@@ -5876,7 +6109,7 @@ for (;; ptr++)
      if (repeat_max <= 1)    /* Covers 0, 1, and unlimited */
        {
        *code = OP_END;
-        adjust_recurse(previous, 1, utf, cd, save_hwm);
+        adjust_recurse(previous, 1, utf, cd, item_hwm_offset);
        memmove(previous + 1, previous, IN_UCHARS(len));
        code++;
        if (repeat_max == 0)
@@ -5900,7 +6133,7 @@ for (;; ptr++)
        {
        int offset;
        *code = OP_END;
-        adjust_recurse(previous, 2 + LINK_SIZE, utf, cd, save_hwm);
+        adjust_recurse(previous, 2 + LINK_SIZE, utf, cd, item_hwm_offset);
        memmove(previous + 2 + LINK_SIZE, previous, IN_UCHARS(len));
        code += 2 + LINK_SIZE;
        *previous++ = OP_BRAZERO + repeat_type;
@@ -5963,26 +6196,25 @@ for (;; ptr++)
        for (i = 1; i < repeat_min; i++)
          {
          pcre_uchar *hc;
-          pcre_uchar *this_hwm = cd->hwm;
+          size_t this_hwm_offset = cd->hwm - cd->start_workspace;
          memcpy(code, previous, IN_UCHARS(len));
 
          while (cd->hwm > cd->start_workspace + cd->workspace_size -
-                 WORK_SIZE_SAFETY_MARGIN - (this_hwm - save_hwm))
+                 WORK_SIZE_SAFETY_MARGIN -
+                 (this_hwm_offset - base_hwm_offset))
            {
-            int save_offset = save_hwm - cd->start_workspace;
-            int this_offset = this_hwm - cd->start_workspace;
            *errorcodeptr = expand_workspace(cd);
            if (*errorcodeptr != 0) goto FAILED;
-            save_hwm = (pcre_uchar *)cd->start_workspace + save_offset;
-            this_hwm = (pcre_uchar *)cd->start_workspace + this_offset;
            }
 
-          for (hc = save_hwm; hc < this_hwm; hc += LINK_SIZE)
+          for (hc = (pcre_uchar *)cd->start_workspace + base_hwm_offset;
+               hc < (pcre_uchar *)cd->start_workspace + this_hwm_offset;
+               hc += LINK_SIZE)
            {
            PUT(cd->hwm, 0, GET(hc, 0) + len);
            cd->hwm += LINK_SIZE;
            }
-          save_hwm = this_hwm;
+          base_hwm_offset = this_hwm_offset;
          code += len;
          }
        }
@@ -6027,7 +6259,7 @@ for (;; ptr++)
      else for (i = repeat_max - 1; i >= 0; i--)
        {
        pcre_uchar *hc;
-        pcre_uchar *this_hwm = cd->hwm;
+        size_t this_hwm_offset = cd->hwm - cd->start_workspace;
 
        *code++ = OP_BRAZERO + repeat_type;
@@ -6049,22 +6281,21 @@ for (;; ptr++)
        copying them. */
 
        while (cd->hwm > cd->start_workspace + cd->workspace_size -
-               WORK_SIZE_SAFETY_MARGIN - (this_hwm - save_hwm))
+               WORK_SIZE_SAFETY_MARGIN -
+               (this_hwm_offset - base_hwm_offset))
          {
-          int save_offset = save_hwm - cd->start_workspace;
-          int this_offset = this_hwm - cd->start_workspace;
          *errorcodeptr = expand_workspace(cd);
          if (*errorcodeptr != 0) goto FAILED;
-          save_hwm = (pcre_uchar *)cd->start_workspace + save_offset;
-          this_hwm = (pcre_uchar *)cd->start_workspace + this_offset;
          }
 
-        for (hc = save_hwm; hc < this_hwm; hc += LINK_SIZE)
+        for (hc = (pcre_uchar *)cd->start_workspace + base_hwm_offset;
+             hc < (pcre_uchar *)cd->start_workspace + this_hwm_offset;
+             hc += LINK_SIZE)
          {
          PUT(cd->hwm, 0, GET(hc, 0) + len + ((i != 0)? 2+LINK_SIZE : 1));
          cd->hwm += LINK_SIZE;
          }
-        save_hwm = this_hwm;
+        base_hwm_offset = this_hwm_offset;
        code += len;
        }
@@ -6147,6 +6378,12 @@ for (;; ptr++)
          while (*scode == OP_ALT);
          }
 
+        /* A conditional group with only one branch has an implicit empty
+        alternative branch. */
+
+        if (*bracode == OP_COND && bracode[GET(bracode,1)] != OP_ALT)
+          *bracode = OP_SCOND;
+
        /* Handle possessive quantifiers. */
 
        if (possessive_quantifier)
@@ -6160,11 +6397,11 @@ for (;; ptr++)
          {
          int nlen = (int)(code - bracode);
          *code = OP_END;
-          adjust_recurse(bracode, 1 + LINK_SIZE, utf, cd, save_hwm);
+          adjust_recurse(bracode, 1 + LINK_SIZE, utf, cd, item_hwm_offset);
          memmove(bracode + 1 + LINK_SIZE, bracode, IN_UCHARS(nlen));
          code += 1 + LINK_SIZE;
          nlen += 1 + LINK_SIZE;
-          *bracode = OP_BRAPOS;
+          *bracode = (*bracode == OP_COND)? OP_BRAPOS : OP_SBRAPOS;
          *code++ = OP_KETRPOS;
          PUTINC(code, 0, nlen);
          PUT(bracode, 1, nlen);
@@ -6294,7 +6531,7 @@ for (;; ptr++)
      else
        {
        *code = OP_END;
-        adjust_recurse(tempcode, 1 + LINK_SIZE, utf, cd, save_hwm);
+        adjust_recurse(tempcode, 1 + LINK_SIZE, utf, cd, item_hwm_offset);
        memmove(tempcode + 1 + LINK_SIZE, tempcode, IN_UCHARS(len));
        code += 1 + LINK_SIZE;
        len += 1 + LINK_SIZE;
@@ -6343,7 +6580,7 @@ for (;; ptr++)
 
        default:
        *code = OP_END;
-        adjust_recurse(tempcode, 1 + LINK_SIZE, utf, cd, save_hwm);
+        adjust_recurse(tempcode, 1 + LINK_SIZE, utf, cd, item_hwm_offset);
        memmove(tempcode + 1 + LINK_SIZE, tempcode, IN_UCHARS(len));
        code += 1 + LINK_SIZE;
        len += 1 + LINK_SIZE;
@@ -6372,15 +6609,10 @@ for (;; ptr++)
    parenthesis forms.  */
 
    case CHAR_LEFT_PARENTHESIS:
-    newoptions = options;
-    skipbytes = 0;
-    bravalue = OP_CBRA;
-    save_hwm = cd->hwm;
-    reset_bracount = FALSE;
+    ptr++;
 
-    /* First deal with various "verbs" that can be introduced by '*'. */
+    /* Now deal with various "verbs" that can be introduced by '*'. */
 
-    ptr++;
    if (ptr[0] == CHAR_ASTERISK && (ptr[1] == ':'
         || (MAX_255(ptr[1]) && ((cd->ctypes[ptr[1]] & ctype_letter) != 0))))
      {
@@ -6439,8 +6671,21 @@ for (;; ptr++)
          cd->had_accept = TRUE;
          for (oc = cd->open_caps; oc != NULL; oc = oc->next)
            {
-            *code++ = OP_CLOSE;
-            PUT2INC(code, 0, oc->number);
+            if (lengthptr != NULL)
+              {
+#ifdef COMPILE_PCRE8
+              *lengthptr += 1 + IMM2_SIZE;
+#elif defined COMPILE_PCRE16
+              *lengthptr += 2 + IMM2_SIZE;
+#elif defined COMPILE_PCRE32
+              *lengthptr += 4 + IMM2_SIZE;
+#endif
+              }
+            else
+              {
+              *code++ = OP_CLOSE;
+              PUT2INC(code, 0, oc->number);
+              }
            }
          setverb = *code++ =
            (cd->assert_depth > 0)? OP_ASSERT_ACCEPT : OP_ACCEPT;
@@ -6469,9 +6714,17 @@ for (;; ptr++)
            goto FAILED;
            }
          setverb = *code++ = verbs[i].op_arg;
-          *code++ = arglen;
-          memcpy(code, arg, IN_UCHARS(arglen));
-          code += arglen;
+          if (lengthptr != NULL)    /* In pass 1 just add in the length */
+            {                       /* to avoid potential workspace */
+            *lengthptr += arglen;   /* overflow. */
+            *code++ = 0;
+            }
+          else
+            {
+            *code++ = arglen;
+            memcpy(code, arg, IN_UCHARS(arglen));
+            code += arglen;
+            }
          *code++ = 0;
          }
@@ -6501,10 +6754,18 @@ for (;; ptr++)
        goto FAILED;
        }
 
+      /* Initialize for "real" parentheses */
+
+      newoptions = options;
+      skipbytes = 0;
+      bravalue = OP_CBRA;
+      item_hwm_offset = cd->hwm - cd->start_workspace;
+      reset_bracount = FALSE;
+
      /* Deal with the extended parentheses; all are introduced by '?', and the
      appearance of any of them means that this is not a capturing group. */
 
-    else if (*ptr == CHAR_QUESTION_MARK)
+    if (*ptr == CHAR_QUESTION_MARK)
      {
      int i, set, unset, namelen;
      int *optset;
@@ -6513,20 +6774,10 @@ for (;; ptr++)
      switch (*(++ptr))
        {
-        case CHAR_NUMBER_SIGN:                 /* Comment; skip to ket */
-        ptr++;
-        while (*ptr != CHAR_NULL && *ptr != CHAR_RIGHT_PARENTHESIS) ptr++;
-        if (*ptr == CHAR_NULL)
-          {
-          *errorcodeptr = ERR18;
-          goto FAILED;
-          }
-        continue;
-
-
-        /* ------------------------------------------------------------ */
        case CHAR_VERTICAL_LINE:  /* Reset capture count for each branch */
        reset_bracount = TRUE;
+        cd->dupgroups = TRUE;     /* Record (?| encountered */
        /* Fall through */
 
        /* ------------------------------------------------------------ */
@@ -6563,6 +6814,15 @@ for (;; ptr++)
            for (i = 3;; i++) if (!IS_DIGIT(ptr[i])) break;
            if (ptr[i] == CHAR_RIGHT_PARENTHESIS)
              tempptr += i + 1;
+
+            /* tempptr should now be pointing to the opening parenthesis of the
+            assertion condition. */
+
+            if (*tempptr != CHAR_LEFT_PARENTHESIS)
+              {
+              *errorcodeptr = ERR28;
+              goto FAILED;
+              }
            }
 
          /* For conditions that are assertions, check the syntax, and then exit
@@ -6572,15 +6832,23 @@ for (;; ptr++)
          if (tempptr[1] == CHAR_QUESTION_MARK &&
              (tempptr[2] == CHAR_EQUALS_SIGN ||
               tempptr[2] == CHAR_EXCLAMATION_MARK ||
-               tempptr[2] == CHAR_LESS_THAN_SIGN))
+               (tempptr[2] == CHAR_LESS_THAN_SIGN &&
+                (tempptr[3] == CHAR_EQUALS_SIGN ||
+                 tempptr[3] == CHAR_EXCLAMATION_MARK))))
+            {
+            cd->iscondassert = TRUE;
            break;
+            }
 
          /* Other conditions use OP_CREF/OP_DNCREF/OP_RREF/OP_DNRREF, and all
          need to skip at least 1+IMM2_SIZE bytes at the start of the group. */
 
          code[1+LINK_SIZE] = OP_CREF;
          skipbytes = 1+IMM2_SIZE;
-          refsign = -1;
+          refsign = -1;     /* => not a number */
+          namelen = -1;     /* => not a name; must set to avoid warning */
+          name = NULL;      /* Always set to avoid warning */
+          recno = 0;        /* Always set to avoid warning */
 
          /* Check for a test for recursion in a named group. */
*/ @@ -6617,9 +6885,14 @@ for (;; ptr++) if (refsign >= 0) { - recno = 0; while (IS_DIGIT(*ptr)) { + if (recno > INT_MAX / 10 - 1) /* Integer overflow */ + { + while (IS_DIGIT(*ptr)) ptr++; + *errorcodeptr = ERR61; + goto FAILED; + } recno = recno * 10 + (int)(*ptr - CHAR_0); ptr++; } @@ -6648,7 +6921,7 @@ for (;; ptr++) ptr++; } namelen = (int)(ptr - name); - if (lengthptr != NULL) *lengthptr += IMM2_SIZE; + if (lengthptr != NULL) skipbytes += IMM2_SIZE; } /* Check the terminator */ @@ -6684,6 +6957,7 @@ for (;; ptr++) goto FAILED; } PUT2(code, 2+LINK_SIZE, recno); + if (recno > cd->top_backref) cd->top_backref = recno; break; } @@ -6692,7 +6966,8 @@ for (;; ptr++) slot = cd->name_table; for (i = 0; i < cd->names_found; i++) { - if (STRNCMP_UC_UC(name, slot+IMM2_SIZE, namelen) == 0) break; + if (STRNCMP_UC_UC(name, slot+IMM2_SIZE, namelen) == 0 && + slot[IMM2_SIZE+namelen] == 0) break; slot += cd->name_entry_size; } @@ -6706,12 +6981,15 @@ for (;; ptr++) int offset = i++; int count = 1; recno = GET2(slot, 0); /* Number from first found */ + if (recno > cd->top_backref) cd->top_backref = recno; for (; i < cd->names_found; i++) { slot += cd->name_entry_size; - if (STRNCMP_UC_UC(name, slot+IMM2_SIZE, namelen) != 0) break; + if (STRNCMP_UC_UC(name, slot+IMM2_SIZE, namelen) != 0 || + (slot+IMM2_SIZE)[namelen] != 0) break; count++; } + if (count > 1) { PUT2(code, 2+LINK_SIZE, offset); @@ -6750,6 +7028,11 @@ for (;; ptr++) *errorcodeptr = ERR15; goto FAILED; } + if (recno > INT_MAX / 10 - 1) /* Integer overflow */ + { + *errorcodeptr = ERR61; + goto FAILED; + } recno = recno * 10 + name[i] - CHAR_0; } if (recno == 0) recno = RREF_ANY; @@ -6847,17 +7130,19 @@ for (;; ptr++) int n = 0; ptr++; while(IS_DIGIT(*ptr)) + { n = n * 10 + *ptr++ - CHAR_0; + if (n > 255) + { + *errorcodeptr = ERR38; + goto FAILED; + } + } if (*ptr != CHAR_RIGHT_PARENTHESIS) { *errorcodeptr = ERR39; goto FAILED; } - if (n > 255) - { - *errorcodeptr = ERR38; - goto FAILED; - } *code++ = n; PUT(code, 0, (int)(ptr - cd->start_pattern + 1)); /* Pattern offset */ PUT(code, LINK_SIZE, 0); /* Default length */ @@ -7026,6 +7311,7 @@ for (;; ptr++) if (lengthptr != NULL) { named_group *ng; + recno = 0; if (namelen == 0) { @@ -7043,23 +7329,70 @@ for (;; ptr++) goto FAILED; } - /* The name table does not exist in the first pass; instead we must - scan the list of names encountered so far in order to get the - number. If the name is not found, set the value to 0 for a forward - reference. */ - - ng = cd->named_groups; - for (i = 0; i < cd->names_found; i++, ng++) - { - if (namelen == ng->length && - STRNCMP_UC_UC(name, ng->name, namelen) == 0) - break; - } - recno = (i < cd->names_found)? ng->number : 0; - /* Count named back references. */ if (!is_recurse) cd->namedrefcount++; + + /* We have to allow for a named reference to a duplicated name (this + cannot be determined until the second pass). This needs an extra + 16-bit data item. */ + + *lengthptr += IMM2_SIZE; + + /* If this is a forward reference and we are within a (?|...) group, + the reference may end up as the number of a group which we are + currently inside, that is, it could be a recursive reference. In the + real compile this will be picked up and the reference wrapped with + OP_ONCE to make it atomic, so we must space in case this occurs. */ + + /* In fact, this can happen for a non-forward reference because + another group with the same number might be created later. This + issue is fixed "properly" in PCRE2. 
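
Both overflow checks added above use the same idiom: test the accumulator before the multiply-and-add, so that recno * 10 + digit can never wrap. The same guard as a standalone, hypothetical helper (not a PCRE function):

#include <limits.h>

/* Accumulate a decimal group number safely. Returns -1 on overflow,
after skipping the rest of the digits, exactly as the patch does
before raising ERR61. */

static int read_decimal(const char **pp)
{
const char *p = *pp;
int n = 0;
while (*p >= '0' && *p <= '9')
  {
  if (n > INT_MAX / 10 - 1)              /* Next step would overflow */
    {
    while (*p >= '0' && *p <= '9') p++;  /* Consume remaining digits */
    *pp = p;
    return -1;                           /* Caller reports the error */
    }
  n = n * 10 + (*p++ - '0');
  }
*pp = p;
return n;
}
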
As PCRE1 is now in maintenance + only mode, we finesse the bug by allowing more memory always. */ + + *lengthptr += 4 + 4*LINK_SIZE; + + /* It is even worse than that. The current reference may be to an + existing named group with a different number (so apparently not + recursive) but which later on is also attached to a group with the + current number. This can only happen if $(| has been previous + encountered. In that case, we allow yet more memory, just in case. + (Again, this is fixed "properly" in PCRE2. */ + + if (cd->dupgroups) *lengthptr += 4 + 4*LINK_SIZE; + + /* Otherwise, check for recursion here. The name table does not exist + in the first pass; instead we must scan the list of names encountered + so far in order to get the number. If the name is not found, leave + the value of recno as 0 for a forward reference. */ + + /* This patch (removing "else") fixes a problem when a reference is + to multiple identically named nested groups from within the nest. + Once again, it is not the "proper" fix, and it results in an + over-allocation of memory. */ + + /* else */ + { + ng = cd->named_groups; + for (i = 0; i < cd->names_found; i++, ng++) + { + if (namelen == ng->length && + STRNCMP_UC_UC(name, ng->name, namelen) == 0) + { + open_capitem *oc; + recno = ng->number; + if (is_recurse) break; + for (oc = cd->open_caps; oc != NULL; oc = oc->next) + { + if (oc->number == recno) + { + oc->flag = TRUE; + break; + } + } + } + } + } } /* In the real compile, search the name table. We check the name @@ -7114,6 +7447,7 @@ for (;; ptr++) { if (firstcharflags == REQ_UNSET) firstcharflags = REQ_NONE; previous = code; + item_hwm_offset = cd->hwm - cd->start_workspace; *code++ = ((options & PCRE_CASELESS) != 0)? OP_DNREFI : OP_DNREF; PUT2INC(code, 0, index); PUT2INC(code, 0, count); @@ -7124,7 +7458,7 @@ for (;; ptr++) { open_capitem *oc; recno = GET2(slot, 0); - cd->backref_map |= (recno < 32)? (1 << recno) : 1; + cd->backref_map |= (recno < 32)? (1U << recno) : 1; if (recno > cd->top_backref) cd->top_backref = recno; /* Check to see if this back reference is recursive, that it, it @@ -7151,9 +7485,14 @@ for (;; ptr++) /* ------------------------------------------------------------ */ - case CHAR_R: /* Recursion */ - ptr++; /* Same as (?0) */ - /* Fall through */ + case CHAR_R: /* Recursion, same as (?0) */ + recno = 0; + if (*(++ptr) != CHAR_RIGHT_PARENTHESIS) + { + *errorcodeptr = ERR29; + goto FAILED; + } + goto HANDLE_RECURSION; /* ------------------------------------------------------------ */ @@ -7190,7 +7529,15 @@ for (;; ptr++) recno = 0; while(IS_DIGIT(*ptr)) + { + if (recno > INT_MAX / 10 - 1) /* Integer overflow */ + { + while (IS_DIGIT(*ptr)) ptr++; + *errorcodeptr = ERR61; + goto FAILED; + } recno = recno * 10 + *ptr++ - CHAR_0; + } if (*ptr != (pcre_uchar)terminator) { @@ -7227,6 +7574,7 @@ for (;; ptr++) HANDLE_RECURSION: previous = code; + item_hwm_offset = cd->hwm - cd->start_workspace; called = cd->start_code; /* When we are actually compiling, find the bracket that is being @@ -7296,6 +7644,8 @@ for (;; ptr++) /* Can't determine a first byte now */ if (firstcharflags == REQ_UNSET) firstcharflags = REQ_NONE; + zerofirstchar = firstchar; + zerofirstcharflags = firstcharflags; continue; @@ -7334,39 +7684,15 @@ for (;; ptr++) newoptions = (options | set) & (~unset); /* If the options ended with ')' this is not the start of a nested - group with option changes, so the options change at this level. 
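
This hunk and the later HANDLE_REFERENCE one change 1 << recno to 1U << recno. The distinction matters because recno can be 31, and left-shifting a signed 1 into the sign bit is undefined behaviour. The bitmap logic itself is small; a sketch assuming an unsigned 32-bit map like cd->backref_map:

/* Record that group `recno` is the target of a back reference.
Groups 0..31 each get their own bit; higher numbers all share bit 0,
which simply means "some high-numbered group is referenced". The
shift operand must be unsigned: 1 << 31 on a signed int is undefined
behaviour, while 1U << 31 is well defined. */

static void mark_backref(unsigned int *backref_map, int recno)
{
*backref_map |= (recno < 32)? (1U << recno) : 1U;
}
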
If this - item is right at the start of the pattern, the options can be - abstracted and made external in the pre-compile phase, and ignored in - the compile phase. This can be helpful when matching -- for instance in - caseless checking of required bytes. - - If the code pointer is not (cd->start_code + 1 + LINK_SIZE), we are - definitely *not* at the start of the pattern because something has been - compiled. In the pre-compile phase, however, the code pointer can have - that value after the start, because it gets reset as code is discarded - during the pre-compile. However, this can happen only at top level - if - we are within parentheses, the starting BRA will still be present. At - any parenthesis level, the length value can be used to test if anything - has been compiled at that level. Thus, a test for both these conditions - is necessary to ensure we correctly detect the start of the pattern in - both phases. - + group with option changes, so the options change at this level. If we are not at the pattern start, reset the greedy defaults and the case value for firstchar and reqchar. */ if (*ptr == CHAR_RIGHT_PARENTHESIS) { - if (code == cd->start_code + 1 + LINK_SIZE && - (lengthptr == NULL || *lengthptr == 2 + 2*LINK_SIZE)) - { - cd->external_options = newoptions; - } - else - { - greedy_default = ((newoptions & PCRE_UNGREEDY) != 0); - greedy_non_default = greedy_default ^ 1; - req_caseopt = ((newoptions & PCRE_CASELESS) != 0)? REQ_CASELESS:0; - } + greedy_default = ((newoptions & PCRE_UNGREEDY) != 0); + greedy_non_default = greedy_default ^ 1; + req_caseopt = ((newoptions & PCRE_CASELESS) != 0)? REQ_CASELESS:0; /* Change options at this level, and pass them back for use in subsequent branches. */ @@ -7414,12 +7740,26 @@ for (;; ptr++) goto FAILED; } - /* Assertions used not to be repeatable, but this was changed for Perl - compatibility, so all kinds can now be repeated. We copy code into a + /* All assertions used not to be repeatable, but this was changed for Perl + compatibility. All kinds can now be repeated except for assertions that are + conditions (Perl also forbids these to be repeated). We copy code into a non-register variable (tempcode) in order to be able to pass its address - because some compilers complain otherwise. */ + because some compilers complain otherwise. At the start of a conditional + group whose condition is an assertion, cd->iscondassert is set. We unset it + here so as to allow assertions later in the group to be quantified. */ + + if (bravalue >= OP_ASSERT && bravalue <= OP_ASSERTBACK_NOT && + cd->iscondassert) + { + previous = NULL; + cd->iscondassert = FALSE; + } + else + { + previous = code; + item_hwm_offset = cd->hwm - cd->start_workspace; + } - previous = code; /* For handling repetition */ *code = bravalue; tempcode = code; tempreqvary = cd->req_varyopt; /* Save value before bracket */ @@ -7597,15 +7937,17 @@ for (;; ptr++) } } - /* For a forward assertion, we take the reqchar, if set. This can be - helpful if the pattern that follows the assertion doesn't set a different - char. For example, it's useful for /(?=abcde).+/. We can't set firstchar - for an assertion, however because it leads to incorrect effect for patterns - such as /(?=a)a.+/ when the "real" "a" would then become a reqchar instead - of a firstchar. This is overcome by a scan at the end if there's no - firstchar, looking for an asserted first char. 
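
A pattern that recurs throughout these hunks is the replacement of the saved pointer save_hwm with the saved offset item_hwm_offset = cd->hwm - cd->start_workspace. The workspace can be reallocated while a group is still being compiled, which would leave a stored pointer dangling; an offset survives the move. Schematically (the struct below is illustrative, not the real compile_data):

#include <stddef.h>

typedef struct {
  unsigned char *start_workspace;   /* May move if reallocated */
  unsigned char *hwm;               /* High-water mark inside it */
} cd_sketch;

/* Save the high-water mark as a distance, not an address... */

static size_t save_hwm_offset(const cd_sketch *cd)
{
return (size_t)(cd->hwm - cd->start_workspace);
}

/* ...and rebuild a usable pointer only after any reallocation. */

static unsigned char *restore_hwm(const cd_sketch *cd, size_t offset)
{
return cd->start_workspace + offset;
}
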
*/ - - else if (bravalue == OP_ASSERT && subreqcharflags >= 0) + /* For a forward assertion, we take the reqchar, if set, provided that the + group has also set a first char. This can be helpful if the pattern that + follows the assertion doesn't set a different char. For example, it's + useful for /(?=abcde).+/. We can't set firstchar for an assertion, however + because it leads to incorrect effect for patterns such as /(?=a)a.+/ when + the "real" "a" would then become a reqchar instead of a firstchar. This is + overcome by a scan at the end if there's no firstchar, looking for an + asserted first char. */ + + else if (bravalue == OP_ASSERT && subreqcharflags >= 0 && + subfirstcharflags >= 0) { reqchar = subreqchar; reqcharflags = subreqcharflags; @@ -7631,16 +7973,6 @@ for (;; ptr++) c = ec; else { - if (escape == ESC_Q) /* Handle start of quoted string */ - { - if (ptr[1] == CHAR_BACKSLASH && ptr[2] == CHAR_E) - ptr += 2; /* avoid empty string */ - else inescq = TRUE; - continue; - } - - if (escape == ESC_E) continue; /* Perl ignores an orphan \E */ - /* For metasequences that actually match a character, we disable the setting of a first character if it hasn't already been set. */ @@ -7666,7 +7998,7 @@ for (;; ptr++) const pcre_uchar *p; pcre_uint32 cf; - save_hwm = cd->hwm; /* Normally this is set when '(' is read */ + item_hwm_offset = cd->hwm - cd->start_workspace; /* Normally this is set when '(' is read */ terminator = (*(++ptr) == CHAR_LESS_THAN_SIGN)? CHAR_GREATER_THAN_SIGN : CHAR_APOSTROPHE; @@ -7695,7 +8027,7 @@ for (;; ptr++) if (*p != (pcre_uchar)terminator) { *errorcodeptr = ERR57; - break; + goto FAILED; } ptr++; goto HANDLE_NUMERICAL_RECURSION; @@ -7710,7 +8042,7 @@ for (;; ptr++) ptr[1] != CHAR_APOSTROPHE && ptr[1] != CHAR_LEFT_CURLY_BRACKET)) { *errorcodeptr = ERR69; - break; + goto FAILED; } is_recurse = FALSE; terminator = (*(++ptr) == CHAR_LESS_THAN_SIGN)? @@ -7732,11 +8064,12 @@ for (;; ptr++) single group (i.e. not to a duplicated name. */ HANDLE_REFERENCE: - if (firstcharflags == REQ_UNSET) firstcharflags = REQ_NONE; + if (firstcharflags == REQ_UNSET) zerofirstcharflags = firstcharflags = REQ_NONE; previous = code; + item_hwm_offset = cd->hwm - cd->start_workspace; *code++ = ((options & PCRE_CASELESS) != 0)? OP_REFI : OP_REF; PUT2INC(code, 0, recno); - cd->backref_map |= (recno < 32)? (1 << recno) : 1; + cd->backref_map |= (recno < 32)? (1U << recno) : 1; if (recno > cd->top_backref) cd->top_backref = recno; /* Check to see if this back reference is recursive, that it, it @@ -7763,6 +8096,7 @@ for (;; ptr++) if (!get_ucp(&ptr, &negated, &ptype, &pdata, errorcodeptr)) goto FAILED; previous = code; + item_hwm_offset = cd->hwm - cd->start_workspace; *code++ = ((escape == ESC_p) != negated)? OP_PROP : OP_NOTPROP; *code++ = ptype; *code++ = pdata; @@ -7803,6 +8137,7 @@ for (;; ptr++) { previous = (escape > ESC_b && escape < ESC_Z)? code : NULL; + item_hwm_offset = cd->hwm - cd->start_workspace; *code++ = (!utf && escape == ESC_C)? OP_ALLANY : escape; } } @@ -7846,6 +8181,7 @@ for (;; ptr++) ONE_CHAR: previous = code; + item_hwm_offset = cd->hwm - cd->start_workspace; /* For caseless UTF-8 mode when UCP support is available, check whether this character has more than one other case. 
If so, generate a special @@ -7893,7 +8229,6 @@ for (;; ptr++) if (mclength == 1 || req_caseopt == 0) { - firstchar = mcbuffer[0] | req_caseopt; firstchar = mcbuffer[0]; firstcharflags = req_caseopt; @@ -7993,6 +8328,17 @@ int length; unsigned int orig_bracount; unsigned int max_bracount; branch_chain bc; +size_t save_hwm_offset; + +/* If set, call the external function that checks for stack availability. */ + +if (PUBL(stack_guard) != NULL && PUBL(stack_guard)()) + { + *errorcodeptr= ERR85; + return FALSE; + } + +/* Miscellaneous initialization */ bc.outer = bcptr; bc.current_branch = code; @@ -8000,6 +8346,8 @@ bc.current_branch = code; firstchar = reqchar = 0; firstcharflags = reqcharflags = REQ_UNSET; +save_hwm_offset = cd->hwm - cd->start_workspace; + /* Accumulate the length for use in the pre-compile phase. Start with the length of the BRA and KET and any extra bytes that are required at the beginning. We accumulate in a local variable to save frequent testing of @@ -8141,7 +8489,7 @@ for (;;) int fixed_length; *code = OP_END; fixed_length = find_fixedlength(last_branch, (options & PCRE_UTF8) != 0, - FALSE, cd); + FALSE, cd, NULL); DPRINTF(("fixed length = %d\n", fixed_length)); if (fixed_length == -3) { @@ -8193,12 +8541,16 @@ for (;;) /* If it was a capturing subpattern, check to see if it contained any recursive back references. If so, we must wrap it in atomic brackets. - In any event, remove the block from the chain. */ + Because we are moving code along, we must ensure that any pending recursive + references are updated. In any event, remove the block from the chain. */ if (capnumber > 0) { if (cd->open_caps->flag) { + *code = OP_END; + adjust_recurse(start_bracket, 1 + LINK_SIZE, + (options & PCRE_UTF8) != 0, cd, save_hwm_offset); memmove(start_bracket + 1 + LINK_SIZE, start_bracket, IN_UCHARS(code - start_bracket)); *start_bracket = OP_ONCE; @@ -8330,14 +8682,22 @@ do { op == OP_SCBRA || op == OP_SCBRAPOS) { int n = GET2(scode, 1+LINK_SIZE); - int new_map = bracket_map | ((n < 32)? (1 << n) : 1); + int new_map = bracket_map | ((n < 32)? (1U << n) : 1); if (!is_anchored(scode, new_map, cd, atomcount)) return FALSE; } - /* Positive forward assertions and conditions */ + /* Positive forward assertion */ + + else if (op == OP_ASSERT) + { + if (!is_anchored(scode, bracket_map, cd, atomcount)) return FALSE; + } + + /* Condition; not anchored if no second branch */ - else if (op == OP_ASSERT || op == OP_COND) + else if (op == OP_COND) { + if (scode[GET(scode,1)] != OP_ALT) return FALSE; if (!is_anchored(scode, bracket_map, cd, atomcount)) return FALSE; } @@ -8383,8 +8743,8 @@ matching and for non-DOTALL patterns that start with .* (which must start at the beginning or after \n). As in the case of is_anchored() (see above), we have to take account of back references to capturing brackets that contain .* because in that case we can't make the assumption. Also, the appearance of .* -inside atomic brackets or in a pattern that contains *PRUNE or *SKIP does not -count, because once again the assumption no longer holds. +inside atomic brackets or in an assertion, or in a pattern that contains *PRUNE +or *SKIP does not count, because once again the assumption no longer holds. Arguments: code points to start of expression (the bracket) @@ -8393,13 +8753,14 @@ count, because once again the assumption no longer holds. 
the less precise approach cd points to the compile data atomcount atomic group level + inassert TRUE if in an assertion Returns: TRUE or FALSE */ static BOOL is_startline(const pcre_uchar *code, unsigned int bracket_map, - compile_data *cd, int atomcount) + compile_data *cd, int atomcount, BOOL inassert) { do { const pcre_uchar *scode = first_significant_code( @@ -8422,10 +8783,11 @@ do { case OP_RREF: case OP_DNRREF: case OP_DEF: + case OP_FAIL: return FALSE; default: /* Assertion */ - if (!is_startline(scode, bracket_map, cd, atomcount)) return FALSE; + if (!is_startline(scode, bracket_map, cd, atomcount, TRUE)) return FALSE; do scode += GET(scode, 1); while (*scode == OP_ALT); scode += 1 + LINK_SIZE; break; @@ -8439,7 +8801,7 @@ do { if (op == OP_BRA || op == OP_BRAPOS || op == OP_SBRA || op == OP_SBRAPOS) { - if (!is_startline(scode, bracket_map, cd, atomcount)) return FALSE; + if (!is_startline(scode, bracket_map, cd, atomcount, inassert)) return FALSE; } /* Capturing brackets */ @@ -8448,34 +8810,34 @@ do { op == OP_SCBRA || op == OP_SCBRAPOS) { int n = GET2(scode, 1+LINK_SIZE); - int new_map = bracket_map | ((n < 32)? (1 << n) : 1); - if (!is_startline(scode, new_map, cd, atomcount)) return FALSE; + int new_map = bracket_map | ((n < 32)? (1U << n) : 1); + if (!is_startline(scode, new_map, cd, atomcount, inassert)) return FALSE; } /* Positive forward assertions */ else if (op == OP_ASSERT) { - if (!is_startline(scode, bracket_map, cd, atomcount)) return FALSE; + if (!is_startline(scode, bracket_map, cd, atomcount, TRUE)) return FALSE; } /* Atomic brackets */ else if (op == OP_ONCE || op == OP_ONCE_NC) { - if (!is_startline(scode, bracket_map, cd, atomcount + 1)) return FALSE; + if (!is_startline(scode, bracket_map, cd, atomcount + 1, inassert)) return FALSE; } /* .* means "start at start or after \n" if it isn't in atomic brackets or - brackets that may be referenced, as long as the pattern does not contain - *PRUNE or *SKIP, because these break the feature. Consider, for example, - /.*?a(*PRUNE)b/ with the subject "aab", which matches "ab", i.e. not at the - start of a line. */ + brackets that may be referenced or an assertion, as long as the pattern does + not contain *PRUNE or *SKIP, because these break the feature. Consider, for + example, /.*?a(*PRUNE)b/ with the subject "aab", which matches "ab", i.e. + not at the start of a line. */ else if (op == OP_TYPESTAR || op == OP_TYPEMINSTAR || op == OP_TYPEPOSSTAR) { if (scode[1] != OP_ANY || (bracket_map & cd->backref_map) != 0 || - atomcount > 0 || cd->had_pruneorskip) + atomcount > 0 || cd->had_pruneorskip || inassert) return FALSE; } @@ -8737,6 +9099,8 @@ pcre_uchar cworkspace[COMPILE_WORK_SIZE]; similar way to cworkspace, it can be expanded using malloc() if necessary. */ named_group named_groups[NAMED_GROUP_LIST_SIZE]; +cd->named_groups = named_groups; +cd->named_group_list_size = NAMED_GROUP_LIST_SIZE; /* Set this early so that early errors get offset 0. 
*/ @@ -9003,13 +9367,13 @@ cd->names_found = 0; cd->name_entry_size = 0; cd->name_table = NULL; cd->dupnames = FALSE; +cd->dupgroups = FALSE; cd->namedrefcount = 0; cd->start_code = cworkspace; cd->hwm = cworkspace; +cd->iscondassert = FALSE; cd->start_workspace = cworkspace; cd->workspace_size = COMPILE_WORK_SIZE; -cd->named_groups = named_groups; -cd->named_group_list_size = NAMED_GROUP_LIST_SIZE; cd->start_pattern = (const pcre_uchar *)pattern; cd->end_pattern = (const pcre_uchar *)(pattern + STRLEN_UC((const pcre_uchar *)pattern)); cd->req_varyopt = 0; @@ -9043,13 +9407,6 @@ if (length > MAX_PATTERN_SIZE) goto PCRE_EARLY_ERROR_RETURN; } -/* If there are groups with duplicate names and there are also references by -name, we must allow for the possibility of named references to duplicated -groups. These require an extra data item each. */ - -if (cd->dupnames && cd->namedrefcount > 0) - length += cd->namedrefcount * IMM2_SIZE * sizeof(pcre_uchar); - /* Compute the size of the data block for storing the compiled pattern. Integer overflow should no longer be possible because nowadays we limit the maximum value of cd->names_found and cd->name_entry_size. */ @@ -9108,6 +9465,7 @@ cd->name_table = (pcre_uchar *)re + re->name_table_offset; codestart = cd->name_table + re->name_entry_size * re->name_count; cd->start_code = codestart; cd->hwm = (pcre_uchar *)(cd->start_workspace); +cd->iscondassert = FALSE; cd->req_varyopt = 0; cd->had_accept = FALSE; cd->had_pruneorskip = FALSE; @@ -9126,6 +9484,7 @@ if (cd->names_found > 0) add_name(cd, ng->name, ng->length, ng->number); if (cd->named_group_list_size > NAMED_GROUP_LIST_SIZE) (PUBL(free))((void *)cd->named_groups); + cd->named_group_list_size = 0; /* So we don't free it twice */ } /* Set up a starting, non-extracting bracket, then compile the expression. On @@ -9180,6 +9539,16 @@ if (cd->hwm > cd->start_workspace) int offset, recno; cd->hwm -= LINK_SIZE; offset = GET(cd->hwm, 0); + + /* Check that the hwm handling hasn't gone wrong. This whole area is + rewritten in PCRE2 because there are some obscure cases. */ + + if (offset == 0 || codestart[offset-1] != OP_RECURSE) + { + errorcode = ERR10; + break; + } + recno = GET(codestart, offset); if (recno != prev_recno) { @@ -9203,11 +9572,18 @@ subpattern. */ if (errorcode == 0 && re->top_backref > re->top_bracket) errorcode = ERR15; -/* Unless disabled, check whether single character iterators can be -auto-possessified. The function overwrites the appropriate opcode values. */ +/* Unless disabled, check whether any single character iterators can be +auto-possessified. The function overwrites the appropriate opcode values, so +the type of the pointer must be cast. NOTE: the intermediate variable "temp" is +used in this code because at least one compiler gives a warning about loss of +"const" attribute if the cast (pcre_uchar *)codestart is used directly in the +function call. */ -if ((options & PCRE_NO_AUTO_POSSESS) == 0) - auto_possessify((pcre_uchar *)codestart, utf, cd); +if (errorcode == 0 && (options & PCRE_NO_AUTO_POSSESS) == 0) + { + pcre_uchar *temp = (pcre_uchar *)codestart; + auto_possessify(temp, utf, cd); + } /* If there were any lookbehind assertions that contained OP_RECURSE (recursions or subroutine calls), a flag is set for them to be checked here, @@ -9217,7 +9593,7 @@ OP_RECURSE that are not fixed length get a diagnosic with a useful offset. The exceptional ones forgo this. We scan the pattern to check that they are fixed length, and set their lengths. 
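
Two of the changes above cooperate on ownership of the named-group list: the early-error path now frees a list that grew onto the heap, and after the normal free the recorded size is zeroed so a later error path cannot free the block again. The shape of that trick, with assumed types:

#include <stdlib.h>

#define LIST_INITIAL_SIZE 20        /* Size of the on-stack list */

typedef struct {
  void *named_groups;
  int   named_group_list_size;      /* > initial => heap-allocated */
} owner_sketch;

/* Free the list only if it was grown onto the heap, then reset the
size so a second trip through an error path is a harmless no-op. */

static void free_named_groups(owner_sketch *cd)
{
if (cd->named_group_list_size > LIST_INITIAL_SIZE)
  {
  free(cd->named_groups);
  cd->named_group_list_size = 0;    /* So we don't free it twice */
  }
}
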
*/ -if (cd->check_lookbehind) +if (errorcode == 0 && cd->check_lookbehind) { pcre_uchar *cc = (pcre_uchar *)codestart; @@ -9237,7 +9613,7 @@ if (cd->check_lookbehind) int end_op = *be; *be = OP_END; fixed_length = find_fixedlength(cc, (re->options & PCRE_UTF8) != 0, TRUE, - cd); + cd, NULL); *be = end_op; DPRINTF(("fixed length = %d\n", fixed_length)); if (fixed_length < 0) @@ -9259,6 +9635,8 @@ if (errorcode != 0) { (PUBL(free))(re); PCRE_EARLY_ERROR_RETURN: + if (cd->named_group_list_size > NAMED_GROUP_LIST_SIZE) + (PUBL(free))((void *)cd->named_groups); *erroroffset = (int)(ptr - (const pcre_uchar *)pattern); PCRE_EARLY_ERROR_RETURN2: *errorptr = find_error_text(errorcode); @@ -9317,7 +9695,7 @@ if ((re->options & PCRE_ANCHORED) == 0) re->flags |= PCRE_FIRSTSET; } - else if (is_startline(codestart, 0, cd, 0)) re->flags |= PCRE_STARTLINE; + else if (is_startline(codestart, 0, cd, 0, FALSE)) re->flags |= PCRE_STARTLINE; } } @@ -9430,4 +9808,3 @@ return (pcre32 *)re; } /* End of pcre_compile.c */ - diff --git a/src/3rd/pcre/pcredfa.c b/src/3rd/pcre/pcredfa.c index aa02ef39fdf..56e9a5ad8a8 100644 --- a/src/3rd/pcre/pcredfa.c +++ b/src/3rd/pcre/pcredfa.c @@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language (but see below for why this module is different). Written by Philip Hazel - Copyright (c) 1997-2013 University of Cambridge + Copyright (c) 1997-2017 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -1473,7 +1473,7 @@ for (;;) goto ANYNL01; case CHAR_CR: - if (ptr + 1 < end_subject && RAWUCHARTEST(ptr + 1) == CHAR_LF) ncount = 1; + if (ptr + 1 < end_subject && UCHAR21TEST(ptr + 1) == CHAR_LF) ncount = 1; /* Fall through */ ANYNL01: @@ -1742,7 +1742,7 @@ for (;;) goto ANYNL02; case CHAR_CR: - if (ptr + 1 < end_subject && RAWUCHARTEST(ptr + 1) == CHAR_LF) ncount = 1; + if (ptr + 1 < end_subject && UCHAR21TEST(ptr + 1) == CHAR_LF) ncount = 1; /* Fall through */ ANYNL02: @@ -2012,7 +2012,7 @@ for (;;) goto ANYNL03; case CHAR_CR: - if (ptr + 1 < end_subject && RAWUCHARTEST(ptr + 1) == CHAR_LF) ncount = 1; + if (ptr + 1 < end_subject && UCHAR21TEST(ptr + 1) == CHAR_LF) ncount = 1; /* Fall through */ ANYNL03: @@ -2210,7 +2210,7 @@ for (;;) if ((md->moptions & PCRE_PARTIAL_HARD) != 0) reset_could_continue = TRUE; } - else if (RAWUCHARTEST(ptr + 1) == CHAR_LF) + else if (UCHAR21TEST(ptr + 1) == CHAR_LF) { ADD_NEW_DATA(-(state_offset + 1), 0, 1); } @@ -2287,12 +2287,14 @@ for (;;) case OP_NOTI: if (clen > 0) { - unsigned int otherd; + pcre_uint32 otherd; #ifdef SUPPORT_UTF if (utf && d >= 128) { #ifdef SUPPORT_UCP otherd = UCD_OTHERCASE(d); +#else + otherd = d; #endif /* SUPPORT_UCP */ } else @@ -2625,7 +2627,7 @@ for (;;) if (isinclass) { int max = (int)GET2(ecode, 1 + IMM2_SIZE); - if (*ecode == OP_CRPOSRANGE) + if (*ecode == OP_CRPOSRANGE && count >= (int)GET2(ecode, 1)) { active_count--; /* Remove non-match possibility */ next_active_state--; @@ -2736,9 +2738,10 @@ for (;;) condcode == OP_DNRREF) return PCRE_ERROR_DFA_UCOND; - /* The DEFINE condition is always false */ + /* The DEFINE condition is always false, and the assertion (?!) is + converted to OP_FAIL. 
*/ - if (condcode == OP_DEF) + if (condcode == OP_DEF || condcode == OP_FAIL) { ADD_ACTIVE(state_offset + codelink + LINK_SIZE + 1, 0); } /* The only supported version of OP_RREF is for the value RREF_ANY, @@ -3242,7 +3245,7 @@ md->callout_data = NULL; if (extra_data != NULL) { - unsigned int flags = extra_data->flags; + unsigned long int flags = extra_data->flags; if ((flags & PCRE_EXTRA_STUDY_DATA) != 0) study = (const pcre_study_data *)extra_data->study_data; if ((flags & PCRE_EXTRA_MATCH_LIMIT) != 0) return PCRE_ERROR_DFA_UMLIMIT; @@ -3466,7 +3469,7 @@ for (;;) if (((options | re->options) & PCRE_NO_START_OPTIMIZE) == 0) { - /* Advance to a known first char. */ + /* Advance to a known first pcre_uchar (i.e. data item) */ if (has_first_char) { @@ -3474,12 +3477,12 @@ for (;;) { pcre_uchar csc; while (current_subject < end_subject && - (csc = RAWUCHARTEST(current_subject)) != first_char && csc != first_char2) + (csc = UCHAR21TEST(current_subject)) != first_char && csc != first_char2) current_subject++; } else while (current_subject < end_subject && - RAWUCHARTEST(current_subject) != first_char) + UCHAR21TEST(current_subject) != first_char) current_subject++; } @@ -3509,36 +3512,26 @@ for (;;) ANYCRLF, and we are now at a LF, advance the match position by one more character. */ - if (RAWUCHARTEST(current_subject - 1) == CHAR_CR && + if (UCHAR21TEST(current_subject - 1) == CHAR_CR && (md->nltype == NLTYPE_ANY || md->nltype == NLTYPE_ANYCRLF) && current_subject < end_subject && - RAWUCHARTEST(current_subject) == CHAR_NL) + UCHAR21TEST(current_subject) == CHAR_NL) current_subject++; } } - /* Or to a non-unique first char after study */ + /* Advance to a non-unique first pcre_uchar after study */ else if (start_bits != NULL) { while (current_subject < end_subject) { - register pcre_uint32 c = RAWUCHARTEST(current_subject); + register pcre_uint32 c = UCHAR21TEST(current_subject); #ifndef COMPILE_PCRE8 if (c > 255) c = 255; #endif - if ((start_bits[c/8] & (1 << (c&7))) == 0) - { - current_subject++; -#if defined SUPPORT_UTF && defined COMPILE_PCRE8 - /* In non 8-bit mode, the iteration will stop for - characters > 255 at the beginning or not stop at all. */ - if (utf) - ACROSSCHAR(current_subject < end_subject, *current_subject, - current_subject++); -#endif - } - else break; + if ((start_bits[c/8] & (1 << (c&7))) != 0) break; + current_subject++; } } } @@ -3557,19 +3550,20 @@ for (;;) /* If the pattern was studied, a minimum subject length may be set. This is a lower bound; no actual string of that length may actually match the pattern. Although the value is, strictly, in characters, we treat it as - bytes to avoid spending too much time in this optimization. */ + in pcre_uchar units to avoid spending too much time in this optimization. + */ if (study != NULL && (study->flags & PCRE_STUDY_MINLEN) != 0 && (pcre_uint32)(end_subject - current_subject) < study->minlength) return PCRE_ERROR_NOMATCH; - /* If req_char is set, we know that that character must appear in the - subject for the match to succeed. If the first character is set, req_char - must be later in the subject; otherwise the test starts at the match - point. This optimization can save a huge amount of work in patterns with - nested unlimited repeats that aren't going to match. Writing separate - code for cased/caseless versions makes it go faster, as does using an - autoincrement and backing off on a match. + /* If req_char is set, we know that that pcre_uchar must appear in the + subject for the match to succeed. 
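
The simplified scanning loops above consult the study data's start_bits table: a 256-bit map with one bit per possible first data unit, values above 255 being clamped first in the 16/32-bit libraries. The bit test, extracted as a sketch:

/* Test bit c of a 32-byte bitmap: byte c/8, bit c%8. This mirrors how
the loops above index the pcre_study() start_bits table. */

static int start_bit_is_set(const unsigned char start_bits[32],
  unsigned int c)
{
if (c > 255) c = 255;               /* Clamp for 16/32-bit data units */
return (start_bits[c / 8] & (1u << (c & 7))) != 0;
}
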
If the first pcre_uchar is set, + req_char must be later in the subject; otherwise the test starts at the + match point. This optimization can save a huge amount of work in patterns + with nested unlimited repeats that aren't going to match. Writing + separate code for cased/caseless versions makes it go faster, as does + using an autoincrement and backing off on a match. HOWEVER: when the subject string is very, very long, searching to its end can take a long time, and give bad performance on quite ordinary @@ -3589,7 +3583,7 @@ for (;;) { while (p < end_subject) { - register pcre_uint32 pp = RAWUCHARINCTEST(p); + register pcre_uint32 pp = UCHAR21INCTEST(p); if (pp == req_char || pp == req_char2) { p--; break; } } } @@ -3597,18 +3591,18 @@ for (;;) { while (p < end_subject) { - if (RAWUCHARINCTEST(p) == req_char) { p--; break; } + if (UCHAR21INCTEST(p) == req_char) { p--; break; } } } - /* If we can't find the required character, break the matching loop, + /* If we can't find the required pcre_uchar, break the matching loop, which will cause a return or PCRE_ERROR_NOMATCH. */ if (p >= end_subject) break; - /* If we have found the required character, save the point where we + /* If we have found the required pcre_uchar, save the point where we found it, so that we don't search again next time round the loop if - the start hasn't passed this character yet. */ + the start hasn't passed this point yet. */ req_char_ptr = p; } @@ -3665,9 +3659,9 @@ for (;;) not contain any explicit matches for \r or \n, and the newline option is CRLF or ANY or ANYCRLF, advance the match position by one more character. */ - if (RAWUCHARTEST(current_subject - 1) == CHAR_CR && + if (UCHAR21TEST(current_subject - 1) == CHAR_CR && current_subject < end_subject && - RAWUCHARTEST(current_subject) == CHAR_NL && + UCHAR21TEST(current_subject) == CHAR_NL && (re->flags & PCRE_HASCRORLF) == 0 && (md->nltype == NLTYPE_ANY || md->nltype == NLTYPE_ANYCRLF || diff --git a/src/3rd/pcre/pcreexec.c b/src/3rd/pcre/pcreexec.c index 234fadaf91d..f68ea33153d 100644 --- a/src/3rd/pcre/pcreexec.c +++ b/src/3rd/pcre/pcreexec.c @@ -6,7 +6,7 @@ and semantics are as close as possible to those of the Perl 5 language. 
Written by Philip Hazel - Copyright (c) 1997-2013 University of Cambridge + Copyright (c) 1997-2021 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -134,7 +134,7 @@ pcre_uint32 c; BOOL utf = md->utf; if (is_subject && length > md->end_subject - p) length = md->end_subject - p; while (length-- > 0) - if (isprint(c = RAWUCHARINCTEST(p))) printf("%c", (char)c); else printf("\\x{%02x}", c); + if (isprint(c = UCHAR21INCTEST(p))) printf("%c", (char)c); else printf("\\x{%02x}", c); } #endif @@ -237,8 +237,8 @@ if (caseless) { pcre_uint32 cc, cp; if (eptr >= md->end_subject) return -2; /* Partial match */ - cc = RAWUCHARTEST(eptr); - cp = RAWUCHARTEST(p); + cc = UCHAR21TEST(eptr); + cp = UCHAR21TEST(p); if (TABLE_GET(cp, md->lcc, cp) != TABLE_GET(cc, md->lcc, cc)) return -1; p++; eptr++; @@ -254,7 +254,7 @@ else while (length-- > 0) { if (eptr >= md->end_subject) return -2; /* Partial match */ - if (RAWUCHARINCTEST(p) != RAWUCHARINCTEST(eptr)) return -1; + if (UCHAR21INCTEST(p) != UCHAR21INCTEST(eptr)) return -1; } } @@ -669,7 +669,7 @@ if (ecode == NULL) return match((PCRE_PUCHAR)&rdepth, NULL, NULL, 0, NULL, NULL, 1); else { - int len = (char *)&rdepth - (char *)eptr; + int len = (int)((char *)&rdepth - (char *)eptr); return (len > 0)? -len : len; } } @@ -758,7 +758,7 @@ for (;;) md->mark = NULL; /* In case previously set by assertion */ RMATCH(eptr, ecode + PRIV(OP_lengths)[*ecode] + ecode[1], offset_top, md, eptrb, RM55); - if ((rrc == MATCH_MATCH || rrc == MATCH_ACCEPT) && + if ((rrc == MATCH_MATCH || rrc == MATCH_ACCEPT || rrc == MATCH_KETRPOS) && md->mark == NULL) md->mark = ecode + 2; /* A return of MATCH_SKIP_ARG means that matching failed at SKIP with an @@ -1136,88 +1136,81 @@ for (;;) printf("\n"); #endif - if (offset < md->offset_max) - { - matched_once = FALSE; - code_offset = (int)(ecode - md->start_code); + if (offset >= md->offset_max) goto POSSESSIVE_NON_CAPTURE; - save_offset1 = md->offset_vector[offset]; - save_offset2 = md->offset_vector[offset+1]; - save_offset3 = md->offset_vector[md->offset_end - number]; - save_capture_last = md->capture_last; - - DPRINTF(("saving %d %d %d\n", save_offset1, save_offset2, save_offset3)); + matched_once = FALSE; + code_offset = (int)(ecode - md->start_code); - /* Each time round the loop, save the current subject position for use - when the group matches. For MATCH_MATCH, the group has matched, so we - restart it with a new subject starting position, remembering that we had - at least one match. For MATCH_NOMATCH, carry on with the alternatives, as - usual. If we haven't matched any alternatives in any iteration, check to - see if a previous iteration matched. If so, the group has matched; - continue from afterwards. Otherwise it has failed; restore the previous - capture values before returning NOMATCH. 
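
The cast added near the top of this file's hunks, (int)((char *)&rdepth - (char *)eptr), is in match()'s frame-size probe: called once with ecode == NULL, the function estimates how much stack one recursion level uses by subtracting an address from the caller's frame from the address of one of its own locals. A standalone sketch of the same trick; as in PCRE itself, the subtraction is implementation-defined and only the magnitude is meaningful (the engine fixes the sign of the result):

/* Estimate per-call stack usage by comparing local addresses across
two adjacent frames. caller_local is the address of a local variable
in the calling frame. Implementation-defined; a sketch only. */

static int frame_size(char *caller_local)
{
char mine;                          /* A local in the current frame */
long d = (long)(&mine - caller_local);
return (int)((d > 0)? d : -d);
}
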
*/ + save_offset1 = md->offset_vector[offset]; + save_offset2 = md->offset_vector[offset+1]; + save_offset3 = md->offset_vector[md->offset_end - number]; + save_capture_last = md->capture_last; - for (;;) - { - md->offset_vector[md->offset_end - number] = - (int)(eptr - md->start_subject); - if (op >= OP_SBRA) md->match_function_type = MATCH_CBEGROUP; - RMATCH(eptr, ecode + PRIV(OP_lengths)[*ecode], offset_top, md, - eptrb, RM63); - if (rrc == MATCH_KETRPOS) - { - offset_top = md->end_offset_top; - eptr = md->end_match_ptr; - ecode = md->start_code + code_offset; - save_capture_last = md->capture_last; - matched_once = TRUE; - mstart = md->start_match_ptr; /* In case \K changed it */ - continue; - } + DPRINTF(("saving %d %d %d\n", save_offset1, save_offset2, save_offset3)); - /* See comment in the code for capturing groups above about handling - THEN. */ + /* Each time round the loop, save the current subject position for use + when the group matches. For MATCH_MATCH, the group has matched, so we + restart it with a new subject starting position, remembering that we had + at least one match. For MATCH_NOMATCH, carry on with the alternatives, as + usual. If we haven't matched any alternatives in any iteration, check to + see if a previous iteration matched. If so, the group has matched; + continue from afterwards. Otherwise it has failed; restore the previous + capture values before returning NOMATCH. */ - if (rrc == MATCH_THEN) + for (;;) + { + md->offset_vector[md->offset_end - number] = + (int)(eptr - md->start_subject); + if (op >= OP_SBRA) md->match_function_type = MATCH_CBEGROUP; + RMATCH(eptr, ecode + PRIV(OP_lengths)[*ecode], offset_top, md, + eptrb, RM63); + if (rrc == MATCH_KETRPOS) + { + offset_top = md->end_offset_top; + ecode = md->start_code + code_offset; + save_capture_last = md->capture_last; + matched_once = TRUE; + mstart = md->start_match_ptr; /* In case \K changed it */ + if (eptr == md->end_match_ptr) /* Matched an empty string */ { - next = ecode + GET(ecode,1); - if (md->start_match_ptr < next && - (*ecode == OP_ALT || *next == OP_ALT)) - rrc = MATCH_NOMATCH; + do ecode += GET(ecode, 1); while (*ecode == OP_ALT); + break; } - - if (rrc != MATCH_NOMATCH) RRETURN(rrc); - md->capture_last = save_capture_last; - ecode += GET(ecode, 1); - if (*ecode != OP_ALT) break; + eptr = md->end_match_ptr; + continue; } - if (!matched_once) - { - md->offset_vector[offset] = save_offset1; - md->offset_vector[offset+1] = save_offset2; - md->offset_vector[md->offset_end - number] = save_offset3; - } + /* See comment in the code for capturing groups above about handling + THEN. */ - if (allow_zero || matched_once) + if (rrc == MATCH_THEN) { - ecode += 1 + LINK_SIZE; - break; + next = ecode + GET(ecode,1); + if (md->start_match_ptr < next && + (*ecode == OP_ALT || *next == OP_ALT)) + rrc = MATCH_NOMATCH; } - RRETURN(MATCH_NOMATCH); + if (rrc != MATCH_NOMATCH) RRETURN(rrc); + md->capture_last = save_capture_last; + ecode += GET(ecode, 1); + if (*ecode != OP_ALT) break; } - /* FALL THROUGH ... Insufficient room for saving captured contents. Treat - as a non-capturing bracket. 
*/ - - /* VVVVVVVVVVVVVVVVVVVVVVVVV */ - /* VVVVVVVVVVVVVVVVVVVVVVVVV */ + if (!matched_once) + { + md->offset_vector[offset] = save_offset1; + md->offset_vector[offset+1] = save_offset2; + md->offset_vector[md->offset_end - number] = save_offset3; + } - DPRINTF(("insufficient capture room: treat as non-capturing\n")); + if (allow_zero || matched_once) + { + ecode += 1 + LINK_SIZE; + break; + } - /* VVVVVVVVVVVVVVVVVVVVVVVVV */ - /* VVVVVVVVVVVVVVVVVVVVVVVVV */ + RRETURN(MATCH_NOMATCH); /* Non-capturing possessive bracket with unlimited repeat. We come here from BRAZERO with allow_zero = TRUE. The code is similar to the above, @@ -1241,10 +1234,15 @@ for (;;) if (rrc == MATCH_KETRPOS) { offset_top = md->end_offset_top; - eptr = md->end_match_ptr; ecode = md->start_code + code_offset; matched_once = TRUE; mstart = md->start_match_ptr; /* In case \K reset it */ + if (eptr == md->end_match_ptr) /* Matched an empty string */ + { + do ecode += GET(ecode, 1); while (*ecode == OP_ALT); + break; + } + eptr = md->end_match_ptr; continue; } @@ -1378,6 +1376,7 @@ for (;;) break; case OP_DEF: /* DEFINE - always false */ + case OP_FAIL: /* From optimized (?!) condition */ break; /* The condition is an assertion. Call match() to evaluate it - setting @@ -1394,8 +1393,11 @@ for (;;) condition = TRUE; /* Advance ecode past the assertion to the start of the first branch, - but adjust it so that the general choosing code below works. */ + but adjust it so that the general choosing code below works. If the + assertion has a quantifier that allows zero repeats we must skip over + the BRAZERO. This is a lunatic thing to do, but somebody did! */ + if (*ecode == OP_BRAZERO) ecode++; ecode += GET(ecode, 1); while (*ecode == OP_ALT) ecode += GET(ecode, 1); ecode += 1 + LINK_SIZE - PRIV(OP_lengths)[condcode]; @@ -1464,7 +1466,18 @@ for (;;) md->offset_vector[offset] = md->offset_vector[md->offset_end - number]; md->offset_vector[offset+1] = (int)(eptr - md->start_subject); - if (offset_top <= offset) offset_top = offset + 2; + + /* If this group is at or above the current highwater mark, ensure that + any groups between the current high water mark and this group are marked + unset and then update the high water mark. */ + + if (offset >= offset_top) + { + register int *iptr = md->offset_vector + offset_top; + register int *iend = md->offset_vector + offset; + while (iptr < iend) *iptr++ = -1; + offset_top = offset + 2; + } } ecode += 1 + IMM2_SIZE; break; @@ -1816,7 +1829,11 @@ for (;;) are defined in a range that can be tested for. */ if (rrc >= MATCH_BACKTRACK_MIN && rrc <= MATCH_BACKTRACK_MAX) + { + if (new_recursive.offset_save != stacksave) + (PUBL(free))(new_recursive.offset_save); RRETURN(MATCH_NOMATCH); + } /* Any return code other than NOMATCH is an error. */ @@ -1979,6 +1996,19 @@ for (;;) } } + /* OP_KETRPOS is a possessive repeating ket. Remember the current position, + and return the MATCH_KETRPOS. This makes it possible to do the repeats one + at a time from the outer level, thus saving stack. This must precede the + empty string test - in this case that test is done at the outer level. */ + + if (*ecode == OP_KETRPOS) + { + md->start_match_ptr = mstart; /* In case \K reset it */ + md->end_match_ptr = eptr; + md->end_offset_top = offset_top; + RRETURN(MATCH_KETRPOS); + } + /* For an ordinary non-repeating ket, just continue at this level. This also happens for a repeating ket if no characters were matched in the group. 
This is the forcible breaking of infinite loops as implemented in @@ -2001,18 +2031,6 @@ for (;;) break; } - /* OP_KETRPOS is a possessive repeating ket. Remember the current position, - and return the MATCH_KETRPOS. This makes it possible to do the repeats one - at a time from the outer level, thus saving stack. */ - - if (*ecode == OP_KETRPOS) - { - md->start_match_ptr = mstart; /* In case \K reset it */ - md->end_match_ptr = eptr; - md->end_offset_top = offset_top; - RRETURN(MATCH_KETRPOS); - } - /* The normal repeating kets try the rest of the pattern or restart from the preceding bracket, in the appropriate order. In the second case, we can use tail recursion to avoid using another stack frame, unless we have an @@ -2103,7 +2121,7 @@ for (;;) eptr + 1 >= md->end_subject && NLBLOCK->nltype == NLTYPE_FIXED && NLBLOCK->nllen == 2 && - RAWUCHARTEST(eptr) == NLBLOCK->nl[0]) + UCHAR21TEST(eptr) == NLBLOCK->nl[0]) { md->hitend = TRUE; if (md->partial > 1) RRETURN(PCRE_ERROR_PARTIAL); @@ -2147,7 +2165,7 @@ for (;;) eptr + 1 >= md->end_subject && NLBLOCK->nltype == NLTYPE_FIXED && NLBLOCK->nllen == 2 && - RAWUCHARTEST(eptr) == NLBLOCK->nl[0]) + UCHAR21TEST(eptr) == NLBLOCK->nl[0]) { md->hitend = TRUE; if (md->partial > 1) RRETURN(PCRE_ERROR_PARTIAL); @@ -2287,10 +2305,10 @@ for (;;) case OP_ANY: if (IS_NEWLINE(eptr)) RRETURN(MATCH_NOMATCH); if (md->partial != 0 && - eptr + 1 >= md->end_subject && + eptr == md->end_subject - 1 && NLBLOCK->nltype == NLTYPE_FIXED && NLBLOCK->nllen == 2 && - RAWUCHARTEST(eptr) == NLBLOCK->nl[0]) + UCHAR21TEST(eptr) == NLBLOCK->nl[0]) { md->hitend = TRUE; if (md->partial > 1) RRETURN(PCRE_ERROR_PARTIAL); @@ -2444,7 +2462,7 @@ for (;;) { SCHECK_PARTIAL(); } - else if (RAWUCHARTEST(eptr) == CHAR_LF) eptr++; + else if (UCHAR21TEST(eptr) == CHAR_LF) eptr++; break; case CHAR_LF: @@ -2691,16 +2709,22 @@ for (;;) pcre_uchar *slot = md->name_table + GET2(ecode, 1) * md->name_entry_size; ecode += 1 + 2*IMM2_SIZE; + /* Setting the default length first and initializing 'offset' avoids + compiler warnings in the REF_REPEAT code. */ + + length = (md->jscript_compat)? 0 : -1; + offset = 0; + while (count-- > 0) { offset = GET2(slot, 0) << 1; - if (offset < offset_top && md->offset_vector[offset] >= 0) break; + if (offset < offset_top && md->offset_vector[offset] >= 0) + { + length = md->offset_vector[offset+1] - md->offset_vector[offset]; + break; + } slot += md->name_entry_size; } - if (count < 0) - length = (md->jscript_compat)? 
0 : -1; - else - length = md->offset_vector[offset+1] - md->offset_vector[offset]; } goto REF_REPEAT; @@ -3029,7 +3053,7 @@ for (;;) { RMATCH(eptr, ecode, offset_top, md, eptrb, RM18); if (rrc != MATCH_NOMATCH) RRETURN(rrc); - if (eptr-- == pp) break; /* Stop if tried at original pos */ + if (eptr-- <= pp) break; /* Stop if tried at original pos */ BACKCHAR(eptr); } } @@ -3186,7 +3210,7 @@ for (;;) { RMATCH(eptr, ecode, offset_top, md, eptrb, RM21); if (rrc != MATCH_NOMATCH) RRETURN(rrc); - if (eptr-- == pp) break; /* Stop if tried at original pos */ + if (eptr-- <= pp) break; /* Stop if tried at original pos */ #ifdef SUPPORT_UTF if (utf) BACKCHAR(eptr); #endif @@ -3212,7 +3236,7 @@ for (;;) CHECK_PARTIAL(); /* Not SCHECK_PARTIAL() */ RRETURN(MATCH_NOMATCH); } - while (length-- > 0) if (*ecode++ != RAWUCHARINC(eptr)) RRETURN(MATCH_NOMATCH); + while (length-- > 0) if (*ecode++ != UCHAR21INC(eptr)) RRETURN(MATCH_NOMATCH); } else #endif @@ -3252,7 +3276,7 @@ for (;;) if (fc < 128) { - pcre_uint32 cc = RAWUCHAR(eptr); + pcre_uint32 cc = UCHAR21(eptr); if (md->lcc[fc] != TABLE_GET(cc, md->lcc, cc)) RRETURN(MATCH_NOMATCH); ecode++; eptr++; @@ -3459,7 +3483,7 @@ for (;;) if (possessive) continue; /* No backtracking */ for(;;) { - if (eptr == pp) goto TAIL_RECURSE; + if (eptr <= pp) goto TAIL_RECURSE; RMATCH(eptr, ecode, offset_top, md, eptrb, RM23); if (rrc != MATCH_NOMATCH) RRETURN(rrc); #ifdef SUPPORT_UCP @@ -3521,7 +3545,7 @@ for (;;) SCHECK_PARTIAL(); RRETURN(MATCH_NOMATCH); } - cc = RAWUCHARTEST(eptr); + cc = UCHAR21TEST(eptr); if (fc != cc && foc != cc) RRETURN(MATCH_NOMATCH); eptr++; } @@ -3539,7 +3563,7 @@ for (;;) SCHECK_PARTIAL(); RRETURN(MATCH_NOMATCH); } - cc = RAWUCHARTEST(eptr); + cc = UCHAR21TEST(eptr); if (fc != cc && foc != cc) RRETURN(MATCH_NOMATCH); eptr++; } @@ -3556,7 +3580,7 @@ for (;;) SCHECK_PARTIAL(); break; } - cc = RAWUCHARTEST(eptr); + cc = UCHAR21TEST(eptr); if (fc != cc && foc != cc) break; eptr++; } @@ -3583,7 +3607,7 @@ for (;;) SCHECK_PARTIAL(); RRETURN(MATCH_NOMATCH); } - if (fc != RAWUCHARINCTEST(eptr)) RRETURN(MATCH_NOMATCH); + if (fc != UCHAR21INCTEST(eptr)) RRETURN(MATCH_NOMATCH); } if (min == max) continue; @@ -3600,7 +3624,7 @@ for (;;) SCHECK_PARTIAL(); RRETURN(MATCH_NOMATCH); } - if (fc != RAWUCHARINCTEST(eptr)) RRETURN(MATCH_NOMATCH); + if (fc != UCHAR21INCTEST(eptr)) RRETURN(MATCH_NOMATCH); } /* Control never gets here */ } @@ -3614,7 +3638,7 @@ for (;;) SCHECK_PARTIAL(); break; } - if (fc != RAWUCHARTEST(eptr)) break; + if (fc != UCHAR21TEST(eptr)) break; eptr++; } if (possessive) continue; /* No backtracking */ @@ -3880,7 +3904,7 @@ for (;;) if (possessive) continue; /* No backtracking */ for(;;) { - if (eptr == pp) goto TAIL_RECURSE; + if (eptr <= pp) goto TAIL_RECURSE; RMATCH(eptr, ecode, offset_top, md, eptrb, RM30); if (rrc != MATCH_NOMATCH) RRETURN(rrc); eptr--; @@ -4015,7 +4039,7 @@ for (;;) if (possessive) continue; /* No backtracking */ for(;;) { - if (eptr == pp) goto TAIL_RECURSE; + if (eptr <= pp) goto TAIL_RECURSE; RMATCH(eptr, ecode, offset_top, md, eptrb, RM34); if (rrc != MATCH_NOMATCH) RRETURN(rrc); eptr--; @@ -4369,7 +4393,7 @@ for (;;) eptr + 1 >= md->end_subject && NLBLOCK->nltype == NLTYPE_FIXED && NLBLOCK->nllen == 2 && - RAWUCHAR(eptr) == NLBLOCK->nl[0]) + UCHAR21(eptr) == NLBLOCK->nl[0]) { md->hitend = TRUE; if (md->partial > 1) RRETURN(PCRE_ERROR_PARTIAL); @@ -4411,7 +4435,7 @@ for (;;) default: RRETURN(MATCH_NOMATCH); case CHAR_CR: - if (eptr < md->end_subject && RAWUCHAR(eptr) == CHAR_LF) eptr++; + if (eptr < md->end_subject 
&& UCHAR21(eptr) == CHAR_LF) eptr++; break; case CHAR_LF: @@ -4521,7 +4545,7 @@ for (;;) SCHECK_PARTIAL(); RRETURN(MATCH_NOMATCH); } - cc = RAWUCHAR(eptr); + cc = UCHAR21(eptr); if (cc >= 128 || (md->ctypes[cc] & ctype_digit) == 0) RRETURN(MATCH_NOMATCH); eptr++; @@ -4538,7 +4562,7 @@ for (;;) SCHECK_PARTIAL(); RRETURN(MATCH_NOMATCH); } - cc = RAWUCHAR(eptr); + cc = UCHAR21(eptr); if (cc < 128 && (md->ctypes[cc] & ctype_space) != 0) RRETURN(MATCH_NOMATCH); eptr++; @@ -4555,7 +4579,7 @@ for (;;) SCHECK_PARTIAL(); RRETURN(MATCH_NOMATCH); } - cc = RAWUCHAR(eptr); + cc = UCHAR21(eptr); if (cc >= 128 || (md->ctypes[cc] & ctype_space) == 0) RRETURN(MATCH_NOMATCH); eptr++; @@ -4572,7 +4596,7 @@ for (;;) SCHECK_PARTIAL(); RRETURN(MATCH_NOMATCH); } - cc = RAWUCHAR(eptr); + cc = UCHAR21(eptr); if (cc < 128 && (md->ctypes[cc] & ctype_word) != 0) RRETURN(MATCH_NOMATCH); eptr++; @@ -4589,7 +4613,7 @@ for (;;) SCHECK_PARTIAL(); RRETURN(MATCH_NOMATCH); } - cc = RAWUCHAR(eptr); + cc = UCHAR21(eptr); if (cc >= 128 || (md->ctypes[cc] & ctype_word) == 0) RRETURN(MATCH_NOMATCH); eptr++; @@ -5150,7 +5174,7 @@ for (;;) { default: RRETURN(MATCH_NOMATCH); case CHAR_CR: - if (eptr < md->end_subject && RAWUCHAR(eptr) == CHAR_LF) eptr++; + if (eptr < md->end_subject && UCHAR21(eptr) == CHAR_LF) eptr++; break; case CHAR_LF: @@ -5586,7 +5610,7 @@ for (;;) if (possessive) continue; /* No backtracking */ for(;;) { - if (eptr == pp) goto TAIL_RECURSE; + if (eptr <= pp) goto TAIL_RECURSE; RMATCH(eptr, ecode, offset_top, md, eptrb, RM44); if (rrc != MATCH_NOMATCH) RRETURN(rrc); eptr--; @@ -5628,12 +5652,17 @@ for (;;) if (possessive) continue; /* No backtracking */ + /* We use <= pp rather than == pp to detect the start of the run while + backtracking because the use of \C in UTF mode can cause BACKCHAR to + move back past pp. This is just palliative; the use of \C in UTF mode + is fraught with danger. 
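
A sketch of why the guards above need <= rather than ==: stepping back over a UTF-8 character skips continuation bytes, and if \C has left the pointer in mid-character this can land before the saved position pp. The helper below is a plain-C equivalent of the 8-bit BACKCHAR macro; it assumes well-formed UTF-8 with a lead byte somewhere before eptr:

/* Step back to the start of the previous UTF-8 character by skipping
continuation bytes (10xxxxxx). Because this can move past a position
saved while in mid-character, callers compare with <= pp, not == pp. */

static const unsigned char *back_char(const unsigned char *eptr)
{
eptr--;
while ((*eptr & 0xc0) == 0x80) eptr--;
return eptr;
}
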
*/ + for(;;) { int lgb, rgb; PCRE_PUCHAR fptr; - if (eptr == pp) goto TAIL_RECURSE; /* At start of char run */ + if (eptr <= pp) goto TAIL_RECURSE; /* At start of char run */ RMATCH(eptr, ecode, offset_top, md, eptrb, RM45); if (rrc != MATCH_NOMATCH) RRETURN(rrc); @@ -5651,7 +5680,7 @@ for (;;) for (;;) { - if (eptr == pp) goto TAIL_RECURSE; /* At start of char run */ + if (eptr <= pp) goto TAIL_RECURSE; /* At start of char run */ fptr = eptr - 1; if (!utf) c = *fptr; else { @@ -5675,54 +5704,25 @@ for (;;) switch(ctype) { case OP_ANY: - if (max < INT_MAX) + for (i = min; i < max; i++) { - for (i = min; i < max; i++) + if (eptr >= md->end_subject) { - if (eptr >= md->end_subject) - { - SCHECK_PARTIAL(); - break; - } - if (IS_NEWLINE(eptr)) break; - if (md->partial != 0 && /* Take care with CRLF partial */ - eptr + 1 >= md->end_subject && - NLBLOCK->nltype == NLTYPE_FIXED && - NLBLOCK->nllen == 2 && - RAWUCHAR(eptr) == NLBLOCK->nl[0]) - { - md->hitend = TRUE; - if (md->partial > 1) RRETURN(PCRE_ERROR_PARTIAL); - } - eptr++; - ACROSSCHAR(eptr < md->end_subject, *eptr, eptr++); + SCHECK_PARTIAL(); + break; } - } - - /* Handle unlimited UTF-8 repeat */ - - else - { - for (i = min; i < max; i++) + if (IS_NEWLINE(eptr)) break; + if (md->partial != 0 && /* Take care with CRLF partial */ + eptr + 1 >= md->end_subject && + NLBLOCK->nltype == NLTYPE_FIXED && + NLBLOCK->nllen == 2 && + UCHAR21(eptr) == NLBLOCK->nl[0]) { - if (eptr >= md->end_subject) - { - SCHECK_PARTIAL(); - break; - } - if (IS_NEWLINE(eptr)) break; - if (md->partial != 0 && /* Take care with CRLF partial */ - eptr + 1 >= md->end_subject && - NLBLOCK->nltype == NLTYPE_FIXED && - NLBLOCK->nllen == 2 && - RAWUCHAR(eptr) == NLBLOCK->nl[0]) - { - md->hitend = TRUE; - if (md->partial > 1) RRETURN(PCRE_ERROR_PARTIAL); - } - eptr++; - ACROSSCHAR(eptr < md->end_subject, *eptr, eptr++); + md->hitend = TRUE; + if (md->partial > 1) RRETURN(PCRE_ERROR_PARTIAL); } + eptr++; + ACROSSCHAR(eptr < md->end_subject, *eptr, eptr++); } break; @@ -5772,7 +5772,7 @@ for (;;) if (c == CHAR_CR) { if (++eptr >= md->end_subject) break; - if (RAWUCHAR(eptr) == CHAR_LF) eptr++; + if (UCHAR21(eptr) == CHAR_LF) eptr++; } else { @@ -5930,13 +5930,13 @@ for (;;) if (possessive) continue; /* No backtracking */ for(;;) { - if (eptr == pp) goto TAIL_RECURSE; + if (eptr <= pp) goto TAIL_RECURSE; RMATCH(eptr, ecode, offset_top, md, eptrb, RM46); if (rrc != MATCH_NOMATCH) RRETURN(rrc); eptr--; BACKCHAR(eptr); - if (ctype == OP_ANYNL && eptr > pp && RAWUCHAR(eptr) == CHAR_NL && - RAWUCHAR(eptr - 1) == CHAR_CR) eptr--; + if (ctype == OP_ANYNL && eptr > pp && UCHAR21(eptr) == CHAR_NL && + UCHAR21(eptr - 1) == CHAR_CR) eptr--; } } else @@ -6513,7 +6513,7 @@ tables = re->tables; if (extra_data != NULL) { - register unsigned int flags = extra_data->flags; + unsigned long int flags = extra_data->flags; if ((flags & PCRE_EXTRA_STUDY_DATA) != 0) study = (const pcre_study_data *)extra_data->study_data; if ((flags & PCRE_EXTRA_MATCH_LIMIT) != 0) @@ -6685,7 +6685,8 @@ if (md->offset_vector != NULL) register int *iend = iptr - re->top_bracket; if (iend < md->offset_vector + 2) iend = md->offset_vector + 2; while (--iptr >= iend) *iptr = -1; - md->offset_vector[0] = md->offset_vector[1] = -1; + if (offsetcount > 0) md->offset_vector[0] = -1; + if (offsetcount > 1) md->offset_vector[1] = -1; } /* Set up the first character to match, if available. 
The first_char value is @@ -6783,10 +6784,10 @@ for(;;) if (first_char != first_char2) while (start_match < end_subject && - (smc = RAWUCHARTEST(start_match)) != first_char && smc != first_char2) + (smc = UCHAR21TEST(start_match)) != first_char && smc != first_char2) start_match++; else - while (start_match < end_subject && RAWUCHARTEST(start_match) != first_char) + while (start_match < end_subject && UCHAR21TEST(start_match) != first_char) start_match++; } @@ -6818,7 +6819,7 @@ for(;;) if (start_match[-1] == CHAR_CR && (md->nltype == NLTYPE_ANY || md->nltype == NLTYPE_ANYCRLF) && start_match < end_subject && - RAWUCHARTEST(start_match) == CHAR_NL) + UCHAR21TEST(start_match) == CHAR_NL) start_match++; } } @@ -6829,22 +6830,12 @@ for(;;) { while (start_match < end_subject) { - register pcre_uint32 c = RAWUCHARTEST(start_match); + register pcre_uint32 c = UCHAR21TEST(start_match); #ifndef COMPILE_PCRE8 if (c > 255) c = 255; #endif - if ((start_bits[c/8] & (1 << (c&7))) == 0) - { - start_match++; -#if defined SUPPORT_UTF && defined COMPILE_PCRE8 - /* In non 8-bit mode, the iteration will stop for - characters > 255 at the beginning or not stop at all. */ - if (utf) - ACROSSCHAR(start_match < end_subject, *start_match, - start_match++); -#endif - } - else break; + if ((start_bits[c/8] & (1 << (c&7))) != 0) break; + start_match++; } } } /* Starting optimizations */ @@ -6897,7 +6888,7 @@ for(;;) { while (p < end_subject) { - register pcre_uint32 pp = RAWUCHARINCTEST(p); + register pcre_uint32 pp = UCHAR21INCTEST(p); if (pp == req_char || pp == req_char2) { p--; break; } } } @@ -6905,7 +6896,7 @@ for(;;) { while (p < end_subject) { - if (RAWUCHARINCTEST(p) == req_char) { p--; break; } + if (UCHAR21INCTEST(p) == req_char) { p--; break; } } } diff --git a/src/3rd/pcre/pcreget.c b/src/3rd/pcre/pcreget.c index 8e2c82c0dd1..8ce30dd00d2 100644 --- a/src/3rd/pcre/pcreget.c +++ b/src/3rd/pcre/pcreget.c @@ -250,6 +250,7 @@ It returns the number of the first one that was set in a pattern match. 
code the compiled regex stringname the name of the capturing substring ovector the vector of matched substrings + stringcount number of captured substrings Returns: the number of the first that is set, or the number of the last one if none are set, @@ -258,13 +259,16 @@ Returns: the number of the first that is set, #if defined COMPILE_PCRE8 static int -get_first_set(const pcre *code, const char *stringname, int *ovector) +get_first_set(const pcre *code, const char *stringname, int *ovector, + int stringcount) #elif defined COMPILE_PCRE16 static int -get_first_set(const pcre16 *code, PCRE_SPTR16 stringname, int *ovector) +get_first_set(const pcre16 *code, PCRE_SPTR16 stringname, int *ovector, + int stringcount) #elif defined COMPILE_PCRE32 static int -get_first_set(const pcre32 *code, PCRE_SPTR32 stringname, int *ovector) +get_first_set(const pcre32 *code, PCRE_SPTR32 stringname, int *ovector, + int stringcount) #endif { const REAL_PCRE *re = (const REAL_PCRE *)code; @@ -295,7 +299,7 @@ if (entrysize <= 0) return entrysize; for (entry = (pcre_uchar *)first; entry <= (pcre_uchar *)last; entry += entrysize) { int n = GET2(entry, 0); - if (ovector[n*2] >= 0) return n; + if (n < stringcount && ovector[n*2] >= 0) return n; } return GET2(entry, 0); } @@ -402,7 +406,7 @@ pcre32_copy_named_substring(const pcre32 *code, PCRE_SPTR32 subject, PCRE_UCHAR32 *buffer, int size) #endif { -int n = get_first_set(code, stringname, ovector); +int n = get_first_set(code, stringname, ovector, stringcount); if (n <= 0) return n; #if defined COMPILE_PCRE8 return pcre_copy_substring(subject, ovector, stringcount, n, buffer, size); @@ -457,7 +461,10 @@ pcre_uchar **stringlist; pcre_uchar *p; for (i = 0; i < double_count; i += 2) - size += sizeof(pcre_uchar *) + IN_UCHARS(ovector[i+1] - ovector[i] + 1); + { + size += sizeof(pcre_uchar *) + IN_UCHARS(1); + if (ovector[i+1] > ovector[i]) size += IN_UCHARS(ovector[i+1] - ovector[i]); + } stringlist = (pcre_uchar **)(PUBL(malloc))(size); if (stringlist == NULL) return PCRE_ERROR_NOMEMORY; @@ -473,7 +480,7 @@ p = (pcre_uchar *)(stringlist + stringcount + 1); for (i = 0; i < double_count; i += 2) { - int len = ovector[i+1] - ovector[i]; + int len = (ovector[i+1] > ovector[i])? (ovector[i+1] - ovector[i]) : 0; memcpy(p, subject + ovector[i], IN_UCHARS(len)); *stringlist++ = p; p += len; @@ -619,7 +626,7 @@ pcre32_get_named_substring(const pcre32 *code, PCRE_SPTR32 subject, PCRE_SPTR32 *stringptr) #endif { -int n = get_first_set(code, stringname, ovector); +int n = get_first_set(code, stringname, ovector, stringcount); if (n <= 0) return n; #if defined COMPILE_PCRE8 return pcre_get_substring(subject, ovector, stringcount, n, stringptr); diff --git a/src/3rd/pcre/pcreglob.c b/src/3rd/pcre/pcreglob.c index 835eae9b9e3..83e04842551 100644 --- a/src/3rd/pcre/pcreglob.c +++ b/src/3rd/pcre/pcreglob.c @@ -6,7 +6,7 @@ and semantics are as close as possible to those of the Perl 5 language. 
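
The pcre_get_substring_list() changes above defend against captures whose end offset precedes their start, which \K makes possible; such a capture must count as empty both when sizing the buffer and when copying. As a helper:

/* Length of capture i in an ovector, clamped at zero: \K can leave
ovector[2i+1] < ovector[2i], which must be read as an empty string. */

static int capture_length(const int *ovector, int i)
{
int start = ovector[2*i];
int end   = ovector[2*i + 1];
return (end > start)? (end - start) : 0;
}
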
Written by Philip Hazel - Copyright (c) 1997-2012 University of Cambridge + Copyright (c) 1997-2014 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -72,6 +72,7 @@ PCRE_EXP_DATA_DEFN void (*PUBL(free))(void *) = LocalPcreFree; PCRE_EXP_DATA_DEFN void *(*PUBL(stack_malloc))(size_t) = LocalPcreMalloc; PCRE_EXP_DATA_DEFN void (*PUBL(stack_free))(void *) = LocalPcreFree; PCRE_EXP_DATA_DEFN int (*PUBL(callout))(PUBL(callout_block) *) = NULL; +PCRE_EXP_DATA_DEFN int (*PUBL(stack_guard))(void) = NULL; #elif !defined VPCOMPAT PCRE_EXP_DATA_DEFN void *(*PUBL(malloc))(size_t) = malloc; @@ -79,6 +80,7 @@ PCRE_EXP_DATA_DEFN void (*PUBL(free))(void *) = free; PCRE_EXP_DATA_DEFN void *(*PUBL(stack_malloc))(size_t) = malloc; PCRE_EXP_DATA_DEFN void (*PUBL(stack_free))(void *) = free; PCRE_EXP_DATA_DEFN int (*PUBL(callout))(PUBL(callout_block) *) = NULL; +PCRE_EXP_DATA_DEFN int (*PUBL(stack_guard))(void) = NULL; #endif /* End of pcre_globals.c */ diff --git a/src/3rd/pcre/pcreinal.h b/src/3rd/pcre/pcreinal.h index 0b9798c5541..97ff55d03b3 100644 --- a/src/3rd/pcre/pcreinal.h +++ b/src/3rd/pcre/pcreinal.h @@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language. Written by Philip Hazel - Copyright (c) 1997-2013 University of Cambridge + Copyright (c) 1997-2016 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -275,7 +275,7 @@ pcre.h(.in) and disable (comment out) this message. */ typedef pcre_uint16 pcre_uchar; #define UCHAR_SHIFT (1) -#define IN_UCHARS(x) ((x) << UCHAR_SHIFT) +#define IN_UCHARS(x) ((x) * 2) #define MAX_255(c) ((c) <= 255u) #define TABLE_GET(c, table, default) (MAX_255(c)? ((table)[c]):(default)) @@ -283,7 +283,7 @@ typedef pcre_uint16 pcre_uchar; typedef pcre_uint32 pcre_uchar; #define UCHAR_SHIFT (2) -#define IN_UCHARS(x) ((x) << UCHAR_SHIFT) +#define IN_UCHARS(x) ((x) * 4) #define MAX_255(c) ((c) <= 255u) #define TABLE_GET(c, table, default) (MAX_255(c)? ((table)[c]):(default)) @@ -316,8 +316,8 @@ start/end of string field names are. */ &(NLBLOCK->nllen), utf)) \ : \ ((p) <= NLBLOCK->PSEND - NLBLOCK->nllen && \ - RAWUCHARTEST(p) == NLBLOCK->nl[0] && \ - (NLBLOCK->nllen == 1 || RAWUCHARTEST(p+1) == NLBLOCK->nl[1]) \ + UCHAR21TEST(p) == NLBLOCK->nl[0] && \ + (NLBLOCK->nllen == 1 || UCHAR21TEST(p+1) == NLBLOCK->nl[1]) \ ) \ ) @@ -330,8 +330,8 @@ start/end of string field names are. */ &(NLBLOCK->nllen), utf)) \ : \ ((p) >= NLBLOCK->PSSTART + NLBLOCK->nllen && \ - RAWUCHARTEST(p - NLBLOCK->nllen) == NLBLOCK->nl[0] && \ - (NLBLOCK->nllen == 1 || RAWUCHARTEST(p - NLBLOCK->nllen + 1) == NLBLOCK->nl[1]) \ + UCHAR21TEST(p - NLBLOCK->nllen) == NLBLOCK->nl[0] && \ + (NLBLOCK->nllen == 1 || UCHAR21TEST(p - NLBLOCK->nllen + 1) == NLBLOCK->nl[1]) \ ) \ ) @@ -582,12 +582,27 @@ changed in future to be a fixed number of bytes or to depend on LINK_SIZE. */ #define MAX_MARK ((1u << 8) - 1) #endif +/* There is a proposed future special "UTF-21" mode, in which only the lowest +21 bits of a 32-bit character are interpreted as UTF, with the remaining 11 +high-order bits available to the application for other uses. In preparation for +the future implementation of this mode, there are macros that load a data item +and, if in this special mode, mask it to 21 bits. These macros all have names +starting with UCHAR21. 
In all other modes, including the normal 32-bit +library, the macros all have the same simple definitions. When the new mode is +implemented, it is expected that these definitions will be varied appropriately +using #ifdef when compiling the library that supports the special mode. */ + +#define UCHAR21(eptr) (*(eptr)) +#define UCHAR21TEST(eptr) (*(eptr)) +#define UCHAR21INC(eptr) (*(eptr)++) +#define UCHAR21INCTEST(eptr) (*(eptr)++) + /* When UTF encoding is being used, a character is no longer just a single -byte. The macros for character handling generate simple sequences when used in -character-mode, and more complicated ones for UTF characters. GETCHARLENTEST -and other macros are not used when UTF is not supported, so they are not -defined. To make sure they can never even appear when UTF support is omitted, -we don't even define them. */ +byte in 8-bit mode or a single short in 16-bit mode. The macros for character +handling generate simple sequences when used in the basic mode, and more +complicated ones for UTF characters. GETCHARLENTEST and other macros are not +used when UTF is not supported. To make sure they can never even appear when +UTF support is omitted, we don't even define them. */ #ifndef SUPPORT_UTF @@ -600,10 +615,6 @@ we don't even define them. */ #define GETCHARINC(c, eptr) c = *eptr++; #define GETCHARINCTEST(c, eptr) c = *eptr++; #define GETCHARLEN(c, eptr, len) c = *eptr; -#define RAWUCHAR(eptr) (*(eptr)) -#define RAWUCHARINC(eptr) (*(eptr)++) -#define RAWUCHARTEST(eptr) (*(eptr)) -#define RAWUCHARINCTEST(eptr) (*(eptr)++) /* #define GETCHARLENTEST(c, eptr, len) */ /* #define BACKCHAR(eptr) */ /* #define FORWARDCHAR(eptr) */ @@ -776,30 +787,6 @@ do not know if we are in UTF-8 mode. */ c = *eptr; \ if (utf && c >= 0xc0) GETUTF8LEN(c, eptr, len); -/* Returns the next uchar, not advancing the pointer. This is called when -we know we are in UTF mode. */ - -#define RAWUCHAR(eptr) \ - (*(eptr)) - -/* Returns the next uchar, advancing the pointer. This is called when -we know we are in UTF mode. */ - -#define RAWUCHARINC(eptr) \ - (*((eptr)++)) - -/* Returns the next uchar, testing for UTF mode, and not advancing the -pointer. */ - -#define RAWUCHARTEST(eptr) \ - (*(eptr)) - -/* Returns the next uchar, testing for UTF mode, advancing the -pointer. */ - -#define RAWUCHARINCTEST(eptr) \ - (*((eptr)++)) - /* If the pointer is not at the start of a character, move it back until it is. This is called only in UTF-8 mode - we don't put a test within the macro because almost all calls are already within a block of UTF-8 only code. */ @@ -895,30 +882,6 @@ we do not know if we are in UTF-16 mode. */ c = *eptr; \ if (utf && (c & 0xfc00) == 0xd800) GETUTF16LEN(c, eptr, len); -/* Returns the next uchar, not advancing the pointer. This is called when -we know we are in UTF mode. */ - -#define RAWUCHAR(eptr) \ - (*(eptr)) - -/* Returns the next uchar, advancing the pointer. This is called when -we know we are in UTF mode. */ - -#define RAWUCHARINC(eptr) \ - (*((eptr)++)) - -/* Returns the next uchar, testing for UTF mode, and not advancing the -pointer. */ - -#define RAWUCHARTEST(eptr) \ - (*(eptr)) - -/* Returns the next uchar, testing for UTF mode, advancing the -pointer. */ - -#define RAWUCHARINCTEST(eptr) \ - (*((eptr)++)) - /* If the pointer is not at the start of a character, move it back until it is. 
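The identity definitions above leave room for the mode the comment describes. If the UTF-21 mode were ever implemented, the masked variants would plausibly look like the following; this is hypothetical — neither the SUPPORT_UTF21 macro nor the masking forms exist in 8.45:

/* Hypothetical UTF-21 build of the UCHAR21 macros: keep the low 21 bits
as the character, leaving the high 11 bits to the application. */
#ifdef SUPPORT_UTF21                       /* assumed config macro */
#define UCHAR21(eptr)        (*(eptr) & 0x1fffffu)
#define UCHAR21TEST(eptr)    (*(eptr) & 0x1fffffu)
#define UCHAR21INC(eptr)     (*(eptr)++ & 0x1fffffu)
#define UCHAR21INCTEST(eptr) (*(eptr)++ & 0x1fffffu)
#endif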
This is called only in UTF-16 mode - we don't put a test within the macro because almost all calls are already within a block of UTF-16 only @@ -980,30 +943,6 @@ This is called when we do not know if we are in UTF-32 mode. */ #define GETCHARLENTEST(c, eptr, len) \ GETCHARTEST(c, eptr) -/* Returns the next uchar, not advancing the pointer. This is called when -we know we are in UTF mode. */ - -#define RAWUCHAR(eptr) \ - (*(eptr)) - -/* Returns the next uchar, advancing the pointer. This is called when -we know we are in UTF mode. */ - -#define RAWUCHARINC(eptr) \ - (*((eptr)++)) - -/* Returns the next uchar, testing for UTF mode, and not advancing the -pointer. */ - -#define RAWUCHARTEST(eptr) \ - (*(eptr)) - -/* Returns the next uchar, testing for UTF mode, advancing the -pointer. */ - -#define RAWUCHARINCTEST(eptr) \ - (*((eptr)++)) - /* If the pointer is not at the start of a character, move it back until it is. This is called only in UTF-32 mode - we don't put a test within the macro because almost all calls are already within a block of UTF-32 only @@ -1045,7 +984,7 @@ other. NOTE: The values also appear in pcre_jit_compile.c. */ #ifndef EBCDIC #define HSPACE_LIST \ - CHAR_HT, CHAR_SPACE, 0xa0, \ + CHAR_HT, CHAR_SPACE, CHAR_NBSP, \ 0x1680, 0x180e, 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, \ 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x202f, 0x205f, 0x3000, \ NOTACHAR @@ -1071,7 +1010,7 @@ other. NOTE: The values also appear in pcre_jit_compile.c. */ #define HSPACE_BYTE_CASES \ case CHAR_HT: \ case CHAR_SPACE: \ - case 0xa0 /* NBSP */ + case CHAR_NBSP #define HSPACE_CASES \ HSPACE_BYTE_CASES: \ @@ -1098,11 +1037,12 @@ other. NOTE: The values also appear in pcre_jit_compile.c. */ /* ------ EBCDIC environments ------ */ #else -#define HSPACE_LIST CHAR_HT, CHAR_SPACE +#define HSPACE_LIST CHAR_HT, CHAR_SPACE, CHAR_NBSP, NOTACHAR #define HSPACE_BYTE_CASES \ case CHAR_HT: \ - case CHAR_SPACE + case CHAR_SPACE: \ + case CHAR_NBSP #define HSPACE_CASES HSPACE_BYTE_CASES @@ -1276,6 +1216,7 @@ same code point. */ #define CHAR_ESC '\047' #define CHAR_DEL '\007' +#define CHAR_NBSP '\x41' #define STR_ESC "\047" #define STR_DEL "\007" @@ -1290,6 +1231,7 @@ a positive value. */ #define CHAR_NEL ((unsigned char)'\x85') #define CHAR_ESC '\033' #define CHAR_DEL '\177' +#define CHAR_NBSP ((unsigned char)'\xa0') #define STR_LF "\n" #define STR_NL STR_LF @@ -1667,6 +1609,7 @@ only. */ #define CHAR_VERTICAL_LINE '\174' #define CHAR_RIGHT_CURLY_BRACKET '\175' #define CHAR_TILDE '\176' +#define CHAR_NBSP ((unsigned char)'\xa0') #define STR_HT "\011" #define STR_VT "\013" @@ -1823,6 +1766,10 @@ only. */ /* Escape items that are just an encoding of a particular data value. */ +#ifndef ESC_a +#define ESC_a CHAR_BEL +#endif + #ifndef ESC_e #define ESC_e CHAR_ESC #endif @@ -1874,8 +1821,9 @@ table. */ /* Flag bits and data types for the extended class (OP_XCLASS) for classes that contain characters with values greater than 255. */ -#define XCL_NOT 0x01 /* Flag: this is a negative class */ -#define XCL_MAP 0x02 /* Flag: a 32-byte map is present */ +#define XCL_NOT 0x01 /* Flag: this is a negative class */ +#define XCL_MAP 0x02 /* Flag: a 32-byte map is present */ +#define XCL_HASPROP 0x04 /* Flag: property checks are present. 
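For reference, the horizontal-space set that HSPACE_LIST now spells with CHAR_NBSP can be checked directly; this standalone predicate lists the same code points as the non-EBCDIC table in the hunk above:

/* TRUE for the horizontal-space characters in HSPACE_LIST (ASCII build):
HT, SPACE, NBSP, plus the Unicode horizontal spaces. */
static int is_hspace(unsigned int c)
{
switch (c)
  {
  case 0x09: case 0x20: case 0xa0:            /* HT, SPACE, NBSP */
  case 0x1680: case 0x180e:
  case 0x2000: case 0x2001: case 0x2002: case 0x2003: case 0x2004:
  case 0x2005: case 0x2006: case 0x2007: case 0x2008: case 0x2009:
  case 0x200a: case 0x202f: case 0x205f: case 0x3000:
  return 1;
  default:
  return 0;
  }
}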
*/ #define XCL_END 0 /* Marks end of individual items */ #define XCL_SINGLE 1 /* Single item (one multibyte char) follows */ @@ -2341,7 +2289,7 @@ enum { ERR0, ERR1, ERR2, ERR3, ERR4, ERR5, ERR6, ERR7, ERR8, ERR9, ERR50, ERR51, ERR52, ERR53, ERR54, ERR55, ERR56, ERR57, ERR58, ERR59, ERR60, ERR61, ERR62, ERR63, ERR64, ERR65, ERR66, ERR67, ERR68, ERR69, ERR70, ERR71, ERR72, ERR73, ERR74, ERR75, ERR76, ERR77, ERR78, ERR79, - ERR80, ERR81, ERR82, ERR83, ERR84, ERRCOUNT }; + ERR80, ERR81, ERR82, ERR83, ERR84, ERR85, ERR86, ERR87, ERRCOUNT }; /* JIT compiling modes. The function list is indexed by them. */ @@ -2506,6 +2454,8 @@ typedef struct compile_data { BOOL had_pruneorskip; /* (*PRUNE) or (*SKIP) encountered */ BOOL check_lookbehind; /* Lookbehinds need later checking */ BOOL dupnames; /* Duplicate names exist */ + BOOL dupgroups; /* Duplicate groups exist: (?| found */ + BOOL iscondassert; /* Next assert is a condition */ int nltype; /* Newline type */ int nllen; /* Newline string length */ pcre_uchar nl[4]; /* Newline string when fixed length */ @@ -2519,6 +2469,13 @@ typedef struct branch_chain { pcre_uchar *current_branch; } branch_chain; +/* Structure for mutual recursion detection. */ + +typedef struct recurse_check { + struct recurse_check *prev; + const pcre_uchar *group; +} recurse_check; + /* Structure for items in a linked list that represents an explicit recursive call within the pattern; used by pcre_exec(). */ @@ -2815,6 +2772,9 @@ extern const pcre_uint8 PRIV(ucd_stage1)[]; extern const pcre_uint16 PRIV(ucd_stage2)[]; extern const pcre_uint32 PRIV(ucp_gentype)[]; extern const pcre_uint32 PRIV(ucp_gbtable)[]; +#ifdef COMPILE_PCRE32 +extern const ucd_record PRIV(dummy_ucd_record)[]; +#endif #ifdef SUPPORT_JIT extern const int PRIV(ucp_typerange)[]; #endif @@ -2823,10 +2783,16 @@ extern const int PRIV(ucp_typerange)[]; /* UCD access macros */ #define UCD_BLOCK_SIZE 128 -#define GET_UCD(ch) (PRIV(ucd_records) + \ +#define REAL_GET_UCD(ch) (PRIV(ucd_records) + \ PRIV(ucd_stage2)[PRIV(ucd_stage1)[(int)(ch) / UCD_BLOCK_SIZE] * \ UCD_BLOCK_SIZE + (int)(ch) % UCD_BLOCK_SIZE]) +#ifdef COMPILE_PCRE32 +#define GET_UCD(ch) ((ch > 0x10ffff)? PRIV(dummy_ucd_record) : REAL_GET_UCD(ch)) +#else +#define GET_UCD(ch) REAL_GET_UCD(ch) +#endif + #define UCD_CHARTYPE(ch) GET_UCD(ch)->chartype #define UCD_SCRIPT(ch) GET_UCD(ch)->script #define UCD_CATEGORY(ch) PRIV(ucp_gentype)[UCD_CHARTYPE(ch)] diff --git a/src/3rd/pcre/pcrejitc.c b/src/3rd/pcre/pcrejitc.c index c511468513f..7f1f1ea42dd 100644 --- a/src/3rd/pcre/pcrejitc.c +++ b/src/3rd/pcre/pcrejitc.c @@ -52,8 +52,8 @@ POSSIBILITY OF SUCH DAMAGE. we just include it. This way we don't need to touch the build system files. */ -#define SLJIT_MALLOC(size) (PUBL(malloc))(size) -#define SLJIT_FREE(ptr) (PUBL(free))(ptr) +#define SLJIT_MALLOC(size, allocator_data) (PUBL(malloc))(size) +#define SLJIT_FREE(ptr, allocator_data) (PUBL(free))(ptr) #define SLJIT_CONFIG_AUTO 1 #define SLJIT_CONFIG_STATIC 1 #define SLJIT_VERBOSE 0 @@ -164,26 +164,26 @@ typedef struct jit_arguments { const pcre_uchar *begin; const pcre_uchar *end; int *offsets; - pcre_uchar *uchar_ptr; pcre_uchar *mark_ptr; void *callout_data; /* Everything else after. 
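The GET_UCD split above exists because the 32-bit library can be handed code points beyond the Unicode range; those are redirected to PRIV(dummy_ucd_record) instead of indexing the stage tables out of bounds. A toy version of the two-stage lookup plus that guard — the tables and record type here are tiny stand-ins, not the real UCD data:

#include <stdio.h>

typedef struct { unsigned char chartype, script; } ucd_record;

#define UCD_BLOCK_SIZE 128
static const ucd_record records[2] = { { 0, 0 }, { 9, 1 } };
static const ucd_record dummy_record = { 0, 0 };  /* like PRIV(dummy_ucd_record) */
static const unsigned char stage1[2] = { 0, 1 };  /* block -> stage2 page */
static const unsigned char stage2[2 * UCD_BLOCK_SIZE] = { [65] = 1 };  /* 'A' -> record 1 */

static const ucd_record *get_ucd(unsigned int ch)
{
if (ch > 0x10ffff) return &dummy_record;          /* the COMPILE_PCRE32 guard */
if (ch >= 2 * UCD_BLOCK_SIZE) ch = 0;             /* toy tables cover 256 chars */
return records + stage2[stage1[ch / UCD_BLOCK_SIZE] * UCD_BLOCK_SIZE
                        + ch % UCD_BLOCK_SIZE];
}

int main(void)
{
printf("chartype('A')=%d chartype(0x7fffffff)=%d\n",
       get_ucd('A')->chartype, get_ucd(0x7fffffffu)->chartype);
return 0;
}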
*/ - pcre_uint32 limit_match; + sljit_u32 limit_match; int real_offset_count; int offset_count; - pcre_uint8 notbol; - pcre_uint8 noteol; - pcre_uint8 notempty; - pcre_uint8 notempty_atstart; + sljit_u8 notbol; + sljit_u8 noteol; + sljit_u8 notempty; + sljit_u8 notempty_atstart; } jit_arguments; typedef struct executable_functions { void *executable_funcs[JIT_NUMBER_OF_COMPILE_MODES]; + void *read_only_data_heads[JIT_NUMBER_OF_COMPILE_MODES]; + sljit_uw executable_sizes[JIT_NUMBER_OF_COMPILE_MODES]; PUBL(jit_callback) callback; void *userdata; - pcre_uint32 top_bracket; - pcre_uint32 limit_match; - sljit_uw executable_sizes[JIT_NUMBER_OF_COMPILE_MODES]; + sljit_u32 top_bracket; + sljit_u32 limit_match; } executable_functions; typedef struct jump_list { @@ -197,6 +197,12 @@ typedef struct stub_list { struct stub_list *next; } stub_list; +typedef struct label_addr_list { + struct sljit_label *label; + sljit_uw *update_addr; + struct label_addr_list *next; +} label_addr_list; + enum frame_types { no_frame = -1, no_stack = -2 @@ -207,7 +213,7 @@ enum control_types { type_then_trap = 1 }; -typedef int (SLJIT_CALL *jit_function)(jit_arguments *args); +typedef int (SLJIT_FUNC *jit_function)(jit_arguments *args); /* The following structure is the key data type for the recursive code generator. It is allocated by compile_matchingpath, and contains @@ -270,11 +276,25 @@ typedef struct braminzero_backtrack { struct sljit_label *matchingpath; } braminzero_backtrack; -typedef struct iterator_backtrack { +typedef struct char_iterator_backtrack { + backtrack_common common; + /* Next iteration. */ + struct sljit_label *matchingpath; + union { + jump_list *backtracks; + struct { + unsigned int othercasebit; + pcre_uchar chr; + BOOL enabled; + } charpos; + } u; +} char_iterator_backtrack; + +typedef struct ref_iterator_backtrack { backtrack_common common; /* Next iteration. */ struct sljit_label *matchingpath; -} iterator_backtrack; +} ref_iterator_backtrack; typedef struct recurse_entry { struct recurse_entry *next; @@ -306,7 +326,7 @@ typedef struct then_trap_backtrack { int framesize; } then_trap_backtrack; -#define MAX_RANGE_SIZE 6 +#define MAX_RANGE_SIZE 4 typedef struct compiler_common { /* The sljit ceneric compiler. */ @@ -314,62 +334,75 @@ typedef struct compiler_common { /* First byte code. */ pcre_uchar *start; /* Maps private data offset to each opcode. */ - sljit_si *private_data_ptrs; + sljit_s32 *private_data_ptrs; + /* Chain list of read-only data ptrs. */ + void *read_only_data_head; /* Tells whether the capturing bracket is optimized. */ - pcre_uint8 *optimized_cbracket; + sljit_u8 *optimized_cbracket; /* Tells whether the starting offset is a target of then. */ - pcre_uint8 *then_offsets; + sljit_u8 *then_offsets; /* Current position where a THEN must jump. */ then_trap_backtrack *then_trap; /* Starting offset of private data for capturing brackets. */ - int cbra_ptr; + sljit_s32 cbra_ptr; /* Output vector starting point. Must be divisible by 2. */ - int ovector_start; + sljit_s32 ovector_start; + /* Points to the starting character of the current match. */ + sljit_s32 start_ptr; /* Last known position of the requested byte. */ - int req_char_ptr; + sljit_s32 req_char_ptr; /* Head of the last recursion. */ - int recursive_head_ptr; - /* First inspected character for partial matching. */ - int start_used_ptr; + sljit_s32 recursive_head_ptr; + /* First inspected character for partial matching. + (Needed for avoiding zero length partial matches.) 
*/ + sljit_s32 start_used_ptr; /* Starting pointer for partial soft matches. */ - int hit_start; - /* End pointer of the first line. */ - int first_line_end; + sljit_s32 hit_start; + /* Pointer of the match end position. */ + sljit_s32 match_end_ptr; /* Points to the marked string. */ - int mark_ptr; + sljit_s32 mark_ptr; /* Recursive control verb management chain. */ - int control_head_ptr; + sljit_s32 control_head_ptr; /* Points to the last matched capture block index. */ - int capture_last_ptr; - /* Points to the starting position of the current match. */ - int start_ptr; + sljit_s32 capture_last_ptr; + /* Fast forward skipping byte code pointer. */ + pcre_uchar *fast_forward_bc_ptr; + /* Locals used by fast fail optimization. */ + sljit_s32 fast_fail_start_ptr; + sljit_s32 fast_fail_end_ptr; /* Flipped and lower case tables. */ - const pcre_uint8 *fcc; + const sljit_u8 *fcc; sljit_sw lcc; /* Mode can be PCRE_STUDY_JIT_COMPILE and others. */ int mode; + /* TRUE, when minlength is greater than 0. */ + BOOL might_be_empty; /* \K is found in the pattern. */ BOOL has_set_som; /* (*SKIP:arg) is found in the pattern. */ BOOL has_skip_arg; /* (*THEN) is found in the pattern. */ BOOL has_then; - /* Needs to know the start position anytime. */ - BOOL needs_start_ptr; + /* (*SKIP) or (*SKIP:arg) is found in lookbehind assertion. */ + BOOL has_skip_in_assert_back; /* Currently in recurse or negative assert. */ BOOL local_exit; /* Currently in a positive assert. */ BOOL positive_assert; /* Newline control. */ int nltype; + sljit_u32 nlmax; + sljit_u32 nlmin; int newline; int bsr_nltype; + sljit_u32 bsr_nlmax; + sljit_u32 bsr_nlmin; /* Dollar endonly. */ int endonly; /* Tables. */ sljit_sw ctypes; - int digits[2 + MAX_RANGE_SIZE]; /* Named capturing brackets. */ pcre_uchar *name_table; sljit_sw name_count; @@ -380,7 +413,9 @@ typedef struct compiler_common { struct sljit_label *quit_label; struct sljit_label *forced_quit_label; struct sljit_label *accept_label; + struct sljit_label *ff_newline_shortcut; stub_list *stubs; + label_addr_list *label_addrs; recurse_entry *entries; recurse_entry *currententry; jump_list *partialmatch; @@ -403,17 +438,14 @@ typedef struct compiler_common { BOOL utf; #ifdef SUPPORT_UCP BOOL use_ucp; -#endif -#ifndef COMPILE_PCRE32 - jump_list *utfreadchar; + jump_list *getucd; #endif #ifdef COMPILE_PCRE8 + jump_list *utfreadchar; + jump_list *utfreadchar16; jump_list *utfreadtype8; #endif #endif /* SUPPORT_UTF */ -#ifdef SUPPORT_UCP - jump_list *getucd; -#endif } compiler_common; /* For byte_sequence_compare. */ @@ -424,27 +456,27 @@ typedef struct compare_context { #if defined SLJIT_UNALIGNED && SLJIT_UNALIGNED int ucharptr; union { - sljit_si asint; - sljit_uh asushort; + sljit_s32 asint; + sljit_u16 asushort; #if defined COMPILE_PCRE8 - sljit_ub asbyte; - sljit_ub asuchars[4]; + sljit_u8 asbyte; + sljit_u8 asuchars[4]; #elif defined COMPILE_PCRE16 - sljit_uh asuchars[2]; + sljit_u16 asuchars[2]; #elif defined COMPILE_PCRE32 - sljit_ui asuchars[1]; + sljit_u32 asuchars[1]; #endif } c; union { - sljit_si asint; - sljit_uh asushort; + sljit_s32 asint; + sljit_u16 asushort; #if defined COMPILE_PCRE8 - sljit_ub asbyte; - sljit_ub asuchars[4]; + sljit_u8 asbyte; + sljit_u8 asuchars[4]; #elif defined COMPILE_PCRE16 - sljit_uh asuchars[2]; + sljit_u16 asuchars[2]; #elif defined COMPILE_PCRE32 - sljit_ui asuchars[1]; + sljit_u32 asuchars[1]; #endif } oc; #endif @@ -454,18 +486,33 @@ typedef struct compare_context { #undef CMP /* Used for accessing the elements of the stack. 
*/ -#define STACK(i) ((-(i) - 1) * (int)sizeof(sljit_sw)) - -#define TMP1 SLJIT_SCRATCH_REG1 -#define TMP2 SLJIT_SCRATCH_REG3 -#define TMP3 SLJIT_TEMPORARY_EREG2 -#define STR_PTR SLJIT_SAVED_REG1 -#define STR_END SLJIT_SAVED_REG2 -#define STACK_TOP SLJIT_SCRATCH_REG2 -#define STACK_LIMIT SLJIT_SAVED_REG3 -#define ARGUMENTS SLJIT_SAVED_EREG1 -#define COUNT_MATCH SLJIT_SAVED_EREG2 -#define RETURN_ADDR SLJIT_TEMPORARY_EREG1 +#define STACK(i) ((i) * (int)sizeof(sljit_sw)) + +#ifdef SLJIT_PREF_SHIFT_REG +#if SLJIT_PREF_SHIFT_REG == SLJIT_R2 +/* Nothing. */ +#elif SLJIT_PREF_SHIFT_REG == SLJIT_R3 +#define SHIFT_REG_IS_R3 +#else +#error "Unsupported shift register" +#endif +#endif + +#define TMP1 SLJIT_R0 +#ifdef SHIFT_REG_IS_R3 +#define TMP2 SLJIT_R3 +#define TMP3 SLJIT_R2 +#else +#define TMP2 SLJIT_R2 +#define TMP3 SLJIT_R3 +#endif +#define STR_PTR SLJIT_S0 +#define STR_END SLJIT_S1 +#define STACK_TOP SLJIT_R1 +#define STACK_LIMIT SLJIT_S2 +#define COUNT_MATCH SLJIT_S3 +#define ARGUMENTS SLJIT_S4 +#define RETURN_ADDR SLJIT_R4 /* Local space layout. */ /* These two locals can be used by the current opcode. */ @@ -486,14 +533,11 @@ the start pointers when the end of the capturing group has not yet reached. */ #define PRIVATE_DATA(cc) (common->private_data_ptrs[(cc) - common->start]) #if defined COMPILE_PCRE8 -#define MOV_UCHAR SLJIT_MOV_UB -#define MOVU_UCHAR SLJIT_MOVU_UB +#define MOV_UCHAR SLJIT_MOV_U8 #elif defined COMPILE_PCRE16 -#define MOV_UCHAR SLJIT_MOV_UH -#define MOVU_UCHAR SLJIT_MOVU_UH +#define MOV_UCHAR SLJIT_MOV_U16 #elif defined COMPILE_PCRE32 -#define MOV_UCHAR SLJIT_MOV_UI -#define MOVU_UCHAR SLJIT_MOVU_UI +#define MOV_UCHAR SLJIT_MOV_U32 #else #error Unsupported compiling mode #endif @@ -519,12 +563,16 @@ the start pointers when the end of the capturing group has not yet reached. */ sljit_emit_cmp(compiler, (type), (src1), (src1w), (src2), (src2w)) #define CMPTO(type, src1, src1w, src2, src2w, label) \ sljit_set_label(sljit_emit_cmp(compiler, (type), (src1), (src1w), (src2), (src2w)), (label)) -#define OP_FLAGS(op, dst, dstw, src, srcw, type) \ - sljit_emit_op_flags(compiler, (op), (dst), (dstw), (src), (srcw), (type)) +#define OP_FLAGS(op, dst, dstw, type) \ + sljit_emit_op_flags(compiler, (op), (dst), (dstw), (type)) #define GET_LOCAL_BASE(dst, dstw, offset) \ sljit_get_local_base(compiler, (dst), (dstw), (offset)) -static pcre_uchar* bracketend(pcre_uchar* cc) +#define READ_CHAR_MAX 0x7fffffff + +#define INVALID_UTF_CHAR 888 + +static pcre_uchar *bracketend(pcre_uchar *cc) { SLJIT_ASSERT((*cc >= OP_ASSERT && *cc <= OP_ASSERTBACK_NOT) || (*cc >= OP_ONCE && *cc <= OP_SCOND)); do cc += GET(cc, 1); while (*cc == OP_ALT); @@ -533,6 +581,20 @@ cc += 1 + LINK_SIZE; return cc; } +static int no_alternatives(pcre_uchar *cc) +{ +int count = 0; +SLJIT_ASSERT((*cc >= OP_ASSERT && *cc <= OP_ASSERTBACK_NOT) || (*cc >= OP_ONCE && *cc <= OP_SCOND)); +do + { + cc += GET(cc, 1); + count++; + } +while (*cc == OP_ALT); +SLJIT_ASSERT(*cc >= OP_KET && *cc <= OP_KETRPOS); +return count; +} + /* Functions whose might need modification for all new supported opcodes: next_opcode check_opcode_types @@ -735,7 +797,7 @@ switch(*cc) default: /* All opcodes are supported now! */ - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return NULL; } } @@ -744,6 +806,7 @@ static BOOL check_opcode_types(compiler_common *common, pcre_uchar *cc, pcre_uch { int count; pcre_uchar *slot; +pcre_uchar *assert_back_end = cc - 1; /* Calculate important variables (like stack size) and checks whether all opcodes are supported. 
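Note that STACK(i) above flips sign: the JIT's backtracking stack now grows downward, so allocate_stack subtracts from STACK_TOP (see the SLJIT_SUB later in this diff) and slots sit at non-negative offsets from it. A plain-C picture of that convention, nothing more:

#include <stdio.h>

int main(void)
{
long mem[8];
long *stack_top = mem + 8;   /* limit is mem; the stack grows downward */

stack_top -= 2;              /* allocate_stack(common, 2) */
stack_top[0] = 11;           /* STACK(0) */
stack_top[1] = 22;           /* STACK(1) */
printf("%ld %ld\n", stack_top[0], stack_top[1]);
stack_top += 2;              /* free_stack(common, 2) */
return 0;
}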
*/ while (cc < ccend) @@ -752,6 +815,7 @@ while (cc < ccend) { case OP_SET_SOM: common->has_set_som = TRUE; + common->might_be_empty = TRUE; cc += 1; break; @@ -813,15 +877,19 @@ while (cc < ccend) cc += 2 + 2 * LINK_SIZE; break; + case OP_ASSERTBACK: + slot = bracketend(cc); + if (slot > assert_back_end) + assert_back_end = slot; + cc += 1 + LINK_SIZE; + break; + case OP_THEN_ARG: common->has_then = TRUE; common->control_head_ptr = 1; /* Fall through. */ case OP_PRUNE_ARG: - common->needs_start_ptr = TRUE; - /* Fall through. */ - case OP_MARK: if (common->mark_ptr == 0) { @@ -834,17 +902,20 @@ while (cc < ccend) case OP_THEN: common->has_then = TRUE; common->control_head_ptr = 1; - /* Fall through. */ + cc += 1; + break; - case OP_PRUNE: case OP_SKIP: - common->needs_start_ptr = TRUE; + if (cc < assert_back_end) + common->has_skip_in_assert_back = TRUE; cc += 1; break; case OP_SKIP_ARG: common->control_head_ptr = 1; common->has_skip_arg = TRUE; + if (cc < assert_back_end) + common->has_skip_in_assert_back = TRUE; cc += 1 + 2 + cc[1]; break; @@ -858,8 +929,189 @@ while (cc < ccend) return TRUE; } +static BOOL is_accelerated_repeat(pcre_uchar *cc) +{ +switch(*cc) + { + case OP_TYPESTAR: + case OP_TYPEMINSTAR: + case OP_TYPEPLUS: + case OP_TYPEMINPLUS: + case OP_TYPEPOSSTAR: + case OP_TYPEPOSPLUS: + return (cc[1] != OP_ANYNL && cc[1] != OP_EXTUNI); + + case OP_STAR: + case OP_MINSTAR: + case OP_PLUS: + case OP_MINPLUS: + case OP_POSSTAR: + case OP_POSPLUS: + + case OP_STARI: + case OP_MINSTARI: + case OP_PLUSI: + case OP_MINPLUSI: + case OP_POSSTARI: + case OP_POSPLUSI: + + case OP_NOTSTAR: + case OP_NOTMINSTAR: + case OP_NOTPLUS: + case OP_NOTMINPLUS: + case OP_NOTPOSSTAR: + case OP_NOTPOSPLUS: + + case OP_NOTSTARI: + case OP_NOTMINSTARI: + case OP_NOTPLUSI: + case OP_NOTMINPLUSI: + case OP_NOTPOSSTARI: + case OP_NOTPOSPLUSI: + return TRUE; + + case OP_CLASS: + case OP_NCLASS: +#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 + case OP_XCLASS: + cc += (*cc == OP_XCLASS) ? GET(cc, 1) : (int)(1 + (32 / sizeof(pcre_uchar))); +#else + cc += (1 + (32 / sizeof(pcre_uchar))); +#endif + + switch(*cc) + { + case OP_CRSTAR: + case OP_CRMINSTAR: + case OP_CRPLUS: + case OP_CRMINPLUS: + case OP_CRPOSSTAR: + case OP_CRPOSPLUS: + return TRUE; + } + break; + } +return FALSE; +} + +static SLJIT_INLINE BOOL detect_fast_forward_skip(compiler_common *common, int *private_data_start) +{ +pcre_uchar *cc = common->start; +pcre_uchar *end; + +/* Skip not repeated brackets. */ +while (TRUE) + { + switch(*cc) + { + case OP_SOD: + case OP_SOM: + case OP_SET_SOM: + case OP_NOT_WORD_BOUNDARY: + case OP_WORD_BOUNDARY: + case OP_EODN: + case OP_EOD: + case OP_CIRC: + case OP_CIRCM: + case OP_DOLL: + case OP_DOLLM: + /* Zero width assertions. 
*/ + cc++; + continue; + } + + if (*cc != OP_BRA && *cc != OP_CBRA) + break; + + end = cc + GET(cc, 1); + if (*end != OP_KET || PRIVATE_DATA(end) != 0) + return FALSE; + if (*cc == OP_CBRA) + { + if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0) + return FALSE; + cc += IMM2_SIZE; + } + cc += 1 + LINK_SIZE; + } + +if (is_accelerated_repeat(cc)) + { + common->fast_forward_bc_ptr = cc; + common->private_data_ptrs[(cc + 1) - common->start] = *private_data_start; + *private_data_start += sizeof(sljit_sw); + return TRUE; + } +return FALSE; +} + +static SLJIT_INLINE void detect_fast_fail(compiler_common *common, pcre_uchar *cc, int *private_data_start, sljit_s32 depth) +{ + pcre_uchar *next_alt; + + SLJIT_ASSERT(*cc == OP_BRA || *cc == OP_CBRA); + + if (*cc == OP_CBRA && common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0) + return; + + next_alt = bracketend(cc) - (1 + LINK_SIZE); + if (*next_alt != OP_KET || PRIVATE_DATA(next_alt) != 0) + return; + + do + { + next_alt = cc + GET(cc, 1); + + cc += 1 + LINK_SIZE + ((*cc == OP_CBRA) ? IMM2_SIZE : 0); + + while (TRUE) + { + switch(*cc) + { + case OP_SOD: + case OP_SOM: + case OP_SET_SOM: + case OP_NOT_WORD_BOUNDARY: + case OP_WORD_BOUNDARY: + case OP_EODN: + case OP_EOD: + case OP_CIRC: + case OP_CIRCM: + case OP_DOLL: + case OP_DOLLM: + /* Zero width assertions. */ + cc++; + continue; + } + break; + } + + if (depth > 0 && (*cc == OP_BRA || *cc == OP_CBRA)) + detect_fast_fail(common, cc, private_data_start, depth - 1); + + if (is_accelerated_repeat(cc)) + { + common->private_data_ptrs[(cc + 1) - common->start] = *private_data_start; + + if (common->fast_fail_start_ptr == 0) + common->fast_fail_start_ptr = *private_data_start; + + *private_data_start += sizeof(sljit_sw); + common->fast_fail_end_ptr = *private_data_start; + + if (*private_data_start > SLJIT_MAX_LOCAL_SIZE) + return; + } + + cc = next_alt; + } + while (*cc == OP_ALT); +} + static int get_class_iterator_size(pcre_uchar *cc) { +sljit_u32 min; +sljit_u32 max; switch(*cc) { case OP_CRSTAR: @@ -874,9 +1126,14 @@ switch(*cc) case OP_CRRANGE: case OP_CRMINRANGE: - if (GET2(cc, 1) == GET2(cc, 1 + IMM2_SIZE)) - return 0; - return 2; + min = GET2(cc, 1); + max = GET2(cc, 1 + IMM2_SIZE); + if (max == 0) + return (*cc == OP_CRRANGE) ? 
2 : 1; + max -= min; + if (max > 2) + max = 2; + return max; default: return 0; @@ -1027,6 +1284,7 @@ pcre_uchar *alternative; pcre_uchar *end = NULL; int private_data_ptr = *private_data_start; int space, size, bracketlen; +BOOL repeat_check = TRUE; while (cc < ccend) { @@ -1034,9 +1292,10 @@ while (cc < ccend) size = 0; bracketlen = 0; if (private_data_ptr > SLJIT_MAX_LOCAL_SIZE) - return; + break; - if (*cc == OP_ONCE || *cc == OP_ONCE_NC || *cc == OP_BRA || *cc == OP_CBRA || *cc == OP_COND) + if (repeat_check && (*cc == OP_ONCE || *cc == OP_ONCE_NC || *cc == OP_BRA || *cc == OP_CBRA || *cc == OP_COND)) + { if (detect_repeat(common, cc)) { /* These brackets are converted to repeats, so no global @@ -1044,6 +1303,8 @@ while (cc < ccend) if (cc >= end) end = bracketend(cc); } + } + repeat_check = TRUE; switch(*cc) { @@ -1099,6 +1360,13 @@ while (cc < ccend) bracketlen = 1 + LINK_SIZE + IMM2_SIZE; break; + case OP_BRAZERO: + case OP_BRAMINZERO: + case OP_BRAPOSZERO: + repeat_check = FALSE; + size = 1; + break; + CASE_ITERATOR_PRIVATE_DATA_1 space = 1; size = -2; @@ -1125,22 +1393,27 @@ while (cc < ccend) size = 1; break; - CASE_ITERATOR_TYPE_PRIVATE_DATA_2B + case OP_TYPEUPTO: if (cc[1 + IMM2_SIZE] != OP_ANYNL && cc[1 + IMM2_SIZE] != OP_EXTUNI) space = 2; size = 1 + IMM2_SIZE; break; + case OP_TYPEMINUPTO: + space = 2; + size = 1 + IMM2_SIZE; + break; + case OP_CLASS: case OP_NCLASS: - size += 1 + 32 / sizeof(pcre_uchar); space = get_class_iterator_size(cc + size); + size = 1 + 32 / sizeof(pcre_uchar); break; #if defined SUPPORT_UTF || !defined COMPILE_PCRE8 case OP_XCLASS: - size = GET(cc, 1); space = get_class_iterator_size(cc + size); + size = GET(cc, 1); break; #endif @@ -1186,7 +1459,7 @@ while (cc < ccend) } /* Returns with a frame_types (always < 0) if no need for frame. */ -static int get_framesize(compiler_common *common, pcre_uchar *cc, pcre_uchar *ccend, BOOL recursive, BOOL* needs_control_head) +static int get_framesize(compiler_common *common, pcre_uchar *cc, pcre_uchar *ccend, BOOL recursive, BOOL *needs_control_head) { int length = 0; int possessive = 0; @@ -1279,6 +1552,13 @@ while (cc < ccend) cc += 1 + LINK_SIZE + IMM2_SIZE; break; + case OP_THEN: + stack_restore = TRUE; + if (common->control_head_ptr != 0) + *needs_control_head = TRUE; + cc ++; + break; + default: stack_restore = TRUE; /* Fall through. 
*/ @@ -1346,6 +1626,7 @@ while (cc < ccend) case OP_CLASS: case OP_NCLASS: case OP_XCLASS: + case OP_CALLOUT: cc = next_opcode(common, cc); SLJIT_ASSERT(cc != NULL); @@ -1390,11 +1671,11 @@ while (cc < ccend) SLJIT_ASSERT(common->has_set_som); if (!setsom_found) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -OVECTOR(0)); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); setsom_found = TRUE; } cc += 1; @@ -1406,11 +1687,11 @@ while (cc < ccend) SLJIT_ASSERT(common->mark_ptr != 0); if (!setmark_found) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->mark_ptr); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); setmark_found = TRUE; } cc += 1 + 2 + cc[1]; @@ -1419,29 +1700,29 @@ while (cc < ccend) case OP_RECURSE: if (common->has_set_som && !setsom_found) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -OVECTOR(0)); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); setsom_found = TRUE; } if (common->mark_ptr != 0 && !setmark_found) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->mark_ptr); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); setmark_found = TRUE; } if (common->capture_last_ptr != 0 && !capture_last_found) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->capture_last_ptr); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); capture_last_found = TRUE; } cc += 1 + LINK_SIZE; @@ -1453,22 +1734,22 @@ while (cc < ccend) case OP_SCBRAPOS: if (common->capture_last_ptr != 0 && !capture_last_found) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->capture_last_ptr); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); capture_last_found = TRUE; } offset = (GET2(cc, 1 + LINK_SIZE)) << 1; OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), 
stackpos, SLJIT_IMM, OVECTOR(offset)); - stackpos += (int)sizeof(sljit_sw); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset)); - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1)); + stackpos -= (int)sizeof(sljit_sw); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP2, 0); - stackpos += (int)sizeof(sljit_sw); + stackpos -= (int)sizeof(sljit_sw); cc += 1 + LINK_SIZE + IMM2_SIZE; break; @@ -1496,7 +1777,11 @@ while (cc < ccend) { case OP_KET: if (PRIVATE_DATA(cc) != 0) + { private_data_length++; + SLJIT_ASSERT(PRIVATE_DATA(cc + 1) != 0); + cc += PRIVATE_DATA(cc + 1); + } cc += 1 + LINK_SIZE; break; @@ -1511,6 +1796,7 @@ while (cc < ccend) case OP_SBRAPOS: case OP_SCOND: private_data_length++; + SLJIT_ASSERT(PRIVATE_DATA(cc) != 0); cc += 1 + LINK_SIZE; break; @@ -1614,18 +1900,17 @@ BOOL tmp1empty = TRUE; BOOL tmp2empty = TRUE; pcre_uchar *alternative; enum { - start, loop, end } status; -status = save ? start : loop; -stackptr = STACK(stackptr - 2); +status = loop; +stackptr = STACK(stackptr); stacktop = STACK(stacktop - 1); if (!save) { - stackptr += (needs_control_head ? 2 : 1) * sizeof(sljit_sw); + stacktop -= (needs_control_head ? 2 : 1) * sizeof(sljit_sw); if (stackptr < stacktop) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), stackptr); @@ -1641,194 +1926,186 @@ if (!save) /* The tmp1next must be TRUE in either way. */ } +SLJIT_ASSERT(common->recursive_head_ptr != 0); + do { count = 0; - switch(status) + if (cc >= ccend) { - case start: - SLJIT_ASSERT(save && common->recursive_head_ptr != 0); + if (!save) + break; + count = 1; srcw[0] = common->recursive_head_ptr; if (needs_control_head) { SLJIT_ASSERT(common->control_head_ptr != 0); count = 2; - srcw[1] = common->control_head_ptr; + srcw[0] = common->control_head_ptr; + srcw[1] = common->recursive_head_ptr; + } + status = end; + } + else switch(*cc) + { + case OP_KET: + if (PRIVATE_DATA(cc) != 0) + { + count = 1; + srcw[0] = PRIVATE_DATA(cc); + SLJIT_ASSERT(PRIVATE_DATA(cc + 1) != 0); + cc += PRIVATE_DATA(cc + 1); } - status = loop; + cc += 1 + LINK_SIZE; + break; + + case OP_ASSERT: + case OP_ASSERT_NOT: + case OP_ASSERTBACK: + case OP_ASSERTBACK_NOT: + case OP_ONCE: + case OP_ONCE_NC: + case OP_BRAPOS: + case OP_SBRA: + case OP_SBRAPOS: + case OP_SCOND: + count = 1; + srcw[0] = PRIVATE_DATA(cc); + SLJIT_ASSERT(srcw[0] != 0); + cc += 1 + LINK_SIZE; break; - case loop: - if (cc >= ccend) + case OP_CBRA: + case OP_SCBRA: + if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0) { - status = end; - break; + count = 1; + srcw[0] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE)); } + cc += 1 + LINK_SIZE + IMM2_SIZE; + break; - switch(*cc) - { - case OP_KET: - if (PRIVATE_DATA(cc) != 0) - { - count = 1; - srcw[0] = PRIVATE_DATA(cc); - } - cc += 1 + LINK_SIZE; - break; + case OP_CBRAPOS: + case OP_SCBRAPOS: + count = 2; + srcw[0] = PRIVATE_DATA(cc); + srcw[1] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE)); + SLJIT_ASSERT(srcw[0] != 0 && srcw[1] != 0); + cc += 1 + LINK_SIZE + IMM2_SIZE; + break; - case OP_ASSERT: - case OP_ASSERT_NOT: - case OP_ASSERTBACK: - case OP_ASSERTBACK_NOT: - case OP_ONCE: - case OP_ONCE_NC: - case OP_BRAPOS: - case OP_SBRA: - case OP_SBRAPOS: - case OP_SCOND: + case OP_COND: + /* Might be a hidden SCOND. 
*/ + alternative = cc + GET(cc, 1); + if (*alternative == OP_KETRMAX || *alternative == OP_KETRMIN) + { count = 1; srcw[0] = PRIVATE_DATA(cc); SLJIT_ASSERT(srcw[0] != 0); - cc += 1 + LINK_SIZE; - break; + } + cc += 1 + LINK_SIZE; + break; - case OP_CBRA: - case OP_SCBRA: - if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0) - { - count = 1; - srcw[0] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE)); - } - cc += 1 + LINK_SIZE + IMM2_SIZE; - break; - - case OP_CBRAPOS: - case OP_SCBRAPOS: - count = 2; + CASE_ITERATOR_PRIVATE_DATA_1 + if (PRIVATE_DATA(cc)) + { + count = 1; srcw[0] = PRIVATE_DATA(cc); - srcw[1] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE)); - SLJIT_ASSERT(srcw[0] != 0 && srcw[1] != 0); - cc += 1 + LINK_SIZE + IMM2_SIZE; - break; - - case OP_COND: - /* Might be a hidden SCOND. */ - alternative = cc + GET(cc, 1); - if (*alternative == OP_KETRMAX || *alternative == OP_KETRMIN) - { - count = 1; - srcw[0] = PRIVATE_DATA(cc); - SLJIT_ASSERT(srcw[0] != 0); - } - cc += 1 + LINK_SIZE; - break; - - CASE_ITERATOR_PRIVATE_DATA_1 - if (PRIVATE_DATA(cc)) - { - count = 1; - srcw[0] = PRIVATE_DATA(cc); - } - cc += 2; + } + cc += 2; #ifdef SUPPORT_UTF - if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]); + if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]); #endif - break; + break; - CASE_ITERATOR_PRIVATE_DATA_2A - if (PRIVATE_DATA(cc)) - { - count = 2; - srcw[0] = PRIVATE_DATA(cc); - srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw); - } - cc += 2; + CASE_ITERATOR_PRIVATE_DATA_2A + if (PRIVATE_DATA(cc)) + { + count = 2; + srcw[0] = PRIVATE_DATA(cc); + srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw); + } + cc += 2; #ifdef SUPPORT_UTF - if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]); + if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]); #endif - break; + break; - CASE_ITERATOR_PRIVATE_DATA_2B - if (PRIVATE_DATA(cc)) - { - count = 2; - srcw[0] = PRIVATE_DATA(cc); - srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw); - } - cc += 2 + IMM2_SIZE; + CASE_ITERATOR_PRIVATE_DATA_2B + if (PRIVATE_DATA(cc)) + { + count = 2; + srcw[0] = PRIVATE_DATA(cc); + srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw); + } + cc += 2 + IMM2_SIZE; #ifdef SUPPORT_UTF - if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]); + if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]); #endif - break; + break; + + CASE_ITERATOR_TYPE_PRIVATE_DATA_1 + if (PRIVATE_DATA(cc)) + { + count = 1; + srcw[0] = PRIVATE_DATA(cc); + } + cc += 1; + break; + + CASE_ITERATOR_TYPE_PRIVATE_DATA_2A + if (PRIVATE_DATA(cc)) + { + count = 2; + srcw[0] = PRIVATE_DATA(cc); + srcw[1] = srcw[0] + sizeof(sljit_sw); + } + cc += 1; + break; + + CASE_ITERATOR_TYPE_PRIVATE_DATA_2B + if (PRIVATE_DATA(cc)) + { + count = 2; + srcw[0] = PRIVATE_DATA(cc); + srcw[1] = srcw[0] + sizeof(sljit_sw); + } + cc += 1 + IMM2_SIZE; + break; - CASE_ITERATOR_TYPE_PRIVATE_DATA_1 - if (PRIVATE_DATA(cc)) + case OP_CLASS: + case OP_NCLASS: +#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 + case OP_XCLASS: + size = (*cc == OP_XCLASS) ? 
GET(cc, 1) : 1 + 32 / (int)sizeof(pcre_uchar); +#else + size = 1 + 32 / (int)sizeof(pcre_uchar); +#endif + if (PRIVATE_DATA(cc)) + switch(get_class_iterator_size(cc + size)) { + case 1: count = 1; srcw[0] = PRIVATE_DATA(cc); - } - cc += 1; - break; + break; - CASE_ITERATOR_TYPE_PRIVATE_DATA_2A - if (PRIVATE_DATA(cc)) - { + case 2: count = 2; srcw[0] = PRIVATE_DATA(cc); srcw[1] = srcw[0] + sizeof(sljit_sw); - } - cc += 1; - break; + break; - CASE_ITERATOR_TYPE_PRIVATE_DATA_2B - if (PRIVATE_DATA(cc)) - { - count = 2; - srcw[0] = PRIVATE_DATA(cc); - srcw[1] = srcw[0] + sizeof(sljit_sw); + default: + SLJIT_UNREACHABLE(); + break; } - cc += 1 + IMM2_SIZE; - break; - - case OP_CLASS: - case OP_NCLASS: -#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 - case OP_XCLASS: - size = (*cc == OP_XCLASS) ? GET(cc, 1) : 1 + 32 / (int)sizeof(pcre_uchar); -#else - size = 1 + 32 / (int)sizeof(pcre_uchar); -#endif - if (PRIVATE_DATA(cc)) - switch(get_class_iterator_size(cc + size)) - { - case 1: - count = 1; - srcw[0] = PRIVATE_DATA(cc); - break; - - case 2: - count = 2; - srcw[0] = PRIVATE_DATA(cc); - srcw[1] = srcw[0] + sizeof(sljit_sw); - break; - - default: - SLJIT_ASSERT_STOP(); - break; - } - cc += size; - break; - - default: - cc = next_opcode(common, cc); - SLJIT_ASSERT(cc != NULL); - break; - } + cc += size; break; - case end: - SLJIT_ASSERT_STOP(); + default: + cc = next_opcode(common, cc); + SLJIT_ASSERT(cc != NULL); break; } @@ -1844,7 +2121,7 @@ do OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackptr, TMP1, 0); stackptr += sizeof(sljit_sw); } - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), srcw[count]); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), srcw[count]); tmp1empty = FALSE; tmp1next = FALSE; } @@ -1855,7 +2132,7 @@ do OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackptr, TMP2, 0); stackptr += sizeof(sljit_sw); } - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), srcw[count]); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), srcw[count]); tmp2empty = FALSE; tmp1next = TRUE; } @@ -1865,7 +2142,7 @@ do if (tmp1next) { SLJIT_ASSERT(!tmp1empty); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), srcw[count], TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), srcw[count], TMP1, 0); tmp1empty = stackptr >= stacktop; if (!tmp1empty) { @@ -1877,7 +2154,7 @@ do else { SLJIT_ASSERT(!tmp2empty); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), srcw[count], TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), srcw[count], TMP2, 0); tmp2empty = stackptr >= stacktop; if (!tmp2empty) { @@ -1923,7 +2200,7 @@ if (save) SLJIT_ASSERT(cc == ccend && stackptr == stacktop && (save || (tmp1empty && tmp2empty))); } -static SLJIT_INLINE pcre_uchar *set_then_offsets(compiler_common *common, pcre_uchar *cc, pcre_uint8 *current_offset) +static SLJIT_INLINE pcre_uchar *set_then_offsets(compiler_common *common, pcre_uchar *cc, sljit_u8 *current_offset) { pcre_uchar *end = bracketend(cc); BOOL has_alternatives = cc[GET(cc, 1)] == OP_ALT; @@ -1979,7 +2256,7 @@ while (list) } } -static SLJIT_INLINE void add_jump(struct sljit_compiler *compiler, jump_list **list, struct sljit_jump* jump) +static SLJIT_INLINE void add_jump(struct sljit_compiler *compiler, jump_list **list, struct sljit_jump *jump) { jump_list *list_item = sljit_alloc_memory(compiler, sizeof(jump_list)); if (list_item) @@ -1993,7 +2270,7 @@ if (list_item) static void add_stub(compiler_common *common, struct sljit_jump *start) { DEFINE_COMPILER; -stub_list* list_item = sljit_alloc_memory(compiler, sizeof(stub_list)); +stub_list *list_item = sljit_alloc_memory(compiler, 
sizeof(stub_list)); if (list_item) { @@ -2007,7 +2284,7 @@ if (list_item) static void flush_stubs(compiler_common *common) { DEFINE_COMPILER; -stub_list* list_item = common->stubs; +stub_list *list_item = common->stubs; while (list_item) { @@ -2019,12 +2296,26 @@ while (list_item) common->stubs = NULL; } +static void add_label_addr(compiler_common *common, sljit_uw *update_addr) +{ +DEFINE_COMPILER; +label_addr_list *label_addr; + +label_addr = sljit_alloc_memory(compiler, sizeof(label_addr_list)); +if (label_addr == NULL) + return; +label_addr->label = LABEL(); +label_addr->update_addr = update_addr; +label_addr->next = common->label_addrs; +common->label_addrs = label_addr; +} + static SLJIT_INLINE void count_match(compiler_common *common) { DEFINE_COMPILER; -OP2(SLJIT_SUB | SLJIT_SET_E, COUNT_MATCH, 0, COUNT_MATCH, 0, SLJIT_IMM, 1); -add_jump(compiler, &common->calllimit, JUMP(SLJIT_C_ZERO)); +OP2(SLJIT_SUB | SLJIT_SET_Z, COUNT_MATCH, 0, COUNT_MATCH, 0, SLJIT_IMM, 1); +add_jump(compiler, &common->calllimit, JUMP(SLJIT_ZERO)); } static SLJIT_INLINE void allocate_stack(compiler_common *common, int size) @@ -2032,21 +2323,58 @@ static SLJIT_INLINE void allocate_stack(compiler_common *common, int size) /* May destroy all locals and registers except TMP2. */ DEFINE_COMPILER; -OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw)); +SLJIT_ASSERT(size > 0); +OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw)); #ifdef DESTROY_REGISTERS OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 12345); OP1(SLJIT_MOV, TMP3, 0, TMP1, 0); OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0, TMP1, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1, TMP1, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, TMP1, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, TMP1, 0); #endif -add_stub(common, CMP(SLJIT_C_GREATER, STACK_TOP, 0, STACK_LIMIT, 0)); +add_stub(common, CMP(SLJIT_LESS, STACK_TOP, 0, STACK_LIMIT, 0)); } static SLJIT_INLINE void free_stack(compiler_common *common, int size) { DEFINE_COMPILER; -OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw)); + +SLJIT_ASSERT(size > 0); +OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw)); +} + +static sljit_uw * allocate_read_only_data(compiler_common *common, sljit_uw size) +{ +DEFINE_COMPILER; +sljit_uw *result; + +if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler))) + return NULL; + +result = (sljit_uw *)SLJIT_MALLOC(size + sizeof(sljit_uw), compiler->allocator_data); +if (SLJIT_UNLIKELY(result == NULL)) + { + sljit_set_compiler_memory_error(compiler); + return NULL; + } + +*(void**)result = common->read_only_data_head; +common->read_only_data_head = (void *)result; +return result + 1; +} + +static void free_read_only_data(void *current, void *allocator_data) +{ +void *next; + +SLJIT_UNUSED_ARG(allocator_data); + +while (current != NULL) + { + next = *(void**)current; + SLJIT_FREE(current, allocator_data); + current = next; + } } static SLJIT_INLINE void reset_ovector(compiler_common *common, int length) @@ -2058,23 +2386,48 @@ int i; /* At this point we can freely use all temporary registers. */ SLJIT_ASSERT(length > 1); /* TMP1 returns with begin - 1. 
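allocate_read_only_data above threads every block onto a per-compile chain by storing the previous head in the first word and returning the address just past it; free_read_only_data later walks that chain. The same pattern standalone, assuming nothing about sljit:

#include <stdlib.h>

static void *ro_head = NULL;   /* like common->read_only_data_head */

static void *ro_alloc(size_t size)
{
void **block = (void **)malloc(size + sizeof(void *));
if (block == NULL) return NULL;
*block = ro_head;              /* first word links to the previous block */
ro_head = (void *)block;
return (void *)(block + 1);    /* caller's data starts after the link */
}

static void ro_free_all(void)
{
while (ro_head != NULL)
  {
  void *next = *(void **)ro_head;
  free(ro_head);
  ro_head = next;
  }
}

int main(void)
{
char *p = (char *)ro_alloc(16);
if (p != NULL) p[0] = 'x';
ro_free_all();
return 0;
}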
*/ -OP2(SLJIT_SUB, SLJIT_SCRATCH_REG1, 0, SLJIT_MEM1(SLJIT_SAVED_REG1), SLJIT_OFFSETOF(jit_arguments, begin), SLJIT_IMM, IN_UCHARS(1)); +OP2(SLJIT_SUB, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_S0), SLJIT_OFFSETOF(jit_arguments, begin), SLJIT_IMM, IN_UCHARS(1)); if (length < 8) { for (i = 1; i < length; i++) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(i), SLJIT_SCRATCH_REG1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(i), SLJIT_R0, 0); } else { - GET_LOCAL_BASE(SLJIT_SCRATCH_REG2, 0, OVECTOR_START); - OP1(SLJIT_MOV, SLJIT_SCRATCH_REG3, 0, SLJIT_IMM, length - 1); - loop = LABEL(); - OP1(SLJIT_MOVU, SLJIT_MEM1(SLJIT_SCRATCH_REG2), sizeof(sljit_sw), SLJIT_SCRATCH_REG1, 0); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_SCRATCH_REG3, 0, SLJIT_SCRATCH_REG3, 0, SLJIT_IMM, 1); - JUMPTO(SLJIT_C_NOT_ZERO, loop); + if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)) == SLJIT_SUCCESS) + { + GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1); + loop = LABEL(); + sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, loop); + } + else + { + GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START + sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1); + loop = LABEL(); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), 0, SLJIT_R0, 0); + OP2(SLJIT_ADD, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, sizeof(sljit_sw)); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, loop); + } } } +static SLJIT_INLINE void reset_fast_fail(compiler_common *common) +{ +DEFINE_COMPILER; +sljit_s32 i; + +SLJIT_ASSERT(common->fast_fail_start_ptr < common->fast_fail_end_ptr); + +OP2(SLJIT_SUB, TMP1, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); +for (i = common->fast_fail_start_ptr; i < common->fast_fail_end_ptr; i += sizeof(sljit_sw)) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), i, TMP1, 0); +} + static SLJIT_INLINE void do_reset_match(compiler_common *common, int length) { DEFINE_COMPILER; @@ -2084,53 +2437,67 @@ int i; SLJIT_ASSERT(length > 1); /* OVECTOR(1) contains the "string begin - 1" constant. 
*/ if (length > 2) - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(1)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1)); if (length < 8) { for (i = 2; i < length; i++) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(i), TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(i), TMP1, 0); } else { - GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + sizeof(sljit_sw)); - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2); - loop = LABEL(); - OP1(SLJIT_MOVU, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP1, 0); - OP2(SLJIT_SUB | SLJIT_SET_E, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1); - JUMPTO(SLJIT_C_NOT_ZERO, loop); + if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)) == SLJIT_SUCCESS) + { + GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + sizeof(sljit_sw)); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2); + loop = LABEL(); + sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); + OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, loop); + } + else + { + GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + 2 * sizeof(sljit_sw)); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2); + loop = LABEL(); + OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, TMP1, 0); + OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, SLJIT_IMM, sizeof(sljit_sw)); + OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, loop); + } } OP1(SLJIT_MOV, STACK_TOP, 0, ARGUMENTS, 0); if (common->mark_ptr != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr, SLJIT_IMM, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->mark_ptr, SLJIT_IMM, 0); if (common->control_head_ptr != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, SLJIT_IMM, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_IMM, 0); OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(jit_arguments, stack)); -OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_ptr); -OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(struct sljit_stack, base)); +OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->start_ptr); +OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(struct sljit_stack, end)); } -static sljit_sw SLJIT_CALL do_search_mark(sljit_sw *current, const pcre_uchar *skip_arg) +static sljit_sw SLJIT_FUNC do_search_mark(sljit_sw *current, const pcre_uchar *skip_arg) { while (current != NULL) { - switch (current[-2]) + switch (current[1]) { case type_then_trap: break; case type_mark: - if (STRCMP_UC_UC(skip_arg, (pcre_uchar *)current[-3]) == 0) - return current[-4]; + if (STRCMP_UC_UC(skip_arg, (pcre_uchar *)current[2]) == 0) + return current[3]; break; default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); break; } - current = (sljit_sw*)current[-1]; + SLJIT_ASSERT(current[0] == 0 || current < (sljit_sw*)current[0]); + current = (sljit_sw*)current[0]; } -return -1; +return 0; } static SLJIT_INLINE void copy_ovector(compiler_common *common, int topbracket) @@ -2138,46 +2505,75 @@ static SLJIT_INLINE void copy_ovector(compiler_common *common, int topbracket) DEFINE_COMPILER; struct sljit_label *loop; struct sljit_jump *early_quit; +BOOL has_pre; /* At this point we can freely use all registers. 
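do_search_mark above now walks the control chain through positive indices (current[0] is the next link) and returns 0 rather than -1 when the mark is absent. A simplified C rendering of that walk — the struct layout is illustrative; the real frames are raw sljit_sw slots:

#include <stdio.h>
#include <string.h>

enum { type_mark, type_then_trap };

typedef struct frame {
  struct frame *next;          /* current[0] in the real code */
  int type;                    /* current[1] */
  const char *name;            /* current[2], the mark argument */
  long value;                  /* current[3], the saved position */
} frame;

static long search_mark(const frame *current, const char *skip_arg)
{
for (; current != NULL; current = current->next)
  if (current->type == type_mark && strcmp(current->name, skip_arg) == 0)
    return current->value;
return 0;                      /* 8.45 returns 0, not -1, when not found */
}

int main(void)
{
frame a = { NULL, type_mark, "A", 42 };
frame b = { &a, type_then_trap, NULL, 0 };
printf("%ld %ld\n", search_mark(&b, "A"), search_mark(&b, "B"));
return 0;
}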
*/ -OP1(SLJIT_MOV, SLJIT_SAVED_REG3, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(1)); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(1), STR_PTR, 0); +OP1(SLJIT_MOV, SLJIT_S2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1)); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(1), STR_PTR, 0); -OP1(SLJIT_MOV, SLJIT_SCRATCH_REG1, 0, ARGUMENTS, 0); +OP1(SLJIT_MOV, SLJIT_R0, 0, ARGUMENTS, 0); if (common->mark_ptr != 0) - OP1(SLJIT_MOV, SLJIT_SCRATCH_REG3, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr); -OP1(SLJIT_MOV_SI, SLJIT_SCRATCH_REG2, 0, SLJIT_MEM1(SLJIT_SCRATCH_REG1), SLJIT_OFFSETOF(jit_arguments, offset_count)); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr); +OP1(SLJIT_MOV_S32, SLJIT_R1, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, offset_count)); if (common->mark_ptr != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SCRATCH_REG1), SLJIT_OFFSETOF(jit_arguments, mark_ptr), SLJIT_SCRATCH_REG3, 0); -OP2(SLJIT_SUB, SLJIT_SCRATCH_REG3, 0, SLJIT_MEM1(SLJIT_SCRATCH_REG1), SLJIT_OFFSETOF(jit_arguments, offsets), SLJIT_IMM, sizeof(int)); -OP1(SLJIT_MOV, SLJIT_SCRATCH_REG1, 0, SLJIT_MEM1(SLJIT_SCRATCH_REG1), SLJIT_OFFSETOF(jit_arguments, begin)); -GET_LOCAL_BASE(SLJIT_SAVED_REG1, 0, OVECTOR_START); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, mark_ptr), SLJIT_R2, 0); +OP2(SLJIT_SUB, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, offsets), SLJIT_IMM, sizeof(int)); +OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, begin)); + +has_pre = sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)) == SLJIT_SUCCESS; +GET_LOCAL_BASE(SLJIT_S0, 0, OVECTOR_START - (has_pre ? sizeof(sljit_sw) : 0)); + /* Unlikely, but possible */ -early_quit = CMP(SLJIT_C_EQUAL, SLJIT_SCRATCH_REG2, 0, SLJIT_IMM, 0); +early_quit = CMP(SLJIT_EQUAL, SLJIT_R1, 0, SLJIT_IMM, 0); loop = LABEL(); -OP2(SLJIT_SUB, SLJIT_SAVED_REG2, 0, SLJIT_MEM1(SLJIT_SAVED_REG1), 0, SLJIT_SCRATCH_REG1, 0); -OP2(SLJIT_ADD, SLJIT_SAVED_REG1, 0, SLJIT_SAVED_REG1, 0, SLJIT_IMM, sizeof(sljit_sw)); + +if (has_pre) + sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)); +else + { + OP1(SLJIT_MOV, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_S0), 0); + OP2(SLJIT_ADD, SLJIT_S0, 0, SLJIT_S0, 0, SLJIT_IMM, sizeof(sljit_sw)); + } + +OP2(SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, sizeof(int)); +OP2(SLJIT_SUB, SLJIT_S1, 0, SLJIT_S1, 0, SLJIT_R0, 0); /* Copy the integer value to the output buffer */ #if defined COMPILE_PCRE16 || defined COMPILE_PCRE32 -OP2(SLJIT_ASHR, SLJIT_SAVED_REG2, 0, SLJIT_SAVED_REG2, 0, SLJIT_IMM, UCHAR_SHIFT); +OP2(SLJIT_ASHR, SLJIT_S1, 0, SLJIT_S1, 0, SLJIT_IMM, UCHAR_SHIFT); #endif -OP1(SLJIT_MOVU_SI, SLJIT_MEM1(SLJIT_SCRATCH_REG3), sizeof(int), SLJIT_SAVED_REG2, 0); -OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_SCRATCH_REG2, 0, SLJIT_SCRATCH_REG2, 0, SLJIT_IMM, 1); -JUMPTO(SLJIT_C_NOT_ZERO, loop); + +OP1(SLJIT_MOV_S32, SLJIT_MEM1(SLJIT_R2), 0, SLJIT_S1, 0); +OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); +JUMPTO(SLJIT_NOT_ZERO, loop); JUMPHERE(early_quit); /* Calculate the return value, which is the maximum ovector value. 
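The copy_ovector loop above converts the stored subject pointers into int offsets relative to begin, shifting right by UCHAR_SHIFT so the 16- and 32-bit libraries report character counts rather than byte counts. The arithmetic in isolation (a sketch; the real loop works in registers):

/* Convert absolute positions (as word-sized values) into the int offsets
the caller's ovector expects; uchar_shift is 0, 1 or 2 for the 8-, 16-
and 32-bit libraries. */
static void copy_offsets(const long *positions, long begin, int *offsets,
                         int count, int uchar_shift)
{
int i;
for (i = 0; i < count; i++)
  offsets[i] = (int)((positions[i] - begin) >> uchar_shift);
}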
*/ if (topbracket > 1) { - GET_LOCAL_BASE(SLJIT_SCRATCH_REG1, 0, OVECTOR_START + topbracket * 2 * sizeof(sljit_sw)); - OP1(SLJIT_MOV, SLJIT_SCRATCH_REG2, 0, SLJIT_IMM, topbracket + 1); + if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw))) == SLJIT_SUCCESS) + { + GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + topbracket * 2 * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1); - /* OVECTOR(0) is never equal to SLJIT_SAVED_REG3. */ - loop = LABEL(); - OP1(SLJIT_MOVU, SLJIT_SCRATCH_REG3, 0, SLJIT_MEM1(SLJIT_SCRATCH_REG1), -(2 * (sljit_sw)sizeof(sljit_sw))); - OP2(SLJIT_SUB, SLJIT_SCRATCH_REG2, 0, SLJIT_SCRATCH_REG2, 0, SLJIT_IMM, 1); - CMPTO(SLJIT_C_EQUAL, SLJIT_SCRATCH_REG3, 0, SLJIT_SAVED_REG3, 0, loop); - OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_SCRATCH_REG2, 0); + /* OVECTOR(0) is never equal to SLJIT_S2. */ + loop = LABEL(); + sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw))); + OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); + CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop); + } + else + { + GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + (topbracket - 1) * 2 * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1); + + /* OVECTOR(0) is never equal to SLJIT_S2. */ + loop = LABEL(); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), 0); + OP2(SLJIT_SUB, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 2 * (sljit_sw)sizeof(sljit_sw)); + OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); + CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop); + } + OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0); } else OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1); @@ -2188,39 +2584,39 @@ static SLJIT_INLINE void return_with_partial_match(compiler_common *common, stru DEFINE_COMPILER; struct sljit_jump *jump; -SLJIT_COMPILE_ASSERT(STR_END == SLJIT_SAVED_REG2, str_end_must_be_saved_reg2); +SLJIT_COMPILE_ASSERT(STR_END == SLJIT_S1, str_end_must_be_saved_reg2); SLJIT_ASSERT(common->start_used_ptr != 0 && common->start_ptr != 0 && (common->mode == JIT_PARTIAL_SOFT_COMPILE ? common->hit_start != 0 : common->hit_start == 0)); -OP1(SLJIT_MOV, SLJIT_SCRATCH_REG2, 0, ARGUMENTS, 0); +OP1(SLJIT_MOV, SLJIT_R1, 0, ARGUMENTS, 0); OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE_ERROR_PARTIAL); -OP1(SLJIT_MOV_SI, SLJIT_SCRATCH_REG3, 0, SLJIT_MEM1(SLJIT_SCRATCH_REG2), SLJIT_OFFSETOF(jit_arguments, real_offset_count)); -CMPTO(SLJIT_C_SIG_LESS, SLJIT_SCRATCH_REG3, 0, SLJIT_IMM, 2, quit); +OP1(SLJIT_MOV_S32, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, real_offset_count)); +CMPTO(SLJIT_SIG_LESS, SLJIT_R2, 0, SLJIT_IMM, 2, quit); /* Store match begin and end. */ -OP1(SLJIT_MOV, SLJIT_SAVED_REG1, 0, SLJIT_MEM1(SLJIT_SCRATCH_REG2), SLJIT_OFFSETOF(jit_arguments, begin)); -OP1(SLJIT_MOV, SLJIT_SCRATCH_REG2, 0, SLJIT_MEM1(SLJIT_SCRATCH_REG2), SLJIT_OFFSETOF(jit_arguments, offsets)); +OP1(SLJIT_MOV, SLJIT_S0, 0, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, begin)); +OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, offsets)); -jump = CMP(SLJIT_C_SIG_LESS, SLJIT_SCRATCH_REG3, 0, SLJIT_IMM, 3); -OP2(SLJIT_SUB, SLJIT_SCRATCH_REG3, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mode == JIT_PARTIAL_HARD_COMPILE ? 
common->start_ptr : (common->hit_start + (int)sizeof(sljit_sw)), SLJIT_SAVED_REG1, 0); +jump = CMP(SLJIT_SIG_LESS, SLJIT_R2, 0, SLJIT_IMM, 3); +OP2(SLJIT_SUB, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), common->mode == JIT_PARTIAL_HARD_COMPILE ? common->start_ptr : (common->hit_start + (int)sizeof(sljit_sw)), SLJIT_S0, 0); #if defined COMPILE_PCRE16 || defined COMPILE_PCRE32 -OP2(SLJIT_ASHR, SLJIT_SCRATCH_REG3, 0, SLJIT_SCRATCH_REG3, 0, SLJIT_IMM, UCHAR_SHIFT); +OP2(SLJIT_ASHR, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, UCHAR_SHIFT); #endif -OP1(SLJIT_MOV_SI, SLJIT_MEM1(SLJIT_SCRATCH_REG2), 2 * sizeof(int), SLJIT_SCRATCH_REG3, 0); +OP1(SLJIT_MOV_S32, SLJIT_MEM1(SLJIT_R1), 2 * sizeof(int), SLJIT_R2, 0); JUMPHERE(jump); -OP1(SLJIT_MOV, SLJIT_SCRATCH_REG3, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mode == JIT_PARTIAL_HARD_COMPILE ? common->start_used_ptr : common->hit_start); -OP2(SLJIT_SUB, SLJIT_SAVED_REG2, 0, STR_END, 0, SLJIT_SAVED_REG1, 0); +OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), common->mode == JIT_PARTIAL_HARD_COMPILE ? common->start_used_ptr : common->hit_start); +OP2(SLJIT_SUB, SLJIT_S1, 0, STR_END, 0, SLJIT_S0, 0); #if defined COMPILE_PCRE16 || defined COMPILE_PCRE32 -OP2(SLJIT_ASHR, SLJIT_SAVED_REG2, 0, SLJIT_SAVED_REG2, 0, SLJIT_IMM, UCHAR_SHIFT); +OP2(SLJIT_ASHR, SLJIT_S1, 0, SLJIT_S1, 0, SLJIT_IMM, UCHAR_SHIFT); #endif -OP1(SLJIT_MOV_SI, SLJIT_MEM1(SLJIT_SCRATCH_REG2), sizeof(int), SLJIT_SAVED_REG2, 0); +OP1(SLJIT_MOV_S32, SLJIT_MEM1(SLJIT_R1), sizeof(int), SLJIT_S1, 0); -OP2(SLJIT_SUB, SLJIT_SCRATCH_REG3, 0, SLJIT_SCRATCH_REG3, 0, SLJIT_SAVED_REG1, 0); +OP2(SLJIT_SUB, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_S0, 0); #if defined COMPILE_PCRE16 || defined COMPILE_PCRE32 -OP2(SLJIT_ASHR, SLJIT_SCRATCH_REG3, 0, SLJIT_SCRATCH_REG3, 0, SLJIT_IMM, UCHAR_SHIFT); +OP2(SLJIT_ASHR, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, UCHAR_SHIFT); #endif -OP1(SLJIT_MOV_SI, SLJIT_MEM1(SLJIT_SCRATCH_REG2), 0, SLJIT_SCRATCH_REG3, 0); +OP1(SLJIT_MOV_S32, SLJIT_MEM1(SLJIT_R1), 0, SLJIT_R2, 0); JUMPTO(SLJIT_JUMP, quit); } @@ -2234,22 +2630,22 @@ struct sljit_jump *jump; if (common->mode == JIT_PARTIAL_SOFT_COMPILE) { /* The value of -1 must be kept for start_used_ptr! */ - OP2(SLJIT_ADD, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, SLJIT_IMM, 1); + OP2(SLJIT_ADD, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, SLJIT_IMM, 1); /* Jumps if start_used_ptr < STR_PTR, or start_used_ptr == -1. Although overwriting is not necessary if start_used_ptr == STR_PTR, it does not hurt as well. */ - jump = CMP(SLJIT_C_LESS_EQUAL, TMP1, 0, STR_PTR, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, STR_PTR, 0); + jump = CMP(SLJIT_LESS_EQUAL, TMP1, 0, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0); JUMPHERE(jump); } else if (common->mode == JIT_PARTIAL_HARD_COMPILE) { - jump = CMP(SLJIT_C_LESS_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, STR_PTR, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, STR_PTR, 0); + jump = CMP(SLJIT_LESS_EQUAL, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0); JUMPHERE(jump); } } -static SLJIT_INLINE BOOL char_has_othercase(compiler_common *common, pcre_uchar* cc) +static SLJIT_INLINE BOOL char_has_othercase(compiler_common *common, pcre_uchar *cc) { /* Detects if the character has an othercase. 
*/ unsigned int c; @@ -2292,7 +2688,7 @@ if (common->utf && c > 127) return TABLE_GET(c, common->fcc, c); } -static unsigned int char_get_othercase_bit(compiler_common *common, pcre_uchar* cc) +static unsigned int char_get_othercase_bit(compiler_common *common, pcre_uchar *cc) { /* Detects if the character and its othercase has only 1 bit difference. */ unsigned int c, oc, bit; @@ -2380,12 +2776,12 @@ if (common->mode == JIT_COMPILE) return; if (!force) - jump = CMP(SLJIT_C_GREATER_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, STR_PTR, 0); + jump = CMP(SLJIT_GREATER_EQUAL, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0); else if (common->mode == JIT_PARTIAL_SOFT_COMPILE) - jump = CMP(SLJIT_C_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, SLJIT_IMM, -1); + jump = CMP(SLJIT_EQUAL, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, SLJIT_IMM, -1); if (common->mode == JIT_PARTIAL_SOFT_COMPILE) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->hit_start, SLJIT_IMM, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, 0); else { if (common->partialmatchlabel != NULL) @@ -2406,20 +2802,20 @@ struct sljit_jump *jump; if (common->mode == JIT_COMPILE) { - add_jump(compiler, end_reached, CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); + add_jump(compiler, end_reached, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); return; } -jump = CMP(SLJIT_C_LESS, STR_PTR, 0, STR_END, 0); +jump = CMP(SLJIT_LESS, STR_PTR, 0, STR_END, 0); if (common->mode == JIT_PARTIAL_SOFT_COMPILE) { - add_jump(compiler, end_reached, CMP(SLJIT_C_GREATER_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, STR_PTR, 0)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->hit_start, SLJIT_IMM, 0); + add_jump(compiler, end_reached, CMP(SLJIT_GREATER_EQUAL, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, 0); add_jump(compiler, end_reached, JUMP(SLJIT_JUMP)); } else { - add_jump(compiler, end_reached, CMP(SLJIT_C_GREATER_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, STR_PTR, 0)); + add_jump(compiler, end_reached, CMP(SLJIT_GREATER_EQUAL, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0)); if (common->partialmatchlabel != NULL) JUMPTO(SLJIT_JUMP, common->partialmatchlabel); else @@ -2435,16 +2831,16 @@ struct sljit_jump *jump; if (common->mode == JIT_COMPILE) { - add_jump(compiler, backtracks, CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); + add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); return; } /* Partial matching mode. */ -jump = CMP(SLJIT_C_LESS, STR_PTR, 0, STR_END, 0); -add_jump(compiler, backtracks, CMP(SLJIT_C_GREATER_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, STR_PTR, 0)); +jump = CMP(SLJIT_LESS, STR_PTR, 0, STR_END, 0); +add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0)); if (common->mode == JIT_PARTIAL_SOFT_COMPILE) { - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->hit_start, SLJIT_IMM, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, 0); add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); } else @@ -2457,107 +2853,290 @@ else JUMPHERE(jump); } -static void read_char(compiler_common *common) +static void peek_char(compiler_common *common, sljit_u32 max) { -/* Reads the character into TMP1, updates STR_PTR. +/* Reads the character into TMP1, keeps STR_PTR. Does not check STR_END. TMP2 Destroyed. 
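The new max argument is a contract with the caller: only values up to max have to be exact, anything larger may be reported imprecisely as long as it stays above max, which lets the multi-unit decoding be skipped when, say, the caller only compares against ASCII characters. When full decoding is needed, the UTF-16 branch combines a surrogate pair in the usual way; with c the high unit and low the following unit (illustrative names):

  if (c >= 0xd800 && c <= 0xdbff)
    c = 0x10000 + ((c - 0xd800) << 10) + (low & 0x3ff);

The emitted code folds the 0x10000 into the high half by adding 0x40 before the 10-bit shift.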
*/ DEFINE_COMPILER; #if defined SUPPORT_UTF && !defined COMPILE_PCRE32 struct sljit_jump *jump; #endif +SLJIT_UNUSED_ARG(max); + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); -#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 if (common->utf) { -#if defined COMPILE_PCRE8 - jump = CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xc0); -#elif defined COMPILE_PCRE16 - jump = CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xd800); -#endif /* COMPILE_PCRE[8|16] */ + if (max < 128) return; + + jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xc0); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); add_jump(compiler, &common->utfreadchar, JUMP(SLJIT_FAST_CALL)); + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0); JUMPHERE(jump); } #endif /* SUPPORT_UTF && !COMPILE_PCRE32 */ -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + +#if defined SUPPORT_UTF && defined COMPILE_PCRE16 +if (common->utf) + { + if (max < 0xd800) return; + + OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, 0xd800); + jump = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 0xdc00 - 0xd800 - 1); + /* TMP2 contains the high surrogate. */ + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x40); + OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 10); + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x3ff); + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); + JUMPHERE(jump); + } +#endif } -static void peek_char(compiler_common *common) +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + +static BOOL is_char7_bitset(const sljit_u8 *bitset, BOOL nclass) { -/* Reads the character into TMP1, keeps STR_PTR. -Does not check STR_END. TMP2 Destroyed. */ +/* Tells whether the character codes below 128 are enough +to determine a match. */ +const sljit_u8 value = nclass ? 0xff : 0; +const sljit_u8 *end = bitset + 32; + +bitset += 16; +do + { + if (*bitset++ != value) + return FALSE; + } +while (bitset < end); +return TRUE; +} + +static void read_char7_type(compiler_common *common, BOOL full_read) +{ +/* Reads the precise character type of a character into TMP1, if the character +is less than 128. Otherwise it returns with zero. Does not check STR_END. The +full_read argument tells whether characters above max are accepted or not. 
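A class bitmap holds one bit per character code 0..255 in 32 bytes, so its upper 16 bytes cover exactly the characters 128..255; is_char7_bitset only has to check that this upper half is uniform (all zero, or all 0xff for a negated class) to conclude that ASCII data alone decides the match. For reference, the bit of character c sits at

  is_set = (bitset[c >> 3] >> (c & 7)) & 1;   // 8 characters per byte

which is the same indexing the interpreter uses for OP_CLASS bitmaps.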
*/ DEFINE_COMPILER; -#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 struct sljit_jump *jump; -#endif -OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); -#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 -if (common->utf) +SLJIT_ASSERT(common->utf); + +OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), 0); +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + +OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes); + +if (full_read) { -#if defined COMPILE_PCRE8 - jump = CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xc0); -#elif defined COMPILE_PCRE16 - jump = CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xd800); -#endif /* COMPILE_PCRE[8|16] */ - add_jump(compiler, &common->utfreadchar, JUMP(SLJIT_FAST_CALL)); - OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0); + jump = CMP(SLJIT_LESS, TMP2, 0, SLJIT_IMM, 0xc0); + OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(utf8_table4) - 0xc0); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP2, 0); JUMPHERE(jump); } -#endif /* SUPPORT_UTF && !COMPILE_PCRE32 */ } -static void read_char8_type(compiler_common *common) +#endif /* SUPPORT_UTF && COMPILE_PCRE8 */ + +static void read_char_range(compiler_common *common, sljit_u32 min, sljit_u32 max, BOOL update_str_ptr) { -/* Reads the character type into TMP1, updates STR_PTR. Does not check STR_END. */ +/* Reads the precise value of a character into TMP1, if the character is +between min and max (c >= min && c <= max). Otherwise it returns with a value +outside the range. Does not check STR_END. */ DEFINE_COMPILER; -#if defined SUPPORT_UTF || defined COMPILE_PCRE16 || defined COMPILE_PCRE32 +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 struct sljit_jump *jump; #endif +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 +struct sljit_jump *jump2; +#endif -#ifdef SUPPORT_UTF +SLJIT_UNUSED_ARG(update_str_ptr); +SLJIT_UNUSED_ARG(min); +SLJIT_UNUSED_ARG(max); +SLJIT_ASSERT(min <= max); + +OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 if (common->utf) { - OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), 0); - OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -#if defined COMPILE_PCRE8 - /* This can be an extra read in some situations, but hopefully - it is needed in most cases. 
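The min/max bounds are what make the rewritten read_char_range cheap compared to this unconditional decode: the caller promises to distinguish only values inside [min, max], so characters that are not fully decoded merely have to come out as some value beyond max. For example, when 128 <= max < 0x800 in UTF-8 mode, only two-unit sequences need an exact value (lead and next are illustrative names for the two code units):

  if (lead >= 0xc0)
    c = ((lead & 0x3f) << 6) | (next & 0x3f);   // exact for 2-unit chars

For three- and four-unit sequences this leaves the 0x800 length-tag bit set, so the bogus result is guaranteed to exceed max and is therefore harmless.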
*/ - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes); - jump = CMP(SLJIT_C_LESS, TMP2, 0, SLJIT_IMM, 0xc0); - add_jump(compiler, &common->utfreadtype8, JUMP(SLJIT_FAST_CALL)); - JUMPHERE(jump); -#elif defined COMPILE_PCRE16 - OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0); - jump = CMP(SLJIT_C_GREATER, TMP2, 0, SLJIT_IMM, 255); - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes); + if (max < 128 && !update_str_ptr) return; + + jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xc0); + if (min >= 0x10000) + { + OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, 0xf0); + if (update_str_ptr) + OP1(SLJIT_MOV_U8, RETURN_ADDR, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + jump2 = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 0x7); + OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 6); + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x3f); + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); + OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 6); + OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); + OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(2)); + if (!update_str_ptr) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(3)); + OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 6); + OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); + JUMPHERE(jump2); + if (update_str_ptr) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, RETURN_ADDR, 0); + } + else if (min >= 0x800 && max <= 0xffff) + { + OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, 0xe0); + if (update_str_ptr) + OP1(SLJIT_MOV_U8, RETURN_ADDR, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + jump2 = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 0xf); + OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 6); + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x3f); + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); + OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + if (!update_str_ptr) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); + OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 6); + OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); + JUMPHERE(jump2); + if (update_str_ptr) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, RETURN_ADDR, 0); + } + else if (max >= 0x800) + add_jump(compiler, (max < 0x10000) ? 
&common->utfreadchar16 : &common->utfreadchar, JUMP(SLJIT_FAST_CALL)); + else if (max < 128) + { + OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP2, 0); + } + else + { + OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + if (!update_str_ptr) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + else + OP1(SLJIT_MOV_U8, RETURN_ADDR, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x3f); + OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 6); + OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); + if (update_str_ptr) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, RETURN_ADDR, 0); + } JUMPHERE(jump); + } +#endif + +#if defined SUPPORT_UTF && defined COMPILE_PCRE16 +if (common->utf) + { + if (max >= 0x10000) + { + OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, 0xd800); + jump = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 0xdc00 - 0xd800 - 1); + /* TMP2 contains the high surrogate. */ + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x40); + OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 10); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x3ff); + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); + JUMPHERE(jump); + return; + } + + if (max < 0xd800 && !update_str_ptr) return; + /* Skip low surrogate if necessary. */ - OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0xfc00); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, 0xd800); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 1); - OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP2, 0); -#elif defined COMPILE_PCRE32 - OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0); - jump = CMP(SLJIT_C_GREATER, TMP2, 0, SLJIT_IMM, 255); - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes); + OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, 0xd800); + jump = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 0xdc00 - 0xd800 - 1); + if (update_str_ptr) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + if (max >= 0xd800) + OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0x10000); JUMPHERE(jump); -#endif /* COMPILE_PCRE[8|16|32] */ - return; } -#endif /* SUPPORT_UTF */ +#endif +} + +static SLJIT_INLINE void read_char(compiler_common *common) +{ +read_char_range(common, 0, READ_CHAR_MAX, TRUE); +} + +static void read_char8_type(compiler_common *common, BOOL update_str_ptr) +{ +/* Reads the character type into TMP1, updates STR_PTR. Does not check STR_END. */ +DEFINE_COMPILER; +#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 +struct sljit_jump *jump; +#endif +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 +struct sljit_jump *jump2; +#endif + +SLJIT_UNUSED_ARG(update_str_ptr); + OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), 0); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -#if defined COMPILE_PCRE16 || defined COMPILE_PCRE32 + +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 +if (common->utf) + { + /* This can be an extra read in some situations, but hopefully + it is needed in most cases. 
*/ + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes); + jump = CMP(SLJIT_LESS, TMP2, 0, SLJIT_IMM, 0xc0); + if (!update_str_ptr) + { + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); + OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 6); + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x3f); + OP2(SLJIT_OR, TMP2, 0, TMP2, 0, TMP1, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0); + jump2 = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 255); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes); + JUMPHERE(jump2); + } + else + add_jump(compiler, &common->utfreadtype8, JUMP(SLJIT_FAST_CALL)); + JUMPHERE(jump); + return; + } +#endif /* SUPPORT_UTF && COMPILE_PCRE8 */ + +#if !defined COMPILE_PCRE8 /* The ctypes array contains only 256 values. */ OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0); -jump = CMP(SLJIT_C_GREATER, TMP2, 0, SLJIT_IMM, 255); +jump = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 255); #endif -OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes); -#if defined COMPILE_PCRE16 || defined COMPILE_PCRE32 +OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes); +#if !defined COMPILE_PCRE8 JUMPHERE(jump); #endif + +#if defined SUPPORT_UTF && defined COMPILE_PCRE16 +if (common->utf && update_str_ptr) + { + /* Skip low surrogate if necessary. */ + OP2(SLJIT_SUB, TMP2, 0, TMP2, 0, SLJIT_IMM, 0xd800); + jump = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 0xdc00 - 0xd800 - 1); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + JUMPHERE(jump); + } +#endif /* SUPPORT_UTF && COMPILE_PCRE16 */ } static void skip_char_back(compiler_common *common) @@ -2574,7 +3153,7 @@ if (common->utf) OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), -IN_UCHARS(1)); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xc0); - CMPTO(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, 0x80, label); + CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0x80, label); return; } #elif defined COMPILE_PCRE16 @@ -2584,8 +3163,8 @@ if (common->utf) OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); /* Skip low surrogate if necessary. */ OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xdc00); - OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xdc00); + OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP1, 0); return; @@ -2595,28 +3174,36 @@ if (common->utf) OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); } -static void check_newlinechar(compiler_common *common, int nltype, jump_list **backtracks, BOOL jumpiftrue) +static void check_newlinechar(compiler_common *common, int nltype, jump_list **backtracks, BOOL jumpifmatch) { /* Character comes in TMP1. Checks if it is a newline. TMP2 may be destroyed. */ DEFINE_COMPILER; +struct sljit_jump *jump; if (nltype == NLTYPE_ANY) { add_jump(compiler, &common->anynewline, JUMP(SLJIT_FAST_CALL)); - add_jump(compiler, backtracks, JUMP(jumpiftrue ? SLJIT_C_NOT_ZERO : SLJIT_C_ZERO)); + sljit_set_current_flags(compiler, SLJIT_SET_Z); + add_jump(compiler, backtracks, JUMP(jumpifmatch ? 
SLJIT_NOT_ZERO : SLJIT_ZERO)); } else if (nltype == NLTYPE_ANYCRLF) { - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, CHAR_CR); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, CHAR_NL); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - add_jump(compiler, backtracks, JUMP(jumpiftrue ? SLJIT_C_NOT_ZERO : SLJIT_C_ZERO)); + if (jumpifmatch) + { + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_CR)); + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_NL)); + } + else + { + jump = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_CR); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_NL)); + JUMPHERE(jump); + } } else { SLJIT_ASSERT(nltype == NLTYPE_FIXED && common->newline < 256); - add_jump(compiler, backtracks, CMP(jumpiftrue ? SLJIT_C_EQUAL : SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, common->newline)); + add_jump(compiler, backtracks, CMP(jumpifmatch ? SLJIT_EQUAL : SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, common->newline)); } } @@ -2626,58 +3213,84 @@ else static void do_utfreadchar(compiler_common *common) { /* Fast decoding a UTF-8 character. TMP1 contains the first byte -of the character (>= 0xc0). Return char value in TMP1, length - 1 in TMP2. */ +of the character (>= 0xc0). Return char value in TMP1, length in TMP2. */ DEFINE_COMPILER; struct sljit_jump *jump; sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); -/* Searching for the first zero. */ -OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x20); -jump = JUMP(SLJIT_C_NOT_ZERO); -/* Two byte sequence. */ -OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x1f); +OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); +OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x3f); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 6); OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); + +/* Searching for the first zero. */ +OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x800); +jump = JUMP(SLJIT_NOT_ZERO); +/* Two byte sequence. */ +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); +OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, IN_UCHARS(2)); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); -JUMPHERE(jump); -OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x10); -jump = JUMP(SLJIT_C_NOT_ZERO); -/* Three byte sequence. */ +JUMPHERE(jump); OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); -OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x0f); -OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 12); +OP2(SLJIT_XOR, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x800); +OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 6); OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); -OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 6); OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); -OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(2)); + +OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x10000); +jump = JUMP(SLJIT_NOT_ZERO); +/* Three byte sequence. 
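The decoder folds each continuation byte in first and strips the length tag afterwards: after the first fold, bit 0x800 is set exactly when the lead byte announced three or more units, and after the second fold bit 0x10000 is set exactly for four units, so each tag is tested and then cleared with an XOR. The routine as a whole is equivalent to this C sketch, where b0 >= 0xc0 is the lead byte already in TMP1 and b1..b3 are the following bytes:

  c = ((b0 & 0x3f) << 6) | (b1 & 0x3f);
  if ((c & 0x800) == 0) return c;             // two-unit sequence
  c = ((c ^ 0x800) << 6) | (b2 & 0x3f);
  if ((c & 0x10000) == 0) return c;           // three-unit sequence
  c = ((c ^ 0x10000) << 6) | (b3 & 0x3f);     // four-unit sequence
  return c;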
*/ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); -OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); -OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, IN_UCHARS(2)); +OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, IN_UCHARS(3)); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); -JUMPHERE(jump); /* Four byte sequence. */ -OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); -OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x07); -OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 18); +JUMPHERE(jump); +OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(2)); +OP2(SLJIT_XOR, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x10000); +OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 6); +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(3)); OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); -OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 12); OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); -OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(2)); +OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, IN_UCHARS(4)); +sljit_emit_fast_return(compiler, RETURN_ADDR, 0); +} + +static void do_utfreadchar16(compiler_common *common) +{ +/* Fast decoding a UTF-8 character. TMP1 contains the first byte +of the character (>= 0xc0). Return value in TMP1. */ +DEFINE_COMPILER; +struct sljit_jump *jump; + +sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); +OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); +OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x3f); +OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 6); OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); -OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 6); OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); -OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(3)); -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(3)); + +/* Searching for the first zero. */ +OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x800); +jump = JUMP(SLJIT_NOT_ZERO); +/* Two byte sequence. */ +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); +sljit_emit_fast_return(compiler, RETURN_ADDR, 0); + +JUMPHERE(jump); +OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x400); +OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_NOT_ZERO); +/* This code runs only in 8 bit mode. No need to shift the value. */ +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP2, 0); +OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); +OP2(SLJIT_XOR, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x800); +OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 6); OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f); OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, IN_UCHARS(3)); +/* Three byte sequence. */ +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); } @@ -2691,59 +3304,33 @@ struct sljit_jump *compare; sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); -OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, 0x20); -jump = JUMP(SLJIT_C_NOT_ZERO); +OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, 0x20); +jump = JUMP(SLJIT_NOT_ZERO); /* Two byte sequence. */ OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x1f); +/* The upper 5 bits are known at this point. 
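The lead byte of a two-unit sequence is 0xc0..0xdf, so after masking with 0x1f the value in TMP2 is at most 0x1f, and anything above 0x3 would decode to a code point of 0x100 or more. Since the ctypes table only covers 0..255, those characters can take the early exit that returns type 0 without even combining the two bytes.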
*/ +compare = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 0x3); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 6); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x3f); OP2(SLJIT_OR, TMP2, 0, TMP2, 0, TMP1, 0); -compare = CMP(SLJIT_C_GREATER, TMP2, 0, SLJIT_IMM, 255); -OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes); +OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); JUMPHERE(compare); OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); -JUMPHERE(jump); /* We only have types for characters less than 256. */ -OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(utf8_table4) - 0xc0); -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); -OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0); -sljit_emit_fast_return(compiler, RETURN_ADDR, 0); -} - -#elif defined COMPILE_PCRE16 - -static void do_utfreadchar(compiler_common *common) -{ -/* Fast decoding a UTF-16 character. TMP1 contains the first 16 bit char -of the character (>= 0xd800). Return char value in TMP1, length - 1 in TMP2. */ -DEFINE_COMPILER; -struct sljit_jump *jump; - -sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); -jump = CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xdc00); -/* Do nothing, only return. */ -sljit_emit_fast_return(compiler, RETURN_ADDR, 0); - JUMPHERE(jump); -/* Combine two 16 bit characters. */ -OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x3ff); -OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 10); -OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3ff); -OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); -OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x10000); +OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(utf8_table4) - 0xc0); +OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0); +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP2, 0); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); } -#endif /* COMPILE_PCRE[8|16] */ +#endif /* COMPILE_PCRE8 */ #endif /* SUPPORT_UTF */ @@ -2758,31 +3345,51 @@ static void do_getucd(compiler_common *common) /* Search the UCD record for the character comes in TMP1. Returns chartype in TMP1 and UCD offset in TMP2. 
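The lookup is a classic two-stage compressed table: stage 1 maps the 128-character block of the code point to a block number, stage 2 maps that block plus the in-block offset to a record index. It is what the GET_UCD macro does in C, roughly:

  const ucd_record *rec = PRIV(ucd_records)
      + PRIV(ucd_stage2)[PRIV(ucd_stage1)[c >> 7] * 128 + (c & 127)];

after which the emitted code picks up rec->chartype, keeping the record offset in TMP2 for callers that need the other fields.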
*/ DEFINE_COMPILER; +#ifdef COMPILE_PCRE32 +struct sljit_jump *jump; +#endif + +#if defined SLJIT_DEBUG && SLJIT_DEBUG +/* dummy_ucd_record */ +const ucd_record *record = GET_UCD(INVALID_UTF_CHAR); +SLJIT_ASSERT(record->script == ucp_Common && record->chartype == ucp_Cn && record->gbprop == ucp_gbOther); +SLJIT_ASSERT(record->caseset == 0 && record->other_case == 0); +#endif SLJIT_ASSERT(UCD_BLOCK_SIZE == 128 && sizeof(ucd_record) == 8); sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); + +#ifdef COMPILE_PCRE32 +if (!common->utf) + { + jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0x10ffff + 1); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, INVALID_UTF_CHAR); + JUMPHERE(jump); + } +#endif + OP2(SLJIT_LSHR, TMP2, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_SHIFT); -OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1)); +OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_MASK); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, UCD_BLOCK_SHIFT); OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, TMP2, 0); OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_stage2)); -OP1(SLJIT_MOV_UH, TMP2, 0, SLJIT_MEM2(TMP2, TMP1), 1); +OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM2(TMP2, TMP1), 1); OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, chartype)); -OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM2(TMP1, TMP2), 3); +OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM2(TMP1, TMP2), 3); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); } #endif -static SLJIT_INLINE struct sljit_label *mainloop_entry(compiler_common *common, BOOL hascrorlf, BOOL firstline) +static SLJIT_INLINE struct sljit_label *mainloop_entry(compiler_common *common, BOOL hascrorlf) { DEFINE_COMPILER; struct sljit_label *mainloop; struct sljit_label *newlinelabel = NULL; struct sljit_jump *start; struct sljit_jump *end = NULL; -struct sljit_jump *nl = NULL; +struct sljit_jump *end2 = NULL; #if defined SUPPORT_UTF && !defined COMPILE_PCRE32 struct sljit_jump *singlechar; #endif @@ -2790,39 +3397,38 @@ jump_list *newline = NULL; BOOL newlinecheck = FALSE; BOOL readuchar = FALSE; -if (!(hascrorlf || firstline) && (common->nltype == NLTYPE_ANY || - common->nltype == NLTYPE_ANYCRLF || common->newline > 255)) +if (!(hascrorlf || (common->match_end_ptr != 0)) && + (common->nltype == NLTYPE_ANY || common->nltype == NLTYPE_ANYCRLF || common->newline > 255)) newlinecheck = TRUE; -if (firstline) +if (common->match_end_ptr != 0) { /* Search for the end of the first line. 
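This branch is taken when match_end_ptr is in use, which replaces the old firstline flag and first_line_end slot: for PCRE_FIRSTLINE compilations an unanchored match must start before or at the first newline, so the end of the first line is located once, up front, and stored for the main loop to use as a limit. Conceptually (is_newline_at is shorthand for the nltype-dependent test):

  const pcre_uchar *p = str;
  while (p < str_end && !is_newline_at(p)) p++;
  match_end = p;                      // kept in the match_end_ptr local

with a specialised two-unit loop below for fixed multi-character newlines such as CRLF.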
*/ - SLJIT_ASSERT(common->first_line_end != 0); OP1(SLJIT_MOV, TMP3, 0, STR_PTR, 0); if (common->nltype == NLTYPE_FIXED && common->newline > 255) { mainloop = LABEL(); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); - end = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + end = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-1)); OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); - CMPTO(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff, mainloop); - CMPTO(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, common->newline & 0xff, mainloop); + CMPTO(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff, mainloop); + CMPTO(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, common->newline & 0xff, mainloop); JUMPHERE(end); - OP2(SLJIT_SUB, SLJIT_MEM1(SLJIT_LOCALS_REG), common->first_line_end, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + OP2(SLJIT_SUB, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); } else { - end = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + end = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); mainloop = LABEL(); /* Continual stores does not cause data dependency. */ - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->first_line_end, STR_PTR, 0); - read_char(common); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr, STR_PTR, 0); + read_char_range(common, common->nlmin, common->nlmax, TRUE); check_newlinechar(common, common->nltype, &newline, TRUE); - CMPTO(SLJIT_C_LESS, STR_PTR, 0, STR_END, 0, mainloop); + CMPTO(SLJIT_LESS, STR_PTR, 0, STR_END, 0, mainloop); JUMPHERE(end); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->first_line_end, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr, STR_PTR, 0); set_jumps(newline, LABEL()); } @@ -2835,15 +3441,15 @@ if (newlinecheck) { newlinelabel = LABEL(); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); - end = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + end = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, common->newline & 0xff); - OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, common->newline & 0xff); + OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL); #if defined COMPILE_PCRE16 || defined COMPILE_PCRE32 OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, UCHAR_SHIFT); #endif OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); - nl = JUMP(SLJIT_JUMP); + end2 = JUMP(SLJIT_JUMP); } mainloop = LABEL(); @@ -2858,25 +3464,25 @@ if (readuchar) OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); if (newlinecheck) - CMPTO(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff, newlinelabel); + CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff, newlinelabel); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); #if defined SUPPORT_UTF && !defined COMPILE_PCRE32 #if defined COMPILE_PCRE8 if (common->utf) { - singlechar = CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xc0); - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); + singlechar = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xc0); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); JUMPHERE(singlechar); } #elif defined COMPILE_PCRE16 if (common->utf) { - singlechar 
= CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xd800); + singlechar = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xd800); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800); - OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800); + OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); JUMPHERE(singlechar); @@ -2888,43 +3494,79 @@ JUMPHERE(start); if (newlinecheck) { JUMPHERE(end); - JUMPHERE(nl); + JUMPHERE(end2); } return mainloop; } -#define MAX_N_CHARS 3 +#define MAX_N_CHARS 16 +#define MAX_DIFF_CHARS 6 -static SLJIT_INLINE BOOL fast_forward_first_n_chars(compiler_common *common, BOOL firstline) +static SLJIT_INLINE void add_prefix_char(pcre_uchar chr, pcre_uchar *chars) { -DEFINE_COMPILER; -struct sljit_label *start; -struct sljit_jump *quit; -pcre_uint32 chars[MAX_N_CHARS * 2]; -pcre_uchar *cc = common->start + 1 + LINK_SIZE; -int location = 0; -pcre_int32 len, c, bit, caseless; -int must_stop; - -/* We do not support alternatives now. */ -if (*(common->start + GET(common->start, 1)) == OP_ALT) - return FALSE; +pcre_uchar i, len; + +len = chars[0]; +if (len == 255) + return; + +if (len == 0) + { + chars[0] = 1; + chars[1] = chr; + return; + } + +for (i = len; i > 0; i--) + if (chars[i] == chr) + return; + +if (len >= MAX_DIFF_CHARS - 1) + { + chars[0] = 255; + return; + } + +len++; +chars[len] = chr; +chars[0] = len; +} + +static int scan_prefix(compiler_common *common, pcre_uchar *cc, pcre_uchar *chars, int max_chars, sljit_u32 *rec_count) +{ +/* Recursive function, which scans prefix literals. */ +BOOL last, any, class, caseless; +int len, repeat, len_save, consumed = 0; +sljit_u32 chr; /* Any unicode character. 
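(chr is an sljit_u32 precisely so it can hold any code point.) scan_prefix walks the byte code and records, for each of the first MAX_N_CHARS character positions of a match, which code units may appear there. The chars array is organised in MAX_DIFF_CHARS-sized slots: entry i * MAX_DIFF_CHARS holds the number of distinct alternatives collected for position i (0 = none yet, 255 = too many or any character) and the following entries hold the alternatives themselves, the encoding maintained by add_prefix_char above. The rec_count budget simply bounds the recursion so that pathological patterns cannot make compilation explode.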
*/ +sljit_u8 *bytes, *bytes_end, byte; +pcre_uchar *alternative, *cc_save, *oc; +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 +pcre_uchar othercase[8]; +#elif defined SUPPORT_UTF && defined COMPILE_PCRE16 +pcre_uchar othercase[2]; +#else +pcre_uchar othercase[1]; +#endif +repeat = 1; while (TRUE) { - caseless = 0; - must_stop = 1; - switch(*cc) - { - case OP_CHAR: - must_stop = 0; - cc++; - break; + if (*rec_count == 0) + return 0; + (*rec_count)--; + + last = TRUE; + any = FALSE; + class = FALSE; + caseless = FALSE; + switch (*cc) + { case OP_CHARI: - caseless = 1; - must_stop = 0; + caseless = TRUE; + case OP_CHAR: + last = FALSE; cc++; break; @@ -2943,184 +3585,1037 @@ while (TRUE) cc++; continue; + case OP_ASSERT: + case OP_ASSERT_NOT: + case OP_ASSERTBACK: + case OP_ASSERTBACK_NOT: + cc = bracketend(cc); + continue; + + case OP_PLUSI: + case OP_MINPLUSI: + case OP_POSPLUSI: + caseless = TRUE; case OP_PLUS: case OP_MINPLUS: case OP_POSPLUS: cc++; break; + case OP_EXACTI: + caseless = TRUE; case OP_EXACT: + repeat = GET2(cc, 1); + last = FALSE; cc += 1 + IMM2_SIZE; break; - case OP_PLUSI: - case OP_MINPLUSI: - case OP_POSPLUSI: - caseless = 1; + case OP_QUERYI: + case OP_MINQUERYI: + case OP_POSQUERYI: + caseless = TRUE; + case OP_QUERY: + case OP_MINQUERY: + case OP_POSQUERY: + len = 1; cc++; +#ifdef SUPPORT_UTF + if (common->utf && HAS_EXTRALEN(*cc)) len += GET_EXTRALEN(*cc); +#endif + max_chars = scan_prefix(common, cc + len, chars, max_chars, rec_count); + if (max_chars == 0) + return consumed; + last = FALSE; break; - case OP_EXACTI: - caseless = 1; - cc += 1 + IMM2_SIZE; - break; + case OP_KET: + cc += 1 + LINK_SIZE; + continue; - default: - must_stop = 2; - break; - } + case OP_ALT: + cc += GET(cc, 1); + continue; - if (must_stop == 2) - break; + case OP_ONCE: + case OP_ONCE_NC: + case OP_BRA: + case OP_BRAPOS: + case OP_CBRA: + case OP_CBRAPOS: + alternative = cc + GET(cc, 1); + while (*alternative == OP_ALT) + { + max_chars = scan_prefix(common, alternative + 1 + LINK_SIZE, chars, max_chars, rec_count); + if (max_chars == 0) + return consumed; + alternative += GET(alternative, 1); + } - len = 1; -#ifdef SUPPORT_UTF - if (common->utf && HAS_EXTRALEN(cc[0])) len += GET_EXTRALEN(cc[0]); + if (*cc == OP_CBRA || *cc == OP_CBRAPOS) + cc += IMM2_SIZE; + cc += 1 + LINK_SIZE; + continue; + + case OP_CLASS: +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + if (common->utf && !is_char7_bitset((const sljit_u8 *)(cc + 1), FALSE)) + return consumed; #endif + class = TRUE; + break; - if (caseless && char_has_othercase(common, cc)) - { - caseless = char_get_othercase_bit(common, cc); - if (caseless == 0) - return FALSE; -#ifdef COMPILE_PCRE8 - caseless = ((caseless & 0xff) << 8) | (len - (caseless >> 8)); + case OP_NCLASS: +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 + if (common->utf) return consumed; +#endif + class = TRUE; + break; + +#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 + case OP_XCLASS: +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 + if (common->utf) return consumed; +#endif + any = TRUE; + cc += GET(cc, 1); + break; +#endif + + case OP_DIGIT: +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + if (common->utf && !is_char7_bitset((const sljit_u8 *)common->ctypes - cbit_length + cbit_digit, FALSE)) + return consumed; +#endif + any = TRUE; + cc++; + break; + + case OP_WHITESPACE: +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + if (common->utf && !is_char7_bitset((const sljit_u8 *)common->ctypes - cbit_length + cbit_space, FALSE)) + return consumed; 
+#endif + any = TRUE; + cc++; + break; + + case OP_WORDCHAR: +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + if (common->utf && !is_char7_bitset((const sljit_u8 *)common->ctypes - cbit_length + cbit_word, FALSE)) + return consumed; +#endif + any = TRUE; + cc++; + break; + + case OP_NOT: + case OP_NOTI: + cc++; + /* Fall through. */ + case OP_NOT_DIGIT: + case OP_NOT_WHITESPACE: + case OP_NOT_WORDCHAR: + case OP_ANY: + case OP_ALLANY: +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 + if (common->utf) return consumed; +#endif + any = TRUE; + cc++; + break; + +#ifdef SUPPORT_UTF + case OP_NOTPROP: + case OP_PROP: +#ifndef COMPILE_PCRE32 + if (common->utf) return consumed; +#endif + any = TRUE; + cc += 1 + 2; + break; +#endif + + case OP_TYPEEXACT: + repeat = GET2(cc, 1); + cc += 1 + IMM2_SIZE; + continue; + + case OP_NOTEXACT: + case OP_NOTEXACTI: +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 + if (common->utf) return consumed; +#endif + any = TRUE; + repeat = GET2(cc, 1); + cc += 1 + IMM2_SIZE + 1; + break; + + default: + return consumed; + } + + if (any) + { + do + { + chars[0] = 255; + + consumed++; + if (--max_chars == 0) + return consumed; + chars += MAX_DIFF_CHARS; + } + while (--repeat > 0); + + repeat = 1; + continue; + } + + if (class) + { + bytes = (sljit_u8*) (cc + 1); + cc += 1 + 32 / sizeof(pcre_uchar); + + switch (*cc) + { + case OP_CRSTAR: + case OP_CRMINSTAR: + case OP_CRPOSSTAR: + case OP_CRQUERY: + case OP_CRMINQUERY: + case OP_CRPOSQUERY: + max_chars = scan_prefix(common, cc + 1, chars, max_chars, rec_count); + if (max_chars == 0) + return consumed; + break; + + default: + case OP_CRPLUS: + case OP_CRMINPLUS: + case OP_CRPOSPLUS: + break; + + case OP_CRRANGE: + case OP_CRMINRANGE: + case OP_CRPOSRANGE: + repeat = GET2(cc, 1); + if (repeat <= 0) + return consumed; + break; + } + + do + { + if (bytes[31] & 0x80) + chars[0] = 255; + else if (chars[0] != 255) + { + bytes_end = bytes + 32; + chr = 0; + do + { + byte = *bytes++; + SLJIT_ASSERT((chr & 0x7) == 0); + if (byte == 0) + chr += 8; + else + { + do + { + if ((byte & 0x1) != 0) + add_prefix_char(chr, chars); + byte >>= 1; + chr++; + } + while (byte != 0); + chr = (chr + 7) & ~7; + } + } + while (chars[0] != 255 && bytes < bytes_end); + bytes = bytes_end - 32; + } + + consumed++; + if (--max_chars == 0) + return consumed; + chars += MAX_DIFF_CHARS; + } + while (--repeat > 0); + + switch (*cc) + { + case OP_CRSTAR: + case OP_CRMINSTAR: + case OP_CRPOSSTAR: + return consumed; + + case OP_CRQUERY: + case OP_CRMINQUERY: + case OP_CRPOSQUERY: + cc++; + break; + + case OP_CRRANGE: + case OP_CRMINRANGE: + case OP_CRPOSRANGE: + if (GET2(cc, 1) != GET2(cc, 1 + IMM2_SIZE)) + return consumed; + cc += 1 + 2 * IMM2_SIZE; + break; + } + + repeat = 1; + continue; + } + + len = 1; +#ifdef SUPPORT_UTF + if (common->utf && HAS_EXTRALEN(*cc)) len += GET_EXTRALEN(*cc); +#endif + + if (caseless && char_has_othercase(common, cc)) + { +#ifdef SUPPORT_UTF + if (common->utf) + { + GETCHAR(chr, cc); + if ((int)PRIV(ord2utf)(char_othercase(common, chr), othercase) != len) + return consumed; + } + else +#endif + { + chr = *cc; + othercase[0] = TABLE_GET(chr, common->fcc, chr); + } + } + else + { + caseless = FALSE; + othercase[0] = 0; /* Stops compiler warning - PH */ + } + + len_save = len; + cc_save = cc; + while (TRUE) + { + oc = othercase; + do + { + chr = *cc; + add_prefix_char(*cc, chars); + + if (caseless) + add_prefix_char(*oc, chars); + + len--; + consumed++; + if (--max_chars == 0) + return consumed; + chars += MAX_DIFF_CHARS; 
+ cc++; + oc++; + } + while (len > 0); + + if (--repeat == 0) + break; + + len = len_save; + cc = cc_save; + } + + repeat = 1; + if (last) + return consumed; + } +} + +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) && !(defined SUPPORT_VALGRIND) + +static sljit_s32 character_to_int32(pcre_uchar chr) +{ +sljit_s32 value = (sljit_s32)chr; +#if defined COMPILE_PCRE8 +#define SSE2_COMPARE_TYPE_INDEX 0 +return ((unsigned int)value << 24) | ((unsigned int)value << 16) | ((unsigned int)value << 8) | (unsigned int)value; +#elif defined COMPILE_PCRE16 +#define SSE2_COMPARE_TYPE_INDEX 1 +return ((unsigned int)value << 16) | value; +#elif defined COMPILE_PCRE32 +#define SSE2_COMPARE_TYPE_INDEX 2 +return value; +#else +#error "Unsupported unit width" +#endif +} + +static SLJIT_INLINE void fast_forward_first_char2_sse2(compiler_common *common, pcre_uchar char1, pcre_uchar char2) +{ +DEFINE_COMPILER; +struct sljit_label *start; +struct sljit_jump *quit[3]; +struct sljit_jump *nomatch; +sljit_u8 instruction[8]; +sljit_s32 tmp1_ind = sljit_get_register_index(TMP1); +sljit_s32 tmp2_ind = sljit_get_register_index(TMP2); +sljit_s32 str_ptr_ind = sljit_get_register_index(STR_PTR); +BOOL load_twice = FALSE; +pcre_uchar bit; + +bit = char1 ^ char2; +if (!is_powerof2(bit)) + bit = 0; + +if ((char1 != char2) && bit == 0) + load_twice = TRUE; + +quit[0] = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + +/* First part (unaligned start) */ + +OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, character_to_int32(char1 | bit)); + +SLJIT_ASSERT(tmp1_ind < 8 && tmp2_ind == 1); + +/* MOVD xmm, r/m32 */ +instruction[0] = 0x66; +instruction[1] = 0x0f; +instruction[2] = 0x6e; +instruction[3] = 0xc0 | (2 << 3) | tmp1_ind; +sljit_emit_op_custom(compiler, instruction, 4); + +if (char1 != char2) + { + OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, character_to_int32(bit != 0 ? 
bit : char2)); + + /* MOVD xmm, r/m32 */ + instruction[3] = 0xc0 | (3 << 3) | tmp1_ind; + sljit_emit_op_custom(compiler, instruction, 4); + } + +/* PSHUFD xmm1, xmm2/m128, imm8 */ +instruction[2] = 0x70; +instruction[3] = 0xc0 | (2 << 3) | 2; +instruction[4] = 0; +sljit_emit_op_custom(compiler, instruction, 5); + +if (char1 != char2) + { + /* PSHUFD xmm1, xmm2/m128, imm8 */ + instruction[3] = 0xc0 | (3 << 3) | 3; + instruction[4] = 0; + sljit_emit_op_custom(compiler, instruction, 5); + } + +OP2(SLJIT_AND, TMP2, 0, STR_PTR, 0, SLJIT_IMM, 0xf); +OP2(SLJIT_AND, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, ~0xf); + +/* MOVDQA xmm1, xmm2/m128 */ +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + +if (str_ptr_ind < 8) + { + instruction[2] = 0x6f; + instruction[3] = (0 << 3) | str_ptr_ind; + sljit_emit_op_custom(compiler, instruction, 4); + + if (load_twice) + { + instruction[3] = (1 << 3) | str_ptr_ind; + sljit_emit_op_custom(compiler, instruction, 4); + } + } +else + { + instruction[1] = 0x41; + instruction[2] = 0x0f; + instruction[3] = 0x6f; + instruction[4] = (0 << 3) | (str_ptr_ind & 0x7); + sljit_emit_op_custom(compiler, instruction, 5); + + if (load_twice) + { + instruction[4] = (1 << 3) | str_ptr_ind; + sljit_emit_op_custom(compiler, instruction, 5); + } + instruction[1] = 0x0f; + } + +#else + +instruction[2] = 0x6f; +instruction[3] = (0 << 3) | str_ptr_ind; +sljit_emit_op_custom(compiler, instruction, 4); + +if (load_twice) + { + instruction[3] = (1 << 3) | str_ptr_ind; + sljit_emit_op_custom(compiler, instruction, 4); + } + +#endif + +if (bit != 0) + { + /* POR xmm1, xmm2/m128 */ + instruction[2] = 0xeb; + instruction[3] = 0xc0 | (0 << 3) | 3; + sljit_emit_op_custom(compiler, instruction, 4); + } + +/* PCMPEQB/W/D xmm1, xmm2/m128 */ +instruction[2] = 0x74 + SSE2_COMPARE_TYPE_INDEX; +instruction[3] = 0xc0 | (0 << 3) | 2; +sljit_emit_op_custom(compiler, instruction, 4); + +if (load_twice) + { + instruction[3] = 0xc0 | (1 << 3) | 3; + sljit_emit_op_custom(compiler, instruction, 4); + } + +/* PMOVMSKB reg, xmm */ +instruction[2] = 0xd7; +instruction[3] = 0xc0 | (tmp1_ind << 3) | 0; +sljit_emit_op_custom(compiler, instruction, 4); + +if (load_twice) + { + OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP2, 0); + instruction[3] = 0xc0 | (tmp2_ind << 3) | 1; + sljit_emit_op_custom(compiler, instruction, 4); + + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); + OP1(SLJIT_MOV, TMP2, 0, RETURN_ADDR, 0); + } + +OP2(SLJIT_ASHR, TMP1, 0, TMP1, 0, TMP2, 0); + +/* BSF r32, r/m32 */ +instruction[0] = 0x0f; +instruction[1] = 0xbc; +instruction[2] = 0xc0 | (tmp1_ind << 3) | tmp1_ind; +sljit_emit_op_custom(compiler, instruction, 3); +sljit_set_current_flags(compiler, SLJIT_SET_Z); + +nomatch = JUMP(SLJIT_ZERO); + +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP2, 0); +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); +quit[1] = JUMP(SLJIT_JUMP); + +JUMPHERE(nomatch); + +start = LABEL(); +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, 16); +quit[2] = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + +/* Second part (aligned) */ + +instruction[0] = 0x66; +instruction[1] = 0x0f; + +/* MOVDQA xmm1, xmm2/m128 */ +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + +if (str_ptr_ind < 8) + { + instruction[2] = 0x6f; + instruction[3] = (0 << 3) | str_ptr_ind; + sljit_emit_op_custom(compiler, instruction, 4); + + if (load_twice) + { + instruction[3] = (1 << 3) | str_ptr_ind; + sljit_emit_op_custom(compiler, instruction, 4); + } + } +else + { + instruction[1] = 0x41; + instruction[2] = 0x0f; + instruction[3] = 0x6f; + 
instruction[4] = (0 << 3) | (str_ptr_ind & 0x7); + sljit_emit_op_custom(compiler, instruction, 5); + + if (load_twice) + { + instruction[4] = (1 << 3) | str_ptr_ind; + sljit_emit_op_custom(compiler, instruction, 5); + } + instruction[1] = 0x0f; + } + +#else + +instruction[2] = 0x6f; +instruction[3] = (0 << 3) | str_ptr_ind; +sljit_emit_op_custom(compiler, instruction, 4); + +if (load_twice) + { + instruction[3] = (1 << 3) | str_ptr_ind; + sljit_emit_op_custom(compiler, instruction, 4); + } + +#endif + +if (bit != 0) + { + /* POR xmm1, xmm2/m128 */ + instruction[2] = 0xeb; + instruction[3] = 0xc0 | (0 << 3) | 3; + sljit_emit_op_custom(compiler, instruction, 4); + } + +/* PCMPEQB/W/D xmm1, xmm2/m128 */ +instruction[2] = 0x74 + SSE2_COMPARE_TYPE_INDEX; +instruction[3] = 0xc0 | (0 << 3) | 2; +sljit_emit_op_custom(compiler, instruction, 4); + +if (load_twice) + { + instruction[3] = 0xc0 | (1 << 3) | 3; + sljit_emit_op_custom(compiler, instruction, 4); + } + +/* PMOVMSKB reg, xmm */ +instruction[2] = 0xd7; +instruction[3] = 0xc0 | (tmp1_ind << 3) | 0; +sljit_emit_op_custom(compiler, instruction, 4); + +if (load_twice) + { + instruction[3] = 0xc0 | (tmp2_ind << 3) | 1; + sljit_emit_op_custom(compiler, instruction, 4); + + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0); + } + +/* BSF r32, r/m32 */ +instruction[0] = 0x0f; +instruction[1] = 0xbc; +instruction[2] = 0xc0 | (tmp1_ind << 3) | tmp1_ind; +sljit_emit_op_custom(compiler, instruction, 3); +sljit_set_current_flags(compiler, SLJIT_SET_Z); + +JUMPTO(SLJIT_ZERO, start); + +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); + +start = LABEL(); +SET_LABEL(quit[0], start); +SET_LABEL(quit[1], start); +SET_LABEL(quit[2], start); +} + +#undef SSE2_COMPARE_TYPE_INDEX + +#endif + +static void fast_forward_first_char2(compiler_common *common, pcre_uchar char1, pcre_uchar char2, sljit_s32 offset) +{ +DEFINE_COMPILER; +struct sljit_label *start; +struct sljit_jump *quit; +struct sljit_jump *found; +pcre_uchar mask; +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 +struct sljit_label *utf_start = NULL; +struct sljit_jump *utf_quit = NULL; +#endif +BOOL has_match_end = (common->match_end_ptr != 0); + +if (offset > 0) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(offset)); + +if (has_match_end) + { + OP1(SLJIT_MOV, TMP3, 0, STR_END, 0); + + OP2(SLJIT_ADD, STR_END, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr, SLJIT_IMM, IN_UCHARS(offset + 1)); + OP2(SLJIT_SUB | SLJIT_SET_GREATER, SLJIT_UNUSED, 0, STR_END, 0, TMP3, 0); + sljit_emit_cmov(compiler, SLJIT_GREATER, STR_END, TMP3, 0); + } + +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 +if (common->utf && offset > 0) + utf_start = LABEL(); +#endif + +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) && !(defined SUPPORT_VALGRIND) + +/* SSE2 accelerated first character search. */ + +if (sljit_has_cpu_feature(SLJIT_HAS_SSE2)) + { + fast_forward_first_char2_sse2(common, char1, char2); + + SLJIT_ASSERT(common->mode == JIT_COMPILE || offset == 0); + if (common->mode == JIT_COMPILE) + { + /* In complete mode, we don't need to run a match when STR_PTR == STR_END. 
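This shortcut is sound because the fast forward only runs when any successful match is known to start with one of the searched characters: in complete mode, reaching STR_END without finding one lets the compiled code return PCRE_ERROR_NOMATCH immediately. The partial-matching modes instead take the cmov path below, which merely clamps STR_PTR, since a truncated subject might still turn into a (partial) match.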
*/ + SLJIT_ASSERT(common->forced_quit_label == NULL); + OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE_ERROR_NOMATCH); + add_jump(compiler, &common->forced_quit, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); + +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 + if (common->utf && offset > 0) + { + SLJIT_ASSERT(common->mode == JIT_COMPILE); + + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-offset)); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); +#if defined COMPILE_PCRE8 + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xc0); + CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0x80, utf_start); +#elif defined COMPILE_PCRE16 + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00); + CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0xdc00, utf_start); #else - if ((caseless & 0x100) != 0) - caseless = ((caseless & 0xff) << 16) | (len - (caseless >> 9)); +#error "Unknown code width" +#endif + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + } +#endif + + if (offset > 0) + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(offset)); + } + else + { + OP2(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, SLJIT_UNUSED, 0, STR_PTR, 0, STR_END, 0); + if (has_match_end) + { + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr); + sljit_emit_cmov(compiler, SLJIT_GREATER_EQUAL, STR_PTR, TMP1, 0); + } else - caseless = ((caseless & 0xff) << 8) | (len - (caseless >> 9)); + sljit_emit_cmov(compiler, SLJIT_GREATER_EQUAL, STR_PTR, STR_END, 0); + } + + if (has_match_end) + OP1(SLJIT_MOV, STR_END, 0, TMP3, 0); + return; + } + +#endif + +quit = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + +start = LABEL(); +OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); + +if (char1 == char2) + found = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, char1); +else + { + mask = char1 ^ char2; + if (is_powerof2(mask)) + { + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, mask); + found = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, char1 | mask); + } + else + { + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, char1); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, char2); + OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); + found = JUMP(SLJIT_NOT_ZERO); + } + } + +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); +CMPTO(SLJIT_LESS, STR_PTR, 0, STR_END, 0, start); + +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 +if (common->utf && offset > 0) + utf_quit = JUMP(SLJIT_JUMP); +#endif + +JUMPHERE(found); + +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 +if (common->utf && offset > 0) + { + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-offset)); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); +#if defined COMPILE_PCRE8 + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xc0); + CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0x80, utf_start); +#elif defined COMPILE_PCRE16 + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00); + CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0xdc00, utf_start); +#else +#error "Unknown code width" +#endif + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + JUMPHERE(utf_quit); + } #endif + +JUMPHERE(quit); + +if (has_match_end) + { + quit = CMP(SLJIT_LESS, STR_PTR, 0, STR_END, 0); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr); + if (offset > 0) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(offset)); + JUMPHERE(quit); + OP1(SLJIT_MOV, STR_END, 0, TMP3, 0); + } + +if (offset > 0) + 
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(offset)); +} + +static SLJIT_INLINE BOOL fast_forward_first_n_chars(compiler_common *common) +{ +DEFINE_COMPILER; +struct sljit_label *start; +struct sljit_jump *quit; +struct sljit_jump *match; +/* bytes[0] represent the number of characters between 0 +and MAX_N_BYTES - 1, 255 represents any character. */ +pcre_uchar chars[MAX_N_CHARS * MAX_DIFF_CHARS]; +sljit_s32 offset; +pcre_uchar mask; +pcre_uchar *char_set, *char_set_end; +int i, max, from; +int range_right = -1, range_len; +sljit_u8 *update_table = NULL; +BOOL in_range; +sljit_u32 rec_count; + +for (i = 0; i < MAX_N_CHARS; i++) + chars[i * MAX_DIFF_CHARS] = 0; + +rec_count = 10000; +max = scan_prefix(common, common->start, chars, MAX_N_CHARS, &rec_count); + +if (max < 1) + return FALSE; + +in_range = FALSE; +/* Prevent compiler "uninitialized" warning */ +from = 0; +range_len = 4 /* minimum length */ - 1; +for (i = 0; i <= max; i++) + { + if (in_range && (i - from) > range_len && (chars[(i - 1) * MAX_DIFF_CHARS] < 255)) + { + range_len = i - from; + range_right = i - 1; + } + + if (i < max && chars[i * MAX_DIFF_CHARS] < 255) + { + SLJIT_ASSERT(chars[i * MAX_DIFF_CHARS] > 0); + if (!in_range) + { + in_range = TRUE; + from = i; + } } else - caseless = 0; + in_range = FALSE; + } + +if (range_right >= 0) + { + update_table = (sljit_u8 *)allocate_read_only_data(common, 256); + if (update_table == NULL) + return TRUE; + memset(update_table, IN_UCHARS(range_len), 256); - while (len > 0 && location < MAX_N_CHARS * 2) + for (i = 0; i < range_len; i++) { - c = *cc; - bit = 0; - if (len == (caseless & 0xff)) + char_set = chars + ((range_right - i) * MAX_DIFF_CHARS); + SLJIT_ASSERT(char_set[0] > 0 && char_set[0] < 255); + char_set_end = char_set + char_set[0]; + char_set++; + while (char_set <= char_set_end) { - bit = caseless >> 8; - c |= bit; + if (update_table[(*char_set) & 0xff] > IN_UCHARS(i)) + update_table[(*char_set) & 0xff] = IN_UCHARS(i); + char_set++; + } + } + } + +offset = -1; +/* Scan forward. */ +for (i = 0; i < max; i++) + { + if (offset == -1) + { + if (chars[i * MAX_DIFF_CHARS] <= 2) + offset = i; + } + else if (chars[offset * MAX_DIFF_CHARS] == 2 && chars[i * MAX_DIFF_CHARS] <= 2) + { + if (chars[i * MAX_DIFF_CHARS] == 1) + offset = i; + else + { + mask = chars[offset * MAX_DIFF_CHARS + 1] ^ chars[offset * MAX_DIFF_CHARS + 2]; + if (!is_powerof2(mask)) + { + mask = chars[i * MAX_DIFF_CHARS + 1] ^ chars[i * MAX_DIFF_CHARS + 2]; + if (is_powerof2(mask)) + offset = i; + } } + } + } - chars[location] = c; - chars[location + 1] = bit; +if (range_right < 0) + { + if (offset < 0) + return FALSE; + SLJIT_ASSERT(chars[offset * MAX_DIFF_CHARS] >= 1 && chars[offset * MAX_DIFF_CHARS] <= 2); + /* Works regardless the value is 1 or 2. 
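+     When the count is 1 the indexing below picks up the single character
+     itself (so char1 == char2 in fast_forward_first_char2); when it is 2
+     it picks up the second character of the pair.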
*/ + mask = chars[offset * MAX_DIFF_CHARS + chars[offset * MAX_DIFF_CHARS]]; + fast_forward_first_char2(common, chars[offset * MAX_DIFF_CHARS + 1], mask, offset); + return TRUE; + } - len--; - location += 2; - cc++; +if (range_right == offset) + offset = -1; + +SLJIT_ASSERT(offset == -1 || (chars[offset * MAX_DIFF_CHARS] >= 1 && chars[offset * MAX_DIFF_CHARS] <= 2)); + +max -= 1; +SLJIT_ASSERT(max > 0); +if (common->match_end_ptr != 0) + { + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr); + OP1(SLJIT_MOV, TMP3, 0, STR_END, 0); + OP2(SLJIT_SUB, STR_END, 0, STR_END, 0, SLJIT_IMM, IN_UCHARS(max)); + quit = CMP(SLJIT_LESS_EQUAL, STR_END, 0, TMP1, 0); + OP1(SLJIT_MOV, STR_END, 0, TMP1, 0); + JUMPHERE(quit); + } +else + OP2(SLJIT_SUB, STR_END, 0, STR_END, 0, SLJIT_IMM, IN_UCHARS(max)); + +SLJIT_ASSERT(range_right >= 0); + +#if !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) +OP1(SLJIT_MOV, RETURN_ADDR, 0, SLJIT_IMM, (sljit_sw)update_table); +#endif + +start = LABEL(); +quit = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + +#if defined COMPILE_PCRE8 || (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) +OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(range_right)); +#else +OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(range_right + 1) - 1); +#endif + +#if !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) +OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM2(RETURN_ADDR, TMP1), 0); +#else +OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)update_table); +#endif +OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); +CMPTO(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, 0, start); + +if (offset >= 0) + { + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(offset)); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + + if (chars[offset * MAX_DIFF_CHARS] == 1) + CMPTO(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, chars[offset * MAX_DIFF_CHARS + 1], start); + else + { + mask = chars[offset * MAX_DIFF_CHARS + 1] ^ chars[offset * MAX_DIFF_CHARS + 2]; + if (is_powerof2(mask)) + { + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, mask); + CMPTO(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, chars[offset * MAX_DIFF_CHARS + 1] | mask, start); + } + else + { + match = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, chars[offset * MAX_DIFF_CHARS + 1]); + CMPTO(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, chars[offset * MAX_DIFF_CHARS + 2], start); + JUMPHERE(match); + } } - - if (location >= MAX_N_CHARS * 2 || must_stop != 0) - break; } -/* At least two characters are required. 
*/ -if (location < 2 * 2) - return FALSE; - -if (firstline) +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 +if (common->utf && offset != 0) { - SLJIT_ASSERT(common->first_line_end != 0); - OP1(SLJIT_MOV, TMP3, 0, STR_END, 0); - OP2(SLJIT_SUB, STR_END, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->first_line_end, SLJIT_IMM, IN_UCHARS((location >> 1) - 1)); + if (offset < 0) + { + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + } + else + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-1)); +#if defined COMPILE_PCRE8 + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xc0); + CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0x80, start); +#elif defined COMPILE_PCRE16 + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00); + CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0xdc00, start); +#else +#error "Unknown code width" +#endif + if (offset < 0) + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); } -else - OP2(SLJIT_SUB, STR_END, 0, STR_END, 0, SLJIT_IMM, IN_UCHARS((location >> 1) - 1)); - -start = LABEL(); -quit = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); +#endif -OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); -OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -if (chars[1] != 0) - OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, chars[1]); -CMPTO(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, chars[0], start); -if (location > 2 * 2) - OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); -if (chars[3] != 0) - OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, chars[3]); -CMPTO(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, chars[2], start); -if (location > 2 * 2) - { - if (chars[5] != 0) - OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, chars[5]); - CMPTO(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, chars[4], start); - } -OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); +if (offset >= 0) + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); JUMPHERE(quit); -if (firstline) +if (common->match_end_ptr != 0) + { + if (range_right >= 0) + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr); OP1(SLJIT_MOV, STR_END, 0, TMP3, 0); + if (range_right >= 0) + { + quit = CMP(SLJIT_LESS_EQUAL, STR_PTR, 0, TMP1, 0); + OP1(SLJIT_MOV, STR_PTR, 0, TMP1, 0); + JUMPHERE(quit); + } + } else - OP2(SLJIT_ADD, STR_END, 0, STR_END, 0, SLJIT_IMM, IN_UCHARS((location >> 1) - 1)); + OP2(SLJIT_ADD, STR_END, 0, STR_END, 0, SLJIT_IMM, IN_UCHARS(max)); return TRUE; } #undef MAX_N_CHARS +#undef MAX_DIFF_CHARS -static SLJIT_INLINE void fast_forward_first_char(compiler_common *common, pcre_uchar first_char, BOOL caseless, BOOL firstline) +static SLJIT_INLINE void fast_forward_first_char(compiler_common *common, pcre_uchar first_char, BOOL caseless) { -DEFINE_COMPILER; -struct sljit_label *start; -struct sljit_jump *quit; -struct sljit_jump *found; -pcre_uchar oc, bit; - -if (firstline) - { - SLJIT_ASSERT(common->first_line_end != 0); - OP1(SLJIT_MOV, TMP3, 0, STR_END, 0); - OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->first_line_end); - } - -start = LABEL(); -quit = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); -OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); +pcre_uchar oc; oc = first_char; if (caseless) { oc = TABLE_GET(first_char, common->fcc, first_char); -#if defined SUPPORT_UCP && !(defined COMPILE_PCRE8) +#if defined SUPPORT_UCP && !defined COMPILE_PCRE8 if (first_char > 127 && common->utf) oc = 
UCD_OTHERCASE(first_char); #endif } -if (first_char == oc) - found = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, first_char); -else - { - bit = first_char ^ oc; - if (is_powerof2(bit)) - { - OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, bit); - found = CMP(SLJIT_C_EQUAL, TMP2, 0, SLJIT_IMM, first_char | bit); - } - else - { - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, first_char); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, oc); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - found = JUMP(SLJIT_C_NOT_ZERO); - } - } - -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -JUMPTO(SLJIT_JUMP, start); -JUMPHERE(found); -JUMPHERE(quit); -if (firstline) - OP1(SLJIT_MOV, STR_END, 0, TMP3, 0); +fast_forward_first_char2(common, first_char, oc, 0); } -static SLJIT_INLINE void fast_forward_newline(compiler_common *common, BOOL firstline) +static SLJIT_INLINE void fast_forward_newline(compiler_common *common) { DEFINE_COMPILER; struct sljit_label *loop; @@ -3131,24 +4626,23 @@ struct sljit_jump *foundcr = NULL; struct sljit_jump *notfoundnl; jump_list *newline = NULL; -if (firstline) +if (common->match_end_ptr != 0) { - SLJIT_ASSERT(common->first_line_end != 0); OP1(SLJIT_MOV, TMP3, 0, STR_END, 0); - OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->first_line_end); + OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr); } if (common->nltype == NLTYPE_FIXED && common->newline > 255) { - lastchar = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + lastchar = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str)); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, begin)); - firstchar = CMP(SLJIT_C_LESS_EQUAL, STR_PTR, 0, TMP2, 0); + firstchar = CMP(SLJIT_LESS_EQUAL, STR_PTR, 0, TMP2, 0); OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(2)); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, STR_PTR, 0, TMP1, 0); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_GREATER_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, SLJIT_UNUSED, 0, STR_PTR, 0, TMP1, 0); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER_EQUAL); #if defined COMPILE_PCRE16 || defined COMPILE_PCRE32 OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, UCHAR_SHIFT); #endif @@ -3156,31 +4650,33 @@ if (common->nltype == NLTYPE_FIXED && common->newline > 255) loop = LABEL(); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); - quit = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + quit = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-2)); OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-1)); - CMPTO(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff, loop); - CMPTO(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, common->newline & 0xff, loop); + CMPTO(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff, loop); + CMPTO(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, common->newline & 0xff, loop); JUMPHERE(quit); JUMPHERE(firstchar); JUMPHERE(lastchar); - if (firstline) - OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE0); + if (common->match_end_ptr != 0) + OP1(SLJIT_MOV, STR_END, 0, TMP3, 0); return; } OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str)); 
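/* Editorial sketch, not part of the upstream patch: the NLTYPE_FIXED branch
above emits a loop equivalent to this plain C scan for a fixed two-character
newline such as CRLF. The helper name and types are illustrative only; the
emitted code guarantees that at least two characters precede the starting
pointer. */
static const unsigned char *scan_fixed_newline(const unsigned char *ptr,
  const unsigned char *end, unsigned int newline)
{
const unsigned char first = (newline >> 8) & 0xff;   /* e.g. CR */
const unsigned char second = newline & 0xff;         /* e.g. LF */
for (;;)
  {
  ptr++;                      /* OP2(SLJIT_ADD, STR_PTR, ...) */
  if (ptr >= end)
    return end;               /* quit = CMP(SLJIT_GREATER_EQUAL, ...) */
  if (ptr[-2] == first && ptr[-1] == second)
    return ptr;               /* both CMPTO checks fall through */
  }
}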
-firstchar = CMP(SLJIT_C_LESS_EQUAL, STR_PTR, 0, TMP2, 0); +firstchar = CMP(SLJIT_LESS_EQUAL, STR_PTR, 0, TMP2, 0); skip_char_back(common); loop = LABEL(); -read_char(common); -lastchar = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); +common->ff_newline_shortcut = loop; + +read_char_range(common, common->nlmin, common->nlmax, TRUE); +lastchar = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); if (common->nltype == NLTYPE_ANY || common->nltype == NLTYPE_ANYCRLF) - foundcr = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_CR); + foundcr = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_CR); check_newlinechar(common, common->nltype, &newline, FALSE); set_jumps(newline, loop); @@ -3188,10 +4684,10 @@ if (common->nltype == NLTYPE_ANY || common->nltype == NLTYPE_ANYCRLF) { quit = JUMP(SLJIT_JUMP); JUMPHERE(foundcr); - notfoundnl = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + notfoundnl = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, CHAR_NL); - OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, CHAR_NL); + OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL); #if defined COMPILE_PCRE16 || defined COMPILE_PCRE32 OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, UCHAR_SHIFT); #endif @@ -3202,56 +4698,50 @@ if (common->nltype == NLTYPE_ANY || common->nltype == NLTYPE_ANYCRLF) JUMPHERE(lastchar); JUMPHERE(firstchar); -if (firstline) +if (common->match_end_ptr != 0) OP1(SLJIT_MOV, STR_END, 0, TMP3, 0); } -static BOOL check_class_ranges(compiler_common *common, const pcre_uint8 *bits, BOOL nclass, jump_list **backtracks); +static BOOL check_class_ranges(compiler_common *common, const sljit_u8 *bits, BOOL nclass, BOOL invert, jump_list **backtracks); -static SLJIT_INLINE void fast_forward_start_bits(compiler_common *common, sljit_uw start_bits, BOOL firstline) +static SLJIT_INLINE void fast_forward_start_bits(compiler_common *common, const sljit_u8 *start_bits) { DEFINE_COMPILER; struct sljit_label *start; struct sljit_jump *quit; struct sljit_jump *found = NULL; jump_list *matches = NULL; -pcre_uint8 inverted_start_bits[32]; -int i; #ifndef COMPILE_PCRE8 struct sljit_jump *jump; #endif -for (i = 0; i < 32; ++i) - inverted_start_bits[i] = ~(((pcre_uint8*)start_bits)[i]); - -if (firstline) +if (common->match_end_ptr != 0) { - SLJIT_ASSERT(common->first_line_end != 0); OP1(SLJIT_MOV, RETURN_ADDR, 0, STR_END, 0); - OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->first_line_end); + OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr); } start = LABEL(); -quit = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); +quit = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); #ifdef SUPPORT_UTF if (common->utf) OP1(SLJIT_MOV, TMP3, 0, TMP1, 0); #endif -if (!check_class_ranges(common, inverted_start_bits, (inverted_start_bits[31] & 0x80) != 0, &matches)) +if (!check_class_ranges(common, start_bits, (start_bits[31] & 0x80) != 0, TRUE, &matches)) { #ifndef COMPILE_PCRE8 - jump = CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 255); + jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 255); OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 255); JUMPHERE(jump); #endif OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP1), start_bits); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), 
(sljit_sw)start_bits); OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); - OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0); - found = JUMP(SLJIT_C_NOT_ZERO); + OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0); + found = JUMP(SLJIT_NOT_ZERO); } #ifdef SUPPORT_UTF @@ -3263,17 +4753,17 @@ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); #if defined COMPILE_PCRE8 if (common->utf) { - CMPTO(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xc0, start); - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); + CMPTO(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xc0, start); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); } #elif defined COMPILE_PCRE16 if (common->utf) { - CMPTO(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xd800, start); + CMPTO(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xd800, start); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800); - OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800); + OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); } @@ -3286,7 +4776,7 @@ if (matches != NULL) set_jumps(matches, LABEL()); JUMPHERE(quit); -if (firstline) +if (common->match_end_ptr != 0) OP1(SLJIT_MOV, STR_END, 0, RETURN_ADDR, 0); } @@ -3299,13 +4789,13 @@ struct sljit_jump *alreadyfound; struct sljit_jump *found; struct sljit_jump *foundoc = NULL; struct sljit_jump *notfound; -pcre_uint32 oc, bit; +sljit_u32 oc, bit; SLJIT_ASSERT(common->req_char_ptr != 0); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->req_char_ptr); +OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->req_char_ptr); OP2(SLJIT_ADD, TMP1, 0, STR_PTR, 0, SLJIT_IMM, REQ_BYTE_MAX); -toolong = CMP(SLJIT_C_LESS, TMP1, 0, STR_END, 0); -alreadyfound = CMP(SLJIT_C_LESS, STR_PTR, 0, TMP2, 0); +toolong = CMP(SLJIT_LESS, TMP1, 0, STR_END, 0); +alreadyfound = CMP(SLJIT_LESS, STR_PTR, 0, TMP2, 0); if (has_firstchar) OP2(SLJIT_ADD, TMP1, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); @@ -3313,7 +4803,7 @@ else OP1(SLJIT_MOV, TMP1, 0, STR_PTR, 0); loop = LABEL(); -notfound = CMP(SLJIT_C_GREATER_EQUAL, TMP1, 0, STR_END, 0); +notfound = CMP(SLJIT_GREATER_EQUAL, TMP1, 0, STR_END, 0); OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(TMP1), 0); oc = req_char; @@ -3326,19 +4816,19 @@ if (caseless) #endif } if (req_char == oc) - found = CMP(SLJIT_C_EQUAL, TMP2, 0, SLJIT_IMM, req_char); + found = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, req_char); else { bit = req_char ^ oc; if (is_powerof2(bit)) { OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, bit); - found = CMP(SLJIT_C_EQUAL, TMP2, 0, SLJIT_IMM, req_char | bit); + found = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, req_char | bit); } else { - found = CMP(SLJIT_C_EQUAL, TMP2, 0, SLJIT_IMM, req_char); - foundoc = CMP(SLJIT_C_EQUAL, TMP2, 0, SLJIT_IMM, oc); + found = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, req_char); + foundoc = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, oc); } } OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); @@ -3347,7 +4837,7 @@ JUMPTO(SLJIT_JUMP, loop); JUMPHERE(found); if (foundoc) JUMPHERE(foundoc); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->req_char_ptr, TMP1, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->req_char_ptr, TMP1, 0); JUMPHERE(alreadyfound); JUMPHERE(toolong); return notfound; @@ -3360,31 +4850,31 @@ 
struct sljit_jump *jump; struct sljit_label *mainloop; sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); -OP1(SLJIT_MOV, TMP1, 0, STACK_TOP, 0); -GET_LOCAL_BASE(TMP3, 0, 0); +OP1(SLJIT_MOV, TMP3, 0, STACK_TOP, 0); +GET_LOCAL_BASE(TMP1, 0, 0); /* Drop frames until we reach STACK_TOP. */ mainloop = LABEL(); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), 0); -OP2(SLJIT_SUB | SLJIT_SET_S, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, 0); -jump = JUMP(SLJIT_C_SIG_LESS_EQUAL); - -OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP3, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(TMP1), sizeof(sljit_sw)); -OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), sizeof(sljit_sw), SLJIT_MEM1(TMP1), 2 * sizeof(sljit_sw)); -OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 3 * sizeof(sljit_sw)); +OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), -sizeof(sljit_sw)); +jump = CMP(SLJIT_SIG_LESS_EQUAL, TMP2, 0, SLJIT_IMM, 0); + +OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -2 * sizeof(sljit_sw)); +OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), sizeof(sljit_sw), SLJIT_MEM1(STACK_TOP), -3 * sizeof(sljit_sw)); +OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 3 * sizeof(sljit_sw)); JUMPTO(SLJIT_JUMP, mainloop); JUMPHERE(jump); -jump = JUMP(SLJIT_C_SIG_LESS); -/* End of dropping frames. */ +jump = CMP(SLJIT_NOT_ZERO /* SIG_LESS */, TMP2, 0, SLJIT_IMM, 0); +/* End of reverting values. */ +OP1(SLJIT_MOV, STACK_TOP, 0, TMP3, 0); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); JUMPHERE(jump); OP1(SLJIT_NEG, TMP2, 0, TMP2, 0); -OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP3, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(TMP1), sizeof(sljit_sw)); -OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 2 * sizeof(sljit_sw)); +OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -2 * sizeof(sljit_sw)); +OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2 * sizeof(sljit_sw)); JUMPTO(SLJIT_JUMP, mainloop); } @@ -3399,12 +4889,12 @@ struct sljit_jump *jump; SLJIT_COMPILE_ASSERT(ctype_word == 0x10, ctype_word_must_be_16); -sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0); +sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); /* Get type of the previous char, and put it to LOCALS1. 
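    A word boundary exists exactly when this 0/1 flag differs from the
    word-ness of the following character; the XOR at the end of the
    function performs that test.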
*/ OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, begin)); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1, SLJIT_IMM, 0); -skipread = CMP(SLJIT_C_LESS_EQUAL, STR_PTR, 0, TMP1, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, SLJIT_IMM, 0); +skipread = CMP(SLJIT_LESS_EQUAL, STR_PTR, 0, TMP1, 0); skip_char_back(common); check_start_used_ptr(common); read_char(common); @@ -3414,32 +4904,32 @@ read_char(common); if (common->use_ucp) { OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, 1); - jump = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_UNDERSCORE); + jump = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_UNDERSCORE); add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL)); OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ucp_Ll); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_Lu - ucp_Ll); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_LESS_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_Lu - ucp_Ll); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ucp_Nd - ucp_Ll); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_No - ucp_Nd); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_LESS_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_No - ucp_Nd); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); JUMPHERE(jump); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1, TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, TMP2, 0); } else #endif { #ifndef COMPILE_PCRE8 - jump = CMP(SLJIT_C_GREATER, TMP1, 0, SLJIT_IMM, 255); + jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); #elif defined SUPPORT_UTF /* Here LOCALS1 has already been zeroed. */ jump = NULL; if (common->utf) - jump = CMP(SLJIT_C_GREATER, TMP1, 0, SLJIT_IMM, 255); + jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); #endif /* COMPILE_PCRE8 */ - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP1), common->ctypes); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), common->ctypes); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 4 /* ctype_word */); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, TMP1, 0); #ifndef COMPILE_PCRE8 JUMPHERE(jump); #elif defined SUPPORT_UTF @@ -3451,21 +4941,21 @@ JUMPHERE(skipread); OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, 0); check_str_end(common, &skipread_list); -peek_char(common); +peek_char(common, READ_CHAR_MAX); /* Testing char type. This is a code duplication. 
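    It repeats the classification above for the character that follows the
    boundary position, this time leaving the 0/1 result in TMP2 rather
    than in LOCALS1.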
*/ #ifdef SUPPORT_UCP if (common->use_ucp) { OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, 1); - jump = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_UNDERSCORE); + jump = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_UNDERSCORE); add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL)); OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ucp_Ll); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_Lu - ucp_Ll); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_LESS_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_Lu - ucp_Ll); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ucp_Nd - ucp_Ll); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_No - ucp_Nd); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_LESS_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_No - ucp_Nd); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); JUMPHERE(jump); } else @@ -3474,14 +4964,14 @@ else #ifndef COMPILE_PCRE8 /* TMP2 may be destroyed by peek_char. */ OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, 0); - jump = CMP(SLJIT_C_GREATER, TMP1, 0, SLJIT_IMM, 255); + jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); #elif defined SUPPORT_UTF OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, 0); jump = NULL; if (common->utf) - jump = CMP(SLJIT_C_GREATER, TMP1, 0, SLJIT_IMM, 255); + jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); #endif - OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM1(TMP1), common->ctypes); + OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP1), common->ctypes); OP2(SLJIT_LSHR, TMP2, 0, TMP2, 0, SLJIT_IMM, 4 /* ctype_word */); OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 1); #ifndef COMPILE_PCRE8 @@ -3493,114 +4983,20 @@ else } set_jumps(skipread_list, LABEL()); -OP2(SLJIT_XOR | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1); -sljit_emit_fast_return(compiler, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0); +OP2(SLJIT_XOR | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1); +sljit_emit_fast_return(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); } -/* - range format: - - ranges[0] = length of the range (max MAX_RANGE_SIZE, -1 means invalid range). - ranges[1] = first bit (0 or 1) - ranges[2-length] = position of the bit change (when the current bit is not equal to the previous) -*/ - -static BOOL check_ranges(compiler_common *common, int *ranges, jump_list **backtracks, BOOL readch) +static BOOL check_class_ranges(compiler_common *common, const sljit_u8 *bits, BOOL nclass, BOOL invert, jump_list **backtracks) { +/* May destroy TMP1. */ DEFINE_COMPILER; -struct sljit_jump *jump; - -if (ranges[0] < 0) - return FALSE; - -switch(ranges[0]) - { - case 1: - if (readch) - read_char(common); - add_jump(compiler, backtracks, CMP(ranges[1] == 0 ? SLJIT_C_LESS : SLJIT_C_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, ranges[2])); - return TRUE; - - case 2: - if (readch) - read_char(common); - OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[2]); - add_jump(compiler, backtracks, CMP(ranges[1] != 0 ? 
SLJIT_C_LESS : SLJIT_C_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, ranges[3] - ranges[2])); - return TRUE; - - case 4: - if (ranges[2] + 1 == ranges[3] && ranges[4] + 1 == ranges[5]) - { - if (readch) - read_char(common); - if (ranges[1] != 0) - { - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, ranges[2])); - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, ranges[4])); - } - else - { - jump = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, ranges[2]); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, ranges[4])); - JUMPHERE(jump); - } - return TRUE; - } - if ((ranges[3] - ranges[2]) == (ranges[5] - ranges[4]) && is_powerof2(ranges[4] - ranges[2])) - { - if (readch) - read_char(common); - OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[4] - ranges[2]); - OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[4]); - add_jump(compiler, backtracks, CMP(ranges[1] != 0 ? SLJIT_C_LESS : SLJIT_C_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, ranges[5] - ranges[4])); - return TRUE; - } - return FALSE; - - default: - return FALSE; - } -} - -static void get_ctype_ranges(compiler_common *common, int flag, int *ranges) -{ -int i, bit, length; -const pcre_uint8 *ctypes = (const pcre_uint8*)common->ctypes; - -bit = ctypes[0] & flag; -ranges[0] = -1; -ranges[1] = bit != 0 ? 1 : 0; -length = 0; - -for (i = 1; i < 256; i++) - if ((ctypes[i] & flag) != bit) - { - if (length >= MAX_RANGE_SIZE) - return; - ranges[2 + length] = i; - length++; - bit ^= flag; - } - -if (bit != 0) - { - if (length >= MAX_RANGE_SIZE) - return; - ranges[2 + length] = 256; - length++; - } -ranges[0] = length; -} - -static BOOL check_class_ranges(compiler_common *common, const pcre_uint8 *bits, BOOL nclass, jump_list **backtracks) -{ -int ranges[2 + MAX_RANGE_SIZE]; -pcre_uint8 bit, cbit, all; +int ranges[MAX_RANGE_SIZE]; +sljit_u8 bit, cbit, all; int i, byte, length = 0; bit = bits[0] & 0x1; -ranges[1] = bit; -/* Can be 0 or 255. */ +/* All bits will be zero or one (since bit is zero or one). */ all = -bit; for (i = 0; i < 256; ) @@ -3615,7 +5011,7 @@ for (i = 0; i < 256; ) { if (length >= MAX_RANGE_SIZE) return FALSE; - ranges[2 + length] = i; + ranges[length] = i; length++; bit = cbit; all = -cbit; @@ -3628,12 +5024,119 @@ if (((bit == 0) && nclass) || ((bit == 1) && !nclass)) { if (length >= MAX_RANGE_SIZE) return FALSE; - ranges[2 + length] = 256; + ranges[length] = 256; length++; } -ranges[0] = length; -return check_ranges(common, ranges, backtracks, FALSE); +if (length < 0 || length > 4) + return FALSE; + +bit = bits[0] & 0x1; +if (invert) bit ^= 0x1; + +/* No character is accepted. */ +if (length == 0 && bit == 0) + add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); + +switch(length) + { + case 0: + /* When bit != 0, all characters are accepted. */ + return TRUE; + + case 1: + add_jump(compiler, backtracks, CMP(bit == 0 ? SLJIT_LESS : SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, ranges[0])); + return TRUE; + + case 2: + if (ranges[0] + 1 != ranges[1]) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[0]); + add_jump(compiler, backtracks, CMP(bit != 0 ? SLJIT_LESS : SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, ranges[1] - ranges[0])); + } + else + add_jump(compiler, backtracks, CMP(bit != 0 ? 
SLJIT_EQUAL : SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, ranges[0])); + return TRUE; + + case 3: + if (bit != 0) + { + add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, ranges[2])); + if (ranges[0] + 1 != ranges[1]) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[0]); + add_jump(compiler, backtracks, CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, ranges[1] - ranges[0])); + } + else + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, ranges[0])); + return TRUE; + } + + add_jump(compiler, backtracks, CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, ranges[0])); + if (ranges[1] + 1 != ranges[2]) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[1]); + add_jump(compiler, backtracks, CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, ranges[2] - ranges[1])); + } + else + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, ranges[1])); + return TRUE; + + case 4: + if ((ranges[1] - ranges[0]) == (ranges[3] - ranges[2]) + && (ranges[0] | (ranges[2] - ranges[0])) == ranges[2] + && (ranges[1] & (ranges[2] - ranges[0])) == 0 + && is_powerof2(ranges[2] - ranges[0])) + { + SLJIT_ASSERT((ranges[0] & (ranges[2] - ranges[0])) == 0 && (ranges[2] & ranges[3] & (ranges[2] - ranges[0])) != 0); + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[2] - ranges[0]); + if (ranges[2] + 1 != ranges[3]) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[2]); + add_jump(compiler, backtracks, CMP(bit != 0 ? SLJIT_LESS : SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, ranges[3] - ranges[2])); + } + else + add_jump(compiler, backtracks, CMP(bit != 0 ? SLJIT_EQUAL : SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, ranges[2])); + return TRUE; + } + + if (bit != 0) + { + i = 0; + if (ranges[0] + 1 != ranges[1]) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[0]); + add_jump(compiler, backtracks, CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, ranges[1] - ranges[0])); + i = ranges[0]; + } + else + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, ranges[0])); + + if (ranges[2] + 1 != ranges[3]) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[2] - i); + add_jump(compiler, backtracks, CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, ranges[3] - ranges[2])); + } + else + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, ranges[2] - i)); + return TRUE; + } + + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[0]); + add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, ranges[3] - ranges[0])); + if (ranges[1] + 1 != ranges[2]) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ranges[1] - ranges[0]); + add_jump(compiler, backtracks, CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, ranges[2] - ranges[1])); + } + else + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, ranges[1] - ranges[0])); + return TRUE; + + default: + SLJIT_UNREACHABLE(); + return FALSE; + } } static void check_anynewline(compiler_common *common) @@ -3644,22 +5147,22 @@ DEFINE_COMPILER; sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x0a); -OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x0d - 0x0a); -OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_LESS_EQUAL); -OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x0a); +OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x0d - 0x0a); +OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); +OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x0a); #if defined SUPPORT_UTF || defined COMPILE_PCRE16 || defined 
COMPILE_PCRE32 #ifdef COMPILE_PCRE8 if (common->utf) { #endif - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x1); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2029 - 0x0a); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2029 - 0x0a); #ifdef COMPILE_PCRE8 } #endif #endif /* SUPPORT_UTF || COMPILE_PCRE16 || COMPILE_PCRE32 */ -OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); +OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); } @@ -3670,34 +5173,34 @@ DEFINE_COMPILER; sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); -OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x09); -OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); -OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x20); -OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); -OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xa0); +OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x09); +OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); +OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x20); +OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); +OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xa0); #if defined SUPPORT_UTF || defined COMPILE_PCRE16 || defined COMPILE_PCRE32 #ifdef COMPILE_PCRE8 if (common->utf) { #endif - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x1680); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x1680); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x2000); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x200A - 0x2000); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_LESS_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x202f - 0x2000); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x205f - 0x2000); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x3000 - 0x2000); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x200A - 0x2000); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x202f - 0x2000); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x205f - 0x2000); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x3000 - 0x2000); #ifdef COMPILE_PCRE8 } #endif #endif /* SUPPORT_UTF || COMPILE_PCRE16 || COMPILE_PCRE32 */ -OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); +OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); } @@ -3710,115 +5213,212 @@ DEFINE_COMPILER; sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); OP2(SLJIT_SUB, TMP1, 0, 
TMP1, 0, SLJIT_IMM, 0x0a); -OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x0d - 0x0a); -OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_LESS_EQUAL); -OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x0a); +OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x0d - 0x0a); +OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); +OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x0a); #if defined SUPPORT_UTF || defined COMPILE_PCRE16 || defined COMPILE_PCRE32 #ifdef COMPILE_PCRE8 if (common->utf) { #endif - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x1); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2029 - 0x0a); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2029 - 0x0a); #ifdef COMPILE_PCRE8 } #endif #endif /* SUPPORT_UTF || COMPILE_PCRE16 || COMPILE_PCRE32 */ -OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); +OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); } -#define CHAR1 STR_END -#define CHAR2 STACK_TOP - static void do_casefulcmp(compiler_common *common) { DEFINE_COMPILER; struct sljit_jump *jump; struct sljit_label *label; +int char1_reg; +int char2_reg; -sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); +if (sljit_get_register_index(TMP3) < 0) + { + char1_reg = STR_END; + char2_reg = STACK_TOP; + } +else + { + char1_reg = TMP3; + char2_reg = RETURN_ADDR; + } + +sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0); -OP1(SLJIT_MOV, TMP3, 0, CHAR1, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0, CHAR2, 0); -OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); -OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -label = LABEL(); -OP1(MOVU_UCHAR, CHAR1, 0, SLJIT_MEM1(TMP1), IN_UCHARS(1)); -OP1(MOVU_UCHAR, CHAR2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); -jump = CMP(SLJIT_C_NOT_EQUAL, CHAR1, 0, CHAR2, 0); -OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); -JUMPTO(SLJIT_C_NOT_ZERO, label); +if (char1_reg == STR_END) + { + OP1(SLJIT_MOV, TMP3, 0, char1_reg, 0); + OP1(SLJIT_MOV, RETURN_ADDR, 0, char2_reg, 0); + } + +if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) + { + label = LABEL(); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); + JUMPTO(SLJIT_NOT_ZERO, label); -JUMPHERE(jump); -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -OP1(SLJIT_MOV, CHAR1, 0, TMP3, 0); -OP1(SLJIT_MOV, CHAR2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0); -sljit_emit_fast_return(compiler, RETURN_ADDR, 0); -} + JUMPHERE(jump); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); + } +else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + + label = LABEL(); + sljit_emit_mem(compiler, 
MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); + JUMPTO(SLJIT_NOT_ZERO, label); + + JUMPHERE(jump); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + } +else + { + label = LABEL(); + OP1(MOV_UCHAR, char1_reg, 0, SLJIT_MEM1(TMP1), 0); + OP1(MOV_UCHAR, char2_reg, 0, SLJIT_MEM1(STR_PTR), 0); + OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); + JUMPTO(SLJIT_NOT_ZERO, label); + + JUMPHERE(jump); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); + } + +if (char1_reg == STR_END) + { + OP1(SLJIT_MOV, char1_reg, 0, TMP3, 0); + OP1(SLJIT_MOV, char2_reg, 0, RETURN_ADDR, 0); + } -#define LCC_TABLE STACK_LIMIT +sljit_emit_fast_return(compiler, TMP1, 0); +} static void do_caselesscmp(compiler_common *common) { DEFINE_COMPILER; struct sljit_jump *jump; struct sljit_label *label; +int char1_reg = STR_END; +int char2_reg; +int lcc_table; +int opt_type = 0; -sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); +if (sljit_get_register_index(TMP3) < 0) + { + char2_reg = STACK_TOP; + lcc_table = STACK_LIMIT; + } +else + { + char2_reg = RETURN_ADDR; + lcc_table = TMP3; + } + +if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) + opt_type = 1; +else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) + opt_type = 2; + +sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0); -OP1(SLJIT_MOV, TMP3, 0, LCC_TABLE, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0, CHAR1, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1, CHAR2, 0); -OP1(SLJIT_MOV, LCC_TABLE, 0, SLJIT_IMM, common->lcc); -OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); -OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, char1_reg, 0); + +if (char2_reg == STACK_TOP) + { + OP1(SLJIT_MOV, TMP3, 0, char2_reg, 0); + OP1(SLJIT_MOV, RETURN_ADDR, 0, lcc_table, 0); + } + +OP1(SLJIT_MOV, lcc_table, 0, SLJIT_IMM, common->lcc); + +if (opt_type == 1) + { + label = LABEL(); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + } +else if (opt_type == 2) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + + label = LABEL(); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + } +else + { + label = LABEL(); + OP1(MOV_UCHAR, char1_reg, 0, SLJIT_MEM1(TMP1), 0); + OP1(MOV_UCHAR, char2_reg, 0, SLJIT_MEM1(STR_PTR), 0); + OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); + } -label = LABEL(); -OP1(MOVU_UCHAR, CHAR1, 0, SLJIT_MEM1(TMP1), 
IN_UCHARS(1)); -OP1(MOVU_UCHAR, CHAR2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); #ifndef COMPILE_PCRE8 -jump = CMP(SLJIT_C_GREATER, CHAR1, 0, SLJIT_IMM, 255); +jump = CMP(SLJIT_GREATER, char1_reg, 0, SLJIT_IMM, 255); #endif -OP1(SLJIT_MOV_UB, CHAR1, 0, SLJIT_MEM2(LCC_TABLE, CHAR1), 0); +OP1(SLJIT_MOV_U8, char1_reg, 0, SLJIT_MEM2(lcc_table, char1_reg), 0); #ifndef COMPILE_PCRE8 JUMPHERE(jump); -jump = CMP(SLJIT_C_GREATER, CHAR2, 0, SLJIT_IMM, 255); +jump = CMP(SLJIT_GREATER, char2_reg, 0, SLJIT_IMM, 255); #endif -OP1(SLJIT_MOV_UB, CHAR2, 0, SLJIT_MEM2(LCC_TABLE, CHAR2), 0); +OP1(SLJIT_MOV_U8, char2_reg, 0, SLJIT_MEM2(lcc_table, char2_reg), 0); #ifndef COMPILE_PCRE8 JUMPHERE(jump); #endif -jump = CMP(SLJIT_C_NOT_EQUAL, CHAR1, 0, CHAR2, 0); -OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); -JUMPTO(SLJIT_C_NOT_ZERO, label); + +if (opt_type == 0) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + +jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); +OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); +JUMPTO(SLJIT_NOT_ZERO, label); JUMPHERE(jump); -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -OP1(SLJIT_MOV, LCC_TABLE, 0, TMP3, 0); -OP1(SLJIT_MOV, CHAR1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0); -OP1(SLJIT_MOV, CHAR2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1); -sljit_emit_fast_return(compiler, RETURN_ADDR, 0); -} +OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); + +if (opt_type == 2) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + +if (char2_reg == STACK_TOP) + { + OP1(SLJIT_MOV, char2_reg, 0, TMP3, 0); + OP1(SLJIT_MOV, lcc_table, 0, RETURN_ADDR, 0); + } -#undef LCC_TABLE -#undef CHAR1 -#undef CHAR2 +OP1(SLJIT_MOV, char1_reg, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1); +sljit_emit_fast_return(compiler, TMP1, 0); +} #if defined SUPPORT_UTF && defined SUPPORT_UCP -static const pcre_uchar * SLJIT_CALL do_utf_caselesscmp(pcre_uchar *src1, jit_arguments *args, pcre_uchar *end1) +static const pcre_uchar * SLJIT_FUNC do_utf_caselesscmp(pcre_uchar *src1, pcre_uchar *src2, pcre_uchar *end1, pcre_uchar *end2) { /* This function would be ineffective to do in JIT level. 
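    Caseless UTF comparison needs the UCD othercase data for every
    character read, so it is kept as an ordinary C helper that the
    generated code calls.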
*/ -pcre_uint32 c1, c2; -const pcre_uchar *src2 = args->uchar_ptr; -const pcre_uchar *end2 = args->end; +sljit_u32 c1, c2; const ucd_record *ur; -const pcre_uint32 *pp; +const sljit_u32 *pp; while (src1 < end1) { @@ -3843,7 +5443,7 @@ return src2; #endif /* SUPPORT_UTF && SUPPORT_UCP */ static pcre_uchar *byte_sequence_compare(compiler_common *common, BOOL caseless, pcre_uchar *cc, - compare_context* context, jump_list **backtracks) + compare_context *context, jump_list **backtracks) { DEFINE_COMPILER; unsigned int othercasebit = 0; @@ -3878,16 +5478,16 @@ if (context->sourcereg == -1) #if defined COMPILE_PCRE8 #if defined SLJIT_UNALIGNED && SLJIT_UNALIGNED if (context->length >= 4) - OP1(SLJIT_MOV_SI, TMP1, 0, SLJIT_MEM1(STR_PTR), -context->length); + OP1(SLJIT_MOV_S32, TMP1, 0, SLJIT_MEM1(STR_PTR), -context->length); else if (context->length >= 2) - OP1(SLJIT_MOV_UH, TMP1, 0, SLJIT_MEM1(STR_PTR), -context->length); + OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(STR_PTR), -context->length); else #endif - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(STR_PTR), -context->length); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(STR_PTR), -context->length); #elif defined COMPILE_PCRE16 #if defined SLJIT_UNALIGNED && SLJIT_UNALIGNED if (context->length >= 4) - OP1(SLJIT_MOV_SI, TMP1, 0, SLJIT_MEM1(STR_PTR), -context->length); + OP1(SLJIT_MOV_S32, TMP1, 0, SLJIT_MEM1(STR_PTR), -context->length); else #endif OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), -context->length); @@ -3929,12 +5529,12 @@ do #endif { if (context->length >= 4) - OP1(SLJIT_MOV_SI, context->sourcereg, 0, SLJIT_MEM1(STR_PTR), -context->length); + OP1(SLJIT_MOV_S32, context->sourcereg, 0, SLJIT_MEM1(STR_PTR), -context->length); else if (context->length >= 2) - OP1(SLJIT_MOV_UH, context->sourcereg, 0, SLJIT_MEM1(STR_PTR), -context->length); + OP1(SLJIT_MOV_U16, context->sourcereg, 0, SLJIT_MEM1(STR_PTR), -context->length); #if defined COMPILE_PCRE8 else if (context->length >= 1) - OP1(SLJIT_MOV_UB, context->sourcereg, 0, SLJIT_MEM1(STR_PTR), -context->length); + OP1(SLJIT_MOV_U8, context->sourcereg, 0, SLJIT_MEM1(STR_PTR), -context->length); #endif /* COMPILE_PCRE8 */ context->sourcereg = context->sourcereg == TMP1 ? 
TMP2 : TMP1; @@ -3943,25 +5543,25 @@ do case 4 / sizeof(pcre_uchar): if (context->oc.asint != 0) OP2(SLJIT_OR, context->sourcereg, 0, context->sourcereg, 0, SLJIT_IMM, context->oc.asint); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, context->sourcereg, 0, SLJIT_IMM, context->c.asint | context->oc.asint)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, context->sourcereg, 0, SLJIT_IMM, context->c.asint | context->oc.asint)); break; case 2 / sizeof(pcre_uchar): if (context->oc.asushort != 0) OP2(SLJIT_OR, context->sourcereg, 0, context->sourcereg, 0, SLJIT_IMM, context->oc.asushort); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, context->sourcereg, 0, SLJIT_IMM, context->c.asushort | context->oc.asushort)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, context->sourcereg, 0, SLJIT_IMM, context->c.asushort | context->oc.asushort)); break; #ifdef COMPILE_PCRE8 case 1: if (context->oc.asbyte != 0) OP2(SLJIT_OR, context->sourcereg, 0, context->sourcereg, 0, SLJIT_IMM, context->oc.asbyte); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, context->sourcereg, 0, SLJIT_IMM, context->c.asbyte | context->oc.asbyte)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, context->sourcereg, 0, SLJIT_IMM, context->c.asbyte | context->oc.asbyte)); break; #endif default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); break; } context->ucharptr = 0; @@ -3978,10 +5578,10 @@ do if (othercasebit != 0 && othercasechar == cc) { OP2(SLJIT_OR, context->sourcereg, 0, context->sourcereg, 0, SLJIT_IMM, othercasebit); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, context->sourcereg, 0, SLJIT_IMM, *cc | othercasebit)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, context->sourcereg, 0, SLJIT_IMM, *cc | othercasebit)); } else - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, context->sourcereg, 0, SLJIT_IMM, *cc)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, context->sourcereg, 0, SLJIT_IMM, *cc)); #endif @@ -4000,105 +5600,76 @@ return cc; #define SET_TYPE_OFFSET(value) \ if ((value) != typeoffset) \ { \ - if ((value) > typeoffset) \ - OP2(SLJIT_SUB, typereg, 0, typereg, 0, SLJIT_IMM, (value) - typeoffset); \ - else \ + if ((value) < typeoffset) \ OP2(SLJIT_ADD, typereg, 0, typereg, 0, SLJIT_IMM, typeoffset - (value)); \ + else \ + OP2(SLJIT_SUB, typereg, 0, typereg, 0, SLJIT_IMM, (value) - typeoffset); \ } \ typeoffset = (value); #define SET_CHAR_OFFSET(value) \ if ((value) != charoffset) \ { \ - if ((value) > charoffset) \ - OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, (value) - charoffset); \ + if ((value) < charoffset) \ + OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(charoffset - (value))); \ else \ - OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, charoffset - (value)); \ + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)((value) - charoffset)); \ } \ charoffset = (value); +static pcre_uchar *compile_char1_matchingpath(compiler_common *common, pcre_uchar type, pcre_uchar *cc, jump_list **backtracks, BOOL check_str_ptr); + static void compile_xclass_matchingpath(compiler_common *common, pcre_uchar *cc, jump_list **backtracks) { DEFINE_COMPILER; jump_list *found = NULL; -jump_list **list = (*cc & XCL_NOT) == 0 ? &found : backtracks; -pcre_int32 c, charoffset; +jump_list **list = (cc[0] & XCL_NOT) == 0 ? 
&found : backtracks; +sljit_uw c, charoffset, max = 256, min = READ_CHAR_MAX; struct sljit_jump *jump = NULL; pcre_uchar *ccbegin; int compares, invertcmp, numberofcmps; +#if defined SUPPORT_UTF && (defined COMPILE_PCRE8 || defined COMPILE_PCRE16) +BOOL utf = common->utf; +#endif #ifdef SUPPORT_UCP BOOL needstype = FALSE, needsscript = FALSE, needschar = FALSE; BOOL charsaved = FALSE; -int typereg = TMP1, scriptreg = TMP1; -const pcre_uint32 *other_cases; -pcre_int32 typeoffset; +int typereg = TMP1; +const sljit_u32 *other_cases; +sljit_uw typeoffset; #endif -/* Although SUPPORT_UTF must be defined, we are - not necessary in utf mode even in 8 bit mode. */ -detect_partial_match(common, backtracks); -read_char(common); - -if ((*cc++ & XCL_MAP) != 0) +/* Scanning the necessary info. */ +cc++; +ccbegin = cc; +compares = 0; +if (cc[-1] & XCL_MAP) { - OP1(SLJIT_MOV, TMP3, 0, TMP1, 0); -#ifndef COMPILE_PCRE8 - jump = CMP(SLJIT_C_GREATER, TMP1, 0, SLJIT_IMM, 255); -#elif defined SUPPORT_UTF - if (common->utf) - jump = CMP(SLJIT_C_GREATER, TMP1, 0, SLJIT_IMM, 255); -#endif - - if (!check_class_ranges(common, (const pcre_uint8 *)cc, TRUE, list)) - { - OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); - OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); - OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); - OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0); - add_jump(compiler, list, JUMP(SLJIT_C_NOT_ZERO)); - } - -#ifndef COMPILE_PCRE8 - JUMPHERE(jump); -#elif defined SUPPORT_UTF - if (common->utf) - JUMPHERE(jump); -#endif - OP1(SLJIT_MOV, TMP1, 0, TMP3, 0); -#ifdef SUPPORT_UCP - charsaved = TRUE; -#endif + min = 0; cc += 32 / sizeof(pcre_uchar); } -/* Scanning the necessary info. */ -ccbegin = cc; -compares = 0; while (*cc != XCL_END) { compares++; if (*cc == XCL_SINGLE) { - cc += 2; -#ifdef SUPPORT_UTF - if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]); -#endif + cc ++; + GETCHARINCTEST(c, cc); + if (c > max) max = c; + if (c < min) min = c; #ifdef SUPPORT_UCP needschar = TRUE; #endif } else if (*cc == XCL_RANGE) { - cc += 2; -#ifdef SUPPORT_UTF - if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]); -#endif - cc++; -#ifdef SUPPORT_UTF - if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]); -#endif + cc ++; + GETCHARINCTEST(c, cc); + if (c < min) min = c; + GETCHARINCTEST(c, cc); + if (c > max) max = c; #ifdef SUPPORT_UCP needschar = TRUE; #endif @@ -4108,9 +5679,33 @@ while (*cc != XCL_END) { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; + if (*cc == PT_CLIST) + { + other_cases = PRIV(ucd_caseless_sets) + cc[1]; + while (*other_cases != NOTACHAR) + { + if (*other_cases > max) max = *other_cases; + if (*other_cases < min) min = *other_cases; + other_cases++; + } + } + else + { + max = READ_CHAR_MAX; + min = 0; + } + switch(*cc) { case PT_ANY: + /* Any either accepts everything or ignored. */ + if (cc[-1] == XCL_PROP) + { + compile_char1_matchingpath(common, OP_ALLANY, cc, backtracks, FALSE); + if (list == backtracks) + add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); + return; + } break; case PT_LAMP: @@ -4140,56 +5735,163 @@ while (*cc != XCL_END) break; default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); break; } cc += 2; } #endif } +SLJIT_ASSERT(compares > 0); + +/* We are not necessary in utf mode even in 8 bit mode. 
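+   The subject may be non-UTF even when SUPPORT_UTF is compiled in, so
+   the character-reading code emitted below must handle both encodings.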
*/ +cc = ccbegin; +read_char_range(common, min, max, (cc[-1] & XCL_NOT) != 0); + +if ((cc[-1] & XCL_HASPROP) == 0) + { + if ((cc[-1] & XCL_MAP) != 0) + { + jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); + if (!check_class_ranges(common, (const sljit_u8 *)cc, (((const sljit_u8 *)cc)[31] & 0x80) != 0, TRUE, &found)) + { + OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); + OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); + OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); + OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0); + add_jump(compiler, &found, JUMP(SLJIT_NOT_ZERO)); + } + + add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); + JUMPHERE(jump); + + cc += 32 / sizeof(pcre_uchar); + } + else + { + OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, min); + add_jump(compiler, (cc[-1] & XCL_NOT) == 0 ? backtracks : &found, CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, max - min)); + } + } +else if ((cc[-1] & XCL_MAP) != 0) + { + OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0); +#ifdef SUPPORT_UCP + charsaved = TRUE; +#endif + if (!check_class_ranges(common, (const sljit_u8 *)cc, FALSE, TRUE, list)) + { +#ifdef COMPILE_PCRE8 + jump = NULL; + if (common->utf) +#endif + jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); + + OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); + OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); + OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); + OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0); + add_jump(compiler, list, JUMP(SLJIT_NOT_ZERO)); + +#ifdef COMPILE_PCRE8 + if (common->utf) +#endif + JUMPHERE(jump); + } + + OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0); + cc += 32 / sizeof(pcre_uchar); + } #ifdef SUPPORT_UCP -/* Simple register allocation. TMP1 is preferred if possible. */ if (needstype || needsscript) { if (needschar && !charsaved) - OP1(SLJIT_MOV, TMP3, 0, TMP1, 0); - add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL)); - if (needschar) + OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0); + +#ifdef COMPILE_PCRE32 + if (!common->utf) + { + jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0x10ffff + 1); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, INVALID_UTF_CHAR); + JUMPHERE(jump); + } +#endif + + OP2(SLJIT_LSHR, TMP2, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_SHIFT); + OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1)); + OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_MASK); + OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, UCD_BLOCK_SHIFT); + OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, TMP2, 0); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_stage2)); + OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM2(TMP2, TMP1), 1); + + /* Before anything else, we deal with scripts. 
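+     Script checks need only the script field of the ucd record, so they
+     are resolved here while TMP2 still holds the stage2 record index,
+     before the chartype and character values are loaded.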
*/ + if (needsscript) { - if (needstype) + OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script)); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM2(TMP1, TMP2), 3); + + ccbegin = cc; + + while (*cc != XCL_END) { - OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0); - typereg = RETURN_ADDR; + if (*cc == XCL_SINGLE) + { + cc ++; + GETCHARINCTEST(c, cc); + } + else if (*cc == XCL_RANGE) + { + cc ++; + GETCHARINCTEST(c, cc); + GETCHARINCTEST(c, cc); + } + else + { + SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); + cc++; + if (*cc == PT_SC) + { + compares--; + invertcmp = (compares == 0 && list != backtracks); + if (cc[-1] == XCL_NOTPROP) + invertcmp ^= 0x1; + jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1]); + add_jump(compiler, compares > 0 ? list : backtracks, jump); + } + cc += 2; + } } - if (needsscript) - scriptreg = TMP3; - OP1(SLJIT_MOV, TMP1, 0, TMP3, 0); + cc = ccbegin; } - else if (needstype && needsscript) - scriptreg = TMP3; - /* In all other cases only one of them was specified, and that can goes to TMP1. */ - if (needsscript) + if (needschar) + { + OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0); + } + + if (needstype) { - if (scriptreg == TMP1) + if (!needschar) { - OP1(SLJIT_MOV, scriptreg, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script)); - OP1(SLJIT_MOV_UB, scriptreg, 0, SLJIT_MEM2(scriptreg, TMP2), 3); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, chartype)); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM2(TMP1, TMP2), 3); } else { OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 3); - OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script)); - OP1(SLJIT_MOV_UB, scriptreg, 0, SLJIT_MEM1(TMP2), 0); + OP1(SLJIT_MOV_U8, RETURN_ADDR, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, chartype)); + typereg = RETURN_ADDR; } } } #endif /* Generating code. */ -cc = ccbegin; charoffset = 0; numberofcmps = 0; #ifdef SUPPORT_UCP @@ -4205,147 +5907,123 @@ while (*cc != XCL_END) if (*cc == XCL_SINGLE) { cc ++; -#ifdef SUPPORT_UTF - if (common->utf) - { - GETCHARINC(c, cc); - } - else -#endif - c = *cc++; + GETCHARINCTEST(c, cc); if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE)) { - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, c - charoffset); - OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, numberofcmps == 0 ? SLJIT_UNUSED : TMP2, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); + OP_FLAGS(numberofcmps == 0 ? 
SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_EQUAL); numberofcmps++; } else if (numberofcmps > 0) { - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, c - charoffset); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - jump = JUMP(SLJIT_C_NOT_ZERO ^ invertcmp); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); + OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); + jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); numberofcmps = 0; } else { - jump = CMP(SLJIT_C_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, c - charoffset); + jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); numberofcmps = 0; } } else if (*cc == XCL_RANGE) { cc ++; -#ifdef SUPPORT_UTF - if (common->utf) - { - GETCHARINC(c, cc); - } - else -#endif - c = *cc++; + GETCHARINCTEST(c, cc); SET_CHAR_OFFSET(c); -#ifdef SUPPORT_UTF - if (common->utf) - { - GETCHARINC(c, cc); - } - else -#endif - c = *cc++; + GETCHARINCTEST(c, cc); + if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE)) { - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, c - charoffset); - OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, numberofcmps == 0 ? SLJIT_UNUSED : TMP2, 0, SLJIT_C_LESS_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); + OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); numberofcmps++; } else if (numberofcmps > 0) { - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, c - charoffset); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_LESS_EQUAL); - jump = JUMP(SLJIT_C_NOT_ZERO ^ invertcmp); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); + OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); + jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); numberofcmps = 0; } else { - jump = CMP(SLJIT_C_LESS_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, c - charoffset); + jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); numberofcmps = 0; } } #ifdef SUPPORT_UCP else { + SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); if (*cc == XCL_NOTPROP) invertcmp ^= 0x1; cc++; switch(*cc) { case PT_ANY: - if (list != backtracks) - { - if ((cc[-1] == XCL_NOTPROP && compares > 0) || (cc[-1] == XCL_PROP && compares == 0)) - continue; - } - else if (cc[-1] == XCL_NOTPROP) - continue; - jump = JUMP(SLJIT_JUMP); + if (!invertcmp) + jump = JUMP(SLJIT_JUMP); break; case PT_LAMP: - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lu - typeoffset); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Ll - typeoffset); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lt - typeoffset); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - jump = JUMP(SLJIT_C_NOT_ZERO ^ invertcmp); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lu - typeoffset); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Ll - typeoffset); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lt - typeoffset); + OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); + jump = JUMP(SLJIT_NOT_ZERO ^ 
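Two tricks carry the comparison loop above. First, SET_CHAR_OFFSET keeps TMP1 biased by the last range start, so consecutive items compare against small immediates instead of reloading the character. Second, each XCL_RANGE costs a single unsigned comparison, because the subtraction wraps for values below the lower bound. The scalar shape of that range test:

/* One-compare range test: with unsigned arithmetic, c - lo wraps to a huge
value whenever c < lo, so lo <= c && c <= hi collapses to one branch. */
static int in_range(unsigned int c, unsigned int lo, unsigned int hi)
{
return c - lo <= hi - lo;   /* assumes lo <= hi */
}

Batching up to three such tests into OR-combined flag results (numberofcmps) before emitting a branch keeps the generated code short on targets where conditional flags are cheap.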
invertcmp); break; case PT_GC: c = PRIV(ucp_typerange)[(int)cc[1] * 2]; SET_TYPE_OFFSET(c); - jump = CMP(SLJIT_C_LESS_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, PRIV(ucp_typerange)[(int)cc[1] * 2 + 1] - c); + jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, PRIV(ucp_typerange)[(int)cc[1] * 2 + 1] - c); break; case PT_PC: - jump = CMP(SLJIT_C_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, (int)cc[1] - typeoffset); + jump = CMP(SLJIT_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, (int)cc[1] - typeoffset); break; case PT_SC: - jump = CMP(SLJIT_C_EQUAL ^ invertcmp, scriptreg, 0, SLJIT_IMM, (int)cc[1]); + compares++; + /* Do nothing. */ break; case PT_SPACE: case PT_PXSPACE: SET_CHAR_OFFSET(9); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd - 0x9); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_LESS_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd - 0x9); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x9); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x9); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e - 0x9); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e - 0x9); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); SET_TYPE_OFFSET(ucp_Zl); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Zl); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_LESS_EQUAL); - jump = JUMP(SLJIT_C_NOT_ZERO ^ invertcmp); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Zl); + OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); + jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_WORD: - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, CHAR_UNDERSCORE - charoffset); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_UNDERSCORE - charoffset)); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); /* Fall through. */ case PT_ALNUM: SET_TYPE_OFFSET(ucp_Ll); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lu - ucp_Ll); - OP_FLAGS((*cc == PT_ALNUM) ? SLJIT_MOV : SLJIT_OR, TMP2, 0, (*cc == PT_ALNUM) ? SLJIT_UNUSED : TMP2, 0, SLJIT_C_LESS_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lu - ucp_Ll); + OP_FLAGS((*cc == PT_ALNUM) ? 
SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); SET_TYPE_OFFSET(ucp_Nd); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_No - ucp_Nd); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_LESS_EQUAL); - jump = JUMP(SLJIT_C_NOT_ZERO ^ invertcmp); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_No - ucp_Nd); + OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); + jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_CLIST: @@ -4366,8 +6044,8 @@ while (*cc != XCL_END) OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset); OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); } - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, other_cases[1]); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, other_cases[1]); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); other_cases += 2; } else if (is_powerof2(other_cases[2] ^ other_cases[1])) @@ -4379,207 +6057,424 @@ while (*cc != XCL_END) OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset); OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); } - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, other_cases[2]); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, other_cases[2]); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, other_cases[0] - charoffset); - OP_FLAGS(SLJIT_OR | ((other_cases[3] == NOTACHAR) ? SLJIT_SET_E : 0), TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(other_cases[0] - charoffset)); + OP_FLAGS(SLJIT_OR | ((other_cases[3] == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL); other_cases += 3; } else { - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, *other_cases++ - charoffset); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset)); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); } while (*other_cases != NOTACHAR) { - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, *other_cases++ - charoffset); - OP_FLAGS(SLJIT_OR | ((*other_cases == NOTACHAR) ? SLJIT_SET_E : 0), TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset)); + OP_FLAGS(SLJIT_OR | ((*other_cases == NOTACHAR) ? 
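The other_cases handling above leans on is_powerof2: when two code points differ in exactly one bit, forcing that bit on in the input lets one equality test accept both. The same trick reappears later for caseless single-character matches, where the bit is c ^ oc. As a sketch:

/* Accept c1 or c2 with a single compare when they differ in one bit only. */
static int matches_either(unsigned int c, unsigned int c1, unsigned int c2)
{
unsigned int bit = c1 ^ c2;
if ((bit & (bit - 1)) == 0)       /* zero or one differing bit */
  return (c | bit) == (c1 | bit);
return c == c1 || c == c2;        /* general fallback: two compares */
}

For ASCII letters this is the classic | 0x20 case fold; the caseless sets extend it to any pair whose code points are one bit apart.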
SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL); } - jump = JUMP(SLJIT_C_NOT_ZERO ^ invertcmp); + jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_UCNC: - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, CHAR_DOLLAR_SIGN - charoffset); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, CHAR_COMMERCIAL_AT - charoffset); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, CHAR_GRAVE_ACCENT - charoffset); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_DOLLAR_SIGN - charoffset)); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_COMMERCIAL_AT - charoffset)); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_GRAVE_ACCENT - charoffset)); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); SET_CHAR_OFFSET(0xa0); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd7ff - charoffset); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_LESS_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(0xd7ff - charoffset)); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); SET_CHAR_OFFSET(0); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xe000 - 0); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_GREATER_EQUAL); - jump = JUMP(SLJIT_C_NOT_ZERO ^ invertcmp); + OP2(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xe000 - 0); + OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_GREATER_EQUAL); + jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_PXGRAPH: /* C and Z groups are the farthest two groups. */ SET_TYPE_OFFSET(ucp_Ll); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_GREATER); + OP2(SLJIT_SUB | SLJIT_SET_GREATER, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER); + + jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll); + + /* In case of ucp_Cf, we overwrite the result. */ + SET_CHAR_OFFSET(0x2066); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); + + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); + + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e - 0x2066); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); + + JUMPHERE(jump); + jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0); + break; + + case PT_PXPRINT: + /* C and Z groups are the farthest two groups. */ + SET_TYPE_OFFSET(ucp_Ll); + OP2(SLJIT_SUB | SLJIT_SET_GREATER, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER); + + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Ll); + OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_NOT_EQUAL); - jump = CMP(SLJIT_C_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll); + jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll); /* In case of ucp_Cf, we overwrite the result. 
*/ SET_CHAR_OFFSET(0x2066); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_LESS_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); + + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066); + OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); + + JUMPHERE(jump); + jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0); + break; + + case PT_PXPUNCT: + SET_TYPE_OFFSET(ucp_Sc); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Sc); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); + + SET_CHAR_OFFSET(0); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x7f); + OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_LESS_EQUAL); + + SET_TYPE_OFFSET(ucp_Pc); + OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Ps - ucp_Pc); + OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); + jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); + break; + + default: + SLJIT_UNREACHABLE(); + break; + } + cc += 2; + } +#endif + + if (jump != NULL) + add_jump(compiler, compares > 0 ? list : backtracks, jump); + } + +if (found != NULL) + set_jumps(found, LABEL()); +} + +#undef SET_TYPE_OFFSET +#undef SET_CHAR_OFFSET + +#endif + +static pcre_uchar *compile_simple_assertion_matchingpath(compiler_common *common, pcre_uchar type, pcre_uchar *cc, jump_list **backtracks) +{ +DEFINE_COMPILER; +int length; +struct sljit_jump *jump[4]; +#ifdef SUPPORT_UTF +struct sljit_label *label; +#endif /* SUPPORT_UTF */ + +switch(type) + { + case OP_SOD: + OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, begin)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, TMP1, 0)); + return cc; + + case OP_SOM: + OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, TMP1, 0)); + return cc; - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + case OP_NOT_WORD_BOUNDARY: + case OP_WORD_BOUNDARY: + add_jump(compiler, &common->wordboundary, JUMP(SLJIT_FAST_CALL)); + sljit_set_current_flags(compiler, SLJIT_SET_Z); + add_jump(compiler, backtracks, JUMP(type == OP_NOT_WORD_BOUNDARY ? SLJIT_NOT_ZERO : SLJIT_ZERO)); + return cc; + + case OP_EODN: + /* Requires rather complex checks. 
*/ + jump[0] = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + if (common->nltype == NLTYPE_FIXED && common->newline > 255) + { + OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + if (common->mode == JIT_COMPILE) + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, STR_END, 0)); + else + { + jump[1] = CMP(SLJIT_EQUAL, TMP2, 0, STR_END, 0); + OP2(SLJIT_SUB | SLJIT_SET_LESS, SLJIT_UNUSED, 0, TMP2, 0, STR_END, 0); + OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff); + OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_NOT_EQUAL); + add_jump(compiler, backtracks, JUMP(SLJIT_NOT_EQUAL)); + check_partial(common, TRUE); + add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); + JUMPHERE(jump[1]); + } + OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, common->newline & 0xff)); + } + else if (common->nltype == NLTYPE_FIXED) + { + OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, STR_END, 0)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, common->newline)); + } + else + { + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + jump[1] = CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_CR); + OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); + OP2(SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_GREATER, SLJIT_UNUSED, 0, TMP2, 0, STR_END, 0); + jump[2] = JUMP(SLJIT_GREATER); + add_jump(compiler, backtracks, JUMP(SLJIT_NOT_EQUAL) /* LESS */); + /* Equal. */ + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + jump[3] = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_NL); + add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e - 0x2066); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + JUMPHERE(jump[1]); + if (common->nltype == NLTYPE_ANYCRLF) + { + OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + add_jump(compiler, backtracks, CMP(SLJIT_LESS, TMP2, 0, STR_END, 0)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_NL)); + } + else + { + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, STR_PTR, 0); + read_char_range(common, common->nlmin, common->nlmax, TRUE); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, STR_END, 0)); + add_jump(compiler, &common->anynewline, JUMP(SLJIT_FAST_CALL)); + sljit_set_current_flags(compiler, SLJIT_SET_Z); + add_jump(compiler, backtracks, JUMP(SLJIT_ZERO)); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1); + } + JUMPHERE(jump[2]); + JUMPHERE(jump[3]); + } + JUMPHERE(jump[0]); + check_partial(common, FALSE); + return cc; - JUMPHERE(jump); - jump = CMP(SLJIT_C_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0); - break; + case OP_EOD: + add_jump(compiler, backtracks, CMP(SLJIT_LESS, STR_PTR, 0, STR_END, 0)); + check_partial(common, FALSE); + return cc; - case PT_PXPRINT: - /* C and Z groups are the farthest two groups. 
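OP_EODN must succeed at the true end of the subject and also just before a subject-final newline, which is why the paths above fan out by newline type. For the simplest configuration, a fixed two-code-unit newline, the whole check reduces to the following sketch (pcre_uchar narrowed to unsigned short purely for illustration):

/* End of subject, or exactly a final fixed two-unit newline ahead. */
static int at_eodn_fixed2(const unsigned short *p, const unsigned short *end,
                          unsigned int nl_first, unsigned int nl_second)
{
if (p >= end) return 1;
return end - p == 2 && p[0] == nl_first && p[1] == nl_second;
}

The extra branches guarded by common->mode exist because in partial-matching modes a subject that ends in the middle of the newline sequence must report a partial match instead of failing outright.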
*/ - SET_TYPE_OFFSET(ucp_Ll); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_GREATER); + case OP_DOLL: + OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0); + OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, noteol)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Ll); - OP_FLAGS(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_C_NOT_EQUAL); + if (!common->endonly) + compile_simple_assertion_matchingpath(common, OP_EODN, cc, backtracks); + else + { + add_jump(compiler, backtracks, CMP(SLJIT_LESS, STR_PTR, 0, STR_END, 0)); + check_partial(common, FALSE); + } + return cc; - jump = CMP(SLJIT_C_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll); + case OP_DOLLM: + jump[1] = CMP(SLJIT_LESS, STR_PTR, 0, STR_END, 0); + OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0); + OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, noteol)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); + check_partial(common, FALSE); + jump[0] = JUMP(SLJIT_JUMP); + JUMPHERE(jump[1]); - /* In case of ucp_Cf, we overwrite the result. */ - SET_CHAR_OFFSET(0x2066); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_LESS_EQUAL); + if (common->nltype == NLTYPE_FIXED && common->newline > 255) + { + OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + if (common->mode == JIT_COMPILE) + add_jump(compiler, backtracks, CMP(SLJIT_GREATER, TMP2, 0, STR_END, 0)); + else + { + jump[1] = CMP(SLJIT_LESS_EQUAL, TMP2, 0, STR_END, 0); + /* STR_PTR = STR_END - IN_UCHARS(1) */ + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff)); + check_partial(common, TRUE); + add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); + JUMPHERE(jump[1]); + } - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066); - OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); + OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, common->newline & 0xff)); + } + else + { + peek_char(common, common->nlmax); + check_newlinechar(common, common->nltype, backtracks, FALSE); + } + JUMPHERE(jump[0]); + return cc; - JUMPHERE(jump); - jump = CMP(SLJIT_C_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0); - break; + case OP_CIRC: + OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, begin)); + add_jump(compiler, backtracks, CMP(SLJIT_GREATER, STR_PTR, 0, TMP1, 0)); + OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, notbol)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); + return cc; - case PT_PXPUNCT: - SET_TYPE_OFFSET(ucp_Sc); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Sc); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_LESS_EQUAL); + case OP_CIRCM: + OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, begin)); + jump[1] = CMP(SLJIT_GREATER, STR_PTR, 0, TMP1, 0); + OP1(SLJIT_MOV_U8, TMP2, 
0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, notbol)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); + jump[0] = JUMP(SLJIT_JUMP); + JUMPHERE(jump[1]); - SET_CHAR_OFFSET(0); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xff); - OP_FLAGS(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_C_LESS_EQUAL); + add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); + if (common->nltype == NLTYPE_FIXED && common->newline > 255) + { + OP2(SLJIT_SUB, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); + add_jump(compiler, backtracks, CMP(SLJIT_LESS, TMP2, 0, TMP1, 0)); + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-2)); + OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-1)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, common->newline & 0xff)); + } + else + { + skip_char_back(common); + read_char_range(common, common->nlmin, common->nlmax, TRUE); + check_newlinechar(common, common->nltype, backtracks, FALSE); + } + JUMPHERE(jump[0]); + return cc; - SET_TYPE_OFFSET(ucp_Pc); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Ps - ucp_Pc); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_LESS_EQUAL); - jump = JUMP(SLJIT_C_NOT_ZERO ^ invertcmp); - break; - } - cc += 2; + case OP_REVERSE: + length = GET(cc, 0); + if (length == 0) + return cc + LINK_SIZE; + OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); +#ifdef SUPPORT_UTF + if (common->utf) + { + OP1(SLJIT_MOV, TMP3, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, begin)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, length); + label = LABEL(); + add_jump(compiler, backtracks, CMP(SLJIT_LESS_EQUAL, STR_PTR, 0, TMP3, 0)); + skip_char_back(common); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, label); } + else #endif - - if (jump != NULL) - add_jump(compiler, compares > 0 ? 
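OP_REVERSE in UTF mode cannot simply subtract length code units; it loops on skip_char_back, which steps over continuation units so each iteration retreats one whole character. In UTF-8 terms the helper amounts to this sketch (not PCRE's exact macro):

/* Step back one UTF-8 character: skip trailing 0b10xxxxxx bytes. */
static const unsigned char *utf8_char_back(const unsigned char *p,
                                           const unsigned char *begin)
{
do p--; while (p > begin && (*p & 0xc0) == 0x80);
return p;
}

The non-UTF path keeps the single subtraction, since there every character is exactly one code unit wide.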
list : backtracks, jump); + { + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, begin)); + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(length)); + add_jump(compiler, backtracks, CMP(SLJIT_LESS, STR_PTR, 0, TMP1, 0)); + } + check_start_used_ptr(common); + return cc + LINK_SIZE; } - -if (found != NULL) - set_jumps(found, LABEL()); +SLJIT_UNREACHABLE(); +return cc; } -#undef SET_TYPE_OFFSET -#undef SET_CHAR_OFFSET - -#endif - -static pcre_uchar *compile_char1_matchingpath(compiler_common *common, pcre_uchar type, pcre_uchar *cc, jump_list **backtracks) +static pcre_uchar *compile_char1_matchingpath(compiler_common *common, pcre_uchar type, pcre_uchar *cc, jump_list **backtracks, BOOL check_str_ptr) { DEFINE_COMPILER; int length; unsigned int c, oc, bit; compare_context context; -struct sljit_jump *jump[4]; +struct sljit_jump *jump[3]; jump_list *end_list; #ifdef SUPPORT_UTF struct sljit_label *label; #ifdef SUPPORT_UCP pcre_uchar propdata[5]; #endif -#endif +#endif /* SUPPORT_UTF */ switch(type) { - case OP_SOD: - OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, begin)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, STR_PTR, 0, TMP1, 0)); - return cc; - - case OP_SOM: - OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, STR_PTR, 0, TMP1, 0)); - return cc; - - case OP_NOT_WORD_BOUNDARY: - case OP_WORD_BOUNDARY: - add_jump(compiler, &common->wordboundary, JUMP(SLJIT_FAST_CALL)); - add_jump(compiler, backtracks, JUMP(type == OP_NOT_WORD_BOUNDARY ? SLJIT_C_NOT_ZERO : SLJIT_C_ZERO)); - return cc; - case OP_NOT_DIGIT: case OP_DIGIT: /* Digits are usually 0-9, so it is worth to optimize them. */ - if (common->digits[0] == -2) - get_ctype_ranges(common, ctype_digit, common->digits); - detect_partial_match(common, backtracks); - /* Flip the starting bit in the negative case. */ - if (type == OP_NOT_DIGIT) - common->digits[1] ^= 1; - if (!check_ranges(common, common->digits, backtracks, TRUE)) - { - read_char8_type(common); - OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_digit); - add_jump(compiler, backtracks, JUMP(type == OP_DIGIT ? SLJIT_C_ZERO : SLJIT_C_NOT_ZERO)); - } - if (type == OP_NOT_DIGIT) - common->digits[1] ^= 1; + if (check_str_ptr) + detect_partial_match(common, backtracks); +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + if (common->utf && is_char7_bitset((const sljit_u8 *)common->ctypes - cbit_length + cbit_digit, FALSE)) + read_char7_type(common, type == OP_NOT_DIGIT); + else +#endif + read_char8_type(common, type == OP_NOT_DIGIT); + /* Flip the starting bit in the negative case. */ + OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_digit); + add_jump(compiler, backtracks, JUMP(type == OP_DIGIT ? SLJIT_ZERO : SLJIT_NOT_ZERO)); return cc; case OP_NOT_WHITESPACE: case OP_WHITESPACE: - detect_partial_match(common, backtracks); - read_char8_type(common); - OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_space); - add_jump(compiler, backtracks, JUMP(type == OP_WHITESPACE ? 
SLJIT_C_ZERO : SLJIT_C_NOT_ZERO)); + if (check_str_ptr) + detect_partial_match(common, backtracks); +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + if (common->utf && is_char7_bitset((const sljit_u8 *)common->ctypes - cbit_length + cbit_space, FALSE)) + read_char7_type(common, type == OP_NOT_WHITESPACE); + else +#endif + read_char8_type(common, type == OP_NOT_WHITESPACE); + OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_space); + add_jump(compiler, backtracks, JUMP(type == OP_WHITESPACE ? SLJIT_ZERO : SLJIT_NOT_ZERO)); return cc; case OP_NOT_WORDCHAR: case OP_WORDCHAR: - detect_partial_match(common, backtracks); - read_char8_type(common); - OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_word); - add_jump(compiler, backtracks, JUMP(type == OP_WORDCHAR ? SLJIT_C_ZERO : SLJIT_C_NOT_ZERO)); + if (check_str_ptr) + detect_partial_match(common, backtracks); +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + if (common->utf && is_char7_bitset((const sljit_u8 *)common->ctypes - cbit_length + cbit_word, FALSE)) + read_char7_type(common, type == OP_NOT_WORDCHAR); + else +#endif + read_char8_type(common, type == OP_NOT_WORDCHAR); + OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_word); + add_jump(compiler, backtracks, JUMP(type == OP_WORDCHAR ? SLJIT_ZERO : SLJIT_NOT_ZERO)); return cc; case OP_ANY: - detect_partial_match(common, backtracks); - read_char(common); + if (check_str_ptr) + detect_partial_match(common, backtracks); + read_char_range(common, common->nlmin, common->nlmax, TRUE); if (common->nltype == NLTYPE_FIXED && common->newline > 255) { - jump[0] = CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff); + jump[0] = CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff); end_list = NULL; if (common->mode != JIT_PARTIAL_HARD_COMPILE) - add_jump(compiler, &end_list, CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); + add_jump(compiler, &end_list, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); else check_str_end(common, &end_list); OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, common->newline & 0xff)); + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, common->newline & 0xff)); set_jumps(end_list, LABEL()); JUMPHERE(jump[0]); } @@ -4588,7 +6483,8 @@ switch(type) return cc; case OP_ALLANY: - detect_partial_match(common, backtracks); + if (check_str_ptr) + detect_partial_match(common, backtracks); #ifdef SUPPORT_UTF if (common->utf) { @@ -4596,14 +6492,14 @@ switch(type) OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); #if defined COMPILE_PCRE8 || defined COMPILE_PCRE16 #if defined COMPILE_PCRE8 - jump[0] = CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xc0); - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); + jump[0] = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xc0); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); #elif defined COMPILE_PCRE16 - jump[0] = CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xd800); + jump[0] = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xd800); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800); - OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800); + OP_FLAGS(SLJIT_MOV, TMP1, 0, 
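The digit/space/word cases above no longer toggle bits in a cached range table; they classify through the low-256 ctypes table and test a single class bit, with read_char7_type as a UTF-8 fast path when the whole class fits in seven bits. The test performed by the AND plus conditional jump is simply this (names follow PCRE's ctypes convention; the wrapper itself is illustrative):

/* ctype classification: one table load, one bit test. */
static int has_ctype_bit(const unsigned char *ctypes, unsigned int c,
                         unsigned int bit)   /* e.g. ctype_digit */
{
return c < 256 && (ctypes[c] & bit) != 0;
}

Characters at or above 256 fall outside the table and therefore never match these classes, which is exactly what read_char8_type arranges for multi-byte input.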
SLJIT_EQUAL); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); #endif @@ -4616,7 +6512,8 @@ switch(type) return cc; case OP_ANYBYTE: - detect_partial_match(common, backtracks); + if (check_str_ptr) + detect_partial_match(common, backtracks); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); return cc; @@ -4624,28 +6521,31 @@ switch(type) #ifdef SUPPORT_UCP case OP_NOTPROP: case OP_PROP: - propdata[0] = 0; + propdata[0] = XCL_HASPROP; propdata[1] = type == OP_NOTPROP ? XCL_NOTPROP : XCL_PROP; propdata[2] = cc[0]; propdata[3] = cc[1]; propdata[4] = XCL_END; + if (check_str_ptr) + detect_partial_match(common, backtracks); compile_xclass_matchingpath(common, propdata, backtracks); return cc + 2; #endif #endif case OP_ANYNL: - detect_partial_match(common, backtracks); - read_char(common); - jump[0] = CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_CR); + if (check_str_ptr) + detect_partial_match(common, backtracks); + read_char_range(common, common->bsr_nlmin, common->bsr_nlmax, FALSE); + jump[0] = CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_CR); /* We don't need to handle soft partial matching case. */ end_list = NULL; if (common->mode != JIT_PARTIAL_HARD_COMPILE) - add_jump(compiler, &end_list, CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); + add_jump(compiler, &end_list, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); else check_str_end(common, &end_list); OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); - jump[1] = CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_NL); + jump[1] = CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_NL); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); jump[2] = JUMP(SLJIT_JUMP); JUMPHERE(jump[0]); @@ -4657,218 +6557,63 @@ switch(type) case OP_NOT_HSPACE: case OP_HSPACE: - detect_partial_match(common, backtracks); - read_char(common); + if (check_str_ptr) + detect_partial_match(common, backtracks); + read_char_range(common, 0x9, 0x3000, type == OP_NOT_HSPACE); add_jump(compiler, &common->hspace, JUMP(SLJIT_FAST_CALL)); - add_jump(compiler, backtracks, JUMP(type == OP_NOT_HSPACE ? SLJIT_C_NOT_ZERO : SLJIT_C_ZERO)); + sljit_set_current_flags(compiler, SLJIT_SET_Z); + add_jump(compiler, backtracks, JUMP(type == OP_NOT_HSPACE ? SLJIT_NOT_ZERO : SLJIT_ZERO)); return cc; case OP_NOT_VSPACE: case OP_VSPACE: - detect_partial_match(common, backtracks); - read_char(common); - add_jump(compiler, &common->vspace, JUMP(SLJIT_FAST_CALL)); - add_jump(compiler, backtracks, JUMP(type == OP_NOT_VSPACE ? SLJIT_C_NOT_ZERO : SLJIT_C_ZERO)); - return cc; - -#ifdef SUPPORT_UCP - case OP_EXTUNI: - detect_partial_match(common, backtracks); - read_char(common); - add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL)); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, gbprop)); - /* Optimize register allocation: use a real register. 
*/ - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0, STACK_TOP, 0); - OP1(SLJIT_MOV_UB, STACK_TOP, 0, SLJIT_MEM2(TMP1, TMP2), 3); - - label = LABEL(); - jump[0] = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); - OP1(SLJIT_MOV, TMP3, 0, STR_PTR, 0); - read_char(common); - add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL)); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, gbprop)); - OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM2(TMP1, TMP2), 3); - - OP2(SLJIT_SHL, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2); - OP1(SLJIT_MOV_UI, TMP1, 0, SLJIT_MEM1(STACK_TOP), (sljit_sw)PRIV(ucp_gbtable)); - OP1(SLJIT_MOV, STACK_TOP, 0, TMP2, 0); - OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); - OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0); - JUMPTO(SLJIT_C_NOT_ZERO, label); - - OP1(SLJIT_MOV, STR_PTR, 0, TMP3, 0); - JUMPHERE(jump[0]); - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0); - - if (common->mode == JIT_PARTIAL_HARD_COMPILE) - { - jump[0] = CMP(SLJIT_C_LESS, STR_PTR, 0, STR_END, 0); - /* Since we successfully read a char above, partial matching must occure. */ - check_partial(common, TRUE); - JUMPHERE(jump[0]); - } - return cc; -#endif - - case OP_EODN: - /* Requires rather complex checks. */ - jump[0] = CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); - if (common->nltype == NLTYPE_FIXED && common->newline > 255) - { - OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); - OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); - if (common->mode == JIT_COMPILE) - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, STR_END, 0)); - else - { - jump[1] = CMP(SLJIT_C_EQUAL, TMP2, 0, STR_END, 0); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP2, 0, STR_END, 0); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_LESS); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_NOT_EQUAL); - add_jump(compiler, backtracks, JUMP(SLJIT_C_NOT_EQUAL)); - check_partial(common, TRUE); - add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); - JUMPHERE(jump[1]); - } - OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, common->newline & 0xff)); - } - else if (common->nltype == NLTYPE_FIXED) - { - OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); - OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, STR_END, 0)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, common->newline)); - } - else - { - OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); - jump[1] = CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_CR); - OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); - OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP2, 0, STR_END, 0); - jump[2] = JUMP(SLJIT_C_GREATER); - add_jump(compiler, backtracks, JUMP(SLJIT_C_LESS)); - /* Equal. 
*/ - OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); - jump[3] = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_NL); - add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); - - JUMPHERE(jump[1]); - if (common->nltype == NLTYPE_ANYCRLF) - { - OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); - add_jump(compiler, backtracks, CMP(SLJIT_C_LESS, TMP2, 0, STR_END, 0)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_NL)); - } - else - { - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1, STR_PTR, 0); - read_char(common); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, STR_PTR, 0, STR_END, 0)); - add_jump(compiler, &common->anynewline, JUMP(SLJIT_FAST_CALL)); - add_jump(compiler, backtracks, JUMP(SLJIT_C_ZERO)); - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1); - } - JUMPHERE(jump[2]); - JUMPHERE(jump[3]); - } - JUMPHERE(jump[0]); - check_partial(common, FALSE); - return cc; - - case OP_EOD: - add_jump(compiler, backtracks, CMP(SLJIT_C_LESS, STR_PTR, 0, STR_END, 0)); - check_partial(common, FALSE); - return cc; - - case OP_CIRC: - OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, begin)); - add_jump(compiler, backtracks, CMP(SLJIT_C_GREATER, STR_PTR, 0, TMP1, 0)); - OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, notbol)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); - return cc; - - case OP_CIRCM: - OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, begin)); - jump[1] = CMP(SLJIT_C_GREATER, STR_PTR, 0, TMP1, 0); - OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, notbol)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); - jump[0] = JUMP(SLJIT_JUMP); - JUMPHERE(jump[1]); - - add_jump(compiler, backtracks, CMP(SLJIT_C_GREATER_EQUAL, STR_PTR, 0, STR_END, 0)); - if (common->nltype == NLTYPE_FIXED && common->newline > 255) - { - OP2(SLJIT_SUB, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); - add_jump(compiler, backtracks, CMP(SLJIT_C_LESS, TMP2, 0, TMP1, 0)); - OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-2)); - OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-1)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, common->newline & 0xff)); - } - else - { - skip_char_back(common); - read_char(common); - check_newlinechar(common, common->nltype, backtracks, FALSE); - } - JUMPHERE(jump[0]); - return cc; - - case OP_DOLL: - OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0); - OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, noteol)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); - - if (!common->endonly) - compile_char1_matchingpath(common, OP_EODN, cc, backtracks); - else - { - add_jump(compiler, backtracks, CMP(SLJIT_C_LESS, STR_PTR, 0, STR_END, 0)); - check_partial(common, FALSE); - } + if (check_str_ptr) + detect_partial_match(common, backtracks); + read_char_range(common, 0xa, 0x2029, type == OP_NOT_VSPACE); + add_jump(compiler, &common->vspace, JUMP(SLJIT_FAST_CALL)); + sljit_set_current_flags(compiler, SLJIT_SET_Z); + add_jump(compiler, backtracks, JUMP(type == OP_NOT_VSPACE ? 
SLJIT_NOT_ZERO : SLJIT_ZERO)); return cc; - case OP_DOLLM: - jump[1] = CMP(SLJIT_C_LESS, STR_PTR, 0, STR_END, 0); - OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0); - OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, noteol)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); - check_partial(common, FALSE); - jump[0] = JUMP(SLJIT_JUMP); - JUMPHERE(jump[1]); +#ifdef SUPPORT_UCP + case OP_EXTUNI: + if (check_str_ptr) + detect_partial_match(common, backtracks); + read_char(common); + add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, gbprop)); + /* Optimize register allocation: use a real register. */ + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0); + OP1(SLJIT_MOV_U8, STACK_TOP, 0, SLJIT_MEM2(TMP1, TMP2), 3); - if (common->nltype == NLTYPE_FIXED && common->newline > 255) - { - OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2)); - OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); - if (common->mode == JIT_COMPILE) - add_jump(compiler, backtracks, CMP(SLJIT_C_GREATER, TMP2, 0, STR_END, 0)); - else - { - jump[1] = CMP(SLJIT_C_LESS_EQUAL, TMP2, 0, STR_END, 0); - /* STR_PTR = STR_END - IN_UCHARS(1) */ - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff)); - check_partial(common, TRUE); - add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); - JUMPHERE(jump[1]); - } + label = LABEL(); + jump[0] = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); + OP1(SLJIT_MOV, TMP3, 0, STR_PTR, 0); + read_char(common); + add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, gbprop)); + OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM2(TMP1, TMP2), 3); - OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, common->newline & 0xff)); - } - else + OP2(SLJIT_SHL, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2); + OP1(SLJIT_MOV_U32, TMP1, 0, SLJIT_MEM1(STACK_TOP), (sljit_sw)PRIV(ucp_gbtable)); + OP1(SLJIT_MOV, STACK_TOP, 0, TMP2, 0); + OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); + OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0); + JUMPTO(SLJIT_NOT_ZERO, label); + + OP1(SLJIT_MOV, STR_PTR, 0, TMP3, 0); + JUMPHERE(jump[0]); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); + + if (common->mode == JIT_PARTIAL_HARD_COMPILE) { - peek_char(common); - check_newlinechar(common, common->nltype, backtracks, FALSE); + jump[0] = CMP(SLJIT_LESS, STR_PTR, 0, STR_END, 0); + /* Since we successfully read a char above, partial matching must occure. 
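The relocated OP_EXTUNI block below walks grapheme cluster boundaries with PRIV(ucp_gbtable): the left character's grapheme-break property selects a 32-bit row, the right character's property selects a bit in it, and a set bit means the cluster continues. Reduced to plain C (the declaration is an illustrative assumption):

extern const unsigned int ucp_gbtable[];

/* Nonzero if a character with break property rgb extends a cluster whose
previous character had break property lgb. */
static int continues_cluster(unsigned int lgb, unsigned int rgb)
{
return (ucp_gbtable[lgb] & (1u << rgb)) != 0;
}

STACK_TOP is pressed into service as the loop's left-property register, which is why it is spilled to LOCALS0 around the loop.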
*/ + check_partial(common, TRUE); + JUMPHERE(jump[0]); } - JUMPHERE(jump[0]); return cc; +#endif case OP_CHAR: case OP_CHARI: @@ -4876,10 +6621,11 @@ switch(type) #ifdef SUPPORT_UTF if (common->utf && HAS_EXTRALEN(*cc)) length += GET_EXTRALEN(*cc); #endif - if (common->mode == JIT_COMPILE && (type == OP_CHAR || !char_has_othercase(common, cc) || char_get_othercase_bit(common, cc) != 0)) + if (common->mode == JIT_COMPILE && check_str_ptr + && (type == OP_CHAR || !char_has_othercase(common, cc) || char_get_othercase_bit(common, cc) != 0)) { OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(length)); - add_jump(compiler, backtracks, CMP(SLJIT_C_GREATER, STR_PTR, 0, STR_END, 0)); + add_jump(compiler, backtracks, CMP(SLJIT_GREATER, STR_PTR, 0, STR_END, 0)); context.length = IN_UCHARS(length); context.sourcereg = -1; @@ -4888,8 +6634,9 @@ switch(type) #endif return byte_sequence_compare(common, type == OP_CHARI, cc, &context, backtracks); } - detect_partial_match(common, backtracks); - read_char(common); + + if (check_str_ptr) + detect_partial_match(common, backtracks); #ifdef SUPPORT_UTF if (common->utf) { @@ -4898,29 +6645,31 @@ switch(type) else #endif c = *cc; + if (type == OP_CHAR || !char_has_othercase(common, cc)) { - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, c)); + read_char_range(common, c, c, FALSE); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, c)); return cc + length; } oc = char_othercase(common, c); + read_char_range(common, c < oc ? c : oc, c > oc ? c : oc, FALSE); bit = c ^ oc; if (is_powerof2(bit)) { OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, bit); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, c | bit)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, c | bit)); return cc + length; } - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, c); - OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_C_EQUAL); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, oc); - OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_C_EQUAL); - add_jump(compiler, backtracks, JUMP(SLJIT_C_ZERO)); + jump[0] = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, c); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, oc)); + JUMPHERE(jump[0]); return cc + length; case OP_NOT: case OP_NOTI: - detect_partial_match(common, backtracks); + if (check_str_ptr) + detect_partial_match(common, backtracks); length = 1; #ifdef SUPPORT_UTF if (common->utf) @@ -4929,18 +6678,18 @@ switch(type) c = *cc; if (c < 128) { - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(STR_PTR), 0); if (type == OP_NOT || !char_has_othercase(common, cc)) - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, c)); + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, c)); else { /* Since UTF8 code page is fixed, we know that c is in [a-z] or [A-Z] range. */ OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x20); - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, TMP2, 0, SLJIT_IMM, c | 0x20)); + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, c | 0x20)); } /* Skip the variable-length character. 
*/ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); - jump[0] = CMP(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, 0xc0); + jump[0] = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xc0); OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)PRIV(utf8_table4) - 0xc0); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0); JUMPHERE(jump[0]); @@ -4950,103 +6699,92 @@ switch(type) #endif /* COMPILE_PCRE8 */ { GETCHARLEN(c, cc, length); - read_char(common); } } else #endif /* SUPPORT_UTF */ - { - read_char(common); c = *cc; - } if (type == OP_NOT || !char_has_othercase(common, cc)) - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, c)); + { + read_char_range(common, c, c, TRUE); + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, c)); + } else { oc = char_othercase(common, c); + read_char_range(common, c < oc ? c : oc, c > oc ? c : oc, TRUE); bit = c ^ oc; if (is_powerof2(bit)) { OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, bit); - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, c | bit)); + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, c | bit)); } else { - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, c)); - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, oc)); + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, c)); + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, oc)); } } return cc + length; case OP_CLASS: case OP_NCLASS: - detect_partial_match(common, backtracks); - read_char(common); - if (check_class_ranges(common, (const pcre_uint8 *)cc, type == OP_NCLASS, backtracks)) + if (check_str_ptr) + detect_partial_match(common, backtracks); + +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + bit = (common->utf && is_char7_bitset((const sljit_u8 *)cc, type == OP_NCLASS)) ? 127 : 255; + read_char_range(common, 0, bit, type == OP_NCLASS); +#else + read_char_range(common, 0, 255, type == OP_NCLASS); +#endif + + if (check_class_ranges(common, (const sljit_u8 *)cc, type == OP_NCLASS, FALSE, backtracks)) return cc + 32 / sizeof(pcre_uchar); -#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 jump[0] = NULL; -#ifdef COMPILE_PCRE8 - /* This check only affects 8 bit mode. In other modes, we - always need to compare the value with 255. 
*/ if (common->utf) -#endif /* COMPILE_PCRE8 */ { - jump[0] = CMP(SLJIT_C_GREATER, TMP1, 0, SLJIT_IMM, 255); + jump[0] = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, bit); if (type == OP_CLASS) { add_jump(compiler, backtracks, jump[0]); jump[0] = NULL; } } -#endif /* SUPPORT_UTF || !COMPILE_PCRE8 */ +#elif !defined COMPILE_PCRE8 + jump[0] = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); + if (type == OP_CLASS) + { + add_jump(compiler, backtracks, jump[0]); + jump[0] = NULL; + } +#endif /* SUPPORT_UTF && COMPILE_PCRE8 */ + OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); - OP1(SLJIT_MOV_UB, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); + OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); - OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0); - add_jump(compiler, backtracks, JUMP(SLJIT_C_ZERO)); + OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0); + add_jump(compiler, backtracks, JUMP(SLJIT_ZERO)); + #if defined SUPPORT_UTF || !defined COMPILE_PCRE8 if (jump[0] != NULL) JUMPHERE(jump[0]); -#endif /* SUPPORT_UTF || !COMPILE_PCRE8 */ +#endif return cc + 32 / sizeof(pcre_uchar); #if defined SUPPORT_UTF || defined COMPILE_PCRE16 || defined COMPILE_PCRE32 case OP_XCLASS: + if (check_str_ptr) + detect_partial_match(common, backtracks); compile_xclass_matchingpath(common, cc + LINK_SIZE, backtracks); return cc + GET(cc, 0) - 1; #endif - - case OP_REVERSE: - length = GET(cc, 0); - if (length == 0) - return cc + LINK_SIZE; - OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); -#ifdef SUPPORT_UTF - if (common->utf) - { - OP1(SLJIT_MOV, TMP3, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, begin)); - OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, length); - label = LABEL(); - add_jump(compiler, backtracks, CMP(SLJIT_C_LESS_EQUAL, STR_PTR, 0, TMP3, 0)); - skip_char_back(common); - OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_IMM, 1); - JUMPTO(SLJIT_C_NOT_ZERO, label); - } - else -#endif - { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, begin)); - OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(length)); - add_jump(compiler, backtracks, CMP(SLJIT_C_LESS, STR_PTR, 0, TMP1, 0)); - } - check_start_used_ptr(common); - return cc + LINK_SIZE; } -SLJIT_ASSERT_STOP(); +SLJIT_UNREACHABLE(); return cc; } @@ -5102,7 +6840,7 @@ if (context.length > 0) { /* We have a fixed-length byte sequence. */ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, context.length); - add_jump(compiler, backtracks, CMP(SLJIT_C_GREATER, STR_PTR, 0, STR_END, 0)); + add_jump(compiler, backtracks, CMP(SLJIT_GREATER, STR_PTR, 0, STR_END, 0)); context.sourcereg = -1; #if defined SLJIT_UNALIGNED && SLJIT_UNALIGNED @@ -5113,7 +6851,7 @@ if (context.length > 0) } /* A non-fixed length character will be checked if length == 0. */ -return compile_char1_matchingpath(common, *cc, cc + 1, backtracks); +return compile_char1_matchingpath(common, *cc, cc + 1, backtracks, TRUE); } /* Forward definitions. 
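For OP_CLASS the generated five-instruction sequence (AND 7, LSHR 3, byte load, shift, AND) is the standard bitmap membership test over the 32-byte class map stored in the byte code. Its scalar equivalent:

/* 256-bit class bitmap membership, as emitted for OP_CLASS/OP_NCLASS. */
static int class_bitmap_contains(const unsigned char *bitmap, unsigned int c)
{
return (bitmap[c >> 3] & (1u << (c & 0x7))) != 0;
}

The new is_char7_bitset check lets UTF-8 subjects take this path with a 127 ceiling, avoiding the full character decode whenever the class contains only ASCII.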
*/ @@ -5159,21 +6897,21 @@ jump_list *found = NULL; SLJIT_ASSERT(*cc == OP_DNREF || *cc == OP_DNREFI); -OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(1)); +OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1)); count--; while (count-- > 0) { offset = GET2(slot, 0) << 1; GET_LOCAL_BASE(TMP2, 0, OVECTOR(offset)); - add_jump(compiler, &found, CMP(SLJIT_C_NOT_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset), TMP1, 0)); + add_jump(compiler, &found, CMP(SLJIT_NOT_EQUAL, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset), TMP1, 0)); slot += common->name_entry_size; } offset = GET2(slot, 0) << 1; GET_LOCAL_BASE(TMP2, 0, OVECTOR(offset)); if (backtracks != NULL && !common->jscript_compat) - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset), TMP1, 0)); + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset), TMP1, 0)); set_jumps(found, LABEL()); } @@ -5190,10 +6928,10 @@ struct sljit_jump *nopartial; if (ref) { offset = GET2(cc, 1) << 1; - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset)); /* OVECTOR(1) contains the "string begin - 1" constant. */ if (withchecks && !common->jscript_compat) - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(1))); + add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1))); } else OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), 0); @@ -5201,51 +6939,56 @@ else #if defined SUPPORT_UTF && defined SUPPORT_UCP if (common->utf && *cc == OP_REFI) { - SLJIT_ASSERT(TMP1 == SLJIT_SCRATCH_REG1 && STACK_TOP == SLJIT_SCRATCH_REG2 && TMP2 == SLJIT_SCRATCH_REG3); + SLJIT_ASSERT(TMP1 == SLJIT_R0 && STACK_TOP == SLJIT_R1); if (ref) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1)); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); else - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); if (withchecks) - jump = CMP(SLJIT_C_EQUAL, TMP1, 0, TMP2, 0); - - /* Needed to save important temporary registers. */ - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0, STACK_TOP, 0); - OP1(SLJIT_MOV, SLJIT_SCRATCH_REG2, 0, ARGUMENTS, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SCRATCH_REG2), SLJIT_OFFSETOF(jit_arguments, uchar_ptr), STR_PTR, 0); - sljit_emit_ijump(compiler, SLJIT_CALL3, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_utf_caselesscmp)); - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0); + jump = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_R2, 0); + + /* No free saved registers so save data on stack. 
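compile_ref_matchingpath below reduces, in the caseful case, to comparing the captured OVECTOR range against the current subject position; only the caseless UTF case needs the do_utf_caselesscmp call with the four-argument calling convention shown. The caseful logic, modelled in plain C (a sketch of what the casefulcmp fast call verifies, not its implementation):

#include <string.h>

/* Back-reference check: the captured text must reappear verbatim. */
static int ref_matches(const unsigned char *cap_begin,
                       const unsigned char *cap_end,
                       const unsigned char **subject,
                       const unsigned char *subject_end)
{
size_t len = (size_t)(cap_end - cap_begin);
if ((size_t)(subject_end - *subject) < len) return 0;
if (len != 0 && memcmp(*subject, cap_begin, len) != 0) return 0;
*subject += len;
return 1;
}

The partial-matching variant clamps the comparison at STR_END first, which is what the TMP2 arithmetic around check_partial implements.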
*/ + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0); + OP1(SLJIT_MOV, SLJIT_R1, 0, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_R3, 0, STR_END, 0); + sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW) | SLJIT_ARG3(SW) | SLJIT_ARG4(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_utf_caselesscmp)); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_RETURN_REG, 0); + if (common->mode == JIT_COMPILE) - add_jump(compiler, backtracks, CMP(SLJIT_C_LESS_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1)); + add_jump(compiler, backtracks, CMP(SLJIT_LESS_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1)); else { - add_jump(compiler, backtracks, CMP(SLJIT_C_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0)); - nopartial = CMP(SLJIT_C_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1); + OP2(SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_LESS, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1); + + add_jump(compiler, backtracks, JUMP(SLJIT_LESS)); + + nopartial = JUMP(SLJIT_NOT_EQUAL); + OP1(SLJIT_MOV, STR_PTR, 0, STR_END, 0); check_partial(common, FALSE); add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); JUMPHERE(nopartial); } - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_RETURN_REG, 0); } else #endif /* SUPPORT_UTF && SUPPORT_UCP */ { if (ref) - OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), TMP1, 0); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), TMP1, 0); else - OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP1, 0); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP1, 0); if (withchecks) - jump = JUMP(SLJIT_C_ZERO); + jump = JUMP(SLJIT_ZERO); OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP2, 0); - partial = CMP(SLJIT_C_GREATER, STR_PTR, 0, STR_END, 0); + partial = CMP(SLJIT_GREATER, STR_PTR, 0, STR_END, 0); if (common->mode == JIT_COMPILE) add_jump(compiler, backtracks, partial); add_jump(compiler, *cc == OP_REF ? &common->casefulcmp : &common->caselesscmp, JUMP(SLJIT_FAST_CALL)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); if (common->mode != JIT_COMPILE) { @@ -5254,10 +6997,10 @@ else /* TMP2 -= STR_END - STR_PTR */ OP2(SLJIT_SUB, TMP2, 0, TMP2, 0, STR_PTR, 0); OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, STR_END, 0); - partial = CMP(SLJIT_C_EQUAL, TMP2, 0, SLJIT_IMM, 0); + partial = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, 0); OP1(SLJIT_MOV, STR_PTR, 0, STR_END, 0); add_jump(compiler, *cc == OP_REF ? 
&common->casefulcmp : &common->caselesscmp, JUMP(SLJIT_FAST_CALL)); - add_jump(compiler, backtracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); + add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); JUMPHERE(partial); check_partial(common, FALSE); add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); @@ -5288,7 +7031,7 @@ pcre_uchar *ccbegin = cc; int min = 0, max = 0; BOOL minimize; -PUSH_BACKTRACK(sizeof(iterator_backtrack), cc, NULL); +PUSH_BACKTRACK(sizeof(ref_iterator_backtrack), cc, NULL); if (ref) offset = GET2(cc, 1) << 1; @@ -5325,7 +7068,7 @@ switch(type) cc += 1 + IMM2_SIZE + 1 + 2 * IMM2_SIZE; break; default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); break; } @@ -5335,63 +7078,63 @@ if (!minimize) { allocate_stack(common, 2); if (ref) - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), SLJIT_IMM, 0); /* Temporary release of STR_PTR. */ - OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); + OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); /* Handles both invalid and empty cases. Since the minimum repeat is zero, the invalid case is basically the same as an empty case. */ if (ref) - zerolength = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1)); + zerolength = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); else { compile_dnref_search(common, ccbegin, NULL); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE1, TMP2, 0); - zerolength = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), POSSESSIVE1, TMP2, 0); + zerolength = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); } /* Restore if not zero length.
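The SLJIT_SUB -> SLJIT_ADD swap in the "temporary release" above recurs throughout this diff: between these versions the JIT's backtracking stack switched to growing toward lower addresses, so allocation now lowers STACK_TOP and release raises it (and the STACK() offsets turn negative, as in the STACK(-1) operands elsewhere in the patch). A tiny downward-growing model, all names illustrative:

#define MODEL_STACK_WORDS 64

static long model_stack[MODEL_STACK_WORDS];
static long *model_top;                    /* plays the role of STACK_TOP */

static void model_init(void) { model_top = model_stack + MODEL_STACK_WORDS; }
static void model_push(long v) { *--model_top = v; }   /* allocate_stack: SUB */
static long model_pop(void) { return *model_top++; }   /* free_stack: ADD */
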
*/ - OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); } else { allocate_stack(common, 1); if (ref) - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); if (ref) { - add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(1))); - zerolength = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1)); + add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1))); + zerolength = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); } else { compile_dnref_search(common, ccbegin, &backtrack->topbacktracks); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE1, TMP2, 0); - zerolength = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), POSSESSIVE1, TMP2, 0); + zerolength = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); } } if (min > 1 || max > 1) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE0, SLJIT_IMM, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), POSSESSIVE0, SLJIT_IMM, 0); label = LABEL(); if (!ref) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE1); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), POSSESSIVE1); compile_ref_matchingpath(common, ccbegin, &backtrack->topbacktracks, FALSE, FALSE); if (min > 1 || max > 1) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), POSSESSIVE0); OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE0, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), POSSESSIVE0, TMP1, 0); if (min > 1) - CMPTO(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, min, label); + CMPTO(SLJIT_LESS, TMP1, 0, SLJIT_IMM, min, label); if (max > 1) { - jump = CMP(SLJIT_C_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, max); + jump = CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, max); allocate_stack(common, 1); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0); JUMPTO(SLJIT_JUMP, label); @@ -5408,7 +7151,7 @@ if (!minimize) } JUMPHERE(zerolength); - BACKTRACK_AS(iterator_backtrack)->matchingpath = LABEL(); + BACKTRACK_AS(ref_iterator_backtrack)->matchingpath = LABEL(); count_match(common); return cc; @@ -5416,7 +7159,7 @@ if (!minimize) allocate_stack(common, ref ? 2 : 3); if (ref) - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); if (type != OP_CRMINSTAR) OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), SLJIT_IMM, 0); @@ -5426,13 +7169,13 @@ if (min == 0) /* Handles both invalid and empty cases. Since the minimum repeat is zero, the invalid case is basically the same as an empty case.
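The equivalence stated in this comment can be seen from the public API: a back reference to a never-set group normally fails, but under a zero-minimum quantifier it simply matches zero times, exactly like an empty reference. A small demonstration with the 8-bit API:

#include <stdio.h>
#include <pcre.h>

int main(void)
{
const char *err; int erroff; int ov[6]; int rc;
pcre *re = pcre_compile("(b)?\\1*a", 0, &err, &erroff, NULL);
/* Group 1 never matches in "a", so \1 is invalid, yet \1* succeeds with
zero repeats: invalid and empty coincide when the minimum is zero. */
rc = pcre_exec(re, NULL, "a", 1, 0, 0, ov, 6);
printf("rc=%d\n", rc);   /* expected: 1 (match, group 1 unset) */
pcre_free(re);
return 0;
}
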
*/ if (ref) - zerolength = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1)); + zerolength = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); else { compile_dnref_search(common, ccbegin, NULL); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(2), TMP2, 0); - zerolength = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); + zerolength = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); } /* Length is non-zero, we can match real repeats. */ OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0); @@ -5442,21 +7185,21 @@ else { if (ref) { - add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(1))); - zerolength = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1)); + add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1))); + zerolength = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); } else { compile_dnref_search(common, ccbegin, &backtrack->topbacktracks); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(2), TMP2, 0); - zerolength = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); + zerolength = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); } } -BACKTRACK_AS(iterator_backtrack)->matchingpath = LABEL(); +BACKTRACK_AS(ref_iterator_backtrack)->matchingpath = LABEL(); if (max > 0) - add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_C_GREATER_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(1), SLJIT_IMM, max)); + add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_GREATER_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(1), SLJIT_IMM, max)); if (!ref) OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(2)); @@ -5468,7 +7211,7 @@ if (min > 1) OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(1)); OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), TMP1, 0); - CMPTO(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, min, BACKTRACK_AS(iterator_backtrack)->matchingpath); + CMPTO(SLJIT_LESS, TMP1, 0, SLJIT_IMM, min, BACKTRACK_AS(ref_iterator_backtrack)->matchingpath); } else if (max > 0) OP2(SLJIT_ADD, SLJIT_MEM1(STACK_TOP), STACK(1), SLJIT_MEM1(STACK_TOP), STACK(1), SLJIT_IMM, 1); @@ -5528,15 +7271,15 @@ if (entry == NULL) if (common->has_set_som && common->mark_ptr != 0) { - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0)); allocate_stack(common, 2); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), TMP2, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), TMP1, 0); } else if (common->has_set_som || common->mark_ptr != 0) { - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->has_set_som ? (int)(OVECTOR(0)) : common->mark_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->has_set_som ? (int)(OVECTOR(0)) : common->mark_ptr); allocate_stack(common, 1); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), TMP2, 0); } @@ -5546,11 +7289,11 @@ if (entry->entry == NULL) else JUMPTO(SLJIT_FAST_CALL, entry->entry); /* Leave if the match is failed. 
*/ -add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, 0)); +add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0)); return cc + 1 + LINK_SIZE; } -static int SLJIT_CALL do_callout(struct jit_arguments* arguments, PUBL(callout_block) *callout_block, pcre_uchar **jit_ovector) +static sljit_s32 SLJIT_FUNC do_callout(struct jit_arguments *arguments, PUBL(callout_block) *callout_block, pcre_uchar **jit_ovector) { const pcre_uchar *begin = arguments->begin; int *offset_vector = arguments->offsets; @@ -5599,7 +7342,7 @@ return (*PUBL(callout))(callout_block); (((int)sizeof(PUBL(callout_block)) + 7) & ~7) #define CALLOUT_ARG_OFFSET(arg) \ - (-CALLOUT_ARG_SIZE + SLJIT_OFFSETOF(PUBL(callout_block), arg)) + SLJIT_OFFSETOF(PUBL(callout_block), arg) static SLJIT_INLINE pcre_uchar *compile_callout_matchingpath(compiler_common *common, pcre_uchar *cc, backtrack_common *parent) { @@ -5610,45 +7353,71 @@ PUSH_BACKTRACK(sizeof(backtrack_common), cc, NULL); allocate_stack(common, CALLOUT_ARG_SIZE / sizeof(sljit_sw)); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr); -OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); SLJIT_ASSERT(common->capture_last_ptr != 0); -OP1(SLJIT_MOV_SI, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(callout_number), SLJIT_IMM, cc[1]); -OP1(SLJIT_MOV_SI, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(capture_last), TMP2, 0); +OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr); +OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); +OP1(SLJIT_MOV_S32, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(callout_number), SLJIT_IMM, cc[1]); +OP1(SLJIT_MOV_S32, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(capture_last), TMP2, 0); /* These pointer-sized fields temporarily store internal variables. */ -OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0)); +OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(offset_vector), STR_PTR, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(subject), TMP2, 0); if (common->mark_ptr != 0) OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, mark_ptr)); -OP1(SLJIT_MOV_SI, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(pattern_position), SLJIT_IMM, GET(cc, 2)); -OP1(SLJIT_MOV_SI, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(next_item_length), SLJIT_IMM, GET(cc, 2 + LINK_SIZE)); +OP1(SLJIT_MOV_S32, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(pattern_position), SLJIT_IMM, GET(cc, 2)); +OP1(SLJIT_MOV_S32, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(next_item_length), SLJIT_IMM, GET(cc, 2 + LINK_SIZE)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(mark), (common->mark_ptr != 0) ? TMP2 : SLJIT_IMM, 0); /* Needed to save important temporary registers.
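For context on the block being filled in here: these stores build the pcre_callout_block that user code receives, and the return-value handling a few lines below mirrors the interpreter's contract. An illustrative user-side callout (the function name is invented; the hook and struct are the public API):

#include <stdio.h>
#include <pcre.h>

/* 0 = keep matching, > 0 = fail at this point and backtrack,
< 0 = abort the whole match with that error code. */
static int trace_callout(pcre_callout_block *cb)
{
printf("callout %d at subject offset %d (pattern offset %d)\n",
cb->callout_number, cb->current_position, cb->pattern_position);
return 0;
}

/* Usage: set pcre_callout = trace_callout; then compile a pattern
containing (?C1) and match as usual. */
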
*/ -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0, STACK_TOP, 0); -OP2(SLJIT_SUB, SLJIT_SCRATCH_REG2, 0, STACK_TOP, 0, SLJIT_IMM, CALLOUT_ARG_SIZE); -GET_LOCAL_BASE(SLJIT_SCRATCH_REG3, 0, OVECTOR_START); -sljit_emit_ijump(compiler, SLJIT_CALL3, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_callout)); -OP1(SLJIT_MOV_SI, SLJIT_RETURN_REG, 0, SLJIT_RETURN_REG, 0); -OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0); +/* SLJIT_R0 = arguments */ +OP1(SLJIT_MOV, SLJIT_R1, 0, STACK_TOP, 0); +GET_LOCAL_BASE(SLJIT_R2, 0, OVECTOR_START); +sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(S32) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW) | SLJIT_ARG3(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_callout)); +OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); free_stack(common, CALLOUT_ARG_SIZE / sizeof(sljit_sw)); /* Check return value. */ -OP2(SLJIT_SUB | SLJIT_SET_S, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); -add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_C_SIG_GREATER)); +OP2(SLJIT_SUB32 | SLJIT_SET_Z | SLJIT_SET_SIG_GREATER, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); +add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_SIG_GREATER32)); if (common->forced_quit_label == NULL) - add_jump(compiler, &common->forced_quit, JUMP(SLJIT_C_SIG_LESS)); + add_jump(compiler, &common->forced_quit, JUMP(SLJIT_NOT_EQUAL32) /* SIG_LESS */); else - JUMPTO(SLJIT_C_SIG_LESS, common->forced_quit_label); + JUMPTO(SLJIT_NOT_EQUAL32 /* SIG_LESS */, common->forced_quit_label); return cc + 2 + 2 * LINK_SIZE; } #undef CALLOUT_ARG_SIZE #undef CALLOUT_ARG_OFFSET +static SLJIT_INLINE BOOL assert_needs_str_ptr_saving(pcre_uchar *cc) +{ +while (TRUE) + { + switch (*cc) + { + case OP_NOT_WORD_BOUNDARY: + case OP_WORD_BOUNDARY: + case OP_CIRC: + case OP_CIRCM: + case OP_DOLL: + case OP_DOLLM: + case OP_CALLOUT: + case OP_ALT: + cc += PRIV(OP_lengths)[*cc]; + break; + + case OP_KET: + return FALSE; + + default: + return TRUE; + } + } +} + static pcre_uchar *compile_assert_matchingpath(compiler_common *common, pcre_uchar *cc, assert_backtrack *backtrack, BOOL conditional) { DEFINE_COMPILER; @@ -5700,21 +7469,34 @@ if (bra == OP_BRAMINZERO) /* This is a braminzero backtrack path. */ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); - brajump = CMP(SLJIT_C_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); + brajump = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); } if (framesize < 0) { - extrasize = needs_control_head ? 
2 : 1; + extrasize = 1; + if (bra == OP_BRA && !assert_needs_str_ptr_saving(ccbegin + 1 + LINK_SIZE)) + extrasize = 0; + + if (needs_control_head) + extrasize++; + if (framesize == no_frame) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, STACK_TOP, 0); - allocate_stack(common, extrasize); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STACK_TOP, 0); + + if (extrasize > 0) + allocate_stack(common, extrasize); + if (needs_control_head) - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr); - OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); + + if (extrasize > 0) + OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0); + if (needs_control_head) { - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, SLJIT_IMM, 0); + SLJIT_ASSERT(extrasize == 2); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_IMM, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), TMP1, 0); } } @@ -5722,20 +7504,23 @@ else { extrasize = needs_control_head ? 3 : 2; allocate_stack(common, framesize + extrasize); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); - OP2(SLJIT_SUB, TMP2, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + extrasize) * sizeof(sljit_sw)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, TMP2, 0); + + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); + OP2(SLJIT_ADD, TMP2, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + extrasize) * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, TMP2, 0); if (needs_control_head) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0); + if (needs_control_head) { OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(2), TMP1, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), TMP2, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, SLJIT_IMM, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_IMM, 0); } else OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), TMP1, 0); + init_frame(common, ccbegin, NULL, framesize + extrasize - 1, extrasize, FALSE); } @@ -5759,7 +7544,7 @@ while (1) altbacktrack.top = NULL; altbacktrack.topbacktracks = NULL; - if (*ccbegin == OP_ALT) + if (*ccbegin == OP_ALT && extrasize > 0) OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); altbacktrack.cc = ccbegin; @@ -5787,26 +7572,27 @@ while (1) if (framesize < 0) { if (framesize == no_frame) - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); - else + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); + else if (extrasize > 0) free_stack(common, extrasize); + if (needs_control_head) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(-1)); } else { if ((opcode != OP_ASSERT_NOT && opcode != OP_ASSERTBACK_NOT) || conditional) { /* We don't need to keep the STR_PTR, only the previous private_data_ptr. 
*/ - OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_IMM, (framesize + 1) * sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + 1) * sizeof(sljit_sw)); if (needs_control_head) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(-1)); } else { - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); if (needs_control_head) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), (framesize + 1) * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(-framesize - 2)); add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); } } @@ -5815,24 +7601,27 @@ while (1) { /* We know that STR_PTR was stored on the top of the stack. */ if (conditional) - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), needs_control_head ? sizeof(sljit_sw) : 0); + { + if (extrasize > 0) + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), needs_control_head ? STACK(-2) : STACK(-1)); + } else if (bra == OP_BRAZERO) { if (framesize < 0) - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), (extrasize - 1) * sizeof(sljit_sw)); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(-extrasize)); else { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), framesize * sizeof(sljit_sw)); - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), (framesize + extrasize - 1) * sizeof(sljit_sw)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, TMP1, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(-framesize - 1)); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(-framesize - extrasize)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, TMP1, 0); } - OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); } else if (framesize >= 0) { /* For OP_BRA and OP_BRAMINZERO. 
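Note how the frame teardown inverts here too: with the stack now growing downward, the code recovers the discarded top as the saved base minus (framesize + 1) words, where 8.34 added the same amount. As a one-line C model (names illustrative):

static long *discard_frame_model(long *saved_base, int framesize)
{
return saved_base - (framesize + 1);   /* previously: saved_base + (framesize + 1) */
}
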
*/ - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_MEM1(STACK_TOP), framesize * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-framesize - 1)); } } add_jump(compiler, found, JUMP(SLJIT_JUMP)); @@ -5876,23 +7665,23 @@ if (common->positive_assert_quit != NULL) set_jumps(common->positive_assert_quit, LABEL()); SLJIT_ASSERT(framesize != no_stack); if (framesize < 0) - OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_IMM, extrasize * sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, extrasize * sizeof(sljit_sw)); else { - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); - OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + extrasize) * sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + extrasize) * sizeof(sljit_sw)); } JUMPHERE(jump); } if (needs_control_head) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(1)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(1)); if (opcode == OP_ASSERT || opcode == OP_ASSERTBACK) { /* Assert is failed. */ - if (conditional || bra == OP_BRAZERO) + if ((conditional && extrasize > 0) || bra == OP_BRAZERO) OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); if (framesize < 0) @@ -5904,7 +7693,7 @@ if (opcode == OP_ASSERT || opcode == OP_ASSERTBACK) free_stack(common, 1); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); } - else + else if (extrasize > 0) free_stack(common, extrasize); } else @@ -5918,7 +7707,7 @@ if (opcode == OP_ASSERT || opcode == OP_ASSERTBACK) } else free_stack(common, framesize + extrasize); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, TMP1, 0); } jump = JUMP(SLJIT_JUMP); if (bra != OP_BRAZERO) @@ -5929,17 +7718,19 @@ if (opcode == OP_ASSERT || opcode == OP_ASSERTBACK) if (framesize < 0) { /* We know that STR_PTR was stored on the top of the stack. */ - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), (extrasize - 1) * sizeof(sljit_sw)); + if (extrasize > 0) + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(-extrasize)); + /* Keep the STR_PTR on the top of the stack. */ if (bra == OP_BRAZERO) { - OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); if (extrasize == 2) OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0); } else if (bra == OP_BRAMINZERO) { - OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); } } @@ -5948,13 +7739,13 @@ if (opcode == OP_ASSERT || opcode == OP_ASSERTBACK) if (bra == OP_BRA) { /* We don't need to keep the STR_PTR, only the previous private_data_ptr. 
*/ - OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_IMM, (framesize + 1) * sizeof(sljit_sw)); - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), (extrasize - 2) * sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + 1) * sizeof(sljit_sw)); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(-extrasize + 1)); } else { /* We don't need to keep the STR_PTR, only the previous private_data_ptr. */ - OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_IMM, (framesize + 2) * sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + 2) * sizeof(sljit_sw)); if (extrasize == 2) { OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); @@ -5980,9 +7771,9 @@ if (opcode == OP_ASSERT || opcode == OP_ASSERTBACK) JUMPHERE(brajump); if (framesize >= 0) { - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_MEM1(STACK_TOP), framesize * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-framesize - 1)); } set_jumps(backtrack->common.topbacktracks, LABEL()); } @@ -5992,14 +7783,16 @@ else /* AssertNot is successful. */ if (framesize < 0) { - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); + if (extrasize > 0) + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); + if (bra != OP_BRA) { if (extrasize == 2) free_stack(common, 1); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); } - else + else if (extrasize > 0) free_stack(common, extrasize); } else @@ -6014,7 +7807,7 @@ else } else free_stack(common, framesize + extrasize); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, TMP1, 0); } if (bra == OP_BRAZERO) @@ -6055,33 +7848,35 @@ int stacksize; if (framesize < 0) { if (framesize == no_frame) - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); else { stacksize = needs_control_head ? 1 : 0; if (ket != OP_KET || has_alternatives) stacksize++; - free_stack(common, stacksize); + + if (stacksize > 0) + free_stack(common, stacksize); } if (needs_control_head) - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), (ket != OP_KET || has_alternatives) ? sizeof(sljit_sw) : 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), (ket != OP_KET || has_alternatives) ? STACK(-2) : STACK(-1)); /* TMP2 which is set here used by OP_KETRMAX below. */ if (ket == OP_KETRMAX) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), 0); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(-1)); else if (ket == OP_KETRMIN) { /* Move the STR_PTR to the private_data_ptr. */ - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_MEM1(STACK_TOP), 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-1)); } } else { stacksize = (ket != OP_KET || has_alternatives) ? 
2 : 1; - OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_IMM, (framesize + stacksize) * sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + stacksize) * sizeof(sljit_sw)); if (needs_control_head) - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(-1)); if (ket == OP_KETRMAX) { @@ -6090,7 +7885,7 @@ else } } if (needs_control_head) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, TMP1, 0); } static SLJIT_INLINE int match_capture_common(compiler_common *common, int stacksize, int offset, int private_data_ptr) @@ -6099,20 +7894,20 @@ DEFINE_COMPILER; if (common->capture_last_ptr != 0) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr, SLJIT_IMM, offset >> 1); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr, SLJIT_IMM, offset >> 1); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize), TMP1, 0); stacksize++; } if (common->optimized_cbracket[offset >> 1] == 0) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset)); - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize), TMP1, 0); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize + 1), TMP2, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), STR_PTR, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset), TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset), TMP1, 0); stacksize += 2; } return stacksize; @@ -6235,7 +8030,7 @@ cc += GET(cc, 1); has_alternatives = *cc == OP_ALT; if (SLJIT_UNLIKELY(opcode == OP_COND || opcode == OP_SCOND)) - has_alternatives = (*matchingpath == OP_RREF || *matchingpath == OP_DNRREF) ? FALSE : TRUE; + has_alternatives = (*matchingpath == OP_RREF || *matchingpath == OP_DNRREF || *matchingpath == OP_FAIL) ? FALSE : TRUE; if (SLJIT_UNLIKELY(opcode == OP_COND) && (*cc == OP_KETRMAX || *cc == OP_KETRMIN)) opcode = OP_SCOND; @@ -6296,13 +8091,13 @@ if (bra == OP_BRAMINZERO) if (ket != OP_KETRMIN) { free_stack(common, 1); - braminzero = CMP(SLJIT_C_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); + braminzero = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); } else { if (opcode == OP_ONCE || opcode >= OP_SBRA) { - jump = CMP(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); + jump = CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(1)); /* Nothing stored during the first run. */ skip = JUMP(SLJIT_JUMP); @@ -6311,19 +8106,19 @@ if (bra == OP_BRAMINZERO) if (opcode != OP_ONCE || BACKTRACK_AS(bracket_backtrack)->u.framesize < 0) { /* When we come from outside, private_data_ptr contains the previous STR_PTR. 
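This is the OP_BRAMINZERO entry path, i.e. a lazily repeated group such as (...)?? where the zero-repeat branch is tried before the body. A compact control-flow model of that ordering (function pointers stand in for the compiled matching paths; illustrative only):

static int braminzero_model(int (*match_rest)(const char *),
  int (*match_group)(const char *, const char **), const char *p)
{
const char *after;
if (match_rest(p)) return 1;              /* first try: skip the group */
if (!match_group(p, &after)) return 0;    /* on backtrack: enter the group */
return match_rest(after);
}
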
*/ - braminzero = CMP(SLJIT_C_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + braminzero = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); } else { /* Except when the whole stack frame must be saved. */ - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); - braminzero = CMP(SLJIT_C_EQUAL, STR_PTR, 0, SLJIT_MEM1(TMP1), (BACKTRACK_AS(bracket_backtrack)->u.framesize + 1) * sizeof(sljit_sw)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); + braminzero = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_MEM1(TMP1), STACK(-BACKTRACK_AS(bracket_backtrack)->u.framesize - 2)); } JUMPHERE(skip); } else { - jump = CMP(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); + jump = CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(1)); JUMPHERE(jump); } @@ -6332,7 +8127,7 @@ if (bra == OP_BRAMINZERO) if (repeat_type != 0) { - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, SLJIT_IMM, repeat_count); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_IMM, repeat_count); if (repeat_type == OP_EXACT) rmax_label = LABEL(); } @@ -6353,7 +8148,7 @@ if (opcode == OP_ONCE) stacksize = 0; if (needs_control_head) { - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); stacksize++; } @@ -6364,12 +8159,12 @@ if (opcode == OP_ONCE) { stacksize += 2; if (!needs_control_head) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); } else { if (BACKTRACK_AS(bracket_backtrack)->u.framesize == no_frame) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, STACK_TOP, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STACK_TOP, 0); if (ket == OP_KETRMAX || has_alternatives) stacksize++; } @@ -6387,10 +8182,10 @@ if (opcode == OP_ONCE) if (ket == OP_KETRMIN) { if (needs_control_head) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize), STR_PTR, 0); if (BACKTRACK_AS(bracket_backtrack)->u.framesize == no_frame) - OP2(SLJIT_SUB, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, STACK_TOP, 0, SLJIT_IMM, needs_control_head ? (2 * sizeof(sljit_sw)) : sizeof(sljit_sw)); + OP2(SLJIT_ADD, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STACK_TOP, 0, SLJIT_IMM, needs_control_head ? (2 * sizeof(sljit_sw)) : sizeof(sljit_sw)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize + 1), TMP2, 0); } else if (ket == OP_KETRMAX || has_alternatives) @@ -6407,20 +8202,20 @@ if (opcode == OP_ONCE) if (needs_control_head) OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), TMP2, 0); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); - OP2(SLJIT_SUB, TMP2, 0, STACK_TOP, 0, SLJIT_IMM, stacksize * sizeof(sljit_sw)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); + OP2(SLJIT_ADD, TMP2, 0, STACK_TOP, 0, SLJIT_IMM, stacksize * sizeof(sljit_sw)); stacksize = needs_control_head ? 
1 : 0; if (ket != OP_KET || has_alternatives) { OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize), STR_PTR, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, TMP2, 0); stacksize++; OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize), TMP1, 0); } else { - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, TMP2, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize), TMP1, 0); } init_frame(common, ccbegin, NULL, BACKTRACK_AS(bracket_backtrack)->u.framesize + stacksize, stacksize + 1, FALSE); @@ -6433,26 +8228,26 @@ else if (opcode == OP_CBRA || opcode == OP_SCBRA) { SLJIT_ASSERT(private_data_ptr == OVECTOR(offset)); allocate_stack(common, 2); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr + sizeof(sljit_sw)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, STR_PTR, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr + sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STR_PTR, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), TMP1, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), TMP2, 0); } else { - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); allocate_stack(common, 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STR_PTR, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), TMP2, 0); } } else if (opcode == OP_SBRA || opcode == OP_SCOND) { /* Saving the previous value. 
*/ - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); allocate_stack(common, 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STR_PTR, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), TMP2, 0); } else if (has_alternatives) @@ -6469,7 +8264,7 @@ if (opcode == OP_COND || opcode == OP_SCOND) { SLJIT_ASSERT(has_alternatives); add_jump(compiler, &(BACKTRACK_AS(bracket_backtrack)->u.condfailed), - CMP(SLJIT_C_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(GET2(matchingpath, 1) << 1), SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(1))); + CMP(SLJIT_EQUAL, SLJIT_MEM1(SLJIT_SP), OVECTOR(GET2(matchingpath, 1) << 1), SLJIT_MEM1(SLJIT_SP), OVECTOR(1))); matchingpath += 1 + IMM2_SIZE; } else if (*matchingpath == OP_DNCREF) @@ -6479,27 +8274,29 @@ if (opcode == OP_COND || opcode == OP_SCOND) i = GET2(matchingpath, 1 + IMM2_SIZE); slot = common->name_table + GET2(matchingpath, 1) * common->name_entry_size; OP1(SLJIT_MOV, TMP3, 0, STR_PTR, 0); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(1)); - OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(GET2(slot, 0) << 1), TMP1, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1)); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(GET2(slot, 0) << 1), TMP1, 0); slot += common->name_entry_size; i--; while (i-- > 0) { - OP2(SLJIT_SUB, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(GET2(slot, 0) << 1), TMP1, 0); - OP2(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, STR_PTR, 0); + OP2(SLJIT_SUB, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(GET2(slot, 0) << 1), TMP1, 0); + OP2(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, TMP2, 0, STR_PTR, 0); slot += common->name_entry_size; } OP1(SLJIT_MOV, STR_PTR, 0, TMP3, 0); - add_jump(compiler, &(BACKTRACK_AS(bracket_backtrack)->u.condfailed), JUMP(SLJIT_C_ZERO)); + add_jump(compiler, &(BACKTRACK_AS(bracket_backtrack)->u.condfailed), JUMP(SLJIT_ZERO)); matchingpath += 1 + 2 * IMM2_SIZE; } - else if (*matchingpath == OP_RREF || *matchingpath == OP_DNRREF) + else if (*matchingpath == OP_RREF || *matchingpath == OP_DNRREF || *matchingpath == OP_FAIL) { /* Never has other case. */ BACKTRACK_AS(bracket_backtrack)->u.condfailed = NULL; SLJIT_ASSERT(!has_alternatives); - if (*matchingpath == OP_RREF) + if (*matchingpath == OP_FAIL) + stacksize = 0; + else if (*matchingpath == OP_RREF) { stacksize = GET2(matchingpath, 1); if (common->currententry == NULL) @@ -6573,7 +8370,7 @@ stacksize = 0; if (repeat_type == OP_MINUPTO) { /* We need to preserve the counter. TMP2 will be used below. 
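The counter in question belongs to a bounded lazy repeat such as (...){2,5}?: it lives in the repeat_ptr slot and must be staged through the backtracking stack because TMP2 is about to be reused. A C model of the loop it drives (illustrative, not the emitted code):

static int minupto_model(int min, int max,
  int (*match_body)(const char **), int (*match_rest)(const char *),
  const char *p)
{
int count = 0;      /* the preserved counter */
while (count < min)
  {
  if (!match_body(&p)) return 0;   /* obligatory iterations */
  count++;
  }
for (;;)
  {
  if (match_rest(p)) return 1;     /* lazy: try the continuation first */
  if (count >= max || !match_body(&p)) return 0;
  count++;
  }
}
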
*/ - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), repeat_ptr); stacksize++; } if (ket != OP_KET || bra != OP_BRA) @@ -6623,7 +8420,7 @@ if (has_alternatives) if (offset != 0 && common->optimized_cbracket[offset >> 1] != 0) { SLJIT_ASSERT(private_data_ptr == OVECTOR(offset + 0)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), STR_PTR, 0); } if (ket == OP_KETRMAX) @@ -6632,8 +8429,8 @@ if (ket == OP_KETRMAX) { if (has_alternatives) BACKTRACK_AS(bracket_backtrack)->alternative_matchingpath = LABEL(); - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, SLJIT_IMM, 1); - JUMPTO(SLJIT_C_NOT_ZERO, rmax_label); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, rmax_label); /* Drop STR_PTR for greedy plus quantifier. */ if (opcode != OP_ONCE) free_stack(common, 1); @@ -6645,14 +8442,14 @@ if (ket == OP_KETRMAX) /* Checking zero-length iteration. */ if (opcode != OP_ONCE) { - CMPTO(SLJIT_C_NOT_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, STR_PTR, 0, rmax_label); + CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STR_PTR, 0, rmax_label); /* Drop STR_PTR for greedy plus quantifier. */ if (bra != OP_BRAZERO) free_stack(common, 1); } else /* TMP2 must contain the starting STR_PTR. */ - CMPTO(SLJIT_C_NOT_EQUAL, TMP2, 0, STR_PTR, 0, rmax_label); + CMPTO(SLJIT_NOT_EQUAL, TMP2, 0, STR_PTR, 0, rmax_label); } else JUMPTO(SLJIT_JUMP, rmax_label); @@ -6661,13 +8458,14 @@ if (ket == OP_KETRMAX) if (repeat_type == OP_EXACT) { - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, SLJIT_IMM, 1); - JUMPTO(SLJIT_C_NOT_ZERO, rmax_label); + count_match(common); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, rmax_label); } else if (repeat_type == OP_UPTO) { /* We need to preserve the counter. */ - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), repeat_ptr); allocate_stack(common, 1); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), TMP2, 0); } @@ -6687,7 +8485,7 @@ if (bra == OP_BRAMINZERO) framesize is < 0, OP_ONCE will do the release itself. */ if (opcode == OP_ONCE && BACKTRACK_AS(bracket_backtrack)->u.framesize >= 0) { - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); } else if (ket == OP_KETRMIN && opcode != OP_ONCE) @@ -6704,9 +8502,13 @@ while (*cc == OP_ALT) cc += GET(cc, 1); cc += 1 + LINK_SIZE; -/* Temporarily encoding the needs_control_head in framesize. */ if (opcode == OP_ONCE) - BACKTRACK_AS(bracket_backtrack)->u.framesize = (BACKTRACK_AS(bracket_backtrack)->u.framesize << 1) | (needs_control_head ? 1 : 0); + { + /* We temporarily encode the needs_control_head in the lowest bit. + Note: on the target architectures of SLJIT the ((x << 1) >> 1) returns + the same value for small signed numbers (including negative numbers). */ + BACKTRACK_AS(bracket_backtrack)->u.framesize = ((unsigned int)BACKTRACK_AS(bracket_backtrack)->u.framesize << 1) | (needs_control_head ? 
1 : 0); + } return cc + repeat_length; } @@ -6757,7 +8559,7 @@ switch(opcode) break; default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); break; } @@ -6782,20 +8584,20 @@ if (framesize < 0) BACKTRACK_AS(bracketpos_backtrack)->stacksize = stacksize; allocate_stack(common, stacksize); if (framesize == no_frame) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, STACK_TOP, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STACK_TOP, 0); stack = 0; if (offset != 0) { stack = 2; - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset)); - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), TMP1, 0); if (common->capture_last_ptr != 0) - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), TMP2, 0); if (needs_control_head) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); if (common->capture_last_ptr != 0) { OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(2), TMP1, 0); @@ -6805,7 +8607,7 @@ if (framesize < 0) else { if (needs_control_head) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0); stack = 1; } @@ -6832,10 +8634,10 @@ else BACKTRACK_AS(bracketpos_backtrack)->stacksize = stacksize; allocate_stack(common, stacksize); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); if (needs_control_head) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr); - OP2(SLJIT_SUB, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, STACK_TOP, 0, SLJIT_IMM, -STACK(stacksize - 1)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); + OP2(SLJIT_ADD, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STACK_TOP, 0, SLJIT_IMM, stacksize * sizeof(sljit_sw)); stack = 0; if (!zero) @@ -6859,7 +8661,7 @@ else } if (offset != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), cbraprivptr, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), cbraprivptr, STR_PTR, 0); loop = LABEL(); while (*cc != OP_KETRPOS) @@ -6875,16 +8677,16 @@ while (*cc != OP_KETRPOS) if (framesize < 0) { if (framesize == no_frame) - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); if (offset != 0) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), cbraprivptr); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), STR_PTR, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), cbraprivptr, STR_PTR, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), cbraprivptr); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), cbraprivptr, STR_PTR, 0); if (common->capture_last_ptr != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr, SLJIT_IMM, offset >> 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset), TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), 
common->capture_last_ptr, SLJIT_IMM, offset >> 1); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset), TMP1, 0); } else { @@ -6893,8 +8695,12 @@ while (*cc != OP_KETRPOS) OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0); } + /* Even if the match is empty, we need to reset the control head. */ + if (needs_control_head) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(stack)); + if (opcode == OP_SBRAPOS || opcode == OP_SCBRAPOS) - add_jump(compiler, &emptymatch, CMP(SLJIT_C_EQUAL, TMP1, 0, STR_PTR, 0)); + add_jump(compiler, &emptymatch, CMP(SLJIT_EQUAL, TMP1, 0, STR_PTR, 0)); if (!zero) OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize - 1), SLJIT_IMM, 0); @@ -6903,25 +8709,29 @@ while (*cc != OP_KETRPOS) { if (offset != 0) { - OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_IMM, stacksize * sizeof(sljit_sw)); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), cbraprivptr); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), STR_PTR, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), cbraprivptr, STR_PTR, 0); + OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, stacksize * sizeof(sljit_sw)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), cbraprivptr); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), cbraprivptr, STR_PTR, 0); if (common->capture_last_ptr != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr, SLJIT_IMM, offset >> 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset), TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr, SLJIT_IMM, offset >> 1); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset), TMP1, 0); } else { - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); - OP2(SLJIT_ADD, STACK_TOP, 0, TMP2, 0, SLJIT_IMM, stacksize * sizeof(sljit_sw)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); + OP2(SLJIT_SUB, STACK_TOP, 0, TMP2, 0, SLJIT_IMM, stacksize * sizeof(sljit_sw)); if (opcode == OP_SBRAPOS) - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), (framesize + 1) * sizeof(sljit_sw)); - OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), (framesize + 1) * sizeof(sljit_sw), STR_PTR, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), STACK(-framesize - 2)); + OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), STACK(-framesize - 2), STR_PTR, 0); } + /* Even if the match is empty, we need to reset the control head. */ + if (needs_control_head) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(stack)); + if (opcode == OP_SBRAPOS || opcode == OP_SCBRAPOS) - add_jump(compiler, &emptymatch, CMP(SLJIT_C_EQUAL, TMP1, 0, STR_PTR, 0)); + add_jump(compiler, &emptymatch, CMP(SLJIT_EQUAL, TMP1, 0, STR_PTR, 0)); if (!zero) { @@ -6932,9 +8742,6 @@ while (*cc != OP_KETRPOS) } } - if (needs_control_head) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(stack)); - JUMPTO(SLJIT_JUMP, loop); flush_stubs(common); @@ -6946,7 +8753,7 @@ while (*cc != OP_KETRPOS) if (framesize < 0) { if (offset != 0) - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), cbraprivptr); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), cbraprivptr); else OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); } @@ -6956,13 +8763,13 @@ while (*cc != OP_KETRPOS) { /* Last alternative. 
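OP_KETRPOS closes a possessive bracket, whose defining property is that once the group has matched, no backtracking into it is allowed. The behavioral difference is easy to observe through the public API:

#include <stdio.h>
#include <pcre.h>

int main(void)
{
const char *err; int erroff; int ov[3];
pcre *greedy = pcre_compile("a*a", 0, &err, &erroff, NULL);
pcre *possessive = pcre_compile("a*+a", 0, &err, &erroff, NULL);
printf("greedy: %d\n", pcre_exec(greedy, NULL, "aaa", 3, 0, 0, ov, 3));         /* 1: a* gives one back */
printf("possessive: %d\n", pcre_exec(possessive, NULL, "aaa", 3, 0, 0, ov, 3)); /* -1: PCRE_ERROR_NOMATCH */
pcre_free(greedy);
pcre_free(possessive);
return 0;
}
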
*/ if (*cc == OP_KETRPOS) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), cbraprivptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), cbraprivptr); } else { - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(TMP2), (framesize + 1) * sizeof(sljit_sw)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(TMP2), STACK(-framesize - 2)); } } @@ -6977,9 +8784,9 @@ backtrack->topbacktracks = NULL; if (!zero) { if (framesize < 0) - add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_C_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(stacksize - 1), SLJIT_IMM, 0)); + add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(stacksize - 1), SLJIT_IMM, 0)); else /* TMP2 is set to [private_data_ptr] above. */ - add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_C_NOT_EQUAL, SLJIT_MEM1(TMP2), (stacksize - 1) * sizeof(sljit_sw), SLJIT_IMM, 0)); + add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_NOT_EQUAL, SLJIT_MEM1(TMP2), STACK(-stacksize), SLJIT_IMM, 0)); } /* None of them matched. */ @@ -6988,11 +8795,13 @@ count_match(common); return cc + 1 + LINK_SIZE; } -static SLJIT_INLINE pcre_uchar *get_iterator_parameters(compiler_common *common, pcre_uchar *cc, pcre_uchar *opcode, pcre_uchar *type, int *max, int *min, pcre_uchar **end) +static SLJIT_INLINE pcre_uchar *get_iterator_parameters(compiler_common *common, pcre_uchar *cc, pcre_uchar *opcode, pcre_uchar *type, sljit_u32 *max, sljit_u32 *exact, pcre_uchar **end) { int class_len; *opcode = *cc; +*exact = 0; + if (*opcode >= OP_STAR && *opcode <= OP_POSUPTO) { cc++; @@ -7020,7 +8829,7 @@ else if (*opcode >= OP_TYPESTAR && *opcode <= OP_TYPEPOSUPTO) { cc++; *opcode -= OP_TYPESTAR - OP_STAR; - *type = 0; + *type = OP_END; } else { @@ -7029,60 +8838,105 @@ else cc++; class_len = (*type < OP_XCLASS) ? (int)(1 + (32 / sizeof(pcre_uchar))) : GET(cc, 0); *opcode = cc[class_len - 1]; + if (*opcode >= OP_CRSTAR && *opcode <= OP_CRMINQUERY) { *opcode -= OP_CRSTAR - OP_STAR; - if (end != NULL) - *end = cc + class_len; + *end = cc + class_len; + + if (*opcode == OP_PLUS || *opcode == OP_MINPLUS) + { + *exact = 1; + *opcode -= OP_PLUS - OP_STAR; + } } else if (*opcode >= OP_CRPOSSTAR && *opcode <= OP_CRPOSQUERY) { *opcode -= OP_CRPOSSTAR - OP_POSSTAR; - if (end != NULL) - *end = cc + class_len; + *end = cc + class_len; + + if (*opcode == OP_POSPLUS) + { + *exact = 1; + *opcode = OP_POSSTAR; + } } else { SLJIT_ASSERT(*opcode == OP_CRRANGE || *opcode == OP_CRMINRANGE || *opcode == OP_CRPOSRANGE); *max = GET2(cc, (class_len + IMM2_SIZE)); - *min = GET2(cc, class_len); + *exact = GET2(cc, class_len); - if (*min == 0) + if (*max == 0) { - SLJIT_ASSERT(*max != 0); - *opcode = (*opcode == OP_CRRANGE) ? OP_UPTO : (*opcode == OP_CRMINRANGE ? 
OP_MINUPTO : OP_POSUPTO); + if (*opcode == OP_CRPOSRANGE) + *opcode = OP_POSSTAR; + else + *opcode -= OP_CRRANGE - OP_STAR; } - if (*max == *min) - *opcode = OP_EXACT; - - if (end != NULL) - *end = cc + class_len + 2 * IMM2_SIZE; + else + { + *max -= *exact; + if (*max == 0) + *opcode = OP_EXACT; + else if (*max == 1) + { + if (*opcode == OP_CRPOSRANGE) + *opcode = OP_POSQUERY; + else + *opcode -= OP_CRRANGE - OP_QUERY; + } + else + { + if (*opcode == OP_CRPOSRANGE) + *opcode = OP_POSUPTO; + else + *opcode -= OP_CRRANGE - OP_UPTO; + } + } + *end = cc + class_len + 2 * IMM2_SIZE; } return cc; } -if (*opcode == OP_UPTO || *opcode == OP_MINUPTO || *opcode == OP_EXACT || *opcode == OP_POSUPTO) +switch(*opcode) { + case OP_EXACT: + *exact = GET2(cc, 0); + cc += IMM2_SIZE; + break; + + case OP_PLUS: + case OP_MINPLUS: + *exact = 1; + *opcode -= OP_PLUS - OP_STAR; + break; + + case OP_POSPLUS: + *exact = 1; + *opcode = OP_POSSTAR; + break; + + case OP_UPTO: + case OP_MINUPTO: + case OP_POSUPTO: *max = GET2(cc, 0); cc += IMM2_SIZE; + break; } -if (*type == 0) +if (*type == OP_END) { *type = *cc; - if (end != NULL) - *end = next_opcode(common, cc); + *end = next_opcode(common, cc); cc++; return cc; } -if (end != NULL) - { - *end = cc + 1; +*end = cc + 1; #ifdef SUPPORT_UTF - if (common->utf && HAS_EXTRALEN(*cc)) *end += GET_EXTRALEN(*cc); +if (common->utf && HAS_EXTRALEN(*cc)) *end += GET_EXTRALEN(*cc); #endif - } return cc; } @@ -7092,95 +8946,110 @@ DEFINE_COMPILER; backtrack_common *backtrack; pcre_uchar opcode; pcre_uchar type; -int max = -1, min = -1; -pcre_uchar* end; -jump_list *nomatch = NULL; +sljit_u32 max = 0, exact; +BOOL fast_fail; +sljit_s32 fast_str_ptr; +BOOL charpos_enabled; +pcre_uchar charpos_char; +unsigned int charpos_othercasebit; +pcre_uchar *end; +jump_list *no_match = NULL; +jump_list *no_char1_match = NULL; struct sljit_jump *jump = NULL; struct sljit_label *label; int private_data_ptr = PRIVATE_DATA(cc); -int base = (private_data_ptr == 0) ? SLJIT_MEM1(STACK_TOP) : SLJIT_MEM1(SLJIT_LOCALS_REG); +int base = (private_data_ptr == 0) ? SLJIT_MEM1(STACK_TOP) : SLJIT_MEM1(SLJIT_SP); int offset0 = (private_data_ptr == 0) ? STACK(0) : private_data_ptr; int offset1 = (private_data_ptr == 0) ? 
STACK(1) : private_data_ptr + (int)sizeof(sljit_sw); int tmp_base, tmp_offset; -PUSH_BACKTRACK(sizeof(iterator_backtrack), cc, NULL); +PUSH_BACKTRACK(sizeof(char_iterator_backtrack), cc, NULL); -cc = get_iterator_parameters(common, cc, &opcode, &type, &max, &min, &end); +fast_str_ptr = PRIVATE_DATA(cc + 1); +fast_fail = TRUE; -switch(type) +SLJIT_ASSERT(common->fast_forward_bc_ptr == NULL || fast_str_ptr == 0 || cc == common->fast_forward_bc_ptr); + +if (cc == common->fast_forward_bc_ptr) + fast_fail = FALSE; +else if (common->fast_fail_start_ptr == 0) + fast_str_ptr = 0; + +SLJIT_ASSERT(common->fast_forward_bc_ptr != NULL || fast_str_ptr == 0 + || (fast_str_ptr >= common->fast_fail_start_ptr && fast_str_ptr <= common->fast_fail_end_ptr)); + +cc = get_iterator_parameters(common, cc, &opcode, &type, &max, &exact, &end); + +if (type != OP_EXTUNI) { - case OP_NOT_DIGIT: - case OP_DIGIT: - case OP_NOT_WHITESPACE: - case OP_WHITESPACE: - case OP_NOT_WORDCHAR: - case OP_WORDCHAR: - case OP_ANY: - case OP_ALLANY: - case OP_ANYBYTE: - case OP_ANYNL: - case OP_NOT_HSPACE: - case OP_HSPACE: - case OP_NOT_VSPACE: - case OP_VSPACE: - case OP_CHAR: - case OP_CHARI: - case OP_NOT: - case OP_NOTI: - case OP_CLASS: - case OP_NCLASS: tmp_base = TMP3; tmp_offset = 0; - break; + } +else + { + tmp_base = SLJIT_MEM1(SLJIT_SP); + tmp_offset = POSSESSIVE0; + } - default: - SLJIT_ASSERT_STOP(); - /* Fall through. */ +if (fast_fail && fast_str_ptr != 0) + add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_LESS_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), fast_str_ptr)); - case OP_EXTUNI: - case OP_XCLASS: - case OP_NOTPROP: - case OP_PROP: - tmp_base = SLJIT_MEM1(SLJIT_LOCALS_REG); - tmp_offset = POSSESSIVE0; - break; +/* Handle fixed part first. */ +if (exact > 1) + { + SLJIT_ASSERT(fast_str_ptr == 0); + if (common->mode == JIT_COMPILE +#ifdef SUPPORT_UTF + && !common->utf +#endif + && type != OP_ANYNL && type != OP_EXTUNI) + { + OP2(SLJIT_ADD, TMP1, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(exact)); + add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_GREATER, TMP1, 0, STR_END, 0)); + OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, exact); + label = LABEL(); + compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks, FALSE); + OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, label); + } + else + { + OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, exact); + label = LABEL(); + compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks, TRUE); + OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, label); + } } +else if (exact == 1) + compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks, TRUE); switch(opcode) { case OP_STAR: - case OP_PLUS: case OP_UPTO: - case OP_CRRANGE: + SLJIT_ASSERT(fast_str_ptr == 0 || opcode == OP_STAR); + if (type == OP_ANYNL || type == OP_EXTUNI) { SLJIT_ASSERT(private_data_ptr == 0); - if (opcode == OP_STAR || opcode == OP_UPTO) - { - allocate_stack(common, 2); - OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), SLJIT_IMM, 0); - } - else - { - allocate_stack(common, 1); - OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); - } + SLJIT_ASSERT(fast_str_ptr == 0); - if (opcode == OP_UPTO || opcode == OP_CRRANGE) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE0, SLJIT_IMM, 0); + allocate_stack(common, 2); + OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), 
STACK(0), STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), SLJIT_IMM, 0); + + if (opcode == OP_UPTO) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), POSSESSIVE0, SLJIT_IMM, max); label = LABEL(); - compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks); - if (opcode == OP_UPTO || opcode == OP_CRRANGE) + compile_char1_matchingpath(common, type, cc, &BACKTRACK_AS(char_iterator_backtrack)->u.backtracks, TRUE); + if (opcode == OP_UPTO) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE0); - OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); - if (opcode == OP_CRRANGE && min > 0) - CMPTO(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, min, label); - if (opcode == OP_UPTO || (opcode == OP_CRRANGE && max > 0)) - jump = CMP(SLJIT_C_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, max); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE0, TMP1, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), POSSESSIVE0); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); + jump = JUMP(SLJIT_ZERO); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), POSSESSIVE0, TMP1, 0); } /* We cannot use TMP3 because of this allocate_stack. */ @@ -7192,139 +9061,273 @@ switch(opcode) } else { - if (opcode == OP_PLUS) - compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks); - if (private_data_ptr == 0) - allocate_stack(common, 2); - OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); - if (opcode <= OP_PLUS) + charpos_enabled = FALSE; + charpos_char = 0; + charpos_othercasebit = 0; + + if ((type != OP_CHAR && type != OP_CHARI) && (*end == OP_CHAR || *end == OP_CHARI)) + { + charpos_enabled = TRUE; +#ifdef SUPPORT_UTF + charpos_enabled = !common->utf || !HAS_EXTRALEN(end[1]); +#endif + if (charpos_enabled && *end == OP_CHARI && char_has_othercase(common, end + 1)) + { + charpos_othercasebit = char_get_othercase_bit(common, end + 1); + if (charpos_othercasebit == 0) + charpos_enabled = FALSE; + } + + if (charpos_enabled) + { + charpos_char = end[1]; + /* Consume the OP_CHAR opcode. */ + end += 2; +#if defined COMPILE_PCRE8 + SLJIT_ASSERT((charpos_othercasebit >> 8) == 0); +#elif defined COMPILE_PCRE16 || defined COMPILE_PCRE32 + SLJIT_ASSERT((charpos_othercasebit >> 9) == 0); + if ((charpos_othercasebit & 0x100) != 0) + charpos_othercasebit = (charpos_othercasebit & 0xff) << 8; +#endif + if (charpos_othercasebit != 0) + charpos_char |= charpos_othercasebit; + + BACKTRACK_AS(char_iterator_backtrack)->u.charpos.enabled = TRUE; + BACKTRACK_AS(char_iterator_backtrack)->u.charpos.chr = charpos_char; + BACKTRACK_AS(char_iterator_backtrack)->u.charpos.othercasebit = charpos_othercasebit; + } + } + + if (charpos_enabled) + { + if (opcode == OP_UPTO) + OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, max + 1); + + /* Search the first instance of charpos_char.
*/ + jump = JUMP(SLJIT_JUMP); + label = LABEL(); + if (opcode == OP_UPTO) + { + OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); + add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_ZERO)); + } + compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks, FALSE); + if (fast_str_ptr != 0) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), fast_str_ptr, STR_PTR, 0); + JUMPHERE(jump); + + detect_partial_match(common, &backtrack->topbacktracks); + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + if (charpos_othercasebit != 0) + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, charpos_othercasebit); + CMPTO(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, charpos_char, label); + + if (private_data_ptr == 0) + allocate_stack(common, 2); + OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); OP1(SLJIT_MOV, base, offset1, STR_PTR, 0); - else - OP1(SLJIT_MOV, base, offset1, SLJIT_IMM, 1); - label = LABEL(); - compile_char1_matchingpath(common, type, cc, &nomatch); - OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); - if (opcode <= OP_PLUS) - JUMPTO(SLJIT_JUMP, label); - else if (opcode == OP_CRRANGE && max == 0) + if (opcode == OP_UPTO) + { + OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); + add_jump(compiler, &no_match, JUMP(SLJIT_ZERO)); + } + + /* Search the last instance of charpos_char. */ + label = LABEL(); + compile_char1_matchingpath(common, type, cc, &no_match, FALSE); + if (fast_str_ptr != 0) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), fast_str_ptr, STR_PTR, 0); + detect_partial_match(common, &no_match); + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); + if (charpos_othercasebit != 0) + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, charpos_othercasebit); + if (opcode == OP_STAR) + { + CMPTO(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, charpos_char, label); + OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); + } + else + { + jump = CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, charpos_char); + OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); + JUMPHERE(jump); + } + + if (opcode == OP_UPTO) + { + OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, label); + } + else + JUMPTO(SLJIT_JUMP, label); + + set_jumps(no_match, LABEL()); + OP1(SLJIT_MOV, STR_PTR, 0, base, offset0); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); + } +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 + else if (common->utf) { - OP2(SLJIT_ADD, base, offset1, base, offset1, SLJIT_IMM, 1); - JUMPTO(SLJIT_JUMP, label); + if (private_data_ptr == 0) + allocate_stack(common, 2); + + OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); + OP1(SLJIT_MOV, base, offset1, STR_PTR, 0); + + if (opcode == OP_UPTO) + OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, max); + + label = LABEL(); + compile_char1_matchingpath(common, type, cc, &no_match, TRUE); + OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); + + if (opcode == OP_UPTO) + { + OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, label); + } + else + JUMPTO(SLJIT_JUMP, label); + + set_jumps(no_match, LABEL()); + OP1(SLJIT_MOV, STR_PTR, 0, base, offset0); + if (fast_str_ptr != 0) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), fast_str_ptr, STR_PTR, 0); } +#endif else { - OP1(SLJIT_MOV, TMP1, 0, base, offset1); - OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); - OP1(SLJIT_MOV, base, offset1, TMP1, 0); - CMPTO(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, max + 1, label); + if 
(private_data_ptr == 0) + allocate_stack(common, 2); + + OP1(SLJIT_MOV, base, offset1, STR_PTR, 0); + if (opcode == OP_UPTO) + OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, max); + + label = LABEL(); + detect_partial_match(common, &no_match); + compile_char1_matchingpath(common, type, cc, &no_char1_match, FALSE); + if (opcode == OP_UPTO) + { + OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, label); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + } + else + JUMPTO(SLJIT_JUMP, label); + + set_jumps(no_char1_match, LABEL()); + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + set_jumps(no_match, LABEL()); + OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); + if (fast_str_ptr != 0) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), fast_str_ptr, STR_PTR, 0); } - set_jumps(nomatch, LABEL()); - if (opcode == OP_CRRANGE) - add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_C_LESS, base, offset1, SLJIT_IMM, min + 1)); - OP1(SLJIT_MOV, STR_PTR, 0, base, offset0); } - BACKTRACK_AS(iterator_backtrack)->matchingpath = LABEL(); + BACKTRACK_AS(char_iterator_backtrack)->matchingpath = LABEL(); break; case OP_MINSTAR: - case OP_MINPLUS: - if (opcode == OP_MINPLUS) - compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks); if (private_data_ptr == 0) allocate_stack(common, 1); OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); - BACKTRACK_AS(iterator_backtrack)->matchingpath = LABEL(); + BACKTRACK_AS(char_iterator_backtrack)->matchingpath = LABEL(); + if (fast_str_ptr != 0) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), fast_str_ptr, STR_PTR, 0); break; case OP_MINUPTO: - case OP_CRMINRANGE: + SLJIT_ASSERT(fast_str_ptr == 0); if (private_data_ptr == 0) allocate_stack(common, 2); OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); - OP1(SLJIT_MOV, base, offset1, SLJIT_IMM, 1); - if (opcode == OP_CRMINRANGE) - add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_JUMP)); - BACKTRACK_AS(iterator_backtrack)->matchingpath = LABEL(); + OP1(SLJIT_MOV, base, offset1, SLJIT_IMM, max + 1); + BACKTRACK_AS(char_iterator_backtrack)->matchingpath = LABEL(); break; case OP_QUERY: case OP_MINQUERY: + SLJIT_ASSERT(fast_str_ptr == 0); if (private_data_ptr == 0) allocate_stack(common, 1); OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); if (opcode == OP_QUERY) - compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks); - BACKTRACK_AS(iterator_backtrack)->matchingpath = LABEL(); + compile_char1_matchingpath(common, type, cc, &BACKTRACK_AS(char_iterator_backtrack)->u.backtracks, TRUE); + BACKTRACK_AS(char_iterator_backtrack)->matchingpath = LABEL(); break; case OP_EXACT: - OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, max); - label = LABEL(); - compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks); - OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); - JUMPTO(SLJIT_C_NOT_ZERO, label); break; case OP_POSSTAR: - case OP_POSPLUS: - case OP_POSUPTO: - if (opcode == OP_POSPLUS) - compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks); - if (opcode == OP_POSUPTO) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE1, SLJIT_IMM, max); - OP1(SLJIT_MOV, tmp_base, tmp_offset, STR_PTR, 0); - label = LABEL(); - compile_char1_matchingpath(common, type, cc, &nomatch); - OP1(SLJIT_MOV, tmp_base, tmp_offset, STR_PTR, 0); - if (opcode != OP_POSUPTO) - JUMPTO(SLJIT_JUMP, label); - else +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 + if (common->utf) { - OP2(SLJIT_SUB | 
SLJIT_SET_E, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE1, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE1, SLJIT_IMM, 1); - JUMPTO(SLJIT_C_NOT_ZERO, label); + OP1(SLJIT_MOV, tmp_base, tmp_offset, STR_PTR, 0); + label = LABEL(); + compile_char1_matchingpath(common, type, cc, &no_match, TRUE); + OP1(SLJIT_MOV, tmp_base, tmp_offset, STR_PTR, 0); + JUMPTO(SLJIT_JUMP, label); + set_jumps(no_match, LABEL()); + OP1(SLJIT_MOV, STR_PTR, 0, tmp_base, tmp_offset); + if (fast_str_ptr != 0) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), fast_str_ptr, STR_PTR, 0); + break; } - set_jumps(nomatch, LABEL()); - OP1(SLJIT_MOV, STR_PTR, 0, tmp_base, tmp_offset); - break; - - case OP_POSQUERY: - OP1(SLJIT_MOV, tmp_base, tmp_offset, STR_PTR, 0); - compile_char1_matchingpath(common, type, cc, &nomatch); - OP1(SLJIT_MOV, tmp_base, tmp_offset, STR_PTR, 0); - set_jumps(nomatch, LABEL()); - OP1(SLJIT_MOV, STR_PTR, 0, tmp_base, tmp_offset); - break; - - case OP_CRPOSRANGE: - /* Combination of OP_EXACT and OP_POSSTAR or OP_POSUPTO */ - OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, min); +#endif label = LABEL(); - compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks); - OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); - JUMPTO(SLJIT_C_NOT_ZERO, label); + detect_partial_match(common, &no_match); + compile_char1_matchingpath(common, type, cc, &no_char1_match, FALSE); + JUMPTO(SLJIT_JUMP, label); + set_jumps(no_char1_match, LABEL()); + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + set_jumps(no_match, LABEL()); + if (fast_str_ptr != 0) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), fast_str_ptr, STR_PTR, 0); + break; - if (max != 0) + case OP_POSUPTO: + SLJIT_ASSERT(fast_str_ptr == 0); +#if defined SUPPORT_UTF && !defined COMPILE_PCRE32 + if (common->utf) { - SLJIT_ASSERT(max - min > 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE1, SLJIT_IMM, max - min); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), POSSESSIVE1, STR_PTR, 0); + OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, max); + label = LABEL(); + compile_char1_matchingpath(common, type, cc, &no_match, TRUE); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), POSSESSIVE1, STR_PTR, 0); + OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, label); + set_jumps(no_match, LABEL()); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), POSSESSIVE1); + break; } - OP1(SLJIT_MOV, tmp_base, tmp_offset, STR_PTR, 0); +#endif + OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, max); label = LABEL(); - compile_char1_matchingpath(common, type, cc, &nomatch); + detect_partial_match(common, &no_match); + compile_char1_matchingpath(common, type, cc, &no_char1_match, FALSE); + OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, label); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + set_jumps(no_char1_match, LABEL()); + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + set_jumps(no_match, LABEL()); + break; + + case OP_POSQUERY: + SLJIT_ASSERT(fast_str_ptr == 0); OP1(SLJIT_MOV, tmp_base, tmp_offset, STR_PTR, 0); - if (max == 0) - JUMPTO(SLJIT_JUMP, label); - else - { - OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE1, SLJIT_MEM1(SLJIT_LOCALS_REG), POSSESSIVE1, SLJIT_IMM, 1); - JUMPTO(SLJIT_C_NOT_ZERO, label); - } - set_jumps(nomatch, LABEL()); + compile_char1_matchingpath(common, type, cc, &no_match, TRUE); + OP1(SLJIT_MOV, tmp_base, tmp_offset, STR_PTR, 0); + 
set_jumps(no_match, LABEL()); OP1(SLJIT_MOV, STR_PTR, 0, tmp_base, tmp_offset); break; default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); break; } @@ -7345,7 +9348,7 @@ if (*cc == OP_FAIL) return cc + 1; } -if (*cc == OP_ASSERT_ACCEPT || common->currententry != NULL) +if (*cc == OP_ASSERT_ACCEPT || common->currententry != NULL || !common->might_be_empty) { /* No need to check notempty conditions. */ if (common->accept_label == NULL) @@ -7356,22 +9359,22 @@ if (*cc == OP_ASSERT_ACCEPT || common->currententry != NULL) } if (common->accept_label == NULL) - add_jump(compiler, &common->accept, CMP(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0))); + add_jump(compiler, &common->accept, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0))); else - CMPTO(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0), common->accept_label); + CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0), common->accept_label); OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); -OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, notempty)); -add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); -OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, notempty_atstart)); +OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, notempty)); +add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); +OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, notempty_atstart)); if (common->accept_label == NULL) - add_jump(compiler, &common->accept, CMP(SLJIT_C_EQUAL, TMP2, 0, SLJIT_IMM, 0)); + add_jump(compiler, &common->accept, CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, 0)); else - CMPTO(SLJIT_C_EQUAL, TMP2, 0, SLJIT_IMM, 0, common->accept_label); + CMPTO(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, 0, common->accept_label); OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str)); if (common->accept_label == NULL) - add_jump(compiler, &common->accept, CMP(SLJIT_C_NOT_EQUAL, TMP2, 0, STR_PTR, 0)); + add_jump(compiler, &common->accept, CMP(SLJIT_NOT_EQUAL, TMP2, 0, STR_PTR, 0)); else - CMPTO(SLJIT_C_NOT_EQUAL, TMP2, 0, STR_PTR, 0, common->accept_label); + CMPTO(SLJIT_NOT_EQUAL, TMP2, 0, STR_PTR, 0, common->accept_label); add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_JUMP)); return cc + 1; } @@ -7387,11 +9390,11 @@ if (common->currententry != NULL) return cc + 1 + IMM2_SIZE; if (!optimized_cbracket) - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR_PRIV(offset)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR_PRIV(offset)); offset <<= 1; -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), STR_PTR, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), STR_PTR, 0); if (!optimized_cbracket) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset), TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset), TMP1, 0); return cc + 1 + IMM2_SIZE; } @@ -7418,7 +9421,7 @@ if (opcode == OP_PRUNE_ARG || opcode == OP_THEN_ARG) { OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, (sljit_sw)(cc + 2)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr, TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->mark_ptr, TMP2, 0); OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, mark_ptr), TMP2, 0); } @@ -7443,12 +9446,12 @@ BACKTRACK_AS(then_trap_backtrack)->framesize = get_framesize(common, cc, ccend, 
size = BACKTRACK_AS(then_trap_backtrack)->framesize; size = 3 + (size < 0 ? 0 : size); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr); +OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); allocate_stack(common, size); if (size > 3) - OP2(SLJIT_SUB, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, STACK_TOP, 0, SLJIT_IMM, (size - 3) * sizeof(sljit_sw)); + OP2(SLJIT_ADD, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, STACK_TOP, 0, SLJIT_IMM, (size - 3) * sizeof(sljit_sw)); else - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, STACK_TOP, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, STACK_TOP, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(size - 1), SLJIT_IMM, BACKTRACK_AS(then_trap_backtrack)->start); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(size - 2), SLJIT_IMM, type_then_trap); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(size - 3), TMP2, 0); @@ -7484,6 +9487,16 @@ while (cc < ccend) case OP_SOM: case OP_NOT_WORD_BOUNDARY: case OP_WORD_BOUNDARY: + case OP_EODN: + case OP_EOD: + case OP_DOLL: + case OP_DOLLM: + case OP_CIRC: + case OP_CIRCM: + case OP_REVERSE: + cc = compile_simple_assertion_matchingpath(common, *cc, cc + 1, parent->top != NULL ? &parent->top->nextbacktracks : &parent->topbacktracks); + break; + case OP_NOT_DIGIT: case OP_DIGIT: case OP_NOT_WHITESPACE: @@ -7501,23 +9514,16 @@ while (cc < ccend) case OP_NOT_VSPACE: case OP_VSPACE: case OP_EXTUNI: - case OP_EODN: - case OP_EOD: - case OP_CIRC: - case OP_CIRCM: - case OP_DOLL: - case OP_DOLLM: case OP_NOT: case OP_NOTI: - case OP_REVERSE: - cc = compile_char1_matchingpath(common, *cc, cc + 1, parent->top != NULL ? &parent->top->nextbacktracks : &parent->topbacktracks); + cc = compile_char1_matchingpath(common, *cc, cc + 1, parent->top != NULL ? &parent->top->nextbacktracks : &parent->topbacktracks, TRUE); break; case OP_SET_SOM: PUSH_BACKTRACK_NOVALUE(sizeof(backtrack_common), cc); - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0)); allocate_stack(common, 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0), STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(0), STR_PTR, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), TMP2, 0); cc++; break; @@ -7527,7 +9533,7 @@ while (cc < ccend) if (common->mode == JIT_COMPILE) cc = compile_charn_matchingpath(common, cc, ccend, parent->top != NULL ? &parent->top->nextbacktracks : &parent->topbacktracks); else - cc = compile_char1_matchingpath(common, *cc, cc + 1, parent->top != NULL ? &parent->top->nextbacktracks : &parent->topbacktracks); + cc = compile_char1_matchingpath(common, *cc, cc + 1, parent->top != NULL ? &parent->top->nextbacktracks : &parent->topbacktracks, TRUE); break; case OP_STAR: @@ -7603,7 +9609,7 @@ while (cc < ccend) if (cc[1 + (32 / sizeof(pcre_uchar))] >= OP_CRSTAR && cc[1 + (32 / sizeof(pcre_uchar))] <= OP_CRPOSRANGE) cc = compile_iterator_matchingpath(common, cc, parent); else - cc = compile_char1_matchingpath(common, *cc, cc + 1, parent->top != NULL ? &parent->top->nextbacktracks : &parent->topbacktracks); + cc = compile_char1_matchingpath(common, *cc, cc + 1, parent->top != NULL ? 
&parent->top->nextbacktracks : &parent->topbacktracks, TRUE); break; #if defined SUPPORT_UTF || defined COMPILE_PCRE16 || defined COMPILE_PCRE32 @@ -7611,7 +9617,7 @@ while (cc < ccend) if (*(cc + GET(cc, 1)) >= OP_CRSTAR && *(cc + GET(cc, 1)) <= OP_CRPOSRANGE) cc = compile_iterator_matchingpath(common, cc, parent); else - cc = compile_char1_matchingpath(common, *cc, cc + 1, parent->top != NULL ? &parent->top->nextbacktracks : &parent->topbacktracks); + cc = compile_char1_matchingpath(common, *cc, cc + 1, parent->top != NULL ? &parent->top->nextbacktracks : &parent->topbacktracks, TRUE); break; #endif @@ -7669,8 +9675,7 @@ while (cc < ccend) OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), STR_PTR, 0); } BACKTRACK_AS(braminzero_backtrack)->matchingpath = LABEL(); - if (cc[1] > OP_ASSERTBACK_NOT) - count_match(common); + count_match(common); break; case OP_ONCE: @@ -7705,17 +9710,17 @@ while (cc < ccend) case OP_MARK: PUSH_BACKTRACK_NOVALUE(sizeof(backtrack_common), cc); SLJIT_ASSERT(common->mark_ptr != 0); - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr); allocate_stack(common, common->has_skip_arg ? 5 : 1); OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(common->has_skip_arg ? 4 : 0), TMP2, 0); OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, (sljit_sw)(cc + 2)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr, TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->mark_ptr, TMP2, 0); OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, mark_ptr), TMP2, 0); if (common->has_skip_arg) { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, STACK_TOP, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, STACK_TOP, 0); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), SLJIT_IMM, type_mark); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(2), SLJIT_IMM, (sljit_sw)(cc + 2)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(3), STR_PTR, 0); @@ -7749,7 +9754,7 @@ while (cc < ccend) break; default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return; } if (cc == NULL) @@ -7788,95 +9793,82 @@ DEFINE_COMPILER; pcre_uchar *cc = current->cc; pcre_uchar opcode; pcre_uchar type; -int max = -1, min = -1; +sljit_u32 max = 0, exact; struct sljit_label *label = NULL; struct sljit_jump *jump = NULL; jump_list *jumplist = NULL; +pcre_uchar *end; int private_data_ptr = PRIVATE_DATA(cc); -int base = (private_data_ptr == 0) ? SLJIT_MEM1(STACK_TOP) : SLJIT_MEM1(SLJIT_LOCALS_REG); +int base = (private_data_ptr == 0) ? SLJIT_MEM1(STACK_TOP) : SLJIT_MEM1(SLJIT_SP); int offset0 = (private_data_ptr == 0) ? STACK(0) : private_data_ptr; int offset1 = (private_data_ptr == 0) ? 
STACK(1) : private_data_ptr + (int)sizeof(sljit_sw); -cc = get_iterator_parameters(common, cc, &opcode, &type, &max, &min, NULL); +cc = get_iterator_parameters(common, cc, &opcode, &type, &max, &exact, &end); switch(opcode) { case OP_STAR: - case OP_PLUS: case OP_UPTO: - case OP_CRRANGE: if (type == OP_ANYNL || type == OP_EXTUNI) { SLJIT_ASSERT(private_data_ptr == 0); - set_jumps(current->topbacktracks, LABEL()); + set_jumps(CURRENT_AS(char_iterator_backtrack)->u.backtracks, LABEL()); OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); - CMPTO(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(iterator_backtrack)->matchingpath); + CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(char_iterator_backtrack)->matchingpath); } else { - if (opcode == OP_UPTO) - min = 0; - if (opcode <= OP_PLUS) + if (CURRENT_AS(char_iterator_backtrack)->u.charpos.enabled) { OP1(SLJIT_MOV, STR_PTR, 0, base, offset0); - jump = CMP(SLJIT_C_LESS_EQUAL, STR_PTR, 0, base, offset1); + OP1(SLJIT_MOV, TMP2, 0, base, offset1); + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + + jump = CMP(SLJIT_LESS_EQUAL, STR_PTR, 0, TMP2, 0); + label = LABEL(); + OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-1)); + OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); + if (CURRENT_AS(char_iterator_backtrack)->u.charpos.othercasebit != 0) + OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, CURRENT_AS(char_iterator_backtrack)->u.charpos.othercasebit); + CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CURRENT_AS(char_iterator_backtrack)->u.charpos.chr, CURRENT_AS(char_iterator_backtrack)->matchingpath); + skip_char_back(common); + CMPTO(SLJIT_GREATER, STR_PTR, 0, TMP2, 0, label); } else { - OP1(SLJIT_MOV, TMP1, 0, base, offset1); OP1(SLJIT_MOV, STR_PTR, 0, base, offset0); - jump = CMP(SLJIT_C_LESS_EQUAL, TMP1, 0, SLJIT_IMM, min + 1); - OP2(SLJIT_SUB, base, offset1, TMP1, 0, SLJIT_IMM, 1); + jump = CMP(SLJIT_LESS_EQUAL, STR_PTR, 0, base, offset1); + skip_char_back(common); + OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); + JUMPTO(SLJIT_JUMP, CURRENT_AS(char_iterator_backtrack)->matchingpath); } - skip_char_back(common); - OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); - JUMPTO(SLJIT_JUMP, CURRENT_AS(iterator_backtrack)->matchingpath); - if (opcode == OP_CRRANGE) - set_jumps(current->topbacktracks, LABEL()); JUMPHERE(jump); if (private_data_ptr == 0) free_stack(common, 2); - if (opcode == OP_PLUS) - set_jumps(current->topbacktracks, LABEL()); } break; case OP_MINSTAR: - case OP_MINPLUS: OP1(SLJIT_MOV, STR_PTR, 0, base, offset0); - compile_char1_matchingpath(common, type, cc, &jumplist); + compile_char1_matchingpath(common, type, cc, &jumplist, TRUE); OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); - JUMPTO(SLJIT_JUMP, CURRENT_AS(iterator_backtrack)->matchingpath); + JUMPTO(SLJIT_JUMP, CURRENT_AS(char_iterator_backtrack)->matchingpath); set_jumps(jumplist, LABEL()); if (private_data_ptr == 0) free_stack(common, 1); - if (opcode == OP_MINPLUS) - set_jumps(current->topbacktracks, LABEL()); break; case OP_MINUPTO: - case OP_CRMINRANGE: - if (opcode == OP_CRMINRANGE) - { - label = LABEL(); - set_jumps(current->topbacktracks, label); - } + OP1(SLJIT_MOV, TMP1, 0, base, offset1); OP1(SLJIT_MOV, STR_PTR, 0, base, offset0); - compile_char1_matchingpath(common, type, cc, &jumplist); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); + add_jump(compiler, &jumplist, JUMP(SLJIT_ZERO)); - OP1(SLJIT_MOV, TMP1, 0, base, offset1); - OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); - OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, 
SLJIT_IMM, 1); OP1(SLJIT_MOV, base, offset1, TMP1, 0); - - if (opcode == OP_CRMINRANGE) - CMPTO(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, min + 1, label); - - if (opcode == OP_CRMINRANGE && max == 0) - JUMPTO(SLJIT_JUMP, CURRENT_AS(iterator_backtrack)->matchingpath); - else - CMPTO(SLJIT_C_LESS, TMP1, 0, SLJIT_IMM, max + 2, CURRENT_AS(iterator_backtrack)->matchingpath); + compile_char1_matchingpath(common, type, cc, &jumplist, TRUE); + OP1(SLJIT_MOV, base, offset0, STR_PTR, 0); + JUMPTO(SLJIT_JUMP, CURRENT_AS(char_iterator_backtrack)->matchingpath); set_jumps(jumplist, LABEL()); if (private_data_ptr == 0) @@ -7886,12 +9878,12 @@ switch(opcode) case OP_QUERY: OP1(SLJIT_MOV, STR_PTR, 0, base, offset0); OP1(SLJIT_MOV, base, offset0, SLJIT_IMM, 0); - CMPTO(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(iterator_backtrack)->matchingpath); + CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(char_iterator_backtrack)->matchingpath); jump = JUMP(SLJIT_JUMP); - set_jumps(current->topbacktracks, LABEL()); + set_jumps(CURRENT_AS(char_iterator_backtrack)->u.backtracks, LABEL()); OP1(SLJIT_MOV, STR_PTR, 0, base, offset0); OP1(SLJIT_MOV, base, offset0, SLJIT_IMM, 0); - JUMPTO(SLJIT_JUMP, CURRENT_AS(iterator_backtrack)->matchingpath); + JUMPTO(SLJIT_JUMP, CURRENT_AS(char_iterator_backtrack)->matchingpath); JUMPHERE(jump); if (private_data_ptr == 0) free_stack(common, 1); @@ -7900,9 +9892,9 @@ switch(opcode) case OP_MINQUERY: OP1(SLJIT_MOV, STR_PTR, 0, base, offset0); OP1(SLJIT_MOV, base, offset0, SLJIT_IMM, 0); - jump = CMP(SLJIT_C_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); - compile_char1_matchingpath(common, type, cc, &jumplist); - JUMPTO(SLJIT_JUMP, CURRENT_AS(iterator_backtrack)->matchingpath); + jump = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); + compile_char1_matchingpath(common, type, cc, &jumplist, TRUE); + JUMPTO(SLJIT_JUMP, CURRENT_AS(char_iterator_backtrack)->matchingpath); set_jumps(jumplist, LABEL()); JUMPHERE(jump); if (private_data_ptr == 0) @@ -7910,20 +9902,17 @@ switch(opcode) break; case OP_EXACT: - case OP_POSPLUS: - case OP_CRPOSRANGE: - set_jumps(current->topbacktracks, LABEL()); - break; - case OP_POSSTAR: case OP_POSQUERY: case OP_POSUPTO: break; default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); break; } + +set_jumps(current->topbacktracks, LABEL()); } static SLJIT_INLINE void compile_ref_iterator_backtrackingpath(compiler_common *common, struct backtrack_common *current) @@ -7941,12 +9930,12 @@ if ((type & 0x1) == 0) set_jumps(current->topbacktracks, LABEL()); OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); - CMPTO(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(iterator_backtrack)->matchingpath); + CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(ref_iterator_backtrack)->matchingpath); return; } OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); -CMPTO(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(iterator_backtrack)->matchingpath); +CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(ref_iterator_backtrack)->matchingpath); set_jumps(current->topbacktracks, LABEL()); free_stack(common, ref ? 
2 : 3); } @@ -7966,14 +9955,14 @@ if (common->has_set_som && common->mark_ptr != 0) OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(1)); free_stack(common, 2); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0), TMP2, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(0), TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->mark_ptr, TMP1, 0); } else if (common->has_set_som || common->mark_ptr != 0) { OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->has_set_som ? (int)(OVECTOR(0)) : common->mark_ptr, TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->has_set_som ? (int)(OVECTOR(0)) : common->mark_ptr, TMP2, 0); } } @@ -8004,7 +9993,7 @@ if (CURRENT_AS(assert_backtrack)->framesize < 0) if (bra == OP_BRAZERO) { OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); - CMPTO(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(assert_backtrack)->matchingpath); + CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(assert_backtrack)->matchingpath); free_stack(common, 1); } return; @@ -8015,19 +10004,19 @@ if (bra == OP_BRAZERO) if (*cc == OP_ASSERT_NOT || *cc == OP_ASSERTBACK_NOT) { OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); - CMPTO(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(assert_backtrack)->matchingpath); + CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(assert_backtrack)->matchingpath); free_stack(common, 1); return; } free_stack(common, 1); - brajump = CMP(SLJIT_C_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); + brajump = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0); } if (*cc == OP_ASSERT || *cc == OP_ASSERTBACK) { - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), CURRENT_AS(assert_backtrack)->private_data_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(assert_backtrack)->private_data_ptr); add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), CURRENT_AS(assert_backtrack)->private_data_ptr, SLJIT_MEM1(STACK_TOP), CURRENT_AS(assert_backtrack)->framesize * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(assert_backtrack)->private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-CURRENT_AS(assert_backtrack)->framesize - 1)); set_jumps(current->topbacktracks, LABEL()); } @@ -8037,7 +10026,7 @@ else if (bra == OP_BRAZERO) { /* We know there is enough place on the stack. 
*/ - OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0); JUMPTO(SLJIT_JUMP, CURRENT_AS(assert_backtrack)->matchingpath); JUMPHERE(brajump); @@ -8047,21 +10036,22 @@ if (bra == OP_BRAZERO) static void compile_bracket_backtrackingpath(compiler_common *common, struct backtrack_common *current) { DEFINE_COMPILER; -int opcode, stacksize, count; +int opcode, stacksize, alt_count, alt_max; int offset = 0; int private_data_ptr = CURRENT_AS(bracket_backtrack)->private_data_ptr; int repeat_ptr = 0, repeat_type = 0, repeat_count = 0; pcre_uchar *cc = current->cc; pcre_uchar *ccbegin; pcre_uchar *ccprev; -jump_list *jumplist = NULL; -jump_list *jumplistitem = NULL; pcre_uchar bra = OP_BRA; pcre_uchar ket; assert_backtrack *assert; +sljit_uw *next_update_addr = NULL; BOOL has_alternatives; BOOL needs_control_head = FALSE; struct sljit_jump *brazero = NULL; +struct sljit_jump *alt1 = NULL; +struct sljit_jump *alt2 = NULL; struct sljit_jump *once = NULL; struct sljit_jump *cond = NULL; struct sljit_label *rmin_label = NULL; @@ -8099,6 +10089,8 @@ if (SLJIT_UNLIKELY(opcode == OP_COND) && (*cc == OP_KETRMAX || *cc == OP_KETRMIN if (SLJIT_UNLIKELY(opcode == OP_ONCE_NC)) opcode = OP_ONCE; +alt_max = has_alternatives ? no_alternatives(ccbegin) : 0; + /* Decoding the needs_control_head in framesize. */ if (opcode == OP_ONCE) { @@ -8112,9 +10104,9 @@ if (ket != OP_KET && repeat_type != 0) OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); if (repeat_type == OP_UPTO) - OP2(SLJIT_ADD, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, TMP1, 0, SLJIT_IMM, 1); + OP2(SLJIT_ADD, SLJIT_MEM1(SLJIT_SP), repeat_ptr, TMP1, 0, SLJIT_IMM, 1); else - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), repeat_ptr, TMP1, 0); } if (ket == OP_KETRMAX) @@ -8123,7 +10115,7 @@ if (ket == OP_KETRMAX) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); - brazero = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, 0); + brazero = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0); } } else if (ket == OP_KETRMIN) @@ -8134,7 +10126,7 @@ else if (ket == OP_KETRMIN) if (repeat_type != 0) { /* TMP1 was set a few lines above. */ - CMPTO(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, 0, CURRENT_AS(bracket_backtrack)->recursive_matchingpath); + CMPTO(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, 0, CURRENT_AS(bracket_backtrack)->recursive_matchingpath); /* Drop STR_PTR for non-greedy plus quantifier. */ if (opcode != OP_ONCE) free_stack(common, 1); @@ -8143,11 +10135,11 @@ else if (ket == OP_KETRMIN) { /* Checking zero-length iteration. 
*/ if (opcode != OP_ONCE || CURRENT_AS(bracket_backtrack)->u.framesize < 0) - CMPTO(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, CURRENT_AS(bracket_backtrack)->recursive_matchingpath); + CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, CURRENT_AS(bracket_backtrack)->recursive_matchingpath); else { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); - CMPTO(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_MEM1(TMP1), (CURRENT_AS(bracket_backtrack)->u.framesize + 1) * sizeof(sljit_sw), CURRENT_AS(bracket_backtrack)->recursive_matchingpath); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); + CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_MEM1(TMP1), STACK(-CURRENT_AS(bracket_backtrack)->u.framesize - 2), CURRENT_AS(bracket_backtrack)->recursive_matchingpath); } /* Drop STR_PTR for non-greedy plus quantifier. */ if (opcode != OP_ONCE) @@ -8158,17 +10150,17 @@ else if (ket == OP_KETRMIN) } rmin_label = LABEL(); if (repeat_type != 0) - OP2(SLJIT_ADD, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, SLJIT_IMM, 1); + OP2(SLJIT_ADD, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_IMM, 1); } else if (bra == OP_BRAZERO) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); - brazero = CMP(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, 0); + brazero = CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, 0); } else if (repeat_type == OP_EXACT) { - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, SLJIT_IMM, 1); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_IMM, 1); exact_label = LABEL(); } @@ -8179,19 +10171,19 @@ if (offset != 0) SLJIT_ASSERT(common->optimized_cbracket[offset >> 1] == 0); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(1)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr, TMP1, 0); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(2)); free_stack(common, 3); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset), TMP2, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset), TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), TMP1, 0); } else if (common->optimized_cbracket[offset >> 1] == 0) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(1)); free_stack(common, 2); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset), TMP1, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset), TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), TMP2, 0); } } @@ -8199,7 +10191,7 @@ if (SLJIT_UNLIKELY(opcode == OP_ONCE)) { if (CURRENT_AS(bracket_backtrack)->u.framesize >= 0) { - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); } once = JUMP(SLJIT_JUMP); @@ -8212,44 +10204,30 @@ else if (SLJIT_UNLIKELY(opcode == OP_COND) || SLJIT_UNLIKELY(opcode == OP_SCOND) OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); - jumplistitem = sljit_alloc_memory(compiler, sizeof(jump_list)); - if 
(SLJIT_UNLIKELY(!jumplistitem)) - return; - jumplist = jumplistitem; - jumplistitem->next = NULL; - jumplistitem->jump = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, 1); + alt_max = 2; + alt1 = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, sizeof(sljit_uw)); } } -else if (*cc == OP_ALT) +else if (has_alternatives) { - /* Build a jump list. Get the last successfully matched branch index. */ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); - count = 1; - do - { - /* Append as the last item. */ - if (jumplist != NULL) - { - jumplistitem->next = sljit_alloc_memory(compiler, sizeof(jump_list)); - jumplistitem = jumplistitem->next; - } - else - { - jumplistitem = sljit_alloc_memory(compiler, sizeof(jump_list)); - jumplist = jumplistitem; - } - if (SLJIT_UNLIKELY(!jumplistitem)) + if (alt_max > 4) + { + /* Table jump if alt_max is greater than 4. */ + next_update_addr = allocate_read_only_data(common, alt_max * sizeof(sljit_uw)); + if (SLJIT_UNLIKELY(next_update_addr == NULL)) return; - - jumplistitem->next = NULL; - jumplistitem->jump = CMP(SLJIT_C_EQUAL, TMP1, 0, SLJIT_IMM, count++); - cc += GET(cc, 1); + sljit_emit_ijump(compiler, SLJIT_JUMP, SLJIT_MEM1(TMP1), (sljit_sw)next_update_addr); + add_label_addr(common, next_update_addr++); + } + else + { + if (alt_max == 4) + alt2 = CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 2 * sizeof(sljit_uw)); + alt1 = CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, sizeof(sljit_uw)); } - while (*cc == OP_ALT); - - cc = ccbegin + GET(ccbegin, 1); } COMPILE_BACKTRACKINGPATH(current->top); @@ -8265,9 +10243,9 @@ if (SLJIT_UNLIKELY(opcode == OP_COND) || SLJIT_UNLIKELY(opcode == OP_SCOND)) assert = CURRENT_AS(bracket_backtrack)->u.assert; if (assert->framesize >= 0 && (ccbegin[1 + LINK_SIZE] == OP_ASSERT || ccbegin[1 + LINK_SIZE] == OP_ASSERTBACK)) { - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), assert->private_data_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), assert->private_data_ptr); add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), assert->private_data_ptr, SLJIT_MEM1(STACK_TOP), assert->framesize * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), assert->private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-assert->framesize - 1)); } cond = JUMP(SLJIT_JUMP); set_jumps(CURRENT_AS(bracket_backtrack)->u.assert->condfailed, LABEL()); @@ -8284,7 +10262,7 @@ if (SLJIT_UNLIKELY(opcode == OP_COND) || SLJIT_UNLIKELY(opcode == OP_SCOND)) if (has_alternatives) { - count = 1; + alt_count = sizeof(sljit_uw); do { current->top = NULL; @@ -8300,7 +10278,7 @@ if (has_alternatives) if (opcode != OP_ONCE) { if (private_data_ptr != 0) - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr); else OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); } @@ -8321,7 +10299,7 @@ if (has_alternatives) if (repeat_type == OP_MINUPTO) { /* We need to preserve the counter. TMP2 will be used below. 
*/ - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), repeat_ptr); stacksize++; } if (ket != OP_KET || bra != OP_BRA) @@ -8360,22 +10338,37 @@ if (has_alternatives) stacksize = match_capture_common(common, stacksize, offset, private_data_ptr); if (opcode != OP_ONCE) - OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize), SLJIT_IMM, count++); + OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize), SLJIT_IMM, alt_count); if (offset != 0 && ket == OP_KETRMAX && common->optimized_cbracket[offset >> 1] != 0) { /* If ket is not OP_KETRMAX, this code path is executed after the jump to alternative_matchingpath. */ SLJIT_ASSERT(private_data_ptr == OVECTOR(offset + 0)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), STR_PTR, 0); } JUMPTO(SLJIT_JUMP, CURRENT_AS(bracket_backtrack)->alternative_matchingpath); if (opcode != OP_ONCE) { - SLJIT_ASSERT(jumplist); - JUMPHERE(jumplist->jump); - jumplist = jumplist->next; + if (alt_max > 4) + add_label_addr(common, next_update_addr++); + else + { + if (alt_count != 2 * sizeof(sljit_uw)) + { + JUMPHERE(alt1); + if (alt_max == 3 && alt_count == sizeof(sljit_uw)) + alt2 = CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 2 * sizeof(sljit_uw)); + } + else + { + JUMPHERE(alt2); + if (alt_max == 4) + alt1 = CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 3 * sizeof(sljit_uw)); + } + } + alt_count += sizeof(sljit_uw); } COMPILE_BACKTRACKINGPATH(current->top); @@ -8384,7 +10377,6 @@ if (has_alternatives) SLJIT_ASSERT(!current->nextbacktracks); } while (*cc == OP_ALT); - SLJIT_ASSERT(!jumplist); if (cond != NULL) { @@ -8392,9 +10384,9 @@ if (has_alternatives) assert = CURRENT_AS(bracket_backtrack)->u.assert; if ((ccbegin[1 + LINK_SIZE] == OP_ASSERT_NOT || ccbegin[1 + LINK_SIZE] == OP_ASSERTBACK_NOT) && assert->framesize >= 0) { - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), assert->private_data_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), assert->private_data_ptr); add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), assert->private_data_ptr, SLJIT_MEM1(STACK_TOP), assert->framesize * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), assert->private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-assert->framesize - 1)); } JUMPHERE(cond); } @@ -8412,19 +10404,19 @@ if (offset != 0) OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(1)); free_stack(common, 2); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset), TMP1, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset), TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), TMP2, 0); } else { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, TMP1, 0); } } else if (opcode == OP_SBRA || opcode == OP_SCOND) { - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(0)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); } else if (opcode == OP_ONCE) @@ -8442,26 +10434,28 @@ else if (opcode == OP_ONCE) /* The STR_PTR must be released. 
*/ stacksize++; } - free_stack(common, stacksize); + + if (stacksize > 0) + free_stack(common, stacksize); JUMPHERE(once); /* Restore previous private_data_ptr */ if (CURRENT_AS(bracket_backtrack)->u.framesize >= 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, SLJIT_MEM1(STACK_TOP), CURRENT_AS(bracket_backtrack)->u.framesize * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-CURRENT_AS(bracket_backtrack)->u.framesize - 1)); else if (ket == OP_KETRMIN) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(1)); /* See the comment below. */ free_stack(common, 2); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), private_data_ptr, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, TMP1, 0); } } if (repeat_type == OP_EXACT) { - OP2(SLJIT_ADD, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, SLJIT_IMM, 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), repeat_ptr, TMP1, 0); - CMPTO(SLJIT_C_LESS_EQUAL, TMP1, 0, SLJIT_IMM, repeat_count, exact_label); + OP2(SLJIT_ADD, TMP1, 0, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_IMM, 1); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), repeat_ptr, TMP1, 0); + CMPTO(SLJIT_LESS_EQUAL, TMP1, 0, SLJIT_IMM, repeat_count, exact_label); } else if (ket == OP_KETRMAX) { @@ -8469,7 +10463,7 @@ else if (ket == OP_KETRMAX) if (bra != OP_BRAZERO) free_stack(common, 1); - CMPTO(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(bracket_backtrack)->recursive_matchingpath); + CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0, CURRENT_AS(bracket_backtrack)->recursive_matchingpath); if (bra == OP_BRAZERO) { OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(1)); @@ -8487,7 +10481,7 @@ else if (ket == OP_KETRMIN) affect badly the free_stack(2) above. */ if (opcode != OP_ONCE) free_stack(common, 1); - CMPTO(SLJIT_C_NOT_EQUAL, TMP1, 0, SLJIT_IMM, 0, rmin_label); + CMPTO(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, 0, rmin_label); if (opcode == OP_ONCE) free_stack(common, bra == OP_BRAMINZERO ? 
2 : 1); else if (bra == OP_BRAMINZERO) @@ -8514,19 +10508,19 @@ if (CURRENT_AS(bracketpos_backtrack)->framesize < 0) offset = (GET2(current->cc, 1 + LINK_SIZE)) << 1; OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(1)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset), TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset), TMP1, 0); if (common->capture_last_ptr != 0) OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(2)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(offset + 1), TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), TMP2, 0); if (common->capture_last_ptr != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr, TMP1, 0); } set_jumps(current->topbacktracks, LABEL()); free_stack(common, CURRENT_AS(bracketpos_backtrack)->stacksize); return; } -OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), CURRENT_AS(bracketpos_backtrack)->private_data_ptr); +OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(bracketpos_backtrack)->private_data_ptr); add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); if (current->topbacktracks) @@ -8537,7 +10531,7 @@ if (current->topbacktracks) free_stack(common, CURRENT_AS(bracketpos_backtrack)->stacksize); JUMPHERE(jump); } -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), CURRENT_AS(bracketpos_backtrack)->private_data_ptr, SLJIT_MEM1(STACK_TOP), CURRENT_AS(bracketpos_backtrack)->framesize * sizeof(sljit_sw)); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(bracketpos_backtrack)->private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-CURRENT_AS(bracketpos_backtrack)->framesize - 1)); } static SLJIT_INLINE void compile_braminzero_backtrackingpath(compiler_common *common, struct backtrack_common *current) @@ -8577,16 +10571,16 @@ if (opcode == OP_THEN || opcode == OP_THEN_ARG) { SLJIT_ASSERT(common->control_head_ptr != 0); - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, type_then_trap); OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, common->then_trap->start); jump = JUMP(SLJIT_JUMP); loop = LABEL(); - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), -(int)sizeof(sljit_sw)); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); JUMPHERE(jump); - CMPTO(SLJIT_C_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), -(int)(2 * sizeof(sljit_sw)), TMP1, 0, loop); - CMPTO(SLJIT_C_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), -(int)(3 * sizeof(sljit_sw)), TMP2, 0, loop); + CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(1), TMP1, 0, loop); + CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(2), TMP2, 0, loop); add_jump(compiler, &common->then_trap->quit, JUMP(SLJIT_JUMP)); return; } @@ -8609,14 +10603,14 @@ if (common->local_exit) if (opcode == OP_SKIP_ARG) { SLJIT_ASSERT(common->control_head_ptr != 0); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0, STACK_TOP, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0); OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, (sljit_sw)(current->cc + 2)); - sljit_emit_ijump(compiler, SLJIT_CALL2, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_search_mark)); - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), 
LOCALS0); + sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_search_mark)); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); OP1(SLJIT_MOV, STR_PTR, 0, TMP1, 0); - add_jump(compiler, &common->reset_match, CMP(SLJIT_C_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, -1)); + add_jump(compiler, &common->reset_match, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0)); return; } @@ -8654,7 +10648,7 @@ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 3); JUMPHERE(jump); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, TMP1, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, TMP1, 0); } static void compile_backtrackingpath(compiler_common *common, struct backtrack_common *current) @@ -8671,7 +10665,7 @@ while (current) case OP_SET_SOM: OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0), TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(0), TMP1, 0); break; case OP_STAR: @@ -8800,9 +10794,9 @@ while (current) if (common->has_skip_arg) OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(0)); free_stack(common, common->has_skip_arg ? 5 : 1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr, TMP1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->mark_ptr, TMP1, 0); if (common->has_skip_arg) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, TMP2, 0); break; case OP_THEN: @@ -8836,7 +10830,7 @@ while (current) break; default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); break; } current = current->prev; @@ -8849,7 +10843,7 @@ static SLJIT_INLINE void compile_recurse(compiler_common *common) DEFINE_COMPILER; pcre_uchar *cc = common->start + common->currententry->start; pcre_uchar *ccbegin = cc + 1 + LINK_SIZE + (*cc == OP_BRA ? 
0 : IMM2_SIZE); -pcre_uchar *ccend = bracketend(cc); +pcre_uchar *ccend = bracketend(cc) - (1 + LINK_SIZE); BOOL needs_control_head; int framesize = get_framesize(common, cc, NULL, TRUE, &needs_control_head); int private_data_size = get_private_data_copy_length(common, ccbegin, ccend, needs_control_head); @@ -8872,12 +10866,13 @@ common->currententry->entry = LABEL(); set_jumps(common->currententry->calls, common->currententry->entry); sljit_emit_fast_enter(compiler, TMP2, 0); +count_match(common); allocate_stack(common, private_data_size + framesize + alternativesize); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(private_data_size + framesize + alternativesize - 1), TMP2, 0); -copy_private_data(common, ccbegin, ccend, TRUE, private_data_size + framesize + alternativesize, framesize + alternativesize, needs_control_head); +copy_private_data(common, ccbegin, ccend, TRUE, framesize + alternativesize, private_data_size + framesize + alternativesize, needs_control_head); if (needs_control_head) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, SLJIT_IMM, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->recursive_head_ptr, STACK_TOP, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_IMM, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->recursive_head_ptr, STACK_TOP, 0); if (needs_frame) init_frame(common, cc, NULL, framesize + alternativesize - 1, alternativesize, TRUE); @@ -8924,12 +10919,12 @@ jump = JUMP(SLJIT_JUMP); if (common->quit != NULL) { set_jumps(common->quit, LABEL()); - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->recursive_head_ptr); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), common->recursive_head_ptr); if (needs_frame) { - OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw)); - add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw)); + add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw)); } OP1(SLJIT_MOV, TMP3, 0, SLJIT_IMM, 0); common->quit = NULL; @@ -8937,35 +10932,35 @@ if (common->quit != NULL) } set_jumps(common->accept, LABEL()); -OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->recursive_head_ptr); +OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), common->recursive_head_ptr); if (needs_frame) { - OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw)); - add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw)); + add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL)); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw)); } OP1(SLJIT_MOV, TMP3, 0, SLJIT_IMM, 1); JUMPHERE(jump); if (common->quit != NULL) set_jumps(common->quit, LABEL()); -copy_private_data(common, ccbegin, ccend, FALSE, private_data_size + framesize + alternativesize, framesize + alternativesize, needs_control_head); +copy_private_data(common, ccbegin, ccend, FALSE, framesize + alternativesize, private_data_size + framesize + alternativesize, needs_control_head); free_stack(common, private_data_size + framesize + alternativesize); if (needs_control_head) { - OP1(SLJIT_MOV, TMP1, 0, 
SLJIT_MEM1(STACK_TOP), 2 * sizeof(sljit_sw)); - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), sizeof(sljit_sw)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->recursive_head_ptr, TMP1, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(-3)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(-2)); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->recursive_head_ptr, TMP1, 0); OP1(SLJIT_MOV, TMP1, 0, TMP3, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, TMP2, 0); } else { - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), sizeof(sljit_sw)); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(-2)); OP1(SLJIT_MOV, TMP1, 0, TMP3, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->recursive_head_ptr, TMP2, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->recursive_head_ptr, TMP2, 0); } -sljit_emit_fast_return(compiler, SLJIT_MEM1(STACK_TOP), 0); +sljit_emit_fast_return(compiler, SLJIT_MEM1(STACK_TOP), STACK(-1)); } #undef COMPILE_BACKTRACKINGPATH @@ -8978,23 +10973,25 @@ struct sljit_compiler *compiler; backtrack_common rootbacktrack; compiler_common common_data; compiler_common *common = &common_data; -const pcre_uint8 *tables = re->tables; +const sljit_u8 *tables = re->tables; pcre_study_data *study; int private_data_size; pcre_uchar *ccend; executable_functions *functions; void *executable_func; sljit_uw executable_size; +sljit_uw total_length; +label_addr_list *label_addr; struct sljit_label *mainloop_label = NULL; struct sljit_label *continue_match_label; -struct sljit_label *empty_match_found_label; -struct sljit_label *empty_match_backtrack_label; +struct sljit_label *empty_match_found_label = NULL; +struct sljit_label *empty_match_backtrack_label = NULL; struct sljit_label *reset_match_label; +struct sljit_label *quit_label; struct sljit_jump *jump; struct sljit_jump *minlength_check_failed = NULL; struct sljit_jump *reqbyte_notfound = NULL; -struct sljit_jump *empty_match; -struct sljit_label *quit_label; +struct sljit_jump *empty_match = NULL; SLJIT_ASSERT((extra->flags & PCRE_EXTRA_STUDY_DATA) != 0); study = extra->study_data; @@ -9007,9 +11004,11 @@ memset(common, 0, sizeof(compiler_common)); rootbacktrack.cc = (pcre_uchar *)re + re->name_table_offset + re->name_count * re->name_entry_size; common->start = rootbacktrack.cc; +common->read_only_data_head = NULL; common->fcc = tables + fcc_offset; common->lcc = (sljit_sw)(tables + lcc_offset); common->mode = mode; +common->might_be_empty = study->minlength == 0; common->nltype = NLTYPE_FIXED; switch(re->options & PCRE_NEWLINE_BITS) { @@ -9030,6 +11029,8 @@ switch(re->options & PCRE_NEWLINE_BITS) case PCRE_NEWLINE_ANYCRLF: common->newline = (CHAR_CR << 8) | CHAR_NL; common->nltype = NLTYPE_ANYCRLF; break; default: return; } +common->nlmax = READ_CHAR_MAX; +common->nlmin = 0; if ((re->options & PCRE_BSR_ANYCRLF) != 0) common->bsr_nltype = NLTYPE_ANYCRLF; else if ((re->options & PCRE_BSR_UNICODE) != 0) @@ -9042,9 +11043,10 @@ else common->bsr_nltype = NLTYPE_ANY; #endif } +common->bsr_nlmax = READ_CHAR_MAX; +common->bsr_nlmin = 0; common->endonly = (re->options & PCRE_DOLLAR_ENDONLY) != 0; common->ctypes = (sljit_sw)(tables + ctypes_offset); -common->digits[0] = -2; common->name_table = ((pcre_uchar *)re) + re->name_table_offset; common->name_count = re->name_count; common->name_entry_size = re->name_entry_size; @@ -9055,12 +11057,35 @@ common->utf = (re->options & PCRE_UTF8) != 0; #ifdef SUPPORT_UCP 
common->use_ucp = (re->options & PCRE_UCP) != 0; #endif +if (common->utf) + { + if (common->nltype == NLTYPE_ANY) + common->nlmax = 0x2029; + else if (common->nltype == NLTYPE_ANYCRLF) + common->nlmax = (CHAR_CR > CHAR_NL) ? CHAR_CR : CHAR_NL; + else + { + /* We only care about the first newline character. */ + common->nlmax = common->newline & 0xff; + } + + if (common->nltype == NLTYPE_FIXED) + common->nlmin = common->newline & 0xff; + else + common->nlmin = (CHAR_CR < CHAR_NL) ? CHAR_CR : CHAR_NL; + + if (common->bsr_nltype == NLTYPE_ANY) + common->bsr_nlmax = 0x2029; + else + common->bsr_nlmax = (CHAR_CR > CHAR_NL) ? CHAR_CR : CHAR_NL; + common->bsr_nlmin = (CHAR_CR < CHAR_NL) ? CHAR_CR : CHAR_NL; + } #endif /* SUPPORT_UTF */ -ccend = bracketend(rootbacktrack.cc); +ccend = bracketend(common->start); /* Calculate the local space size on the stack. */ common->ovector_start = LIMIT_MATCH + sizeof(sljit_sw); -common->optimized_cbracket = (pcre_uint8 *)SLJIT_MALLOC(re->top_bracket + 1); +common->optimized_cbracket = (sljit_u8 *)SLJIT_MALLOC(re->top_bracket + 1, compiler->allocator_data); if (!common->optimized_cbracket) return; #if defined DEBUG_FORCE_UNOPTIMIZED_CBRAS && DEBUG_FORCE_UNOPTIMIZED_CBRAS == 1 @@ -9069,14 +11094,14 @@ memset(common->optimized_cbracket, 0, re->top_bracket + 1); memset(common->optimized_cbracket, 1, re->top_bracket + 1); #endif -SLJIT_ASSERT(*rootbacktrack.cc == OP_BRA && ccend[-(1 + LINK_SIZE)] == OP_KET); +SLJIT_ASSERT(*common->start == OP_BRA && ccend[-(1 + LINK_SIZE)] == OP_KET); #if defined DEBUG_FORCE_UNOPTIMIZED_CBRAS && DEBUG_FORCE_UNOPTIMIZED_CBRAS == 2 common->capture_last_ptr = common->ovector_start; common->ovector_start += sizeof(sljit_sw); #endif -if (!check_opcode_types(common, rootbacktrack.cc, ccend)) +if (!check_opcode_types(common, common->start, ccend)) { - SLJIT_FREE(common->optimized_cbracket); + SLJIT_FREE(common->optimized_cbracket, compiler->allocator_data); return; } @@ -9095,15 +11120,10 @@ if (mode != JIT_COMPILE) common->hit_start = common->ovector_start; common->ovector_start += 2 * sizeof(sljit_sw); } - else - { - SLJIT_ASSERT(mode == JIT_PARTIAL_HARD_COMPILE); - common->needs_start_ptr = TRUE; - } } if ((re->options & PCRE_FIRSTLINE) != 0) { - common->first_line_end = common->ovector_start; + common->match_end_ptr = common->ovector_start; common->ovector_start += sizeof(sljit_sw); } #if defined DEBUG_FORCE_CONTROL_HEAD && DEBUG_FORCE_CONTROL_HEAD @@ -9114,14 +11134,12 @@ if (common->control_head_ptr != 0) common->control_head_ptr = common->ovector_start; common->ovector_start += sizeof(sljit_sw); } -if (common->needs_start_ptr && common->has_set_som) +if (common->has_set_som) { /* Saving the real start pointer is necessary. */ common->start_ptr = common->ovector_start; common->ovector_start += sizeof(sljit_sw); } -else - common->needs_start_ptr = FALSE; /* Aligning ovector to even number of sljit words. */ if ((common->ovector_start & sizeof(sljit_sw)) != 0) @@ -9137,88 +11155,93 @@ if (common->capture_last_ptr != 0) SLJIT_ASSERT(!(common->req_char_ptr != 0 && common->start_used_ptr != 0)); common->cbra_ptr = OVECTOR_START + (re->top_bracket + 1) * 2 * sizeof(sljit_sw); -common->private_data_ptrs = (int *)SLJIT_MALLOC((ccend - rootbacktrack.cc) * sizeof(sljit_si)); +total_length = ccend - common->start; +common->private_data_ptrs = (sljit_s32 *)SLJIT_MALLOC(total_length * (sizeof(sljit_s32) + (common->has_then ? 
1 : 0)), compiler->allocator_data); if (!common->private_data_ptrs) { - SLJIT_FREE(common->optimized_cbracket); + SLJIT_FREE(common->optimized_cbracket, compiler->allocator_data); return; } -memset(common->private_data_ptrs, 0, (ccend - rootbacktrack.cc) * sizeof(int)); +memset(common->private_data_ptrs, 0, total_length * sizeof(sljit_s32)); private_data_size = common->cbra_ptr + (re->top_bracket + 1) * sizeof(sljit_sw); set_private_data_ptrs(common, &private_data_size, ccend); +if ((re->options & PCRE_ANCHORED) == 0 && (re->options & PCRE_NO_START_OPTIMIZE) == 0) + { + if (!detect_fast_forward_skip(common, &private_data_size) && !common->has_skip_in_assert_back) + detect_fast_fail(common, common->start, &private_data_size, 4); + } + +SLJIT_ASSERT(common->fast_fail_start_ptr <= common->fast_fail_end_ptr); + if (private_data_size > SLJIT_MAX_LOCAL_SIZE) { - SLJIT_FREE(common->private_data_ptrs); - SLJIT_FREE(common->optimized_cbracket); + SLJIT_FREE(common->private_data_ptrs, compiler->allocator_data); + SLJIT_FREE(common->optimized_cbracket, compiler->allocator_data); return; } if (common->has_then) { - common->then_offsets = (pcre_uint8 *)SLJIT_MALLOC(ccend - rootbacktrack.cc); - if (!common->then_offsets) - { - SLJIT_FREE(common->optimized_cbracket); - SLJIT_FREE(common->private_data_ptrs); - return; - } - memset(common->then_offsets, 0, ccend - rootbacktrack.cc); - set_then_offsets(common, rootbacktrack.cc, NULL); + common->then_offsets = (sljit_u8 *)(common->private_data_ptrs + total_length); + memset(common->then_offsets, 0, total_length); + set_then_offsets(common, common->start, NULL); } -compiler = sljit_create_compiler(); +compiler = sljit_create_compiler(NULL); if (!compiler) { - SLJIT_FREE(common->optimized_cbracket); - SLJIT_FREE(common->private_data_ptrs); - if (common->has_then) - SLJIT_FREE(common->then_offsets); + SLJIT_FREE(common->optimized_cbracket, compiler->allocator_data); + SLJIT_FREE(common->private_data_ptrs, compiler->allocator_data); return; } common->compiler = compiler; /* Main pcre_jit_exec entry. */ -sljit_emit_enter(compiler, 1, 5, 5, private_data_size); +sljit_emit_enter(compiler, 0, SLJIT_ARG1(SW), 5, 5, 0, 0, private_data_size); /* Register init. 
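   The block below copies the subject pointers and the machine stack
   bounds out of the jit_arguments structure. Note the direction of the
   sljit stack in this version: STACK_TOP is loaded from the
   descriptor's 'end' field and STACK_LIMIT from 'start', so the JIT
   stack grows downwards (compare jit_machine_stack_exec further down,
   which builds the same descriptor around a local buffer). limit_match
   is stored incremented by one, apparently so the limit can be
   enforced with a plain decrement-and-test-for-zero. A minimal sketch
   of the descriptor layout, assuming an 8K block (illustration only):

     sljit_u8 block[8192];
     struct sljit_stack s;
     s.min_start = block;         // lowest address the stack may reach
     s.start     = block;         // current lower bound (STACK_LIMIT)
     s.end       = block + 8192;  // upper bound (initial STACK_TOP)
     s.top       = block + 8192;  // moves from end towards start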
*/ reset_ovector(common, (re->top_bracket + 1) * 2); if (common->req_char_ptr != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->req_char_ptr, SLJIT_SCRATCH_REG1, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->req_char_ptr, SLJIT_R0, 0); -OP1(SLJIT_MOV, ARGUMENTS, 0, SLJIT_SAVED_REG1, 0); -OP1(SLJIT_MOV, TMP1, 0, SLJIT_SAVED_REG1, 0); +OP1(SLJIT_MOV, ARGUMENTS, 0, SLJIT_S0, 0); +OP1(SLJIT_MOV, TMP1, 0, SLJIT_S0, 0); OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str)); OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, end)); OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack)); -OP1(SLJIT_MOV_UI, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, limit_match)); -OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, base)); -OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, limit)); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LIMIT_MATCH, TMP1, 0); +OP1(SLJIT_MOV_U32, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, limit_match)); +OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, end)); +OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, start)); +OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LIMIT_MATCH, TMP1, 0); + +if (common->fast_fail_start_ptr < common->fast_fail_end_ptr) + reset_fast_fail(common); if (mode == JIT_PARTIAL_SOFT_COMPILE) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->hit_start, SLJIT_IMM, -1); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, -1); if (common->mark_ptr != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->mark_ptr, SLJIT_IMM, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->mark_ptr, SLJIT_IMM, 0); if (common->control_head_ptr != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->control_head_ptr, SLJIT_IMM, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_IMM, 0); /* Main part of the matching */ if ((re->options & PCRE_ANCHORED) == 0) { - mainloop_label = mainloop_entry(common, (re->flags & PCRE_HASCRORLF) != 0, (re->options & PCRE_FIRSTLINE) != 0); + mainloop_label = mainloop_entry(common, (re->flags & PCRE_HASCRORLF) != 0); continue_match_label = LABEL(); /* Forward search if possible. 
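   The chain below picks the strongest available start-of-match
   optimization: a multi-character fast forward (complete matches
   only), then a known first character, then a start-of-line scan, then
   the study start-bits map. Also note that 8.45 no longer threads a
   PCRE_FIRSTLINE flag through these helpers; the first-line limit now
   lives in match_end_ptr. Rough shape of the dispatch (sketch only,
   not compilable as written):

     if (mode == JIT_COMPILE && fast_forward_first_n_chars(common))
       ;                                  // several leading chars known
     else if (first character known)      // PCRE_FIRSTSET
       fast_forward_first_char(common, first_char, caseless);
     else if (pattern starts at a line)   // PCRE_STARTLINE
       fast_forward_newline(common);
     else if (study produced a map)       // PCRE_STUDY_MAPPED
       fast_forward_start_bits(common, study->start_bits);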
*/ if ((re->options & PCRE_NO_START_OPTIMIZE) == 0) { - if (mode == JIT_COMPILE && fast_forward_first_n_chars(common, (re->options & PCRE_FIRSTLINE) != 0)) - { /* Do nothing */ } + if (mode == JIT_COMPILE && fast_forward_first_n_chars(common)) + ; else if ((re->flags & PCRE_FIRSTSET) != 0) - fast_forward_first_char(common, (pcre_uchar)re->first_char, (re->flags & PCRE_FCH_CASELESS) != 0, (re->options & PCRE_FIRSTLINE) != 0); + fast_forward_first_char(common, (pcre_uchar)re->first_char, (re->flags & PCRE_FCH_CASELESS) != 0); else if ((re->flags & PCRE_STARTLINE) != 0) - fast_forward_newline(common, (re->options & PCRE_FIRSTLINE) != 0); - else if ((re->flags & PCRE_STARTLINE) == 0 && study != NULL && (study->flags & PCRE_STUDY_MAPPED) != 0) - fast_forward_start_bits(common, (sljit_uw)study->start_bits, (re->options & PCRE_FIRSTLINE) != 0); + fast_forward_newline(common); + else if (study != NULL && (study->flags & PCRE_STUDY_MAPPED) != 0) + fast_forward_start_bits(common, study->start_bits); } } else @@ -9228,50 +11251,49 @@ if (mode == JIT_COMPILE && study->minlength > 0 && (re->options & PCRE_NO_START_ { OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE_ERROR_NOMATCH); OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(study->minlength)); - minlength_check_failed = CMP(SLJIT_C_GREATER, TMP2, 0, STR_END, 0); + minlength_check_failed = CMP(SLJIT_GREATER, TMP2, 0, STR_END, 0); } if (common->req_char_ptr != 0) reqbyte_notfound = search_requested_char(common, (pcre_uchar)re->req_char, (re->flags & PCRE_RCH_CASELESS) != 0, (re->flags & PCRE_FIRSTSET) != 0); /* Store the current STR_PTR in OVECTOR(0). */ -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0), STR_PTR, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(0), STR_PTR, 0); /* Copy the limit of allowed recursions. */ -OP1(SLJIT_MOV, COUNT_MATCH, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), LIMIT_MATCH); +OP1(SLJIT_MOV, COUNT_MATCH, 0, SLJIT_MEM1(SLJIT_SP), LIMIT_MATCH); if (common->capture_last_ptr != 0) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->capture_last_ptr, SLJIT_IMM, -1); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr, SLJIT_IMM, -1); +if (common->fast_forward_bc_ptr != NULL) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), PRIVATE_DATA(common->fast_forward_bc_ptr + 1), STR_PTR, 0); -if (common->needs_start_ptr) - { - SLJIT_ASSERT(common->start_ptr != OVECTOR(0)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_ptr, STR_PTR, 0); - } -else - SLJIT_ASSERT(common->start_ptr == OVECTOR(0)); +if (common->start_ptr != OVECTOR(0)) + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->start_ptr, STR_PTR, 0); /* Copy the beginning of the string. 
*/ if (mode == JIT_PARTIAL_SOFT_COMPILE) { - jump = CMP(SLJIT_C_NOT_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), common->hit_start, SLJIT_IMM, -1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, STR_PTR, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->hit_start + sizeof(sljit_sw), STR_PTR, 0); + jump = CMP(SLJIT_NOT_EQUAL, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, -1); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->hit_start + sizeof(sljit_sw), STR_PTR, 0); JUMPHERE(jump); } else if (mode == JIT_PARTIAL_HARD_COMPILE) - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0); -compile_matchingpath(common, rootbacktrack.cc, ccend, &rootbacktrack); +compile_matchingpath(common, common->start, ccend, &rootbacktrack); if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler))) { sljit_free_compiler(compiler); - SLJIT_FREE(common->optimized_cbracket); - SLJIT_FREE(common->private_data_ptrs); - if (common->has_then) - SLJIT_FREE(common->then_offsets); + SLJIT_FREE(common->optimized_cbracket, compiler->allocator_data); + SLJIT_FREE(common->private_data_ptrs, compiler->allocator_data); + free_read_only_data(common->read_only_data_head, compiler->allocator_data); return; } -empty_match = CMP(SLJIT_C_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), OVECTOR(0)); -empty_match_found_label = LABEL(); +if (common->might_be_empty) + { + empty_match = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0)); + empty_match_found_label = LABEL(); + } common->accept_label = LABEL(); if (common->accept != NULL) @@ -9295,15 +11317,15 @@ if (mode != JIT_COMPILE) return_with_partial_match(common, common->quit_label); } -empty_match_backtrack_label = LABEL(); +if (common->might_be_empty) + empty_match_backtrack_label = LABEL(); compile_backtrackingpath(common, rootbacktrack.top); if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler))) { sljit_free_compiler(compiler); - SLJIT_FREE(common->optimized_cbracket); - SLJIT_FREE(common->private_data_ptrs); - if (common->has_then) - SLJIT_FREE(common->then_offsets); + SLJIT_FREE(common->optimized_cbracket, compiler->allocator_data); + SLJIT_FREE(common->private_data_ptrs, compiler->allocator_data); + free_read_only_data(common->read_only_data_head, compiler->allocator_data); return; } @@ -9313,28 +11335,33 @@ reset_match_label = LABEL(); if (mode == JIT_PARTIAL_SOFT_COMPILE) { /* Update hit_start only in the first time. */ - jump = CMP(SLJIT_C_NOT_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), common->hit_start, SLJIT_IMM, 0); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_used_ptr, SLJIT_IMM, -1); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), common->hit_start, TMP1, 0); + jump = CMP(SLJIT_NOT_EQUAL, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, SLJIT_IMM, -1); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->hit_start, TMP1, 0); JUMPHERE(jump); } /* Check we have remaining characters. 
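   This is the retry path taken after a failed match attempt. The next
   scan position is reloaded (in 8.45 it may come from the private-data
   slot of the detected fast-forward bytecode instead of a dedicated
   local), and the upper bound is STR_END, or under PCRE_FIRSTLINE the
   saved end of the first line (match_end_ptr). As a sketch of the
   retry test, with illustrative names:

     limit = (options & PCRE_FIRSTLINE) ? first_line_end : subject_end;
     if (str_ptr < limit)
       goto mainloop;   // or ff_newline_shortcut, when one exists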
*/ if ((re->options & PCRE_ANCHORED) == 0 && (re->options & PCRE_FIRSTLINE) != 0) { - SLJIT_ASSERT(common->first_line_end != 0); - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->first_line_end); + SLJIT_ASSERT(common->match_end_ptr != 0); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr); } -OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), common->start_ptr); +OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), + (common->fast_forward_bc_ptr != NULL) ? (PRIVATE_DATA(common->fast_forward_bc_ptr + 1)) : common->start_ptr); if ((re->options & PCRE_ANCHORED) == 0) { - if ((re->options & PCRE_FIRSTLINE) == 0) - CMPTO(SLJIT_C_LESS, STR_PTR, 0, STR_END, 0, mainloop_label); + if (common->ff_newline_shortcut != NULL) + { + if ((re->options & PCRE_FIRSTLINE) == 0) + CMPTO(SLJIT_LESS, STR_PTR, 0, STR_END, 0, common->ff_newline_shortcut); + /* There cannot be more newlines here. */ + } else - CMPTO(SLJIT_C_LESS, STR_PTR, 0, TMP1, 0, mainloop_label); + CMPTO(SLJIT_LESS, STR_PTR, 0, ((re->options & PCRE_FIRSTLINE) == 0) ? STR_END : TMP1, 0, mainloop_label); } /* No more remaining characters. */ @@ -9342,23 +11369,29 @@ if (reqbyte_notfound != NULL) JUMPHERE(reqbyte_notfound); if (mode == JIT_PARTIAL_SOFT_COMPILE) - CMPTO(SLJIT_C_NOT_EQUAL, SLJIT_MEM1(SLJIT_LOCALS_REG), common->hit_start, SLJIT_IMM, -1, common->partialmatchlabel); + CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, -1, common->partialmatchlabel); OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE_ERROR_NOMATCH); JUMPTO(SLJIT_JUMP, common->quit_label); flush_stubs(common); -JUMPHERE(empty_match); -OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); -OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, notempty)); -CMPTO(SLJIT_C_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0, empty_match_backtrack_label); -OP1(SLJIT_MOV_UB, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, notempty_atstart)); -CMPTO(SLJIT_C_EQUAL, TMP2, 0, SLJIT_IMM, 0, empty_match_found_label); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str)); -CMPTO(SLJIT_C_NOT_EQUAL, TMP2, 0, STR_PTR, 0, empty_match_found_label); -JUMPTO(SLJIT_JUMP, empty_match_backtrack_label); +if (common->might_be_empty) + { + JUMPHERE(empty_match); + OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); + OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, notempty)); + CMPTO(SLJIT_NOT_EQUAL, TMP2, 0, SLJIT_IMM, 0, empty_match_backtrack_label); + OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, notempty_atstart)); + CMPTO(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, 0, empty_match_found_label); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str)); + CMPTO(SLJIT_NOT_EQUAL, TMP2, 0, STR_PTR, 0, empty_match_found_label); + JUMPTO(SLJIT_JUMP, empty_match_backtrack_label); + } +common->fast_forward_bc_ptr = NULL; +common->fast_fail_start_ptr = 0; +common->fast_fail_end_ptr = 0; common->currententry = common->entries; common->local_exit = TRUE; quit_label = common->quit_label; @@ -9369,10 +11402,9 @@ while (common->currententry != NULL) if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler))) { sljit_free_compiler(compiler); - SLJIT_FREE(common->optimized_cbracket); - SLJIT_FREE(common->private_data_ptrs); - if (common->has_then) - SLJIT_FREE(common->then_offsets); + SLJIT_FREE(common->optimized_cbracket, compiler->allocator_data); + SLJIT_FREE(common->private_data_ptrs, compiler->allocator_data); + free_read_only_data(common->read_only_data_head, 
compiler->allocator_data); return; } flush_stubs(common); @@ -9385,21 +11417,23 @@ common->quit_label = quit_label; /* This is a (really) rare case. */ set_jumps(common->stackalloc, LABEL()); /* RETURN_ADDR is not a saved register. */ -sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1, TMP2, 0); -OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); -OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack)); -OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, top), STACK_TOP, 0); -OP2(SLJIT_ADD, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, limit), SLJIT_IMM, STACK_GROWTH_RATE); +sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); -sljit_emit_ijump(compiler, SLJIT_CALL2, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_stack_resize)); -jump = CMP(SLJIT_C_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); -OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); -OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack)); -OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, top)); -OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, limit)); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS1); -sljit_emit_fast_return(compiler, SLJIT_MEM1(SLJIT_LOCALS_REG), LOCALS0); +SLJIT_ASSERT(TMP1 == SLJIT_R0 && STACK_TOP == SLJIT_R1); + +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, STACK_TOP, 0); +OP1(SLJIT_MOV, SLJIT_R0, 0, ARGUMENTS, 0); +OP2(SLJIT_SUB, SLJIT_R1, 0, STACK_LIMIT, 0, SLJIT_IMM, STACK_GROWTH_RATE); +OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, stack)); +OP1(SLJIT_MOV, STACK_LIMIT, 0, TMP2, 0); + +sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_stack_resize)); +jump = CMP(SLJIT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); +OP1(SLJIT_MOV, TMP2, 0, STACK_LIMIT, 0); +OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_RETURN_REG, 0); +OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); +OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1); +sljit_emit_fast_return(compiler, TMP1, 0); /* Allocation failed. 
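   Judging by the call sequence above, sljit_stack_resize takes the
   stack descriptor and a requested new lower bound (one
   STACK_GROWTH_RATE below the current STACK_LIMIT) and returns the new
   bound on success or zero on failure; the zero case lands here.
   Assumed contract, as a sketch only:

     new_start = sljit_stack_resize(stack, STACK_LIMIT - STACK_GROWTH_RATE);
     if (new_start == 0)
       goto allocation_failed;    // the path below
     STACK_LIMIT = new_start;     // the stack grew downwards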
*/ JUMPHERE(jump); @@ -9451,19 +11485,22 @@ if (common->reset_match != NULL) { set_jumps(common->reset_match, LABEL()); do_reset_match(common, (re->top_bracket + 1) * 2); - CMPTO(SLJIT_C_GREATER, STR_PTR, 0, TMP1, 0, continue_match_label); + CMPTO(SLJIT_GREATER, STR_PTR, 0, TMP1, 0, continue_match_label); OP1(SLJIT_MOV, STR_PTR, 0, TMP1, 0); JUMPTO(SLJIT_JUMP, reset_match_label); } #ifdef SUPPORT_UTF -#ifndef COMPILE_PCRE32 +#ifdef COMPILE_PCRE8 if (common->utfreadchar != NULL) { set_jumps(common->utfreadchar, LABEL()); do_utfreadchar(common); } -#endif /* !COMPILE_PCRE32 */ -#ifdef COMPILE_PCRE8 +if (common->utfreadchar16 != NULL) + { + set_jumps(common->utfreadchar16, LABEL()); + do_utfreadchar16(common); + } if (common->utfreadtype8 != NULL) { set_jumps(common->utfreadtype8, LABEL()); @@ -9479,16 +11516,23 @@ if (common->getucd != NULL) } #endif -SLJIT_FREE(common->optimized_cbracket); -SLJIT_FREE(common->private_data_ptrs); -if (common->has_then) - SLJIT_FREE(common->then_offsets); +SLJIT_FREE(common->optimized_cbracket, compiler->allocator_data); +SLJIT_FREE(common->private_data_ptrs, compiler->allocator_data); executable_func = sljit_generate_code(compiler); executable_size = sljit_get_generated_code_size(compiler); +label_addr = common->label_addrs; +while (label_addr != NULL) + { + *label_addr->update_addr = sljit_get_label_addr(label_addr->label); + label_addr = label_addr->next; + } sljit_free_compiler(compiler); if (executable_func == NULL) + { + free_read_only_data(common->read_only_data_head, compiler->allocator_data); return; + } /* Reuse the function descriptor if possible. */ if ((extra->flags & PCRE_EXTRA_EXECUTABLE_JIT) != 0 && extra->executable_jit != NULL) @@ -9504,12 +11548,13 @@ else * bit remains set, as the bit indicates that the pointer to the data * is valid.) */ - functions = SLJIT_MALLOC(sizeof(executable_functions)); + functions = SLJIT_MALLOC(sizeof(executable_functions), compiler->allocator_data); if (functions == NULL) { /* This case is highly unlikely since we just recently - freed a lot of memory. Although not impossible. */ + freed a lot of memory. Not impossible though. 
*/ sljit_free_code(executable_func); + free_read_only_data(common->read_only_data_head, compiler->allocator_data); return; } memset(functions, 0, sizeof(executable_functions)); @@ -9520,22 +11565,23 @@ else } functions->executable_funcs[mode] = executable_func; +functions->read_only_data_heads[mode] = common->read_only_data_head; functions->executable_sizes[mode] = executable_size; } -static int jit_machine_stack_exec(jit_arguments *arguments, void* executable_func) +static SLJIT_NOINLINE int jit_machine_stack_exec(jit_arguments *arguments, void *executable_func) { union { - void* executable_func; + void *executable_func; jit_function call_executable_func; } convert_executable_func; -pcre_uint8 local_space[MACHINE_STACK_SIZE]; +sljit_u8 local_space[MACHINE_STACK_SIZE]; struct sljit_stack local_stack; -local_stack.top = (sljit_sw)&local_space; -local_stack.base = local_stack.top; -local_stack.limit = local_stack.base + MACHINE_STACK_SIZE; -local_stack.max_limit = local_stack.limit; +local_stack.min_start = local_space; +local_stack.start = local_space; +local_stack.end = local_space + MACHINE_STACK_SIZE; +local_stack.top = local_space + MACHINE_STACK_SIZE; arguments->stack = &local_stack; convert_executable_func.executable_func = executable_func; return convert_executable_func.call_executable_func(arguments); @@ -9547,7 +11593,7 @@ PRIV(jit_exec)(const PUBL(extra) *extra_data, const pcre_uchar *subject, { executable_functions *functions = (executable_functions *)extra_data->executable_jit; union { - void* executable_func; + void *executable_func; jit_function call_executable_func; } convert_executable_func; jit_arguments arguments; @@ -9569,7 +11615,7 @@ arguments.begin = subject; arguments.end = subject + length; arguments.mark_ptr = NULL; /* JIT decreases this value less frequently than the interpreter. */ -arguments.limit_match = ((extra_data->flags & PCRE_EXTRA_MATCH_LIMIT) == 0) ? MATCH_LIMIT : (pcre_uint32)(extra_data->match_limit); +arguments.limit_match = ((extra_data->flags & PCRE_EXTRA_MATCH_LIMIT) == 0) ? MATCH_LIMIT : (sljit_u32)(extra_data->match_limit); if (functions->limit_match != 0 && functions->limit_match < arguments.limit_match) arguments.limit_match = functions->limit_match; arguments.notbol = (options & PCRE_NOTBOL) != 0; @@ -9634,7 +11680,7 @@ pcre32_jit_exec(const pcre32 *argument_re, const pcre32_extra *extra_data, pcre_uchar *subject_ptr = (pcre_uchar *)subject; executable_functions *functions = (executable_functions *)extra_data->executable_jit; union { - void* executable_func; + void *executable_func; jit_function call_executable_func; } convert_executable_func; jit_arguments arguments; @@ -9652,7 +11698,7 @@ if ((options & PCRE_PARTIAL_HARD) != 0) else if ((options & PCRE_PARTIAL_SOFT) != 0) mode = JIT_PARTIAL_SOFT_COMPILE; -if (functions->executable_funcs[mode] == NULL) +if (functions == NULL || functions->executable_funcs[mode] == NULL) return PCRE_ERROR_JIT_BADOPTION; /* Sanity checks should be handled by pcre_exec. */ @@ -9662,7 +11708,7 @@ arguments.begin = subject_ptr; arguments.end = subject_ptr + length; arguments.mark_ptr = NULL; /* JIT decreases this value less frequently than the interpreter. */ -arguments.limit_match = ((extra_data->flags & PCRE_EXTRA_MATCH_LIMIT) == 0) ? MATCH_LIMIT : (pcre_uint32)(extra_data->match_limit); +arguments.limit_match = ((extra_data->flags & PCRE_EXTRA_MATCH_LIMIT) == 0) ? 
MATCH_LIMIT : (sljit_u32)(extra_data->match_limit); if (functions->limit_match != 0 && functions->limit_match < arguments.limit_match) arguments.limit_match = functions->limit_match; arguments.notbol = (options & PCRE_NOTBOL) != 0; @@ -9706,8 +11752,9 @@ for (i = 0; i < JIT_NUMBER_OF_COMPILE_MODES; i++) { if (functions->executable_funcs[i] != NULL) sljit_free_code(functions->executable_funcs[i]); + free_read_only_data(functions->read_only_data_heads[i], NULL); } -SLJIT_FREE(functions); +SLJIT_FREE(functions, compiler->allocator_data); } int @@ -9744,7 +11791,7 @@ if (startsize > maxsize) startsize = maxsize; startsize = (startsize + STACK_GROWTH_RATE - 1) & ~(STACK_GROWTH_RATE - 1); maxsize = (maxsize + STACK_GROWTH_RATE - 1) & ~(STACK_GROWTH_RATE - 1); -return (PUBL(jit_stack)*)sljit_allocate_stack(startsize, maxsize); +return (PUBL(jit_stack)*)sljit_allocate_stack(startsize, maxsize, NULL); } #if defined COMPILE_PCRE8 @@ -9758,7 +11805,7 @@ PCRE_EXP_DECL void pcre32_jit_stack_free(pcre32_jit_stack *stack) #endif { -sljit_free_stack((struct sljit_stack *)stack); +sljit_free_stack((struct sljit_stack *)stack, NULL); } #if defined COMPILE_PCRE8 diff --git a/src/3rd/pcre/pcreprni.c b/src/3rd/pcre/pcreprni.c index 56653135cf6..7049e3b30ae 100644 --- a/src/3rd/pcre/pcreprni.c +++ b/src/3rd/pcre/pcreprni.c @@ -644,7 +644,9 @@ for(;;) int i; unsigned int min, max; BOOL printmap; + BOOL invertmap = FALSE; pcre_uint8 *map; + pcre_uint8 inverted_map[32]; fprintf(f, " ["); @@ -653,7 +655,12 @@ for(;;) extra = GET(code, 1); ccode = code + LINK_SIZE + 1; printmap = (*ccode & XCL_MAP) != 0; - if ((*ccode++ & XCL_NOT) != 0) fprintf(f, "^"); + if ((*ccode & XCL_NOT) != 0) + { + invertmap = (*ccode & XCL_HASPROP) == 0; + fprintf(f, "^"); + } + ccode++; } else { @@ -666,6 +673,12 @@ for(;;) if (printmap) { map = (pcre_uint8 *)ccode; + if (invertmap) + { + for (i = 0; i < 32; i++) inverted_map[i] = ~map[i]; + map = inverted_map; + } + for (i = 0; i < 256; i++) { if ((map[i/8] & (1 << (i&7))) != 0) diff --git a/src/3rd/pcre/pcrestud.c b/src/3rd/pcre/pcrestud.c index 81a88da1a39..3b30ed2cd19 100644 --- a/src/3rd/pcre/pcrestud.c +++ b/src/3rd/pcre/pcrestud.c @@ -70,7 +70,8 @@ rather than bytes. code pointer to start of group (the bracket) startcode pointer to start of the whole pattern's code options the compiling options - int RECURSE depth + recurses chain of recurse_check to catch mutual recursion + countptr pointer to call count (to catch over complexity) Returns: the minimum length -1 if \C in UTF-8 mode or (*ACCEPT) was encountered @@ -80,15 +81,19 @@ Returns: the minimum length static int find_minlength(const REAL_PCRE *re, const pcre_uchar *code, - const pcre_uchar *startcode, int options, int recurse_depth) + const pcre_uchar *startcode, int options, recurse_check *recurses, + int *countptr) { int length = -1; /* PCRE_UTF16 has the same value as PCRE_UTF8. 
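   Two guards added in this version keep the scan bounded on
   pathological patterns: a call counter that gives up after 1000
   recursive calls, and the 'recurses' chain, a linked list of
   recurse_check frames threaded through the C stack, which detects
   mutual recursion between groups. The idiom, roughly (sketch; the
   struct itself is declared elsewhere in the library):

     recurse_check this_recurse;
     this_recurse.prev = recurses;    // push a frame before descending
     this_recurse.group = cs;
     for (r = recurses; r != NULL; r = r->prev)
       if (r->group == cs) break;     // r != NULL: group already active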
*/ BOOL utf = (options & PCRE_UTF8) != 0; BOOL had_recurse = FALSE; +recurse_check this_recurse; register int branchlength = 0; register pcre_uchar *cc = (pcre_uchar *)code + 1 + LINK_SIZE; +if ((*countptr)++ > 1000) return -1; /* too complex */ + if (*code == OP_CBRA || *code == OP_SCBRA || *code == OP_CBRAPOS || *code == OP_SCBRAPOS) cc += IMM2_SIZE; @@ -130,7 +135,7 @@ for (;;) case OP_SBRAPOS: case OP_ONCE: case OP_ONCE_NC: - d = find_minlength(re, cc, startcode, options, recurse_depth); + d = find_minlength(re, cc, startcode, options, recurses, countptr); if (d < 0) return d; branchlength += d; do cc += GET(cc, 1); while (*cc == OP_ALT); @@ -393,7 +398,7 @@ for (;;) ce = cs = (pcre_uchar *)PRIV(find_bracket)(startcode, utf, GET2(slot, 0)); if (cs == NULL) return -2; do ce += GET(ce, 1); while (*ce == OP_ALT); - if (cc > cs && cc < ce) + if (cc > cs && cc < ce) /* Simple recursion */ { d = 0; had_recurse = TRUE; @@ -401,8 +406,23 @@ for (;;) } else { - int dd = find_minlength(re, cs, startcode, options, recurse_depth); - if (dd < d) d = dd; + recurse_check *r = recurses; + for (r = recurses; r != NULL; r = r->prev) if (r->group == cs) break; + if (r != NULL) /* Mutual recursion */ + { + d = 0; + had_recurse = TRUE; + break; + } + else + { + int dd; + this_recurse.prev = recurses; + this_recurse.group = cs; + dd = find_minlength(re, cs, startcode, options, &this_recurse, + countptr); + if (dd < d) d = dd; + } } slot += re->name_entry_size; } @@ -418,14 +438,27 @@ for (;;) ce = cs = (pcre_uchar *)PRIV(find_bracket)(startcode, utf, GET2(cc, 1)); if (cs == NULL) return -2; do ce += GET(ce, 1); while (*ce == OP_ALT); - if (cc > cs && cc < ce) + if (cc > cs && cc < ce) /* Simple recursion */ { d = 0; had_recurse = TRUE; } else { - d = find_minlength(re, cs, startcode, options, recurse_depth); + recurse_check *r = recurses; + for (r = recurses; r != NULL; r = r->prev) if (r->group == cs) break; + if (r != NULL) /* Mutual recursion */ + { + d = 0; + had_recurse = TRUE; + } + else + { + this_recurse.prev = recurses; + this_recurse.group = cs; + d = find_minlength(re, cs, startcode, options, &this_recurse, + countptr); + } } } else d = 0; @@ -474,12 +507,21 @@ for (;;) case OP_RECURSE: cs = ce = (pcre_uchar *)startcode + GET(cc, 1); do ce += GET(ce, 1); while (*ce == OP_ALT); - if ((cc > cs && cc < ce) || recurse_depth > 10) + if (cc > cs && cc < ce) /* Simple recursion */ had_recurse = TRUE; else { - branchlength += find_minlength(re, cs, startcode, options, - recurse_depth + 1); + recurse_check *r = recurses; + for (r = recurses; r != NULL; r = r->prev) if (r->group == cs) break; + if (r != NULL) /* Mutual recursion */ + had_recurse = TRUE; + else + { + this_recurse.prev = recurses; + this_recurse.group = cs; + branchlength += find_minlength(re, cs, startcode, options, + &this_recurse, countptr); + } } cc += 1 + LINK_SIZE; break; @@ -863,7 +905,6 @@ do case OP_NOTUPTOI: case OP_NOT_HSPACE: case OP_NOT_VSPACE: - case OP_PROP: case OP_PRUNE: case OP_PRUNE_ARG: case OP_RECURSE: @@ -879,11 +920,33 @@ do case OP_SOM: case OP_THEN: case OP_THEN_ARG: -#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 - case OP_XCLASS: -#endif return SSB_FAIL; + /* A "real" property test implies no starting bits, but the fake property + PT_CLIST identifies a list of characters. These lists are short, as they + are used for characters with more than one "other case", so there is no + point in recognizing them for OP_NOTPROP. 
*/ + + case OP_PROP: + if (tcode[1] != PT_CLIST) return SSB_FAIL; + { + const pcre_uint32 *p = PRIV(ucd_caseless_sets) + tcode[2]; + while ((c = *p++) < NOTACHAR) + { +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + if (utf) + { + pcre_uchar buff[6]; + (void)PRIV(ord2utf)(c, buff); + c = buff[0]; + } +#endif + if (c > 0xff) SET_BIT(0xff); else SET_BIT(c); + } + } + try_next = FALSE; + break; + /* We can ignore word boundary tests. */ case OP_WORD_BOUNDARY: @@ -1109,24 +1172,17 @@ do try_next = FALSE; break; - /* The cbit_space table has vertical tab as whitespace; we have to - ensure it is set as not whitespace. Luckily, the code value is the same - (0x0b) in ASCII and EBCDIC, so we can just adjust the appropriate bit. */ + /* The cbit_space table has vertical tab as whitespace; we no longer + have to play fancy tricks because Perl added VT to its whitespace at + release 5.18. PCRE added it at release 8.34. */ case OP_NOT_WHITESPACE: set_nottype_bits(start_bits, cbit_space, table_limit, cd); - start_bits[1] |= 0x08; try_next = FALSE; break; - /* The cbit_space table has vertical tab as whitespace; we have to not - set it from the table. Luckily, the code value is the same (0x0b) in - ASCII and EBCDIC, so we can just adjust the appropriate bit. */ - case OP_WHITESPACE: - c = start_bits[1]; /* Save in case it was already set */ set_type_bits(start_bits, cbit_space, table_limit, cd); - start_bits[1] = (start_bits[1] & ~0x08) | c; try_next = FALSE; break; @@ -1257,6 +1313,16 @@ do with a value >= 0xc4 is a potentially valid starter because it starts a character with a value > 255. */ +#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 + case OP_XCLASS: + if ((tcode[1 + LINK_SIZE] & XCL_HASPROP) != 0) + return SSB_FAIL; + /* All bits are set. */ + if ((tcode[1 + LINK_SIZE] & XCL_MAP) == 0 && (tcode[1 + LINK_SIZE] & XCL_NOT) != 0) + return SSB_FAIL; +#endif + /* Fall through */ + case OP_NCLASS: #if defined SUPPORT_UTF && defined COMPILE_PCRE8 if (utf) @@ -1273,8 +1339,21 @@ do case OP_CLASS: { pcre_uint8 *map; - tcode++; - map = (pcre_uint8 *)tcode; +#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 + map = NULL; + if (*tcode == OP_XCLASS) + { + if ((tcode[1 + LINK_SIZE] & XCL_MAP) != 0) + map = (pcre_uint8 *)(tcode + 1 + LINK_SIZE + 1); + tcode += GET(tcode, 1); + } + else +#endif + { + tcode++; + map = (pcre_uint8 *)tcode; + tcode += 32 / sizeof(pcre_uchar); + } /* In UTF-8 mode, the bits in a bit map correspond to character values, not to byte values. However, the bit map we are constructing is @@ -1282,31 +1361,35 @@ do value is > 127. In fact, there are only two possible starting bytes for characters in the range 128 - 255. */ -#if defined SUPPORT_UTF && defined COMPILE_PCRE8 - if (utf) +#if defined SUPPORT_UTF || !defined COMPILE_PCRE8 + if (map != NULL) +#endif { - for (c = 0; c < 16; c++) start_bits[c] |= map[c]; - for (c = 128; c < 256; c++) +#if defined SUPPORT_UTF && defined COMPILE_PCRE8 + if (utf) { - if ((map[c/8] && (1 << (c&7))) != 0) + for (c = 0; c < 16; c++) start_bits[c] |= map[c]; + for (c = 128; c < 256; c++) { - int d = (c >> 6) | 0xc0; /* Set bit for this starter */ - start_bits[d/8] |= (1 << (d&7)); /* and then skip on to the */ - c = (c & 0xc0) + 0x40 - 1; /* next relevant character. */ + if ((map[c/8] & (1 << (c&7))) != 0) + { + int d = (c >> 6) | 0xc0; /* Set bit for this starter */ + start_bits[d/8] |= (1 << (d&7)); /* and then skip on to the */ + c = (c & 0xc0) + 0x40 - 1; /* next relevant character. 
*/ + } } } - } - else + else #endif - { - /* In non-UTF-8 mode, the two bit maps are completely compatible. */ - for (c = 0; c < 32; c++) start_bits[c] |= map[c]; + { + /* In non-UTF-8 mode, the two bit maps are completely compatible. */ + for (c = 0; c < 32; c++) start_bits[c] |= map[c]; + } } /* Advance past the bit map, and act on what follows. For a zero minimum repeat, continue; otherwise stop processing. */ - tcode += 32 / sizeof(pcre_uchar); switch (*tcode) { case OP_CRSTAR: @@ -1376,6 +1459,7 @@ pcre32_study(const pcre32 *external_re, int options, const char **errorptr) #endif { int min; +int count = 0; BOOL bits_set = FALSE; pcre_uint8 start_bits[32]; PUBL(extra) *extra = NULL; @@ -1462,7 +1546,7 @@ if ((re->options & PCRE_ANCHORED) == 0 && /* Find the minimum length of subject string. */ -switch(min = find_minlength(re, code, code, re->options, 0)) +switch(min = find_minlength(re, code, code, re->options, NULL, &count)) { case -2: *errorptr = "internal error: missing capturing bracket"; return NULL; case -3: *errorptr = "internal error: opcode not recognized"; return NULL; diff --git a/src/3rd/pcre/pcretabs.c b/src/3rd/pcre/pcretabs.c index cdb97126155..4c5e5421efe 100644 --- a/src/3rd/pcre/pcretabs.c +++ b/src/3rd/pcre/pcretabs.c @@ -6,7 +6,7 @@ and semantics are as close as possible to those of the Perl 5 language. Written by Philip Hazel - Copyright (c) 1997-2012 University of Cambridge + Copyright (c) 1997-2017 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -161,7 +161,7 @@ const pcre_uint32 PRIV(ucp_gbtable[]) = { (1<chartype] == ucp_P || - (c < 256 && PRIV(ucp_gentype)[prop->chartype] == ucp_S)) == isprop) + (c < 128 && PRIV(ucp_gentype)[prop->chartype] == ucp_S)) == isprop) return !negated; break; diff --git a/src/3rd/pcre/sjarm32.c b/src/3rd/pcre/sjarm32.c new file mode 100644 index 00000000000..8da0d09351f --- /dev/null +++ b/src/3rd/pcre/sjarm32.c @@ -0,0 +1,2734 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifdef __SOFTFP__
+#define ARM_ABI_INFO " ABI:softfp"
+#else
+#define ARM_ABI_INFO " ABI:hardfp"
+#endif
+
+SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
+{
+#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+ return "ARMv7" SLJIT_CPUINFO ARM_ABI_INFO;
+#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+ return "ARMv5" SLJIT_CPUINFO ARM_ABI_INFO;
+#else
+#error "Internal error: Unknown ARM architecture"
+#endif
+}
+
+/* Last register + 1. */
+#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
+#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
+#define TMP_PC (SLJIT_NUMBER_OF_REGISTERS + 4)
+
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
+
+/* In ARM instruction words.
+ Cache lines are usually 32 byte aligned. */
+#define CONST_POOL_ALIGNMENT 8
+#define CONST_POOL_EMPTY 0xffffffff
+
+#define ALIGN_INSTRUCTION(ptr) \
+ (sljit_uw*)(((sljit_uw)(ptr) + (CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1) & ~((CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1))
+#define MAX_DIFFERENCE(max_diff) \
+ (((max_diff) / (sljit_s32)sizeof(sljit_uw)) - (CONST_POOL_ALIGNMENT - 1))
+
+/* See sljit_emit_enter and sljit_emit_op0 if you want to change them. */
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
+ 0, 0, 1, 2, 3, 11, 10, 9, 8, 7, 6, 5, 4, 13, 12, 14, 15
+};
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+ 0, 0, 1, 2, 3, 4, 5, 6, 7
+};
+
+#define RM(rm) (reg_map[rm])
+#define RD(rd) (reg_map[rd] << 12)
+#define RN(rn) (reg_map[rn] << 16)
+
+/* --------------------------------------------------------------------- */
+/* Instruction forms */
+/* --------------------------------------------------------------------- */
+
+/* The instruction includes the AL condition.
+ INST_NAME - CONDITIONAL removes this flag. */
+#define COND_MASK 0xf0000000
+#define CONDITIONAL 0xe0000000
+#define PUSH_POOL 0xff000000
+
+#define ADC 0xe0a00000
+#define ADD 0xe0800000
+#define AND 0xe0000000
+#define B 0xea000000
+#define BIC 0xe1c00000
+#define BL 0xeb000000
+#define BLX 0xe12fff30
+#define BX 0xe12fff10
+#define CLZ 0xe16f0f10
+#define CMN 0xe1600000
+#define CMP 0xe1400000
+#define BKPT 0xe1200070
+#define EOR 0xe0200000
+#define MOV 0xe1a00000
+#define MUL 0xe0000090
+#define MVN 0xe1e00000
+#define NOP 0xe1a00000
+#define ORR 0xe1800000
+#define PUSH 0xe92d0000
+#define POP 0xe8bd0000
+#define RSB 0xe0600000
+#define RSC 0xe0e00000
+#define SBC 0xe0c00000
+#define SMULL 0xe0c00090
+#define SUB 0xe0400000
+#define UMULL 0xe0800090
+#define VABS_F32 0xeeb00ac0
+#define VADD_F32 0xee300a00
+#define VCMP_F32 0xeeb40a40
+#define VCVT_F32_S32 0xeeb80ac0
+#define VCVT_F64_F32 0xeeb70ac0
+#define VCVT_S32_F32 0xeebd0ac0
+#define VDIV_F32 0xee800a00
+#define VMOV_F32 0xeeb00a40
+#define VMOV 0xee000a10
+#define VMOV2 0xec400a10
+#define VMRS 0xeef1fa10
+#define VMUL_F32 0xee200a00
+#define VNEG_F32 0xeeb10a40
+#define VSTR_F32 0xed000a00
+#define VSUB_F32 0xee300a40
+
+#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+/* Arm v7 specific instructions. */
+#define MOVW 0xe3000000
+#define MOVT 0xe3400000
+#define SXTB 0xe6af0070
+#define SXTH 0xe6bf0070
+#define UXTB 0xe6ef0070
+#define UXTH 0xe6ff0070
+#endif
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+
+static sljit_s32 push_cpool(struct sljit_compiler *compiler)
+{
+ /* Pushing the constant pool into the instruction stream.
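   Background, for readers new to this backend: ARMv5 has no movw/movt
   pair, so 32-bit constants are loaded with pc-relative ldr
   instructions, whose immediate offset is at most 4095 bytes.
   cpool_diff tracks the distance to the oldest pending literal, and
   the pool is flushed before any such load could fall out of range;
   the 4092 and 4088 values fed to MAX_DIFFERENCE leave a margin of one
   or two instructions. A flushed pool looks like this in the
   instruction stream (sketch):

     0xff000000 | fill    // PUSH_POOL marker, consumed in pass 2
     0, 0, ...            // padding up to CONST_POOL_ALIGNMENT words
     literal[0] ... literal[fill - 1]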
*/
+ sljit_uw* inst;
+ sljit_uw* cpool_ptr;
+ sljit_uw* cpool_end;
+ sljit_s32 i;
+
+ /* The label could point to the address after the constant pool. */
+ if (compiler->last_label && compiler->last_label->size == compiler->size)
+ compiler->last_label->size += compiler->cpool_fill + (CONST_POOL_ALIGNMENT - 1) + 1;
+
+ SLJIT_ASSERT(compiler->cpool_fill > 0 && compiler->cpool_fill <= CPOOL_SIZE);
+ inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ FAIL_IF(!inst);
+ compiler->size++;
+ *inst = 0xff000000 | compiler->cpool_fill;
+
+ for (i = 0; i < CONST_POOL_ALIGNMENT - 1; i++) {
+ inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ FAIL_IF(!inst);
+ compiler->size++;
+ *inst = 0;
+ }
+
+ cpool_ptr = compiler->cpool;
+ cpool_end = cpool_ptr + compiler->cpool_fill;
+ while (cpool_ptr < cpool_end) {
+ inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ FAIL_IF(!inst);
+ compiler->size++;
+ *inst = *cpool_ptr++;
+ }
+ compiler->cpool_diff = CONST_POOL_EMPTY;
+ compiler->cpool_fill = 0;
+ return SLJIT_SUCCESS;
+}
+
+static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_uw inst)
+{
+ sljit_uw* ptr;
+
+ if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)))
+ FAIL_IF(push_cpool(compiler));
+
+ ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ FAIL_IF(!ptr);
+ compiler->size++;
+ *ptr = inst;
+ return SLJIT_SUCCESS;
+}
+
+static sljit_s32 push_inst_with_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
+{
+ sljit_uw* ptr;
+ sljit_uw cpool_index = CPOOL_SIZE;
+ sljit_uw* cpool_ptr;
+ sljit_uw* cpool_end;
+ sljit_u8* cpool_unique_ptr;
+
+ if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)))
+ FAIL_IF(push_cpool(compiler));
+ else if (compiler->cpool_fill > 0) {
+ cpool_ptr = compiler->cpool;
+ cpool_end = cpool_ptr + compiler->cpool_fill;
+ cpool_unique_ptr = compiler->cpool_unique;
+ do {
+ if ((*cpool_ptr == literal) && !(*cpool_unique_ptr)) {
+ cpool_index = cpool_ptr - compiler->cpool;
+ break;
+ }
+ cpool_ptr++;
+ cpool_unique_ptr++;
+ } while (cpool_ptr < cpool_end);
+ }
+
+ if (cpool_index == CPOOL_SIZE) {
+ /* Must allocate a new entry in the literal pool. */
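
/* A note on the dedup scan above: identical, non-unique literals share
   one pool slot, roughly (sketch only):

     for (i = 0; i < cpool_fill; i++)
       if (cpool[i] == literal && !cpool_unique[i])
         return i;                      // reuse the existing slot

   Only when no equal entry exists, or the pool is full and has to be
   flushed, is a fresh slot taken below. */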
+ if (compiler->cpool_fill < CPOOL_SIZE) {
+ cpool_index = compiler->cpool_fill;
+ compiler->cpool_fill++;
+ }
+ else {
+ FAIL_IF(push_cpool(compiler));
+ cpool_index = 0;
+ compiler->cpool_fill = 1;
+ }
+ }
+
+ SLJIT_ASSERT((inst & 0xfff) == 0);
+ ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ FAIL_IF(!ptr);
+ compiler->size++;
+ *ptr = inst | cpool_index;
+
+ compiler->cpool[cpool_index] = literal;
+ compiler->cpool_unique[cpool_index] = 0;
+ if (compiler->cpool_diff == CONST_POOL_EMPTY)
+ compiler->cpool_diff = compiler->size;
+ return SLJIT_SUCCESS;
+}
+
+static sljit_s32 push_inst_with_unique_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
+{
+ sljit_uw* ptr;
+ if (SLJIT_UNLIKELY((compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)) || compiler->cpool_fill >= CPOOL_SIZE))
+ FAIL_IF(push_cpool(compiler));
+
+ SLJIT_ASSERT(compiler->cpool_fill < CPOOL_SIZE && (inst & 0xfff) == 0);
+ ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ FAIL_IF(!ptr);
+ compiler->size++;
+ *ptr = inst | compiler->cpool_fill;
+
+ compiler->cpool[compiler->cpool_fill] = literal;
+ compiler->cpool_unique[compiler->cpool_fill] = 1;
+ compiler->cpool_fill++;
+ if (compiler->cpool_diff == CONST_POOL_EMPTY)
+ compiler->cpool_diff = compiler->size;
+ return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_s32 prepare_blx(struct sljit_compiler *compiler)
+{
+ /* Place for at least two instructions (doesn't matter whether the first has a literal). */
+ if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4088)))
+ return push_cpool(compiler);
+ return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_s32 emit_blx(struct sljit_compiler *compiler)
+{
+ /* Must closely follow the previous instruction (to be able to convert it to a bl instruction). */
+ SLJIT_ASSERT(compiler->cpool_diff == CONST_POOL_EMPTY || compiler->size - compiler->cpool_diff < MAX_DIFFERENCE(4092));
+ SLJIT_ASSERT(reg_map[TMP_REG1] != 14);
+
+ return push_inst(compiler, BLX | RM(TMP_REG1));
+}
+
+static sljit_uw patch_pc_relative_loads(sljit_uw *last_pc_patch, sljit_uw *code_ptr, sljit_uw* const_pool, sljit_uw cpool_size)
+{
+ sljit_uw diff;
+ sljit_uw ind;
+ sljit_uw counter = 0;
+ sljit_uw* clear_const_pool = const_pool;
+ sljit_uw* clear_const_pool_end = const_pool + cpool_size;
+
+ SLJIT_ASSERT(const_pool - code_ptr <= CONST_POOL_ALIGNMENT);
+ /* Set unused flag for all literals in the constant pool.
+ I.e.: unused literals can belong to branches, which can be encoded as B or BL.
+ We can "compress" the constant pool by discarding these literals. */
+ while (clear_const_pool < clear_const_pool_end)
+ *clear_const_pool++ = (sljit_uw)(-1);
+
+ while (last_pc_patch < code_ptr) {
+ /* Data transfer instruction with Rn == r15. */
+ if ((*last_pc_patch & 0x0c0f0000) == 0x040f0000) {
+ diff = const_pool - last_pc_patch;
+ ind = (*last_pc_patch) & 0xfff;
+
+ /* Must be a load instruction with immediate offset. */
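
/* Pool compression: patch_pc_relative_loads above marked every slot
   unused (-1), and only literals actually referenced by a pc-relative
   load receive a fresh dense index from 'counter'; literals that
   belonged to branches already rewritten as B/BL simply disappear.
   The patched byte offset is then

     offset = (diff + ind - 2) << 2;    // words to bytes

   where diff and ind are in words and the -2 accounts for the ARM pc
   reading two words (8 bytes) ahead. The corner case diff == 1 with
   ind == 0 would be word -1; the code below encodes it by clearing the
   U bit (1 << 23) and using offset 4, i.e. ldr [pc, #-4]. */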
+ SLJIT_ASSERT(ind < cpool_size && !(*last_pc_patch & (1 << 25)) && (*last_pc_patch & (1 << 20)));
+ if ((sljit_s32)const_pool[ind] < 0) {
+ const_pool[ind] = counter;
+ ind = counter;
+ counter++;
+ }
+ else
+ ind = const_pool[ind];
+
+ SLJIT_ASSERT(diff >= 1);
+ if (diff >= 2 || ind > 0) {
+ diff = (diff + ind - 2) << 2;
+ SLJIT_ASSERT(diff <= 0xfff);
+ *last_pc_patch = (*last_pc_patch & ~0xfff) | diff;
+ }
+ else
+ *last_pc_patch = (*last_pc_patch & ~(0xfff | (1 << 23))) | 0x004;
+ }
+ last_pc_patch++;
+ }
+ return counter;
+}
+
+/* In some rare occasions we may need future patches. The probability is close to 0 in practice. */
+struct future_patch {
+ struct future_patch* next;
+ sljit_s32 index;
+ sljit_s32 value;
+};
+
+static sljit_s32 resolve_const_pool_index(struct sljit_compiler *compiler, struct future_patch **first_patch, sljit_uw cpool_current_index, sljit_uw *cpool_start_address, sljit_uw *buf_ptr)
+{
+ sljit_s32 value;
+ struct future_patch *curr_patch, *prev_patch;
+
+ SLJIT_UNUSED_ARG(compiler);
+
+ /* Using the values generated by patch_pc_relative_loads. */
+ if (!*first_patch)
+ value = (sljit_s32)cpool_start_address[cpool_current_index];
+ else {
+ curr_patch = *first_patch;
+ prev_patch = NULL;
+ while (1) {
+ if (!curr_patch) {
+ value = (sljit_s32)cpool_start_address[cpool_current_index];
+ break;
+ }
+ if ((sljit_uw)curr_patch->index == cpool_current_index) {
+ value = curr_patch->value;
+ if (prev_patch)
+ prev_patch->next = curr_patch->next;
+ else
+ *first_patch = curr_patch->next;
+ SLJIT_FREE(curr_patch, compiler->allocator_data);
+ break;
+ }
+ prev_patch = curr_patch;
+ curr_patch = curr_patch->next;
+ }
+ }
+
+ if (value >= 0) {
+ if ((sljit_uw)value > cpool_current_index) {
+ curr_patch = (struct future_patch*)SLJIT_MALLOC(sizeof(struct future_patch), compiler->allocator_data);
+ if (!curr_patch) {
+ while (*first_patch) {
+ curr_patch = *first_patch;
+ *first_patch = (*first_patch)->next;
+ SLJIT_FREE(curr_patch, compiler->allocator_data);
+ }
+ return SLJIT_ERR_ALLOC_FAILED;
+ }
+ curr_patch->next = *first_patch;
+ curr_patch->index = value;
+ curr_patch->value = cpool_start_address[value];
+ *first_patch = curr_patch;
+ }
+ cpool_start_address[value] = *buf_ptr;
+ }
+ return SLJIT_SUCCESS;
+}
+
+#else
+
+static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_uw inst)
+{
+ sljit_uw* ptr;
+
+ ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+ FAIL_IF(!ptr);
+ compiler->size++;
+ *ptr = inst;
+ return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_s32 emit_imm(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
+{
+ FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff)));
+ return push_inst(compiler, MOVT | RD(reg) | ((imm >> 12) & 0xf0000) | ((imm >> 16) & 0xfff));
+}
+
+#endif
+
+static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_uw *code_ptr, sljit_uw *code, sljit_sw executable_offset)
+{
+ sljit_sw diff;
+
+ if (jump->flags & SLJIT_REWRITABLE_JUMP)
+ return 0;
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+ if (jump->flags & IS_BL)
+ code_ptr--;
+
+ if (jump->flags & JUMP_ADDR)
+ diff = ((sljit_sw)jump->u.target - (sljit_sw)(code_ptr + 2) - executable_offset);
+ else {
+ SLJIT_ASSERT(jump->flags & JUMP_LABEL);
+ diff = ((sljit_sw)(code + jump->u.label->size) - (sljit_sw)(code_ptr + 2));
+ }
+
+ /* Branch to Thumb code has not been optimized yet.
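   The range test below corresponds to the 24-bit signed word offset of
   the ARM B/BL encodings, i.e. +/-32MB from pc (which is two words
   ahead, hence the code_ptr + 2 above). A byte difference with either
   low bit set would need interworking, so it is left to the generic,
   constant-based jump path. Patch sketch (illustration only):

     insn = (insn & COND_MASK) | (B - CONDITIONAL)
          | (((sljit_uw)diff >> 2) & 0x00ffffff);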
*/ + if (diff & 0x3) + return 0; + + if (jump->flags & IS_BL) { + if (diff <= 0x01ffffff && diff >= -0x02000000) { + *code_ptr = (BL - CONDITIONAL) | (*(code_ptr + 1) & COND_MASK); + jump->flags |= PATCH_B; + return 1; + } + } + else { + if (diff <= 0x01ffffff && diff >= -0x02000000) { + *code_ptr = (B - CONDITIONAL) | (*code_ptr & COND_MASK); + jump->flags |= PATCH_B; + } + } +#else + if (jump->flags & JUMP_ADDR) + diff = ((sljit_sw)jump->u.target - (sljit_sw)code_ptr - executable_offset); + else { + SLJIT_ASSERT(jump->flags & JUMP_LABEL); + diff = ((sljit_sw)(code + jump->u.label->size) - (sljit_sw)code_ptr); + } + + /* Branch to Thumb code has not been optimized yet. */ + if (diff & 0x3) + return 0; + + if (diff <= 0x01ffffff && diff >= -0x02000000) { + code_ptr -= 2; + *code_ptr = ((jump->flags & IS_BL) ? (BL - CONDITIONAL) : (B - CONDITIONAL)) | (code_ptr[2] & COND_MASK); + jump->flags |= PATCH_B; + return 1; + } +#endif + return 0; +} + +static SLJIT_INLINE void inline_set_jump_addr(sljit_uw jump_ptr, sljit_sw executable_offset, sljit_uw new_addr, sljit_s32 flush_cache) +{ +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + sljit_uw *ptr = (sljit_uw *)jump_ptr; + sljit_uw *inst = (sljit_uw *)ptr[0]; + sljit_uw mov_pc = ptr[1]; + sljit_s32 bl = (mov_pc & 0x0000f000) != RD(TMP_PC); + sljit_sw diff = (sljit_sw)(((sljit_sw)new_addr - (sljit_sw)(inst + 2) - executable_offset) >> 2); + + if (diff <= 0x7fffff && diff >= -0x800000) { + /* Turn to branch. */ + if (!bl) { + inst[0] = (mov_pc & COND_MASK) | (B - CONDITIONAL) | (diff & 0xffffff); + if (flush_cache) { + inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 1); + } + } else { + inst[0] = (mov_pc & COND_MASK) | (BL - CONDITIONAL) | (diff & 0xffffff); + inst[1] = NOP; + if (flush_cache) { + inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 2); + } + } + } else { + /* Get the position of the constant. 
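   mov_pc here is the original pc-relative ldr that loads the jump
   target. Bit 23 is its U (add) bit: when set, the literal lies at the
   encoded byte offset ahead of pc, so the arithmetic below is
   inst + (offset >> 2) + 2 words, the +2 again being the pc lead; when
   clear, the only form ever emitted is ldr [pc, #-4], whose literal is
   the word at inst + 1.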
*/ + if (mov_pc & (1 << 23)) + ptr = inst + ((mov_pc & 0xfff) >> 2) + 2; + else + ptr = inst + 1; + + if (*inst != mov_pc) { + inst[0] = mov_pc; + if (!bl) { + if (flush_cache) { + inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 1); + } + } else { + inst[1] = BLX | RM(TMP_REG1); + if (flush_cache) { + inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 2); + } + } + } + *ptr = new_addr; + } +#else + sljit_uw *inst = (sljit_uw*)jump_ptr; + SLJIT_ASSERT((inst[0] & 0xfff00000) == MOVW && (inst[1] & 0xfff00000) == MOVT); + inst[0] = MOVW | (inst[0] & 0xf000) | ((new_addr << 4) & 0xf0000) | (new_addr & 0xfff); + inst[1] = MOVT | (inst[1] & 0xf000) | ((new_addr >> 12) & 0xf0000) | ((new_addr >> 16) & 0xfff); + if (flush_cache) { + inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 2); + } +#endif +} + +static sljit_uw get_imm(sljit_uw imm); + +static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw executable_offset, sljit_sw new_constant, sljit_s32 flush_cache) +{ +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + sljit_uw *ptr = (sljit_uw*)addr; + sljit_uw *inst = (sljit_uw*)ptr[0]; + sljit_uw ldr_literal = ptr[1]; + sljit_uw src2; + + src2 = get_imm(new_constant); + if (src2) { + *inst = 0xe3a00000 | (ldr_literal & 0xf000) | src2; + if (flush_cache) { + inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 1); + } + return; + } + + src2 = get_imm(~new_constant); + if (src2) { + *inst = 0xe3e00000 | (ldr_literal & 0xf000) | src2; + if (flush_cache) { + inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 1); + } + return; + } + + if (ldr_literal & (1 << 23)) + ptr = inst + ((ldr_literal & 0xfff) >> 2) + 2; + else + ptr = inst + 1; + + if (*inst != ldr_literal) { + *inst = ldr_literal; + if (flush_cache) { + inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 1); + } + } + *ptr = new_constant; +#else + sljit_uw *inst = (sljit_uw*)addr; + SLJIT_ASSERT((inst[0] & 0xfff00000) == MOVW && (inst[1] & 0xfff00000) == MOVT); + inst[0] = MOVW | (inst[0] & 0xf000) | ((new_constant << 4) & 0xf0000) | (new_constant & 0xfff); + inst[1] = MOVT | (inst[1] & 0xf000) | ((new_constant >> 12) & 0xf0000) | ((new_constant >> 16) & 0xfff); + if (flush_cache) { + inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 2); + } +#endif +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + sljit_uw *code; + sljit_uw *code_ptr; + sljit_uw *buf_ptr; + sljit_uw *buf_end; + sljit_uw size; + sljit_uw word_count; + sljit_uw next_addr; + sljit_sw executable_offset; + sljit_sw addr; +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + sljit_uw cpool_size; + sljit_uw cpool_skip_alignment; + sljit_uw cpool_current_index; + sljit_uw *cpool_start_address; + sljit_uw *last_pc_patch; + struct future_patch *first_patch; +#endif + + struct sljit_label *label; + struct sljit_jump *jump; + struct sljit_const *const_; + struct sljit_put_label *put_label; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_generate_code(compiler)); + reverse_buf(compiler); + + /* Second code generation pass. 
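   The buffer allocated below is sized for the worst case: on ARMv5
   every recorded patch (rewritable jump or constant) may need two
   extra words appended after the code, the address of its instruction
   and the original instruction word, hence 'patches << 1'; a still
   unflushed constant pool costs its fill plus alignment padding.
   Sketch of the v5 accounting:

     size = compiler->size + (compiler->patches << 1);
     if (compiler->cpool_fill > 0)
       size += compiler->cpool_fill + CONST_POOL_ALIGNMENT - 1;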
*/ +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + size = compiler->size + (compiler->patches << 1); + if (compiler->cpool_fill > 0) + size += compiler->cpool_fill + CONST_POOL_ALIGNMENT - 1; +#else + size = compiler->size; +#endif + code = (sljit_uw*)SLJIT_MALLOC_EXEC(size * sizeof(sljit_uw)); + PTR_FAIL_WITH_EXEC_IF(code); + buf = compiler->buf; + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + cpool_size = 0; + cpool_skip_alignment = 0; + cpool_current_index = 0; + cpool_start_address = NULL; + first_patch = NULL; + last_pc_patch = code; +#endif + + code_ptr = code; + word_count = 0; + next_addr = 1; + executable_offset = SLJIT_EXEC_OFFSET(code); + + label = compiler->labels; + jump = compiler->jumps; + const_ = compiler->consts; + put_label = compiler->put_labels; + + if (label && label->size == 0) { + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code, executable_offset); + label = label->next; + } + + do { + buf_ptr = (sljit_uw*)buf->memory; + buf_end = buf_ptr + (buf->used_size >> 2); + do { + word_count++; +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (cpool_size > 0) { + if (cpool_skip_alignment > 0) { + buf_ptr++; + cpool_skip_alignment--; + } + else { + if (SLJIT_UNLIKELY(resolve_const_pool_index(compiler, &first_patch, cpool_current_index, cpool_start_address, buf_ptr))) { + SLJIT_FREE_EXEC(code); + compiler->error = SLJIT_ERR_ALLOC_FAILED; + return NULL; + } + buf_ptr++; + if (++cpool_current_index >= cpool_size) { + SLJIT_ASSERT(!first_patch); + cpool_size = 0; + if (label && label->size == word_count) { + /* Points after the current instruction. */ + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + label->size = code_ptr - code; + label = label->next; + + next_addr = compute_next_addr(label, jump, const_, put_label); + } + } + } + } + else if ((*buf_ptr & 0xff000000) != PUSH_POOL) { +#endif + *code_ptr = *buf_ptr++; + if (next_addr == word_count) { + SLJIT_ASSERT(!label || label->size >= word_count); + SLJIT_ASSERT(!jump || jump->addr >= word_count); + SLJIT_ASSERT(!const_ || const_->addr >= word_count); + SLJIT_ASSERT(!put_label || put_label->addr >= word_count); + + /* These structures are ordered by their address. */ + if (jump && jump->addr == word_count) { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (detect_jump_type(jump, code_ptr, code, executable_offset)) + code_ptr--; + jump->addr = (sljit_uw)code_ptr; +#else + jump->addr = (sljit_uw)(code_ptr - 2); + if (detect_jump_type(jump, code_ptr, code, executable_offset)) + code_ptr -= 2; +#endif + jump = jump->next; + } + if (label && label->size == word_count) { + /* code_ptr can be affected above. */ + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr + 1, executable_offset); + label->size = (code_ptr + 1) - code; + label = label->next; + } + if (const_ && const_->addr == word_count) { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + const_->addr = (sljit_uw)code_ptr; +#else + const_->addr = (sljit_uw)(code_ptr - 1); +#endif + const_ = const_->next; + } + if (put_label && put_label->addr == word_count) { + SLJIT_ASSERT(put_label->label); + put_label->addr = (sljit_uw)code_ptr; + put_label = put_label->next; + } + next_addr = compute_next_addr(label, jump, const_, put_label); + } + code_ptr++; +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + } + else { + /* Fortunately, no need to shift. 
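   This arm decodes a PUSH_POOL marker from the first pass: the low
   bits carry the literal count, the pool data goes to the next
   CONST_POOL_ALIGNMENT boundary, and when compression leaves any live
   literal an unconditional branch is planted so execution skips the
   data words:

     cpool_size = *buf_ptr++ & ~PUSH_POOL;   // number of literals
     // B over the pool; target is pool start + surviving literals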
*/ + cpool_size = *buf_ptr++ & ~PUSH_POOL; + SLJIT_ASSERT(cpool_size > 0); + cpool_start_address = ALIGN_INSTRUCTION(code_ptr + 1); + cpool_current_index = patch_pc_relative_loads(last_pc_patch, code_ptr, cpool_start_address, cpool_size); + if (cpool_current_index > 0) { + /* Unconditional branch. */ + *code_ptr = B | (((cpool_start_address - code_ptr) + cpool_current_index - 2) & ~PUSH_POOL); + code_ptr = cpool_start_address + cpool_current_index; + } + cpool_skip_alignment = CONST_POOL_ALIGNMENT - 1; + cpool_current_index = 0; + last_pc_patch = code_ptr; + } +#endif + } while (buf_ptr < buf_end); + buf = buf->next; + } while (buf); + + SLJIT_ASSERT(!label); + SLJIT_ASSERT(!jump); + SLJIT_ASSERT(!const_); + SLJIT_ASSERT(!put_label); + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + SLJIT_ASSERT(cpool_size == 0); + if (compiler->cpool_fill > 0) { + cpool_start_address = ALIGN_INSTRUCTION(code_ptr); + cpool_current_index = patch_pc_relative_loads(last_pc_patch, code_ptr, cpool_start_address, compiler->cpool_fill); + if (cpool_current_index > 0) + code_ptr = cpool_start_address + cpool_current_index; + + buf_ptr = compiler->cpool; + buf_end = buf_ptr + compiler->cpool_fill; + cpool_current_index = 0; + while (buf_ptr < buf_end) { + if (SLJIT_UNLIKELY(resolve_const_pool_index(compiler, &first_patch, cpool_current_index, cpool_start_address, buf_ptr))) { + SLJIT_FREE_EXEC(code); + compiler->error = SLJIT_ERR_ALLOC_FAILED; + return NULL; + } + buf_ptr++; + cpool_current_index++; + } + SLJIT_ASSERT(!first_patch); + } +#endif + + jump = compiler->jumps; + while (jump) { + buf_ptr = (sljit_uw *)jump->addr; + + if (jump->flags & PATCH_B) { + addr = (sljit_sw)SLJIT_ADD_EXEC_OFFSET(buf_ptr + 2, executable_offset); + if (!(jump->flags & JUMP_ADDR)) { + SLJIT_ASSERT(jump->flags & JUMP_LABEL); + SLJIT_ASSERT(((sljit_sw)jump->u.label->addr - addr) <= 0x01ffffff && ((sljit_sw)jump->u.label->addr - addr) >= -0x02000000); + *buf_ptr |= (((sljit_sw)jump->u.label->addr - addr) >> 2) & 0x00ffffff; + } + else { + SLJIT_ASSERT(((sljit_sw)jump->u.target - addr) <= 0x01ffffff && ((sljit_sw)jump->u.target - addr) >= -0x02000000); + *buf_ptr |= (((sljit_sw)jump->u.target - addr) >> 2) & 0x00ffffff; + } + } + else if (jump->flags & SLJIT_REWRITABLE_JUMP) { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + jump->addr = (sljit_uw)code_ptr; + code_ptr[0] = (sljit_uw)buf_ptr; + code_ptr[1] = *buf_ptr; + inline_set_jump_addr((sljit_uw)code_ptr, executable_offset, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0); + code_ptr += 2; +#else + inline_set_jump_addr((sljit_uw)buf_ptr, executable_offset, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0); +#endif + } + else { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (jump->flags & IS_BL) + buf_ptr--; + if (*buf_ptr & (1 << 23)) + buf_ptr += ((*buf_ptr & 0xfff) >> 2) + 2; + else + buf_ptr += 1; + *buf_ptr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target; +#else + inline_set_jump_addr((sljit_uw)buf_ptr, executable_offset, (jump->flags & JUMP_LABEL) ? 
jump->u.label->addr : jump->u.target, 0);
+#endif
+ }
+ jump = jump->next;
+ }
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+ const_ = compiler->consts;
+ while (const_) {
+ buf_ptr = (sljit_uw*)const_->addr;
+ const_->addr = (sljit_uw)code_ptr;
+
+ code_ptr[0] = (sljit_uw)buf_ptr;
+ code_ptr[1] = *buf_ptr;
+ if (*buf_ptr & (1 << 23))
+ buf_ptr += ((*buf_ptr & 0xfff) >> 2) + 2;
+ else
+ buf_ptr += 1;
+ /* Set the value again (can be a simple constant). */
+ inline_set_const((sljit_uw)code_ptr, executable_offset, *buf_ptr, 0);
+ code_ptr += 2;
+
+ const_ = const_->next;
+ }
+#endif
+
+ put_label = compiler->put_labels;
+ while (put_label) {
+ addr = put_label->label->addr;
+ buf_ptr = (sljit_uw*)put_label->addr;
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+ SLJIT_ASSERT((buf_ptr[0] & 0xffff0000) == 0xe59f0000);
+ buf_ptr[((buf_ptr[0] & 0xfff) >> 2) + 2] = addr;
+#else
+ SLJIT_ASSERT((buf_ptr[-1] & 0xfff00000) == MOVW && (buf_ptr[0] & 0xfff00000) == MOVT);
+ buf_ptr[-1] |= ((addr << 4) & 0xf0000) | (addr & 0xfff);
+ buf_ptr[0] |= ((addr >> 12) & 0xf0000) | ((addr >> 16) & 0xfff);
+#endif
+ put_label = put_label->next;
+ }
+
+ SLJIT_ASSERT(code_ptr - code <= (sljit_s32)size);
+
+ compiler->error = SLJIT_ERR_COMPILED;
+ compiler->executable_offset = executable_offset;
+ compiler->executable_size = (code_ptr - code) * sizeof(sljit_uw);
+
+ code = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
+ code_ptr = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
+
+ SLJIT_CACHE_FLUSH(code, code_ptr);
+ return code;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
+{
+ switch (feature_type) {
+ case SLJIT_HAS_FPU:
+#ifdef SLJIT_IS_FPU_AVAILABLE
+ return SLJIT_IS_FPU_AVAILABLE;
+#else
+ /* Available by default. */
+ return 1;
+#endif
+
+ case SLJIT_HAS_CLZ:
+ case SLJIT_HAS_CMOV:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* --------------------------------------------------------------------- */
+/* Entry, exit */
+/* --------------------------------------------------------------------- */
+
+/* Creates an index in data_transfer_insts array. */
+#define WORD_SIZE 0x00
+#define BYTE_SIZE 0x01
+#define HALF_SIZE 0x02
+#define PRELOAD 0x03
+#define SIGNED 0x04
+#define LOAD_DATA 0x08
+
+/* Flag bits for emit_op. */
+#define ALLOW_IMM 0x10
+#define ALLOW_INV_IMM 0x20
+#define ALLOW_ANY_IMM (ALLOW_IMM | ALLOW_INV_IMM)
+
+/* s/l - store/load (1 bit)
+ u/s - signed/unsigned (1 bit)
+ w/b/h/N - word/byte/half/NOT allowed (2 bits)
+ Storing signed and unsigned values is the same operation. */
+
+static const sljit_uw data_transfer_insts[16] = {
+/* s u w */ 0xe5000000 /* str */,
+/* s u b */ 0xe5400000 /* strb */,
+/* s u h */ 0xe10000b0 /* strh */,
+/* s u N */ 0x00000000 /* not allowed */,
+/* s s w */ 0xe5000000 /* str */,
+/* s s b */ 0xe5400000 /* strb */,
+/* s s h */ 0xe10000b0 /* strh */,
+/* s s N */ 0x00000000 /* not allowed */,
+
+/* l u w */ 0xe5100000 /* ldr */,
+/* l u b */ 0xe5500000 /* ldrb */,
+/* l u h */ 0xe11000b0 /* ldrh */,
+/* l u p */ 0xf5500000 /* preload */,
+/* l s w */ 0xe5100000 /* ldr */,
+/* l s b */ 0xe11000d0 /* ldrsb */,
+/* l s h */ 0xe11000f0 /* ldrsh */,
+/* l s N */ 0x00000000 /* not allowed */,
+};
+
+#define EMIT_DATA_TRANSFER(type, add, target_reg, base_reg, arg) \
+ (data_transfer_insts[(type) & 0xf] | ((add) << 23) | RD(target_reg) | RN(base_reg) | (arg))
+
+/* Normal ldr/str instruction.
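+   Type1: ldr, str, ldrb, strb (12-bit unsigned immediate offset).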
+   Type2: ldrsb, ldrh, ldrsh */
+#define IS_TYPE1_TRANSFER(type) \
+ (data_transfer_insts[(type) & 0xf] & 0x04000000)
+#define TYPE2_TRANSFER_IMM(imm) \
+ (((imm) & 0xf) | (((imm) & 0xf0) << 4) | (1 << 22))
+
+static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 inp_flags,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w);
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
+{
+ sljit_s32 args, size, i, tmp;
+ sljit_uw push;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
+
+ /* Push saved registers, temporary registers
+    stmdb sp!, {..., lr} */
+ push = PUSH | (1 << 14);
+
+ tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
+ for (i = SLJIT_S0; i >= tmp; i--)
+ push |= 1 << reg_map[i];
+
+ for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--)
+ push |= 1 << reg_map[i];
+
+ FAIL_IF(push_inst(compiler, push));
+
+ /* Stack must be aligned to 8 bytes: */
+ size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
+ local_size = ((size + local_size + 7) & ~7) - size;
+ compiler->local_size = local_size;
+ if (local_size > 0)
+ FAIL_IF(emit_op(compiler, SLJIT_SUB, ALLOW_IMM, SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size));
+
+ args = get_arg_count(arg_types);
+
+ if (args >= 1)
+ FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S0) | RM(SLJIT_R0)));
+ if (args >= 2)
+ FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S1) | RM(SLJIT_R1)));
+ if (args >= 3)
+ FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S2) | RM(SLJIT_R2)));
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
+{
+ sljit_s32 size;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
+ set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
+
+ size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
+ compiler->local_size = ((size + local_size + 7) & ~7) - size;
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 i, tmp;
+ sljit_uw pop;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return(compiler, op, src, srcw));
+
+ FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
+
+ if (compiler->local_size > 0)
+ FAIL_IF(emit_op(compiler, SLJIT_ADD, ALLOW_IMM, SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));
+
+ /* Pop saved registers, temporary registers
+    ldmia sp!, {..., pc} */
+ pop = POP | (1 << 15);
+
+ tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ?
(SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
+ for (i = SLJIT_S0; i >= tmp; i--)
+ pop |= 1 << reg_map[i];
+
+ for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--)
+ pop |= 1 << reg_map[i];
+
+ return push_inst(compiler, pop);
+}
+
+/* --------------------------------------------------------------------- */
+/* Operators */
+/* --------------------------------------------------------------------- */
+
+/* flags: */
+ /* Arguments are swapped. */
+#define ARGS_SWAPPED 0x01
+ /* Inverted immediate. */
+#define INV_IMM 0x02
+ /* Source and destination are registers. */
+#define MOVE_REG_CONV 0x04
+ /* Unused return value. */
+#define UNUSED_RETURN 0x08
+/* SET_FLAGS must be (1 << 20) as it is also the value of S bit (can be used for optimization). */
+#define SET_FLAGS (1 << 20)
+/* dst: reg
+ src1: reg
+ src2: reg or imm (if allowed)
+ SRC2_IMM must be (1 << 25) as it is also the value of I bit (can be used for optimization). */
+#define SRC2_IMM (1 << 25)
+
+#define EMIT_SHIFT_INS_AND_RETURN(opcode) \
+ SLJIT_ASSERT(!(flags & INV_IMM) && !(src2 & SRC2_IMM)); \
+ if (compiler->shift_imm != 0x20) { \
+ SLJIT_ASSERT(src1 == TMP_REG1); \
+ SLJIT_ASSERT(!(flags & ARGS_SWAPPED)); \
+ \
+ if (compiler->shift_imm != 0) \
+ return push_inst(compiler, MOV | (flags & SET_FLAGS) | \
+ RD(dst) | (compiler->shift_imm << 7) | (opcode << 5) | RM(src2)); \
+ return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) | RM(src2)); \
+ } \
+ return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) | \
+ (reg_map[(flags & ARGS_SWAPPED) ? src1 : src2] << 8) | (opcode << 5) | 0x10 | RM((flags & ARGS_SWAPPED) ? src2 : src1));
+
+static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
+ sljit_s32 dst, sljit_s32 src1, sljit_s32 src2)
+{
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
+ if (dst != src2) {
+ if (src2 & SRC2_IMM) {
+ return push_inst(compiler, ((flags & INV_IMM) ? MVN : MOV) | RD(dst) | src2);
+ }
+ return push_inst(compiler, MOV | RD(dst) | RM(src2));
+ }
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_U8:
+ case SLJIT_MOV_S8:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
+ if (flags & MOVE_REG_CONV) {
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+ if (op == SLJIT_MOV_U8)
+ return push_inst(compiler, AND | RD(dst) | RN(src2) | SRC2_IMM | 0xff);
+ FAIL_IF(push_inst(compiler, MOV | RD(dst) | (24 << 7) | RM(src2)));
+ return push_inst(compiler, MOV | RD(dst) | (24 << 7) | (op == SLJIT_MOV_U8 ? 0x20 : 0x40) | RM(dst));
+#else
+ return push_inst(compiler, (op == SLJIT_MOV_U8 ? UXTB : SXTB) | RD(dst) | RM(src2));
+#endif
+ }
+ else if (dst != src2) {
+ SLJIT_ASSERT(src2 & SRC2_IMM);
+ return push_inst(compiler, ((flags & INV_IMM) ? MVN : MOV) | RD(dst) | src2);
+ }
+ return SLJIT_SUCCESS;
+
+ case SLJIT_MOV_U16:
+ case SLJIT_MOV_S16:
+ SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
+ if (flags & MOVE_REG_CONV) {
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+ FAIL_IF(push_inst(compiler, MOV | RD(dst) | (16 << 7) | RM(src2)));
+ return push_inst(compiler, MOV | RD(dst) | (16 << 7) | (op == SLJIT_MOV_U16 ? 0x20 : 0x40) | RM(dst));
+#else
+ return push_inst(compiler, (op == SLJIT_MOV_U16 ? UXTH : SXTH) | RD(dst) | RM(src2));
+#endif
+ }
+ else if (dst != src2) {
+ SLJIT_ASSERT(src2 & SRC2_IMM);
+ return push_inst(compiler, ((flags & INV_IMM) ?
MVN : MOV) | RD(dst) | src2); + } + return SLJIT_SUCCESS; + + case SLJIT_NOT: + if (src2 & SRC2_IMM) { + return push_inst(compiler, ((flags & INV_IMM) ? MOV : MVN) | (flags & SET_FLAGS) | RD(dst) | src2); + } + return push_inst(compiler, MVN | (flags & SET_FLAGS) | RD(dst) | RM(src2)); + + case SLJIT_CLZ: + SLJIT_ASSERT(!(flags & INV_IMM)); + SLJIT_ASSERT(!(src2 & SRC2_IMM)); + FAIL_IF(push_inst(compiler, CLZ | RD(dst) | RM(src2))); + return SLJIT_SUCCESS; + + case SLJIT_ADD: + SLJIT_ASSERT(!(flags & INV_IMM)); + if ((flags & (UNUSED_RETURN | SET_FLAGS)) == (UNUSED_RETURN | SET_FLAGS) && !(flags & ARGS_SWAPPED)) + return push_inst(compiler, CMN | SET_FLAGS | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2))); + return push_inst(compiler, ADD | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2))); + + case SLJIT_ADDC: + SLJIT_ASSERT(!(flags & INV_IMM)); + return push_inst(compiler, ADC | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2))); + + case SLJIT_SUB: + SLJIT_ASSERT(!(flags & INV_IMM)); + if ((flags & (UNUSED_RETURN | SET_FLAGS)) == (UNUSED_RETURN | SET_FLAGS) && !(flags & ARGS_SWAPPED)) + return push_inst(compiler, CMP | SET_FLAGS | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2))); + return push_inst(compiler, (!(flags & ARGS_SWAPPED) ? SUB : RSB) | (flags & SET_FLAGS) + | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2))); + + case SLJIT_SUBC: + SLJIT_ASSERT(!(flags & INV_IMM)); + return push_inst(compiler, (!(flags & ARGS_SWAPPED) ? SBC : RSC) | (flags & SET_FLAGS) + | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2))); + + case SLJIT_MUL: + SLJIT_ASSERT(!(flags & INV_IMM)); + SLJIT_ASSERT(!(src2 & SRC2_IMM)); + + if (!HAS_FLAGS(op)) + return push_inst(compiler, MUL | (reg_map[dst] << 16) | (reg_map[src2] << 8) | reg_map[src1]); + + FAIL_IF(push_inst(compiler, SMULL | (reg_map[TMP_REG1] << 16) | (reg_map[dst] << 12) | (reg_map[src2] << 8) | reg_map[src1])); + + /* cmp TMP_REG1, dst asr #31. */ + return push_inst(compiler, CMP | SET_FLAGS | RN(TMP_REG1) | RM(dst) | 0xfc0); + + case SLJIT_AND: + return push_inst(compiler, (!(flags & INV_IMM) ? AND : BIC) | (flags & SET_FLAGS) + | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2))); + + case SLJIT_OR: + SLJIT_ASSERT(!(flags & INV_IMM)); + return push_inst(compiler, ORR | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2))); + + case SLJIT_XOR: + SLJIT_ASSERT(!(flags & INV_IMM)); + return push_inst(compiler, EOR | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2))); + + case SLJIT_SHL: + EMIT_SHIFT_INS_AND_RETURN(0); + + case SLJIT_LSHR: + EMIT_SHIFT_INS_AND_RETURN(1); + + case SLJIT_ASHR: + EMIT_SHIFT_INS_AND_RETURN(2); + } + + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; +} + +#undef EMIT_SHIFT_INS_AND_RETURN + +/* Tests whether the immediate can be stored in the 12 bit imm field. + Returns with 0 if not possible. 
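+   (An ARM data-processing immediate is an 8-bit value rotated right
+   by an even amount, so e.g. 0x3fc = 0xff ror 30 is encodable while
+   0x101 is not.)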
*/
+static sljit_uw get_imm(sljit_uw imm)
+{
+ sljit_s32 rol;
+
+ if (imm <= 0xff)
+ return SRC2_IMM | imm;
+
+ if (!(imm & 0xff000000)) {
+ imm <<= 8;
+ rol = 8;
+ }
+ else {
+ imm = (imm << 24) | (imm >> 8);
+ rol = 0;
+ }
+
+ if (!(imm & 0xff000000)) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if (!(imm & 0xf0000000)) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if (!(imm & 0xc0000000)) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if (!(imm & 0x00ffffff))
+ return SRC2_IMM | (imm >> 24) | (rol << 8);
+ else
+ return 0;
+}
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+static sljit_s32 generate_int(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm, sljit_s32 positive)
+{
+ sljit_uw mask;
+ sljit_uw imm1;
+ sljit_uw imm2;
+ sljit_s32 rol;
+
+ /* Step 1: Search for a zero byte (8 contiguous zero bits). */
+ mask = 0xff000000;
+ rol = 8;
+ while(1) {
+ if (!(imm & mask)) {
+ /* Rol imm by rol. */
+ imm = (imm << rol) | (imm >> (32 - rol));
+ /* Calculate arm rol. */
+ rol = 4 + (rol >> 1);
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3) {
+ /* rol by 8. */
+ imm = (imm << 8) | (imm >> 24);
+ mask = 0xff00;
+ rol = 24;
+ while (1) {
+ if (!(imm & mask)) {
+ /* Rol imm by rol. */
+ imm = (imm << rol) | (imm >> (32 - rol));
+ /* Calculate arm rol. */
+ rol = (rol >> 1) - 8;
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3)
+ return 0;
+ }
+ break;
+ }
+ }
+
+ /* The low 8 bits must be zero. */
+ SLJIT_ASSERT(!(imm & 0xff));
+
+ if (!(imm & 0xff000000)) {
+ imm1 = SRC2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+ imm2 = SRC2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+ }
+ else if (imm & 0xc0000000) {
+ imm1 = SRC2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if (!(imm & 0xff000000)) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if (!(imm & 0xf0000000)) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if (!(imm & 0xc0000000)) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if (!(imm & 0x00ffffff))
+ imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ }
+ else {
+ if (!(imm & 0xf0000000)) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if (!(imm & 0xc0000000)) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ imm1 = SRC2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if (!(imm & 0xf0000000)) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if (!(imm & 0xc0000000)) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if (!(imm & 0x00ffffff))
+ imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ }
+
+ FAIL_IF(push_inst(compiler, (positive ? MOV : MVN) | RD(reg) | imm1));
+ FAIL_IF(push_inst(compiler, (positive ? ORR : BIC) | RD(reg) | RN(reg) | imm2));
+ return 1;
+}
+#endif
+
+static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm)
+{
+ sljit_uw tmp;
+
+#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+ if (!(imm & ~0xffff))
+ return push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff));
+#endif
+
+ /* Create imm by 1 inst. */
+ tmp = get_imm(imm);
+ if (tmp)
+ return push_inst(compiler, MOV | RD(reg) | tmp);
+
+ tmp = get_imm(~imm);
+ if (tmp)
+ return push_inst(compiler, MVN | RD(reg) | tmp);
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+ /* Create imm by 2 inst. */
+ FAIL_IF(generate_int(compiler, reg, imm, 1));
+ FAIL_IF(generate_int(compiler, reg, ~imm, 0));
+
+ /* Load integer.
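+    As a last resort the value is placed in the constant pool and
+    fetched with a single pc-relative ldr.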
*/
+ return push_inst_with_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, reg, TMP_PC, 0), imm);
+#else
+ FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff)));
+ if (imm <= 0xffff)
+ return SLJIT_SUCCESS;
+ return push_inst(compiler, MOVT | RD(reg) | ((imm >> 12) & 0xf0000) | ((imm >> 16) & 0xfff));
+#endif
+}
+
+static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
+ sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
+{
+ sljit_uw imm, offset_reg;
+ sljit_uw is_type1_transfer = IS_TYPE1_TRANSFER(flags);
+
+ SLJIT_ASSERT (arg & SLJIT_MEM);
+ SLJIT_ASSERT((arg & REG_MASK) != tmp_reg);
+
+ if ((arg & REG_MASK) == SLJIT_UNUSED) {
+ if (is_type1_transfer) {
+ FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~0xfff));
+ argw &= 0xfff;
+ }
+ else {
+ FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~0xff));
+ argw &= 0xff;
+ }
+
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, tmp_reg,
+ is_type1_transfer ? argw : TYPE2_TRANSFER_IMM(argw)));
+ }
+
+ if (arg & OFFS_REG_MASK) {
+ offset_reg = OFFS_REG(arg);
+ arg &= REG_MASK;
+ argw &= 0x3;
+
+ if (argw != 0 && !is_type1_transfer) {
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | RM(offset_reg) | (argw << 7)));
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, tmp_reg, TYPE2_TRANSFER_IMM(0)));
+ }
+
+ /* Bit 25: RM is offset. */
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg,
+ RM(offset_reg) | (is_type1_transfer ? (1 << 25) : 0) | (argw << 7)));
+ }
+
+ arg &= REG_MASK;
+
+ if (is_type1_transfer) {
+ if (argw > 0xfff) {
+ imm = get_imm(argw & ~0xfff);
+ if (imm) {
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm));
+ argw = argw & 0xfff;
+ arg = tmp_reg;
+ }
+ }
+ else if (argw < -0xfff) {
+ imm = get_imm(-argw & ~0xfff);
+ if (imm) {
+ FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm));
+ argw = -(-argw & 0xfff);
+ arg = tmp_reg;
+ }
+ }
+
+ if (argw >= 0 && argw <= 0xfff)
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, argw));
+
+ if (argw < 0 && argw >= -0xfff)
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, -argw));
+ }
+ else {
+ if (argw > 0xff) {
+ imm = get_imm(argw & ~0xff);
+ if (imm) {
+ FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm));
+ argw = argw & 0xff;
+ arg = tmp_reg;
+ }
+ }
+ else if (argw < -0xff) {
+ imm = get_imm(-argw & ~0xff);
+ if (imm) {
+ FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm));
+ argw = -(-argw & 0xff);
+ arg = tmp_reg;
+ }
+ }
+
+ if (argw >= 0 && argw <= 0xff)
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, TYPE2_TRANSFER_IMM(argw)));
+
+ if (argw < 0 && argw >= -0xff) {
+ argw = -argw;
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, TYPE2_TRANSFER_IMM(argw)));
+ }
+ }
+
+ FAIL_IF(load_immediate(compiler, tmp_reg, argw));
+ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg,
+ RM(tmp_reg) | (is_type1_transfer ? (1 << 25) : 0)));
+}
+
+static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 inp_flags,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ /* src1 is reg or TMP_REG1
+ src2 is reg, TMP_REG2, or imm (if allowed)
+ the result goes to TMP_REG2, so storing the result can use TMP_REG1. */
+
+ /* We prefer registers and simple constants. */
+ sljit_s32 dst_reg;
+ sljit_s32 src1_reg;
+ sljit_s32 src2_reg;
+ sljit_s32 flags = HAS_FLAGS(op) ?
SET_FLAGS : 0; + + /* Destination check. */ + if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) + flags |= UNUSED_RETURN; + + SLJIT_ASSERT(!(inp_flags & ALLOW_INV_IMM) || (inp_flags & ALLOW_IMM)); + + src2_reg = 0; + + do { + if (!(inp_flags & ALLOW_IMM)) + break; + + if (src2 & SLJIT_IMM) { + src2_reg = get_imm(src2w); + if (src2_reg) + break; + if (inp_flags & ALLOW_INV_IMM) { + src2_reg = get_imm(~src2w); + if (src2_reg) { + flags |= INV_IMM; + break; + } + } + if (GET_OPCODE(op) == SLJIT_ADD) { + src2_reg = get_imm(-src2w); + if (src2_reg) { + op = SLJIT_SUB | GET_ALL_FLAGS(op); + break; + } + } + if (GET_OPCODE(op) == SLJIT_SUB) { + src2_reg = get_imm(-src2w); + if (src2_reg) { + op = SLJIT_ADD | GET_ALL_FLAGS(op); + break; + } + } + } + + if (src1 & SLJIT_IMM) { + src2_reg = get_imm(src1w); + if (src2_reg) { + flags |= ARGS_SWAPPED; + src1 = src2; + src1w = src2w; + break; + } + if (inp_flags & ALLOW_INV_IMM) { + src2_reg = get_imm(~src1w); + if (src2_reg) { + flags |= ARGS_SWAPPED | INV_IMM; + src1 = src2; + src1w = src2w; + break; + } + } + if (GET_OPCODE(op) == SLJIT_ADD) { + src2_reg = get_imm(-src1w); + if (src2_reg) { + /* Note: add is commutative operation. */ + src1 = src2; + src1w = src2w; + op = SLJIT_SUB | GET_ALL_FLAGS(op); + break; + } + } + } + } while(0); + + /* Source 1. */ + if (FAST_IS_REG(src1)) + src1_reg = src1; + else if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, TMP_REG1)); + src1_reg = TMP_REG1; + } + else { + FAIL_IF(load_immediate(compiler, TMP_REG1, src1w)); + src1_reg = TMP_REG1; + } + + /* Destination. */ + dst_reg = SLOW_IS_REG(dst) ? dst : TMP_REG2; + + if (op <= SLJIT_MOV_P) { + if (dst & SLJIT_MEM) { + if (inp_flags & BYTE_SIZE) + inp_flags &= ~SIGNED; + + if (FAST_IS_REG(src2)) + return emit_op_mem(compiler, inp_flags, src2, dst, dstw, TMP_REG2); + } + + if (FAST_IS_REG(src2) && dst_reg != TMP_REG2) + flags |= MOVE_REG_CONV; + } + + /* Source 2. */ + if (src2_reg == 0) { + src2_reg = (op <= SLJIT_MOV_P) ? dst_reg : TMP_REG2; + + if (FAST_IS_REG(src2)) + src2_reg = src2; + else if (src2 & SLJIT_MEM) + FAIL_IF(emit_op_mem(compiler, inp_flags | LOAD_DATA, src2_reg, src2, src2w, TMP_REG2)); + else + FAIL_IF(load_immediate(compiler, src2_reg, src2w)); + } + + FAIL_IF(emit_single_op(compiler, op, flags, dst_reg, src1_reg, src2_reg)); + + if (!(dst & SLJIT_MEM)) + return SLJIT_SUCCESS; + + return emit_op_mem(compiler, inp_flags, dst_reg, dst, dstw, TMP_REG1); +} + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__GNUC__) +extern unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator); +extern int __aeabi_idivmod(int numerator, int denominator); +#else +#error "Software divmod functions are needed" +#endif + +#ifdef __cplusplus +} +#endif + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) +{ + sljit_sw saved_reg_list[3]; + sljit_sw saved_reg_count; + + CHECK_ERROR(); + CHECK(check_sljit_emit_op0(compiler, op)); + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_BREAKPOINT: + FAIL_IF(push_inst(compiler, BKPT)); + break; + case SLJIT_NOP: + FAIL_IF(push_inst(compiler, NOP)); + break; + case SLJIT_LMUL_UW: + case SLJIT_LMUL_SW: + return push_inst(compiler, (op == SLJIT_LMUL_UW ? 
UMULL : SMULL) + | (reg_map[SLJIT_R1] << 16) + | (reg_map[SLJIT_R0] << 12) + | (reg_map[SLJIT_R0] << 8) + | reg_map[SLJIT_R1]); + case SLJIT_DIVMOD_UW: + case SLJIT_DIVMOD_SW: + case SLJIT_DIV_UW: + case SLJIT_DIV_SW: + SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments); + SLJIT_ASSERT(reg_map[2] == 1 && reg_map[3] == 2 && reg_map[4] == 3); + + saved_reg_count = 0; + if (compiler->scratches >= 4) + saved_reg_list[saved_reg_count++] = 3; + if (compiler->scratches >= 3) + saved_reg_list[saved_reg_count++] = 2; + if (op >= SLJIT_DIV_UW) + saved_reg_list[saved_reg_count++] = 1; + + if (saved_reg_count > 0) { + FAIL_IF(push_inst(compiler, 0xe52d0000 | (saved_reg_count >= 3 ? 16 : 8) + | (saved_reg_list[0] << 12) /* str rX, [sp, #-8/-16]! */)); + if (saved_reg_count >= 2) { + SLJIT_ASSERT(saved_reg_list[1] < 8); + FAIL_IF(push_inst(compiler, 0xe58d0004 | (saved_reg_list[1] << 12) /* str rX, [sp, #4] */)); + } + if (saved_reg_count >= 3) { + SLJIT_ASSERT(saved_reg_list[2] < 8); + FAIL_IF(push_inst(compiler, 0xe58d0008 | (saved_reg_list[2] << 12) /* str rX, [sp, #8] */)); + } + } + +#if defined(__GNUC__) + FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM, + ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod)))); +#else +#error "Software divmod functions are needed" +#endif + + if (saved_reg_count > 0) { + if (saved_reg_count >= 3) { + SLJIT_ASSERT(saved_reg_list[2] < 8); + FAIL_IF(push_inst(compiler, 0xe59d0008 | (saved_reg_list[2] << 12) /* ldr rX, [sp, #8] */)); + } + if (saved_reg_count >= 2) { + SLJIT_ASSERT(saved_reg_list[1] < 8); + FAIL_IF(push_inst(compiler, 0xe59d0004 | (saved_reg_list[1] << 12) /* ldr rX, [sp, #4] */)); + } + return push_inst(compiler, 0xe49d0000 | (saved_reg_count >= 3 ? 16 : 8) + | (saved_reg_list[0] << 12) /* ldr rX, [sp], #8/16 */); + } + return SLJIT_SUCCESS; + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) { +#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) + if (op <= SLJIT_MOV_P && (src & SLJIT_MEM)) + return emit_op_mem(compiler, PRELOAD | LOAD_DATA, TMP_PC, src, srcw, TMP_REG1); +#endif + return SLJIT_SUCCESS; + } + + switch (GET_OPCODE(op)) { + case SLJIT_MOV: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: + case SLJIT_MOV_P: + return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_U8: + return emit_op(compiler, SLJIT_MOV_U8, ALLOW_ANY_IMM | BYTE_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw); + + case SLJIT_MOV_S8: + return emit_op(compiler, SLJIT_MOV_S8, ALLOW_ANY_IMM | SIGNED | BYTE_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw); + + case SLJIT_MOV_U16: + return emit_op(compiler, SLJIT_MOV_U16, ALLOW_ANY_IMM | HALF_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw); + + case SLJIT_MOV_S16: + return emit_op(compiler, SLJIT_MOV_S16, ALLOW_ANY_IMM | SIGNED | HALF_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? 
(sljit_s16)srcw : srcw); + + case SLJIT_NOT: + return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_NEG: +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + return sljit_emit_op2(compiler, SLJIT_SUB | GET_ALL_FLAGS(op), dst, dstw, SLJIT_IMM, 0, src, srcw); + + case SLJIT_CLZ: + return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src, srcw); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) + return SLJIT_SUCCESS; + + switch (GET_OPCODE(op)) { + case SLJIT_ADD: + case SLJIT_ADDC: + case SLJIT_SUB: + case SLJIT_SUBC: + case SLJIT_OR: + case SLJIT_XOR: + return emit_op(compiler, op, ALLOW_IMM, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_MUL: + return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_AND: + return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SHL: + case SLJIT_LSHR: + case SLJIT_ASHR: + if (src2 & SLJIT_IMM) { + compiler->shift_imm = src2w & 0x1f; + return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src1, src1w); + } + else { + compiler->shift_imm = 0x20; + return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w); + } + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg) +{ + CHECK_REG_INDEX(check_sljit_get_register_index(reg)); + return reg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg) +{ + CHECK_REG_INDEX(check_sljit_get_float_register_index(reg)); + return (freg_map[reg] << 1); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_s32 size) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_op_custom(compiler, instruction, size)); + + return push_inst(compiler, *(sljit_uw*)instruction); +} + +/* --------------------------------------------------------------------- */ +/* Floating point operators */ +/* --------------------------------------------------------------------- */ + + +#define FPU_LOAD (1 << 20) +#define EMIT_FPU_DATA_TRANSFER(inst, add, base, freg, offs) \ + ((inst) | ((add) << 23) | (reg_map[base] << 16) | (freg_map[freg] << 12) | (offs)) +#define EMIT_FPU_OPERATION(opcode, mode, dst, src1, src2) \ + ((opcode) | (mode) | (freg_map[dst] << 12) | freg_map[src1] | (freg_map[src2] << 16)) + +static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw) +{ + sljit_uw imm; + sljit_sw inst = VSTR_F32 | (flags & (SLJIT_F32_OP | FPU_LOAD)); + + SLJIT_ASSERT(arg & SLJIT_MEM); + arg &= ~SLJIT_MEM; + + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | ((argw & 0x3) << 7))); + arg = TMP_REG2; + argw = 0; + } + + /* Fast loads and stores. 
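+    VFP load/store offsets are encoded as 8-bit word counts, so only
+    byte offsets that are multiples of 4 up to +/-1020 (0x3fc) fit
+    into the instruction directly.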
*/ + if (arg) { + if (!(argw & ~0x3fc)) + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, arg & REG_MASK, reg, argw >> 2)); + if (!(-argw & ~0x3fc)) + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, arg & REG_MASK, reg, (-argw) >> 2)); + + imm = get_imm(argw & ~0x3fc); + if (imm) { + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | imm)); + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG2, reg, (argw & 0x3fc) >> 2)); + } + imm = get_imm(-argw & ~0x3fc); + if (imm) { + argw = -argw; + FAIL_IF(push_inst(compiler, SUB | RD(TMP_REG2) | RN(arg & REG_MASK) | imm)); + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, TMP_REG2, reg, (argw & 0x3fc) >> 2)); + } + } + + if (arg) { + FAIL_IF(load_immediate(compiler, TMP_REG2, argw)); + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | RM(TMP_REG2))); + } + else + FAIL_IF(load_immediate(compiler, TMP_REG2, argw)); + + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG2, reg, 0)); +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + op ^= SLJIT_F32_OP; + + if (src & SLJIT_MEM) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src, srcw)); + src = TMP_FREG1; + } + + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_F32_OP, TMP_FREG1, src, 0))); + + if (FAST_IS_REG(dst)) + return push_inst(compiler, VMOV | (1 << 20) | RD(dst) | (freg_map[TMP_FREG1] << 16)); + + /* Store the integer value from a VFP register. */ + return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw); +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; + + op ^= SLJIT_F32_OP; + + if (FAST_IS_REG(src)) + FAIL_IF(push_inst(compiler, VMOV | RD(src) | (freg_map[TMP_FREG1] << 16))); + else if (src & SLJIT_MEM) { + /* Load the integer value into a VFP register. 
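+    The vcvt below then converts it in place, which avoids an extra
+    general register to VFP register transfer.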
*/ + FAIL_IF(emit_fop_mem(compiler, FPU_LOAD, TMP_FREG1, src, srcw)); + } + else { + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); + FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | (freg_map[TMP_FREG1] << 16))); + } + + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F32_S32, op & SLJIT_F32_OP, dst_r, TMP_FREG1, 0))); + + if (dst & SLJIT_MEM) + return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + op ^= SLJIT_F32_OP; + + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w)); + src1 = TMP_FREG1; + } + + if (src2 & SLJIT_MEM) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w)); + src2 = TMP_FREG2; + } + + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_F32_OP, src1, src2, 0))); + return push_inst(compiler, VMRS); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r; + + CHECK_ERROR(); + + SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100), float_transfer_bit_error); + SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw); + + dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; + + if (GET_OPCODE(op) != SLJIT_CONV_F64_FROM_F32) + op ^= SLJIT_F32_OP; + + if (src & SLJIT_MEM) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, dst_r, src, srcw)); + src = dst_r; + } + + switch (GET_OPCODE(op)) { + case SLJIT_MOV_F64: + if (src != dst_r) { + if (dst_r != TMP_FREG1) + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_F32_OP, dst_r, src, 0))); + else + dst_r = src; + } + break; + case SLJIT_NEG_F64: + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_F32_OP, dst_r, src, 0))); + break; + case SLJIT_ABS_F64: + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_F32_OP, dst_r, src, 0))); + break; + case SLJIT_CONV_F64_FROM_F32: + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F64_F32, op & SLJIT_F32_OP, dst_r, src, 0))); + op ^= SLJIT_F32_OP; + break; + } + + if (dst & SLJIT_MEM) + return emit_fop_mem(compiler, (op & SLJIT_F32_OP), dst_r, dst, dstw); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 dst_r; + + CHECK_ERROR(); + CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + op ^= SLJIT_F32_OP; + + dst_r = FAST_IS_REG(dst) ? 
dst : TMP_FREG1; + + if (src2 & SLJIT_MEM) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w)); + src2 = TMP_FREG2; + } + + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w)); + src1 = TMP_FREG1; + } + + switch (GET_OPCODE(op)) { + case SLJIT_ADD_F64: + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_F32_OP, dst_r, src2, src1))); + break; + + case SLJIT_SUB_F64: + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_F32_OP, dst_r, src2, src1))); + break; + + case SLJIT_MUL_F64: + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_F32_OP, dst_r, src2, src1))); + break; + + case SLJIT_DIV_F64: + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_F32_OP, dst_r, src2, src1))); + break; + } + + if (dst_r == TMP_FREG1) + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw)); + + return SLJIT_SUCCESS; +} + +#undef FPU_LOAD +#undef EMIT_FPU_DATA_TRANSFER + +/* --------------------------------------------------------------------- */ +/* Other instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + SLJIT_ASSERT(reg_map[TMP_REG2] == 14); + + if (FAST_IS_REG(dst)) + return push_inst(compiler, MOV | RD(dst) | RM(TMP_REG2)); + + /* Memory. */ + return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, dst, dstw, TMP_REG1); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_fast_return(compiler, src, srcw)); + ADJUST_LOCAL_OFFSET(src, srcw); + + SLJIT_ASSERT(reg_map[TMP_REG2] == 14); + + if (FAST_IS_REG(src)) + FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG2) | RM(src))); + else + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, src, srcw, TMP_REG1)); + + return push_inst(compiler, BX | RM(TMP_REG2)); +} + +/* --------------------------------------------------------------------- */ +/* Conditional instructions */ +/* --------------------------------------------------------------------- */ + +static sljit_uw get_cc(sljit_s32 type) +{ + switch (type) { + case SLJIT_EQUAL: + case SLJIT_MUL_NOT_OVERFLOW: + case SLJIT_EQUAL_F64: + return 0x00000000; + + case SLJIT_NOT_EQUAL: + case SLJIT_MUL_OVERFLOW: + case SLJIT_NOT_EQUAL_F64: + return 0x10000000; + + case SLJIT_LESS: + case SLJIT_LESS_F64: + return 0x30000000; + + case SLJIT_GREATER_EQUAL: + case SLJIT_GREATER_EQUAL_F64: + return 0x20000000; + + case SLJIT_GREATER: + case SLJIT_GREATER_F64: + return 0x80000000; + + case SLJIT_LESS_EQUAL: + case SLJIT_LESS_EQUAL_F64: + return 0x90000000; + + case SLJIT_SIG_LESS: + return 0xb0000000; + + case SLJIT_SIG_GREATER_EQUAL: + return 0xa0000000; + + case SLJIT_SIG_GREATER: + return 0xc0000000; + + case SLJIT_SIG_LESS_EQUAL: + return 0xd0000000; + + case SLJIT_OVERFLOW: + case SLJIT_UNORDERED_F64: + return 0x60000000; + + case SLJIT_NOT_OVERFLOW: + case SLJIT_ORDERED_F64: + return 0x70000000; + + default: + SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_CDECL); + return 0xe0000000; + } +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + struct sljit_label *label; + + 
CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_label(compiler)); + + if (compiler->last_label && compiler->last_label->size == compiler->size) + return compiler->last_label; + + label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); + PTR_FAIL_IF(!label); + set_label(label, compiler); + return label; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) +{ + struct sljit_jump *jump; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_jump(compiler, type)); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + SLJIT_ASSERT(reg_map[TMP_REG1] != 14); + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (type >= SLJIT_FAST_CALL) + PTR_FAIL_IF(prepare_blx(compiler)); + PTR_FAIL_IF(push_inst_with_unique_literal(compiler, ((EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, + type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, TMP_PC, 0)) & ~COND_MASK) | get_cc(type), 0)); + + if (jump->flags & SLJIT_REWRITABLE_JUMP) { + jump->addr = compiler->size; + compiler->patches++; + } + + if (type >= SLJIT_FAST_CALL) { + jump->flags |= IS_BL; + PTR_FAIL_IF(emit_blx(compiler)); + } + + if (!(jump->flags & SLJIT_REWRITABLE_JUMP)) + jump->addr = compiler->size; +#else + if (type >= SLJIT_FAST_CALL) + jump->flags |= IS_BL; + PTR_FAIL_IF(emit_imm(compiler, TMP_REG1, 0)); + PTR_FAIL_IF(push_inst(compiler, (((type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1)) & ~COND_MASK) | get_cc(type))); + jump->addr = compiler->size; +#endif + return jump; +} + +#ifdef __SOFTFP__ + +static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src) +{ + sljit_s32 stack_offset = 0; + sljit_s32 arg_count = 0; + sljit_s32 word_arg_offset = 0; + sljit_s32 float_arg_count = 0; + sljit_s32 types = 0; + sljit_s32 src_offset = 4 * sizeof(sljit_sw); + sljit_u8 offsets[4]; + + if (src && FAST_IS_REG(*src)) + src_offset = reg_map[*src] * sizeof(sljit_sw); + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK); + + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + offsets[arg_count] = (sljit_u8)stack_offset; + stack_offset += sizeof(sljit_f32); + arg_count++; + float_arg_count++; + break; + case SLJIT_ARG_TYPE_F64: + if (stack_offset & 0x7) + stack_offset += sizeof(sljit_sw); + offsets[arg_count] = (sljit_u8)stack_offset; + stack_offset += sizeof(sljit_f64); + arg_count++; + float_arg_count++; + break; + default: + offsets[arg_count] = (sljit_u8)stack_offset; + stack_offset += sizeof(sljit_sw); + arg_count++; + word_arg_offset += sizeof(sljit_sw); + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + if (stack_offset > 16) + FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | (((stack_offset - 16) + 0x7) & ~0x7))); + + /* Process arguments in reversed direction. 
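+    The offsets were assigned left to right above; moving the values
+    in reverse order avoids overwriting an argument register whose old
+    value has not been copied to its place yet.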
*/ + while (types) { + switch (types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + arg_count--; + float_arg_count--; + stack_offset = offsets[arg_count]; + + if (stack_offset < 16) { + if (src_offset == stack_offset) { + FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2))); + *src = TMP_REG1; + } + FAIL_IF(push_inst(compiler, VMOV | 0x100000 | (float_arg_count << 16) | (stack_offset << 10))); + } else + FAIL_IF(push_inst(compiler, VSTR_F32 | 0x800000 | RN(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2))); + break; + case SLJIT_ARG_TYPE_F64: + arg_count--; + float_arg_count--; + stack_offset = offsets[arg_count]; + + SLJIT_ASSERT((stack_offset & 0x7) == 0); + + if (stack_offset < 16) { + if (src_offset == stack_offset || src_offset == stack_offset + sizeof(sljit_sw)) { + FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2))); + *src = TMP_REG1; + } + FAIL_IF(push_inst(compiler, VMOV2 | 0x100000 | (stack_offset << 10) | ((stack_offset + sizeof(sljit_sw)) << 14) | float_arg_count)); + } else + FAIL_IF(push_inst(compiler, VSTR_F32 | 0x800100 | RN(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2))); + break; + default: + arg_count--; + word_arg_offset -= sizeof(sljit_sw); + stack_offset = offsets[arg_count]; + + SLJIT_ASSERT(stack_offset >= word_arg_offset); + + if (stack_offset != word_arg_offset) { + if (stack_offset < 16) { + if (src_offset == stack_offset) { + FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2))); + *src = TMP_REG1; + } + else if (src_offset == word_arg_offset) { + *src = 1 + (stack_offset >> 2); + src_offset = stack_offset; + } + FAIL_IF(push_inst(compiler, MOV | (stack_offset << 10) | (word_arg_offset >> 2))); + } else + FAIL_IF(push_inst(compiler, data_transfer_insts[WORD_SIZE] | 0x800000 | RN(SLJIT_SP) | (word_arg_offset << 10) | (stack_offset - 16))); + } + break; + } + + types >>= SLJIT_DEF_SHIFT; + } + + return SLJIT_SUCCESS; +} + +static sljit_s32 softfloat_post_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types) +{ + sljit_s32 stack_size = 0; + + if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32) + FAIL_IF(push_inst(compiler, VMOV | (0 << 16) | (0 << 12))); + if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64) + FAIL_IF(push_inst(compiler, VMOV2 | (1 << 16) | (0 << 12) | 0)); + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + stack_size += sizeof(sljit_f32); + break; + case SLJIT_ARG_TYPE_F64: + if (stack_size & 0x7) + stack_size += sizeof(sljit_sw); + stack_size += sizeof(sljit_f64); + break; + default: + stack_size += sizeof(sljit_sw); + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + if (stack_size <= 16) + return SLJIT_SUCCESS; + + return push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | (((stack_size - 16) + 0x7) & ~0x7)); +} + +#else /* !__SOFTFP__ */ + +static sljit_s32 hardfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types) +{ + sljit_u32 remap = 0; + sljit_u32 offset = 0; + sljit_u32 new_offset, mask; + + /* Remove return value. */ + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32) { + new_offset = 0; + mask = 1; + + while (remap & mask) { + new_offset++; + mask <<= 1; + } + remap |= mask; + + if (offset != new_offset) + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, + 0, (new_offset >> 1) + 1, (offset >> 1) + 1, 0) | ((new_offset & 0x1) ? 
0x400000 : 0))); + + offset += 2; + } + else if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64) { + new_offset = 0; + mask = 3; + + while (remap & mask) { + new_offset += 2; + mask <<= 2; + } + remap |= mask; + + if (offset != new_offset) + FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, SLJIT_F32_OP, (new_offset >> 1) + 1, (offset >> 1) + 1, 0))); + + offset += 2; + } + arg_types >>= SLJIT_DEF_SHIFT; + } + + return SLJIT_SUCCESS; +} + +#endif /* __SOFTFP__ */ + +#undef EMIT_FPU_OPERATION + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ +#ifdef __SOFTFP__ + struct sljit_jump *jump; +#endif + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); + +#ifdef __SOFTFP__ + PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + jump = sljit_emit_jump(compiler, type); + PTR_FAIL_IF(jump == NULL); + + PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types)); + return jump; +#else /* !__SOFTFP__ */ + PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_jump(compiler, type); +#endif /* __SOFTFP__ */ +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) +{ + struct sljit_jump *jump; + + CHECK_ERROR(); + CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); + ADJUST_LOCAL_OFFSET(src, srcw); + + SLJIT_ASSERT(reg_map[TMP_REG1] != 14); + + if (!(src & SLJIT_IMM)) { + if (FAST_IS_REG(src)) { + SLJIT_ASSERT(reg_map[src] != 14); + return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(src)); + } + + SLJIT_ASSERT(src & SLJIT_MEM); + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1)); + return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1)); + } + + /* These jumps are converted to jump/call instructions when possible. */ + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF(!jump); + set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0)); + jump->u.target = srcw; + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (type >= SLJIT_FAST_CALL) + FAIL_IF(prepare_blx(compiler)); + FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, TMP_PC, 0), 0)); + if (type >= SLJIT_FAST_CALL) + FAIL_IF(emit_blx(compiler)); +#else + FAIL_IF(emit_imm(compiler, TMP_REG1, 0)); + FAIL_IF(push_inst(compiler, (type <= SLJIT_JUMP ? 
BX : BLX) | RM(TMP_REG1))); +#endif + jump->addr = compiler->size; + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + +#ifdef __SOFTFP__ + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1)); + src = TMP_REG1; + } + + FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw)); + + return softfloat_post_call_with_args(compiler, arg_types); +#else /* !__SOFTFP__ */ + FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_ijump(compiler, type, src, srcw); +#endif /* __SOFTFP__ */ +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 type) +{ + sljit_s32 dst_reg, flags = GET_ALL_FLAGS(op); + sljit_uw cc, ins; + + CHECK_ERROR(); + CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + op = GET_OPCODE(op); + cc = get_cc(type & 0xff); + dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG1; + + if (op < SLJIT_ADD) { + FAIL_IF(push_inst(compiler, MOV | RD(dst_reg) | SRC2_IMM | 0)); + FAIL_IF(push_inst(compiler, ((MOV | RD(dst_reg) | SRC2_IMM | 1) & ~COND_MASK) | cc)); + if (dst & SLJIT_MEM) + return emit_op_mem(compiler, WORD_SIZE, TMP_REG1, dst, dstw, TMP_REG2); + return SLJIT_SUCCESS; + } + + ins = (op == SLJIT_AND ? AND : (op == SLJIT_OR ? 
ORR : EOR)); + + if (dst & SLJIT_MEM) + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, dst, dstw, TMP_REG2)); + + FAIL_IF(push_inst(compiler, ((ins | RD(dst_reg) | RN(dst_reg) | SRC2_IMM | 1) & ~COND_MASK) | cc)); + + if (op == SLJIT_AND) + FAIL_IF(push_inst(compiler, ((ins | RD(dst_reg) | RN(dst_reg) | SRC2_IMM | 0) & ~COND_MASK) | (cc ^ 0x10000000))); + + if (dst & SLJIT_MEM) + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, dst, dstw, TMP_REG2)); + + if (flags & SLJIT_SET_Z) + return push_inst(compiler, MOV | SET_FLAGS | RD(TMP_REG2) | RM(dst_reg)); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 dst_reg, + sljit_s32 src, sljit_sw srcw) +{ + sljit_uw cc, tmp; + + CHECK_ERROR(); + CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); + + dst_reg &= ~SLJIT_I32_OP; + + cc = get_cc(type & 0xff); + + if (SLJIT_UNLIKELY(src & SLJIT_IMM)) { + tmp = get_imm(srcw); + if (tmp) + return push_inst(compiler, ((MOV | RD(dst_reg) | tmp) & ~COND_MASK) | cc); + + tmp = get_imm(~srcw); + if (tmp) + return push_inst(compiler, ((MVN | RD(dst_reg) | tmp) & ~COND_MASK) | cc); + +#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) + tmp = (sljit_uw) srcw; + FAIL_IF(push_inst(compiler, (MOVW & ~COND_MASK) | cc | RD(dst_reg) | ((tmp << 4) & 0xf0000) | (tmp & 0xfff))); + if (tmp <= 0xffff) + return SLJIT_SUCCESS; + return push_inst(compiler, (MOVT & ~COND_MASK) | cc | RD(dst_reg) | ((tmp >> 12) & 0xf0000) | ((tmp >> 16) & 0xfff)); +#else + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); + src = TMP_REG1; +#endif + } + + return push_inst(compiler, ((MOV | RD(dst_reg) | RM(src)) & ~COND_MASK) | cc); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_s32 flags; + sljit_uw is_type1_transfer, inst; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + + is_type1_transfer = 1; + + switch (type & 0xff) { + case SLJIT_MOV: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: + case SLJIT_MOV_P: + flags = WORD_SIZE; + break; + case SLJIT_MOV_U8: + flags = BYTE_SIZE; + break; + case SLJIT_MOV_S8: + if (!(type & SLJIT_MEM_STORE)) + is_type1_transfer = 0; + flags = BYTE_SIZE | SIGNED; + break; + case SLJIT_MOV_U16: + is_type1_transfer = 0; + flags = HALF_SIZE; + break; + case SLJIT_MOV_S16: + is_type1_transfer = 0; + flags = HALF_SIZE | SIGNED; + break; + default: + SLJIT_UNREACHABLE(); + flags = WORD_SIZE; + break; + } + + if (!(type & SLJIT_MEM_STORE)) + flags |= LOAD_DATA; + + SLJIT_ASSERT(is_type1_transfer == !!IS_TYPE1_TRANSFER(flags)); + + if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) { + if (!is_type1_transfer && memw != 0) + return SLJIT_ERR_UNSUPPORTED; + } + else { + if (is_type1_transfer) { + if (memw > 4095 && memw < -4095) + return SLJIT_ERR_UNSUPPORTED; + } + else { + if (memw > 255 && memw < -255) + return SLJIT_ERR_UNSUPPORTED; + } + } + + if (type & SLJIT_MEM_SUPP) + return SLJIT_SUCCESS; + + if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) { + memw &= 0x3; + + inst = EMIT_DATA_TRANSFER(flags, 1, reg, mem & REG_MASK, RM(OFFS_REG(mem)) | (memw << 7)); + + if (is_type1_transfer) + inst |= (1 << 25); + + if (type & SLJIT_MEM_PRE) + inst |= (1 << 21); + else + inst ^= (1 << 24); + + return push_inst(compiler, inst); + } + + inst = EMIT_DATA_TRANSFER(flags, 0, reg, mem & REG_MASK, 0); + + if (type & SLJIT_MEM_PRE) + inst |= (1 << 21); + else + inst ^= (1 << 24); 
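+
+ /* EMIT_DATA_TRANSFER sets the P bit (1 << 24) by default: keeping P
+    and adding W (1 << 21) gives a pre-indexed access with writeback,
+    while clearing P makes the access post-indexed. */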
+ + if (is_type1_transfer) { + if (memw >= 0) + inst |= (1 << 23); + else + memw = -memw; + + return push_inst(compiler, inst | memw); + } + + if (memw >= 0) + inst |= (1 << 23); + else + memw = -memw; + + return push_inst(compiler, inst | TYPE2_TRANSFER_IMM(memw)); +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) +{ + struct sljit_const *const_; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG2; + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + PTR_FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, dst_r, TMP_PC, 0), init_value)); + compiler->patches++; +#else + PTR_FAIL_IF(emit_imm(compiler, dst_r, init_value)); +#endif + + const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); + PTR_FAIL_IF(!const_); + set_const(const_, compiler); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, dst, dstw, TMP_REG1)); + return const_; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + struct sljit_put_label *put_label; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG2; + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + PTR_FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, dst_r, TMP_PC, 0), 0)); + compiler->patches++; +#else + PTR_FAIL_IF(emit_imm(compiler, dst_r, 0)); +#endif + + put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label)); + PTR_FAIL_IF(!put_label); + set_put_label(put_label, compiler, 0); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, dst, dstw, TMP_REG1)); + return put_label; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) +{ + inline_set_jump_addr(addr, executable_offset, new_target, 1); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) +{ + inline_set_const(addr, executable_offset, new_constant, 1); +} diff --git a/src/3rd/pcre/sjarm64.c b/src/3rd/pcre/sjarm64.c new file mode 100644 index 00000000000..e15b3451e80 --- /dev/null +++ b/src/3rd/pcre/sjarm64.c @@ -0,0 +1,2035 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
+{
+	return "ARM-64" SLJIT_CPUINFO;
+}
+
+/* Length of an instruction word */
+typedef sljit_u32 sljit_ins;
+
+#define TMP_ZERO (0)
+
+#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
+#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
+#define TMP_LR (SLJIT_NUMBER_OF_REGISTERS + 4)
+#define TMP_FP (SLJIT_NUMBER_OF_REGISTERS + 5)
+
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
+
+/* r18 - platform register, currently not used */
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 8] = {
+	31, 0, 1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 8, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 31, 9, 10, 30, 29
+};
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+	0, 0, 1, 2, 3, 4, 5, 6, 7
+};
+
+#define W_OP (1u << 31)
+#define RD(rd) (reg_map[rd])
+#define RT(rt) (reg_map[rt])
+#define RN(rn) (reg_map[rn] << 5)
+#define RT2(rt2) (reg_map[rt2] << 10)
+#define RM(rm) (reg_map[rm] << 16)
+#define VD(vd) (freg_map[vd])
+#define VT(vt) (freg_map[vt])
+#define VN(vn) (freg_map[vn] << 5)
+#define VM(vm) (freg_map[vm] << 16)
+
+/* --------------------------------------------------------------------- */
+/*  Instruction forms                                                    */
+/* --------------------------------------------------------------------- */
+
+#define ADC 0x9a000000
+#define ADD 0x8b000000
+#define ADDE 0x8b200000
+#define ADDI 0x91000000
+#define AND 0x8a000000
+#define ANDI 0x92000000
+#define ASRV 0x9ac02800
+#define B 0x14000000
+#define B_CC 0x54000000
+#define BL 0x94000000
+#define BLR 0xd63f0000
+#define BR 0xd61f0000
+#define BRK 0xd4200000
+#define CBZ 0xb4000000
+#define CLZ 0xdac01000
+#define CSEL 0x9a800000
+#define CSINC 0x9a800400
+#define EOR 0xca000000
+#define EORI 0xd2000000
+#define FABS 0x1e60c000
+#define FADD 0x1e602800
+#define FCMP 0x1e602000
+#define FCVT 0x1e224000
+#define FCVTZS 0x9e780000
+#define FDIV 0x1e601800
+#define FMOV 0x1e604000
+#define FMUL 0x1e600800
+#define FNEG 0x1e614000
+#define FSUB 0x1e603800
+#define LDRI 0xf9400000
+#define LDP 0xa9400000
+#define LDP_PRE 0xa9c00000
+#define LDR_PRE 0xf8400c00
+#define LSLV 0x9ac02000
+#define LSRV 0x9ac02400
+#define MADD 0x9b000000
+#define MOVK 0xf2800000
+#define MOVN 0x92800000
+#define MOVZ 0xd2800000
+#define NOP 0xd503201f
+#define ORN 0xaa200000
+#define ORR 0xaa000000
+#define ORRI 0xb2000000
+#define RET 0xd65f0000
+#define SBC 0xda000000
+#define SBFM 0x93000000
+#define SCVTF 0x9e620000
+#define SDIV 0x9ac00c00
+#define SMADDL 0x9b200000
+#define SMULH 0x9b403c00
+#define STP 0xa9000000
+#define STP_PRE 0xa9800000
+#define STRB 0x38206800
+#define STRBI 0x39000000
+#define STRI 0xf9000000
+#define STR_FI 0x3d000000
+#define STR_FR 0x3c206800
+#define STUR_FI 0x3c000000
+#define STURBI 0x38000000
+#define SUB 0xcb000000
+#define SUBI 0xd1000000
+#define SUBS 0xeb000000
+#define UBFM 0xd3000000
+#define UDIV 0x9ac00800
+#define UMULH 0x9bc03c00
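+
+/* The values above are base encodings: operand fields are OR-ed in through
+   the RD/RN/RM/RT2 (and VD/VN/VM) macros, and XOR-ing an opcode with W_OP
+   selects its 32-bit (Wn register) form. For example, ADD | RD(d) | RN(n) | RM(m)
+   encodes "add Xd, Xn, Xm", while (ADD ^ W_OP) | ... encodes "add Wd, Wn, Wm". */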
+
+/* Emit a single instruction word into the code buffer; the operand fields
+   are expected to be encoded into ins by the caller. */
+static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins)
+{
+	sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
+	FAIL_IF(!ptr);
+	*ptr = ins;
+	compiler->size++;
+	return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_s32 emit_imm64_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_uw imm)
+{
+	FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
+	FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((imm >> 16) & 0xffff) << 5) | (1 << 21)));
+	FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((imm >> 32) & 0xffff) << 5) | (2 << 21)));
+	return push_inst(compiler, MOVK | RD(dst) | ((imm >> 48) << 5) | (3 << 21));
+}
+
+static SLJIT_INLINE void modify_imm64_const(sljit_ins* inst, sljit_uw new_imm)
+{
+	sljit_s32 dst = inst[0] & 0x1f;
+	SLJIT_ASSERT((inst[0] & 0xffe00000) == MOVZ && (inst[1] & 0xffe00000) == (MOVK | (1 << 21)));
+	inst[0] = MOVZ | dst | ((new_imm & 0xffff) << 5);
+	inst[1] = MOVK | dst | (((new_imm >> 16) & 0xffff) << 5) | (1 << 21);
+	inst[2] = MOVK | dst | (((new_imm >> 32) & 0xffff) << 5) | (2 << 21);
+	inst[3] = MOVK | dst | ((new_imm >> 48) << 5) | (3 << 21);
+}
+
+static SLJIT_INLINE sljit_sw detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset)
+{
+	sljit_sw diff;
+	sljit_uw target_addr;
+
+	if (jump->flags & SLJIT_REWRITABLE_JUMP) {
+		jump->flags |= PATCH_ABS64;
+		return 0;
+	}
+
+	if (jump->flags & JUMP_ADDR)
+		target_addr = jump->u.target;
+	else {
+		SLJIT_ASSERT(jump->flags & JUMP_LABEL);
+		target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset;
+	}
+
+	diff = (sljit_sw)target_addr - (sljit_sw)(code_ptr + 4) - executable_offset;
+
+	if (jump->flags & IS_COND) {
+		diff += sizeof(sljit_ins);
+		if (diff <= 0xfffff && diff >= -0x100000) {
+			code_ptr[-5] ^= (jump->flags & IS_CBZ) ?
(0x1 << 24) : 0x1; + jump->addr -= sizeof(sljit_ins); + jump->flags |= PATCH_COND; + return 5; + } + diff -= sizeof(sljit_ins); + } + + if (diff <= 0x7ffffff && diff >= -0x8000000) { + jump->flags |= PATCH_B; + return 4; + } + + if (target_addr < 0x100000000l) { + if (jump->flags & IS_COND) + code_ptr[-5] -= (2 << 5); + code_ptr[-2] = code_ptr[0]; + return 2; + } + + if (target_addr < 0x1000000000000l) { + if (jump->flags & IS_COND) + code_ptr[-5] -= (1 << 5); + jump->flags |= PATCH_ABS48; + code_ptr[-1] = code_ptr[0]; + return 1; + } + + jump->flags |= PATCH_ABS64; + return 0; +} + +static SLJIT_INLINE sljit_sw put_label_get_length(struct sljit_put_label *put_label, sljit_uw max_label) +{ + if (max_label < 0x100000000l) { + put_label->flags = 0; + return 2; + } + + if (max_label < 0x1000000000000l) { + put_label->flags = 1; + return 1; + } + + put_label->flags = 2; + return 0; +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + sljit_ins *code; + sljit_ins *code_ptr; + sljit_ins *buf_ptr; + sljit_ins *buf_end; + sljit_uw word_count; + sljit_uw next_addr; + sljit_sw executable_offset; + sljit_uw addr; + sljit_s32 dst; + + struct sljit_label *label; + struct sljit_jump *jump; + struct sljit_const *const_; + struct sljit_put_label *put_label; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_generate_code(compiler)); + reverse_buf(compiler); + + code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins)); + PTR_FAIL_WITH_EXEC_IF(code); + buf = compiler->buf; + + code_ptr = code; + word_count = 0; + next_addr = 0; + executable_offset = SLJIT_EXEC_OFFSET(code); + + label = compiler->labels; + jump = compiler->jumps; + const_ = compiler->consts; + put_label = compiler->put_labels; + + do { + buf_ptr = (sljit_ins*)buf->memory; + buf_end = buf_ptr + (buf->used_size >> 2); + do { + *code_ptr = *buf_ptr++; + if (next_addr == word_count) { + SLJIT_ASSERT(!label || label->size >= word_count); + SLJIT_ASSERT(!jump || jump->addr >= word_count); + SLJIT_ASSERT(!const_ || const_->addr >= word_count); + SLJIT_ASSERT(!put_label || put_label->addr >= word_count); + + /* These structures are ordered by their address. 
*/ + if (label && label->size == word_count) { + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + label->size = code_ptr - code; + label = label->next; + } + if (jump && jump->addr == word_count) { + jump->addr = (sljit_uw)(code_ptr - 4); + code_ptr -= detect_jump_type(jump, code_ptr, code, executable_offset); + jump = jump->next; + } + if (const_ && const_->addr == word_count) { + const_->addr = (sljit_uw)code_ptr; + const_ = const_->next; + } + if (put_label && put_label->addr == word_count) { + SLJIT_ASSERT(put_label->label); + put_label->addr = (sljit_uw)(code_ptr - 3); + code_ptr -= put_label_get_length(put_label, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size)); + put_label = put_label->next; + } + next_addr = compute_next_addr(label, jump, const_, put_label); + } + code_ptr ++; + word_count ++; + } while (buf_ptr < buf_end); + + buf = buf->next; + } while (buf); + + if (label && label->size == word_count) { + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + label->size = code_ptr - code; + label = label->next; + } + + SLJIT_ASSERT(!label); + SLJIT_ASSERT(!jump); + SLJIT_ASSERT(!const_); + SLJIT_ASSERT(!put_label); + SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size); + + jump = compiler->jumps; + while (jump) { + do { + addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target; + buf_ptr = (sljit_ins *)jump->addr; + + if (jump->flags & PATCH_B) { + addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2; + SLJIT_ASSERT((sljit_sw)addr <= 0x1ffffff && (sljit_sw)addr >= -0x2000000); + buf_ptr[0] = ((jump->flags & IS_BL) ? BL : B) | (addr & 0x3ffffff); + if (jump->flags & IS_COND) + buf_ptr[-1] -= (4 << 5); + break; + } + if (jump->flags & PATCH_COND) { + addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2; + SLJIT_ASSERT((sljit_sw)addr <= 0x3ffff && (sljit_sw)addr >= -0x40000); + buf_ptr[0] = (buf_ptr[0] & ~0xffffe0) | ((addr & 0x7ffff) << 5); + break; + } + + SLJIT_ASSERT((jump->flags & (PATCH_ABS48 | PATCH_ABS64)) || addr <= 0xffffffffl); + SLJIT_ASSERT((jump->flags & PATCH_ABS64) || addr <= 0xffffffffffffl); + + dst = buf_ptr[0] & 0x1f; + buf_ptr[0] = MOVZ | dst | ((addr & 0xffff) << 5); + buf_ptr[1] = MOVK | dst | (((addr >> 16) & 0xffff) << 5) | (1 << 21); + if (jump->flags & (PATCH_ABS48 | PATCH_ABS64)) + buf_ptr[2] = MOVK | dst | (((addr >> 32) & 0xffff) << 5) | (2 << 21); + if (jump->flags & PATCH_ABS64) + buf_ptr[3] = MOVK | dst | (((addr >> 48) & 0xffff) << 5) | (3 << 21); + } while (0); + jump = jump->next; + } + + put_label = compiler->put_labels; + while (put_label) { + addr = put_label->label->addr; + buf_ptr = (sljit_ins *)put_label->addr; + + buf_ptr[0] |= (addr & 0xffff) << 5; + buf_ptr[1] |= ((addr >> 16) & 0xffff) << 5; + + if (put_label->flags >= 1) + buf_ptr[2] |= ((addr >> 32) & 0xffff) << 5; + + if (put_label->flags >= 2) + buf_ptr[3] |= ((addr >> 48) & 0xffff) << 5; + + put_label = put_label->next; + } + + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_offset = executable_offset; + compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins); + + code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset); + code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + + SLJIT_CACHE_FLUSH(code, code_ptr); + return code; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) +{ + switch (feature_type) { 
+ case SLJIT_HAS_FPU: +#ifdef SLJIT_IS_FPU_AVAILABLE + return SLJIT_IS_FPU_AVAILABLE; +#else + /* Available by default. */ + return 1; +#endif + + case SLJIT_HAS_CLZ: + case SLJIT_HAS_CMOV: + return 1; + + default: + return 0; + } +} + +/* --------------------------------------------------------------------- */ +/* Core code generator functions. */ +/* --------------------------------------------------------------------- */ + +#define COUNT_TRAILING_ZERO(value, result) \ + result = 0; \ + if (!(value & 0xffffffff)) { \ + result += 32; \ + value >>= 32; \ + } \ + if (!(value & 0xffff)) { \ + result += 16; \ + value >>= 16; \ + } \ + if (!(value & 0xff)) { \ + result += 8; \ + value >>= 8; \ + } \ + if (!(value & 0xf)) { \ + result += 4; \ + value >>= 4; \ + } \ + if (!(value & 0x3)) { \ + result += 2; \ + value >>= 2; \ + } \ + if (!(value & 0x1)) { \ + result += 1; \ + value >>= 1; \ + } + +#define LOGICAL_IMM_CHECK 0x100 + +static sljit_ins logical_imm(sljit_sw imm, sljit_s32 len) +{ + sljit_s32 negated, ones, right; + sljit_uw mask, uimm; + sljit_ins ins; + + if (len & LOGICAL_IMM_CHECK) { + len &= ~LOGICAL_IMM_CHECK; + if (len == 32 && (imm == 0 || imm == -1)) + return 0; + if (len == 16 && ((sljit_s32)imm == 0 || (sljit_s32)imm == -1)) + return 0; + } + + SLJIT_ASSERT((len == 32 && imm != 0 && imm != -1) + || (len == 16 && (sljit_s32)imm != 0 && (sljit_s32)imm != -1)); + + uimm = (sljit_uw)imm; + while (1) { + if (len <= 0) { + SLJIT_UNREACHABLE(); + return 0; + } + + mask = ((sljit_uw)1 << len) - 1; + if ((uimm & mask) != ((uimm >> len) & mask)) + break; + len >>= 1; + } + + len <<= 1; + + negated = 0; + if (uimm & 0x1) { + negated = 1; + uimm = ~uimm; + } + + if (len < 64) + uimm &= ((sljit_uw)1 << len) - 1; + + /* Unsigned right shift. */ + COUNT_TRAILING_ZERO(uimm, right); + + /* Signed shift. We also know that the highest bit is set. */ + imm = (sljit_sw)~uimm; + SLJIT_ASSERT(imm < 0); + + COUNT_TRAILING_ZERO(imm, ones); + + if (~imm) + return 0; + + if (len == 64) + ins = 1 << 22; + else + ins = (0x3f - ((len << 1) - 1)) << 10; + + if (negated) + return ins | ((len - ones - 1) << 10) | ((len - ones - right) << 16); + + return ins | ((ones - 1) << 10) | ((len - right) << 16); +} + +#undef COUNT_TRAILING_ZERO + +static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw simm) +{ + sljit_uw imm = (sljit_uw)simm; + sljit_s32 i, zeros, ones, first; + sljit_ins bitmask; + + /* Handling simple immediates first. 
*/
+	if (imm <= 0xffff)
+		return push_inst(compiler, MOVZ | RD(dst) | (imm << 5));
+
+	if (simm < 0 && simm >= -0x10000)
+		return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5));
+
+	if (imm <= 0xffffffffl) {
+		if ((imm & 0xffff) == 0)
+			return push_inst(compiler, MOVZ | RD(dst) | ((imm >> 16) << 5) | (1 << 21));
+		if ((imm & 0xffff0000l) == 0xffff0000)
+			return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff) << 5));
+		if ((imm & 0xffff) == 0xffff)
+			return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
+
+		bitmask = logical_imm(simm, 16);
+		if (bitmask != 0)
+			return push_inst(compiler, (ORRI ^ W_OP) | RD(dst) | RN(TMP_ZERO) | bitmask);
+
+		FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
+		return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
+	}
+
+	bitmask = logical_imm(simm, 32);
+	if (bitmask != 0)
+		return push_inst(compiler, ORRI | RD(dst) | RN(TMP_ZERO) | bitmask);
+
+	if (simm < 0 && simm >= -0x100000000l) {
+		if ((imm & 0xffff) == 0xffff)
+			return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
+
+		FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5)));
+		return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
+	}
+
+	/* A large number of constants can be constructed from ORR and MOVx, but computing them is costly. */
+
+	zeros = 0;
+	ones = 0;
+	for (i = 4; i > 0; i--) {
+		if ((simm & 0xffff) == 0)
+			zeros++;
+		if ((simm & 0xffff) == 0xffff)
+			ones++;
+		simm >>= 16;
+	}
+
+	simm = (sljit_sw)imm;
+	first = 1;
+	if (ones > zeros) {
+		simm = ~simm;
+		for (i = 0; i < 4; i++) {
+			if (!(simm & 0xffff)) {
+				simm >>= 16;
+				continue;
+			}
+			if (first) {
+				first = 0;
+				FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
+			}
+			else
+				FAIL_IF(push_inst(compiler, MOVK | RD(dst) | ((~simm & 0xffff) << 5) | (i << 21)));
+			simm >>= 16;
+		}
+		return SLJIT_SUCCESS;
+	}
+
+	for (i = 0; i < 4; i++) {
+		if (!(simm & 0xffff)) {
+			simm >>= 16;
+			continue;
+		}
+		if (first) {
+			first = 0;
+			FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
+		}
+		else
+			FAIL_IF(push_inst(compiler, MOVK | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
+		simm >>= 16;
+	}
+	return SLJIT_SUCCESS;
+}
+
+#define ARG1_IMM 0x0010000
+#define ARG2_IMM 0x0020000
+#define INT_OP 0x0040000
+#define SET_FLAGS 0x0080000
+#define UNUSED_RETURN 0x0100000
+
+#define CHECK_FLAGS(flag_bits) \
+	if (flags & SET_FLAGS) { \
+		inv_bits |= flag_bits; \
+		if (flags & UNUSED_RETURN) \
+			dst = TMP_ZERO; \
+	}
+
+static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 dst, sljit_sw arg1, sljit_sw arg2)
+{
+	/* dst must be register, TMP_REG1
+	   arg1 must be register, TMP_REG1, imm
+	   arg2 must be register, TMP_REG2, imm */
+	sljit_ins inv_bits = (flags & INT_OP) ? W_OP : 0;
+	sljit_ins inst_bits;
+	sljit_s32 op = (flags & 0xffff);
+	sljit_s32 reg;
+	sljit_sw imm, nimm;
+
+	if (SLJIT_UNLIKELY((flags & (ARG1_IMM | ARG2_IMM)) == (ARG1_IMM | ARG2_IMM))) {
+		/* Both are immediates. */
+		flags &= ~ARG1_IMM;
+		if (arg1 == 0 && op != SLJIT_ADD && op != SLJIT_SUB)
+			arg1 = TMP_ZERO;
+		else {
+			FAIL_IF(load_immediate(compiler, TMP_REG1, arg1));
+			arg1 = TMP_REG1;
+		}
+	}
+
+	if (flags & (ARG1_IMM | ARG2_IMM)) {
+		reg = (flags & ARG2_IMM) ? arg1 : arg2;
+		imm = (flags & ARG2_IMM) ?
arg2 : arg1; + + switch (op) { + case SLJIT_MUL: + case SLJIT_NEG: + case SLJIT_CLZ: + case SLJIT_ADDC: + case SLJIT_SUBC: + /* No form with immediate operand (except imm 0, which + is represented by a ZERO register). */ + break; + case SLJIT_MOV: + SLJIT_ASSERT(!(flags & SET_FLAGS) && (flags & ARG2_IMM) && arg1 == TMP_REG1); + return load_immediate(compiler, dst, imm); + case SLJIT_NOT: + SLJIT_ASSERT(flags & ARG2_IMM); + FAIL_IF(load_immediate(compiler, dst, (flags & INT_OP) ? (~imm & 0xffffffff) : ~imm)); + goto set_flags; + case SLJIT_SUB: + if (flags & ARG1_IMM) + break; + imm = -imm; + /* Fall through. */ + case SLJIT_ADD: + if (imm == 0) { + CHECK_FLAGS(1 << 29); + return push_inst(compiler, ((op == SLJIT_ADD ? ADDI : SUBI) ^ inv_bits) | RD(dst) | RN(reg)); + } + if (imm > 0 && imm <= 0xfff) { + CHECK_FLAGS(1 << 29); + return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | (imm << 10)); + } + nimm = -imm; + if (nimm > 0 && nimm <= 0xfff) { + CHECK_FLAGS(1 << 29); + return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | (nimm << 10)); + } + if (imm > 0 && imm <= 0xffffff && !(imm & 0xfff)) { + CHECK_FLAGS(1 << 29); + return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((imm >> 12) << 10) | (1 << 22)); + } + if (nimm > 0 && nimm <= 0xffffff && !(nimm & 0xfff)) { + CHECK_FLAGS(1 << 29); + return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((nimm >> 12) << 10) | (1 << 22)); + } + if (imm > 0 && imm <= 0xffffff && !(flags & SET_FLAGS)) { + FAIL_IF(push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((imm >> 12) << 10) | (1 << 22))); + return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(dst) | ((imm & 0xfff) << 10)); + } + if (nimm > 0 && nimm <= 0xffffff && !(flags & SET_FLAGS)) { + FAIL_IF(push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((nimm >> 12) << 10) | (1 << 22))); + return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(dst) | ((nimm & 0xfff) << 10)); + } + break; + case SLJIT_AND: + inst_bits = logical_imm(imm, LOGICAL_IMM_CHECK | ((flags & INT_OP) ? 16 : 32)); + if (!inst_bits) + break; + CHECK_FLAGS(3 << 29); + return push_inst(compiler, (ANDI ^ inv_bits) | RD(dst) | RN(reg) | inst_bits); + case SLJIT_OR: + case SLJIT_XOR: + inst_bits = logical_imm(imm, LOGICAL_IMM_CHECK | ((flags & INT_OP) ? 
16 : 32)); + if (!inst_bits) + break; + if (op == SLJIT_OR) + inst_bits |= ORRI; + else + inst_bits |= EORI; + FAIL_IF(push_inst(compiler, (inst_bits ^ inv_bits) | RD(dst) | RN(reg))); + goto set_flags; + case SLJIT_SHL: + if (flags & ARG1_IMM) + break; + if (flags & INT_OP) { + imm &= 0x1f; + FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | ((-imm & 0x1f) << 16) | ((31 - imm) << 10))); + } + else { + imm &= 0x3f; + FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) | ((-imm & 0x3f) << 16) | ((63 - imm) << 10))); + } + goto set_flags; + case SLJIT_LSHR: + case SLJIT_ASHR: + if (flags & ARG1_IMM) + break; + if (op == SLJIT_ASHR) + inv_bits |= 1 << 30; + if (flags & INT_OP) { + imm &= 0x1f; + FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (imm << 16) | (31 << 10))); + } + else { + imm &= 0x3f; + FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) | (imm << 16) | (63 << 10))); + } + goto set_flags; + default: + SLJIT_UNREACHABLE(); + break; + } + + if (flags & ARG2_IMM) { + if (arg2 == 0) + arg2 = TMP_ZERO; + else { + FAIL_IF(load_immediate(compiler, TMP_REG2, arg2)); + arg2 = TMP_REG2; + } + } + else { + if (arg1 == 0) + arg1 = TMP_ZERO; + else { + FAIL_IF(load_immediate(compiler, TMP_REG1, arg1)); + arg1 = TMP_REG1; + } + } + } + + /* Both arguments are registers. */ + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_P: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + if (dst == arg2) + return SLJIT_SUCCESS; + return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(arg2)); + case SLJIT_MOV_U8: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + return push_inst(compiler, (UBFM ^ W_OP) | RD(dst) | RN(arg2) | (7 << 10)); + case SLJIT_MOV_S8: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + if (!(flags & INT_OP)) + inv_bits |= 1 << 22; + return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (7 << 10)); + case SLJIT_MOV_U16: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + return push_inst(compiler, (UBFM ^ W_OP) | RD(dst) | RN(arg2) | (15 << 10)); + case SLJIT_MOV_S16: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + if (!(flags & INT_OP)) + inv_bits |= 1 << 22; + return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (15 << 10)); + case SLJIT_MOV_U32: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + if ((flags & INT_OP) && dst == arg2) + return SLJIT_SUCCESS; + return push_inst(compiler, (ORR ^ W_OP) | RD(dst) | RN(TMP_ZERO) | RM(arg2)); + case SLJIT_MOV_S32: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + if ((flags & INT_OP) && dst == arg2) + return SLJIT_SUCCESS; + return push_inst(compiler, SBFM | (1 << 22) | RD(dst) | RN(arg2) | (31 << 10)); + case SLJIT_NOT: + SLJIT_ASSERT(arg1 == TMP_REG1); + FAIL_IF(push_inst(compiler, (ORN ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(arg2))); + break; /* Set flags. 
*/ + case SLJIT_NEG: + SLJIT_ASSERT(arg1 == TMP_REG1); + if (flags & SET_FLAGS) + inv_bits |= 1 << 29; + return push_inst(compiler, (SUB ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(arg2)); + case SLJIT_CLZ: + SLJIT_ASSERT(arg1 == TMP_REG1); + return push_inst(compiler, (CLZ ^ inv_bits) | RD(dst) | RN(arg2)); + case SLJIT_ADD: + CHECK_FLAGS(1 << 29); + return push_inst(compiler, (ADD ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)); + case SLJIT_ADDC: + CHECK_FLAGS(1 << 29); + return push_inst(compiler, (ADC ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)); + case SLJIT_SUB: + CHECK_FLAGS(1 << 29); + return push_inst(compiler, (SUB ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)); + case SLJIT_SUBC: + CHECK_FLAGS(1 << 29); + return push_inst(compiler, (SBC ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)); + case SLJIT_MUL: + if (!(flags & SET_FLAGS)) + return push_inst(compiler, (MADD ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2) | RT2(TMP_ZERO)); + if (flags & INT_OP) { + FAIL_IF(push_inst(compiler, SMADDL | RD(dst) | RN(arg1) | RM(arg2) | (31 << 10))); + FAIL_IF(push_inst(compiler, ADD | RD(TMP_LR) | RN(TMP_ZERO) | RM(dst) | (2 << 22) | (31 << 10))); + return push_inst(compiler, SUBS | RD(TMP_ZERO) | RN(TMP_LR) | RM(dst) | (2 << 22) | (63 << 10)); + } + FAIL_IF(push_inst(compiler, SMULH | RD(TMP_LR) | RN(arg1) | RM(arg2))); + FAIL_IF(push_inst(compiler, MADD | RD(dst) | RN(arg1) | RM(arg2) | RT2(TMP_ZERO))); + return push_inst(compiler, SUBS | RD(TMP_ZERO) | RN(TMP_LR) | RM(dst) | (2 << 22) | (63 << 10)); + case SLJIT_AND: + CHECK_FLAGS(3 << 29); + return push_inst(compiler, (AND ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)); + case SLJIT_OR: + FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2))); + break; /* Set flags. */ + case SLJIT_XOR: + FAIL_IF(push_inst(compiler, (EOR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2))); + break; /* Set flags. */ + case SLJIT_SHL: + FAIL_IF(push_inst(compiler, (LSLV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2))); + break; /* Set flags. */ + case SLJIT_LSHR: + FAIL_IF(push_inst(compiler, (LSRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2))); + break; /* Set flags. */ + case SLJIT_ASHR: + FAIL_IF(push_inst(compiler, (ASRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2))); + break; /* Set flags. */ + default: + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; + } + +set_flags: + if (flags & SET_FLAGS) + return push_inst(compiler, (SUBS ^ inv_bits) | RD(TMP_ZERO) | RN(dst) | RM(TMP_ZERO)); + return SLJIT_SUCCESS; +} + +#define STORE 0x10 +#define SIGNED 0x20 + +#define BYTE_SIZE 0x0 +#define HALF_SIZE 0x1 +#define INT_SIZE 0x2 +#define WORD_SIZE 0x3 + +#define MEM_SIZE_SHIFT(flags) ((flags) & 0x3) + +static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, + sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg) +{ + sljit_u32 shift = MEM_SIZE_SHIFT(flags); + sljit_u32 type = (shift << 30); + + if (!(flags & STORE)) + type |= (flags & SIGNED) ? 0x00800000 : 0x00400000; + + SLJIT_ASSERT(arg & SLJIT_MEM); + + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { + argw &= 0x3; + + if (argw == 0 || argw == shift) + return push_inst(compiler, STRB | type | RT(reg) + | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? 
(1 << 12) : 0)); + + FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10))); + return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg)); + } + + arg &= REG_MASK; + + if (arg == SLJIT_UNUSED) { + FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~(0xfff << shift))); + + argw = (argw >> shift) & 0xfff; + + return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10)); + } + + if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) { + if ((argw >> shift) <= 0xfff) { + return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | (argw << (10 - shift))); + } + + if (argw <= 0xffffff) { + FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | ((argw >> 12) << 10))); + + argw = ((argw & 0xfff) >> shift); + return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10)); + } + } + + if (argw <= 255 && argw >= -256) + return push_inst(compiler, STURBI | type | RT(reg) | RN(arg) | ((argw & 0x1ff) << 12)); + + FAIL_IF(load_immediate(compiler, tmp_reg, argw)); + + return push_inst(compiler, STRB | type | RT(reg) | RN(arg) | RM(tmp_reg)); +} + +/* --------------------------------------------------------------------- */ +/* Entry, exit */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) +{ + sljit_s32 args, i, tmp, offs, prev, saved_regs_size; + + CHECK_ERROR(); + CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); + + saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2); + if (saved_regs_size & 0x8) + saved_regs_size += sizeof(sljit_sw); + + local_size = (local_size + 15) & ~0xf; + compiler->local_size = local_size + saved_regs_size; + + FAIL_IF(push_inst(compiler, STP_PRE | RT(TMP_FP) | RT2(TMP_LR) + | RN(SLJIT_SP) | ((-(saved_regs_size >> 3) & 0x7f) << 15))); + +#ifdef _WIN32 + if (local_size >= 4096) + FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(SLJIT_SP) | (1 << 10) | (1 << 22))); + else if (local_size > 256) + FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(SLJIT_SP) | (local_size << 10))); +#endif + + tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? 
(SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG; + prev = -1; + offs = 2 << 15; + for (i = SLJIT_S0; i >= tmp; i--) { + if (prev == -1) { + prev = i; + continue; + } + FAIL_IF(push_inst(compiler, STP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs)); + offs += 2 << 15; + prev = -1; + } + + for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) { + if (prev == -1) { + prev = i; + continue; + } + FAIL_IF(push_inst(compiler, STP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs)); + offs += 2 << 15; + prev = -1; + } + + if (prev != -1) + FAIL_IF(push_inst(compiler, STRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5))); + + + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_FP) | RN(SLJIT_SP) | (0 << 10))); + + args = get_arg_count(arg_types); + + if (args >= 1) + FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S0) | RN(TMP_ZERO) | RM(SLJIT_R0))); + if (args >= 2) + FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S1) | RN(TMP_ZERO) | RM(SLJIT_R1))); + if (args >= 3) + FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S2) | RN(TMP_ZERO) | RM(SLJIT_R2))); + +#ifdef _WIN32 + if (local_size >= 4096) { + if (local_size < 4 * 4096) { + /* No need for a loop. */ + if (local_size >= 2 * 4096) { + FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1))); + FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22))); + local_size -= 4096; + } + + if (local_size >= 2 * 4096) { + FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1))); + FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22))); + local_size -= 4096; + } + + FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1))); + local_size -= 4096; + } + else { + FAIL_IF(push_inst(compiler, MOVZ | RD(TMP_REG2) | (((local_size >> 12) - 1) << 5))); + FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1))); + FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22))); + FAIL_IF(push_inst(compiler, SUBI | (1 << 29) | RD(TMP_REG2) | RN(TMP_REG2) | (1 << 10))); + FAIL_IF(push_inst(compiler, B_CC | ((((sljit_ins) -3) & 0x7ffff) << 5) | 0x1 /* not-equal */)); + FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1))); + + local_size &= 0xfff; + } + + if (local_size > 256) { + FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (local_size << 10))); + FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1))); + } + else if (local_size > 0) + FAIL_IF(push_inst(compiler, LDR_PRE | RT(TMP_ZERO) | RN(TMP_REG1) | ((-local_size & 0x1ff) << 12))); + + FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_REG1) | (0 << 10))); + } + else if (local_size > 256) { + FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1))); + FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_REG1) | (0 << 10))); + } + else if (local_size > 0) + FAIL_IF(push_inst(compiler, LDR_PRE | RT(TMP_ZERO) | RN(SLJIT_SP) | ((-local_size & 0x1ff) << 12))); + +#else /* !_WIN32 */ + + /* The local_size does not include saved registers size. 
*/ + if (local_size > 0xfff) { + FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((local_size >> 12) << 10) | (1 << 22))); + local_size &= 0xfff; + } + if (local_size != 0) + FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (local_size << 10))); + +#endif /* _WIN32 */ + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) +{ + sljit_s32 saved_regs_size; + + CHECK_ERROR(); + CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); + + saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2); + if (saved_regs_size & 0x8) + saved_regs_size += sizeof(sljit_sw); + + compiler->local_size = saved_regs_size + ((local_size + 15) & ~0xf); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 local_size; + sljit_s32 i, tmp, offs, prev, saved_regs_size; + + CHECK_ERROR(); + CHECK(check_sljit_emit_return(compiler, op, src, srcw)); + + FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); + + saved_regs_size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 2); + if (saved_regs_size & 0x8) + saved_regs_size += sizeof(sljit_sw); + + local_size = compiler->local_size - saved_regs_size; + + /* Load LR as early as possible. */ + if (local_size == 0) + FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP))); + else if (local_size < 63 * sizeof(sljit_sw)) { + FAIL_IF(push_inst(compiler, LDP_PRE | RT(TMP_FP) | RT2(TMP_LR) + | RN(SLJIT_SP) | (local_size << (15 - 3)))); + } + else { + if (local_size > 0xfff) { + FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((local_size >> 12) << 10) | (1 << 22))); + local_size &= 0xfff; + } + if (local_size) + FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (local_size << 10))); + + FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP))); + } + + tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG; + prev = -1; + offs = 2 << 15; + for (i = SLJIT_S0; i >= tmp; i--) { + if (prev == -1) { + prev = i; + continue; + } + FAIL_IF(push_inst(compiler, LDP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs)); + offs += 2 << 15; + prev = -1; + } + + for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--) { + if (prev == -1) { + prev = i; + continue; + } + FAIL_IF(push_inst(compiler, LDP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs)); + offs += 2 << 15; + prev = -1; + } + + if (prev != -1) + FAIL_IF(push_inst(compiler, LDRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5))); + + /* These two can be executed in parallel. */ + FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (saved_regs_size << 10))); + return push_inst(compiler, RET | RN(TMP_LR)); +} + +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) +{ + sljit_ins inv_bits = (op & SLJIT_I32_OP) ? 
W_OP : 0; + + CHECK_ERROR(); + CHECK(check_sljit_emit_op0(compiler, op)); + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_BREAKPOINT: + return push_inst(compiler, BRK); + case SLJIT_NOP: + return push_inst(compiler, NOP); + case SLJIT_LMUL_UW: + case SLJIT_LMUL_SW: + FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0))); + FAIL_IF(push_inst(compiler, MADD | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO))); + return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULH : SMULH) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1)); + case SLJIT_DIVMOD_UW: + case SLJIT_DIVMOD_SW: + FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0))); + FAIL_IF(push_inst(compiler, ((op == SLJIT_DIVMOD_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1))); + FAIL_IF(push_inst(compiler, (MADD ^ inv_bits) | RD(SLJIT_R1) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO))); + return push_inst(compiler, (SUB ^ inv_bits) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1)); + case SLJIT_DIV_UW: + case SLJIT_DIV_SW: + return push_inst(compiler, ((op == SLJIT_DIV_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1)); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r, flags, mem_flags; + sljit_s32 op_flags = GET_ALL_FLAGS(op); + + CHECK_ERROR(); + CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) { + if (op <= SLJIT_MOV_P && (src & SLJIT_MEM)) { + SLJIT_ASSERT(reg_map[1] == 0 && reg_map[3] == 2 && reg_map[5] == 4); + + if (op >= SLJIT_MOV_U8 && op <= SLJIT_MOV_S8) + dst = 5; + else if (op >= SLJIT_MOV_U16 && op <= SLJIT_MOV_S16) + dst = 3; + else + dst = 1; + + /* Signed word sized load is the prefetch instruction. */ + return emit_op_mem(compiler, WORD_SIZE | SIGNED, dst, src, srcw, TMP_REG1); + } + return SLJIT_SUCCESS; + } + + dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1; + + op = GET_OPCODE(op); + if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) { + /* Both operands are registers. */ + if (dst_r != TMP_REG1 && FAST_IS_REG(src)) + return emit_op_imm(compiler, op | ((op_flags & SLJIT_I32_OP) ? 
INT_OP : 0), dst_r, TMP_REG1, src); + + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_P: + mem_flags = WORD_SIZE; + break; + case SLJIT_MOV_U8: + mem_flags = BYTE_SIZE; + if (src & SLJIT_IMM) + srcw = (sljit_u8)srcw; + break; + case SLJIT_MOV_S8: + mem_flags = BYTE_SIZE | SIGNED; + if (src & SLJIT_IMM) + srcw = (sljit_s8)srcw; + break; + case SLJIT_MOV_U16: + mem_flags = HALF_SIZE; + if (src & SLJIT_IMM) + srcw = (sljit_u16)srcw; + break; + case SLJIT_MOV_S16: + mem_flags = HALF_SIZE | SIGNED; + if (src & SLJIT_IMM) + srcw = (sljit_s16)srcw; + break; + case SLJIT_MOV_U32: + mem_flags = INT_SIZE; + if (src & SLJIT_IMM) + srcw = (sljit_u32)srcw; + break; + case SLJIT_MOV_S32: + mem_flags = INT_SIZE | SIGNED; + if (src & SLJIT_IMM) + srcw = (sljit_s32)srcw; + break; + default: + SLJIT_UNREACHABLE(); + mem_flags = 0; + break; + } + + if (src & SLJIT_IMM) + FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG1, srcw)); + else if (!(src & SLJIT_MEM)) + dst_r = src; + else + FAIL_IF(emit_op_mem(compiler, mem_flags, dst_r, src, srcw, TMP_REG1)); + + if (dst & SLJIT_MEM) + return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2); + return SLJIT_SUCCESS; + } + + flags = HAS_FLAGS(op_flags) ? SET_FLAGS : 0; + mem_flags = WORD_SIZE; + + if (op_flags & SLJIT_I32_OP) { + flags |= INT_OP; + mem_flags = INT_SIZE; + } + + if (dst == SLJIT_UNUSED) + flags |= UNUSED_RETURN; + + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src, srcw, TMP_REG2)); + src = TMP_REG2; + } + + emit_op_imm(compiler, flags | op, dst_r, TMP_REG1, src); + + if (SLJIT_UNLIKELY(dst & SLJIT_MEM)) + return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 dst_r, flags, mem_flags; + + CHECK_ERROR(); + CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) + return SLJIT_SUCCESS; + + dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1; + flags = HAS_FLAGS(op) ? 
SET_FLAGS : 0; + mem_flags = WORD_SIZE; + + if (op & SLJIT_I32_OP) { + flags |= INT_OP; + mem_flags = INT_SIZE; + } + + if (dst == SLJIT_UNUSED) + flags |= UNUSED_RETURN; + + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG1, src1, src1w, TMP_REG1)); + src1 = TMP_REG1; + } + + if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src2, src2w, TMP_REG2)); + src2 = TMP_REG2; + } + + if (src1 & SLJIT_IMM) + flags |= ARG1_IMM; + else + src1w = src1; + + if (src2 & SLJIT_IMM) + flags |= ARG2_IMM; + else + src2w = src2; + + emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src1w, src2w); + + if (dst & SLJIT_MEM) + return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg) +{ + CHECK_REG_INDEX(check_sljit_get_register_index(reg)); + return reg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg) +{ + CHECK_REG_INDEX(check_sljit_get_float_register_index(reg)); + return freg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_s32 size) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_op_custom(compiler, instruction, size)); + + return push_inst(compiler, *(sljit_ins*)instruction); +} + +/* --------------------------------------------------------------------- */ +/* Floating point operators */ +/* --------------------------------------------------------------------- */ + +static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw) +{ + sljit_u32 shift = MEM_SIZE_SHIFT(flags); + sljit_ins type = (shift << 30); + + SLJIT_ASSERT(arg & SLJIT_MEM); + + if (!(flags & STORE)) + type |= 0x00400000; + + if (arg & OFFS_REG_MASK) { + argw &= 3; + if (argw == 0 || argw == shift) + return push_inst(compiler, STR_FR | type | VT(reg) + | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0)); + + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10))); + return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1)); + } + + arg &= REG_MASK; + + if (arg == SLJIT_UNUSED) { + FAIL_IF(load_immediate(compiler, TMP_REG1, argw & ~(0xfff << shift))); + + argw = (argw >> shift) & 0xfff; + + return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10)); + } + + if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) { + if ((argw >> shift) <= 0xfff) + return push_inst(compiler, STR_FI | type | VT(reg) | RN(arg) | (argw << (10 - shift))); + + if (argw <= 0xffffff) { + FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(TMP_REG1) | RN(arg) | ((argw >> 12) << 10))); + + argw = ((argw & 0xfff) >> shift); + return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10)); + } + } + + if (argw <= 255 && argw >= -256) + return push_inst(compiler, STUR_FI | type | VT(reg) | RN(arg) | ((argw & 0x1ff) << 12)); + + FAIL_IF(load_immediate(compiler, TMP_REG1, argw)); + return push_inst(compiler, STR_FR | type | VT(reg) | RN(arg) | RM(TMP_REG1)); +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; + sljit_ins inv_bits = (op & SLJIT_F32_OP) ? 
(1 << 22) : 0; + + if (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) + inv_bits |= W_OP; + + if (src & SLJIT_MEM) { + emit_fop_mem(compiler, (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src, srcw); + src = TMP_FREG1; + } + + FAIL_IF(push_inst(compiler, (FCVTZS ^ inv_bits) | RD(dst_r) | VN(src))); + + if (dst & SLJIT_MEM) + return emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? INT_SIZE : WORD_SIZE) | STORE, TMP_REG1, dst, dstw, TMP_REG2); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; + sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0; + + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) + inv_bits |= W_OP; + + if (src & SLJIT_MEM) { + emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw, TMP_REG1); + src = TMP_REG1; + } else if (src & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) + srcw = (sljit_s32)srcw; +#endif + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); + src = TMP_REG1; + } + + FAIL_IF(push_inst(compiler, (SCVTF ^ inv_bits) | VD(dst_r) | RN(src))); + + if (dst & SLJIT_MEM) + return emit_fop_mem(compiler, ((op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE) | STORE, TMP_FREG1, dst, dstw); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE; + sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0; + + if (src1 & SLJIT_MEM) { + emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w); + src1 = TMP_FREG1; + } + + if (src2 & SLJIT_MEM) { + emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w); + src2 = TMP_FREG2; + } + + return push_inst(compiler, (FCMP ^ inv_bits) | VN(src1) | VM(src2)); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE; + sljit_ins inv_bits; + + CHECK_ERROR(); + + SLJIT_COMPILE_ASSERT((INT_SIZE ^ 0x1) == WORD_SIZE, must_be_one_bit_difference); + SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw); + + inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0; + dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; + + if (src & SLJIT_MEM) { + emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) ? (mem_flags ^ 0x1) : mem_flags, dst_r, src, srcw); + src = dst_r; + } + + switch (GET_OPCODE(op)) { + case SLJIT_MOV_F64: + if (src != dst_r) { + if (dst_r != TMP_FREG1) + FAIL_IF(push_inst(compiler, (FMOV ^ inv_bits) | VD(dst_r) | VN(src))); + else + dst_r = src; + } + break; + case SLJIT_NEG_F64: + FAIL_IF(push_inst(compiler, (FNEG ^ inv_bits) | VD(dst_r) | VN(src))); + break; + case SLJIT_ABS_F64: + FAIL_IF(push_inst(compiler, (FABS ^ inv_bits) | VD(dst_r) | VN(src))); + break; + case SLJIT_CONV_F64_FROM_F32: + FAIL_IF(push_inst(compiler, FCVT | ((op & SLJIT_F32_OP) ? 
(1 << 22) : (1 << 15)) | VD(dst_r) | VN(src))); + break; + } + + if (dst & SLJIT_MEM) + return emit_fop_mem(compiler, mem_flags | STORE, dst_r, dst, dstw); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE; + sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0; + + CHECK_ERROR(); + CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; + if (src1 & SLJIT_MEM) { + emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w); + src1 = TMP_FREG1; + } + if (src2 & SLJIT_MEM) { + emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w); + src2 = TMP_FREG2; + } + + switch (GET_OPCODE(op)) { + case SLJIT_ADD_F64: + FAIL_IF(push_inst(compiler, (FADD ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2))); + break; + case SLJIT_SUB_F64: + FAIL_IF(push_inst(compiler, (FSUB ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2))); + break; + case SLJIT_MUL_F64: + FAIL_IF(push_inst(compiler, (FMUL ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2))); + break; + case SLJIT_DIV_F64: + FAIL_IF(push_inst(compiler, (FDIV ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2))); + break; + } + + if (!(dst & SLJIT_MEM)) + return SLJIT_SUCCESS; + return emit_fop_mem(compiler, mem_flags | STORE, TMP_FREG1, dst, dstw); +} + +/* --------------------------------------------------------------------- */ +/* Other instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + if (FAST_IS_REG(dst)) + return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(TMP_LR)); + + /* Memory. 
*/ + return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_LR, dst, dstw, TMP_REG1); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_fast_return(compiler, src, srcw)); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (FAST_IS_REG(src)) + FAIL_IF(push_inst(compiler, ORR | RD(TMP_LR) | RN(TMP_ZERO) | RM(src))); + else + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_LR, src, srcw, TMP_REG1)); + + return push_inst(compiler, RET | RN(TMP_LR)); +} + +/* --------------------------------------------------------------------- */ +/* Conditional instructions */ +/* --------------------------------------------------------------------- */ + +static sljit_uw get_cc(sljit_s32 type) +{ + switch (type) { + case SLJIT_EQUAL: + case SLJIT_MUL_NOT_OVERFLOW: + case SLJIT_EQUAL_F64: + return 0x1; + + case SLJIT_NOT_EQUAL: + case SLJIT_MUL_OVERFLOW: + case SLJIT_NOT_EQUAL_F64: + return 0x0; + + case SLJIT_LESS: + case SLJIT_LESS_F64: + return 0x2; + + case SLJIT_GREATER_EQUAL: + case SLJIT_GREATER_EQUAL_F64: + return 0x3; + + case SLJIT_GREATER: + case SLJIT_GREATER_F64: + return 0x9; + + case SLJIT_LESS_EQUAL: + case SLJIT_LESS_EQUAL_F64: + return 0x8; + + case SLJIT_SIG_LESS: + return 0xa; + + case SLJIT_SIG_GREATER_EQUAL: + return 0xb; + + case SLJIT_SIG_GREATER: + return 0xd; + + case SLJIT_SIG_LESS_EQUAL: + return 0xc; + + case SLJIT_OVERFLOW: + case SLJIT_UNORDERED_F64: + return 0x7; + + case SLJIT_NOT_OVERFLOW: + case SLJIT_ORDERED_F64: + return 0x6; + + default: + SLJIT_UNREACHABLE(); + return 0xe; + } +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + struct sljit_label *label; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_label(compiler)); + + if (compiler->last_label && compiler->last_label->size == compiler->size) + return compiler->last_label; + + label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); + PTR_FAIL_IF(!label); + set_label(label, compiler); + return label; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) +{ + struct sljit_jump *jump; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_jump(compiler, type)); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + if (type < SLJIT_JUMP) { + jump->flags |= IS_COND; + PTR_FAIL_IF(push_inst(compiler, B_CC | (6 << 5) | get_cc(type))); + } + else if (type >= SLJIT_FAST_CALL) + jump->flags |= IS_BL; + + PTR_FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0)); + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(TMP_REG1))); + + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_jump(compiler, type); +} + +static SLJIT_INLINE struct sljit_jump* emit_cmp_to0(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 src, sljit_sw srcw) +{ + struct sljit_jump *jump; + sljit_ins inv_bits = (type & SLJIT_I32_OP) ? 
W_OP : 0; + + SLJIT_ASSERT((type & 0xff) == SLJIT_EQUAL || (type & 0xff) == SLJIT_NOT_EQUAL); + ADJUST_LOCAL_OFFSET(src, srcw); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + jump->flags |= IS_CBZ | IS_COND; + + if (src & SLJIT_MEM) { + PTR_FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1)); + src = TMP_REG1; + } + else if (src & SLJIT_IMM) { + PTR_FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); + src = TMP_REG1; + } + + SLJIT_ASSERT(FAST_IS_REG(src)); + + if ((type & 0xff) == SLJIT_EQUAL) + inv_bits |= 1 << 24; + + PTR_FAIL_IF(push_inst(compiler, (CBZ ^ inv_bits) | (6 << 5) | RT(src))); + PTR_FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0)); + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, BR | RN(TMP_REG1))); + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) +{ + struct sljit_jump *jump; + + CHECK_ERROR(); + CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (!(src & SLJIT_IMM)) { + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1)); + src = TMP_REG1; + } + return push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(src)); + } + + /* These jumps are converted to jump/call instructions when possible. */ + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF(!jump); + set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0)); + jump->u.target = srcw; + + FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0)); + jump->addr = compiler->size; + return push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(TMP_REG1)); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_ijump(compiler, type, src, srcw); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 type) +{ + sljit_s32 dst_r, src_r, flags, mem_flags; + sljit_ins cc; + + CHECK_ERROR(); + CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + cc = get_cc(type & 0xff); + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; + + if (GET_OPCODE(op) < SLJIT_ADD) { + FAIL_IF(push_inst(compiler, CSINC | (cc << 12) | RD(dst_r) | RN(TMP_ZERO) | RM(TMP_ZERO))); + + if (dst_r == TMP_REG1) { + mem_flags = (GET_OPCODE(op) == SLJIT_MOV ? WORD_SIZE : INT_SIZE) | STORE; + return emit_op_mem(compiler, mem_flags, TMP_REG1, dst, dstw, TMP_REG2); + } + + return SLJIT_SUCCESS; + } + + flags = HAS_FLAGS(op) ? 
SET_FLAGS : 0;
+	mem_flags = WORD_SIZE;
+
+	if (op & SLJIT_I32_OP) {
+		flags |= INT_OP;
+		mem_flags = INT_SIZE;
+	}
+
+	src_r = dst;
+
+	if (dst & SLJIT_MEM) {
+		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG1, dst, dstw, TMP_REG1));
+		src_r = TMP_REG1;
+	}
+
+	FAIL_IF(push_inst(compiler, CSINC | (cc << 12) | RD(TMP_REG2) | RN(TMP_ZERO) | RM(TMP_ZERO)));
+	emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src_r, TMP_REG2);
+
+	if (dst & SLJIT_MEM)
+		return emit_op_mem(compiler, mem_flags | STORE, TMP_REG1, dst, dstw, TMP_REG2);
+	return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+	sljit_s32 dst_reg,
+	sljit_s32 src, sljit_sw srcw)
+{
+	sljit_ins inv_bits = (dst_reg & SLJIT_I32_OP) ? W_OP : 0;
+	sljit_ins cc;
+
+	CHECK_ERROR();
+	CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
+
+	if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
+		if (dst_reg & SLJIT_I32_OP)
+			srcw = (sljit_s32)srcw;
+		FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
+		src = TMP_REG1;
+		srcw = 0;
+	}
+
+	cc = get_cc(type & 0xff);
+	dst_reg &= ~SLJIT_I32_OP;
+
+	return push_inst(compiler, (CSEL ^ inv_bits) | (cc << 12) | RD(dst_reg) | RN(dst_reg) | RM(src));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
+	sljit_s32 reg,
+	sljit_s32 mem, sljit_sw memw)
+{
+	sljit_u32 sign = 0, inst;
+
+	CHECK_ERROR();
+	CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
+
+	/* The unscaled immediate form only has a 9-bit signed offset. */
+	if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -256))
+		return SLJIT_ERR_UNSUPPORTED;
+
+	if (type & SLJIT_MEM_SUPP)
+		return SLJIT_SUCCESS;
+
+	switch (type & 0xff) {
+	case SLJIT_MOV:
+	case SLJIT_MOV_P:
+		inst = STURBI | (MEM_SIZE_SHIFT(WORD_SIZE) << 30) | 0x400;
+		break;
+	case SLJIT_MOV_S8:
+		sign = 1;
+		/* fall through */
+	case SLJIT_MOV_U8:
+		inst = STURBI | (MEM_SIZE_SHIFT(BYTE_SIZE) << 30) | 0x400;
+		break;
+	case SLJIT_MOV_S16:
+		sign = 1;
+		/* fall through */
+	case SLJIT_MOV_U16:
+		inst = STURBI | (MEM_SIZE_SHIFT(HALF_SIZE) << 30) | 0x400;
+		break;
+	case SLJIT_MOV_S32:
+		sign = 1;
+		/* fall through */
+	case SLJIT_MOV_U32:
+		inst = STURBI | (MEM_SIZE_SHIFT(INT_SIZE) << 30) | 0x400;
+		break;
+	default:
+		SLJIT_UNREACHABLE();
+		inst = STURBI | (MEM_SIZE_SHIFT(WORD_SIZE) << 30) | 0x400;
+		break;
+	}
+
+	if (!(type & SLJIT_MEM_STORE))
+		inst |= sign ? 0x00800000 : 0x00400000;
+
+	if (type & SLJIT_MEM_PRE)
+		inst |= 0x800;
+
+	return push_inst(compiler, inst | RT(reg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
+	sljit_s32 freg,
+	sljit_s32 mem, sljit_sw memw)
+{
+	sljit_u32 inst;
+
+	CHECK_ERROR();
+	CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
+
+	if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -256))
+		return SLJIT_ERR_UNSUPPORTED;
+
+	if (type & SLJIT_MEM_SUPP)
+		return SLJIT_SUCCESS;
+
+	inst = STUR_FI | 0x80000400;
+
+	if (!(type & SLJIT_F32_OP))
+		inst |= 0x40000000;
+
+	if (!(type & SLJIT_MEM_STORE))
+		inst |= 0x00400000;
+
+	if (type & SLJIT_MEM_PRE)
+		inst |= 0x800;
+
+	return push_inst(compiler, inst | VT(freg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
+{
+	sljit_s32 dst_reg;
+	sljit_ins ins;
+
+	CHECK_ERROR();
+	CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset));
+
+	SLJIT_ASSERT (SLJIT_LOCALS_OFFSET_BASE == 0);
+
+	dst_reg = FAST_IS_REG(dst) ? 
dst : TMP_REG1; + + if (offset <= 0xffffff && offset >= -0xffffff) { + ins = ADDI; + if (offset < 0) { + offset = -offset; + ins = SUBI; + } + + if (offset <= 0xfff) + FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | (offset << 10))); + else { + FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | ((offset & 0xfff000) >> (12 - 10)) | (1 << 22))); + + offset &= 0xfff; + if (offset != 0) + FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(dst_reg) | (offset << 10))); + } + } + else { + FAIL_IF(load_immediate (compiler, dst_reg, offset)); + /* Add extended register form. */ + FAIL_IF(push_inst(compiler, ADDE | (0x3 << 13) | RD(dst_reg) | RN(SLJIT_SP) | RM(dst_reg))); + } + + if (SLJIT_UNLIKELY(dst & SLJIT_MEM)) + return emit_op_mem(compiler, WORD_SIZE | STORE, dst_reg, dst, dstw, TMP_REG1); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) +{ + struct sljit_const *const_; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); + PTR_FAIL_IF(!const_); + set_const(const_, compiler); + + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; + PTR_FAIL_IF(emit_imm64_const(compiler, dst_r, init_value)); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2)); + return const_; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + struct sljit_put_label *put_label; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; + PTR_FAIL_IF(emit_imm64_const(compiler, dst_r, 0)); + + put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label)); + PTR_FAIL_IF(!put_label); + set_put_label(put_label, compiler, 1); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2)); + + return put_label; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) +{ + sljit_ins* inst = (sljit_ins*)addr; + modify_imm64_const(inst, new_target); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 4); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) +{ + sljit_ins* inst = (sljit_ins*)addr; + modify_imm64_const(inst, new_constant); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 4); +} diff --git a/src/3rd/pcre/sjarmth2.c b/src/3rd/pcre/sjarmth2.c index 74ec83177dd..cdfe4a4d24a 100644 --- a/src/3rd/pcre/sjarmth2.c +++ b/src/3rd/pcre/sjarmth2.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -24,26 +24,33 @@ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) +SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void) { - return "ARM-Thumb2" SLJIT_CPUINFO; +#ifdef __SOFTFP__ + return "ARM-Thumb2" SLJIT_CPUINFO " ABI:softfp"; +#else + return "ARM-Thumb2" SLJIT_CPUINFO " ABI:hardfp"; +#endif } /* Length of an instruction word. */ -typedef sljit_ui sljit_ins; +typedef sljit_u32 sljit_ins; /* Last register + 1. */ -#define TMP_REG1 (SLJIT_NO_REGISTERS + 1) -#define TMP_REG2 (SLJIT_NO_REGISTERS + 2) -#define TMP_REG3 (SLJIT_NO_REGISTERS + 3) -#define TMP_PC (SLJIT_NO_REGISTERS + 4) +#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2) +#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3) +#define TMP_PC (SLJIT_NUMBER_OF_REGISTERS + 4) -#define TMP_FREG1 (0) -#define TMP_FREG2 (SLJIT_FLOAT_REG6 + 1) +#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1) +#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) /* See sljit_emit_enter and sljit_emit_op0 if you want to change them. */ -static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { - 0, 0, 1, 2, 12, 5, 6, 7, 8, 10, 11, 13, 3, 4, 14, 15 +static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = { + 0, 0, 1, 2, 3, 11, 10, 9, 8, 7, 6, 5, 4, 13, 12, 14, 15 +}; + +static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { + 0, 0, 1, 2, 3, 4, 5, 6, 7 }; #define COPY_BITS(src, from, to, bits) \ @@ -70,9 +77,9 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { #define RN4(rn) (reg_map[rn] << 16) #define RM4(rm) (reg_map[rm]) #define RT4(rt) (reg_map[rt] << 12) -#define DD4(dd) ((dd) << 12) -#define DN4(dn) ((dn) << 16) -#define DM4(dm) (dm) +#define DD4(dd) (freg_map[dd] << 12) +#define DN4(dn) (freg_map[dn] << 16) +#define DM4(dm) (freg_map[dm]) #define IMM5(imm) \ (COPY_BITS(imm, 2, 12, 3) | ((imm & 0x3) << 6)) #define IMM12(imm) \ @@ -103,17 +110,23 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { #define ASRSI 0x1000 #define ASR_W 0xfa40f000 #define ASR_WI 0xea4f0020 +#define BCC 0xd000 #define BICI 0xf0200000 #define BKPT 0xbe00 #define BLX 0x4780 #define BX 0x4700 #define CLZ 0xfab0f080 +#define CMNI_W 0xf1100f00 +#define CMP 0x4280 #define CMPI 0x2800 +#define CMPI_W 0xf1b00f00 +#define CMP_X 0x4500 #define CMP_W 0xebb00f00 #define EORI 0xf0800000 #define EORS 0x4040 #define EOR_W 0xea800000 #define IT 0xbf00 +#define LDRI 0xf8500800 #define LSLS 0x4080 #define LSLSI 0x0000 #define LSL_W 0xfa00f000 @@ -138,15 +151,16 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { #define ORRI 0xf0400000 #define ORRS 0x4300 #define ORR_W 0xea400000 -#define POP 0xbd00 +#define POP 0xbc00 #define POP_W 0xe8bd0000 -#define PUSH 0xb500 +#define PUSH 0xb400 #define PUSH_W 0xe92d0000 #define RSB_WI 0xf1c00000 #define RSBSI 0x4240 #define SBCI 0xf1600000 #define SBCS 0x4180 #define SBC_W 0xeb600000 +#define SDIV 0xfb90f0f0 #define SMULL 0xfb800000 #define STR_SP 0x9000 #define SUBS 0x1a00 @@ -161,6 +175,7 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { #define SXTH 0xb200 #define SXTH_W 0xfa0ff080 #define TST 0x4200 +#define UDIV 0xfbb0f0f0 #define UMULL 0xfba00000 #define UXTB 0xb2c0 #define UXTB_W 0xfa5ff080 @@ -169,29 +184,34 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { #define VABS_F32 0xeeb00ac0 #define VADD_F32 0xee300a00 #define VCMP_F32 0xeeb40a40 +#define VCVT_F32_S32 0xeeb80ac0 +#define VCVT_F64_F32 0xeeb70ac0 +#define VCVT_S32_F32 0xeebd0ac0 #define VDIV_F32 0xee800a00 #define VMOV_F32 0xeeb00a40 +#define VMOV 
0xee000a10 +#define VMOV2 0xec400a10 #define VMRS 0xeef1fa10 #define VMUL_F32 0xee200a00 #define VNEG_F32 0xeeb10a40 #define VSTR_F32 0xed000a00 #define VSUB_F32 0xee300a40 -static sljit_si push_inst16(struct sljit_compiler *compiler, sljit_ins inst) +static sljit_s32 push_inst16(struct sljit_compiler *compiler, sljit_ins inst) { - sljit_uh *ptr; + sljit_u16 *ptr; SLJIT_ASSERT(!(inst & 0xffff0000)); - ptr = (sljit_uh*)ensure_buf(compiler, sizeof(sljit_uh)); + ptr = (sljit_u16*)ensure_buf(compiler, sizeof(sljit_u16)); FAIL_IF(!ptr); *ptr = inst; compiler->size++; return SLJIT_SUCCESS; } -static sljit_si push_inst32(struct sljit_compiler *compiler, sljit_ins inst) +static sljit_s32 push_inst32(struct sljit_compiler *compiler, sljit_ins inst) { - sljit_uh *ptr = (sljit_uh*)ensure_buf(compiler, sizeof(sljit_ins)); + sljit_u16 *ptr = (sljit_u16*)ensure_buf(compiler, sizeof(sljit_ins)); FAIL_IF(!ptr); *ptr++ = inst >> 16; *ptr = inst; @@ -199,17 +219,17 @@ static sljit_si push_inst32(struct sljit_compiler *compiler, sljit_ins inst) return SLJIT_SUCCESS; } -static SLJIT_INLINE sljit_si emit_imm32_const(struct sljit_compiler *compiler, sljit_si dst, sljit_uw imm) +static SLJIT_INLINE sljit_s32 emit_imm32_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_uw imm) { - FAIL_IF(push_inst32(compiler, MOVW | RD4(dst) | - COPY_BITS(imm, 12, 16, 4) | COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff))); - return push_inst32(compiler, MOVT | RD4(dst) | - COPY_BITS(imm, 12 + 16, 16, 4) | COPY_BITS(imm, 11 + 16, 26, 1) | COPY_BITS(imm, 8 + 16, 12, 3) | ((imm & 0xff0000) >> 16)); + FAIL_IF(push_inst32(compiler, MOVW | RD4(dst) + | COPY_BITS(imm, 12, 16, 4) | COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff))); + return push_inst32(compiler, MOVT | RD4(dst) + | COPY_BITS(imm, 12 + 16, 16, 4) | COPY_BITS(imm, 11 + 16, 26, 1) | COPY_BITS(imm, 8 + 16, 12, 3) | ((imm & 0xff0000) >> 16)); } -static SLJIT_INLINE void modify_imm32_const(sljit_uh* inst, sljit_uw new_imm) +static SLJIT_INLINE void modify_imm32_const(sljit_u16 *inst, sljit_uw new_imm) { - sljit_si dst = inst[1] & 0x0f00; + sljit_s32 dst = inst[1] & 0x0f00; SLJIT_ASSERT(((inst[0] & 0xfbf0) == (MOVW >> 16)) && ((inst[2] & 0xfbf0) == (MOVT >> 16)) && dst == (inst[3] & 0x0f00)); inst[0] = (MOVW >> 16) | COPY_BITS(new_imm, 12, 0, 4) | COPY_BITS(new_imm, 11, 10, 1); inst[1] = dst | COPY_BITS(new_imm, 8, 12, 3) | (new_imm & 0xff); @@ -217,7 +237,7 @@ static SLJIT_INLINE void modify_imm32_const(sljit_uh* inst, sljit_uw new_imm) inst[3] = dst | COPY_BITS(new_imm, 8 + 16, 12, 3) | ((new_imm & 0xff0000) >> 16); } -static SLJIT_INLINE sljit_si detect_jump_type(struct sljit_jump *jump, sljit_uh *code_ptr, sljit_uh *code) +static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_u16 *code_ptr, sljit_u16 *code, sljit_sw executable_offset) { sljit_sw diff; @@ -228,7 +248,7 @@ static SLJIT_INLINE sljit_si detect_jump_type(struct sljit_jump *jump, sljit_uh /* Branch to ARM code is not optimized yet. 
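 	   (the low bit of a Thumb target address is set; a clear bit here means
 	   an ARM-state target, so the full constant-load + BX sequence is kept)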
*/ if (!(jump->u.target & 0x1)) return 0; - diff = ((sljit_sw)jump->u.target - (sljit_sw)(code_ptr + 2)) >> 1; + diff = ((sljit_sw)jump->u.target - (sljit_sw)(code_ptr + 2) - executable_offset) >> 1; } else { SLJIT_ASSERT(jump->flags & JUMP_LABEL); @@ -238,33 +258,33 @@ static SLJIT_INLINE sljit_si detect_jump_type(struct sljit_jump *jump, sljit_uh if (jump->flags & IS_COND) { SLJIT_ASSERT(!(jump->flags & IS_BL)); if (diff <= 127 && diff >= -128) { - jump->flags |= B_TYPE1; + jump->flags |= PATCH_TYPE1; return 5; } if (diff <= 524287 && diff >= -524288) { - jump->flags |= B_TYPE2; + jump->flags |= PATCH_TYPE2; return 4; } /* +1 comes from the prefix IT instruction. */ diff--; if (diff <= 8388607 && diff >= -8388608) { - jump->flags |= B_TYPE3; + jump->flags |= PATCH_TYPE3; return 3; } } else if (jump->flags & IS_BL) { if (diff <= 8388607 && diff >= -8388608) { - jump->flags |= BL_TYPE6; + jump->flags |= PATCH_BL; return 3; } } else { if (diff <= 1023 && diff >= -1024) { - jump->flags |= B_TYPE4; + jump->flags |= PATCH_TYPE4; return 4; } if (diff <= 8388607 && diff >= -8388608) { - jump->flags |= B_TYPE5; + jump->flags |= PATCH_TYPE5; return 3; } } @@ -272,34 +292,27 @@ static SLJIT_INLINE sljit_si detect_jump_type(struct sljit_jump *jump, sljit_uh return 0; } -static SLJIT_INLINE void inline_set_jump_addr(sljit_uw addr, sljit_uw new_addr, sljit_si flush) -{ - sljit_uh* inst = (sljit_uh*)addr; - modify_imm32_const(inst, new_addr); - if (flush) { - SLJIT_CACHE_FLUSH(inst, inst + 3); - } -} - -static SLJIT_INLINE void set_jump_instruction(struct sljit_jump *jump) +static SLJIT_INLINE void set_jump_instruction(struct sljit_jump *jump, sljit_sw executable_offset) { - sljit_si type = (jump->flags >> 4) & 0xf; + sljit_s32 type = (jump->flags >> 4) & 0xf; sljit_sw diff; - sljit_uh *jump_inst; - sljit_si s, j1, j2; + sljit_u16 *jump_inst; + sljit_s32 s, j1, j2; if (SLJIT_UNLIKELY(type == 0)) { - inline_set_jump_addr(jump->addr, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0); + modify_imm32_const((sljit_u16*)jump->addr, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target); return; } if (jump->flags & JUMP_ADDR) { SLJIT_ASSERT(jump->u.target & 0x1); - diff = ((sljit_sw)jump->u.target - (sljit_sw)(jump->addr + 4)) >> 1; + diff = ((sljit_sw)jump->u.target - (sljit_sw)(jump->addr + sizeof(sljit_u32)) - executable_offset) >> 1; } - else - diff = ((sljit_sw)(jump->u.label->addr) - (sljit_sw)(jump->addr + 4)) >> 1; - jump_inst = (sljit_uh*)jump->addr; + else { + SLJIT_ASSERT(jump->u.label->addr & 0x1); + diff = ((sljit_sw)(jump->u.label->addr) - (sljit_sw)(jump->addr + sizeof(sljit_u32)) - executable_offset) >> 1; + } + jump_inst = (sljit_u16*)jump->addr; switch (type) { case 1: @@ -330,8 +343,8 @@ static SLJIT_INLINE void set_jump_instruction(struct sljit_jump *jump) /* Really complex instruction form for branches. 
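 	   (in the 24-bit B.W/BL encodings the two high offset bits I1/I2 are
 	   stored as J1 = !I1 ^ S and J2 = !I2 ^ S, with S the sign bit; I1 is
 	   bit 22 and I2 is bit 21 of the halfword offset)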
*/ s = (diff >> 23) & 0x1; - j1 = (~(diff >> 21) ^ s) & 0x1; - j2 = (~(diff >> 22) ^ s) & 0x1; + j1 = (~(diff >> 22) ^ s) & 0x1; + j2 = (~(diff >> 21) ^ s) & 0x1; jump_inst[0] = 0xf000 | (s << 10) | COPY_BITS(diff, 11, 0, 10); jump_inst[1] = (j1 << 13) | (j2 << 11) | (diff & 0x7ff); @@ -341,58 +354,75 @@ static SLJIT_INLINE void set_jump_instruction(struct sljit_jump *jump) else if (type == 6) /* Encoding T1 of 'BL' instruction */ jump_inst[1] |= 0xd000; else - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); } SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) { struct sljit_memory_fragment *buf; - sljit_uh *code; - sljit_uh *code_ptr; - sljit_uh *buf_ptr; - sljit_uh *buf_end; + sljit_u16 *code; + sljit_u16 *code_ptr; + sljit_u16 *buf_ptr; + sljit_u16 *buf_end; sljit_uw half_count; + sljit_uw next_addr; + sljit_sw executable_offset; struct sljit_label *label; struct sljit_jump *jump; struct sljit_const *const_; + struct sljit_put_label *put_label; CHECK_ERROR_PTR(); - check_sljit_generate_code(compiler); + CHECK_PTR(check_sljit_generate_code(compiler)); reverse_buf(compiler); - code = (sljit_uh*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_uh)); + code = (sljit_u16*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_u16)); PTR_FAIL_WITH_EXEC_IF(code); buf = compiler->buf; code_ptr = code; half_count = 0; + next_addr = 0; + executable_offset = SLJIT_EXEC_OFFSET(code); + label = compiler->labels; jump = compiler->jumps; const_ = compiler->consts; + put_label = compiler->put_labels; do { - buf_ptr = (sljit_uh*)buf->memory; + buf_ptr = (sljit_u16*)buf->memory; buf_end = buf_ptr + (buf->used_size >> 1); do { *code_ptr = *buf_ptr++; - /* These structures are ordered by their address. */ - SLJIT_ASSERT(!label || label->size >= half_count); - SLJIT_ASSERT(!jump || jump->addr >= half_count); - SLJIT_ASSERT(!const_ || const_->addr >= half_count); - if (label && label->size == half_count) { - label->addr = ((sljit_uw)code_ptr) | 0x1; - label->size = code_ptr - code; - label = label->next; - } - if (jump && jump->addr == half_count) { - jump->addr = (sljit_uw)code_ptr - ((jump->flags & IS_COND) ? 10 : 8); - code_ptr -= detect_jump_type(jump, code_ptr, code); - jump = jump->next; - } - if (const_ && const_->addr == half_count) { - const_->addr = (sljit_uw)code_ptr; - const_ = const_->next; + if (next_addr == half_count) { + SLJIT_ASSERT(!label || label->size >= half_count); + SLJIT_ASSERT(!jump || jump->addr >= half_count); + SLJIT_ASSERT(!const_ || const_->addr >= half_count); + SLJIT_ASSERT(!put_label || put_label->addr >= half_count); + + /* These structures are ordered by their address. */ + if (label && label->size == half_count) { + label->addr = ((sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset)) | 0x1; + label->size = code_ptr - code; + label = label->next; + } + if (jump && jump->addr == half_count) { + jump->addr = (sljit_uw)code_ptr - ((jump->flags & IS_COND) ? 
10 : 8);
+				code_ptr -= detect_jump_type(jump, code_ptr, code, executable_offset);
+				jump = jump->next;
+			}
+			if (const_ && const_->addr == half_count) {
+				const_->addr = (sljit_uw)code_ptr;
+				const_ = const_->next;
+			}
+			if (put_label && put_label->addr == half_count) {
+				SLJIT_ASSERT(put_label->label);
+				put_label->addr = (sljit_uw)code_ptr;
+				put_label = put_label->next;
+			}
+			next_addr = compute_next_addr(label, jump, const_, put_label);
 		}
 		code_ptr ++;
 		half_count ++;
@@ -402,7 +432,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compil
 	} while (buf);
 
 	if (label && label->size == half_count) {
-		label->addr = ((sljit_uw)code_ptr) | 0x1;
+		label->addr = ((sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset)) | 0x1;
 		label->size = code_ptr - code;
 		label = label->next;
 	}
@@ -410,26 +440,62 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compil
 	SLJIT_ASSERT(!label);
 	SLJIT_ASSERT(!jump);
 	SLJIT_ASSERT(!const_);
+	SLJIT_ASSERT(!put_label);
 	SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size);
 
 	jump = compiler->jumps;
 	while (jump) {
-		set_jump_instruction(jump);
+		set_jump_instruction(jump, executable_offset);
 		jump = jump->next;
 	}
 
+	put_label = compiler->put_labels;
+	while (put_label) {
+		modify_imm32_const((sljit_u16 *)put_label->addr, put_label->label->addr);
+		put_label = put_label->next;
+	}
+
 	compiler->error = SLJIT_ERR_COMPILED;
-	compiler->executable_size = (code_ptr - code) * sizeof(sljit_uh);
+	compiler->executable_offset = executable_offset;
+	compiler->executable_size = (code_ptr - code) * sizeof(sljit_u16);
+
+	code = (sljit_u16 *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
+	code_ptr = (sljit_u16 *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
+
 	SLJIT_CACHE_FLUSH(code, code_ptr);
 	/* Set thumb mode flag. */
 	return (void*)((sljit_uw)code | 0x1);
 }
 
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
+{
+	switch (feature_type) {
+	case SLJIT_HAS_FPU:
+#ifdef SLJIT_IS_FPU_AVAILABLE
+		return SLJIT_IS_FPU_AVAILABLE;
+#else
+		/* Available by default. */
+		return 1;
+#endif
+
+	case SLJIT_HAS_CLZ:
+	case SLJIT_HAS_CMOV:
+		return 1;
+
+	default:
+		return 0;
+	}
+}
+
+/* --------------------------------------------------------------------- */
+/*  Core code generator functions.                                       */
+/* --------------------------------------------------------------------- */
+
 #define INVALID_IMM	0x80000000
 static sljit_uw get_imm(sljit_uw imm)
 {
 	/* Thumb immediate form. */
-	sljit_si counter;
+	sljit_s32 counter;
 
 	if (imm <= 0xff)
 		return imm;
@@ -475,10 +541,12 @@ static sljit_uw get_imm(sljit_uw imm)
 	return ((imm >> 24) & 0x7f) | COPY_BITS(counter, 4, 26, 1) | COPY_BITS(counter, 1, 12, 3) | COPY_BITS(counter, 0, 7, 1);
 }
 
-static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si dst, sljit_uw imm)
+static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst, sljit_uw imm)
 {
 	sljit_uw tmp;
 
+	/* MOVS cannot be used since it destroys flags. */
+
 	if (imm >= 0x10000) {
 		tmp = get_imm(imm);
 		if (tmp != INVALID_IMM)
@@ -489,37 +557,32 @@ static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si dst, sl
 	}
 
 	/* set low 16 bits, set hi 16 bits to 0. 
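 	   A single MOVW covers any value below 0x10000, since it zero-extends
 	   its 16-bit immediate; the MOVT emitted below then fills in the upper
 	   halfword when needed.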
*/
-	FAIL_IF(push_inst32(compiler, MOVW | RD4(dst) |
-		COPY_BITS(imm, 12, 16, 4) | COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff)));
+	FAIL_IF(push_inst32(compiler, MOVW | RD4(dst)
+		| COPY_BITS(imm, 12, 16, 4) | COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff)));
 	/* set hi 16 bit if needed. */
 	if (imm >= 0x10000)
-		return push_inst32(compiler, MOVT | RD4(dst) |
-			COPY_BITS(imm, 12 + 16, 16, 4) | COPY_BITS(imm, 11 + 16, 26, 1) | COPY_BITS(imm, 8 + 16, 12, 3) | ((imm & 0xff0000) >> 16));
+		return push_inst32(compiler, MOVT | RD4(dst)
+			| COPY_BITS(imm, 12 + 16, 16, 4) | COPY_BITS(imm, 11 + 16, 26, 1) | COPY_BITS(imm, 8 + 16, 12, 3) | ((imm & 0xff0000) >> 16));
 	return SLJIT_SUCCESS;
 }
 
 #define ARG1_IMM	0x0010000
 #define ARG2_IMM	0x0020000
-#define KEEP_FLAGS	0x0040000
-#define SET_MULOV	0x0080000
 /* SET_FLAGS must be 0x100000 as it is also the value of S bit (can be used for optimization). */
 #define SET_FLAGS	0x0100000
 #define UNUSED_RETURN	0x0200000
-#define SLOW_DEST	0x0400000
-#define SLOW_SRC1	0x0800000
-#define SLOW_SRC2	0x1000000
 
-static sljit_si emit_op_imm(struct sljit_compiler *compiler, sljit_si flags, sljit_si dst, sljit_uw arg1, sljit_uw arg2)
+static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 dst, sljit_uw arg1, sljit_uw arg2)
 {
 	/* dst must be register, TMP_REG1
-	   arg1 must be register, TMP_REG1, imm
-	   arg2 must be register, TMP_REG2, imm */
-	sljit_si reg;
-	sljit_uw imm, negated_imm;
+	   arg1 must be register, imm
+	   arg2 must be register, imm */
+	sljit_s32 reg;
+	sljit_uw imm, nimm;
 
 	if (SLJIT_UNLIKELY((flags & (ARG1_IMM | ARG2_IMM)) == (ARG1_IMM | ARG2_IMM))) {
-		/* Both are immediates. */
+		/* Both are immediates, no temporaries are used. */
 		flags &= ~ARG1_IMM;
 		FAIL_IF(load_immediate(compiler, TMP_REG1, arg1));
 		arg1 = TMP_REG1;
@@ -530,41 +593,45 @@ static sljit_si emit_op_imm(struct sljit_compiler *compiler, sljit_si flags, slj
 		imm = (flags & ARG2_IMM) ? arg2 : arg1;
 
 		switch (flags & 0xffff) {
+		case SLJIT_CLZ:
+		case SLJIT_MUL:
+			/* No form with immediate operand. */
+			break;
 		case SLJIT_MOV:
-			SLJIT_ASSERT(!(flags & SET_FLAGS) && (flags & ARG2_IMM) && arg1 == TMP_REG1);
+			SLJIT_ASSERT(!(flags & SET_FLAGS) && (flags & ARG2_IMM) && arg1 == TMP_REG2);
 			return load_immediate(compiler, dst, imm);
 		case SLJIT_NOT:
 			if (!(flags & SET_FLAGS))
 				return load_immediate(compiler, dst, ~imm);
 			/* Since the flags should be set, we just fallback to the register mode.
-			   Although I could do some clever things here, "NOT IMM" does not worth the efforts. */
-			break;
-		case SLJIT_CLZ:
-			/* No form with immediate operand. */
+			   Although some clever things could be done here, "NOT IMM" is not worth the effort. 
*/ break; case SLJIT_ADD: - negated_imm = (sljit_uw)-(sljit_sw)imm; - if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(reg, dst)) { + nimm = -imm; + if (IS_2_LO_REGS(reg, dst)) { if (imm <= 0x7) return push_inst16(compiler, ADDSI3 | IMM3(imm) | RD3(dst) | RN3(reg)); - if (negated_imm <= 0x7) - return push_inst16(compiler, SUBSI3 | IMM3(negated_imm) | RD3(dst) | RN3(reg)); + if (nimm <= 0x7) + return push_inst16(compiler, SUBSI3 | IMM3(nimm) | RD3(dst) | RN3(reg)); if (reg == dst) { if (imm <= 0xff) return push_inst16(compiler, ADDSI8 | IMM8(imm) | RDN3(dst)); - if (negated_imm <= 0xff) - return push_inst16(compiler, SUBSI8 | IMM8(negated_imm) | RDN3(dst)); + if (nimm <= 0xff) + return push_inst16(compiler, SUBSI8 | IMM8(nimm) | RDN3(dst)); } } if (!(flags & SET_FLAGS)) { if (imm <= 0xfff) return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(imm)); - if (negated_imm <= 0xfff) - return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(negated_imm)); + if (nimm <= 0xfff) + return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(nimm)); } - imm = get_imm(imm); - if (imm != INVALID_IMM) - return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + nimm = get_imm(imm); + if (nimm != INVALID_IMM) + return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); + nimm = get_imm(-imm); + if (nimm != INVALID_IMM) + return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); break; case SLJIT_ADDC: imm = get_imm(imm); @@ -572,63 +639,71 @@ static sljit_si emit_op_imm(struct sljit_compiler *compiler, sljit_si flags, slj return push_inst32(compiler, ADCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); break; case SLJIT_SUB: - if (flags & ARG2_IMM) { - negated_imm = (sljit_uw)-(sljit_sw)imm; - if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(reg, dst)) { - if (imm <= 0x7) - return push_inst16(compiler, SUBSI3 | IMM3(imm) | RD3(dst) | RN3(reg)); - if (negated_imm <= 0x7) - return push_inst16(compiler, ADDSI3 | IMM3(negated_imm) | RD3(dst) | RN3(reg)); - if (reg == dst) { - if (imm <= 0xff) - return push_inst16(compiler, SUBSI8 | IMM8(imm) | RDN3(dst)); - if (negated_imm <= 0xff) - return push_inst16(compiler, ADDSI8 | IMM8(negated_imm) | RDN3(dst)); - } - if (imm <= 0xff && (flags & UNUSED_RETURN)) - return push_inst16(compiler, CMPI | IMM8(imm) | RDN3(reg)); - } - if (!(flags & SET_FLAGS)) { - if (imm <= 0xfff) - return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(imm)); - if (negated_imm <= 0xfff) - return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(negated_imm)); - } - imm = get_imm(imm); - if (imm != INVALID_IMM) - return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); - } - else { - if (!(flags & KEEP_FLAGS) && imm == 0 && IS_2_LO_REGS(reg, dst)) + /* SUB operation can be replaced by ADD because of the negative carry flag. 
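+	   For example, SUBS dst, reg, #1 and ADDS dst, reg, #0xffffffff set the
+	   same flags, as ARM defines the carry of a subtraction as NOT borrow;
+	   this is why both the imm and -imm encodings are tried below.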
*/ + if (flags & ARG1_IMM) { + if (imm == 0 && IS_2_LO_REGS(reg, dst)) return push_inst16(compiler, RSBSI | RD3(dst) | RN3(reg)); imm = get_imm(imm); if (imm != INVALID_IMM) return push_inst32(compiler, RSB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + break; } - break; - case SLJIT_SUBC: - if (flags & ARG2_IMM) { - imm = get_imm(imm); - if (imm != INVALID_IMM) - return push_inst32(compiler, SBCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + if (flags & UNUSED_RETURN) { + if (imm <= 0xff && reg_map[reg] <= 7) + return push_inst16(compiler, CMPI | IMM8(imm) | RDN3(reg)); + nimm = get_imm(imm); + if (nimm != INVALID_IMM) + return push_inst32(compiler, CMPI_W | RN4(reg) | nimm); + nimm = get_imm(-imm); + if (nimm != INVALID_IMM) + return push_inst32(compiler, CMNI_W | RN4(reg) | nimm); + } + nimm = -imm; + if (IS_2_LO_REGS(reg, dst)) { + if (imm <= 0x7) + return push_inst16(compiler, SUBSI3 | IMM3(imm) | RD3(dst) | RN3(reg)); + if (nimm <= 0x7) + return push_inst16(compiler, ADDSI3 | IMM3(nimm) | RD3(dst) | RN3(reg)); + if (reg == dst) { + if (imm <= 0xff) + return push_inst16(compiler, SUBSI8 | IMM8(imm) | RDN3(dst)); + if (nimm <= 0xff) + return push_inst16(compiler, ADDSI8 | IMM8(nimm) | RDN3(dst)); + } } + if (!(flags & SET_FLAGS)) { + if (imm <= 0xfff) + return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(imm)); + if (nimm <= 0xfff) + return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(nimm)); + } + nimm = get_imm(imm); + if (nimm != INVALID_IMM) + return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); + nimm = get_imm(-imm); + if (nimm != INVALID_IMM) + return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); break; - case SLJIT_MUL: - /* No form with immediate operand. */ + case SLJIT_SUBC: + if (flags & ARG1_IMM) + break; + imm = get_imm(imm); + if (imm != INVALID_IMM) + return push_inst32(compiler, SBCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); break; case SLJIT_AND: + nimm = get_imm(imm); + if (nimm != INVALID_IMM) + return push_inst32(compiler, ANDI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); imm = get_imm(imm); - if (imm != INVALID_IMM) - return push_inst32(compiler, ANDI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); - imm = get_imm(~((flags & ARG2_IMM) ? arg2 : arg1)); if (imm != INVALID_IMM) return push_inst32(compiler, BICI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); break; case SLJIT_OR: + nimm = get_imm(imm); + if (nimm != INVALID_IMM) + return push_inst32(compiler, ORRI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); imm = get_imm(imm); - if (imm != INVALID_IMM) - return push_inst32(compiler, ORRI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); - imm = get_imm(~((flags & ARG2_IMM) ? 
arg2 : arg1)); if (imm != INVALID_IMM) return push_inst32(compiler, ORNI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); break; @@ -638,171 +713,149 @@ static sljit_si emit_op_imm(struct sljit_compiler *compiler, sljit_si flags, slj return push_inst32(compiler, EORI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); break; case SLJIT_SHL: - if (flags & ARG2_IMM) { - imm &= 0x1f; - if (imm == 0) { - if (!(flags & SET_FLAGS)) - return push_inst16(compiler, MOV | SET_REGS44(dst, reg)); - if (IS_2_LO_REGS(dst, reg)) - return push_inst16(compiler, MOVS | RD3(dst) | RN3(reg)); - return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(dst) | RM4(reg)); - } - if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, reg)) + case SLJIT_LSHR: + case SLJIT_ASHR: + if (flags & ARG1_IMM) + break; + imm &= 0x1f; + if (imm == 0) { + if (!(flags & SET_FLAGS)) + return push_inst16(compiler, MOV | SET_REGS44(dst, reg)); + if (IS_2_LO_REGS(dst, reg)) + return push_inst16(compiler, MOVS | RD3(dst) | RN3(reg)); + return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(dst) | RM4(reg)); + } + switch (flags & 0xffff) { + case SLJIT_SHL: + if (IS_2_LO_REGS(dst, reg)) return push_inst16(compiler, LSLSI | RD3(dst) | RN3(reg) | (imm << 6)); return push_inst32(compiler, LSL_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm)); - } - break; - case SLJIT_LSHR: - if (flags & ARG2_IMM) { - imm &= 0x1f; - if (imm == 0) { - if (!(flags & SET_FLAGS)) - return push_inst16(compiler, MOV | SET_REGS44(dst, reg)); - if (IS_2_LO_REGS(dst, reg)) - return push_inst16(compiler, MOVS | RD3(dst) | RN3(reg)); - return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(dst) | RM4(reg)); - } - if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, reg)) + case SLJIT_LSHR: + if (IS_2_LO_REGS(dst, reg)) return push_inst16(compiler, LSRSI | RD3(dst) | RN3(reg) | (imm << 6)); return push_inst32(compiler, LSR_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm)); - } - break; - case SLJIT_ASHR: - if (flags & ARG2_IMM) { - imm &= 0x1f; - if (imm == 0) { - if (!(flags & SET_FLAGS)) - return push_inst16(compiler, MOV | SET_REGS44(dst, reg)); - if (IS_2_LO_REGS(dst, reg)) - return push_inst16(compiler, MOVS | RD3(dst) | RN3(reg)); - return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(dst) | RM4(reg)); - } - if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, reg)) + default: /* SLJIT_ASHR */ + if (IS_2_LO_REGS(dst, reg)) return push_inst16(compiler, ASRSI | RD3(dst) | RN3(reg) | (imm << 6)); return push_inst32(compiler, ASR_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm)); } - break; default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); break; } if (flags & ARG2_IMM) { - FAIL_IF(load_immediate(compiler, TMP_REG2, arg2)); - arg2 = TMP_REG2; + imm = arg2; + arg2 = (arg1 == TMP_REG1) ? TMP_REG2 : TMP_REG1; + FAIL_IF(load_immediate(compiler, arg2, imm)); } else { - FAIL_IF(load_immediate(compiler, TMP_REG1, arg1)); - arg1 = TMP_REG1; + imm = arg1; + arg1 = (arg2 == TMP_REG1) ? TMP_REG2 : TMP_REG1; + FAIL_IF(load_immediate(compiler, arg1, imm)); } + + SLJIT_ASSERT(arg1 != arg2); } /* Both arguments are registers. 
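 	   (any immediate operand was materialized into TMP_REG1 or TMP_REG2
 	   above, so only register-register forms have to be emitted here)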
*/ switch (flags & 0xffff) { case SLJIT_MOV: - case SLJIT_MOV_UI: - case SLJIT_MOV_SI: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: case SLJIT_MOV_P: - case SLJIT_MOVU: - case SLJIT_MOVU_UI: - case SLJIT_MOVU_SI: - case SLJIT_MOVU_P: - SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2); + if (dst == arg2) + return SLJIT_SUCCESS; return push_inst16(compiler, MOV | SET_REGS44(dst, arg2)); - case SLJIT_MOV_UB: - case SLJIT_MOVU_UB: - SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + case SLJIT_MOV_U8: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2); if (IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, UXTB | RD3(dst) | RN3(arg2)); return push_inst32(compiler, UXTB_W | RD4(dst) | RM4(arg2)); - case SLJIT_MOV_SB: - case SLJIT_MOVU_SB: - SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + case SLJIT_MOV_S8: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2); if (IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, SXTB | RD3(dst) | RN3(arg2)); return push_inst32(compiler, SXTB_W | RD4(dst) | RM4(arg2)); - case SLJIT_MOV_UH: - case SLJIT_MOVU_UH: - SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + case SLJIT_MOV_U16: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2); if (IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, UXTH | RD3(dst) | RN3(arg2)); return push_inst32(compiler, UXTH_W | RD4(dst) | RM4(arg2)); - case SLJIT_MOV_SH: - case SLJIT_MOVU_SH: - SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + case SLJIT_MOV_S16: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG2); if (IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, SXTH | RD3(dst) | RN3(arg2)); return push_inst32(compiler, SXTH_W | RD4(dst) | RM4(arg2)); case SLJIT_NOT: - SLJIT_ASSERT(arg1 == TMP_REG1); - if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + SLJIT_ASSERT(arg1 == TMP_REG2); + if (IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, MVNS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, MVN_W | (flags & SET_FLAGS) | RD4(dst) | RM4(arg2)); case SLJIT_CLZ: - SLJIT_ASSERT(arg1 == TMP_REG1); + SLJIT_ASSERT(arg1 == TMP_REG2); FAIL_IF(push_inst32(compiler, CLZ | RN4(arg2) | RD4(dst) | RM4(arg2))); - if (flags & SET_FLAGS) { - if (reg_map[dst] <= 7) - return push_inst16(compiler, CMPI | RDN3(dst)); - return push_inst32(compiler, ADD_WI | SET_FLAGS | RN4(dst) | RD4(dst)); - } return SLJIT_SUCCESS; case SLJIT_ADD: - if (!(flags & KEEP_FLAGS) && IS_3_LO_REGS(dst, arg1, arg2)) + if (IS_3_LO_REGS(dst, arg1, arg2)) return push_inst16(compiler, ADDS | RD3(dst) | RN3(arg1) | RM3(arg2)); if (dst == arg1 && !(flags & SET_FLAGS)) return push_inst16(compiler, ADD | SET_REGS44(dst, arg2)); return push_inst32(compiler, ADD_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); case SLJIT_ADDC: - if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + if (dst == arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, ADCS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, ADC_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); case SLJIT_SUB: - if (!(flags & KEEP_FLAGS) && IS_3_LO_REGS(dst, arg1, arg2)) + if (flags & UNUSED_RETURN) { + if (IS_2_LO_REGS(arg1, arg2)) + return push_inst16(compiler, CMP | RD3(arg1) | RN3(arg2)); + return push_inst16(compiler, CMP_X | SET_REGS44(arg1, arg2)); + } + if (IS_3_LO_REGS(dst, arg1, arg2)) return push_inst16(compiler, SUBS | RD3(dst) | RN3(arg1) | RM3(arg2)); return push_inst32(compiler, SUB_W | (flags & SET_FLAGS) | RD4(dst) | 
RN4(arg1) | RM4(arg2)); case SLJIT_SUBC: - if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + if (dst == arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, SBCS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, SBC_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); case SLJIT_MUL: if (!(flags & SET_FLAGS)) return push_inst32(compiler, MUL | RD4(dst) | RN4(arg1) | RM4(arg2)); - SLJIT_ASSERT(reg_map[TMP_REG2] <= 7 && dst != TMP_REG2); + SLJIT_ASSERT(dst != TMP_REG2); FAIL_IF(push_inst32(compiler, SMULL | RT4(dst) | RD4(TMP_REG2) | RN4(arg1) | RM4(arg2))); /* cmp TMP_REG2, dst asr #31. */ return push_inst32(compiler, CMP_W | RN4(TMP_REG2) | 0x70e0 | RM4(dst)); case SLJIT_AND: - if (!(flags & KEEP_FLAGS)) { - if (dst == arg1 && IS_2_LO_REGS(dst, arg2)) - return push_inst16(compiler, ANDS | RD3(dst) | RN3(arg2)); - if ((flags & UNUSED_RETURN) && IS_2_LO_REGS(arg1, arg2)) - return push_inst16(compiler, TST | RD3(arg1) | RN3(arg2)); - } + if (dst == arg1 && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, ANDS | RD3(dst) | RN3(arg2)); + if ((flags & UNUSED_RETURN) && IS_2_LO_REGS(arg1, arg2)) + return push_inst16(compiler, TST | RD3(arg1) | RN3(arg2)); return push_inst32(compiler, AND_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); case SLJIT_OR: - if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + if (dst == arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, ORRS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, ORR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); case SLJIT_XOR: - if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + if (dst == arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, EORS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, EOR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); case SLJIT_SHL: - if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + if (dst == arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, LSLS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, LSL_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); case SLJIT_LSHR: - if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + if (dst == arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, LSRS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, LSR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); case SLJIT_ASHR: - if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + if (dst == arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, ASRS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, ASR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); } - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_SUCCESS; } @@ -812,9 +865,7 @@ static sljit_si emit_op_imm(struct sljit_compiler *compiler, sljit_si flags, slj #define WORD_SIZE 0x00 #define BYTE_SIZE 0x04 #define HALF_SIZE 0x08 - -#define UPDATE 0x10 -#define ARG_TEST 0x20 +#define PRELOAD 0x0c #define IS_WORD_SIZE(flags) (!(flags & (BYTE_SIZE | HALF_SIZE))) #define OFFSET_CHECK(imm, shift) (!(argw & ~(imm << shift))) @@ -834,7 +885,7 @@ static sljit_si emit_op_imm(struct sljit_compiler *compiler, sljit_si flags, slj s = store */ -static SLJIT_CONST sljit_uw sljit_mem16[12] = { +static const sljit_ins sljit_mem16[12] = { /* w u l */ 0x5800 /* ldr */, /* w u s */ 0x5000 /* str */, /* w s l */ 0x5800 /* ldr */, @@ -851,7 +902,7 @@ static SLJIT_CONST sljit_uw sljit_mem16[12] = { /* h s s */ 
0x5200 /* strh */, }; -static SLJIT_CONST sljit_uw sljit_mem16_imm5[12] = { +static const sljit_ins sljit_mem16_imm5[12] = { /* w u l */ 0x6800 /* ldr imm5 */, /* w u s */ 0x6000 /* str imm5 */, /* w s l */ 0x6800 /* ldr imm5 */, @@ -870,7 +921,7 @@ static SLJIT_CONST sljit_uw sljit_mem16_imm5[12] = { #define MEM_IMM8 0xc00 #define MEM_IMM12 0x800000 -static SLJIT_CONST sljit_uw sljit_mem32[12] = { +static const sljit_ins sljit_mem32[13] = { /* w u l */ 0xf8500000 /* ldr.w */, /* w u s */ 0xf8400000 /* str.w */, /* w s l */ 0xf8500000 /* ldr.w */, @@ -885,10 +936,12 @@ static SLJIT_CONST sljit_uw sljit_mem32[12] = { /* h u s */ 0xf8200000 /* strsh.w */, /* h s l */ 0xf9300000 /* ldrsh.w */, /* h s s */ 0xf8200000 /* strsh.w */, + +/* p u l */ 0xf8100000 /* pld */, }; /* Helper function. Dst should be reg + value, using at most 1 instruction, flags does not set. */ -static sljit_si emit_set_delta(struct sljit_compiler *compiler, sljit_si dst, sljit_si reg, sljit_sw value) +static sljit_s32 emit_set_delta(struct sljit_compiler *compiler, sljit_s32 dst, sljit_s32 reg, sljit_sw value) { if (value >= 0) { if (value <= 0xfff) @@ -908,53 +961,56 @@ static sljit_si emit_set_delta(struct sljit_compiler *compiler, sljit_si dst, sl return SLJIT_ERR_UNSUPPORTED; } -/* Can perform an operation using at most 1 instruction. */ -static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) +static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, + sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg) { - sljit_si tmp; + sljit_s32 other_r; + sljit_uw tmp; SLJIT_ASSERT(arg & SLJIT_MEM); - - if (SLJIT_UNLIKELY(flags & UPDATE)) { - if ((arg & 0xf) && !(arg & 0xf0) && argw <= 0xff && argw >= -0xff) { - flags &= ~UPDATE; - arg &= 0xf; - if (SLJIT_UNLIKELY(flags & ARG_TEST)) - return 1; - - if (argw >= 0) - argw |= 0x200; - else { - argw = -argw; - } - SLJIT_ASSERT(argw >= 0 && (argw & 0xff) <= 0xff); - FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | MEM_IMM8 | RT4(reg) | RN4(arg) | 0x100 | argw)); - return -1; + SLJIT_ASSERT((arg & REG_MASK) != tmp_reg); + arg &= ~SLJIT_MEM; + + if (SLJIT_UNLIKELY(!(arg & REG_MASK))) { + tmp = get_imm(argw & ~0xfff); + if (tmp != INVALID_IMM) { + FAIL_IF(push_inst32(compiler, MOV_WI | RD4(tmp_reg) | tmp)); + return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(tmp_reg) | (argw & 0xfff)); } - return (flags & ARG_TEST) ? 
SLJIT_SUCCESS : 0; + + FAIL_IF(load_immediate(compiler, tmp_reg, argw)); + if (IS_2_LO_REGS(reg, tmp_reg) && sljit_mem16_imm5[flags]) + return push_inst16(compiler, sljit_mem16_imm5[flags] | RD3(reg) | RN3(tmp_reg)); + return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(tmp_reg)); } - if (SLJIT_UNLIKELY(arg & 0xf0)) { + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { argw &= 0x3; - tmp = (arg >> 4) & 0xf; + other_r = OFFS_REG(arg); arg &= 0xf; - if (SLJIT_UNLIKELY(flags & ARG_TEST)) - return 1; - if (!argw && IS_3_LO_REGS(reg, arg, tmp)) - FAIL_IF(push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(tmp))); - else - FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(tmp) | (argw << 4))); - return -1; + if (!argw && IS_3_LO_REGS(reg, arg, other_r)) + return push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(other_r)); + return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(other_r) | (argw << 4)); } - if (!(arg & 0xf) || argw > 0xfff || argw < -0xff) - return (flags & ARG_TEST) ? SLJIT_SUCCESS : 0; - - if (SLJIT_UNLIKELY(flags & ARG_TEST)) - return 1; + if (argw > 0xfff) { + tmp = get_imm(argw & ~0xfff); + if (tmp != INVALID_IMM) { + push_inst32(compiler, ADD_WI | RD4(tmp_reg) | RN4(arg) | tmp); + arg = tmp_reg; + argw = argw & 0xfff; + } + } + else if (argw < -0xff) { + tmp = get_imm(-argw & ~0xff); + if (tmp != INVALID_IMM) { + push_inst32(compiler, SUB_WI | RD4(tmp_reg) | RN4(arg) | tmp); + arg = tmp_reg; + argw = -(-argw & 0xff); + } + } - arg &= 0xf; if (IS_2_LO_REGS(reg, arg) && sljit_mem16_imm5[flags]) { tmp = 3; if (IS_WORD_SIZE(flags)) { @@ -972,249 +1028,169 @@ static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si flags, tmp = 1; } - if (tmp != 3) { - FAIL_IF(push_inst16(compiler, sljit_mem16_imm5[flags] | RD3(reg) | RN3(arg) | (argw << (6 - tmp)))); - return -1; - } + if (tmp < 3) + return push_inst16(compiler, sljit_mem16_imm5[flags] | RD3(reg) | RN3(arg) | (argw << (6 - tmp))); } - - /* SP based immediate. */ - if (SLJIT_UNLIKELY(arg == SLJIT_LOCALS_REG) && OFFSET_CHECK(0xff, 2) && IS_WORD_SIZE(flags) && reg_map[reg] <= 7) { - FAIL_IF(push_inst16(compiler, STR_SP | ((flags & STORE) ? 0 : 0x800) | RDN3(reg) | (argw >> 2))); - return -1; + else if (SLJIT_UNLIKELY(arg == SLJIT_SP) && IS_WORD_SIZE(flags) && OFFSET_CHECK(0xff, 2) && reg_map[reg] <= 7) { + /* SP based immediate. */ + return push_inst16(compiler, STR_SP | ((flags & STORE) ? 0 : 0x800) | RDN3(reg) | (argw >> 2)); } - if (argw >= 0) - FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(arg) | argw)); - else - FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | MEM_IMM8 | RT4(reg) | RN4(arg) | -argw)); - return -1; -} + if (argw >= 0 && argw <= 0xfff) + return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(arg) | argw); + else if (argw < 0 && argw >= -0xff) + return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM8 | RT4(reg) | RN4(arg) | -argw); -/* see getput_arg below. - Note: can_cache is called only for binary operators. Those - operators always uses word arguments without write back. */ -static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) -{ - /* Simple operation except for updates. 
*/ - if ((arg & 0xf0) || !(next_arg & SLJIT_MEM)) - return 0; + SLJIT_ASSERT(arg != tmp_reg); - if (!(arg & 0xf)) { - if ((sljit_uw)(argw - next_argw) <= 0xfff || (sljit_uw)(next_argw - argw) <= 0xfff) - return 1; - return 0; - } - - if (argw == next_argw) - return 1; - - if (arg == next_arg && ((sljit_uw)(argw - next_argw) <= 0xfff || (sljit_uw)(next_argw - argw) <= 0xfff)) - return 1; - - return 0; + FAIL_IF(load_immediate(compiler, tmp_reg, argw)); + if (IS_3_LO_REGS(reg, arg, tmp_reg)) + return push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(tmp_reg)); + return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(tmp_reg)); } -/* Emit the necessary instructions. See can_cache above. */ -static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +/* --------------------------------------------------------------------- */ +/* Entry, exit */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - sljit_si tmp_r; - sljit_sw tmp; + sljit_s32 args, size, i, tmp; + sljit_ins push = 0; +#ifdef _WIN32 + sljit_uw imm; +#endif - SLJIT_ASSERT(arg & SLJIT_MEM); - if (!(next_arg & SLJIT_MEM)) { - next_arg = 0; - next_argw = 0; - } + CHECK_ERROR(); + CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - tmp_r = (flags & STORE) ? TMP_REG3 : reg; + tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG; + for (i = SLJIT_S0; i >= tmp; i--) + push |= 1 << reg_map[i]; - if (SLJIT_UNLIKELY(flags & UPDATE)) { - flags &= ~UPDATE; - /* Update only applies if a base register exists. */ - if (arg & 0xf) { - /* There is no caching here. */ - tmp = (arg & 0xf0) >> 4; - arg &= 0xf; + for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) + push |= 1 << reg_map[i]; - if (!tmp) { - if (!(argw & ~0xfff)) { - FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(arg) | argw)); - return push_inst32(compiler, ADDWI | RD4(arg) | RN4(arg) | IMM12(argw)); - } + FAIL_IF((push & 0xff00) + ? 
push_inst32(compiler, PUSH_W | (1 << 14) | push) + : push_inst16(compiler, PUSH | (1 << 8) | push)); - if (compiler->cache_arg == SLJIT_MEM) { - if (argw == compiler->cache_argw) { - tmp = TMP_REG3; - argw = 0; - } - else if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, argw - compiler->cache_argw) != SLJIT_ERR_UNSUPPORTED) { - FAIL_IF(compiler->error); - compiler->cache_argw = argw; - tmp = TMP_REG3; - argw = 0; - } - } + /* Stack must be aligned to 8 bytes: (LR, R4) */ + size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + local_size = ((size + local_size + 7) & ~7) - size; + compiler->local_size = local_size; - if (argw) { - FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); - compiler->cache_arg = SLJIT_MEM; - compiler->cache_argw = argw; - tmp = TMP_REG3; - argw = 0; - } - } +#ifdef _WIN32 + if (local_size >= 256) { + if (local_size > 4096) + imm = get_imm(4096); + else + imm = get_imm(local_size & ~0xff); - argw &= 0x3; - if (!argw && IS_3_LO_REGS(reg, arg, tmp)) { - FAIL_IF(push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(tmp))); - return push_inst16(compiler, ADD | SET_REGS44(arg, tmp)); - } - FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(tmp) | (argw << 4))); - return push_inst32(compiler, ADD_W | RD4(arg) | RN4(arg) | RM4(tmp) | (argw << 6)); - } + SLJIT_ASSERT(imm != INVALID_IMM); + FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(SLJIT_SP) | imm)); } - - SLJIT_ASSERT(!(arg & 0xf0)); - - if (compiler->cache_arg == arg) { - if (!((argw - compiler->cache_argw) & ~0xfff)) - return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(TMP_REG3) | (argw - compiler->cache_argw)); - if (!((compiler->cache_argw - argw) & ~0xff)) - return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM8 | RT4(reg) | RN4(TMP_REG3) | (compiler->cache_argw - argw)); - if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, argw - compiler->cache_argw) != SLJIT_ERR_UNSUPPORTED) { - FAIL_IF(compiler->error); - return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(TMP_REG3) | 0); - } +#else + if (local_size > 0) { + if (local_size <= (127 << 2)) + FAIL_IF(push_inst16(compiler, SUB_SP | (local_size >> 2))); + else + FAIL_IF(emit_op_imm(compiler, SLJIT_SUB | ARG2_IMM, SLJIT_SP, SLJIT_SP, local_size)); } +#endif - next_arg = (arg & 0xf) && (arg == next_arg); - arg &= 0xf; - if (arg && compiler->cache_arg == SLJIT_MEM && compiler->cache_argw == argw) - return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(TMP_REG3)); - - compiler->cache_argw = argw; - if (next_arg && emit_set_delta(compiler, TMP_REG3, arg, argw) != SLJIT_ERR_UNSUPPORTED) { - FAIL_IF(compiler->error); - compiler->cache_arg = SLJIT_MEM | arg; - arg = 0; - } - else { - FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); - compiler->cache_arg = SLJIT_MEM; + args = get_arg_count(arg_types); - if (next_arg) { - FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG3, arg))); - compiler->cache_arg = SLJIT_MEM | arg; - arg = 0; - } - } + if (args >= 1) + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_S0, SLJIT_R0))); + if (args >= 2) + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_S1, SLJIT_R1))); + if (args >= 3) + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_S2, SLJIT_R2))); + +#ifdef _WIN32 + if (local_size >= 256) { + if (local_size > 4096) { + imm = get_imm(4096); + SLJIT_ASSERT(imm != INVALID_IMM); + + if (local_size < 4 * 4096) { + if (local_size > 2 * 4096) { + FAIL_IF(push_inst32(compiler, LDRI | 0x400 | 
RT4(TMP_REG2) | RN4(TMP_REG1))); + FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(TMP_REG1) | imm)); + local_size -= 4096; + } - if (arg) - return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(TMP_REG3)); - return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(TMP_REG3) | 0); -} + if (local_size > 2 * 4096) { + FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG2) | RN4(TMP_REG1))); + FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(TMP_REG1) | imm)); + local_size -= 4096; + } -static SLJIT_INLINE sljit_si emit_op_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) -{ - if (getput_arg_fast(compiler, flags, reg, arg, argw)) - return compiler->error; - compiler->cache_arg = 0; - compiler->cache_argw = 0; - return getput_arg(compiler, flags, reg, arg, argw, 0, 0); -} + FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG2) | RN4(TMP_REG1))); + local_size -= 4096; -static SLJIT_INLINE sljit_si emit_op_mem2(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg1, sljit_sw arg1w, sljit_si arg2, sljit_sw arg2w) -{ - if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) - return compiler->error; - return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); -} + SLJIT_ASSERT(local_size > 0); + } + else { + FAIL_IF(load_immediate(compiler, SLJIT_R3, (local_size >> 12) - 1)); + FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG2) | RN4(TMP_REG1))); + FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(TMP_REG1) | imm)); + SLJIT_ASSERT(reg_map[SLJIT_R3] < 7); + FAIL_IF(push_inst16(compiler, SUBSI8 | RDN3(SLJIT_R3) | 1)); + FAIL_IF(push_inst16(compiler, BCC | (0x1 << 8) /* not-equal */ | (-7 & 0xff))); -/* --------------------------------------------------------------------- */ -/* Entry, exit */ -/* --------------------------------------------------------------------- */ + local_size &= 0xfff; -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) -{ - sljit_si size; - sljit_ins push; + if (local_size != 0) + FAIL_IF(push_inst32(compiler, LDRI | 0x400 | RT4(TMP_REG2) | RN4(TMP_REG1))); + } - CHECK_ERROR(); - check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + if (local_size >= 256) { + imm = get_imm(local_size & ~0xff); + SLJIT_ASSERT(imm != INVALID_IMM); - compiler->scratches = scratches; - compiler->saveds = saveds; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; -#endif + FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(TMP_REG1) | imm)); + } + } - push = (1 << 4); - if (saveds >= 5) - push |= 1 << 11; - if (saveds >= 4) - push |= 1 << 10; - if (saveds >= 3) - push |= 1 << 8; - if (saveds >= 2) - push |= 1 << 7; - if (saveds >= 1) - push |= 1 << 6; - if (scratches >= 5) - push |= 1 << 5; - FAIL_IF(saveds >= 3 - ? push_inst32(compiler, PUSH_W | (1 << 14) | push) - : push_inst16(compiler, PUSH | push)); + local_size &= 0xff; + FAIL_IF(push_inst32(compiler, LDRI | 0x400 | (local_size > 0 ? 
0x100 : 0) | RT4(TMP_REG2) | RN4(TMP_REG1) | local_size)); - /* Stack must be aligned to 8 bytes: */ - size = (3 + saveds) * sizeof(sljit_uw); - local_size += size; - local_size = (local_size + 7) & ~7; - local_size -= size; - compiler->local_size = local_size; - if (local_size > 0) { - if (local_size <= (127 << 2)) - FAIL_IF(push_inst16(compiler, SUB_SP | (local_size >> 2))); - else - FAIL_IF(emit_op_imm(compiler, SLJIT_SUB | ARG2_IMM, SLJIT_LOCALS_REG, SLJIT_LOCALS_REG, local_size)); + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_SP, TMP_REG1))); } - - if (args >= 1) - FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_SAVED_REG1, SLJIT_SCRATCH_REG1))); - if (args >= 2) - FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_SAVED_REG2, SLJIT_SCRATCH_REG2))); - if (args >= 3) - FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_SAVED_REG3, SLJIT_SCRATCH_REG3))); + else if (local_size > 0) + FAIL_IF(push_inst32(compiler, LDRI | 0x500 | RT4(TMP_REG1) | RN4(SLJIT_SP) | local_size)); +#endif return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - sljit_si size; - - CHECK_ERROR_VOID(); - check_sljit_set_context(compiler, args, scratches, saveds, local_size); + sljit_s32 size; - compiler->scratches = scratches; - compiler->saveds = saveds; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; -#endif + CHECK_ERROR(); + CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - size = (3 + saveds) * sizeof(sljit_uw); - local_size += size; - local_size = (local_size + 7) & ~7; - local_size -= size; - compiler->local_size = local_size; + size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + compiler->local_size = ((size + local_size + 7) & ~7) - size; + return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { - sljit_ins pop; + sljit_s32 i, tmp; + sljit_ins pop = 0; CHECK_ERROR(); - check_sljit_emit_return(compiler, op, src, srcw); + CHECK(check_sljit_emit_return(compiler, op, src, srcw)); FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); @@ -1222,36 +1198,35 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compi if (compiler->local_size <= (127 << 2)) FAIL_IF(push_inst16(compiler, ADD_SP | (compiler->local_size >> 2))); else - FAIL_IF(emit_op_imm(compiler, SLJIT_ADD | ARG2_IMM, SLJIT_LOCALS_REG, SLJIT_LOCALS_REG, compiler->local_size)); - } - - pop = (1 << 4); - if (compiler->saveds >= 5) - pop |= 1 << 11; - if (compiler->saveds >= 4) - pop |= 1 << 10; - if (compiler->saveds >= 3) - pop |= 1 << 8; - if (compiler->saveds >= 2) - pop |= 1 << 7; - if (compiler->saveds >= 1) - pop |= 1 << 6; - if (compiler->scratches >= 5) - pop |= 1 << 5; - return compiler->saveds >= 3 + FAIL_IF(emit_op_imm(compiler, SLJIT_ADD | ARG2_IMM, SLJIT_SP, SLJIT_SP, compiler->local_size)); + } + + tmp = 
compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG; + for (i = SLJIT_S0; i >= tmp; i--) + pop |= 1 << reg_map[i]; + + for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--) + pop |= 1 << reg_map[i]; + + return (pop & 0xff00) ? push_inst32(compiler, POP_W | (1 << 15) | pop) - : push_inst16(compiler, POP | pop); + : push_inst16(compiler, POP | (1 << 8) | pop); } /* --------------------------------------------------------------------- */ /* Operators */ /* --------------------------------------------------------------------- */ +#if !(defined __ARM_FEATURE_IDIV) && !(defined __ARM_ARCH_EXT_IDIV__) + #ifdef __cplusplus extern "C" { #endif -#if defined(__GNUC__) +#ifdef _WIN32 +extern unsigned long long __rt_udiv(unsigned int denominator, unsigned int numerator); +extern long long __rt_sdiv(int denominator, int numerator); +#elif defined(__GNUC__) extern unsigned int __aeabi_uidivmod(unsigned int numerator, int unsigned denominator); extern int __aeabi_idivmod(int numerator, int denominator); #else @@ -1262,285 +1237,264 @@ extern int __aeabi_idivmod(int numerator, int denominator); } #endif -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +#endif /* !__ARM_FEATURE_IDIV && !__ARM_ARCH_EXT_IDIV__ */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) { +#if !(defined __ARM_FEATURE_IDIV) && !(defined __ARM_ARCH_EXT_IDIV__) + sljit_sw saved_reg_list[3]; + sljit_sw saved_reg_count; +#endif + CHECK_ERROR(); - check_sljit_emit_op0(compiler, op); + CHECK(check_sljit_emit_op0(compiler, op)); op = GET_OPCODE(op); switch (op) { case SLJIT_BREAKPOINT: - push_inst16(compiler, BKPT); - break; + return push_inst16(compiler, BKPT); case SLJIT_NOP: - push_inst16(compiler, NOP); - break; - case SLJIT_UMUL: - case SLJIT_SMUL: - return push_inst32(compiler, (op == SLJIT_UMUL ? UMULL : SMULL) - | (reg_map[SLJIT_SCRATCH_REG2] << 8) - | (reg_map[SLJIT_SCRATCH_REG1] << 12) - | (reg_map[SLJIT_SCRATCH_REG1] << 16) - | reg_map[SLJIT_SCRATCH_REG2]); - case SLJIT_UDIV: - case SLJIT_SDIV: - if (compiler->scratches >= 4) { - FAIL_IF(push_inst32(compiler, 0xf84d2d04 /* str r2, [sp, #-4]! */)); - FAIL_IF(push_inst32(compiler, 0xf84dcd04 /* str ip, [sp, #-4]! */)); - } else if (compiler->scratches >= 3) - FAIL_IF(push_inst32(compiler, 0xf84d2d08 /* str r2, [sp, #-8]! */)); -#if defined(__GNUC__) + return push_inst16(compiler, NOP); + case SLJIT_LMUL_UW: + case SLJIT_LMUL_SW: + return push_inst32(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL) + | (reg_map[SLJIT_R1] << 8) + | (reg_map[SLJIT_R0] << 12) + | (reg_map[SLJIT_R0] << 16) + | reg_map[SLJIT_R1]); +#if (defined __ARM_FEATURE_IDIV) || (defined __ARM_ARCH_EXT_IDIV__) + case SLJIT_DIVMOD_UW: + case SLJIT_DIVMOD_SW: + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG1, SLJIT_R0))); + FAIL_IF(push_inst32(compiler, (op == SLJIT_DIVMOD_UW ? UDIV : SDIV) | RD4(SLJIT_R0) | RN4(SLJIT_R0) | RM4(SLJIT_R1))); + FAIL_IF(push_inst32(compiler, MUL | RD4(SLJIT_R1) | RN4(SLJIT_R0) | RM4(SLJIT_R1))); + return push_inst32(compiler, SUB_W | RD4(SLJIT_R1) | RN4(TMP_REG1) | RM4(SLJIT_R1)); + case SLJIT_DIV_UW: + case SLJIT_DIV_SW: + return push_inst32(compiler, (op == SLJIT_DIV_UW ? 
UDIV : SDIV) | RD4(SLJIT_R0) | RN4(SLJIT_R0) | RM4(SLJIT_R1)); +#else /* !__ARM_FEATURE_IDIV && !__ARM_ARCH_EXT_IDIV__ */ + case SLJIT_DIVMOD_UW: + case SLJIT_DIVMOD_SW: + case SLJIT_DIV_UW: + case SLJIT_DIV_SW: + SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments); + SLJIT_ASSERT(reg_map[2] == 1 && reg_map[3] == 2 && reg_map[4] == 3); + + saved_reg_count = 0; + if (compiler->scratches >= 4) + saved_reg_list[saved_reg_count++] = 3; + if (compiler->scratches >= 3) + saved_reg_list[saved_reg_count++] = 2; + if (op >= SLJIT_DIV_UW) + saved_reg_list[saved_reg_count++] = 1; + + if (saved_reg_count > 0) { + FAIL_IF(push_inst32(compiler, 0xf84d0d00 | (saved_reg_count >= 3 ? 16 : 8) + | (saved_reg_list[0] << 12) /* str rX, [sp, #-8/-16]! */)); + if (saved_reg_count >= 2) { + SLJIT_ASSERT(saved_reg_list[1] < 8); + FAIL_IF(push_inst16(compiler, 0x9001 | (saved_reg_list[1] << 8) /* str rX, [sp, #4] */)); + } + if (saved_reg_count >= 3) { + SLJIT_ASSERT(saved_reg_list[2] < 8); + FAIL_IF(push_inst16(compiler, 0x9002 | (saved_reg_list[2] << 8) /* str rX, [sp, #8] */)); + } + } + +#ifdef _WIN32 + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG1, SLJIT_R0))); + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_R0, SLJIT_R1))); + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_R1, TMP_REG1))); + FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM, + ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__rt_udiv) : SLJIT_FUNC_OFFSET(__rt_sdiv)))); +#elif defined(__GNUC__) FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM, - (op == SLJIT_UDIV ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod)))); + ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod)))); #else #error "Software divmod functions are needed" #endif - if (compiler->scratches >= 4) { - FAIL_IF(push_inst32(compiler, 0xf85dcb04 /* ldr ip, [sp], #4 */)); - return push_inst32(compiler, 0xf85d2b04 /* ldr r2, [sp], #4 */); - } else if (compiler->scratches >= 3) - return push_inst32(compiler, 0xf85d2b08 /* ldr r2, [sp], #8 */); + + if (saved_reg_count > 0) { + if (saved_reg_count >= 3) { + SLJIT_ASSERT(saved_reg_list[2] < 8); + FAIL_IF(push_inst16(compiler, 0x9802 | (saved_reg_list[2] << 8) /* ldr rX, [sp, #8] */)); + } + if (saved_reg_count >= 2) { + SLJIT_ASSERT(saved_reg_list[1] < 8); + FAIL_IF(push_inst16(compiler, 0x9801 | (saved_reg_list[1] << 8) /* ldr rX, [sp, #4] */)); + } + return push_inst32(compiler, 0xf85d0b00 | (saved_reg_count >= 3 ? 
16 : 8) + | (saved_reg_list[0] << 12) /* ldr rX, [sp], #8/16 */); + } return SLJIT_SUCCESS; +#endif /* __ARM_FEATURE_IDIV || __ARM_ARCH_EXT_IDIV__ */ } return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_si dst_r, flags; - sljit_si op_flags = GET_ALL_FLAGS(op); + sljit_s32 dst_r, flags; + sljit_s32 op_flags = GET_ALL_FLAGS(op); CHECK_ERROR(); - check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); + CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw)); ADJUST_LOCAL_OFFSET(dst, dstw); ADJUST_LOCAL_OFFSET(src, srcw); - compiler->cache_arg = 0; - compiler->cache_argw = 0; + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) { + /* Since TMP_PC has index 15, IS_2_LO_REGS and IS_3_LO_REGS checks always fail. */ + if (op <= SLJIT_MOV_P && (src & SLJIT_MEM)) + return emit_op_mem(compiler, PRELOAD, TMP_PC, src, srcw, TMP_REG1); + return SLJIT_SUCCESS; + } - dst_r = (dst >= SLJIT_SCRATCH_REG1 && dst <= TMP_REG3) ? dst : TMP_REG1; + dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1; op = GET_OPCODE(op); - if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) { + if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) { switch (op) { case SLJIT_MOV: - case SLJIT_MOV_UI: - case SLJIT_MOV_SI: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: case SLJIT_MOV_P: flags = WORD_SIZE; break; - case SLJIT_MOV_UB: + case SLJIT_MOV_U8: flags = BYTE_SIZE; if (src & SLJIT_IMM) - srcw = (sljit_ub)srcw; + srcw = (sljit_u8)srcw; break; - case SLJIT_MOV_SB: + case SLJIT_MOV_S8: flags = BYTE_SIZE | SIGNED; if (src & SLJIT_IMM) - srcw = (sljit_sb)srcw; + srcw = (sljit_s8)srcw; break; - case SLJIT_MOV_UH: + case SLJIT_MOV_U16: flags = HALF_SIZE; if (src & SLJIT_IMM) - srcw = (sljit_uh)srcw; + srcw = (sljit_u16)srcw; break; - case SLJIT_MOV_SH: + case SLJIT_MOV_S16: flags = HALF_SIZE | SIGNED; if (src & SLJIT_IMM) - srcw = (sljit_sh)srcw; - break; - case SLJIT_MOVU: - case SLJIT_MOVU_UI: - case SLJIT_MOVU_SI: - case SLJIT_MOVU_P: - flags = WORD_SIZE | UPDATE; - break; - case SLJIT_MOVU_UB: - flags = BYTE_SIZE | UPDATE; - if (src & SLJIT_IMM) - srcw = (sljit_ub)srcw; - break; - case SLJIT_MOVU_SB: - flags = BYTE_SIZE | SIGNED | UPDATE; - if (src & SLJIT_IMM) - srcw = (sljit_sb)srcw; - break; - case SLJIT_MOVU_UH: - flags = HALF_SIZE | UPDATE; - if (src & SLJIT_IMM) - srcw = (sljit_uh)srcw; - break; - case SLJIT_MOVU_SH: - flags = HALF_SIZE | SIGNED | UPDATE; - if (src & SLJIT_IMM) - srcw = (sljit_sh)srcw; + srcw = (sljit_s16)srcw; break; default: - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); flags = 0; break; } if (src & SLJIT_IMM) - FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG1, srcw)); + FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG2, srcw)); else if (src & SLJIT_MEM) { - if (getput_arg_fast(compiler, flags, dst_r, src, srcw)) - FAIL_IF(compiler->error); - else - FAIL_IF(getput_arg(compiler, flags, dst_r, src, srcw, dst, dstw)); + FAIL_IF(emit_op_mem(compiler, flags, dst_r, src, srcw, TMP_REG1)); } else { if (dst_r != TMP_REG1) - return emit_op_imm(compiler, op, dst_r, TMP_REG1, src); + return emit_op_imm(compiler, op, dst_r, TMP_REG2, src); dst_r = src; } - if (dst & SLJIT_MEM) { - if (getput_arg_fast(compiler, flags | STORE, dst_r, dst, dstw)) - return compiler->error; - else - return getput_arg(compiler, flags 
| STORE, dst_r, dst, dstw, 0, 0); - } - return SLJIT_SUCCESS; + if (!(dst & SLJIT_MEM)) + return SLJIT_SUCCESS; + + return emit_op_mem(compiler, flags | STORE, dst_r, dst, dstw, TMP_REG2); } if (op == SLJIT_NEG) { -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) compiler->skip_checks = 1; #endif return sljit_emit_op2(compiler, SLJIT_SUB | op_flags, dst, dstw, SLJIT_IMM, 0, src, srcw); } - flags = (GET_FLAGS(op_flags) ? SET_FLAGS : 0) | ((op_flags & SLJIT_KEEP_FLAGS) ? KEEP_FLAGS : 0); + flags = HAS_FLAGS(op_flags) ? SET_FLAGS : 0; + if (src & SLJIT_MEM) { - if (getput_arg_fast(compiler, WORD_SIZE, TMP_REG2, src, srcw)) - FAIL_IF(compiler->error); - else - FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG2, src, srcw, dst, dstw)); - src = TMP_REG2; + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1)); + src = TMP_REG1; } - if (src & SLJIT_IMM) - flags |= ARG2_IMM; - else - srcw = src; + emit_op_imm(compiler, flags | op, dst_r, TMP_REG2, src); - emit_op_imm(compiler, flags | op, dst_r, TMP_REG1, srcw); - - if (dst & SLJIT_MEM) { - if (getput_arg_fast(compiler, flags | STORE, dst_r, dst, dstw)) - return compiler->error; - else - return getput_arg(compiler, flags | STORE, dst_r, dst, dstw, 0, 0); - } + if (SLJIT_UNLIKELY(dst & SLJIT_MEM)) + return emit_op_mem(compiler, flags | STORE, dst_r, dst, dstw, TMP_REG2); return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_si dst_r, flags; + sljit_s32 dst_reg, flags, src2_reg; CHECK_ERROR(); - check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); ADJUST_LOCAL_OFFSET(dst, dstw); ADJUST_LOCAL_OFFSET(src1, src1w); ADJUST_LOCAL_OFFSET(src2, src2w); - compiler->cache_arg = 0; - compiler->cache_argw = 0; - - dst_r = (dst >= SLJIT_SCRATCH_REG1 && dst <= TMP_REG3) ? dst : TMP_REG1; - flags = (GET_FLAGS(op) ? SET_FLAGS : 0) | ((op & SLJIT_KEEP_FLAGS) ? 
KEEP_FLAGS : 0); - - if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, WORD_SIZE | STORE | ARG_TEST, TMP_REG1, dst, dstw)) - flags |= SLOW_DEST; - - if (src1 & SLJIT_MEM) { - if (getput_arg_fast(compiler, WORD_SIZE, TMP_REG1, src1, src1w)) - FAIL_IF(compiler->error); - else - flags |= SLOW_SRC1; - } - if (src2 & SLJIT_MEM) { - if (getput_arg_fast(compiler, WORD_SIZE, TMP_REG2, src2, src2w)) - FAIL_IF(compiler->error); - else - flags |= SLOW_SRC2; - } - - if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { - if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { - FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG2, src2, src2w, src1, src1w)); - FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG1, src1, src1w, dst, dstw)); - } - else { - FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG1, src1, src1w, src2, src2w)); - FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG2, src2, src2w, dst, dstw)); - } - } - else if (flags & SLOW_SRC1) - FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG1, src1, src1w, dst, dstw)); - else if (flags & SLOW_SRC2) - FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG2, src2, src2w, dst, dstw)); + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) + return SLJIT_SUCCESS; - if (src1 & SLJIT_MEM) - src1 = TMP_REG1; - if (src2 & SLJIT_MEM) - src2 = TMP_REG2; + dst_reg = SLOW_IS_REG(dst) ? dst : TMP_REG1; + flags = HAS_FLAGS(op) ? SET_FLAGS : 0; if (src1 & SLJIT_IMM) flags |= ARG1_IMM; + else if (src1 & SLJIT_MEM) { + emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src1, src1w, TMP_REG1); + src1w = TMP_REG1; + } else src1w = src1; + if (src2 & SLJIT_IMM) flags |= ARG2_IMM; + else if (src2 & SLJIT_MEM) { + src2_reg = (!(flags & ARG1_IMM) && (src1w == TMP_REG1)) ? TMP_REG2 : TMP_REG1; + emit_op_mem(compiler, WORD_SIZE, src2_reg, src2, src2w, src2_reg); + src2w = src2_reg; + } else src2w = src2; if (dst == SLJIT_UNUSED) flags |= UNUSED_RETURN; - if (GET_OPCODE(op) == SLJIT_MUL && (op & SLJIT_SET_O)) - flags |= SET_MULOV; + emit_op_imm(compiler, flags | GET_OPCODE(op), dst_reg, src1w, src2w); - emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src1w, src2w); - - if (dst & SLJIT_MEM) { - if (!(flags & SLOW_DEST)) { - getput_arg_fast(compiler, WORD_SIZE | STORE, dst_r, dst, dstw); - return compiler->error; - } - return getput_arg(compiler, WORD_SIZE | STORE, TMP_REG1, dst, dstw, 0, 0); - } - return SLJIT_SUCCESS; + if (!(dst & SLJIT_MEM)) + return SLJIT_SUCCESS; + return emit_op_mem(compiler, WORD_SIZE | STORE, dst_reg, dst, dstw, TMP_REG2); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg) { - check_sljit_get_register_index(reg); + CHECK_REG_INDEX(check_sljit_get_register_index(reg)); return reg_map[reg]; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_float_register_index(sljit_si reg) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg) { - check_sljit_get_float_register_index(reg); - return reg; + CHECK_REG_INDEX(check_sljit_get_float_register_index(reg)); + return (freg_map[reg] << 1); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, - void *instruction, sljit_si size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_s32 size) { CHECK_ERROR(); - check_sljit_emit_op_custom(compiler, instruction, size); - SLJIT_ASSERT(size == 2 || size == 4); + CHECK(check_sljit_emit_op_custom(compiler, instruction, size)); if (size == 
2) - return push_inst16(compiler, *(sljit_uh*)instruction); + return push_inst16(compiler, *(sljit_u16*)instruction); return push_inst32(compiler, *(sljit_ins*)instruction); } @@ -1548,173 +1502,209 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *co /* Floating point operators */ /* --------------------------------------------------------------------- */ -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) -{ - return 1; -} - #define FPU_LOAD (1 << 20) -static sljit_si emit_fop_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) +static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw) { - sljit_sw tmp; sljit_uw imm; - sljit_sw inst = VSTR_F32 | (flags & (SLJIT_SINGLE_OP | FPU_LOAD)); + sljit_sw inst = VSTR_F32 | (flags & (SLJIT_F32_OP | FPU_LOAD)); SLJIT_ASSERT(arg & SLJIT_MEM); /* Fast loads and stores. */ - if (SLJIT_UNLIKELY(arg & 0xf0)) { - FAIL_IF(push_inst32(compiler, ADD_W | RD4(TMP_REG2) | RN4(arg & 0xf) | RM4((arg & 0xf0) >> 4) | ((argw & 0x3) << 6))); - arg = SLJIT_MEM | TMP_REG2; + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { + FAIL_IF(push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(arg & REG_MASK) | RM4(OFFS_REG(arg)) | ((argw & 0x3) << 6))); + arg = SLJIT_MEM | TMP_REG1; argw = 0; } - if ((arg & 0xf) && (argw & 0x3) == 0) { + if ((arg & REG_MASK) && (argw & 0x3) == 0) { if (!(argw & ~0x3fc)) - return push_inst32(compiler, inst | 0x800000 | RN4(arg & 0xf) | DD4(reg) | (argw >> 2)); + return push_inst32(compiler, inst | 0x800000 | RN4(arg & REG_MASK) | DD4(reg) | (argw >> 2)); if (!(-argw & ~0x3fc)) - return push_inst32(compiler, inst | RN4(arg & 0xf) | DD4(reg) | (-argw >> 2)); - } - - SLJIT_ASSERT(!(arg & 0xf0)); - if (compiler->cache_arg == arg) { - tmp = argw - compiler->cache_argw; - if (!(tmp & ~0x3fc)) - return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG3) | DD4(reg) | (tmp >> 2)); - if (!(-tmp & ~0x3fc)) - return push_inst32(compiler, inst | RN4(TMP_REG3) | DD4(reg) | (-tmp >> 2)); - if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, tmp) != SLJIT_ERR_UNSUPPORTED) { - FAIL_IF(compiler->error); - compiler->cache_argw = argw; - return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG3) | DD4(reg)); - } + return push_inst32(compiler, inst | RN4(arg & REG_MASK) | DD4(reg) | (-argw >> 2)); } - if (arg & 0xf) { - if (emit_set_delta(compiler, TMP_REG1, arg & 0xf, argw) != SLJIT_ERR_UNSUPPORTED) { + if (arg & REG_MASK) { + if (emit_set_delta(compiler, TMP_REG1, arg & REG_MASK, argw) != SLJIT_ERR_UNSUPPORTED) { FAIL_IF(compiler->error); return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg)); } imm = get_imm(argw & ~0x3fc); if (imm != INVALID_IMM) { - FAIL_IF(push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(arg & 0xf) | imm)); + FAIL_IF(push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(arg & REG_MASK) | imm)); return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg) | ((argw & 0x3fc) >> 2)); } imm = get_imm(-argw & ~0x3fc); if (imm != INVALID_IMM) { argw = -argw; - FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(arg & 0xf) | imm)); + FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(arg & REG_MASK) | imm)); return push_inst32(compiler, inst | RN4(TMP_REG1) | DD4(reg) | ((argw & 0x3fc) >> 2)); } } - compiler->cache_arg = arg; - compiler->cache_argw = argw; + FAIL_IF(load_immediate(compiler, TMP_REG1, argw)); + if (arg & REG_MASK) + FAIL_IF(push_inst16(compiler, ADD 
| SET_REGS44(TMP_REG1, (arg & REG_MASK)))); + return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg)); +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + op ^= SLJIT_F32_OP; + + if (src & SLJIT_MEM) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src, srcw)); + src = TMP_FREG1; + } + + FAIL_IF(push_inst32(compiler, VCVT_S32_F32 | (op & SLJIT_F32_OP) | DD4(TMP_FREG1) | DM4(src))); + + if (FAST_IS_REG(dst)) + return push_inst32(compiler, VMOV | (1 << 20) | RT4(dst) | DN4(TMP_FREG1)); + + /* Store the integer value from a VFP register. */ + return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw); +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; + + op ^= SLJIT_F32_OP; - if (SLJIT_UNLIKELY(!(arg & 0xf))) - FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + if (FAST_IS_REG(src)) + FAIL_IF(push_inst32(compiler, VMOV | RT4(src) | DN4(TMP_FREG1))); + else if (src & SLJIT_MEM) { + /* Load the integer value into a VFP register. */ + FAIL_IF(emit_fop_mem(compiler, FPU_LOAD, TMP_FREG1, src, srcw)); + } else { - FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); - if (arg & 0xf) - FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG3, (arg & 0xf)))); + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); + FAIL_IF(push_inst32(compiler, VMOV | RT4(TMP_REG1) | DN4(TMP_FREG1))); } - return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG3) | DD4(reg)); + + FAIL_IF(push_inst32(compiler, VCVT_F32_S32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(TMP_FREG1))); + + if (dst & SLJIT_MEM) + return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + op ^= SLJIT_F32_OP; + + if (src1 & SLJIT_MEM) { + emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w); + src1 = TMP_FREG1; + } + + if (src2 & SLJIT_MEM) { + emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w); + src2 = TMP_FREG2; + } + + FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_F32_OP) | DD4(src1) | DM4(src2))); + return push_inst32(compiler, VMRS); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_si dst_r; + sljit_s32 dst_r; CHECK_ERROR(); - check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); - SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100), float_transfer_bit_error); - compiler->cache_arg = 0; - compiler->cache_argw = 0; - op ^= SLJIT_SINGLE_OP; + SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100), float_transfer_bit_error); + SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw); - if (GET_OPCODE(op) == SLJIT_CMPD) { - if (dst & SLJIT_MEM) { - emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, dst, dstw); - dst = TMP_FREG1; - } - if (src & SLJIT_MEM) { - emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, 
TMP_FREG2, src, srcw); - src = TMP_FREG2; - } - FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst) | DM4(src))); - return push_inst32(compiler, VMRS); - } + dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; + + if (GET_OPCODE(op) != SLJIT_CONV_F64_FROM_F32) + op ^= SLJIT_F32_OP; - dst_r = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG1 : dst; if (src & SLJIT_MEM) { - emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, dst_r, src, srcw); + emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, dst_r, src, srcw); src = dst_r; } switch (GET_OPCODE(op)) { - case SLJIT_MOVD: - if (src != dst_r) - FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src))); + case SLJIT_MOV_F64: + if (src != dst_r) { + if (dst_r != TMP_FREG1) + FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src))); + else + dst_r = src; + } break; - case SLJIT_NEGD: - FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src))); + case SLJIT_NEG_F64: + FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src))); break; - case SLJIT_ABSD: - FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src))); + case SLJIT_ABS_F64: + FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src))); + break; + case SLJIT_CONV_F64_FROM_F32: + FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src))); + op ^= SLJIT_F32_OP; break; } if (dst & SLJIT_MEM) - return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw); + return emit_fop_mem(compiler, (op & SLJIT_F32_OP), dst_r, dst, dstw); return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_si dst_r; + sljit_s32 dst_r; CHECK_ERROR(); - check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); - compiler->cache_arg = 0; - compiler->cache_argw = 0; - op ^= SLJIT_SINGLE_OP; + op ^= SLJIT_F32_OP; - dst_r = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG1 : dst; + dst_r = FAST_IS_REG(dst) ? 
dst : TMP_FREG1; if (src1 & SLJIT_MEM) { - emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w); + emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w); src1 = TMP_FREG1; } if (src2 & SLJIT_MEM) { - emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w); + emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w); src2 = TMP_FREG2; } switch (GET_OPCODE(op)) { - case SLJIT_ADDD: - FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); + case SLJIT_ADD_F64: + FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); break; - case SLJIT_SUBD: - FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); + case SLJIT_SUB_F64: + FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); break; - case SLJIT_MULD: - FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); + case SLJIT_MUL_F64: + FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); break; - case SLJIT_DIVD: - FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); + case SLJIT_DIV_F64: + FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); break; } - if (dst & SLJIT_MEM) - return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw); - return SLJIT_SUCCESS; + if (!(dst & SLJIT_MEM)) + return SLJIT_SUCCESS; + return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw); } #undef FPU_LOAD @@ -1723,106 +1713,92 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compile /* Other instructions */ /* --------------------------------------------------------------------- */ -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) { CHECK_ERROR(); - check_sljit_emit_fast_enter(compiler, dst, dstw); + CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); ADJUST_LOCAL_OFFSET(dst, dstw); - /* For UNUSED dst. Uncommon, but possible. */ - if (dst == SLJIT_UNUSED) - return SLJIT_SUCCESS; + SLJIT_ASSERT(reg_map[TMP_REG2] == 14); - if (dst <= TMP_REG3) - return push_inst16(compiler, MOV | SET_REGS44(dst, TMP_REG3)); + if (FAST_IS_REG(dst)) + return push_inst16(compiler, MOV | SET_REGS44(dst, TMP_REG2)); /* Memory. */ - if (getput_arg_fast(compiler, WORD_SIZE | STORE, TMP_REG3, dst, dstw)) - return compiler->error; - /* TMP_REG3 is used for caching. 
*/ - FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG2, TMP_REG3))); - compiler->cache_arg = 0; - compiler->cache_argw = 0; - return getput_arg(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw, 0, 0); + return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw, TMP_REG1); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) { CHECK_ERROR(); - check_sljit_emit_fast_return(compiler, src, srcw); + CHECK(check_sljit_emit_fast_return(compiler, src, srcw)); ADJUST_LOCAL_OFFSET(src, srcw); - if (src <= TMP_REG3) - FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG3, src))); - else if (src & SLJIT_MEM) { - if (getput_arg_fast(compiler, WORD_SIZE, TMP_REG3, src, srcw)) - FAIL_IF(compiler->error); - else { - compiler->cache_arg = 0; - compiler->cache_argw = 0; - FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG2, src, srcw, 0, 0)); - FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG3, TMP_REG2))); - } - } - else if (src & SLJIT_IMM) - FAIL_IF(load_immediate(compiler, TMP_REG3, srcw)); - return push_inst16(compiler, BLX | RN3(TMP_REG3)); + SLJIT_ASSERT(reg_map[TMP_REG2] == 14); + + if (FAST_IS_REG(src)) + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG2, src))); + else + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, src, srcw, TMP_REG2)); + + return push_inst16(compiler, BX | RN3(TMP_REG2)); } /* --------------------------------------------------------------------- */ /* Conditional instructions */ /* --------------------------------------------------------------------- */ -static sljit_uw get_cc(sljit_si type) +static sljit_uw get_cc(sljit_s32 type) { switch (type) { - case SLJIT_C_EQUAL: - case SLJIT_C_MUL_NOT_OVERFLOW: - case SLJIT_C_FLOAT_EQUAL: + case SLJIT_EQUAL: + case SLJIT_MUL_NOT_OVERFLOW: + case SLJIT_EQUAL_F64: return 0x0; - case SLJIT_C_NOT_EQUAL: - case SLJIT_C_MUL_OVERFLOW: - case SLJIT_C_FLOAT_NOT_EQUAL: + case SLJIT_NOT_EQUAL: + case SLJIT_MUL_OVERFLOW: + case SLJIT_NOT_EQUAL_F64: return 0x1; - case SLJIT_C_LESS: - case SLJIT_C_FLOAT_LESS: + case SLJIT_LESS: + case SLJIT_LESS_F64: return 0x3; - case SLJIT_C_GREATER_EQUAL: - case SLJIT_C_FLOAT_GREATER_EQUAL: + case SLJIT_GREATER_EQUAL: + case SLJIT_GREATER_EQUAL_F64: return 0x2; - case SLJIT_C_GREATER: - case SLJIT_C_FLOAT_GREATER: + case SLJIT_GREATER: + case SLJIT_GREATER_F64: return 0x8; - case SLJIT_C_LESS_EQUAL: - case SLJIT_C_FLOAT_LESS_EQUAL: + case SLJIT_LESS_EQUAL: + case SLJIT_LESS_EQUAL_F64: return 0x9; - case SLJIT_C_SIG_LESS: + case SLJIT_SIG_LESS: return 0xb; - case SLJIT_C_SIG_GREATER_EQUAL: + case SLJIT_SIG_GREATER_EQUAL: return 0xa; - case SLJIT_C_SIG_GREATER: + case SLJIT_SIG_GREATER: return 0xc; - case SLJIT_C_SIG_LESS_EQUAL: + case SLJIT_SIG_LESS_EQUAL: return 0xd; - case SLJIT_C_OVERFLOW: - case SLJIT_C_FLOAT_UNORDERED: + case SLJIT_OVERFLOW: + case SLJIT_UNORDERED_F64: return 0x6; - case SLJIT_C_NOT_OVERFLOW: - case SLJIT_C_FLOAT_ORDERED: + case SLJIT_NOT_OVERFLOW: + case SLJIT_ORDERED_F64: return 0x7; default: /* SLJIT_JUMP */ + SLJIT_UNREACHABLE(); return 0xe; } } @@ -1832,7 +1808,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi struct sljit_label *label; CHECK_ERROR_PTR(); - check_sljit_emit_label(compiler); + CHECK_PTR(check_sljit_emit_label(compiler)); if (compiler->last_label && compiler->last_label->size == compiler->size) return 
compiler->last_label; @@ -1843,20 +1819,19 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi return label; } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) { struct sljit_jump *jump; - sljit_si cc; + sljit_ins cc; CHECK_ERROR_PTR(); - check_sljit_emit_jump(compiler, type); + CHECK_PTR(check_sljit_emit_jump(compiler, type)); jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); PTR_FAIL_IF(!jump); set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); type &= 0xff; - /* In ARM, we don't need to touch the arguments. */ PTR_FAIL_IF(emit_imm32_const(compiler, TMP_REG1, 0)); if (type < SLJIT_JUMP) { jump->flags |= IS_COND; @@ -1876,56 +1851,322 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile return jump; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +#ifdef __SOFTFP__ + +static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src) +{ + sljit_s32 stack_offset = 0; + sljit_s32 arg_count = 0; + sljit_s32 word_arg_offset = 0; + sljit_s32 float_arg_count = 0; + sljit_s32 types = 0; + sljit_s32 src_offset = 4 * sizeof(sljit_sw); + sljit_u8 offsets[4]; + + if (src && FAST_IS_REG(*src)) + src_offset = reg_map[*src] * sizeof(sljit_sw); + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK); + + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + offsets[arg_count] = (sljit_u8)stack_offset; + stack_offset += sizeof(sljit_f32); + arg_count++; + float_arg_count++; + break; + case SLJIT_ARG_TYPE_F64: + if (stack_offset & 0x7) + stack_offset += sizeof(sljit_sw); + offsets[arg_count] = (sljit_u8)stack_offset; + stack_offset += sizeof(sljit_f64); + arg_count++; + float_arg_count++; + break; + default: + offsets[arg_count] = (sljit_u8)stack_offset; + stack_offset += sizeof(sljit_sw); + arg_count++; + word_arg_offset += sizeof(sljit_sw); + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + if (stack_offset > 16) + FAIL_IF(push_inst16(compiler, SUB_SP | (((stack_offset - 16) + 0x7) & ~0x7) >> 2)); + + SLJIT_ASSERT(reg_map[TMP_REG1] == 12); + + /* Process arguments in reversed direction. 
*/ + while (types) { + switch (types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + arg_count--; + float_arg_count--; + stack_offset = offsets[arg_count]; + + if (stack_offset < 16) { + if (src_offset == stack_offset) { + FAIL_IF(push_inst16(compiler, MOV | (src_offset << 1) | 4 | (1 << 7))); + *src = TMP_REG1; + } + FAIL_IF(push_inst32(compiler, VMOV | 0x100000 | (float_arg_count << 16) | (stack_offset << 10))); + } else + FAIL_IF(push_inst32(compiler, VSTR_F32 | 0x800000 | RN4(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2))); + break; + case SLJIT_ARG_TYPE_F64: + arg_count--; + float_arg_count--; + stack_offset = offsets[arg_count]; + + SLJIT_ASSERT((stack_offset & 0x7) == 0); + + if (stack_offset < 16) { + if (src_offset == stack_offset || src_offset == stack_offset + sizeof(sljit_sw)) { + FAIL_IF(push_inst16(compiler, MOV | (src_offset << 1) | 4 | (1 << 7))); + *src = TMP_REG1; + } + FAIL_IF(push_inst32(compiler, VMOV2 | 0x100000 | (stack_offset << 10) | ((stack_offset + sizeof(sljit_sw)) << 14) | float_arg_count)); + } else + FAIL_IF(push_inst32(compiler, VSTR_F32 | 0x800100 | RN4(SLJIT_SP) | (float_arg_count << 12) | ((stack_offset - 16) >> 2))); + break; + default: + arg_count--; + word_arg_offset -= sizeof(sljit_sw); + stack_offset = offsets[arg_count]; + + SLJIT_ASSERT(stack_offset >= word_arg_offset); + + if (stack_offset != word_arg_offset) { + if (stack_offset < 16) { + if (src_offset == stack_offset) { + FAIL_IF(push_inst16(compiler, MOV | (src_offset << 1) | 4 | (1 << 7))); + *src = TMP_REG1; + } + else if (src_offset == word_arg_offset) { + *src = 1 + (stack_offset >> 2); + src_offset = stack_offset; + } + FAIL_IF(push_inst16(compiler, MOV | (stack_offset >> 2) | (word_arg_offset << 1))); + } else + FAIL_IF(push_inst16(compiler, STR_SP | (word_arg_offset << 6) | ((stack_offset - 16) >> 2))); + } + break; + } + + types >>= SLJIT_DEF_SHIFT; + } + + return SLJIT_SUCCESS; +} + +static sljit_s32 softfloat_post_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types) +{ + sljit_s32 stack_size = 0; + + if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32) + FAIL_IF(push_inst32(compiler, VMOV | (0 << 16) | (0 << 12))); + if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64) + FAIL_IF(push_inst32(compiler, VMOV2 | (1 << 16) | (0 << 12) | 0)); + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + stack_size += sizeof(sljit_f32); + break; + case SLJIT_ARG_TYPE_F64: + if (stack_size & 0x7) + stack_size += sizeof(sljit_sw); + stack_size += sizeof(sljit_f64); + break; + default: + stack_size += sizeof(sljit_sw); + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + if (stack_size <= 16) + return SLJIT_SUCCESS; + + return push_inst16(compiler, ADD_SP | ((((stack_size - 16) + 0x7) & ~0x7) >> 2)); +} + +#else + +static sljit_s32 hardfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types) +{ + sljit_u32 remap = 0; + sljit_u32 offset = 0; + sljit_u32 new_offset, mask; + + /* Remove return value. */ + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32) { + new_offset = 0; + mask = 1; + + while (remap & mask) { + new_offset++; + mask <<= 1; + } + remap |= mask; + + if (offset != new_offset) + FAIL_IF(push_inst32(compiler, VMOV_F32 | DD4((new_offset >> 1) + 1) + | ((new_offset & 0x1) ? 
0x400000 : 0) | DM4((offset >> 1) + 1))); + + offset += 2; + } + else if ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F64) { + new_offset = 0; + mask = 3; + + while (remap & mask) { + new_offset += 2; + mask <<= 2; + } + remap |= mask; + + if (offset != new_offset) + FAIL_IF(push_inst32(compiler, VMOV_F32 | SLJIT_F32_OP | DD4((new_offset >> 1) + 1) | DM4((offset >> 1) + 1))); + + offset += 2; + } + arg_types >>= SLJIT_DEF_SHIFT; + } + + return SLJIT_SUCCESS; +} + +#endif + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ +#ifdef __SOFTFP__ + struct sljit_jump *jump; +#endif + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); + +#ifdef __SOFTFP__ + PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + jump = sljit_emit_jump(compiler, type); + PTR_FAIL_IF(jump == NULL); + + PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types)); + return jump; +#else + PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_jump(compiler, type); +#endif +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) { struct sljit_jump *jump; CHECK_ERROR(); - check_sljit_emit_ijump(compiler, type, src, srcw); + CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); ADJUST_LOCAL_OFFSET(src, srcw); - /* In ARM, we don't need to touch the arguments. */ - if (src & SLJIT_IMM) { - jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); - FAIL_IF(!jump); - set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0)); - jump->u.target = srcw; + SLJIT_ASSERT(reg_map[TMP_REG1] != 14); - FAIL_IF(emit_imm32_const(compiler, TMP_REG1, 0)); - jump->addr = compiler->size; - FAIL_IF(push_inst16(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RN3(TMP_REG1))); - } - else { - if (src <= TMP_REG3) + if (!(src & SLJIT_IMM)) { + if (FAST_IS_REG(src)) { + SLJIT_ASSERT(reg_map[src] != 14); return push_inst16(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RN3(src)); + } - FAIL_IF(emit_op_mem(compiler, WORD_SIZE, type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, src, srcw)); + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, src, srcw, TMP_REG1)); if (type >= SLJIT_FAST_CALL) return push_inst16(compiler, BLX | RN3(TMP_REG1)); } - return SLJIT_SUCCESS; + + /* These jumps are converted to jump/call instructions when possible. */ + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF(!jump); + set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0)); + jump->u.target = srcw; + + FAIL_IF(emit_imm32_const(compiler, TMP_REG1, 0)); + jump->addr = compiler->size; + return push_inst16(compiler, (type <= SLJIT_JUMP ? 
BX : BLX) | RN3(TMP_REG1)); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw, - sljit_si type) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) { - sljit_si dst_r, flags = GET_ALL_FLAGS(op); - sljit_ins ins; - sljit_uw cc; + CHECK_ERROR(); + CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + +#ifdef __SOFTFP__ + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1)); + src = TMP_REG1; + } + + FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw)); + + return softfloat_post_call_with_args(compiler, arg_types); +#else /* !__SOFTFP__ */ + FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_ijump(compiler, type, src, srcw); +#endif /* __SOFTFP__ */ +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 type) +{ + sljit_s32 dst_r, flags = GET_ALL_FLAGS(op); + sljit_ins cc; CHECK_ERROR(); - check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); + CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); ADJUST_LOCAL_OFFSET(dst, dstw); - ADJUST_LOCAL_OFFSET(src, srcw); - - if (dst == SLJIT_UNUSED) - return SLJIT_SUCCESS; op = GET_OPCODE(op); - cc = get_cc(type); - dst_r = (dst <= TMP_REG3) ? dst : TMP_REG2; + cc = get_cc(type & 0xff); + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; if (op < SLJIT_ADD) { FAIL_IF(push_inst16(compiler, IT | (cc << 4) | (((cc & 0x1) ^ 0x1) << 3) | 0x4)); @@ -1933,82 +2174,197 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *com FAIL_IF(push_inst32(compiler, MOV_WI | RD4(dst_r) | 1)); FAIL_IF(push_inst32(compiler, MOV_WI | RD4(dst_r) | 0)); } else { + /* The movsi (immediate) instruction does not set flags in IT block. */ FAIL_IF(push_inst16(compiler, MOVSI | RDN3(dst_r) | 1)); FAIL_IF(push_inst16(compiler, MOVSI | RDN3(dst_r) | 0)); } - return dst_r == TMP_REG2 ? emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw) : SLJIT_SUCCESS; + if (!(dst & SLJIT_MEM)) + return SLJIT_SUCCESS; + return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG1, dst, dstw, TMP_REG2); } - ins = (op == SLJIT_AND ? ANDI : (op == SLJIT_OR ? ORRI : EORI)); - if ((op == SLJIT_OR || op == SLJIT_XOR) && dst <= TMP_REG3 && dst == src) { - /* Does not change the other bits. */ + if (dst & SLJIT_MEM) + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, dst, dstw, TMP_REG2)); + + if (op == SLJIT_AND) { + FAIL_IF(push_inst16(compiler, IT | (cc << 4) | (((cc & 0x1) ^ 0x1) << 3) | 0x4)); + FAIL_IF(push_inst32(compiler, ANDI | RN4(dst_r) | RD4(dst_r) | 1)); + FAIL_IF(push_inst32(compiler, ANDI | RN4(dst_r) | RD4(dst_r) | 0)); + } + else { FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8)); - FAIL_IF(push_inst32(compiler, ins | RN4(src) | RD4(dst) | 1)); - if (flags & SLJIT_SET_E) { - /* The condition must always be set, even if the ORRI/EORI is not executed above. 
*/ - if (reg_map[dst] <= 7) - return push_inst16(compiler, MOVS | RD3(TMP_REG1) | RN3(dst)); - return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(TMP_REG1) | RM4(dst)); - } + FAIL_IF(push_inst32(compiler, ((op == SLJIT_OR) ? ORRI : EORI) | RN4(dst_r) | RD4(dst_r) | 1)); + } + + if (dst & SLJIT_MEM) + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG1, dst, dstw, TMP_REG2)); + + if (!(flags & SLJIT_SET_Z)) return SLJIT_SUCCESS; + + /* The condition must always be set, even if the ORR/EORI is not executed above. */ + return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(TMP_REG1) | RM4(dst_r)); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 dst_reg, + sljit_s32 src, sljit_sw srcw) +{ + sljit_uw cc, tmp; + + CHECK_ERROR(); + CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); + + dst_reg &= ~SLJIT_I32_OP; + + cc = get_cc(type & 0xff); + + if (!(src & SLJIT_IMM)) { + FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8)); + return push_inst16(compiler, MOV | SET_REGS44(dst_reg, src)); } - compiler->cache_arg = 0; - compiler->cache_argw = 0; - if (src & SLJIT_MEM) { - FAIL_IF(emit_op_mem2(compiler, WORD_SIZE, TMP_REG1, src, srcw, dst, dstw)); - src = TMP_REG1; - srcw = 0; - } else if (src & SLJIT_IMM) { - FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); - src = TMP_REG1; - srcw = 0; + tmp = (sljit_uw) srcw; + + if (tmp < 0x10000) { + /* set low 16 bits, set hi 16 bits to 0. */ + FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8)); + return push_inst32(compiler, MOVW | RD4(dst_reg) + | COPY_BITS(tmp, 12, 16, 4) | COPY_BITS(tmp, 11, 26, 1) | COPY_BITS(tmp, 8, 12, 3) | (tmp & 0xff)); } - FAIL_IF(push_inst16(compiler, IT | (cc << 4) | (((cc & 0x1) ^ 0x1) << 3) | 0x4)); - FAIL_IF(push_inst32(compiler, ins | RN4(src) | RD4(dst_r) | 1)); - FAIL_IF(push_inst32(compiler, ins | RN4(src) | RD4(dst_r) | 0)); - if (dst_r == TMP_REG2) - FAIL_IF(emit_op_mem2(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw, 0, 0)); + tmp = get_imm(srcw); + if (tmp != INVALID_IMM) { + FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8)); + return push_inst32(compiler, MOV_WI | RD4(dst_reg) | tmp); + } - if (flags & SLJIT_SET_E) { - /* The condition must always be set, even if the ORR/EORI is not executed above. 
*/ - if (reg_map[dst_r] <= 7) - return push_inst16(compiler, MOVS | RD3(TMP_REG1) | RN3(dst_r)); - return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(TMP_REG1) | RM4(dst_r)); + tmp = get_imm(~srcw); + if (tmp != INVALID_IMM) { + FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8)); + return push_inst32(compiler, MVN_WI | RD4(dst_reg) | tmp); } - return SLJIT_SUCCESS; + + FAIL_IF(push_inst16(compiler, IT | (cc << 4) | ((cc & 0x1) << 3) | 0x4)); + + tmp = (sljit_uw) srcw; + FAIL_IF(push_inst32(compiler, MOVW | RD4(dst_reg) + | COPY_BITS(tmp, 12, 16, 4) | COPY_BITS(tmp, 11, 26, 1) | COPY_BITS(tmp, 8, 12, 3) | (tmp & 0xff))); + return push_inst32(compiler, MOVT | RD4(dst_reg) + | COPY_BITS(tmp, 12 + 16, 16, 4) | COPY_BITS(tmp, 11 + 16, 26, 1) | COPY_BITS(tmp, 8 + 16, 12, 3) | ((tmp & 0xff0000) >> 16)); } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_s32 flags; + sljit_ins inst; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + + if ((mem & OFFS_REG_MASK) || (memw > 255 && memw < -255)) + return SLJIT_ERR_UNSUPPORTED; + + if (type & SLJIT_MEM_SUPP) + return SLJIT_SUCCESS; + + switch (type & 0xff) { + case SLJIT_MOV: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: + case SLJIT_MOV_P: + flags = WORD_SIZE; + break; + case SLJIT_MOV_U8: + flags = BYTE_SIZE; + break; + case SLJIT_MOV_S8: + flags = BYTE_SIZE | SIGNED; + break; + case SLJIT_MOV_U16: + flags = HALF_SIZE; + break; + case SLJIT_MOV_S16: + flags = HALF_SIZE | SIGNED; + break; + default: + SLJIT_UNREACHABLE(); + flags = WORD_SIZE; + break; + } + + if (type & SLJIT_MEM_STORE) + flags |= STORE; + + inst = sljit_mem32[flags] | 0x900; + + if (type & SLJIT_MEM_PRE) + inst |= 0x400; + + if (memw >= 0) + inst |= 0x200; + else + memw = -memw; + + return push_inst32(compiler, inst | RT4(reg) | RN4(mem & REG_MASK) | memw); +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) { struct sljit_const *const_; - sljit_si dst_r; + sljit_s32 dst_r; CHECK_ERROR_PTR(); - check_sljit_emit_const(compiler, dst, dstw, init_value); + CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value)); ADJUST_LOCAL_OFFSET(dst, dstw); const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); PTR_FAIL_IF(!const_); set_const(const_, compiler); - dst_r = (dst <= TMP_REG3) ? dst : TMP_REG1; + dst_r = FAST_IS_REG(dst) ? 
dst : TMP_REG1; PTR_FAIL_IF(emit_imm32_const(compiler, dst_r, init_value)); if (dst & SLJIT_MEM) - PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw)); + PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2)); return const_; } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + struct sljit_put_label *put_label; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label)); + PTR_FAIL_IF(!put_label); + set_put_label(put_label, compiler, 0); + + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; + PTR_FAIL_IF(emit_imm32_const(compiler, dst_r, 0)); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2)); + return put_label; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) { - inline_set_jump_addr(addr, new_addr, 1); + sljit_u16 *inst = (sljit_u16*)addr; + modify_imm32_const(inst, new_target); + inst = (sljit_u16 *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 4); } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) { - sljit_uh* inst = (sljit_uh*)addr; + sljit_u16 *inst = (sljit_u16*)addr; modify_imm32_const(inst, new_constant); - SLJIT_CACHE_FLUSH(inst, inst + 3); + inst = (sljit_u16 *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 4); } diff --git a/src/3rd/pcre/sjarmv5.c b/src/3rd/pcre/sjarmv5.c deleted file mode 100644 index e3ca3d9bb17..00000000000 --- a/src/3rd/pcre/sjarmv5.c +++ /dev/null @@ -1,2519 +0,0 @@ -/* - * Stack-less Just-In-Time compiler - * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. - * - * Redistribution and use in source and binary forms, with or without modification, are - * permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, this list - * of conditions and the following disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) -{ -#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) - return "ARMv7" SLJIT_CPUINFO; -#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - return "ARMv5" SLJIT_CPUINFO; -#else -#error "Internal error: Unknown ARM architecture" -#endif -} - -/* Last register + 1. */ -#define TMP_REG1 (SLJIT_NO_REGISTERS + 1) -#define TMP_REG2 (SLJIT_NO_REGISTERS + 2) -#define TMP_REG3 (SLJIT_NO_REGISTERS + 3) -#define TMP_PC (SLJIT_NO_REGISTERS + 4) - -#define TMP_FREG1 (0) -#define TMP_FREG2 (SLJIT_FLOAT_REG6 + 1) - -/* In ARM instruction words. - Cache lines are usually 32 byte aligned. */ -#define CONST_POOL_ALIGNMENT 8 -#define CONST_POOL_EMPTY 0xffffffff - -#define ALIGN_INSTRUCTION(ptr) \ - (sljit_uw*)(((sljit_uw)(ptr) + (CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1) & ~((CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1)) -#define MAX_DIFFERENCE(max_diff) \ - (((max_diff) / (sljit_si)sizeof(sljit_uw)) - (CONST_POOL_ALIGNMENT - 1)) - -/* See sljit_emit_enter and sljit_emit_op0 if you want to change them. */ -static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { - 0, 0, 1, 2, 10, 11, 4, 5, 6, 7, 8, 13, 3, 12, 14, 15 -}; - -#define RM(rm) (reg_map[rm]) -#define RD(rd) (reg_map[rd] << 12) -#define RN(rn) (reg_map[rn] << 16) - -/* --------------------------------------------------------------------- */ -/* Instrucion forms */ -/* --------------------------------------------------------------------- */ - -/* The instruction includes the AL condition. - INST_NAME - CONDITIONAL remove this flag. */ -#define COND_MASK 0xf0000000 -#define CONDITIONAL 0xe0000000 -#define PUSH_POOL 0xff000000 - -/* DP - Data Processing instruction (use with EMIT_DATA_PROCESS_INS). */ -#define ADC_DP 0x5 -#define ADD_DP 0x4 -#define AND_DP 0x0 -#define B 0xea000000 -#define BIC_DP 0xe -#define BL 0xeb000000 -#define BLX 0xe12fff30 -#define BX 0xe12fff10 -#define CLZ 0xe16f0f10 -#define CMP_DP 0xa -#define BKPT 0xe1200070 -#define EOR_DP 0x1 -#define MOV_DP 0xd -#define MUL 0xe0000090 -#define MVN_DP 0xf -#define NOP 0xe1a00000 -#define ORR_DP 0xc -#define PUSH 0xe92d0000 -#define POP 0xe8bd0000 -#define RSB_DP 0x3 -#define RSC_DP 0x7 -#define SBC_DP 0x6 -#define SMULL 0xe0c00090 -#define SUB_DP 0x2 -#define UMULL 0xe0800090 -#define VABS_F32 0xeeb00ac0 -#define VADD_F32 0xee300a00 -#define VCMP_F32 0xeeb40a40 -#define VDIV_F32 0xee800a00 -#define VMOV_F32 0xeeb00a40 -#define VMRS 0xeef1fa10 -#define VMUL_F32 0xee200a00 -#define VNEG_F32 0xeeb10a40 -#define VSTR_F32 0xed000a00 -#define VSUB_F32 0xee300a40 - -#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) -/* Arm v7 specific instructions. */ -#define MOVW 0xe3000000 -#define MOVT 0xe3400000 -#define SXTB 0xe6af0070 -#define SXTH 0xe6bf0070 -#define UXTB 0xe6ef0070 -#define UXTH 0xe6ff0070 -#endif - -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - -static sljit_si push_cpool(struct sljit_compiler *compiler) -{ - /* Pushing the constant pool into the instruction stream. */ - sljit_uw* inst; - sljit_uw* cpool_ptr; - sljit_uw* cpool_end; - sljit_si i; - - /* The label could point the address after the constant pool. 
*/ - if (compiler->last_label && compiler->last_label->size == compiler->size) - compiler->last_label->size += compiler->cpool_fill + (CONST_POOL_ALIGNMENT - 1) + 1; - - SLJIT_ASSERT(compiler->cpool_fill > 0 && compiler->cpool_fill <= CPOOL_SIZE); - inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw)); - FAIL_IF(!inst); - compiler->size++; - *inst = 0xff000000 | compiler->cpool_fill; - - for (i = 0; i < CONST_POOL_ALIGNMENT - 1; i++) { - inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw)); - FAIL_IF(!inst); - compiler->size++; - *inst = 0; - } - - cpool_ptr = compiler->cpool; - cpool_end = cpool_ptr + compiler->cpool_fill; - while (cpool_ptr < cpool_end) { - inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw)); - FAIL_IF(!inst); - compiler->size++; - *inst = *cpool_ptr++; - } - compiler->cpool_diff = CONST_POOL_EMPTY; - compiler->cpool_fill = 0; - return SLJIT_SUCCESS; -} - -static sljit_si push_inst(struct sljit_compiler *compiler, sljit_uw inst) -{ - sljit_uw* ptr; - - if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092))) - FAIL_IF(push_cpool(compiler)); - - ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw)); - FAIL_IF(!ptr); - compiler->size++; - *ptr = inst; - return SLJIT_SUCCESS; -} - -static sljit_si push_inst_with_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal) -{ - sljit_uw* ptr; - sljit_uw cpool_index = CPOOL_SIZE; - sljit_uw* cpool_ptr; - sljit_uw* cpool_end; - sljit_ub* cpool_unique_ptr; - - if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092))) - FAIL_IF(push_cpool(compiler)); - else if (compiler->cpool_fill > 0) { - cpool_ptr = compiler->cpool; - cpool_end = cpool_ptr + compiler->cpool_fill; - cpool_unique_ptr = compiler->cpool_unique; - do { - if ((*cpool_ptr == literal) && !(*cpool_unique_ptr)) { - cpool_index = cpool_ptr - compiler->cpool; - break; - } - cpool_ptr++; - cpool_unique_ptr++; - } while (cpool_ptr < cpool_end); - } - - if (cpool_index == CPOOL_SIZE) { - /* Must allocate a new entry in the literal pool. 
*/ - if (compiler->cpool_fill < CPOOL_SIZE) { - cpool_index = compiler->cpool_fill; - compiler->cpool_fill++; - } - else { - FAIL_IF(push_cpool(compiler)); - cpool_index = 0; - compiler->cpool_fill = 1; - } - } - - SLJIT_ASSERT((inst & 0xfff) == 0); - ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw)); - FAIL_IF(!ptr); - compiler->size++; - *ptr = inst | cpool_index; - - compiler->cpool[cpool_index] = literal; - compiler->cpool_unique[cpool_index] = 0; - if (compiler->cpool_diff == CONST_POOL_EMPTY) - compiler->cpool_diff = compiler->size; - return SLJIT_SUCCESS; -} - -static sljit_si push_inst_with_unique_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal) -{ - sljit_uw* ptr; - if (SLJIT_UNLIKELY((compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)) || compiler->cpool_fill >= CPOOL_SIZE)) - FAIL_IF(push_cpool(compiler)); - - SLJIT_ASSERT(compiler->cpool_fill < CPOOL_SIZE && (inst & 0xfff) == 0); - ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw)); - FAIL_IF(!ptr); - compiler->size++; - *ptr = inst | compiler->cpool_fill; - - compiler->cpool[compiler->cpool_fill] = literal; - compiler->cpool_unique[compiler->cpool_fill] = 1; - compiler->cpool_fill++; - if (compiler->cpool_diff == CONST_POOL_EMPTY) - compiler->cpool_diff = compiler->size; - return SLJIT_SUCCESS; -} - -static SLJIT_INLINE sljit_si prepare_blx(struct sljit_compiler *compiler) -{ - /* Place for at least two instruction (doesn't matter whether the first has a literal). */ - if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4088))) - return push_cpool(compiler); - return SLJIT_SUCCESS; -} - -static SLJIT_INLINE sljit_si emit_blx(struct sljit_compiler *compiler) -{ - /* Must follow tightly the previous instruction (to be able to convert it to bl instruction). */ - SLJIT_ASSERT(compiler->cpool_diff == CONST_POOL_EMPTY || compiler->size - compiler->cpool_diff < MAX_DIFFERENCE(4092)); - return push_inst(compiler, BLX | RM(TMP_REG1)); -} - -static sljit_uw patch_pc_relative_loads(sljit_uw *last_pc_patch, sljit_uw *code_ptr, sljit_uw* const_pool, sljit_uw cpool_size) -{ - sljit_uw diff; - sljit_uw ind; - sljit_uw counter = 0; - sljit_uw* clear_const_pool = const_pool; - sljit_uw* clear_const_pool_end = const_pool + cpool_size; - - SLJIT_ASSERT(const_pool - code_ptr <= CONST_POOL_ALIGNMENT); - /* Set unused flag for all literals in the constant pool. - I.e.: unused literals can belong to branches, which can be encoded as B or BL. - We can "compress" the constant pool by discarding these literals. */ - while (clear_const_pool < clear_const_pool_end) - *clear_const_pool++ = (sljit_uw)(-1); - - while (last_pc_patch < code_ptr) { - /* Data transfer instruction with Rn == r15. */ - if ((*last_pc_patch & 0x0c0f0000) == 0x040f0000) { - diff = const_pool - last_pc_patch; - ind = (*last_pc_patch) & 0xfff; - - /* Must be a load instruction with immediate offset. 
*/ - SLJIT_ASSERT(ind < cpool_size && !(*last_pc_patch & (1 << 25)) && (*last_pc_patch & (1 << 20))); - if ((sljit_si)const_pool[ind] < 0) { - const_pool[ind] = counter; - ind = counter; - counter++; - } - else - ind = const_pool[ind]; - - SLJIT_ASSERT(diff >= 1); - if (diff >= 2 || ind > 0) { - diff = (diff + ind - 2) << 2; - SLJIT_ASSERT(diff <= 0xfff); - *last_pc_patch = (*last_pc_patch & ~0xfff) | diff; - } - else - *last_pc_patch = (*last_pc_patch & ~(0xfff | (1 << 23))) | 0x004; - } - last_pc_patch++; - } - return counter; -} - -/* In some rare ocasions we may need future patches. The probability is close to 0 in practice. */ -struct future_patch { - struct future_patch* next; - sljit_si index; - sljit_si value; -}; - -static SLJIT_INLINE sljit_si resolve_const_pool_index(struct future_patch **first_patch, sljit_uw cpool_current_index, sljit_uw *cpool_start_address, sljit_uw *buf_ptr) -{ - sljit_si value; - struct future_patch *curr_patch, *prev_patch; - - /* Using the values generated by patch_pc_relative_loads. */ - if (!*first_patch) - value = (sljit_si)cpool_start_address[cpool_current_index]; - else { - curr_patch = *first_patch; - prev_patch = 0; - while (1) { - if (!curr_patch) { - value = (sljit_si)cpool_start_address[cpool_current_index]; - break; - } - if ((sljit_uw)curr_patch->index == cpool_current_index) { - value = curr_patch->value; - if (prev_patch) - prev_patch->next = curr_patch->next; - else - *first_patch = curr_patch->next; - SLJIT_FREE(curr_patch); - break; - } - prev_patch = curr_patch; - curr_patch = curr_patch->next; - } - } - - if (value >= 0) { - if ((sljit_uw)value > cpool_current_index) { - curr_patch = (struct future_patch*)SLJIT_MALLOC(sizeof(struct future_patch)); - if (!curr_patch) { - while (*first_patch) { - curr_patch = *first_patch; - *first_patch = (*first_patch)->next; - SLJIT_FREE(curr_patch); - } - return SLJIT_ERR_ALLOC_FAILED; - } - curr_patch->next = *first_patch; - curr_patch->index = value; - curr_patch->value = cpool_start_address[value]; - *first_patch = curr_patch; - } - cpool_start_address[value] = *buf_ptr; - } - return SLJIT_SUCCESS; -} - -#else - -static sljit_si push_inst(struct sljit_compiler *compiler, sljit_uw inst) -{ - sljit_uw* ptr; - - ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw)); - FAIL_IF(!ptr); - compiler->size++; - *ptr = inst; - return SLJIT_SUCCESS; -} - -static SLJIT_INLINE sljit_si emit_imm(struct sljit_compiler *compiler, sljit_si reg, sljit_sw imm) -{ - FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff))); - return push_inst(compiler, MOVT | RD(reg) | ((imm >> 12) & 0xf0000) | ((imm >> 16) & 0xfff)); -} - -#endif - -static SLJIT_INLINE sljit_si detect_jump_type(struct sljit_jump *jump, sljit_uw *code_ptr, sljit_uw *code) -{ - sljit_sw diff; - - if (jump->flags & SLJIT_REWRITABLE_JUMP) - return 0; - -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - if (jump->flags & IS_BL) - code_ptr--; - - if (jump->flags & JUMP_ADDR) - diff = ((sljit_sw)jump->u.target - (sljit_sw)(code_ptr + 2)); - else { - SLJIT_ASSERT(jump->flags & JUMP_LABEL); - diff = ((sljit_sw)(code + jump->u.label->size) - (sljit_sw)(code_ptr + 2)); - } - - /* Branch to Thumb code has not been optimized yet. 
*/ - if (diff & 0x3) - return 0; - - if (jump->flags & IS_BL) { - if (diff <= 0x01ffffff && diff >= -0x02000000) { - *code_ptr = (BL - CONDITIONAL) | (*(code_ptr + 1) & COND_MASK); - jump->flags |= PATCH_B; - return 1; - } - } - else { - if (diff <= 0x01ffffff && diff >= -0x02000000) { - *code_ptr = (B - CONDITIONAL) | (*code_ptr & COND_MASK); - jump->flags |= PATCH_B; - } - } -#else - if (jump->flags & JUMP_ADDR) - diff = ((sljit_sw)jump->u.target - (sljit_sw)code_ptr); - else { - SLJIT_ASSERT(jump->flags & JUMP_LABEL); - diff = ((sljit_sw)(code + jump->u.label->size) - (sljit_sw)code_ptr); - } - - /* Branch to Thumb code has not been optimized yet. */ - if (diff & 0x3) - return 0; - - if (diff <= 0x01ffffff && diff >= -0x02000000) { - code_ptr -= 2; - *code_ptr = ((jump->flags & IS_BL) ? (BL - CONDITIONAL) : (B - CONDITIONAL)) | (code_ptr[2] & COND_MASK); - jump->flags |= PATCH_B; - return 1; - } -#endif - return 0; -} - -static SLJIT_INLINE void inline_set_jump_addr(sljit_uw addr, sljit_uw new_addr, sljit_si flush) -{ -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - sljit_uw *ptr = (sljit_uw*)addr; - sljit_uw *inst = (sljit_uw*)ptr[0]; - sljit_uw mov_pc = ptr[1]; - sljit_si bl = (mov_pc & 0x0000f000) != RD(TMP_PC); - sljit_sw diff = (sljit_sw)(((sljit_sw)new_addr - (sljit_sw)(inst + 2)) >> 2); - - if (diff <= 0x7fffff && diff >= -0x800000) { - /* Turn to branch. */ - if (!bl) { - inst[0] = (mov_pc & COND_MASK) | (B - CONDITIONAL) | (diff & 0xffffff); - if (flush) { - SLJIT_CACHE_FLUSH(inst, inst + 1); - } - } else { - inst[0] = (mov_pc & COND_MASK) | (BL - CONDITIONAL) | (diff & 0xffffff); - inst[1] = NOP; - if (flush) { - SLJIT_CACHE_FLUSH(inst, inst + 2); - } - } - } else { - /* Get the position of the constant. */ - if (mov_pc & (1 << 23)) - ptr = inst + ((mov_pc & 0xfff) >> 2) + 2; - else - ptr = inst + 1; - - if (*inst != mov_pc) { - inst[0] = mov_pc; - if (!bl) { - if (flush) { - SLJIT_CACHE_FLUSH(inst, inst + 1); - } - } else { - inst[1] = BLX | RM(TMP_REG1); - if (flush) { - SLJIT_CACHE_FLUSH(inst, inst + 2); - } - } - } - *ptr = new_addr; - } -#else - sljit_uw *inst = (sljit_uw*)addr; - SLJIT_ASSERT((inst[0] & 0xfff00000) == MOVW && (inst[1] & 0xfff00000) == MOVT); - inst[0] = MOVW | (inst[0] & 0xf000) | ((new_addr << 4) & 0xf0000) | (new_addr & 0xfff); - inst[1] = MOVT | (inst[1] & 0xf000) | ((new_addr >> 12) & 0xf0000) | ((new_addr >> 16) & 0xfff); - if (flush) { - SLJIT_CACHE_FLUSH(inst, inst + 2); - } -#endif -} - -static sljit_uw get_imm(sljit_uw imm); - -static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw new_constant, sljit_si flush) -{ -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - sljit_uw *ptr = (sljit_uw*)addr; - sljit_uw *inst = (sljit_uw*)ptr[0]; - sljit_uw ldr_literal = ptr[1]; - sljit_uw src2; - - src2 = get_imm(new_constant); - if (src2) { - *inst = 0xe3a00000 | (ldr_literal & 0xf000) | src2; - if (flush) { - SLJIT_CACHE_FLUSH(inst, inst + 1); - } - return; - } - - src2 = get_imm(~new_constant); - if (src2) { - *inst = 0xe3e00000 | (ldr_literal & 0xf000) | src2; - if (flush) { - SLJIT_CACHE_FLUSH(inst, inst + 1); - } - return; - } - - if (ldr_literal & (1 << 23)) - ptr = inst + ((ldr_literal & 0xfff) >> 2) + 2; - else - ptr = inst + 1; - - if (*inst != ldr_literal) { - *inst = ldr_literal; - if (flush) { - SLJIT_CACHE_FLUSH(inst, inst + 1); - } - } - *ptr = new_constant; -#else - sljit_uw *inst = (sljit_uw*)addr; - SLJIT_ASSERT((inst[0] & 0xfff00000) == MOVW && (inst[1] & 0xfff00000) == MOVT); - inst[0] = MOVW | (inst[0] & 
0xf000) | ((new_constant << 4) & 0xf0000) | (new_constant & 0xfff); - inst[1] = MOVT | (inst[1] & 0xf000) | ((new_constant >> 12) & 0xf0000) | ((new_constant >> 16) & 0xfff); - if (flush) { - SLJIT_CACHE_FLUSH(inst, inst + 2); - } -#endif -} - -SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) -{ - struct sljit_memory_fragment *buf; - sljit_uw *code; - sljit_uw *code_ptr; - sljit_uw *buf_ptr; - sljit_uw *buf_end; - sljit_uw size; - sljit_uw word_count; -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - sljit_uw cpool_size; - sljit_uw cpool_skip_alignment; - sljit_uw cpool_current_index; - sljit_uw *cpool_start_address; - sljit_uw *last_pc_patch; - struct future_patch *first_patch; -#endif - - struct sljit_label *label; - struct sljit_jump *jump; - struct sljit_const *const_; - - CHECK_ERROR_PTR(); - check_sljit_generate_code(compiler); - reverse_buf(compiler); - - /* Second code generation pass. */ -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - size = compiler->size + (compiler->patches << 1); - if (compiler->cpool_fill > 0) - size += compiler->cpool_fill + CONST_POOL_ALIGNMENT - 1; -#else - size = compiler->size; -#endif - code = (sljit_uw*)SLJIT_MALLOC_EXEC(size * sizeof(sljit_uw)); - PTR_FAIL_WITH_EXEC_IF(code); - buf = compiler->buf; - -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - cpool_size = 0; - cpool_skip_alignment = 0; - cpool_current_index = 0; - cpool_start_address = NULL; - first_patch = NULL; - last_pc_patch = code; -#endif - - code_ptr = code; - word_count = 0; - - label = compiler->labels; - jump = compiler->jumps; - const_ = compiler->consts; - - if (label && label->size == 0) { - label->addr = (sljit_uw)code; - label->size = 0; - label = label->next; - } - - do { - buf_ptr = (sljit_uw*)buf->memory; - buf_end = buf_ptr + (buf->used_size >> 2); - do { - word_count++; -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - if (cpool_size > 0) { - if (cpool_skip_alignment > 0) { - buf_ptr++; - cpool_skip_alignment--; - } - else { - if (SLJIT_UNLIKELY(resolve_const_pool_index(&first_patch, cpool_current_index, cpool_start_address, buf_ptr))) { - SLJIT_FREE_EXEC(code); - compiler->error = SLJIT_ERR_ALLOC_FAILED; - return NULL; - } - buf_ptr++; - if (++cpool_current_index >= cpool_size) { - SLJIT_ASSERT(!first_patch); - cpool_size = 0; - if (label && label->size == word_count) { - /* Points after the current instruction. */ - label->addr = (sljit_uw)code_ptr; - label->size = code_ptr - code; - label = label->next; - } - } - } - } - else if ((*buf_ptr & 0xff000000) != PUSH_POOL) { -#endif - *code_ptr = *buf_ptr++; - /* These structures are ordered by their address. */ - SLJIT_ASSERT(!label || label->size >= word_count); - SLJIT_ASSERT(!jump || jump->addr >= word_count); - SLJIT_ASSERT(!const_ || const_->addr >= word_count); - if (jump && jump->addr == word_count) { -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - if (detect_jump_type(jump, code_ptr, code)) - code_ptr--; - jump->addr = (sljit_uw)code_ptr; -#else - jump->addr = (sljit_uw)(code_ptr - 2); - if (detect_jump_type(jump, code_ptr, code)) - code_ptr -= 2; -#endif - jump = jump->next; - } - if (label && label->size == word_count) { - /* code_ptr can be affected above. 
*/ - label->addr = (sljit_uw)(code_ptr + 1); - label->size = (code_ptr + 1) - code; - label = label->next; - } - if (const_ && const_->addr == word_count) { -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - const_->addr = (sljit_uw)code_ptr; -#else - const_->addr = (sljit_uw)(code_ptr - 1); -#endif - const_ = const_->next; - } - code_ptr++; -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - } - else { - /* Fortunately, no need to shift. */ - cpool_size = *buf_ptr++ & ~PUSH_POOL; - SLJIT_ASSERT(cpool_size > 0); - cpool_start_address = ALIGN_INSTRUCTION(code_ptr + 1); - cpool_current_index = patch_pc_relative_loads(last_pc_patch, code_ptr, cpool_start_address, cpool_size); - if (cpool_current_index > 0) { - /* Unconditional branch. */ - *code_ptr = B | (((cpool_start_address - code_ptr) + cpool_current_index - 2) & ~PUSH_POOL); - code_ptr = cpool_start_address + cpool_current_index; - } - cpool_skip_alignment = CONST_POOL_ALIGNMENT - 1; - cpool_current_index = 0; - last_pc_patch = code_ptr; - } -#endif - } while (buf_ptr < buf_end); - buf = buf->next; - } while (buf); - - SLJIT_ASSERT(!label); - SLJIT_ASSERT(!jump); - SLJIT_ASSERT(!const_); - -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - SLJIT_ASSERT(cpool_size == 0); - if (compiler->cpool_fill > 0) { - cpool_start_address = ALIGN_INSTRUCTION(code_ptr); - cpool_current_index = patch_pc_relative_loads(last_pc_patch, code_ptr, cpool_start_address, compiler->cpool_fill); - if (cpool_current_index > 0) - code_ptr = cpool_start_address + cpool_current_index; - - buf_ptr = compiler->cpool; - buf_end = buf_ptr + compiler->cpool_fill; - cpool_current_index = 0; - while (buf_ptr < buf_end) { - if (SLJIT_UNLIKELY(resolve_const_pool_index(&first_patch, cpool_current_index, cpool_start_address, buf_ptr))) { - SLJIT_FREE_EXEC(code); - compiler->error = SLJIT_ERR_ALLOC_FAILED; - return NULL; - } - buf_ptr++; - cpool_current_index++; - } - SLJIT_ASSERT(!first_patch); - } -#endif - - jump = compiler->jumps; - while (jump) { - buf_ptr = (sljit_uw*)jump->addr; - - if (jump->flags & PATCH_B) { - if (!(jump->flags & JUMP_ADDR)) { - SLJIT_ASSERT(jump->flags & JUMP_LABEL); - SLJIT_ASSERT(((sljit_sw)jump->u.label->addr - (sljit_sw)(buf_ptr + 2)) <= 0x01ffffff && ((sljit_sw)jump->u.label->addr - (sljit_sw)(buf_ptr + 2)) >= -0x02000000); - *buf_ptr |= (((sljit_sw)jump->u.label->addr - (sljit_sw)(buf_ptr + 2)) >> 2) & 0x00ffffff; - } - else { - SLJIT_ASSERT(((sljit_sw)jump->u.target - (sljit_sw)(buf_ptr + 2)) <= 0x01ffffff && ((sljit_sw)jump->u.target - (sljit_sw)(buf_ptr + 2)) >= -0x02000000); - *buf_ptr |= (((sljit_sw)jump->u.target - (sljit_sw)(buf_ptr + 2)) >> 2) & 0x00ffffff; - } - } - else if (jump->flags & SLJIT_REWRITABLE_JUMP) { -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - jump->addr = (sljit_uw)code_ptr; - code_ptr[0] = (sljit_uw)buf_ptr; - code_ptr[1] = *buf_ptr; - inline_set_jump_addr((sljit_uw)code_ptr, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0); - code_ptr += 2; -#else - inline_set_jump_addr((sljit_uw)buf_ptr, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0); -#endif - } - else { -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - if (jump->flags & IS_BL) - buf_ptr--; - if (*buf_ptr & (1 << 23)) - buf_ptr += ((*buf_ptr & 0xfff) >> 2) + 2; - else - buf_ptr += 1; - *buf_ptr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target; -#else - inline_set_jump_addr((sljit_uw)buf_ptr, (jump->flags & JUMP_LABEL) ? 
jump->u.label->addr : jump->u.target, 0); -#endif - } - jump = jump->next; - } - -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - const_ = compiler->consts; - while (const_) { - buf_ptr = (sljit_uw*)const_->addr; - const_->addr = (sljit_uw)code_ptr; - - code_ptr[0] = (sljit_uw)buf_ptr; - code_ptr[1] = *buf_ptr; - if (*buf_ptr & (1 << 23)) - buf_ptr += ((*buf_ptr & 0xfff) >> 2) + 2; - else - buf_ptr += 1; - /* Set the value again (can be a simple constant). */ - inline_set_const((sljit_uw)code_ptr, *buf_ptr, 0); - code_ptr += 2; - - const_ = const_->next; - } -#endif - - SLJIT_ASSERT(code_ptr - code <= (sljit_si)size); - - compiler->error = SLJIT_ERR_COMPILED; - compiler->executable_size = (code_ptr - code) * sizeof(sljit_uw); - SLJIT_CACHE_FLUSH(code, code_ptr); - return code; -} - -/* --------------------------------------------------------------------- */ -/* Entry, exit */ -/* --------------------------------------------------------------------- */ - -/* emit_op inp_flags. - WRITE_BACK must be the first, since it is a flag. */ -#define WRITE_BACK 0x01 -#define ALLOW_IMM 0x02 -#define ALLOW_INV_IMM 0x04 -#define ALLOW_ANY_IMM (ALLOW_IMM | ALLOW_INV_IMM) -#define ARG_TEST 0x08 - -/* Creates an index in data_transfer_insts array. */ -#define WORD_DATA 0x00 -#define BYTE_DATA 0x10 -#define HALF_DATA 0x20 -#define SIGNED_DATA 0x40 -#define LOAD_DATA 0x80 - -#define EMIT_INSTRUCTION(inst) \ - FAIL_IF(push_inst(compiler, (inst))) - -/* Condition: AL. */ -#define EMIT_DATA_PROCESS_INS(opcode, set_flags, dst, src1, src2) \ - (0xe0000000 | ((opcode) << 21) | (set_flags) | RD(dst) | RN(src1) | (src2)) - -static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si inp_flags, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w); - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) -{ - sljit_si size; - sljit_uw push; - - CHECK_ERROR(); - check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); - - compiler->scratches = scratches; - compiler->saveds = saveds; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; -#endif - - /* Push saved registers, temporary registers - stmdb sp!, {..., lr} */ - push = PUSH | (1 << 14); - if (scratches >= 5) - push |= 1 << 11; - if (scratches >= 4) - push |= 1 << 10; - if (saveds >= 5) - push |= 1 << 8; - if (saveds >= 4) - push |= 1 << 7; - if (saveds >= 3) - push |= 1 << 6; - if (saveds >= 2) - push |= 1 << 5; - if (saveds >= 1) - push |= 1 << 4; - EMIT_INSTRUCTION(push); - - /* Stack must be aligned to 8 bytes: */ - size = (1 + saveds) * sizeof(sljit_uw); - if (scratches >= 4) - size += (scratches - 3) * sizeof(sljit_uw); - local_size += size; - local_size = (local_size + 7) & ~7; - local_size -= size; - compiler->local_size = local_size; - if (local_size > 0) - FAIL_IF(emit_op(compiler, SLJIT_SUB, ALLOW_IMM, SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, local_size)); - - if (args >= 1) - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, SLJIT_SAVED_REG1, SLJIT_UNUSED, RM(SLJIT_SCRATCH_REG1))); - if (args >= 2) - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, SLJIT_SAVED_REG2, SLJIT_UNUSED, RM(SLJIT_SCRATCH_REG2))); - if (args >= 3) - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, SLJIT_SAVED_REG3, SLJIT_UNUSED, RM(SLJIT_SCRATCH_REG3))); - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct 
sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) -{ - sljit_si size; - - CHECK_ERROR_VOID(); - check_sljit_set_context(compiler, args, scratches, saveds, local_size); - - compiler->scratches = scratches; - compiler->saveds = saveds; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; -#endif - - size = (1 + saveds) * sizeof(sljit_uw); - if (scratches >= 4) - size += (scratches - 3) * sizeof(sljit_uw); - local_size += size; - local_size = (local_size + 7) & ~7; - local_size -= size; - compiler->local_size = local_size; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) -{ - sljit_uw pop; - - CHECK_ERROR(); - check_sljit_emit_return(compiler, op, src, srcw); - - FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); - - if (compiler->local_size > 0) - FAIL_IF(emit_op(compiler, SLJIT_ADD, ALLOW_IMM, SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, compiler->local_size)); - - pop = POP | (1 << 15); - /* Push saved registers, temporary registers - ldmia sp!, {..., pc} */ - if (compiler->scratches >= 5) - pop |= 1 << 11; - if (compiler->scratches >= 4) - pop |= 1 << 10; - if (compiler->saveds >= 5) - pop |= 1 << 8; - if (compiler->saveds >= 4) - pop |= 1 << 7; - if (compiler->saveds >= 3) - pop |= 1 << 6; - if (compiler->saveds >= 2) - pop |= 1 << 5; - if (compiler->saveds >= 1) - pop |= 1 << 4; - - return push_inst(compiler, pop); -} - -/* --------------------------------------------------------------------- */ -/* Operators */ -/* --------------------------------------------------------------------- */ - -/* s/l - store/load (1 bit) - u/s - signed/unsigned (1 bit) - w/b/h/N - word/byte/half/NOT allowed (2 bit) - It contans 16 items, but not all are different. */ - -static sljit_sw data_transfer_insts[16] = { -/* s u w */ 0xe5000000 /* str */, -/* s u b */ 0xe5400000 /* strb */, -/* s u h */ 0xe10000b0 /* strh */, -/* s u N */ 0x00000000 /* not allowed */, -/* s s w */ 0xe5000000 /* str */, -/* s s b */ 0xe5400000 /* strb */, -/* s s h */ 0xe10000b0 /* strh */, -/* s s N */ 0x00000000 /* not allowed */, - -/* l u w */ 0xe5100000 /* ldr */, -/* l u b */ 0xe5500000 /* ldrb */, -/* l u h */ 0xe11000b0 /* ldrh */, -/* l u N */ 0x00000000 /* not allowed */, -/* l s w */ 0xe5100000 /* ldr */, -/* l s b */ 0xe11000d0 /* ldrsb */, -/* l s h */ 0xe11000f0 /* ldrsh */, -/* l s N */ 0x00000000 /* not allowed */, -}; - -#define EMIT_DATA_TRANSFER(type, add, wb, target, base1, base2) \ - (data_transfer_insts[(type) >> 4] | ((add) << 23) | ((wb) << 21) | (reg_map[target] << 12) | (reg_map[base1] << 16) | (base2)) -/* Normal ldr/str instruction. - Type2: ldrsb, ldrh, ldrsh */ -#define IS_TYPE1_TRANSFER(type) \ - (data_transfer_insts[(type) >> 4] & 0x04000000) -#define TYPE2_TRANSFER_IMM(imm) \ - (((imm) & 0xf) | (((imm) & 0xf0) << 4) | (1 << 22)) - -/* flags: */ - /* Arguments are swapped. */ -#define ARGS_SWAPPED 0x01 - /* Inverted immediate. */ -#define INV_IMM 0x02 - /* Source and destination is register. */ -#define REG_DEST 0x04 -#define REG_SOURCE 0x08 - /* One instruction is enough. */ -#define FAST_DEST 0x10 - /* Multiple instructions are required. */ -#define SLOW_DEST 0x20 -/* SET_FLAGS must be (1 << 20) as it is also the value of S bit (can be used for optimization). 
*/ -#define SET_FLAGS (1 << 20) -/* dst: reg - src1: reg - src2: reg or imm (if allowed) - SRC2_IMM must be (1 << 25) as it is also the value of I bit (can be used for optimization). */ -#define SRC2_IMM (1 << 25) - -#define EMIT_DATA_PROCESS_INS_AND_RETURN(opcode) \ - return push_inst(compiler, EMIT_DATA_PROCESS_INS(opcode, flags & SET_FLAGS, dst, src1, (src2 & SRC2_IMM) ? src2 : RM(src2))) - -#define EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(opcode, dst, src1, src2) \ - return push_inst(compiler, EMIT_DATA_PROCESS_INS(opcode, flags & SET_FLAGS, dst, src1, src2)) - -#define EMIT_SHIFT_INS_AND_RETURN(opcode) \ - SLJIT_ASSERT(!(flags & INV_IMM) && !(src2 & SRC2_IMM)); \ - if (compiler->shift_imm != 0x20) { \ - SLJIT_ASSERT(src1 == TMP_REG1); \ - SLJIT_ASSERT(!(flags & ARGS_SWAPPED)); \ - if (compiler->shift_imm != 0) \ - return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, flags & SET_FLAGS, dst, SLJIT_UNUSED, (compiler->shift_imm << 7) | (opcode << 5) | reg_map[src2])); \ - return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, flags & SET_FLAGS, dst, SLJIT_UNUSED, reg_map[src2])); \ - } \ - return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, flags & SET_FLAGS, dst, SLJIT_UNUSED, (reg_map[(flags & ARGS_SWAPPED) ? src1 : src2] << 8) | (opcode << 5) | 0x10 | ((flags & ARGS_SWAPPED) ? reg_map[src2] : reg_map[src1]))); - -static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, - sljit_si dst, sljit_si src1, sljit_si src2) -{ - sljit_sw mul_inst; - - switch (GET_OPCODE(op)) { - case SLJIT_MOV: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED)); - if (dst != src2) { - if (src2 & SRC2_IMM) { - if (flags & INV_IMM) - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MVN_DP, dst, SLJIT_UNUSED, src2); - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MOV_DP, dst, SLJIT_UNUSED, src2); - } - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MOV_DP, dst, SLJIT_UNUSED, reg_map[src2]); - } - return SLJIT_SUCCESS; - - case SLJIT_MOV_UB: - case SLJIT_MOV_SB: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED)); - if ((flags & (REG_DEST | REG_SOURCE)) == (REG_DEST | REG_SOURCE)) { -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - if (op == SLJIT_MOV_UB) - return push_inst(compiler, EMIT_DATA_PROCESS_INS(AND_DP, 0, dst, src2, SRC2_IMM | 0xff)); - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (24 << 7) | reg_map[src2])); - return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (24 << 7) | (op == SLJIT_MOV_UB ? 0x20 : 0x40) | reg_map[dst])); -#else - return push_inst(compiler, (op == SLJIT_MOV_UB ? UXTB : SXTB) | RD(dst) | RM(src2)); -#endif - } - else if (dst != src2) { - SLJIT_ASSERT(src2 & SRC2_IMM); - if (flags & INV_IMM) - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MVN_DP, dst, SLJIT_UNUSED, src2); - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MOV_DP, dst, SLJIT_UNUSED, src2); - } - return SLJIT_SUCCESS; - - case SLJIT_MOV_UH: - case SLJIT_MOV_SH: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED)); - if ((flags & (REG_DEST | REG_SOURCE)) == (REG_DEST | REG_SOURCE)) { -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (16 << 7) | reg_map[src2])); - return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (16 << 7) | (op == SLJIT_MOV_UH ? 0x20 : 0x40) | reg_map[dst])); -#else - return push_inst(compiler, (op == SLJIT_MOV_UH ? 
UXTH : SXTH) | RD(dst) | RM(src2)); -#endif - } - else if (dst != src2) { - SLJIT_ASSERT(src2 & SRC2_IMM); - if (flags & INV_IMM) - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MVN_DP, dst, SLJIT_UNUSED, src2); - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MOV_DP, dst, SLJIT_UNUSED, src2); - } - return SLJIT_SUCCESS; - - case SLJIT_NOT: - if (src2 & SRC2_IMM) { - if (flags & INV_IMM) - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MOV_DP, dst, SLJIT_UNUSED, src2); - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MVN_DP, dst, SLJIT_UNUSED, src2); - } - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MVN_DP, dst, SLJIT_UNUSED, RM(src2)); - - case SLJIT_CLZ: - SLJIT_ASSERT(!(flags & INV_IMM)); - SLJIT_ASSERT(!(src2 & SRC2_IMM)); - FAIL_IF(push_inst(compiler, CLZ | RD(dst) | RM(src2))); - if (flags & SET_FLAGS) - EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(CMP_DP, SLJIT_UNUSED, dst, SRC2_IMM); - return SLJIT_SUCCESS; - - case SLJIT_ADD: - SLJIT_ASSERT(!(flags & INV_IMM)); - EMIT_DATA_PROCESS_INS_AND_RETURN(ADD_DP); - - case SLJIT_ADDC: - SLJIT_ASSERT(!(flags & INV_IMM)); - EMIT_DATA_PROCESS_INS_AND_RETURN(ADC_DP); - - case SLJIT_SUB: - SLJIT_ASSERT(!(flags & INV_IMM)); - if (!(flags & ARGS_SWAPPED)) - EMIT_DATA_PROCESS_INS_AND_RETURN(SUB_DP); - EMIT_DATA_PROCESS_INS_AND_RETURN(RSB_DP); - - case SLJIT_SUBC: - SLJIT_ASSERT(!(flags & INV_IMM)); - if (!(flags & ARGS_SWAPPED)) - EMIT_DATA_PROCESS_INS_AND_RETURN(SBC_DP); - EMIT_DATA_PROCESS_INS_AND_RETURN(RSC_DP); - - case SLJIT_MUL: - SLJIT_ASSERT(!(flags & INV_IMM)); - SLJIT_ASSERT(!(src2 & SRC2_IMM)); - if (SLJIT_UNLIKELY(op & SLJIT_SET_O)) - mul_inst = SMULL | (reg_map[TMP_REG3] << 16) | (reg_map[dst] << 12); - else - mul_inst = MUL | (reg_map[dst] << 16); - - if (dst != src2) - FAIL_IF(push_inst(compiler, mul_inst | (reg_map[src1] << 8) | reg_map[src2])); - else if (dst != src1) - FAIL_IF(push_inst(compiler, mul_inst | (reg_map[src2] << 8) | reg_map[src1])); - else { - /* Rm and Rd must not be the same register. */ - SLJIT_ASSERT(dst != TMP_REG1); - FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG1, SLJIT_UNUSED, reg_map[src2]))); - FAIL_IF(push_inst(compiler, mul_inst | (reg_map[src2] << 8) | reg_map[TMP_REG1])); - } - - if (!(op & SLJIT_SET_O)) - return SLJIT_SUCCESS; - - /* We need to use TMP_REG3. */ - compiler->cache_arg = 0; - compiler->cache_argw = 0; - /* cmp TMP_REG2, dst asr #31. */ - return push_inst(compiler, EMIT_DATA_PROCESS_INS(CMP_DP, SET_FLAGS, SLJIT_UNUSED, TMP_REG3, RM(dst) | 0xfc0)); - - case SLJIT_AND: - if (!(flags & INV_IMM)) - EMIT_DATA_PROCESS_INS_AND_RETURN(AND_DP); - EMIT_DATA_PROCESS_INS_AND_RETURN(BIC_DP); - - case SLJIT_OR: - SLJIT_ASSERT(!(flags & INV_IMM)); - EMIT_DATA_PROCESS_INS_AND_RETURN(ORR_DP); - - case SLJIT_XOR: - SLJIT_ASSERT(!(flags & INV_IMM)); - EMIT_DATA_PROCESS_INS_AND_RETURN(EOR_DP); - - case SLJIT_SHL: - EMIT_SHIFT_INS_AND_RETURN(0); - - case SLJIT_LSHR: - EMIT_SHIFT_INS_AND_RETURN(1); - - case SLJIT_ASHR: - EMIT_SHIFT_INS_AND_RETURN(2); - } - SLJIT_ASSERT_STOP(); - return SLJIT_SUCCESS; -} - -#undef EMIT_DATA_PROCESS_INS_AND_RETURN -#undef EMIT_FULL_DATA_PROCESS_INS_AND_RETURN -#undef EMIT_SHIFT_INS_AND_RETURN - -/* Tests whether the immediate can be stored in the 12 bit imm field. - Returns with 0 if not possible. 
*/ -static sljit_uw get_imm(sljit_uw imm) -{ - sljit_si rol; - - if (imm <= 0xff) - return SRC2_IMM | imm; - - if (!(imm & 0xff000000)) { - imm <<= 8; - rol = 8; - } - else { - imm = (imm << 24) | (imm >> 8); - rol = 0; - } - - if (!(imm & 0xff000000)) { - imm <<= 8; - rol += 4; - } - - if (!(imm & 0xf0000000)) { - imm <<= 4; - rol += 2; - } - - if (!(imm & 0xc0000000)) { - imm <<= 2; - rol += 1; - } - - if (!(imm & 0x00ffffff)) - return SRC2_IMM | (imm >> 24) | (rol << 8); - else - return 0; -} - -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) -static sljit_si generate_int(struct sljit_compiler *compiler, sljit_si reg, sljit_uw imm, sljit_si positive) -{ - sljit_uw mask; - sljit_uw imm1; - sljit_uw imm2; - sljit_si rol; - - /* Step1: Search a zero byte (8 continous zero bit). */ - mask = 0xff000000; - rol = 8; - while(1) { - if (!(imm & mask)) { - /* Rol imm by rol. */ - imm = (imm << rol) | (imm >> (32 - rol)); - /* Calculate arm rol. */ - rol = 4 + (rol >> 1); - break; - } - rol += 2; - mask >>= 2; - if (mask & 0x3) { - /* rol by 8. */ - imm = (imm << 8) | (imm >> 24); - mask = 0xff00; - rol = 24; - while (1) { - if (!(imm & mask)) { - /* Rol imm by rol. */ - imm = (imm << rol) | (imm >> (32 - rol)); - /* Calculate arm rol. */ - rol = (rol >> 1) - 8; - break; - } - rol += 2; - mask >>= 2; - if (mask & 0x3) - return 0; - } - break; - } - } - - /* The low 8 bit must be zero. */ - SLJIT_ASSERT(!(imm & 0xff)); - - if (!(imm & 0xff000000)) { - imm1 = SRC2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8); - imm2 = SRC2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8); - } - else if (imm & 0xc0000000) { - imm1 = SRC2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8); - imm <<= 8; - rol += 4; - - if (!(imm & 0xff000000)) { - imm <<= 8; - rol += 4; - } - - if (!(imm & 0xf0000000)) { - imm <<= 4; - rol += 2; - } - - if (!(imm & 0xc0000000)) { - imm <<= 2; - rol += 1; - } - - if (!(imm & 0x00ffffff)) - imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8); - else - return 0; - } - else { - if (!(imm & 0xf0000000)) { - imm <<= 4; - rol += 2; - } - - if (!(imm & 0xc0000000)) { - imm <<= 2; - rol += 1; - } - - imm1 = SRC2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8); - imm <<= 8; - rol += 4; - - if (!(imm & 0xf0000000)) { - imm <<= 4; - rol += 2; - } - - if (!(imm & 0xc0000000)) { - imm <<= 2; - rol += 1; - } - - if (!(imm & 0x00ffffff)) - imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8); - else - return 0; - } - - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(positive ? MOV_DP : MVN_DP, 0, reg, SLJIT_UNUSED, imm1)); - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(positive ? ORR_DP : BIC_DP, 0, reg, reg, imm2)); - return 1; -} -#endif - -static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si reg, sljit_uw imm) -{ - sljit_uw tmp; - -#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) - if (!(imm & ~0xffff)) - return push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff)); -#endif - - /* Create imm by 1 inst. */ - tmp = get_imm(imm); - if (tmp) { - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, reg, SLJIT_UNUSED, tmp)); - return SLJIT_SUCCESS; - } - - tmp = get_imm(~imm); - if (tmp) { - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MVN_DP, 0, reg, SLJIT_UNUSED, tmp)); - return SLJIT_SUCCESS; - } - -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - /* Create imm by 2 inst. */ - FAIL_IF(generate_int(compiler, reg, imm, 1)); - FAIL_IF(generate_int(compiler, reg, ~imm, 0)); - - /* Load integer. 
*/ - return push_inst_with_literal(compiler, EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, reg, TMP_PC, 0), imm); -#else - return emit_imm(compiler, reg, imm); -#endif -} - -/* Helper function. Dst should be reg + value, using at most 1 instruction, flags does not set. */ -static sljit_si emit_set_delta(struct sljit_compiler *compiler, sljit_si dst, sljit_si reg, sljit_sw value) -{ - if (value >= 0) { - value = get_imm(value); - if (value) - return push_inst(compiler, EMIT_DATA_PROCESS_INS(ADD_DP, 0, dst, reg, value)); - } - else { - value = get_imm(-value); - if (value) - return push_inst(compiler, EMIT_DATA_PROCESS_INS(SUB_DP, 0, dst, reg, value)); - } - return SLJIT_ERR_UNSUPPORTED; -} - -/* Can perform an operation using at most 1 instruction. */ -static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si inp_flags, sljit_si reg, sljit_si arg, sljit_sw argw) -{ - sljit_uw imm; - - if (arg & SLJIT_IMM) { - imm = get_imm(argw); - if (imm) { - if (inp_flags & ARG_TEST) - return 1; - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, reg, SLJIT_UNUSED, imm)); - return -1; - } - imm = get_imm(~argw); - if (imm) { - if (inp_flags & ARG_TEST) - return 1; - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MVN_DP, 0, reg, SLJIT_UNUSED, imm)); - return -1; - } - return (inp_flags & ARG_TEST) ? SLJIT_SUCCESS : 0; - } - - SLJIT_ASSERT(arg & SLJIT_MEM); - - /* Fast loads/stores. */ - if (arg & 0xf) { - if (!(arg & 0xf0)) { - if (IS_TYPE1_TRANSFER(inp_flags)) { - if (argw >= 0 && argw <= 0xfff) { - if (inp_flags & ARG_TEST) - return 1; - EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, argw)); - return -1; - } - if (argw < 0 && argw >= -0xfff) { - if (inp_flags & ARG_TEST) - return 1; - EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 0, inp_flags & WRITE_BACK, reg, arg & 0xf, -argw)); - return -1; - } - } - else { - if (argw >= 0 && argw <= 0xff) { - if (inp_flags & ARG_TEST) - return 1; - EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, TYPE2_TRANSFER_IMM(argw))); - return -1; - } - if (argw < 0 && argw >= -0xff) { - if (inp_flags & ARG_TEST) - return 1; - argw = -argw; - EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 0, inp_flags & WRITE_BACK, reg, arg & 0xf, TYPE2_TRANSFER_IMM(argw))); - return -1; - } - } - } - else if ((argw & 0x3) == 0 || IS_TYPE1_TRANSFER(inp_flags)) { - if (inp_flags & ARG_TEST) - return 1; - EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, - RM((arg >> 4) & 0xf) | (IS_TYPE1_TRANSFER(inp_flags) ? SRC2_IMM : 0) | ((argw & 0x3) << 7))); - return -1; - } - } - - return (inp_flags & ARG_TEST) ? SLJIT_SUCCESS : 0; -} - -/* See getput_arg below. - Note: can_cache is called only for binary operators. Those - operators always uses word arguments without write back. */ -static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) -{ - /* Immediate caching is not supported as it would be an operation on constant arguments. */ - if (arg & SLJIT_IMM) - return 0; - - /* Always a simple operation. */ - if (arg & 0xf0) - return 0; - - if (!(arg & 0xf)) { - /* Immediate access. 
*/ - if ((next_arg & SLJIT_MEM) && ((sljit_uw)argw - (sljit_uw)next_argw <= 0xfff || (sljit_uw)next_argw - (sljit_uw)argw <= 0xfff)) - return 1; - return 0; - } - - if (argw <= 0xfffff && argw >= -0xfffff) - return 0; - - if (argw == next_argw && (next_arg & SLJIT_MEM)) - return 1; - - if (arg == next_arg && ((sljit_uw)argw - (sljit_uw)next_argw <= 0xfff || (sljit_uw)next_argw - (sljit_uw)argw <= 0xfff)) - return 1; - - return 0; -} - -#define GETPUT_ARG_DATA_TRANSFER(add, wb, target, base, imm) \ - if (max_delta & 0xf00) \ - FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(inp_flags, add, wb, target, base, imm))); \ - else \ - FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(inp_flags, add, wb, target, base, TYPE2_TRANSFER_IMM(imm)))); - -#define TEST_WRITE_BACK() \ - if (inp_flags & WRITE_BACK) { \ - tmp_r = arg & 0xf; \ - if (reg == tmp_r) { \ - /* This can only happen for stores */ \ - /* since ldr reg, [reg, ...]! has no meaning */ \ - SLJIT_ASSERT(!(inp_flags & LOAD_DATA)); \ - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG3, SLJIT_UNUSED, RM(reg))); \ - reg = TMP_REG3; \ - } \ - } - -/* Emit the necessary instructions. See can_cache above. */ -static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si inp_flags, sljit_si reg, sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) -{ - sljit_si tmp_r; - sljit_sw max_delta; - sljit_sw sign; - sljit_uw imm; - - if (arg & SLJIT_IMM) { - SLJIT_ASSERT(inp_flags & LOAD_DATA); - return load_immediate(compiler, reg, argw); - } - - SLJIT_ASSERT(arg & SLJIT_MEM); - - tmp_r = (inp_flags & LOAD_DATA) ? reg : TMP_REG3; - max_delta = IS_TYPE1_TRANSFER(inp_flags) ? 0xfff : 0xff; - - if ((arg & 0xf) == SLJIT_UNUSED) { - /* Write back is not used. */ - imm = (sljit_uw)(argw - compiler->cache_argw); - if ((compiler->cache_arg & SLJIT_IMM) && (imm <= (sljit_uw)max_delta || imm >= (sljit_uw)-max_delta)) { - if (imm <= (sljit_uw)max_delta) { - sign = 1; - argw = argw - compiler->cache_argw; - } - else { - sign = 0; - argw = compiler->cache_argw - argw; - } - - GETPUT_ARG_DATA_TRANSFER(sign, 0, reg, TMP_REG3, argw); - return SLJIT_SUCCESS; - } - - /* With write back, we can create some sophisticated loads, but - it is hard to decide whether we should convert downward (0s) or upward (1s). 
*/ - imm = (sljit_uw)(argw - next_argw); - if ((next_arg & SLJIT_MEM) && (imm <= (sljit_uw)max_delta || imm >= (sljit_uw)-max_delta)) { - SLJIT_ASSERT(inp_flags & LOAD_DATA); - - compiler->cache_arg = SLJIT_IMM; - compiler->cache_argw = argw; - tmp_r = TMP_REG3; - } - - FAIL_IF(load_immediate(compiler, tmp_r, argw)); - GETPUT_ARG_DATA_TRANSFER(1, 0, reg, tmp_r, 0); - return SLJIT_SUCCESS; - } - - if (arg & 0xf0) { - SLJIT_ASSERT((argw & 0x3) && !(max_delta & 0xf00)); - if (inp_flags & WRITE_BACK) - tmp_r = arg & 0xf; - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, tmp_r, arg & 0xf, RM((arg >> 4) & 0xf) | ((argw & 0x3) << 7))); - EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, 0, reg, tmp_r, TYPE2_TRANSFER_IMM(0))); - return SLJIT_SUCCESS; - } - - imm = (sljit_uw)(argw - compiler->cache_argw); - if (compiler->cache_arg == arg && imm <= (sljit_uw)max_delta) { - SLJIT_ASSERT(!(inp_flags & WRITE_BACK)); - GETPUT_ARG_DATA_TRANSFER(1, 0, reg, TMP_REG3, imm); - return SLJIT_SUCCESS; - } - if (compiler->cache_arg == arg && imm >= (sljit_uw)-max_delta) { - SLJIT_ASSERT(!(inp_flags & WRITE_BACK)); - imm = (sljit_uw)-(sljit_sw)imm; - GETPUT_ARG_DATA_TRANSFER(0, 0, reg, TMP_REG3, imm); - return SLJIT_SUCCESS; - } - - imm = get_imm(argw & ~max_delta); - if (imm) { - TEST_WRITE_BACK(); - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, tmp_r, arg & 0xf, imm)); - GETPUT_ARG_DATA_TRANSFER(1, inp_flags & WRITE_BACK, reg, tmp_r, argw & max_delta); - return SLJIT_SUCCESS; - } - - imm = get_imm(-argw & ~max_delta); - if (imm) { - argw = -argw; - TEST_WRITE_BACK(); - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(SUB_DP, 0, tmp_r, arg & 0xf, imm)); - GETPUT_ARG_DATA_TRANSFER(0, inp_flags & WRITE_BACK, reg, tmp_r, argw & max_delta); - return SLJIT_SUCCESS; - } - - if ((compiler->cache_arg & SLJIT_IMM) && compiler->cache_argw == argw) { - TEST_WRITE_BACK(); - EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, RM(TMP_REG3) | (max_delta & 0xf00 ? SRC2_IMM : 0))); - return SLJIT_SUCCESS; - } - - if (argw == next_argw && (next_arg & SLJIT_MEM)) { - SLJIT_ASSERT(inp_flags & LOAD_DATA); - FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); - - compiler->cache_arg = SLJIT_IMM; - compiler->cache_argw = argw; - - TEST_WRITE_BACK(); - EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, RM(TMP_REG3) | (max_delta & 0xf00 ? SRC2_IMM : 0))); - return SLJIT_SUCCESS; - } - - imm = (sljit_uw)(argw - next_argw); - if (arg == next_arg && !(inp_flags & WRITE_BACK) && (imm <= (sljit_uw)max_delta || imm >= (sljit_uw)-max_delta)) { - SLJIT_ASSERT(inp_flags & LOAD_DATA); - FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG3, TMP_REG3, reg_map[arg & 0xf])); - - compiler->cache_arg = arg; - compiler->cache_argw = argw; - - GETPUT_ARG_DATA_TRANSFER(1, 0, reg, TMP_REG3, 0); - return SLJIT_SUCCESS; - } - - if ((arg & 0xf) == tmp_r) { - compiler->cache_arg = SLJIT_IMM; - compiler->cache_argw = argw; - tmp_r = TMP_REG3; - } - - FAIL_IF(load_immediate(compiler, tmp_r, argw)); - EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, reg_map[tmp_r] | (max_delta & 0xf00 ? 
SRC2_IMM : 0))); - return SLJIT_SUCCESS; -} - -static SLJIT_INLINE sljit_si emit_op_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) -{ - if (getput_arg_fast(compiler, flags, reg, arg, argw)) - return compiler->error; - compiler->cache_arg = 0; - compiler->cache_argw = 0; - return getput_arg(compiler, flags, reg, arg, argw, 0, 0); -} - -static SLJIT_INLINE sljit_si emit_op_mem2(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg1, sljit_sw arg1w, sljit_si arg2, sljit_sw arg2w) -{ - if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) - return compiler->error; - return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); -} - -static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si inp_flags, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) -{ - /* arg1 goes to TMP_REG1 or src reg - arg2 goes to TMP_REG2, imm or src reg - TMP_REG3 can be used for caching - result goes to TMP_REG2, so put result can use TMP_REG1 and TMP_REG3. */ - - /* We prefers register and simple consts. */ - sljit_si dst_r; - sljit_si src1_r; - sljit_si src2_r = 0; - sljit_si sugg_src2_r = TMP_REG2; - sljit_si flags = GET_FLAGS(op) ? SET_FLAGS : 0; - - compiler->cache_arg = 0; - compiler->cache_argw = 0; - - /* Destination check. */ - if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) { - if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI && !(src2 & SLJIT_MEM)) - return SLJIT_SUCCESS; - dst_r = TMP_REG2; - } - else if (dst <= TMP_REG3) { - dst_r = dst; - flags |= REG_DEST; - if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) - sugg_src2_r = dst_r; - } - else { - SLJIT_ASSERT(dst & SLJIT_MEM); - if (getput_arg_fast(compiler, inp_flags | ARG_TEST, TMP_REG2, dst, dstw)) { - flags |= FAST_DEST; - dst_r = TMP_REG2; - } - else { - flags |= SLOW_DEST; - dst_r = 0; - } - } - - /* Source 1. */ - if (src1 <= TMP_REG3) - src1_r = src1; - else if (src2 <= TMP_REG3) { - flags |= ARGS_SWAPPED; - src1_r = src2; - src2 = src1; - src2w = src1w; - } - else do { /* do { } while(0) is used because of breaks. */ - src1_r = 0; - if ((inp_flags & ALLOW_ANY_IMM) && (src1 & SLJIT_IMM)) { - /* The second check will generate a hit. */ - src2_r = get_imm(src1w); - if (src2_r) { - flags |= ARGS_SWAPPED; - src1 = src2; - src1w = src2w; - break; - } - if (inp_flags & ALLOW_INV_IMM) { - src2_r = get_imm(~src1w); - if (src2_r) { - flags |= ARGS_SWAPPED | INV_IMM; - src1 = src2; - src1w = src2w; - break; - } - } - if (GET_OPCODE(op) == SLJIT_ADD) { - src2_r = get_imm(-src1w); - if (src2_r) { - /* Note: ARGS_SWAPPED is intentionally not applied! */ - src1 = src2; - src1w = src2w; - op = SLJIT_SUB | GET_ALL_FLAGS(op); - break; - } - } - } - - if (getput_arg_fast(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w)) { - FAIL_IF(compiler->error); - src1_r = TMP_REG1; - } - } while (0); - - /* Source 2. */ - if (src2_r == 0) { - if (src2 <= TMP_REG3) { - src2_r = src2; - flags |= REG_SOURCE; - if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) - dst_r = src2_r; - } - else do { /* do { } while(0) is used because of breaks. 
*/ - if ((inp_flags & ALLOW_ANY_IMM) && (src2 & SLJIT_IMM)) { - src2_r = get_imm(src2w); - if (src2_r) - break; - if (inp_flags & ALLOW_INV_IMM) { - src2_r = get_imm(~src2w); - if (src2_r) { - flags |= INV_IMM; - break; - } - } - if (GET_OPCODE(op) == SLJIT_ADD) { - src2_r = get_imm(-src2w); - if (src2_r) { - op = SLJIT_SUB | GET_ALL_FLAGS(op); - flags &= ~ARGS_SWAPPED; - break; - } - } - if (GET_OPCODE(op) == SLJIT_SUB && !(flags & ARGS_SWAPPED)) { - src2_r = get_imm(-src2w); - if (src2_r) { - op = SLJIT_ADD | GET_ALL_FLAGS(op); - flags &= ~ARGS_SWAPPED; - break; - } - } - } - - /* src2_r is 0. */ - if (getput_arg_fast(compiler, inp_flags | LOAD_DATA, sugg_src2_r, src2, src2w)) { - FAIL_IF(compiler->error); - src2_r = sugg_src2_r; - } - } while (0); - } - - /* src1_r, src2_r and dst_r can be zero (=unprocessed) or non-zero. - If they are zero, they must not be registers. */ - if (src1_r == 0 && src2_r == 0 && dst_r == 0) { - if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { - SLJIT_ASSERT(!(flags & ARGS_SWAPPED)); - flags |= ARGS_SWAPPED; - FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG1, src2, src2w, src1, src1w)); - FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG2, src1, src1w, dst, dstw)); - } - else { - FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); - FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw)); - } - src1_r = TMP_REG1; - src2_r = TMP_REG2; - } - else if (src1_r == 0 && src2_r == 0) { - FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); - src1_r = TMP_REG1; - } - else if (src1_r == 0 && dst_r == 0) { - FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); - src1_r = TMP_REG1; - } - else if (src2_r == 0 && dst_r == 0) { - FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw)); - src2_r = sugg_src2_r; - } - - if (dst_r == 0) - dst_r = TMP_REG2; - - if (src1_r == 0) { - FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, 0, 0)); - src1_r = TMP_REG1; - } - - if (src2_r == 0) { - FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, sugg_src2_r, src2, src2w, 0, 0)); - src2_r = sugg_src2_r; - } - - FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r)); - - if (flags & (FAST_DEST | SLOW_DEST)) { - if (flags & FAST_DEST) - FAIL_IF(getput_arg_fast(compiler, inp_flags, dst_r, dst, dstw)); - else - FAIL_IF(getput_arg(compiler, inp_flags, dst_r, dst, dstw, 0, 0)); - } - return SLJIT_SUCCESS; -} - -#ifdef __cplusplus -extern "C" { -#endif - -#if defined(__GNUC__) -extern unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator); -extern int __aeabi_idivmod(int numerator, int denominator); -#else -#error "Software divmod functions are needed" -#endif - -#ifdef __cplusplus -} -#endif - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) -{ - CHECK_ERROR(); - check_sljit_emit_op0(compiler, op); - - op = GET_OPCODE(op); - switch (op) { - case SLJIT_BREAKPOINT: - EMIT_INSTRUCTION(BKPT); - break; - case SLJIT_NOP: - EMIT_INSTRUCTION(NOP); - break; - case SLJIT_UMUL: - case SLJIT_SMUL: -#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) - return push_inst(compiler, (op == SLJIT_UMUL ? 
UMULL : SMULL) - | (reg_map[SLJIT_SCRATCH_REG2] << 16) - | (reg_map[SLJIT_SCRATCH_REG1] << 12) - | (reg_map[SLJIT_SCRATCH_REG1] << 8) - | reg_map[SLJIT_SCRATCH_REG2]); -#else - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG1, SLJIT_UNUSED, RM(SLJIT_SCRATCH_REG2))); - return push_inst(compiler, (op == SLJIT_UMUL ? UMULL : SMULL) - | (reg_map[SLJIT_SCRATCH_REG2] << 16) - | (reg_map[SLJIT_SCRATCH_REG1] << 12) - | (reg_map[SLJIT_SCRATCH_REG1] << 8) - | reg_map[TMP_REG1]); -#endif - case SLJIT_UDIV: - case SLJIT_SDIV: - if (compiler->scratches >= 3) - EMIT_INSTRUCTION(0xe52d2008 /* str r2, [sp, #-8]! */); -#if defined(__GNUC__) - FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM, - (op == SLJIT_UDIV ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod)))); -#else -#error "Software divmod functions are needed" -#endif - if (compiler->scratches >= 3) - return push_inst(compiler, 0xe49d2008 /* ldr r2, [sp], #8 */); - return SLJIT_SUCCESS; - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) -{ - CHECK_ERROR(); - check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); - ADJUST_LOCAL_OFFSET(dst, dstw); - ADJUST_LOCAL_OFFSET(src, srcw); - - switch (GET_OPCODE(op)) { - case SLJIT_MOV: - case SLJIT_MOV_UI: - case SLJIT_MOV_SI: - case SLJIT_MOV_P: - return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw); - - case SLJIT_MOV_UB: - return emit_op(compiler, SLJIT_MOV_UB, ALLOW_ANY_IMM | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw); - - case SLJIT_MOV_SB: - return emit_op(compiler, SLJIT_MOV_SB, ALLOW_ANY_IMM | SIGNED_DATA | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw); - - case SLJIT_MOV_UH: - return emit_op(compiler, SLJIT_MOV_UH, ALLOW_ANY_IMM | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw); - - case SLJIT_MOV_SH: - return emit_op(compiler, SLJIT_MOV_SH, ALLOW_ANY_IMM | SIGNED_DATA | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sh)srcw : srcw); - - case SLJIT_MOVU: - case SLJIT_MOVU_UI: - case SLJIT_MOVU_SI: - case SLJIT_MOVU_P: - return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); - - case SLJIT_MOVU_UB: - return emit_op(compiler, SLJIT_MOV_UB, ALLOW_ANY_IMM | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw); - - case SLJIT_MOVU_SB: - return emit_op(compiler, SLJIT_MOV_SB, ALLOW_ANY_IMM | SIGNED_DATA | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw); - - case SLJIT_MOVU_UH: - return emit_op(compiler, SLJIT_MOV_UH, ALLOW_ANY_IMM | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw); - - case SLJIT_MOVU_SH: - return emit_op(compiler, SLJIT_MOV_SH, ALLOW_ANY_IMM | SIGNED_DATA | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? 
(sljit_sh)srcw : srcw); - - case SLJIT_NOT: - return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw); - - case SLJIT_NEG: -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->skip_checks = 1; -#endif - return sljit_emit_op2(compiler, SLJIT_SUB | GET_ALL_FLAGS(op), dst, dstw, SLJIT_IMM, 0, src, srcw); - - case SLJIT_CLZ: - return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src, srcw); - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) -{ - CHECK_ERROR(); - check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); - ADJUST_LOCAL_OFFSET(dst, dstw); - ADJUST_LOCAL_OFFSET(src1, src1w); - ADJUST_LOCAL_OFFSET(src2, src2w); - - switch (GET_OPCODE(op)) { - case SLJIT_ADD: - case SLJIT_ADDC: - case SLJIT_SUB: - case SLJIT_SUBC: - case SLJIT_OR: - case SLJIT_XOR: - return emit_op(compiler, op, ALLOW_IMM, dst, dstw, src1, src1w, src2, src2w); - - case SLJIT_MUL: - return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w); - - case SLJIT_AND: - return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, src1, src1w, src2, src2w); - - case SLJIT_SHL: - case SLJIT_LSHR: - case SLJIT_ASHR: - if (src2 & SLJIT_IMM) { - compiler->shift_imm = src2w & 0x1f; - return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src1, src1w); - } - else { - compiler->shift_imm = 0x20; - return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w); - } - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) -{ - check_sljit_get_register_index(reg); - return reg_map[reg]; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_float_register_index(sljit_si reg) -{ - check_sljit_get_float_register_index(reg); - return reg; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, - void *instruction, sljit_si size) -{ - CHECK_ERROR(); - check_sljit_emit_op_custom(compiler, instruction, size); - SLJIT_ASSERT(size == 4); - - return push_inst(compiler, *(sljit_uw*)instruction); -} - -/* --------------------------------------------------------------------- */ -/* Floating point operators */ -/* --------------------------------------------------------------------- */ - -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - -/* 0 - no fpu - 1 - vfp */ -static sljit_si arm_fpu_type = -1; - -static void init_compiler(void) -{ - if (arm_fpu_type != -1) - return; - - /* TODO: Only the OS can help to determine the correct fpu type. */ - arm_fpu_type = 1; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) -{ - if (arm_fpu_type == -1) - init_compiler(); - return arm_fpu_type; -} - -#else - -#define arm_fpu_type 1 - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) -{ - /* Always available. 
*/ - return 1; -} - -#endif - -#define FPU_LOAD (1 << 20) -#define EMIT_FPU_DATA_TRANSFER(inst, add, base, freg, offs) \ - ((inst) | ((add) << 23) | (reg_map[base] << 16) | (freg << 12) | (offs)) -#define EMIT_FPU_OPERATION(opcode, mode, dst, src1, src2) \ - ((opcode) | (mode) | ((dst) << 12) | (src1) | ((src2) << 16)) - -static sljit_si emit_fop_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) -{ - sljit_sw tmp; - sljit_uw imm; - sljit_sw inst = VSTR_F32 | (flags & (SLJIT_SINGLE_OP | FPU_LOAD)); - SLJIT_ASSERT(arg & SLJIT_MEM); - - if (SLJIT_UNLIKELY(arg & 0xf0)) { - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG1, arg & 0xf, RM((arg >> 4) & 0xf) | ((argw & 0x3) << 7))); - arg = SLJIT_MEM | TMP_REG1; - argw = 0; - } - - /* Fast loads and stores. */ - if ((arg & 0xf)) { - if (!(argw & ~0x3fc)) - return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, arg & 0xf, reg, argw >> 2)); - if (!(-argw & ~0x3fc)) - return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, arg & 0xf, reg, (-argw) >> 2)); - } - - if (compiler->cache_arg == arg) { - tmp = argw - compiler->cache_argw; - if (!(tmp & ~0x3fc)) - return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG3, reg, tmp >> 2)); - if (!(-tmp & ~0x3fc)) - return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, TMP_REG3, reg, -tmp >> 2)); - if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, tmp) != SLJIT_ERR_UNSUPPORTED) { - FAIL_IF(compiler->error); - compiler->cache_argw = argw; - return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG3, reg, 0)); - } - } - - if (arg & 0xf) { - if (emit_set_delta(compiler, TMP_REG1, arg & 0xf, argw) != SLJIT_ERR_UNSUPPORTED) { - FAIL_IF(compiler->error); - return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG1, reg, 0)); - } - imm = get_imm(argw & ~0x3fc); - if (imm) { - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG1, arg & 0xf, imm)); - return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG1, reg, (argw & 0x3fc) >> 2)); - } - imm = get_imm(-argw & ~0x3fc); - if (imm) { - argw = -argw; - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(SUB_DP, 0, TMP_REG1, arg & 0xf, imm)); - return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, TMP_REG1, reg, (argw & 0x3fc) >> 2)); - } - } - - compiler->cache_arg = arg; - compiler->cache_argw = argw; - if (arg & 0xf) { - FAIL_IF(load_immediate(compiler, TMP_REG1, argw)); - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG3, arg & 0xf, reg_map[TMP_REG1])); - } - else - FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); - - return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG3, reg, 0)); -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) -{ - sljit_si dst_fr; - - CHECK_ERROR(); - check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); - SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100), float_transfer_bit_error); - - compiler->cache_arg = 0; - compiler->cache_argw = 0; - op ^= SLJIT_SINGLE_OP; - - if (GET_OPCODE(op) == SLJIT_CMPD) { - if (dst > SLJIT_FLOAT_REG6) { - FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, dst, dstw)); - dst = TMP_FREG1; - } - if (src > SLJIT_FLOAT_REG6) { - FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src, srcw)); - src = TMP_FREG2; - } - EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_SINGLE_OP, dst, src, 0)); - EMIT_INSTRUCTION(VMRS); - 
return SLJIT_SUCCESS; - } - - dst_fr = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG1 : dst; - - if (src > SLJIT_FLOAT_REG6) { - FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, dst_fr, src, srcw)); - src = dst_fr; - } - - switch (GET_OPCODE(op)) { - case SLJIT_MOVD: - if (src != dst_fr && dst_fr != TMP_FREG1) - EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_SINGLE_OP, dst_fr, src, 0)); - break; - case SLJIT_NEGD: - EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_SINGLE_OP, dst_fr, src, 0)); - break; - case SLJIT_ABSD: - EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_SINGLE_OP, dst_fr, src, 0)); - break; - } - - if (dst_fr == TMP_FREG1) { - if (GET_OPCODE(op) == SLJIT_MOVD) - dst_fr = src; - FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), dst_fr, dst, dstw)); - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) -{ - sljit_si dst_fr; - - CHECK_ERROR(); - check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); - - compiler->cache_arg = 0; - compiler->cache_argw = 0; - op ^= SLJIT_SINGLE_OP; - - dst_fr = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG1 : dst; - - if (src2 > SLJIT_FLOAT_REG6) { - FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w)); - src2 = TMP_FREG2; - } - - if (src1 > SLJIT_FLOAT_REG6) { - FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w)); - src1 = TMP_FREG1; - } - - switch (GET_OPCODE(op)) { - case SLJIT_ADDD: - EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_SINGLE_OP, dst_fr, src2, src1)); - break; - - case SLJIT_SUBD: - EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_SINGLE_OP, dst_fr, src2, src1)); - break; - - case SLJIT_MULD: - EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_SINGLE_OP, dst_fr, src2, src1)); - break; - - case SLJIT_DIVD: - EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_SINGLE_OP, dst_fr, src2, src1)); - break; - } - - if (dst_fr == TMP_FREG1) - FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw)); - - return SLJIT_SUCCESS; -} - -#undef FPU_LOAD -#undef EMIT_FPU_DATA_TRANSFER -#undef EMIT_FPU_OPERATION - -/* --------------------------------------------------------------------- */ -/* Other instructions */ -/* --------------------------------------------------------------------- */ - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) -{ - CHECK_ERROR(); - check_sljit_emit_fast_enter(compiler, dst, dstw); - ADJUST_LOCAL_OFFSET(dst, dstw); - - /* For UNUSED dst. Uncommon, but possible. */ - if (dst == SLJIT_UNUSED) - return SLJIT_SUCCESS; - - if (dst <= TMP_REG3) - return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, RM(TMP_REG3))); - - /* Memory. */ - if (getput_arg_fast(compiler, WORD_DATA, TMP_REG3, dst, dstw)) - return compiler->error; - /* TMP_REG3 is used for caching. 
*/ - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG2, SLJIT_UNUSED, RM(TMP_REG3))); - compiler->cache_arg = 0; - compiler->cache_argw = 0; - return getput_arg(compiler, WORD_DATA, TMP_REG2, dst, dstw, 0, 0); -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) -{ - CHECK_ERROR(); - check_sljit_emit_fast_return(compiler, src, srcw); - ADJUST_LOCAL_OFFSET(src, srcw); - - if (src <= TMP_REG3) - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG3, SLJIT_UNUSED, RM(src))); - else if (src & SLJIT_MEM) { - if (getput_arg_fast(compiler, WORD_DATA | LOAD_DATA, TMP_REG3, src, srcw)) - FAIL_IF(compiler->error); - else { - compiler->cache_arg = 0; - compiler->cache_argw = 0; - FAIL_IF(getput_arg(compiler, WORD_DATA | LOAD_DATA, TMP_REG2, src, srcw, 0, 0)); - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG3, SLJIT_UNUSED, RM(TMP_REG2))); - } - } - else if (src & SLJIT_IMM) - FAIL_IF(load_immediate(compiler, TMP_REG3, srcw)); - return push_inst(compiler, BLX | RM(TMP_REG3)); -} - -/* --------------------------------------------------------------------- */ -/* Conditional instructions */ -/* --------------------------------------------------------------------- */ - -static sljit_uw get_cc(sljit_si type) -{ - switch (type) { - case SLJIT_C_EQUAL: - case SLJIT_C_MUL_NOT_OVERFLOW: - case SLJIT_C_FLOAT_EQUAL: - return 0x00000000; - - case SLJIT_C_NOT_EQUAL: - case SLJIT_C_MUL_OVERFLOW: - case SLJIT_C_FLOAT_NOT_EQUAL: - return 0x10000000; - - case SLJIT_C_LESS: - case SLJIT_C_FLOAT_LESS: - return 0x30000000; - - case SLJIT_C_GREATER_EQUAL: - case SLJIT_C_FLOAT_GREATER_EQUAL: - return 0x20000000; - - case SLJIT_C_GREATER: - case SLJIT_C_FLOAT_GREATER: - return 0x80000000; - - case SLJIT_C_LESS_EQUAL: - case SLJIT_C_FLOAT_LESS_EQUAL: - return 0x90000000; - - case SLJIT_C_SIG_LESS: - return 0xb0000000; - - case SLJIT_C_SIG_GREATER_EQUAL: - return 0xa0000000; - - case SLJIT_C_SIG_GREATER: - return 0xc0000000; - - case SLJIT_C_SIG_LESS_EQUAL: - return 0xd0000000; - - case SLJIT_C_OVERFLOW: - case SLJIT_C_FLOAT_UNORDERED: - return 0x60000000; - - case SLJIT_C_NOT_OVERFLOW: - case SLJIT_C_FLOAT_ORDERED: - return 0x70000000; - - default: /* SLJIT_JUMP */ - return 0xe0000000; - } -} - -SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) -{ - struct sljit_label *label; - - CHECK_ERROR_PTR(); - check_sljit_emit_label(compiler); - - if (compiler->last_label && compiler->last_label->size == compiler->size) - return compiler->last_label; - - label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); - PTR_FAIL_IF(!label); - set_label(label, compiler); - return label; -} - -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) -{ - struct sljit_jump *jump; - - CHECK_ERROR_PTR(); - check_sljit_emit_jump(compiler, type); - - jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); - PTR_FAIL_IF(!jump); - set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); - type &= 0xff; - - /* In ARM, we don't need to touch the arguments. */ -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - if (type >= SLJIT_FAST_CALL) - PTR_FAIL_IF(prepare_blx(compiler)); - PTR_FAIL_IF(push_inst_with_unique_literal(compiler, ((EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, - type <= SLJIT_JUMP ? 
TMP_PC : TMP_REG1, TMP_PC, 0)) & ~COND_MASK) | get_cc(type), 0)); - - if (jump->flags & SLJIT_REWRITABLE_JUMP) { - jump->addr = compiler->size; - compiler->patches++; - } - - if (type >= SLJIT_FAST_CALL) { - jump->flags |= IS_BL; - PTR_FAIL_IF(emit_blx(compiler)); - } - - if (!(jump->flags & SLJIT_REWRITABLE_JUMP)) - jump->addr = compiler->size; -#else - if (type >= SLJIT_FAST_CALL) - jump->flags |= IS_BL; - PTR_FAIL_IF(emit_imm(compiler, TMP_REG1, 0)); - PTR_FAIL_IF(push_inst(compiler, (((type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1)) & ~COND_MASK) | get_cc(type))); - jump->addr = compiler->size; -#endif - return jump; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) -{ - struct sljit_jump *jump; - - CHECK_ERROR(); - check_sljit_emit_ijump(compiler, type, src, srcw); - ADJUST_LOCAL_OFFSET(src, srcw); - - /* In ARM, we don't need to touch the arguments. */ - if (src & SLJIT_IMM) { - jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); - FAIL_IF(!jump); - set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0)); - jump->u.target = srcw; - -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - if (type >= SLJIT_FAST_CALL) - FAIL_IF(prepare_blx(compiler)); - FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, TMP_PC, 0), 0)); - if (type >= SLJIT_FAST_CALL) - FAIL_IF(emit_blx(compiler)); -#else - FAIL_IF(emit_imm(compiler, TMP_REG1, 0)); - FAIL_IF(push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1))); -#endif - jump->addr = compiler->size; - } - else { - if (src <= TMP_REG3) - return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(src)); - - SLJIT_ASSERT(src & SLJIT_MEM); - FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG2, src, srcw)); - return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG2)); - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw, - sljit_si type) -{ - sljit_si dst_r, flags = GET_ALL_FLAGS(op); - sljit_uw cc, ins; - - CHECK_ERROR(); - check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); - ADJUST_LOCAL_OFFSET(dst, dstw); - ADJUST_LOCAL_OFFSET(src, srcw); - - if (dst == SLJIT_UNUSED) - return SLJIT_SUCCESS; - - op = GET_OPCODE(op); - cc = get_cc(type); - dst_r = (dst <= TMP_REG3) ? dst : TMP_REG2; - - if (op < SLJIT_ADD) { - EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst_r, SLJIT_UNUSED, SRC2_IMM | 0)); - EMIT_INSTRUCTION((EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst_r, SLJIT_UNUSED, SRC2_IMM | 1) & ~COND_MASK) | cc); - return (dst_r == TMP_REG2) ? emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw) : SLJIT_SUCCESS; - } - - ins = (op == SLJIT_AND ? AND_DP : (op == SLJIT_OR ? ORR_DP : EOR_DP)); - if ((op == SLJIT_OR || op == SLJIT_XOR) && dst <= TMP_REG3 && dst == src) { - EMIT_INSTRUCTION((EMIT_DATA_PROCESS_INS(ins, 0, dst, dst, SRC2_IMM | 1) & ~COND_MASK) | cc); - /* The condition must always be set, even if the ORR/EOR is not executed above. */ - return (flags & SLJIT_SET_E) ? 
push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, SET_FLAGS, TMP_REG1, SLJIT_UNUSED, RM(dst))) : SLJIT_SUCCESS; - } - - compiler->cache_arg = 0; - compiler->cache_argw = 0; - if (src & SLJIT_MEM) { - FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw, dst, dstw)); - src = TMP_REG1; - srcw = 0; - } else if (src & SLJIT_IMM) { - FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); - src = TMP_REG1; - srcw = 0; - } - - EMIT_INSTRUCTION((EMIT_DATA_PROCESS_INS(ins, 0, dst_r, src, SRC2_IMM | 1) & ~COND_MASK) | cc); - EMIT_INSTRUCTION((EMIT_DATA_PROCESS_INS(ins, 0, dst_r, src, SRC2_IMM | 0) & ~COND_MASK) | (cc ^ 0x10000000)); - if (dst_r == TMP_REG2) - FAIL_IF(emit_op_mem2(compiler, WORD_DATA, TMP_REG2, dst, dstw, 0, 0)); - - return (flags & SLJIT_SET_E) ? push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, SET_FLAGS, TMP_REG1, SLJIT_UNUSED, RM(dst_r))) : SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) -{ - struct sljit_const *const_; - sljit_si reg; - - CHECK_ERROR_PTR(); - check_sljit_emit_const(compiler, dst, dstw, init_value); - ADJUST_LOCAL_OFFSET(dst, dstw); - - const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); - PTR_FAIL_IF(!const_); - - reg = (dst <= TMP_REG3) ? dst : TMP_REG2; - -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - PTR_FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, reg, TMP_PC, 0), init_value)); - compiler->patches++; -#else - PTR_FAIL_IF(emit_imm(compiler, reg, init_value)); -#endif - set_const(const_, compiler); - - if (reg == TMP_REG2 && dst != SLJIT_UNUSED) - PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw)); - return const_; -} - -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) -{ - inline_set_jump_addr(addr, new_addr, 1); -} - -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) -{ - inline_set_const(addr, new_constant, 1); -} diff --git a/src/3rd/pcre/sjconf.h b/src/3rd/pcre/sjconf.h index 4f0fe4463af..d54b5e6f544 100644 --- a/src/3rd/pcre/sjconf.h +++ b/src/3rd/pcre/sjconf.h @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -44,9 +44,11 @@ /* #define SLJIT_CONFIG_ARM_V5 1 */ /* #define SLJIT_CONFIG_ARM_V7 1 */ /* #define SLJIT_CONFIG_ARM_THUMB2 1 */ +/* #define SLJIT_CONFIG_ARM_64 1 */ /* #define SLJIT_CONFIG_PPC_32 1 */ /* #define SLJIT_CONFIG_PPC_64 1 */ /* #define SLJIT_CONFIG_MIPS_32 1 */ +/* #define SLJIT_CONFIG_MIPS_64 1 */ /* #define SLJIT_CONFIG_SPARC_32 1 */ /* #define SLJIT_CONFIG_TILEGX 1 */ @@ -80,7 +82,7 @@ /* --------------------------------------------------------------------- */ /* If SLJIT_STD_MACROS_DEFINED is not defined, the application should - define SLJIT_MALLOC, SLJIT_FREE, SLJIT_MEMMOVE, and NULL. */ + define SLJIT_MALLOC, SLJIT_FREE, SLJIT_MEMCPY, and NULL. */ #ifndef SLJIT_STD_MACROS_DEFINED /* Disabled by default. */ #define SLJIT_STD_MACROS_DEFINED 0 @@ -88,10 +90,37 @@ /* Executable code allocation: If SLJIT_EXECUTABLE_ALLOCATOR is not defined, the application should - define both SLJIT_MALLOC_EXEC and SLJIT_FREE_EXEC. 
*/ + define SLJIT_MALLOC_EXEC, SLJIT_FREE_EXEC, and SLJIT_EXEC_OFFSET. */ #ifndef SLJIT_EXECUTABLE_ALLOCATOR /* Enabled by default. */ #define SLJIT_EXECUTABLE_ALLOCATOR 1 + +/* When SLJIT_PROT_EXECUTABLE_ALLOCATOR is enabled, SLJIT uses + an allocator which does not set writable and executable + permission flags at the same time. The trade-off is increased + memory consumption and disabled dynamic code modifications. */ +#ifndef SLJIT_PROT_EXECUTABLE_ALLOCATOR +/* Disabled by default. */ +#define SLJIT_PROT_EXECUTABLE_ALLOCATOR 0 +#endif + +#endif + +/* Force cdecl calling convention even if a better calling + convention (e.g. fastcall) is supported by the C compiler. + If this option is disabled (this is the default), functions + called from JIT should be defined with the SLJIT_FUNC attribute. + Standard C functions can still be called by using the + SLJIT_CALL_CDECL jump type. */ +#ifndef SLJIT_USE_CDECL_CALLING_CONVENTION +/* Disabled by default */ +#define SLJIT_USE_CDECL_CALLING_CONVENTION 0 +#endif + +/* Return with an error when an invalid argument is passed. */ +#ifndef SLJIT_ARGUMENT_CHECKS +/* Disabled by default */ +#define SLJIT_ARGUMENT_CHECKS 0 #endif /* Debug checks (assertions, etc.). */ @@ -100,12 +129,19 @@ #define SLJIT_DEBUG 1 #endif -/* Verbose operations */ +/* Verbose operations. */ #ifndef SLJIT_VERBOSE /* Enabled by default */ #define SLJIT_VERBOSE 1 #endif -/* See the beginning of sljitConfigInternal.h */ +/* + SLJIT_IS_FPU_AVAILABLE + The availability of the FPU can be controlled by SLJIT_IS_FPU_AVAILABLE. + zero value - FPU is NOT present. + nonzero value - FPU is present. +*/ + +/* For further configurations, see the beginning of sljitConfigInternal.h */ #endif diff --git a/src/3rd/pcre/sjconfi.h b/src/3rd/pcre/sjconfi.h index 6281066f625..56a82c0ad38 100644 --- a/src/3rd/pcre/sjconfi.h +++ b/src/3rd/pcre/sjconfi.h @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -28,40 +28,62 @@ #define _SLJIT_CONFIG_INTERNAL_H_ /* - SLJIT defines the following macros depending on the target architecture: - - Feature detection (boolean) macros: - SLJIT_32BIT_ARCHITECTURE : 32 bit architecture - SLJIT_64BIT_ARCHITECTURE : 64 bit architecture - SLJIT_WORD_SHIFT : the shift required to apply when accessing a sljit_sw/sljit_uw array by index - SLJIT_DOUBLE_SHIFT : the shift required to apply when accessing a double array by index - SLJIT_LITTLE_ENDIAN : little endian architecture - SLJIT_BIG_ENDIAN : big endian architecture - SLJIT_UNALIGNED : allows unaligned memory accesses for non-fpu operations (only!)
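A minimal sketch of how an embedding project might drive the three new sjconf.h switches introduced above; the values shown are hypothetical choices, not settings the patch itself makes:

/* Hypothetical embedder configuration, set before including the sljit headers. */
#define SLJIT_ARGUMENT_CHECKS 1               /* emitters return SLJIT_ERR_BAD_ARGUMENT on bad operands */
#define SLJIT_PROT_EXECUTABLE_ALLOCATOR 1     /* never map a page writable and executable at once */
#define SLJIT_USE_CDECL_CALLING_CONVENTION 0  /* keep fastcall on x86-32 where the compiler supports it */
#include "sjlir.h"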
- SLJIT_INDIRECT_CALL : see SLJIT_FUNC_OFFSET() for more information - SLJIT_RETURN_ADDRESS_OFFSET : a return instruction always adds this offset to the return address - - Types and useful macros: - sljit_sb, sljit_ub : signed and unsigned 8 bit byte - sljit_sh, sljit_uh : signed and unsigned 16 bit half-word (short) type - sljit_si, sljit_ui : signed and unsigned 32 bit integer type - sljit_sw, sljit_uw : signed and unsigned machine word, enough to store a pointer - sljit_p : unsgined pointer value (usually the same as sljit_uw, but - some 64 bit ABIs may use 32 bit pointers) - sljit_s : single precision floating point value - sljit_d : double precision floating point value - SLJIT_CALL : C calling convention define for both calling JIT form C and C callbacks for JIT - SLJIT_W(number) : defining 64 bit constants on 64 bit architectures (compiler independent helper) + SLJIT defines the following architecture dependent types and macros: + + Types: + sljit_s8, sljit_u8 : signed and unsigned 8 bit integer type + sljit_s16, sljit_u16 : signed and unsigned 16 bit integer type + sljit_s32, sljit_u32 : signed and unsigned 32 bit integer type + sljit_sw, sljit_uw : signed and unsigned machine word, enough to store a pointer + sljit_p : unsigned pointer value (usually the same as sljit_uw, but + some 64 bit ABIs may use 32 bit pointers) + sljit_f32 : 32 bit single precision floating point value + sljit_f64 : 64 bit double precision floating point value + + Macros for feature detection (boolean): + SLJIT_32BIT_ARCHITECTURE : 32 bit architecture + SLJIT_64BIT_ARCHITECTURE : 64 bit architecture + SLJIT_LITTLE_ENDIAN : little endian architecture + SLJIT_BIG_ENDIAN : big endian architecture + SLJIT_UNALIGNED : allows unaligned memory accesses for non-fpu operations (only!) + SLJIT_INDIRECT_CALL : see SLJIT_FUNC_OFFSET() for more information + + Constants: + SLJIT_NUMBER_OF_REGISTERS : number of available registers + SLJIT_NUMBER_OF_SCRATCH_REGISTERS : number of available scratch registers + SLJIT_NUMBER_OF_SAVED_REGISTERS : number of available saved registers + SLJIT_NUMBER_OF_FLOAT_REGISTERS : number of available floating point registers + SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS : number of available floating point scratch registers + SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS : number of available floating point saved registers + SLJIT_WORD_SHIFT : the shift required to apply when accessing a sljit_sw/sljit_uw array by index + SLJIT_F32_SHIFT : the shift required to apply when accessing + a single precision floating point array by index + SLJIT_F64_SHIFT : the shift required to apply when accessing + a double precision floating point array by index + SLJIT_PREF_SHIFT_REG : x86 systems prefer ecx for shifting by register + the scratch register index of ecx is stored in this variable + SLJIT_LOCALS_OFFSET : local space starting offset (SLJIT_SP + SLJIT_LOCALS_OFFSET) + SLJIT_RETURN_ADDRESS_OFFSET : a return instruction always adds this offset to the return address + + Other macros: + SLJIT_FUNC : calling convention attribute for both calling JIT from C and C calling back from JIT + SLJIT_W(number) : defining 64 bit constants on 64 bit architectures (compiler independent helper) */ +/*****************/ +/* Sanity check.
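A minimal sketch of what the shift constants documented above buy in practice, assuming the sljit headers are included; the array and variable names are illustrative only. The shift turns an element index into the byte offset that generated code needs:

sljit_sw  words[8];
sljit_f64 doubles[8];
/* Byte offsets of element 3, as an emitter would compute them. */
sljit_sw word_off   = 3 << SLJIT_WORD_SHIFT;  /* 12 on 32-bit, 24 on 64-bit targets */
sljit_sw double_off = 3 << SLJIT_F64_SHIFT;   /* always 24 */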
*/ +/*****************/ + #if !((defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ || (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) \ || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ + || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ || (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ + || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \ || (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ || (defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX) \ || (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) \ @@ -69,23 +91,27 @@ #error "An architecture must be selected" #endif -/* Sanity check. */ #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ + (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ + (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) \ + (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ + (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ + + (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ + (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ + (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ + (defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX) \ + (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ + + (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \ + (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ + (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) \ + (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) >= 2 #error "Multiple architectures are selected" #endif -/* Auto select option (requires compiler support) */ +/********************************************************/ +/* Automatic CPU detection (requires compiler support). 
*/ +/********************************************************/ + #if (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) #ifndef _WIN32 @@ -102,12 +128,16 @@ #else #define SLJIT_CONFIG_ARM_V5 1 #endif +#elif defined (__aarch64__) +#define SLJIT_CONFIG_ARM_64 1 #elif defined(__ppc64__) || defined(__powerpc64__) || defined(_ARCH_PPC64) || (defined(_POWER) && defined(__64BIT__)) #define SLJIT_CONFIG_PPC_64 1 #elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) || defined(_ARCH_PWR) || defined(_ARCH_PWR2) || defined(_POWER) #define SLJIT_CONFIG_PPC_32 1 #elif defined(__mips__) && !defined(_LP64) #define SLJIT_CONFIG_MIPS_32 1 +#elif defined(__mips64) +#define SLJIT_CONFIG_MIPS_64 1 #elif defined(__sparc__) || defined(__sparc) #define SLJIT_CONFIG_SPARC_32 1 #elif defined(__tilegx__) @@ -117,43 +147,71 @@ #define SLJIT_CONFIG_UNSUPPORTED 1 #endif -#else /* !_WIN32 */ +#else /* _WIN32 */ #if defined(_M_X64) || defined(__x86_64__) #define SLJIT_CONFIG_X86_64 1 +#elif (defined(_M_ARM) && _M_ARM >= 7 && defined(_M_ARMT)) || defined(__thumb2__) +#define SLJIT_CONFIG_ARM_THUMB2 1 +#elif (defined(_M_ARM) && _M_ARM >= 7) +#define SLJIT_CONFIG_ARM_V7 1 #elif defined(_ARM_) #define SLJIT_CONFIG_ARM_V5 1 +#elif defined(_M_ARM64) || defined(__aarch64__) +#define SLJIT_CONFIG_ARM_64 1 #else #define SLJIT_CONFIG_X86_32 1 #endif -#endif /* !WIN32 */ +#endif /* !_WIN32 */ #endif /* SLJIT_CONFIG_AUTO */ #if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) #undef SLJIT_EXECUTABLE_ALLOCATOR #endif -#if !(defined SLJIT_STD_MACROS_DEFINED && SLJIT_STD_MACROS_DEFINED) +/******************************/ +/* CPU family type detection. */ -/* These libraries are needed for the macros below. */ -#include <stdlib.h> -#include <string.h> +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ + || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) +#define SLJIT_CONFIG_ARM_32 1 +#endif + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +#define SLJIT_CONFIG_X86 1 +#elif (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) +#define SLJIT_CONFIG_ARM 1 +#elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define SLJIT_CONFIG_PPC 1 +#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) +#define SLJIT_CONFIG_MIPS 1 +#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) || (defined SLJIT_CONFIG_SPARC_64 && SLJIT_CONFIG_SPARC_64) +#define SLJIT_CONFIG_SPARC 1 +#endif -#endif /* STD_MACROS_DEFINED */ +/**********************************/ +/* External function definitions. */ /* General macros: Note: SLJIT is designed to be as independent from them as possible.
- In release mode (SLJIT_DEBUG is not defined) only the following macros are needed: + In release mode (SLJIT_DEBUG is not defined) only the following + external functions are needed: */ #ifndef SLJIT_MALLOC -#define SLJIT_MALLOC(size) malloc(size) +#define SLJIT_MALLOC(size, allocator_data) malloc(size) #endif #ifndef SLJIT_FREE -#define SLJIT_FREE(ptr) free(ptr) +#define SLJIT_FREE(ptr, allocator_data) free(ptr) +#endif + +#ifndef SLJIT_MEMCPY +#define SLJIT_MEMCPY(dest, src, len) memcpy(dest, src, len) #endif #ifndef SLJIT_MEMMOVE @@ -164,6 +222,10 @@ #define SLJIT_ZEROMEM(dest, len) memset(dest, 0, len) #endif +/***************************/ +/* Compiler helper macros. */ +/***************************/ + #if !defined(SLJIT_LIKELY) && !defined(SLJIT_UNLIKELY) #if defined(__GNUC__) && (__GNUC__ >= 3) @@ -185,16 +247,24 @@ #endif #endif /* !SLJIT_INLINE */ -#ifndef SLJIT_CONST -/* Const variables. */ -#define SLJIT_CONST const +#ifndef SLJIT_NOINLINE +/* Non-inline functions. */ +#if defined(__GNUC__) +#define SLJIT_NOINLINE __attribute__ ((noinline)) +#else +#define SLJIT_NOINLINE #endif +#endif /* !SLJIT_NOINLINE */ #ifndef SLJIT_UNUSED_ARG /* Unused arguments. */ #define SLJIT_UNUSED_ARG(arg) (void)arg #endif +/*********************************/ +/* Type of public API functions. */ +/*********************************/ + #if (defined SLJIT_CONFIG_STATIC && SLJIT_CONFIG_STATIC) /* Static ABI functions. For all-in-one programs. */ @@ -209,9 +279,22 @@ #define SLJIT_API_FUNC_ATTRIBUTE #endif /* (defined SLJIT_CONFIG_STATIC && SLJIT_CONFIG_STATIC) */ +/****************************/ +/* Instruction cache flush. */ +/****************************/ + +#if (!defined SLJIT_CACHE_FLUSH && defined __has_builtin) +#if __has_builtin(__builtin___clear_cache) + +#define SLJIT_CACHE_FLUSH(from, to) \ + __builtin___clear_cache((char*)from, (char*)to) + +#endif /* __has_builtin(__builtin___clear_cache) */ +#endif /* (!defined SLJIT_CACHE_FLUSH && defined __has_builtin) */ + #ifndef SLJIT_CACHE_FLUSH -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) /* Not required to implement on archs with unified caches. */ #define SLJIT_CACHE_FLUSH(from, to) @@ -225,24 +308,36 @@ #define SLJIT_CACHE_FLUSH(from, to) \ sys_icache_invalidate((char*)(from), (char*)(to) - (char*)(from)) -#elif defined __ANDROID__ +#elif (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) -/* Android lacks __clear_cache; instead, cacheflush should be used. */ +/* The __clear_cache() implementation of GCC is a dummy function on PowerPC. */ +#define SLJIT_CACHE_FLUSH(from, to) \ + ppc_cache_flush((from), (to)) +#define SLJIT_CACHE_FLUSH_OWN_IMPL 1 + +#elif (defined(__GNUC__) && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) #define SLJIT_CACHE_FLUSH(from, to) \ - cacheflush((long)(from), (long)(to), 0) + __builtin___clear_cache((char*)from, (char*)to) -#elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#elif defined __ANDROID__ + +/* Android lacks __clear_cache; instead, cacheflush should be used. */ -/* The __clear_cache() implementation of GCC is a dummy function on PowerPC. */ #define SLJIT_CACHE_FLUSH(from, to) \ - ppc_cache_flush((from), (to)) + cacheflush((long)(from), (long)(to), 0) #elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) /* The __clear_cache() implementation of GCC is a dummy function on Sparc.
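Whichever branch above ends up defining SLJIT_CACHE_FLUSH, the calling contract is the same; a minimal sketch, assuming the configuration header is included and with a hypothetical helper name:

/* Make a freshly written [from, to) byte range visible to instruction fetch.
   On unified-cache targets the macro expands to nothing, so this is free. */
static void flush_generated(void *from, void *to)
{
    SLJIT_CACHE_FLUSH(from, to);
}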
*/ #define SLJIT_CACHE_FLUSH(from, to) \ sparc_cache_flush((from), (to)) +#define SLJIT_CACHE_FLUSH_OWN_IMPL 1 + +#elif defined _WIN32 + +#define SLJIT_CACHE_FLUSH(from, to) \ + FlushInstructionCache(GetCurrentProcess(), (char*)(from), (char*)(to) - (char*)(from)) #else @@ -254,19 +349,23 @@ #endif /* !SLJIT_CACHE_FLUSH */ +/******************************************************/ +/* Integer and floating point type definitions. */ +/******************************************************/ + /* 8 bit byte type. */ -typedef unsigned char sljit_ub; -typedef signed char sljit_sb; +typedef unsigned char sljit_u8; +typedef signed char sljit_s8; /* 16 bit half-word type. */ -typedef unsigned short int sljit_uh; -typedef signed short int sljit_sh; +typedef unsigned short int sljit_u16; +typedef signed short int sljit_s16; /* 32 bit integer type. */ -typedef unsigned int sljit_ui; -typedef signed int sljit_si; +typedef unsigned int sljit_u32; +typedef signed int sljit_s32; -/* Machine word type. Can encapsulate a pointer. +/* Machine word type. Enough for storing a pointer. 32 bit for 32 bit machines. 64 bit for 64 bit machines. */ #if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) @@ -275,7 +374,9 @@ typedef signed int sljit_si; typedef unsigned long int sljit_uw; typedef long int sljit_sw; #elif !(defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ + && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ && !(defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ + && !(defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \ && !(defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX) #define SLJIT_32BIT_ARCHITECTURE 1 #define SLJIT_WORD_SHIFT 2 @@ -285,30 +386,39 @@ typedef int sljit_sw; #define SLJIT_64BIT_ARCHITECTURE 1 #define SLJIT_WORD_SHIFT 3 #ifdef _WIN32 +#ifdef __GNUC__ +/* These types do not require windows.h */ +typedef unsigned long long sljit_uw; +typedef long long sljit_sw; +#else typedef unsigned __int64 sljit_uw; typedef __int64 sljit_sw; -#else +#endif +#else /* !_WIN32 */ typedef unsigned long int sljit_uw; typedef long int sljit_sw; -#endif +#endif /* _WIN32 */ #endif typedef sljit_uw sljit_p; /* Floating point types. */ -typedef float sljit_s; -typedef double sljit_d; +typedef float sljit_f32; +typedef double sljit_f64; /* Shift for pointer sized data. */ #define SLJIT_POINTER_SHIFT SLJIT_WORD_SHIFT /* Shift for double precision sized data. */ -#define SLJIT_DOUBLE_SHIFT 3 +#define SLJIT_F32_SHIFT 2 +#define SLJIT_F64_SHIFT 3 #ifndef SLJIT_W /* Defining long constants. */ -#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE) +#if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) +#define SLJIT_W(w) (w##l) +#elif (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE) #define SLJIT_W(w) (w##ll) #else #define SLJIT_W(w) (w) @@ -316,84 +426,129 @@ typedef double sljit_d; #endif /* !SLJIT_W */ -#ifndef SLJIT_CALL +/*************************/ +/* Endianness detection. */ +/*************************/ + +#if !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN) + +/* These macros are mostly useful for applications.
*/ +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ + || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + +#ifdef __LITTLE_ENDIAN__ +#define SLJIT_LITTLE_ENDIAN 1 +#else +#define SLJIT_BIG_ENDIAN 1 +#endif + +#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ + || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + +#ifdef __MIPSEL__ +#define SLJIT_LITTLE_ENDIAN 1 +#else +#define SLJIT_BIG_ENDIAN 1 +#endif + +#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + +#define SLJIT_BIG_ENDIAN 1 + +#else +#define SLJIT_LITTLE_ENDIAN 1 +#endif + +#endif /* !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN) */ + +/* Sanity check. */ +#if (defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN) && (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) +#error "Exactly one endianness must be selected" +#endif + +#if !(defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN) && !(defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) +#error "Exactly one endianness must be selected" +#endif + +#ifndef SLJIT_UNALIGNED + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ + || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ + || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ + || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ + || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ + || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ + || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define SLJIT_UNALIGNED 1 +#endif + +#endif /* !SLJIT_UNALIGNED */ -/* ABI (Application Binary Interface) types. */ #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) +/* Auto detect SSE2 support using CPUID. + On 64 bit x86 cpus, sse2 must be present. */ +#define SLJIT_DETECT_SSE2 1 +#endif + +/*****************************************************************************************/ +/* Calling convention of functions generated by SLJIT or called from the generated code. */ +/*****************************************************************************************/ + +#ifndef SLJIT_FUNC + +#if (defined SLJIT_USE_CDECL_CALLING_CONVENTION && SLJIT_USE_CDECL_CALLING_CONVENTION) + +/* Force cdecl. */ +#define SLJIT_FUNC + +#elif (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) #if defined(__GNUC__) && !defined(__APPLE__) #if ( __GNUC__ > 3 ) || ( ( __GNUC__ == 3 ) && ( __GNUC_MINOR__ >= 4 ) ) -#define SLJIT_CALL __attribute__ ((fastcall)) +#define SLJIT_FUNC __attribute__ ((fastcall)) #define SLJIT_X86_32_FASTCALL 1 #else -#define SLJIT_CALL +#define SLJIT_FUNC #endif + #elif defined(_MSC_VER) -#define SLJIT_CALL __fastcall +#define SLJIT_FUNC __fastcall #define SLJIT_X86_32_FASTCALL 1 #elif defined(__BORLANDC__) -#define SLJIT_CALL __msfastcall +#define SLJIT_FUNC __msfastcall #define SLJIT_X86_32_FASTCALL 1 #else /* Unknown compiler. */ /* The cdecl attribute is the default. */ -#define SLJIT_CALL +#define SLJIT_FUNC #endif #else /* Non x86-32 architectures. */ -#define SLJIT_CALL +#define SLJIT_FUNC #endif /* SLJIT_CONFIG_X86_32 */ -#endif /* !SLJIT_CALL */ - -#if !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN) - -/* These macros are useful for the application. 
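A minimal sketch of the SLJIT_FUNC attribute selected above, used on both sides of the JIT boundary so that a fastcall build and a cdecl build stay consistent; the typedef and function names are hypothetical:

/* Entry point type for generated code, and a C helper the generated
   code may call back; both carry the same calling convention. */
typedef sljit_sw (SLJIT_FUNC *jit_entry)(sljit_sw a, sljit_sw b);

static sljit_sw SLJIT_FUNC add_callback(sljit_sw x, sljit_sw y)
{
    return x + y;
}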
*/ -#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ - || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ - || (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) -#define SLJIT_BIG_ENDIAN 1 - -#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) - -#ifdef __MIPSEL__ -#define SLJIT_LITTLE_ENDIAN 1 -#else -#define SLJIT_BIG_ENDIAN 1 -#endif - -#else -#define SLJIT_LITTLE_ENDIAN 1 -#endif - -#endif /* !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN) */ - -/* Sanity check. */ -#if (defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN) && (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) -#error "Exactly one endianness must be selected" -#endif - -#if !(defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN) && !(defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) -#error "Exactly one endianness must be selected" -#endif +#endif /* !SLJIT_FUNC */ #ifndef SLJIT_INDIRECT_CALL -#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32 && defined _AIX) +#if ((defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) && (!defined _CALL_ELF || _CALL_ELF == 1)) \ + || ((defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) && defined _AIX) /* It seems certain ppc compilers use an indirect addressing for functions which makes things complicated. */ #define SLJIT_INDIRECT_CALL 1 #endif #endif /* SLJIT_INDIRECT_CALL */ +/* The offset which needs to be subtracted from the return address to +determine the next executed instruction after return. */ #ifndef SLJIT_RETURN_ADDRESS_OFFSET #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) #define SLJIT_RETURN_ADDRESS_OFFSET 8 @@ -402,50 +557,140 @@ typedef double sljit_d; #endif #endif /* SLJIT_RETURN_ADDRESS_OFFSET */ -#ifndef SLJIT_SSE2 +/***************************************************/ +/* Functions of the built-in executable allocator. */ +/***************************************************/ -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) -/* Turn on SSE2 support on x86. */ -#define SLJIT_SSE2 1 +#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) +SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size); +SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr); +SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void); +#define SLJIT_MALLOC_EXEC(size) sljit_malloc_exec(size) +#define SLJIT_FREE_EXEC(ptr) sljit_free_exec(ptr) + +#if (defined SLJIT_PROT_EXECUTABLE_ALLOCATOR && SLJIT_PROT_EXECUTABLE_ALLOCATOR) +SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr); +#define SLJIT_EXEC_OFFSET(ptr) sljit_exec_offset(ptr) +#else +#define SLJIT_EXEC_OFFSET(ptr) 0 +#endif + +#endif + +/**********************************************/ +/* Registers and locals offset determination. */ +/**********************************************/ #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) -/* Auto detect SSE2 support using CPUID. - On 64 bit x86 cpus, sse2 must be present.
*/ -#define SLJIT_DETECT_SSE2 1 + +#define SLJIT_NUMBER_OF_REGISTERS 12 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 9 +#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset) +#define SLJIT_PREF_SHIFT_REG SLJIT_R2 + +#elif (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + +#define SLJIT_NUMBER_OF_REGISTERS 13 +#ifndef _WIN64 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 6 +#define SLJIT_LOCALS_OFFSET_BASE 0 +#else /* _WIN64 */ +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8 +#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset) +#endif /* !_WIN64 */ +#define SLJIT_PREF_SHIFT_REG SLJIT_R3 + +#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) + +#define SLJIT_NUMBER_OF_REGISTERS 12 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8 +#define SLJIT_LOCALS_OFFSET_BASE 0 + +#elif (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) + +#define SLJIT_NUMBER_OF_REGISTERS 12 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8 +#define SLJIT_LOCALS_OFFSET_BASE 0 + +#elif (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) + +#define SLJIT_NUMBER_OF_REGISTERS 26 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 10 +#define SLJIT_LOCALS_OFFSET_BASE 0 + +#elif (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) + +#define SLJIT_NUMBER_OF_REGISTERS 23 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 17 +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) || (defined _AIX) +#define SLJIT_LOCALS_OFFSET_BASE ((6 + 8) * sizeof(sljit_sw)) +#elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) +/* Add +1 for double alignment. */ +#define SLJIT_LOCALS_OFFSET_BASE ((3 + 1) * sizeof(sljit_sw)) +#else +#define SLJIT_LOCALS_OFFSET_BASE (3 * sizeof(sljit_sw)) +#endif /* SLJIT_CONFIG_PPC_64 || _AIX */ + +#elif (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) + +#define SLJIT_NUMBER_OF_REGISTERS 21 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8 +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#define SLJIT_LOCALS_OFFSET_BASE (4 * sizeof(sljit_sw)) +#else +#define SLJIT_LOCALS_OFFSET_BASE 0 #endif -#endif /* (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) */ +#elif (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC) -#endif /* !SLJIT_SSE2 */ +#define SLJIT_NUMBER_OF_REGISTERS 18 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 14 +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +/* saved registers (16), return struct pointer (1), space for 6 argument words (6), + 4th double arg (2), double alignment (1).
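Worked through for one row of the table above, assuming a non-Windows x86-64 build: 13 registers of which 6 are saved leaves 13 - 6 = 7 scratch registers once SLJIT_NUMBER_OF_SCRATCH_REGISTERS (derived a little further down) is expanded.

/* Example arithmetic for non-Windows x86-64:
   SLJIT_NUMBER_OF_SCRATCH_REGISTERS = 13 - 6 = 7,
   i.e. SLJIT_R0..SLJIT_R6 are scratch and SLJIT_S0..SLJIT_S5 are saved. */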
*/ +#define SLJIT_LOCALS_OFFSET_BASE ((16 + 1 + 6 + 2 + 1) * sizeof(sljit_sw)) +#endif -#ifndef SLJIT_UNALIGNED +#elif (defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX) + +#define SLJIT_NUMBER_OF_REGISTERS 10 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 5 +#define SLJIT_LOCALS_OFFSET_BASE 0 + +#elif (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) + +#define SLJIT_NUMBER_OF_REGISTERS 0 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 0 +#define SLJIT_LOCALS_OFFSET_BASE 0 -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ - || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ - || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ - || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ - || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ - || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) -#define SLJIT_UNALIGNED 1 #endif -#endif /* !SLJIT_UNALIGNED */ +#define SLJIT_LOCALS_OFFSET (SLJIT_LOCALS_OFFSET_BASE) -#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) -SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size); -SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr); -SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void); -#define SLJIT_MALLOC_EXEC(size) sljit_malloc_exec(size) -#define SLJIT_FREE_EXEC(ptr) sljit_free_exec(ptr) +#define SLJIT_NUMBER_OF_SCRATCH_REGISTERS \ + (SLJIT_NUMBER_OF_REGISTERS - SLJIT_NUMBER_OF_SAVED_REGISTERS) + +#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 6 +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && (defined _WIN64) +#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 1 +#else +#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 0 #endif +#define SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS \ + (SLJIT_NUMBER_OF_FLOAT_REGISTERS - SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS) + +/*************************************/ +/* Debug and verbose related macros. */ +/*************************************/ + #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) #include <stdio.h> #endif #if (defined SLJIT_DEBUG && SLJIT_DEBUG) -#if !defined(SLJIT_ASSERT) || !defined(SLJIT_ASSERT_STOP) +#if !defined(SLJIT_ASSERT) || !defined(SLJIT_UNREACHABLE) /* SLJIT_HALT_PROCESS must halt the process. */ #ifndef SLJIT_HALT_PROCESS @@ -457,7 +702,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void); #include <stdio.h> -#endif /* !SLJIT_ASSERT || !SLJIT_ASSERT_STOP */ +#endif /* !SLJIT_ASSERT || !SLJIT_UNREACHABLE */ /* Feel free to redefine these two macros. */ #ifndef SLJIT_ASSERT @@ -472,34 +717,33 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void); #endif /* !SLJIT_ASSERT */ -#ifndef SLJIT_ASSERT_STOP +#ifndef SLJIT_UNREACHABLE -#define SLJIT_ASSERT_STOP() \ +#define SLJIT_UNREACHABLE() \ do { \ printf("Should never been reached " __FILE__ ":%d\n", __LINE__); \ SLJIT_HALT_PROCESS(); \ } while (0) -#endif /* !SLJIT_ASSERT_STOP */ +#endif /* !SLJIT_UNREACHABLE */ #else /* (defined SLJIT_DEBUG && SLJIT_DEBUG) */ /* Forcing empty, but valid statements. */ #undef SLJIT_ASSERT -#undef SLJIT_ASSERT_STOP +#undef SLJIT_UNREACHABLE #define SLJIT_ASSERT(x) \ do { } while (0) -#define SLJIT_ASSERT_STOP() \ +#define SLJIT_UNREACHABLE() \ do { } while (0) #endif /* (defined SLJIT_DEBUG && SLJIT_DEBUG) */ #ifndef SLJIT_COMPILE_ASSERT -/* Should be improved eventually. */ #define SLJIT_COMPILE_ASSERT(x, description) \ - SLJIT_ASSERT(x) + switch(0) { case 0: case ((x) ?
1 : 0): break; } #endif /* !SLJIT_COMPILE_ASSERT */ diff --git a/src/3rd/pcre/sjexeca.c b/src/3rd/pcre/sjexeca.c index f24ed337973..92ddb949143 100644 --- a/src/3rd/pcre/sjexeca.c +++ b/src/3rd/pcre/sjexeca.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -86,7 +86,7 @@ static SLJIT_INLINE void* alloc_chunk(sljit_uw size) return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE); } -static SLJIT_INLINE void free_chunk(void* chunk, sljit_uw size) +static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size) { SLJIT_UNUSED_ARG(size); VirtualFree(chunk, 0, MEM_RELEASE); @@ -94,24 +94,91 @@ static SLJIT_INLINE void free_chunk(void* chunk, sljit_uw size) #else +#ifdef __APPLE__ +/* Configures TARGET_OS_OSX when appropriate */ +#include <TargetConditionals.h> + +#if TARGET_OS_OSX && defined(MAP_JIT) +#include <sys/utsname.h> +#endif /* TARGET_OS_OSX && MAP_JIT */ + +#ifdef MAP_JIT + +static SLJIT_INLINE int get_map_jit_flag() +{ +#if TARGET_OS_OSX + /* On macOS systems, returns MAP_JIT if it is defined _and_ we're running on a version + of macOS where it's OK to have more than one JIT block. On non-macOS systems, returns + MAP_JIT if it is defined. */ + static int map_jit_flag = -1; + + /* The following code is thread safe because multiple initialization + sets map_jit_flag to the same value and the code has no side-effects. + Changing the kernel version without system restart is (very) unlikely. */ + if (map_jit_flag == -1) { + struct utsname name; + + map_jit_flag = 0; + uname(&name); + + /* Kernel version for 10.14.0 (Mojave) */ + if (atoi(name.release) >= 18) { + /* Only use MAP_JIT if a hardened runtime is used, because MAP_JIT is incompatible with fork(). */ + void *ptr = mmap(NULL, getpagesize(), PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + + if (ptr == MAP_FAILED) { + map_jit_flag = MAP_JIT; + } else { + munmap(ptr, getpagesize()); + } + } + } + + return map_jit_flag; +#else /* !TARGET_OS_OSX */ + return MAP_JIT; +#endif /* TARGET_OS_OSX */ +} + +#endif /* MAP_JIT */ + +#endif /* __APPLE__ */ + static SLJIT_INLINE void* alloc_chunk(sljit_uw size) { - void* retval; + void *retval; + const int prot = PROT_READ | PROT_WRITE | PROT_EXEC; #ifdef MAP_ANON - retval = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0); -#else + + int flags = MAP_PRIVATE | MAP_ANON; + +#ifdef MAP_JIT + flags |= get_map_jit_flag(); +#endif + + retval = mmap(NULL, size, prot, flags, -1, 0); +#else /* !MAP_ANON */ if (dev_zero < 0) { if (open_dev_zero()) return NULL; } - retval = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, dev_zero, 0); -#endif + retval = mmap(NULL, size, prot, MAP_PRIVATE, dev_zero, 0); +#endif /* MAP_ANON */ + + if (retval == MAP_FAILED) + retval = NULL; + else { + if (mprotect(retval, size, prot) < 0) { + munmap(retval, size); + retval = NULL; + } + } - return (retval != MAP_FAILED) ?
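The macOS detection above boils down to one probe: ask the kernel for a plain writable-and-executable anonymous mapping, and fall back to MAP_JIT only when that is refused, which is the hardened-runtime case. A compressed sketch of just that probe, assuming <sys/mman.h> and <unistd.h> are available and with a hypothetical function name:

static int needs_map_jit(void)
{
    /* Try a plain W+X anonymous page, exactly as get_map_jit_flag() does. */
    void *p = mmap(NULL, getpagesize(), PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;  /* hardened runtime: W+X refused, MAP_JIT is required */
    munmap(p, getpagesize());
    return 0;      /* plain mapping works; skip MAP_JIT to stay fork()-safe */
}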
retval : NULL; + return retval; } -static SLJIT_INLINE void free_chunk(void* chunk, sljit_uw size) +static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size) { munmap(chunk, size); } @@ -137,10 +204,10 @@ struct free_block { }; #define AS_BLOCK_HEADER(base, offset) \ - ((struct block_header*)(((sljit_ub*)base) + offset)) + ((struct block_header*)(((sljit_u8*)base) + offset)) #define AS_FREE_BLOCK(base, offset) \ - ((struct free_block*)(((sljit_ub*)base) + offset)) -#define MEM_START(base) ((void*)(((sljit_ub*)base) + sizeof(struct block_header))) + ((struct free_block*)(((sljit_u8*)base) + offset)) +#define MEM_START(base) ((void*)(((sljit_u8*)base) + sizeof(struct block_header))) #define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7) & ~7) static struct free_block* free_blocks; @@ -153,7 +220,7 @@ static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, free_block->size = size; free_block->next = free_blocks; - free_block->prev = 0; + free_block->prev = NULL; if (free_blocks) free_blocks->prev = free_block; free_blocks = free_block; @@ -180,8 +247,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size) sljit_uw chunk_size; allocator_grab_lock(); - if (size < sizeof(struct free_block)) - size = sizeof(struct free_block); + if (size < (64 - sizeof(struct block_header))) + size = (64 - sizeof(struct block_header)); size = ALIGN_SIZE(size); free_block = free_blocks; diff --git a/src/3rd/pcre/sjlir.c b/src/3rd/pcre/sjlir.c index 59ed08ce5f8..57387d87f6f 100644 --- a/src/3rd/pcre/sjlir.c +++ b/src/3rd/pcre/sjlir.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -26,6 +26,21 @@ #include "sjlir.h" +#ifdef _WIN32 + +/* For SLJIT_CACHE_FLUSH, which can expand to FlushInstructionCache. */ +#include <windows.h> + +#endif /* _WIN32 */ + +#if !(defined SLJIT_STD_MACROS_DEFINED && SLJIT_STD_MACROS_DEFINED) + +/* These libraries are needed for the macros below.
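The new minimum in sljit_malloc_exec above keeps every allocation at least one 64-byte unit; a worked example, assuming (for the arithmetic only) sizeof(struct block_header) == 16 on a 64-bit target:

/* A 20-byte request is first raised to the floor 64 - 16 = 48, then
   ALIGN_SIZE(48) = (48 + 16 + 7) & ~7 = 64, so header plus payload
   together fill exactly one 64-byte unit. */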
*/ +#include <stdlib.h> +#include <string.h> + +#endif /* SLJIT_STD_MACROS_DEFINED */ + #define CHECK_ERROR() \ do { \ if (SLJIT_UNLIKELY(compiler->error)) \ @@ -38,12 +53,6 @@ return NULL; \ } while (0) -#define CHECK_ERROR_VOID() \ - do { \ - if (SLJIT_UNLIKELY(compiler->error)) \ - return; \ - } while (0) - #define FAIL_IF(expr) \ do { \ if (SLJIT_UNLIKELY(expr)) \ @@ -82,17 +91,26 @@ #if !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) +#define VARIABLE_FLAG_SHIFT (10) +#define VARIABLE_FLAG_MASK (0x3f << VARIABLE_FLAG_SHIFT) +#define GET_FLAG_TYPE(op) ((op) >> VARIABLE_FLAG_SHIFT) + #define GET_OPCODE(op) \ - ((op) & ~(SLJIT_INT_OP | SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS)) + ((op) & ~(SLJIT_I32_OP | SLJIT_SET_Z | VARIABLE_FLAG_MASK)) -#define GET_FLAGS(op) \ - ((op) & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C)) +#define HAS_FLAGS(op) \ + ((op) & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)) #define GET_ALL_FLAGS(op) \ - ((op) & (SLJIT_INT_OP | SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS)) + ((op) & (SLJIT_I32_OP | SLJIT_SET_Z | VARIABLE_FLAG_MASK)) +#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE) #define TYPE_CAST_NEEDED(op) \ - (((op) >= SLJIT_MOV_UB && (op) <= SLJIT_MOV_SH) || ((op) >= SLJIT_MOVU_UB && (op) <= SLJIT_MOVU_SH)) + ((op) >= SLJIT_MOV_U8 && (op) <= SLJIT_MOV_S32) +#else +#define TYPE_CAST_NEEDED(op) \ + ((op) >= SLJIT_MOV_U8 && (op) <= SLJIT_MOV_S16) +#endif #define BUF_SIZE 4096 @@ -102,17 +120,31 @@ #define ABUF_SIZE 4096 #endif +/* Parameter parsing. */ +#define REG_MASK 0x3f +#define OFFS_REG(reg) (((reg) >> 8) & REG_MASK) +#define OFFS_REG_MASK (REG_MASK << 8) +#define TO_OFFS_REG(reg) ((reg) << 8) +/* When reg cannot be unused. */ +#define FAST_IS_REG(reg) ((reg) <= REG_MASK) +/* When reg can be unused. */ +#define SLOW_IS_REG(reg) ((reg) > 0 && (reg) <= REG_MASK) + +/* Mask for argument types. */ +#define SLJIT_DEF_MASK ((1 << SLJIT_DEF_SHIFT) - 1) + /* Jump flags. */ #define JUMP_LABEL 0x1 #define JUMP_ADDR 0x2 /* SLJIT_REWRITABLE_JUMP is 0x1000.
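A small sketch of what the new parameter-parsing masks above encode and decode, inside some emitter function; SLJIT_MEM, SLJIT_R0 and SLJIT_R1 are the usual operand constants from the public header:

/* A base+index memory operand packs the second register into bits 8 and up;
   this matches what SLJIT_MEM2() in the public header expands to. */
sljit_s32 arg   = SLJIT_MEM | SLJIT_R0 | TO_OFFS_REG(SLJIT_R1);
sljit_s32 base  = arg & REG_MASK;  /* -> SLJIT_R0 */
sljit_s32 index = OFFS_REG(arg);   /* -> SLJIT_R1 */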
*/ -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) -# define PATCH_MB 0x4 -# define PATCH_MW 0x8 +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) +# define PATCH_MB 0x4 +# define PATCH_MW 0x8 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) -# define PATCH_MD 0x10 +# define PATCH_MD 0x10 #endif +# define TYPE_SHIFT 13 #endif #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) @@ -127,39 +159,60 @@ #if (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) # define IS_COND 0x04 # define IS_BL 0x08 - /* cannot be encoded as branch */ -# define B_TYPE0 0x00 /* conditional + imm8 */ -# define B_TYPE1 0x10 +# define PATCH_TYPE1 0x10 /* conditional + imm20 */ -# define B_TYPE2 0x20 +# define PATCH_TYPE2 0x20 /* IT + imm24 */ -# define B_TYPE3 0x30 +# define PATCH_TYPE3 0x30 /* imm11 */ -# define B_TYPE4 0x40 +# define PATCH_TYPE4 0x40 /* imm24 */ -# define B_TYPE5 0x50 +# define PATCH_TYPE5 0x50 /* BL + imm24 */ -# define BL_TYPE6 0x60 +# define PATCH_BL 0x60 /* 0xf00 cc code for branches */ #endif -#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) -# define UNCOND_B 0x04 -# define PATCH_B 0x08 -# define ABSOLUTE_B 0x10 +#if (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) +# define IS_COND 0x004 +# define IS_CBZ 0x008 +# define IS_BL 0x010 +# define PATCH_B 0x020 +# define PATCH_COND 0x040 +# define PATCH_ABS48 0x080 +# define PATCH_ABS64 0x100 #endif -#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) -# define IS_MOVABLE 0x04 -# define IS_JAL 0x08 -# define IS_BIT26_COND 0x10 -# define IS_BIT16_COND 0x20 +#if (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) +# define IS_COND 0x004 +# define IS_CALL 0x008 +# define PATCH_B 0x010 +# define PATCH_ABS_B 0x020 +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +# define PATCH_ABS32 0x040 +# define PATCH_ABS48 0x080 +#endif +# define REMOVE_COND 0x100 +#endif + +#if (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) +# define IS_MOVABLE 0x004 +# define IS_JAL 0x008 +# define IS_CALL 0x010 +# define IS_BIT26_COND 0x020 +# define IS_BIT16_COND 0x040 +# define IS_BIT23_COND 0x080 -# define IS_COND (IS_BIT26_COND | IS_BIT16_COND) +# define IS_COND (IS_BIT26_COND | IS_BIT16_COND | IS_BIT23_COND) -# define PATCH_B 0x40 -# define PATCH_J 0x80 +# define PATCH_B 0x100 +# define PATCH_J 0x200 + +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) +# define PATCH_ABS32 0x400 +# define PATCH_ABS48 0x800 +#endif /* instruction types */ # define MOVABLE_INS 0 @@ -167,15 +220,15 @@ /* no destination (i.e: store) */ # define UNMOVABLE_INS 32 /* FPU status register */ -# define FCSR_FCC 33 +# define FCSR_FCC 33 #endif #if (defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX) -# define IS_JAL 0x04 -# define IS_COND 0x08 +# define IS_JAL 0x04 +# define IS_COND 0x08 -# define PATCH_B 0x10 -# define PATCH_J 0x20 +# define PATCH_B 0x10 +# define PATCH_J 0x20 #endif #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) @@ -199,123 +252,156 @@ # define FCC_IS_SET (1 << 24) #endif -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) -#define SLJIT_HAS_VARIABLE_LOCALS_OFFSET 1 -#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) -#define FIXED_LOCALS_OFFSET (3 * sizeof(sljit_sw)) -#endif -#endif +/* Stack management. 
*/ -#define GET_SAVED_REGISTERS_SIZE(scratches, saveds, extra) \ + (((scratches < SLJIT_NUMBER_OF_SCRATCH_REGISTERS ? 0 : (scratches - SLJIT_NUMBER_OF_SCRATCH_REGISTERS)) + \ + (saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? saveds : SLJIT_NUMBER_OF_SAVED_REGISTERS) + \ + extra) * sizeof(sljit_sw)) + +#define ADJUST_LOCAL_OFFSET(p, i) \ + if ((p) == (SLJIT_MEM1(SLJIT_SP))) \ + (i) += SLJIT_LOCALS_OFFSET; + +#endif /* !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) */ + +/* Utils can still be used even if SLJIT_CONFIG_UNSUPPORTED is set. */ +#include "sjutils.c" + +#if !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) + +#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) + +#if (defined SLJIT_PROT_EXECUTABLE_ALLOCATOR && SLJIT_PROT_EXECUTABLE_ALLOCATOR) +#include "sljitProtExecAllocator.c" #else -#define FIXED_LOCALS_OFFSET (sizeof(sljit_sw)) +#include "sjexeca.c" #endif + #endif -#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) -#define SLJIT_HAS_FIXED_LOCALS_OFFSET 1 -#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) -#define FIXED_LOCALS_OFFSET ((6 + 8) * sizeof(sljit_sw)) +#if (defined SLJIT_PROT_EXECUTABLE_ALLOCATOR && SLJIT_PROT_EXECUTABLE_ALLOCATOR) +#define SLJIT_ADD_EXEC_OFFSET(ptr, exec_offset) ((sljit_u8 *)(ptr) + (exec_offset)) #else -#define FIXED_LOCALS_OFFSET (2 * sizeof(sljit_sw)) -#endif +#define SLJIT_ADD_EXEC_OFFSET(ptr, exec_offset) ((sljit_u8 *)(ptr)) #endif -#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) -#define SLJIT_HAS_FIXED_LOCALS_OFFSET 1 -#define FIXED_LOCALS_OFFSET ((6 + 8) * sizeof(sljit_sw)) -#endif +/* Argument checking features. */ -#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) -#define SLJIT_HAS_FIXED_LOCALS_OFFSET 1 -#define FIXED_LOCALS_OFFSET (4 * sizeof(sljit_sw)) -#endif +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) -#define SLJIT_HAS_FIXED_LOCALS_OFFSET 1 -#define FIXED_LOCALS_OFFSET (23 * sizeof(sljit_sw)) -#endif +/* Returns with an error when an invalid argument is passed. */ -#if (defined SLJIT_HAS_VARIABLE_LOCALS_OFFSET && SLJIT_HAS_VARIABLE_LOCALS_OFFSET) +#define CHECK_ARGUMENT(x) \ + do { \ + if (SLJIT_UNLIKELY(!(x))) \ + return 1; \ + } while (0) -#define ADJUST_LOCAL_OFFSET(p, i) \ - if ((p) == (SLJIT_MEM1(SLJIT_LOCALS_REG))) \ - (i) += compiler->locals_offset; +#define CHECK_RETURN_TYPE sljit_s32 +#define CHECK_RETURN_OK return 0 -#elif (defined SLJIT_HAS_FIXED_LOCALS_OFFSET && SLJIT_HAS_FIXED_LOCALS_OFFSET) +#define CHECK(x) \ + do { \ + if (SLJIT_UNLIKELY(x)) { \ + compiler->error = SLJIT_ERR_BAD_ARGUMENT; \ + return SLJIT_ERR_BAD_ARGUMENT; \ + } \ + } while (0) -#define ADJUST_LOCAL_OFFSET(p, i) \ - if ((p) == (SLJIT_MEM1(SLJIT_LOCALS_REG))) \ - (i) += FIXED_LOCALS_OFFSET; +#define CHECK_PTR(x) \ + do { \ + if (SLJIT_UNLIKELY(x)) { \ + compiler->error = SLJIT_ERR_BAD_ARGUMENT; \ + return NULL; \ + } \ + } while (0) -#else +#define CHECK_REG_INDEX(x) \ + do { \ + if (SLJIT_UNLIKELY(x)) { \ + return -2; \ + } \ + } while (0) -#define ADJUST_LOCAL_OFFSET(p, i) +#elif (defined SLJIT_DEBUG && SLJIT_DEBUG) -#endif +/* Assertion failure occurs if an invalid argument is passed.
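A sketch of how an emitter-side checker would sit on top of the modes above; check_example and its rule are hypothetical, but the macro contract is the one just defined: in the checked builds, under SLJIT_ARGUMENT_CHECKS the checker reports failure through its return value, while under SLJIT_DEBUG the same line becomes an assertion.

static CHECK_RETURN_TYPE check_example(struct sljit_compiler *compiler, sljit_s32 reg)
{
    SLJIT_UNUSED_ARG(compiler);
    CHECK_ARGUMENT(reg > 0 && reg <= REG_MASK); /* reject impossible register numbers */
    CHECK_RETURN_OK;
}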
*/ +#undef SLJIT_ARGUMENT_CHECKS +#define SLJIT_ARGUMENT_CHECKS 1 -#endif /* !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) */ +#define CHECK_ARGUMENT(x) SLJIT_ASSERT(x) +#define CHECK_RETURN_TYPE void +#define CHECK_RETURN_OK return +#define CHECK(x) x +#define CHECK_PTR(x) x +#define CHECK_REG_INDEX(x) x -/* Utils can still be used even if SLJIT_CONFIG_UNSUPPORTED is set. */ -#include "sjutils.c" +#elif (defined SLJIT_VERBOSE && SLJIT_VERBOSE) -#if !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) +/* Arguments are not checked. */ +#define CHECK_RETURN_TYPE void +#define CHECK_RETURN_OK return +#define CHECK(x) x +#define CHECK_PTR(x) x +#define CHECK_REG_INDEX(x) x -#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) -#include "sjexeca.c" -#endif +#else -#if (defined SLJIT_SSE2_AUTO && SLJIT_SSE2_AUTO) && !(defined SLJIT_SSE2 && SLJIT_SSE2) -#error SLJIT_SSE2_AUTO cannot be enabled without SLJIT_SSE2 -#endif +/* Arguments are not checked. */ +#define CHECK(x) +#define CHECK_PTR(x) +#define CHECK_REG_INDEX(x) + +#endif /* SLJIT_ARGUMENT_CHECKS */ /* --------------------------------------------------------------------- */ /* Public functions */ /* --------------------------------------------------------------------- */ -#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || ((defined SLJIT_SSE2 && SLJIT_SSE2) && ((defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64))) +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) #define SLJIT_NEEDS_COMPILER_INIT 1 -static sljit_si compiler_initialized = 0; +static sljit_s32 compiler_initialized = 0; /* A thread safe initialization. */ static void init_compiler(void); #endif -SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allocator_data) { - struct sljit_compiler *compiler = (struct sljit_compiler*)SLJIT_MALLOC(sizeof(struct sljit_compiler)); + struct sljit_compiler *compiler = (struct sljit_compiler*)SLJIT_MALLOC(sizeof(struct sljit_compiler), allocator_data); if (!compiler) return NULL; SLJIT_ZEROMEM(compiler, sizeof(struct sljit_compiler)); SLJIT_COMPILE_ASSERT( - sizeof(sljit_sb) == 1 && sizeof(sljit_ub) == 1 - && sizeof(sljit_sh) == 2 && sizeof(sljit_uh) == 2 - && sizeof(sljit_si) == 4 && sizeof(sljit_ui) == 4 + sizeof(sljit_s8) == 1 && sizeof(sljit_u8) == 1 + && sizeof(sljit_s16) == 2 && sizeof(sljit_u16) == 2 + && sizeof(sljit_s32) == 4 && sizeof(sljit_u32) == 4 && (sizeof(sljit_p) == 4 || sizeof(sljit_p) == 8) && sizeof(sljit_p) <= sizeof(sljit_sw) && (sizeof(sljit_sw) == 4 || sizeof(sljit_sw) == 8) && (sizeof(sljit_uw) == 4 || sizeof(sljit_uw) == 8), invalid_integer_types); - SLJIT_COMPILE_ASSERT(SLJIT_INT_OP == SLJIT_SINGLE_OP, + SLJIT_COMPILE_ASSERT(SLJIT_I32_OP == SLJIT_F32_OP, int_op_and_single_op_must_be_the_same); - SLJIT_COMPILE_ASSERT(SLJIT_REWRITABLE_JUMP != SLJIT_SINGLE_OP, + SLJIT_COMPILE_ASSERT(SLJIT_REWRITABLE_JUMP != SLJIT_F32_OP, rewritable_jump_and_single_op_must_not_be_the_same); + SLJIT_COMPILE_ASSERT(!(SLJIT_EQUAL & 0x1) && !(SLJIT_LESS & 0x1) && !(SLJIT_EQUAL_F64 & 0x1) && !(SLJIT_JUMP & 0x1), + conditional_flags_must_be_even_numbers); /* Only the non-zero members must be set. 
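   The structure was cleared with SLJIT_ZEROMEM above, so the code below only
   assigns fields whose initial value is not zero: the success error code, the
   two memory-fragment buffers, and the -1 "not yet configured" markers. A
   minimal usage sketch of the allocator-aware entry point introduced here
   (passing NULL as allocator_data selects the default SLJIT_MALLOC behaviour):

     struct sljit_compiler *c = sljit_create_compiler(NULL);
     if (c != NULL)
       sljit_free_compiler(c);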
*/ compiler->error = SLJIT_SUCCESS; - compiler->buf = (struct sljit_memory_fragment*)SLJIT_MALLOC(BUF_SIZE); - compiler->abuf = (struct sljit_memory_fragment*)SLJIT_MALLOC(ABUF_SIZE); + compiler->allocator_data = allocator_data; + compiler->buf = (struct sljit_memory_fragment*)SLJIT_MALLOC(BUF_SIZE, allocator_data); + compiler->abuf = (struct sljit_memory_fragment*)SLJIT_MALLOC(ABUF_SIZE, allocator_data); if (!compiler->buf || !compiler->abuf) { if (compiler->buf) - SLJIT_FREE(compiler->buf); + SLJIT_FREE(compiler->buf, allocator_data); if (compiler->abuf) - SLJIT_FREE(compiler->abuf); - SLJIT_FREE(compiler); + SLJIT_FREE(compiler->abuf, allocator_data); + SLJIT_FREE(compiler, allocator_data); return NULL; } @@ -326,24 +412,28 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void) compiler->scratches = -1; compiler->saveds = -1; + compiler->fscratches = -1; + compiler->fsaveds = -1; + compiler->local_size = -1; #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) compiler->args = -1; #endif #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - compiler->cpool = (sljit_uw*)SLJIT_MALLOC(CPOOL_SIZE * sizeof(sljit_uw) + CPOOL_SIZE * sizeof(sljit_ub)); + compiler->cpool = (sljit_uw*)SLJIT_MALLOC(CPOOL_SIZE * sizeof(sljit_uw) + + CPOOL_SIZE * sizeof(sljit_u8), allocator_data); if (!compiler->cpool) { - SLJIT_FREE(compiler->buf); - SLJIT_FREE(compiler->abuf); - SLJIT_FREE(compiler); + SLJIT_FREE(compiler->buf, allocator_data); + SLJIT_FREE(compiler->abuf, allocator_data); + SLJIT_FREE(compiler, allocator_data); return NULL; } - compiler->cpool_unique = (sljit_ub*)(compiler->cpool + CPOOL_SIZE); + compiler->cpool_unique = (sljit_u8*)(compiler->cpool + CPOOL_SIZE); compiler->cpool_diff = 0xffffffff; #endif -#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#if (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) compiler->delay_slot = UNMOVABLE_INS; #endif @@ -365,25 +455,33 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compiler *compile { struct sljit_memory_fragment *buf; struct sljit_memory_fragment *curr; + void *allocator_data = compiler->allocator_data; + SLJIT_UNUSED_ARG(allocator_data); buf = compiler->buf; while (buf) { curr = buf; buf = buf->next; - SLJIT_FREE(curr); + SLJIT_FREE(curr, allocator_data); } buf = compiler->abuf; while (buf) { curr = buf; buf = buf->next; - SLJIT_FREE(curr); + SLJIT_FREE(curr, allocator_data); } #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) - SLJIT_FREE(compiler->cpool); + SLJIT_FREE(compiler->cpool, allocator_data); #endif - SLJIT_FREE(compiler); + SLJIT_FREE(compiler, allocator_data); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_compiler_memory_error(struct sljit_compiler *compiler) +{ + if (compiler->error == SLJIT_SUCCESS) + compiler->error = SLJIT_ERR_ALLOC_FAILED; } #if (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) @@ -418,21 +516,37 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_label(struct sljit_jump *jump, struct sl SLJIT_API_FUNC_ATTRIBUTE void sljit_set_target(struct sljit_jump *jump, sljit_uw target) { if (SLJIT_LIKELY(!!jump)) { - SLJIT_ASSERT(jump->flags & SLJIT_REWRITABLE_JUMP); - jump->flags &= ~JUMP_LABEL; jump->flags |= JUMP_ADDR; jump->u.target = target; } } +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_put_label(struct sljit_put_label *put_label, struct sljit_label *label) +{ + if (SLJIT_LIKELY(!!put_label)) + put_label->label = label; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_current_flags(struct sljit_compiler *compiler, sljit_s32 current_flags) +{ + 
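+	/* Tells the checker which status flags the preceding (typically
+	   custom-emitted) instruction left in a defined state; with
+	   SLJIT_ARGUMENT_CHECKS enabled it refreshes compiler->last_flags so
+	   that a following conditional jump passes validation. A sketch,
+	   assuming a hand-encoded instruction ins of ins_size bytes that sets
+	   the zero flag:
+	     sljit_emit_op_custom(compiler, ins, ins_size);
+	     sljit_set_current_flags(compiler, SLJIT_SET_Z);
+	   With argument checks disabled the call is a no-op. */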
SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(current_flags); + +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + if ((current_flags & ~(VARIABLE_FLAG_MASK | SLJIT_I32_OP | SLJIT_SET_Z)) == 0) { + compiler->last_flags = GET_FLAG_TYPE(current_flags) | (current_flags & (SLJIT_I32_OP | SLJIT_SET_Z)); + } +#endif +} + /* --------------------------------------------------------------------- */ /* Private functions */ /* --------------------------------------------------------------------- */ static void* ensure_buf(struct sljit_compiler *compiler, sljit_uw size) { - sljit_ub *ret; + sljit_u8 *ret; struct sljit_memory_fragment *new_frag; SLJIT_ASSERT(size <= 256); @@ -441,7 +555,7 @@ static void* ensure_buf(struct sljit_compiler *compiler, sljit_uw size) compiler->buf->used_size += size; return ret; } - new_frag = (struct sljit_memory_fragment*)SLJIT_MALLOC(BUF_SIZE); + new_frag = (struct sljit_memory_fragment*)SLJIT_MALLOC(BUF_SIZE, compiler->allocator_data); PTR_FAIL_IF_NULL(new_frag); new_frag->next = compiler->buf; compiler->buf = new_frag; @@ -451,7 +565,7 @@ static void* ensure_buf(struct sljit_compiler *compiler, sljit_uw size) static void* ensure_abuf(struct sljit_compiler *compiler, sljit_uw size) { - sljit_ub *ret; + sljit_u8 *ret; struct sljit_memory_fragment *new_frag; SLJIT_ASSERT(size <= 256); @@ -460,7 +574,7 @@ static void* ensure_abuf(struct sljit_compiler *compiler, sljit_uw size) compiler->abuf->used_size += size; return ret; } - new_frag = (struct sljit_memory_fragment*)SLJIT_MALLOC(ABUF_SIZE); + new_frag = (struct sljit_memory_fragment*)SLJIT_MALLOC(ABUF_SIZE, compiler->allocator_data); PTR_FAIL_IF_NULL(new_frag); new_frag->next = compiler->abuf; compiler->abuf = new_frag; @@ -468,7 +582,7 @@ static void* ensure_abuf(struct sljit_compiler *compiler, sljit_uw size) return new_frag->memory; } -SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_si size) +SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_s32 size) { CHECK_ERROR_PTR(); @@ -500,6 +614,77 @@ static SLJIT_INLINE void reverse_buf(struct sljit_compiler *compiler) compiler->buf = prev; } +static SLJIT_INLINE sljit_s32 get_arg_count(sljit_s32 arg_types) +{ + sljit_s32 arg_count = 0; + + arg_types >>= SLJIT_DEF_SHIFT; + while (arg_types) { + arg_count++; + arg_types >>= SLJIT_DEF_SHIFT; + } + + return arg_count; +} + +#if !(defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) + +static SLJIT_INLINE sljit_uw compute_next_addr(struct sljit_label *label, struct sljit_jump *jump, + struct sljit_const *const_, struct sljit_put_label *put_label) +{ + sljit_uw result = ~(sljit_uw)0; + + if (label) + result = label->size; + + if (jump && jump->addr < result) + result = jump->addr; + + if (const_ && const_->addr < result) + result = const_->addr; + + if (put_label && put_label->addr < result) + result = put_label->addr; + + return result; +} + +#endif /* !SLJIT_CONFIG_X86 */ + +static SLJIT_INLINE void set_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) +{ + SLJIT_UNUSED_ARG(args); + SLJIT_UNUSED_ARG(local_size); + + compiler->options = options; + compiler->scratches = scratches; + compiler->saveds = saveds; + compiler->fscratches = fscratches; + compiler->fsaveds = fsaveds; +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->logical_local_size = local_size; +#endif +} + +static 
SLJIT_INLINE void set_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) +{ + SLJIT_UNUSED_ARG(args); + SLJIT_UNUSED_ARG(local_size); + + compiler->options = options; + compiler->scratches = scratches; + compiler->saveds = saveds; + compiler->fscratches = fscratches; + compiler->fsaveds = fsaveds; +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->logical_local_size = local_size; +#endif +} + static SLJIT_INLINE void set_label(struct sljit_label *label, struct sljit_compiler *compiler) { label->next = NULL; @@ -511,7 +696,7 @@ static SLJIT_INLINE void set_label(struct sljit_label *label, struct sljit_compi compiler->last_label = label; } -static SLJIT_INLINE void set_jump(struct sljit_jump *jump, struct sljit_compiler *compiler, sljit_si flags) +static SLJIT_INLINE void set_jump(struct sljit_jump *jump, struct sljit_compiler *compiler, sljit_s32 flags) { jump->next = NULL; jump->flags = flags; @@ -533,124 +718,127 @@ static SLJIT_INLINE void set_const(struct sljit_const *const_, struct sljit_comp compiler->last_const = const_; } +static SLJIT_INLINE void set_put_label(struct sljit_put_label *put_label, struct sljit_compiler *compiler, sljit_uw offset) +{ + put_label->next = NULL; + put_label->label = NULL; + put_label->addr = compiler->size - offset; + put_label->flags = 0; + if (compiler->last_put_label) + compiler->last_put_label->next = put_label; + else + compiler->put_labels = put_label; + compiler->last_put_label = put_label; +} + #define ADDRESSING_DEPENDS_ON(exp, reg) \ - (((exp) & SLJIT_MEM) && (((exp) & 0xf) == reg || (((exp) >> 4) & 0xf) == reg)) - -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) -#define FUNCTION_CHECK_OP() \ - SLJIT_ASSERT(!GET_FLAGS(op) || !(op & SLJIT_KEEP_FLAGS)); \ - switch (GET_OPCODE(op)) { \ - case SLJIT_NOT: \ - case SLJIT_CLZ: \ - case SLJIT_AND: \ - case SLJIT_OR: \ - case SLJIT_XOR: \ - case SLJIT_SHL: \ - case SLJIT_LSHR: \ - case SLJIT_ASHR: \ - SLJIT_ASSERT(!(op & (SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C))); \ - break; \ - case SLJIT_NEG: \ - SLJIT_ASSERT(!(op & (SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_C))); \ - break; \ - case SLJIT_MUL: \ - SLJIT_ASSERT(!(op & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_C))); \ - break; \ - case SLJIT_CMPD: \ - SLJIT_ASSERT(!(op & (SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \ - SLJIT_ASSERT((op & (SLJIT_SET_E | SLJIT_SET_S))); \ - break; \ - case SLJIT_ADD: \ - SLJIT_ASSERT(!(op & (SLJIT_SET_S | SLJIT_SET_U))); \ - break; \ - case SLJIT_SUB: \ - break; \ - case SLJIT_ADDC: \ - case SLJIT_SUBC: \ - SLJIT_ASSERT(!(op & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O))); \ - break; \ - case SLJIT_BREAKPOINT: \ - case SLJIT_NOP: \ - case SLJIT_UMUL: \ - case SLJIT_SMUL: \ - case SLJIT_MOV: \ - case SLJIT_MOV_P: \ - case SLJIT_MOVU: \ - case SLJIT_MOVU_P: \ - /* Nothing allowed */ \ - SLJIT_ASSERT(!(op & (SLJIT_INT_OP | SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \ - break; \ - default: \ - /* Only SLJIT_INT_OP or SLJIT_SINGLE_OP is allowed. 
*/ \ - SLJIT_ASSERT(!(op & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \ - break; \ - } + (((exp) & SLJIT_MEM) && (((exp) & REG_MASK) == reg || OFFS_REG(exp) == reg)) + +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) #define FUNCTION_CHECK_IS_REG(r) \ - ((r) == SLJIT_UNUSED || \ - ((r) >= SLJIT_SCRATCH_REG1 && (r) <= SLJIT_SCRATCH_REG1 - 1 + compiler->scratches) || \ - ((r) >= SLJIT_SAVED_REG1 && (r) <= SLJIT_SAVED_REG1 - 1 + compiler->saveds)) + (((r) >= SLJIT_R0 && (r) < (SLJIT_R0 + compiler->scratches)) \ + || ((r) > (SLJIT_S0 - compiler->saveds) && (r) <= SLJIT_S0)) + +#define FUNCTION_CHECK_IS_FREG(fr) \ + (((fr) >= SLJIT_FR0 && (fr) < (SLJIT_FR0 + compiler->fscratches)) \ + || ((fr) > (SLJIT_FS0 - compiler->fsaveds) && (fr) <= SLJIT_FS0)) + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) +#define CHECK_IF_VIRTUAL_REGISTER(p) ((p) <= SLJIT_S3 && (p) >= SLJIT_S8) +#else +#define CHECK_IF_VIRTUAL_REGISTER(p) 0 +#endif + +static sljit_s32 function_check_src_mem(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i) +{ + if (compiler->scratches == -1 || compiler->saveds == -1) + return 0; + + if (!(p & SLJIT_MEM)) + return 0; + + if (!((p & REG_MASK) == SLJIT_UNUSED || FUNCTION_CHECK_IS_REG(p & REG_MASK))) + return 0; + + if (CHECK_IF_VIRTUAL_REGISTER(p & REG_MASK)) + return 0; + + if (p & OFFS_REG_MASK) { + if ((p & REG_MASK) == SLJIT_UNUSED) + return 0; + + if (!(FUNCTION_CHECK_IS_REG(OFFS_REG(p)))) + return 0; + + if (CHECK_IF_VIRTUAL_REGISTER(OFFS_REG(p))) + return 0; + + if ((i & ~0x3) != 0) + return 0; + } + + return (p & ~(SLJIT_MEM | REG_MASK | OFFS_REG_MASK)) == 0; +} + +#define FUNCTION_CHECK_SRC_MEM(p, i) \ + CHECK_ARGUMENT(function_check_src_mem(compiler, p, i)); + +static sljit_s32 function_check_src(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i) +{ + if (compiler->scratches == -1 || compiler->saveds == -1) + return 0; + + if (FUNCTION_CHECK_IS_REG(p)) + return (i == 0); + + if (p == SLJIT_IMM) + return 1; + + if (p == SLJIT_MEM1(SLJIT_SP)) + return (i >= 0 && i < compiler->logical_local_size); + + return function_check_src_mem(compiler, p, i); +} #define FUNCTION_CHECK_SRC(p, i) \ - SLJIT_ASSERT(compiler->scratches != -1 && compiler->saveds != -1); \ - if (FUNCTION_CHECK_IS_REG(p)) \ - SLJIT_ASSERT((i) == 0 && (p) != SLJIT_UNUSED); \ - else if ((p) == SLJIT_IMM) \ - ; \ - else if ((p) == (SLJIT_MEM1(SLJIT_LOCALS_REG))) \ - SLJIT_ASSERT((i) >= 0 && (i) < compiler->logical_local_size); \ - else if ((p) & SLJIT_MEM) { \ - SLJIT_ASSERT(FUNCTION_CHECK_IS_REG((p) & 0xf)); \ - if ((p) & 0xf0) { \ - SLJIT_ASSERT(FUNCTION_CHECK_IS_REG(((p) >> 4) & 0xf)); \ - SLJIT_ASSERT(!((i) & ~0x3)); \ - } \ - SLJIT_ASSERT(((p) >> 9) == 0); \ - } \ - else \ - SLJIT_ASSERT_STOP(); - -#define FUNCTION_CHECK_DST(p, i) \ - SLJIT_ASSERT(compiler->scratches != -1 && compiler->saveds != -1); \ - if (FUNCTION_CHECK_IS_REG(p)) \ - SLJIT_ASSERT((i) == 0); \ - else if ((p) == (SLJIT_MEM1(SLJIT_LOCALS_REG))) \ - SLJIT_ASSERT((i) >= 0 && (i) < compiler->logical_local_size); \ - else if ((p) & SLJIT_MEM) { \ - SLJIT_ASSERT(FUNCTION_CHECK_IS_REG((p) & 0xf)); \ - if ((p) & 0xf0) { \ - SLJIT_ASSERT(FUNCTION_CHECK_IS_REG(((p) >> 4) & 0xf)); \ - SLJIT_ASSERT(!((i) & ~0x3)); \ - } \ - SLJIT_ASSERT(((p) >> 9) == 0); \ - } \ - else \ - SLJIT_ASSERT_STOP(); + CHECK_ARGUMENT(function_check_src(compiler, p, i)); + +static sljit_s32 function_check_dst(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i, sljit_s32 unused) +{ + if 
(compiler->scratches == -1 || compiler->saveds == -1) + return 0; + + if (FUNCTION_CHECK_IS_REG(p) || ((unused) && (p) == SLJIT_UNUSED)) + return (i == 0); + + if (p == SLJIT_MEM1(SLJIT_SP)) + return (i >= 0 && i < compiler->logical_local_size); + + return function_check_src_mem(compiler, p, i); +} + +#define FUNCTION_CHECK_DST(p, i, unused) \ + CHECK_ARGUMENT(function_check_dst(compiler, p, i, unused)); + +static sljit_s32 function_fcheck(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i) +{ + if (compiler->scratches == -1 || compiler->saveds == -1) + return 0; + + if (FUNCTION_CHECK_IS_FREG(p)) + return (i == 0); + + if (p == SLJIT_MEM1(SLJIT_SP)) + return (i >= 0 && i < compiler->logical_local_size); + + return function_check_src_mem(compiler, p, i); +} #define FUNCTION_FCHECK(p, i) \ - if ((p) >= SLJIT_FLOAT_REG1 && (p) <= SLJIT_FLOAT_REG6) \ - SLJIT_ASSERT(i == 0); \ - else if ((p) & SLJIT_MEM) { \ - SLJIT_ASSERT(FUNCTION_CHECK_IS_REG((p) & 0xf)); \ - if ((p) & 0xf0) { \ - SLJIT_ASSERT(FUNCTION_CHECK_IS_REG(((p) >> 4) & 0xf)); \ - SLJIT_ASSERT(((p) & 0xf0) != (SLJIT_LOCALS_REG << 4) && !(i & ~0x3)); \ - } else \ - SLJIT_ASSERT((((p) >> 4) & 0xf) == 0); \ - SLJIT_ASSERT(((p) >> 9) == 0); \ - } \ - else \ - SLJIT_ASSERT_STOP(); - -#define FUNCTION_CHECK_OP1() \ - if (GET_OPCODE(op) >= SLJIT_MOVU && GET_OPCODE(op) <= SLJIT_MOVU_P) { \ - SLJIT_ASSERT(!(src & SLJIT_MEM) || (src & 0xf) != SLJIT_LOCALS_REG); \ - SLJIT_ASSERT(!(dst & SLJIT_MEM) || (dst & 0xf) != SLJIT_LOCALS_REG); \ - if ((src & SLJIT_MEM) && (src & 0xf)) \ - SLJIT_ASSERT((dst & 0xf) != (src & 0xf) && ((dst >> 4) & 0xf) != (src & 0xf)); \ - } + CHECK_ARGUMENT(function_fcheck(compiler, p, i)); -#endif +#endif /* SLJIT_ARGUMENT_CHECKS */ #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) @@ -659,18 +847,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *comp compiler->verbose = verbose; } -static char* reg_names[] = { - (char*)"unused", (char*)"s1", (char*)"s2", (char*)"s3", - (char*)"se1", (char*)"se2", (char*)"p1", (char*)"p2", - (char*)"p3", (char*)"pe1", (char*)"pe2", (char*)"lc" -}; - -static char* freg_names[] = { - (char*)"unused", (char*)"f1", (char*)"f2", (char*)"f3", - (char*)"f4", (char*)"f5", (char*)"f6" -}; - -#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE) #ifdef _WIN64 # define SLJIT_PRINT_D "I64" #else @@ -680,69 +857,106 @@ static char* freg_names[] = { # define SLJIT_PRINT_D "" #endif -#define sljit_verbose_param(p, i) \ - if ((p) & SLJIT_IMM) \ - fprintf(compiler->verbose, "#%" SLJIT_PRINT_D "d", (i)); \ - else if ((p) & SLJIT_MEM) { \ - if ((p) & 0xf) { \ - if (i) { \ - if (((p) >> 4) & 0xf) \ - fprintf(compiler->verbose, "[%s + %s * %d]", reg_names[(p) & 0xF], reg_names[((p) >> 4)& 0xF], 1 << (i)); \ - else \ - fprintf(compiler->verbose, "[%s + #%" SLJIT_PRINT_D "d]", reg_names[(p) & 0xF], (i)); \ - } \ - else { \ - if (((p) >> 4) & 0xf) \ - fprintf(compiler->verbose, "[%s + %s]", reg_names[(p) & 0xF], reg_names[((p) >> 4)& 0xF]); \ - else \ - fprintf(compiler->verbose, "[%s]", reg_names[(p) & 0xF]); \ - } \ - } \ - else \ - fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i)); \ - } else \ - fprintf(compiler->verbose, "%s", reg_names[p]); -#define sljit_verbose_fparam(p, i) \ - if ((p) & SLJIT_MEM) { \ - if ((p) & 0xf) { \ - if (i) { \ - if (((p) >> 4) & 0xf) \ - fprintf(compiler->verbose, "[%s + %s * %d]", reg_names[(p) & 0xF], 
reg_names[((p) >> 4)& 0xF], 1 << (i)); \ - else \ - fprintf(compiler->verbose, "[%s + #%" SLJIT_PRINT_D "d]", reg_names[(p) & 0xF], (i)); \ - } \ - else { \ - if (((p) >> 4) & 0xF) \ - fprintf(compiler->verbose, "[%s + %s]", reg_names[(p) & 0xF], reg_names[((p) >> 4)& 0xF]); \ - else \ - fprintf(compiler->verbose, "[%s]", reg_names[(p) & 0xF]); \ - } \ - } \ - else \ - fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i)); \ - } else \ - fprintf(compiler->verbose, "%s", freg_names[p]); - -static SLJIT_CONST char* op_names[] = { - /* op0 */ - (char*)"breakpoint", (char*)"nop", - (char*)"umul", (char*)"smul", (char*)"udiv", (char*)"sdiv", - /* op1 */ - (char*)"mov", (char*)"mov.ub", (char*)"mov.sb", (char*)"mov.uh", - (char*)"mov.sh", (char*)"mov.ui", (char*)"mov.si", (char*)"mov.p", - (char*)"movu", (char*)"movu.ub", (char*)"movu.sb", (char*)"movu.uh", - (char*)"movu.sh", (char*)"movu.ui", (char*)"movu.si", (char*)"movu.p", +static void sljit_verbose_reg(struct sljit_compiler *compiler, sljit_s32 r) +{ + if (r < (SLJIT_R0 + compiler->scratches)) + fprintf(compiler->verbose, "r%d", r - SLJIT_R0); + else if (r != SLJIT_SP) + fprintf(compiler->verbose, "s%d", SLJIT_NUMBER_OF_REGISTERS - r); + else + fprintf(compiler->verbose, "sp"); +} + +static void sljit_verbose_freg(struct sljit_compiler *compiler, sljit_s32 r) +{ + if (r < (SLJIT_FR0 + compiler->fscratches)) + fprintf(compiler->verbose, "fr%d", r - SLJIT_FR0); + else + fprintf(compiler->verbose, "fs%d", SLJIT_NUMBER_OF_FLOAT_REGISTERS - r); +} + +static void sljit_verbose_param(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i) +{ + if ((p) & SLJIT_IMM) + fprintf(compiler->verbose, "#%" SLJIT_PRINT_D "d", (i)); + else if ((p) & SLJIT_MEM) { + if ((p) & REG_MASK) { + fputc('[', compiler->verbose); + sljit_verbose_reg(compiler, (p) & REG_MASK); + if ((p) & OFFS_REG_MASK) { + fprintf(compiler->verbose, " + "); + sljit_verbose_reg(compiler, OFFS_REG(p)); + if (i) + fprintf(compiler->verbose, " * %d", 1 << (i)); + } + else if (i) + fprintf(compiler->verbose, " + %" SLJIT_PRINT_D "d", (i)); + fputc(']', compiler->verbose); + } + else + fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i)); + } else if (p) + sljit_verbose_reg(compiler, p); + else + fprintf(compiler->verbose, "unused"); +} + +static void sljit_verbose_fparam(struct sljit_compiler *compiler, sljit_s32 p, sljit_sw i) +{ + if ((p) & SLJIT_MEM) { + if ((p) & REG_MASK) { + fputc('[', compiler->verbose); + sljit_verbose_reg(compiler, (p) & REG_MASK); + if ((p) & OFFS_REG_MASK) { + fprintf(compiler->verbose, " + "); + sljit_verbose_reg(compiler, OFFS_REG(p)); + if (i) + fprintf(compiler->verbose, "%d", 1 << (i)); + } + else if (i) + fprintf(compiler->verbose, " + %" SLJIT_PRINT_D "d", (i)); + fputc(']', compiler->verbose); + } + else + fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i)); + } + else + sljit_verbose_freg(compiler, p); +} + +static const char* op0_names[] = { + (char*)"breakpoint", (char*)"nop", (char*)"lmul.uw", (char*)"lmul.sw", + (char*)"divmod.u", (char*)"divmod.s", (char*)"div.u", (char*)"div.s" +}; + +static const char* op1_names[] = { + (char*)"", (char*)".u8", (char*)".s8", (char*)".u16", + (char*)".s16", (char*)".u32", (char*)".s32", (char*)".p", + (char*)"", (char*)".u8", (char*)".s8", (char*)".u16", + (char*)".s16", (char*)".u32", (char*)".s32", (char*)".p", (char*)"not", (char*)"neg", (char*)"clz", - /* op2 */ +}; + +static const char* op2_names[] = { (char*)"add", (char*)"addc", (char*)"sub", (char*)"subc", (char*)"mul", (char*)"and", 
(char*)"or", (char*)"xor", (char*)"shl", (char*)"lshr", (char*)"ashr", - /* fop1 */ - (char*)"cmp", (char*)"mov", (char*)"neg", (char*)"abs", - /* fop2 */ +}; + +static const char* fop1_names[] = { + (char*)"mov", (char*)"conv", (char*)"conv", (char*)"conv", + (char*)"conv", (char*)"conv", (char*)"cmp", (char*)"neg", + (char*)"abs", +}; + +static const char* fop2_names[] = { (char*)"add", (char*)"sub", (char*)"mul", (char*)"div" }; +#define JUMP_POSTFIX(type) \ + ((type & 0xff) <= SLJIT_MUL_NOT_OVERFLOW ? ((type & SLJIT_I32_OP) ? "32" : "") \ + : ((type & 0xff) <= SLJIT_ORDERED_F64 ? ((type & SLJIT_F32_OP) ? ".f32" : ".f64") : "")) + static char* jump_names[] = { (char*)"equal", (char*)"not_equal", (char*)"less", (char*)"greater_equal", @@ -751,544 +965,1033 @@ static char* jump_names[] = { (char*)"sig_greater", (char*)"sig_less_equal", (char*)"overflow", (char*)"not_overflow", (char*)"mul_overflow", (char*)"mul_not_overflow", - (char*)"float_equal", (char*)"float_not_equal", - (char*)"float_less", (char*)"float_greater_equal", - (char*)"float_greater", (char*)"float_less_equal", - (char*)"float_unordered", (char*)"float_ordered", + (char*)"carry", (char*)"", + (char*)"equal", (char*)"not_equal", + (char*)"less", (char*)"greater_equal", + (char*)"greater", (char*)"less_equal", + (char*)"unordered", (char*)"ordered", (char*)"jump", (char*)"fast_call", - (char*)"call0", (char*)"call1", (char*)"call2", (char*)"call3" + (char*)"call", (char*)"call.cdecl" }; -#endif +static char* call_arg_names[] = { + (char*)"void", (char*)"sw", (char*)"uw", (char*)"s32", (char*)"u32", (char*)"f32", (char*)"f64" +}; + +#endif /* SLJIT_VERBOSE */ /* --------------------------------------------------------------------- */ /* Arch dependent */ /* --------------------------------------------------------------------- */ -static SLJIT_INLINE void check_sljit_generate_code(struct sljit_compiler *compiler) +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \ + || (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_generate_code(struct sljit_compiler *compiler) { -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) struct sljit_jump *jump; #endif - /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); - SLJIT_ASSERT(compiler->size > 0); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(compiler->size > 0); jump = compiler->jumps; while (jump) { /* All jumps have target. */ - SLJIT_ASSERT(jump->flags & (JUMP_LABEL | JUMP_ADDR)); + CHECK_ARGUMENT(jump->flags & (JUMP_LABEL | JUMP_ADDR)); jump = jump->next; } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - /* If debug and verbose are disabled, all arguments are unused. 
*/ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + sljit_s32 types, arg_count, curr_type; +#endif + SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(args); - SLJIT_UNUSED_ARG(scratches); - SLJIT_UNUSED_ARG(saveds); - SLJIT_UNUSED_ARG(local_size); - SLJIT_ASSERT(args >= 0 && args <= 3); - SLJIT_ASSERT(scratches >= 0 && scratches <= SLJIT_NO_TMP_REGISTERS); - SLJIT_ASSERT(saveds >= 0 && saveds <= SLJIT_NO_GEN_REGISTERS); - SLJIT_ASSERT(args <= saveds); - SLJIT_ASSERT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(!(options & ~SLJIT_F64_ALIGNMENT)); + CHECK_ARGUMENT(scratches >= 0 && scratches <= SLJIT_NUMBER_OF_REGISTERS); + CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_REGISTERS); + CHECK_ARGUMENT(scratches + saveds <= SLJIT_NUMBER_OF_REGISTERS); + CHECK_ARGUMENT(fscratches >= 0 && fscratches <= SLJIT_NUMBER_OF_FLOAT_REGISTERS); + CHECK_ARGUMENT(fsaveds >= 0 && fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS); + CHECK_ARGUMENT(fscratches + fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS); + CHECK_ARGUMENT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE); + CHECK_ARGUMENT((arg_types & SLJIT_DEF_MASK) == 0); + + types = (arg_types >> SLJIT_DEF_SHIFT); + arg_count = 0; + while (types != 0 && arg_count < 3) { + curr_type = (types & SLJIT_DEF_MASK); + CHECK_ARGUMENT(curr_type == SLJIT_ARG_TYPE_SW || curr_type == SLJIT_ARG_TYPE_UW); + arg_count++; + types >>= SLJIT_DEF_SHIFT; + } + CHECK_ARGUMENT(arg_count <= saveds && types == 0); + + compiler->last_flags = 0; +#endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) - if (SLJIT_UNLIKELY(!!compiler->verbose)) - fprintf(compiler->verbose, " enter args=%d scratches=%d saveds=%d local_size=%d\n", args, scratches, saveds, local_size); + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " enter options:%s args[", (options & SLJIT_F64_ALIGNMENT) ? "f64_align" : ""); + + arg_types >>= SLJIT_DEF_SHIFT; + while (arg_types) { + fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]); + arg_types >>= SLJIT_DEF_SHIFT; + if (arg_types) + fprintf(compiler->verbose, ","); + } + + fprintf(compiler->verbose, "] scratches:%d saveds:%d fscratches:%d fsaveds:%d local_size:%d\n", + scratches, saveds, fscratches, fsaveds, local_size); + } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - /* If debug and verbose are disabled, all arguments are unused. 
*/ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + sljit_s32 types, arg_count, curr_type; +#endif + SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(args); - SLJIT_UNUSED_ARG(scratches); - SLJIT_UNUSED_ARG(saveds); - SLJIT_UNUSED_ARG(local_size); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) - if (SLJIT_UNLIKELY(compiler->skip_checks)) { - compiler->skip_checks = 0; - return; +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(!(options & ~SLJIT_F64_ALIGNMENT)); + CHECK_ARGUMENT(scratches >= 0 && scratches <= SLJIT_NUMBER_OF_REGISTERS); + CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_REGISTERS); + CHECK_ARGUMENT(scratches + saveds <= SLJIT_NUMBER_OF_REGISTERS); + CHECK_ARGUMENT(fscratches >= 0 && fscratches <= SLJIT_NUMBER_OF_FLOAT_REGISTERS); + CHECK_ARGUMENT(fsaveds >= 0 && fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS); + CHECK_ARGUMENT(fscratches + fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS); + CHECK_ARGUMENT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE); + + types = (arg_types >> SLJIT_DEF_SHIFT); + arg_count = 0; + while (types != 0 && arg_count < 3) { + curr_type = (types & SLJIT_DEF_MASK); + CHECK_ARGUMENT(curr_type == SLJIT_ARG_TYPE_SW || curr_type == SLJIT_ARG_TYPE_UW); + arg_count++; + types >>= SLJIT_DEF_SHIFT; } -#endif + CHECK_ARGUMENT(arg_count <= saveds && types == 0); - SLJIT_ASSERT(args >= 0 && args <= 3); - SLJIT_ASSERT(scratches >= 0 && scratches <= SLJIT_NO_TMP_REGISTERS); - SLJIT_ASSERT(saveds >= 0 && saveds <= SLJIT_NO_GEN_REGISTERS); - SLJIT_ASSERT(args <= saveds); - SLJIT_ASSERT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE); + compiler->last_flags = 0; +#endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) - if (SLJIT_UNLIKELY(!!compiler->verbose)) - fprintf(compiler->verbose, " set_context args=%d scratches=%d saveds=%d local_size=%d\n", args, scratches, saveds, local_size); + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " set_context options:%s args[", (options & SLJIT_F64_ALIGNMENT) ? "f64_align" : ""); + + arg_types >>= SLJIT_DEF_SHIFT; + while (arg_types) { + fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]); + arg_types >>= SLJIT_DEF_SHIFT; + if (arg_types) + fprintf(compiler->verbose, ","); + } + + fprintf(compiler->verbose, "] scratches:%d saveds:%d fscratches:%d fsaveds:%d local_size:%d\n", + scratches, saveds, fscratches, fsaveds, local_size); + } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { - /* If debug and verbose are disabled, all arguments are unused. 
*/ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(op); - SLJIT_UNUSED_ARG(src); - SLJIT_UNUSED_ARG(srcw); - -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(compiler->scratches >= 0); if (op != SLJIT_UNUSED) { - SLJIT_ASSERT(op >= SLJIT_MOV && op <= SLJIT_MOV_P); + CHECK_ARGUMENT(op >= SLJIT_MOV && op <= SLJIT_MOV_P); FUNCTION_CHECK_SRC(src, srcw); } else - SLJIT_ASSERT(src == 0 && srcw == 0); + CHECK_ARGUMENT(src == 0 && srcw == 0); + compiler->last_flags = 0; #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { if (op == SLJIT_UNUSED) fprintf(compiler->verbose, " return\n"); else { - fprintf(compiler->verbose, " return %s ", op_names[op]); - sljit_verbose_param(src, srcw); + fprintf(compiler->verbose, " return%s ", op1_names[op - SLJIT_OP1_BASE]); + sljit_verbose_param(compiler, src, srcw); fprintf(compiler->verbose, "\n"); } } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) { - /* If debug and verbose are disabled, all arguments are unused. */ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(dst); - SLJIT_UNUSED_ARG(dstw); - -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - FUNCTION_CHECK_DST(dst, dstw); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + FUNCTION_CHECK_DST(dst, dstw, 0); + compiler->last_flags = 0; #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { fprintf(compiler->verbose, " fast_enter "); - sljit_verbose_param(dst, dstw); + sljit_verbose_param(compiler, dst, dstw); fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) { - /* If debug and verbose are disabled, all arguments are unused. */ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(src); - SLJIT_UNUSED_ARG(srcw); - -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) FUNCTION_CHECK_SRC(src, srcw); + CHECK_ARGUMENT(src != SLJIT_IMM); + compiler->last_flags = 0; #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { fprintf(compiler->verbose, " fast_return "); - sljit_verbose_param(src, srcw); + sljit_verbose_param(compiler, src, srcw); fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) { - /* If debug and verbose are disabled, all arguments are unused. 
*/ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(op); - - SLJIT_ASSERT((op >= SLJIT_BREAKPOINT && op <= SLJIT_SMUL) - || ((op & ~SLJIT_INT_OP) >= SLJIT_UDIV && (op & ~SLJIT_INT_OP) <= SLJIT_SDIV)); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT((op >= SLJIT_BREAKPOINT && op <= SLJIT_LMUL_SW) + || ((op & ~SLJIT_I32_OP) >= SLJIT_DIVMOD_UW && (op & ~SLJIT_I32_OP) <= SLJIT_DIV_SW)); + CHECK_ARGUMENT(op < SLJIT_LMUL_UW || compiler->scratches >= 2); + if (op >= SLJIT_LMUL_UW) + compiler->last_flags = 0; +#endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) - fprintf(compiler->verbose, " %s%s\n", !(op & SLJIT_INT_OP) ? "" : "i", op_names[GET_OPCODE(op)]); -#endif -} + { + fprintf(compiler->verbose, " %s", op0_names[GET_OPCODE(op) - SLJIT_OP0_BASE]); + if (GET_OPCODE(op) >= SLJIT_DIVMOD_UW) { + fprintf(compiler->verbose, (op & SLJIT_I32_OP) ? "32" : "w"); + } + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} -static SLJIT_INLINE void check_sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - /* If debug and verbose are disabled, all arguments are unused. */ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(op); - SLJIT_UNUSED_ARG(dst); - SLJIT_UNUSED_ARG(dstw); - SLJIT_UNUSED_ARG(src); - SLJIT_UNUSED_ARG(srcw); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) if (SLJIT_UNLIKELY(compiler->skip_checks)) { compiler->skip_checks = 0; - return; + CHECK_RETURN_OK; + } + +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV && GET_OPCODE(op) <= SLJIT_CLZ); + + switch (GET_OPCODE(op)) { + case SLJIT_NOT: + /* Only SLJIT_I32_OP and SLJIT_SET_Z are allowed. */ + CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK)); + break; + case SLJIT_NEG: + CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK) + || GET_FLAG_TYPE(op) == SLJIT_OVERFLOW); + break; + case SLJIT_MOV: + case SLJIT_MOV_U32: + case SLJIT_MOV_P: + /* Nothing allowed */ + CHECK_ARGUMENT(!(op & (SLJIT_I32_OP | SLJIT_SET_Z | VARIABLE_FLAG_MASK))); + break; + default: + /* Only SLJIT_I32_OP is allowed. */ + CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK))); + break; } -#endif - SLJIT_ASSERT(GET_OPCODE(op) >= SLJIT_MOV && GET_OPCODE(op) <= SLJIT_CLZ); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - FUNCTION_CHECK_OP(); + FUNCTION_CHECK_DST(dst, dstw, 1); FUNCTION_CHECK_SRC(src, srcw); - FUNCTION_CHECK_DST(dst, dstw); - FUNCTION_CHECK_OP1(); + + if (GET_OPCODE(op) >= SLJIT_NOT) { + CHECK_ARGUMENT(src != SLJIT_IMM); + compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_I32_OP | SLJIT_SET_Z)); + } #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " %s%s%s%s%s%s%s%s ", !(op & SLJIT_INT_OP) ? "" : "i", op_names[GET_OPCODE(op)], - !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_S) ? "" : ".s", !(op & SLJIT_SET_U) ? "" : ".u", - !(op & SLJIT_SET_O) ? "" : ".o", !(op & SLJIT_SET_C) ? "" : ".c", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k"); - sljit_verbose_param(dst, dstw); + if (GET_OPCODE(op) <= SLJIT_MOV_P) + { + fprintf(compiler->verbose, " mov%s%s ", !(op & SLJIT_I32_OP) ? "" : "32", + (op != SLJIT_MOV32) ? 
op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : ""); + } + else + { + fprintf(compiler->verbose, " %s%s%s%s%s ", op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE], !(op & SLJIT_I32_OP) ? "" : "32", + !(op & SLJIT_SET_Z) ? "" : ".z", !(op & VARIABLE_FLAG_MASK) ? "" : ".", + !(op & VARIABLE_FLAG_MASK) ? "" : jump_names[GET_FLAG_TYPE(op)]); + } + + sljit_verbose_param(compiler, dst, dstw); fprintf(compiler->verbose, ", "); - sljit_verbose_param(src, srcw); + sljit_verbose_param(compiler, src, srcw); fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - /* If debug and verbose are disabled, all arguments are unused. */ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(op); - SLJIT_UNUSED_ARG(dst); - SLJIT_UNUSED_ARG(dstw); - SLJIT_UNUSED_ARG(src1); - SLJIT_UNUSED_ARG(src1w); - SLJIT_UNUSED_ARG(src2); - SLJIT_UNUSED_ARG(src2w); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) if (SLJIT_UNLIKELY(compiler->skip_checks)) { compiler->skip_checks = 0; - return; + CHECK_RETURN_OK; } -#endif - SLJIT_ASSERT(GET_OPCODE(op) >= SLJIT_ADD && GET_OPCODE(op) <= SLJIT_ASHR); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - FUNCTION_CHECK_OP(); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_ADD && GET_OPCODE(op) <= SLJIT_ASHR); + + switch (GET_OPCODE(op)) { + case SLJIT_AND: + case SLJIT_OR: + case SLJIT_XOR: + case SLJIT_SHL: + case SLJIT_LSHR: + case SLJIT_ASHR: + CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK)); + break; + case SLJIT_MUL: + CHECK_ARGUMENT(!(op & SLJIT_SET_Z)); + CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK) + || GET_FLAG_TYPE(op) == SLJIT_MUL_OVERFLOW); + break; + case SLJIT_ADD: + CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK) + || GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY) + || GET_FLAG_TYPE(op) == SLJIT_OVERFLOW); + break; + case SLJIT_SUB: + CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK) + || (GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_OVERFLOW) + || GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY)); + break; + case SLJIT_ADDC: + case SLJIT_SUBC: + CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK) + || GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY)); + CHECK_ARGUMENT((compiler->last_flags & 0xff) == GET_FLAG_TYPE(SLJIT_SET_CARRY)); + CHECK_ARGUMENT((op & SLJIT_I32_OP) == (compiler->last_flags & SLJIT_I32_OP)); + break; + default: + SLJIT_UNREACHABLE(); + break; + } + + FUNCTION_CHECK_DST(dst, dstw, 1); FUNCTION_CHECK_SRC(src1, src1w); FUNCTION_CHECK_SRC(src2, src2w); - FUNCTION_CHECK_DST(dst, dstw); + compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_I32_OP | SLJIT_SET_Z)); #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " %s%s%s%s%s%s%s%s ", !(op & SLJIT_INT_OP) ? "" : "i", op_names[GET_OPCODE(op)], - !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_S) ? "" : ".s", !(op & SLJIT_SET_U) ? "" : ".u", - !(op & SLJIT_SET_O) ? "" : ".o", !(op & SLJIT_SET_C) ? "" : ".c", !(op & SLJIT_KEEP_FLAGS) ? 
"" : ".k"); - sljit_verbose_param(dst, dstw); + fprintf(compiler->verbose, " %s%s%s%s%s ", op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE], !(op & SLJIT_I32_OP) ? "" : "32", + !(op & SLJIT_SET_Z) ? "" : ".z", !(op & VARIABLE_FLAG_MASK) ? "" : ".", + !(op & VARIABLE_FLAG_MASK) ? "" : jump_names[GET_FLAG_TYPE(op)]); + sljit_verbose_param(compiler, dst, dstw); fprintf(compiler->verbose, ", "); - sljit_verbose_param(src1, src1w); + sljit_verbose_param(compiler, src1, src1w); fprintf(compiler->verbose, ", "); - sljit_verbose_param(src2, src2w); + sljit_verbose_param(compiler, src2, src2w); fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_get_register_index(sljit_si reg) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_get_register_index(sljit_s32 reg) { SLJIT_UNUSED_ARG(reg); - SLJIT_ASSERT(reg > 0 && reg <= SLJIT_NO_REGISTERS); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(reg > 0 && reg <= SLJIT_NUMBER_OF_REGISTERS); +#endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_get_float_register_index(sljit_si reg) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_get_float_register_index(sljit_s32 reg) { SLJIT_UNUSED_ARG(reg); - SLJIT_ASSERT(reg > 0 && reg <= SLJIT_NO_FLOAT_REGISTERS); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(reg > 0 && reg <= SLJIT_NUMBER_OF_FLOAT_REGISTERS); +#endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_op_custom(struct sljit_compiler *compiler, - void *instruction, sljit_si size) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_s32 size) { +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + int i; +#endif + SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(instruction); - SLJIT_UNUSED_ARG(size); - SLJIT_ASSERT(instruction); + +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(instruction); + +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) + CHECK_ARGUMENT(size > 0 && size < 16); +#elif (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) + CHECK_ARGUMENT((size == 2 && (((sljit_sw)instruction) & 0x1) == 0) + || (size == 4 && (((sljit_sw)instruction) & 0x3) == 0)); +#else + CHECK_ARGUMENT(size == 4 && (((sljit_sw)instruction) & 0x3) == 0); +#endif + + compiler->last_flags = 0; +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " op_custom"); + for (i = 0; i < size; i++) + fprintf(compiler->verbose, " 0x%x", ((sljit_u8*)instruction)[i]); + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - /* If debug and verbose are disabled, all arguments are unused. 
*/ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(op); - SLJIT_UNUSED_ARG(dst); - SLJIT_UNUSED_ARG(dstw); - SLJIT_UNUSED_ARG(src); - SLJIT_UNUSED_ARG(srcw); + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + CHECK_RETURN_OK; + } + +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU)); + CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV_F64 && GET_OPCODE(op) <= SLJIT_ABS_F64); + CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK))); + FUNCTION_FCHECK(src, srcw); + FUNCTION_FCHECK(dst, dstw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) + fprintf(compiler->verbose, " %s%s ", fop1_names[SLJIT_CONV_F64_FROM_F32 - SLJIT_FOP1_BASE], + (op & SLJIT_F32_OP) ? ".f32.from.f64" : ".f64.from.f32"); + else + fprintf(compiler->verbose, " %s%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE], + (op & SLJIT_F32_OP) ? ".f32" : ".f64"); + + sljit_verbose_fparam(compiler, dst, dstw); + fprintf(compiler->verbose, ", "); + sljit_verbose_fparam(compiler, src, srcw); + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} + +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_I32_OP | SLJIT_SET_Z)); +#endif -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) if (SLJIT_UNLIKELY(compiler->skip_checks)) { compiler->skip_checks = 0; - return; + CHECK_RETURN_OK; + } + +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU)); + CHECK_ARGUMENT(GET_OPCODE(op) == SLJIT_CMP_F64); + CHECK_ARGUMENT(!(op & SLJIT_SET_Z)); + CHECK_ARGUMENT((op & VARIABLE_FLAG_MASK) + || (GET_FLAG_TYPE(op) >= SLJIT_EQUAL_F64 && GET_FLAG_TYPE(op) <= SLJIT_ORDERED_F64)); + FUNCTION_FCHECK(src1, src1w); + FUNCTION_FCHECK(src2, src2w); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %s%s", fop1_names[SLJIT_CMP_F64 - SLJIT_FOP1_BASE], (op & SLJIT_F32_OP) ? 
".f32" : ".f64"); + if (op & VARIABLE_FLAG_MASK) { + fprintf(compiler->verbose, ".%s_f", jump_names[GET_FLAG_TYPE(op)]); + } + fprintf(compiler->verbose, " "); + sljit_verbose_fparam(compiler, src1, src1w); + fprintf(compiler->verbose, ", "); + sljit_verbose_fparam(compiler, src2, src2w); + fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; +} - SLJIT_ASSERT(sljit_is_fpu_available()); - SLJIT_ASSERT(GET_OPCODE(op) >= SLJIT_CMPD && GET_OPCODE(op) <= SLJIT_ABSD); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - FUNCTION_CHECK_OP(); +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + CHECK_RETURN_OK; + } + +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU)); + CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONV_SW_FROM_F64 && GET_OPCODE(op) <= SLJIT_CONV_S32_FROM_F64); + CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK))); FUNCTION_FCHECK(src, srcw); - FUNCTION_FCHECK(dst, dstw); + FUNCTION_CHECK_DST(dst, dstw, 0); #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " %s%s%s%s ", op_names[GET_OPCODE(op)], (op & SLJIT_SINGLE_OP) ? "s" : "d", - !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_S) ? "" : ".s"); - sljit_verbose_fparam(dst, dstw); + fprintf(compiler->verbose, " %s%s.from%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE], + (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? ".s32" : ".sw", + (op & SLJIT_F32_OP) ? ".f32" : ".f64"); + sljit_verbose_param(compiler, dst, dstw); fprintf(compiler->verbose, ", "); - sljit_verbose_fparam(src, srcw); + sljit_verbose_fparam(compiler, src, srcw); fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - /* If debug and verbose are disabled, all arguments are unused. */ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(op); - SLJIT_UNUSED_ARG(dst); - SLJIT_UNUSED_ARG(dstw); - SLJIT_UNUSED_ARG(src1); - SLJIT_UNUSED_ARG(src1w); - SLJIT_UNUSED_ARG(src2); - SLJIT_UNUSED_ARG(src2w); + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + CHECK_RETURN_OK; + } - SLJIT_ASSERT(sljit_is_fpu_available()); - SLJIT_ASSERT(GET_OPCODE(op) >= SLJIT_ADDD && GET_OPCODE(op) <= SLJIT_DIVD); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - FUNCTION_CHECK_OP(); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU)); + CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONV_F64_FROM_SW && GET_OPCODE(op) <= SLJIT_CONV_F64_FROM_S32); + CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK))); + FUNCTION_CHECK_SRC(src, srcw); + FUNCTION_FCHECK(dst, dstw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %s%s.from%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE], + (op & SLJIT_F32_OP) ? ".f32" : ".f64", + (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? 
".s32" : ".sw"); + sljit_verbose_fparam(compiler, dst, dstw); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(compiler, src, srcw); + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} + +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU)); + CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_ADD_F64 && GET_OPCODE(op) <= SLJIT_DIV_F64); + CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK))); FUNCTION_FCHECK(src1, src1w); FUNCTION_FCHECK(src2, src2w); FUNCTION_FCHECK(dst, dstw); #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " %s%s ", op_names[GET_OPCODE(op)], (op & SLJIT_SINGLE_OP) ? "s" : "d"); - sljit_verbose_fparam(dst, dstw); + fprintf(compiler->verbose, " %s%s ", fop2_names[GET_OPCODE(op) - SLJIT_FOP2_BASE], (op & SLJIT_F32_OP) ? ".f32" : ".f64"); + sljit_verbose_fparam(compiler, dst, dstw); fprintf(compiler->verbose, ", "); - sljit_verbose_fparam(src1, src1w); + sljit_verbose_fparam(compiler, src1, src1w); fprintf(compiler->verbose, ", "); - sljit_verbose_fparam(src2, src2w); + sljit_verbose_fparam(compiler, src2, src2w); fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_label(struct sljit_compiler *compiler) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_label(struct sljit_compiler *compiler) { - /* If debug and verbose are disabled, all arguments are unused. */ SLJIT_UNUSED_ARG(compiler); + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + CHECK_RETURN_OK; + } + +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->last_flags = 0; +#endif + #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) fprintf(compiler->verbose, "label:\n"); #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) { - /* If debug and verbose are disabled, all arguments are unused. 
*/ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(type); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) if (SLJIT_UNLIKELY(compiler->skip_checks)) { compiler->skip_checks = 0; - return; + CHECK_RETURN_OK; } -#endif - SLJIT_ASSERT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP))); - SLJIT_ASSERT((type & 0xff) >= SLJIT_C_EQUAL && (type & 0xff) <= SLJIT_CALL3); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_I32_OP))); + CHECK_ARGUMENT((type & 0xff) != GET_FLAG_TYPE(SLJIT_SET_CARRY) && (type & 0xff) != (GET_FLAG_TYPE(SLJIT_SET_CARRY) + 1)); + CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_FAST_CALL); + CHECK_ARGUMENT((type & 0xff) < SLJIT_JUMP || !(type & SLJIT_I32_OP)); + + if ((type & 0xff) < SLJIT_JUMP) { + if ((type & 0xff) <= SLJIT_NOT_ZERO) + CHECK_ARGUMENT(compiler->last_flags & SLJIT_SET_Z); + else + CHECK_ARGUMENT((type & 0xff) == (compiler->last_flags & 0xff) + || ((type & 0xff) == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW) + || ((type & 0xff) == SLJIT_MUL_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_MUL_OVERFLOW)); + CHECK_ARGUMENT((type & SLJIT_I32_OP) == (compiler->last_flags & SLJIT_I32_OP)); + } +#endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) - fprintf(compiler->verbose, " jump%s.%s\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", jump_names[type & 0xff]); + fprintf(compiler->verbose, " jump%s %s%s\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", + jump_names[type & 0xff], JUMP_POSTFIX(type)); +#endif + CHECK_RETURN_OK; +} + +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + sljit_s32 i, types, curr_type, scratches, fscratches; + + CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP))); + CHECK_ARGUMENT((type & 0xff) == SLJIT_CALL || (type & 0xff) == SLJIT_CALL_CDECL); + + types = arg_types; + scratches = 0; + fscratches = 0; + for (i = 0; i < 5; i++) { + curr_type = (types & SLJIT_DEF_MASK); + CHECK_ARGUMENT(curr_type <= SLJIT_ARG_TYPE_F64); + if (i > 0) { + if (curr_type == 0) { + break; + } + if (curr_type >= SLJIT_ARG_TYPE_F32) + fscratches++; + else + scratches++; + } else { + if (curr_type >= SLJIT_ARG_TYPE_F32) { + CHECK_ARGUMENT(compiler->fscratches > 0); + } else if (curr_type >= SLJIT_ARG_TYPE_SW) { + CHECK_ARGUMENT(compiler->scratches > 0); + } + } + types >>= SLJIT_DEF_SHIFT; + } + CHECK_ARGUMENT(compiler->scratches >= scratches); + CHECK_ARGUMENT(compiler->fscratches >= fscratches); + CHECK_ARGUMENT(types == 0); #endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %s%s ret[%s", jump_names[type & 0xff], + !(type & SLJIT_REWRITABLE_JUMP) ? 
"" : ".r", call_arg_names[arg_types & SLJIT_DEF_MASK]); + + arg_types >>= SLJIT_DEF_SHIFT; + if (arg_types) { + fprintf(compiler->verbose, "], args["); + do { + fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]); + arg_types >>= SLJIT_DEF_SHIFT; + if (arg_types) + fprintf(compiler->verbose, ","); + } while (arg_types); + } + fprintf(compiler->verbose, "]\n"); + } +#endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_cmp(struct sljit_compiler *compiler, sljit_si type, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(type); - SLJIT_UNUSED_ARG(src1); - SLJIT_UNUSED_ARG(src1w); - SLJIT_UNUSED_ARG(src2); - SLJIT_UNUSED_ARG(src2w); - - SLJIT_ASSERT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_INT_OP))); - SLJIT_ASSERT((type & 0xff) >= SLJIT_C_EQUAL && (type & 0xff) <= SLJIT_C_SIG_LESS_EQUAL); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_I32_OP))); + CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_SIG_LESS_EQUAL); FUNCTION_CHECK_SRC(src1, src1w); FUNCTION_CHECK_SRC(src2, src2w); + compiler->last_flags = 0; #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " %scmp%s.%s ", !(type & SLJIT_INT_OP) ? "" : "i", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", jump_names[type & 0xff]); - sljit_verbose_param(src1, src1w); + fprintf(compiler->verbose, " cmp%s %s%s, ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", + jump_names[type & 0xff], (type & SLJIT_I32_OP) ? "32" : ""); + sljit_verbose_param(compiler, src1, src1w); fprintf(compiler->verbose, ", "); - sljit_verbose_param(src2, src2w); + sljit_verbose_param(compiler, src2, src2w); fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_si type, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(type); - SLJIT_UNUSED_ARG(src1); - SLJIT_UNUSED_ARG(src1w); - SLJIT_UNUSED_ARG(src2); - SLJIT_UNUSED_ARG(src2w); - - SLJIT_ASSERT(sljit_is_fpu_available()); - SLJIT_ASSERT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_SINGLE_OP))); - SLJIT_ASSERT((type & 0xff) >= SLJIT_C_FLOAT_EQUAL && (type & 0xff) <= SLJIT_C_FLOAT_ORDERED); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU)); + CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_F32_OP))); + CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL_F64 && (type & 0xff) <= SLJIT_ORDERED_F64); FUNCTION_FCHECK(src1, src1w); FUNCTION_FCHECK(src2, src2w); + compiler->last_flags = 0; #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " %scmp%s.%s ", (type & SLJIT_SINGLE_OP) ? "s" : "d", - !(type & SLJIT_REWRITABLE_JUMP) ? 
"" : ".r", jump_names[type & 0xff]); - sljit_verbose_fparam(src1, src1w); + fprintf(compiler->verbose, " fcmp%s %s%s, ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", + jump_names[type & 0xff], (type & SLJIT_F32_OP) ? ".f32" : ".f64"); + sljit_verbose_fparam(compiler, src1, src1w); fprintf(compiler->verbose, ", "); - sljit_verbose_fparam(src2, src2w); + sljit_verbose_fparam(compiler, src2, src2w); fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 src, sljit_sw srcw) { - /* If debug and verbose are disabled, all arguments are unused. */ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(type); - SLJIT_UNUSED_ARG(src); - SLJIT_UNUSED_ARG(srcw); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) if (SLJIT_UNLIKELY(compiler->skip_checks)) { compiler->skip_checks = 0; - return; + CHECK_RETURN_OK; } -#endif - SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL3); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(type >= SLJIT_JUMP && type <= SLJIT_FAST_CALL); FUNCTION_CHECK_SRC(src, srcw); #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { fprintf(compiler->verbose, " ijump.%s ", jump_names[type]); - sljit_verbose_param(src, srcw); + sljit_verbose_param(compiler, src, srcw); fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw, - sljit_si type) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) { - /* If debug and verbose are disabled, all arguments are unused. 
*/ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(op); - SLJIT_UNUSED_ARG(dst); - SLJIT_UNUSED_ARG(dstw); - SLJIT_UNUSED_ARG(src); - SLJIT_UNUSED_ARG(srcw); - SLJIT_UNUSED_ARG(type); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + sljit_s32 i, types, curr_type, scratches, fscratches; - SLJIT_ASSERT(type >= SLJIT_C_EQUAL && type < SLJIT_JUMP); - SLJIT_ASSERT(op == SLJIT_MOV || GET_OPCODE(op) == SLJIT_MOV_UI || GET_OPCODE(op) == SLJIT_MOV_SI - || (GET_OPCODE(op) >= SLJIT_AND && GET_OPCODE(op) <= SLJIT_XOR)); - SLJIT_ASSERT((op & (SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C)) == 0); - SLJIT_ASSERT((op & (SLJIT_SET_E | SLJIT_KEEP_FLAGS)) != (SLJIT_SET_E | SLJIT_KEEP_FLAGS)); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - if (GET_OPCODE(op) < SLJIT_ADD) { - SLJIT_ASSERT(src == SLJIT_UNUSED && srcw == 0); - } else { - SLJIT_ASSERT(src == dst && srcw == dstw); + CHECK_ARGUMENT(type == SLJIT_CALL || type == SLJIT_CALL_CDECL); + FUNCTION_CHECK_SRC(src, srcw); + + types = arg_types; + scratches = 0; + fscratches = 0; + for (i = 0; i < 5; i++) { + curr_type = (types & SLJIT_DEF_MASK); + CHECK_ARGUMENT(curr_type <= SLJIT_ARG_TYPE_F64); + if (i > 0) { + if (curr_type == 0) { + break; + } + if (curr_type >= SLJIT_ARG_TYPE_F32) + fscratches++; + else + scratches++; + } else { + if (curr_type >= SLJIT_ARG_TYPE_F32) { + CHECK_ARGUMENT(compiler->fscratches > 0); + } else if (curr_type >= SLJIT_ARG_TYPE_SW) { + CHECK_ARGUMENT(compiler->scratches > 0); + } + } + types >>= SLJIT_DEF_SHIFT; } - FUNCTION_CHECK_DST(dst, dstw); + CHECK_ARGUMENT(compiler->scratches >= scratches); + CHECK_ARGUMENT(compiler->fscratches >= fscratches); + CHECK_ARGUMENT(types == 0); #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " %sflags.%s%s%s ", !(op & SLJIT_INT_OP) ? "" : "i", - op_names[GET_OPCODE(op)], !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k"); - sljit_verbose_param(dst, dstw); - if (src != SLJIT_UNUSED) { - fprintf(compiler->verbose, ", "); - sljit_verbose_param(src, srcw); + fprintf(compiler->verbose, " i%s%s ret[%s", jump_names[type & 0xff], + !(type & SLJIT_REWRITABLE_JUMP) ? 
"" : ".r", call_arg_names[arg_types & SLJIT_DEF_MASK]); + + arg_types >>= SLJIT_DEF_SHIFT; + if (arg_types) { + fprintf(compiler->verbose, "], args["); + do { + fprintf(compiler->verbose, "%s", call_arg_names[arg_types & SLJIT_DEF_MASK]); + arg_types >>= SLJIT_DEF_SHIFT; + if (arg_types) + fprintf(compiler->verbose, ","); + } while (arg_types); } - fprintf(compiler->verbose, ", %s\n", jump_names[type]); + fprintf(compiler->verbose, "], "); + sljit_verbose_param(compiler, src, srcw); + fprintf(compiler->verbose, "\n"); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_get_local_base(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw offset) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 type) { - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(dst); - SLJIT_UNUSED_ARG(dstw); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_I32_OP))); + CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64); + CHECK_ARGUMENT((type & 0xff) != GET_FLAG_TYPE(SLJIT_SET_CARRY) && (type & 0xff) != (GET_FLAG_TYPE(SLJIT_SET_CARRY) + 1)); + CHECK_ARGUMENT(op == SLJIT_MOV || op == SLJIT_MOV32 + || (GET_OPCODE(op) >= SLJIT_AND && GET_OPCODE(op) <= SLJIT_XOR)); + CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK)); + + if ((type & 0xff) <= SLJIT_NOT_ZERO) + CHECK_ARGUMENT(compiler->last_flags & SLJIT_SET_Z); + else + CHECK_ARGUMENT((type & 0xff) == (compiler->last_flags & 0xff) + || ((type & 0xff) == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW) + || ((type & 0xff) == SLJIT_MUL_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_MUL_OVERFLOW)); + + FUNCTION_CHECK_DST(dst, dstw, 0); + + if (GET_OPCODE(op) >= SLJIT_ADD) + compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_I32_OP | SLJIT_SET_Z)); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " flags%s %s%s, ", + !(op & SLJIT_SET_Z) ? "" : ".z", + GET_OPCODE(op) < SLJIT_OP2_BASE ? "mov" : op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE], + GET_OPCODE(op) < SLJIT_OP2_BASE ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : ((op & SLJIT_I32_OP) ? 
"32" : "")); + sljit_verbose_param(compiler, dst, dstw); + fprintf(compiler->verbose, ", %s%s\n", jump_names[type & 0xff], JUMP_POSTFIX(type)); + } +#endif + CHECK_RETURN_OK; +} + +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 dst_reg, + sljit_s32 src, sljit_sw srcw) +{ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_I32_OP))); + CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64); + + CHECK_ARGUMENT(compiler->scratches != -1 && compiler->saveds != -1); + CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(dst_reg & ~SLJIT_I32_OP)); + if (src != SLJIT_IMM) { + CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(src)); + CHECK_ARGUMENT(srcw == 0); + } + + if ((type & 0xff) <= SLJIT_NOT_ZERO) + CHECK_ARGUMENT(compiler->last_flags & SLJIT_SET_Z); + else + CHECK_ARGUMENT((type & 0xff) == (compiler->last_flags & 0xff) + || ((type & 0xff) == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW) + || ((type & 0xff) == SLJIT_MUL_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_MUL_OVERFLOW)); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " cmov%s %s%s, ", + !(dst_reg & SLJIT_I32_OP) ? "" : "32", + jump_names[type & 0xff], JUMP_POSTFIX(type)); + sljit_verbose_reg(compiler, dst_reg & ~SLJIT_I32_OP); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(compiler, src, srcw); + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} + +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT((type & 0xff) >= SLJIT_MOV && (type & 0xff) <= SLJIT_MOV_P); + CHECK_ARGUMENT(!(type & SLJIT_I32_OP) || ((type & 0xff) != SLJIT_MOV && (type & 0xff) != SLJIT_MOV_U32 && (type & 0xff) != SLJIT_MOV_P)); + CHECK_ARGUMENT((type & SLJIT_MEM_PRE) || (type & SLJIT_MEM_POST)); + CHECK_ARGUMENT((type & (SLJIT_MEM_PRE | SLJIT_MEM_POST)) != (SLJIT_MEM_PRE | SLJIT_MEM_POST)); + CHECK_ARGUMENT((type & ~(0xff | SLJIT_I32_OP | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_PRE | SLJIT_MEM_POST)) == 0); + + FUNCTION_CHECK_SRC_MEM(mem, memw); + CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(reg)); + + CHECK_ARGUMENT((mem & REG_MASK) != SLJIT_UNUSED && (mem & REG_MASK) != reg); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (!(type & SLJIT_MEM_SUPP) && SLJIT_UNLIKELY(!!compiler->verbose)) { + if (sljit_emit_mem(compiler, type | SLJIT_MEM_SUPP, reg, mem, memw) == SLJIT_ERR_UNSUPPORTED) + fprintf(compiler->verbose, " //"); + + fprintf(compiler->verbose, " mem%s.%s%s%s ", + !(type & SLJIT_I32_OP) ? "" : "32", + (type & SLJIT_MEM_STORE) ? "st" : "ld", + op1_names[(type & 0xff) - SLJIT_OP1_BASE], + (type & SLJIT_MEM_PRE) ? 
".pre" : ".post"); + sljit_verbose_reg(compiler, reg); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(compiler, mem, memw); + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} + +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 freg, + sljit_s32 mem, sljit_sw memw) +{ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT((type & 0xff) == SLJIT_MOV_F64); + CHECK_ARGUMENT((type & SLJIT_MEM_PRE) || (type & SLJIT_MEM_POST)); + CHECK_ARGUMENT((type & (SLJIT_MEM_PRE | SLJIT_MEM_POST)) != (SLJIT_MEM_PRE | SLJIT_MEM_POST)); + CHECK_ARGUMENT((type & ~(0xff | SLJIT_I32_OP | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_PRE | SLJIT_MEM_POST)) == 0); + + FUNCTION_CHECK_SRC_MEM(mem, memw); + CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg)); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (!(type & SLJIT_MEM_SUPP) && SLJIT_UNLIKELY(!!compiler->verbose)) { + if (sljit_emit_fmem(compiler, type | SLJIT_MEM_SUPP, freg, mem, memw) == SLJIT_ERR_UNSUPPORTED) + fprintf(compiler->verbose, " //"); + + fprintf(compiler->verbose, " fmem.%s%s%s ", + (type & SLJIT_MEM_STORE) ? "st" : "ld", + !(type & SLJIT_I32_OP) ? ".f64" : ".f32", + (type & SLJIT_MEM_PRE) ? ".pre" : ".post"); + sljit_verbose_freg(compiler, freg); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(compiler, mem, memw); + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} + +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset) +{ + /* Any offset is allowed. */ SLJIT_UNUSED_ARG(offset); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - FUNCTION_CHECK_DST(dst, dstw); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + FUNCTION_CHECK_DST(dst, dstw, 0); #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { fprintf(compiler->verbose, " local_base "); - sljit_verbose_param(dst, dstw); + sljit_verbose_param(compiler, dst, dstw); fprintf(compiler->verbose, ", #%" SLJIT_PRINT_D "d\n", offset); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE void check_sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) { - /* If debug and verbose are disabled, all arguments are unused. 
*/ - SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(dst); - SLJIT_UNUSED_ARG(dstw); SLJIT_UNUSED_ARG(init_value); -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - FUNCTION_CHECK_DST(dst, dstw); +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + FUNCTION_CHECK_DST(dst, dstw, 0); #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { fprintf(compiler->verbose, " const "); - sljit_verbose_param(dst, dstw); + sljit_verbose_param(compiler, dst, dstw); fprintf(compiler->verbose, ", #%" SLJIT_PRINT_D "d\n", init_value); } #endif + CHECK_RETURN_OK; } -static SLJIT_INLINE sljit_si emit_mov_before_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + FUNCTION_CHECK_DST(dst, dstw, 0); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " put_label "); + sljit_verbose_param(compiler, dst, dstw); + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} + +#endif /* SLJIT_ARGUMENT_CHECKS || SLJIT_VERBOSE */ + +#define SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw) \ + SLJIT_COMPILE_ASSERT(!(SLJIT_CONV_SW_FROM_F64 & 0x1) && !(SLJIT_CONV_F64_FROM_SW & 0x1), \ + invalid_float_opcodes); \ + if (GET_OPCODE(op) >= SLJIT_CONV_SW_FROM_F64 && GET_OPCODE(op) <= SLJIT_CMP_F64) { \ + if (GET_OPCODE(op) == SLJIT_CMP_F64) { \ + CHECK(check_sljit_emit_fop1_cmp(compiler, op, dst, dstw, src, srcw)); \ + ADJUST_LOCAL_OFFSET(dst, dstw); \ + ADJUST_LOCAL_OFFSET(src, srcw); \ + return sljit_emit_fop1_cmp(compiler, op, dst, dstw, src, srcw); \ + } \ + if ((GET_OPCODE(op) | 0x1) == SLJIT_CONV_S32_FROM_F64) { \ + CHECK(check_sljit_emit_fop1_conv_sw_from_f64(compiler, op, dst, dstw, src, srcw)); \ + ADJUST_LOCAL_OFFSET(dst, dstw); \ + ADJUST_LOCAL_OFFSET(src, srcw); \ + return sljit_emit_fop1_conv_sw_from_f64(compiler, op, dst, dstw, src, srcw); \ + } \ + CHECK(check_sljit_emit_fop1_conv_f64_from_sw(compiler, op, dst, dstw, src, srcw)); \ + ADJUST_LOCAL_OFFSET(dst, dstw); \ + ADJUST_LOCAL_OFFSET(src, srcw); \ + return sljit_emit_fop1_conv_f64_from_sw(compiler, op, dst, dstw, src, srcw); \ + } \ + CHECK(check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw)); \ + ADJUST_LOCAL_OFFSET(dst, dstw); \ + ADJUST_LOCAL_OFFSET(src, srcw); + +static SLJIT_INLINE sljit_s32 emit_mov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { /* Return if don't need to do anything. 
*/ if (op == SLJIT_UNUSED) @@ -1299,16 +2002,55 @@ static SLJIT_INLINE sljit_si emit_mov_before_return(struct sljit_compiler *compi if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_P)) return SLJIT_SUCCESS; #else - if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_UI || op == SLJIT_MOV_SI || op == SLJIT_MOV_P)) + if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_U32 || op == SLJIT_MOV_S32 || op == SLJIT_MOV_P)) return SLJIT_SUCCESS; #endif -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \ + || (defined SLJIT_VERBOSE && SLJIT_VERBOSE) compiler->skip_checks = 1; #endif return sljit_emit_op1(compiler, op, SLJIT_RETURN_REG, 0, src, srcw); } +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \ + || (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \ + || (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ + || ((defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) && !(defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1)) + +static SLJIT_INLINE sljit_s32 sljit_emit_cmov_generic(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 dst_reg, + sljit_s32 src, sljit_sw srcw) +{ + struct sljit_label *label; + struct sljit_jump *jump; + sljit_s32 op = (dst_reg & SLJIT_I32_OP) ? SLJIT_MOV32 : SLJIT_MOV; + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + jump = sljit_emit_jump(compiler, type ^ 0x1); + FAIL_IF(!jump); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + FAIL_IF(sljit_emit_op1(compiler, op, dst_reg & ~SLJIT_I32_OP, 0, src, srcw)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + label = sljit_emit_label(compiler); + FAIL_IF(!label); + sljit_set_label(jump, label); + return SLJIT_SUCCESS; +} + +#endif + /* CPU description section */ #if (defined SLJIT_32BIT_ARCHITECTURE && SLJIT_32BIT_ARCHITECTURE) @@ -1335,71 +2077,83 @@ static SLJIT_INLINE sljit_si emit_mov_before_return(struct sljit_compiler *compi #define SLJIT_CPUINFO SLJIT_CPUINFO_PART1 SLJIT_CPUINFO_PART2 SLJIT_CPUINFO_PART3 -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) -# include "sjx86c.c" -#elif (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) # include "sjx86c.c" #elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) -# include "sjarmv5.c" +# include "sjarm32.c" #elif (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) -# include "sjarmv5.c" +# include "sjarm32.c" #elif (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) # include "sjarmth2.c" -#elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) -# include "sjppcc.c" -#elif (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#elif (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) +# include "sjarm64.c" +#elif (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) # include "sjppcc.c" -#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#elif (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) # include "sjmipsc.c" -#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) -# include "sljitNativeSPARC_common.c" +#elif (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC) +# include "sjsparcc.c" #elif (defined SLJIT_CONFIG_TILEGX && 
SLJIT_CONFIG_TILEGX) -# include "sljitNativeTILEGX.c" +# include "sljitNativeTILEGX_64.c" #endif -#if !(defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#if !(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_si type, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { /* Default compare for most architectures. */ - sljit_si flags, tmp_src, condition; + sljit_s32 flags, tmp_src, condition; sljit_sw tmp_srcw; CHECK_ERROR_PTR(); - check_sljit_emit_cmp(compiler, type, src1, src1w, src2, src2w); + CHECK_PTR(check_sljit_emit_cmp(compiler, type, src1, src1w, src2, src2w)); condition = type & 0xff; +#if (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) + if ((condition == SLJIT_EQUAL || condition == SLJIT_NOT_EQUAL)) { + if ((src1 & SLJIT_IMM) && !src1w) { + src1 = src2; + src1w = src2w; + src2 = SLJIT_IMM; + src2w = 0; + } + if ((src2 & SLJIT_IMM) && !src2w) + return emit_cmp_to0(compiler, type, src1, src1w); + } +#endif + if (SLJIT_UNLIKELY((src1 & SLJIT_IMM) && !(src2 & SLJIT_IMM))) { /* Immediate is prefered as second argument by most architectures. */ switch (condition) { - case SLJIT_C_LESS: - condition = SLJIT_C_GREATER; + case SLJIT_LESS: + condition = SLJIT_GREATER; break; - case SLJIT_C_GREATER_EQUAL: - condition = SLJIT_C_LESS_EQUAL; + case SLJIT_GREATER_EQUAL: + condition = SLJIT_LESS_EQUAL; break; - case SLJIT_C_GREATER: - condition = SLJIT_C_LESS; + case SLJIT_GREATER: + condition = SLJIT_LESS; break; - case SLJIT_C_LESS_EQUAL: - condition = SLJIT_C_GREATER_EQUAL; + case SLJIT_LESS_EQUAL: + condition = SLJIT_GREATER_EQUAL; break; - case SLJIT_C_SIG_LESS: - condition = SLJIT_C_SIG_GREATER; + case SLJIT_SIG_LESS: + condition = SLJIT_SIG_GREATER; break; - case SLJIT_C_SIG_GREATER_EQUAL: - condition = SLJIT_C_SIG_LESS_EQUAL; + case SLJIT_SIG_GREATER_EQUAL: + condition = SLJIT_SIG_LESS_EQUAL; break; - case SLJIT_C_SIG_GREATER: - condition = SLJIT_C_SIG_LESS; + case SLJIT_SIG_GREATER: + condition = SLJIT_SIG_LESS; break; - case SLJIT_C_SIG_LESS_EQUAL: - condition = SLJIT_C_SIG_GREATER_EQUAL; + case SLJIT_SIG_LESS_EQUAL: + condition = SLJIT_SIG_GREATER_EQUAL; break; } - type = condition | (type & (SLJIT_INT_OP | SLJIT_REWRITABLE_JUMP)); + + type = condition | (type & (SLJIT_I32_OP | SLJIT_REWRITABLE_JUMP)); tmp_src = src1; src1 = src2; src2 = tmp_src; @@ -1408,64 +2162,105 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler src2w = tmp_srcw; } - if (condition <= SLJIT_C_NOT_ZERO) - flags = SLJIT_SET_E; - else if (condition <= SLJIT_C_LESS_EQUAL) - flags = SLJIT_SET_U; + if (condition <= SLJIT_NOT_ZERO) + flags = SLJIT_SET_Z; else - flags = SLJIT_SET_S; + flags = condition << VARIABLE_FLAG_SHIFT; -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) compiler->skip_checks = 1; #endif - PTR_FAIL_IF(sljit_emit_op2(compiler, SLJIT_SUB | flags | (type & SLJIT_INT_OP), + PTR_FAIL_IF(sljit_emit_op2(compiler, SLJIT_SUB | flags | (type & SLJIT_I32_OP), SLJIT_UNUSED, 0, src1, src1w, src2, src2w)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || 
(defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) compiler->skip_checks = 1; #endif - return sljit_emit_jump(compiler, condition | (type & SLJIT_REWRITABLE_JUMP)); + return sljit_emit_jump(compiler, condition | (type & (SLJIT_REWRITABLE_JUMP | SLJIT_I32_OP))); } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_si type, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) -{ - sljit_si flags, condition; - - check_sljit_emit_fcmp(compiler, type, src1, src1w, src2, src2w); +#endif - condition = type & 0xff; - flags = (condition <= SLJIT_C_FLOAT_NOT_EQUAL) ? SLJIT_SET_E : SLJIT_SET_S; - if (type & SLJIT_SINGLE_OP) - flags |= SLJIT_SINGLE_OP; +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_fcmp(compiler, type, src1, src1w, src2, src2w)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) compiler->skip_checks = 1; #endif - sljit_emit_fop1(compiler, SLJIT_CMPD | flags, src1, src1w, src2, src2w); + sljit_emit_fop1(compiler, SLJIT_CMP_F64 | ((type & 0xff) << VARIABLE_FLAG_SHIFT) | (type & SLJIT_I32_OP), src1, src1w, src2, src2w); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) compiler->skip_checks = 1; #endif - return sljit_emit_jump(compiler, condition | (type & SLJIT_REWRITABLE_JUMP)); + return sljit_emit_jump(compiler, type); +} + +#if !(defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) \ + && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ + && !(defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(reg); + SLJIT_UNUSED_ARG(mem); + SLJIT_UNUSED_ARG(memw); + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + + return SLJIT_ERR_UNSUPPORTED; } #endif -#if !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) && !(defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +#if !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ + && !(defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_local_base(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw offset) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 freg, + sljit_s32 mem, sljit_sw memw) { + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(freg); + SLJIT_UNUSED_ARG(mem); + SLJIT_UNUSED_ARG(memw); + CHECK_ERROR(); - check_sljit_get_local_base(compiler, dst, dstw, offset); + CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw)); - ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_LOCALS_REG), offset); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + return SLJIT_ERR_UNSUPPORTED; +} + +#endif + +#if !(defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \ + && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct 
sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset) +{ + CHECK_ERROR(); + CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset)); + + ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_SP), offset); +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) compiler->skip_checks = 1; #endif if (offset != 0) - return sljit_emit_op2(compiler, SLJIT_ADD | SLJIT_KEEP_FLAGS, dst, dstw, SLJIT_LOCALS_REG, 0, SLJIT_IMM, offset); - return sljit_emit_op1(compiler, SLJIT_MOV, dst, dstw, SLJIT_LOCALS_REG, 0); + return sljit_emit_op2(compiler, SLJIT_ADD, dst, dstw, SLJIT_SP, 0, SLJIT_IMM, offset); + return sljit_emit_op1(compiler, SLJIT_MOV, dst, dstw, SLJIT_SP, 0); } #endif @@ -1474,28 +2269,35 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_local_base(struct sljit_compiler *co /* Empty function bodies for those machines, which are not (yet) supported. */ -SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) +SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void) { return "unsupported"; } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allocator_data) { - SLJIT_ASSERT_STOP(); + SLJIT_UNUSED_ARG(allocator_data); + SLJIT_UNREACHABLE(); return NULL; } SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compiler *compiler) { SLJIT_UNUSED_ARG(compiler); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); } -SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_si size) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_compiler_memory_error(struct sljit_compiler *compiler) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNREACHABLE(); +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_s32 size) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(size); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return NULL; } @@ -1504,83 +2306,101 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *comp { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(verbose); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); } #endif SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) { SLJIT_UNUSED_ARG(compiler); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return NULL; } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) +{ + SLJIT_UNUSED_ARG(feature_type); + SLJIT_UNREACHABLE(); + return 0; +} + SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code) { SLJIT_UNUSED_ARG(code); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(args); + SLJIT_UNUSED_ARG(options); + SLJIT_UNUSED_ARG(arg_types); SLJIT_UNUSED_ARG(scratches); SLJIT_UNUSED_ARG(saveds); + SLJIT_UNUSED_ARG(fscratches); + SLJIT_UNUSED_ARG(fsaveds); SLJIT_UNUSED_ARG(local_size); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si 
local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { SLJIT_UNUSED_ARG(compiler); - SLJIT_UNUSED_ARG(args); + SLJIT_UNUSED_ARG(options); + SLJIT_UNUSED_ARG(arg_types); SLJIT_UNUSED_ARG(scratches); SLJIT_UNUSED_ARG(saveds); + SLJIT_UNUSED_ARG(fscratches); + SLJIT_UNUSED_ARG(fsaveds); SLJIT_UNUSED_ARG(local_size); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); + return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(op); SLJIT_UNUSED_ARG(src); SLJIT_UNUSED_ARG(srcw); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(dst); SLJIT_UNUSED_ARG(dstw); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(src); SLJIT_UNUSED_ARG(srcw); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(op); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(op); @@ -1588,14 +2408,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler SLJIT_UNUSED_ARG(dstw); SLJIT_UNUSED_ARG(src); SLJIT_UNUSED_ARG(srcw); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(op); @@ -1605,35 +2425,35 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler SLJIT_UNUSED_ARG(src1w); SLJIT_UNUSED_ARG(src2); SLJIT_UNUSED_ARG(src2w); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) 
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg) { - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return reg; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, - void *instruction, sljit_si size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_s32 size) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(instruction); SLJIT_UNUSED_ARG(size); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_current_flags(struct sljit_compiler *compiler, sljit_s32 current_flags) { - SLJIT_ASSERT_STOP(); - return 0; + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(current_flags); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(op); @@ -1641,14 +2461,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compile SLJIT_UNUSED_ARG(dstw); SLJIT_UNUSED_ARG(src); SLJIT_UNUSED_ARG(srcw); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(op); @@ -1658,28 +2478,38 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compile SLJIT_UNUSED_ARG(src1w); SLJIT_UNUSED_ARG(src2); SLJIT_UNUSED_ARG(src2w); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) { SLJIT_UNUSED_ARG(compiler); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return NULL; } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(type); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return NULL; } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_si type, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(arg_types); + SLJIT_UNREACHABLE(); + return NULL; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(type); @@ -1687,13 +2517,13 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler SLJIT_UNUSED_ARG(src1w); SLJIT_UNUSED_ARG(src2); SLJIT_UNUSED_ARG(src2w); - 
SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return NULL; } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_si type, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(type); @@ -1701,7 +2531,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compile SLJIT_UNUSED_ARG(src1w); SLJIT_UNUSED_ARG(src2); SLJIT_UNUSED_ARG(src2w); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return NULL; } @@ -1709,74 +2539,129 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_label(struct sljit_jump *jump, struct sl { SLJIT_UNUSED_ARG(jump); SLJIT_UNUSED_ARG(label); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); } SLJIT_API_FUNC_ATTRIBUTE void sljit_set_target(struct sljit_jump *jump, sljit_uw target) { SLJIT_UNUSED_ARG(jump); SLJIT_UNUSED_ARG(target); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(type); SLJIT_UNUSED_ARG(src); SLJIT_UNUSED_ARG(srcw); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw, - sljit_si type) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(arg_types); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + SLJIT_UNREACHABLE(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 type) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(op); SLJIT_UNUSED_ARG(dst); SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(type); + SLJIT_UNREACHABLE(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 dst_reg, + sljit_s32 src, sljit_sw srcw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(dst_reg); SLJIT_UNUSED_ARG(src); SLJIT_UNUSED_ARG(srcw); + SLJIT_UNREACHABLE(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 reg, sljit_s32 mem, sljit_sw memw) +{ + SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(type); - SLJIT_ASSERT_STOP(); + SLJIT_UNUSED_ARG(reg); + SLJIT_UNUSED_ARG(mem); + SLJIT_UNUSED_ARG(memw); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_local_base(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw offset) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 freg, sljit_s32 mem, sljit_sw memw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(freg); + SLJIT_UNUSED_ARG(mem); + SLJIT_UNUSED_ARG(memw); + 
SLJIT_UNREACHABLE(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(dst); SLJIT_UNUSED_ARG(dstw); SLJIT_UNUSED_ARG(offset); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw initval) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw initval) { SLJIT_UNUSED_ARG(compiler); SLJIT_UNUSED_ARG(dst); SLJIT_UNUSED_ARG(dstw); SLJIT_UNUSED_ARG(initval); - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); + return NULL; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); return NULL; } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) { SLJIT_UNUSED_ARG(addr); - SLJIT_UNUSED_ARG(new_addr); - SLJIT_ASSERT_STOP(); + SLJIT_UNUSED_ARG(new_target); + SLJIT_UNUSED_ARG(executable_offset); + SLJIT_UNREACHABLE(); } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) { SLJIT_UNUSED_ARG(addr); SLJIT_UNUSED_ARG(new_constant); - SLJIT_ASSERT_STOP(); + SLJIT_UNUSED_ARG(executable_offset); + SLJIT_UNREACHABLE(); } -#endif +#endif /* !SLJIT_CONFIG_UNSUPPORTED */ diff --git a/src/3rd/pcre/sjlir.h b/src/3rd/pcre/sjlir.h index 4a154cfeb34..bf8640ada14 100644 --- a/src/3rd/pcre/sjlir.h +++ b/src/3rd/pcre/sjlir.h @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -56,8 +56,6 @@ Disadvantages: - No automatic register allocation, and temporary results are not stored on the stack. (hence the name comes) - - Limited number of registers (only 6+4 integer registers, max 3+2 - scratch, max 3+2 saved and 6 floating point registers) In practice: - This approach is very effective for interpreters - One of the saved registers typically points to a stack interface @@ -97,88 +95,247 @@ of sljitConfigInternal.h */ /* Cannot allocate executable memory. Only for sljit_generate_code() */ #define SLJIT_ERR_EX_ALLOC_FAILED 3 -/* return value for SLJIT_CONFIG_UNSUPPORTED empty architecture. */ +/* Return value for SLJIT_CONFIG_UNSUPPORTED placeholder architecture. */ #define SLJIT_ERR_UNSUPPORTED 4 +/* An ivalid argument is passed to any SLJIT function. */ +#define SLJIT_ERR_BAD_ARGUMENT 5 +/* Dynamic code modification is not enabled. */ +#define SLJIT_ERR_DYN_CODE_MOD 6 /* --------------------------------------------------------------------- */ /* Registers */ /* --------------------------------------------------------------------- */ +/* + Scratch (R) registers: registers whose may not preserve their values + across function calls. 
+ + Saved (S) registers: registers whose preserve their values across + function calls. + + The scratch and saved register sets are overlap. The last scratch register + is the first saved register, the one before the last is the second saved + register, and so on. + + If an architecture provides two scratch and three saved registers, + its scratch and saved register sets are the following: + + R0 | | R0 is always a scratch register + R1 | | R1 is always a scratch register + [R2] | S2 | R2 and S2 represent the same physical register + [R3] | S1 | R3 and S1 represent the same physical register + [R4] | S0 | R4 and S0 represent the same physical register + + Note: SLJIT_NUMBER_OF_SCRATCH_REGISTERS would be 2 and + SLJIT_NUMBER_OF_SAVED_REGISTERS would be 3 for this architecture. + + Note: On all supported architectures SLJIT_NUMBER_OF_REGISTERS >= 12 + and SLJIT_NUMBER_OF_SAVED_REGISTERS >= 6. However, 6 registers + are virtual on x86-32. See below. + + The purpose of this definition is convenience: saved registers can + be used as extra scratch registers. For example four registers can + be specified as scratch registers and the fifth one as saved register + on the CPU above and any user code which requires four scratch + registers can run unmodified. The SLJIT compiler automatically saves + the content of the two extra scratch register on the stack. Scratch + registers can also be preserved by saving their value on the stack + but this needs to be done manually. + + Note: To emphasize that registers assigned to R2-R4 are saved + registers, they are enclosed by square brackets. + + Note: sljit_emit_enter and sljit_set_context defines whether a register + is S or R register. E.g: when 3 scratches and 1 saved is mapped + by sljit_emit_enter, the allowed register set will be: R0-R2 and + S0. Although S2 is mapped to the same position as R2, it does not + available in the current configuration. Furthermore the S1 register + is not available at all. +*/ + +/* When SLJIT_UNUSED is specified as the destination of sljit_emit_op1 + or sljit_emit_op2 operations the result is discarded. If no status + flags are set, no instructions are emitted for these operations. Data + prefetch is a special exception, see SLJIT_MOV operation. Other SLJIT + operations do not support SLJIT_UNUSED as a destination operand. */ #define SLJIT_UNUSED 0 -/* Scratch (temporary) registers whose may not preserve their values - across function calls. */ -#define SLJIT_SCRATCH_REG1 1 -#define SLJIT_SCRATCH_REG2 2 -#define SLJIT_SCRATCH_REG3 3 -/* Note: extra registers cannot be used for memory addressing. */ -/* Note: on x86-32, these registers are emulated (using stack - loads & stores). */ -#define SLJIT_TEMPORARY_EREG1 4 -#define SLJIT_TEMPORARY_EREG2 5 - -/* Saved registers whose preserve their values across function calls. */ -#define SLJIT_SAVED_REG1 6 -#define SLJIT_SAVED_REG2 7 -#define SLJIT_SAVED_REG3 8 -/* Note: extra registers cannot be used for memory addressing. */ -/* Note: on x86-32, these registers are emulated (using stack - loads & stores). */ -#define SLJIT_SAVED_EREG1 9 -#define SLJIT_SAVED_EREG2 10 - -/* Read-only register (cannot be the destination of an operation). - Only SLJIT_MEM1(SLJIT_LOCALS_REG) addressing mode is allowed since - several ABIs has certain limitations about the stack layout. However - sljit_get_local_base() can be used to obtain the offset of a value - on the stack. */ -#define SLJIT_LOCALS_REG 11 - -/* Number of registers. 
*/ -#define SLJIT_NO_TMP_REGISTERS 5 -#define SLJIT_NO_GEN_REGISTERS 5 -#define SLJIT_NO_REGISTERS 11 +/* Scratch registers. */ +#define SLJIT_R0 1 +#define SLJIT_R1 2 +#define SLJIT_R2 3 +/* Note: on x86-32, R3 - R6 (same as S3 - S6) are emulated (they + are allocated on the stack). These registers are called virtual + and cannot be used for memory addressing (cannot be part of + any SLJIT_MEM1, SLJIT_MEM2 construct). There is no such + limitation on other CPUs. See sljit_get_register_index(). */ +#define SLJIT_R3 4 +#define SLJIT_R4 5 +#define SLJIT_R5 6 +#define SLJIT_R6 7 +#define SLJIT_R7 8 +#define SLJIT_R8 9 +#define SLJIT_R9 10 +/* All R registers provided by the architecture can be accessed by SLJIT_R(i) + The i parameter must be >= 0 and < SLJIT_NUMBER_OF_REGISTERS. */ +#define SLJIT_R(i) (1 + (i)) + +/* Saved registers. */ +#define SLJIT_S0 (SLJIT_NUMBER_OF_REGISTERS) +#define SLJIT_S1 (SLJIT_NUMBER_OF_REGISTERS - 1) +#define SLJIT_S2 (SLJIT_NUMBER_OF_REGISTERS - 2) +/* Note: on x86-32, S3 - S6 (same as R3 - R6) are emulated (they + are allocated on the stack). These registers are called virtual + and cannot be used for memory addressing (cannot be part of + any SLJIT_MEM1, SLJIT_MEM2 construct). There is no such + limitation on other CPUs. See sljit_get_register_index(). */ +#define SLJIT_S3 (SLJIT_NUMBER_OF_REGISTERS - 3) +#define SLJIT_S4 (SLJIT_NUMBER_OF_REGISTERS - 4) +#define SLJIT_S5 (SLJIT_NUMBER_OF_REGISTERS - 5) +#define SLJIT_S6 (SLJIT_NUMBER_OF_REGISTERS - 6) +#define SLJIT_S7 (SLJIT_NUMBER_OF_REGISTERS - 7) +#define SLJIT_S8 (SLJIT_NUMBER_OF_REGISTERS - 8) +#define SLJIT_S9 (SLJIT_NUMBER_OF_REGISTERS - 9) +/* All S registers provided by the architecture can be accessed by SLJIT_S(i) + The i parameter must be >= 0 and < SLJIT_NUMBER_OF_SAVED_REGISTERS. */ +#define SLJIT_S(i) (SLJIT_NUMBER_OF_REGISTERS - (i)) + +/* Registers >= SLJIT_FIRST_SAVED_REG are saved registers. */ +#define SLJIT_FIRST_SAVED_REG (SLJIT_S0 - SLJIT_NUMBER_OF_SAVED_REGISTERS + 1) + +/* The SLJIT_SP provides direct access to the linear stack space allocated by + sljit_emit_enter. It can only be used in the following form: SLJIT_MEM1(SLJIT_SP). + The immediate offset is extended by the relative stack offset automatically. + The sljit_get_local_base can be used to obtain the absolute offset. */ +#define SLJIT_SP (SLJIT_NUMBER_OF_REGISTERS + 1) /* Return with machine word. */ -#define SLJIT_RETURN_REG SLJIT_SCRATCH_REG1 - -/* x86 prefers specific registers for special purposes. In case of shift - by register it supports only SLJIT_SCRATCH_REG3 for shift argument - (which is the src2 argument of sljit_emit_op2). If another register is - used, sljit must exchange data between registers which cause a minor - slowdown. Other architectures has no such limitation. */ - -#define SLJIT_PREF_SHIFT_REG SLJIT_SCRATCH_REG3 +#define SLJIT_RETURN_REG SLJIT_R0 /* --------------------------------------------------------------------- */ /* Floating point registers */ /* --------------------------------------------------------------------- */ -/* Note: SLJIT_UNUSED as destination is not valid for floating point - operations, since they cannot be used for setting flags. */ +/* Each floating point register can store a 32 or a 64 bit precision + value. The FR and FS register sets are overlap in the same way as R + and S register sets. See above. */ -/* Floating point operations are performed on double or - single precision values. 
*/ +/* Note: SLJIT_UNUSED as destination is not valid for floating point + operations, since they cannot be used for setting flags. */ + +/* Floating point scratch registers. */ +#define SLJIT_FR0 1 +#define SLJIT_FR1 2 +#define SLJIT_FR2 3 +#define SLJIT_FR3 4 +#define SLJIT_FR4 5 +#define SLJIT_FR5 6 +/* All FR registers provided by the architecture can be accessed by SLJIT_FR(i) + The i parameter must be >= 0 and < SLJIT_NUMBER_OF_FLOAT_REGISTERS. */ +#define SLJIT_FR(i) (1 + (i)) + +/* Floating point saved registers. */ +#define SLJIT_FS0 (SLJIT_NUMBER_OF_FLOAT_REGISTERS) +#define SLJIT_FS1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 1) +#define SLJIT_FS2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 2) +#define SLJIT_FS3 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 3) +#define SLJIT_FS4 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 4) +#define SLJIT_FS5 (SLJIT_NUMBER_OF_FLOAT_REGISTERS - 5) +/* All S registers provided by the architecture can be accessed by SLJIT_FS(i) + The i parameter must be >= 0 and < SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS. */ +#define SLJIT_FS(i) (SLJIT_NUMBER_OF_FLOAT_REGISTERS - (i)) + +/* Float registers >= SLJIT_FIRST_SAVED_FLOAT_REG are saved registers. */ +#define SLJIT_FIRST_SAVED_FLOAT_REG (SLJIT_FS0 - SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS + 1) -#define SLJIT_FLOAT_REG1 1 -#define SLJIT_FLOAT_REG2 2 -#define SLJIT_FLOAT_REG3 3 -#define SLJIT_FLOAT_REG4 4 -#define SLJIT_FLOAT_REG5 5 -#define SLJIT_FLOAT_REG6 6 +/* --------------------------------------------------------------------- */ +/* Argument type definitions */ +/* --------------------------------------------------------------------- */ -#define SLJIT_NO_FLOAT_REGISTERS 6 +/* Argument type definitions. + Used by SLJIT_[DEF_]ARGx and SLJIT_[DEF]_RET macros. */ + +#define SLJIT_ARG_TYPE_VOID 0 +#define SLJIT_ARG_TYPE_SW 1 +#define SLJIT_ARG_TYPE_UW 2 +#define SLJIT_ARG_TYPE_S32 3 +#define SLJIT_ARG_TYPE_U32 4 +#define SLJIT_ARG_TYPE_F32 5 +#define SLJIT_ARG_TYPE_F64 6 + +/* The following argument type definitions are used by sljit_emit_enter, + sljit_set_context, sljit_emit_call, and sljit_emit_icall functions. + The following return type definitions are used by sljit_emit_call + and sljit_emit_icall functions. + + When a function is called, the first integer argument must be placed + in SLJIT_R0, the second in SLJIT_R1, and so on. Similarly the first + floating point argument must be placed in SLJIT_FR0, the second in + SLJIT_FR1, and so on. + + Example function definition: + sljit_f32 SLJIT_FUNC example_c_callback(sljit_sw arg_a, + sljit_f64 arg_b, sljit_u32 arg_c, sljit_f32 arg_d); + + Argument type definition: + SLJIT_DEF_RET(SLJIT_ARG_TYPE_F32) + | SLJIT_DEF_ARG1(SLJIT_ARG_TYPE_SW) | SLJIT_DEF_ARG2(SLJIT_ARG_TYPE_F64) + | SLJIT_DEF_ARG3(SLJIT_ARG_TYPE_U32) | SLJIT_DEF_ARG2(SLJIT_ARG_TYPE_F32) + + Short form of argument type definition: + SLJIT_RET(F32) | SLJIT_ARG1(SW) | SLJIT_ARG2(F64) + | SLJIT_ARG3(S32) | SLJIT_ARG4(F32) + + Argument passing: + arg_a must be placed in SLJIT_R0 + arg_c must be placed in SLJIT_R1 + arg_b must be placed in SLJIT_FR0 + arg_d must be placed in SLJIT_FR1 + +Note: + The SLJIT_ARG_TYPE_VOID type is only supported by + SLJIT_DEF_RET, and SLJIT_ARG_TYPE_VOID is also the + default value when SLJIT_DEF_RET is not specified. 
*/ +#define SLJIT_DEF_SHIFT 4 +#define SLJIT_DEF_RET(type) (type) +#define SLJIT_DEF_ARG1(type) ((type) << SLJIT_DEF_SHIFT) +#define SLJIT_DEF_ARG2(type) ((type) << (2 * SLJIT_DEF_SHIFT)) +#define SLJIT_DEF_ARG3(type) ((type) << (3 * SLJIT_DEF_SHIFT)) +#define SLJIT_DEF_ARG4(type) ((type) << (4 * SLJIT_DEF_SHIFT)) + +/* Short form of the macros above. + + For example the following definition: + SLJIT_DEF_RET(SLJIT_ARG_TYPE_SW) | SLJIT_DEF_ARG1(SLJIT_ARG_TYPE_F32) + + can be shortened to: + SLJIT_RET(SW) | SLJIT_ARG1(F32) + +Note: + The VOID type is only supported by SLJIT_RET, and + VOID is also the default value when SLJIT_RET is + not specified. */ +#define SLJIT_RET(type) SLJIT_DEF_RET(SLJIT_ARG_TYPE_ ## type) +#define SLJIT_ARG1(type) SLJIT_DEF_ARG1(SLJIT_ARG_TYPE_ ## type) +#define SLJIT_ARG2(type) SLJIT_DEF_ARG2(SLJIT_ARG_TYPE_ ## type) +#define SLJIT_ARG3(type) SLJIT_DEF_ARG3(SLJIT_ARG_TYPE_ ## type) +#define SLJIT_ARG4(type) SLJIT_DEF_ARG4(SLJIT_ARG_TYPE_ ## type) /* --------------------------------------------------------------------- */ /* Main structures and functions */ /* --------------------------------------------------------------------- */ +/* + The following structures are private, and can be changed in the + future. Keeping them here allows code inlining. +*/ + struct sljit_memory_fragment { struct sljit_memory_fragment *next; sljit_uw used_size; /* Must be aligned to sljit_sw. */ - sljit_ub memory[1]; + sljit_u8 memory[1]; }; struct sljit_label { @@ -191,61 +348,77 @@ struct sljit_label { struct sljit_jump { struct sljit_jump *next; sljit_uw addr; - sljit_sw flags; + sljit_uw flags; union { sljit_uw target; - struct sljit_label* label; + struct sljit_label *label; } u; }; +struct sljit_put_label { + struct sljit_put_label *next; + struct sljit_label *label; + sljit_uw addr; + sljit_uw flags; +}; + struct sljit_const { struct sljit_const *next; sljit_uw addr; }; struct sljit_compiler { - sljit_si error; + sljit_s32 error; + sljit_s32 options; struct sljit_label *labels; struct sljit_jump *jumps; + struct sljit_put_label *put_labels; struct sljit_const *consts; struct sljit_label *last_label; struct sljit_jump *last_jump; struct sljit_const *last_const; + struct sljit_put_label *last_put_label; + void *allocator_data; struct sljit_memory_fragment *buf; struct sljit_memory_fragment *abuf; - /* Used local registers. */ - sljit_si scratches; + /* Used scratch registers. */ + sljit_s32 scratches; /* Used saved registers. */ - sljit_si saveds; + sljit_s32 saveds; + /* Used float scratch registers. */ + sljit_s32 fscratches; + /* Used float saved registers. */ + sljit_s32 fsaveds; /* Local stack size. */ - sljit_si local_size; + sljit_s32 local_size; /* Code size. */ sljit_uw size; - /* For statistical purposes. */ + /* Relative offset of the executable mapping from the writable mapping. */ + sljit_uw executable_offset; + /* Executable size for statistical purposes. 
*/
 sljit_uw executable_size;

 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- sljit_si args;
- sljit_si locals_offset;
- sljit_si scratches_start;
- sljit_si saveds_start;
+ sljit_s32 args;
+ sljit_s32 locals_offset;
+ sljit_s32 saveds_offset;
+ sljit_s32 stack_tmp_size;
 #endif

 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- sljit_si mode32;
+ sljit_s32 mode32;
+#ifdef _WIN64
+ sljit_s32 locals_offset;
 #endif
-
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- sljit_si flags_saved;
 #endif

 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
 /* Constant pool handling. */
 sljit_uw *cpool;
- sljit_ub *cpool_unique;
+ sljit_u8 *cpool_unique;
 sljit_uw cpool_diff;
 sljit_uw cpool_fill;
 /* Other members. */
@@ -256,35 +429,26 @@ struct sljit_compiler {
 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
 /* Temporary fields. */
 sljit_uw shift_imm;
- sljit_si cache_arg;
- sljit_sw cache_argw;
 #endif

-#if (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2)
- sljit_si cache_arg;
- sljit_sw cache_argw;
-#endif
-
-#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+#if (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
 sljit_sw imm;
- sljit_si cache_arg;
- sljit_sw cache_argw;
 #endif

-#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
- sljit_si delay_slot;
- sljit_si cache_arg;
+#if (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
+ sljit_s32 delay_slot;
+ sljit_s32 cache_arg;
 sljit_sw cache_argw;
 #endif

 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
- sljit_si delay_slot;
- sljit_si cache_arg;
+ sljit_s32 delay_slot;
+ sljit_s32 cache_arg;
 sljit_sw cache_argw;
 #endif

 #if (defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX)
- sljit_si cache_arg;
+ sljit_s32 cache_arg;
 sljit_sw cache_argw;
 #endif

@@ -292,13 +456,20 @@ struct sljit_compiler {
 FILE* verbose;
 #endif

-#if (defined SLJIT_DEBUG && SLJIT_DEBUG)
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \
+ || (defined SLJIT_DEBUG && SLJIT_DEBUG)
+ /* Flags specified by the last arithmetic instruction.
+    It contains the type of the variable flag. */
+ sljit_s32 last_flags;
 /* Local size passed to the functions. */
- sljit_si logical_local_size;
+ sljit_s32 logical_local_size;
 #endif

-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG)
- sljit_si skip_checks;
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \
+ || (defined SLJIT_DEBUG && SLJIT_DEBUG) \
+ || (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+ /* Trust arguments when the API function is called. */
+ sljit_s32 skip_checks;
 #endif
 };

@@ -306,11 +477,16 @@ struct sljit_compiler {
 /* Main functions */
/* --------------------------------------------------------------------- */

-/* Creates an sljit compiler.
+/* Creates an sljit compiler. The allocator_data is required by some
+   custom memory managers. This pointer is passed to SLJIT_MALLOC
+   and SLJIT_FREE macros. Most allocators (including the default
+   one) ignore this value, and it is recommended to pass NULL
+   as a dummy value for allocator_data.
+
+   Returns NULL on failure. */
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void);
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allocator_data);

-/* Free everything except the compiled machine code. */
+/* Frees everything except the compiled machine code. */
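+
+/* A minimal sketch of this create/free pair (error handling shortened;
+   an allocator-aware setup would pass its own pointer instead of NULL):
+
+     struct sljit_compiler *compiler = sljit_create_compiler(NULL);
+     if (!compiler)
+         return;
+
+     ... emit instructions, call sljit_generate_code ...
+
+     sljit_free_compiler(compiler);
+
+   The compiled machine code is not freed by sljit_free_compiler. */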
 SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compiler *compiler);

 /* Returns the current error code. If an error is occurred, future sljit
@@ -318,7 +494,15 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compile
    error code. Thus there is no need for checking the error after every
    call, it is enough to do it before the code is compiled. Removing
    these checks increases the performance of the compiling process. */
-static SLJIT_INLINE sljit_si sljit_get_compiler_error(struct sljit_compiler *compiler) { return compiler->error; }
+static SLJIT_INLINE sljit_s32 sljit_get_compiler_error(struct sljit_compiler *compiler) { return compiler->error; }
+
+/* Sets the compiler error code to SLJIT_ERR_ALLOC_FAILED unless
+   an error was detected before. After the error code is set
+   the compiler behaves as if the allocation failure happened
+   during an sljit function call. This can greatly simplify error
+   checking, since only the compiler status needs to be checked
+   after the compilation. */
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_compiler_memory_error(struct sljit_compiler *compiler);

 /*
   Allocate a small amount of memory. The size must be <= 64 bytes on 32 bit,
   indicate that there is no more memory (does not set the current error code
   of the compiler to out-of-memory status). */
-SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_si size);
+SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_s32 size);

 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
 /* Passing NULL disables verbose. */
 SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *compiler, FILE* verbose);
 #endif

+/*
+   Create executable code from the sljit instruction stream. This is the final step
+   of the code generation, so no more instructions can be added after this call.
+*/
+
 SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler);
+
+/* Free executable code. */
+
 SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code);

 /*
-   After the machine code generation is finished we can retrieve the allocated
-   executable memory size, although this area may not be fully filled with
-   instructions depending on some optimizations. This function is useful only
-   for statistical purposes.
+   When the protected executable allocator is used the JIT code is mapped
+   twice. The first mapping has read/write and the second mapping has read/exec
+   permissions. This function returns with the relative offset of the executable
+   mapping using the writable mapping as the base after the machine code is
+   successfully generated. The returned value is always 0 for the normal executable
+   allocator, since it uses only one mapping with read/write/exec permissions.
+   Dynamic code modifications require this value.
+
+   Before a successful code generation, this function returns with 0.
+*/
+static SLJIT_INLINE sljit_sw sljit_get_executable_offset(struct sljit_compiler *compiler) { return compiler->executable_offset; }
+
+/*
+   The executable memory consumption of the generated code can be retrieved by
+   this function. The returned value can be used for statistical purposes.
+
+   Before a successful code generation, this function returns with 0.
+*/
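+
+/* A sketch of the final compilation step (assuming instructions were
+   already emitted into "compiler"; a NULL result is treated as failure):
+
+     void *code = sljit_generate_code(compiler);
+     if (!code) {
+         sljit_free_compiler(compiler);
+         return;
+     }
+     sljit_uw code_size = sljit_get_generated_code_size(compiler);
+     sljit_sw exec_offset = sljit_get_executable_offset(compiler);
+     sljit_free_compiler(compiler);
+
+   and, once the code is no longer needed, sljit_free_code(code). */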
 static SLJIT_INLINE sljit_uw sljit_get_generated_code_size(struct sljit_compiler *compiler) { return compiler->executable_size; }

+/* Returns with non-zero if the feature or limitation type passed as its
+   argument is present on the current CPU.
+
+   Some features (e.g. floating point operations) require hardware (CPU)
+   support while others (e.g. move with update) are emulated if not available.
+   However, even if a feature is emulated, specialized code paths can be faster
+   than the emulation. Some limitations are emulated as well, so their general
+   case is supported, but it has extra performance costs. */
+
+/* [Not emulated] Floating-point support is available. */
+#define SLJIT_HAS_FPU 0
+/* [Limitation] Some registers are virtual registers. */
+#define SLJIT_HAS_VIRTUAL_REGISTERS 1
+/* [Emulated] Count leading zero is supported. */
+#define SLJIT_HAS_CLZ 2
+/* [Emulated] Conditional move is supported. */
+#define SLJIT_HAS_CMOV 3
+
+#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
+/* [Not emulated] SSE2 support is available on x86. */
+#define SLJIT_HAS_SSE2 100
+#endif
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type);
+
 /* Instruction generation. Returns with any error code. If there is no
    error, they return with SLJIT_SUCCESS. */

 /*
-   The executable code is basically a function call from the viewpoint of
-   the C language. The function calls must obey to the ABI (Application
-   Binary Interface) of the platform, which specify the purpose of machine
-   registers and stack handling among other things. The sljit_emit_enter
-   function emits the necessary instructions for setting up a new context
-   for the executable code and moves function arguments to the saved
-   registers. The number of arguments are specified in the "args"
-   parameter and the first argument goes to SLJIT_SAVED_REG1, the second
-   goes to SLJIT_SAVED_REG2 and so on. The number of scratch and
-   saved registers are passed in "scratches" and "saveds" arguments
-   respectively. Since the saved registers contains the arguments,
-   "args" must be less or equal than "saveds". The sljit_emit_enter
-   is also capable of allocating a stack space for local variables. The
-   "local_size" argument contains the size in bytes of this local area
-   and its staring address is stored in SLJIT_LOCALS_REG. However
-   the SLJIT_LOCALS_REG is not necessary the machine stack pointer.
-   The memory bytes between SLJIT_LOCALS_REG (inclusive) and
-   SLJIT_LOCALS_REG + local_size (exclusive) can be modified freely
-   until the function returns. The stack space is uninitialized.
+   The executable code is a function from the viewpoint of the C
+   language. The function calls must obey the ABI (Application
+   Binary Interface) of the platform, which specifies the purpose of
+   machine registers and stack handling among other things. The
+   sljit_emit_enter function emits the necessary instructions for
+   setting up a new context for the executable code and moves function
+   arguments to the saved registers. Furthermore the options argument
+   can be used to pass configuration options to the compiler. The
+   available options are listed before sljit_emit_enter.
+
+   The function argument list is the combination of SLJIT_ARGx
+   (SLJIT_DEF_ARG1) macros. Currently a maximum of 3 SW / UW
+   (SLJIT_ARG_TYPE_SW / SLJIT_ARG_TYPE_UW) arguments are supported.
+   The first argument goes to SLJIT_S0, the second goes to SLJIT_S1
+   and so on. The register set used by the function must be declared
+   as well.
+   The number of scratch and saved registers used by the
+   function must be passed to sljit_emit_enter. Only R registers
+   between R0 and "scratches" argument can be used later. E.g. if
+   "scratches" is set to 2, the scratch register set will be limited
+   to SLJIT_R0 and SLJIT_R1. The S registers and the floating point
+   registers ("fscratches" and "fsaveds") are specified in a similar
+   manner. The sljit_emit_enter is also capable of allocating a stack
+   space for local variables. The "local_size" argument contains the
+   size in bytes of this local area and its starting address is stored
+   in SLJIT_SP. The memory area between SLJIT_SP (inclusive) and
+   SLJIT_SP + local_size (exclusive) can be modified freely until
+   the function returns. The stack space is not initialized.
+
+   Note: the following conditions must be met:
+         0 <= scratches <= SLJIT_NUMBER_OF_REGISTERS
+         0 <= saveds <= SLJIT_NUMBER_OF_REGISTERS
+         scratches + saveds <= SLJIT_NUMBER_OF_REGISTERS
+         0 <= fscratches <= SLJIT_NUMBER_OF_FLOAT_REGISTERS
+         0 <= fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS
+         fscratches + fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS

 Note: every call of sljit_emit_enter and sljit_set_context
-   overwrites the previous context. */
+   overwrites the previous context.
+*/

+/* The absolute address returned by sljit_get_local_base with
+offset 0 is aligned to sljit_f64. Otherwise it is aligned to sljit_sw. */
+#define SLJIT_F64_ALIGNMENT 0x00000001
+
+/* The local_size must be >= 0 and <= SLJIT_MAX_LOCAL_SIZE. */
 #define SLJIT_MAX_LOCAL_SIZE 65536

-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler,
- sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size);

 /* The machine code has a context (which contains the local stack space size,
    number of used registers, etc.) which initialized by sljit_emit_enter. Several
    functions (like sljit_emit_return) requres this context to be able to generate
    the appropriate code. However, some code fragments (like inline cache) may have
-   no normal entry point so their context is unknown for the compiler. Using the
-   function below we can specify their context.
+   no normal entry point so their context is unknown to the compiler. Their context
+   can be provided to the compiler by the sljit_set_context function.

    Note: every call of sljit_emit_enter and sljit_set_context
          overwrites the previous context. */
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler,
- sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
+ sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
+ sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size);

 /* Return from machine code. The op argument can be SLJIT_UNUSED which means the
    function does not return with anything or any opcode between SLJIT_MOV and
   is SLJIT_UNUSED, otherwise see below the description about source and
    destination arguments. */
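 
 /* As an illustration, a function computing a + b could be emitted with
    the new signatures roughly as follows (a sketch only; register counts
    are examples, "compiler" is an existing sljit_compiler instance, and
    sljit_emit_op2 is declared later in this header):
 
      sljit_emit_enter(compiler, 0, SLJIT_ARG1(SW) | SLJIT_ARG2(SW),
          1, 2, 0, 0, 0);
      sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R0, 0,
          SLJIT_S0, 0, SLJIT_S1, 0);
      sljit_emit_return(compiler, SLJIT_MOV, SLJIT_R0, 0);
 
    Here 1 scratch and 2 saved registers are declared, and the two SW
    arguments arrive in SLJIT_S0 and SLJIT_S1. */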
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op,
- sljit_si src, sljit_sw srcw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src, sljit_sw srcw);

-/* Fast calling mechanism for utility functions (see SLJIT_FAST_CALL). All registers and
-   even the stack frame is passed to the callee. The return address is preserved in
-   dst/dstw by sljit_emit_fast_enter (the type of the value stored by this function
-   is sljit_p), and sljit_emit_fast_return can use this as a return value later. */
+/* Generating entry and exit points for fast call functions (see SLJIT_FAST_CALL).
+   Both sljit_emit_fast_enter and sljit_emit_fast_return functions preserve the
+   values of all registers and the stack frame. The return address is stored in the
+   dst argument of sljit_emit_fast_enter, and this return address can be passed
+   to sljit_emit_fast_return to continue the execution after the fast call.
-/* Note: only for sljit specific, non ABI compilant calls. Fast, since only a few machine
-   instructions are needed. Excellent for small uility functions, where saving registers
-   and setting up a new stack frame would cost too much performance. However, it is still
-   possible to return to the address of the caller (or anywhere else). */
+
+   Fast calls are cheap operations (usually only a single call instruction is
+   emitted) but they do not preserve any registers. However the callee function
+   can freely use / update any registers and stack values, which can be
+   efficiently exploited by various optimizations. Registers can be saved
+   manually by the callee function if needed.
-/* Note: flags are not changed (unlike sljit_emit_enter / sljit_emit_return). */
+
+   Although returning to a different address by sljit_emit_fast_return is possible,
+   this address usually cannot be predicted by the return address predictor of
+   modern CPUs, which may reduce performance. Furthermore using sljit_emit_ijump
+   to return is also inefficient since return address prediction is usually
+   triggered by a specific form of ijump.
-/* Note: although sljit_emit_fast_return could be replaced by an ijump, it is not suggested,
-   since many architectures do clever branch prediction on call / return instruction pairs. */
+
+   Flags: - (does not modify flags). */

-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw);
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw);

 /*
-   Source and destination values for arithmetical instructions
+   Source and destination operands for arithmetical instructions
    imm - a simple immediate value (cannot be used as a destination)
    reg - any of the registers (immediate argument must be 0)
    [imm] - absolute immediate memory address
@@ -465,6 +722,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *
    arm-t2: [reg+imm], -255 <= imm <= 4095
           [reg+(reg<addr; }
 static SLJIT_INLINE sljit_uw sljit_get_const_addr(struct sljit_const *const_) { return const_->addr; }

-/* Only the address is required to rewrite the code. */
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr);
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant);
+/* Only the address and executable offset are required to perform dynamic
+   code modifications. See sljit_get_executable_offset function. */
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset);
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset);

 /* --------------------------------------------------------------------- */
 /* Miscellaneous utility functions */
 /* --------------------------------------------------------------------- */

 #define SLJIT_MAJOR_VERSION 0
-#define SLJIT_MINOR_VERSION 91
+#define SLJIT_MINOR_VERSION 94

 /* Get the human readable name of the platform. Can be useful on platforms
    like ARM, where ARM and Thumb2 functions can be mixed, and it is useful
    to know the type of the code generator. */
-SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void);
+SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void);

 /* Portable helper function to get an offset of a member. */
 #define SLJIT_OFFSETOF(base, member) ((sljit_sw)(&((base*)0x10)->member) - 0x10)

 #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
 /* This global lock is useful to compile common functions. */
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void);
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void);
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void);
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void);
 #endif

 #if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)

-/* The sljit_stack is a utiliy feature of sljit, which allocates a
-   writable memory region between base (inclusive) and limit (exclusive).
-   Both base and limit is a pointer, and base is always <= than limit.
-   This feature uses the "address space reserve" feature
-   of modern operating systems. Basically we don't need to allocate a
-   huge memory block in one step for the worst case, we can start with
-   a smaller chunk and extend it later. Since the address space is
-   reserved, the data never copied to other regions, thus it is safe
-   to store pointers here. */
-
-/* Note: The base field is aligned to PAGE_SIZE bytes (usually 4k or more).
-   Note: stack growing should not happen in small steps: 4k, 16k or even
-   bigger growth is better.
-   Note: this structure may not be supported by all operating systems.
-   Some kind of fallback mechanism is suggested when SLJIT_UTIL_STACK
-   is not defined. */
+/* The sljit_stack structure and its manipulation functions provide
+   an implementation for a top-down stack. The stack top is stored
+   in the end field of the sljit_stack structure and the stack goes
+   down to the min_start field, so the memory region reserved for
+   this stack is between min_start (inclusive) and end (exclusive)
+   fields. However the application can only use the region between
+   start (inclusive) and end (exclusive) fields. The sljit_stack_resize
+   function can be used to extend this region up to min_start.
+
+   This feature uses the "address space reserve" feature of modern
+   operating systems. Instead of allocating a large memory block,
+   applications can allocate a small memory region and extend it
+   later without moving the content of the memory area.
+   Therefore after a successful resize by sljit_stack_resize all
+   pointers into this region are still valid.
+
+   Note:
+     this structure may not be supported by all operating systems.
+     end and min_start fields are aligned to PAGE_SIZE bytes (usually
+     4 Kbyte or more).
+     stack should grow in larger steps, e.g. 4Kbyte, 16Kbyte or more. */

 struct sljit_stack {
 /* User data, anything can be stored here.
-   Starting with the same value as base. */
- sljit_uw top;
- /* These members are read only. */
- sljit_uw base;
- sljit_uw limit;
- sljit_uw max_limit;
+   Initialized to the same value as the end field. */
+ sljit_u8 *top;
+/* These members are read only. */
+ /* End address of the stack */
+ sljit_u8 *end;
+ /* Current start address of the stack. */
+ sljit_u8 *start;
+ /* Lowest start address of the stack. */
+ sljit_u8 *min_start;
 };

-/* Returns NULL if unsuccessful.
-   Note: limit and max_limit contains the size for stack allocation
-   Note: the top field is initialized to base. */
-SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit);
-SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack* stack);
+/* Allocates a new stack. Returns NULL if unsuccessful.
+   Note: see sljit_create_compiler for the explanation of allocator_data. */
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_FUNC sljit_allocate_stack(sljit_uw start_size, sljit_uw max_size, void *allocator_data);
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *stack, void *allocator_data);

-/* Can be used to increase (allocate) or decrease (free) the memory area.
-   Returns with a non-zero value if unsuccessful. If new_limit is greater than
-   max_limit, it will fail. It is very easy to implement a stack data structure,
-   since the growth ratio can be added to the current limit, and sljit_stack_resize
-   will do all the necessary checks. The fields of the stack are not changed if
-   sljit_stack_resize fails. */
-SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack* stack, sljit_uw new_limit);
+/* Can be used to increase (extend) or decrease (shrink) the stack
+   memory area. Returns with new_start if successful and NULL otherwise.
+   It always fails if new_start is less than min_start or greater than
+   or equal to the end field. The fields of the stack are not changed
+   if the returned value is NULL (the current memory content is never lost). */
+SLJIT_API_FUNC_ATTRIBUTE sljit_u8 *SLJIT_FUNC sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_start);

 #endif /* (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) */

@@ -993,4 +1443,51 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct

 #endif /* !(defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) */

+#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
+/* Free unused executable memory. The allocator keeps some free memory
+   around to reduce the number of OS executable memory allocations.
+   This improves performance since these calls are costly. However
+   it is sometimes desired to free all unused memory regions, e.g.
+   before the application terminates. */
+SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void);
+#endif
+
+/* --------------------------------------------------------------------- */
+/* CPU specific functions */
+/* --------------------------------------------------------------------- */
+
+/* The following function is a helper function for sljit_emit_op_custom.
+   It returns with the real machine register index ( >=0 ) of any SLJIT_R,
+   SLJIT_S or SLJIT_SP register.
+
+   Note: it returns with -1 for virtual registers (only on x86-32). */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg);
+
+/* The following function is a helper function for sljit_emit_op_custom.
+   It returns with the real machine register index of any SLJIT_FLOAT register.
+
+   Note: the index is always an even number on ARM (except ARM-64), MIPS, and SPARC. */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg);
+
+/* Any instruction can be inserted into the instruction stream by
+   sljit_emit_op_custom. It has a similar purpose to inline assembly.
+   The size parameter must match the instruction size of the target
+   architecture:
+
+   x86: 0 < size <= 15. The instruction argument can be byte aligned.
+   Thumb2: if size == 2, the instruction argument must be 2 byte aligned.
+           if size == 4, the instruction argument must be 4 byte aligned.
+   Otherwise: size must be 4 and instruction argument must be 4 byte aligned. */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
+ void *instruction, sljit_s32 size);
+
+/* Define the currently available CPU status flags. It is usually used after an
+   sljit_emit_op_custom call to define which flags are set. */
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_current_flags(struct sljit_compiler *compiler,
+ sljit_s32 current_flags);
+
 #endif /* _SLJIT_LIR_H_ */
diff --git a/src/3rd/pcre/sjmips32.c b/src/3rd/pcre/sjmips32.c
index f8c214863d1..16dec052fe7 100644
--- a/src/3rd/pcre/sjmips32.c
+++ b/src/3rd/pcre/sjmips32.c
@@ -1,7 +1,7 @@
 /*
 * Stack-less Just-In-Time compiler
 *
- * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
@@ -26,7 +26,7 @@

 /* mips 32-bit arch dependent functions.
*/ -static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si dst_ar, sljit_sw imm) +static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_ar, sljit_sw imm) { if (!(imm & ~0xffff)) return push_inst(compiler, ORI | SA(0) | TA(dst_ar) | IMM(imm), dst_ar); @@ -40,53 +40,53 @@ static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si dst_ar, #define EMIT_LOGICAL(op_imm, op_norm) \ if (flags & SRC2_IMM) { \ - if (op & SLJIT_SET_E) \ + if (op & SLJIT_SET_Z) \ FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \ - if (CHECK_FLAGS(SLJIT_SET_E)) \ + if (!(flags & UNUSED_DEST)) \ FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \ } \ else { \ - if (op & SLJIT_SET_E) \ + if (op & SLJIT_SET_Z) \ FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ - if (CHECK_FLAGS(SLJIT_SET_E)) \ + if (!(flags & UNUSED_DEST)) \ FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \ } -#define EMIT_SHIFT(op_imm, op_norm) \ +#define EMIT_SHIFT(op_imm, op_v) \ if (flags & SRC2_IMM) { \ - if (op & SLJIT_SET_E) \ + if (op & SLJIT_SET_Z) \ FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \ - if (CHECK_FLAGS(SLJIT_SET_E)) \ + if (!(flags & UNUSED_DEST)) \ FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \ } \ else { \ - if (op & SLJIT_SET_E) \ - FAIL_IF(push_inst(compiler, op_norm | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ - if (CHECK_FLAGS(SLJIT_SET_E)) \ - FAIL_IF(push_inst(compiler, op_norm | S(src2) | T(src1) | D(dst), DR(dst))); \ + if (op & SLJIT_SET_Z) \ + FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ + if (!(flags & UNUSED_DEST)) \ + FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | D(dst), DR(dst))); \ } -static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, - sljit_si dst, sljit_si src1, sljit_sw src2) +static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, + sljit_s32 dst, sljit_s32 src1, sljit_sw src2) { - sljit_si overflow_ra = 0; + sljit_s32 is_overflow, is_carry, is_handled; switch (GET_OPCODE(op)) { case SLJIT_MOV: - case SLJIT_MOV_UI: - case SLJIT_MOV_SI: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: case SLJIT_MOV_P: SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); if (dst != src2) return push_inst(compiler, ADDU | S(src2) | TA(0) | D(dst), DR(dst)); return SLJIT_SUCCESS; - case SLJIT_MOV_UB: - case SLJIT_MOV_SB: + case SLJIT_MOV_U8: + case SLJIT_MOV_S8: SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { - if (op == SLJIT_MOV_SB) { -#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) + if (op == SLJIT_MOV_S8) { +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst)); #else FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(24), DR(dst))); @@ -95,16 +95,17 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj } return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst)); } - else if (dst != src2) - SLJIT_ASSERT_STOP(); + else { + SLJIT_ASSERT(dst == src2); + } return SLJIT_SUCCESS; - case SLJIT_MOV_UH: - case SLJIT_MOV_SH: + case SLJIT_MOV_U16: + case SLJIT_MOV_S16: SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & 
SRC2_IMM)); if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { - if (op == SLJIT_MOV_SH) { -#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) + if (op == SLJIT_MOV_S16) { +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst)); #else FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(16), DR(dst))); @@ -113,24 +114,25 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj } return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst)); } - else if (dst != src2) - SLJIT_ASSERT_STOP(); + else { + SLJIT_ASSERT(dst == src2); + } return SLJIT_SUCCESS; case SLJIT_NOT: SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if (op & SLJIT_SET_E) + if (op & SLJIT_SET_Z) FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - if (CHECK_FLAGS(SLJIT_SET_E)) + if (!(flags & UNUSED_DEST)) FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst))); return SLJIT_SUCCESS; case SLJIT_CLZ: SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); -#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) - if (op & SLJIT_SET_E) +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) + if (op & SLJIT_SET_Z) FAIL_IF(push_inst(compiler, CLZ | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - if (CHECK_FLAGS(SLJIT_SET_E)) + if (!(flags & UNUSED_DEST)) FAIL_IF(push_inst(compiler, CLZ | S(src2) | T(dst) | D(dst), DR(dst))); #else if (SLJIT_UNLIKELY(flags & UNUSED_DEST)) { @@ -138,171 +140,201 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG); } /* Nearly all instructions are unmovable in the following sequence. */ - FAIL_IF(push_inst(compiler, ADDU_W | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, ADDU | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); /* Check zero. */ FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS)); FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM(32), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, ADDIU_W | SA(0) | T(dst) | IMM(-1), DR(dst))); + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(dst) | IMM(-1), DR(dst))); /* Loop for searching the highest bit. 
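   (dst starts at -1 just above and is incremented once per pass, while
   TMP_REG1 is shifted left in the BGEZ delay slot; the loop exits once
   the sign bit is set, so dst ends up holding the leading zero count.)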
*/ - FAIL_IF(push_inst(compiler, ADDIU_W | S(dst) | T(dst) | IMM(1), DR(dst))); + FAIL_IF(push_inst(compiler, ADDIU | S(dst) | T(dst) | IMM(1), DR(dst))); FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS)); FAIL_IF(push_inst(compiler, SLL | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS)); - if (op & SLJIT_SET_E) - return push_inst(compiler, ADDU_W | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG); #endif return SLJIT_SUCCESS; case SLJIT_ADD: + is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + if (flags & SRC2_IMM) { - if (op & SLJIT_SET_O) { - FAIL_IF(push_inst(compiler, SRL | T(src1) | DA(TMP_EREG1) | SH_IMM(31), TMP_EREG1)); - if (src2 < 0) - FAIL_IF(push_inst(compiler, XORI | SA(TMP_EREG1) | TA(TMP_EREG1) | IMM(1), TMP_EREG1)); + if (is_overflow) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else + FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); } - if (op & SLJIT_SET_E) + else if (op & SLJIT_SET_Z) FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); - if (op & SLJIT_SET_C) { + + if (is_overflow || is_carry) { if (src2 >= 0) - FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); + FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); else { - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); - FAIL_IF(push_inst(compiler, OR | S(src1) | TA(ULESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG)); + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + FAIL_IF(push_inst(compiler, OR | S(src1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); } } /* dst may be the same as src1 or src2. */ - if (CHECK_FLAGS(SLJIT_SET_E)) + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst))); - if (op & SLJIT_SET_O) { - FAIL_IF(push_inst(compiler, SRL | T(dst) | DA(OVERFLOW_FLAG) | SH_IMM(31), OVERFLOW_FLAG)); - if (src2 < 0) - FAIL_IF(push_inst(compiler, XORI | SA(OVERFLOW_FLAG) | TA(OVERFLOW_FLAG) | IMM(1), OVERFLOW_FLAG)); - } } else { - if (op & SLJIT_SET_O) { - FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(TMP_EREG1), TMP_EREG1)); - FAIL_IF(push_inst(compiler, SRL | TA(TMP_EREG1) | DA(TMP_EREG1) | SH_IMM(31), TMP_EREG1)); - if (src1 != dst) - overflow_ra = DR(src1); - else if (src2 != dst) - overflow_ra = DR(src2); - else { - /* Rare ocasion. */ - FAIL_IF(push_inst(compiler, ADDU | S(src1) | TA(0) | DA(TMP_EREG2), TMP_EREG2)); - overflow_ra = TMP_EREG2; - } - } - if (op & SLJIT_SET_E) + if (is_overflow) + FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else if (op & SLJIT_SET_Z) FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - if (op & SLJIT_SET_C) - FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(ULESS_FLAG), ULESS_FLAG)); + + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); /* dst may be the same as src1 or src2. 
*/ - if (CHECK_FLAGS(SLJIT_SET_E)) + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst))); - if (op & SLJIT_SET_O) { - FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(overflow_ra) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); - FAIL_IF(push_inst(compiler, SRL | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG) | SH_IMM(31), OVERFLOW_FLAG)); - } } /* a + b >= a | b (otherwise, the carry should be set to 1). */ - if (op & SLJIT_SET_C) - FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(ULESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG)); - if (op & SLJIT_SET_O) - return push_inst(compiler, MOVN | SA(0) | TA(TMP_EREG1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG); - return SLJIT_SUCCESS; + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); + if (!is_overflow) + return SLJIT_SUCCESS; + FAIL_IF(push_inst(compiler, SLL | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); + return push_inst(compiler, SRL | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG); case SLJIT_ADDC: + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + if (flags & SRC2_IMM) { - if (op & SLJIT_SET_C) { + if (is_carry) { if (src2 >= 0) - FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(TMP_EREG1) | IMM(src2), TMP_EREG1)); + FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); else { - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(TMP_EREG1) | IMM(src2), TMP_EREG1)); - FAIL_IF(push_inst(compiler, OR | S(src1) | TA(TMP_EREG1) | DA(TMP_EREG1), TMP_EREG1)); + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, OR | S(src1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); } } FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst))); } else { - if (op & SLJIT_SET_C) - FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(TMP_EREG1), TMP_EREG1)); + if (is_carry) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); /* dst may be the same as src1 or src2. */ FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst))); } - if (op & SLJIT_SET_C) - FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(TMP_EREG1) | DA(TMP_EREG1), TMP_EREG1)); + if (is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(ULESS_FLAG) | D(dst), DR(dst))); - if (!(op & SLJIT_SET_C)) + FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst))); + if (!is_carry) return SLJIT_SUCCESS; - /* Set TMP_EREG2 (dst == 0) && (ULESS_FLAG == 1). */ - FAIL_IF(push_inst(compiler, SLTIU | S(dst) | TA(TMP_EREG2) | IMM(1), TMP_EREG2)); - FAIL_IF(push_inst(compiler, AND | SA(TMP_EREG2) | TA(ULESS_FLAG) | DA(TMP_EREG2), TMP_EREG2)); + /* Set ULESS_FLAG (dst == 0) && (OTHER_FLAG == 1). */ + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); /* Set carry flag. 
*/ - return push_inst(compiler, OR | SA(TMP_EREG2) | TA(TMP_EREG1) | DA(ULESS_FLAG), ULESS_FLAG); + return push_inst(compiler, OR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); case SLJIT_SUB: - if ((flags & SRC2_IMM) && ((op & (SLJIT_SET_S | SLJIT_SET_U)) || src2 == SIMM_MIN)) { + if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); src2 = TMP_REG2; flags &= ~SRC2_IMM; } + is_handled = 0; + if (flags & SRC2_IMM) { - if (op & SLJIT_SET_O) { - FAIL_IF(push_inst(compiler, SRL | T(src1) | DA(TMP_EREG1) | SH_IMM(31), TMP_EREG1)); - if (src2 < 0) - FAIL_IF(push_inst(compiler, XORI | SA(TMP_EREG1) | TA(TMP_EREG1) | IMM(1), TMP_EREG1)); - if (src1 != dst) - overflow_ra = DR(src1); - else { - /* Rare ocasion. */ - FAIL_IF(push_inst(compiler, ADDU | S(src1) | TA(0) | DA(TMP_EREG2), TMP_EREG2)); - overflow_ra = TMP_EREG2; - } + if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + is_handled = 1; + } + else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLTI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + is_handled = 1; + } + } + + if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) { + is_handled = 1; + + if (flags & SRC2_IMM) { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); } - if (op & SLJIT_SET_E) + else if (GET_FLAG_TYPE(op) == SLJIT_GREATER || GET_FLAG_TYPE(op) == SLJIT_LESS_EQUAL) + { + FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); + } + else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); + } + else if (GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER || GET_FLAG_TYPE(op) == SLJIT_SIG_LESS_EQUAL) + { + FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); + } + } + + if (is_handled) { + if (flags & SRC2_IMM) { + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); + if (!(flags & UNUSED_DEST)) + return push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst)); + } + else { + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + if (!(flags & UNUSED_DEST)) + return push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst)); + } + return SLJIT_SUCCESS; + } + + is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_overflow) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else + FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + } + else if (op & SLJIT_SET_Z) FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); - if (op & SLJIT_SET_C) - FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); + + if (is_overflow || is_carry) + 
FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); /* dst may be the same as src1 or src2. */ - if (CHECK_FLAGS(SLJIT_SET_E)) + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst))); } else { - if (op & SLJIT_SET_O) { - FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(TMP_EREG1), TMP_EREG1)); - FAIL_IF(push_inst(compiler, SRL | TA(TMP_EREG1) | DA(TMP_EREG1) | SH_IMM(31), TMP_EREG1)); - if (src1 != dst) - overflow_ra = DR(src1); - else { - /* Rare ocasion. */ - FAIL_IF(push_inst(compiler, ADDU | S(src1) | TA(0) | DA(TMP_EREG2), TMP_EREG2)); - overflow_ra = TMP_EREG2; - } - } - if (op & SLJIT_SET_E) + if (is_overflow) + FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else if (op & SLJIT_SET_Z) FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - if (op & (SLJIT_SET_U | SLJIT_SET_C)) - FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(ULESS_FLAG), ULESS_FLAG)); - if (op & SLJIT_SET_U) - FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(UGREATER_FLAG), UGREATER_FLAG)); - if (op & SLJIT_SET_S) { - FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(LESS_FLAG), LESS_FLAG)); - FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(GREATER_FLAG), GREATER_FLAG)); - } + + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); /* dst may be the same as src1 or src2. */ - if (CHECK_FLAGS(SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_C)) + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst))); } - if (op & SLJIT_SET_O) { - FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(overflow_ra) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); - FAIL_IF(push_inst(compiler, SRL | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG) | SH_IMM(31), OVERFLOW_FLAG)); - return push_inst(compiler, MOVZ | SA(0) | TA(TMP_EREG1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG); - } - return SLJIT_SUCCESS; + if (!is_overflow) + return SLJIT_SUCCESS; + FAIL_IF(push_inst(compiler, SLL | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); + return push_inst(compiler, SRL | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG); case SLJIT_SUBC: if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { @@ -311,44 +343,48 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj flags &= ~SRC2_IMM; } + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + if (flags & SRC2_IMM) { - if (op & SLJIT_SET_C) - FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(TMP_EREG1) | IMM(-src2), TMP_EREG1)); + if (is_carry) + FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); /* dst may be the same as src1 or src2. */ FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst))); } else { - if (op & SLJIT_SET_C) - FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(TMP_EREG1), TMP_EREG1)); + if (is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); /* dst may be the same as src1 or src2. 
*/ FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst))); } - if (op & SLJIT_SET_C) - FAIL_IF(push_inst(compiler, MOVZ | SA(ULESS_FLAG) | T(dst) | DA(TMP_EREG1), TMP_EREG1)); + if (is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1))); - FAIL_IF(push_inst(compiler, SUBU | S(dst) | TA(ULESS_FLAG) | D(dst), DR(dst))); - - if (op & SLJIT_SET_C) - FAIL_IF(push_inst(compiler, ADDU | SA(TMP_EREG1) | TA(0) | DA(ULESS_FLAG), ULESS_FLAG)); - - return SLJIT_SUCCESS; + FAIL_IF(push_inst(compiler, SUBU | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst))); + return (is_carry) ? push_inst(compiler, OR | SA(EQUAL_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG) : SLJIT_SUCCESS; case SLJIT_MUL: SLJIT_ASSERT(!(flags & SRC2_IMM)); - if (!(op & SLJIT_SET_O)) { -#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) + + if (GET_FLAG_TYPE(op) != SLJIT_MUL_OVERFLOW) { +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) || (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)); -#else +#else /* !SLJIT_MIPS_R1 && !SLJIT_MIPS_R6 */ FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS)); return push_inst(compiler, MFLO | D(dst), DR(dst)); -#endif +#endif /* SLJIT_MIPS_R1 || SLJIT_MIPS_R6 */ } +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) + FAIL_IF(push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst))); + FAIL_IF(push_inst(compiler, MUH | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); +#else /* !SLJIT_MIPS_R6 */ FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS)); - FAIL_IF(push_inst(compiler, MFHI | DA(TMP_EREG1), TMP_EREG1)); + FAIL_IF(push_inst(compiler, MFHI | DA(EQUAL_FLAG), EQUAL_FLAG)); FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst))); - FAIL_IF(push_inst(compiler, SRA | T(dst) | DA(TMP_EREG2) | SH_IMM(31), TMP_EREG2)); - return push_inst(compiler, SUBU | SA(TMP_EREG1) | TA(TMP_EREG2) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG); +#endif /* SLJIT_MIPS_R6 */ + FAIL_IF(push_inst(compiler, SRA | T(dst) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG)); + return push_inst(compiler, SUBU | SA(EQUAL_FLAG) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); case SLJIT_AND: EMIT_LOGICAL(ANDI, AND); @@ -375,30 +411,263 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj return SLJIT_SUCCESS; } - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_SUCCESS; } -static SLJIT_INLINE sljit_si emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw init_value) +static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value) { FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 16), DR(dst))); return push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value), DR(dst)); } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) { - sljit_ins *inst = (sljit_ins*)addr; + sljit_ins *inst = (sljit_ins *)addr; - inst[0] = (inst[0] & 0xffff0000) | ((new_addr >> 16) & 0xffff); - inst[1] = (inst[1] & 0xffff0000) | (new_addr & 0xffff); + SLJIT_ASSERT((inst[0] & 0xffe00000) == LUI && (inst[1] & 0xfc000000) == ORI); + inst[0] = (inst[0] & 0xffff0000) | ((new_target >> 16) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | (new_target & 0xffff); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); SLJIT_CACHE_FLUSH(inst, inst + 2); } 
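+
+/* For example, retargeting a jump to 0x12345678 with the function above
+   rewrites the LUI immediate to 0x1234 and the ORI immediate to 0x5678,
+   then flushes the instruction cache through the executable mapping. */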
-SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) { - sljit_ins *inst = (sljit_ins*)addr; + sljit_ins *inst = (sljit_ins *)addr; + SLJIT_ASSERT((inst[0] & 0xffe00000) == LUI && (inst[1] & 0xfc000000) == ORI); inst[0] = (inst[0] & 0xffff0000) | ((new_constant >> 16) & 0xffff); inst[1] = (inst[1] & 0xffff0000) | (new_constant & 0xffff); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); SLJIT_CACHE_FLUSH(inst, inst + 2); } + +static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_ins *ins_ptr) +{ + sljit_s32 stack_offset = 0; + sljit_s32 arg_count = 0; + sljit_s32 float_arg_count = 0; + sljit_s32 word_arg_count = 0; + sljit_s32 types = 0; + sljit_s32 arg_count_save, types_save; + sljit_ins prev_ins = NOP; + sljit_ins ins = NOP; + sljit_u8 offsets[4]; + + SLJIT_ASSERT(reg_map[TMP_REG1] == 4 && freg_map[TMP_FREG1] == 12); + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK); + + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + offsets[arg_count] = (sljit_u8)stack_offset; + + if (word_arg_count == 0 && arg_count <= 1) + offsets[arg_count] = 254 + arg_count; + + stack_offset += sizeof(sljit_f32); + arg_count++; + float_arg_count++; + break; + case SLJIT_ARG_TYPE_F64: + if (stack_offset & 0x7) + stack_offset += sizeof(sljit_sw); + offsets[arg_count] = (sljit_u8)stack_offset; + + if (word_arg_count == 0 && arg_count <= 1) + offsets[arg_count] = 254 + arg_count; + + stack_offset += sizeof(sljit_f64); + arg_count++; + float_arg_count++; + break; + default: + offsets[arg_count] = (sljit_u8)stack_offset; + stack_offset += sizeof(sljit_sw); + arg_count++; + word_arg_count++; + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + /* Stack is aligned to 16 bytes, max two doubles can be placed on the stack. 
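+   (Under the o32 convention the first 16 bytes of arguments travel in
+   registers $a0..$a3, so at most one extra 16 byte block is needed.)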
*/ + if (stack_offset > 16) + FAIL_IF(push_inst(compiler, ADDIU | S(SLJIT_SP) | T(SLJIT_SP) | IMM(-16), DR(SLJIT_SP))); + + types_save = types; + arg_count_save = arg_count; + + while (types) { + switch (types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + arg_count--; + if (offsets[arg_count] < 254) + ins = SWC1 | S(SLJIT_SP) | FT(float_arg_count) | IMM(offsets[arg_count]); + float_arg_count--; + break; + case SLJIT_ARG_TYPE_F64: + arg_count--; + if (offsets[arg_count] < 254) + ins = SDC1 | S(SLJIT_SP) | FT(float_arg_count) | IMM(offsets[arg_count]); + float_arg_count--; + break; + default: + if (offsets[arg_count - 1] >= 16) + ins = SW | S(SLJIT_SP) | T(word_arg_count) | IMM(offsets[arg_count - 1]); + else if (arg_count != word_arg_count) + ins = ADDU | S(word_arg_count) | TA(0) | DA(4 + (offsets[arg_count - 1] >> 2)); + else if (arg_count == 1) + ins = ADDU | S(SLJIT_R0) | TA(0) | DA(4); + + arg_count--; + word_arg_count--; + break; + } + + if (ins != NOP) { + if (prev_ins != NOP) + FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS)); + prev_ins = ins; + ins = NOP; + } + + types >>= SLJIT_DEF_SHIFT; + } + + types = types_save; + arg_count = arg_count_save; + + while (types) { + switch (types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + arg_count--; + if (offsets[arg_count] == 254) + ins = MOV_S | FMT_S | FS(SLJIT_FR0) | FD(TMP_FREG1); + else if (offsets[arg_count] < 16) + ins = LW | S(SLJIT_SP) | TA(4 + (offsets[arg_count] >> 2)) | IMM(offsets[arg_count]); + break; + case SLJIT_ARG_TYPE_F64: + arg_count--; + if (offsets[arg_count] == 254) + ins = MOV_S | FMT_D | FS(SLJIT_FR0) | FD(TMP_FREG1); + else if (offsets[arg_count] < 16) { + if (prev_ins != NOP) + FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS)); + prev_ins = LW | S(SLJIT_SP) | TA(4 + (offsets[arg_count] >> 2)) | IMM(offsets[arg_count]); + ins = LW | S(SLJIT_SP) | TA(5 + (offsets[arg_count] >> 2)) | IMM(offsets[arg_count] + sizeof(sljit_sw)); + } + break; + default: + arg_count--; + break; + } + + if (ins != NOP) { + if (prev_ins != NOP) + FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS)); + prev_ins = ins; + ins = NOP; + } + + types >>= SLJIT_DEF_SHIFT; + } + + *ins_ptr = prev_ins; + + return SLJIT_SUCCESS; +} + +static sljit_s32 post_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types) +{ + sljit_s32 stack_offset = 0; + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + stack_offset += sizeof(sljit_f32); + break; + case SLJIT_ARG_TYPE_F64: + if (stack_offset & 0x7) + stack_offset += sizeof(sljit_sw); + stack_offset += sizeof(sljit_f64); + break; + default: + stack_offset += sizeof(sljit_sw); + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + /* Stack is aligned to 16 bytes, max two doubles can be placed on the stack. 
*/ + if (stack_offset > 16) + return push_inst(compiler, ADDIU | S(SLJIT_SP) | T(SLJIT_SP) | IMM(16), DR(SLJIT_SP)); + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ + struct sljit_jump *jump; + sljit_ins ins; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins)); + + SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); + + PTR_FAIL_IF(emit_const(compiler, PIC_ADDR_REG, 0)); + + jump->flags |= IS_JAL | IS_CALL; + PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS)); + + PTR_FAIL_IF(post_call_with_args(compiler, arg_types)); + + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) +{ + sljit_ins ins; + + CHECK_ERROR(); + CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + + SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); + + if (src & SLJIT_IMM) + FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw)); + else if (FAST_IS_REG(src)) + FAIL_IF(push_inst(compiler, ADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG))); + else if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw)); + } + + FAIL_IF(call_with_args(compiler, arg_types, &ins)); + + /* Register input. */ + FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS)); + return post_call_with_args(compiler, arg_types); +} diff --git a/src/3rd/pcre/sjmips64.c b/src/3rd/pcre/sjmips64.c new file mode 100644 index 00000000000..a6a2bcc0c9a --- /dev/null +++ b/src/3rd/pcre/sjmips64.c @@ -0,0 +1,675 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* mips 64-bit arch dependent functions. */ + +static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_ar, sljit_sw imm) +{ + sljit_s32 shift = 32; + sljit_s32 shift2; + sljit_s32 inv = 0; + sljit_ins ins; + sljit_uw uimm; + + if (!(imm & ~0xffff)) + return push_inst(compiler, ORI | SA(0) | TA(dst_ar) | IMM(imm), dst_ar); + + if (imm < 0 && imm >= SIMM_MIN) + return push_inst(compiler, ADDIU | SA(0) | TA(dst_ar) | IMM(imm), dst_ar); + + if (imm <= 0x7fffffffl && imm >= -0x80000000l) { + FAIL_IF(push_inst(compiler, LUI | TA(dst_ar) | IMM(imm >> 16), dst_ar)); + return (imm & 0xffff) ? push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar) : SLJIT_SUCCESS; + } + + /* Zero extended number. */ + uimm = imm; + if (imm < 0) { + uimm = ~imm; + inv = 1; + } + + while (!(uimm & 0xff00000000000000l)) { + shift -= 8; + uimm <<= 8; + } + + if (!(uimm & 0xf000000000000000l)) { + shift -= 4; + uimm <<= 4; + } + + if (!(uimm & 0xc000000000000000l)) { + shift -= 2; + uimm <<= 2; + } + + if ((sljit_sw)uimm < 0) { + uimm >>= 1; + shift += 1; + } + SLJIT_ASSERT(((uimm & 0xc000000000000000l) == 0x4000000000000000l) && (shift > 0) && (shift <= 32)); + + if (inv) + uimm = ~uimm; + + FAIL_IF(push_inst(compiler, LUI | TA(dst_ar) | IMM(uimm >> 48), dst_ar)); + if (uimm & 0x0000ffff00000000l) + FAIL_IF(push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(uimm >> 32), dst_ar)); + + imm &= (1l << shift) - 1; + if (!(imm & ~0xffff)) { + ins = (shift == 32) ? DSLL32 : DSLL; + if (shift < 32) + ins |= SH_IMM(shift); + FAIL_IF(push_inst(compiler, ins | TA(dst_ar) | DA(dst_ar), dst_ar)); + return !(imm & 0xffff) ? SLJIT_SUCCESS : push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar); + } + + /* Double shifts needs to be performed. */ + uimm <<= 32; + shift2 = shift - 16; + + while (!(uimm & 0xf000000000000000l)) { + shift2 -= 4; + uimm <<= 4; + } + + if (!(uimm & 0xc000000000000000l)) { + shift2 -= 2; + uimm <<= 2; + } + + if (!(uimm & 0x8000000000000000l)) { + shift2--; + uimm <<= 1; + } + + SLJIT_ASSERT((uimm & 0x8000000000000000l) && (shift2 > 0) && (shift2 <= 16)); + + FAIL_IF(push_inst(compiler, DSLL | TA(dst_ar) | DA(dst_ar) | SH_IMM(shift - shift2), dst_ar)); + FAIL_IF(push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(uimm >> 48), dst_ar)); + FAIL_IF(push_inst(compiler, DSLL | TA(dst_ar) | DA(dst_ar) | SH_IMM(shift2), dst_ar)); + + imm &= (1l << shift2) - 1; + return !(imm & 0xffff) ? SLJIT_SUCCESS : push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar); +} + +#define SELECT_OP(a, b) \ + (!(op & SLJIT_I32_OP) ? 
a : b) + +#define EMIT_LOGICAL(op_imm, op_norm) \ + if (flags & SRC2_IMM) { \ + if (op & SLJIT_SET_Z) \ + FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \ + if (!(flags & UNUSED_DEST)) \ + FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \ + } \ + else { \ + if (op & SLJIT_SET_Z) \ + FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ + if (!(flags & UNUSED_DEST)) \ + FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \ + } + +#define EMIT_SHIFT(op_dimm, op_dimm32, op_imm, op_dv, op_v) \ + if (flags & SRC2_IMM) { \ + if (src2 >= 32) { \ + SLJIT_ASSERT(!(op & SLJIT_I32_OP)); \ + ins = op_dimm32; \ + src2 -= 32; \ + } \ + else \ + ins = (op & SLJIT_I32_OP) ? op_imm : op_dimm; \ + if (op & SLJIT_SET_Z) \ + FAIL_IF(push_inst(compiler, ins | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \ + if (!(flags & UNUSED_DEST)) \ + FAIL_IF(push_inst(compiler, ins | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \ + } \ + else { \ + ins = (op & SLJIT_I32_OP) ? op_v : op_dv; \ + if (op & SLJIT_SET_Z) \ + FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ + if (!(flags & UNUSED_DEST)) \ + FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | D(dst), DR(dst))); \ + } + +static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, + sljit_s32 dst, sljit_s32 src1, sljit_sw src2) +{ + sljit_ins ins; + sljit_s32 is_overflow, is_carry, is_handled; + + switch (GET_OPCODE(op)) { + case SLJIT_MOV: + case SLJIT_MOV_P: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if (dst != src2) + return push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(dst), DR(dst)); + return SLJIT_SUCCESS; + + case SLJIT_MOV_U8: + case SLJIT_MOV_S8: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_S8) { + FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(24), DR(dst))); + return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(24), DR(dst)); + } + return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst)); + } + else { + SLJIT_ASSERT(dst == src2); + } + return SLJIT_SUCCESS; + + case SLJIT_MOV_U16: + case SLJIT_MOV_S16: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_S16) { + FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(16), DR(dst))); + return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(16), DR(dst)); + } + return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst)); + } + else { + SLJIT_ASSERT(dst == src2); + } + return SLJIT_SUCCESS; + + case SLJIT_MOV_U32: + SLJIT_ASSERT(!(op & SLJIT_I32_OP)); + FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(0), DR(dst))); + return push_inst(compiler, DSRL32 | T(dst) | D(dst) | SH_IMM(0), DR(dst)); + + case SLJIT_MOV_S32: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + return push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(0), DR(dst)); + + case SLJIT_NOT: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + if (!(flags & UNUSED_DEST)) + FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst))); + return SLJIT_SUCCESS; + + case 
SLJIT_CLZ: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); + if (!(flags & UNUSED_DEST)) + FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | T(dst) | D(dst), DR(dst))); +#else + if (SLJIT_UNLIKELY(flags & UNUSED_DEST)) { + FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(src2) | DA(EQUAL_FLAG) | SH_IMM(31), EQUAL_FLAG)); + return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG); + } + /* Nearly all instructions are unmovable in the following sequence. */ + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); + /* Check zero. */ + FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM((op & SLJIT_I32_OP) ? 32 : 64), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | T(dst) | IMM(-1), DR(dst))); + /* Loop for searching the highest bit. */ + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(dst) | T(dst) | IMM(1), DR(dst))); + FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, SELECT_OP(DSLL, SLL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS)); +#endif + return SLJIT_SUCCESS; + + case SLJIT_ADD: + is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_overflow) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else + FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + } + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); + + if (is_overflow || is_carry) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + else { + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + FAIL_IF(push_inst(compiler, OR | S(src1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); + } + } + /* dst may be the same as src1 or src2. */ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst))); + } + else { + if (is_overflow) + FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); + /* dst may be the same as src1 or src2. */ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst))); + } + + /* a + b >= a | b (otherwise, the carry should be set to 1). 
*/ + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); + if (!is_overflow) + return SLJIT_SUCCESS; + FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); + return push_inst(compiler, SELECT_OP(DSRL32, SRL) | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG); + + case SLJIT_ADDC: + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_carry) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); + else { + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, OR | S(src1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); + } + } + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst))); + } else { + if (is_carry) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + /* dst may be the same as src1 or src2. */ + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst))); + } + if (is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); + + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst))); + if (!is_carry) + return SLJIT_SUCCESS; + + /* Set ULESS_FLAG (dst == 0) && (OTHER_FLAG == 1). */ + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); + /* Set carry flag. 
*/ + return push_inst(compiler, OR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); + + case SLJIT_SUB: + if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + is_handled = 0; + + if (flags & SRC2_IMM) { + if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + is_handled = 1; + } + else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLTI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + is_handled = 1; + } + } + + if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) { + is_handled = 1; + + if (flags & SRC2_IMM) { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); + } + else if (GET_FLAG_TYPE(op) == SLJIT_GREATER || GET_FLAG_TYPE(op) == SLJIT_LESS_EQUAL) + { + FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); + } + else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); + } + else if (GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER || GET_FLAG_TYPE(op) == SLJIT_SIG_LESS_EQUAL) + { + FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); + } + } + + if (is_handled) { + if (flags & SRC2_IMM) { + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); + if (!(flags & UNUSED_DEST)) + return push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst)); + } + else { + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + if (!(flags & UNUSED_DEST)) + return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst)); + } + return SLJIT_SUCCESS; + } + + is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_overflow) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else + FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + } + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); + + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + /* dst may be the same as src1 or src2. 
*/ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst))); + } + else { + if (is_overflow) + FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); + /* dst may be the same as src1 or src2. */ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst))); + } + + if (!is_overflow) + return SLJIT_SUCCESS; + FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); + return push_inst(compiler, SELECT_OP(DSRL32, SRL) | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG); + + case SLJIT_SUBC: + if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_carry) + FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); + /* dst may be the same as src1 or src2. */ + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst))); + } + else { + if (is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + /* dst may be the same as src1 or src2. */ + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst))); + } + + if (is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1))); + + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst))); + return (is_carry) ? 
push_inst(compiler, OR | SA(EQUAL_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG) : SLJIT_SUCCESS; + + case SLJIT_MUL: + SLJIT_ASSERT(!(flags & SRC2_IMM)); + + if (GET_FLAG_TYPE(op) != SLJIT_MUL_OVERFLOW) { +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) + return push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst)); +#elif (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) + if (op & SLJIT_I32_OP) + return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)); + FAIL_IF(push_inst(compiler, DMULT | S(src1) | T(src2), MOVABLE_INS)); + return push_inst(compiler, MFLO | D(dst), DR(dst)); +#else /* !SLJIT_MIPS_R6 && !SLJIT_MIPS_R1 */ + FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS)); + return push_inst(compiler, MFLO | D(dst), DR(dst)); +#endif /* SLJIT_MIPS_R6 */ + } +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) + FAIL_IF(push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst))); + FAIL_IF(push_inst(compiler, SELECT_OP(DMUH, MUH) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); +#else /* !SLJIT_MIPS_R6 */ + FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, MFHI | DA(EQUAL_FLAG), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst))); +#endif /* SLJIT_MIPS_R6 */ + FAIL_IF(push_inst(compiler, SELECT_OP(DSRA32, SRA) | T(dst) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG)); + return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(EQUAL_FLAG) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); + + case SLJIT_AND: + EMIT_LOGICAL(ANDI, AND); + return SLJIT_SUCCESS; + + case SLJIT_OR: + EMIT_LOGICAL(ORI, OR); + return SLJIT_SUCCESS; + + case SLJIT_XOR: + EMIT_LOGICAL(XORI, XOR); + return SLJIT_SUCCESS; + + case SLJIT_SHL: + EMIT_SHIFT(DSLL, DSLL32, SLL, DSLLV, SLLV); + return SLJIT_SUCCESS; + + case SLJIT_LSHR: + EMIT_SHIFT(DSRL, DSRL32, SRL, DSRLV, SRLV); + return SLJIT_SUCCESS; + + case SLJIT_ASHR: + EMIT_SHIFT(DSRA, DSRA32, SRA, DSRAV, SRAV); + return SLJIT_SUCCESS; + } + + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value) +{ + FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 48), DR(dst))); + FAIL_IF(push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value >> 32), DR(dst))); + FAIL_IF(push_inst(compiler, DSLL | T(dst) | D(dst) | SH_IMM(16), DR(dst))); + FAIL_IF(push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value >> 16), DR(dst))); + FAIL_IF(push_inst(compiler, DSLL | T(dst) | D(dst) | SH_IMM(16), DR(dst))); + return push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value), DR(dst)); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) +{ + sljit_ins *inst = (sljit_ins *)addr; + + inst[0] = (inst[0] & 0xffff0000) | ((new_target >> 48) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | ((new_target >> 32) & 0xffff); + inst[3] = (inst[3] & 0xffff0000) | ((new_target >> 16) & 0xffff); + inst[5] = (inst[5] & 0xffff0000) | (new_target & 0xffff); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 6); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) +{ + sljit_ins *inst = (sljit_ins *)addr; + + inst[0] = (inst[0] & 0xffff0000) | ((new_constant >> 48) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | ((new_constant >> 32) & 0xffff); + 
inst[3] = (inst[3] & 0xffff0000) | ((new_constant >> 16) & 0xffff); + inst[5] = (inst[5] & 0xffff0000) | (new_constant & 0xffff); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 6); +} + +static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_ins *ins_ptr) +{ + sljit_s32 arg_count = 0; + sljit_s32 word_arg_count = 0; + sljit_s32 float_arg_count = 0; + sljit_s32 types = 0; + sljit_ins prev_ins = NOP; + sljit_ins ins = NOP; + + SLJIT_ASSERT(reg_map[TMP_REG1] == 4 && freg_map[TMP_FREG1] == 12); + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK); + + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + case SLJIT_ARG_TYPE_F64: + arg_count++; + float_arg_count++; + break; + default: + arg_count++; + word_arg_count++; + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + while (types) { + switch (types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + if (arg_count != float_arg_count) + ins = MOV_S | FMT_S | FS(float_arg_count) | FD(arg_count); + else if (arg_count == 1) + ins = MOV_S | FMT_S | FS(SLJIT_FR0) | FD(TMP_FREG1); + arg_count--; + float_arg_count--; + break; + case SLJIT_ARG_TYPE_F64: + if (arg_count != float_arg_count) + ins = MOV_S | FMT_D | FS(float_arg_count) | FD(arg_count); + else if (arg_count == 1) + ins = MOV_S | FMT_D | FS(SLJIT_FR0) | FD(TMP_FREG1); + arg_count--; + float_arg_count--; + break; + default: + if (arg_count != word_arg_count) + ins = DADDU | S(word_arg_count) | TA(0) | D(arg_count); + else if (arg_count == 1) + ins = DADDU | S(SLJIT_R0) | TA(0) | DA(4); + arg_count--; + word_arg_count--; + break; + } + + if (ins != NOP) { + if (prev_ins != NOP) + FAIL_IF(push_inst(compiler, prev_ins, MOVABLE_INS)); + prev_ins = ins; + ins = NOP; + } + + types >>= SLJIT_DEF_SHIFT; + } + + *ins_ptr = prev_ins; + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ + struct sljit_jump *jump; + sljit_ins ins; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins)); + + SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); + + PTR_FAIL_IF(emit_const(compiler, PIC_ADDR_REG, 0)); + + jump->flags |= IS_JAL | IS_CALL; + PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS)); + + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) +{ + sljit_ins ins; + + CHECK_ERROR(); + CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + + SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); + + if (src & SLJIT_IMM) + FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw)); + else if (FAST_IS_REG(src)) + FAIL_IF(push_inst(compiler, DADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG))); + else if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw)); + } + + 
FAIL_IF(call_with_args(compiler, arg_types, &ins)); + + /* Register input. */ + FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); + return push_inst(compiler, ins, UNMOVABLE_INS); +} diff --git a/src/3rd/pcre/sjmipsc.c b/src/3rd/pcre/sjmipsc.c index 8849b057b16..7c29080241c 100644 --- a/src/3rd/pcre/sjmipsc.c +++ b/src/3rd/pcre/sjmipsc.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -25,52 +25,77 @@ */ /* Latest MIPS architecture. */ -/* Automatically detect SLJIT_MIPS_32_64 */ +/* Automatically detect SLJIT_MIPS_R1 */ -SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) +#if (defined __mips_isa_rev) && (__mips_isa_rev >= 6) +#define SLJIT_MIPS_R6 1 +#endif + +SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void) { -#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) - return "MIPS(32)" SLJIT_CPUINFO; -#else +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + return "MIPS32-R6" SLJIT_CPUINFO; +#else /* !SLJIT_CONFIG_MIPS_32 */ + return "MIPS64-R6" SLJIT_CPUINFO; +#endif /* SLJIT_CONFIG_MIPS_32 */ + +#elif (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + return "MIPS32-R1" SLJIT_CPUINFO; +#else /* !SLJIT_CONFIG_MIPS_32 */ + return "MIPS64-R1" SLJIT_CPUINFO; +#endif /* SLJIT_CONFIG_MIPS_32 */ + +#else /* SLJIT_MIPS_R1 */ return "MIPS III" SLJIT_CPUINFO; -#endif +#endif /* SLJIT_MIPS_R6 */ } /* Length of an instruction word Both for mips-32 and mips-64 */ -typedef sljit_ui sljit_ins; +typedef sljit_u32 sljit_ins; -#define TMP_REG1 (SLJIT_NO_REGISTERS + 1) -#define TMP_REG2 (SLJIT_NO_REGISTERS + 2) -#define TMP_REG3 (SLJIT_NO_REGISTERS + 3) +#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2) +#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3) +#define TMP_REG3 (SLJIT_NUMBER_OF_REGISTERS + 4) /* For position independent code, t9 must contain the function address. */ #define PIC_ADDR_REG TMP_REG2 -/* TMP_EREG1 is used mainly for literal encoding on 64 bit. */ -#define TMP_EREG1 15 -#define TMP_EREG2 24 /* Floating point status register. */ #define FCSR_REG 31 /* Return address register. */ #define RETURN_ADDR_REG 31 -/* Flags are keept in volatile registers. */ -#define EQUAL_FLAG 7 -/* And carry flag as well. */ -#define ULESS_FLAG 10 -#define UGREATER_FLAG 11 -#define LESS_FLAG 12 -#define GREATER_FLAG 13 -#define OVERFLOW_FLAG 14 +/* Flags are kept in volatile registers. 
*/ +#define EQUAL_FLAG 3 +#define OTHER_FLAG 1 -#define TMP_FREG1 (0) -#define TMP_FREG2 ((SLJIT_FLOAT_REG6 + 1) << 1) +#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1) +#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) +#define TMP_FREG3 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3) -static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 4] = { - 0, 2, 5, 6, 3, 8, 16, 17, 18, 19, 20, 29, 4, 25, 9 +static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = { + 0, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 24, 23, 22, 21, 20, 19, 18, 17, 16, 29, 4, 25, 31 }; +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + +static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = { + 0, 0, 14, 2, 4, 6, 8, 12, 10, 16 +}; + +#else + +static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = { + 0, 0, 13, 14, 15, 16, 17, 12, 18, 10 +}; + +#endif + /* --------------------------------------------------------------------- */ /* Instrucion forms */ /* --------------------------------------------------------------------- */ @@ -78,32 +103,44 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 4] = { #define S(s) (reg_map[s] << 21) #define T(t) (reg_map[t] << 16) #define D(d) (reg_map[d] << 11) +#define FT(t) (freg_map[t] << 16) +#define FS(s) (freg_map[s] << 11) +#define FD(d) (freg_map[d] << 6) /* Absolute registers. */ #define SA(s) ((s) << 21) #define TA(t) ((t) << 16) #define DA(d) ((d) << 11) -#define FT(t) ((t) << 16) -#define FS(s) ((s) << 11) -#define FD(d) ((d) << 6) #define IMM(imm) ((imm) & 0xffff) -#define SH_IMM(imm) ((imm & 0x1f) << 6) +#define SH_IMM(imm) ((imm) << 6) #define DR(dr) (reg_map[dr]) +#define FR(dr) (freg_map[dr]) #define HI(opcode) ((opcode) << 26) #define LO(opcode) (opcode) +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) +/* CMP.cond.fmt */ +/* S = (20 << 21) D = (21 << 21) */ +#define CMP_FMT_S (20 << 21) +#endif /* SLJIT_MIPS_R6 */ /* S = (16 << 21) D = (17 << 21) */ -#define FMT_SD (16 << 21) +#define FMT_S (16 << 21) +#define FMT_D (17 << 21) -#define ABS_fmt (HI(17) | FMT_SD | LO(5)) -#define ADD_fmt (HI(17) | FMT_SD | LO(0)) -#define ADDU (HI(0) | LO(33)) +#define ABS_S (HI(17) | FMT_S | LO(5)) +#define ADD_S (HI(17) | FMT_S | LO(0)) #define ADDIU (HI(9)) +#define ADDU (HI(0) | LO(33)) #define AND (HI(0) | LO(36)) #define ANDI (HI(12)) #define B (HI(4)) #define BAL (HI(1) | (17 << 16)) +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) +#define BC1EQZ (HI(17) | (9 << 21) | FT(TMP_FREG3)) +#define BC1NEZ (HI(17) | (13 << 21) | FT(TMP_FREG3)) +#else /* !SLJIT_MIPS_R6 */ #define BC1F (HI(17) | (8 << 21)) #define BC1T (HI(17) | (8 << 21) | (1 << 16)) +#endif /* SLJIT_MIPS_R6 */ #define BEQ (HI(4)) #define BGEZ (HI(1) | (1 << 16)) #define BGTZ (HI(7)) @@ -112,34 +149,90 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 4] = { #define BNE (HI(5)) #define BREAK (HI(0) | LO(13)) #define CFC1 (HI(17) | (2 << 21)) -#define C_UN_fmt (HI(17) | FMT_SD | LO(49)) -#define C_UEQ_fmt (HI(17) | FMT_SD | LO(51)) -#define C_ULE_fmt (HI(17) | FMT_SD | LO(55)) -#define C_ULT_fmt (HI(17) | FMT_SD | LO(53)) +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) +#define C_UEQ_S (HI(17) | CMP_FMT_S | LO(3)) +#define C_ULE_S (HI(17) | CMP_FMT_S | LO(7)) +#define C_ULT_S (HI(17) | CMP_FMT_S | LO(5)) +#define C_UN_S (HI(17) | CMP_FMT_S | LO(1)) +#define C_FD (FD(TMP_FREG3)) +#else /* !SLJIT_MIPS_R6 */ +#define C_UEQ_S (HI(17) | FMT_S | LO(51)) +#define C_ULE_S (HI(17) | FMT_S | LO(55)) +#define C_ULT_S (HI(17) | FMT_S | LO(53)) +#define C_UN_S (HI(17) | 
FMT_S | LO(49)) +#define C_FD (0) +#endif /* SLJIT_MIPS_R6 */ +#define CVT_S_S (HI(17) | FMT_S | LO(32)) +#define DADDIU (HI(25)) +#define DADDU (HI(0) | LO(45)) +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) +#define DDIV (HI(0) | (2 << 6) | LO(30)) +#define DDIVU (HI(0) | (2 << 6) | LO(31)) +#define DMOD (HI(0) | (3 << 6) | LO(30)) +#define DMODU (HI(0) | (3 << 6) | LO(31)) +#define DIV (HI(0) | (2 << 6) | LO(26)) +#define DIVU (HI(0) | (2 << 6) | LO(27)) +#define DMUH (HI(0) | (3 << 6) | LO(28)) +#define DMUHU (HI(0) | (3 << 6) | LO(29)) +#define DMUL (HI(0) | (2 << 6) | LO(28)) +#define DMULU (HI(0) | (2 << 6) | LO(29)) +#else /* !SLJIT_MIPS_R6 */ +#define DDIV (HI(0) | LO(30)) +#define DDIVU (HI(0) | LO(31)) #define DIV (HI(0) | LO(26)) #define DIVU (HI(0) | LO(27)) -#define DIV_fmt (HI(17) | FMT_SD | LO(3)) +#define DMULT (HI(0) | LO(28)) +#define DMULTU (HI(0) | LO(29)) +#endif /* SLJIT_MIPS_R6 */ +#define DIV_S (HI(17) | FMT_S | LO(3)) +#define DSLL (HI(0) | LO(56)) +#define DSLL32 (HI(0) | LO(60)) +#define DSLLV (HI(0) | LO(20)) +#define DSRA (HI(0) | LO(59)) +#define DSRA32 (HI(0) | LO(63)) +#define DSRAV (HI(0) | LO(23)) +#define DSRL (HI(0) | LO(58)) +#define DSRL32 (HI(0) | LO(62)) +#define DSRLV (HI(0) | LO(22)) +#define DSUBU (HI(0) | LO(47)) #define J (HI(2)) #define JAL (HI(3)) #define JALR (HI(0) | LO(9)) +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) +#define JR (HI(0) | LO(9)) +#else /* !SLJIT_MIPS_R6 */ #define JR (HI(0) | LO(8)) +#endif /* SLJIT_MIPS_R6 */ #define LD (HI(55)) #define LUI (HI(15)) #define LW (HI(35)) +#define MFC1 (HI(17)) +#if !(defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) #define MFHI (HI(0) | LO(16)) #define MFLO (HI(0) | LO(18)) -#define MOV_fmt (HI(17) | FMT_SD | LO(6)) -#define MOVN (HI(0) | LO(11)) -#define MOVZ (HI(0) | LO(10)) -#define MUL_fmt (HI(17) | FMT_SD | LO(2)) +#else /* SLJIT_MIPS_R6 */ +#define MOD (HI(0) | (3 << 6) | LO(26)) +#define MODU (HI(0) | (3 << 6) | LO(27)) +#endif /* !SLJIT_MIPS_R6 */ +#define MOV_S (HI(17) | FMT_S | LO(6)) +#define MTC1 (HI(17) | (4 << 21)) +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) +#define MUH (HI(0) | (3 << 6) | LO(24)) +#define MUHU (HI(0) | (3 << 6) | LO(25)) +#define MUL (HI(0) | (2 << 6) | LO(24)) +#define MULU (HI(0) | (2 << 6) | LO(25)) +#else /* !SLJIT_MIPS_R6 */ #define MULT (HI(0) | LO(24)) #define MULTU (HI(0) | LO(25)) -#define NEG_fmt (HI(17) | FMT_SD | LO(7)) +#endif /* SLJIT_MIPS_R6 */ +#define MUL_S (HI(17) | FMT_S | LO(2)) +#define NEG_S (HI(17) | FMT_S | LO(7)) #define NOP (HI(0) | LO(0)) #define NOR (HI(0) | LO(39)) #define OR (HI(0) | LO(37)) #define ORI (HI(13)) #define SD (HI(63)) +#define SDC1 (HI(61)) #define SLT (HI(0) | LO(42)) #define SLTI (HI(10)) #define SLTIU (HI(11)) @@ -150,15 +243,28 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 4] = { #define SRLV (HI(0) | LO(6)) #define SRA (HI(0) | LO(3)) #define SRAV (HI(0) | LO(7)) -#define SUB_fmt (HI(17) | FMT_SD | LO(1)) +#define SUB_S (HI(17) | FMT_S | LO(1)) #define SUBU (HI(0) | LO(35)) #define SW (HI(43)) +#define SWC1 (HI(57)) +#define TRUNC_W_S (HI(17) | FMT_S | LO(13)) #define XOR (HI(0) | LO(38)) #define XORI (HI(14)) -#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) || (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) #define CLZ (HI(28) | LO(32)) +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) +#define DCLZ (LO(18)) +#else /* !SLJIT_MIPS_R6 */ +#define DCLZ (HI(28) | LO(36)) +#define MOVF (HI(0) | (0 << 16) | LO(1)) +#define MOVN (HI(0) | LO(11)) +#define MOVT (HI(0) | (1 
<< 16) | LO(1)) +#define MOVZ (HI(0) | LO(10)) #define MUL (HI(28) | LO(2)) +#endif /* SLJIT_MIPS_R6 */ +#define PREF (HI(51)) +#define PREFX (HI(19) | LO(15)) #define SEB (HI(31) | (16 << 6) | LO(32)) #define SEH (HI(31) | (24 << 6) | LO(32)) #endif @@ -181,7 +287,7 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 4] = { /* dest_reg is the absolute name of the register Useful for reordering instructions in the delay slot. */ -static sljit_si push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_si delay_slot) +static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_s32 delay_slot) { SLJIT_ASSERT(delay_slot == MOVABLE_INS || delay_slot >= UNMOVABLE_INS || delay_slot == ((ins >> 11) & 0x1f) || delay_slot == ((ins >> 16) & 0x1f)); @@ -193,34 +299,51 @@ static sljit_si push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_ return SLJIT_SUCCESS; } -static SLJIT_INLINE sljit_ins invert_branch(sljit_si flags) +static SLJIT_INLINE sljit_ins invert_branch(sljit_s32 flags) { - return (flags & IS_BIT26_COND) ? (1 << 26) : (1 << 16); + if (flags & IS_BIT26_COND) + return (1 << 26); +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) + if (flags & IS_BIT23_COND) + return (1 << 23); +#endif /* SLJIT_MIPS_R6 */ + return (1 << 16); } -static SLJIT_INLINE sljit_ins* optimize_jump(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code) +static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset) { sljit_sw diff; sljit_uw target_addr; sljit_ins *inst; sljit_ins saved_inst; +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + if (jump->flags & (SLJIT_REWRITABLE_JUMP | IS_CALL)) + return code_ptr; +#else if (jump->flags & SLJIT_REWRITABLE_JUMP) return code_ptr; +#endif if (jump->flags & JUMP_ADDR) target_addr = jump->u.target; else { SLJIT_ASSERT(jump->flags & JUMP_LABEL); - target_addr = (sljit_uw)(code + jump->u.label->size); + target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset; } - inst = (sljit_ins*)jump->addr; + + inst = (sljit_ins *)jump->addr; if (jump->flags & IS_COND) inst--; +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + if (jump->flags & IS_CALL) + goto keep_address; +#endif + /* B instructions. */ if (jump->flags & IS_MOVABLE) { - diff = ((sljit_sw)target_addr - (sljit_sw)(inst)) >> 2; + diff = ((sljit_sw)target_addr - (sljit_sw)inst - executable_offset) >> 2; if (diff <= SIMM_MAX && diff >= SIMM_MIN) { jump->flags |= PATCH_B; @@ -237,24 +360,34 @@ static SLJIT_INLINE sljit_ins* optimize_jump(struct sljit_jump *jump, sljit_ins return inst; } } + else { + diff = ((sljit_sw)target_addr - (sljit_sw)(inst + 1) - executable_offset) >> 2; + if (diff <= SIMM_MAX && diff >= SIMM_MIN) { + jump->flags |= PATCH_B; - diff = ((sljit_sw)target_addr - (sljit_sw)(inst + 1)) >> 2; - if (diff <= SIMM_MAX && diff >= SIMM_MIN) { - jump->flags |= PATCH_B; - - if (!(jump->flags & IS_COND)) { - inst[0] = (jump->flags & IS_JAL) ? BAL : B; + if (!(jump->flags & IS_COND)) { + inst[0] = (jump->flags & IS_JAL) ? 
BAL : B; + inst[1] = NOP; + return inst + 1; + } + inst[0] = inst[0] ^ invert_branch(jump->flags); inst[1] = NOP; + jump->addr -= sizeof(sljit_ins); return inst + 1; } - inst[0] = inst[0] ^ invert_branch(jump->flags); - inst[1] = NOP; - jump->addr -= sizeof(sljit_ins); - return inst + 1; } if (jump->flags & IS_COND) { - if ((target_addr & ~0xfffffff) == ((jump->addr + 3 * sizeof(sljit_ins)) & ~0xfffffff)) { + if ((jump->flags & IS_MOVABLE) && (target_addr & ~0xfffffff) == ((jump->addr + 2 * sizeof(sljit_ins)) & ~0xfffffff)) { + jump->flags |= PATCH_J; + saved_inst = inst[0]; + inst[0] = inst[-1]; + inst[-1] = (saved_inst & 0xffff0000) | 3; + inst[1] = J; + inst[2] = NOP; + return inst + 2; + } + else if ((target_addr & ~0xfffffff) == ((jump->addr + 3 * sizeof(sljit_ins)) & ~0xfffffff)) { jump->flags |= PATCH_J; inst[0] = (inst[0] & 0xffff0000) | 3; inst[1] = NOP; @@ -263,26 +396,48 @@ static SLJIT_INLINE sljit_ins* optimize_jump(struct sljit_jump *jump, sljit_ins jump->addr += sizeof(sljit_ins); return inst + 3; } - return code_ptr; } - - /* J instuctions. */ - if (jump->flags & IS_MOVABLE) { - if ((target_addr & ~0xfffffff) == (jump->addr & ~0xfffffff)) { + else { + /* J instuctions. */ + if ((jump->flags & IS_MOVABLE) && (target_addr & ~0xfffffff) == (jump->addr & ~0xfffffff)) { jump->flags |= PATCH_J; inst[0] = inst[-1]; inst[-1] = (jump->flags & IS_JAL) ? JAL : J; jump->addr -= sizeof(sljit_ins); return inst; } + + if ((target_addr & ~0xfffffff) == ((jump->addr + sizeof(sljit_ins)) & ~0xfffffff)) { + jump->flags |= PATCH_J; + inst[0] = (jump->flags & IS_JAL) ? JAL : J; + inst[1] = NOP; + return inst + 1; + } } - if ((target_addr & ~0xfffffff) == ((jump->addr + sizeof(sljit_ins)) & ~0xfffffff)) { - jump->flags |= PATCH_J; - inst[0] = (jump->flags & IS_JAL) ? 
JAL : J; - inst[1] = NOP; - return inst + 1; +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) +keep_address: + if (target_addr <= 0x7fffffff) { + jump->flags |= PATCH_ABS32; + if (jump->flags & IS_COND) { + inst[0] -= 4; + inst++; + } + inst[2] = inst[6]; + inst[3] = inst[7]; + return inst + 3; + } + if (target_addr <= 0x7fffffffffffl) { + jump->flags |= PATCH_ABS48; + if (jump->flags & IS_COND) { + inst[0] -= 2; + inst++; + } + inst[4] = inst[6]; + inst[5] = inst[7]; + return inst + 5; } +#endif return code_ptr; } @@ -294,6 +449,55 @@ static __attribute__ ((noinline)) void sljit_cache_flush(void* code, void* code_ } #endif +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + +static SLJIT_INLINE sljit_sw put_label_get_length(struct sljit_put_label *put_label, sljit_uw max_label) +{ + if (max_label < 0x80000000l) { + put_label->flags = 0; + return 1; + } + + if (max_label < 0x800000000000l) { + put_label->flags = 1; + return 3; + } + + put_label->flags = 2; + return 5; +} + +static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label) +{ + sljit_uw addr = put_label->label->addr; + sljit_ins *inst = (sljit_ins *)put_label->addr; + sljit_s32 reg = *inst; + + if (put_label->flags == 0) { + SLJIT_ASSERT(addr < 0x80000000l); + inst[0] = LUI | T(reg) | IMM(addr >> 16); + } + else if (put_label->flags == 1) { + SLJIT_ASSERT(addr < 0x800000000000l); + inst[0] = LUI | T(reg) | IMM(addr >> 32); + inst[1] = ORI | S(reg) | T(reg) | IMM((addr >> 16) & 0xffff); + inst[2] = DSLL | T(reg) | D(reg) | SH_IMM(16); + inst += 2; + } + else { + inst[0] = LUI | T(reg) | IMM(addr >> 48); + inst[1] = ORI | S(reg) | T(reg) | IMM((addr >> 32) & 0xffff); + inst[2] = DSLL | T(reg) | D(reg) | SH_IMM(16); + inst[3] = ORI | S(reg) | T(reg) | IMM((addr >> 16) & 0xffff); + inst[4] = DSLL | T(reg) | D(reg) | SH_IMM(16); + inst += 4; + } + + inst[1] = ORI | S(reg) | T(reg) | IMM(addr & 0xffff); +} + +#endif + SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) { struct sljit_memory_fragment *buf; @@ -302,14 +506,17 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil sljit_ins *buf_ptr; sljit_ins *buf_end; sljit_uw word_count; + sljit_uw next_addr; + sljit_sw executable_offset; sljit_uw addr; struct sljit_label *label; struct sljit_jump *jump; struct sljit_const *const_; + struct sljit_put_label *put_label; CHECK_ERROR_PTR(); - check_sljit_generate_code(compiler); + CHECK_PTR(check_sljit_generate_code(compiler)); reverse_buf(compiler); code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins)); @@ -318,37 +525,54 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil code_ptr = code; word_count = 0; + next_addr = 0; + executable_offset = SLJIT_EXEC_OFFSET(code); + label = compiler->labels; jump = compiler->jumps; const_ = compiler->consts; + put_label = compiler->put_labels; + do { buf_ptr = (sljit_ins*)buf->memory; buf_end = buf_ptr + (buf->used_size >> 2); do { *code_ptr = *buf_ptr++; - SLJIT_ASSERT(!label || label->size >= word_count); - SLJIT_ASSERT(!jump || jump->addr >= word_count); - SLJIT_ASSERT(!const_ || const_->addr >= word_count); - /* These structures are ordered by their address. */ - if (label && label->size == word_count) { - /* Just recording the address. 
*/ - label->addr = (sljit_uw)code_ptr; - label->size = code_ptr - code; - label = label->next; - } - if (jump && jump->addr == word_count) { + if (next_addr == word_count) { + SLJIT_ASSERT(!label || label->size >= word_count); + SLJIT_ASSERT(!jump || jump->addr >= word_count); + SLJIT_ASSERT(!const_ || const_->addr >= word_count); + SLJIT_ASSERT(!put_label || put_label->addr >= word_count); + + /* These structures are ordered by their address. */ + if (label && label->size == word_count) { + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + label->size = code_ptr - code; + label = label->next; + } + if (jump && jump->addr == word_count) { #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) - jump->addr = (sljit_uw)(code_ptr - 3); + jump->addr = (sljit_uw)(code_ptr - 3); #else -#error "Implementation required" + jump->addr = (sljit_uw)(code_ptr - 7); #endif - code_ptr = optimize_jump(jump, code_ptr, code); - jump = jump->next; - } - if (const_ && const_->addr == word_count) { - /* Just recording the address. */ - const_->addr = (sljit_uw)code_ptr; - const_ = const_->next; + code_ptr = detect_jump_type(jump, code_ptr, code, executable_offset); + jump = jump->next; + } + if (const_ && const_->addr == word_count) { + const_->addr = (sljit_uw)code_ptr; + const_ = const_->next; + } + if (put_label && put_label->addr == word_count) { + SLJIT_ASSERT(put_label->label); + put_label->addr = (sljit_uw)code_ptr; +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + code_ptr += put_label_get_length(put_label, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size)); + word_count += 5; +#endif + put_label = put_label->next; + } + next_addr = compute_next_addr(label, jump, const_, put_label); } code_ptr ++; word_count ++; @@ -366,22 +590,23 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil SLJIT_ASSERT(!label); SLJIT_ASSERT(!jump); SLJIT_ASSERT(!const_); + SLJIT_ASSERT(!put_label); SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size); jump = compiler->jumps; while (jump) { do { addr = (jump->flags & JUMP_LABEL) ? 
jump->u.label->addr : jump->u.target; - buf_ptr = (sljit_ins*)jump->addr; + buf_ptr = (sljit_ins *)jump->addr; if (jump->flags & PATCH_B) { - addr = (sljit_sw)(addr - (jump->addr + sizeof(sljit_ins))) >> 2; + addr = (sljit_sw)(addr - ((sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset) + sizeof(sljit_ins))) >> 2; SLJIT_ASSERT((sljit_sw)addr <= SIMM_MAX && (sljit_sw)addr >= SIMM_MIN); buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | (addr & 0xffff); break; } if (jump->flags & PATCH_J) { - SLJIT_ASSERT((addr & ~0xfffffff) == ((jump->addr + sizeof(sljit_ins)) & ~0xfffffff)); + SLJIT_ASSERT((addr & ~0xfffffff) == (((sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset) + sizeof(sljit_ins)) & ~0xfffffff)); buf_ptr[0] |= (addr >> 2) & 0x03ffffff; break; } @@ -391,14 +616,50 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 16) & 0xffff); buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | (addr & 0xffff); #else -#error "Implementation required" + if (jump->flags & PATCH_ABS32) { + SLJIT_ASSERT(addr <= 0x7fffffff); + buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 16) & 0xffff); + buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | (addr & 0xffff); + } + else if (jump->flags & PATCH_ABS48) { + SLJIT_ASSERT(addr <= 0x7fffffffffffl); + buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 32) & 0xffff); + buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | ((addr >> 16) & 0xffff); + buf_ptr[3] = (buf_ptr[3] & 0xffff0000) | (addr & 0xffff); + } + else { + buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 48) & 0xffff); + buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | ((addr >> 32) & 0xffff); + buf_ptr[3] = (buf_ptr[3] & 0xffff0000) | ((addr >> 16) & 0xffff); + buf_ptr[5] = (buf_ptr[5] & 0xffff0000) | (addr & 0xffff); + } #endif } while (0); jump = jump->next; } + put_label = compiler->put_labels; + while (put_label) { +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + addr = put_label->label->addr; + buf_ptr = (sljit_ins *)put_label->addr; + + SLJIT_ASSERT((buf_ptr[0] & 0xffe00000) == LUI && (buf_ptr[1] & 0xfc000000) == ORI); + buf_ptr[0] |= (addr >> 16) & 0xffff; + buf_ptr[1] |= addr & 0xffff; +#else + put_label_set(put_label); +#endif + put_label = put_label->next; + } + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_offset = executable_offset; compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins); + + code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset); + code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + #ifndef __GNUC__ SLJIT_CACHE_FLUSH(code, code_ptr); #else @@ -408,6 +669,32 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil return code; } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) +{ + sljit_sw fir = 0; + + switch (feature_type) { + case SLJIT_HAS_FPU: +#ifdef SLJIT_IS_FPU_AVAILABLE + return SLJIT_IS_FPU_AVAILABLE; +#elif defined(__GNUC__) + asm ("cfc1 %0, $0" : "=r"(fir)); + return (fir >> 22) & 0x1; +#else +#error "FIR check is not implemented for this architecture" +#endif + +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) + case SLJIT_HAS_CLZ: + case SLJIT_HAS_CMOV: + return 1; +#endif + + default: + return fir; + } +} + /* --------------------------------------------------------------------- */ /* Entry, exit */ /* --------------------------------------------------------------------- */ @@ -422,28 +709,24 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil 
/* Separates integer and floating point registers */ #define GPR_REG 0x0f #define DOUBLE_DATA 0x10 +#define SINGLE_DATA 0x12 #define MEM_MASK 0x1f -#define WRITE_BACK 0x00020 -#define ARG_TEST 0x00040 -#define ALT_KEEP_CACHE 0x00080 -#define CUMULATIVE_OP 0x00100 -#define LOGICAL_OP 0x00200 -#define IMM_OP 0x00400 -#define SRC2_IMM 0x00800 - -#define UNUSED_DEST 0x01000 -#define REG_DEST 0x02000 -#define REG1_SOURCE 0x04000 -#define REG2_SOURCE 0x08000 -#define SLOW_SRC1 0x10000 -#define SLOW_SRC2 0x20000 -#define SLOW_DEST 0x40000 - -/* Only these flags are set. UNUSED_DEST is not set when no flags should be set. */ -#define CHECK_FLAGS(list) \ - (!(flags & UNUSED_DEST) || (op & GET_FLAGS(~(list)))) +#define ARG_TEST 0x00020 +#define ALT_KEEP_CACHE 0x00040 +#define CUMULATIVE_OP 0x00080 +#define LOGICAL_OP 0x00100 +#define IMM_OP 0x00200 +#define SRC2_IMM 0x00400 + +#define UNUSED_DEST 0x00800 +#define REG_DEST 0x01000 +#define REG1_SOURCE 0x02000 +#define REG2_SOURCE 0x04000 +#define SLOW_SRC1 0x08000 +#define SLOW_SRC2 0x10000 +#define SLOW_DEST 0x20000 #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) #define STACK_STORE SW @@ -453,116 +736,132 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil #define STACK_LOAD LD #endif +static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw); + #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) #include "sjmips32.c" #else -#include "sljitNativeMIPS_64.c" +#include "sjmips64.c" #endif -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { sljit_ins base; + sljit_s32 args, i, tmp, offs; CHECK_ERROR(); - check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - compiler->scratches = scratches; - compiler->saveds = saveds; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; -#endif - - local_size += (saveds + 1 + 4) * sizeof(sljit_sw); + local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET; +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) local_size = (local_size + 15) & ~0xf; +#else + local_size = (local_size + 31) & ~0x1f; +#endif compiler->local_size = local_size; if (local_size <= SIMM_MAX) { /* Frequent case. 
*/ - FAIL_IF(push_inst(compiler, ADDIU_W | S(SLJIT_LOCALS_REG) | T(SLJIT_LOCALS_REG) | IMM(-local_size), DR(SLJIT_LOCALS_REG))); - base = S(SLJIT_LOCALS_REG); + FAIL_IF(push_inst(compiler, ADDIU_W | S(SLJIT_SP) | T(SLJIT_SP) | IMM(-local_size), DR(SLJIT_SP))); + base = S(SLJIT_SP); + offs = local_size - (sljit_sw)sizeof(sljit_sw); } else { - FAIL_IF(load_immediate(compiler, DR(TMP_REG1), local_size)); - FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_LOCALS_REG) | TA(0) | D(TMP_REG2), DR(TMP_REG2))); - FAIL_IF(push_inst(compiler, SUBU_W | S(SLJIT_LOCALS_REG) | T(TMP_REG1) | D(SLJIT_LOCALS_REG), DR(SLJIT_LOCALS_REG))); + FAIL_IF(load_immediate(compiler, DR(OTHER_FLAG), local_size)); + FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_SP) | TA(0) | D(TMP_REG2), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, SUBU_W | S(SLJIT_SP) | T(OTHER_FLAG) | D(SLJIT_SP), DR(SLJIT_SP))); base = S(TMP_REG2); local_size = 0; + offs = -(sljit_sw)sizeof(sljit_sw); + } + + FAIL_IF(push_inst(compiler, STACK_STORE | base | TA(RETURN_ADDR_REG) | IMM(offs), MOVABLE_INS)); + + tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG; + for (i = SLJIT_S0; i >= tmp; i--) { + offs -= (sljit_s32)(sizeof(sljit_sw)); + FAIL_IF(push_inst(compiler, STACK_STORE | base | T(i) | IMM(offs), MOVABLE_INS)); + } + + for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) { + offs -= (sljit_s32)(sizeof(sljit_sw)); + FAIL_IF(push_inst(compiler, STACK_STORE | base | T(i) | IMM(offs), MOVABLE_INS)); } - FAIL_IF(push_inst(compiler, STACK_STORE | base | TA(RETURN_ADDR_REG) | IMM(local_size - 1 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); - if (saveds >= 1) - FAIL_IF(push_inst(compiler, STACK_STORE | base | T(SLJIT_SAVED_REG1) | IMM(local_size - 2 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); - if (saveds >= 2) - FAIL_IF(push_inst(compiler, STACK_STORE | base | T(SLJIT_SAVED_REG2) | IMM(local_size - 3 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); - if (saveds >= 3) - FAIL_IF(push_inst(compiler, STACK_STORE | base | T(SLJIT_SAVED_REG3) | IMM(local_size - 4 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); - if (saveds >= 4) - FAIL_IF(push_inst(compiler, STACK_STORE | base | T(SLJIT_SAVED_EREG1) | IMM(local_size - 5 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); - if (saveds >= 5) - FAIL_IF(push_inst(compiler, STACK_STORE | base | T(SLJIT_SAVED_EREG2) | IMM(local_size - 6 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); + args = get_arg_count(arg_types); if (args >= 1) - FAIL_IF(push_inst(compiler, ADDU_W | SA(4) | TA(0) | D(SLJIT_SAVED_REG1), DR(SLJIT_SAVED_REG1))); + FAIL_IF(push_inst(compiler, ADDU_W | SA(4) | TA(0) | D(SLJIT_S0), DR(SLJIT_S0))); if (args >= 2) - FAIL_IF(push_inst(compiler, ADDU_W | SA(5) | TA(0) | D(SLJIT_SAVED_REG2), DR(SLJIT_SAVED_REG2))); + FAIL_IF(push_inst(compiler, ADDU_W | SA(5) | TA(0) | D(SLJIT_S1), DR(SLJIT_S1))); if (args >= 3) - FAIL_IF(push_inst(compiler, ADDU_W | SA(6) | TA(0) | D(SLJIT_SAVED_REG3), DR(SLJIT_SAVED_REG3))); + FAIL_IF(push_inst(compiler, ADDU_W | SA(6) | TA(0) | D(SLJIT_S2), DR(SLJIT_S2))); return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - CHECK_ERROR_VOID(); - check_sljit_set_context(compiler, args, 
scratches, saveds, local_size); - - compiler->scratches = scratches; - compiler->saveds = saveds; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; -#endif + CHECK_ERROR(); + CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - local_size += (saveds + 1 + 4) * sizeof(sljit_sw); + local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET; +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) compiler->local_size = (local_size + 15) & ~0xf; +#else + compiler->local_size = (local_size + 31) & ~0x1f; +#endif + return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { - sljit_si local_size; + sljit_s32 local_size, i, tmp, offs; sljit_ins base; CHECK_ERROR(); - check_sljit_emit_return(compiler, op, src, srcw); + CHECK(check_sljit_emit_return(compiler, op, src, srcw)); FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); local_size = compiler->local_size; if (local_size <= SIMM_MAX) - base = S(SLJIT_LOCALS_REG); + base = S(SLJIT_SP); else { FAIL_IF(load_immediate(compiler, DR(TMP_REG1), local_size)); - FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_LOCALS_REG) | T(TMP_REG1) | D(TMP_REG1), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_SP) | T(TMP_REG1) | D(TMP_REG1), DR(TMP_REG1))); base = S(TMP_REG1); local_size = 0; } - FAIL_IF(push_inst(compiler, STACK_LOAD | base | TA(RETURN_ADDR_REG) | IMM(local_size - 1 * (sljit_si)sizeof(sljit_sw)), RETURN_ADDR_REG)); - if (compiler->saveds >= 5) - FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(SLJIT_SAVED_EREG2) | IMM(local_size - 6 * (sljit_si)sizeof(sljit_sw)), DR(SLJIT_SAVED_EREG2))); - if (compiler->saveds >= 4) - FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(SLJIT_SAVED_EREG1) | IMM(local_size - 5 * (sljit_si)sizeof(sljit_sw)), DR(SLJIT_SAVED_EREG1))); - if (compiler->saveds >= 3) - FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(SLJIT_SAVED_REG3) | IMM(local_size - 4 * (sljit_si)sizeof(sljit_sw)), DR(SLJIT_SAVED_REG3))); - if (compiler->saveds >= 2) - FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(SLJIT_SAVED_REG2) | IMM(local_size - 3 * (sljit_si)sizeof(sljit_sw)), DR(SLJIT_SAVED_REG2))); - if (compiler->saveds >= 1) - FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(SLJIT_SAVED_REG1) | IMM(local_size - 2 * (sljit_si)sizeof(sljit_sw)), DR(SLJIT_SAVED_REG1))); + FAIL_IF(push_inst(compiler, STACK_LOAD | base | TA(RETURN_ADDR_REG) | IMM(local_size - (sljit_s32)sizeof(sljit_sw)), RETURN_ADDR_REG)); + offs = local_size - (sljit_s32)GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 1); + + tmp = compiler->scratches; + for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) { + FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(i) | IMM(offs), DR(i))); + offs += (sljit_s32)(sizeof(sljit_sw)); + } + + tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? 
(SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG; + for (i = tmp; i <= SLJIT_S0; i++) { + FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(i) | IMM(offs), DR(i))); + offs += (sljit_s32)(sizeof(sljit_sw)); + } + + SLJIT_ASSERT(offs == local_size - (sljit_sw)(sizeof(sljit_sw))); FAIL_IF(push_inst(compiler, JR | SA(RETURN_ADDR_REG), UNMOVABLE_INS)); if (compiler->local_size <= SIMM_MAX) - return push_inst(compiler, ADDIU_W | S(SLJIT_LOCALS_REG) | T(SLJIT_LOCALS_REG) | IMM(compiler->local_size), UNMOVABLE_INS); + return push_inst(compiler, ADDIU_W | S(SLJIT_SP) | T(SLJIT_SP) | IMM(compiler->local_size), UNMOVABLE_INS); else - return push_inst(compiler, ADDU_W | S(TMP_REG1) | TA(0) | D(SLJIT_LOCALS_REG), UNMOVABLE_INS); + return push_inst(compiler, ADDU_W | S(TMP_REG1) | TA(0) | D(SLJIT_SP), UNMOVABLE_INS); } #undef STACK_STORE @@ -578,7 +877,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compi #define ARCH_32_64(a, b) b #endif -static SLJIT_CONST sljit_ins data_transfer_insts[16 + 4] = { +static const sljit_ins data_transfer_insts[16 + 4] = { /* u w s */ ARCH_32_64(HI(43) /* sw */, HI(63) /* sd */), /* u w l */ ARCH_32_64(HI(35) /* lw */, HI(55) /* ld */), /* u b s */ HI(40) /* sb */, @@ -608,15 +907,15 @@ static SLJIT_CONST sljit_ins data_transfer_insts[16 + 4] = { /* reg_ar is an absolute register! */ /* Can perform an operation using at most 1 instruction. */ -static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg_ar, sljit_si arg, sljit_sw argw) +static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw) { SLJIT_ASSERT(arg & SLJIT_MEM); - if ((!(flags & WRITE_BACK) || !(arg & 0xf)) && !(arg & 0xf0) && argw <= SIMM_MAX && argw >= SIMM_MIN) { + if (!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN) { /* Works for both absolute and relative addresses. */ if (SLJIT_UNLIKELY(flags & ARG_TEST)) return 1; - FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(arg & 0xf) + FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(arg & REG_MASK) | TA(reg_ar) | IMM(argw), ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? reg_ar : MOVABLE_INS)); return -1; } @@ -626,15 +925,15 @@ static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si flags, /* See getput_arg below. Note: can_cache is called only for binary operators. Those operators always use word arguments without write back. */ -static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw) { SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM)); /* Simple operation except for updates. */ - if (arg & 0xf0) { + if (arg & OFFS_REG_MASK) { argw &= 0x3; next_argw &= 0x3; - if (argw && argw == next_argw && (arg == next_arg || (arg & 0xf0) == (next_arg & 0xf0))) + if (argw && argw == next_argw && (arg == next_arg || (arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK))) return 1; return 0; } @@ -649,9 +948,9 @@ static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_ } /* Emit the necessary instructions. See can_cache above. 
*/ -static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg_ar, sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw) { - sljit_si tmp_ar, base, delay_slot; + sljit_s32 tmp_ar, base, delay_slot; SLJIT_ASSERT(arg & SLJIT_MEM); if (!(next_arg & SLJIT_MEM)) { @@ -662,99 +961,48 @@ static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si flags, slji if ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) { tmp_ar = reg_ar; delay_slot = reg_ar; - } else { + } + else { tmp_ar = DR(TMP_REG1); delay_slot = MOVABLE_INS; } - base = arg & 0xf; + base = arg & REG_MASK; - if (SLJIT_UNLIKELY(arg & 0xf0)) { + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { argw &= 0x3; - if ((flags & WRITE_BACK) && reg_ar == DR(base)) { - SLJIT_ASSERT(!(flags & LOAD_DATA) && DR(TMP_REG1) != reg_ar); - FAIL_IF(push_inst(compiler, ADDU_W | SA(reg_ar) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); - reg_ar = DR(TMP_REG1); - } /* Using the cache. */ if (argw == compiler->cache_argw) { - if (!(flags & WRITE_BACK)) { - if (arg == compiler->cache_arg) + if (arg == compiler->cache_arg) + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); + + if ((SLJIT_MEM | (arg & OFFS_REG_MASK)) == compiler->cache_arg) { + if (arg == next_arg && argw == (next_argw & 0x3)) { + compiler->cache_arg = arg; + compiler->cache_argw = argw; + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(TMP_REG3), DR(TMP_REG3))); return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); - if ((SLJIT_MEM | (arg & 0xf0)) == compiler->cache_arg) { - if (arg == next_arg && argw == (next_argw & 0x3)) { - compiler->cache_arg = arg; - compiler->cache_argw = argw; - FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(TMP_REG3), DR(TMP_REG3))); - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); - } - FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | DA(tmp_ar), tmp_ar)); - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); - } - } - else { - if ((SLJIT_MEM | (arg & 0xf0)) == compiler->cache_arg) { - FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(base), DR(base))); - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(base) | TA(reg_ar), delay_slot); } + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | DA(tmp_ar), tmp_ar)); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); } } if (SLJIT_UNLIKELY(argw)) { - compiler->cache_arg = SLJIT_MEM | (arg & 0xf0); + compiler->cache_arg = SLJIT_MEM | (arg & OFFS_REG_MASK); compiler->cache_argw = argw; - FAIL_IF(push_inst(compiler, SLL_W | T((arg >> 4) & 0xf) | D(TMP_REG3) | SH_IMM(argw), DR(TMP_REG3))); + FAIL_IF(push_inst(compiler, SLL_W | T(OFFS_REG(arg)) | D(TMP_REG3) | SH_IMM(argw), DR(TMP_REG3))); } - if (!(flags & WRITE_BACK)) { - if (arg == next_arg && argw == (next_argw & 0x3)) { - compiler->cache_arg = arg; - compiler->cache_argw = argw; - FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(!argw ? ((arg >> 4) & 0xf) : TMP_REG3) | D(TMP_REG3), DR(TMP_REG3))); - tmp_ar = DR(TMP_REG3); - } - else - FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(!argw ? 
((arg >> 4) & 0xf) : TMP_REG3) | DA(tmp_ar), tmp_ar)); - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); - } - FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(!argw ? ((arg >> 4) & 0xf) : TMP_REG3) | D(base), DR(base))); - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(base) | TA(reg_ar), delay_slot); - } - - if (SLJIT_UNLIKELY(flags & WRITE_BACK) && base) { - /* Update only applies if a base register exists. */ - if (reg_ar == DR(base)) { - SLJIT_ASSERT(!(flags & LOAD_DATA) && DR(TMP_REG1) != reg_ar); - if (argw <= SIMM_MAX && argw >= SIMM_MIN) { - FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(base) | TA(reg_ar) | IMM(argw), MOVABLE_INS)); - if (argw) - return push_inst(compiler, ADDIU_W | S(base) | T(base) | IMM(argw), DR(base)); - return SLJIT_SUCCESS; - } - FAIL_IF(push_inst(compiler, ADDU_W | SA(reg_ar) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); - reg_ar = DR(TMP_REG1); - } - - if (argw <= SIMM_MAX && argw >= SIMM_MIN) { - if (argw) - FAIL_IF(push_inst(compiler, ADDIU_W | S(base) | T(base) | IMM(argw), DR(base))); - } - else { - if (compiler->cache_arg == SLJIT_MEM && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) { - if (argw != compiler->cache_argw) { - FAIL_IF(push_inst(compiler, ADDIU_W | S(TMP_REG3) | T(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3))); - compiler->cache_argw = argw; - } - FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(base), DR(base))); - } - else { - compiler->cache_arg = SLJIT_MEM; - compiler->cache_argw = argw; - FAIL_IF(load_immediate(compiler, DR(TMP_REG3), argw)); - FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(base), DR(base))); - } + if (arg == next_arg && argw == (next_argw & 0x3)) { + compiler->cache_arg = arg; + compiler->cache_argw = argw; + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(!argw ? OFFS_REG(arg) : TMP_REG3) | D(TMP_REG3), DR(TMP_REG3))); + tmp_ar = DR(TMP_REG3); } - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(base) | TA(reg_ar), delay_slot); + else + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(!argw ? 
OFFS_REG(arg) : TMP_REG3) | DA(tmp_ar), tmp_ar)); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); } if (compiler->cache_arg == arg && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) { @@ -788,35 +1036,63 @@ static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si flags, slji return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); } -static SLJIT_INLINE sljit_si emit_op_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg_ar, sljit_si arg, sljit_sw argw) +static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw) { + sljit_s32 tmp_ar, base, delay_slot; + if (getput_arg_fast(compiler, flags, reg_ar, arg, argw)) return compiler->error; - compiler->cache_arg = 0; - compiler->cache_argw = 0; - return getput_arg(compiler, flags, reg_ar, arg, argw, 0, 0); + + if ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) { + tmp_ar = reg_ar; + delay_slot = reg_ar; + } + else { + tmp_ar = DR(TMP_REG1); + delay_slot = MOVABLE_INS; + } + base = arg & REG_MASK; + + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { + argw &= 0x3; + + if (SLJIT_UNLIKELY(argw)) { + FAIL_IF(push_inst(compiler, SLL_W | T(OFFS_REG(arg)) | DA(tmp_ar) | SH_IMM(argw), tmp_ar)); + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | TA(tmp_ar) | DA(tmp_ar), tmp_ar)); + } + else + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(OFFS_REG(arg)) | DA(tmp_ar), tmp_ar)); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); + } + + FAIL_IF(load_immediate(compiler, tmp_ar, argw)); + + if (base != 0) + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | TA(tmp_ar) | DA(tmp_ar), tmp_ar)); + + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); } -static SLJIT_INLINE sljit_si emit_op_mem2(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg1, sljit_sw arg1w, sljit_si arg2, sljit_sw arg2w) +static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w) { if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) return compiler->error; return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); } -static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { /* arg1 goes to TMP_REG1 or src reg arg2 goes to TMP_REG2, imm or src reg TMP_REG3 can be used for caching result goes to TMP_REG2, so put result can use TMP_REG1 and TMP_REG3. 
*/ - sljit_si dst_r = TMP_REG2; - sljit_si src1_r; + sljit_s32 dst_r = TMP_REG2; + sljit_s32 src1_r; sljit_sw src2_r = 0; - sljit_si sugg_src2_r = TMP_REG2; + sljit_s32 sugg_src2_r = TMP_REG2; if (!(flags & ALT_KEEP_CACHE)) { compiler->cache_arg = 0; @@ -824,15 +1100,13 @@ static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si f } if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) { - if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI && !(src2 & SLJIT_MEM)) - return SLJIT_SUCCESS; - if (GET_FLAGS(op)) - flags |= UNUSED_DEST; + SLJIT_ASSERT(HAS_FLAGS(op)); + flags |= UNUSED_DEST; } - else if (dst <= TMP_REG3) { + else if (FAST_IS_REG(dst)) { dst_r = dst; flags |= REG_DEST; - if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) + if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) sugg_src2_r = dst_r; } else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, DR(TMP_REG1), dst, dstw)) @@ -862,7 +1136,7 @@ static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si f } /* Source 1. */ - if (src1 <= TMP_REG3) { + if (FAST_IS_REG(src1)) { src1_r = src1; flags |= REG1_SOURCE; } @@ -883,10 +1157,10 @@ static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si f } /* Source 2. */ - if (src2 <= TMP_REG3) { + if (FAST_IS_REG(src2)) { src2_r = src2; flags |= REG2_SOURCE; - if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) + if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOV_P) dst_r = src2_r; } else if (src2 & SLJIT_IMM) { @@ -897,7 +1171,7 @@ static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si f } else { src2_r = 0; - if ((op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) && (dst & SLJIT_MEM)) + if ((op >= SLJIT_MOV && op <= SLJIT_MOV_P) && (dst & SLJIT_MEM)) dst_r = 0; } } @@ -939,10 +1213,14 @@ static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si f return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) { +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + sljit_s32 int_op = op & SLJIT_I32_OP; +#endif + CHECK_ERROR(); - check_sljit_emit_op0(compiler, op); + CHECK(check_sljit_emit_op0(compiler, op)); op = GET_OPCODE(op); switch (op) { @@ -950,82 +1228,150 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler return push_inst(compiler, BREAK, UNMOVABLE_INS); case SLJIT_NOP: return push_inst(compiler, NOP, UNMOVABLE_INS); - case SLJIT_UMUL: - case SLJIT_SMUL: - FAIL_IF(push_inst(compiler, (op == SLJIT_UMUL ? MULTU : MULT) | S(SLJIT_SCRATCH_REG1) | T(SLJIT_SCRATCH_REG2), MOVABLE_INS)); - FAIL_IF(push_inst(compiler, MFLO | D(SLJIT_SCRATCH_REG1), DR(SLJIT_SCRATCH_REG1))); - return push_inst(compiler, MFHI | D(SLJIT_SCRATCH_REG2), DR(SLJIT_SCRATCH_REG2)); - case SLJIT_UDIV: - case SLJIT_SDIV: -#if !(defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) + case SLJIT_LMUL_UW: + case SLJIT_LMUL_SW: +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? DMULU : DMUL) | S(SLJIT_R0) | T(SLJIT_R1) | D(TMP_REG3), DR(TMP_REG3))); + FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? DMUHU : DMUH) | S(SLJIT_R0) | T(SLJIT_R1) | D(TMP_REG1), DR(TMP_REG1))); +#else /* !SLJIT_CONFIG_MIPS_64 */ + FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? 
MULU : MUL) | S(SLJIT_R0) | T(SLJIT_R1) | D(TMP_REG3), DR(TMP_REG3))); + FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? MUHU : MUH) | S(SLJIT_R0) | T(SLJIT_R1) | D(TMP_REG1), DR(TMP_REG1))); +#endif /* SLJIT_CONFIG_MIPS_64 */ + FAIL_IF(push_inst(compiler, ADDU_W | S(TMP_REG3) | TA(0) | D(SLJIT_R0), DR(SLJIT_R0))); + return push_inst(compiler, ADDU_W | S(TMP_REG1) | TA(0) | D(SLJIT_R1), DR(SLJIT_R1)); +#else /* !SLJIT_MIPS_R6 */ +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? DMULTU : DMULT) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS)); +#else /* !SLJIT_CONFIG_MIPS_64 */ + FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? MULTU : MULT) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS)); +#endif /* SLJIT_CONFIG_MIPS_64 */ + FAIL_IF(push_inst(compiler, MFLO | D(SLJIT_R0), DR(SLJIT_R0))); + return push_inst(compiler, MFHI | D(SLJIT_R1), DR(SLJIT_R1)); +#endif /* SLJIT_MIPS_R6 */ + case SLJIT_DIVMOD_UW: + case SLJIT_DIVMOD_SW: + case SLJIT_DIV_UW: + case SLJIT_DIV_SW: + SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments); +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + if (int_op) { + FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1) | D(TMP_REG3), DR(TMP_REG3))); + FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? MODU : MOD) | S(SLJIT_R0) | T(SLJIT_R1) | D(TMP_REG1), DR(TMP_REG1))); + } + else { + FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DDIVU : DDIV) | S(SLJIT_R0) | T(SLJIT_R1) | D(TMP_REG3), DR(TMP_REG3))); + FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DMODU : DMOD) | S(SLJIT_R0) | T(SLJIT_R1) | D(TMP_REG1), DR(TMP_REG1))); + } +#else /* !SLJIT_CONFIG_MIPS_64 */ + FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1) | D(TMP_REG3), DR(TMP_REG3))); + FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? MODU : MOD) | S(SLJIT_R0) | T(SLJIT_R1) | D(TMP_REG1), DR(TMP_REG1))); +#endif /* SLJIT_CONFIG_MIPS_64 */ + FAIL_IF(push_inst(compiler, ADDU_W | S(TMP_REG3) | TA(0) | D(SLJIT_R0), DR(SLJIT_R0))); + return (op >= SLJIT_DIV_UW) ? SLJIT_SUCCESS : push_inst(compiler, ADDU_W | S(TMP_REG1) | TA(0) | D(SLJIT_R1), DR(SLJIT_R1)); +#else /* !SLJIT_MIPS_R6 */ +#if !(defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); -#endif - FAIL_IF(push_inst(compiler, (op == SLJIT_UDIV ? DIVU : DIV) | S(SLJIT_SCRATCH_REG1) | T(SLJIT_SCRATCH_REG2), MOVABLE_INS)); - FAIL_IF(push_inst(compiler, MFLO | D(SLJIT_SCRATCH_REG1), DR(SLJIT_SCRATCH_REG1))); - return push_inst(compiler, MFHI | D(SLJIT_SCRATCH_REG2), DR(SLJIT_SCRATCH_REG2)); +#endif /* !SLJIT_MIPS_R1 */ +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + if (int_op) + FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS)); + else + FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DDIVU : DDIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS)); +#else /* !SLJIT_CONFIG_MIPS_64 */ + FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS)); +#endif /* SLJIT_CONFIG_MIPS_64 */ + FAIL_IF(push_inst(compiler, MFLO | D(SLJIT_R0), DR(SLJIT_R0))); + return (op >= SLJIT_DIV_UW) ? 
SLJIT_SUCCESS : push_inst(compiler, MFHI | D(SLJIT_R1), DR(SLJIT_R1)); +#endif /* SLJIT_MIPS_R6 */ } return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) +static sljit_s32 emit_prefetch(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ + if (!(src & OFFS_REG_MASK)) { + if (srcw <= SIMM_MAX && srcw >= SIMM_MIN) + return push_inst(compiler, PREF | S(src & REG_MASK) | IMM(srcw), MOVABLE_INS); + + FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw)); + return push_inst(compiler, PREFX | S(src & REG_MASK) | T(TMP_REG1), MOVABLE_INS); + } + + srcw &= 0x3; + + if (SLJIT_UNLIKELY(srcw != 0)) { + FAIL_IF(push_inst(compiler, SLL_W | T(OFFS_REG(src)) | D(TMP_REG1) | SH_IMM(srcw), DR(TMP_REG1))); + return push_inst(compiler, PREFX | S(src & REG_MASK) | T(TMP_REG1), MOVABLE_INS); + } + + return push_inst(compiler, PREFX | S(src & REG_MASK) | T(OFFS_REG(src)), MOVABLE_INS); +} +#endif + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) # define flags 0 +#else + sljit_s32 flags = 0; #endif CHECK_ERROR(); - check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); + CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw)); ADJUST_LOCAL_OFFSET(dst, dstw); ADJUST_LOCAL_OFFSET(src, srcw); + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) { +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) + if (op <= SLJIT_MOV_P && (src & SLJIT_MEM)) + return emit_prefetch(compiler, src, srcw); +#endif + return SLJIT_SUCCESS; + } + +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + if ((op & SLJIT_I32_OP) && GET_OPCODE(op) >= SLJIT_NOT) + flags |= INT_DATA | SIGNED_DATA; +#endif + switch (GET_OPCODE(op)) { case SLJIT_MOV: case SLJIT_MOV_P: - return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw); - - case SLJIT_MOV_UI: - return emit_op(compiler, SLJIT_MOV_UI, flags | INT_DATA, dst, dstw, TMP_REG1, 0, src, srcw); - - case SLJIT_MOV_SI: - return emit_op(compiler, SLJIT_MOV_SI, flags | INT_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + return emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw); - case SLJIT_MOV_UB: - return emit_op(compiler, SLJIT_MOV_UB, flags | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw); - - case SLJIT_MOV_SB: - return emit_op(compiler, SLJIT_MOV_SB, flags | BYTE_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw); - - case SLJIT_MOV_UH: - return emit_op(compiler, SLJIT_MOV_UH, flags | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw); - - case SLJIT_MOV_SH: - return emit_op(compiler, SLJIT_MOV_SH, flags | HALF_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? 
(sljit_sh)srcw : srcw); - - case SLJIT_MOVU: - case SLJIT_MOVU_P: - return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); - - case SLJIT_MOVU_UI: - return emit_op(compiler, SLJIT_MOV_UI, flags | INT_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); + case SLJIT_MOV_U32: +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + return emit_op(compiler, SLJIT_MOV_U32, INT_DATA, dst, dstw, TMP_REG1, 0, src, srcw); +#else + return emit_op(compiler, SLJIT_MOV_U32, INT_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u32)srcw : srcw); +#endif - case SLJIT_MOVU_SI: - return emit_op(compiler, SLJIT_MOV_SI, flags | INT_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); + case SLJIT_MOV_S32: +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + return emit_op(compiler, SLJIT_MOV_S32, INT_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, srcw); +#else + return emit_op(compiler, SLJIT_MOV_S32, INT_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s32)srcw : srcw); +#endif - case SLJIT_MOVU_UB: - return emit_op(compiler, SLJIT_MOV_UB, flags | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw); + case SLJIT_MOV_U8: + return emit_op(compiler, SLJIT_MOV_U8, BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw); - case SLJIT_MOVU_SB: - return emit_op(compiler, SLJIT_MOV_SB, flags | BYTE_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw); + case SLJIT_MOV_S8: + return emit_op(compiler, SLJIT_MOV_S8, BYTE_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw); - case SLJIT_MOVU_UH: - return emit_op(compiler, SLJIT_MOV_UH, flags | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw); + case SLJIT_MOV_U16: + return emit_op(compiler, SLJIT_MOV_U16, HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw); - case SLJIT_MOVU_SH: - return emit_op(compiler, SLJIT_MOV_SH, flags | HALF_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sh)srcw : srcw); + case SLJIT_MOV_S16: + return emit_op(compiler, SLJIT_MOV_S16, HALF_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? 
(sljit_s16)srcw : srcw); case SLJIT_NOT: return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); @@ -1037,27 +1383,44 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); } + SLJIT_UNREACHABLE(); return SLJIT_SUCCESS; + #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) # undef flags #endif } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) # define flags 0 +#else + sljit_s32 flags = 0; #endif CHECK_ERROR(); - check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); ADJUST_LOCAL_OFFSET(dst, dstw); ADJUST_LOCAL_OFFSET(src1, src1w); ADJUST_LOCAL_OFFSET(src2, src2w); + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) + return SLJIT_SUCCESS; + +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + if (op & SLJIT_I32_OP) { + flags |= INT_DATA | SIGNED_DATA; + if (src1 & SLJIT_IMM) + src1w = (sljit_s32)src1w; + if (src2 & SLJIT_IMM) + src2w = (sljit_s32)src2w; + } +#endif + switch (GET_OPCODE(op)) { case SLJIT_ADD: case SLJIT_ADDC: @@ -1082,35 +1445,41 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler if (src2 & SLJIT_IMM) src2w &= 0x1f; #else - SLJIT_ASSERT_STOP(); + if (src2 & SLJIT_IMM) { + if (op & SLJIT_I32_OP) + src2w &= 0x1f; + else + src2w &= 0x3f; + } #endif return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); } + SLJIT_UNREACHABLE(); return SLJIT_SUCCESS; + #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) # undef flags #endif } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg) { - check_sljit_get_register_index(reg); + CHECK_REG_INDEX(check_sljit_get_register_index(reg)); return reg_map[reg]; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_float_register_index(sljit_si reg) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg) { - check_sljit_get_float_register_index(reg); - return reg << 1; + CHECK_REG_INDEX(check_sljit_get_float_register_index(reg)); + return FR(reg); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, - void *instruction, sljit_si size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_s32 size) { CHECK_ERROR(); - check_sljit_emit_op_custom(compiler, instruction, size); - SLJIT_ASSERT(size == 4); + CHECK(check_sljit_emit_op_custom(compiler, instruction, size)); return push_inst(compiler, *(sljit_ins*)instruction, UNMOVABLE_INS); } @@ -1119,152 +1488,209 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *co /* Floating point operators */ /* --------------------------------------------------------------------- */ -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 7)) +#define FMT(op) (((op & SLJIT_F32_OP) ^ SLJIT_F32_OP) << (21 - 8)) + +static 
SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { -#if (defined SLJIT_QEMU && SLJIT_QEMU) - /* Qemu says fir is 0 by default. */ - return 1; -#elif defined(__GNUC__) - sljit_sw fir; - asm ("cfc1 %0, $0" : "=r"(fir)); - return (fir >> 22) & 0x1; +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +# define flags 0 #else -#error "FIR check is not implemented for this architecture" + sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64) << 21; +#endif + + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src, srcw, dst, dstw)); + src = TMP_FREG1; + } + + FAIL_IF(push_inst(compiler, (TRUNC_W_S ^ (flags >> 19)) | FMT(op) | FS(src) | FD(TMP_FREG1), MOVABLE_INS)); + + if (FAST_IS_REG(dst)) + return push_inst(compiler, MFC1 | flags | T(dst) | FS(TMP_FREG1), MOVABLE_INS); + + /* Store the integer value from a VFP register. */ + return emit_op_mem2(compiler, flags ? DOUBLE_DATA : SINGLE_DATA, FR(TMP_FREG1), dst, dstw, 0, 0); + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +# undef flags #endif } -#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 7)) -#define FMT(op) (((op & SLJIT_SINGLE_OP) ^ SLJIT_SINGLE_OP) << (21 - 8)) +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +# define flags 0 +#else + sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW) << 21; +#endif + + sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; + + if (FAST_IS_REG(src)) + FAIL_IF(push_inst(compiler, MTC1 | flags | T(src) | FS(TMP_FREG1), MOVABLE_INS)); + else if (src & SLJIT_MEM) { + /* Load the integer value into a VFP register. */ + FAIL_IF(emit_op_mem2(compiler, ((flags) ? 
DOUBLE_DATA : SINGLE_DATA) | LOAD_DATA, FR(TMP_FREG1), src, srcw, dst, dstw)); + } + else { +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) + srcw = (sljit_s32)srcw; +#endif + FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw)); + FAIL_IF(push_inst(compiler, MTC1 | flags | T(TMP_REG1) | FS(TMP_FREG1), MOVABLE_INS)); + } + + FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | (((op & SLJIT_F32_OP) ^ SLJIT_F32_OP) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS)); + + if (dst & SLJIT_MEM) + return emit_op_mem2(compiler, FLOAT_DATA(op), FR(TMP_FREG1), dst, dstw, 0, 0); + return SLJIT_SUCCESS; -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +# undef flags +#endif +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_si dst_fr; + sljit_ins inst; - CHECK_ERROR(); - check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); - SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error); + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src1, src1w, src2, src2w)); + src1 = TMP_FREG1; + } + + if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG2), src2, src2w, 0, 0)); + src2 = TMP_FREG2; + } + switch (GET_FLAG_TYPE(op)) { + case SLJIT_EQUAL_F64: + case SLJIT_NOT_EQUAL_F64: + inst = C_UEQ_S; + break; + case SLJIT_LESS_F64: + case SLJIT_GREATER_EQUAL_F64: + inst = C_ULT_S; + break; + case SLJIT_GREATER_F64: + case SLJIT_LESS_EQUAL_F64: + inst = C_ULE_S; + break; + default: + SLJIT_ASSERT(GET_FLAG_TYPE(op) == SLJIT_UNORDERED_F64 || GET_FLAG_TYPE(op) == SLJIT_ORDERED_F64); + inst = C_UN_S; + break; + } + return push_inst(compiler, inst | FMT(op) | FT(src2) | FS(src1) | C_FD, UNMOVABLE_INS); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r; + + CHECK_ERROR(); compiler->cache_arg = 0; compiler->cache_argw = 0; - if (GET_OPCODE(op) == SLJIT_CMPD) { - if (dst > SLJIT_FLOAT_REG6) { - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, dst, dstw, src, srcw)); - dst = TMP_FREG1; - } - else - dst <<= 1; + SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error); + SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw); - if (src > SLJIT_FLOAT_REG6) { - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src, srcw, 0, 0)); - src = TMP_FREG2; - } - else - src <<= 1; - - /* src and dst are swapped. */ - if (op & SLJIT_SET_E) { - FAIL_IF(push_inst(compiler, C_UEQ_fmt | FMT(op) | FT(src) | FS(dst), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, CFC1 | TA(EQUAL_FLAG) | DA(FCSR_REG), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, SRL | TA(EQUAL_FLAG) | DA(EQUAL_FLAG) | SH_IMM(23), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, ANDI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG)); - } - if (op & SLJIT_SET_S) { - /* Mixing the instructions for the two checks. 
*/ - FAIL_IF(push_inst(compiler, C_ULT_fmt | FMT(op) | FT(src) | FS(dst), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, CFC1 | TA(ULESS_FLAG) | DA(FCSR_REG), ULESS_FLAG)); - FAIL_IF(push_inst(compiler, C_ULT_fmt | FMT(op) | FT(dst) | FS(src), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, SRL | TA(ULESS_FLAG) | DA(ULESS_FLAG) | SH_IMM(23), ULESS_FLAG)); - FAIL_IF(push_inst(compiler, ANDI | SA(ULESS_FLAG) | TA(ULESS_FLAG) | IMM(1), ULESS_FLAG)); - FAIL_IF(push_inst(compiler, CFC1 | TA(UGREATER_FLAG) | DA(FCSR_REG), UGREATER_FLAG)); - FAIL_IF(push_inst(compiler, SRL | TA(UGREATER_FLAG) | DA(UGREATER_FLAG) | SH_IMM(23), UGREATER_FLAG)); - FAIL_IF(push_inst(compiler, ANDI | SA(UGREATER_FLAG) | TA(UGREATER_FLAG) | IMM(1), UGREATER_FLAG)); - } - return push_inst(compiler, C_UN_fmt | FMT(op) | FT(src) | FS(dst), FCSR_FCC); - } + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) + op ^= SLJIT_F32_OP; - dst_fr = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG1 : (dst << 1); + dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; - if (src > SLJIT_FLOAT_REG6) { - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_fr, src, srcw, dst, dstw)); - src = dst_fr; + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(dst_r), src, srcw, dst, dstw)); + src = dst_r; } - else - src <<= 1; switch (GET_OPCODE(op)) { - case SLJIT_MOVD: - if (src != dst_fr && dst_fr != TMP_FREG1) - FAIL_IF(push_inst(compiler, MOV_fmt | FMT(op) | FS(src) | FD(dst_fr), MOVABLE_INS)); - break; - case SLJIT_NEGD: - FAIL_IF(push_inst(compiler, NEG_fmt | FMT(op) | FS(src) | FD(dst_fr), MOVABLE_INS)); - break; - case SLJIT_ABSD: - FAIL_IF(push_inst(compiler, ABS_fmt | FMT(op) | FS(src) | FD(dst_fr), MOVABLE_INS)); - break; - } - - if (dst_fr == TMP_FREG1) { - if (GET_OPCODE(op) == SLJIT_MOVD) - dst_fr = src; - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_fr, dst, dstw, 0, 0)); + case SLJIT_MOV_F64: + if (src != dst_r) { + if (dst_r != TMP_FREG1) + FAIL_IF(push_inst(compiler, MOV_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS)); + else + dst_r = src; + } + break; + case SLJIT_NEG_F64: + FAIL_IF(push_inst(compiler, NEG_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS)); + break; + case SLJIT_ABS_F64: + FAIL_IF(push_inst(compiler, ABS_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS)); + break; + case SLJIT_CONV_F64_FROM_F32: + FAIL_IF(push_inst(compiler, CVT_S_S | ((op & SLJIT_F32_OP) ? 1 : (1 << 21)) | FS(src) | FD(dst_r), MOVABLE_INS)); + op ^= SLJIT_F32_OP; + break; } + if (dst & SLJIT_MEM) + return emit_op_mem2(compiler, FLOAT_DATA(op), FR(dst_r), dst, dstw, 0, 0); return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_si dst_fr, flags = 0; + sljit_s32 dst_r, flags = 0; CHECK_ERROR(); - check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); compiler->cache_arg = 0; compiler->cache_argw = 0; - dst_fr = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG2 : (dst << 1); + dst_r = FAST_IS_REG(dst) ? 
dst : TMP_FREG2; - if (src1 > SLJIT_FLOAT_REG6) { - if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) { + if (src1 & SLJIT_MEM) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src1, src1w)) { FAIL_IF(compiler->error); src1 = TMP_FREG1; } else flags |= SLOW_SRC1; } - else - src1 <<= 1; - if (src2 > SLJIT_FLOAT_REG6) { - if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) { + if (src2 & SLJIT_MEM) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG2), src2, src2w)) { FAIL_IF(compiler->error); src2 = TMP_FREG2; } else flags |= SLOW_SRC2; } - else - src2 <<= 1; if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w)); - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG2), src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src1, src1w, dst, dstw)); } else { - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG2), src2, src2w, dst, dstw)); } } else if (flags & SLOW_SRC1) - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG1), src1, src1w, dst, dstw)); else if (flags & SLOW_SRC2) - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, FR(TMP_FREG2), src2, src2w, dst, dstw)); if (flags & SLOW_SRC1) src1 = TMP_FREG1; @@ -1272,25 +1698,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compile src2 = TMP_FREG2; switch (GET_OPCODE(op)) { - case SLJIT_ADDD: - FAIL_IF(push_inst(compiler, ADD_fmt | FMT(op) | FT(src2) | FS(src1) | FD(dst_fr), MOVABLE_INS)); + case SLJIT_ADD_F64: + FAIL_IF(push_inst(compiler, ADD_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS)); break; - case SLJIT_SUBD: - FAIL_IF(push_inst(compiler, SUB_fmt | FMT(op) | FT(src2) | FS(src1) | FD(dst_fr), MOVABLE_INS)); + case SLJIT_SUB_F64: + FAIL_IF(push_inst(compiler, SUB_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS)); break; - case SLJIT_MULD: - FAIL_IF(push_inst(compiler, MUL_fmt | FMT(op) | FT(src2) | FS(src1) | FD(dst_fr), MOVABLE_INS)); + case SLJIT_MUL_F64: + FAIL_IF(push_inst(compiler, MUL_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS)); break; - case SLJIT_DIVD: - FAIL_IF(push_inst(compiler, DIV_fmt | FMT(op) | FT(src2) | FS(src1) | FD(dst_fr), MOVABLE_INS)); + case SLJIT_DIV_F64: + FAIL_IF(push_inst(compiler, DIV_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS)); break; } - if (dst_fr == TMP_FREG2) - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0)); + if (dst_r == TMP_FREG2) + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), FR(TMP_FREG2), dst, dstw, 0, 0)); return SLJIT_SUCCESS; } @@ -1299,35 +1725,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si 
sljit_emit_fop2(struct sljit_compiler *compile /* Other instructions */ /* --------------------------------------------------------------------- */ -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) { CHECK_ERROR(); - check_sljit_emit_fast_enter(compiler, dst, dstw); + CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); ADJUST_LOCAL_OFFSET(dst, dstw); - /* For UNUSED dst. Uncommon, but possible. */ - if (dst == SLJIT_UNUSED) - return SLJIT_SUCCESS; - - if (dst <= TMP_REG3) + if (FAST_IS_REG(dst)) return push_inst(compiler, ADDU_W | SA(RETURN_ADDR_REG) | TA(0) | D(dst), DR(dst)); /* Memory. */ return emit_op_mem(compiler, WORD_DATA, RETURN_ADDR_REG, dst, dstw); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) { CHECK_ERROR(); - check_sljit_emit_fast_return(compiler, src, srcw); + CHECK(check_sljit_emit_fast_return(compiler, src, srcw)); ADJUST_LOCAL_OFFSET(src, srcw); - if (src <= TMP_REG3) + if (FAST_IS_REG(src)) FAIL_IF(push_inst(compiler, ADDU_W | S(src) | TA(0) | DA(RETURN_ADDR_REG), RETURN_ADDR_REG)); - else if (src & SLJIT_MEM) + else FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, RETURN_ADDR_REG, src, srcw)); - else if (src & SLJIT_IMM) - FAIL_IF(load_immediate(compiler, RETURN_ADDR_REG, srcw)); FAIL_IF(push_inst(compiler, JR | SA(RETURN_ADDR_REG), UNMOVABLE_INS)); return push_inst(compiler, NOP, UNMOVABLE_INS); @@ -1342,7 +1762,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi struct sljit_label *label; CHECK_ERROR_PTR(); - check_sljit_emit_label(compiler); + CHECK_PTR(check_sljit_emit_label(compiler)); if (compiler->last_label && compiler->last_label->size == compiler->size) return compiler->last_label; @@ -1357,7 +1777,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) #define JUMP_LENGTH 4 #else -#error "Implementation required" +#define JUMP_LENGTH 8 #endif #define BR_Z(src) \ @@ -1370,25 +1790,39 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi flags = IS_BIT26_COND; \ delay_check = src; +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) + +#define BR_T() \ + inst = BC1NEZ; \ + flags = IS_BIT23_COND; \ + delay_check = FCSR_FCC; +#define BR_F() \ + inst = BC1EQZ; \ + flags = IS_BIT23_COND; \ + delay_check = FCSR_FCC; + +#else /* !SLJIT_MIPS_R6 */ + #define BR_T() \ inst = BC1T | JUMP_LENGTH; \ flags = IS_BIT16_COND; \ delay_check = FCSR_FCC; - #define BR_F() \ inst = BC1F | JUMP_LENGTH; \ flags = IS_BIT16_COND; \ delay_check = FCSR_FCC; -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +#endif /* SLJIT_MIPS_R6 */ + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) { struct sljit_jump *jump; sljit_ins inst; - sljit_si flags = 0; - sljit_si delay_check = UNMOVABLE_INS; + sljit_s32 flags = 0; + sljit_s32 delay_check = UNMOVABLE_INS; CHECK_ERROR_PTR(); - check_sljit_emit_jump(compiler, type); + CHECK_PTR(check_sljit_emit_jump(compiler, type)); jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); 
PTR_FAIL_IF(!jump); @@ -1396,56 +1830,40 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile type &= 0xff; switch (type) { - case SLJIT_C_EQUAL: - case SLJIT_C_FLOAT_NOT_EQUAL: + case SLJIT_EQUAL: BR_NZ(EQUAL_FLAG); break; - case SLJIT_C_NOT_EQUAL: - case SLJIT_C_FLOAT_EQUAL: + case SLJIT_NOT_EQUAL: BR_Z(EQUAL_FLAG); break; - case SLJIT_C_LESS: - case SLJIT_C_FLOAT_LESS: - BR_Z(ULESS_FLAG); - break; - case SLJIT_C_GREATER_EQUAL: - case SLJIT_C_FLOAT_GREATER_EQUAL: - BR_NZ(ULESS_FLAG); - break; - case SLJIT_C_GREATER: - case SLJIT_C_FLOAT_GREATER: - BR_Z(UGREATER_FLAG); + case SLJIT_LESS: + case SLJIT_GREATER: + case SLJIT_SIG_LESS: + case SLJIT_SIG_GREATER: + case SLJIT_OVERFLOW: + case SLJIT_MUL_OVERFLOW: + BR_Z(OTHER_FLAG); break; - case SLJIT_C_LESS_EQUAL: - case SLJIT_C_FLOAT_LESS_EQUAL: - BR_NZ(UGREATER_FLAG); + case SLJIT_GREATER_EQUAL: + case SLJIT_LESS_EQUAL: + case SLJIT_SIG_GREATER_EQUAL: + case SLJIT_SIG_LESS_EQUAL: + case SLJIT_NOT_OVERFLOW: + case SLJIT_MUL_NOT_OVERFLOW: + BR_NZ(OTHER_FLAG); break; - case SLJIT_C_SIG_LESS: - BR_Z(LESS_FLAG); - break; - case SLJIT_C_SIG_GREATER_EQUAL: - BR_NZ(LESS_FLAG); - break; - case SLJIT_C_SIG_GREATER: - BR_Z(GREATER_FLAG); - break; - case SLJIT_C_SIG_LESS_EQUAL: - BR_NZ(GREATER_FLAG); - break; - case SLJIT_C_OVERFLOW: - case SLJIT_C_MUL_OVERFLOW: - BR_Z(OVERFLOW_FLAG); - break; - case SLJIT_C_NOT_OVERFLOW: - case SLJIT_C_MUL_NOT_OVERFLOW: - BR_NZ(OVERFLOW_FLAG); + case SLJIT_NOT_EQUAL_F64: + case SLJIT_GREATER_EQUAL_F64: + case SLJIT_GREATER_F64: + case SLJIT_ORDERED_F64: + BR_T(); break; - case SLJIT_C_FLOAT_UNORDERED: + case SLJIT_EQUAL_F64: + case SLJIT_LESS_F64: + case SLJIT_LESS_EQUAL_F64: + case SLJIT_UNORDERED_F64: BR_F(); break; - case SLJIT_C_FLOAT_ORDERED: - BR_T(); - break; default: /* Not conditional branch. */ inst = 0; @@ -1460,19 +1878,16 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile PTR_FAIL_IF(push_inst(compiler, inst, UNMOVABLE_INS)); PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0)); - if (type <= SLJIT_JUMP) { + + if (type <= SLJIT_JUMP) PTR_FAIL_IF(push_inst(compiler, JR | S(TMP_REG2), UNMOVABLE_INS)); - jump->addr = compiler->size; - PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); - } else { - SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); - /* Cannot be optimized out if type is >= CALL0. */ - jump->flags |= IS_JAL | (type >= SLJIT_CALL0 ? SLJIT_REWRITABLE_JUMP : 0); + else { + jump->flags |= IS_JAL; PTR_FAIL_IF(push_inst(compiler, JALR | S(TMP_REG2) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); - jump->addr = compiler->size; - /* A NOP if type < CALL1. 
*/ - PTR_FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_SCRATCH_REG1) | TA(0) | DA(4), UNMOVABLE_INS)); } + + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); return jump; } @@ -1496,22 +1911,22 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile src2 = 0; \ } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_si type, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { struct sljit_jump *jump; - sljit_si flags; + sljit_s32 flags; sljit_ins inst; CHECK_ERROR_PTR(); - check_sljit_emit_cmp(compiler, type, src1, src1w, src2, src2w); + CHECK_PTR(check_sljit_emit_cmp(compiler, type, src1, src1w, src2, src2w)); ADJUST_LOCAL_OFFSET(src1, src1w); ADJUST_LOCAL_OFFSET(src2, src2w); compiler->cache_arg = 0; compiler->cache_argw = 0; - flags = ((type & SLJIT_INT_OP) ? INT_DATA : WORD_DATA) | LOAD_DATA; + flags = ((type & SLJIT_I32_OP) ? INT_DATA : WORD_DATA) | LOAD_DATA; if (src1 & SLJIT_MEM) { PTR_FAIL_IF(emit_op_mem2(compiler, flags, DR(TMP_REG1), src1, src1w, src2, src2w)); src1 = TMP_REG1; @@ -1526,32 +1941,32 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); type &= 0xff; - if (type <= SLJIT_C_NOT_EQUAL) { + if (type <= SLJIT_NOT_EQUAL) { RESOLVE_IMM1(); RESOLVE_IMM2(); jump->flags |= IS_BIT26_COND; if (compiler->delay_slot == MOVABLE_INS || (compiler->delay_slot != UNMOVABLE_INS && compiler->delay_slot != DR(src1) && compiler->delay_slot != DR(src2))) jump->flags |= IS_MOVABLE; - PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_C_EQUAL ? BNE : BEQ) | S(src1) | T(src2) | JUMP_LENGTH, UNMOVABLE_INS)); + PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_EQUAL ? 
BNE : BEQ) | S(src1) | T(src2) | JUMP_LENGTH, UNMOVABLE_INS)); } - else if (type >= SLJIT_C_SIG_LESS && (((src1 & SLJIT_IMM) && (src1w == 0)) || ((src2 & SLJIT_IMM) && (src2w == 0)))) { + else if (type >= SLJIT_SIG_LESS && (((src1 & SLJIT_IMM) && (src1w == 0)) || ((src2 & SLJIT_IMM) && (src2w == 0)))) { inst = NOP; if ((src1 & SLJIT_IMM) && (src1w == 0)) { RESOLVE_IMM2(); switch (type) { - case SLJIT_C_SIG_LESS: + case SLJIT_SIG_LESS: inst = BLEZ; jump->flags |= IS_BIT26_COND; break; - case SLJIT_C_SIG_GREATER_EQUAL: + case SLJIT_SIG_GREATER_EQUAL: inst = BGTZ; jump->flags |= IS_BIT26_COND; break; - case SLJIT_C_SIG_GREATER: + case SLJIT_SIG_GREATER: inst = BGEZ; jump->flags |= IS_BIT16_COND; break; - case SLJIT_C_SIG_LESS_EQUAL: + case SLJIT_SIG_LESS_EQUAL: inst = BLTZ; jump->flags |= IS_BIT16_COND; break; @@ -1561,19 +1976,19 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler else { RESOLVE_IMM1(); switch (type) { - case SLJIT_C_SIG_LESS: + case SLJIT_SIG_LESS: inst = BGEZ; jump->flags |= IS_BIT16_COND; break; - case SLJIT_C_SIG_GREATER_EQUAL: + case SLJIT_SIG_GREATER_EQUAL: inst = BLTZ; jump->flags |= IS_BIT16_COND; break; - case SLJIT_C_SIG_GREATER: + case SLJIT_SIG_GREATER: inst = BLEZ; jump->flags |= IS_BIT26_COND; break; - case SLJIT_C_SIG_LESS_EQUAL: + case SLJIT_SIG_LESS_EQUAL: inst = BGTZ; jump->flags |= IS_BIT26_COND; break; @@ -1582,29 +1997,29 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler PTR_FAIL_IF(push_inst(compiler, inst | S(src1) | JUMP_LENGTH, UNMOVABLE_INS)); } else { - if (type == SLJIT_C_LESS || type == SLJIT_C_GREATER_EQUAL || type == SLJIT_C_SIG_LESS || type == SLJIT_C_SIG_GREATER_EQUAL) { + if (type == SLJIT_LESS || type == SLJIT_GREATER_EQUAL || type == SLJIT_SIG_LESS || type == SLJIT_SIG_GREATER_EQUAL) { RESOLVE_IMM1(); if ((src2 & SLJIT_IMM) && src2w <= SIMM_MAX && src2w >= SIMM_MIN) - PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_C_LESS_EQUAL ? SLTIU : SLTI) | S(src1) | T(TMP_REG1) | IMM(src2w), DR(TMP_REG1))); + PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_LESS_EQUAL ? SLTIU : SLTI) | S(src1) | T(TMP_REG1) | IMM(src2w), DR(TMP_REG1))); else { RESOLVE_IMM2(); - PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_C_LESS_EQUAL ? SLTU : SLT) | S(src1) | T(src2) | D(TMP_REG1), DR(TMP_REG1))); + PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_LESS_EQUAL ? SLTU : SLT) | S(src1) | T(src2) | D(TMP_REG1), DR(TMP_REG1))); } - type = (type == SLJIT_C_LESS || type == SLJIT_C_SIG_LESS) ? SLJIT_C_NOT_EQUAL : SLJIT_C_EQUAL; + type = (type == SLJIT_LESS || type == SLJIT_SIG_LESS) ? SLJIT_NOT_EQUAL : SLJIT_EQUAL; } else { RESOLVE_IMM2(); if ((src1 & SLJIT_IMM) && src1w <= SIMM_MAX && src1w >= SIMM_MIN) - PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_C_LESS_EQUAL ? SLTIU : SLTI) | S(src2) | T(TMP_REG1) | IMM(src1w), DR(TMP_REG1))); + PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_LESS_EQUAL ? SLTIU : SLTI) | S(src2) | T(TMP_REG1) | IMM(src1w), DR(TMP_REG1))); else { RESOLVE_IMM1(); - PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_C_LESS_EQUAL ? SLTU : SLT) | S(src2) | T(src1) | D(TMP_REG1), DR(TMP_REG1))); + PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_LESS_EQUAL ? SLTU : SLT) | S(src2) | T(src1) | D(TMP_REG1), DR(TMP_REG1))); } - type = (type == SLJIT_C_GREATER || type == SLJIT_C_SIG_GREATER) ? SLJIT_C_NOT_EQUAL : SLJIT_C_EQUAL; + type = (type == SLJIT_GREATER || type == SLJIT_SIG_GREATER) ? 
SLJIT_NOT_EQUAL : SLJIT_EQUAL; } jump->flags |= IS_BIT26_COND; - PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_C_EQUAL ? BNE : BEQ) | S(TMP_REG1) | TA(0) | JUMP_LENGTH, UNMOVABLE_INS)); + PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_EQUAL ? BNE : BEQ) | S(TMP_REG1) | TA(0) | JUMP_LENGTH, UNMOVABLE_INS)); } PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0)); @@ -1617,85 +2032,6 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler #undef RESOLVE_IMM1 #undef RESOLVE_IMM2 -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_si type, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) -{ - struct sljit_jump *jump; - sljit_ins inst; - sljit_si if_true; - - CHECK_ERROR_PTR(); - check_sljit_emit_fcmp(compiler, type, src1, src1w, src2, src2w); - - compiler->cache_arg = 0; - compiler->cache_argw = 0; - - if (src1 > SLJIT_FLOAT_REG6) { - PTR_FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(type) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); - src1 = TMP_FREG1; - } - else - src1 <<= 1; - - if (src2 > SLJIT_FLOAT_REG6) { - PTR_FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(type) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0)); - src2 = TMP_FREG2; - } - else - src2 <<= 1; - - jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); - PTR_FAIL_IF(!jump); - set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); - jump->flags |= IS_BIT16_COND; - - switch (type & 0xff) { - case SLJIT_C_FLOAT_EQUAL: - inst = C_UEQ_fmt; - if_true = 1; - break; - case SLJIT_C_FLOAT_NOT_EQUAL: - inst = C_UEQ_fmt; - if_true = 0; - break; - case SLJIT_C_FLOAT_LESS: - inst = C_ULT_fmt; - if_true = 1; - break; - case SLJIT_C_FLOAT_GREATER_EQUAL: - inst = C_ULT_fmt; - if_true = 0; - break; - case SLJIT_C_FLOAT_GREATER: - inst = C_ULE_fmt; - if_true = 0; - break; - case SLJIT_C_FLOAT_LESS_EQUAL: - inst = C_ULE_fmt; - if_true = 1; - break; - case SLJIT_C_FLOAT_UNORDERED: - inst = C_UN_fmt; - if_true = 1; - break; - case SLJIT_C_FLOAT_ORDERED: - default: /* Make compilers happy. */ - inst = C_UN_fmt; - if_true = 0; - break; - } - - PTR_FAIL_IF(push_inst(compiler, inst | FMT(type) | FT(src2) | FS(src1), UNMOVABLE_INS)); - /* Intentionally the other opcode. */ - PTR_FAIL_IF(push_inst(compiler, (if_true ? 
BC1F : BC1T) | JUMP_LENGTH, UNMOVABLE_INS)); - PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0)); - PTR_FAIL_IF(push_inst(compiler, JR | S(TMP_REG2), UNMOVABLE_INS)); - jump->addr = compiler->size; - PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); - return jump; -} - #undef JUMP_LENGTH #undef BR_Z #undef BR_NZ @@ -1705,43 +2041,14 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compile #undef FLOAT_DATA #undef FMT -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) { - sljit_si src_r = TMP_REG2; struct sljit_jump *jump = NULL; CHECK_ERROR(); - check_sljit_emit_ijump(compiler, type, src, srcw); + CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); ADJUST_LOCAL_OFFSET(src, srcw); - if (src <= TMP_REG3) { - if (DR(src) != 4) - src_r = src; - else - FAIL_IF(push_inst(compiler, ADDU_W | S(src) | TA(0) | D(TMP_REG2), DR(TMP_REG2))); - } - - if (type >= SLJIT_CALL0) { - SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); - if (src & (SLJIT_IMM | SLJIT_MEM)) { - if (src & SLJIT_IMM) - FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw)); - else { - SLJIT_ASSERT(src_r == TMP_REG2 && (src & SLJIT_MEM)); - FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw)); - } - FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); - /* We need an extra instruction in any case. */ - return push_inst(compiler, ADDU_W | S(SLJIT_SCRATCH_REG1) | TA(0) | DA(4), UNMOVABLE_INS); - } - - /* Register input. */ - if (type >= SLJIT_CALL1) - FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_SCRATCH_REG1) | TA(0) | DA(4), 4)); - FAIL_IF(push_inst(compiler, JALR | S(src_r) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); - return push_inst(compiler, ADDU_W | S(src_r) | TA(0) | D(PIC_ADDR_REG), UNMOVABLE_INS); - } - if (src & SLJIT_IMM) { jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); FAIL_IF(!jump); @@ -1752,136 +2059,228 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compil jump->flags |= IS_MOVABLE; FAIL_IF(emit_const(compiler, TMP_REG2, 0)); + src = TMP_REG2; + } + else if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(TMP_REG2), src, srcw)); + src = TMP_REG2; } - else if (src & SLJIT_MEM) - FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw)); - FAIL_IF(push_inst(compiler, JR | S(src_r), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, JR | S(src), UNMOVABLE_INS)); if (jump) jump->addr = compiler->size; FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw, - sljit_si type) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 type) { - sljit_si sugg_dst_ar, dst_ar; - sljit_si flags = GET_ALL_FLAGS(op); + sljit_s32 src_ar, dst_ar; + sljit_s32 saved_op = op; +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + sljit_s32 mem_type = WORD_DATA; +#else + sljit_s32 mem_type = (op & SLJIT_I32_OP) ? 
(INT_DATA | SIGNED_DATA) : WORD_DATA; +#endif CHECK_ERROR(); - check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); + CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); ADJUST_LOCAL_OFFSET(dst, dstw); - if (dst == SLJIT_UNUSED) - return SLJIT_SUCCESS; - op = GET_OPCODE(op); - sugg_dst_ar = DR((op < SLJIT_ADD && dst <= TMP_REG3) ? dst : TMP_REG2); +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + if (op == SLJIT_MOV_S32) + mem_type = INT_DATA | SIGNED_DATA; +#endif + dst_ar = DR((op < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG2); compiler->cache_arg = 0; compiler->cache_argw = 0; - if (op >= SLJIT_ADD && (src & SLJIT_MEM)) { - ADJUST_LOCAL_OFFSET(src, srcw); - FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, DR(TMP_REG1), src, srcw, dst, dstw)); - src = TMP_REG1; - srcw = 0; - } - switch (type) { - case SLJIT_C_EQUAL: - case SLJIT_C_NOT_EQUAL: - FAIL_IF(push_inst(compiler, SLTIU | SA(EQUAL_FLAG) | TA(sugg_dst_ar) | IMM(1), sugg_dst_ar)); - dst_ar = sugg_dst_ar; - break; - case SLJIT_C_LESS: - case SLJIT_C_GREATER_EQUAL: - case SLJIT_C_FLOAT_LESS: - case SLJIT_C_FLOAT_GREATER_EQUAL: - dst_ar = ULESS_FLAG; - break; - case SLJIT_C_GREATER: - case SLJIT_C_LESS_EQUAL: - case SLJIT_C_FLOAT_GREATER: - case SLJIT_C_FLOAT_LESS_EQUAL: - dst_ar = UGREATER_FLAG; - break; - case SLJIT_C_SIG_LESS: - case SLJIT_C_SIG_GREATER_EQUAL: - dst_ar = LESS_FLAG; - break; - case SLJIT_C_SIG_GREATER: - case SLJIT_C_SIG_LESS_EQUAL: - dst_ar = GREATER_FLAG; - break; - case SLJIT_C_OVERFLOW: - case SLJIT_C_NOT_OVERFLOW: - dst_ar = OVERFLOW_FLAG; + if (op >= SLJIT_ADD && (dst & SLJIT_MEM)) + FAIL_IF(emit_op_mem2(compiler, mem_type | LOAD_DATA, DR(TMP_REG1), dst, dstw, dst, dstw)); + + switch (type & 0xff) { + case SLJIT_EQUAL: + case SLJIT_NOT_EQUAL: + FAIL_IF(push_inst(compiler, SLTIU | SA(EQUAL_FLAG) | TA(dst_ar) | IMM(1), dst_ar)); + src_ar = dst_ar; break; - case SLJIT_C_MUL_OVERFLOW: - case SLJIT_C_MUL_NOT_OVERFLOW: - FAIL_IF(push_inst(compiler, SLTIU | SA(OVERFLOW_FLAG) | TA(sugg_dst_ar) | IMM(1), sugg_dst_ar)); - dst_ar = sugg_dst_ar; + case SLJIT_MUL_OVERFLOW: + case SLJIT_MUL_NOT_OVERFLOW: + FAIL_IF(push_inst(compiler, SLTIU | SA(OTHER_FLAG) | TA(dst_ar) | IMM(1), dst_ar)); + src_ar = dst_ar; type ^= 0x1; /* Flip type bit for the XORI below. */ break; - case SLJIT_C_FLOAT_EQUAL: - case SLJIT_C_FLOAT_NOT_EQUAL: - dst_ar = EQUAL_FLAG; - break; - - case SLJIT_C_FLOAT_UNORDERED: - case SLJIT_C_FLOAT_ORDERED: - FAIL_IF(push_inst(compiler, CFC1 | TA(sugg_dst_ar) | DA(FCSR_REG), sugg_dst_ar)); - FAIL_IF(push_inst(compiler, SRL | TA(sugg_dst_ar) | DA(sugg_dst_ar) | SH_IMM(23), sugg_dst_ar)); - FAIL_IF(push_inst(compiler, ANDI | SA(sugg_dst_ar) | TA(sugg_dst_ar) | IMM(1), sugg_dst_ar)); - dst_ar = sugg_dst_ar; + case SLJIT_GREATER_F64: + case SLJIT_LESS_EQUAL_F64: + type ^= 0x1; /* Flip type bit for the XORI below. 
*/ + case SLJIT_EQUAL_F64: + case SLJIT_NOT_EQUAL_F64: + case SLJIT_LESS_F64: + case SLJIT_GREATER_EQUAL_F64: + case SLJIT_UNORDERED_F64: + case SLJIT_ORDERED_F64: +#if (defined SLJIT_MIPS_R6 && SLJIT_MIPS_R6) + FAIL_IF(push_inst(compiler, MFC1 | TA(dst_ar) | FS(TMP_FREG3), dst_ar)); +#else /* !SLJIT_MIPS_R6 */ + FAIL_IF(push_inst(compiler, CFC1 | TA(dst_ar) | DA(FCSR_REG), dst_ar)); +#endif /* SLJIT_MIPS_R6 */ + FAIL_IF(push_inst(compiler, SRL | TA(dst_ar) | DA(dst_ar) | SH_IMM(23), dst_ar)); + FAIL_IF(push_inst(compiler, ANDI | SA(dst_ar) | TA(dst_ar) | IMM(1), dst_ar)); + src_ar = dst_ar; break; default: - SLJIT_ASSERT_STOP(); - dst_ar = sugg_dst_ar; + src_ar = OTHER_FLAG; break; } if (type & 0x1) { - FAIL_IF(push_inst(compiler, XORI | SA(dst_ar) | TA(sugg_dst_ar) | IMM(1), sugg_dst_ar)); - dst_ar = sugg_dst_ar; + FAIL_IF(push_inst(compiler, XORI | SA(src_ar) | TA(dst_ar) | IMM(1), dst_ar)); + src_ar = dst_ar; } - if (op >= SLJIT_ADD) { - if (DR(TMP_REG2) != dst_ar) - FAIL_IF(push_inst(compiler, ADDU_W | SA(dst_ar) | TA(0) | D(TMP_REG2), DR(TMP_REG2))); - return emit_op(compiler, op | flags, CUMULATIVE_OP | LOGICAL_OP | IMM_OP | ALT_KEEP_CACHE, dst, dstw, src, srcw, TMP_REG2, 0); + if (op < SLJIT_ADD) { + if (dst & SLJIT_MEM) + return emit_op_mem(compiler, mem_type, src_ar, dst, dstw); + + if (src_ar != dst_ar) + return push_inst(compiler, ADDU_W | SA(src_ar) | TA(0) | DA(dst_ar), dst_ar); + return SLJIT_SUCCESS; } + /* OTHER_FLAG cannot be specified as src2 argument at the moment. */ + if (DR(TMP_REG2) != src_ar) + FAIL_IF(push_inst(compiler, ADDU_W | SA(src_ar) | TA(0) | D(TMP_REG2), DR(TMP_REG2))); + + mem_type |= CUMULATIVE_OP | LOGICAL_OP | IMM_OP | ALT_KEEP_CACHE; + if (dst & SLJIT_MEM) - return emit_op_mem(compiler, WORD_DATA, dst_ar, dst, dstw); + return emit_op(compiler, saved_op, mem_type, dst, dstw, TMP_REG1, 0, TMP_REG2, 0); + return emit_op(compiler, saved_op, mem_type, dst, dstw, dst, dstw, TMP_REG2, 0); +} - if (sugg_dst_ar != dst_ar) - return push_inst(compiler, ADDU_W | SA(dst_ar) | TA(0) | DA(sugg_dst_ar), sugg_dst_ar); - return SLJIT_SUCCESS; +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 dst_reg, + sljit_s32 src, sljit_sw srcw) +{ +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) + sljit_ins ins; +#endif + + CHECK_ERROR(); + CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); + +#if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) + + if (SLJIT_UNLIKELY(src & SLJIT_IMM)) { +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + if (dst_reg & SLJIT_I32_OP) + srcw = (sljit_s32)srcw; +#endif + FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw)); + src = TMP_REG1; + srcw = 0; + } + + dst_reg &= ~SLJIT_I32_OP; + + switch (type & 0xff) { + case SLJIT_EQUAL: + ins = MOVZ | TA(EQUAL_FLAG); + break; + case SLJIT_NOT_EQUAL: + ins = MOVN | TA(EQUAL_FLAG); + break; + case SLJIT_LESS: + case SLJIT_GREATER: + case SLJIT_SIG_LESS: + case SLJIT_SIG_GREATER: + case SLJIT_OVERFLOW: + case SLJIT_MUL_OVERFLOW: + ins = MOVN | TA(OTHER_FLAG); + break; + case SLJIT_GREATER_EQUAL: + case SLJIT_LESS_EQUAL: + case SLJIT_SIG_GREATER_EQUAL: + case SLJIT_SIG_LESS_EQUAL: + case SLJIT_NOT_OVERFLOW: + case SLJIT_MUL_NOT_OVERFLOW: + ins = MOVZ | TA(OTHER_FLAG); + break; + case SLJIT_EQUAL_F64: + case SLJIT_LESS_F64: + case SLJIT_LESS_EQUAL_F64: + case SLJIT_UNORDERED_F64: + ins = MOVT; + break; + case SLJIT_NOT_EQUAL_F64: + case SLJIT_GREATER_EQUAL_F64: + case SLJIT_GREATER_F64: + case SLJIT_ORDERED_F64: + ins = MOVF; + 
break; + default: + ins = MOVZ | TA(OTHER_FLAG); + SLJIT_UNREACHABLE(); + break; + } + + return push_inst(compiler, ins | S(src) | D(dst_reg), DR(dst_reg)); + +#else + return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw); +#endif } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) { struct sljit_const *const_; - sljit_si reg; + sljit_s32 dst_r; CHECK_ERROR_PTR(); - check_sljit_emit_const(compiler, dst, dstw, init_value); + CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value)); ADJUST_LOCAL_OFFSET(dst, dstw); const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); PTR_FAIL_IF(!const_); set_const(const_, compiler); - reg = (dst <= TMP_REG3) ? dst : TMP_REG2; - - PTR_FAIL_IF(emit_const(compiler, reg, init_value)); + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; + PTR_FAIL_IF(emit_const(compiler, dst_r, init_value)); if (dst & SLJIT_MEM) PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0)); + return const_; } + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + struct sljit_put_label *put_label; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label)); + PTR_FAIL_IF(!put_label); + set_put_label(put_label, compiler, 0); + + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + PTR_FAIL_IF(emit_const(compiler, dst_r, 0)); +#else + PTR_FAIL_IF(push_inst(compiler, dst_r, UNMOVABLE_INS)); + compiler->size += 5; +#endif + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0)); + + return put_label; +} diff --git a/src/3rd/pcre/sjppc32.c b/src/3rd/pcre/sjppc32.c index 0bd35a6e0ee..3ce741153f8 100644 --- a/src/3rd/pcre/sjppc32.c +++ b/src/3rd/pcre/sjppc32.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -26,13 +26,13 @@ /* ppc 32-bit arch dependent functions. */ -static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si reg, sljit_sw imm) +static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm) { if (imm <= SIMM_MAX && imm >= SIMM_MIN) return push_inst(compiler, ADDI | D(reg) | A(0) | IMM(imm)); if (!(imm & ~0xffff)) - return push_inst(compiler, ORI | S(ZERO_REG) | A(reg) | IMM(imm)); + return push_inst(compiler, ORI | S(TMP_ZERO) | A(reg) | IMM(imm)); FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(imm >> 16))); return (imm & 0xffff) ? 
push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS; @@ -41,39 +41,39 @@ static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si reg, sl #define INS_CLEAR_LEFT(dst, src, from) \ (RLWINM | S(src) | A(dst) | ((from) << 6) | (31 << 1)) -static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, - sljit_si dst, sljit_si src1, sljit_si src2) +static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, + sljit_s32 dst, sljit_s32 src1, sljit_s32 src2) { switch (op) { case SLJIT_MOV: - case SLJIT_MOV_UI: - case SLJIT_MOV_SI: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: case SLJIT_MOV_P: SLJIT_ASSERT(src1 == TMP_REG1); if (dst != src2) return push_inst(compiler, OR | S(src2) | A(dst) | B(src2)); return SLJIT_SUCCESS; - case SLJIT_MOV_UB: - case SLJIT_MOV_SB: + case SLJIT_MOV_U8: + case SLJIT_MOV_S8: SLJIT_ASSERT(src1 == TMP_REG1); if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { - if (op == SLJIT_MOV_SB) + if (op == SLJIT_MOV_S8) return push_inst(compiler, EXTSB | S(src2) | A(dst)); return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 24)); } - else if ((flags & REG_DEST) && op == SLJIT_MOV_SB) + else if ((flags & REG_DEST) && op == SLJIT_MOV_S8) return push_inst(compiler, EXTSB | S(src2) | A(dst)); else { SLJIT_ASSERT(dst == src2); } return SLJIT_SUCCESS; - case SLJIT_MOV_UH: - case SLJIT_MOV_SH: + case SLJIT_MOV_U16: + case SLJIT_MOV_S16: SLJIT_ASSERT(src1 == TMP_REG1); if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { - if (op == SLJIT_MOV_SH) + if (op == SLJIT_MOV_S16) return push_inst(compiler, EXTSH | S(src2) | A(dst)); return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 16)); } @@ -88,77 +88,86 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj case SLJIT_NEG: SLJIT_ASSERT(src1 == TMP_REG1); - return push_inst(compiler, NEG | OERC(flags) | D(dst) | A(src2)); + /* Setting XER SO is not enough, CR SO is also needed. */ + return push_inst(compiler, NEG | OE((flags & ALT_FORM1) ? ALT_SET_FLAGS : 0) | RC(flags) | D(dst) | A(src2)); case SLJIT_CLZ: SLJIT_ASSERT(src1 == TMP_REG1); - return push_inst(compiler, CNTLZW | RC(flags) | S(src2) | A(dst)); + return push_inst(compiler, CNTLZW | S(src2) | A(dst)); case SLJIT_ADD: if (flags & ALT_FORM1) { - /* Flags does not set: BIN_IMM_EXTS unnecessary. */ - SLJIT_ASSERT(src2 == TMP_REG2); - return push_inst(compiler, ADDI | D(dst) | A(src1) | compiler->imm); + /* Setting XER SO is not enough, CR SO is also needed. */ + return push_inst(compiler, ADD | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)); } + if (flags & ALT_FORM2) { /* Flags does not set: BIN_IMM_EXTS unnecessary. */ SLJIT_ASSERT(src2 == TMP_REG2); - return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); + + if (flags & ALT_FORM3) + return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); + + if (flags & ALT_FORM4) { + FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1)))); + src1 = dst; + } + + return push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff)); } if (flags & ALT_FORM3) { SLJIT_ASSERT(src2 == TMP_REG2); return push_inst(compiler, ADDIC | D(dst) | A(src1) | compiler->imm); } - if (flags & ALT_FORM4) { - /* Flags does not set: BIN_IMM_EXTS unnecessary. 
*/ - FAIL_IF(push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff))); - return push_inst(compiler, ADDIS | D(dst) | A(dst) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1))); - } if (!(flags & ALT_SET_FLAGS)) return push_inst(compiler, ADD | D(dst) | A(src1) | B(src2)); - return push_inst(compiler, ADDC | OERC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)); + if (flags & ALT_FORM4) + return push_inst(compiler, ADDC | RC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)); + return push_inst(compiler, ADD | RC(flags) | D(dst) | A(src1) | B(src2)); case SLJIT_ADDC: - if (flags & ALT_FORM1) { - FAIL_IF(push_inst(compiler, MFXER | D(0))); - FAIL_IF(push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2))); - return push_inst(compiler, MTXER | S(0)); - } return push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2)); case SLJIT_SUB: if (flags & ALT_FORM1) { + if (flags & ALT_FORM2) { + FAIL_IF(push_inst(compiler, CMPLI | CRD(0) | A(src1) | compiler->imm)); + if (!(flags & ALT_FORM3)) + return SLJIT_SUCCESS; + return push_inst(compiler, ADDI | D(dst) | A(src1) | (-compiler->imm & 0xffff)); + } + FAIL_IF(push_inst(compiler, CMPL | CRD(0) | A(src1) | B(src2))); + if (!(flags & ALT_FORM3)) + return SLJIT_SUCCESS; + return push_inst(compiler, SUBF | D(dst) | A(src2) | B(src1)); + } + + if (flags & ALT_FORM2) { + /* Setting XER SO is not enough, CR SO is also needed. */ + return push_inst(compiler, SUBF | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)); + } + + if (flags & ALT_FORM3) { /* Flags does not set: BIN_IMM_EXTS unnecessary. */ SLJIT_ASSERT(src2 == TMP_REG2); return push_inst(compiler, SUBFIC | D(dst) | A(src1) | compiler->imm); } - if (flags & (ALT_FORM2 | ALT_FORM3)) { - SLJIT_ASSERT(src2 == TMP_REG2); - if (flags & ALT_FORM2) - FAIL_IF(push_inst(compiler, CMPI | CRD(0) | A(src1) | compiler->imm)); - if (flags & ALT_FORM3) - return push_inst(compiler, CMPLI | CRD(4) | A(src1) | compiler->imm); - return SLJIT_SUCCESS; - } - if (flags & (ALT_FORM4 | ALT_FORM5)) { - if (flags & ALT_FORM4) - FAIL_IF(push_inst(compiler, CMPL | CRD(4) | A(src1) | B(src2))); - if (flags & ALT_FORM5) - FAIL_IF(push_inst(compiler, CMP | CRD(0) | A(src1) | B(src2))); - return SLJIT_SUCCESS; + + if (flags & ALT_FORM4) { + if (flags & ALT_FORM5) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, CMPI | CRD(0) | A(src1) | compiler->imm); + } + return push_inst(compiler, CMP | CRD(0) | A(src1) | B(src2)); } + if (!(flags & ALT_SET_FLAGS)) return push_inst(compiler, SUBF | D(dst) | A(src2) | B(src1)); - if (flags & ALT_FORM6) - FAIL_IF(push_inst(compiler, CMPL | CRD(4) | A(src1) | B(src2))); - return push_inst(compiler, SUBFC | OERC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)); + if (flags & ALT_FORM5) + return push_inst(compiler, SUBFC | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)); + return push_inst(compiler, SUBF | RC(flags) | D(dst) | A(src2) | B(src1)); case SLJIT_SUBC: - if (flags & ALT_FORM1) { - FAIL_IF(push_inst(compiler, MFXER | D(0))); - FAIL_IF(push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1))); - return push_inst(compiler, MTXER | S(0)); - } return push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1)); case SLJIT_MUL: @@ -166,7 +175,7 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj SLJIT_ASSERT(src2 == TMP_REG2); return push_inst(compiler, MULLI | D(dst) | A(src1) | compiler->imm); } - return push_inst(compiler, MULLW | OERC(flags) | D(dst) | A(src2) | B(src1)); + return push_inst(compiler, MULLW | 
OE(flags) | RC(flags) | D(dst) | A(src2) | B(src1)); case SLJIT_AND: if (flags & ALT_FORM1) { @@ -228,42 +237,42 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj return push_inst(compiler, SRW | RC(flags) | S(src1) | A(dst) | B(src2)); case SLJIT_ASHR: - if (flags & ALT_FORM3) - FAIL_IF(push_inst(compiler, MFXER | D(0))); if (flags & ALT_FORM1) { SLJIT_ASSERT(src2 == TMP_REG2); compiler->imm &= 0x1f; - FAIL_IF(push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11))); + return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11)); } - else - FAIL_IF(push_inst(compiler, SRAW | RC(flags) | S(src1) | A(dst) | B(src2))); - return (flags & ALT_FORM3) ? push_inst(compiler, MTXER | S(0)) : SLJIT_SUCCESS; + return push_inst(compiler, SRAW | RC(flags) | S(src1) | A(dst) | B(src2)); } - SLJIT_ASSERT_STOP(); + SLJIT_UNREACHABLE(); return SLJIT_SUCCESS; } -static SLJIT_INLINE sljit_si emit_const(struct sljit_compiler *compiler, sljit_si reg, sljit_sw init_value) +static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw init_value) { FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(init_value >> 16))); return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value)); } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) { - sljit_ins *inst = (sljit_ins*)addr; + sljit_ins *inst = (sljit_ins *)addr; - inst[0] = (inst[0] & 0xffff0000) | ((new_addr >> 16) & 0xffff); - inst[1] = (inst[1] & 0xffff0000) | (new_addr & 0xffff); + SLJIT_ASSERT((inst[0] & 0xfc1f0000) == ADDIS && (inst[1] & 0xfc000000) == ORI); + inst[0] = (inst[0] & 0xffff0000) | ((new_target >> 16) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | (new_target & 0xffff); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); SLJIT_CACHE_FLUSH(inst, inst + 2); } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) { - sljit_ins *inst = (sljit_ins*)addr; + sljit_ins *inst = (sljit_ins *)addr; + SLJIT_ASSERT((inst[0] & 0xfc1f0000) == ADDIS && (inst[1] & 0xfc000000) == ORI); inst[0] = (inst[0] & 0xffff0000) | ((new_constant >> 16) & 0xffff); inst[1] = (inst[1] & 0xffff0000) | (new_constant & 0xffff); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); SLJIT_CACHE_FLUSH(inst, inst + 2); } diff --git a/src/3rd/pcre/sjppc64.c b/src/3rd/pcre/sjppc64.c index 8eaeb41f4e2..3b73021cc8c 100644 --- a/src/3rd/pcre/sjppc64.c +++ b/src/3rd/pcre/sjppc64.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 
* * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -35,13 +35,10 @@ #error "Must implement count leading zeroes" #endif -#define RLDI(dst, src, sh, mb, type) \ - (HI(30) | S(src) | A(dst) | ((type) << 2) | (((sh) & 0x1f) << 11) | (((sh) & 0x20) >> 4) | (((mb) & 0x1f) << 6) | ((mb) & 0x20)) - #define PUSH_RLDICR(reg, shift) \ push_inst(compiler, RLDI(reg, reg, 63 - shift, shift, 1)) -static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si reg, sljit_sw imm) +static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm) { sljit_uw tmp; sljit_uw shift; @@ -52,9 +49,9 @@ static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si reg, sl return push_inst(compiler, ADDI | D(reg) | A(0) | IMM(imm)); if (!(imm & ~0xffff)) - return push_inst(compiler, ORI | S(ZERO_REG) | A(reg) | IMM(imm)); + return push_inst(compiler, ORI | S(TMP_ZERO) | A(reg) | IMM(imm)); - if (imm <= SLJIT_W(0x7fffffff) && imm >= SLJIT_W(-0x80000000)) { + if (imm <= 0x7fffffffl && imm >= -0x80000000l) { FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(imm >> 16))); return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS; } @@ -145,8 +142,8 @@ static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si reg, sl src1 = TMP_REG1; \ } -static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, - sljit_si dst, sljit_si src1, sljit_si src2) +static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, + sljit_s32 dst, sljit_s32 src1, sljit_s32 src2) { switch (op) { case SLJIT_MOV: @@ -156,11 +153,11 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj return push_inst(compiler, OR | S(src2) | A(dst) | B(src2)); return SLJIT_SUCCESS; - case SLJIT_MOV_UI: - case SLJIT_MOV_SI: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: SLJIT_ASSERT(src1 == TMP_REG1); if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { - if (op == SLJIT_MOV_SI) + if (op == SLJIT_MOV_S32) return push_inst(compiler, EXTSW | S(src2) | A(dst)); return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 0)); } @@ -169,26 +166,26 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj } return SLJIT_SUCCESS; - case SLJIT_MOV_UB: - case SLJIT_MOV_SB: + case SLJIT_MOV_U8: + case SLJIT_MOV_S8: SLJIT_ASSERT(src1 == TMP_REG1); if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { - if (op == SLJIT_MOV_SB) + if (op == SLJIT_MOV_S8) return push_inst(compiler, EXTSB | S(src2) | A(dst)); return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 24)); } - else if ((flags & REG_DEST) && op == SLJIT_MOV_SB) + else if ((flags & REG_DEST) && op == SLJIT_MOV_S8) return push_inst(compiler, EXTSB | S(src2) | A(dst)); else { SLJIT_ASSERT(dst == src2); } return SLJIT_SUCCESS; - case SLJIT_MOV_UH: - case SLJIT_MOV_SH: + case SLJIT_MOV_U16: + case SLJIT_MOV_S16: SLJIT_ASSERT(src1 == TMP_REG1); if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { - if (op == SLJIT_MOV_SH) + if (op == SLJIT_MOV_S16) return push_inst(compiler, EXTSH | S(src2) | A(dst)); return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 16)); } @@ -204,84 +201,118 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj case SLJIT_NEG: SLJIT_ASSERT(src1 == TMP_REG1); + + if ((flags & (ALT_FORM1 | 
ALT_SIGN_EXT)) == (ALT_FORM1 | ALT_SIGN_EXT)) { + FAIL_IF(push_inst(compiler, RLDI(TMP_REG2, src2, 32, 31, 1))); + FAIL_IF(push_inst(compiler, NEG | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(TMP_REG2))); + return push_inst(compiler, RLDI(dst, dst, 32, 32, 0)); + } + UN_EXTS(); - return push_inst(compiler, NEG | OERC(flags) | D(dst) | A(src2)); + /* Setting XER SO is not enough, CR SO is also needed. */ + return push_inst(compiler, NEG | OE((flags & ALT_FORM1) ? ALT_SET_FLAGS : 0) | RC(flags) | D(dst) | A(src2)); case SLJIT_CLZ: SLJIT_ASSERT(src1 == TMP_REG1); if (flags & ALT_FORM1) - return push_inst(compiler, CNTLZW | RC(flags) | S(src2) | A(dst)); - return push_inst(compiler, CNTLZD | RC(flags) | S(src2) | A(dst)); + return push_inst(compiler, CNTLZW | S(src2) | A(dst)); + return push_inst(compiler, CNTLZD | S(src2) | A(dst)); case SLJIT_ADD: if (flags & ALT_FORM1) { - /* Flags does not set: BIN_IMM_EXTS unnecessary. */ - SLJIT_ASSERT(src2 == TMP_REG2); - return push_inst(compiler, ADDI | D(dst) | A(src1) | compiler->imm); + if (flags & ALT_SIGN_EXT) { + FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, src1, 32, 31, 1))); + src1 = TMP_REG1; + FAIL_IF(push_inst(compiler, RLDI(TMP_REG2, src2, 32, 31, 1))); + src2 = TMP_REG2; + } + /* Setting XER SO is not enough, CR SO is also needed. */ + FAIL_IF(push_inst(compiler, ADD | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2))); + if (flags & ALT_SIGN_EXT) + return push_inst(compiler, RLDI(dst, dst, 32, 32, 0)); + return SLJIT_SUCCESS; } + if (flags & ALT_FORM2) { /* Flags does not set: BIN_IMM_EXTS unnecessary. */ SLJIT_ASSERT(src2 == TMP_REG2); - return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); + + if (flags & ALT_FORM3) + return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); + + if (flags & ALT_FORM4) { + FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1)))); + src1 = dst; + } + + return push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff)); } if (flags & ALT_FORM3) { SLJIT_ASSERT(src2 == TMP_REG2); BIN_IMM_EXTS(); return push_inst(compiler, ADDIC | D(dst) | A(src1) | compiler->imm); } - if (flags & ALT_FORM4) { - /* Flags does not set: BIN_IMM_EXTS unnecessary. */ - FAIL_IF(push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff))); - return push_inst(compiler, ADDIS | D(dst) | A(dst) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1))); - } if (!(flags & ALT_SET_FLAGS)) return push_inst(compiler, ADD | D(dst) | A(src1) | B(src2)); BIN_EXTS(); - return push_inst(compiler, ADDC | OERC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)); + if (flags & ALT_FORM4) + return push_inst(compiler, ADDC | RC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)); + return push_inst(compiler, ADD | RC(flags) | D(dst) | A(src1) | B(src2)); case SLJIT_ADDC: - if (flags & ALT_FORM1) { - FAIL_IF(push_inst(compiler, MFXER | D(0))); - FAIL_IF(push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2))); - return push_inst(compiler, MTXER | S(0)); - } BIN_EXTS(); return push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2)); case SLJIT_SUB: if (flags & ALT_FORM1) { + if (flags & ALT_FORM2) { + FAIL_IF(push_inst(compiler, CMPLI | CRD(0 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | compiler->imm)); + if (!(flags & ALT_FORM3)) + return SLJIT_SUCCESS; + return push_inst(compiler, ADDI | D(dst) | A(src1) | (-compiler->imm & 0xffff)); + } + FAIL_IF(push_inst(compiler, CMPL | CRD(0 | ((flags & ALT_SIGN_EXT) ? 
0 : 1)) | A(src1) | B(src2))); + if (!(flags & ALT_FORM3)) + return SLJIT_SUCCESS; + return push_inst(compiler, SUBF | D(dst) | A(src2) | B(src1)); + } + + if (flags & ALT_FORM2) { + if (flags & ALT_SIGN_EXT) { + FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, src1, 32, 31, 1))); + src1 = TMP_REG1; + FAIL_IF(push_inst(compiler, RLDI(TMP_REG2, src2, 32, 31, 1))); + src2 = TMP_REG2; + } + /* Setting XER SO is not enough, CR SO is also needed. */ + FAIL_IF(push_inst(compiler, SUBF | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1))); + if (flags & ALT_SIGN_EXT) + return push_inst(compiler, RLDI(dst, dst, 32, 32, 0)); + return SLJIT_SUCCESS; + } + + if (flags & ALT_FORM3) { /* Flags does not set: BIN_IMM_EXTS unnecessary. */ SLJIT_ASSERT(src2 == TMP_REG2); return push_inst(compiler, SUBFIC | D(dst) | A(src1) | compiler->imm); } - if (flags & (ALT_FORM2 | ALT_FORM3)) { - SLJIT_ASSERT(src2 == TMP_REG2); - if (flags & ALT_FORM2) - FAIL_IF(push_inst(compiler, CMPI | CRD(0 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | compiler->imm)); - if (flags & ALT_FORM3) - return push_inst(compiler, CMPLI | CRD(4 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | compiler->imm); - return SLJIT_SUCCESS; - } - if (flags & (ALT_FORM4 | ALT_FORM5)) { - if (flags & ALT_FORM4) - FAIL_IF(push_inst(compiler, CMPL | CRD(4 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | B(src2))); - if (flags & ALT_FORM5) - return push_inst(compiler, CMP | CRD(0 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | B(src2)); - return SLJIT_SUCCESS; + + if (flags & ALT_FORM4) { + if (flags & ALT_FORM5) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, CMPI | CRD(0 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | compiler->imm); + } + return push_inst(compiler, CMP | CRD(0 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | B(src2)); } + if (!(flags & ALT_SET_FLAGS)) return push_inst(compiler, SUBF | D(dst) | A(src2) | B(src1)); BIN_EXTS(); - if (flags & ALT_FORM6) - FAIL_IF(push_inst(compiler, CMPL | CRD(4 | ((flags & ALT_SIGN_EXT) ? 
0 : 1)) | A(src1) | B(src2))); - return push_inst(compiler, SUBFC | OERC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)); + if (flags & ALT_FORM5) + return push_inst(compiler, SUBFC | RC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)); + return push_inst(compiler, SUBF | RC(flags) | D(dst) | A(src2) | B(src1)); case SLJIT_SUBC: - if (flags & ALT_FORM1) { - FAIL_IF(push_inst(compiler, MFXER | D(0))); - FAIL_IF(push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1))); - return push_inst(compiler, MTXER | S(0)); - } BIN_EXTS(); return push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1)); @@ -292,8 +323,8 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj } BIN_EXTS(); if (flags & ALT_FORM2) - return push_inst(compiler, MULLW | OERC(flags) | D(dst) | A(src2) | B(src1)); - return push_inst(compiler, MULLD | OERC(flags) | D(dst) | A(src2) | B(src1)); + return push_inst(compiler, MULLW | OE(flags) | RC(flags) | D(dst) | A(src2) | B(src1)); + return push_inst(compiler, MULLD | OE(flags) | RC(flags) | D(dst) | A(src2) | B(src1)); case SLJIT_AND: if (flags & ALT_FORM1) { @@ -345,10 +376,8 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj compiler->imm &= 0x1f; return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | ((31 - compiler->imm) << 1)); } - else { - compiler->imm &= 0x3f; - return push_inst(compiler, RLDI(dst, src1, compiler->imm, 63 - compiler->imm, 1) | RC(flags)); - } + compiler->imm &= 0x3f; + return push_inst(compiler, RLDI(dst, src1, compiler->imm, 63 - compiler->imm, 1) | RC(flags)); } return push_inst(compiler, ((flags & ALT_FORM2) ? SLW : SLD) | RC(flags) | S(src1) | A(dst) | B(src2)); @@ -359,37 +388,84 @@ static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, slj compiler->imm &= 0x1f; return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1)); } - else { - compiler->imm &= 0x3f; - return push_inst(compiler, RLDI(dst, src1, 64 - compiler->imm, compiler->imm, 0) | RC(flags)); - } + compiler->imm &= 0x3f; + return push_inst(compiler, RLDI(dst, src1, 64 - compiler->imm, compiler->imm, 0) | RC(flags)); } return push_inst(compiler, ((flags & ALT_FORM2) ? SRW : SRD) | RC(flags) | S(src1) | A(dst) | B(src2)); case SLJIT_ASHR: - if (flags & ALT_FORM3) - FAIL_IF(push_inst(compiler, MFXER | D(0))); if (flags & ALT_FORM1) { SLJIT_ASSERT(src2 == TMP_REG2); if (flags & ALT_FORM2) { compiler->imm &= 0x1f; - FAIL_IF(push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11))); + return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11)); } - else { - compiler->imm &= 0x3f; - FAIL_IF(push_inst(compiler, SRADI | RC(flags) | S(src1) | A(dst) | ((compiler->imm & 0x1f) << 11) | ((compiler->imm & 0x20) >> 4))); + compiler->imm &= 0x3f; + return push_inst(compiler, SRADI | RC(flags) | S(src1) | A(dst) | ((compiler->imm & 0x1f) << 11) | ((compiler->imm & 0x20) >> 4)); + } + return push_inst(compiler, ((flags & ALT_FORM2) ? 
SRAW : SRAD) | RC(flags) | S(src1) | A(dst) | B(src2)); + } + + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; +} + +static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src) +{ + sljit_s32 arg_count = 0; + sljit_s32 word_arg_count = 0; + sljit_s32 types = 0; + sljit_s32 reg = 0; + + if (src) + reg = *src & REG_MASK; + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK); + + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + case SLJIT_ARG_TYPE_F64: + arg_count++; + break; + default: + arg_count++; + word_arg_count++; + + if (arg_count != word_arg_count && arg_count == reg) { + FAIL_IF(push_inst(compiler, OR | S(reg) | A(TMP_CALL_REG) | B(reg))); + *src = TMP_CALL_REG; } + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + while (types) { + switch (types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + case SLJIT_ARG_TYPE_F64: + arg_count--; + break; + default: + if (arg_count != word_arg_count) + FAIL_IF(push_inst(compiler, OR | S(word_arg_count) | A(arg_count) | B(word_arg_count))); + + arg_count--; + word_arg_count--; + break; } - else - FAIL_IF(push_inst(compiler, ((flags & ALT_FORM2) ? SRAW : SRAD) | RC(flags) | S(src1) | A(dst) | B(src2))); - return (flags & ALT_FORM3) ? push_inst(compiler, MTXER | S(0)) : SLJIT_SUCCESS; + + types >>= SLJIT_DEF_SHIFT; } - SLJIT_ASSERT_STOP(); return SLJIT_SUCCESS; } -static SLJIT_INLINE sljit_si emit_const(struct sljit_compiler *compiler, sljit_si reg, sljit_sw init_value) +static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw init_value) { FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(init_value >> 48))); FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value >> 32))); @@ -398,18 +474,19 @@ static SLJIT_INLINE sljit_si emit_const(struct sljit_compiler *compiler, sljit_s return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value)); } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) { sljit_ins *inst = (sljit_ins*)addr; - inst[0] = (inst[0] & 0xffff0000) | ((new_addr >> 48) & 0xffff); - inst[1] = (inst[1] & 0xffff0000) | ((new_addr >> 32) & 0xffff); - inst[3] = (inst[3] & 0xffff0000) | ((new_addr >> 16) & 0xffff); - inst[4] = (inst[4] & 0xffff0000) | (new_addr & 0xffff); + inst[0] = (inst[0] & 0xffff0000) | ((new_target >> 48) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | ((new_target >> 32) & 0xffff); + inst[3] = (inst[3] & 0xffff0000) | ((new_target >> 16) & 0xffff); + inst[4] = (inst[4] & 0xffff0000) | (new_target & 0xffff); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); SLJIT_CACHE_FLUSH(inst, inst + 5); } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) { sljit_ins *inst = (sljit_ins*)addr; @@ -417,5 +494,6 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_consta inst[1] = (inst[1] & 0xffff0000) | ((new_constant >> 32) & 0xffff); inst[3] = (inst[3] & 0xffff0000) | ((new_constant >> 16) & 0xffff); inst[4] = (inst[4] & 0xffff0000) | (new_constant & 0xffff); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); SLJIT_CACHE_FLUSH(inst, inst + 5); } diff --git a/src/3rd/pcre/sjppcc.c 
b/src/3rd/pcre/sjppcc.c index 6c8e5810e3a..55a826c5cca 100644 --- a/src/3rd/pcre/sjppcc.c +++ b/src/3rd/pcre/sjppcc.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -24,19 +24,30 @@ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) +SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void) { return "PowerPC" SLJIT_CPUINFO; } /* Length of an instruction word. Both for ppc-32 and ppc-64. */ -typedef sljit_ui sljit_ins; +typedef sljit_u32 sljit_ins; + +#if ((defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) && (defined _AIX)) \ + || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define SLJIT_PPC_STACK_FRAME_V2 1 +#endif #ifdef _AIX #include <sys/cache.h> #endif +#if (defined _CALL_ELF && _CALL_ELF == 2) +#define SLJIT_PASS_ENTRY_ADDR_TO_CALL 1 +#endif + +#if (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL) + static void ppc_cache_flush(sljit_ins *from, sljit_ins *to) { #ifdef _AIX @@ -78,16 +89,27 @@ static void ppc_cache_flush(sljit_ins *from, sljit_ins *to) #endif /* _AIX */ } -#define TMP_REG1 (SLJIT_NO_REGISTERS + 1) -#define TMP_REG2 (SLJIT_NO_REGISTERS + 2) -#define TMP_REG3 (SLJIT_NO_REGISTERS + 3) -#define ZERO_REG (SLJIT_NO_REGISTERS + 4) +#endif /* (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL) */ + +#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2) +#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3) +#define TMP_ZERO (SLJIT_NUMBER_OF_REGISTERS + 4) + +#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL) +#define TMP_CALL_REG (SLJIT_NUMBER_OF_REGISTERS + 5) +#else +#define TMP_CALL_REG TMP_REG2 +#endif + +#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1) +#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) -#define TMP_FREG1 (0) -#define TMP_FREG2 (SLJIT_FLOAT_REG6 + 1) +static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 7] = { + 0, 3, 4, 5, 6, 7, 8, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 1, 9, 10, 31, 12 }; -static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { - 0, 3, 4, 5, 6, 7, 30, 29, 28, 27, 26, 1, 8, 9, 10, 31 +static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { + 0, 1, 2, 3, 4, 5, 6, 0, 7 }; /* --------------------------------------------------------------------- */ @@ -98,18 +120,19 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { #define A(a) (reg_map[a] << 16) #define B(b) (reg_map[b] << 11) #define C(c) (reg_map[c] << 6) -#define FD(fd) ((fd) << 21) -#define FA(fa) ((fa) << 16) -#define FB(fb) ((fb) << 11) -#define FC(fc) ((fc) << 6) +#define FD(fd) (freg_map[fd] << 21) +#define FS(fs) (freg_map[fs] << 21) +#define FA(fa) (freg_map[fa] << 16) +#define FB(fb) (freg_map[fb] << 11) +#define FC(fc) (freg_map[fc] << 6) #define IMM(imm) ((imm) & 0xffff) #define CRD(d) ((d) << 21) /* Instruction bit sections. OE and Rc flag (see ALT_SET_FLAGS). */ -#define OERC(flags) (((flags & ALT_SET_FLAGS) >> 10) | (flags & ALT_SET_FLAGS)) +#define OE(flags) ((flags) & ALT_SET_FLAGS) /* Rc flag (see ALT_SET_FLAGS). 
*/ -#define RC(flags) ((flags & ALT_SET_FLAGS) >> 10) +#define RC(flags) (((flags) & ALT_SET_FLAGS) >> 10) #define HI(opcode) ((opcode) << 26) #define LO(opcode) ((opcode) << 1) @@ -134,6 +157,7 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { #define CMPL (HI(31) | LO(32)) #define CMPLI (HI(10)) #define CROR (HI(19) | LO(449)) +#define DCBT (HI(31) | LO(278)) #define DIVD (HI(31) | LO(489)) #define DIVDU (HI(31) | LO(457)) #define DIVW (HI(31) | LO(491)) @@ -144,13 +168,17 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { #define FABS (HI(63) | LO(264)) #define FADD (HI(63) | LO(21)) #define FADDS (HI(59) | LO(21)) +#define FCFID (HI(63) | LO(846)) #define FCMPU (HI(63) | LO(0)) +#define FCTIDZ (HI(63) | LO(815)) +#define FCTIWZ (HI(63) | LO(15)) #define FDIV (HI(63) | LO(18)) #define FDIVS (HI(59) | LO(18)) #define FMR (HI(63) | LO(72)) #define FMUL (HI(63) | LO(25)) #define FMULS (HI(59) | LO(25)) #define FNEG (HI(63) | LO(40)) +#define FRSP (HI(63) | LO(12)) #define FSUB (HI(63) | LO(20)) #define FSUBS (HI(59) | LO(20)) #define LD (HI(58) | 0) @@ -187,6 +215,7 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { #define STD (HI(62) | 0) #define STDU (HI(62) | 1) #define STDUX (HI(31) | LO(181)) +#define STFIWX (HI(31) | LO(983)) #define STW (HI(36)) #define STWU (HI(37)) #define STWUX (HI(31) | LO(183)) @@ -202,6 +231,9 @@ static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { #define SIMM_MIN (-0x8000) #define UIMM_MAX (0xffff) +#define RLDI(dst, src, sh, mb, type) \ + (HI(30) | S(src) | A(dst) | ((type) << 2) | (((sh) & 0x1f) << 11) | (((sh) & 0x20) >> 4) | (((mb) & 0x1f) << 6) | ((mb) & 0x20)) + #if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct sljit_function_context* context, sljit_sw addr, void* func) { @@ -215,7 +247,7 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct } #endif -static sljit_si push_inst(struct sljit_compiler *compiler, sljit_ins ins) +static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins) { sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins)); FAIL_IF(!ptr); @@ -224,45 +256,126 @@ static sljit_si push_inst(struct sljit_compiler *compiler, sljit_ins ins) return SLJIT_SUCCESS; } -static SLJIT_INLINE sljit_si optimize_jump(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code) +static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset) { sljit_sw diff; sljit_uw target_addr; + sljit_sw extra_jump_flags; +#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL) && (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + if (jump->flags & (SLJIT_REWRITABLE_JUMP | IS_CALL)) + return 0; +#else if (jump->flags & SLJIT_REWRITABLE_JUMP) return 0; +#endif if (jump->flags & JUMP_ADDR) target_addr = jump->u.target; else { SLJIT_ASSERT(jump->flags & JUMP_LABEL); - target_addr = (sljit_uw)(code + jump->u.label->size); + target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset; } - diff = ((sljit_sw)target_addr - (sljit_sw)(code_ptr)) & ~0x3l; - if (jump->flags & UNCOND_B) { - if (diff <= 0x01ffffff && diff >= -0x02000000) { +#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL) && (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (jump->flags & IS_CALL) + goto keep_address; +#endif + + diff = 
((sljit_sw)target_addr - (sljit_sw)(code_ptr) - executable_offset) & ~0x3l; + + extra_jump_flags = 0; + if (jump->flags & IS_COND) { + if (diff <= 0x7fff && diff >= -0x8000) { jump->flags |= PATCH_B; return 1; } - if (target_addr <= 0x03ffffff) { - jump->flags |= PATCH_B | ABSOLUTE_B; + if (target_addr <= 0xffff) { + jump->flags |= PATCH_B | PATCH_ABS_B; return 1; } + extra_jump_flags = REMOVE_COND; + + diff -= sizeof(sljit_ins); + } + + if (diff <= 0x01ffffff && diff >= -0x02000000) { + jump->flags |= PATCH_B | extra_jump_flags; + return 1; + } + + if (target_addr <= 0x03ffffff) { + jump->flags |= PATCH_B | PATCH_ABS_B | extra_jump_flags; + return 1; + } + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL) +keep_address: +#endif + if (target_addr <= 0x7fffffff) { + jump->flags |= PATCH_ABS32; + return 1; + } + + if (target_addr <= 0x7fffffffffffl) { + jump->flags |= PATCH_ABS48; + return 1; + } +#endif + + return 0; +} + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + +static SLJIT_INLINE sljit_sw put_label_get_length(struct sljit_put_label *put_label, sljit_uw max_label) +{ + if (max_label < 0x100000000l) { + put_label->flags = 0; + return 1; + } + + if (max_label < 0x1000000000000l) { + put_label->flags = 1; + return 3; + } + + put_label->flags = 2; + return 4; +} + +static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label) +{ + sljit_uw addr = put_label->label->addr; + sljit_ins *inst = (sljit_ins *)put_label->addr; + sljit_s32 reg = *inst; + + if (put_label->flags == 0) { + SLJIT_ASSERT(addr < 0x100000000l); + inst[0] = ORIS | S(TMP_ZERO) | A(reg) | IMM(addr >> 16); } else { - if (diff <= 0x7fff && diff >= -0x8000) { - jump->flags |= PATCH_B; - return 1; + if (put_label->flags == 1) { + SLJIT_ASSERT(addr < 0x1000000000000l); + inst[0] = ORI | S(TMP_ZERO) | A(reg) | IMM(addr >> 32); } - if (target_addr <= 0xffff) { - jump->flags |= PATCH_B | ABSOLUTE_B; - return 1; + else { + inst[0] = ORIS | S(TMP_ZERO) | A(reg) | IMM(addr >> 48); + inst[1] = ORI | S(reg) | A(reg) | IMM((addr >> 32) & 0xffff); + inst ++; } + + inst[1] = RLDI(reg, reg, 32, 31, 1); + inst[2] = ORIS | S(reg) | A(reg) | IMM((addr >> 16) & 0xffff); + inst += 2; } - return 0; + + inst[1] = ORI | S(reg) | A(reg) | IMM(addr & 0xffff); } +#endif + SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) { struct sljit_memory_fragment *buf; @@ -271,14 +384,17 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil sljit_ins *buf_ptr; sljit_ins *buf_end; sljit_uw word_count; + sljit_uw next_addr; + sljit_sw executable_offset; sljit_uw addr; struct sljit_label *label; struct sljit_jump *jump; struct sljit_const *const_; + struct sljit_put_label *put_label; CHECK_ERROR_PTR(); - check_sljit_generate_code(compiler); + CHECK_PTR(check_sljit_generate_code(compiler)); reverse_buf(compiler); #if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) @@ -294,45 +410,87 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil code_ptr = code; word_count = 0; + next_addr = 0; + executable_offset = SLJIT_EXEC_OFFSET(code); + label = compiler->labels; jump = compiler->jumps; const_ = compiler->consts; + put_label = compiler->put_labels; + do { buf_ptr = (sljit_ins*)buf->memory; buf_end = buf_ptr + (buf->used_size >> 2); do { *code_ptr = *buf_ptr++; - SLJIT_ASSERT(!label || label->size >= word_count); - SLJIT_ASSERT(!jump || jump->addr >= word_count); - 
SLJIT_ASSERT(!const_ || const_->addr >= word_count); - /* These structures are ordered by their address. */ - if (label && label->size == word_count) { - /* Just recording the address. */ - label->addr = (sljit_uw)code_ptr; - label->size = code_ptr - code; - label = label->next; - } - if (jump && jump->addr == word_count) { + if (next_addr == word_count) { + SLJIT_ASSERT(!label || label->size >= word_count); + SLJIT_ASSERT(!jump || jump->addr >= word_count); + SLJIT_ASSERT(!const_ || const_->addr >= word_count); + SLJIT_ASSERT(!put_label || put_label->addr >= word_count); + + /* These structures are ordered by their address. */ + if (label && label->size == word_count) { + /* Just recording the address. */ + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + label->size = code_ptr - code; + label = label->next; + } + if (jump && jump->addr == word_count) { #if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - jump->addr = (sljit_uw)(code_ptr - 3); + jump->addr = (sljit_uw)(code_ptr - 3); #else - jump->addr = (sljit_uw)(code_ptr - 6); + jump->addr = (sljit_uw)(code_ptr - 6); #endif - if (optimize_jump(jump, code_ptr, code)) { + if (detect_jump_type(jump, code_ptr, code, executable_offset)) { #if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - code_ptr[-3] = code_ptr[0]; - code_ptr -= 3; + code_ptr[-3] = code_ptr[0]; + code_ptr -= 3; #else - code_ptr[-6] = code_ptr[0]; - code_ptr -= 6; + if (jump->flags & PATCH_ABS32) { + code_ptr -= 3; + code_ptr[-1] = code_ptr[2]; + code_ptr[0] = code_ptr[3]; + } + else if (jump->flags & PATCH_ABS48) { + code_ptr--; + code_ptr[-1] = code_ptr[0]; + code_ptr[0] = code_ptr[1]; + /* rldicr rX,rX,32,31 -> rX,rX,16,47 */ + SLJIT_ASSERT((code_ptr[-3] & 0xfc00ffff) == 0x780007c6); + code_ptr[-3] ^= 0x8422; + /* oris -> ori */ + code_ptr[-2] ^= 0x4000000; + } + else { + code_ptr[-6] = code_ptr[0]; + code_ptr -= 6; + } #endif + if (jump->flags & REMOVE_COND) { + code_ptr[0] = BCx | (2 << 2) | ((code_ptr[0] ^ (8 << 21)) & 0x03ff0001); + code_ptr++; + jump->addr += sizeof(sljit_ins); + code_ptr[0] = Bx; + jump->flags -= IS_COND; + } + } + jump = jump->next; } - jump = jump->next; - } - if (const_ && const_->addr == word_count) { - /* Just recording the address. 
*/ - const_->addr = (sljit_uw)code_ptr; - const_ = const_->next; + if (const_ && const_->addr == word_count) { + const_->addr = (sljit_uw)code_ptr; + const_ = const_->next; + } + if (put_label && put_label->addr == word_count) { + SLJIT_ASSERT(put_label->label); + put_label->addr = (sljit_uw)code_ptr; +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + code_ptr += put_label_get_length(put_label, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size)); + word_count += 4; +#endif + put_label = put_label->next; + } + next_addr = compute_next_addr(label, jump, const_, put_label); } code_ptr ++; word_count ++; @@ -342,7 +500,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil } while (buf); if (label && label->size == word_count) { - label->addr = (sljit_uw)code_ptr; + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); label->size = code_ptr - code; label = label->next; } @@ -350,6 +508,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil SLJIT_ASSERT(!label); SLJIT_ASSERT(!jump); SLJIT_ASSERT(!const_); + SLJIT_ASSERT(!put_label); + #if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size - (sizeof(struct sljit_function_context) / sizeof(sljit_ins))); #else @@ -360,39 +520,52 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil while (jump) { do { addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target; - buf_ptr = (sljit_ins*)jump->addr; + buf_ptr = (sljit_ins *)jump->addr; + if (jump->flags & PATCH_B) { - if (jump->flags & UNCOND_B) { - if (!(jump->flags & ABSOLUTE_B)) { - addr = addr - jump->addr; - SLJIT_ASSERT((sljit_sw)addr <= 0x01ffffff && (sljit_sw)addr >= -0x02000000); - *buf_ptr = Bx | (addr & 0x03fffffc) | ((*buf_ptr) & 0x1); + if (jump->flags & IS_COND) { + if (!(jump->flags & PATCH_ABS_B)) { + addr -= (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset); + SLJIT_ASSERT((sljit_sw)addr <= 0x7fff && (sljit_sw)addr >= -0x8000); + *buf_ptr = BCx | (addr & 0xfffc) | ((*buf_ptr) & 0x03ff0001); } else { - SLJIT_ASSERT(addr <= 0x03ffffff); - *buf_ptr = Bx | (addr & 0x03fffffc) | 0x2 | ((*buf_ptr) & 0x1); + SLJIT_ASSERT(addr <= 0xffff); + *buf_ptr = BCx | (addr & 0xfffc) | 0x2 | ((*buf_ptr) & 0x03ff0001); } } else { - if (!(jump->flags & ABSOLUTE_B)) { - addr = addr - jump->addr; - SLJIT_ASSERT((sljit_sw)addr <= 0x7fff && (sljit_sw)addr >= -0x8000); - *buf_ptr = BCx | (addr & 0xfffc) | ((*buf_ptr) & 0x03ff0001); + if (!(jump->flags & PATCH_ABS_B)) { + addr -= (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset); + SLJIT_ASSERT((sljit_sw)addr <= 0x01ffffff && (sljit_sw)addr >= -0x02000000); + *buf_ptr = Bx | (addr & 0x03fffffc) | ((*buf_ptr) & 0x1); } else { - addr = addr & ~0x3l; - SLJIT_ASSERT(addr <= 0xffff); - *buf_ptr = BCx | (addr & 0xfffc) | 0x2 | ((*buf_ptr) & 0x03ff0001); + SLJIT_ASSERT(addr <= 0x03ffffff); + *buf_ptr = Bx | (addr & 0x03fffffc) | 0x2 | ((*buf_ptr) & 0x1); } - } break; } + /* Set the fields of immediate loads. 
*/ #if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 16) & 0xffff); buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | (addr & 0xffff); #else + if (jump->flags & PATCH_ABS32) { + SLJIT_ASSERT(addr <= 0x7fffffff); + buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 16) & 0xffff); + buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | (addr & 0xffff); + break; + } + if (jump->flags & PATCH_ABS48) { + SLJIT_ASSERT(addr <= 0x7fffffffffff); + buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 32) & 0xffff); + buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | ((addr >> 16) & 0xffff); + buf_ptr[3] = (buf_ptr[3] & 0xffff0000) | (addr & 0xffff); + break; + } buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 48) & 0xffff); buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | ((addr >> 32) & 0xffff); buf_ptr[3] = (buf_ptr[3] & 0xffff0000) | ((addr >> 16) & 0xffff); @@ -402,23 +575,63 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil jump = jump->next; } + put_label = compiler->put_labels; + while (put_label) { +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + addr = put_label->label->addr; + buf_ptr = (sljit_ins *)put_label->addr; + + SLJIT_ASSERT((buf_ptr[0] & 0xfc1f0000) == ADDIS && (buf_ptr[1] & 0xfc000000) == ORI); + buf_ptr[0] |= (addr >> 16) & 0xffff; + buf_ptr[1] |= addr & 0xffff; +#else + put_label_set(put_label); +#endif + put_label = put_label->next; + } + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_offset = executable_offset; compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins); - SLJIT_CACHE_FLUSH(code, code_ptr); + + code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset); #if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) if (((sljit_sw)code_ptr) & 0x4) code_ptr++; +#endif sljit_set_function_context(NULL, (struct sljit_function_context*)code_ptr, (sljit_sw)code, (void*)sljit_generate_code); +#endif + + code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + + SLJIT_CACHE_FLUSH(code, code_ptr); + +#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) return code_ptr; #else - sljit_set_function_context(NULL, (struct sljit_function_context*)code_ptr, (sljit_sw)code, (void*)sljit_generate_code); - return code_ptr; + return code; #endif +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) +{ + switch (feature_type) { + case SLJIT_HAS_FPU: +#ifdef SLJIT_IS_FPU_AVAILABLE + return SLJIT_IS_FPU_AVAILABLE; #else - return code; + /* Available by default. */ + return 1; #endif + + case SLJIT_HAS_CLZ: + return 1; + + default: + return 0; + } } /* --------------------------------------------------------------------- */ @@ -430,47 +643,40 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil /* Creates an index in data_transfer_insts array. */ #define LOAD_DATA 0x01 #define INDEXED 0x02 -#define WRITE_BACK 0x04 +#define SIGNED_DATA 0x04 + #define WORD_DATA 0x00 #define BYTE_DATA 0x08 #define HALF_DATA 0x10 #define INT_DATA 0x18 -#define SIGNED_DATA 0x20 /* Separates integer and floating point registers */ -#define GPR_REG 0x3f -#define DOUBLE_DATA 0x40 +#define GPR_REG 0x1f +#define DOUBLE_DATA 0x20 #define MEM_MASK 0x7f /* Other inp_flags. */ -#define ARG_TEST 0x000100 /* Integer opertion and set flags -> requires exts on 64 bit systems. */ -#define ALT_SIGN_EXT 0x000200 +#define ALT_SIGN_EXT 0x000100 /* This flag affects the RC() and OERC() macros. 
*/ #define ALT_SET_FLAGS 0x000400 -#define ALT_KEEP_CACHE 0x000800 -#define ALT_FORM1 0x010000 -#define ALT_FORM2 0x020000 -#define ALT_FORM3 0x040000 -#define ALT_FORM4 0x080000 -#define ALT_FORM5 0x100000 -#define ALT_FORM6 0x200000 +#define ALT_FORM1 0x001000 +#define ALT_FORM2 0x002000 +#define ALT_FORM3 0x004000 +#define ALT_FORM4 0x008000 +#define ALT_FORM5 0x010000 /* Source and destination is register. */ #define REG_DEST 0x000001 #define REG1_SOURCE 0x000002 #define REG2_SOURCE 0x000004 -/* getput_arg_fast returned true. */ -#define FAST_DEST 0x000008 -/* Multiple instructions are required. */ -#define SLOW_DEST 0x000010 /* -ALT_SIGN_EXT 0x000200 -ALT_SET_FLAGS 0x000400 -ALT_FORM1 0x010000 +ALT_SIGN_EXT 0x000100 +ALT_SET_FLAGS 0x000200 +ALT_FORM1 0x001000 ... -ALT_FORM6 0x200000 */ +ALT_FORM5 0x010000 */ #if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) #include "sjppc32.c" @@ -486,110 +692,124 @@ ALT_FORM6 0x200000 */ #define STACK_LOAD LD #endif -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { + sljit_s32 args, i, tmp, offs; + CHECK_ERROR(); - check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); + + FAIL_IF(push_inst(compiler, MFLR | D(0))); + offs = -(sljit_s32)(sizeof(sljit_sw)); + FAIL_IF(push_inst(compiler, STACK_STORE | S(TMP_ZERO) | A(SLJIT_SP) | IMM(offs))); + + tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? 
(SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG; + for (i = SLJIT_S0; i >= tmp; i--) { + offs -= (sljit_s32)(sizeof(sljit_sw)); + FAIL_IF(push_inst(compiler, STACK_STORE | S(i) | A(SLJIT_SP) | IMM(offs))); + } + + for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) { + offs -= (sljit_s32)(sizeof(sljit_sw)); + FAIL_IF(push_inst(compiler, STACK_STORE | S(i) | A(SLJIT_SP) | IMM(offs))); + } + + SLJIT_ASSERT(offs == -(sljit_s32)GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 1)); - compiler->scratches = scratches; - compiler->saveds = saveds; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; +#if (defined SLJIT_PPC_STACK_FRAME_V2 && SLJIT_PPC_STACK_FRAME_V2) + FAIL_IF(push_inst(compiler, STACK_STORE | S(0) | A(SLJIT_SP) | IMM(2 * sizeof(sljit_sw)))); +#else + FAIL_IF(push_inst(compiler, STACK_STORE | S(0) | A(SLJIT_SP) | IMM(sizeof(sljit_sw)))); #endif - FAIL_IF(push_inst(compiler, MFLR | D(0))); - FAIL_IF(push_inst(compiler, STACK_STORE | S(ZERO_REG) | A(SLJIT_LOCALS_REG) | IMM(-(sljit_si)(sizeof(sljit_sw))) )); - if (saveds >= 1) - FAIL_IF(push_inst(compiler, STACK_STORE | S(SLJIT_SAVED_REG1) | A(SLJIT_LOCALS_REG) | IMM(-2 * (sljit_si)(sizeof(sljit_sw))) )); - if (saveds >= 2) - FAIL_IF(push_inst(compiler, STACK_STORE | S(SLJIT_SAVED_REG2) | A(SLJIT_LOCALS_REG) | IMM(-3 * (sljit_si)(sizeof(sljit_sw))) )); - if (saveds >= 3) - FAIL_IF(push_inst(compiler, STACK_STORE | S(SLJIT_SAVED_REG3) | A(SLJIT_LOCALS_REG) | IMM(-4 * (sljit_si)(sizeof(sljit_sw))) )); - if (saveds >= 4) - FAIL_IF(push_inst(compiler, STACK_STORE | S(SLJIT_SAVED_EREG1) | A(SLJIT_LOCALS_REG) | IMM(-5 * (sljit_si)(sizeof(sljit_sw))) )); - if (saveds >= 5) - FAIL_IF(push_inst(compiler, STACK_STORE | S(SLJIT_SAVED_EREG2) | A(SLJIT_LOCALS_REG) | IMM(-6 * (sljit_si)(sizeof(sljit_sw))) )); - FAIL_IF(push_inst(compiler, STACK_STORE | S(0) | A(SLJIT_LOCALS_REG) | IMM(sizeof(sljit_sw)) )); - - FAIL_IF(push_inst(compiler, ADDI | D(ZERO_REG) | A(0) | 0)); + FAIL_IF(push_inst(compiler, ADDI | D(TMP_ZERO) | A(0) | 0)); + + args = get_arg_count(arg_types); + if (args >= 1) - FAIL_IF(push_inst(compiler, OR | S(SLJIT_SCRATCH_REG1) | A(SLJIT_SAVED_REG1) | B(SLJIT_SCRATCH_REG1))); + FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0) | A(SLJIT_S0) | B(SLJIT_R0))); if (args >= 2) - FAIL_IF(push_inst(compiler, OR | S(SLJIT_SCRATCH_REG2) | A(SLJIT_SAVED_REG2) | B(SLJIT_SCRATCH_REG2))); + FAIL_IF(push_inst(compiler, OR | S(SLJIT_R1) | A(SLJIT_S1) | B(SLJIT_R1))); if (args >= 3) - FAIL_IF(push_inst(compiler, OR | S(SLJIT_SCRATCH_REG3) | A(SLJIT_SAVED_REG3) | B(SLJIT_SCRATCH_REG3))); + FAIL_IF(push_inst(compiler, OR | S(SLJIT_R2) | A(SLJIT_S2) | B(SLJIT_R2))); -#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) - compiler->local_size = (1 + saveds + 6 + 8) * sizeof(sljit_sw) + local_size; -#else - compiler->local_size = (1 + saveds + 2) * sizeof(sljit_sw) + local_size; -#endif - compiler->local_size = (compiler->local_size + 15) & ~0xf; + local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET; + local_size = (local_size + 15) & ~0xf; + compiler->local_size = local_size; #if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - if (compiler->local_size <= SIMM_MAX) - FAIL_IF(push_inst(compiler, STWU | S(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | IMM(-compiler->local_size))); + if (local_size <= SIMM_MAX) + FAIL_IF(push_inst(compiler, STWU | S(SLJIT_SP) | A(SLJIT_SP) | IMM(-local_size))); else { - FAIL_IF(load_immediate(compiler, 0, -compiler->local_size)); - 
FAIL_IF(push_inst(compiler, STWUX | S(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | B(0))); + FAIL_IF(load_immediate(compiler, 0, -local_size)); + FAIL_IF(push_inst(compiler, STWUX | S(SLJIT_SP) | A(SLJIT_SP) | B(0))); } #else - if (compiler->local_size <= SIMM_MAX) - FAIL_IF(push_inst(compiler, STDU | S(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | IMM(-compiler->local_size))); + if (local_size <= SIMM_MAX) + FAIL_IF(push_inst(compiler, STDU | S(SLJIT_SP) | A(SLJIT_SP) | IMM(-local_size))); else { - FAIL_IF(load_immediate(compiler, 0, -compiler->local_size)); - FAIL_IF(push_inst(compiler, STDUX | S(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | B(0))); + FAIL_IF(load_immediate(compiler, 0, -local_size)); + FAIL_IF(push_inst(compiler, STDUX | S(SLJIT_SP) | A(SLJIT_SP) | B(0))); } #endif return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - CHECK_ERROR_VOID(); - check_sljit_set_context(compiler, args, scratches, saveds, local_size); - - compiler->scratches = scratches; - compiler->saveds = saveds; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; -#endif + CHECK_ERROR(); + CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); -#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) - compiler->local_size = (1 + saveds + 6 + 8) * sizeof(sljit_sw) + local_size; -#else - compiler->local_size = (1 + saveds + 2) * sizeof(sljit_sw) + local_size; -#endif - compiler->local_size = (compiler->local_size + 15) & ~0xf; + local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + SLJIT_LOCALS_OFFSET; + compiler->local_size = (local_size + 15) & ~0xf; + return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { + sljit_s32 i, tmp, offs; + CHECK_ERROR(); - check_sljit_emit_return(compiler, op, src, srcw); + CHECK(check_sljit_emit_return(compiler, op, src, srcw)); FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); if (compiler->local_size <= SIMM_MAX) - FAIL_IF(push_inst(compiler, ADDI | D(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | IMM(compiler->local_size))); + FAIL_IF(push_inst(compiler, ADDI | D(SLJIT_SP) | A(SLJIT_SP) | IMM(compiler->local_size))); else { FAIL_IF(load_immediate(compiler, 0, compiler->local_size)); - FAIL_IF(push_inst(compiler, ADD | D(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | B(0))); - } - - FAIL_IF(push_inst(compiler, STACK_LOAD | D(0) | A(SLJIT_LOCALS_REG) | IMM(sizeof(sljit_sw)))); - if (compiler->saveds >= 5) - FAIL_IF(push_inst(compiler, STACK_LOAD | D(SLJIT_SAVED_EREG2) | A(SLJIT_LOCALS_REG) | IMM(-6 * (sljit_si)(sizeof(sljit_sw))) )); - if (compiler->saveds >= 4) - FAIL_IF(push_inst(compiler, STACK_LOAD | D(SLJIT_SAVED_EREG1) | A(SLJIT_LOCALS_REG) | IMM(-5 * (sljit_si)(sizeof(sljit_sw))) )); - if (compiler->saveds >= 3) - FAIL_IF(push_inst(compiler, STACK_LOAD | D(SLJIT_SAVED_REG3) | A(SLJIT_LOCALS_REG) | 
IMM(-4 * (sljit_si)(sizeof(sljit_sw))) )); - if (compiler->saveds >= 2) - FAIL_IF(push_inst(compiler, STACK_LOAD | D(SLJIT_SAVED_REG2) | A(SLJIT_LOCALS_REG) | IMM(-3 * (sljit_si)(sizeof(sljit_sw))) )); - if (compiler->saveds >= 1) - FAIL_IF(push_inst(compiler, STACK_LOAD | D(SLJIT_SAVED_REG1) | A(SLJIT_LOCALS_REG) | IMM(-2 * (sljit_si)(sizeof(sljit_sw))) )); - FAIL_IF(push_inst(compiler, STACK_LOAD | D(ZERO_REG) | A(SLJIT_LOCALS_REG) | IMM(-(sljit_si)(sizeof(sljit_sw))) )); + FAIL_IF(push_inst(compiler, ADD | D(SLJIT_SP) | A(SLJIT_SP) | B(0))); + } + +#if (defined SLJIT_PPC_STACK_FRAME_V2 && SLJIT_PPC_STACK_FRAME_V2) + FAIL_IF(push_inst(compiler, STACK_LOAD | D(0) | A(SLJIT_SP) | IMM(2 * sizeof(sljit_sw)))); +#else + FAIL_IF(push_inst(compiler, STACK_LOAD | D(0) | A(SLJIT_SP) | IMM(sizeof(sljit_sw)))); +#endif + + offs = -(sljit_s32)GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 1); + + tmp = compiler->scratches; + for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) { + FAIL_IF(push_inst(compiler, STACK_LOAD | D(i) | A(SLJIT_SP) | IMM(offs))); + offs += (sljit_s32)(sizeof(sljit_sw)); + } + + tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG; + for (i = tmp; i <= SLJIT_S0; i++) { + FAIL_IF(push_inst(compiler, STACK_LOAD | D(i) | A(SLJIT_SP) | IMM(offs))); + offs += (sljit_s32)(sizeof(sljit_sw)); + } + + FAIL_IF(push_inst(compiler, STACK_LOAD | D(TMP_ZERO) | A(SLJIT_SP) | IMM(offs))); + SLJIT_ASSERT(offs == -(sljit_sw)(sizeof(sljit_sw))); FAIL_IF(push_inst(compiler, MTLR | S(0))); FAIL_IF(push_inst(compiler, BLR)); @@ -604,17 +824,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compi /* Operators */ /* --------------------------------------------------------------------- */ -/* i/x - immediate/indexed form - n/w - no write-back / write-back (1 bit) - s/l - store/load (1 bit) +/* s/l - store/load (1 bit) + i/x - immediate/indexed form u/s - signed/unsigned (1 bit) w/b/h/i - word/byte/half/int allowed (2 bit) - It contans 32 items, but not all are different. */ + + Some opcodes are repeated (e.g. store signed / unsigned byte is the same instruction). */ /* 64 bit only: [reg+imm] must be aligned to 4 bytes. */ -#define ADDR_MODE2 0x10000 -/* 64-bit only: there is no lwau instruction. */ -#define UPDATE_REQ 0x20000 +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define INT_ALIGNED 0x10000 +#endif #if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) #define ARCH_32_64(a, b) a @@ -623,393 +843,242 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compi #else #define ARCH_32_64(a, b) b #define INST_CODE_AND_DST(inst, flags, reg) \ - (((inst) & ~(ADDR_MODE2 | UPDATE_REQ)) | (((flags) & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg))) + (((inst) & ~INT_ALIGNED) | (((flags) & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg))) #endif -static SLJIT_CONST sljit_ins data_transfer_insts[64 + 8] = { +static const sljit_ins data_transfer_insts[64 + 16] = { -/* -------- Unsigned -------- */ +/* -------- Integer -------- */ /* Word. 
*/ -/* u w n i s */ ARCH_32_64(HI(36) /* stw */, HI(62) | ADDR_MODE2 | 0x0 /* std */), -/* u w n i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | ADDR_MODE2 | 0x0 /* ld */), -/* u w n x s */ ARCH_32_64(HI(31) | LO(151) /* stwx */, HI(31) | LO(149) /* stdx */), -/* u w n x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(21) /* ldx */), +/* w u i s */ ARCH_32_64(HI(36) /* stw */, HI(62) | INT_ALIGNED | 0x0 /* std */), +/* w u i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | INT_ALIGNED | 0x0 /* ld */), +/* w u x s */ ARCH_32_64(HI(31) | LO(151) /* stwx */, HI(31) | LO(149) /* stdx */), +/* w u x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(21) /* ldx */), -/* u w w i s */ ARCH_32_64(HI(37) /* stwu */, HI(62) | ADDR_MODE2 | 0x1 /* stdu */), -/* u w w i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | ADDR_MODE2 | 0x1 /* ldu */), -/* u w w x s */ ARCH_32_64(HI(31) | LO(183) /* stwux */, HI(31) | LO(181) /* stdux */), -/* u w w x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(53) /* ldux */), +/* w s i s */ ARCH_32_64(HI(36) /* stw */, HI(62) | INT_ALIGNED | 0x0 /* std */), +/* w s i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | INT_ALIGNED | 0x0 /* ld */), +/* w s x s */ ARCH_32_64(HI(31) | LO(151) /* stwx */, HI(31) | LO(149) /* stdx */), +/* w s x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(21) /* ldx */), /* Byte. */ -/* u b n i s */ HI(38) /* stb */, -/* u b n i l */ HI(34) /* lbz */, -/* u b n x s */ HI(31) | LO(215) /* stbx */, -/* u b n x l */ HI(31) | LO(87) /* lbzx */, +/* b u i s */ HI(38) /* stb */, +/* b u i l */ HI(34) /* lbz */, +/* b u x s */ HI(31) | LO(215) /* stbx */, +/* b u x l */ HI(31) | LO(87) /* lbzx */, -/* u b w i s */ HI(39) /* stbu */, -/* u b w i l */ HI(35) /* lbzu */, -/* u b w x s */ HI(31) | LO(247) /* stbux */, -/* u b w x l */ HI(31) | LO(119) /* lbzux */, +/* b s i s */ HI(38) /* stb */, +/* b s i l */ HI(34) /* lbz */ /* EXTS_REQ */, +/* b s x s */ HI(31) | LO(215) /* stbx */, +/* b s x l */ HI(31) | LO(87) /* lbzx */ /* EXTS_REQ */, /* Half. */ -/* u h n i s */ HI(44) /* sth */, -/* u h n i l */ HI(40) /* lhz */, -/* u h n x s */ HI(31) | LO(407) /* sthx */, -/* u h n x l */ HI(31) | LO(279) /* lhzx */, +/* h u i s */ HI(44) /* sth */, +/* h u i l */ HI(40) /* lhz */, +/* h u x s */ HI(31) | LO(407) /* sthx */, +/* h u x l */ HI(31) | LO(279) /* lhzx */, -/* u h w i s */ HI(45) /* sthu */, -/* u h w i l */ HI(41) /* lhzu */, -/* u h w x s */ HI(31) | LO(439) /* sthux */, -/* u h w x l */ HI(31) | LO(311) /* lhzux */, +/* h s i s */ HI(44) /* sth */, +/* h s i l */ HI(42) /* lha */, +/* h s x s */ HI(31) | LO(407) /* sthx */, +/* h s x l */ HI(31) | LO(343) /* lhax */, /* Int. 
*/ -/* u i n i s */ HI(36) /* stw */, -/* u i n i l */ HI(32) /* lwz */, -/* u i n x s */ HI(31) | LO(151) /* stwx */, -/* u i n x l */ HI(31) | LO(23) /* lwzx */, +/* i u i s */ HI(36) /* stw */, +/* i u i l */ HI(32) /* lwz */, +/* i u x s */ HI(31) | LO(151) /* stwx */, +/* i u x l */ HI(31) | LO(23) /* lwzx */, + +/* i s i s */ HI(36) /* stw */, +/* i s i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | INT_ALIGNED | 0x2 /* lwa */), +/* i s x s */ HI(31) | LO(151) /* stwx */, +/* i s x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(341) /* lwax */), + +/* -------- Floating point -------- */ -/* u i w i s */ HI(37) /* stwu */, -/* u i w i l */ HI(33) /* lwzu */, -/* u i w x s */ HI(31) | LO(183) /* stwux */, -/* u i w x l */ HI(31) | LO(55) /* lwzux */, +/* d i s */ HI(54) /* stfd */, +/* d i l */ HI(50) /* lfd */, +/* d x s */ HI(31) | LO(727) /* stfdx */, +/* d x l */ HI(31) | LO(599) /* lfdx */, -/* -------- Signed -------- */ +/* s i s */ HI(52) /* stfs */, +/* s i l */ HI(48) /* lfs */, +/* s x s */ HI(31) | LO(663) /* stfsx */, +/* s x l */ HI(31) | LO(535) /* lfsx */, +}; + +static const sljit_ins updated_data_transfer_insts[64] = { + +/* -------- Integer -------- */ /* Word. */ -/* s w n i s */ ARCH_32_64(HI(36) /* stw */, HI(62) | ADDR_MODE2 | 0x0 /* std */), -/* s w n i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | ADDR_MODE2 | 0x0 /* ld */), -/* s w n x s */ ARCH_32_64(HI(31) | LO(151) /* stwx */, HI(31) | LO(149) /* stdx */), -/* s w n x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(21) /* ldx */), +/* w u i s */ ARCH_32_64(HI(37) /* stwu */, HI(62) | INT_ALIGNED | 0x1 /* stdu */), +/* w u i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | INT_ALIGNED | 0x1 /* ldu */), +/* w u x s */ ARCH_32_64(HI(31) | LO(183) /* stwux */, HI(31) | LO(181) /* stdux */), +/* w u x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(53) /* ldux */), -/* s w w i s */ ARCH_32_64(HI(37) /* stwu */, HI(62) | ADDR_MODE2 | 0x1 /* stdu */), -/* s w w i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | ADDR_MODE2 | 0x1 /* ldu */), -/* s w w x s */ ARCH_32_64(HI(31) | LO(183) /* stwux */, HI(31) | LO(181) /* stdux */), -/* s w w x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(53) /* ldux */), +/* w s i s */ ARCH_32_64(HI(37) /* stwu */, HI(62) | INT_ALIGNED | 0x1 /* stdu */), +/* w s i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | INT_ALIGNED | 0x1 /* ldu */), +/* w s x s */ ARCH_32_64(HI(31) | LO(183) /* stwux */, HI(31) | LO(181) /* stdux */), +/* w s x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(53) /* ldux */), /* Byte. */ -/* s b n i s */ HI(38) /* stb */, -/* s b n i l */ HI(34) /* lbz */ /* EXTS_REQ */, -/* s b n x s */ HI(31) | LO(215) /* stbx */, -/* s b n x l */ HI(31) | LO(87) /* lbzx */ /* EXTS_REQ */, +/* b u i s */ HI(39) /* stbu */, +/* b u i l */ HI(35) /* lbzu */, +/* b u x s */ HI(31) | LO(247) /* stbux */, +/* b u x l */ HI(31) | LO(119) /* lbzux */, -/* s b w i s */ HI(39) /* stbu */, -/* s b w i l */ HI(35) /* lbzu */ /* EXTS_REQ */, -/* s b w x s */ HI(31) | LO(247) /* stbux */, -/* s b w x l */ HI(31) | LO(119) /* lbzux */ /* EXTS_REQ */, +/* b s i s */ HI(39) /* stbu */, +/* b s i l */ 0 /* no such instruction */, +/* b s x s */ HI(31) | LO(247) /* stbux */, +/* b s x l */ 0 /* no such instruction */, /* Half. 
*/ -/* s h n i s */ HI(44) /* sth */, -/* s h n i l */ HI(42) /* lha */, -/* s h n x s */ HI(31) | LO(407) /* sthx */, -/* s h n x l */ HI(31) | LO(343) /* lhax */, +/* h u i s */ HI(45) /* sthu */, +/* h u i l */ HI(41) /* lhzu */, +/* h u x s */ HI(31) | LO(439) /* sthux */, +/* h u x l */ HI(31) | LO(311) /* lhzux */, -/* s h w i s */ HI(45) /* sthu */, -/* s h w i l */ HI(43) /* lhau */, -/* s h w x s */ HI(31) | LO(439) /* sthux */, -/* s h w x l */ HI(31) | LO(375) /* lhaux */, +/* h s i s */ HI(45) /* sthu */, +/* h s i l */ HI(43) /* lhau */, +/* h s x s */ HI(31) | LO(439) /* sthux */, +/* h s x l */ HI(31) | LO(375) /* lhaux */, /* Int. */ -/* s i n i s */ HI(36) /* stw */, -/* s i n i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | ADDR_MODE2 | 0x2 /* lwa */), -/* s i n x s */ HI(31) | LO(151) /* stwx */, -/* s i n x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(341) /* lwax */), - -/* s i w i s */ HI(37) /* stwu */, -/* s i w i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | ADDR_MODE2 | UPDATE_REQ | 0x2 /* lwa */), -/* s i w x s */ HI(31) | LO(183) /* stwux */, -/* s i w x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(373) /* lwaux */), +/* i u i s */ HI(37) /* stwu */, +/* i u i l */ HI(33) /* lwzu */, +/* i u x s */ HI(31) | LO(183) /* stwux */, +/* i u x l */ HI(31) | LO(55) /* lwzux */, -/* -------- Double -------- */ +/* i s i s */ HI(37) /* stwu */, +/* i s i l */ ARCH_32_64(HI(33) /* lwzu */, 0 /* no such instruction */), +/* i s x s */ HI(31) | LO(183) /* stwux */, +/* i s x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(373) /* lwaux */), -/* d n i s */ HI(54) /* stfd */, -/* d n i l */ HI(50) /* lfd */, -/* d n x s */ HI(31) | LO(727) /* stfdx */, -/* d n x l */ HI(31) | LO(599) /* lfdx */, +/* -------- Floating point -------- */ -/* s n i s */ HI(52) /* stfs */, -/* s n i l */ HI(48) /* lfs */, -/* s n x s */ HI(31) | LO(663) /* stfsx */, -/* s n x l */ HI(31) | LO(535) /* lfsx */, +/* d i s */ HI(55) /* stfdu */, +/* d i l */ HI(51) /* lfdu */, +/* d x s */ HI(31) | LO(759) /* stfdux */, +/* d x l */ HI(31) | LO(631) /* lfdux */, +/* s i s */ HI(53) /* stfsu */, +/* s i l */ HI(49) /* lfsu */, +/* s x s */ HI(31) | LO(695) /* stfsux */, +/* s x l */ HI(31) | LO(567) /* lfsux */, }; #undef ARCH_32_64 /* Simple cases, (no caching is required). */ -static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si inp_flags, sljit_si reg, sljit_si arg, sljit_sw argw) +static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flags, sljit_s32 reg, + sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg) { sljit_ins inst; -#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - sljit_si tmp_reg; -#endif + sljit_s32 offs_reg; + sljit_sw high_short; + /* Should work when (arg & REG_MASK) == 0. 
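   (In the PowerPC D-form and X-form memory encodings a base field of zero
   means the literal value 0, not r0, which is why A(0) below is a legitimate
   "no base register" encoding.) For orientation, the tables above are indexed
   by OR-ing the inp_flags bits together; an illustrative sample of the scheme:

   //  index = size | signedness | addressing | direction   (masked with MEM_MASK)
   //    WORD_DATA                            ->  stw / std     store word
   //    INT_DATA | LOAD_DATA                 ->  lwz           zero-extending load
   //    INT_DATA | SIGNED_DATA | LOAD_DATA   ->  lwa (64 bit)  sign-extending load
   //    ... | INDEXED                        ->  the x-form (register + register) variant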
*/ + SLJIT_ASSERT(A(0) == 0); SLJIT_ASSERT(arg & SLJIT_MEM); - if (!(arg & 0xf)) { -#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - if (argw <= SIMM_MAX && argw >= SIMM_MIN) { - if (inp_flags & ARG_TEST) - return 1; - - inst = data_transfer_insts[(inp_flags & ~WRITE_BACK) & MEM_MASK]; - SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); - push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | IMM(argw)); - return -1; - } -#else - inst = data_transfer_insts[(inp_flags & ~WRITE_BACK) & MEM_MASK]; - if (argw <= SIMM_MAX && argw >= SIMM_MIN && - (!(inst & ADDR_MODE2) || (argw & 0x3) == 0)) { - if (inp_flags & ARG_TEST) - return 1; - - push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | IMM(argw)); - return -1; - } -#endif - return 0; - } - if (!(arg & 0xf0)) { + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { + argw &= 0x3; + offs_reg = OFFS_REG(arg); + + if (argw != 0) { #if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - if (argw <= SIMM_MAX && argw >= SIMM_MIN) { - if (inp_flags & ARG_TEST) - return 1; - - inst = data_transfer_insts[inp_flags & MEM_MASK]; - SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); - push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | IMM(argw)); - return -1; - } + FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(arg)) | A(tmp_reg) | (argw << 11) | ((31 - argw) << 1))); #else - inst = data_transfer_insts[inp_flags & MEM_MASK]; - if (argw <= SIMM_MAX && argw >= SIMM_MIN && (!(inst & ADDR_MODE2) || (argw & 0x3) == 0)) { - if (inp_flags & ARG_TEST) - return 1; - - if ((inp_flags & WRITE_BACK) && (inst & UPDATE_REQ)) { - tmp_reg = (inp_flags & LOAD_DATA) ? (arg & 0xf) : TMP_REG3; - if (push_inst(compiler, ADDI | D(tmp_reg) | A(arg & 0xf) | IMM(argw))) - return -1; - arg = tmp_reg | SLJIT_MEM; - argw = 0; - } - push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | IMM(argw)); - return -1; - } + FAIL_IF(push_inst(compiler, RLDI(tmp_reg, OFFS_REG(arg), argw, 63 - argw, 1))); #endif - } - else if (!(argw & 0x3)) { - if (inp_flags & ARG_TEST) - return 1; - inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; - SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); - push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | B((arg >> 4) & 0xf)); - return -1; - } - return 0; -} - -/* See getput_arg below. - Note: can_cache is called only for binary operators. Those operator always - uses word arguments without write back. 
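   One detail worth spelling out from the OFFS_REG path above: rlwinm
   rT,rS,n,0,31-n is the canonical encoding of slwi (shift left word
   immediate), i.e. rotate left by n and clear the n wrapped-around low bits,
   which is exactly rS << n with nothing left over; RLDI plays the same role
   on 64 bit. The net effect, as a sketch:

   //  tmp_reg = offs_reg << (argw & 0x3);
   //  ea      = base + tmp_reg;        e.g. mem[base + (i << 2)] for 4-byte elements

   so SLJIT_MEM2 operands provide scaled array indexing in two instructions.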
*/ -static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) -{ - SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM)); + offs_reg = tmp_reg; + } - if (!(arg & 0xf)) - return (next_arg & SLJIT_MEM) && ((sljit_uw)argw - (sljit_uw)next_argw <= SIMM_MAX || (sljit_uw)next_argw - (sljit_uw)argw <= SIMM_MAX); + inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; - if (arg & 0xf0) - return ((arg & 0xf0) == (next_arg & 0xf0) && (argw & 0x3) == (next_argw & 0x3)); +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + SLJIT_ASSERT(!(inst & INT_ALIGNED)); +#endif - if (argw <= SIMM_MAX && argw >= SIMM_MIN) { - if (arg == next_arg && (next_argw >= SIMM_MAX && next_argw <= SIMM_MIN)) - return 1; + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | B(offs_reg)); } - if (arg == next_arg && ((sljit_uw)argw - (sljit_uw)next_argw <= SIMM_MAX || (sljit_uw)next_argw - (sljit_uw)argw <= SIMM_MAX)) - return 1; - - return 0; -} + inst = data_transfer_insts[inp_flags & MEM_MASK]; + arg &= REG_MASK; #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) -#define ADJUST_CACHED_IMM(imm) \ - if ((inst & ADDR_MODE2) && (imm & 0x3)) { \ - /* Adjust cached value. Fortunately this is really a rare case */ \ - compiler->cache_argw += imm & 0x3; \ - FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG3) | A(TMP_REG3) | (imm & 0x3))); \ - imm &= ~0x3; \ + if ((inst & INT_ALIGNED) && (argw & 0x3) != 0) { + FAIL_IF(load_immediate(compiler, tmp_reg, argw)); + + inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | B(tmp_reg)); } -#else -#define ADJUST_CACHED_IMM(imm) #endif -/* Emit the necessary instructions. See can_cache above. */ -static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si inp_flags, sljit_si reg, sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) -{ - sljit_si tmp_r; - sljit_ins inst; - - SLJIT_ASSERT(arg & SLJIT_MEM); - - tmp_r = ((inp_flags & LOAD_DATA) && ((inp_flags) & MEM_MASK) <= GPR_REG) ? reg : TMP_REG1; - /* Special case for "mov reg, [reg, ... ]". */ - if ((arg & 0xf) == tmp_r) - tmp_r = TMP_REG1; - - if (!(arg & 0xf)) { - inst = data_transfer_insts[(inp_flags & ~WRITE_BACK) & MEM_MASK]; - if ((compiler->cache_arg & SLJIT_IMM) && (((sljit_uw)argw - (sljit_uw)compiler->cache_argw) <= SIMM_MAX || ((sljit_uw)compiler->cache_argw - (sljit_uw)argw) <= SIMM_MAX)) { - argw = argw - compiler->cache_argw; - ADJUST_CACHED_IMM(argw); - SLJIT_ASSERT(!(inst & UPDATE_REQ)); - return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(TMP_REG3) | IMM(argw)); - } - - if ((next_arg & SLJIT_MEM) && (argw - next_argw <= SIMM_MAX || next_argw - argw <= SIMM_MAX)) { - SLJIT_ASSERT(inp_flags & LOAD_DATA); - - compiler->cache_arg = SLJIT_IMM; - compiler->cache_argw = argw; - tmp_r = TMP_REG3; - } + if (argw <= SIMM_MAX && argw >= SIMM_MIN) + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | IMM(argw)); - FAIL_IF(load_immediate(compiler, tmp_r, argw)); - return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(tmp_r)); - } +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (argw <= 0x7fff7fffl && argw >= -0x80000000l) { +#endif - if (SLJIT_UNLIKELY(arg & 0xf0)) { - argw &= 0x3; - /* Otherwise getput_arg_fast would capture it. 
*/ - SLJIT_ASSERT(argw); + high_short = (sljit_s32)(argw + ((argw & 0x8000) << 1)) & ~0xffff; - if ((SLJIT_MEM | (arg & 0xf0)) == compiler->cache_arg && argw == compiler->cache_argw) - tmp_r = TMP_REG3; - else { - if ((arg & 0xf0) == (next_arg & 0xf0) && argw == (next_argw & 0x3)) { - compiler->cache_arg = SLJIT_MEM | (arg & 0xf0); - compiler->cache_argw = argw; - tmp_r = TMP_REG3; - } -#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - FAIL_IF(push_inst(compiler, RLWINM | S((arg >> 4) & 0xf) | A(tmp_r) | (argw << 11) | ((31 - argw) << 1))); +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + SLJIT_ASSERT(high_short && high_short <= 0x7fffffffl && high_short >= -0x80000000l); #else - FAIL_IF(push_inst(compiler, RLDI(tmp_r, (arg >> 4) & 0xf, argw, 63 - argw, 1))); + SLJIT_ASSERT(high_short); #endif - } - inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; - SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); - return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | B(tmp_r)); - } - - inst = data_transfer_insts[inp_flags & MEM_MASK]; - - if (compiler->cache_arg == arg && ((sljit_uw)argw - (sljit_uw)compiler->cache_argw <= SIMM_MAX || (sljit_uw)compiler->cache_argw - (sljit_uw)argw <= SIMM_MAX)) { - SLJIT_ASSERT(!(inp_flags & WRITE_BACK)); - argw = argw - compiler->cache_argw; - ADJUST_CACHED_IMM(argw); - return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(TMP_REG3) | IMM(argw)); - } - - if ((compiler->cache_arg & SLJIT_IMM) && compiler->cache_argw == argw) { - inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; - SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); - return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | B(TMP_REG3)); - } - if (argw == next_argw && (next_arg & SLJIT_MEM)) { - SLJIT_ASSERT(inp_flags & LOAD_DATA); - FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + FAIL_IF(push_inst(compiler, ADDIS | D(tmp_reg) | A(arg) | IMM(high_short >> 16))); + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(tmp_reg) | IMM(argw)); - compiler->cache_arg = SLJIT_IMM; - compiler->cache_argw = argw; - - inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; - SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); - return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | B(TMP_REG3)); +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) } - if (arg == next_arg && !(inp_flags & WRITE_BACK) && ((sljit_uw)argw - (sljit_uw)next_argw <= SIMM_MAX || (sljit_uw)next_argw - (sljit_uw)argw <= SIMM_MAX)) { - SLJIT_ASSERT(inp_flags & LOAD_DATA); - FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); - FAIL_IF(push_inst(compiler, ADD | D(TMP_REG3) | A(TMP_REG3) | B(arg & 0xf))); - - compiler->cache_arg = arg; - compiler->cache_argw = argw; + /* The rest is PPC-64 only. */ - return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(TMP_REG3)); - } + FAIL_IF(load_immediate(compiler, tmp_reg, argw)); - /* Get the indexed version instead of the normal one. 
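   The high_short computation in the replacement code above is the classic
   addis rounding trick: the low 16 bits of argw are later consumed as a
   sign-extended immediate, so when bit 15 is set the high half must be
   rounded up by 0x10000 to compensate. A worked example:

   //  argw       = 0x1234abcd         low half 0xabcd sign-extends to -0x5433
   //  high_short = (argw + ((argw & 0x8000) << 1)) & ~0xffff  ==  0x12350000
   //  addis tmp_reg, base, 0x1235     then the load/store supplies IMM(argw)
   //  0x12350000 + (-0x5433)  ==  0x1234abcd

   so any 32-bit displacement is reachable in two instructions.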
*/ inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; - SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); - FAIL_IF(load_immediate(compiler, tmp_r, argw)); - return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | B(tmp_r)); -} - -static SLJIT_INLINE sljit_si emit_op_mem2(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg1, sljit_sw arg1w, sljit_si arg2, sljit_sw arg2w) -{ - if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) - return compiler->error; - return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | B(tmp_reg)); +#endif } -static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si input_flags, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 input_flags, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { /* arg1 goes to TMP_REG1 or src reg arg2 goes to TMP_REG2, imm or src reg - TMP_REG3 can be used for caching - result goes to TMP_REG2, so put result can use TMP_REG1 and TMP_REG3. */ - sljit_si dst_r; - sljit_si src1_r; - sljit_si src2_r; - sljit_si sugg_src2_r = TMP_REG2; - sljit_si flags = input_flags & (ALT_FORM1 | ALT_FORM2 | ALT_FORM3 | ALT_FORM4 | ALT_FORM5 | ALT_FORM6 | ALT_SIGN_EXT | ALT_SET_FLAGS); - - if (!(input_flags & ALT_KEEP_CACHE)) { - compiler->cache_arg = 0; - compiler->cache_argw = 0; - } + result goes to TMP_REG2, so put result can use TMP_REG1. */ + sljit_s32 dst_r = TMP_REG2; + sljit_s32 src1_r; + sljit_s32 src2_r; + sljit_s32 sugg_src2_r = TMP_REG2; + sljit_s32 flags = input_flags & (ALT_FORM1 | ALT_FORM2 | ALT_FORM3 | ALT_FORM4 | ALT_FORM5 | ALT_SIGN_EXT | ALT_SET_FLAGS); /* Destination check. */ - if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) { - if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI && !(src2 & SLJIT_MEM)) - return SLJIT_SUCCESS; - dst_r = TMP_REG2; - } - else if (dst <= ZERO_REG) { + if (SLOW_IS_REG(dst)) { dst_r = dst; flags |= REG_DEST; - if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) + + if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) sugg_src2_r = dst_r; } - else { - SLJIT_ASSERT(dst & SLJIT_MEM); - if (getput_arg_fast(compiler, input_flags | ARG_TEST, TMP_REG2, dst, dstw)) { - flags |= FAST_DEST; - dst_r = TMP_REG2; - } - else { - flags |= SLOW_DEST; - dst_r = 0; - } - } /* Source 1. */ - if (src1 <= ZERO_REG) { + if (FAST_IS_REG(src1)) { src1_r = src1; flags |= REG1_SOURCE; } @@ -1017,239 +1086,201 @@ static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si i FAIL_IF(load_immediate(compiler, TMP_REG1, src1w)); src1_r = TMP_REG1; } - else if (getput_arg_fast(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w)) { - FAIL_IF(compiler->error); + else { + FAIL_IF(emit_op_mem(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, TMP_REG1)); src1_r = TMP_REG1; } - else - src1_r = 0; /* Source 2. 
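   As with source 1, with one extra wrinkle visible below: for plain move
   opcodes the loader is pointed at sugg_src2_r, which is the destination
   register whenever the destination is a register, so a memory-to-register
   move needs no trailing register copy. In pseudo-notation:

   //  MOV  r, [mem]       load lands in r directly (sugg_src2_r == dst_r)
   //  ADD  r, r, [mem]    loaded value must go to TMP_REG2 first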
*/ - if (src2 <= ZERO_REG) { + if (FAST_IS_REG(src2)) { src2_r = src2; flags |= REG2_SOURCE; - if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) + + if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOV_P) dst_r = src2_r; } else if (src2 & SLJIT_IMM) { FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w)); src2_r = sugg_src2_r; } - else if (getput_arg_fast(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w)) { - FAIL_IF(compiler->error); - src2_r = sugg_src2_r; - } - else - src2_r = 0; - - /* src1_r, src2_r and dst_r can be zero (=unprocessed). - All arguments are complex addressing modes, and it is a binary operator. */ - if (src1_r == 0 && src2_r == 0 && dst_r == 0) { - if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { - FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w)); - FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); - } - else { - FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); - FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw)); - } - src1_r = TMP_REG1; - src2_r = TMP_REG2; - } - else if (src1_r == 0 && src2_r == 0) { - FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); - src1_r = TMP_REG1; - } - else if (src1_r == 0 && dst_r == 0) { - FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); - src1_r = TMP_REG1; - } - else if (src2_r == 0 && dst_r == 0) { - FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw)); - src2_r = sugg_src2_r; - } - - if (dst_r == 0) - dst_r = TMP_REG2; - - if (src1_r == 0) { - FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, 0, 0)); - src1_r = TMP_REG1; - } - - if (src2_r == 0) { - FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w, 0, 0)); + else { + FAIL_IF(emit_op_mem(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w, TMP_REG2)); src2_r = sugg_src2_r; } FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r)); - if (flags & (FAST_DEST | SLOW_DEST)) { - if (flags & FAST_DEST) - FAIL_IF(getput_arg_fast(compiler, input_flags, dst_r, dst, dstw)); - else - FAIL_IF(getput_arg(compiler, input_flags, dst_r, dst, dstw, 0, 0)); - } - return SLJIT_SUCCESS; + if (!(dst & SLJIT_MEM)) + return SLJIT_SUCCESS; + + return emit_op_mem(compiler, input_flags, dst_r, dst, dstw, TMP_REG1); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) { +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + sljit_s32 int_op = op & SLJIT_I32_OP; +#endif + CHECK_ERROR(); - check_sljit_emit_op0(compiler, op); + CHECK(check_sljit_emit_op0(compiler, op)); - switch (GET_OPCODE(op)) { + op = GET_OPCODE(op); + switch (op) { case SLJIT_BREAKPOINT: case SLJIT_NOP: return push_inst(compiler, NOP); - break; - case SLJIT_UMUL: - case SLJIT_SMUL: - FAIL_IF(push_inst(compiler, OR | S(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG1))); + case SLJIT_LMUL_UW: + case SLJIT_LMUL_SW: + FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R0))); #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - FAIL_IF(push_inst(compiler, MULLD | D(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2))); - return 
push_inst(compiler, (GET_OPCODE(op) == SLJIT_UMUL ? MULHDU : MULHD) | D(SLJIT_SCRATCH_REG2) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2)); + FAIL_IF(push_inst(compiler, MULLD | D(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R1))); + return push_inst(compiler, (op == SLJIT_LMUL_UW ? MULHDU : MULHD) | D(SLJIT_R1) | A(TMP_REG1) | B(SLJIT_R1)); #else - FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2))); - return push_inst(compiler, (GET_OPCODE(op) == SLJIT_UMUL ? MULHWU : MULHW) | D(SLJIT_SCRATCH_REG2) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2)); + FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R1))); + return push_inst(compiler, (op == SLJIT_LMUL_UW ? MULHWU : MULHW) | D(SLJIT_R1) | A(TMP_REG1) | B(SLJIT_R1)); #endif - case SLJIT_UDIV: - case SLJIT_SDIV: - FAIL_IF(push_inst(compiler, OR | S(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG1))); + case SLJIT_DIVMOD_UW: + case SLJIT_DIVMOD_SW: + FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R0))); #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - if (op & SLJIT_INT_OP) { - FAIL_IF(push_inst(compiler, (GET_OPCODE(op) == SLJIT_UDIV ? DIVWU : DIVW) | D(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2))); - FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG1) | B(SLJIT_SCRATCH_REG2))); - return push_inst(compiler, SUBF | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG2) | B(TMP_REG1)); - } - FAIL_IF(push_inst(compiler, (GET_OPCODE(op) == SLJIT_UDIV ? DIVDU : DIVD) | D(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2))); - FAIL_IF(push_inst(compiler, MULLD | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG1) | B(SLJIT_SCRATCH_REG2))); - return push_inst(compiler, SUBF | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG2) | B(TMP_REG1)); + FAIL_IF(push_inst(compiler, (int_op ? (op == SLJIT_DIVMOD_UW ? DIVWU : DIVW) : (op == SLJIT_DIVMOD_UW ? DIVDU : DIVD)) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1))); + FAIL_IF(push_inst(compiler, (int_op ? MULLW : MULLD) | D(SLJIT_R1) | A(SLJIT_R0) | B(SLJIT_R1))); +#else + FAIL_IF(push_inst(compiler, (op == SLJIT_DIVMOD_UW ? DIVWU : DIVW) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1))); + FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_R1) | A(SLJIT_R0) | B(SLJIT_R1))); +#endif + return push_inst(compiler, SUBF | D(SLJIT_R1) | A(SLJIT_R1) | B(TMP_REG1)); + case SLJIT_DIV_UW: + case SLJIT_DIV_SW: +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + return push_inst(compiler, (int_op ? (op == SLJIT_DIV_UW ? DIVWU : DIVW) : (op == SLJIT_DIV_UW ? DIVDU : DIVD)) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1)); #else - FAIL_IF(push_inst(compiler, (GET_OPCODE(op) == SLJIT_UDIV ? DIVWU : DIVW) | D(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2))); - FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG1) | B(SLJIT_SCRATCH_REG2))); - return push_inst(compiler, SUBF | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG2) | B(TMP_REG1)); + return push_inst(compiler, (op == SLJIT_DIV_UW ? DIVWU : DIVW) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1)); #endif } return SLJIT_SUCCESS; } +static sljit_s32 emit_prefetch(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ + if (!(src & OFFS_REG_MASK)) { + if (srcw == 0 && (src & REG_MASK) != SLJIT_UNUSED) + return push_inst(compiler, DCBT | A(0) | B(src & REG_MASK)); + + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); + /* Works with SLJIT_MEM0() case as well. 
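   It does, because a zero A() field again means the constant 0 rather than a
   base register, and TMP_REG1 already holds the absolute srcw at this point.
   The dispatch in this helper, summarized (illustrative):

   //  SLJIT_MEM0(), srcw           dcbt 0, TMP_REG1       TMP_REG1 = srcw
   //  SLJIT_MEM1(r), 0             dcbt 0, r
   //  SLJIT_MEM1(r), off           dcbt r, TMP_REG1       TMP_REG1 = off
   //  SLJIT_MEM2(r, ri), shift     dcbt r, ri   (ri pre-shifted into TMP_REG1 if shift != 0)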
*/ + return push_inst(compiler, DCBT | A(src & REG_MASK) | B(TMP_REG1)); + } + + srcw &= 0x3; + + if (srcw == 0) + return push_inst(compiler, DCBT | A(src & REG_MASK) | B(OFFS_REG(src))); + +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(src)) | A(TMP_REG1) | (srcw << 11) | ((31 - srcw) << 1))); +#else + FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, OFFS_REG(src), srcw, 63 - srcw, 1))); +#endif + return push_inst(compiler, DCBT | A(src & REG_MASK) | B(TMP_REG1)); +} + #define EMIT_MOV(type, type_flags, type_cast) \ emit_op(compiler, (src & SLJIT_IMM) ? SLJIT_MOV : type, flags | (type_flags), dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? type_cast srcw : srcw) -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_si flags = GET_FLAGS(op) ? ALT_SET_FLAGS : 0; - sljit_si op_flags = GET_ALL_FLAGS(op); + sljit_s32 flags = HAS_FLAGS(op) ? ALT_SET_FLAGS : 0; + sljit_s32 op_flags = GET_ALL_FLAGS(op); CHECK_ERROR(); - check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); + CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw)); ADJUST_LOCAL_OFFSET(dst, dstw); ADJUST_LOCAL_OFFSET(src, srcw); + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) { + if (op <= SLJIT_MOV_P && (src & SLJIT_MEM)) + return emit_prefetch(compiler, src, srcw); + + return SLJIT_SUCCESS; + } + op = GET_OPCODE(op); if ((src & SLJIT_IMM) && srcw == 0) - src = ZERO_REG; + src = TMP_ZERO; - if (op_flags & SLJIT_SET_O) - FAIL_IF(push_inst(compiler, MTXER | S(ZERO_REG))); + if (GET_FLAG_TYPE(op_flags) == SLJIT_OVERFLOW) + FAIL_IF(push_inst(compiler, MTXER | S(TMP_ZERO))); + + if (op < SLJIT_NOT && FAST_IS_REG(src) && src == dst) { + if (!TYPE_CAST_NEEDED(op)) + return SLJIT_SUCCESS; + } - if (op_flags & SLJIT_INT_OP) { - if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) { - if (src <= ZERO_REG && src == dst) { - if (!TYPE_CAST_NEEDED(op)) - return SLJIT_SUCCESS; - } #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - if (op == SLJIT_MOV_SI && (src & SLJIT_MEM)) - op = SLJIT_MOV_UI; - if (op == SLJIT_MOVU_SI && (src & SLJIT_MEM)) - op = SLJIT_MOVU_UI; - if (op == SLJIT_MOV_UI && (src & SLJIT_IMM)) - op = SLJIT_MOV_SI; - if (op == SLJIT_MOVU_UI && (src & SLJIT_IMM)) - op = SLJIT_MOVU_SI; -#endif + if (op_flags & SLJIT_I32_OP) { + if (op < SLJIT_NOT) { + if (src & SLJIT_MEM) { + if (op == SLJIT_MOV_S32) + op = SLJIT_MOV_U32; + } + else if (src & SLJIT_IMM) { + if (op == SLJIT_MOV_U32) + op = SLJIT_MOV_S32; + } } -#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) else { /* Most operations expect sign extended arguments. 
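   This is also why the two opcode swaps above are sound: within a 32-bit
   operation only the low word of the destination matters, and zero- and
   sign-extending loads agree on the low word, so the plain lwz (which,
   unlike lwa, has no 4-byte-aligned DS-form restriction) can serve for both;
   32-bit immediates take the sign-extending opcode instead, presumably
   because load_immediate materializes constants in sign-extended form.
   Quick check:

   //  x = 0x80000000
   //  zero-extended:  0x0000000080000000
   //  sign-extended:  0xffffffff80000000
   //  low 32 bits  :  0x80000000 either way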
*/ flags |= INT_DATA | SIGNED_DATA; - if (src & SLJIT_IMM) - srcw = (sljit_si)srcw; + if (HAS_FLAGS(op_flags)) + flags |= ALT_SIGN_EXT; } -#endif } +#endif switch (op) { case SLJIT_MOV: case SLJIT_MOV_P: #if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - case SLJIT_MOV_UI: - case SLJIT_MOV_SI: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: #endif return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw); #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - case SLJIT_MOV_UI: - return EMIT_MOV(SLJIT_MOV_UI, INT_DATA, (sljit_ui)); + case SLJIT_MOV_U32: + return EMIT_MOV(SLJIT_MOV_U32, INT_DATA, (sljit_u32)); - case SLJIT_MOV_SI: - return EMIT_MOV(SLJIT_MOV_SI, INT_DATA | SIGNED_DATA, (sljit_si)); + case SLJIT_MOV_S32: + return EMIT_MOV(SLJIT_MOV_S32, INT_DATA | SIGNED_DATA, (sljit_s32)); #endif - case SLJIT_MOV_UB: - return EMIT_MOV(SLJIT_MOV_UB, BYTE_DATA, (sljit_ub)); - - case SLJIT_MOV_SB: - return EMIT_MOV(SLJIT_MOV_SB, BYTE_DATA | SIGNED_DATA, (sljit_sb)); + case SLJIT_MOV_U8: + return EMIT_MOV(SLJIT_MOV_U8, BYTE_DATA, (sljit_u8)); - case SLJIT_MOV_UH: - return EMIT_MOV(SLJIT_MOV_UH, HALF_DATA, (sljit_uh)); + case SLJIT_MOV_S8: + return EMIT_MOV(SLJIT_MOV_S8, BYTE_DATA | SIGNED_DATA, (sljit_s8)); - case SLJIT_MOV_SH: - return EMIT_MOV(SLJIT_MOV_SH, HALF_DATA | SIGNED_DATA, (sljit_sh)); + case SLJIT_MOV_U16: + return EMIT_MOV(SLJIT_MOV_U16, HALF_DATA, (sljit_u16)); - case SLJIT_MOVU: - case SLJIT_MOVU_P: -#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - case SLJIT_MOVU_UI: - case SLJIT_MOVU_SI: -#endif - return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); - -#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - case SLJIT_MOVU_UI: - return EMIT_MOV(SLJIT_MOV_UI, INT_DATA | WRITE_BACK, (sljit_ui)); - - case SLJIT_MOVU_SI: - return EMIT_MOV(SLJIT_MOV_SI, INT_DATA | SIGNED_DATA | WRITE_BACK, (sljit_si)); -#endif - - case SLJIT_MOVU_UB: - return EMIT_MOV(SLJIT_MOV_UB, BYTE_DATA | WRITE_BACK, (sljit_ub)); - - case SLJIT_MOVU_SB: - return EMIT_MOV(SLJIT_MOV_SB, BYTE_DATA | SIGNED_DATA | WRITE_BACK, (sljit_sb)); - - case SLJIT_MOVU_UH: - return EMIT_MOV(SLJIT_MOV_UH, HALF_DATA | WRITE_BACK, (sljit_uh)); - - case SLJIT_MOVU_SH: - return EMIT_MOV(SLJIT_MOV_SH, HALF_DATA | SIGNED_DATA | WRITE_BACK, (sljit_sh)); + case SLJIT_MOV_S16: + return EMIT_MOV(SLJIT_MOV_S16, HALF_DATA | SIGNED_DATA, (sljit_s16)); case SLJIT_NOT: return emit_op(compiler, SLJIT_NOT, flags, dst, dstw, TMP_REG1, 0, src, srcw); case SLJIT_NEG: - return emit_op(compiler, SLJIT_NEG, flags, dst, dstw, TMP_REG1, 0, src, srcw); + return emit_op(compiler, SLJIT_NEG, flags | (GET_FLAG_TYPE(op_flags) ? ALT_FORM1 : 0), dst, dstw, TMP_REG1, 0, src, srcw); case SLJIT_CLZ: #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - return emit_op(compiler, SLJIT_CLZ, flags | (!(op_flags & SLJIT_INT_OP) ? 0 : ALT_FORM1), dst, dstw, TMP_REG1, 0, src, srcw); + return emit_op(compiler, SLJIT_CLZ, flags | (!(op_flags & SLJIT_I32_OP) ? 
0 : ALT_FORM1), dst, dstw, TMP_REG1, 0, src, srcw); #else return emit_op(compiler, SLJIT_CLZ, flags, dst, dstw, TMP_REG1, 0, src, srcw); #endif @@ -1268,7 +1299,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) #define TEST_SH_IMM(src, srcw) \ - (((src) & SLJIT_IMM) && !((srcw) & 0xffff) && (srcw) <= SLJIT_W(0x7fffffff) && (srcw) >= SLJIT_W(-0x80000000)) + (((src) & SLJIT_IMM) && !((srcw) & 0xffff) && (srcw) <= 0x7fffffffl && (srcw) >= -0x80000000l) #else #define TEST_SH_IMM(src, srcw) \ (((src) & SLJIT_IMM) && !((srcw) & 0xffff)) @@ -1279,7 +1310,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) #define TEST_ADD_IMM(src, srcw) \ - (((src) & SLJIT_IMM) && (srcw) <= SLJIT_W(0x7fff7fff) && (srcw) >= SLJIT_W(-0x80000000)) + (((src) & SLJIT_IMM) && (srcw) <= 0x7fff7fffl && (srcw) >= -0x80000000l) #else #define TEST_ADD_IMM(src, srcw) \ ((src) & SLJIT_IMM) @@ -1293,71 +1324,75 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler ((src) & SLJIT_IMM) #endif -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_si flags = GET_FLAGS(op) ? ALT_SET_FLAGS : 0; + sljit_s32 flags = HAS_FLAGS(op) ? ALT_SET_FLAGS : 0; CHECK_ERROR(); - check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); ADJUST_LOCAL_OFFSET(dst, dstw); ADJUST_LOCAL_OFFSET(src1, src1w); ADJUST_LOCAL_OFFSET(src2, src2w); + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) + return SLJIT_SUCCESS; + if ((src1 & SLJIT_IMM) && src1w == 0) - src1 = ZERO_REG; + src1 = TMP_ZERO; if ((src2 & SLJIT_IMM) && src2w == 0) - src2 = ZERO_REG; + src2 = TMP_ZERO; #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - if (op & SLJIT_INT_OP) { + if (op & SLJIT_I32_OP) { /* Most operations expect sign extended arguments. 
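   Hence the two casts just below: a 32-bit immediate is normalized to its
   sign-extended 64-bit form up front, and when the operation also produces
   flags, ALT_SIGN_EXT asks for the register operands to be extended the same
   way, so comparisons and carries see consistent 64-bit values. For example:

   //  (sljit_s32)0xffffffff   ->  -1  ->  0xffffffffffffffff
   //  left as 0x00000000ffffffff, a 64-bit signed compare would read -1 as a
   //  large positive number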
*/ flags |= INT_DATA | SIGNED_DATA; if (src1 & SLJIT_IMM) - src1w = (sljit_si)(src1w); + src1w = (sljit_s32)(src1w); if (src2 & SLJIT_IMM) - src2w = (sljit_si)(src2w); - if (GET_FLAGS(op)) + src2w = (sljit_s32)(src2w); + if (HAS_FLAGS(op)) flags |= ALT_SIGN_EXT; } #endif - if (op & SLJIT_SET_O) - FAIL_IF(push_inst(compiler, MTXER | S(ZERO_REG))); - if (src2 == TMP_REG2) - flags |= ALT_KEEP_CACHE; + if (GET_FLAG_TYPE(op) == SLJIT_OVERFLOW) + FAIL_IF(push_inst(compiler, MTXER | S(TMP_ZERO))); switch (GET_OPCODE(op)) { case SLJIT_ADD: - if (!GET_FLAGS(op) && ((src1 | src2) & SLJIT_IMM)) { + if (GET_FLAG_TYPE(op) == SLJIT_OVERFLOW) + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM1, dst, dstw, src1, src1w, src2, src2w); + + if (!HAS_FLAGS(op) && ((src1 | src2) & SLJIT_IMM)) { if (TEST_SL_IMM(src2, src2w)) { compiler->imm = src2w & 0xffff; - return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0); + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0); } if (TEST_SL_IMM(src1, src1w)) { compiler->imm = src1w & 0xffff; - return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM1, dst, dstw, src2, src2w, TMP_REG2, 0); + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0); } if (TEST_SH_IMM(src2, src2w)) { compiler->imm = (src2w >> 16) & 0xffff; - return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0); + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); } if (TEST_SH_IMM(src1, src1w)) { compiler->imm = (src1w >> 16) & 0xffff; - return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0); + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM3, dst, dstw, src2, src2w, TMP_REG2, 0); } /* Range between -1 and -32768 is covered above. */ if (TEST_ADD_IMM(src2, src2w)) { compiler->imm = src2w & 0xffffffff; - return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM4, dst, dstw, src1, src1w, TMP_REG2, 0); + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM4, dst, dstw, src1, src1w, TMP_REG2, 0); } if (TEST_ADD_IMM(src1, src1w)) { compiler->imm = src1w & 0xffffffff; - return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM4, dst, dstw, src2, src2w, TMP_REG2, 0); + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM4, dst, dstw, src2, src2w, TMP_REG2, 0); } } - if (!(GET_FLAGS(op) & (SLJIT_SET_E | SLJIT_SET_O))) { + if (HAS_FLAGS(op)) { if (TEST_SL_IMM(src2, src2w)) { compiler->imm = src2w & 0xffff; return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); @@ -1367,75 +1402,75 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM3, dst, dstw, src2, src2w, TMP_REG2, 0); } } - return emit_op(compiler, SLJIT_ADD, flags, dst, dstw, src1, src1w, src2, src2w); + return emit_op(compiler, SLJIT_ADD, flags | ((GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY)) ? ALT_FORM4 : 0), dst, dstw, src1, src1w, src2, src2w); case SLJIT_ADDC: - return emit_op(compiler, SLJIT_ADDC, flags | (!(op & SLJIT_KEEP_FLAGS) ? 
0 : ALT_FORM1), dst, dstw, src1, src1w, src2, src2w); + return emit_op(compiler, SLJIT_ADDC, flags, dst, dstw, src1, src1w, src2, src2w); case SLJIT_SUB: - if (!GET_FLAGS(op) && ((src1 | src2) & SLJIT_IMM)) { + if (GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_LESS_EQUAL) { + if (dst == SLJIT_UNUSED) { + if (TEST_UL_IMM(src2, src2w)) { + compiler->imm = src2w & 0xffff; + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM1 | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0); + } + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM1, dst, dstw, src1, src1w, src2, src2w); + } + + if ((src2 & SLJIT_IMM) && src2w >= 0 && src2w <= (SIMM_MAX + 1)) { + compiler->imm = src2w; + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM1 | ALT_FORM2 | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); + } + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM1 | ALT_FORM3, dst, dstw, src1, src1w, src2, src2w); + } + + if (GET_FLAG_TYPE(op) == SLJIT_OVERFLOW) + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2, dst, dstw, src1, src1w, src2, src2w); + + if (!HAS_FLAGS(op) && ((src1 | src2) & SLJIT_IMM)) { if (TEST_SL_IMM(src2, -src2w)) { compiler->imm = (-src2w) & 0xffff; - return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0); + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0); } if (TEST_SL_IMM(src1, src1w)) { compiler->imm = src1w & 0xffff; - return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM1, dst, dstw, src2, src2w, TMP_REG2, 0); + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM3, dst, dstw, src2, src2w, TMP_REG2, 0); } if (TEST_SH_IMM(src2, -src2w)) { compiler->imm = ((-src2w) >> 16) & 0xffff; - return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0); + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); } /* Range between -1 and -32768 is covered above. */ if (TEST_ADD_IMM(src2, -src2w)) { compiler->imm = -src2w & 0xffffffff; - return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM4, dst, dstw, src1, src1w, TMP_REG2, 0); + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2 | ALT_FORM4, dst, dstw, src1, src1w, TMP_REG2, 0); } } - if (dst == SLJIT_UNUSED && (op & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U)) && !(op & (SLJIT_SET_O | SLJIT_SET_C))) { - if (!(op & SLJIT_SET_U)) { - /* We know ALT_SIGN_EXT is set if it is an SLJIT_INT_OP on 64 bit systems. */ - if (TEST_SL_IMM(src2, src2w)) { - compiler->imm = src2w & 0xffff; - return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0); - } - if (GET_FLAGS(op) == SLJIT_SET_E && TEST_SL_IMM(src1, src1w)) { - compiler->imm = src1w & 0xffff; - return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0); - } - } - if (!(op & (SLJIT_SET_E | SLJIT_SET_S))) { - /* We know ALT_SIGN_EXT is set if it is an SLJIT_INT_OP on 64 bit systems. 
*/ - if (TEST_UL_IMM(src2, src2w)) { - compiler->imm = src2w & 0xffff; - return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); - } - return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM4, dst, dstw, src1, src1w, src2, src2w); - } - if ((src2 & SLJIT_IMM) && src2w >= 0 && src2w <= 0x7fff) { - compiler->imm = src2w; - return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2 | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); + + if (dst == SLJIT_UNUSED && GET_FLAG_TYPE(op) != GET_FLAG_TYPE(SLJIT_SET_CARRY)) { + if (TEST_SL_IMM(src2, src2w)) { + compiler->imm = src2w & 0xffff; + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM4 | ALT_FORM5, dst, dstw, src1, src1w, TMP_REG2, 0); } - return emit_op(compiler, SLJIT_SUB, flags | ((op & SLJIT_SET_U) ? ALT_FORM4 : 0) | ((op & (SLJIT_SET_E | SLJIT_SET_S)) ? ALT_FORM5 : 0), dst, dstw, src1, src1w, src2, src2w); + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM4, dst, dstw, src1, src1w, src2, src2w); } - if (!(op & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O))) { - if (TEST_SL_IMM(src2, -src2w)) { - compiler->imm = (-src2w) & 0xffff; - return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); - } + + if (TEST_SL_IMM(src2, -src2w)) { + compiler->imm = (-src2w) & 0xffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); } - /* We know ALT_SIGN_EXT is set if it is an SLJIT_INT_OP on 64 bit systems. */ - return emit_op(compiler, SLJIT_SUB, flags | (!(op & SLJIT_SET_U) ? 0 : ALT_FORM6), dst, dstw, src1, src1w, src2, src2w); + /* We know ALT_SIGN_EXT is set if it is an SLJIT_I32_OP on 64 bit systems. */ + return emit_op(compiler, SLJIT_SUB, flags | ((GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY)) ? ALT_FORM5 : 0), dst, dstw, src1, src1w, src2, src2w); case SLJIT_SUBC: - return emit_op(compiler, SLJIT_SUBC, flags | (!(op & SLJIT_KEEP_FLAGS) ? 0 : ALT_FORM1), dst, dstw, src1, src1w, src2, src2w); + return emit_op(compiler, SLJIT_SUBC, flags, dst, dstw, src1, src1w, src2, src2w); case SLJIT_MUL: #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - if (op & SLJIT_INT_OP) + if (op & SLJIT_I32_OP) flags |= ALT_FORM2; #endif - if (!GET_FLAGS(op)) { + if (!HAS_FLAGS(op)) { if (TEST_SL_IMM(src2, src2w)) { compiler->imm = src2w & 0xffff; return emit_op(compiler, SLJIT_MUL, flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0); @@ -1445,13 +1480,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler return emit_op(compiler, SLJIT_MUL, flags | ALT_FORM1, dst, dstw, src2, src2w, TMP_REG2, 0); } } + else + FAIL_IF(push_inst(compiler, MTXER | S(TMP_ZERO))); return emit_op(compiler, SLJIT_MUL, flags, dst, dstw, src1, src1w, src2, src2w); case SLJIT_AND: case SLJIT_OR: case SLJIT_XOR: /* Commutative unsigned operations. */ - if (!GET_FLAGS(op) || GET_OPCODE(op) == SLJIT_AND) { + if (!HAS_FLAGS(op) || GET_OPCODE(op) == SLJIT_AND) { if (TEST_UL_IMM(src2, src2w)) { compiler->imm = src2w; return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0); @@ -1469,7 +1506,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0); } } - if (!GET_FLAGS(op) && GET_OPCODE(op) != SLJIT_AND) { + if (GET_OPCODE(op) != SLJIT_AND && GET_OPCODE(op) != SLJIT_AND) { + /* Unlike or and xor, and resets unwanted bits as well. 
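   That is the point of the opcode test above: a 32-bit immediate can be
   applied as two 16-bit halves for or and xor because the halves compose,

   //  a | imm  ==  (a | (imm & 0xffff)) | (imm & 0xffff0000)     ori, then oris
   //  a ^ imm  ==  (a ^ (imm & 0xffff)) ^ (imm & 0xffff0000)     xori, then xoris

   while chaining two ands would intersect both masks and clear everything
   outside their (typically empty) overlap, so and with a wide immediate falls
   through to the register form instead.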
*/ if (TEST_UI_IMM(src2, src2w)) { compiler->imm = src2w; return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); @@ -1481,14 +1519,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler } return emit_op(compiler, GET_OPCODE(op), flags, dst, dstw, src1, src1w, src2, src2w); - case SLJIT_ASHR: - if (op & SLJIT_KEEP_FLAGS) - flags |= ALT_FORM3; - /* Fall through. */ case SLJIT_SHL: case SLJIT_LSHR: + case SLJIT_ASHR: #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - if (op & SLJIT_INT_OP) + if (op & SLJIT_I32_OP) flags |= ALT_FORM2; #endif if (src2 & SLJIT_IMM) { @@ -1501,24 +1536,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg) { - check_sljit_get_register_index(reg); + CHECK_REG_INDEX(check_sljit_get_register_index(reg)); return reg_map[reg]; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_float_register_index(sljit_si reg) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg) { - check_sljit_get_float_register_index(reg); - return reg; + CHECK_REG_INDEX(check_sljit_get_float_register_index(reg)); + return freg_map[reg]; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, - void *instruction, sljit_si size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_s32 size) { CHECK_ERROR(); - check_sljit_emit_op_custom(compiler, instruction, size); - SLJIT_ASSERT(size == 4); + CHECK(check_sljit_emit_op_custom(compiler, instruction, size)); return push_inst(compiler, *(sljit_ins*)instruction); } @@ -1527,164 +1561,292 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *co /* Floating point operators */ /* --------------------------------------------------------------------- */ -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) -{ - /* Always available. */ - return 1; -} +#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 6)) +#define SELECT_FOP(op, single, double) ((op & SLJIT_F32_OP) ? single : double) -#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 6)) -#define SELECT_FOP(op, single, double) ((op & SLJIT_SINGLE_OP) ? 
single : double) +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define FLOAT_TMP_MEM_OFFSET (6 * sizeof(sljit_sw)) +#else +#define FLOAT_TMP_MEM_OFFSET (2 * sizeof(sljit_sw)) -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) -{ - sljit_si dst_fr; +#if (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) +#define FLOAT_TMP_MEM_OFFSET_LOW (2 * sizeof(sljit_sw)) +#define FLOAT_TMP_MEM_OFFSET_HI (3 * sizeof(sljit_sw)) +#else +#define FLOAT_TMP_MEM_OFFSET_LOW (3 * sizeof(sljit_sw)) +#define FLOAT_TMP_MEM_OFFSET_HI (2 * sizeof(sljit_sw)) +#endif - CHECK_ERROR(); - check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); - SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x4), float_transfer_bit_error); +#endif /* SLJIT_CONFIG_PPC_64 */ - compiler->cache_arg = 0; - compiler->cache_argw = 0; +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + if (src & SLJIT_MEM) { + /* We can ignore the temporary data store on the stack from caching point of view. */ + FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, TMP_REG1)); + src = TMP_FREG1; + } - if (GET_OPCODE(op) == SLJIT_CMPD) { - if (dst > SLJIT_FLOAT_REG6) { - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, dst, dstw, src, srcw)); - dst = TMP_FREG1; - } +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + op = GET_OPCODE(op); + FAIL_IF(push_inst(compiler, (op == SLJIT_CONV_S32_FROM_F64 ? FCTIWZ : FCTIDZ) | FD(TMP_FREG1) | FB(src))); - if (src > SLJIT_FLOAT_REG6) { - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src, srcw, 0, 0)); - src = TMP_FREG2; + if (op == SLJIT_CONV_SW_FROM_F64) { + if (FAST_IS_REG(dst)) { + FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1)); + return emit_op_mem(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1); } + return emit_op_mem(compiler, DOUBLE_DATA, TMP_FREG1, dst, dstw, TMP_REG1); + } +#else + FAIL_IF(push_inst(compiler, FCTIWZ | FD(TMP_FREG1) | FB(src))); +#endif + + if (FAST_IS_REG(dst)) { + FAIL_IF(load_immediate(compiler, TMP_REG1, FLOAT_TMP_MEM_OFFSET)); + FAIL_IF(push_inst(compiler, STFIWX | FS(TMP_FREG1) | A(SLJIT_SP) | B(TMP_REG1))); + return emit_op_mem(compiler, INT_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1); + } - return push_inst(compiler, FCMPU | CRD(4) | FA(dst) | FB(src)); + SLJIT_ASSERT(dst & SLJIT_MEM); + + if (dst & OFFS_REG_MASK) { + dstw &= 0x3; + if (dstw) { +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(dst)) | A(TMP_REG1) | (dstw << 11) | ((31 - dstw) << 1))); +#else + FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, OFFS_REG(dst), dstw, 63 - dstw, 1))); +#endif + dstw = TMP_REG1; + } + else + dstw = OFFS_REG(dst); + } + else { + if ((dst & REG_MASK) && !dstw) { + dstw = dst & REG_MASK; + dst = 0; + } + else { + /* This works regardless we have SLJIT_MEM1 or SLJIT_MEM0. */ + FAIL_IF(load_immediate(compiler, TMP_REG1, dstw)); + dstw = TMP_REG1; + } } - dst_fr = (dst > SLJIT_FLOAT_REG6) ? 
TMP_FREG1 : dst;
+ return push_inst(compiler, STFIWX | FS(TMP_FREG1) | A(dst & REG_MASK) | B(dstw));
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
- if (src > SLJIT_FLOAT_REG6) {
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_fr, src, srcw, dst, dstw));
- src = dst_fr;
+ if (src & SLJIT_IMM) {
+ if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
+ srcw = (sljit_s32)srcw;
+ FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
+ src = TMP_REG1;
+ }
+ else if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) {
+ if (FAST_IS_REG(src))
+ FAIL_IF(push_inst(compiler, EXTSW | S(src) | A(TMP_REG1)));
+ else
+ FAIL_IF(emit_op_mem(compiler, INT_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
}
- switch (GET_OPCODE(op)) {
- case SLJIT_MOVD:
- if (src != dst_fr && dst_fr != TMP_FREG1)
- FAIL_IF(push_inst(compiler, FMR | FD(dst_fr) | FB(src)));
- break;
- case SLJIT_NEGD:
- FAIL_IF(push_inst(compiler, FNEG | FD(dst_fr) | FB(src)));
- break;
- case SLJIT_ABSD:
- FAIL_IF(push_inst(compiler, FABS | FD(dst_fr) | FB(src)));
- break;
+ if (FAST_IS_REG(src)) {
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA, src, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
+ FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1));
}
+ else
+ FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, TMP_REG1));
+
+ FAIL_IF(push_inst(compiler, FCFID | FD(dst_r) | FB(TMP_FREG1)));
- if (dst_fr == TMP_FREG1) {
- if (GET_OPCODE(op) == SLJIT_MOVD)
- dst_fr = src;
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_fr, dst, dstw, 0, 0));
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, TMP_REG1);
+ if (op & SLJIT_F32_OP)
+ return push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r));
+ return SLJIT_SUCCESS;
+
+#else
+
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
+ sljit_s32 invert_sign = 1;
+
+ if (src & SLJIT_IMM) {
+ FAIL_IF(load_immediate(compiler, TMP_REG1, srcw ^ 0x80000000));
+ src = TMP_REG1;
+ invert_sign = 0;
}
+ else if (!FAST_IS_REG(src)) {
+ FAIL_IF(emit_op_mem(compiler, WORD_DATA | SIGNED_DATA | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
+ src = TMP_REG1;
+ }
+
+ /* First, a special double floating point value is constructed: (2^52 + (input xor (2^31)))
+ The double precision format has exactly 53 bit precision, so the lower 32 bits represent
+ the lower 32 bits of such a value. The result of xor 2^31 is the same as adding 0x80000000
+ to the input, which shifts it into the 0 - 0xffffffff range. To get the converted floating
+ point value, we need to subtract 2^52 + 2^31 from the constructed value.
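+ A worked example, with -1 (0xffffffff) as the assumed input: input
+ xor 2^31 is 0x7fffffff, so the constructed value is exactly
+ 2^52 + 0x7fffffff (bit pattern 0x43300000:7fffffff); subtracting
+ 2^52 + 2^31 (bit pattern 0x43300000:80000000) leaves exactly -1.0.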
*/ + FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG2) | A(0) | 0x4330)); + if (invert_sign) + FAIL_IF(push_inst(compiler, XORIS | S(src) | A(TMP_REG1) | 0x8000)); + FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_HI, TMP_REG1)); + FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW, TMP_REG2)); + FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG1) | A(0) | 0x8000)); + FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1)); + FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET_LOW, TMP_REG2)); + FAIL_IF(emit_op_mem(compiler, DOUBLE_DATA | LOAD_DATA, TMP_FREG2, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, TMP_REG1)); + + FAIL_IF(push_inst(compiler, FSUB | FD(dst_r) | FA(TMP_FREG1) | FB(TMP_FREG2))); + if (dst & SLJIT_MEM) + return emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, TMP_REG1); + if (op & SLJIT_F32_OP) + return push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r)); return SLJIT_SUCCESS; + +#endif } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_si dst_fr, flags = 0; + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, TMP_REG1)); + src1 = TMP_FREG1; + } + + if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, TMP_REG2)); + src2 = TMP_FREG2; + } + + return push_inst(compiler, FCMPU | CRD(4) | FA(src1) | FB(src2)); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r; CHECK_ERROR(); - check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); - compiler->cache_arg = 0; - compiler->cache_argw = 0; + SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x4), float_transfer_bit_error); + SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw); - dst_fr = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG2 : dst; + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) + op ^= SLJIT_F32_OP; - if (src1 > SLJIT_FLOAT_REG6) { - if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) { - FAIL_IF(compiler->error); - src1 = TMP_FREG1; - } else - flags |= ALT_FORM1; - } + dst_r = FAST_IS_REG(dst) ? 
dst : TMP_FREG1; - if (src2 > SLJIT_FLOAT_REG6) { - if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) { - FAIL_IF(compiler->error); - src2 = TMP_FREG2; - } else - flags |= ALT_FORM2; + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, TMP_REG1)); + src = dst_r; } - if ((flags & (ALT_FORM1 | ALT_FORM2)) == (ALT_FORM1 | ALT_FORM2)) { - if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w)); - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + switch (GET_OPCODE(op)) { + case SLJIT_CONV_F64_FROM_F32: + op ^= SLJIT_F32_OP; + if (op & SLJIT_F32_OP) { + FAIL_IF(push_inst(compiler, FRSP | FD(dst_r) | FB(src))); + break; } - else { - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + /* Fall through. */ + case SLJIT_MOV_F64: + if (src != dst_r) { + if (dst_r != TMP_FREG1) + FAIL_IF(push_inst(compiler, FMR | FD(dst_r) | FB(src))); + else + dst_r = src; } + break; + case SLJIT_NEG_F64: + FAIL_IF(push_inst(compiler, FNEG | FD(dst_r) | FB(src))); + break; + case SLJIT_ABS_F64: + FAIL_IF(push_inst(compiler, FABS | FD(dst_r) | FB(src))); + break; } - else if (flags & ALT_FORM1) - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); - else if (flags & ALT_FORM2) - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); - if (flags & ALT_FORM1) + if (dst & SLJIT_MEM) + FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op), dst_r, dst, dstw, TMP_REG1)); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 dst_r; + + CHECK_ERROR(); + CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + dst_r = FAST_IS_REG(dst) ? 
dst : TMP_FREG2;
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, TMP_REG1));
src1 = TMP_FREG1;
- if (flags & ALT_FORM2)
+ }
+
+ if (src2 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, TMP_REG2));
src2 = TMP_FREG2;
+ }
switch (GET_OPCODE(op)) {
- case SLJIT_ADDD:
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADD) | FD(dst_fr) | FA(src1) | FB(src2)));
+ case SLJIT_ADD_F64:
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADD) | FD(dst_r) | FA(src1) | FB(src2)));
break;
- case SLJIT_SUBD:
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUB) | FD(dst_fr) | FA(src1) | FB(src2)));
+ case SLJIT_SUB_F64:
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUB) | FD(dst_r) | FA(src1) | FB(src2)));
break;
- case SLJIT_MULD:
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMUL) | FD(dst_fr) | FA(src1) | FC(src2) /* FMUL use FC as src2 */));
+ case SLJIT_MUL_F64:
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMUL) | FD(dst_r) | FA(src1) | FC(src2) /* FMUL uses FC as src2 */));
break;
- case SLJIT_DIVD:
- FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIV) | FD(dst_fr) | FA(src1) | FB(src2)));
+ case SLJIT_DIV_F64:
+ FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIV) | FD(dst_r) | FA(src1) | FB(src2)));
break;
}
- if (dst_fr == TMP_FREG2)
- FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0));
+ if (dst & SLJIT_MEM)
+ FAIL_IF(emit_op_mem(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, TMP_REG1));
return SLJIT_SUCCESS;
}
-#undef FLOAT_DATA
#undef SELECT_FOP
/* --------------------------------------------------------------------- */
/* Other instructions */
/* --------------------------------------------------------------------- */
-SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
CHECK_ERROR();
- check_sljit_emit_fast_enter(compiler, dst, dstw);
+ CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
ADJUST_LOCAL_OFFSET(dst, dstw);
- /* For UNUSED dst. Uncommon, but possible. */
- if (dst == SLJIT_UNUSED)
- return SLJIT_SUCCESS;
-
- if (dst <= ZERO_REG)
+ if (FAST_IS_REG(dst))
return push_inst(compiler, MFLR | D(dst));
/* Memory.
*/ @@ -1692,21 +1854,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *c return emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) { CHECK_ERROR(); - check_sljit_emit_fast_return(compiler, src, srcw); + CHECK(check_sljit_emit_fast_return(compiler, src, srcw)); ADJUST_LOCAL_OFFSET(src, srcw); - if (src <= ZERO_REG) + if (FAST_IS_REG(src)) FAIL_IF(push_inst(compiler, MTLR | S(src))); else { - if (src & SLJIT_MEM) - FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw)); - else if (src & SLJIT_IMM) - FAIL_IF(load_immediate(compiler, TMP_REG2, srcw)); + FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw)); FAIL_IF(push_inst(compiler, MTLR | S(TMP_REG2))); } + return push_inst(compiler, BLR); } @@ -1719,7 +1879,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi struct sljit_label *label; CHECK_ERROR_PTR(); - check_sljit_emit_label(compiler); + CHECK_PTR(check_sljit_emit_label(compiler)); if (compiler->last_label && compiler->last_label->size == compiler->size) return compiler->last_label; @@ -1730,76 +1890,76 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi return label; } -static sljit_ins get_bo_bi_flags(sljit_si type) +static sljit_ins get_bo_bi_flags(sljit_s32 type) { switch (type) { - case SLJIT_C_EQUAL: + case SLJIT_EQUAL: return (12 << 21) | (2 << 16); - case SLJIT_C_NOT_EQUAL: + case SLJIT_NOT_EQUAL: return (4 << 21) | (2 << 16); - case SLJIT_C_LESS: - case SLJIT_C_FLOAT_LESS: - return (12 << 21) | ((4 + 0) << 16); - - case SLJIT_C_GREATER_EQUAL: - case SLJIT_C_FLOAT_GREATER_EQUAL: - return (4 << 21) | ((4 + 0) << 16); - - case SLJIT_C_GREATER: - case SLJIT_C_FLOAT_GREATER: - return (12 << 21) | ((4 + 1) << 16); - - case SLJIT_C_LESS_EQUAL: - case SLJIT_C_FLOAT_LESS_EQUAL: - return (4 << 21) | ((4 + 1) << 16); - - case SLJIT_C_SIG_LESS: + case SLJIT_LESS: + case SLJIT_SIG_LESS: return (12 << 21) | (0 << 16); - case SLJIT_C_SIG_GREATER_EQUAL: + case SLJIT_GREATER_EQUAL: + case SLJIT_SIG_GREATER_EQUAL: return (4 << 21) | (0 << 16); - case SLJIT_C_SIG_GREATER: + case SLJIT_GREATER: + case SLJIT_SIG_GREATER: return (12 << 21) | (1 << 16); - case SLJIT_C_SIG_LESS_EQUAL: + case SLJIT_LESS_EQUAL: + case SLJIT_SIG_LESS_EQUAL: return (4 << 21) | (1 << 16); - case SLJIT_C_OVERFLOW: - case SLJIT_C_MUL_OVERFLOW: + case SLJIT_LESS_F64: + return (12 << 21) | ((4 + 0) << 16); + + case SLJIT_GREATER_EQUAL_F64: + return (4 << 21) | ((4 + 0) << 16); + + case SLJIT_GREATER_F64: + return (12 << 21) | ((4 + 1) << 16); + + case SLJIT_LESS_EQUAL_F64: + return (4 << 21) | ((4 + 1) << 16); + + case SLJIT_OVERFLOW: + case SLJIT_MUL_OVERFLOW: return (12 << 21) | (3 << 16); - case SLJIT_C_NOT_OVERFLOW: - case SLJIT_C_MUL_NOT_OVERFLOW: + case SLJIT_NOT_OVERFLOW: + case SLJIT_MUL_NOT_OVERFLOW: return (4 << 21) | (3 << 16); - case SLJIT_C_FLOAT_EQUAL: + case SLJIT_EQUAL_F64: return (12 << 21) | ((4 + 2) << 16); - case SLJIT_C_FLOAT_NOT_EQUAL: + case SLJIT_NOT_EQUAL_F64: return (4 << 21) | ((4 + 2) << 16); - case SLJIT_C_FLOAT_UNORDERED: + case SLJIT_UNORDERED_F64: return (12 << 21) | ((4 + 3) << 16); - case SLJIT_C_FLOAT_ORDERED: + case SLJIT_ORDERED_F64: return (4 << 21) | ((4 + 3) << 
16); default: - SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL3); + SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_CDECL); return (20 << 21); } } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) { struct sljit_jump *jump; sljit_ins bo_bi_flags; CHECK_ERROR_PTR(); - check_sljit_emit_jump(compiler, type); + CHECK_PTR(check_sljit_emit_jump(compiler, type)); bo_bi_flags = get_bo_bi_flags(type & 0xff); if (!bo_bi_flags) @@ -1811,39 +1971,74 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile type &= 0xff; /* In PPC, we don't need to touch the arguments. */ - if (type >= SLJIT_JUMP) - jump->flags |= UNCOND_B; + if (type < SLJIT_JUMP) + jump->flags |= IS_COND; +#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL) + if (type >= SLJIT_CALL) + jump->flags |= IS_CALL; +#endif - PTR_FAIL_IF(emit_const(compiler, TMP_REG1, 0)); - PTR_FAIL_IF(push_inst(compiler, MTCTR | S(TMP_REG1))); + PTR_FAIL_IF(emit_const(compiler, TMP_CALL_REG, 0)); + PTR_FAIL_IF(push_inst(compiler, MTCTR | S(TMP_CALL_REG))); jump->addr = compiler->size; PTR_FAIL_IF(push_inst(compiler, BCCTR | bo_bi_flags | (type >= SLJIT_FAST_CALL ? 1 : 0))); return jump; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL)); +#endif + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_jump(compiler, type); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) { struct sljit_jump *jump = NULL; - sljit_si src_r; + sljit_s32 src_r; CHECK_ERROR(); - check_sljit_emit_ijump(compiler, type, src, srcw); + CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); ADJUST_LOCAL_OFFSET(src, srcw); - if (src <= ZERO_REG) + if (FAST_IS_REG(src)) { +#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL) + if (type >= SLJIT_CALL) { + FAIL_IF(push_inst(compiler, OR | S(src) | A(TMP_CALL_REG) | B(src))); + src_r = TMP_CALL_REG; + } + else + src_r = src; +#else src_r = src; - else if (src & SLJIT_IMM) { +#endif + } else if (src & SLJIT_IMM) { + /* These jumps are converted to jump/call instructions when possible. 
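+ In outline: the target address is first emitted as a patchable
+ constant load into TMP_CALL_REG; if the displacement turns out to
+ fit when the code is generated, that load can be rewritten into a
+ direct branch or call.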
*/ jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); FAIL_IF(!jump); - set_jump(jump, compiler, JUMP_ADDR | UNCOND_B); + set_jump(jump, compiler, JUMP_ADDR); jump->u.target = srcw; - - FAIL_IF(emit_const(compiler, TMP_REG2, 0)); - src_r = TMP_REG2; +#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL) + if (type >= SLJIT_CALL) + jump->flags |= IS_CALL; +#endif + FAIL_IF(emit_const(compiler, TMP_CALL_REG, 0)); + src_r = TMP_CALL_REG; } else { - FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw)); - src_r = TMP_REG2; + FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_CALL_REG, 0, TMP_REG1, 0, src, srcw)); + src_r = TMP_CALL_REG; } FAIL_IF(push_inst(compiler, MTCTR | S(src_r))); @@ -1852,169 +2047,349 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compil return push_inst(compiler, BCCTR | (20 << 21) | (type >= SLJIT_FAST_CALL ? 1 : 0)); } -/* Get a bit from CR, all other bits are zeroed. */ -#define GET_CR_BIT(bit, dst) \ - FAIL_IF(push_inst(compiler, MFCR | D(dst))); \ - FAIL_IF(push_inst(compiler, RLWINM | S(dst) | A(dst) | ((1 + (bit)) << 11) | (31 << 6) | (31 << 1))); +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); -#define INVERT_BIT(dst) \ - FAIL_IF(push_inst(compiler, XORI | S(dst) | A(dst) | 0x1)); +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_CALL_REG, 0, TMP_REG1, 0, src, srcw)); + src = TMP_CALL_REG; + } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw, - sljit_si type) + FAIL_IF(call_with_args(compiler, arg_types, &src)); +#endif + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_ijump(compiler, type, src, srcw); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 type) { - sljit_si reg, input_flags; - sljit_si flags = GET_ALL_FLAGS(op); + sljit_s32 reg, input_flags, cr_bit, invert; + sljit_s32 saved_op = op; + sljit_sw saved_dstw = dstw; CHECK_ERROR(); - check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); + CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); ADJUST_LOCAL_OFFSET(dst, dstw); - if (dst == SLJIT_UNUSED) - return SLJIT_SUCCESS; - - op = GET_OPCODE(op); - reg = (op < SLJIT_ADD && dst <= ZERO_REG) ? dst : TMP_REG2; - - compiler->cache_arg = 0; - compiler->cache_argw = 0; - if (op >= SLJIT_ADD && (src & SLJIT_MEM)) { - ADJUST_LOCAL_OFFSET(src, srcw); #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - input_flags = (flags & SLJIT_INT_OP) ? INT_DATA : WORD_DATA; + input_flags = (op & SLJIT_I32_OP) ? INT_DATA : WORD_DATA; #else - input_flags = WORD_DATA; + input_flags = WORD_DATA; #endif - FAIL_IF(emit_op_mem2(compiler, input_flags | LOAD_DATA, TMP_REG1, src, srcw, dst, dstw)); - src = TMP_REG1; - srcw = 0; - } - switch (type) { - case SLJIT_C_EQUAL: - GET_CR_BIT(2, reg); + op = GET_OPCODE(op); + reg = (op < SLJIT_ADD && FAST_IS_REG(dst)) ? 
dst : TMP_REG2;
+
+ if (op >= SLJIT_ADD && (dst & SLJIT_MEM))
+ FAIL_IF(emit_op_mem(compiler, input_flags | LOAD_DATA, TMP_REG1, dst, dstw, TMP_REG1));
+
+ invert = 0;
+ cr_bit = 0;
+
+ switch (type & 0xff) {
+ case SLJIT_LESS:
+ case SLJIT_SIG_LESS:
break;
- case SLJIT_C_NOT_EQUAL:
- GET_CR_BIT(2, reg);
- INVERT_BIT(reg);
+ case SLJIT_GREATER_EQUAL:
+ case SLJIT_SIG_GREATER_EQUAL:
+ invert = 1;
break;
- case SLJIT_C_LESS:
- case SLJIT_C_FLOAT_LESS:
- GET_CR_BIT(4 + 0, reg);
+ case SLJIT_GREATER:
+ case SLJIT_SIG_GREATER:
+ cr_bit = 1;
break;
- case SLJIT_C_GREATER_EQUAL:
- case SLJIT_C_FLOAT_GREATER_EQUAL:
- GET_CR_BIT(4 + 0, reg);
- INVERT_BIT(reg);
+ case SLJIT_LESS_EQUAL:
+ case SLJIT_SIG_LESS_EQUAL:
+ cr_bit = 1;
+ invert = 1;
break;
- case SLJIT_C_GREATER:
- case SLJIT_C_FLOAT_GREATER:
- GET_CR_BIT(4 + 1, reg);
+ case SLJIT_EQUAL:
+ cr_bit = 2;
break;
- case SLJIT_C_LESS_EQUAL:
- case SLJIT_C_FLOAT_LESS_EQUAL:
- GET_CR_BIT(4 + 1, reg);
- INVERT_BIT(reg);
+ case SLJIT_NOT_EQUAL:
+ cr_bit = 2;
+ invert = 1;
break;
- case SLJIT_C_SIG_LESS:
- GET_CR_BIT(0, reg);
+ case SLJIT_OVERFLOW:
+ case SLJIT_MUL_OVERFLOW:
+ cr_bit = 3;
break;
- case SLJIT_C_SIG_GREATER_EQUAL:
- GET_CR_BIT(0, reg);
- INVERT_BIT(reg);
+ case SLJIT_NOT_OVERFLOW:
+ case SLJIT_MUL_NOT_OVERFLOW:
+ cr_bit = 3;
+ invert = 1;
break;
- case SLJIT_C_SIG_GREATER:
- GET_CR_BIT(1, reg);
+ case SLJIT_LESS_F64:
+ cr_bit = 4 + 0;
break;
- case SLJIT_C_SIG_LESS_EQUAL:
- GET_CR_BIT(1, reg);
- INVERT_BIT(reg);
+ case SLJIT_GREATER_EQUAL_F64:
+ cr_bit = 4 + 0;
+ invert = 1;
break;
- case SLJIT_C_OVERFLOW:
- case SLJIT_C_MUL_OVERFLOW:
- GET_CR_BIT(3, reg);
+ case SLJIT_GREATER_F64:
+ cr_bit = 4 + 1;
break;
- case SLJIT_C_NOT_OVERFLOW:
- case SLJIT_C_MUL_NOT_OVERFLOW:
- GET_CR_BIT(3, reg);
- INVERT_BIT(reg);
+ case SLJIT_LESS_EQUAL_F64:
+ cr_bit = 4 + 1;
+ invert = 1;
break;
- case SLJIT_C_FLOAT_EQUAL:
- GET_CR_BIT(4 + 2, reg);
+ case SLJIT_EQUAL_F64:
+ cr_bit = 4 + 2;
break;
- case SLJIT_C_FLOAT_NOT_EQUAL:
- GET_CR_BIT(4 + 2, reg);
- INVERT_BIT(reg);
+ case SLJIT_NOT_EQUAL_F64:
+ cr_bit = 4 + 2;
+ invert = 1;
break;
- case SLJIT_C_FLOAT_UNORDERED:
- GET_CR_BIT(4 + 3, reg);
+ case SLJIT_UNORDERED_F64:
+ cr_bit = 4 + 3;
break;
- case SLJIT_C_FLOAT_ORDERED:
- GET_CR_BIT(4 + 3, reg);
- INVERT_BIT(reg);
+ case SLJIT_ORDERED_F64:
+ cr_bit = 4 + 3;
+ invert = 1;
break;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
break;
}
+ FAIL_IF(push_inst(compiler, MFCR | D(reg)));
+ FAIL_IF(push_inst(compiler, RLWINM | S(reg) | A(reg) | ((1 + (cr_bit)) << 11) | (31 << 6) | (31 << 1)));
+
+ if (invert)
+ FAIL_IF(push_inst(compiler, XORI | S(reg) | A(reg) | 0x1));
+ if (op < SLJIT_ADD) {
+ if (!(dst & SLJIT_MEM))
+ return SLJIT_SUCCESS;
+ return emit_op_mem(compiler, input_flags, reg, dst, dstw, TMP_REG1);
+ }
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
+ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+ compiler->skip_checks = 1;
+#endif
+ if (dst & SLJIT_MEM)
+ return sljit_emit_op2(compiler, saved_op, dst, saved_dstw, TMP_REG1, 0, TMP_REG2, 0);
+ return sljit_emit_op2(compiler, saved_op, dst, 0, dst, 0, TMP_REG2, 0);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_reg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
+
+ return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32
type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_s32 mem_flags; + sljit_ins inst; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + + if (type & SLJIT_MEM_POST) + return SLJIT_ERR_UNSUPPORTED; + + switch (type & 0xff) { + case SLJIT_MOV: + case SLJIT_MOV_P: +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: +#endif + mem_flags = WORD_DATA; + break; + #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - if (op == SLJIT_MOV) - input_flags = WORD_DATA; - else { - op = SLJIT_MOV_UI; - input_flags = INT_DATA; + case SLJIT_MOV_U32: + mem_flags = INT_DATA; + break; + + case SLJIT_MOV_S32: + mem_flags = INT_DATA; + + if (!(type & SLJIT_MEM_STORE) && !(type & SLJIT_I32_OP)) { + if (mem & OFFS_REG_MASK) + mem_flags |= SIGNED_DATA; + else + return SLJIT_ERR_UNSUPPORTED; } -#else - op = SLJIT_MOV; - input_flags = WORD_DATA; + break; #endif - return (reg == TMP_REG2) ? emit_op(compiler, op, input_flags, dst, dstw, TMP_REG1, 0, TMP_REG2, 0) : SLJIT_SUCCESS; + + case SLJIT_MOV_U8: + case SLJIT_MOV_S8: + mem_flags = BYTE_DATA; + break; + + case SLJIT_MOV_U16: + mem_flags = HALF_DATA; + break; + + case SLJIT_MOV_S16: + mem_flags = HALF_DATA | SIGNED_DATA; + break; + + default: + SLJIT_UNREACHABLE(); + mem_flags = WORD_DATA; + break; } -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->skip_checks = 1; + if (!(type & SLJIT_MEM_STORE)) + mem_flags |= LOAD_DATA; + + if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) { + if (memw != 0) + return SLJIT_ERR_UNSUPPORTED; + + if (type & SLJIT_MEM_SUPP) + return SLJIT_SUCCESS; + + inst = updated_data_transfer_insts[mem_flags | INDEXED]; + FAIL_IF(push_inst(compiler, INST_CODE_AND_DST(inst, 0, reg) | A(mem & REG_MASK) | B(OFFS_REG(mem)))); + } + else { + if (memw > SIMM_MAX || memw < SIMM_MIN) + return SLJIT_ERR_UNSUPPORTED; + + inst = updated_data_transfer_insts[mem_flags]; + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if ((inst & INT_ALIGNED) && (memw & 0x3) != 0) + return SLJIT_ERR_UNSUPPORTED; #endif - return sljit_emit_op2(compiler, op | flags, dst, dstw, src, srcw, TMP_REG2, 0); + + if (type & SLJIT_MEM_SUPP) + return SLJIT_SUCCESS; + + FAIL_IF(push_inst(compiler, INST_CODE_AND_DST(inst, 0, reg) | A(mem & REG_MASK) | IMM(memw))); + } + + if ((mem_flags & LOAD_DATA) && (type & 0xff) == SLJIT_MOV_S8) + return push_inst(compiler, EXTSB | S(reg) | A(reg)); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 freg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_s32 mem_flags; + sljit_ins inst; + + CHECK_ERROR(); + CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw)); + + if (type & SLJIT_MEM_POST) + return SLJIT_ERR_UNSUPPORTED; + + if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) { + if (memw != 0) + return SLJIT_ERR_UNSUPPORTED; + } + else { + if (memw > SIMM_MAX || memw < SIMM_MIN) + return SLJIT_ERR_UNSUPPORTED; + } + + if (type & SLJIT_MEM_SUPP) + return SLJIT_SUCCESS; + + mem_flags = FLOAT_DATA(type); + + if (!(type & SLJIT_MEM_STORE)) + mem_flags |= LOAD_DATA; + + if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) { + inst = updated_data_transfer_insts[mem_flags | INDEXED]; + return push_inst(compiler, INST_CODE_AND_DST(inst, DOUBLE_DATA, freg) | A(mem & REG_MASK) | B(OFFS_REG(mem))); + } + + inst = updated_data_transfer_insts[mem_flags]; + return push_inst(compiler, INST_CODE_AND_DST(inst, DOUBLE_DATA, freg) | A(mem & 
REG_MASK) | IMM(memw)); } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) { struct sljit_const *const_; - sljit_si reg; + sljit_s32 dst_r; CHECK_ERROR_PTR(); - check_sljit_emit_const(compiler, dst, dstw, init_value); + CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value)); ADJUST_LOCAL_OFFSET(dst, dstw); const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); PTR_FAIL_IF(!const_); set_const(const_, compiler); - reg = (dst <= ZERO_REG) ? dst : TMP_REG2; - - PTR_FAIL_IF(emit_const(compiler, reg, init_value)); + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; + PTR_FAIL_IF(emit_const(compiler, dst_r, init_value)); if (dst & SLJIT_MEM) PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0)); + return const_; } + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + struct sljit_put_label *put_label; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label)); + PTR_FAIL_IF(!put_label); + set_put_label(put_label, compiler, 0); + + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + PTR_FAIL_IF(emit_const(compiler, dst_r, 0)); +#else + PTR_FAIL_IF(push_inst(compiler, dst_r)); + compiler->size += 4; +#endif + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0)); + + return put_label; +} diff --git a/src/3rd/pcre/sjsparcc.c b/src/3rd/pcre/sjsparcc.c new file mode 100644 index 00000000000..0061a0511db --- /dev/null +++ b/src/3rd/pcre/sjsparcc.c @@ -0,0 +1,1536 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
+{
+ return "SPARC" SLJIT_CPUINFO;
+}
+
+/* Length of an instruction word
+ Both for sparc-32 and sparc-64 */
+typedef sljit_u32 sljit_ins;
+
+#if (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL)
+
+static void sparc_cache_flush(sljit_ins *from, sljit_ins *to)
+{
+#if defined(__SUNPRO_C) && __SUNPRO_C < 0x590
+ __asm (
+ /* if (from == to) return */
+ "cmp %i0, %i1\n"
+ "be .leave\n"
+ "nop\n"
+
+ /* loop until from >= to */
+ ".mainloop:\n"
+ "flush %i0\n"
+ "add %i0, 8, %i0\n"
+ "cmp %i0, %i1\n"
+ "bcs .mainloop\n"
+ "nop\n"
+
+ /* The comparison was done above. */
+ "bne .leave\n"
+ /* nop is not necessary here, since the
+ sub operation has no side effect. */
+ "sub %i0, 4, %i0\n"
+ "flush %i0\n"
+ ".leave:"
+ );
+#else
+ if (SLJIT_UNLIKELY(from == to))
+ return;
+
+ do {
+ __asm__ volatile (
+ "flush %0\n"
+ : : "r"(from)
+ );
+ /* Operates at least on doubleword. */
+ from += 2;
+ } while (from < to);
+
+ if (from == to) {
+ /* Flush the last word. */
+ from --;
+ __asm__ volatile (
+ "flush %0\n"
+ : : "r"(from)
+ );
+ }
+#endif
+}
+
+#endif /* (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL) */
+
+/* TMP_REG2 is not used by getput_arg */
+#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
+#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
+#define TMP_REG3 (SLJIT_NUMBER_OF_REGISTERS + 4)
+/* This register is modified by calls, which affects the instruction
+ in the delay slot if it is used as a source register. */
+#define TMP_LINK (SLJIT_NUMBER_OF_REGISTERS + 5)
+
+#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
+#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
+
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
+ 0, 8, 9, 10, 11, 29, 28, 27, 23, 22, 21, 20, 19, 18, 17, 16, 26, 25, 24, 14, 1, 12, 13, 15
+};
+
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
+ 0, 0, 2, 4, 6, 8, 10, 12, 14
+};
+
+/* --------------------------------------------------------------------- */
+/* Instruction forms */
+/* --------------------------------------------------------------------- */
+
+#define D(d) (reg_map[d] << 25)
+#define FD(d) (freg_map[d] << 25)
+#define FDN(d) ((freg_map[d] | 0x1) << 25)
+#define DA(d) ((d) << 25)
+#define S1(s1) (reg_map[s1] << 14)
+#define FS1(s1) (freg_map[s1] << 14)
+#define S1A(s1) ((s1) << 14)
+#define S2(s2) (reg_map[s2])
+#define FS2(s2) (freg_map[s2])
+#define FS2N(s2) (freg_map[s2] | 0x1)
+#define S2A(s2) (s2)
+#define IMM_ARG 0x2000
+#define DOP(op) ((op) << 5)
+#define IMM(imm) (((imm) & 0x1fff) | IMM_ARG)
+
+#define DR(dr) (reg_map[dr])
+#define OPC1(opcode) ((opcode) << 30)
+#define OPC2(opcode) ((opcode) << 22)
+#define OPC3(opcode) ((opcode) << 19)
+#define SET_FLAGS OPC3(0x10)
+
+#define ADD (OPC1(0x2) | OPC3(0x00))
+#define ADDC (OPC1(0x2) | OPC3(0x08))
+#define AND (OPC1(0x2) | OPC3(0x01))
+#define ANDN (OPC1(0x2) | OPC3(0x05))
+#define CALL (OPC1(0x1))
+#define FABSS (OPC1(0x2) | OPC3(0x34) | DOP(0x09))
+#define FADDD (OPC1(0x2) | OPC3(0x34) | DOP(0x42))
+#define FADDS (OPC1(0x2) | OPC3(0x34) | DOP(0x41))
+#define FCMPD (OPC1(0x2) | OPC3(0x35) | DOP(0x52))
+#define FCMPS (OPC1(0x2) | OPC3(0x35) | DOP(0x51))
+#define FDIVD (OPC1(0x2) | OPC3(0x34) | DOP(0x4e))
+#define FDIVS (OPC1(0x2) | OPC3(0x34) | DOP(0x4d))
+#define FDTOI (OPC1(0x2) | OPC3(0x34) | DOP(0xd2))
+#define FDTOS (OPC1(0x2) | OPC3(0x34) | DOP(0xc6))
+#define FITOD (OPC1(0x2) | OPC3(0x34) | DOP(0xc8))
+#define FITOS (OPC1(0x2) |
OPC3(0x34) | DOP(0xc4)) +#define FMOVS (OPC1(0x2) | OPC3(0x34) | DOP(0x01)) +#define FMULD (OPC1(0x2) | OPC3(0x34) | DOP(0x4a)) +#define FMULS (OPC1(0x2) | OPC3(0x34) | DOP(0x49)) +#define FNEGS (OPC1(0x2) | OPC3(0x34) | DOP(0x05)) +#define FSTOD (OPC1(0x2) | OPC3(0x34) | DOP(0xc9)) +#define FSTOI (OPC1(0x2) | OPC3(0x34) | DOP(0xd1)) +#define FSUBD (OPC1(0x2) | OPC3(0x34) | DOP(0x46)) +#define FSUBS (OPC1(0x2) | OPC3(0x34) | DOP(0x45)) +#define JMPL (OPC1(0x2) | OPC3(0x38)) +#define LDD (OPC1(0x3) | OPC3(0x03)) +#define LDUW (OPC1(0x3) | OPC3(0x00)) +#define NOP (OPC1(0x0) | OPC2(0x04)) +#define OR (OPC1(0x2) | OPC3(0x02)) +#define ORN (OPC1(0x2) | OPC3(0x06)) +#define RDY (OPC1(0x2) | OPC3(0x28) | S1A(0)) +#define RESTORE (OPC1(0x2) | OPC3(0x3d)) +#define SAVE (OPC1(0x2) | OPC3(0x3c)) +#define SETHI (OPC1(0x0) | OPC2(0x04)) +#define SLL (OPC1(0x2) | OPC3(0x25)) +#define SLLX (OPC1(0x2) | OPC3(0x25) | (1 << 12)) +#define SRA (OPC1(0x2) | OPC3(0x27)) +#define SRAX (OPC1(0x2) | OPC3(0x27) | (1 << 12)) +#define SRL (OPC1(0x2) | OPC3(0x26)) +#define SRLX (OPC1(0x2) | OPC3(0x26) | (1 << 12)) +#define STDF (OPC1(0x3) | OPC3(0x27)) +#define STF (OPC1(0x3) | OPC3(0x24)) +#define STW (OPC1(0x3) | OPC3(0x04)) +#define SUB (OPC1(0x2) | OPC3(0x04)) +#define SUBC (OPC1(0x2) | OPC3(0x0c)) +#define TA (OPC1(0x2) | OPC3(0x3a) | (8 << 25)) +#define WRY (OPC1(0x2) | OPC3(0x30) | DA(0)) +#define XOR (OPC1(0x2) | OPC3(0x03)) +#define XNOR (OPC1(0x2) | OPC3(0x07)) + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +#define MAX_DISP (0x1fffff) +#define MIN_DISP (-0x200000) +#define DISP_MASK (0x3fffff) + +#define BICC (OPC1(0x0) | OPC2(0x2)) +#define FBFCC (OPC1(0x0) | OPC2(0x6)) +#define SLL_W SLL +#define SDIV (OPC1(0x2) | OPC3(0x0f)) +#define SMUL (OPC1(0x2) | OPC3(0x0b)) +#define UDIV (OPC1(0x2) | OPC3(0x0e)) +#define UMUL (OPC1(0x2) | OPC3(0x0a)) +#else +#define SLL_W SLLX +#endif + +#define SIMM_MAX (0x0fff) +#define SIMM_MIN (-0x1000) + +/* dest_reg is the absolute name of the register + Useful for reordering instructions in the delay slot. */ +static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_s32 delay_slot) +{ + sljit_ins *ptr; + SLJIT_ASSERT((delay_slot & DST_INS_MASK) == UNMOVABLE_INS + || (delay_slot & DST_INS_MASK) == MOVABLE_INS + || (delay_slot & DST_INS_MASK) == ((ins >> 25) & 0x1f)); + ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins)); + FAIL_IF(!ptr); + *ptr = ins; + compiler->size++; + compiler->delay_slot = delay_slot; + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset) +{ + sljit_sw diff; + sljit_uw target_addr; + sljit_ins *inst; + sljit_ins saved_inst; + + if (jump->flags & SLJIT_REWRITABLE_JUMP) + return code_ptr; + + if (jump->flags & JUMP_ADDR) + target_addr = jump->u.target; + else { + SLJIT_ASSERT(jump->flags & JUMP_LABEL); + target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset; + } + inst = (sljit_ins*)jump->addr; + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + if (jump->flags & IS_CALL) { + /* Call is always patchable on sparc 32. */ + jump->flags |= PATCH_CALL; + if (jump->flags & IS_MOVABLE) { + inst[0] = inst[-1]; + inst[-1] = CALL; + jump->addr -= sizeof(sljit_ins); + return inst; + } + inst[0] = CALL; + inst[1] = NOP; + return inst + 1; + } +#else + /* Both calls and BPr instructions shall not pass this point. 
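+ Only the sparc-32 call patching above is implemented; a 64 bit
+ target would need its own rewriting here, hence the #error below.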
*/ +#error "Implementation required" +#endif + + if (jump->flags & IS_COND) + inst--; + + diff = ((sljit_sw)target_addr - (sljit_sw)(inst - 1) - executable_offset) >> 2; + + if (jump->flags & IS_MOVABLE) { + if (diff <= MAX_DISP && diff >= MIN_DISP) { + jump->flags |= PATCH_B; + inst--; + if (jump->flags & IS_COND) { + saved_inst = inst[0]; + inst[0] = inst[1] ^ (1 << 28); + inst[1] = saved_inst; + } else { + inst[1] = inst[0]; + inst[0] = BICC | DA(0x8); + } + jump->addr = (sljit_uw)inst; + return inst + 1; + } + } + + diff += sizeof(sljit_ins); + + if (diff <= MAX_DISP && diff >= MIN_DISP) { + jump->flags |= PATCH_B; + if (jump->flags & IS_COND) + inst[0] ^= (1 << 28); + else + inst[0] = BICC | DA(0x8); + inst[1] = NOP; + jump->addr = (sljit_uw)inst; + return inst + 1; + } + + return code_ptr; +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + sljit_ins *code; + sljit_ins *code_ptr; + sljit_ins *buf_ptr; + sljit_ins *buf_end; + sljit_uw word_count; + sljit_uw next_addr; + sljit_sw executable_offset; + sljit_uw addr; + + struct sljit_label *label; + struct sljit_jump *jump; + struct sljit_const *const_; + struct sljit_put_label *put_label; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_generate_code(compiler)); + reverse_buf(compiler); + + code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins)); + PTR_FAIL_WITH_EXEC_IF(code); + buf = compiler->buf; + + code_ptr = code; + word_count = 0; + next_addr = 0; + executable_offset = SLJIT_EXEC_OFFSET(code); + + label = compiler->labels; + jump = compiler->jumps; + const_ = compiler->consts; + put_label = compiler->put_labels; + + do { + buf_ptr = (sljit_ins*)buf->memory; + buf_end = buf_ptr + (buf->used_size >> 2); + do { + *code_ptr = *buf_ptr++; + if (next_addr == word_count) { + SLJIT_ASSERT(!label || label->size >= word_count); + SLJIT_ASSERT(!jump || jump->addr >= word_count); + SLJIT_ASSERT(!const_ || const_->addr >= word_count); + SLJIT_ASSERT(!put_label || put_label->addr >= word_count); + + /* These structures are ordered by their address. */ + if (label && label->size == word_count) { + /* Just recording the address. */ + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + label->size = code_ptr - code; + label = label->next; + } + if (jump && jump->addr == word_count) { +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + jump->addr = (sljit_uw)(code_ptr - 3); +#else + jump->addr = (sljit_uw)(code_ptr - 6); +#endif + code_ptr = detect_jump_type(jump, code_ptr, code, executable_offset); + jump = jump->next; + } + if (const_ && const_->addr == word_count) { + /* Just recording the address. 
*/ + const_->addr = (sljit_uw)code_ptr; + const_ = const_->next; + } + if (put_label && put_label->addr == word_count) { + SLJIT_ASSERT(put_label->label); + put_label->addr = (sljit_uw)code_ptr; + put_label = put_label->next; + } + next_addr = compute_next_addr(label, jump, const_, put_label); + } + code_ptr ++; + word_count ++; + } while (buf_ptr < buf_end); + + buf = buf->next; + } while (buf); + + if (label && label->size == word_count) { + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + label->size = code_ptr - code; + label = label->next; + } + + SLJIT_ASSERT(!label); + SLJIT_ASSERT(!jump); + SLJIT_ASSERT(!const_); + SLJIT_ASSERT(!put_label); + SLJIT_ASSERT(code_ptr - code <= (sljit_s32)compiler->size); + + jump = compiler->jumps; + while (jump) { + do { + addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target; + buf_ptr = (sljit_ins *)jump->addr; + + if (jump->flags & PATCH_CALL) { + addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2; + SLJIT_ASSERT((sljit_sw)addr <= 0x1fffffff && (sljit_sw)addr >= -0x20000000); + buf_ptr[0] = CALL | (addr & 0x3fffffff); + break; + } + if (jump->flags & PATCH_B) { + addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2; + SLJIT_ASSERT((sljit_sw)addr <= MAX_DISP && (sljit_sw)addr >= MIN_DISP); + buf_ptr[0] = (buf_ptr[0] & ~DISP_MASK) | (addr & DISP_MASK); + break; + } + + /* Set the fields of immediate loads. */ +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + SLJIT_ASSERT(((buf_ptr[0] & 0xc1cfffff) == 0x01000000) && ((buf_ptr[1] & 0xc1f83fff) == 0x80102000)); + buf_ptr[0] |= (addr >> 10) & 0x3fffff; + buf_ptr[1] |= addr & 0x3ff; +#else +#error "Implementation required" +#endif + } while (0); + jump = jump->next; + } + + put_label = compiler->put_labels; + while (put_label) { + addr = put_label->label->addr; + buf_ptr = (sljit_ins *)put_label->addr; + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + SLJIT_ASSERT(((buf_ptr[0] & 0xc1cfffff) == 0x01000000) && ((buf_ptr[1] & 0xc1f83fff) == 0x80102000)); + buf_ptr[0] |= (addr >> 10) & 0x3fffff; + buf_ptr[1] |= addr & 0x3ff; +#else +#error "Implementation required" +#endif + put_label = put_label->next; + } + + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_offset = executable_offset; + compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins); + + code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset); + code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + + SLJIT_CACHE_FLUSH(code, code_ptr); + return code; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) +{ + switch (feature_type) { + case SLJIT_HAS_FPU: +#ifdef SLJIT_IS_FPU_AVAILABLE + return SLJIT_IS_FPU_AVAILABLE; +#else + /* Available by default. */ + return 1; +#endif + +#if (defined SLJIT_CONFIG_SPARC_64 && SLJIT_CONFIG_SPARC_64) + case SLJIT_HAS_CMOV: + return 1; +#endif + + default: + return 0; + } +} + +/* --------------------------------------------------------------------- */ +/* Entry, exit */ +/* --------------------------------------------------------------------- */ + +/* Creates an index in data_transfer_insts array. 
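+   For example, a signed halfword load uses index
+   SIGNED_DATA | HALF_DATA | LOAD_DATA = 0x08 | 0x04 | 0x01 = 0x0d,
+   the ldsh entry of the table below.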
*/ +#define LOAD_DATA 0x01 +#define WORD_DATA 0x00 +#define BYTE_DATA 0x02 +#define HALF_DATA 0x04 +#define INT_DATA 0x06 +#define SIGNED_DATA 0x08 +/* Separates integer and floating point registers */ +#define GPR_REG 0x0f +#define DOUBLE_DATA 0x10 +#define SINGLE_DATA 0x12 + +#define MEM_MASK 0x1f + +#define ARG_TEST 0x00020 +#define ALT_KEEP_CACHE 0x00040 +#define CUMULATIVE_OP 0x00080 +#define IMM_OP 0x00100 +#define SRC2_IMM 0x00200 + +#define REG_DEST 0x00400 +#define REG2_SOURCE 0x00800 +#define SLOW_SRC1 0x01000 +#define SLOW_SRC2 0x02000 +#define SLOW_DEST 0x04000 + +/* SET_FLAGS (0x10 << 19) also belong here! */ + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +#include "sjsprc32.c" +#else +#include "sljitNativeSPARC_64.c" +#endif + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); + + local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7; + compiler->local_size = local_size; + + if (local_size <= SIMM_MAX) { + FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | IMM(-local_size), UNMOVABLE_INS)); + } + else { + FAIL_IF(load_immediate(compiler, TMP_REG1, -local_size)); + FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | S2(TMP_REG1), UNMOVABLE_INS)); + } + + /* Arguments are in their appropriate registers. */ + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) +{ + CHECK_ERROR(); + CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); + + compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7; + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_return(compiler, op, src, srcw)); + + if (op != SLJIT_MOV || !FAST_IS_REG(src)) { + FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); + src = SLJIT_R0; + } + + FAIL_IF(push_inst(compiler, JMPL | D(0) | S1A(31) | IMM(8), UNMOVABLE_INS)); + return push_inst(compiler, RESTORE | D(SLJIT_R0) | S1(src) | S2(0), UNMOVABLE_INS); +} + +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +#define ARCH_32_64(a, b) a +#else +#define ARCH_32_64(a, b) b +#endif + +static const sljit_ins data_transfer_insts[16 + 4] = { +/* u w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */), +/* u w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */), +/* u b s */ OPC1(3) | OPC3(0x05) /* stb */, +/* u b l */ OPC1(3) | OPC3(0x01) /* ldub */, +/* u h s */ OPC1(3) | OPC3(0x06) /* sth */, +/* u h l */ OPC1(3) | OPC3(0x02) /* lduh */, +/* u i s */ 
OPC1(3) | OPC3(0x04) /* stw */,
+/* u i l */ OPC1(3) | OPC3(0x00) /* lduw */,
+
+/* s w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */),
+/* s w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */),
+/* s b s */ OPC1(3) | OPC3(0x05) /* stb */,
+/* s b l */ OPC1(3) | OPC3(0x09) /* ldsb */,
+/* s h s */ OPC1(3) | OPC3(0x06) /* sth */,
+/* s h l */ OPC1(3) | OPC3(0x0a) /* ldsh */,
+/* s i s */ OPC1(3) | OPC3(0x04) /* stw */,
+/* s i l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x08) /* ldsw */),
+
+/* d s */ OPC1(3) | OPC3(0x27),
+/* d l */ OPC1(3) | OPC3(0x23),
+/* s s */ OPC1(3) | OPC3(0x24),
+/* s l */ OPC1(3) | OPC3(0x20),
+};
+
+#undef ARCH_32_64
+
+/* Can perform an operation using at most 1 instruction. */
+static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
+{
+ SLJIT_ASSERT(arg & SLJIT_MEM);
+
+ if ((!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN)
+ || ((arg & OFFS_REG_MASK) && (argw & 0x3) == 0)) {
+ /* Works for both absolute and relative addresses (immediate case). */
+ if (SLJIT_UNLIKELY(flags & ARG_TEST))
+ return 1;
+ FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK]
+ | ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg))
+ | S1(arg & REG_MASK) | ((arg & OFFS_REG_MASK) ? S2(OFFS_REG(arg)) : IMM(argw)),
+ ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS));
+ return -1;
+ }
+ return 0;
+}
+
+/* See getput_arg below.
+   Note: can_cache is called only for binary operators. Those
+   operators always use word arguments without write back. */
+static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
+{
+ SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM));
+
+ /* Simple operation except for updates. */
+ if (arg & OFFS_REG_MASK) {
+ argw &= 0x3;
+ SLJIT_ASSERT(argw);
+ next_argw &= 0x3;
+ if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == next_argw)
+ return 1;
+ return 0;
+ }
+
+ if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN))
+ return 1;
+ return 0;
+}
+
+/* Emit the necessary instructions. See can_cache above. */
+static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
+{
+ sljit_s32 base, arg2, delay_slot;
+ sljit_ins dest;
+
+ SLJIT_ASSERT(arg & SLJIT_MEM);
+ if (!(next_arg & SLJIT_MEM)) {
+ next_arg = 0;
+ next_argw = 0;
+ }
+
+ base = arg & REG_MASK;
+ if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
+ argw &= 0x3;
+
+ /* Using the cache. */
+ if (((SLJIT_MEM | (arg & OFFS_REG_MASK)) == compiler->cache_arg) && (argw == compiler->cache_argw))
+ arg2 = TMP_REG3;
+ else {
+ if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == (next_argw & 0x3)) {
+ compiler->cache_arg = SLJIT_MEM | (arg & OFFS_REG_MASK);
+ compiler->cache_argw = argw;
+ arg2 = TMP_REG3;
+ }
+ else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base && reg != OFFS_REG(arg))
+ arg2 = reg;
+ else /* It must be a mov operation, so tmp1 must be free to use. */
+ arg2 = TMP_REG1;
+ FAIL_IF(push_inst(compiler, SLL_W | D(arg2) | S1(OFFS_REG(arg)) | IMM_ARG | argw, DR(arg2)));
+ }
+ }
+ else {
+ /* Using the cache.
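+ TMP_REG3 may still hold an address constant loaded by an earlier
+ access; when the new argw is within a signed 13 bit immediate of
+ the cached one, a single add below is enough to adjust it.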
*/ + if ((compiler->cache_arg == SLJIT_MEM) && (argw - compiler->cache_argw) <= SIMM_MAX && (argw - compiler->cache_argw) >= SIMM_MIN) { + if (argw != compiler->cache_argw) { + FAIL_IF(push_inst(compiler, ADD | D(TMP_REG3) | S1(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3))); + compiler->cache_argw = argw; + } + arg2 = TMP_REG3; + } else { + if ((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN) { + compiler->cache_arg = SLJIT_MEM; + compiler->cache_argw = argw; + arg2 = TMP_REG3; + } + else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base) + arg2 = reg; + else /* It must be a mov operation, so tmp1 must be free to use. */ + arg2 = TMP_REG1; + FAIL_IF(load_immediate(compiler, arg2, argw)); + } + } + + dest = ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg)); + delay_slot = ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS; + if (!base) + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(arg2) | IMM(0), delay_slot); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot); +} + +static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw) +{ + if (getput_arg_fast(compiler, flags, reg, arg, argw)) + return compiler->error; + compiler->cache_arg = 0; + compiler->cache_argw = 0; + return getput_arg(compiler, flags, reg, arg, argw, 0, 0); +} + +static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w) +{ + if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) + return compiler->error; + return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); +} + +static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + /* arg1 goes to TMP_REG1 or src reg + arg2 goes to TMP_REG2, imm or src reg + TMP_REG3 can be used for caching + result goes to TMP_REG2, so put result can use TMP_REG1 and TMP_REG3. */ + sljit_s32 dst_r = TMP_REG2; + sljit_s32 src1_r; + sljit_sw src2_r = 0; + sljit_s32 sugg_src2_r = TMP_REG2; + + if (!(flags & ALT_KEEP_CACHE)) { + compiler->cache_arg = 0; + compiler->cache_argw = 0; + } + + if (dst != SLJIT_UNUSED) { + if (FAST_IS_REG(dst)) { + dst_r = dst; + flags |= REG_DEST; + if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) + sugg_src2_r = dst_r; + } + else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, TMP_REG1, dst, dstw)) + flags |= SLOW_DEST; + } + + if (flags & IMM_OP) { + if ((src2 & SLJIT_IMM) && src2w) { + if (src2w <= SIMM_MAX && src2w >= SIMM_MIN) { + flags |= SRC2_IMM; + src2_r = src2w; + } + } + if (!(flags & SRC2_IMM) && (flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w) { + if (src1w <= SIMM_MAX && src1w >= SIMM_MIN) { + flags |= SRC2_IMM; + src2_r = src1w; + + /* And swap arguments. */ + src1 = src2; + src1w = src2w; + src2 = SLJIT_IMM; + /* src2w = src2_r unneeded. */ + } + } + } + + /* Source 1. 
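+ Used as-is when it is a register; a zero immediate is mapped to
+ %g0, and any other operand is loaded into TMP_REG1.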
*/ + if (FAST_IS_REG(src1)) + src1_r = src1; + else if (src1 & SLJIT_IMM) { + if (src1w) { + FAIL_IF(load_immediate(compiler, TMP_REG1, src1w)); + src1_r = TMP_REG1; + } + else + src1_r = 0; + } + else { + if (getput_arg_fast(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w)) + FAIL_IF(compiler->error); + else + flags |= SLOW_SRC1; + src1_r = TMP_REG1; + } + + /* Source 2. */ + if (FAST_IS_REG(src2)) { + src2_r = src2; + flags |= REG2_SOURCE; + if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOV_P) + dst_r = src2_r; + } + else if (src2 & SLJIT_IMM) { + if (!(flags & SRC2_IMM)) { + if (src2w) { + FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w)); + src2_r = sugg_src2_r; + } + else { + src2_r = 0; + if ((op >= SLJIT_MOV && op <= SLJIT_MOV_P) && (dst & SLJIT_MEM)) + dst_r = 0; + } + } + } + else { + if (getput_arg_fast(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w)) + FAIL_IF(compiler->error); + else + flags |= SLOW_SRC2; + src2_r = sugg_src2_r; + } + + if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { + SLJIT_ASSERT(src2_r == TMP_REG2); + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw)); + } + } + else if (flags & SLOW_SRC1) + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); + else if (flags & SLOW_SRC2) + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw)); + + FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r)); + + if (dst & SLJIT_MEM) { + if (!(flags & SLOW_DEST)) { + getput_arg_fast(compiler, flags, dst_r, dst, dstw); + return compiler->error; + } + return getput_arg(compiler, flags, dst_r, dst, dstw, 0, 0); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_op0(compiler, op)); + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_BREAKPOINT: + return push_inst(compiler, TA, UNMOVABLE_INS); + case SLJIT_NOP: + return push_inst(compiler, NOP, UNMOVABLE_INS); + case SLJIT_LMUL_UW: + case SLJIT_LMUL_SW: +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? UMUL : SMUL) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0))); + return push_inst(compiler, RDY | D(SLJIT_R1), DR(SLJIT_R1)); +#else +#error "Implementation required" +#endif + case SLJIT_DIVMOD_UW: + case SLJIT_DIVMOD_SW: + case SLJIT_DIV_UW: + case SLJIT_DIV_SW: + SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments); +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + if ((op | 0x2) == SLJIT_DIV_UW) + FAIL_IF(push_inst(compiler, WRY | S1(0), MOVABLE_INS)); + else { + FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(SLJIT_R0) | IMM(31), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, WRY | S1(TMP_REG1), MOVABLE_INS)); + } + if (op <= SLJIT_DIVMOD_SW) + FAIL_IF(push_inst(compiler, OR | D(TMP_REG2) | S1(0) | S2(SLJIT_R0), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? 
UDIV : SDIV) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0))); + if (op >= SLJIT_DIV_UW) + return SLJIT_SUCCESS; + FAIL_IF(push_inst(compiler, SMUL | D(SLJIT_R1) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R1))); + return push_inst(compiler, SUB | D(SLJIT_R1) | S1(TMP_REG2) | S2(SLJIT_R1), DR(SLJIT_R1)); +#else +#error "Implementation required" +#endif + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0; + + CHECK_ERROR(); + CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) + return SLJIT_SUCCESS; + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_P: + return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_U32: + return emit_op(compiler, SLJIT_MOV_U32, flags | INT_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_S32: + return emit_op(compiler, SLJIT_MOV_S32, flags | INT_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_U8: + return emit_op(compiler, SLJIT_MOV_U8, flags | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw); + + case SLJIT_MOV_S8: + return emit_op(compiler, SLJIT_MOV_S8, flags | BYTE_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw); + + case SLJIT_MOV_U16: + return emit_op(compiler, SLJIT_MOV_U16, flags | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw); + + case SLJIT_MOV_S16: + return emit_op(compiler, SLJIT_MOV_S16, flags | HALF_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw); + + case SLJIT_NOT: + case SLJIT_CLZ: + return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_NEG: + return emit_op(compiler, SLJIT_SUB, flags | IMM_OP, dst, dstw, SLJIT_IMM, 0, src, srcw); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 flags = HAS_FLAGS(op) ? 
SET_FLAGS : 0; + + CHECK_ERROR(); + CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) + return SLJIT_SUCCESS; + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_ADD: + case SLJIT_ADDC: + case SLJIT_MUL: + case SLJIT_AND: + case SLJIT_OR: + case SLJIT_XOR: + return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SUB: + case SLJIT_SUBC: + return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SHL: + case SLJIT_LSHR: + case SLJIT_ASHR: +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + if (src2 & SLJIT_IMM) + src2w &= 0x1f; +#else + SLJIT_UNREACHABLE(); +#endif + return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg) +{ + CHECK_REG_INDEX(check_sljit_get_register_index(reg)); + return reg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg) +{ + CHECK_REG_INDEX(check_sljit_get_float_register_index(reg)); + return freg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_s32 size) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_op_custom(compiler, instruction, size)); + + return push_inst(compiler, *(sljit_ins*)instruction, UNMOVABLE_INS); +} + +/* --------------------------------------------------------------------- */ +/* Floating point operators */ +/* --------------------------------------------------------------------- */ + +#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 7)) +#define SELECT_FOP(op, single, double) ((op & SLJIT_F32_OP) ? single : double) +#define FLOAT_TMP_MEM_OFFSET (22 * sizeof(sljit_sw)) + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw)); + src = TMP_FREG1; + } + + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOI, FDTOI) | FD(TMP_FREG1) | FS2(src), MOVABLE_INS)); + + if (FAST_IS_REG(dst)) { + FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET)); + return emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET); + } + + /* Store the integer value from a VFP register. */ + return emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, dst, dstw, 0, 0); +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r = FAST_IS_REG(dst) ? 
dst : TMP_FREG1; + + if (src & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) + srcw = (sljit_s32)srcw; +#endif + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); + src = TMP_REG1; + srcw = 0; + } + + if (FAST_IS_REG(src)) { + FAIL_IF(emit_op_mem2(compiler, WORD_DATA, src, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET)); + src = SLJIT_MEM1(SLJIT_SP); + srcw = FLOAT_TMP_MEM_OFFSET; + } + + FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw)); + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FITOS, FITOD) | FD(dst_r) | FS2(TMP_FREG1), MOVABLE_INS)); + + if (dst & SLJIT_MEM) + return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); + src1 = TMP_FREG1; + } + + if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0)); + src2 = TMP_FREG2; + } + + return push_inst(compiler, SELECT_FOP(op, FCMPS, FCMPD) | FS1(src1) | FS2(src2), FCC_IS_SET | MOVABLE_INS); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r; + + CHECK_ERROR(); + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error); + SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw); + + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) + op ^= SLJIT_F32_OP; + + dst_r = FAST_IS_REG(dst) ? 
dst : TMP_FREG1; + + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, dst, dstw)); + src = dst_r; + } + + switch (GET_OPCODE(op)) { + case SLJIT_MOV_F64: + if (src != dst_r) { + if (dst_r != TMP_FREG1) { + FAIL_IF(push_inst(compiler, FMOVS | FD(dst_r) | FS2(src), MOVABLE_INS)); + if (!(op & SLJIT_F32_OP)) + FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS)); + } + else + dst_r = src; + } + break; + case SLJIT_NEG_F64: + FAIL_IF(push_inst(compiler, FNEGS | FD(dst_r) | FS2(src), MOVABLE_INS)); + if (dst_r != src && !(op & SLJIT_F32_OP)) + FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS)); + break; + case SLJIT_ABS_F64: + FAIL_IF(push_inst(compiler, FABSS | FD(dst_r) | FS2(src), MOVABLE_INS)); + if (dst_r != src && !(op & SLJIT_F32_OP)) + FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS)); + break; + case SLJIT_CONV_F64_FROM_F32: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOD, FDTOS) | FD(dst_r) | FS2(src), MOVABLE_INS)); + op ^= SLJIT_F32_OP; + break; + } + + if (dst & SLJIT_MEM) + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_r, dst, dstw, 0, 0)); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 dst_r, flags = 0; + + CHECK_ERROR(); + CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG2; + + if (src1 & SLJIT_MEM) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) { + FAIL_IF(compiler->error); + src1 = TMP_FREG1; + } else + flags |= SLOW_SRC1; + } + + if (src2 & SLJIT_MEM) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) { + FAIL_IF(compiler->error); + src2 = TMP_FREG2; + } else + flags |= SLOW_SRC2; + } + + if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + } + } + else if (flags & SLOW_SRC1) + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + else if (flags & SLOW_SRC2) + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + + if (flags & SLOW_SRC1) + src1 = TMP_FREG1; + if (flags & SLOW_SRC2) + src2 = TMP_FREG2; + + switch (GET_OPCODE(op)) { + case SLJIT_ADD_F64: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADDD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS)); + break; + + case SLJIT_SUB_F64: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUBD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS)); + break; + + case SLJIT_MUL_F64: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMULD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS)); + break; + + case 
SLJIT_DIV_F64: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIVD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS)); + break; + } + + if (dst_r == TMP_FREG2) + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0)); + + return SLJIT_SUCCESS; +} + +#undef FLOAT_DATA +#undef SELECT_FOP + +/* --------------------------------------------------------------------- */ +/* Other instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + if (FAST_IS_REG(dst)) + return push_inst(compiler, OR | D(dst) | S1(0) | S2(TMP_LINK), DR(dst)); + + /* Memory. */ + return emit_op_mem(compiler, WORD_DATA, TMP_LINK, dst, dstw); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_fast_return(compiler, src, srcw)); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (FAST_IS_REG(src)) + FAIL_IF(push_inst(compiler, OR | D(TMP_LINK) | S1(0) | S2(src), DR(TMP_LINK))); + else + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_LINK, src, srcw)); + + FAIL_IF(push_inst(compiler, JMPL | D(0) | S1(TMP_LINK) | IMM(8), UNMOVABLE_INS)); + return push_inst(compiler, NOP, UNMOVABLE_INS); +} + +/* --------------------------------------------------------------------- */ +/* Conditional instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + struct sljit_label *label; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_label(compiler)); + + if (compiler->last_label && compiler->last_label->size == compiler->size) + return compiler->last_label; + + label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); + PTR_FAIL_IF(!label); + set_label(label, compiler); + compiler->delay_slot = UNMOVABLE_INS; + return label; +} + +static sljit_ins get_cc(sljit_s32 type) +{ + switch (type) { + case SLJIT_EQUAL: + case SLJIT_MUL_NOT_OVERFLOW: + case SLJIT_NOT_EQUAL_F64: /* Unordered. */ + return DA(0x1); + + case SLJIT_NOT_EQUAL: + case SLJIT_MUL_OVERFLOW: + case SLJIT_EQUAL_F64: + return DA(0x9); + + case SLJIT_LESS: + case SLJIT_GREATER_F64: /* Unordered. */ + return DA(0x5); + + case SLJIT_GREATER_EQUAL: + case SLJIT_LESS_EQUAL_F64: + return DA(0xd); + + case SLJIT_GREATER: + case SLJIT_GREATER_EQUAL_F64: /* Unordered. 
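Like the other cases marked this way, the selected fcc condition also branches when the comparison is unordered, i.e. when either operand is NaN.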
*/ + return DA(0xc); + + case SLJIT_LESS_EQUAL: + case SLJIT_LESS_F64: + return DA(0x4); + + case SLJIT_SIG_LESS: + return DA(0x3); + + case SLJIT_SIG_GREATER_EQUAL: + return DA(0xb); + + case SLJIT_SIG_GREATER: + return DA(0xa); + + case SLJIT_SIG_LESS_EQUAL: + return DA(0x2); + + case SLJIT_OVERFLOW: + case SLJIT_UNORDERED_F64: + return DA(0x7); + + case SLJIT_NOT_OVERFLOW: + case SLJIT_ORDERED_F64: + return DA(0xf); + + default: + SLJIT_UNREACHABLE(); + return DA(0x8); + } +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) +{ + struct sljit_jump *jump; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_jump(compiler, type)); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + if (type < SLJIT_EQUAL_F64) { + jump->flags |= IS_COND; + if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & ICC_IS_SET)) + jump->flags |= IS_MOVABLE; +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + PTR_FAIL_IF(push_inst(compiler, BICC | get_cc(type ^ 1) | 5, UNMOVABLE_INS)); +#else +#error "Implementation required" +#endif + } + else if (type < SLJIT_JUMP) { + jump->flags |= IS_COND; + if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & FCC_IS_SET)) + jump->flags |= IS_MOVABLE; +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + PTR_FAIL_IF(push_inst(compiler, FBFCC | get_cc(type ^ 1) | 5, UNMOVABLE_INS)); +#else +#error "Implementation required" +#endif + } + else { + if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) + jump->flags |= IS_MOVABLE; + if (type >= SLJIT_FAST_CALL) + jump->flags |= IS_CALL; + } + + PTR_FAIL_IF(emit_const(compiler, TMP_REG1, 0)); + PTR_FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(TMP_REG1) | IMM(0), UNMOVABLE_INS)); + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); + + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); + + PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_jump(compiler, type); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) +{ + struct sljit_jump *jump = NULL; + sljit_s32 src_r; + + CHECK_ERROR(); + CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (FAST_IS_REG(src)) + src_r = src; + else if (src & SLJIT_IMM) { + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF(!jump); + set_jump(jump, compiler, JUMP_ADDR); + jump->u.target = srcw; + + if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) + jump->flags |= IS_MOVABLE; + if (type >= SLJIT_FAST_CALL) + jump->flags |= IS_CALL; + + FAIL_IF(emit_const(compiler, TMP_REG1, 0)); + src_r = TMP_REG1; + } + else { + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw)); + src_r = TMP_REG1; + } + + FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? 
TMP_LINK : 0) | S1(src_r) | IMM(0), UNMOVABLE_INS)); + if (jump) + jump->addr = compiler->size; + return push_inst(compiler, NOP, UNMOVABLE_INS); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw)); + src = TMP_REG1; + } + + FAIL_IF(call_with_args(compiler, arg_types, &src)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_ijump(compiler, type, src, srcw); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 type) +{ + sljit_s32 reg, flags = HAS_FLAGS(op) ? SET_FLAGS : 0; + + CHECK_ERROR(); + CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); + ADJUST_LOCAL_OFFSET(dst, dstw); + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + op = GET_OPCODE(op); + reg = (op < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG2; + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + if (op >= SLJIT_ADD && (dst & SLJIT_MEM)) + FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, dst, dstw, dst, dstw)); + + type &= 0xff; + if (type < SLJIT_EQUAL_F64) + FAIL_IF(push_inst(compiler, BICC | get_cc(type) | 3, UNMOVABLE_INS)); + else + FAIL_IF(push_inst(compiler, FBFCC | get_cc(type) | 3, UNMOVABLE_INS)); + + FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(1), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(0), UNMOVABLE_INS)); + + if (op >= SLJIT_ADD) { + flags |= CUMULATIVE_OP | IMM_OP | ALT_KEEP_CACHE; + if (dst & SLJIT_MEM) + return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, TMP_REG2, 0); + return emit_op(compiler, op, flags, dst, 0, dst, 0, TMP_REG2, 0); + } + + if (!(dst & SLJIT_MEM)) + return SLJIT_SUCCESS; + + return emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw); +#else +#error "Implementation required" +#endif +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 dst_reg, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw); +#else +#error "Implementation required" +#endif +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) +{ + struct sljit_const *const_; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); + PTR_FAIL_IF(!const_); + set_const(const_, compiler); + + dst_r = FAST_IS_REG(dst) ?
dst : TMP_REG2; + PTR_FAIL_IF(emit_const(compiler, dst_r, init_value)); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw)); + return const_; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + struct sljit_put_label *put_label; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label)); + PTR_FAIL_IF(!put_label); + set_put_label(put_label, compiler, 0); + + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; + PTR_FAIL_IF(emit_const(compiler, dst_r, 0)); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw)); + return put_label; +} diff --git a/src/3rd/pcre/sjsprc32.c b/src/3rd/pcre/sjsprc32.c new file mode 100644 index 00000000000..8079fad8df8 --- /dev/null +++ b/src/3rd/pcre/sjsprc32.c @@ -0,0 +1,286 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw imm) +{ + if (imm <= SIMM_MAX && imm >= SIMM_MIN) + return push_inst(compiler, OR | D(dst) | S1(0) | IMM(imm), DR(dst)); + + FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((imm >> 10) & 0x3fffff), DR(dst))); + return (imm & 0x3ff) ? push_inst(compiler, OR | D(dst) | S1(dst) | IMM_ARG | (imm & 0x3ff), DR(dst)) : SLJIT_SUCCESS; +} + +#define ARG2(flags, src2) ((flags & SRC2_IMM) ? 
IMM(src2) : S2(src2)) + +static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, + sljit_s32 dst, sljit_s32 src1, sljit_sw src2) +{ + SLJIT_COMPILE_ASSERT(ICC_IS_SET == SET_FLAGS, icc_is_set_and_set_flags_must_be_the_same); + + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: + case SLJIT_MOV_P: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if (dst != src2) + return push_inst(compiler, OR | D(dst) | S1(0) | S2(src2), DR(dst)); + return SLJIT_SUCCESS; + + case SLJIT_MOV_U8: + case SLJIT_MOV_S8: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_U8) + return push_inst(compiler, AND | D(dst) | S1(src2) | IMM(0xff), DR(dst)); + FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(24), DR(dst))); + return push_inst(compiler, SRA | D(dst) | S1(dst) | IMM(24), DR(dst)); + } + else if (dst != src2) + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; + + case SLJIT_MOV_U16: + case SLJIT_MOV_S16: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(16), DR(dst))); + return push_inst(compiler, (op == SLJIT_MOV_S16 ? SRA : SRL) | D(dst) | S1(dst) | IMM(16), DR(dst)); + } + else if (dst != src2) + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; + + case SLJIT_NOT: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + return push_inst(compiler, XNOR | (flags & SET_FLAGS) | D(dst) | S1(0) | S2(src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_CLZ: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(src2) | S2(0), SET_FLAGS)); + FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2(src2), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, BICC | DA(0x1) | (7 & DISP_MASK), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, OR | D(dst) | S1(0) | IMM(32), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, OR | D(dst) | S1(0) | IMM(-1), DR(dst))); + + /* Loop. 
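Each pass compares TMP_REG1 with zero, shifts it left by one bit, and branches back while the sign bit is still clear; the ADD in the branch delay slot increments dst every iteration. Since dst starts at -1, it ends up holding the leading-zero count.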
*/ + FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(0), SET_FLAGS)); + FAIL_IF(push_inst(compiler, SLL | D(TMP_REG1) | S1(TMP_REG1) | IMM(1), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, BICC | DA(0xe) | (-2 & DISP_MASK), UNMOVABLE_INS)); + return push_inst(compiler, ADD | D(dst) | S1(dst) | IMM(1), UNMOVABLE_INS); + + case SLJIT_ADD: + return push_inst(compiler, ADD | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_ADDC: + return push_inst(compiler, ADDC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_SUB: + return push_inst(compiler, SUB | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_SUBC: + return push_inst(compiler, SUBC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_MUL: + FAIL_IF(push_inst(compiler, SMUL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); + if (!(flags & SET_FLAGS)) + return SLJIT_SUCCESS; + FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(dst) | IMM(31), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, RDY | D(TMP_LINK), DR(TMP_LINK))); + return push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(TMP_LINK), MOVABLE_INS | SET_FLAGS); + + case SLJIT_AND: + return push_inst(compiler, AND | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_OR: + return push_inst(compiler, OR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_XOR: + return push_inst(compiler, XOR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_SHL: + FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); + return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); + + case SLJIT_LSHR: + FAIL_IF(push_inst(compiler, SRL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); + return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); + + case SLJIT_ASHR: + FAIL_IF(push_inst(compiler, SRA | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); + return !(flags & SET_FLAGS) ? 
SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); + } + + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; +} + +static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src) +{ + sljit_s32 reg_index = 8; + sljit_s32 word_reg_index = 8; + sljit_s32 float_arg_index = 1; + sljit_s32 double_arg_count = 0; + sljit_s32 float_offset = (16 + 6) * sizeof(sljit_sw); + sljit_s32 types = 0; + sljit_s32 reg = 0; + sljit_s32 move_to_tmp2 = 0; + + if (src) + reg = reg_map[*src & REG_MASK]; + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK); + + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + float_arg_index++; + if (reg_index == reg) + move_to_tmp2 = 1; + reg_index++; + break; + case SLJIT_ARG_TYPE_F64: + float_arg_index++; + double_arg_count++; + if (reg_index == reg || reg_index + 1 == reg) + move_to_tmp2 = 1; + reg_index += 2; + break; + default: + if (reg_index != word_reg_index && reg_index < 14 && reg_index == reg) + move_to_tmp2 = 1; + reg_index++; + word_reg_index++; + break; + } + + if (move_to_tmp2) { + move_to_tmp2 = 0; + if (reg < 14) + FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2A(reg), DR(TMP_REG1))); + *src = TMP_REG1; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + arg_types = types; + + while (arg_types) { + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + float_arg_index--; + FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS)); + float_offset -= sizeof(sljit_f64); + break; + case SLJIT_ARG_TYPE_F64: + float_arg_index--; + if (float_arg_index == 4 && double_arg_count == 4) { + FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM((16 + 7) * sizeof(sljit_sw)), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | (1 << 25) | S1(SLJIT_SP) | IMM((16 + 8) * sizeof(sljit_sw)), MOVABLE_INS)); + } + else + FAIL_IF(push_inst(compiler, STDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS)); + float_offset -= sizeof(sljit_f64); + break; + default: + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + float_offset = (16 + 6) * sizeof(sljit_sw); + + while (types) { + switch (types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + reg_index--; + if (reg_index < 14) + FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index)); + float_offset -= sizeof(sljit_f64); + break; + case SLJIT_ARG_TYPE_F64: + reg_index -= 2; + if (reg_index < 14) { + if ((reg_index & 0x1) != 0) { + FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index)); + if (reg_index < 13) + FAIL_IF(push_inst(compiler, LDUW | DA(reg_index + 1) | S1(SLJIT_SP) | IMM(float_offset + sizeof(sljit_sw)), reg_index + 1)); + } + else + FAIL_IF(push_inst(compiler, LDD | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index)); + } + float_offset -= sizeof(sljit_f64); + break; + default: + reg_index--; + word_reg_index--; + + if (reg_index != word_reg_index) { + if (reg_index < 14) + FAIL_IF(push_inst(compiler, OR | DA(reg_index) | S1(0) | S2A(word_reg_index), reg_index)); + else + FAIL_IF(push_inst(compiler, STW | DA(word_reg_index) | S1(SLJIT_SP) | IMM(92), word_reg_index)); + } + break; + } + + types >>= SLJIT_DEF_SHIFT; + } + + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw 
init_value) +{ + FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((init_value >> 10) & 0x3fffff), DR(dst))); + return push_inst(compiler, OR | D(dst) | S1(dst) | IMM_ARG | (init_value & 0x3ff), DR(dst)); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) +{ + sljit_ins *inst = (sljit_ins *)addr; + + SLJIT_ASSERT(((inst[0] & 0xc1c00000) == 0x01000000) && ((inst[1] & 0xc1f82000) == 0x80102000)); + inst[0] = (inst[0] & 0xffc00000) | ((new_target >> 10) & 0x3fffff); + inst[1] = (inst[1] & 0xfffffc00) | (new_target & 0x3ff); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 2); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) +{ + sljit_ins *inst = (sljit_ins *)addr; + + SLJIT_ASSERT(((inst[0] & 0xc1c00000) == 0x01000000) && ((inst[1] & 0xc1f82000) == 0x80102000)); + inst[0] = (inst[0] & 0xffc00000) | ((new_constant >> 10) & 0x3fffff); + inst[1] = (inst[1] & 0xfffffc00) | (new_constant & 0x3ff); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 2); +} diff --git a/src/3rd/pcre/sjutils.c b/src/3rd/pcre/sjutils.c index b29b4036b30..857492a1748 100644 --- a/src/3rd/pcre/sjutils.c +++ b/src/3rd/pcre/sjutils.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -48,12 +48,12 @@ static SLJIT_INLINE void allocator_release_lock(void) #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK) -SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void) +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void) { /* Always successful. */ } -SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void) +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void) { /* Always successful. */ } @@ -88,7 +88,7 @@ static SLJIT_INLINE void allocator_release_lock(void) static HANDLE global_mutex = 0; -SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void) +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void) { /* No idea what to do if an error occurs. Static mutexes should never fail... */ if (!global_mutex) @@ -97,7 +97,7 @@ SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void) WaitForSingleObject(global_mutex, INFINITE); } -SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void) +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void) { ReleaseMutex(global_mutex); } @@ -130,12 +130,12 @@ static SLJIT_INLINE void allocator_release_lock(void) static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER; -SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void) +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_grab_lock(void) { pthread_mutex_lock(&global_mutex); } -SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void) +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_release_lock(void) { pthread_mutex_unlock(&global_mutex); } @@ -154,7 +154,13 @@ SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void) #include "windows.h" #else /* Provides mmap function.
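mmap is used by sljit_allocate_stack below to reserve the stack area.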
*/ +#include <sys/types.h> #include <sys/mman.h> +#ifndef MAP_ANON +#ifdef MAP_ANONYMOUS +#define MAP_ANON MAP_ANONYMOUS +#endif +#endif /* For detecting the page size. */ #include <unistd.h> @@ -163,11 +169,11 @@ SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void) #include <fcntl.h> /* Some old systems do not have MAP_ANON. */ -static sljit_si dev_zero = -1; +static sljit_s32 dev_zero = -1; #if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED) -static SLJIT_INLINE sljit_si open_dev_zero(void) +static SLJIT_INLINE sljit_s32 open_dev_zero(void) { dev_zero = open("/dev/zero", O_RDWR); return dev_zero < 0; @@ -179,10 +185,13 @@ static SLJIT_INLINE sljit_si open_dev_zero(void) static pthread_mutex_t dev_zero_mutex = PTHREAD_MUTEX_INITIALIZER; -static SLJIT_INLINE sljit_si open_dev_zero(void) +static SLJIT_INLINE sljit_s32 open_dev_zero(void) { pthread_mutex_lock(&dev_zero_mutex); - dev_zero = open("/dev/zero", O_RDWR); + /* dev_zero might have been initialized by another thread while this one waited for the mutex. */ + if (dev_zero < 0) { + dev_zero = open("/dev/zero", O_RDWR); + } pthread_mutex_unlock(&dev_zero_mutex); return dev_zero < 0; } @@ -200,18 +209,16 @@ static SLJIT_INLINE sljit_si open_dev_zero(void) /* Planning to make it even more clever in the future. */ static sljit_sw sljit_page_align = 0; -SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_FUNC sljit_allocate_stack(sljit_uw start_size, sljit_uw max_size, void *allocator_data) { struct sljit_stack *stack; - union { - void *ptr; - sljit_uw uw; - } base; + void *ptr; #ifdef _WIN32 SYSTEM_INFO si; #endif - if (limit > max_limit || limit < 1) + SLJIT_UNUSED_ARG(allocator_data); + if (start_size > max_size || start_size < 1) return NULL; #ifdef _WIN32 @@ -229,102 +236,102 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(slj } #endif - /* Align limit and max_limit. */ - max_limit = (max_limit + sljit_page_align) & ~sljit_page_align; - - stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack)); + stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack), allocator_data); if (!stack) return NULL; + /* Align max_size.
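sljit_page_align holds the page size minus one, so this rounds the reservation up to a whole number of pages before VirtualAlloc/mmap.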
*/ + max_size = (max_size + sljit_page_align) & ~sljit_page_align; + #ifdef _WIN32 - base.ptr = VirtualAlloc(NULL, max_limit, MEM_RESERVE, PAGE_READWRITE); - if (!base.ptr) { - SLJIT_FREE(stack); + ptr = VirtualAlloc(NULL, max_size, MEM_RESERVE, PAGE_READWRITE); + if (!ptr) { + SLJIT_FREE(stack, allocator_data); return NULL; } - stack->base = base.uw; - stack->limit = stack->base; - stack->max_limit = stack->base + max_limit; - if (sljit_stack_resize(stack, stack->base + limit)) { - sljit_free_stack(stack); + + stack->min_start = (sljit_u8 *)ptr; + stack->end = stack->min_start + max_size; + stack->start = stack->end; + + if (sljit_stack_resize(stack, stack->end - start_size) == NULL) { + sljit_free_stack(stack, allocator_data); return NULL; } #else #ifdef MAP_ANON - base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + ptr = mmap(NULL, max_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); #else if (dev_zero < 0) { if (open_dev_zero()) { - SLJIT_FREE(stack); + SLJIT_FREE(stack, allocator_data); return NULL; } } - base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0); + ptr = mmap(NULL, max_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0); #endif - if (base.ptr == MAP_FAILED) { - SLJIT_FREE(stack); + if (ptr == MAP_FAILED) { + SLJIT_FREE(stack, allocator_data); return NULL; } - stack->base = base.uw; - stack->limit = stack->base + limit; - stack->max_limit = stack->base + max_limit; + stack->min_start = (sljit_u8 *)ptr; + stack->end = stack->min_start + max_size; + stack->start = stack->end - start_size; #endif - stack->top = stack->base; + stack->top = stack->end; return stack; } #undef PAGE_ALIGN -SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack* stack) +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *stack, void *allocator_data) { + SLJIT_UNUSED_ARG(allocator_data); #ifdef _WIN32 - VirtualFree((void*)stack->base, 0, MEM_RELEASE); + VirtualFree((void*)stack->min_start, 0, MEM_RELEASE); #else - munmap((void*)stack->base, stack->max_limit - stack->base); + munmap((void*)stack->min_start, stack->end - stack->min_start); #endif - SLJIT_FREE(stack); + SLJIT_FREE(stack, allocator_data); } -SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack* stack, sljit_uw new_limit) +SLJIT_API_FUNC_ATTRIBUTE sljit_u8 *SLJIT_FUNC sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_start) { - sljit_uw aligned_old_limit; - sljit_uw aligned_new_limit; + sljit_uw aligned_old_start; + sljit_uw aligned_new_start; + + if ((new_start < stack->min_start) || (new_start >= stack->end)) + return NULL; - if ((new_limit > stack->max_limit) || (new_limit < stack->base)) - return -1; #ifdef _WIN32 - aligned_new_limit = (new_limit + sljit_page_align) & ~sljit_page_align; - aligned_old_limit = (stack->limit + sljit_page_align) & ~sljit_page_align; - if (aligned_new_limit != aligned_old_limit) { - if (aligned_new_limit > aligned_old_limit) { - if (!VirtualAlloc((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_COMMIT, PAGE_READWRITE)) - return -1; + aligned_new_start = (sljit_uw)new_start & ~sljit_page_align; + aligned_old_start = ((sljit_uw)stack->start) & ~sljit_page_align; + if (aligned_new_start != aligned_old_start) { + if (aligned_new_start < aligned_old_start) { + if (!VirtualAlloc((void*)aligned_new_start, aligned_old_start - aligned_new_start, MEM_COMMIT, PAGE_READWRITE)) + return NULL; } else { - if 
(!VirtualFree((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_DECOMMIT)) - return -1; + if (!VirtualFree((void*)aligned_old_start, aligned_new_start - aligned_old_start, MEM_DECOMMIT)) + return NULL; } } - stack->limit = new_limit; - return 0; #else - if (new_limit >= stack->limit) { - stack->limit = new_limit; - return 0; - } - aligned_new_limit = (new_limit + sljit_page_align) & ~sljit_page_align; - aligned_old_limit = (stack->limit + sljit_page_align) & ~sljit_page_align; - /* If madvise is available, we release the unnecessary space. */ + if (stack->start < new_start) { + aligned_new_start = (sljit_uw)new_start & ~sljit_page_align; + aligned_old_start = ((sljit_uw)stack->start) & ~sljit_page_align; + /* If madvise is available, we release the unnecessary space. */ #if defined(MADV_DONTNEED) - if (aligned_new_limit < aligned_old_limit) - madvise((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MADV_DONTNEED); + if (aligned_new_start > aligned_old_start) + madvise((void*)aligned_old_start, aligned_new_start - aligned_old_start, MADV_DONTNEED); #elif defined(POSIX_MADV_DONTNEED) - if (aligned_new_limit < aligned_old_limit) - posix_madvise((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, POSIX_MADV_DONTNEED); + if (aligned_new_start > aligned_old_start) + posix_madvise((void*)aligned_old_start, aligned_new_start - aligned_old_start, POSIX_MADV_DONTNEED); #endif - stack->limit = new_limit; - return 0; + } #endif + stack->start = new_start; + return new_start; } #endif /* SLJIT_UTIL_STACK */ diff --git a/src/3rd/pcre/sjx8632.c b/src/3rd/pcre/sjx8632.c index 2866e8f2a15..34a3a3d9402 100644 --- a/src/3rd/pcre/sjx8632.c +++ b/src/3rd/pcre/sjx8632.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -26,20 +26,22 @@ /* x86 32-bit arch dependent functions. 
*/ -static sljit_si emit_do_imm(struct sljit_compiler *compiler, sljit_ub opcode, sljit_sw imm) +static sljit_s32 emit_do_imm(struct sljit_compiler *compiler, sljit_u8 opcode, sljit_sw imm) { - sljit_ub *inst; + sljit_u8 *inst; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + sizeof(sljit_sw)); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + sizeof(sljit_sw)); FAIL_IF(!inst); INC_SIZE(1 + sizeof(sljit_sw)); *inst++ = opcode; - *(sljit_sw*)inst = imm; + sljit_unaligned_store_sw(inst, imm); return SLJIT_SUCCESS; } -static sljit_ub* generate_far_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, sljit_si type) +static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_sw executable_offset) { + sljit_s32 type = jump->flags >> TYPE_SHIFT; + if (type == SLJIT_JUMP) { *code_ptr++ = JMP_i32; jump->addr++; @@ -57,176 +59,256 @@ static sljit_ub* generate_far_jump_code(struct sljit_jump *jump, sljit_ub *code_ if (jump->flags & JUMP_LABEL) jump->flags |= PATCH_MW; else - *(sljit_sw*)code_ptr = jump->u.target - (jump->addr + 4); + sljit_unaligned_store_sw(code_ptr, jump->u.target - (jump->addr + 4) - (sljit_uw)executable_offset); code_ptr += 4; return code_ptr; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - sljit_si size; - sljit_si locals_offset; - sljit_ub *inst; + sljit_s32 args, size; + sljit_u8 *inst; CHECK_ERROR(); - check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - compiler->scratches = scratches; - compiler->saveds = saveds; + args = get_arg_count(arg_types); compiler->args = args; - compiler->flags_saved = 0; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; + + /* [esp+0] for saving temporaries and function calls. */ + compiler->stack_tmp_size = 2 * sizeof(sljit_sw); + +#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + if (scratches > 3) + compiler->stack_tmp_size = 3 * sizeof(sljit_sw); #endif + compiler->saveds_offset = compiler->stack_tmp_size; + if (scratches > 3) + compiler->saveds_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * sizeof(sljit_sw); + + compiler->locals_offset = compiler->saveds_offset; + + if (saveds > 3) + compiler->locals_offset += (saveds - 3) * sizeof(sljit_sw); + + if (options & SLJIT_F64_ALIGNMENT) + compiler->locals_offset = (compiler->locals_offset + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1); + + size = 1 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3); #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - size = 1 + (saveds <= 3 ? saveds : 3) + (args > 0 ? (args * 2) : 0) + (args > 2 ? 2 : 0); + size += (args > 0 ? (args * 2) : 0) + (args > 2 ? 2 : 0); #else - size = 1 + (saveds <= 3 ? saveds : 3) + (args > 0 ? (2 + args * 3) : 0); + size += (args > 0 ? 
(2 + args * 3) : 0); #endif - inst = (sljit_ub*)ensure_buf(compiler, 1 + size); + inst = (sljit_u8*)ensure_buf(compiler, 1 + size); FAIL_IF(!inst); INC_SIZE(size); - PUSH_REG(reg_map[TMP_REGISTER]); + PUSH_REG(reg_map[TMP_REG1]); #if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) if (args > 0) { *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_map[TMP_REGISTER] << 3) | 0x4 /* esp */; + *inst++ = MOD_REG | (reg_map[TMP_REG1] << 3) | 0x4 /* esp */; } #endif - if (saveds > 2) - PUSH_REG(reg_map[SLJIT_SAVED_REG3]); - if (saveds > 1) - PUSH_REG(reg_map[SLJIT_SAVED_REG2]); - if (saveds > 0) - PUSH_REG(reg_map[SLJIT_SAVED_REG1]); + if (saveds > 2 || scratches > 9) + PUSH_REG(reg_map[SLJIT_S2]); + if (saveds > 1 || scratches > 10) + PUSH_REG(reg_map[SLJIT_S1]); + if (saveds > 0 || scratches > 11) + PUSH_REG(reg_map[SLJIT_S0]); #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) if (args > 0) { - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG1] << 3) | reg_map[SLJIT_SCRATCH_REG3]; + inst[0] = MOV_r_rm; + inst[1] = MOD_REG | (reg_map[SLJIT_S0] << 3) | reg_map[SLJIT_R2]; + inst += 2; } if (args > 1) { - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG2] << 3) | reg_map[SLJIT_SCRATCH_REG2]; + inst[0] = MOV_r_rm; + inst[1] = MOD_REG | (reg_map[SLJIT_S1] << 3) | reg_map[SLJIT_R1]; + inst += 2; } if (args > 2) { - *inst++ = MOV_r_rm; - *inst++ = MOD_DISP8 | (reg_map[SLJIT_SAVED_REG3] << 3) | 0x4 /* esp */; - *inst++ = 0x24; - *inst++ = sizeof(sljit_sw) * (3 + 2); /* saveds >= 3 as well. */ + inst[0] = MOV_r_rm; + inst[1] = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | 0x4 /* esp */; + inst[2] = 0x24; + inst[3] = sizeof(sljit_sw) * (3 + 2); /* saveds >= 3 as well. */ } #else if (args > 0) { - *inst++ = MOV_r_rm; - *inst++ = MOD_DISP8 | (reg_map[SLJIT_SAVED_REG1] << 3) | reg_map[TMP_REGISTER]; - *inst++ = sizeof(sljit_sw) * 2; + inst[0] = MOV_r_rm; + inst[1] = MOD_DISP8 | (reg_map[SLJIT_S0] << 3) | reg_map[TMP_REG1]; + inst[2] = sizeof(sljit_sw) * 2; + inst += 3; } if (args > 1) { - *inst++ = MOV_r_rm; - *inst++ = MOD_DISP8 | (reg_map[SLJIT_SAVED_REG2] << 3) | reg_map[TMP_REGISTER]; - *inst++ = sizeof(sljit_sw) * 3; + inst[0] = MOV_r_rm; + inst[1] = MOD_DISP8 | (reg_map[SLJIT_S1] << 3) | reg_map[TMP_REG1]; + inst[2] = sizeof(sljit_sw) * 3; + inst += 3; } if (args > 2) { - *inst++ = MOV_r_rm; - *inst++ = MOD_DISP8 | (reg_map[SLJIT_SAVED_REG3] << 3) | reg_map[TMP_REGISTER]; - *inst++ = sizeof(sljit_sw) * 4; + inst[0] = MOV_r_rm; + inst[1] = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | reg_map[TMP_REG1]; + inst[2] = sizeof(sljit_sw) * 4; } #endif -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - locals_offset = 2 * sizeof(sljit_uw); -#else - SLJIT_COMPILE_ASSERT(FIXED_LOCALS_OFFSET >= 2 * sizeof(sljit_uw), require_at_least_two_words); - locals_offset = FIXED_LOCALS_OFFSET; -#endif - compiler->scratches_start = locals_offset; - if (scratches > 3) - locals_offset += (scratches - 3) * sizeof(sljit_uw); - compiler->saveds_start = locals_offset; - if (saveds > 3) - locals_offset += (saveds - 3) * sizeof(sljit_uw); - compiler->locals_offset = locals_offset; + SLJIT_ASSERT(SLJIT_LOCALS_OFFSET > 0); + #if defined(__APPLE__) - saveds = (2 + (saveds <= 3 ? saveds : 3)) * sizeof(sljit_uw); - local_size = ((locals_offset + saveds + local_size + 15) & ~15) - saveds; + /* Ignore pushed registers and SLJIT_LOCALS_OFFSET when computing the aligned local size. */ + saveds = (2 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? 
saveds : 3)) * sizeof(sljit_uw); + local_size = ((SLJIT_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds; #else - local_size = locals_offset + ((local_size + sizeof(sljit_uw) - 1) & ~(sizeof(sljit_uw) - 1)); + if (options & SLJIT_F64_ALIGNMENT) + local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1)); + else + local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_sw) - 1) & ~(sizeof(sljit_sw) - 1)); #endif compiler->local_size = local_size; + #ifdef _WIN32 - if (local_size > 1024) { -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_SCRATCH_REG1], local_size)); -#else - local_size -= FIXED_LOCALS_OFFSET; - FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_SCRATCH_REG1], local_size)); - FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32, - SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, FIXED_LOCALS_OFFSET)); -#endif - FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack))); + if (local_size > 0) { + if (local_size <= 4 * 4096) { + if (local_size > 4096) + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096); + if (local_size > 2 * 4096) + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 2); + if (local_size > 3 * 4096) + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3); + } + else { + EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_SP, 0); + EMIT_MOV(compiler, SLJIT_R1, 0, SLJIT_IMM, (local_size - 1) >> 12); + + SLJIT_ASSERT (reg_map[SLJIT_R0] == 0); + + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_R0), -4096); + FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), + SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 4096)); + FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), + SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1)); + + inst = (sljit_u8*)ensure_buf(compiler, 1 + 2); + FAIL_IF(!inst); + + INC_SIZE(2); + inst[0] = JNE_i8; + inst[1] = (sljit_s8) -16; + } + + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size); } #endif SLJIT_ASSERT(local_size > 0); - return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32, - SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, local_size); + +#if !defined(__APPLE__) + if (options & SLJIT_F64_ALIGNMENT) { + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_SP, 0); + + /* Some space might have been allocated during sljit_grow_stack() above on WIN32. */ + FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), + SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size + sizeof(sljit_sw))); + +#if defined _WIN32 && !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + if (compiler->local_size > 1024) + FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD), + TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, sizeof(sljit_sw))); +#endif + + inst = (sljit_u8*)ensure_buf(compiler, 1 + 6); + FAIL_IF(!inst); + + INC_SIZE(6); + inst[0] = GROUP_BINARY_81; + inst[1] = MOD_REG | AND | reg_map[SLJIT_SP]; + sljit_unaligned_store_sw(inst + 2, ~(sizeof(sljit_f64) - 1)); + + /* The real local size must be used.
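compiler->local_size gives the slot where the pre-alignment stack pointer saved in TMP_REG1 is spilled; sljit_emit_return reloads it from the same offset.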
*/ + return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), compiler->local_size, TMP_REG1, 0); + } +#endif + return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), + SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size); } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - sljit_si locals_offset; + CHECK_ERROR(); + CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - CHECK_ERROR_VOID(); - check_sljit_set_context(compiler, args, scratches, saveds, local_size); + compiler->args = get_arg_count(arg_types); - compiler->scratches = scratches; - compiler->saveds = saveds; - compiler->args = args; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; -#endif + /* [esp+0] for saving temporaries and function calls. */ + compiler->stack_tmp_size = 2 * sizeof(sljit_sw); -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - locals_offset = 2 * sizeof(sljit_uw); -#else - locals_offset = FIXED_LOCALS_OFFSET; +#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + if (scratches > 3) + compiler->stack_tmp_size = 3 * sizeof(sljit_sw); #endif - compiler->scratches_start = locals_offset; + + compiler->saveds_offset = compiler->stack_tmp_size; if (scratches > 3) - locals_offset += (scratches - 3) * sizeof(sljit_uw); - compiler->saveds_start = locals_offset; + compiler->saveds_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * sizeof(sljit_sw); + + compiler->locals_offset = compiler->saveds_offset; + if (saveds > 3) - locals_offset += (saveds - 3) * sizeof(sljit_uw); - compiler->locals_offset = locals_offset; + compiler->locals_offset += (saveds - 3) * sizeof(sljit_sw); + + if (options & SLJIT_F64_ALIGNMENT) + compiler->locals_offset = (compiler->locals_offset + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1); + #if defined(__APPLE__) - saveds = (2 + (saveds <= 3 ? saveds : 3)) * sizeof(sljit_uw); - compiler->local_size = ((locals_offset + saveds + local_size + 15) & ~15) - saveds; + saveds = (2 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? 
saveds : 3)) * sizeof(sljit_uw); + compiler->local_size = ((SLJIT_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds; #else - compiler->local_size = locals_offset + ((local_size + sizeof(sljit_uw) - 1) & ~(sizeof(sljit_uw) - 1)); + if (options & SLJIT_F64_ALIGNMENT) + compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1)); + else + compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_sw) - 1) & ~(sizeof(sljit_sw) - 1)); #endif + return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { - sljit_si size; - sljit_ub *inst; + sljit_s32 size; + sljit_u8 *inst; CHECK_ERROR(); - check_sljit_emit_return(compiler, op, src, srcw); + CHECK(check_sljit_emit_return(compiler, op, src, srcw)); SLJIT_ASSERT(compiler->args >= 0); - compiler->flags_saved = 0; FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); SLJIT_ASSERT(compiler->local_size > 0); - FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32, - SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, compiler->local_size)); - size = 2 + (compiler->saveds <= 3 ? compiler->saveds : 3); +#if !defined(__APPLE__) + if (compiler->options & SLJIT_F64_ALIGNMENT) + EMIT_MOV(compiler, SLJIT_SP, 0, SLJIT_MEM1(SLJIT_SP), compiler->local_size) + else + FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD), + SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size)); +#else + FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD), + SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size)); +#endif + + size = 2 + (compiler->scratches > 7 ? (compiler->scratches - 7) : 0) + + (compiler->saveds <= 3 ? compiler->saveds : 3); #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) if (compiler->args > 2) size += 2; @@ -234,18 +316,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compi if (compiler->args > 0) size += 2; #endif - inst = (sljit_ub*)ensure_buf(compiler, 1 + size); + inst = (sljit_u8*)ensure_buf(compiler, 1 + size); FAIL_IF(!inst); INC_SIZE(size); - if (compiler->saveds > 0) - POP_REG(reg_map[SLJIT_SAVED_REG1]); - if (compiler->saveds > 1) - POP_REG(reg_map[SLJIT_SAVED_REG2]); - if (compiler->saveds > 2) - POP_REG(reg_map[SLJIT_SAVED_REG3]); - POP_REG(reg_map[TMP_REGISTER]); + if (compiler->saveds > 0 || compiler->scratches > 11) + POP_REG(reg_map[SLJIT_S0]); + if (compiler->saveds > 1 || compiler->scratches > 10) + POP_REG(reg_map[SLJIT_S1]); + if (compiler->saveds > 2 || compiler->scratches > 9) + POP_REG(reg_map[SLJIT_S2]); + POP_REG(reg_map[TMP_REG1]); #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) if (compiler->args > 2) RET_I16(sizeof(sljit_sw)); @@ -263,16 +345,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compi /* --------------------------------------------------------------------- */ /* Size contains the flags as well. */ -static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si size, +static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32 size, /* The register or immediate operand. */ - sljit_si a, sljit_sw imma, + sljit_s32 a, sljit_sw imma, /* The general operand (not immediate). 
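*/

For orientation, sljit_set_context() above lays out the x86-32 frame as [esp+0 temporary/call area][spill slots for extra scratches][spill slots for extra saveds][user locals], with the locals optionally rounded up for SLJIT_F64_ALIGNMENT. A sketch of the same arithmetic, assuming 4-byte sljit_sw and the non-fastcall two-slot temporary area (the struct and function names are illustrative only):

    #include <stdint.h>

    struct frame_layout {
        int32_t stack_tmp_size; /* [esp+0] area used for temporaries and calls */
        int32_t saveds_offset;  /* first spill slot for extra saved registers */
        int32_t locals_offset;  /* user locals start here */
    };

    static struct frame_layout layout_frame(int scratches, int saveds, int f64_align)
    {
        struct frame_layout fl;

        fl.stack_tmp_size = 2 * (int32_t)sizeof(int32_t); /* 3 slots when scratches > 3 and not fastcall */

        fl.saveds_offset = fl.stack_tmp_size;
        if (scratches > 3) /* at most 6 extra scratch registers are spilled */
            fl.saveds_offset += (scratches > 9 ? 6 : scratches - 3) * (int32_t)sizeof(int32_t);

        fl.locals_offset = fl.saveds_offset;
        if (saveds > 3)
            fl.locals_offset += (saveds - 3) * (int32_t)sizeof(int32_t);

        if (f64_align) /* SLJIT_F64_ALIGNMENT rounds to sizeof(sljit_f64) */
            fl.locals_offset = (fl.locals_offset + 7) & ~7;

        return fl;
    }

 /* The general operand (not immediate).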
*/ - sljit_si b, sljit_sw immb) + sljit_s32 b, sljit_sw immb) { - sljit_ub *inst; - sljit_ub *buf_ptr; - sljit_si flags = size & ~0xf; - sljit_si inst_size; + sljit_u8 *inst; + sljit_u8 *buf_ptr; + sljit_s32 flags = size & ~0xf; + sljit_s32 inst_size; /* Both cannot be switched on. */ SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS)); @@ -280,41 +362,37 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0); /* Both size flags cannot be switched on. */ SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG)); -#if (defined SLJIT_SSE2 && SLJIT_SSE2) /* SSE2 and immediate is not possible. */ SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2)); SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3) && (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66) && (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66)); -#endif size &= 0xf; inst_size = size; -#if (defined SLJIT_SSE2 && SLJIT_SSE2) if (flags & (EX86_PREF_F2 | EX86_PREF_F3)) inst_size++; -#endif if (flags & EX86_PREF_66) inst_size++; /* Calculate size of b. */ inst_size += 1; /* mod r/m byte. */ if (b & SLJIT_MEM) { - if ((b & 0x0f) == SLJIT_UNUSED) + if ((b & REG_MASK) == SLJIT_UNUSED) inst_size += sizeof(sljit_sw); - else if (immb != 0 && !(b & 0xf0)) { + else if (immb != 0 && !(b & OFFS_REG_MASK)) { /* Immediate operand. */ if (immb <= 127 && immb >= -128) - inst_size += sizeof(sljit_sb); + inst_size += sizeof(sljit_s8); else inst_size += sizeof(sljit_sw); } - if ((b & 0xf) == SLJIT_LOCALS_REG && !(b & 0xf0)) - b |= SLJIT_LOCALS_REG << 4; + if ((b & REG_MASK) == SLJIT_SP && !(b & OFFS_REG_MASK)) + b |= TO_OFFS_REG(SLJIT_SP); - if ((b & 0xf0) != SLJIT_UNUSED) + if ((b & OFFS_REG_MASK) != SLJIT_UNUSED) inst_size += 1; /* SIB byte. */ } @@ -343,17 +421,15 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si else SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG); - inst = (sljit_ub*)ensure_buf(compiler, 1 + inst_size); + inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size); PTR_FAIL_IF(!inst); /* Encoding the byte. */ INC_SIZE(inst_size); -#if (defined SLJIT_SSE2 && SLJIT_SSE2) if (flags & EX86_PREF_F2) *inst++ = 0xf2; if (flags & EX86_PREF_F3) *inst++ = 0xf3; -#endif if (flags & EX86_PREF_66) *inst++ = 0x66; @@ -364,17 +440,12 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM)) *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81; - if ((a & SLJIT_IMM) || (a == 0)) + if (a & SLJIT_IMM) *buf_ptr = 0; -#if (defined SLJIT_SSE2 && SLJIT_SSE2) - else if (!(flags & EX86_SSE2)) + else if (!(flags & EX86_SSE2_OP1)) *buf_ptr = reg_map[a] << 3; else *buf_ptr = a << 3; -#else - else - *buf_ptr = reg_map[a] << 3; -#endif } else { if (a & SLJIT_IMM) { @@ -388,13 +459,9 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si } if (!(b & SLJIT_MEM)) -#if (defined SLJIT_SSE2 && SLJIT_SSE2) - *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2)) ? reg_map[b] : b); -#else - *buf_ptr++ |= MOD_REG + reg_map[b]; -#endif - else if ((b & 0x0f) != SLJIT_UNUSED) { - if ((b & 0xf0) == SLJIT_UNUSED || (b & 0xf0) == (SLJIT_LOCALS_REG << 4)) { + *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? 
reg_map[b] : b); + else if ((b & REG_MASK) != SLJIT_UNUSED) { + if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) { if (immb != 0) { if (immb <= 127 && immb >= -128) *buf_ptr |= 0x40; @@ -402,30 +469,30 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si *buf_ptr |= 0x80; } - if ((b & 0xf0) == SLJIT_UNUSED) - *buf_ptr++ |= reg_map[b & 0x0f]; + if ((b & OFFS_REG_MASK) == SLJIT_UNUSED) + *buf_ptr++ |= reg_map[b & REG_MASK]; else { *buf_ptr++ |= 0x04; - *buf_ptr++ = reg_map[b & 0x0f] | (reg_map[(b >> 4) & 0x0f] << 3); + *buf_ptr++ = reg_map[b & REG_MASK] | (reg_map[OFFS_REG(b)] << 3); } if (immb != 0) { if (immb <= 127 && immb >= -128) *buf_ptr++ = immb; /* 8 bit displacement. */ else { - *(sljit_sw*)buf_ptr = immb; /* 32 bit displacement. */ + sljit_unaligned_store_sw(buf_ptr, immb); /* 32 bit displacement. */ buf_ptr += sizeof(sljit_sw); } } } else { *buf_ptr++ |= 0x04; - *buf_ptr++ = reg_map[b & 0x0f] | (reg_map[(b >> 4) & 0x0f] << 3) | (immb << 6); + *buf_ptr++ = reg_map[b & REG_MASK] | (reg_map[OFFS_REG(b)] << 3) | (immb << 6); } } else { *buf_ptr++ |= 0x05; - *(sljit_sw*)buf_ptr = immb; /* 32 bit displacement. */ + sljit_unaligned_store_sw(buf_ptr, immb); /* 32 bit displacement. */ buf_ptr += sizeof(sljit_sw); } @@ -433,9 +500,9 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si if (flags & EX86_BYTE_ARG) *buf_ptr = imma; else if (flags & EX86_HALF_ARG) - *(short*)buf_ptr = imma; + sljit_unaligned_store_s16(buf_ptr, imma); else if (!(flags & EX86_SHIFT_INS)) - *(sljit_sw*)buf_ptr = imma; + sljit_unaligned_store_sw(buf_ptr, imma); } return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1); @@ -445,61 +512,343 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si /* Call / return instructions */ /* --------------------------------------------------------------------- */ -static SLJIT_INLINE sljit_si call_with_args(struct sljit_compiler *compiler, sljit_si type) +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + +static sljit_s32 c_fast_call_get_stack_size(sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr) { - sljit_ub *inst; + sljit_s32 stack_size = 0; + sljit_s32 word_arg_count = 0; + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + stack_size += sizeof(sljit_f32); + break; + case SLJIT_ARG_TYPE_F64: + stack_size += sizeof(sljit_f64); + break; + default: + word_arg_count++; + if (word_arg_count > 2) + stack_size += sizeof(sljit_sw); + break; + } -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - inst = (sljit_ub*)ensure_buf(compiler, type >= SLJIT_CALL3 ? 1 + 2 + 1 : 1 + 2); - FAIL_IF(!inst); - INC_SIZE(type >= SLJIT_CALL3 ? 
2 + 1 : 2); + arg_types >>= SLJIT_DEF_SHIFT; + } + + if (word_arg_count_ptr) + *word_arg_count_ptr = word_arg_count; + + return stack_size; +} + +static sljit_s32 c_fast_call_with_args(struct sljit_compiler *compiler, + sljit_s32 arg_types, sljit_s32 stack_size, sljit_s32 word_arg_count, sljit_s32 swap_args) +{ + sljit_u8 *inst; + sljit_s32 float_arg_count; - if (type >= SLJIT_CALL3) - PUSH_REG(reg_map[SLJIT_SCRATCH_REG3]); - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_map[SLJIT_SCRATCH_REG3] << 3) | reg_map[SLJIT_SCRATCH_REG1]; + if (stack_size == sizeof(sljit_sw) && word_arg_count == 3) { + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + PUSH_REG(reg_map[SLJIT_R2]); + } + else if (stack_size > 0) { + if (word_arg_count >= 4) + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), compiler->saveds_offset - sizeof(sljit_sw)); + + FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), + SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size)); + + stack_size = 0; + arg_types >>= SLJIT_DEF_SHIFT; + word_arg_count = 0; + float_arg_count = 0; + while (arg_types) { + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + float_arg_count++; + FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count)); + stack_size += sizeof(sljit_f32); + break; + case SLJIT_ARG_TYPE_F64: + float_arg_count++; + FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count)); + stack_size += sizeof(sljit_f64); + break; + default: + word_arg_count++; + if (word_arg_count == 3) { + EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, SLJIT_R2, 0); + stack_size += sizeof(sljit_sw); + } + else if (word_arg_count == 4) { + EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, TMP_REG1, 0); + stack_size += sizeof(sljit_sw); + } + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + } + + if (word_arg_count > 0) { + if (swap_args) { + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + + *inst++ = XCHG_EAX_r | reg_map[SLJIT_R2]; + } + else { + inst = (sljit_u8*)ensure_buf(compiler, 1 + 2); + FAIL_IF(!inst); + INC_SIZE(2); + + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_map[SLJIT_R2] << 3) | reg_map[SLJIT_R0]; + } + } + + return SLJIT_SUCCESS; +} + +#endif + +static sljit_s32 cdecl_call_get_stack_size(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr) +{ + sljit_s32 stack_size = 0; + sljit_s32 word_arg_count = 0; + + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + stack_size += sizeof(sljit_f32); + break; + case SLJIT_ARG_TYPE_F64: + stack_size += sizeof(sljit_f64); + break; + default: + word_arg_count++; + stack_size += sizeof(sljit_sw); + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + if (word_arg_count_ptr) + *word_arg_count_ptr = word_arg_count; + + if (stack_size <= compiler->stack_tmp_size) + return 0; + +#if defined(__APPLE__) + return ((stack_size - compiler->stack_tmp_size + 15) & ~15); #else - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4 * (type - SLJIT_CALL0)); + return stack_size - compiler->stack_tmp_size; +#endif +} + +static sljit_s32 cdecl_call_with_args(struct sljit_compiler *compiler, + sljit_s32 arg_types, sljit_s32 stack_size, sljit_s32 word_arg_count) +{ + sljit_s32 float_arg_count = 0; + + if (word_arg_count >= 4) + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), compiler->saveds_offset - sizeof(sljit_sw)); + + if (stack_size > 0) + 
FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), + SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size)); + + stack_size = 0; + word_arg_count = 0; + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + float_arg_count++; + FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count)); + stack_size += sizeof(sljit_f32); + break; + case SLJIT_ARG_TYPE_F64: + float_arg_count++; + FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count)); + stack_size += sizeof(sljit_f64); + break; + default: + word_arg_count++; + EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, (word_arg_count >= 4) ? TMP_REG1 : word_arg_count, 0); + stack_size += sizeof(sljit_sw); + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + return SLJIT_SUCCESS; +} + +static sljit_s32 post_call_with_args(struct sljit_compiler *compiler, + sljit_s32 arg_types, sljit_s32 stack_size) +{ + sljit_u8 *inst; + sljit_s32 single; + + if (stack_size > 0) + FAIL_IF(emit_cum_binary(compiler, BINARY_OPCODE(ADD), + SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, stack_size)); + + if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32) + return SLJIT_SUCCESS; + + single = ((arg_types & SLJIT_DEF_MASK) == SLJIT_ARG_TYPE_F32); + + inst = (sljit_u8*)ensure_buf(compiler, 1 + 3); FAIL_IF(!inst); - INC_SIZE(4 * (type - SLJIT_CALL0)); - - *inst++ = MOV_rm_r; - *inst++ = MOD_DISP8 | (reg_map[SLJIT_SCRATCH_REG1] << 3) | 0x4 /* SIB */; - *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_LOCALS_REG]; - *inst++ = 0; - if (type >= SLJIT_CALL2) { - *inst++ = MOV_rm_r; - *inst++ = MOD_DISP8 | (reg_map[SLJIT_SCRATCH_REG2] << 3) | 0x4 /* SIB */; - *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_LOCALS_REG]; - *inst++ = sizeof(sljit_sw); + INC_SIZE(3); + inst[0] = single ? 
FSTPS : FSTPD; + inst[1] = (0x03 << 3) | 0x04; + inst[2] = (0x04 << 3) | reg_map[SLJIT_SP]; + + return emit_sse2_load(compiler, single, SLJIT_FR0, SLJIT_MEM1(SLJIT_SP), 0); +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ + struct sljit_jump *jump; + sljit_s32 stack_size = 0; + sljit_s32 word_arg_count; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); + +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + if ((type & 0xff) == SLJIT_CALL) { + stack_size = c_fast_call_get_stack_size(arg_types, &word_arg_count); + PTR_FAIL_IF(c_fast_call_with_args(compiler, arg_types, stack_size, word_arg_count, 0)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + jump = sljit_emit_jump(compiler, type); + PTR_FAIL_IF(jump == NULL); + + PTR_FAIL_IF(post_call_with_args(compiler, arg_types, 0)); + return jump; } - if (type >= SLJIT_CALL3) { - *inst++ = MOV_rm_r; - *inst++ = MOD_DISP8 | (reg_map[SLJIT_SCRATCH_REG3] << 3) | 0x4 /* SIB */; - *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_LOCALS_REG]; - *inst++ = 2 * sizeof(sljit_sw); +#endif + + stack_size = cdecl_call_get_stack_size(compiler, arg_types, &word_arg_count); + PTR_FAIL_IF(cdecl_call_with_args(compiler, arg_types, stack_size, word_arg_count)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + jump = sljit_emit_jump(compiler, type); + PTR_FAIL_IF(jump == NULL); + + PTR_FAIL_IF(post_call_with_args(compiler, arg_types, stack_size)); + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 stack_size = 0; + sljit_s32 word_arg_count; +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + sljit_s32 swap_args; +#endif + + CHECK_ERROR(); + CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + SLJIT_ASSERT(reg_map[SLJIT_R0] == 0 && reg_map[SLJIT_R2] == 1 && SLJIT_R0 == 1 && SLJIT_R2 == 3); + + if ((type & 0xff) == SLJIT_CALL) { + stack_size = c_fast_call_get_stack_size(arg_types, &word_arg_count); + swap_args = 0; + + if (word_arg_count > 0) { + if ((src & REG_MASK) == SLJIT_R2 || OFFS_REG(src) == SLJIT_R2) { + swap_args = 1; + if (((src & REG_MASK) | 0x2) == SLJIT_R2) + src ^= 0x2; + if ((OFFS_REG(src) | 0x2) == SLJIT_R2) + src ^= TO_OFFS_REG(0x2); + } + } + + FAIL_IF(c_fast_call_with_args(compiler, arg_types, stack_size, word_arg_count, swap_args)); + + compiler->saveds_offset += stack_size; + compiler->locals_offset += stack_size; + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw)); + + compiler->saveds_offset -= stack_size; + compiler->locals_offset -= stack_size; + + return post_call_with_args(compiler, arg_types, 0); } #endif - return SLJIT_SUCCESS; + + stack_size = cdecl_call_get_stack_size(compiler, arg_types, &word_arg_count); + FAIL_IF(cdecl_call_with_args(compiler, arg_types, stack_size, word_arg_count)); + + compiler->saveds_offset += stack_size; + compiler->locals_offset += stack_size; + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + 
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw)); + + compiler->saveds_offset -= stack_size; + compiler->locals_offset -= stack_size; + + return post_call_with_args(compiler, arg_types, stack_size); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) { - sljit_ub *inst; + sljit_u8 *inst; CHECK_ERROR(); - check_sljit_emit_fast_enter(compiler, dst, dstw); + CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); ADJUST_LOCAL_OFFSET(dst, dstw); CHECK_EXTRA_REGS(dst, dstw, (void)0); /* For UNUSED dst. Uncommon, but possible. */ if (dst == SLJIT_UNUSED) - dst = TMP_REGISTER; + dst = TMP_REG1; - if (dst <= TMP_REGISTER) { + if (FAST_IS_REG(dst)) { /* Unused dest is possible here. */ - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); INC_SIZE(1); @@ -514,43 +863,33 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *c return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) { - sljit_ub *inst; + sljit_u8 *inst; CHECK_ERROR(); - check_sljit_emit_fast_return(compiler, src, srcw); + CHECK(check_sljit_emit_fast_return(compiler, src, srcw)); ADJUST_LOCAL_OFFSET(src, srcw); CHECK_EXTRA_REGS(src, srcw, (void)0); - if (src <= TMP_REGISTER) { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 1); + if (FAST_IS_REG(src)) { + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 1); FAIL_IF(!inst); INC_SIZE(1 + 1); PUSH_REG(reg_map[src]); } - else if (src & SLJIT_MEM) { + else { inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw); FAIL_IF(!inst); *inst++ = GROUP_FF; *inst |= PUSH_rm; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); INC_SIZE(1); } - else { - /* SLJIT_IMM. */ - inst = (sljit_ub*)ensure_buf(compiler, 1 + 5 + 1); - FAIL_IF(!inst); - - INC_SIZE(5 + 1); - *inst++ = PUSH_i32; - *(sljit_sw*)inst = srcw; - inst += sizeof(sljit_sw); - } RET(); return SLJIT_SUCCESS; diff --git a/src/3rd/pcre/sjx8664.c b/src/3rd/pcre/sjx8664.c index 28f04fddd80..57587119540 100644 --- a/src/3rd/pcre/sjx8664.c +++ b/src/3rd/pcre/sjx8664.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -26,346 +26,344 @@ /* x86 64-bit arch dependent functions. */ -static sljit_si emit_load_imm64(struct sljit_compiler *compiler, sljit_si reg, sljit_sw imm) +static sljit_s32 emit_load_imm64(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm) { - sljit_ub *inst; + sljit_u8 *inst; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 2 + sizeof(sljit_sw)); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + sizeof(sljit_sw)); FAIL_IF(!inst); INC_SIZE(2 + sizeof(sljit_sw)); *inst++ = REX_W | ((reg_map[reg] <= 7) ? 
0 : REX_B); *inst++ = MOV_r_i32 + (reg_map[reg] & 0x7); - *(sljit_sw*)inst = imm; + sljit_unaligned_store_sw(inst, imm); return SLJIT_SUCCESS; } -static sljit_ub* generate_far_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, sljit_si type) +static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr) { + sljit_s32 type = jump->flags >> TYPE_SHIFT; + + int short_addr = !(jump->flags & SLJIT_REWRITABLE_JUMP) && !(jump->flags & JUMP_LABEL) && (jump->u.target <= 0xffffffff); + + /* The relative jump below is specialized for this case. */ + SLJIT_ASSERT(reg_map[TMP_REG2] >= 8); + if (type < SLJIT_JUMP) { /* Invert type. */ *code_ptr++ = get_jump_code(type ^ 0x1) - 0x10; - *code_ptr++ = 10 + 3; + *code_ptr++ = short_addr ? (6 + 3) : (10 + 3); } - SLJIT_COMPILE_ASSERT(reg_map[TMP_REG3] == 9, tmp3_is_9_first); - *code_ptr++ = REX_W | REX_B; - *code_ptr++ = MOV_r_i32 + 1; + *code_ptr++ = short_addr ? REX_B : (REX_W | REX_B); + *code_ptr++ = MOV_r_i32 | reg_lmap[TMP_REG2]; jump->addr = (sljit_uw)code_ptr; if (jump->flags & JUMP_LABEL) jump->flags |= PATCH_MD; + else if (short_addr) + sljit_unaligned_store_s32(code_ptr, (sljit_s32)jump->u.target); else - *(sljit_sw*)code_ptr = jump->u.target; + sljit_unaligned_store_sw(code_ptr, jump->u.target); + + code_ptr += short_addr ? sizeof(sljit_s32) : sizeof(sljit_sw); - code_ptr += sizeof(sljit_sw); *code_ptr++ = REX_B; *code_ptr++ = GROUP_FF; - *code_ptr++ = (type >= SLJIT_FAST_CALL) ? (MOD_REG | CALL_rm | 1) : (MOD_REG | JMP_rm | 1); + *code_ptr++ = MOD_REG | (type >= SLJIT_FAST_CALL ? CALL_rm : JMP_rm) | reg_lmap[TMP_REG2]; return code_ptr; } -static sljit_ub* generate_fixed_jump(sljit_ub *code_ptr, sljit_sw addr, sljit_si type) +static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, sljit_u8 *code_ptr, sljit_uw max_label) { - sljit_sw delta = addr - ((sljit_sw)code_ptr + 1 + sizeof(sljit_si)); + if (max_label > HALFWORD_MAX) { + put_label->addr -= put_label->flags; + put_label->flags = PATCH_MD; + return code_ptr; + } + + if (put_label->flags == 0) { + /* Destination is register. */ + code_ptr = (sljit_u8*)put_label->addr - 2 - sizeof(sljit_uw); + + SLJIT_ASSERT((code_ptr[0] & 0xf8) == REX_W); + SLJIT_ASSERT((code_ptr[1] & 0xf8) == MOV_r_i32); - if (delta <= SLJIT_W(0x7fffffff) && delta >= SLJIT_W(-0x80000000)) { - *code_ptr++ = (type == 2) ? CALL_i32 : JMP_i32; - *(sljit_sw*)code_ptr = delta; + if ((code_ptr[0] & 0x07) != 0) { + code_ptr[0] = (sljit_u8)(code_ptr[0] & ~0x08); + code_ptr += 2 + sizeof(sljit_s32); + } + else { + code_ptr[0] = code_ptr[1]; + code_ptr += 1 + sizeof(sljit_s32); + } + + put_label->addr = (sljit_uw)code_ptr; + return code_ptr; } - else { - SLJIT_COMPILE_ASSERT(reg_map[TMP_REG3] == 9, tmp3_is_9_second); - *code_ptr++ = REX_W | REX_B; - *code_ptr++ = MOV_r_i32 + 1; - *(sljit_sw*)code_ptr = addr; - code_ptr += sizeof(sljit_sw); - *code_ptr++ = REX_B; - *code_ptr++ = GROUP_FF; - *code_ptr++ = (type == 2) ?
(MOD_REG | CALL_rm | 1) : (MOD_REG | JMP_rm | 1); + + code_ptr -= put_label->flags + (2 + sizeof(sljit_uw)); + SLJIT_MEMMOVE(code_ptr, code_ptr + (2 + sizeof(sljit_uw)), put_label->flags); + + SLJIT_ASSERT((code_ptr[0] & 0xf8) == REX_W); + + if ((code_ptr[1] & 0xf8) == MOV_r_i32) { + code_ptr += 2 + sizeof(sljit_uw); + SLJIT_ASSERT((code_ptr[0] & 0xf8) == REX_W); } + SLJIT_ASSERT(code_ptr[1] == MOV_rm_r); + + code_ptr[0] = (sljit_u8)(code_ptr[0] & ~0x4); + code_ptr[1] = MOV_rm_i32; + code_ptr[2] = (sljit_u8)(code_ptr[2] & ~(0x7 << 3)); + + code_ptr = (sljit_u8*)(put_label->addr - (2 + sizeof(sljit_uw)) + sizeof(sljit_s32)); + put_label->addr = (sljit_uw)code_ptr; + put_label->flags = 0; return code_ptr; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - sljit_si size, pushed_size; - sljit_ub *inst; + sljit_s32 args, i, tmp, size, saved_register_size; + sljit_u8 *inst; CHECK_ERROR(); - check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - compiler->scratches = scratches; - compiler->saveds = saveds; - compiler->flags_saved = 0; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; + compiler->mode32 = 0; + +#ifdef _WIN64 + /* Two/four register slots for parameters plus space for xmm6 register if needed. */ + if (fscratches >= 6 || fsaveds >= 1) + compiler->locals_offset = 6 * sizeof(sljit_sw); + else + compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw); #endif - size = saveds; /* Including the return address saved by the call instruction. */ - pushed_size = (saveds + 1) * sizeof(sljit_sw); -#ifndef _WIN64 - if (saveds >= 2) - size += saveds - 1; -#else - if (saveds >= 4) - size += saveds - 3; - if (scratches >= 5) { - size += (5 - 4) * 2; - pushed_size += sizeof(sljit_sw); - } -#endif - size += args * 3; - if (size > 0) { - inst = (sljit_ub*)ensure_buf(compiler, 1 + size); - FAIL_IF(!inst); + saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG; + for (i = SLJIT_S0; i >= tmp; i--) { + size = reg_map[i] >= 8 ? 
2 : 1; + inst = (sljit_u8*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); INC_SIZE(size); - if (saveds >= 5) { - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_EREG2] >= 8, saved_ereg2_is_hireg); - *inst++ = REX_B; - PUSH_REG(reg_lmap[SLJIT_SAVED_EREG2]); - } - if (saveds >= 4) { - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_EREG1] >= 8, saved_ereg1_is_hireg); + if (reg_map[i] >= 8) *inst++ = REX_B; - PUSH_REG(reg_lmap[SLJIT_SAVED_EREG1]); - } - if (saveds >= 3) { -#ifndef _WIN64 - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_REG3] >= 8, saved_reg3_is_hireg); - *inst++ = REX_B; -#else - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_REG3] < 8, saved_reg3_is_loreg); -#endif - PUSH_REG(reg_lmap[SLJIT_SAVED_REG3]); - } - if (saveds >= 2) { -#ifndef _WIN64 - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_REG2] >= 8, saved_reg2_is_hireg); - *inst++ = REX_B; -#else - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_REG2] < 8, saved_reg2_is_loreg); -#endif - PUSH_REG(reg_lmap[SLJIT_SAVED_REG2]); - } - if (saveds >= 1) { - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_REG1] < 8, saved_reg1_is_loreg); - PUSH_REG(reg_lmap[SLJIT_SAVED_REG1]); - } -#ifdef _WIN64 - if (scratches >= 5) { - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_TEMPORARY_EREG2] >= 8, temporary_ereg2_is_hireg); + PUSH_REG(reg_lmap[i]); + } + + for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) { + size = reg_map[i] >= 8 ? 2 : 1; + inst = (sljit_u8*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); + INC_SIZE(size); + if (reg_map[i] >= 8) *inst++ = REX_B; - PUSH_REG(reg_lmap[SLJIT_TEMPORARY_EREG2]); - } -#endif + PUSH_REG(reg_lmap[i]); + } + + args = get_arg_count(arg_types); + + if (args > 0) { + size = args * 3; + inst = (sljit_u8*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); + + INC_SIZE(size); #ifndef _WIN64 if (args > 0) { - *inst++ = REX_W; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG1] << 3) | 0x7 /* rdi */; + inst[0] = REX_W; + inst[1] = MOV_r_rm; + inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x7 /* rdi */; + inst += 3; } if (args > 1) { - *inst++ = REX_W | REX_R; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_lmap[SLJIT_SAVED_REG2] << 3) | 0x6 /* rsi */; + inst[0] = REX_W | REX_R; + inst[1] = MOV_r_rm; + inst[2] = MOD_REG | (reg_lmap[SLJIT_S1] << 3) | 0x6 /* rsi */; + inst += 3; } if (args > 2) { - *inst++ = REX_W | REX_R; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_lmap[SLJIT_SAVED_REG3] << 3) | 0x2 /* rdx */; + inst[0] = REX_W | REX_R; + inst[1] = MOV_r_rm; + inst[2] = MOD_REG | (reg_lmap[SLJIT_S2] << 3) | 0x2 /* rdx */; } #else if (args > 0) { - *inst++ = REX_W; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG1] << 3) | 0x1 /* rcx */; + inst[0] = REX_W; + inst[1] = MOV_r_rm; + inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x1 /* rcx */; + inst += 3; } if (args > 1) { - *inst++ = REX_W; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG2] << 3) | 0x2 /* rdx */; + inst[0] = REX_W; + inst[1] = MOV_r_rm; + inst[2] = MOD_REG | (reg_map[SLJIT_S1] << 3) | 0x2 /* rdx */; + inst += 3; } if (args > 2) { - *inst++ = REX_W | REX_B; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG3] << 3) | 0x0 /* r8 */; + inst[0] = REX_W | REX_B; + inst[1] = MOV_r_rm; + inst[2] = MOD_REG | (reg_map[SLJIT_S2] << 3) | 0x0 /* r8 */; } #endif } - local_size = ((local_size + FIXED_LOCALS_OFFSET + pushed_size + 16 - 1) & ~(16 - 1)) - pushed_size; + local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size; compiler->local_size = local_size; + #ifdef 
_WIN64 - if (local_size > 1024) { - /* Allocate stack for the callback, which grows the stack. */ - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4 + (3 + sizeof(sljit_si))); - FAIL_IF(!inst); - INC_SIZE(4 + (3 + sizeof(sljit_si))); - *inst++ = REX_W; - *inst++ = GROUP_BINARY_83; - *inst++ = MOD_REG | SUB | 4; - /* Pushed size must be divisible by 8. */ - SLJIT_ASSERT(!(pushed_size & 0x7)); - if (pushed_size & 0x8) { - *inst++ = 5 * sizeof(sljit_sw); - local_size -= 5 * sizeof(sljit_sw); - } else { - *inst++ = 4 * sizeof(sljit_sw); - local_size -= 4 * sizeof(sljit_sw); + if (local_size > 0) { + if (local_size <= 4 * 4096) { + if (local_size > 4096) + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096); + if (local_size > 2 * 4096) + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 2); + if (local_size > 3 * 4096) + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3); } - /* Second instruction */ - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SCRATCH_REG1] < 8, temporary_reg1_is_loreg); - *inst++ = REX_W; - *inst++ = MOV_rm_i32; - *inst++ = MOD_REG | reg_lmap[SLJIT_SCRATCH_REG1]; - *(sljit_si*)inst = local_size; -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->skip_checks = 1; -#endif - FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack))); + else { + EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_SP, 0); + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, (local_size - 1) >> 12); + + SLJIT_ASSERT (reg_map[SLJIT_R0] == 0); + + EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_MEM1(SLJIT_R0), -4096); + FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), + SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 4096)); + FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), + TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, 1)); + + inst = (sljit_u8*)ensure_buf(compiler, 1 + 2); + FAIL_IF(!inst); + + INC_SIZE(2); + inst[0] = JNE_i8; + inst[1] = (sljit_s8) -19; + } + + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size); } #endif - SLJIT_ASSERT(local_size > 0); - if (local_size <= 127) { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); - FAIL_IF(!inst); - INC_SIZE(4); - *inst++ = REX_W; - *inst++ = GROUP_BINARY_83; - *inst++ = MOD_REG | SUB | 4; - *inst++ = local_size; + + if (local_size > 0) { + FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), + SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size)); } - else { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 7); + +#ifdef _WIN64 + /* Save xmm6 register: movaps [rsp + 0x20], xmm6 */ + if (fscratches >= 6 || fsaveds >= 1) { + inst = (sljit_u8*)ensure_buf(compiler, 1 + 5); FAIL_IF(!inst); - INC_SIZE(7); - *inst++ = REX_W; - *inst++ = GROUP_BINARY_81; - *inst++ = MOD_REG | SUB | 4; - *(sljit_si*)inst = local_size; - inst += sizeof(sljit_si); + INC_SIZE(5); + *inst++ = GROUP_0F; + sljit_unaligned_store_s32(inst, 0x20247429); } -#ifdef _WIN64 - /* Save xmm6 with MOVAPS instruction. 
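*/

The magic constant stored after GROUP_0F above is easier to read as bytes: the Win64 ABI keeps xmm6-xmm15 callee-saved, and 0F 29 74 24 20 is exactly "movaps [rsp + 0x20], xmm6" (the 0x20247428 form in sljit_emit_return below is the matching load). A standalone little-endian check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t imm = 0x20247429; /* written after the 0x0f escape byte */
        int i;

        /* prints "29 74 24 20": opcode 29, ModRM 74 (xmm6, [SIB+disp8]),
           SIB 24 (base rsp), displacement 20 */
        for (i = 0; i < 4; i++)
            printf("%02x ", (unsigned)((imm >> (8 * i)) & 0xff));
        printf("\n");
        return 0;
    }

- /* Save xmm6 with MOVAPS instruction.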
*/ - inst = (sljit_ub*)ensure_buf(compiler, 1 + 5); - FAIL_IF(!inst); - INC_SIZE(5); - *inst++ = GROUP_0F; - *(sljit_si*)inst = 0x20247429; #endif return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - sljit_si pushed_size; + sljit_s32 saved_register_size; - CHECK_ERROR_VOID(); - check_sljit_set_context(compiler, args, scratches, saveds, local_size); + CHECK_ERROR(); + CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - compiler->scratches = scratches; - compiler->saveds = saveds; -#if (defined SLJIT_DEBUG && SLJIT_DEBUG) - compiler->logical_local_size = local_size; +#ifdef _WIN64 + /* Two/four register slots for parameters plus space for xmm6 register if needed. */ + if (fscratches >= 6 || fsaveds >= 1) + compiler->locals_offset = 6 * sizeof(sljit_sw); + else + compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw); #endif /* Including the return address saved by the call instruction. */ - pushed_size = (saveds + 1) * sizeof(sljit_sw); -#ifdef _WIN64 - if (scratches >= 5) - pushed_size += sizeof(sljit_sw); -#endif - compiler->local_size = ((local_size + FIXED_LOCALS_OFFSET + pushed_size + 16 - 1) & ~(16 - 1)) - pushed_size; + saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + compiler->local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size; + return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { - sljit_si size; - sljit_ub *inst; + sljit_s32 i, tmp, size; + sljit_u8 *inst; CHECK_ERROR(); - check_sljit_emit_return(compiler, op, src, srcw); + CHECK(check_sljit_emit_return(compiler, op, src, srcw)); - compiler->flags_saved = 0; FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); #ifdef _WIN64 - /* Restore xmm6 with MOVAPS instruction. 
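*/

Both sljit_emit_enter() and sljit_set_context() above round the frame with the same expression: the pushed saved registers and the return address are already on the stack, so the locals are padded until the total keeps rsp 16-byte aligned, and the saved-register size is then subtracted again because the pushes happen separately. A one-line restatement (round_frame is an illustrative name; SLJIT_LOCALS_OFFSET is passed as locals_offset here):

    #include <stdint.h>

    /* local_size' = align16(local_size + locals_offset + saved_bytes) - saved_bytes */
    static int32_t round_frame(int32_t local_size, int32_t locals_offset,
        int32_t saved_bytes)
    {
        return ((local_size + locals_offset + saved_bytes + 15) & ~15) - saved_bytes;
    }

- /* Restore xmm6 with MOVAPS instruction.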
*/ - inst = (sljit_ub*)ensure_buf(compiler, 1 + 5); - FAIL_IF(!inst); - INC_SIZE(5); - *inst++ = GROUP_0F; - *(sljit_si*)inst = 0x20247428; -#endif - SLJIT_ASSERT(compiler->local_size > 0); - if (compiler->local_size <= 127) { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + /* Restore xmm6 register: movaps xmm6, [rsp + 0x20] */ + if (compiler->fscratches >= 6 || compiler->fsaveds >= 1) { + inst = (sljit_u8*)ensure_buf(compiler, 1 + 5); FAIL_IF(!inst); - INC_SIZE(4); - *inst++ = REX_W; - *inst++ = GROUP_BINARY_83; - *inst++ = MOD_REG | ADD | 4; - *inst = compiler->local_size; + INC_SIZE(5); + *inst++ = GROUP_0F; + sljit_unaligned_store_s32(inst, 0x20247428); } - else { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 7); - FAIL_IF(!inst); - INC_SIZE(7); - *inst++ = REX_W; - *inst++ = GROUP_BINARY_81; - *inst++ = MOD_REG | ADD | 4; - *(sljit_si*)inst = compiler->local_size; - } - - size = 1 + compiler->saveds; -#ifndef _WIN64 - if (compiler->saveds >= 2) - size += compiler->saveds - 1; -#else - if (compiler->saveds >= 4) - size += compiler->saveds - 3; - if (compiler->scratches >= 5) - size += (5 - 4) * 2; #endif - inst = (sljit_ub*)ensure_buf(compiler, 1 + size); - FAIL_IF(!inst); - INC_SIZE(size); - -#ifdef _WIN64 - if (compiler->scratches >= 5) { - *inst++ = REX_B; - POP_REG(reg_lmap[SLJIT_TEMPORARY_EREG2]); - } -#endif - if (compiler->saveds >= 1) - POP_REG(reg_map[SLJIT_SAVED_REG1]); - if (compiler->saveds >= 2) { -#ifndef _WIN64 - *inst++ = REX_B; -#endif - POP_REG(reg_lmap[SLJIT_SAVED_REG2]); - } - if (compiler->saveds >= 3) { -#ifndef _WIN64 - *inst++ = REX_B; -#endif - POP_REG(reg_lmap[SLJIT_SAVED_REG3]); + if (compiler->local_size > 0) { + if (compiler->local_size <= 127) { + inst = (sljit_u8*)ensure_buf(compiler, 1 + 4); + FAIL_IF(!inst); + INC_SIZE(4); + *inst++ = REX_W; + *inst++ = GROUP_BINARY_83; + *inst++ = MOD_REG | ADD | 4; + *inst = compiler->local_size; + } + else { + inst = (sljit_u8*)ensure_buf(compiler, 1 + 7); + FAIL_IF(!inst); + INC_SIZE(7); + *inst++ = REX_W; + *inst++ = GROUP_BINARY_81; + *inst++ = MOD_REG | ADD | 4; + sljit_unaligned_store_s32(inst, compiler->local_size); + } } - if (compiler->saveds >= 4) { - *inst++ = REX_B; - POP_REG(reg_lmap[SLJIT_SAVED_EREG1]); + + tmp = compiler->scratches; + for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) { + size = reg_map[i] >= 8 ? 2 : 1; + inst = (sljit_u8*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); + INC_SIZE(size); + if (reg_map[i] >= 8) + *inst++ = REX_B; + POP_REG(reg_lmap[i]); } - if (compiler->saveds >= 5) { - *inst++ = REX_B; - POP_REG(reg_lmap[SLJIT_SAVED_EREG2]); + + tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG; + for (i = tmp; i <= SLJIT_S0; i++) { + size = reg_map[i] >= 8 ? 2 : 1; + inst = (sljit_u8*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); + INC_SIZE(size); + if (reg_map[i] >= 8) + *inst++ = REX_B; + POP_REG(reg_lmap[i]); } + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); RET(); return SLJIT_SUCCESS; } @@ -374,32 +372,32 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compi /* Operators */ /* --------------------------------------------------------------------- */ -static sljit_si emit_do_imm32(struct sljit_compiler *compiler, sljit_ub rex, sljit_ub opcode, sljit_sw imm) +static sljit_s32 emit_do_imm32(struct sljit_compiler *compiler, sljit_u8 rex, sljit_u8 opcode, sljit_sw imm) { - sljit_ub *inst; - sljit_si length = 1 + (rex ? 
1 : 0) + sizeof(sljit_si); + sljit_u8 *inst; + sljit_s32 length = 1 + (rex ? 1 : 0) + sizeof(sljit_s32); - inst = (sljit_ub*)ensure_buf(compiler, 1 + length); + inst = (sljit_u8*)ensure_buf(compiler, 1 + length); FAIL_IF(!inst); INC_SIZE(length); if (rex) *inst++ = rex; *inst++ = opcode; - *(sljit_si*)inst = imm; + sljit_unaligned_store_s32(inst, imm); return SLJIT_SUCCESS; } -static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si size, +static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32 size, /* The register or immediate operand. */ - sljit_si a, sljit_sw imma, + sljit_s32 a, sljit_sw imma, /* The general operand (not immediate). */ - sljit_si b, sljit_sw immb) + sljit_s32 b, sljit_sw immb) { - sljit_ub *inst; - sljit_ub *buf_ptr; - sljit_ub rex = 0; - sljit_si flags = size & ~0xf; - sljit_si inst_size; + sljit_u8 *inst; + sljit_u8 *buf_ptr; + sljit_u8 rex = 0; + sljit_s32 flags = size & ~0xf; + sljit_s32 inst_size; /* The immediate operand must be 32 bit. */ SLJIT_ASSERT(!(a & SLJIT_IMM) || compiler->mode32 || IS_HALFWORD(imma)); @@ -409,72 +407,70 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0); /* Both size flags cannot be switched on. */ SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG)); -#if (defined SLJIT_SSE2 && SLJIT_SSE2) /* SSE2 and immediate is not possible. */ SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2)); SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3) && (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66) && (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66)); -#endif size &= 0xf; inst_size = size; - if ((b & SLJIT_MEM) && !(b & 0xf0) && NOT_HALFWORD(immb)) { - if (emit_load_imm64(compiler, TMP_REG3, immb)) - return NULL; - immb = 0; - if (b & 0xf) - b |= TMP_REG3 << 4; - else - b |= TMP_REG3; - } - if (!compiler->mode32 && !(flags & EX86_NO_REXW)) rex |= REX_W; else if (flags & EX86_REX) rex |= REX; -#if (defined SLJIT_SSE2 && SLJIT_SSE2) if (flags & (EX86_PREF_F2 | EX86_PREF_F3)) inst_size++; -#endif if (flags & EX86_PREF_66) inst_size++; /* Calculate size of b. */ inst_size += 1; /* mod r/m byte. */ if (b & SLJIT_MEM) { - if ((b & 0x0f) == SLJIT_UNUSED) - inst_size += 1 + sizeof(sljit_si); /* SIB byte required to avoid RIP based addressing. */ + if (!(b & OFFS_REG_MASK)) { + if (NOT_HALFWORD(immb)) { + PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb)); + immb = 0; + if (b & REG_MASK) + b |= TO_OFFS_REG(TMP_REG2); + else + b |= TMP_REG2; + } + else if (reg_lmap[b & REG_MASK] == 4) + b |= TO_OFFS_REG(SLJIT_SP); + } + + if ((b & REG_MASK) == SLJIT_UNUSED) + inst_size += 1 + sizeof(sljit_s32); /* SIB byte required to avoid RIP based addressing. */ else { - if (reg_map[b & 0x0f] >= 8) + if (reg_map[b & REG_MASK] >= 8) rex |= REX_B; - if (immb != 0 && !(b & 0xf0)) { + + if (immb != 0 && (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP))) { /* Immediate operand. 
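*/

The size computation in this hunk encodes the x86-64 ModRM special cases: a base register whose low three bits are 4 (rsp/r12) always needs a SIB byte, and one whose low three bits are 5 (rbp/r13) has no displacement-free form, so a zero 8-bit displacement is forced. A hedged sketch of the displacement sizing alone (disp_bytes is an illustrative name, not sljit API):

    #include <stdint.h>

    /* How many displacement bytes the addressing mode needs:
       0 (mod = 00), 1 (mod = 01, disp8) or 4 (mod = 10, disp32). */
    static int disp_bytes(uint8_t base_lmap, int32_t disp)
    {
        if (disp == 0 && base_lmap != 5) /* rbp/r13 cannot be encoded with mod = 00 */
            return 0;
        return (disp >= -128 && disp <= 127) ? 1 : 4;
    }

 /* Immediate operand.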
*/ if (immb <= 127 && immb >= -128) - inst_size += sizeof(sljit_sb); + inst_size += sizeof(sljit_s8); else - inst_size += sizeof(sljit_si); + inst_size += sizeof(sljit_s32); } - } + else if (reg_lmap[b & REG_MASK] == 5) + inst_size += sizeof(sljit_s8); - if ((b & 0xf) == SLJIT_LOCALS_REG && !(b & 0xf0)) - b |= SLJIT_LOCALS_REG << 4; - - if ((b & 0xf0) != SLJIT_UNUSED) { - inst_size += 1; /* SIB byte. */ - if (reg_map[(b >> 4) & 0x0f] >= 8) - rex |= REX_X; + if ((b & OFFS_REG_MASK) != SLJIT_UNUSED) { + inst_size += 1; /* SIB byte. */ + if (reg_map[OFFS_REG(b)] >= 8) + rex |= REX_X; + } } } -#if (defined SLJIT_SSE2 && SLJIT_SSE2) - else if (!(flags & EX86_SSE2) && reg_map[b] >= 8) - rex |= REX_B; -#else - else if (reg_map[b] >= 8) + else if (!(flags & EX86_SSE2_OP2)) { + if (reg_map[b] >= 8) + rex |= REX_B; + } + else if (freg_map[b] >= 8) rex |= REX_B; -#endif if (a & SLJIT_IMM) { if (flags & EX86_BIN_INS) { @@ -495,34 +491,31 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si else if (flags & EX86_HALF_ARG) inst_size += sizeof(short); else - inst_size += sizeof(sljit_si); + inst_size += sizeof(sljit_s32); } else { SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG); /* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */ -#if (defined SLJIT_SSE2 && SLJIT_SSE2) - if (!(flags & EX86_SSE2) && reg_map[a] >= 8) - rex |= REX_R; -#else - if (reg_map[a] >= 8) + if (!(flags & EX86_SSE2_OP1)) { + if (reg_map[a] >= 8) + rex |= REX_R; + } + else if (freg_map[a] >= 8) rex |= REX_R; -#endif } if (rex) inst_size++; - inst = (sljit_ub*)ensure_buf(compiler, 1 + inst_size); + inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size); PTR_FAIL_IF(!inst); /* Encoding the byte. */ INC_SIZE(inst_size); -#if (defined SLJIT_SSE2 && SLJIT_SSE2) if (flags & EX86_PREF_F2) *inst++ = 0xf2; if (flags & EX86_PREF_F3) *inst++ = 0xf3; -#endif if (flags & EX86_PREF_66) *inst++ = 0x66; if (rex) @@ -534,17 +527,12 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM)) *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81; - if ((a & SLJIT_IMM) || (a == 0)) + if (a & SLJIT_IMM) *buf_ptr = 0; -#if (defined SLJIT_SSE2 && SLJIT_SSE2) - else if (!(flags & EX86_SSE2)) + else if (!(flags & EX86_SSE2_OP1)) *buf_ptr = reg_lmap[a] << 3; else - *buf_ptr = a << 3; -#else - else - *buf_ptr = reg_lmap[a] << 3; -#endif + *buf_ptr = freg_lmap[a] << 3; } else { if (a & SLJIT_IMM) { @@ -558,55 +546,55 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si } if (!(b & SLJIT_MEM)) -#if (defined SLJIT_SSE2 && SLJIT_SSE2) - *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2)) ? reg_lmap[b] : b); -#else - *buf_ptr++ |= MOD_REG + reg_lmap[b]; -#endif - else if ((b & 0x0f) != SLJIT_UNUSED) { - if ((b & 0xf0) == SLJIT_UNUSED || (b & 0xf0) == (SLJIT_LOCALS_REG << 4)) { - if (immb != 0) { + *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? 
reg_lmap[b] : freg_lmap[b]); + else if ((b & REG_MASK) != SLJIT_UNUSED) { + if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) { + if (immb != 0 || reg_lmap[b & REG_MASK] == 5) { if (immb <= 127 && immb >= -128) *buf_ptr |= 0x40; else *buf_ptr |= 0x80; } - if ((b & 0xf0) == SLJIT_UNUSED) - *buf_ptr++ |= reg_lmap[b & 0x0f]; + if ((b & OFFS_REG_MASK) == SLJIT_UNUSED) + *buf_ptr++ |= reg_lmap[b & REG_MASK]; else { *buf_ptr++ |= 0x04; - *buf_ptr++ = reg_lmap[b & 0x0f] | (reg_lmap[(b >> 4) & 0x0f] << 3); + *buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3); } - if (immb != 0) { + if (immb != 0 || reg_lmap[b & REG_MASK] == 5) { if (immb <= 127 && immb >= -128) *buf_ptr++ = immb; /* 8 bit displacement. */ else { - *(sljit_si*)buf_ptr = immb; /* 32 bit displacement. */ - buf_ptr += sizeof(sljit_si); + sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */ + buf_ptr += sizeof(sljit_s32); } } } else { + if (reg_lmap[b & REG_MASK] == 5) + *buf_ptr |= 0x40; *buf_ptr++ |= 0x04; - *buf_ptr++ = reg_lmap[b & 0x0f] | (reg_lmap[(b >> 4) & 0x0f] << 3) | (immb << 6); + *buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6); + if (reg_lmap[b & REG_MASK] == 5) + *buf_ptr++ = 0; } } else { *buf_ptr++ |= 0x04; *buf_ptr++ = 0x25; - *(sljit_si*)buf_ptr = immb; /* 32 bit displacement. */ - buf_ptr += sizeof(sljit_si); + sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */ + buf_ptr += sizeof(sljit_s32); } if (a & SLJIT_IMM) { if (flags & EX86_BYTE_ARG) *buf_ptr = imma; else if (flags & EX86_HALF_ARG) - *(short*)buf_ptr = imma; + sljit_unaligned_store_s16(buf_ptr, imma); else if (!(flags & EX86_SHIFT_INS)) - *(sljit_si*)buf_ptr = imma; + sljit_unaligned_store_s32(buf_ptr, imma); } return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1); @@ -616,64 +604,183 @@ static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si /* Call / return instructions */ /* --------------------------------------------------------------------- */ -static SLJIT_INLINE sljit_si call_with_args(struct sljit_compiler *compiler, sljit_si type) +#ifndef _WIN64 + +static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw) { - sljit_ub *inst; + sljit_s32 src = src_ptr ? (*src_ptr) : 0; + sljit_s32 word_arg_count = 0; -#ifndef _WIN64 - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SCRATCH_REG2] == 6 && reg_map[SLJIT_SCRATCH_REG1] < 8 && reg_map[SLJIT_SCRATCH_REG3] < 8, args_registers); + SLJIT_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R3] == 1 && reg_map[TMP_REG1] == 2); + + compiler->mode32 = 0; + + /* Remove return value. */ + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32) + word_arg_count++; + arg_types >>= SLJIT_DEF_SHIFT; + } + + if (word_arg_count == 0) + return SLJIT_SUCCESS; + + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + EMIT_MOV(compiler, TMP_REG2, 0, src, srcw); + *src_ptr = TMP_REG2; + } + else if (src == SLJIT_R2 && word_arg_count >= SLJIT_R2) + *src_ptr = TMP_REG1; + + if (word_arg_count >= 3) + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R2, 0); + return emit_mov(compiler, SLJIT_R2, 0, SLJIT_R0, 0); +} - inst = (sljit_ub*)ensure_buf(compiler, 1 + ((type < SLJIT_CALL3) ? 3 : 6)); - FAIL_IF(!inst); - INC_SIZE((type < SLJIT_CALL3) ? 
3 : 6); - if (type >= SLJIT_CALL3) { - *inst++ = REX_W; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (0x2 /* rdx */ << 3) | reg_lmap[SLJIT_SCRATCH_REG3]; - } - *inst++ = REX_W; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (0x7 /* rdi */ << 3) | reg_lmap[SLJIT_SCRATCH_REG1]; #else - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SCRATCH_REG2] == 2 && reg_map[SLJIT_SCRATCH_REG1] < 8 && reg_map[SLJIT_SCRATCH_REG3] < 8, args_registers); - inst = (sljit_ub*)ensure_buf(compiler, 1 + ((type < SLJIT_CALL3) ? 3 : 6)); - FAIL_IF(!inst); - INC_SIZE((type < SLJIT_CALL3) ? 3 : 6); - if (type >= SLJIT_CALL3) { - *inst++ = REX_W | REX_R; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (0x0 /* r8 */ << 3) | reg_lmap[SLJIT_SCRATCH_REG3]; - } - *inst++ = REX_W; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (0x1 /* rcx */ << 3) | reg_lmap[SLJIT_SCRATCH_REG1]; -#endif +static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw) +{ + sljit_s32 src = src_ptr ? (*src_ptr) : 0; + sljit_s32 arg_count = 0; + sljit_s32 word_arg_count = 0; + sljit_s32 float_arg_count = 0; + sljit_s32 types = 0; + sljit_s32 data_trandfer = 0; + static sljit_u8 word_arg_regs[5] = { 0, SLJIT_R3, SLJIT_R1, SLJIT_R2, TMP_REG1 }; + + SLJIT_ASSERT(reg_map[SLJIT_R3] == 1 && reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R2] == 8 && reg_map[TMP_REG1] == 9); + + compiler->mode32 = 0; + arg_types >>= SLJIT_DEF_SHIFT; + + while (arg_types) { + types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK); + + switch (arg_types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + case SLJIT_ARG_TYPE_F64: + arg_count++; + float_arg_count++; + + if (arg_count != float_arg_count) + data_trandfer = 1; + break; + default: + arg_count++; + word_arg_count++; + + if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count]) { + data_trandfer = 1; + + if (src == word_arg_regs[arg_count]) { + EMIT_MOV(compiler, TMP_REG2, 0, src, 0); + *src_ptr = TMP_REG2; + } + } + break; + } + + arg_types >>= SLJIT_DEF_SHIFT; + } + + if (!data_trandfer) + return SLJIT_SUCCESS; + + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + EMIT_MOV(compiler, TMP_REG2, 0, src, srcw); + *src_ptr = TMP_REG2; + } + + while (types) { + switch (types & SLJIT_DEF_MASK) { + case SLJIT_ARG_TYPE_F32: + if (arg_count != float_arg_count) + FAIL_IF(emit_sse2_load(compiler, 1, arg_count, float_arg_count, 0)); + arg_count--; + float_arg_count--; + break; + case SLJIT_ARG_TYPE_F64: + if (arg_count != float_arg_count) + FAIL_IF(emit_sse2_load(compiler, 0, arg_count, float_arg_count, 0)); + arg_count--; + float_arg_count--; + break; + default: + if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count]) + EMIT_MOV(compiler, word_arg_regs[arg_count], 0, word_arg_count, 0); + arg_count--; + word_arg_count--; + break; + } + + types >>= SLJIT_DEF_SHIFT; + } + return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +#endif + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) { - sljit_ub *inst; + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); + + PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL, 0)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_jump(compiler, type); +} +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 
sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) +{ CHECK_ERROR(); - check_sljit_emit_fast_enter(compiler, dst, dstw); + CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + + FAIL_IF(call_with_args(compiler, arg_types, &src, srcw)); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + compiler->skip_checks = 1; +#endif + + return sljit_emit_ijump(compiler, type, src, srcw); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + sljit_u8 *inst; + + CHECK_ERROR(); + CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); ADJUST_LOCAL_OFFSET(dst, dstw); /* For UNUSED dst. Uncommon, but possible. */ if (dst == SLJIT_UNUSED) - dst = TMP_REGISTER; + dst = TMP_REG1; - if (dst <= TMP_REGISTER) { + if (FAST_IS_REG(dst)) { if (reg_map[dst] < 8) { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); INC_SIZE(1); POP_REG(reg_lmap[dst]); return SLJIT_SUCCESS; } - inst = (sljit_ub*)ensure_buf(compiler, 1 + 2); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 2); FAIL_IF(!inst); INC_SIZE(2); *inst++ = REX_B; @@ -689,29 +796,24 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *c return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) { - sljit_ub *inst; + sljit_u8 *inst; CHECK_ERROR(); - check_sljit_emit_fast_return(compiler, src, srcw); + CHECK(check_sljit_emit_fast_return(compiler, src, srcw)); ADJUST_LOCAL_OFFSET(src, srcw); - if ((src & SLJIT_IMM) && NOT_HALFWORD(srcw)) { - FAIL_IF(emit_load_imm64(compiler, TMP_REGISTER, srcw)); - src = TMP_REGISTER; - } - - if (src <= TMP_REGISTER) { + if (FAST_IS_REG(src)) { if (reg_map[src] < 8) { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 1); FAIL_IF(!inst); INC_SIZE(1 + 1); PUSH_REG(reg_lmap[src]); } else { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 2 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + 1); FAIL_IF(!inst); INC_SIZE(2 + 1); @@ -719,7 +821,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler * PUSH_REG(reg_lmap[src]); } } - else if (src & SLJIT_MEM) { + else { /* REX_W is not necessary (src is not immediate). */ compiler->mode32 = 1; inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw); @@ -727,37 +829,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler * *inst++ = GROUP_FF; *inst |= PUSH_rm; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); INC_SIZE(1); } - else { - SLJIT_ASSERT(IS_HALFWORD(srcw)); - /* SLJIT_IMM. 
*/ - inst = (sljit_ub*)ensure_buf(compiler, 1 + 5 + 1); - FAIL_IF(!inst); - - INC_SIZE(5 + 1); - *inst++ = PUSH_i32; - *(sljit_si*)inst = srcw; - inst += sizeof(sljit_si); - } RET(); return SLJIT_SUCCESS; } - /* --------------------------------------------------------------------- */ /* Extend input */ /* --------------------------------------------------------------------- */ -static sljit_si emit_mov_int(struct sljit_compiler *compiler, sljit_si sign, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_ub* inst; - sljit_si dst_r; + sljit_u8* inst; + sljit_s32 dst_r; compiler->mode32 = 0; @@ -765,9 +855,9 @@ static sljit_si emit_mov_int(struct sljit_compiler *compiler, sljit_si sign, return SLJIT_SUCCESS; /* Empty instruction. */ if (src & SLJIT_IMM) { - if (dst <= TMP_REGISTER) { + if (FAST_IS_REG(dst)) { if (sign || ((sljit_uw)srcw <= 0x7fffffff)) { - inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_si)srcw, dst, dstw); + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw); FAIL_IF(!inst); *inst = MOV_rm_i32; return SLJIT_SUCCESS; @@ -775,16 +865,16 @@ static sljit_si emit_mov_int(struct sljit_compiler *compiler, sljit_si sign, return emit_load_imm64(compiler, dst, srcw); } compiler->mode32 = 1; - inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_si)srcw, dst, dstw); + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw); FAIL_IF(!inst); *inst = MOV_rm_i32; compiler->mode32 = 0; return SLJIT_SUCCESS; } - dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; - if ((dst & SLJIT_MEM) && (src <= TMP_REGISTER)) + if ((dst & SLJIT_MEM) && FAST_IS_REG(src)) dst_r = src; else { if (sign) { diff --git a/src/3rd/pcre/sjx86c.c b/src/3rd/pcre/sjx86c.c index 922e8edda7d..5a4e0dfb983 100644 --- a/src/3rd/pcre/sjx86c.c +++ b/src/3rd/pcre/sjx86c.c @@ -1,7 +1,7 @@ /* * Stack-less Just-In-Time compiler * - * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: @@ -24,9 +24,13 @@ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) +SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void) { +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + return "x86" SLJIT_CPUINFO " ABI:fastcall"; +#else return "x86" SLJIT_CPUINFO; +#endif } /* @@ -35,7 +39,7 @@ SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) 1 - ECX 2 - EDX 3 - EBX - 4 - none + 4 - ESP 5 - EBP 6 - ESI 7 - EDI @@ -47,7 +51,7 @@ SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) 1 - RCX 2 - RDX 3 - RBX - 4 - none + 4 - RSP 5 - RBP 6 - RSI 7 - RDI @@ -64,70 +68,82 @@ SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) /* Last register + 1. 
*/
-#define TMP_REGISTER (SLJIT_NO_REGISTERS + 1)
+#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)

-static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 2] = {
- 0, 0, 2, 1, 0, 0, 3, 6, 7, 0, 0, 4, 5
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 3] = {
+ 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 7, 6, 3, 4, 5
};

#define CHECK_EXTRA_REGS(p, w, do) \
- if (p >= SLJIT_TEMPORARY_EREG1 && p <= SLJIT_TEMPORARY_EREG2) { \
- w = compiler->scratches_start + (p - SLJIT_TEMPORARY_EREG1) * sizeof(sljit_sw); \
- p = SLJIT_MEM1(SLJIT_LOCALS_REG); \
- do; \
- } \
- else if (p >= SLJIT_SAVED_EREG1 && p <= SLJIT_SAVED_EREG2) { \
- w = compiler->saveds_start + (p - SLJIT_SAVED_EREG1) * sizeof(sljit_sw); \
- p = SLJIT_MEM1(SLJIT_LOCALS_REG); \
+ if (p >= SLJIT_R3 && p <= SLJIT_S3) { \
+ if (p <= compiler->scratches) \
+ w = compiler->saveds_offset - ((p) - SLJIT_R2) * (sljit_sw)sizeof(sljit_sw); \
+ else \
+ w = compiler->locals_offset + ((p) - SLJIT_S2) * (sljit_sw)sizeof(sljit_sw); \
+ p = SLJIT_MEM1(SLJIT_SP); \
 do; \
 }

#else /* SLJIT_CONFIG_X86_32 */

/* Last register + 1. */
-#define TMP_REGISTER (SLJIT_NO_REGISTERS + 1)
-#define TMP_REG2 (SLJIT_NO_REGISTERS + 2)
-#define TMP_REG3 (SLJIT_NO_REGISTERS + 3)
+#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
+#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)

/* Note: r12 & 0x7 == 0b100, which decoded as SIB byte present
   Note: avoid to use r12 and r13 for memory addessing
- therefore r12 is better for SAVED_EREG than SAVED_REG. */
+ therefore r12 is better to be a higher saved register. */

#ifndef _WIN64
-/* 1st passed in rdi, 2nd argument passed in rsi, 3rd in rdx. */
-static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 4] = {
- 0, 0, 6, 1, 8, 11, 3, 15, 14, 13, 12, 4, 2, 7, 9
+/* Args: rdi(=7), rsi(=6), rdx(=2), rcx(=1), r8, r9. Scratches: rax(=0), r10, r11 */
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
+ 0, 0, 6, 7, 1, 8, 11, 10, 12, 5, 13, 14, 15, 3, 4, 2, 9
};
/* low-map. reg_map & 0x7. */
-static SLJIT_CONST sljit_ub reg_lmap[SLJIT_NO_REGISTERS + 4] = {
- 0, 0, 6, 1, 0, 3, 3, 7, 6, 5, 4, 4, 2, 7, 1
+static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
+ 0, 0, 6, 7, 1, 0, 3, 2, 4, 5, 5, 6, 7, 3, 4, 2, 1
};
#else
-/* 1st passed in rcx, 2nd argument passed in rdx, 3rd in r8. */
-static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 4] = {
- 0, 0, 2, 1, 11, 13, 3, 6, 7, 14, 15, 4, 10, 8, 9
+/* Args: rcx(=1), rdx(=2), r8, r9. Scratches: rax(=0), r10, r11 */
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
+ 0, 0, 2, 8, 1, 11, 12, 5, 13, 14, 15, 7, 6, 3, 4, 9, 10
};
/* low-map. reg_map & 0x7. */
-static SLJIT_CONST sljit_ub reg_lmap[SLJIT_NO_REGISTERS + 4] = {
- 0, 0, 2, 1, 3, 5, 3, 6, 7, 6, 7, 4, 2, 0, 1
+static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
+ 0, 0, 2, 0, 1, 3, 4, 5, 5, 6, 7, 7, 6, 3, 4, 1, 2
};
#endif

+/* Args: xmm0-xmm3 */
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
+ 4, 0, 1, 2, 3, 5, 6
+};
+/* low-map. freg_map & 0x7.
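Note: as with reg_lmap, the low maps strip the REX extension bit: x86-64 encodes r8-r15 (and xmm8-xmm15) as a REX prefix bit plus the low three ModRM/SIB bits. A hypothetical consistency check, not in the source, that states the invariant the tables satisfy:

    int i;
    for (i = 0; i < SLJIT_NUMBER_OF_REGISTERS + 4; i++)
        SLJIT_ASSERT(reg_lmap[i] == (reg_map[i] & 0x7));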
*/ +static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = { + 4, 0, 1, 2, 3, 5, 6 +}; + #define REX_W 0x48 #define REX_R 0x44 #define REX_X 0x42 #define REX_B 0x41 #define REX 0x40 -#define IS_HALFWORD(x) ((x) <= 0x7fffffffll && (x) >= -0x80000000ll) -#define NOT_HALFWORD(x) ((x) > 0x7fffffffll || (x) < -0x80000000ll) +#ifndef _WIN64 +#define HALFWORD_MAX 0x7fffffffl +#define HALFWORD_MIN -0x80000000l +#else +#define HALFWORD_MAX 0x7fffffffll +#define HALFWORD_MIN -0x80000000ll +#endif + +#define IS_HALFWORD(x) ((x) <= HALFWORD_MAX && (x) >= HALFWORD_MIN) +#define NOT_HALFWORD(x) ((x) > HALFWORD_MAX || (x) < HALFWORD_MIN) #define CHECK_EXTRA_REGS(p, w, do) #endif /* SLJIT_CONFIG_X86_32 */ -#if (defined SLJIT_SSE2 && SLJIT_SSE2) #define TMP_FREG (0) -#endif /* Size flags for emit_x86_instruction: */ #define EX86_BIN_INS 0x0010 @@ -137,12 +153,11 @@ static SLJIT_CONST sljit_ub reg_lmap[SLJIT_NO_REGISTERS + 4] = { #define EX86_BYTE_ARG 0x0100 #define EX86_HALF_ARG 0x0200 #define EX86_PREF_66 0x0400 - -#if (defined SLJIT_SSE2 && SLJIT_SSE2) -#define EX86_SSE2 0x0800 -#define EX86_PREF_F2 0x1000 -#define EX86_PREF_F3 0x2000 -#endif +#define EX86_PREF_F2 0x0800 +#define EX86_PREF_F3 0x1000 +#define EX86_SSE2_OP1 0x2000 +#define EX86_SSE2_OP2 0x4000 +#define EX86_SSE2 (EX86_SSE2_OP1 | EX86_SSE2_OP2) /* --------------------------------------------------------------------- */ /* Instrucion forms */ @@ -166,13 +181,18 @@ static SLJIT_CONST sljit_ub reg_lmap[SLJIT_NO_REGISTERS + 4] = { #define CALL_i32 0xe8 #define CALL_rm (/* GROUP_FF */ 2 << 3) #define CDQ 0x99 -#define CMOVNE_r_rm (/* GROUP_0F */ 0x45) +#define CMOVE_r_rm (/* GROUP_0F */ 0x44) #define CMP (/* BINARY */ 7 << 3) #define CMP_EAX_i32 0x3d #define CMP_r_rm 0x3b #define CMP_rm_r 0x39 +#define CVTPD2PS_x_xm 0x5a +#define CVTSI2SD_x_rm 0x2a +#define CVTTSD2SI_r_xm 0x2c #define DIV (/* GROUP_F7 */ 6 << 3) #define DIVSD_x_xm 0x5e +#define FSTPS 0xd9 +#define FSTPD 0xdd #define INT3 0xcc #define IDIV (/* GROUP_F7 */ 7 << 3) #define IMUL (/* GROUP_F7 */ 5 << 3) @@ -180,6 +200,7 @@ static SLJIT_CONST sljit_ub reg_lmap[SLJIT_NO_REGISTERS + 4] = { #define IMUL_r_rm_i8 0x6b #define IMUL_r_rm_i32 0x69 #define JE_i8 0x74 +#define JNE_i8 0x75 #define JMP_i8 0xeb #define JMP_i32 0xe9 #define JMP_rm (/* GROUP_FF */ 4 << 3) @@ -210,6 +231,7 @@ static SLJIT_CONST sljit_ub reg_lmap[SLJIT_NO_REGISTERS + 4] = { #define POP_r 0x58 #define POP_rm 0x8f #define POPF 0x9d +#define PREFETCH 0x18 #define PUSH_i32 0x68 #define PUSH_r 0x50 #define PUSH_rm (/* GROUP_FF */ 6 << 3) @@ -231,6 +253,7 @@ static SLJIT_CONST sljit_ub reg_lmap[SLJIT_NO_REGISTERS + 4] = { #define TEST_EAX_i32 0xa9 #define TEST_rm_r 0x85 #define UCOMISD_x_xm 0x2e +#define UNPCKLPD_x_xm 0x14 #define XCHG_EAX_r 0x90 #define XCHG_r_rm 0x87 #define XOR (/* BINARY */ 6 << 3) @@ -263,24 +286,49 @@ static SLJIT_CONST sljit_ub reg_lmap[SLJIT_NO_REGISTERS + 4] = { /* Multithreading does not affect these static variables, since they store built-in CPU features. Therefore they can be overwritten by different threads if they detect the CPU features in the same time. 
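Note: the probes below read CPUID leaf 1, where EDX bit 15 reports CMOV and bit 26 reports SSE2. A self-contained sketch of the same test, assuming a GCC/clang toolchain with <cpuid.h> (the file itself goes through compiler-specific paths):

    #include <cpuid.h>

    static int probe_features(void) /* hypothetical helper */
    {
        unsigned int eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;
        return ((edx >> 15) & 1) | (((edx >> 26) & 1) << 1); /* bit 0: CMOV, bit 1: SSE2 */
    }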
*/
-#if (defined SLJIT_SSE2 && SLJIT_SSE2) && (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
-static sljit_si cpu_has_sse2 = -1;
+#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
+static sljit_s32 cpu_has_sse2 = -1;
#endif
-static sljit_si cpu_has_cmov = -1;
+static sljit_s32 cpu_has_cmov = -1;

-#if defined(_MSC_VER) && _MSC_VER >= 1400
+#ifdef _WIN32_WCE
+#include <cmnintrin.h>
+#elif defined(_MSC_VER) && _MSC_VER >= 1400
#include <intrin.h>
#endif

+/******************************************************/
+/* Unaligned-store functions */
+/******************************************************/
+
+static SLJIT_INLINE void sljit_unaligned_store_s16(void *addr, sljit_s16 value)
+{
+ SLJIT_MEMCPY(addr, &value, sizeof(value));
+}
+
+static SLJIT_INLINE void sljit_unaligned_store_s32(void *addr, sljit_s32 value)
+{
+ SLJIT_MEMCPY(addr, &value, sizeof(value));
+}
+
+static SLJIT_INLINE void sljit_unaligned_store_sw(void *addr, sljit_sw value)
+{
+ SLJIT_MEMCPY(addr, &value, sizeof(value));
+}
+
+/******************************************************/
+/* Utility functions */
+/******************************************************/
+
static void get_cpu_features(void)
{
- sljit_ui features;
+ sljit_u32 features;

#if defined(_MSC_VER) && _MSC_VER >= 1400

 int CPUInfo[4];
 __cpuid(CPUInfo, 1);
- features = (sljit_ui)CPUInfo[3];
+ features = (sljit_u32)CPUInfo[3];

#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C)

@@ -317,88 +365,91 @@ static void get_cpu_features(void)

#endif /* _MSC_VER && _MSC_VER >= 1400 */

-#if (defined SLJIT_SSE2 && SLJIT_SSE2) && (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
+#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
 cpu_has_sse2 = (features >> 26) & 0x1;
#endif
 cpu_has_cmov = (features >> 15) & 0x1;
}

-static sljit_ub get_jump_code(sljit_si type)
+static sljit_u8 get_jump_code(sljit_s32 type)
{
 switch (type) {
- case SLJIT_C_EQUAL:
- case SLJIT_C_FLOAT_EQUAL:
+ case SLJIT_EQUAL:
+ case SLJIT_EQUAL_F64:
 return 0x84 /* je */;

- case SLJIT_C_NOT_EQUAL:
- case SLJIT_C_FLOAT_NOT_EQUAL:
+ case SLJIT_NOT_EQUAL:
+ case SLJIT_NOT_EQUAL_F64:
 return 0x85 /* jne */;

- case SLJIT_C_LESS:
- case SLJIT_C_FLOAT_LESS:
+ case SLJIT_LESS:
+ case SLJIT_LESS_F64:
 return 0x82 /* jc */;

- case SLJIT_C_GREATER_EQUAL:
- case SLJIT_C_FLOAT_GREATER_EQUAL:
+ case SLJIT_GREATER_EQUAL:
+ case SLJIT_GREATER_EQUAL_F64:
 return 0x83 /* jae */;

- case SLJIT_C_GREATER:
- case SLJIT_C_FLOAT_GREATER:
+ case SLJIT_GREATER:
+ case SLJIT_GREATER_F64:
 return 0x87 /* jnbe */;

- case SLJIT_C_LESS_EQUAL:
- case SLJIT_C_FLOAT_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL:
+ case SLJIT_LESS_EQUAL_F64:
 return 0x86 /* jbe */;

- case SLJIT_C_SIG_LESS:
+ case SLJIT_SIG_LESS:
 return 0x8c /* jl */;

- case SLJIT_C_SIG_GREATER_EQUAL:
+ case SLJIT_SIG_GREATER_EQUAL:
 return 0x8d /* jnl */;

- case SLJIT_C_SIG_GREATER:
+ case SLJIT_SIG_GREATER:
 return 0x8f /* jnle */;

- case SLJIT_C_SIG_LESS_EQUAL:
+ case SLJIT_SIG_LESS_EQUAL:
 return 0x8e /* jle */;

- case SLJIT_C_OVERFLOW:
- case SLJIT_C_MUL_OVERFLOW:
+ case SLJIT_OVERFLOW:
+ case SLJIT_MUL_OVERFLOW:
 return 0x80 /* jo */;

- case SLJIT_C_NOT_OVERFLOW:
- case SLJIT_C_MUL_NOT_OVERFLOW:
+ case SLJIT_NOT_OVERFLOW:
+ case SLJIT_MUL_NOT_OVERFLOW:
 return 0x81 /* jno */;

- case SLJIT_C_FLOAT_UNORDERED:
+ case SLJIT_UNORDERED_F64:
 return 0x8a /* jp */;

- case SLJIT_C_FLOAT_ORDERED:
+ case SLJIT_ORDERED_F64:
 return 0x8b /* jpo */;
 }
 return 0;
}

-static sljit_ub* generate_far_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, sljit_si type);
-
-#if (defined SLJIT_CONFIG_X86_64
&& SLJIT_CONFIG_X86_64) -static sljit_ub* generate_fixed_jump(sljit_ub *code_ptr, sljit_sw addr, sljit_si type); +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) +static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_sw executable_offset); +#else +static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr); +static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, sljit_u8 *code_ptr, sljit_uw max_label); #endif -static sljit_ub* generate_near_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, sljit_ub *code, sljit_si type) +static sljit_u8* generate_near_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_u8 *code, sljit_sw executable_offset) { - sljit_si short_jump; + sljit_s32 type = jump->flags >> TYPE_SHIFT; + sljit_s32 short_jump; sljit_uw label_addr; if (jump->flags & JUMP_LABEL) label_addr = (sljit_uw)(code + jump->u.label->size); else - label_addr = jump->u.target; + label_addr = jump->u.target - executable_offset; + short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if ((sljit_sw)(label_addr - (jump->addr + 1)) > 0x7fffffffll || (sljit_sw)(label_addr - (jump->addr + 1)) < -0x80000000ll) - return generate_far_jump_code(jump, code_ptr, type); + if ((sljit_sw)(label_addr - (jump->addr + 1)) > HALFWORD_MAX || (sljit_sw)(label_addr - (jump->addr + 1)) < HALFWORD_MIN) + return generate_far_jump_code(jump, code_ptr); #endif if (type == SLJIT_JUMP) { @@ -425,14 +476,10 @@ static sljit_ub* generate_near_jump_code(struct sljit_jump *jump, sljit_ub *code if (short_jump) { jump->flags |= PATCH_MB; - code_ptr += sizeof(sljit_sb); + code_ptr += sizeof(sljit_s8); } else { jump->flags |= PATCH_MW; -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - code_ptr += sizeof(sljit_sw); -#else - code_ptr += sizeof(sljit_si); -#endif + code_ptr += sizeof(sljit_s32); } return code_ptr; @@ -441,22 +488,25 @@ static sljit_ub* generate_near_jump_code(struct sljit_jump *jump, sljit_ub *code SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) { struct sljit_memory_fragment *buf; - sljit_ub *code; - sljit_ub *code_ptr; - sljit_ub *buf_ptr; - sljit_ub *buf_end; - sljit_ub len; + sljit_u8 *code; + sljit_u8 *code_ptr; + sljit_u8 *buf_ptr; + sljit_u8 *buf_end; + sljit_u8 len; + sljit_sw executable_offset; + sljit_sw jump_addr; struct sljit_label *label; struct sljit_jump *jump; struct sljit_const *const_; + struct sljit_put_label *put_label; CHECK_ERROR_PTR(); - check_sljit_generate_code(compiler); + CHECK_PTR(check_sljit_generate_code(compiler)); reverse_buf(compiler); /* Second code generation pass. */ - code = (sljit_ub*)SLJIT_MALLOC_EXEC(compiler->size); + code = (sljit_u8*)SLJIT_MALLOC_EXEC(compiler->size); PTR_FAIL_WITH_EXEC_IF(code); buf = compiler->buf; @@ -464,6 +514,9 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil label = compiler->labels; jump = compiler->jumps; const_ = compiler->consts; + put_label = compiler->put_labels; + executable_offset = SLJIT_EXEC_OFFSET(code); + do { buf_ptr = buf->memory; buf_end = buf_ptr + buf->used_size; @@ -471,39 +524,43 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil len = *buf_ptr++; if (len > 0) { /* The code is already generated. 
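Note: each record in the instruction buffer is either a length-prefixed run of finished machine-code bytes (len > 0) or a zero length followed by a one-byte tag that the switch below dispatches on. A hypothetical enum naming the tags (the source uses the raw values):

    enum buf_record_tag { TAG_LABEL = 0, TAG_JUMP = 1, TAG_CONST = 2, TAG_PUT_LABEL = 3 };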
*/ - SLJIT_MEMMOVE(code_ptr, buf_ptr, len); + SLJIT_MEMCPY(code_ptr, buf_ptr, len); code_ptr += len; buf_ptr += len; } else { - if (*buf_ptr >= 4) { + switch (*buf_ptr) { + case 0: + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + label->size = code_ptr - code; + label = label->next; + break; + case 1: jump->addr = (sljit_uw)code_ptr; if (!(jump->flags & SLJIT_REWRITABLE_JUMP)) - code_ptr = generate_near_jump_code(jump, code_ptr, code, *buf_ptr - 4); - else - code_ptr = generate_far_jump_code(jump, code_ptr, *buf_ptr - 4); + code_ptr = generate_near_jump_code(jump, code_ptr, code, executable_offset); + else { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + code_ptr = generate_far_jump_code(jump, code_ptr, executable_offset); +#else + code_ptr = generate_far_jump_code(jump, code_ptr); +#endif + } jump = jump->next; - } - else if (*buf_ptr == 0) { - label->addr = (sljit_uw)code_ptr; - label->size = code_ptr - code; - label = label->next; - } - else if (*buf_ptr == 1) { + break; + case 2: const_->addr = ((sljit_uw)code_ptr) - sizeof(sljit_sw); const_ = const_->next; - } - else { -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - *code_ptr++ = (*buf_ptr == 2) ? CALL_i32 : JMP_i32; - buf_ptr++; - *(sljit_sw*)code_ptr = *(sljit_sw*)buf_ptr - ((sljit_sw)code_ptr + sizeof(sljit_sw)); - code_ptr += sizeof(sljit_sw); - buf_ptr += sizeof(sljit_sw) - 1; -#else - code_ptr = generate_fixed_jump(code_ptr, *(sljit_sw*)(buf_ptr + 1), *buf_ptr); - buf_ptr += sizeof(sljit_sw); + break; + default: + SLJIT_ASSERT(*buf_ptr == 3); + SLJIT_ASSERT(put_label->label); + put_label->addr = (sljit_uw)code_ptr; +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + code_ptr = generate_put_label_code(put_label, code_ptr, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size)); #endif + put_label = put_label->next; + break; } buf_ptr++; } @@ -515,128 +572,135 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil SLJIT_ASSERT(!label); SLJIT_ASSERT(!jump); SLJIT_ASSERT(!const_); + SLJIT_ASSERT(!put_label); + SLJIT_ASSERT(code_ptr <= code + compiler->size); jump = compiler->jumps; while (jump) { + jump_addr = jump->addr + executable_offset; + if (jump->flags & PATCH_MB) { - SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_sb))) >= -128 && (sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_sb))) <= 127); - *(sljit_ub*)jump->addr = (sljit_ub)(jump->u.label->addr - (jump->addr + sizeof(sljit_sb))); + SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) >= -128 && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) <= 127); + *(sljit_u8*)jump->addr = (sljit_u8)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))); } else if (jump->flags & PATCH_MW) { if (jump->flags & JUMP_LABEL) { #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - *(sljit_sw*)jump->addr = (sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_sw))); + sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_sw)))); #else - SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_si))) >= -0x80000000ll && (sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_si))) <= 0x7fffffffll); - *(sljit_si*)jump->addr = (sljit_si)(jump->u.label->addr - (jump->addr + sizeof(sljit_si))); + SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && 
(sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX); + sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32)))); #endif } else { #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - *(sljit_sw*)jump->addr = (sljit_sw)(jump->u.target - (jump->addr + sizeof(sljit_sw))); + sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_sw)))); #else - SLJIT_ASSERT((sljit_sw)(jump->u.target - (jump->addr + sizeof(sljit_si))) >= -0x80000000ll && (sljit_sw)(jump->u.target - (jump->addr + sizeof(sljit_si))) <= 0x7fffffffll); - *(sljit_si*)jump->addr = (sljit_si)(jump->u.target - (jump->addr + sizeof(sljit_si))); + SLJIT_ASSERT((sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX); + sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.target - (jump_addr + sizeof(sljit_s32)))); #endif } } #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) else if (jump->flags & PATCH_MD) - *(sljit_sw*)jump->addr = jump->u.label->addr; + sljit_unaligned_store_sw((void*)jump->addr, jump->u.label->addr); #endif jump = jump->next; } - /* Maybe we waste some space because of short jumps. */ - SLJIT_ASSERT(code_ptr <= code + compiler->size); + put_label = compiler->put_labels; + while (put_label) { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + sljit_unaligned_store_sw((void*)(put_label->addr - sizeof(sljit_sw)), (sljit_sw)put_label->label->addr); +#else + if (put_label->flags & PATCH_MD) { + SLJIT_ASSERT(put_label->label->addr > HALFWORD_MAX); + sljit_unaligned_store_sw((void*)(put_label->addr - sizeof(sljit_sw)), (sljit_sw)put_label->label->addr); + } + else { + SLJIT_ASSERT(put_label->label->addr <= HALFWORD_MAX); + sljit_unaligned_store_s32((void*)(put_label->addr - sizeof(sljit_s32)), (sljit_s32)put_label->label->addr); + } +#endif + + put_label = put_label->next; + } + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_offset = executable_offset; compiler->executable_size = code_ptr - code; - return (void*)code; + return (void*)(code + executable_offset); } -/* --------------------------------------------------------------------- */ -/* Operators */ -/* --------------------------------------------------------------------- */ - -static sljit_si emit_cum_binary(struct sljit_compiler *compiler, - sljit_ub op_rm, sljit_ub op_mr, sljit_ub op_imm, sljit_ub op_eax_imm, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w); - -static sljit_si emit_non_cum_binary(struct sljit_compiler *compiler, - sljit_ub op_rm, sljit_ub op_mr, sljit_ub op_imm, sljit_ub op_eax_imm, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w); - -static sljit_si emit_mov(struct sljit_compiler *compiler, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw); - -static SLJIT_INLINE sljit_si emit_save_flags(struct sljit_compiler *compiler) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) { - sljit_ub *inst; + switch (feature_type) { + case SLJIT_HAS_FPU: +#ifdef SLJIT_IS_FPU_AVAILABLE + return SLJIT_IS_FPU_AVAILABLE; +#elif (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2) + if (cpu_has_sse2 == -1) + get_cpu_features(); + return cpu_has_sse2; +#else /* SLJIT_DETECT_SSE2 */ + return 1; +#endif /* SLJIT_DETECT_SSE2 */ #if (defined SLJIT_CONFIG_X86_32 && 
SLJIT_CONFIG_X86_32) - inst = (sljit_ub*)ensure_buf(compiler, 1 + 5); - FAIL_IF(!inst); - INC_SIZE(5); -#else - inst = (sljit_ub*)ensure_buf(compiler, 1 + 6); - FAIL_IF(!inst); - INC_SIZE(6); - *inst++ = REX_W; -#endif - *inst++ = LEA_r_m; /* lea esp/rsp, [esp/rsp + sizeof(sljit_sw)] */ - *inst++ = 0x64; - *inst++ = 0x24; - *inst++ = (sljit_ub)sizeof(sljit_sw); - *inst++ = PUSHF; - compiler->flags_saved = 1; - return SLJIT_SUCCESS; -} + case SLJIT_HAS_VIRTUAL_REGISTERS: + return 1; +#endif -static SLJIT_INLINE sljit_si emit_restore_flags(struct sljit_compiler *compiler, sljit_si keep_flags) -{ - sljit_ub *inst; + case SLJIT_HAS_CLZ: + case SLJIT_HAS_CMOV: + if (cpu_has_cmov == -1) + get_cpu_features(); + return cpu_has_cmov; -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - inst = (sljit_ub*)ensure_buf(compiler, 1 + 5); - FAIL_IF(!inst); - INC_SIZE(5); - *inst++ = POPF; + case SLJIT_HAS_SSE2: +#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2) + if (cpu_has_sse2 == -1) + get_cpu_features(); + return cpu_has_sse2; #else - inst = (sljit_ub*)ensure_buf(compiler, 1 + 6); - FAIL_IF(!inst); - INC_SIZE(6); - *inst++ = POPF; - *inst++ = REX_W; -#endif - *inst++ = LEA_r_m; /* lea esp/rsp, [esp/rsp - sizeof(sljit_sw)] */ - *inst++ = 0x64; - *inst++ = 0x24; - *inst++ = (sljit_ub)-(sljit_sb)sizeof(sljit_sw); - compiler->flags_saved = keep_flags; - return SLJIT_SUCCESS; + return 1; +#endif + + default: + return 0; + } } -#ifdef _WIN32 -#include +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +#define BINARY_OPCODE(opcode) (((opcode ## _EAX_i32) << 24) | ((opcode ## _r_rm) << 16) | ((opcode ## _rm_r) << 8) | (opcode)) -static void SLJIT_CALL sljit_grow_stack(sljit_sw local_size) -{ - /* Workaround for calling the internal _chkstk() function on Windows. - This function touches all 4k pages belongs to the requested stack space, - which size is passed in local_size. This is necessary on Windows where - the stack can only grow in 4k steps. However, this function just burn - CPU cycles if the stack is large enough. However, you don't know it in - advance, so it must always be called. I think this is a bad design in - general even if it has some reasons. 
*/ - *(sljit_si*)alloca(local_size) = 0; -} +static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler, + sljit_u32 op_types, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w); -#endif +static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler, + sljit_u32 op_types, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w); + +static sljit_s32 emit_mov(struct sljit_compiler *compiler, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw); + +#define EMIT_MOV(compiler, dst, dstw, src, srcw) \ + FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw)); + +static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler, + sljit_s32 single, sljit_s32 dst, sljit_sw dstw, sljit_s32 src); + +static SLJIT_INLINE sljit_s32 emit_sse2_load(struct sljit_compiler *compiler, + sljit_s32 single, sljit_s32 dst, sljit_s32 src, sljit_sw srcw); #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) #include "sjx8632.c" @@ -644,29 +708,22 @@ static void SLJIT_CALL sljit_grow_stack(sljit_sw local_size) #include "sjx8664.c" #endif -static sljit_si emit_mov(struct sljit_compiler *compiler, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +static sljit_s32 emit_mov(struct sljit_compiler *compiler, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_ub* inst; + sljit_u8* inst; - if (dst == SLJIT_UNUSED) { - /* No destination, doesn't need to setup flags. */ - if (src & SLJIT_MEM) { - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src, srcw); - FAIL_IF(!inst); - *inst = MOV_r_rm; - } - return SLJIT_SUCCESS; - } - if (src <= TMP_REGISTER) { + SLJIT_ASSERT(dst != SLJIT_UNUSED); + + if (FAST_IS_REG(src)) { inst = emit_x86_instruction(compiler, 1, src, 0, dst, dstw); FAIL_IF(!inst); *inst = MOV_rm_r; return SLJIT_SUCCESS; } if (src & SLJIT_IMM) { - if (dst <= TMP_REGISTER) { + if (FAST_IS_REG(dst)) { #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw); #else @@ -680,8 +737,10 @@ static sljit_si emit_mov(struct sljit_compiler *compiler, } #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) if (!compiler->mode32 && NOT_HALFWORD(srcw)) { - FAIL_IF(emit_load_imm64(compiler, TMP_REG2, srcw)); - inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, dst, dstw); + /* Immediate to memory move. Only SLJIT_MOV operation copies + an immediate directly into memory so TMP_REG1 can be used. */ + FAIL_IF(emit_load_imm64(compiler, TMP_REG1, srcw)); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw); FAIL_IF(!inst); *inst = MOV_rm_r; return SLJIT_SUCCESS; @@ -692,101 +751,99 @@ static sljit_si emit_mov(struct sljit_compiler *compiler, *inst = MOV_rm_i32; return SLJIT_SUCCESS; } - if (dst <= TMP_REGISTER) { + if (FAST_IS_REG(dst)) { inst = emit_x86_instruction(compiler, 1, dst, 0, src, srcw); FAIL_IF(!inst); *inst = MOV_r_rm; return SLJIT_SUCCESS; } - /* Memory to memory move. Requires two instruction. */ - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src, srcw); + /* Memory to memory move. Only SLJIT_MOV operation copies + data from memory to memory so TMP_REG1 can be used. 
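Note: x86 has no memory-to-memory MOV, so the move is split through the scratch register. On x86-64 SysV, where reg_map[TMP_REG1] == 2 (rdx, see the assertion in sljit_emit_op0 below), the emitted pair is effectively:

    /* mov rdx, [src] */
    /* mov [dst], rdx */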
*/ + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src, srcw); FAIL_IF(!inst); *inst = MOV_r_rm; - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, dst, dstw); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw); FAIL_IF(!inst); *inst = MOV_rm_r; return SLJIT_SUCCESS; } -#define EMIT_MOV(compiler, dst, dstw, src, srcw) \ - FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw)); - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) { - sljit_ub *inst; + sljit_u8 *inst; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - sljit_si size; + sljit_s32 size; #endif CHECK_ERROR(); - check_sljit_emit_op0(compiler, op); + CHECK(check_sljit_emit_op0(compiler, op)); switch (GET_OPCODE(op)) { case SLJIT_BREAKPOINT: - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); INC_SIZE(1); *inst = INT3; break; case SLJIT_NOP: - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); INC_SIZE(1); *inst = NOP; break; - case SLJIT_UMUL: - case SLJIT_SMUL: - case SLJIT_UDIV: - case SLJIT_SDIV: - compiler->flags_saved = 0; + case SLJIT_LMUL_UW: + case SLJIT_LMUL_SW: + case SLJIT_DIVMOD_UW: + case SLJIT_DIVMOD_SW: + case SLJIT_DIV_UW: + case SLJIT_DIV_SW: #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) #ifdef _WIN64 - SLJIT_COMPILE_ASSERT( - reg_map[SLJIT_SCRATCH_REG1] == 0 - && reg_map[SLJIT_SCRATCH_REG2] == 2 - && reg_map[TMP_REGISTER] > 7, - invalid_register_assignment_for_div_mul); + SLJIT_ASSERT( + reg_map[SLJIT_R0] == 0 + && reg_map[SLJIT_R1] == 2 + && reg_map[TMP_REG1] > 7); #else - SLJIT_COMPILE_ASSERT( - reg_map[SLJIT_SCRATCH_REG1] == 0 - && reg_map[SLJIT_SCRATCH_REG2] < 7 - && reg_map[TMP_REGISTER] == 2, - invalid_register_assignment_for_div_mul); + SLJIT_ASSERT( + reg_map[SLJIT_R0] == 0 + && reg_map[SLJIT_R1] < 7 + && reg_map[TMP_REG1] == 2); #endif - compiler->mode32 = op & SLJIT_INT_OP; + compiler->mode32 = op & SLJIT_I32_OP; #endif + SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments); op = GET_OPCODE(op); - if (op == SLJIT_UDIV) { + if ((op | 0x2) == SLJIT_DIV_UW) { #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || defined(_WIN64) - EMIT_MOV(compiler, TMP_REGISTER, 0, SLJIT_SCRATCH_REG2, 0); - inst = emit_x86_instruction(compiler, 1, SLJIT_SCRATCH_REG2, 0, SLJIT_SCRATCH_REG2, 0); + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R1, 0); + inst = emit_x86_instruction(compiler, 1, SLJIT_R1, 0, SLJIT_R1, 0); #else - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, TMP_REGISTER, 0); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, TMP_REG1, 0); #endif FAIL_IF(!inst); *inst = XOR_r_rm; } - if (op == SLJIT_SDIV) { + if ((op | 0x2) == SLJIT_DIV_SW) { #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || defined(_WIN64) - EMIT_MOV(compiler, TMP_REGISTER, 0, SLJIT_SCRATCH_REG2, 0); + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R1, 0); #endif #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); INC_SIZE(1); *inst = CDQ; #else if (compiler->mode32) { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); INC_SIZE(1); *inst = CDQ; } else { - inst = 
(sljit_ub*)ensure_buf(compiler, 1 + 2); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 2); FAIL_IF(!inst); INC_SIZE(2); *inst++ = REX_W; @@ -796,50 +853,56 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler } #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - inst = (sljit_ub*)ensure_buf(compiler, 1 + 2); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 2); FAIL_IF(!inst); INC_SIZE(2); *inst++ = GROUP_F7; - *inst = MOD_REG | ((op >= SLJIT_UDIV) ? reg_map[TMP_REGISTER] : reg_map[SLJIT_SCRATCH_REG2]); + *inst = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_map[TMP_REG1] : reg_map[SLJIT_R1]); #else #ifdef _WIN64 - size = (!compiler->mode32 || op >= SLJIT_UDIV) ? 3 : 2; + size = (!compiler->mode32 || op >= SLJIT_DIVMOD_UW) ? 3 : 2; #else size = (!compiler->mode32) ? 3 : 2; #endif - inst = (sljit_ub*)ensure_buf(compiler, 1 + size); + inst = (sljit_u8*)ensure_buf(compiler, 1 + size); FAIL_IF(!inst); INC_SIZE(size); #ifdef _WIN64 if (!compiler->mode32) - *inst++ = REX_W | ((op >= SLJIT_UDIV) ? REX_B : 0); - else if (op >= SLJIT_UDIV) + *inst++ = REX_W | ((op >= SLJIT_DIVMOD_UW) ? REX_B : 0); + else if (op >= SLJIT_DIVMOD_UW) *inst++ = REX_B; *inst++ = GROUP_F7; - *inst = MOD_REG | ((op >= SLJIT_UDIV) ? reg_lmap[TMP_REGISTER] : reg_lmap[SLJIT_SCRATCH_REG2]); + *inst = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_lmap[TMP_REG1] : reg_lmap[SLJIT_R1]); #else if (!compiler->mode32) *inst++ = REX_W; *inst++ = GROUP_F7; - *inst = MOD_REG | reg_map[SLJIT_SCRATCH_REG2]; + *inst = MOD_REG | reg_map[SLJIT_R1]; #endif #endif switch (op) { - case SLJIT_UMUL: + case SLJIT_LMUL_UW: *inst |= MUL; break; - case SLJIT_SMUL: + case SLJIT_LMUL_SW: *inst |= IMUL; break; - case SLJIT_UDIV: + case SLJIT_DIVMOD_UW: + case SLJIT_DIV_UW: *inst |= DIV; break; - case SLJIT_SDIV: + case SLJIT_DIVMOD_SW: + case SLJIT_DIV_SW: *inst |= IDIV; break; } #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && !defined(_WIN64) - EMIT_MOV(compiler, SLJIT_SCRATCH_REG2, 0, TMP_REGISTER, 0); + if (op <= SLJIT_DIVMOD_SW) + EMIT_MOV(compiler, SLJIT_R1, 0, TMP_REG1, 0); +#else + if (op >= SLJIT_DIV_UW) + EMIT_MOV(compiler, SLJIT_R1, 0, TMP_REG1, 0); #endif break; } @@ -849,31 +912,28 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler #define ENCODE_PREFIX(prefix) \ do { \ - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); \ + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); \ FAIL_IF(!inst); \ INC_SIZE(1); \ *inst = (prefix); \ } while (0) -static sljit_si emit_mov_byte(struct sljit_compiler *compiler, sljit_si sign, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_ub* inst; - sljit_si dst_r; + sljit_u8* inst; + sljit_s32 dst_r; #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - sljit_si work_r; + sljit_s32 work_r; #endif #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) compiler->mode32 = 0; #endif - if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM)) - return SLJIT_SUCCESS; /* Empty instruction. */ - if (src & SLJIT_IMM) { - if (dst <= TMP_REGISTER) { + if (FAST_IS_REG(dst)) { #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw); #else @@ -889,13 +949,13 @@ static sljit_si emit_mov_byte(struct sljit_compiler *compiler, sljit_si sign, return SLJIT_SUCCESS; } - dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + dst_r = FAST_IS_REG(dst) ? 
dst : TMP_REG1; - if ((dst & SLJIT_MEM) && src <= TMP_REGISTER) { + if ((dst & SLJIT_MEM) && FAST_IS_REG(src)) { #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) if (reg_map[src] >= 4) { - SLJIT_ASSERT(dst_r == TMP_REGISTER); - EMIT_MOV(compiler, TMP_REGISTER, 0, src, 0); + SLJIT_ASSERT(dst_r == TMP_REG1); + EMIT_MOV(compiler, TMP_REG1, 0, src, 0); } else dst_r = src; #else @@ -903,9 +963,9 @@ static sljit_si emit_mov_byte(struct sljit_compiler *compiler, sljit_si sign, #endif } #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - else if (src <= TMP_REGISTER && reg_map[src] >= 4) { + else if (FAST_IS_REG(src) && reg_map[src] >= 4) { /* src, dst are registers. */ - SLJIT_ASSERT(dst >= SLJIT_SCRATCH_REG1 && dst <= TMP_REGISTER); + SLJIT_ASSERT(SLOW_IS_REG(dst)); if (reg_map[dst] < 4) { if (dst != src) EMIT_MOV(compiler, dst, 0, src, 0); @@ -946,25 +1006,25 @@ static sljit_si emit_mov_byte(struct sljit_compiler *compiler, sljit_si sign, if (dst & SLJIT_MEM) { #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - if (dst_r == TMP_REGISTER) { + if (dst_r == TMP_REG1) { /* Find a non-used register, whose reg_map[src] < 4. */ - if ((dst & 0xf) == SLJIT_SCRATCH_REG1) { - if ((dst & 0xf0) == (SLJIT_SCRATCH_REG2 << 4)) - work_r = SLJIT_SCRATCH_REG3; + if ((dst & REG_MASK) == SLJIT_R0) { + if ((dst & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_R1)) + work_r = SLJIT_R2; else - work_r = SLJIT_SCRATCH_REG2; + work_r = SLJIT_R1; } else { - if ((dst & 0xf0) != (SLJIT_SCRATCH_REG1 << 4)) - work_r = SLJIT_SCRATCH_REG1; - else if ((dst & 0xf) == SLJIT_SCRATCH_REG2) - work_r = SLJIT_SCRATCH_REG3; + if ((dst & OFFS_REG_MASK) != TO_OFFS_REG(SLJIT_R0)) + work_r = SLJIT_R0; + else if ((dst & REG_MASK) == SLJIT_R1) + work_r = SLJIT_R2; else - work_r = SLJIT_SCRATCH_REG2; + work_r = SLJIT_R1; } - if (work_r == SLJIT_SCRATCH_REG1) { - ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REGISTER]); + if (work_r == SLJIT_R0) { + ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REG1]); } else { inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0); @@ -976,8 +1036,8 @@ static sljit_si emit_mov_byte(struct sljit_compiler *compiler, sljit_si sign, FAIL_IF(!inst); *inst = MOV_rm8_r8; - if (work_r == SLJIT_SCRATCH_REG1) { - ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REGISTER]); + if (work_r == SLJIT_R0) { + ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REG1]); } else { inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0); @@ -1000,22 +1060,43 @@ static sljit_si emit_mov_byte(struct sljit_compiler *compiler, sljit_si sign, return SLJIT_SUCCESS; } -static sljit_si emit_mov_half(struct sljit_compiler *compiler, sljit_si sign, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +static sljit_s32 emit_prefetch(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src, sljit_sw srcw) { - sljit_ub* inst; - sljit_si dst_r; + sljit_u8* inst; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - compiler->mode32 = 0; + compiler->mode32 = 1; #endif - if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM)) - return SLJIT_SUCCESS; /* Empty instruction. 
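Note: the 0F 18 opcode group selects the prefetch hint via the ModRM reg field, which is what the (n << 3) values below write: /1 = prefetcht0 (the default word-sized case), /2 = prefetcht1 (16-bit ops), /3 = prefetcht2 (8-bit ops). Roughly the effect of GCC's builtin, mentioned only as a reference point:

    __builtin_prefetch(p, 0, 3); /* comparable to prefetcht0 */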
*/ + inst = emit_x86_instruction(compiler, 2, 0, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst++ = PREFETCH; + + if (op >= SLJIT_MOV_U8 && op <= SLJIT_MOV_S8) + *inst |= (3 << 3); + else if (op >= SLJIT_MOV_U16 && op <= SLJIT_MOV_S16) + *inst |= (2 << 3); + else + *inst |= (1 << 3); + + return SLJIT_SUCCESS; +} + +static sljit_s32 emit_mov_half(struct sljit_compiler *compiler, sljit_s32 sign, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_u8* inst; + sljit_s32 dst_r; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = 0; +#endif if (src & SLJIT_IMM) { - if (dst <= TMP_REGISTER) { + if (FAST_IS_REG(dst)) { #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw); #else @@ -1031,9 +1112,9 @@ static sljit_si emit_mov_half(struct sljit_compiler *compiler, sljit_si sign, return SLJIT_SUCCESS; } - dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; - if ((dst & SLJIT_MEM) && src <= TMP_REGISTER) + if ((dst & SLJIT_MEM) && FAST_IS_REG(src)) dst_r = src; else { inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw); @@ -1051,20 +1132,12 @@ static sljit_si emit_mov_half(struct sljit_compiler *compiler, sljit_si sign, return SLJIT_SUCCESS; } -static sljit_si emit_unary(struct sljit_compiler *compiler, sljit_ub opcode, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +static sljit_s32 emit_unary(struct sljit_compiler *compiler, sljit_u8 opcode, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_ub* inst; + sljit_u8* inst; - if (dst == SLJIT_UNUSED) { - EMIT_MOV(compiler, TMP_REGISTER, 0, src, srcw); - inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REGISTER, 0); - FAIL_IF(!inst); - *inst++ = GROUP_F7; - *inst |= opcode; - return SLJIT_SUCCESS; - } if (dst == src && dstw == srcw) { /* Same input and output */ inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw); @@ -1073,43 +1146,40 @@ static sljit_si emit_unary(struct sljit_compiler *compiler, sljit_ub opcode, *inst |= opcode; return SLJIT_SUCCESS; } - if (dst <= TMP_REGISTER) { + + if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) + dst = TMP_REG1; + + if (FAST_IS_REG(dst)) { EMIT_MOV(compiler, dst, 0, src, srcw); - inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw); + inst = emit_x86_instruction(compiler, 1, 0, 0, dst, 0); FAIL_IF(!inst); *inst++ = GROUP_F7; *inst |= opcode; return SLJIT_SUCCESS; } - EMIT_MOV(compiler, TMP_REGISTER, 0, src, srcw); - inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REGISTER, 0); + + EMIT_MOV(compiler, TMP_REG1, 0, src, srcw); + inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0); FAIL_IF(!inst); *inst++ = GROUP_F7; *inst |= opcode; - EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0); return SLJIT_SUCCESS; } -static sljit_si emit_not_with_flags(struct sljit_compiler *compiler, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +static sljit_s32 emit_not_with_flags(struct sljit_compiler *compiler, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_ub* inst; + sljit_u8* inst; - if (dst == SLJIT_UNUSED) { - EMIT_MOV(compiler, TMP_REGISTER, 0, src, srcw); - inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REGISTER, 0); - FAIL_IF(!inst); - *inst++ = GROUP_F7; - *inst |= NOT_rm; - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, TMP_REGISTER, 0); - FAIL_IF(!inst); - *inst = OR_r_rm; - return 
SLJIT_SUCCESS; - } - if (dst <= TMP_REGISTER) { + if (dst == SLJIT_UNUSED) + dst = TMP_REG1; + + if (FAST_IS_REG(dst)) { EMIT_MOV(compiler, dst, 0, src, srcw); - inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw); + inst = emit_x86_instruction(compiler, 1, 0, 0, dst, 0); FAIL_IF(!inst); *inst++ = GROUP_F7; *inst |= NOT_rm; @@ -1118,201 +1188,153 @@ static sljit_si emit_not_with_flags(struct sljit_compiler *compiler, *inst = OR_r_rm; return SLJIT_SUCCESS; } - EMIT_MOV(compiler, TMP_REGISTER, 0, src, srcw); - inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REGISTER, 0); + + EMIT_MOV(compiler, TMP_REG1, 0, src, srcw); + inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0); FAIL_IF(!inst); *inst++ = GROUP_F7; *inst |= NOT_rm; - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, TMP_REGISTER, 0); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, TMP_REG1, 0); FAIL_IF(!inst); *inst = OR_r_rm; - EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0); return SLJIT_SUCCESS; } -static sljit_si emit_clz(struct sljit_compiler *compiler, sljit_si op_flags, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) +static const sljit_sw emit_clz_arg = 32 + 31; +#endif + +static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_ub* inst; - sljit_si dst_r; + sljit_u8* inst; + sljit_s32 dst_r; SLJIT_UNUSED_ARG(op_flags); - if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) { - /* Just set the zero flag. */ - EMIT_MOV(compiler, TMP_REGISTER, 0, src, srcw); - inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REGISTER, 0); - FAIL_IF(!inst); - *inst++ = GROUP_F7; - *inst |= NOT_rm; -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 31, TMP_REGISTER, 0); -#else - inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, !(op_flags & SLJIT_INT_OP) ? 63 : 31, TMP_REGISTER, 0); -#endif - FAIL_IF(!inst); - *inst |= SHR; - return SLJIT_SUCCESS; - } - if (SLJIT_UNLIKELY(src & SLJIT_IMM)) { - EMIT_MOV(compiler, TMP_REGISTER, 0, SLJIT_IMM, srcw); - src = TMP_REGISTER; - srcw = 0; - } + if (cpu_has_cmov == -1) + get_cpu_features(); - inst = emit_x86_instruction(compiler, 2, TMP_REGISTER, 0, src, srcw); + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; + + inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw); FAIL_IF(!inst); *inst++ = GROUP_0F; *inst = BSR_r_rm; #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - if (dst <= TMP_REGISTER) - dst_r = dst; - else { - /* Find an unused temporary register. */ - if ((dst & 0xf) != SLJIT_SCRATCH_REG1 && (dst & 0xf0) != (SLJIT_SCRATCH_REG1 << 4)) - dst_r = SLJIT_SCRATCH_REG1; - else if ((dst & 0xf) != SLJIT_SCRATCH_REG2 && (dst & 0xf0) != (SLJIT_SCRATCH_REG2 << 4)) - dst_r = SLJIT_SCRATCH_REG2; + if (cpu_has_cmov) { + if (dst_r != TMP_REG1) { + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 32 + 31); + inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG1, 0); + } else - dst_r = SLJIT_SCRATCH_REG3; - EMIT_MOV(compiler, dst, dstw, dst_r, 0); - } - EMIT_MOV(compiler, dst_r, 0, SLJIT_IMM, 32 + 31); -#else - dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REG2; - compiler->mode32 = 0; - EMIT_MOV(compiler, dst_r, 0, SLJIT_IMM, !(op_flags & SLJIT_INT_OP) ? 
64 + 63 : 32 + 31); - compiler->mode32 = op_flags & SLJIT_INT_OP; -#endif + inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), (sljit_sw)&emit_clz_arg); - if (cpu_has_cmov == -1) - get_cpu_features(); - - if (cpu_has_cmov) { - inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REGISTER, 0); FAIL_IF(!inst); *inst++ = GROUP_0F; - *inst = CMOVNE_r_rm; - } else { -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); - FAIL_IF(!inst); - INC_SIZE(4); + *inst = CMOVE_r_rm; + } + else + FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, 32 + 31)); - *inst++ = JE_i8; - *inst++ = 2; - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_map[dst_r] << 3) | reg_map[TMP_REGISTER]; + inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0); #else - inst = (sljit_ub*)ensure_buf(compiler, 1 + 5); - FAIL_IF(!inst); - INC_SIZE(5); + if (cpu_has_cmov) { + EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? (64 + 63) : (32 + 31)); - *inst++ = JE_i8; - *inst++ = 3; - *inst++ = REX_W | (reg_map[dst_r] >= 8 ? REX_R : 0) | (reg_map[TMP_REGISTER] >= 8 ? REX_B : 0); - *inst++ = MOV_r_rm; - *inst++ = MOD_REG | (reg_lmap[dst_r] << 3) | reg_lmap[TMP_REGISTER]; -#endif + inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = CMOVE_r_rm; } + else + FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? (64 + 63) : (32 + 31))); -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0); -#else - inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, !(op_flags & SLJIT_INT_OP) ? 63 : 31, dst_r, 0); + inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? 
63 : 31, dst_r, 0); #endif + FAIL_IF(!inst); *(inst + 1) |= XOR; -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - if (dst & SLJIT_MEM) { - inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw); - FAIL_IF(!inst); - *inst = XCHG_r_rm; - } -#else if (dst & SLJIT_MEM) - EMIT_MOV(compiler, dst, dstw, TMP_REG2, 0); -#endif + EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0); return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_ub* inst; - sljit_si update = 0; - sljit_si op_flags = GET_ALL_FLAGS(op); + sljit_s32 op_flags = GET_ALL_FLAGS(op); #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - sljit_si dst_is_ereg = 0; - sljit_si src_is_ereg = 0; -#else -# define src_is_ereg 0 + sljit_s32 dst_is_ereg = 0; #endif CHECK_ERROR(); - check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); + CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw)); ADJUST_LOCAL_OFFSET(dst, dstw); ADJUST_LOCAL_OFFSET(src, srcw); CHECK_EXTRA_REGS(dst, dstw, dst_is_ereg = 1); - CHECK_EXTRA_REGS(src, srcw, src_is_ereg = 1); + CHECK_EXTRA_REGS(src, srcw, (void)0); #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - compiler->mode32 = op_flags & SLJIT_INT_OP; + compiler->mode32 = op_flags & SLJIT_I32_OP; #endif + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) { + if (op <= SLJIT_MOV_P && (src & SLJIT_MEM)) + return emit_prefetch(compiler, op, src, srcw); + return SLJIT_SUCCESS; + } + op = GET_OPCODE(op); - if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) { + + if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) { #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) compiler->mode32 = 0; #endif - if (op_flags & SLJIT_INT_OP) { - if (src <= TMP_REGISTER && src == dst) { - if (!TYPE_CAST_NEEDED(op)) - return SLJIT_SUCCESS; - } -#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if (op == SLJIT_MOV_SI && (src & SLJIT_MEM)) - op = SLJIT_MOV_UI; - if (op == SLJIT_MOVU_SI && (src & SLJIT_MEM)) - op = SLJIT_MOVU_UI; - if (op == SLJIT_MOV_UI && (src & SLJIT_IMM)) - op = SLJIT_MOV_SI; - if (op == SLJIT_MOVU_UI && (src & SLJIT_IMM)) - op = SLJIT_MOVU_SI; -#endif + if (FAST_IS_REG(src) && src == dst) { + if (!TYPE_CAST_NEEDED(op)) + return SLJIT_SUCCESS; } - SLJIT_COMPILE_ASSERT(SLJIT_MOV + 8 == SLJIT_MOVU, movu_offset); - if (op >= SLJIT_MOVU) { - update = 1; - op -= 8; + if (op_flags & SLJIT_I32_OP) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (src & SLJIT_MEM) { + if (op == SLJIT_MOV_S32) + op = SLJIT_MOV_U32; + } + else if (src & SLJIT_IMM) { + if (op == SLJIT_MOV_U32) + op = SLJIT_MOV_S32; + } +#endif } if (src & SLJIT_IMM) { switch (op) { - case SLJIT_MOV_UB: - srcw = (sljit_ub)srcw; + case SLJIT_MOV_U8: + srcw = (sljit_u8)srcw; break; - case SLJIT_MOV_SB: - srcw = (sljit_sb)srcw; + case SLJIT_MOV_S8: + srcw = (sljit_s8)srcw; break; - case SLJIT_MOV_UH: - srcw = (sljit_uh)srcw; + case SLJIT_MOV_U16: + srcw = (sljit_u16)srcw; break; - case SLJIT_MOV_SH: - srcw = (sljit_sh)srcw; + case SLJIT_MOV_S16: + srcw = (sljit_s16)srcw; break; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - case SLJIT_MOV_UI: - srcw = (sljit_ui)srcw; + case SLJIT_MOV_U32: + srcw = (sljit_u32)srcw; break; - case SLJIT_MOV_SI: - srcw = (sljit_si)srcw; + case SLJIT_MOV_S32: + srcw = (sljit_s32)srcw; break; 
#endif } @@ -1322,18 +1344,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler #endif } - if (SLJIT_UNLIKELY(update) && (src & SLJIT_MEM) && !src_is_ereg && (src & 0xf) && (srcw != 0 || (src & 0xf0) != 0)) { - inst = emit_x86_instruction(compiler, 1, src & 0xf, 0, src, srcw); - FAIL_IF(!inst); - *inst = LEA_r_m; - src &= SLJIT_MEM | 0xf; - srcw = 0; - } - #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - if (SLJIT_UNLIKELY(dst_is_ereg) && (!(op == SLJIT_MOV || op == SLJIT_MOV_UI || op == SLJIT_MOV_SI || op == SLJIT_MOV_P) || (src & SLJIT_MEM))) { - SLJIT_ASSERT(dst == SLJIT_MEM1(SLJIT_LOCALS_REG)); - dst = TMP_REGISTER; + if (SLJIT_UNLIKELY(dst_is_ereg) && (!(op == SLJIT_MOV || op == SLJIT_MOV_U32 || op == SLJIT_MOV_S32 || op == SLJIT_MOV_P) || (src & SLJIT_MEM))) { + SLJIT_ASSERT(dst == SLJIT_MEM1(SLJIT_SP)); + dst = TMP_REG1; } #endif @@ -1341,71 +1355,54 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler case SLJIT_MOV: case SLJIT_MOV_P: #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - case SLJIT_MOV_UI: - case SLJIT_MOV_SI: + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: #endif FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw)); break; - case SLJIT_MOV_UB: + case SLJIT_MOV_U8: FAIL_IF(emit_mov_byte(compiler, 0, dst, dstw, src, srcw)); break; - case SLJIT_MOV_SB: + case SLJIT_MOV_S8: FAIL_IF(emit_mov_byte(compiler, 1, dst, dstw, src, srcw)); break; - case SLJIT_MOV_UH: + case SLJIT_MOV_U16: FAIL_IF(emit_mov_half(compiler, 0, dst, dstw, src, srcw)); break; - case SLJIT_MOV_SH: + case SLJIT_MOV_S16: FAIL_IF(emit_mov_half(compiler, 1, dst, dstw, src, srcw)); break; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - case SLJIT_MOV_UI: + case SLJIT_MOV_U32: FAIL_IF(emit_mov_int(compiler, 0, dst, dstw, src, srcw)); break; - case SLJIT_MOV_SI: + case SLJIT_MOV_S32: FAIL_IF(emit_mov_int(compiler, 1, dst, dstw, src, srcw)); break; #endif } #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - if (SLJIT_UNLIKELY(dst_is_ereg) && dst == TMP_REGISTER) - return emit_mov(compiler, SLJIT_MEM1(SLJIT_LOCALS_REG), dstw, TMP_REGISTER, 0); + if (SLJIT_UNLIKELY(dst_is_ereg) && dst == TMP_REG1) + return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), dstw, TMP_REG1, 0); #endif - - if (SLJIT_UNLIKELY(update) && (dst & SLJIT_MEM) && (dst & 0xf) && (dstw != 0 || (dst & 0xf0) != 0)) { - inst = emit_x86_instruction(compiler, 1, dst & 0xf, 0, dst, dstw); - FAIL_IF(!inst); - *inst = LEA_r_m; - } return SLJIT_SUCCESS; } - if (SLJIT_UNLIKELY(GET_FLAGS(op_flags))) - compiler->flags_saved = 0; - switch (op) { case SLJIT_NOT: - if (SLJIT_UNLIKELY(op_flags & SLJIT_SET_E)) + if (SLJIT_UNLIKELY(op_flags & SLJIT_SET_Z)) return emit_not_with_flags(compiler, dst, dstw, src, srcw); return emit_unary(compiler, NOT_rm, dst, dstw, src, srcw); case SLJIT_NEG: - if (SLJIT_UNLIKELY(op_flags & SLJIT_KEEP_FLAGS) && !compiler->flags_saved) - FAIL_IF(emit_save_flags(compiler)); return emit_unary(compiler, NEG_rm, dst, dstw, src, srcw); case SLJIT_CLZ: - if (SLJIT_UNLIKELY(op_flags & SLJIT_KEEP_FLAGS) && !compiler->flags_saved) - FAIL_IF(emit_save_flags(compiler)); return emit_clz(compiler, op_flags, dst, dstw, src, srcw); } return SLJIT_SUCCESS; - -#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) -# undef src_is_ereg -#endif } #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) @@ -1417,8 +1414,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler *(inst + 1) |= (op_imm); \ } \ else { \ - 
FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immw)); \ - inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, arg, argw); \ + FAIL_IF(emit_load_imm64(compiler, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, immw)); \ + inst = emit_x86_instruction(compiler, 1, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, 0, arg, argw); \ FAIL_IF(!inst); \ *inst = (op_mr); \ } @@ -1438,21 +1435,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler #endif -static sljit_si emit_cum_binary(struct sljit_compiler *compiler, - sljit_ub op_rm, sljit_ub op_mr, sljit_ub op_imm, sljit_ub op_eax_imm, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler, + sljit_u32 op_types, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_ub* inst; + sljit_u8* inst; + sljit_u8 op_eax_imm = (op_types >> 24); + sljit_u8 op_rm = (op_types >> 16) & 0xff; + sljit_u8 op_mr = (op_types >> 8) & 0xff; + sljit_u8 op_imm = op_types & 0xff; if (dst == SLJIT_UNUSED) { - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); if (src2 & SLJIT_IMM) { - BINARY_IMM(op_imm, op_mr, src2w, TMP_REGISTER, 0); + BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0); } else { - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w); FAIL_IF(!inst); *inst = op_rm; } @@ -1462,9 +1463,9 @@ static sljit_si emit_cum_binary(struct sljit_compiler *compiler, if (dst == src1 && dstw == src1w) { if (src2 & SLJIT_IMM) { #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if ((dst == SLJIT_SCRATCH_REG1) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { + if ((dst == SLJIT_R0) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { #else - if ((dst == SLJIT_SCRATCH_REG1) && (src2w > 127 || src2w < -128)) { + if ((dst == SLJIT_R0) && (src2w > 127 || src2w < -128)) { #endif BINARY_EAX_IMM(op_eax_imm, src2w); } @@ -1472,20 +1473,20 @@ static sljit_si emit_cum_binary(struct sljit_compiler *compiler, BINARY_IMM(op_imm, op_mr, src2w, dst, dstw); } } - else if (dst <= TMP_REGISTER) { + else if (FAST_IS_REG(dst)) { inst = emit_x86_instruction(compiler, 1, dst, dstw, src2, src2w); FAIL_IF(!inst); *inst = op_rm; } - else if (src2 <= TMP_REGISTER) { + else if (FAST_IS_REG(src2)) { /* Special exception for sljit_emit_op_flags. 
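Note: the op_types word unpacked above is produced by the BINARY_OPCODE macro defined earlier, which packs the four encodings of one ALU operation into a single sljit_u32. A worked instance for ADD, assuming the usual x86 opcode values (ADD_EAX_i32 = 0x05, ADD_r_rm = 0x03, ADD_rm_r = 0x01, ADD = /0):

    /* BINARY_OPCODE(ADD) == (0x05 << 24) | (0x03 << 16) | (0x01 << 8) | 0x00 == 0x05030100 */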
*/ inst = emit_x86_instruction(compiler, 1, src2, src2w, dst, dstw); FAIL_IF(!inst); *inst = op_mr; } else { - EMIT_MOV(compiler, TMP_REGISTER, 0, src2, src2w); - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, dst, dstw); + EMIT_MOV(compiler, TMP_REG1, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw); FAIL_IF(!inst); *inst = op_mr; } @@ -1496,9 +1497,9 @@ static sljit_si emit_cum_binary(struct sljit_compiler *compiler, if (dst == src2 && dstw == src2w) { if (src1 & SLJIT_IMM) { #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if ((dst == SLJIT_SCRATCH_REG1) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) { + if ((dst == SLJIT_R0) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) { #else - if ((dst == SLJIT_SCRATCH_REG1) && (src1w > 127 || src1w < -128)) { + if ((dst == SLJIT_R0) && (src1w > 127 || src1w < -128)) { #endif BINARY_EAX_IMM(op_eax_imm, src1w); } @@ -1506,19 +1507,19 @@ static sljit_si emit_cum_binary(struct sljit_compiler *compiler, BINARY_IMM(op_imm, op_mr, src1w, dst, dstw); } } - else if (dst <= TMP_REGISTER) { + else if (FAST_IS_REG(dst)) { inst = emit_x86_instruction(compiler, 1, dst, dstw, src1, src1w); FAIL_IF(!inst); *inst = op_rm; } - else if (src1 <= TMP_REGISTER) { + else if (FAST_IS_REG(src1)) { inst = emit_x86_instruction(compiler, 1, src1, src1w, dst, dstw); FAIL_IF(!inst); *inst = op_mr; } else { - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, dst, dstw); + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw); FAIL_IF(!inst); *inst = op_mr; } @@ -1526,7 +1527,7 @@ static sljit_si emit_cum_binary(struct sljit_compiler *compiler, } /* General version. */ - if (dst <= TMP_REGISTER) { + if (FAST_IS_REG(dst)) { EMIT_MOV(compiler, dst, 0, src1, src1w); if (src2 & SLJIT_IMM) { BINARY_IMM(op_imm, op_mr, src2w, dst, 0); @@ -1539,36 +1540,40 @@ static sljit_si emit_cum_binary(struct sljit_compiler *compiler, } else { /* This version requires less memory writing. 
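Note: with a memory destination the operation is staged in TMP_REG1 so that memory is written exactly once at the end; schematically:

    /* mov  tmp, src1        EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w)  */
    /* op   tmp, src2        BINARY_IMM or the op_rm register form         */
    /* mov  [dst], tmp       EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0)    */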
*/ - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); if (src2 & SLJIT_IMM) { - BINARY_IMM(op_imm, op_mr, src2w, TMP_REGISTER, 0); + BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0); } else { - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w); FAIL_IF(!inst); *inst = op_rm; } - EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0); } return SLJIT_SUCCESS; } -static sljit_si emit_non_cum_binary(struct sljit_compiler *compiler, - sljit_ub op_rm, sljit_ub op_mr, sljit_ub op_imm, sljit_ub op_eax_imm, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler, + sljit_u32 op_types, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_ub* inst; + sljit_u8* inst; + sljit_u8 op_eax_imm = (op_types >> 24); + sljit_u8 op_rm = (op_types >> 16) & 0xff; + sljit_u8 op_mr = (op_types >> 8) & 0xff; + sljit_u8 op_imm = op_types & 0xff; if (dst == SLJIT_UNUSED) { - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); if (src2 & SLJIT_IMM) { - BINARY_IMM(op_imm, op_mr, src2w, TMP_REGISTER, 0); + BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0); } else { - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w); FAIL_IF(!inst); *inst = op_rm; } @@ -1578,9 +1583,9 @@ static sljit_si emit_non_cum_binary(struct sljit_compiler *compiler, if (dst == src1 && dstw == src1w) { if (src2 & SLJIT_IMM) { #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if ((dst == SLJIT_SCRATCH_REG1) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { + if ((dst == SLJIT_R0) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { #else - if ((dst == SLJIT_SCRATCH_REG1) && (src2w > 127 || src2w < -128)) { + if ((dst == SLJIT_R0) && (src2w > 127 || src2w < -128)) { #endif BINARY_EAX_IMM(op_eax_imm, src2w); } @@ -1588,19 +1593,19 @@ static sljit_si emit_non_cum_binary(struct sljit_compiler *compiler, BINARY_IMM(op_imm, op_mr, src2w, dst, dstw); } } - else if (dst <= TMP_REGISTER) { + else if (FAST_IS_REG(dst)) { inst = emit_x86_instruction(compiler, 1, dst, dstw, src2, src2w); FAIL_IF(!inst); *inst = op_rm; } - else if (src2 <= TMP_REGISTER) { + else if (FAST_IS_REG(src2)) { inst = emit_x86_instruction(compiler, 1, src2, src2w, dst, dstw); FAIL_IF(!inst); *inst = op_mr; } else { - EMIT_MOV(compiler, TMP_REGISTER, 0, src2, src2w); - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, dst, dstw); + EMIT_MOV(compiler, TMP_REG1, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw); FAIL_IF(!inst); *inst = op_mr; } @@ -1608,7 +1613,7 @@ static sljit_si emit_non_cum_binary(struct sljit_compiler *compiler, } /* General version. */ - if (dst <= TMP_REGISTER && dst != src2) { + if (FAST_IS_REG(dst) && dst != src2) { EMIT_MOV(compiler, dst, 0, src1, src1w); if (src2 & SLJIT_IMM) { BINARY_IMM(op_imm, op_mr, src2w, dst, 0); @@ -1621,30 +1626,30 @@ static sljit_si emit_non_cum_binary(struct sljit_compiler *compiler, } else { /* This version requires less memory writing. 
*/ - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); if (src2 & SLJIT_IMM) { - BINARY_IMM(op_imm, op_mr, src2w, TMP_REGISTER, 0); + BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0); } else { - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w); FAIL_IF(!inst); *inst = op_rm; } - EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0); } return SLJIT_SUCCESS; } -static sljit_si emit_mul(struct sljit_compiler *compiler, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static sljit_s32 emit_mul(struct sljit_compiler *compiler, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_ub* inst; - sljit_si dst_r; + sljit_u8* inst; + sljit_s32 dst_r; - dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1; /* Register destination. */ if (dst_r == src1 && !(src2 & SLJIT_IMM)) { @@ -1670,35 +1675,35 @@ static sljit_si emit_mul(struct sljit_compiler *compiler, inst = emit_x86_instruction(compiler, 1, dst_r, 0, src2, src2w); FAIL_IF(!inst); *inst = IMUL_r_rm_i8; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); INC_SIZE(1); - *inst = (sljit_sb)src1w; + *inst = (sljit_s8)src1w; } #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) else { inst = emit_x86_instruction(compiler, 1, dst_r, 0, src2, src2w); FAIL_IF(!inst); *inst = IMUL_r_rm_i32; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 4); FAIL_IF(!inst); INC_SIZE(4); - *(sljit_sw*)inst = src1w; + sljit_unaligned_store_sw(inst, src1w); } #else else if (IS_HALFWORD(src1w)) { inst = emit_x86_instruction(compiler, 1, dst_r, 0, src2, src2w); FAIL_IF(!inst); *inst = IMUL_r_rm_i32; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 4); FAIL_IF(!inst); INC_SIZE(4); - *(sljit_si*)inst = (sljit_si)src1w; + sljit_unaligned_store_s32(inst, (sljit_s32)src1w); } else { - EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, src1w); if (dst_r != src2) EMIT_MOV(compiler, dst_r, 0, src2, src2w); + FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src1w)); inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0); FAIL_IF(!inst); *inst++ = GROUP_0F; @@ -1713,35 +1718,35 @@ static sljit_si emit_mul(struct sljit_compiler *compiler, inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w); FAIL_IF(!inst); *inst = IMUL_r_rm_i8; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); INC_SIZE(1); - *inst = (sljit_sb)src2w; + *inst = (sljit_s8)src2w; } #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) else { inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w); FAIL_IF(!inst); *inst = IMUL_r_rm_i32; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 4); FAIL_IF(!inst); INC_SIZE(4); - *(sljit_sw*)inst = src2w; + sljit_unaligned_store_sw(inst, src2w); } #else else if (IS_HALFWORD(src2w)) { inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w); FAIL_IF(!inst); *inst = IMUL_r_rm_i32; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 4); FAIL_IF(!inst); INC_SIZE(4); - *(sljit_si*)inst = (sljit_si)src2w; + sljit_unaligned_store_s32(inst, 
(sljit_s32)src2w); } else { - EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, src1w); if (dst_r != src1) EMIT_MOV(compiler, dst_r, 0, src1, src1w); + FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src2w)); inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0); FAIL_IF(!inst); *inst++ = GROUP_0F; @@ -1752,7 +1757,7 @@ static sljit_si emit_mul(struct sljit_compiler *compiler, else { /* Neither argument is immediate. */ if (ADDRESSING_DEPENDS_ON(src2, dst_r)) - dst_r = TMP_REGISTER; + dst_r = TMP_REG1; EMIT_MOV(compiler, dst_r, 0, src1, src1w); inst = emit_x86_instruction(compiler, 2, dst_r, 0, src2, src2w); FAIL_IF(!inst); @@ -1760,32 +1765,30 @@ static sljit_si emit_mul(struct sljit_compiler *compiler, *inst = IMUL_r_rm; } - if (dst_r == TMP_REGISTER) - EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + if (dst & SLJIT_MEM) + EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0); return SLJIT_SUCCESS; } -static sljit_si emit_lea_binary(struct sljit_compiler *compiler, sljit_si keep_flags, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static sljit_s32 emit_lea_binary(struct sljit_compiler *compiler, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_ub* inst; - sljit_si dst_r, done = 0; + sljit_u8* inst; + sljit_s32 dst_r, done = 0; /* These cases better be left to handled by normal way. */ - if (!keep_flags) { - if (dst == src1 && dstw == src1w) - return SLJIT_ERR_UNSUPPORTED; - if (dst == src2 && dstw == src2w) - return SLJIT_ERR_UNSUPPORTED; - } + if (dst == src1 && dstw == src1w) + return SLJIT_ERR_UNSUPPORTED; + if (dst == src2 && dstw == src2w) + return SLJIT_ERR_UNSUPPORTED; - dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + dst_r = FAST_IS_REG(dst) ? 
dst : TMP_REG1; - if (src1 <= TMP_REGISTER) { - if (src2 <= TMP_REGISTER || src2 == TMP_REGISTER) { + if (FAST_IS_REG(src1)) { + if (FAST_IS_REG(src2)) { inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM2(src1, src2), 0); FAIL_IF(!inst); *inst = LEA_r_m; @@ -1793,7 +1796,7 @@ static sljit_si emit_lea_binary(struct sljit_compiler *compiler, sljit_si keep_f } #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) if ((src2 & SLJIT_IMM) && (compiler->mode32 || IS_HALFWORD(src2w))) { - inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src1), (sljit_si)src2w); + inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src1), (sljit_s32)src2w); #else if (src2 & SLJIT_IMM) { inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src1), src2w); @@ -1803,10 +1806,10 @@ static sljit_si emit_lea_binary(struct sljit_compiler *compiler, sljit_si keep_f done = 1; } } - else if (src2 <= TMP_REGISTER) { + else if (FAST_IS_REG(src2)) { #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) if ((src1 & SLJIT_IMM) && (compiler->mode32 || IS_HALFWORD(src1w))) { - inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src2), (sljit_si)src1w); + inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src2), (sljit_s32)src1w); #else if (src1 & SLJIT_IMM) { inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src2), src1w); @@ -1818,29 +1821,29 @@ static sljit_si emit_lea_binary(struct sljit_compiler *compiler, sljit_si keep_f } if (done) { - if (dst_r == TMP_REGISTER) - return emit_mov(compiler, dst, dstw, TMP_REGISTER, 0); + if (dst_r == TMP_REG1) + return emit_mov(compiler, dst, dstw, TMP_REG1, 0); return SLJIT_SUCCESS; } return SLJIT_ERR_UNSUPPORTED; } -static sljit_si emit_cmp_binary(struct sljit_compiler *compiler, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static sljit_s32 emit_cmp_binary(struct sljit_compiler *compiler, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_ub* inst; + sljit_u8* inst; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if (src1 == SLJIT_SCRATCH_REG1 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { + if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { #else - if (src1 == SLJIT_SCRATCH_REG1 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128)) { + if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128)) { #endif BINARY_EAX_IMM(CMP_EAX_i32, src2w); return SLJIT_SUCCESS; } - if (src1 <= TMP_REGISTER) { + if (FAST_IS_REG(src1)) { if (src2 & SLJIT_IMM) { BINARY_IMM(CMP, CMP_rm_r, src2w, src1, 0); } @@ -1852,7 +1855,7 @@ static sljit_si emit_cmp_binary(struct sljit_compiler *compiler, return SLJIT_SUCCESS; } - if (src2 <= TMP_REGISTER && !(src1 & SLJIT_IMM)) { + if (FAST_IS_REG(src2) && !(src1 & SLJIT_IMM)) { inst = emit_x86_instruction(compiler, 1, src2, 0, src1, src1w); FAIL_IF(!inst); *inst = CMP_rm_r; @@ -1861,136 +1864,138 @@ static sljit_si emit_cmp_binary(struct sljit_compiler *compiler, if (src2 & SLJIT_IMM) { if (src1 & SLJIT_IMM) { - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); - src1 = TMP_REGISTER; + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); + src1 = TMP_REG1; src1w = 0; } BINARY_IMM(CMP, CMP_rm_r, src2w, src1, src1w); } else { - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); + 
inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w); FAIL_IF(!inst); *inst = CMP_r_rm; } return SLJIT_SUCCESS; } -static sljit_si emit_test_binary(struct sljit_compiler *compiler, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static sljit_s32 emit_test_binary(struct sljit_compiler *compiler, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_ub* inst; + sljit_u8* inst; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if (src1 == SLJIT_SCRATCH_REG1 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { + if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { #else - if (src1 == SLJIT_SCRATCH_REG1 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128)) { + if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128)) { #endif BINARY_EAX_IMM(TEST_EAX_i32, src2w); return SLJIT_SUCCESS; } #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if (src2 == SLJIT_SCRATCH_REG1 && (src2 & SLJIT_IMM) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) { + if (src2 == SLJIT_R0 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) { #else - if (src2 == SLJIT_SCRATCH_REG1 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128)) { + if (src2 == SLJIT_R0 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128)) { #endif BINARY_EAX_IMM(TEST_EAX_i32, src1w); return SLJIT_SUCCESS; } - if (src1 <= TMP_REGISTER) { + if (!(src1 & SLJIT_IMM)) { if (src2 & SLJIT_IMM) { #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) if (IS_HALFWORD(src2w) || compiler->mode32) { - inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, src1, 0); + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, src1, src1w); FAIL_IF(!inst); *inst = GROUP_F7; } else { - FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src2w)); - inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, src1, 0); + FAIL_IF(emit_load_imm64(compiler, TMP_REG1, src2w)); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src1, src1w); FAIL_IF(!inst); *inst = TEST_rm_r; } #else - inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, src1, 0); + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, src1, src1w); FAIL_IF(!inst); *inst = GROUP_F7; #endif + return SLJIT_SUCCESS; } - else { + else if (FAST_IS_REG(src1)) { inst = emit_x86_instruction(compiler, 1, src1, 0, src2, src2w); FAIL_IF(!inst); *inst = TEST_rm_r; + return SLJIT_SUCCESS; } - return SLJIT_SUCCESS; } - if (src2 <= TMP_REGISTER) { + if (!(src2 & SLJIT_IMM)) { if (src1 & SLJIT_IMM) { #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) if (IS_HALFWORD(src1w) || compiler->mode32) { - inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src1w, src2, 0); + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src1w, src2, src2w); FAIL_IF(!inst); *inst = GROUP_F7; } else { - FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src1w)); - inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, src2, 0); + FAIL_IF(emit_load_imm64(compiler, TMP_REG1, src1w)); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w); FAIL_IF(!inst); *inst = TEST_rm_r; } #else - inst = emit_x86_instruction(compiler, 1, src1, src1w, src2, 0); + inst = emit_x86_instruction(compiler, 1, src1, src1w, src2, src2w); FAIL_IF(!inst); *inst = GROUP_F7; #endif + return SLJIT_SUCCESS; } - else { + else if (FAST_IS_REG(src2)) { inst = 
emit_x86_instruction(compiler, 1, src2, 0, src1, src1w); FAIL_IF(!inst); *inst = TEST_rm_r; + return SLJIT_SUCCESS; } - return SLJIT_SUCCESS; } - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); if (src2 & SLJIT_IMM) { #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) if (IS_HALFWORD(src2w) || compiler->mode32) { - inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, TMP_REGISTER, 0); + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, TMP_REG1, 0); FAIL_IF(!inst); *inst = GROUP_F7; } else { FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src2w)); - inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, TMP_REGISTER, 0); + inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, TMP_REG1, 0); FAIL_IF(!inst); *inst = TEST_rm_r; } #else - inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, TMP_REGISTER, 0); + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, TMP_REG1, 0); FAIL_IF(!inst); *inst = GROUP_F7; #endif } else { - inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w); FAIL_IF(!inst); *inst = TEST_rm_r; } return SLJIT_SUCCESS; } -static sljit_si emit_shift(struct sljit_compiler *compiler, - sljit_ub mode, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static sljit_s32 emit_shift(struct sljit_compiler *compiler, + sljit_u8 mode, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_ub* inst; + sljit_u8* inst; if ((src2 & SLJIT_IMM) || (src2 == SLJIT_PREF_SHIFT_REG)) { if (dst == src1 && dstw == src1w) { @@ -2000,21 +2005,21 @@ static sljit_si emit_shift(struct sljit_compiler *compiler, return SLJIT_SUCCESS; } if (dst == SLJIT_UNUSED) { - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); - inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REGISTER, 0); + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REG1, 0); FAIL_IF(!inst); *inst |= mode; return SLJIT_SUCCESS; } if (dst == SLJIT_PREF_SHIFT_REG && src2 == SLJIT_PREF_SHIFT_REG) { - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); - inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0); FAIL_IF(!inst); *inst |= mode; - EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0); return SLJIT_SUCCESS; } - if (dst <= TMP_REGISTER) { + if (FAST_IS_REG(dst)) { EMIT_MOV(compiler, dst, 0, src1, src1w); inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, dst, 0); FAIL_IF(!inst); @@ -2022,62 +2027,63 @@ static sljit_si emit_shift(struct sljit_compiler *compiler, return SLJIT_SUCCESS; } - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); - inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REGISTER, 0); + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REG1, 0); FAIL_IF(!inst); *inst |= mode; - EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0); return SLJIT_SUCCESS; } if (dst == SLJIT_PREF_SHIFT_REG) { - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + 
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w); - inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0); FAIL_IF(!inst); *inst |= mode; - EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0); } - else if (dst <= TMP_REGISTER && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) { + else if (SLOW_IS_REG(dst) && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) { if (src1 != dst) EMIT_MOV(compiler, dst, 0, src1, src1w); - EMIT_MOV(compiler, TMP_REGISTER, 0, SLJIT_PREF_SHIFT_REG, 0); + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0); EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w); inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, dst, 0); FAIL_IF(!inst); *inst |= mode; - EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0); } else { - /* This case is really difficult, since ecx itself may used for - addressing, and we must ensure to work even in that case. */ - EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); -#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0); + /* This case is complex since ecx itself may be used for + addressing, and this case must be supported as well. */ + EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w); +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0); + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0); + FAIL_IF(!inst); + *inst |= mode; + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0); #else - /* [esp+0] contains the flags. */ - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_LOCALS_REG), sizeof(sljit_sw), SLJIT_PREF_SHIFT_REG, 0); -#endif + EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0); EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w); - inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0); FAIL_IF(!inst); *inst |= mode; -#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0); -#else - EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), sizeof(sljit_sw)); #endif - EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + if (dst != SLJIT_UNUSED) + return emit_mov(compiler, dst, dstw, TMP_REG1, 0); } return SLJIT_SUCCESS; } -static sljit_si emit_shift_with_flags(struct sljit_compiler *compiler, - sljit_ub mode, sljit_si set_flags, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler, + sljit_u8 mode, sljit_s32 set_flags, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { /* The CPU does not set flags if the shift count is 0. 
*/ if (src2 & SLJIT_IMM) { @@ -2091,30 +2097,30 @@ static sljit_si emit_shift_with_flags(struct sljit_compiler *compiler, if (!set_flags) return emit_mov(compiler, dst, dstw, src1, src1w); /* OR dst, src, 0 */ - return emit_cum_binary(compiler, OR_r_rm, OR_rm_r, OR, OR_EAX_i32, + return emit_cum_binary(compiler, BINARY_OPCODE(OR), dst, dstw, src1, src1w, SLJIT_IMM, 0); } if (!set_flags) return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w); - if (!(dst <= TMP_REGISTER)) + if (!FAST_IS_REG(dst)) FAIL_IF(emit_cmp_binary(compiler, src1, src1w, SLJIT_IMM, 0)); - FAIL_IF(emit_shift(compiler,mode, dst, dstw, src1, src1w, src2, src2w)); + FAIL_IF(emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w)); - if (dst <= TMP_REGISTER) - return emit_cmp_binary(compiler, dst, dstw, SLJIT_IMM, 0); + if (FAST_IS_REG(dst)) + return emit_cmp_binary(compiler, (dst == SLJIT_UNUSED) ? TMP_REG1 : dst, dstw, SLJIT_IMM, 0); return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { CHECK_ERROR(); - check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); ADJUST_LOCAL_OFFSET(dst, dstw); ADJUST_LOCAL_OFFSET(src1, src1w); ADJUST_LOCAL_OFFSET(src2, src2w); @@ -2123,116 +2129,95 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler CHECK_EXTRA_REGS(src1, src1w, (void)0); CHECK_EXTRA_REGS(src2, src2w, (void)0); #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - compiler->mode32 = op & SLJIT_INT_OP; + compiler->mode32 = op & SLJIT_I32_OP; #endif - if (GET_OPCODE(op) >= SLJIT_MUL) { - if (SLJIT_UNLIKELY(GET_FLAGS(op))) - compiler->flags_saved = 0; - else if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS) && !compiler->flags_saved) - FAIL_IF(emit_save_flags(compiler)); - } + if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) + return SLJIT_SUCCESS; switch (GET_OPCODE(op)) { case SLJIT_ADD: - if (!GET_FLAGS(op)) { - if (emit_lea_binary(compiler, op & SLJIT_KEEP_FLAGS, dst, dstw, src1, src1w, src2, src2w) != SLJIT_ERR_UNSUPPORTED) + if (!HAS_FLAGS(op)) { + if (emit_lea_binary(compiler, dst, dstw, src1, src1w, src2, src2w) != SLJIT_ERR_UNSUPPORTED) return compiler->error; } - else - compiler->flags_saved = 0; - if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS) && !compiler->flags_saved) - FAIL_IF(emit_save_flags(compiler)); - return emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32, + return emit_cum_binary(compiler, BINARY_OPCODE(ADD), dst, dstw, src1, src1w, src2, src2w); case SLJIT_ADDC: - if (SLJIT_UNLIKELY(compiler->flags_saved)) /* C flag must be restored. 
*/ - FAIL_IF(emit_restore_flags(compiler, 1)); - else if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS)) - FAIL_IF(emit_save_flags(compiler)); - if (SLJIT_UNLIKELY(GET_FLAGS(op))) - compiler->flags_saved = 0; - return emit_cum_binary(compiler, ADC_r_rm, ADC_rm_r, ADC, ADC_EAX_i32, + return emit_cum_binary(compiler, BINARY_OPCODE(ADC), dst, dstw, src1, src1w, src2, src2w); case SLJIT_SUB: - if (!GET_FLAGS(op)) { - if ((src2 & SLJIT_IMM) && emit_lea_binary(compiler, op & SLJIT_KEEP_FLAGS, dst, dstw, src1, src1w, SLJIT_IMM, -src2w) != SLJIT_ERR_UNSUPPORTED) + if (!HAS_FLAGS(op)) { + if ((src2 & SLJIT_IMM) && emit_lea_binary(compiler, dst, dstw, src1, src1w, SLJIT_IMM, -src2w) != SLJIT_ERR_UNSUPPORTED) return compiler->error; } - else - compiler->flags_saved = 0; - if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS) && !compiler->flags_saved) - FAIL_IF(emit_save_flags(compiler)); + if (dst == SLJIT_UNUSED) return emit_cmp_binary(compiler, src1, src1w, src2, src2w); - return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32, + return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), dst, dstw, src1, src1w, src2, src2w); case SLJIT_SUBC: - if (SLJIT_UNLIKELY(compiler->flags_saved)) /* C flag must be restored. */ - FAIL_IF(emit_restore_flags(compiler, 1)); - else if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS)) - FAIL_IF(emit_save_flags(compiler)); - if (SLJIT_UNLIKELY(GET_FLAGS(op))) - compiler->flags_saved = 0; - return emit_non_cum_binary(compiler, SBB_r_rm, SBB_rm_r, SBB, SBB_EAX_i32, + return emit_non_cum_binary(compiler, BINARY_OPCODE(SBB), dst, dstw, src1, src1w, src2, src2w); case SLJIT_MUL: return emit_mul(compiler, dst, dstw, src1, src1w, src2, src2w); case SLJIT_AND: if (dst == SLJIT_UNUSED) return emit_test_binary(compiler, src1, src1w, src2, src2w); - return emit_cum_binary(compiler, AND_r_rm, AND_rm_r, AND, AND_EAX_i32, + return emit_cum_binary(compiler, BINARY_OPCODE(AND), dst, dstw, src1, src1w, src2, src2w); case SLJIT_OR: - return emit_cum_binary(compiler, OR_r_rm, OR_rm_r, OR, OR_EAX_i32, + return emit_cum_binary(compiler, BINARY_OPCODE(OR), dst, dstw, src1, src1w, src2, src2w); case SLJIT_XOR: - return emit_cum_binary(compiler, XOR_r_rm, XOR_rm_r, XOR, XOR_EAX_i32, + return emit_cum_binary(compiler, BINARY_OPCODE(XOR), dst, dstw, src1, src1w, src2, src2w); case SLJIT_SHL: - return emit_shift_with_flags(compiler, SHL, GET_FLAGS(op), + return emit_shift_with_flags(compiler, SHL, HAS_FLAGS(op), dst, dstw, src1, src1w, src2, src2w); case SLJIT_LSHR: - return emit_shift_with_flags(compiler, SHR, GET_FLAGS(op), + return emit_shift_with_flags(compiler, SHR, HAS_FLAGS(op), dst, dstw, src1, src1w, src2, src2w); case SLJIT_ASHR: - return emit_shift_with_flags(compiler, SAR, GET_FLAGS(op), + return emit_shift_with_flags(compiler, SAR, HAS_FLAGS(op), dst, dstw, src1, src1w, src2, src2w); } return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg) { - check_sljit_get_register_index(reg); + CHECK_REG_INDEX(check_sljit_get_register_index(reg)); #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - if (reg == SLJIT_TEMPORARY_EREG1 || reg == SLJIT_TEMPORARY_EREG2 - || reg == SLJIT_SAVED_EREG1 || reg == SLJIT_SAVED_EREG2) + if (reg >= SLJIT_R3 && reg <= SLJIT_R8) return -1; #endif return reg_map[reg]; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_float_register_index(sljit_si reg) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg) { - 
check_sljit_get_float_register_index(reg); + CHECK_REG_INDEX(check_sljit_get_float_register_index(reg)); +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) return reg; +#else + return freg_map[reg]; +#endif } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, - void *instruction, sljit_si size) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_s32 size) { - sljit_ub *inst; + sljit_u8 *inst; CHECK_ERROR(); - check_sljit_emit_op_custom(compiler, instruction, size); - SLJIT_ASSERT(size > 0 && size < 16); + CHECK(check_sljit_emit_op_custom(compiler, instruction, size)); - inst = (sljit_ub*)ensure_buf(compiler, 1 + size); + inst = (sljit_u8*)ensure_buf(compiler, 1 + size); FAIL_IF(!inst); INC_SIZE(size); - SLJIT_MEMMOVE(inst, instruction, size); + SLJIT_MEMCPY(inst, instruction, size); return SLJIT_SUCCESS; } @@ -2240,48 +2225,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *co /* Floating point operators */ /* --------------------------------------------------------------------- */ -#if (defined SLJIT_SSE2 && SLJIT_SSE2) - -/* Alignment + 2 * 16 bytes. */ -static sljit_si sse2_data[3 + (4 + 4) * 2]; -static sljit_si *sse2_buffer; +/* Alignment(3) + 4 * 16 bytes. */ +static sljit_s32 sse2_data[3 + (4 * 4)]; +static sljit_s32 *sse2_buffer; static void init_compiler(void) { - sse2_buffer = (sljit_si*)(((sljit_uw)sse2_data + 15) & ~0xf); - /* Single precision constants. */ + /* Align to 16 bytes. */ + sse2_buffer = (sljit_s32*)(((sljit_uw)sse2_data + 15) & ~0xf); + + /* Single precision constants (each constant is 16 byte long). */ sse2_buffer[0] = 0x80000000; sse2_buffer[4] = 0x7fffffff; - /* Double precision constants. */ + /* Double precision constants (each constant is 16 byte long). */ sse2_buffer[8] = 0; sse2_buffer[9] = 0x80000000; sse2_buffer[12] = 0xffffffff; sse2_buffer[13] = 0x7fffffff; } -#endif - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +static sljit_s32 emit_sse2(struct sljit_compiler *compiler, sljit_u8 opcode, + sljit_s32 single, sljit_s32 xmm1, sljit_s32 xmm2, sljit_sw xmm2w) { -#if (defined SLJIT_SSE2 && SLJIT_SSE2) -#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2) - if (cpu_has_sse2 == -1) - get_cpu_features(); - return cpu_has_sse2; -#else /* SLJIT_DETECT_SSE2 */ - return 1; -#endif /* SLJIT_DETECT_SSE2 */ -#else /* SLJIT_SSE2 */ - return 0; -#endif -} - -#if (defined SLJIT_SSE2 && SLJIT_SSE2) - -static sljit_si emit_sse2(struct sljit_compiler *compiler, sljit_ub opcode, - sljit_si single, sljit_si xmm1, sljit_si xmm2, sljit_sw xmm2w) -{ - sljit_ub *inst; + sljit_u8 *inst; inst = emit_x86_instruction(compiler, 2 | (single ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2, xmm1, 0, xmm2, xmm2w); FAIL_IF(!inst); @@ -2290,10 +2256,10 @@ static sljit_si emit_sse2(struct sljit_compiler *compiler, sljit_ub opcode, return SLJIT_SUCCESS; } -static sljit_si emit_sse2_logic(struct sljit_compiler *compiler, sljit_ub opcode, - sljit_si pref66, sljit_si xmm1, sljit_si xmm2, sljit_sw xmm2w) +static sljit_s32 emit_sse2_logic(struct sljit_compiler *compiler, sljit_u8 opcode, + sljit_s32 pref66, sljit_s32 xmm1, sljit_s32 xmm2, sljit_sw xmm2w) { - sljit_ub *inst; + sljit_u8 *inst; inst = emit_x86_instruction(compiler, 2 | (pref66 ? 
EX86_PREF_66 : 0) | EX86_SSE2, xmm1, 0, xmm2, xmm2w); FAIL_IF(!inst); @@ -2302,177 +2268,225 @@ static sljit_si emit_sse2_logic(struct sljit_compiler *compiler, sljit_ub opcode return SLJIT_SUCCESS; } -static SLJIT_INLINE sljit_si emit_sse2_load(struct sljit_compiler *compiler, - sljit_si single, sljit_si dst, sljit_si src, sljit_sw srcw) +static SLJIT_INLINE sljit_s32 emit_sse2_load(struct sljit_compiler *compiler, + sljit_s32 single, sljit_s32 dst, sljit_s32 src, sljit_sw srcw) { return emit_sse2(compiler, MOVSD_x_xm, single, dst, src, srcw); } -static SLJIT_INLINE sljit_si emit_sse2_store(struct sljit_compiler *compiler, - sljit_si single, sljit_si dst, sljit_sw dstw, sljit_si src) +static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler, + sljit_s32 single, sljit_s32 dst, sljit_sw dstw, sljit_s32 src) { return emit_sse2(compiler, MOVSD_xm_x, single, src, dst, dstw); } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) { - sljit_si dst_r; + sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; + sljit_u8 *inst; - CHECK_ERROR(); - check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64) + compiler->mode32 = 0; +#endif + + inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP2, dst_r, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = CVTTSD2SI_r_xm; + + if (dst & SLJIT_MEM) + return emit_mov(compiler, dst, dstw, TMP_REG1, 0); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG; + sljit_u8 *inst; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW) + compiler->mode32 = 0; +#endif + + if (src & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) + srcw = (sljit_s32)srcw; +#endif + EMIT_MOV(compiler, TMP_REG1, 0, src, srcw); + src = TMP_REG1; + srcw = 0; + } + + inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? 
EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP1, dst_r, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = CVTSI2SD_x_rm; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = 1; +#endif + if (dst_r == TMP_FREG) + return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + if (!FAST_IS_REG(src1)) { + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w)); + src1 = TMP_FREG; + } + + return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_F32_OP), src1, src2, src2w); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) compiler->mode32 = 1; #endif - if (GET_OPCODE(op) == SLJIT_CMPD) { - compiler->flags_saved = 0; - if (dst <= SLJIT_FLOAT_REG6) - dst_r = dst; + CHECK_ERROR(); + SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw); + + if (GET_OPCODE(op) == SLJIT_MOV_F64) { + if (FAST_IS_REG(dst)) + return emit_sse2_load(compiler, op & SLJIT_F32_OP, dst, src, srcw); + if (FAST_IS_REG(src)) + return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, src); + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src, srcw)); + return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG); + } + + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) { + dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG; + if (FAST_IS_REG(src)) { + /* We overwrite the high bits of source. From SLJIT point of view, + this is not an issue. + Note: In SSE3, we could also use MOVDDUP and MOVSLDUP. */ + FAIL_IF(emit_sse2_logic(compiler, UNPCKLPD_x_xm, op & SLJIT_F32_OP, src, src, 0)); + } else { - dst_r = TMP_FREG; - FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, dst, dstw)); + FAIL_IF(emit_sse2_load(compiler, !(op & SLJIT_F32_OP), TMP_FREG, src, srcw)); + src = TMP_FREG; } - return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_SINGLE_OP), dst_r, src, srcw); - } - if (op == SLJIT_MOVD) { - if (dst <= SLJIT_FLOAT_REG6) - return emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst, src, srcw); - if (src <= SLJIT_FLOAT_REG6) - return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, src); - FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src, srcw)); - return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG); + FAIL_IF(emit_sse2_logic(compiler, CVTPD2PS_x_xm, op & SLJIT_F32_OP, dst_r, src, 0)); + if (dst_r == TMP_FREG) + return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG); + return SLJIT_SUCCESS; } - if (dst >= SLJIT_FLOAT_REG1 && dst <= SLJIT_FLOAT_REG6) { + if (FAST_IS_REG(dst)) { dst_r = dst; if (dst != src) - FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src, srcw)); + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw)); } else { dst_r = TMP_FREG; - FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src, srcw)); + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw)); } switch (GET_OPCODE(op)) { - case SLJIT_NEGD: - FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_SINGLE_OP ? 
sse2_buffer : sse2_buffer + 8))); + case SLJIT_NEG_F64: + FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer : sse2_buffer + 8))); break; - case SLJIT_ABSD: - FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_SINGLE_OP ? sse2_buffer + 4 : sse2_buffer + 12))); + case SLJIT_ABS_F64: + FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer + 4 : sse2_buffer + 12))); break; } if (dst_r == TMP_FREG) - return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG); + return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG); return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) { - sljit_si dst_r; + sljit_s32 dst_r; CHECK_ERROR(); - check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) compiler->mode32 = 1; #endif - if (dst <= SLJIT_FLOAT_REG6) { + if (FAST_IS_REG(dst)) { dst_r = dst; if (dst == src1) ; /* Do nothing here. */ - else if (dst == src2 && (op == SLJIT_ADDD || op == SLJIT_MULD)) { + else if (dst == src2 && (op == SLJIT_ADD_F64 || op == SLJIT_MUL_F64)) { /* Swap arguments. 
*/ src2 = src1; src2w = src1w; } else if (dst != src2) - FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src1, src1w)); + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src1, src1w)); else { dst_r = TMP_FREG; - FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src1, src1w)); + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w)); } } else { dst_r = TMP_FREG; - FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src1, src1w)); + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w)); } switch (GET_OPCODE(op)) { - case SLJIT_ADDD: - FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w)); + case SLJIT_ADD_F64: + FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w)); break; - case SLJIT_SUBD: - FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w)); + case SLJIT_SUB_F64: + FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w)); break; - case SLJIT_MULD: - FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w)); + case SLJIT_MUL_F64: + FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w)); break; - case SLJIT_DIVD: - FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w)); + case SLJIT_DIV_F64: + FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w)); break; } if (dst_r == TMP_FREG) - return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG); + return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG); return SLJIT_SUCCESS; } -#else - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw) -{ - CHECK_ERROR(); - /* Should cause an assertion fail. */ - check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); - compiler->error = SLJIT_ERR_UNSUPPORTED; - return SLJIT_ERR_UNSUPPORTED; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src1, sljit_sw src1w, - sljit_si src2, sljit_sw src2w) -{ - CHECK_ERROR(); - /* Should cause an assertion fail. */ - check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); - compiler->error = SLJIT_ERR_UNSUPPORTED; - return SLJIT_ERR_UNSUPPORTED; -} - -#endif - /* --------------------------------------------------------------------- */ /* Conditional instructions */ /* --------------------------------------------------------------------- */ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) { - sljit_ub *inst; + sljit_u8 *inst; struct sljit_label *label; CHECK_ERROR_PTR(); - check_sljit_emit_label(compiler); - - /* We should restore the flags before the label, - since other taken jumps has their own flags as well. 
*/ - if (SLJIT_UNLIKELY(compiler->flags_saved)) - PTR_FAIL_IF(emit_restore_flags(compiler, 0)); + CHECK_PTR(check_sljit_emit_label(compiler)); if (compiler->last_label && compiler->last_label->size == compiler->size) return compiler->last_label; @@ -2481,7 +2495,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi PTR_FAIL_IF(!label); set_label(label, compiler); - inst = (sljit_ub*)ensure_buf(compiler, 2); + inst = (sljit_u8*)ensure_buf(compiler, 2); PTR_FAIL_IF(!inst); *inst++ = 0; @@ -2490,28 +2504,19 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi return label; } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) { - sljit_ub *inst; + sljit_u8 *inst; struct sljit_jump *jump; CHECK_ERROR_PTR(); - check_sljit_emit_jump(compiler, type); - - if (SLJIT_UNLIKELY(compiler->flags_saved)) { - if ((type & 0xff) <= SLJIT_JUMP) - PTR_FAIL_IF(emit_restore_flags(compiler, 0)); - compiler->flags_saved = 0; - } + CHECK_PTR(check_sljit_emit_jump(compiler, type)); jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); PTR_FAIL_IF_NULL(jump); - set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + set_jump(jump, compiler, (type & SLJIT_REWRITABLE_JUMP) | ((type & 0xff) << TYPE_SHIFT)); type &= 0xff; - if (type >= SLJIT_CALL1) - PTR_FAIL_IF(call_with_args(compiler, type)); - /* Worst case size. */ #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) compiler->size += (type >= SLJIT_JUMP) ? 5 : 6; @@ -2519,55 +2524,29 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile compiler->size += (type >= SLJIT_JUMP) ? 
(10 + 3) : (2 + 10 + 3); #endif - inst = (sljit_ub*)ensure_buf(compiler, 2); + inst = (sljit_u8*)ensure_buf(compiler, 2); PTR_FAIL_IF_NULL(inst); *inst++ = 0; - *inst++ = type + 4; + *inst++ = 1; return jump; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) { - sljit_ub *inst; + sljit_u8 *inst; struct sljit_jump *jump; CHECK_ERROR(); - check_sljit_emit_ijump(compiler, type, src, srcw); + CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); ADJUST_LOCAL_OFFSET(src, srcw); CHECK_EXTRA_REGS(src, srcw, (void)0); - if (SLJIT_UNLIKELY(compiler->flags_saved)) { - if (type <= SLJIT_JUMP) - FAIL_IF(emit_restore_flags(compiler, 0)); - compiler->flags_saved = 0; - } - - if (type >= SLJIT_CALL1) { -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (src == SLJIT_SCRATCH_REG3) { - EMIT_MOV(compiler, TMP_REGISTER, 0, src, 0); - src = TMP_REGISTER; - } - if (src == SLJIT_MEM1(SLJIT_LOCALS_REG) && type >= SLJIT_CALL3) - srcw += sizeof(sljit_sw); -#endif -#endif -#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && defined(_WIN64) - if (src == SLJIT_SCRATCH_REG3) { - EMIT_MOV(compiler, TMP_REGISTER, 0, src, 0); - src = TMP_REGISTER; - } -#endif - FAIL_IF(call_with_args(compiler, type)); - } - if (src == SLJIT_IMM) { jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); FAIL_IF_NULL(jump); - set_jump(jump, compiler, JUMP_ADDR); + set_jump(jump, compiler, JUMP_ADDR | (type << TYPE_SHIFT)); jump->u.target = srcw; /* Worst case size. */ @@ -2577,11 +2556,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compil compiler->size += 10 + 3; #endif - inst = (sljit_ub*)ensure_buf(compiler, 2); + inst = (sljit_u8*)ensure_buf(compiler, 2); FAIL_IF_NULL(inst); *inst++ = 0; - *inst++ = type + 4; + *inst++ = 1; } else { #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) @@ -2596,54 +2575,48 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compil return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, - sljit_si dst, sljit_sw dstw, - sljit_si src, sljit_sw srcw, - sljit_si type) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 type) { - sljit_ub *inst; - sljit_ub cond_set = 0; + sljit_u8 *inst; + sljit_u8 cond_set = 0; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - sljit_si reg; -#else - /* CHECK_EXTRA_REGS migh overwrite these values. */ - sljit_si dst_save = dst; - sljit_sw dstw_save = dstw; + sljit_s32 reg; #endif + /* ADJUST_LOCAL_OFFSET and CHECK_EXTRA_REGS might overwrite these values. */ + sljit_s32 dst_save = dst; + sljit_sw dstw_save = dstw; CHECK_ERROR(); - check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); - - if (dst == SLJIT_UNUSED) - return SLJIT_SUCCESS; + CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); ADJUST_LOCAL_OFFSET(dst, dstw); CHECK_EXTRA_REGS(dst, dstw, (void)0); - if (SLJIT_UNLIKELY(compiler->flags_saved)) - FAIL_IF(emit_restore_flags(compiler, op & SLJIT_KEEP_FLAGS)); + type &= 0xff; /* setcc = jcc + 0x10. 
*/ cond_set = get_jump_code(type) + 0x10; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && dst <= TMP_REGISTER && dst == src) { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4 + 3); + if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst)) { + inst = (sljit_u8*)ensure_buf(compiler, 1 + 4 + 3); FAIL_IF(!inst); INC_SIZE(4 + 3); /* Set low register to conditional flag. */ - *inst++ = (reg_map[TMP_REGISTER] <= 7) ? REX : REX_B; + *inst++ = (reg_map[TMP_REG1] <= 7) ? REX : REX_B; *inst++ = GROUP_0F; *inst++ = cond_set; - *inst++ = MOD_REG | reg_lmap[TMP_REGISTER]; - *inst++ = REX | (reg_map[TMP_REGISTER] <= 7 ? 0 : REX_R) | (reg_map[dst] <= 7 ? 0 : REX_B); + *inst++ = MOD_REG | reg_lmap[TMP_REG1]; + *inst++ = REX | (reg_map[TMP_REG1] <= 7 ? 0 : REX_R) | (reg_map[dst] <= 7 ? 0 : REX_B); *inst++ = OR_rm8_r8; - *inst++ = MOD_REG | (reg_lmap[TMP_REGISTER] << 3) | reg_lmap[dst]; + *inst++ = MOD_REG | (reg_lmap[TMP_REG1] << 3) | reg_lmap[dst]; return SLJIT_SUCCESS; } - reg = (op == SLJIT_MOV && dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + reg = (GET_OPCODE(op) < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG1; - inst = (sljit_ub*)ensure_buf(compiler, 1 + 4 + 4); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 4 + 4); FAIL_IF(!inst); INC_SIZE(4 + 4); /* Set low register to conditional flag. */ @@ -2652,26 +2625,31 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *com *inst++ = cond_set; *inst++ = MOD_REG | reg_lmap[reg]; *inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : (REX_B | REX_R)); + /* The movzx instruction does not affect flags. */ *inst++ = GROUP_0F; *inst++ = MOVZX_r_rm8; *inst = MOD_REG | (reg_lmap[reg] << 3) | reg_lmap[reg]; - if (reg != TMP_REGISTER) + if (reg != TMP_REG1) return SLJIT_SUCCESS; if (GET_OPCODE(op) < SLJIT_ADD) { compiler->mode32 = GET_OPCODE(op) != SLJIT_MOV; - return emit_mov(compiler, dst, dstw, TMP_REGISTER, 0); + return emit_mov(compiler, dst, dstw, TMP_REG1, 0); } -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) compiler->skip_checks = 1; #endif - return sljit_emit_op2(compiler, op, dst, dstw, dst, dstw, TMP_REGISTER, 0); -#else /* SLJIT_CONFIG_X86_64 */ - if (GET_OPCODE(op) < SLJIT_ADD && dst <= TMP_REGISTER) { + return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0); + +#else + /* The SLJIT_CONFIG_X86_32 code path starts here. */ + if (GET_OPCODE(op) < SLJIT_ADD && FAST_IS_REG(dst)) { if (reg_map[dst] <= 4) { /* Low byte is accessible. */ - inst = (sljit_ub*)ensure_buf(compiler, 1 + 3 + 3); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 3 + 3); FAIL_IF(!inst); INC_SIZE(3 + 3); /* Set low byte to conditional flag. */ @@ -2690,25 +2668,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *com get_cpu_features(); if (cpu_has_cmov) { - EMIT_MOV(compiler, TMP_REGISTER, 0, SLJIT_IMM, 1); + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 1); /* a xor reg, reg operation would overwrite the flags. */ EMIT_MOV(compiler, dst, 0, SLJIT_IMM, 0); - inst = (sljit_ub*)ensure_buf(compiler, 1 + 3); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 3); FAIL_IF(!inst); INC_SIZE(3); *inst++ = GROUP_0F; /* cmovcc = setcc - 0x50. 
*/ *inst++ = cond_set - 0x50; - *inst++ = MOD_REG | (reg_map[dst] << 3) | reg_map[TMP_REGISTER]; + *inst++ = MOD_REG | (reg_map[dst] << 3) | reg_map[TMP_REG1]; return SLJIT_SUCCESS; } - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1); FAIL_IF(!inst); INC_SIZE(1 + 3 + 3 + 1); - *inst++ = XCHG_EAX_r + reg_map[TMP_REGISTER]; + *inst++ = XCHG_EAX_r + reg_map[TMP_REG1]; /* Set al to conditional flag. */ *inst++ = GROUP_0F; *inst++ = cond_set; @@ -2717,48 +2695,49 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *com *inst++ = GROUP_0F; *inst++ = MOVZX_r_rm8; *inst++ = MOD_REG | (reg_map[dst] << 3) | 0 /* eax */; - *inst++ = XCHG_EAX_r + reg_map[TMP_REGISTER]; + *inst++ = XCHG_EAX_r + reg_map[TMP_REG1]; return SLJIT_SUCCESS; } - if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && dst <= TMP_REGISTER && dst == src && reg_map[dst] <= 4) { - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SCRATCH_REG1] == 0, scratch_reg1_must_be_eax); - if (dst != SLJIT_SCRATCH_REG1) { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 3 + 2 + 1); + if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst) && reg_map[dst] <= 4) { + SLJIT_ASSERT(reg_map[SLJIT_R0] == 0); + + if (dst != SLJIT_R0) { + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 2 + 1); FAIL_IF(!inst); INC_SIZE(1 + 3 + 2 + 1); /* Set low register to conditional flag. */ - *inst++ = XCHG_EAX_r + reg_map[TMP_REGISTER]; + *inst++ = XCHG_EAX_r + reg_map[TMP_REG1]; *inst++ = GROUP_0F; *inst++ = cond_set; *inst++ = MOD_REG | 0 /* eax */; *inst++ = OR_rm8_r8; *inst++ = MOD_REG | (0 /* eax */ << 3) | reg_map[dst]; - *inst++ = XCHG_EAX_r + reg_map[TMP_REGISTER]; + *inst++ = XCHG_EAX_r + reg_map[TMP_REG1]; } else { - inst = (sljit_ub*)ensure_buf(compiler, 1 + 2 + 3 + 2 + 2); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + 3 + 2 + 2); FAIL_IF(!inst); INC_SIZE(2 + 3 + 2 + 2); /* Set low register to conditional flag. */ *inst++ = XCHG_r_rm; - *inst++ = MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REGISTER]; + *inst++ = MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REG1]; *inst++ = GROUP_0F; *inst++ = cond_set; *inst++ = MOD_REG | 1 /* ecx */; *inst++ = OR_rm8_r8; *inst++ = MOD_REG | (1 /* ecx */ << 3) | 0 /* eax */; *inst++ = XCHG_r_rm; - *inst++ = MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REGISTER]; + *inst++ = MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REG1]; } return SLJIT_SUCCESS; } - /* Set TMP_REGISTER to the bit. */ - inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1); + /* Set TMP_REG1 to the bit. */ + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1); FAIL_IF(!inst); INC_SIZE(1 + 3 + 3 + 1); - *inst++ = XCHG_EAX_r + reg_map[TMP_REGISTER]; + *inst++ = XCHG_EAX_r + reg_map[TMP_REG1]; /* Set al to conditional flag. 
*/ *inst++ = GROUP_0F; *inst++ = cond_set; @@ -2768,22 +2747,63 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *com *inst++ = MOVZX_r_rm8; *inst++ = MOD_REG | (0 << 3) /* eax */ | 0 /* eax */; - *inst++ = XCHG_EAX_r + reg_map[TMP_REGISTER]; + *inst++ = XCHG_EAX_r + reg_map[TMP_REG1]; if (GET_OPCODE(op) < SLJIT_ADD) - return emit_mov(compiler, dst, dstw, TMP_REGISTER, 0); + return emit_mov(compiler, dst, dstw, TMP_REG1, 0); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ + || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) compiler->skip_checks = 1; #endif - return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REGISTER, 0); + return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0); #endif /* SLJIT_CONFIG_X86_64 */ } -SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_local_base(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw offset) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 dst_reg, + sljit_s32 src, sljit_sw srcw) { + sljit_u8* inst; + CHECK_ERROR(); - check_sljit_get_local_base(compiler, dst, dstw, offset); + CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + dst_reg &= ~SLJIT_I32_OP; + + if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV) || (dst_reg >= SLJIT_R3 && dst_reg <= SLJIT_S3)) + return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw); +#else + if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV)) + return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw); +#endif + + /* ADJUST_LOCAL_OFFSET is not needed. 
*/ + CHECK_EXTRA_REGS(src, srcw, (void)0); + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = dst_reg & SLJIT_I32_OP; + dst_reg &= ~SLJIT_I32_OP; +#endif + + if (SLJIT_UNLIKELY(src & SLJIT_IMM)) { + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcw); + src = TMP_REG1; + srcw = 0; + } + + inst = emit_x86_instruction(compiler, 2, dst_reg, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = get_jump_code(type & 0xff) - 0x40; + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset) +{ + CHECK_ERROR(); + CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset)); ADJUST_LOCAL_OFFSET(dst, dstw); CHECK_EXTRA_REGS(dst, dstw, (void)0); @@ -2792,35 +2812,35 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_local_base(struct sljit_compiler *co compiler->mode32 = 0; #endif - ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_LOCALS_REG), offset); + ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_SP), offset); #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) if (NOT_HALFWORD(offset)) { - FAIL_IF(emit_load_imm64(compiler, TMP_REGISTER, offset)); + FAIL_IF(emit_load_imm64(compiler, TMP_REG1, offset)); #if (defined SLJIT_DEBUG && SLJIT_DEBUG) - SLJIT_ASSERT(emit_lea_binary(compiler, SLJIT_KEEP_FLAGS, dst, dstw, SLJIT_LOCALS_REG, 0, TMP_REGISTER, 0) != SLJIT_ERR_UNSUPPORTED); + SLJIT_ASSERT(emit_lea_binary(compiler, dst, dstw, SLJIT_SP, 0, TMP_REG1, 0) != SLJIT_ERR_UNSUPPORTED); return compiler->error; #else - return emit_lea_binary(compiler, SLJIT_KEEP_FLAGS, dst, dstw, SLJIT_LOCALS_REG, 0, TMP_REGISTER, 0); + return emit_lea_binary(compiler, dst, dstw, SLJIT_SP, 0, TMP_REG1, 0); #endif } #endif if (offset != 0) - return emit_lea_binary(compiler, SLJIT_KEEP_FLAGS, dst, dstw, SLJIT_LOCALS_REG, 0, SLJIT_IMM, offset); - return emit_mov(compiler, dst, dstw, SLJIT_LOCALS_REG, 0); + return emit_lea_binary(compiler, dst, dstw, SLJIT_SP, 0, SLJIT_IMM, offset); + return emit_mov(compiler, dst, dstw, SLJIT_SP, 0); } -SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) { - sljit_ub *inst; + sljit_u8 *inst; struct sljit_const *const_; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - sljit_si reg; + sljit_s32 reg; #endif CHECK_ERROR_PTR(); - check_sljit_emit_const(compiler, dst, dstw, init_value); + CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value)); ADJUST_LOCAL_OFFSET(dst, dstw); CHECK_EXTRA_REGS(dst, dstw, (void)0); @@ -2831,43 +2851,90 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) compiler->mode32 = 0; - reg = (dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + reg = FAST_IS_REG(dst) ? 
dst : TMP_REG1; if (emit_load_imm64(compiler, reg, init_value)) return NULL; #else - if (dst == SLJIT_UNUSED) - dst = TMP_REGISTER; - if (emit_mov(compiler, dst, dstw, SLJIT_IMM, init_value)) return NULL; #endif - inst = (sljit_ub*)ensure_buf(compiler, 2); + inst = (sljit_u8*)ensure_buf(compiler, 2); PTR_FAIL_IF(!inst); *inst++ = 0; - *inst++ = 1; + *inst++ = 2; #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if (reg == TMP_REGISTER && dst != SLJIT_UNUSED) - if (emit_mov(compiler, dst, dstw, TMP_REGISTER, 0)) + if (dst & SLJIT_MEM) + if (emit_mov(compiler, dst, dstw, TMP_REG1, 0)) return NULL; #endif return const_; } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + struct sljit_put_label *put_label; + sljit_u8 *inst; +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + sljit_s32 reg; + sljit_uw start_size; +#endif + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + CHECK_EXTRA_REGS(dst, dstw, (void)0); + + put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label)); + PTR_FAIL_IF(!put_label); + set_put_label(put_label, compiler, 0); + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = 0; + reg = FAST_IS_REG(dst) ? dst : TMP_REG1; + + if (emit_load_imm64(compiler, reg, 0)) + return NULL; +#else + if (emit_mov(compiler, dst, dstw, SLJIT_IMM, 0)) + return NULL; +#endif + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (dst & SLJIT_MEM) { + start_size = compiler->size; + if (emit_mov(compiler, dst, dstw, TMP_REG1, 0)) + return NULL; + put_label->flags = compiler->size - start_size; + } +#endif + + inst = (sljit_u8*)ensure_buf(compiler, 2); + PTR_FAIL_IF(!inst); + + *inst++ = 0; + *inst++ = 3; + + return put_label; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) { + SLJIT_UNUSED_ARG(executable_offset); #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - *(sljit_sw*)addr = new_addr - (addr + 4); + sljit_unaligned_store_sw((void*)addr, new_target - (addr + 4) - (sljit_uw)executable_offset); #else - *(sljit_uw*)addr = new_addr; + sljit_unaligned_store_sw((void*)addr, (sljit_sw) new_target); #endif } -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) { - *(sljit_sw*)addr = new_constant; + SLJIT_UNUSED_ARG(executable_offset); + sljit_unaligned_store_sw((void*)addr, new_constant); } diff --git a/src/3rd/pcre/ucp.h b/src/3rd/pcre/ucp.h index d8b34bfcc5b..2fa00296e42 100644 --- a/src/3rd/pcre/ucp.h +++ b/src/3rd/pcre/ucp.h @@ -192,7 +192,31 @@ enum { ucp_Miao, ucp_Sharada, ucp_Sora_Sompeng, - ucp_Takri + ucp_Takri, + /* New for Unicode 7.0.0: */ + ucp_Bassa_Vah, + ucp_Caucasian_Albanian, + ucp_Duployan, + ucp_Elbasan, + ucp_Grantha, + ucp_Khojki, + ucp_Khudawadi, + ucp_Linear_A, + ucp_Mahajani, + ucp_Manichaean, + ucp_Mende_Kikakui, + ucp_Modi, + ucp_Mro, + ucp_Nabataean, + ucp_Old_North_Arabian, + ucp_Old_Permic, + ucp_Pahawh_Hmong, + ucp_Palmyrene, + ucp_Psalter_Pahlavi, + ucp_Pau_Cin_Hau, + ucp_Siddham, + ucp_Tirhuta, + ucp_Warang_Citi }; #endif
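
Note on the emit_cum_binary/emit_non_cum_binary hunks above: the four separate sljit_ub opcode arguments are replaced by a single packed sljit_u32, which the functions unpack as (op_types >> 24), (op_types >> 16) & 0xff, (op_types >> 8) & 0xff and op_types & 0xff. Below is a minimal sketch of a packing macro consistent with that unpacking; the real BINARY_OPCODE macro is defined elsewhere in the patched sources, so treat the macro name and the example values here as illustration only.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packing, mirroring the unpack order used by
   emit_cum_binary() above: eax_imm | rm | mr | imm, high to low byte. */
#define PACK_BINARY_OPCODES(op_eax_imm, op_rm, op_mr, op_imm) \
    (((uint32_t)(op_eax_imm) << 24) | ((uint32_t)(op_rm) << 16) | \
     ((uint32_t)(op_mr) << 8) | (uint32_t)(op_imm))

int main(void)
{
    /* ADD family as an example: 0x05 = add eax,imm32; 0x03 = add r,r/m;
       0x01 = add r/m,r; 0x00 = /0, the group-1 opcode extension for ADD. */
    uint32_t op_types = PACK_BINARY_OPCODES(0x05, 0x03, 0x01, 0x00);

    /* Unpack exactly as the emit_cum_binary() hunk above does. */
    printf("eax_imm=%02x rm=%02x mr=%02x imm=%02x\n",
        (unsigned)(op_types >> 24), (unsigned)((op_types >> 16) & 0xff),
        (unsigned)((op_types >> 8) & 0xff), (unsigned)(op_types & 0xff));
    return 0;
}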
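
The updated init_compiler() above aligns sse2_buffer to 16 bytes and fills it with sign-bit masks (0x80000000...) and all-but-sign masks (0x7fffffff...); sljit_emit_fop1 then implements SLJIT_NEG_F64 and SLJIT_ABS_F64 by XORPD/ANDPD against those constants, since flipping the IEEE-754 sign bit negates a value and clearing it takes the absolute value. The standalone sketch below shows the same bit-mask trick with SSE2 intrinsics; it is an illustration, not part of pcre.diff, and the function names are made up.

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

static double neg_f64(double x)
{
    /* 0x8000000000000000: just the sign bit, like sse2_buffer[8..9]. */
    const __m128d sign_mask = _mm_castsi128_pd(_mm_set_epi64x(0, INT64_MIN));
    return _mm_cvtsd_f64(_mm_xor_pd(_mm_set_sd(x), sign_mask)); /* XORPD */
}

static double abs_f64(double x)
{
    /* 0x7fffffffffffffff: everything but the sign bit (sse2_buffer[12..13]). */
    const __m128d abs_mask = _mm_castsi128_pd(_mm_set_epi64x(0, INT64_MAX));
    return _mm_cvtsd_f64(_mm_and_pd(_mm_set_sd(x), abs_mask)); /* ANDPD */
}

int main(void)
{
    printf("%g %g\n", neg_f64(1.5), abs_f64(-2.25)); /* prints -1.5 2.25 */
    return 0;
}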
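
The sljit_emit_op_flags and sljit_emit_cmov code above leans on a regularity of x86 encoding: for the same 4-bit condition code cc, Jcc near is 0F 80+cc, SETcc is 0F 90+cc and CMOVcc is 0F 40+cc, which is exactly what the comments "setcc = jcc + 0x10" and "cmovcc = setcc - 0x50" exploit. A small sketch of that opcode arithmetic (illustration, not patch code):

#include <stdio.h>

/* Second opcode byte (after the 0x0F prefix) for the Jcc/SETcc/CMOVcc
   families; cc is the 4-bit x86 condition code (e.g. 0x4 = equal). */
static unsigned char jcc_op(unsigned cc)   { return 0x80 + cc; }
static unsigned char setcc_op(unsigned cc) { return jcc_op(cc) + 0x10; }   /* setcc = jcc + 0x10 */
static unsigned char cmovcc_op(unsigned cc){ return setcc_op(cc) - 0x50; } /* cmovcc = setcc - 0x50 */

int main(void)
{
    unsigned cc = 0x4; /* condition "equal" */
    printf("jcc=0f %02x  setcc=0f %02x  cmovcc=0f %02x\n",
        jcc_op(cc), setcc_op(cc), cmovcc_op(cc)); /* 0f 84, 0f 94, 0f 44 */
    return 0;
}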
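
The sljit_set_jump_addr hunk above shows the two patching modes: on x86-32 the stored word is a rel32 displacement, measured from the end of the 4-byte field (hence the addr + 4), while x86-64 stores an absolute 64-bit target. Below is a standalone sketch of the rel32 case, assuming site points at the displacement field of an already-emitted CALL/JMP rel32; it omits the executable_offset handling and is an illustration only.

#include <stdint.h>
#include <string.h>

/* Patch the 4-byte displacement of a CALL/JMP rel32 so it reaches
   target. rel32 is relative to the end of the instruction, i.e. to
   site + 4 -- the same "new_target - (addr + 4)" seen above. */
static void patch_rel32(unsigned char *site, const void *target)
{
    int32_t rel = (int32_t)((intptr_t)target - ((intptr_t)site + 4));
    memcpy(site, &rel, sizeof rel); /* unaligned-safe store, like sljit_unaligned_store_sw */
}

int main(void)
{
    /* Toy demonstration on a plain buffer: target 20 bytes past the
       field start yields a stored displacement of 16. */
    unsigned char buf[4] = {0};
    patch_rel32(buf, buf + 20);
    return 0;
}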