"Fossies" - the Fresh Open Source Software Archive

Member "pcre-8.42/sljit/sljitNativeSPARC_common.c" (8 Jan 2018, 47351 Bytes) of package /linux/misc/pcre-8.42.tar.bz2:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "sljitNativeSPARC_common.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 8.41_vs_8.42.

    1 /*
    2  *    Stack-less Just-In-Time compiler
    3  *
    4  *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without modification, are
    7  * permitted provided that the following conditions are met:
    8  *
    9  *   1. Redistributions of source code must retain the above copyright notice, this list of
   10  *      conditions and the following disclaimer.
   11  *
   12  *   2. Redistributions in binary form must reproduce the above copyright notice, this list
   13  *      of conditions and the following disclaimer in the documentation and/or other materials
   14  *      provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
   17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
   19  * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
   21  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
   22  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
   24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  */
   26 
   27 SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
   28 {
   29     return "SPARC" SLJIT_CPUINFO;
   30 }
   31 
   32 /* Length of an instruction word
   33    Both for sparc-32 and sparc-64 */
   34 typedef sljit_u32 sljit_ins;
   35 
   36 #if (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL)
   37 
   38 static void sparc_cache_flush(sljit_ins *from, sljit_ins *to)
   39 {
   40 #if defined(__SUNPRO_C) && __SUNPRO_C < 0x590
   41     __asm (
   42         /* if (from == to) return */
   43         "cmp %i0, %i1\n"
   44         "be .leave\n"
   45         "nop\n"
   46 
   47         /* loop until from >= to */
   48         ".mainloop:\n"
   49         "flush %i0\n"
   50         "add %i0, 8, %i0\n"
   51         "cmp %i0, %i1\n"
   52         "bcs .mainloop\n"
   53         "nop\n"
   54 
   55         /* The comparison was done above. */
   56         "bne .leave\n"
   57         /* nop is not necessary here, since the
   58            sub operation has no side effect. */
   59         "sub %i0, 4, %i0\n"
   60         "flush %i0\n"
   61         ".leave:"
   62     );
   63 #else
   64     if (SLJIT_UNLIKELY(from == to))
   65         return;
   66 
   67     do {
   68         __asm__ volatile (
   69             "flush %0\n"
   70             : : "r"(from)
   71         );
    72         /* The flush instruction operates on at least a doubleword. */
   73         from += 2;
   74     } while (from < to);
   75 
   76     if (from == to) {
   77         /* Flush the last word. */
   78         from --;
   79         __asm__ volatile (
   80             "flush %0\n"
   81             : : "r"(from)
   82         );
   83     }
   84 #endif
   85 }
   86 
   87 #endif /* (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL) */
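
/* A minimal usage sketch, assuming the generic SLJIT_CACHE_FLUSH macro is
   routed to this helper when SLJIT_CACHE_FLUSH_OWN_IMPL is set (the names
   below are placeholders only): after new instructions have been written,
   the range is flushed doubleword by doubleword so the instruction cache
   observes the stores, roughly

       sljit_ins *start = (sljit_ins *)generated_code;
       sljit_ins *end = start + number_of_words;
       sparc_cache_flush(start, end);
*/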
   88 
   89 /* TMP_REG2 is not used by getput_arg */
   90 #define TMP_REG1    (SLJIT_NUMBER_OF_REGISTERS + 2)
   91 #define TMP_REG2    (SLJIT_NUMBER_OF_REGISTERS + 3)
   92 #define TMP_REG3    (SLJIT_NUMBER_OF_REGISTERS + 4)
   93 /* This register is modified by calls, which affects the instruction
   94    in the delay slot if it is used as a source register. */
   95 #define TMP_LINK    (SLJIT_NUMBER_OF_REGISTERS + 5)
   96 
   97 #define TMP_FREG1   (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
   98 #define TMP_FREG2   (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
   99 
  100 static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
  101     0, 8, 9, 10, 11, 29, 28, 27, 23, 22, 21, 20, 19, 18, 17, 16, 26, 25, 24, 14, 1, 12, 13, 15
  102 };
  103 
  104 static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
  105     0, 0, 2, 4, 6, 8, 10, 12, 14
  106 };
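
/* Note: reg_map translates virtual SLJIT register numbers into SPARC hardware
   register numbers, e.g. SLJIT_R0 maps to 8 (%o0) and TMP_LINK maps to 15
   (%o7), the register overwritten by the call instruction, which is why the
   comment above warns about delay slots. freg_map contains even numbers only,
   because a double precision value occupies an even/odd register pair; the
   FDN/FS2N forms below set the low bit to address the odd half of such a pair. */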
  107 
  108 /* --------------------------------------------------------------------- */
  109 /*  Instruction forms                                                    */
  110 /* --------------------------------------------------------------------- */
  111 
  112 #define D(d)        (reg_map[d] << 25)
  113 #define FD(d)       (freg_map[d] << 25)
  114 #define FDN(d)      ((freg_map[d] | 0x1) << 25)
  115 #define DA(d)       ((d) << 25)
  116 #define S1(s1)      (reg_map[s1] << 14)
  117 #define FS1(s1)     (freg_map[s1] << 14)
  118 #define S1A(s1)     ((s1) << 14)
  119 #define S2(s2)      (reg_map[s2])
  120 #define FS2(s2)     (freg_map[s2])
  121 #define FS2N(s2)    (freg_map[s2] | 0x1)
  122 #define S2A(s2)     (s2)
  123 #define IMM_ARG     0x2000
  124 #define DOP(op)     ((op) << 5)
  125 #define IMM(imm)    (((imm) & 0x1fff) | IMM_ARG)
  126 
  127 #define DR(dr)      (reg_map[dr])
  128 #define OPC1(opcode)    ((opcode) << 30)
  129 #define OPC2(opcode)    ((opcode) << 22)
  130 #define OPC3(opcode)    ((opcode) << 19)
  131 #define SET_FLAGS   OPC3(0x10)
  132 
  133 #define ADD     (OPC1(0x2) | OPC3(0x00))
  134 #define ADDC        (OPC1(0x2) | OPC3(0x08))
  135 #define AND     (OPC1(0x2) | OPC3(0x01))
  136 #define ANDN        (OPC1(0x2) | OPC3(0x05))
  137 #define CALL        (OPC1(0x1))
  138 #define FABSS       (OPC1(0x2) | OPC3(0x34) | DOP(0x09))
  139 #define FADDD       (OPC1(0x2) | OPC3(0x34) | DOP(0x42))
  140 #define FADDS       (OPC1(0x2) | OPC3(0x34) | DOP(0x41))
  141 #define FCMPD       (OPC1(0x2) | OPC3(0x35) | DOP(0x52))
  142 #define FCMPS       (OPC1(0x2) | OPC3(0x35) | DOP(0x51))
  143 #define FDIVD       (OPC1(0x2) | OPC3(0x34) | DOP(0x4e))
  144 #define FDIVS       (OPC1(0x2) | OPC3(0x34) | DOP(0x4d))
  145 #define FDTOI       (OPC1(0x2) | OPC3(0x34) | DOP(0xd2))
  146 #define FDTOS       (OPC1(0x2) | OPC3(0x34) | DOP(0xc6))
  147 #define FITOD       (OPC1(0x2) | OPC3(0x34) | DOP(0xc8))
  148 #define FITOS       (OPC1(0x2) | OPC3(0x34) | DOP(0xc4))
  149 #define FMOVS       (OPC1(0x2) | OPC3(0x34) | DOP(0x01))
  150 #define FMULD       (OPC1(0x2) | OPC3(0x34) | DOP(0x4a))
  151 #define FMULS       (OPC1(0x2) | OPC3(0x34) | DOP(0x49))
  152 #define FNEGS       (OPC1(0x2) | OPC3(0x34) | DOP(0x05))
  153 #define FSTOD       (OPC1(0x2) | OPC3(0x34) | DOP(0xc9))
  154 #define FSTOI       (OPC1(0x2) | OPC3(0x34) | DOP(0xd1))
  155 #define FSUBD       (OPC1(0x2) | OPC3(0x34) | DOP(0x46))
  156 #define FSUBS       (OPC1(0x2) | OPC3(0x34) | DOP(0x45))
  157 #define JMPL        (OPC1(0x2) | OPC3(0x38))
  158 #define LDD     (OPC1(0x3) | OPC3(0x03))
  159 #define LDUW        (OPC1(0x3) | OPC3(0x00))
  160 #define NOP     (OPC1(0x0) | OPC2(0x04))
  161 #define OR      (OPC1(0x2) | OPC3(0x02))
  162 #define ORN     (OPC1(0x2) | OPC3(0x06))
  163 #define RDY     (OPC1(0x2) | OPC3(0x28) | S1A(0))
  164 #define RESTORE     (OPC1(0x2) | OPC3(0x3d))
  165 #define SAVE        (OPC1(0x2) | OPC3(0x3c))
  166 #define SETHI       (OPC1(0x0) | OPC2(0x04))
  167 #define SLL     (OPC1(0x2) | OPC3(0x25))
  168 #define SLLX        (OPC1(0x2) | OPC3(0x25) | (1 << 12))
  169 #define SRA     (OPC1(0x2) | OPC3(0x27))
  170 #define SRAX        (OPC1(0x2) | OPC3(0x27) | (1 << 12))
  171 #define SRL     (OPC1(0x2) | OPC3(0x26))
  172 #define SRLX        (OPC1(0x2) | OPC3(0x26) | (1 << 12))
  173 #define STDF        (OPC1(0x3) | OPC3(0x27))
  174 #define STF     (OPC1(0x3) | OPC3(0x24))
  175 #define STW     (OPC1(0x3) | OPC3(0x04))
  176 #define SUB     (OPC1(0x2) | OPC3(0x04))
  177 #define SUBC        (OPC1(0x2) | OPC3(0x0c))
  178 #define TA      (OPC1(0x2) | OPC3(0x3a) | (8 << 25))
  179 #define WRY     (OPC1(0x2) | OPC3(0x30) | DA(0))
  180 #define XOR     (OPC1(0x2) | OPC3(0x03))
  181 #define XNOR        (OPC1(0x2) | OPC3(0x07))
  182 
  183 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
  184 #define MAX_DISP    (0x1fffff)
  185 #define MIN_DISP    (-0x200000)
  186 #define DISP_MASK   (0x3fffff)
  187 
  188 #define BICC        (OPC1(0x0) | OPC2(0x2))
  189 #define FBFCC       (OPC1(0x0) | OPC2(0x6))
  190 #define SLL_W       SLL
  191 #define SDIV        (OPC1(0x2) | OPC3(0x0f))
  192 #define SMUL        (OPC1(0x2) | OPC3(0x0b))
  193 #define UDIV        (OPC1(0x2) | OPC3(0x0e))
  194 #define UMUL        (OPC1(0x2) | OPC3(0x0a))
  195 #else
  196 #define SLL_W       SLLX
  197 #endif
  198 
  199 #define SIMM_MAX    (0x0fff)
  200 #define SIMM_MIN    (-0x1000)
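
/* A worked example of how the macros above compose an instruction word
   (illustrative only; dst_reg and src_reg are placeholder names, following
   the pattern used throughout this file): an "add %src, 5, %dst" style
   operation is emitted as

       push_inst(compiler, ADD | D(dst_reg) | S1(src_reg) | IMM(5), DR(dst_reg));

   where IMM() sets the i bit (IMM_ARG) and places the constant in the signed
   13 bit immediate field, hence the SIMM_MIN..SIMM_MAX range of -4096..4095.
   Constants outside this range must first be built in a temporary register
   with load_immediate(). MAX_DISP/MIN_DISP above describe the 22 bit word
   displacement of the Bicc/FBfcc branch formats instead. */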
  201 
   202 /* The delay_slot argument holds the absolute (hardware) number of the register
   203    written by the instruction; useful for reordering instructions into the delay slot. */
  204 static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_s32 delay_slot)
  205 {
  206     sljit_ins *ptr;
  207     SLJIT_ASSERT((delay_slot & DST_INS_MASK) == UNMOVABLE_INS
  208         || (delay_slot & DST_INS_MASK) == MOVABLE_INS
  209         || (delay_slot & DST_INS_MASK) == ((ins >> 25) & 0x1f));
  210     ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
  211     FAIL_IF(!ptr);
  212     *ptr = ins;
  213     compiler->size++;
  214     compiler->delay_slot = delay_slot;
  215     return SLJIT_SUCCESS;
  216 }
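
/* Note on delay_slot: it records what the emitted instruction would clobber
   if a later branch pulled it into its delay slot. UNMOVABLE_INS forbids
   moving it, MOVABLE_INS allows it unconditionally, and DR(reg), optionally
   combined with ICC_IS_SET / FCC_IS_SET for flag setting instructions, names
   the hardware destination register so the jump and call emitters below can
   decide whether the previous instruction may be moved (IS_MOVABLE). */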
  217 
  218 static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset)
  219 {
  220     sljit_sw diff;
  221     sljit_uw target_addr;
  222     sljit_ins *inst;
  223     sljit_ins saved_inst;
  224 
  225     if (jump->flags & SLJIT_REWRITABLE_JUMP)
  226         return code_ptr;
  227 
  228     if (jump->flags & JUMP_ADDR)
  229         target_addr = jump->u.target;
  230     else {
  231         SLJIT_ASSERT(jump->flags & JUMP_LABEL);
  232         target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset;
  233     }
  234     inst = (sljit_ins*)jump->addr;
  235 
  236 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
  237     if (jump->flags & IS_CALL) {
  238         /* Call is always patchable on sparc 32. */
  239         jump->flags |= PATCH_CALL;
  240         if (jump->flags & IS_MOVABLE) {
  241             inst[0] = inst[-1];
  242             inst[-1] = CALL;
  243             jump->addr -= sizeof(sljit_ins);
  244             return inst;
  245         }
  246         inst[0] = CALL;
  247         inst[1] = NOP;
  248         return inst + 1;
  249     }
  250 #else
   251     /* Neither calls nor BPr instructions should reach this point. */
  252 #error "Implementation required"
  253 #endif
  254 
  255     if (jump->flags & IS_COND)
  256         inst--;
  257 
  258     diff = ((sljit_sw)target_addr - (sljit_sw)(inst - 1) - executable_offset) >> 2;
  259 
  260     if (jump->flags & IS_MOVABLE) {
  261         if (diff <= MAX_DISP && diff >= MIN_DISP) {
  262             jump->flags |= PATCH_B;
  263             inst--;
  264             if (jump->flags & IS_COND) {
  265                 saved_inst = inst[0];
  266                 inst[0] = inst[1] ^ (1 << 28);
  267                 inst[1] = saved_inst;
  268             } else {
  269                 inst[1] = inst[0];
  270                 inst[0] = BICC | DA(0x8);
  271             }
  272             jump->addr = (sljit_uw)inst;
  273             return inst + 1;
  274         }
  275     }
  276 
  277     diff += sizeof(sljit_ins);
  278 
  279     if (diff <= MAX_DISP && diff >= MIN_DISP) {
  280         jump->flags |= PATCH_B;
  281         if (jump->flags & IS_COND)
  282             inst[0] ^= (1 << 28);
  283         else
  284             inst[0] = BICC | DA(0x8);
  285         inst[1] = NOP;
  286         jump->addr = (sljit_uw)inst;
  287         return inst + 1;
  288     }
  289 
  290     return code_ptr;
  291 }
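
/* Summary of the rewriting above: when the target turns out to be within
   Bicc/FBfcc range, the indirect jump sequence built around emit_const is
   collapsed into a direct branch (PATCH_B). If the preceding instruction is
   movable it is swapped into the delay slot; for conditional jumps the
   expression inst[1] ^ (1 << 28) inverts the branch condition, since the
   cond field occupies bits 28:25 and flipping its top bit negates the
   condition (e.g. "be" becomes "bne"). */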
  292 
  293 SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
  294 {
  295     struct sljit_memory_fragment *buf;
  296     sljit_ins *code;
  297     sljit_ins *code_ptr;
  298     sljit_ins *buf_ptr;
  299     sljit_ins *buf_end;
  300     sljit_uw word_count;
  301     sljit_sw executable_offset;
  302     sljit_uw addr;
  303 
  304     struct sljit_label *label;
  305     struct sljit_jump *jump;
  306     struct sljit_const *const_;
  307 
  308     CHECK_ERROR_PTR();
  309     CHECK_PTR(check_sljit_generate_code(compiler));
  310     reverse_buf(compiler);
  311 
  312     code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins));
  313     PTR_FAIL_WITH_EXEC_IF(code);
  314     buf = compiler->buf;
  315 
  316     code_ptr = code;
  317     word_count = 0;
  318     executable_offset = SLJIT_EXEC_OFFSET(code);
  319 
  320     label = compiler->labels;
  321     jump = compiler->jumps;
  322     const_ = compiler->consts;
  323 
  324     do {
  325         buf_ptr = (sljit_ins*)buf->memory;
  326         buf_end = buf_ptr + (buf->used_size >> 2);
  327         do {
  328             *code_ptr = *buf_ptr++;
  329             SLJIT_ASSERT(!label || label->size >= word_count);
  330             SLJIT_ASSERT(!jump || jump->addr >= word_count);
  331             SLJIT_ASSERT(!const_ || const_->addr >= word_count);
  332             /* These structures are ordered by their address. */
  333             if (label && label->size == word_count) {
  334                 /* Just recording the address. */
  335                 label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
  336                 label->size = code_ptr - code;
  337                 label = label->next;
  338             }
  339             if (jump && jump->addr == word_count) {
  340 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
  341                 jump->addr = (sljit_uw)(code_ptr - 3);
  342 #else
  343                 jump->addr = (sljit_uw)(code_ptr - 6);
  344 #endif
  345                 code_ptr = detect_jump_type(jump, code_ptr, code, executable_offset);
  346                 jump = jump->next;
  347             }
  348             if (const_ && const_->addr == word_count) {
  349                 /* Just recording the address. */
  350                 const_->addr = (sljit_uw)code_ptr;
  351                 const_ = const_->next;
  352             }
  353             code_ptr ++;
  354             word_count ++;
  355         } while (buf_ptr < buf_end);
  356 
  357         buf = buf->next;
  358     } while (buf);
  359 
  360     if (label && label->size == word_count) {
  361         label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
  362         label->size = code_ptr - code;
  363         label = label->next;
  364     }
  365 
  366     SLJIT_ASSERT(!label);
  367     SLJIT_ASSERT(!jump);
  368     SLJIT_ASSERT(!const_);
  369     SLJIT_ASSERT(code_ptr - code <= (sljit_s32)compiler->size);
  370 
  371     jump = compiler->jumps;
  372     while (jump) {
  373         do {
  374             addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
  375             buf_ptr = (sljit_ins *)jump->addr;
  376 
  377             if (jump->flags & PATCH_CALL) {
  378                 addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
  379                 SLJIT_ASSERT((sljit_sw)addr <= 0x1fffffff && (sljit_sw)addr >= -0x20000000);
  380                 buf_ptr[0] = CALL | (addr & 0x3fffffff);
  381                 break;
  382             }
  383             if (jump->flags & PATCH_B) {
  384                 addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
  385                 SLJIT_ASSERT((sljit_sw)addr <= MAX_DISP && (sljit_sw)addr >= MIN_DISP);
  386                 buf_ptr[0] = (buf_ptr[0] & ~DISP_MASK) | (addr & DISP_MASK);
  387                 break;
  388             }
  389 
  390             /* Set the fields of immediate loads. */
  391 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
  392             buf_ptr[0] = (buf_ptr[0] & 0xffc00000) | ((addr >> 10) & 0x3fffff);
  393             buf_ptr[1] = (buf_ptr[1] & 0xfffffc00) | (addr & 0x3ff);
  394 #else
  395 #error "Implementation required"
  396 #endif
  397         } while (0);
  398         jump = jump->next;
  399     }
  400 
  401 
  402     compiler->error = SLJIT_ERR_COMPILED;
  403     compiler->executable_offset = executable_offset;
  404     compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins);
  405 
  406     code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
  407     code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
  408 
  409     SLJIT_CACHE_FLUSH(code, code_ptr);
  410     return code;
  411 }
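
/* Illustrative note on the constant patching above: on SPARC-32 an absolute
   address is materialized by a sethi/or style pair, so the final fixup
   rewrites the two words roughly as

       sethi %hi(addr), %reg          high 22 bits  (buf_ptr[0])
       or    %reg, %lo(addr), %reg    low 10 bits   (buf_ptr[1])

   which matches the 0xffc00000 / 0xfffffc00 masks used when the jump is
   neither PATCH_CALL nor PATCH_B. */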
  412 
  413 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
  414 {
  415     switch (feature_type) {
  416     case SLJIT_HAS_FPU:
  417 #ifdef SLJIT_IS_FPU_AVAILABLE
  418         return SLJIT_IS_FPU_AVAILABLE;
  419 #else
  420         /* Available by default. */
  421         return 1;
  422 #endif
  423 
  424 #if (defined SLJIT_CONFIG_SPARC_64 && SLJIT_CONFIG_SPARC_64)
  425     case SLJIT_HAS_CMOV:
  426         return 1;
  427 #endif
  428 
  429     default:
  430         return 0;
  431     }
  432 }
  433 
  434 /* --------------------------------------------------------------------- */
  435 /*  Entry, exit                                                          */
  436 /* --------------------------------------------------------------------- */
  437 
   438 /* Creates an index into the data_transfer_insts array. */
  439 #define LOAD_DATA   0x01
  440 #define WORD_DATA   0x00
  441 #define BYTE_DATA   0x02
  442 #define HALF_DATA   0x04
  443 #define INT_DATA    0x06
  444 #define SIGNED_DATA 0x08
  445 /* Separates integer and floating point registers */
  446 #define GPR_REG     0x0f
  447 #define DOUBLE_DATA 0x10
  448 #define SINGLE_DATA 0x12
  449 
  450 #define MEM_MASK    0x1f
  451 
  452 #define ARG_TEST    0x00020
  453 #define ALT_KEEP_CACHE  0x00040
  454 #define CUMULATIVE_OP   0x00080
  455 #define IMM_OP      0x00100
  456 #define SRC2_IMM    0x00200
  457 
  458 #define REG_DEST    0x00400
  459 #define REG2_SOURCE 0x00800
  460 #define SLOW_SRC1   0x01000
  461 #define SLOW_SRC2   0x02000
  462 #define SLOW_DEST   0x04000
  463 
   464 /* SET_FLAGS (0x10 << 19) also belongs here! */
  465 
  466 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
  467 #include "sljitNativeSPARC_32.c"
  468 #else
  469 #include "sljitNativeSPARC_64.c"
  470 #endif
  471 
  472 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
  473     sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
  474     sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
  475 {
  476     CHECK_ERROR();
  477     CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
  478     set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
  479 
  480     local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7;
  481     compiler->local_size = local_size;
  482 
  483     if (local_size <= SIMM_MAX) {
  484         FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | IMM(-local_size), UNMOVABLE_INS));
  485     }
  486     else {
  487         FAIL_IF(load_immediate(compiler, TMP_REG1, -local_size));
  488         FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | S2(TMP_REG1), UNMOVABLE_INS));
  489     }
  490 
  491     /* Arguments are in their appropriate registers. */
  492 
  493     return SLJIT_SUCCESS;
  494 }
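
/* Note: the single SAVE instruction above both switches to a new register
   window and allocates the local frame, i.e. it behaves like
   "save %sp, -local_size, %sp". When local_size does not fit into the signed
   13 bit immediate, the negated size is first loaded into TMP_REG1 and the
   register form of SAVE is used instead. */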
  495 
  496 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
  497     sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
  498     sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
  499 {
  500     CHECK_ERROR();
  501     CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
  502     set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
  503 
  504     compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7;
  505     return SLJIT_SUCCESS;
  506 }
  507 
  508 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
  509 {
  510     CHECK_ERROR();
  511     CHECK(check_sljit_emit_return(compiler, op, src, srcw));
  512 
  513     if (op != SLJIT_MOV || !FAST_IS_REG(src)) {
  514         FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
  515         src = SLJIT_R0;
  516     }
  517 
  518     FAIL_IF(push_inst(compiler, JMPL | D(0) | S1A(31) | IMM(8), UNMOVABLE_INS));
  519     return push_inst(compiler, RESTORE | D(SLJIT_R0) | S1(src) | S2(0), UNMOVABLE_INS);
  520 }
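
/* Note: the sequence above is the conventional SPARC epilogue, roughly
   "jmpl %i7 + 8, %g0" (register 31 is %i7, the saved return address, and +8
   skips the call and its delay slot) with "restore" in the delay slot, which
   pops the register window while copying the return value into the caller's
   SLJIT_R0. */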
  521 
  522 /* --------------------------------------------------------------------- */
  523 /*  Operators                                                            */
  524 /* --------------------------------------------------------------------- */
  525 
  526 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
  527 #define ARCH_32_64(a, b)    a
  528 #else
  529 #define ARCH_32_64(a, b)    b
  530 #endif
  531 
  532 static const sljit_ins data_transfer_insts[16 + 4] = {
  533 /* u w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */),
  534 /* u w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */),
  535 /* u b s */ OPC1(3) | OPC3(0x05) /* stb */,
  536 /* u b l */ OPC1(3) | OPC3(0x01) /* ldub */,
  537 /* u h s */ OPC1(3) | OPC3(0x06) /* sth */,
  538 /* u h l */ OPC1(3) | OPC3(0x02) /* lduh */,
  539 /* u i s */ OPC1(3) | OPC3(0x04) /* stw */,
  540 /* u i l */ OPC1(3) | OPC3(0x00) /* lduw */,
  541 
  542 /* s w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */),
  543 /* s w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */),
  544 /* s b s */ OPC1(3) | OPC3(0x05) /* stb */,
  545 /* s b l */ OPC1(3) | OPC3(0x09) /* ldsb */,
  546 /* s h s */ OPC1(3) | OPC3(0x06) /* sth */,
  547 /* s h l */ OPC1(3) | OPC3(0x0a) /* ldsh */,
  548 /* s i s */ OPC1(3) | OPC3(0x04) /* stw */,
  549 /* s i l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x08) /* ldsw */),
  550 
  551 /* d   s */ OPC1(3) | OPC3(0x27),
  552 /* d   l */ OPC1(3) | OPC3(0x23),
  553 /* s   s */ OPC1(3) | OPC3(0x24),
  554 /* s   l */ OPC1(3) | OPC3(0x20),
  555 };
  556 
  557 #undef ARCH_32_64
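
/* A worked example of indexing the table above (illustrative): the flags
   combination BYTE_DATA | SIGNED_DATA | LOAD_DATA equals 0x0b, which selects
   the "s b l" entry, i.e. the ldsb opcode, while WORD_DATA alone (0x00)
   selects the plain store word entry. getput_arg_fast/getput_arg below simply
   take data_transfer_insts[flags & MEM_MASK] and add the register/immediate
   fields with the D/FD/S1/S2/IMM macros. */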
  558 
  559 /* Can perform an operation using at most 1 instruction. */
  560 static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
  561 {
  562     SLJIT_ASSERT(arg & SLJIT_MEM);
  563 
  564     if ((!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN)
  565             || ((arg & OFFS_REG_MASK) && (argw & 0x3) == 0)) {
   566         /* Works for both absolute and relative addresses (immediate case). */
  567         if (SLJIT_UNLIKELY(flags & ARG_TEST))
  568             return 1;
  569         FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK]
  570             | ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg))
  571             | S1(arg & REG_MASK) | ((arg & OFFS_REG_MASK) ? S2(OFFS_REG(arg)) : IMM(argw)),
  572             ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS));
  573         return -1;
  574     }
  575     return 0;
  576 }
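
/* Note on the return value: 0 means the address needs more than one
   instruction (the caller falls back to getput_arg), -1 means the access has
   already been emitted, and 1 is returned only in ARG_TEST mode to signal
   that a single instruction would be enough. emit_op_mem treats any nonzero
   result as handled and just propagates compiler->error. */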
  577 
  578 /* See getput_arg below.
  579    Note: can_cache is called only for binary operators. Those
   580    operators always use word arguments without write-back. */
  581 static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
  582 {
  583     SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM));
  584 
  585     /* Simple operation except for updates. */
  586     if (arg & OFFS_REG_MASK) {
  587         argw &= 0x3;
  588         SLJIT_ASSERT(argw);
  589         next_argw &= 0x3;
  590         if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == next_argw)
  591             return 1;
  592         return 0;
  593     }
  594 
  595     if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN))
  596         return 1;
  597     return 0;
  598 }
  599 
  600 /* Emit the necessary instructions. See can_cache above. */
  601 static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
  602 {
  603     sljit_s32 base, arg2, delay_slot;
  604     sljit_ins dest;
  605 
  606     SLJIT_ASSERT(arg & SLJIT_MEM);
  607     if (!(next_arg & SLJIT_MEM)) {
  608         next_arg = 0;
  609         next_argw = 0;
  610     }
  611 
  612     base = arg & REG_MASK;
  613     if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
  614         argw &= 0x3;
  615 
  616         /* Using the cache. */
  617         if (((SLJIT_MEM | (arg & OFFS_REG_MASK)) == compiler->cache_arg) && (argw == compiler->cache_argw))
  618             arg2 = TMP_REG3;
  619         else {
  620             if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == (next_argw & 0x3)) {
  621                 compiler->cache_arg = SLJIT_MEM | (arg & OFFS_REG_MASK);
  622                 compiler->cache_argw = argw;
  623                 arg2 = TMP_REG3;
  624             }
  625             else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base && reg != OFFS_REG(arg))
  626                 arg2 = reg;
  627             else /* It must be a mov operation, so tmp1 must be free to use. */
  628                 arg2 = TMP_REG1;
  629             FAIL_IF(push_inst(compiler, SLL_W | D(arg2) | S1(OFFS_REG(arg)) | IMM_ARG | argw, DR(arg2)));
  630         }
  631     }
  632     else {
  633         /* Using the cache. */
  634         if ((compiler->cache_arg == SLJIT_MEM) && (argw - compiler->cache_argw) <= SIMM_MAX && (argw - compiler->cache_argw) >= SIMM_MIN) {
  635             if (argw != compiler->cache_argw) {
  636                 FAIL_IF(push_inst(compiler, ADD | D(TMP_REG3) | S1(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3)));
  637                 compiler->cache_argw = argw;
  638             }
  639             arg2 = TMP_REG3;
  640         } else {
  641             if ((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN) {
  642                 compiler->cache_arg = SLJIT_MEM;
  643                 compiler->cache_argw = argw;
  644                 arg2 = TMP_REG3;
  645             }
  646             else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base)
  647                 arg2 = reg;
  648             else /* It must be a mov operation, so tmp1 must be free to use. */
  649                 arg2 = TMP_REG1;
  650             FAIL_IF(load_immediate(compiler, arg2, argw));
  651         }
  652     }
  653 
  654     dest = ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg));
  655     delay_slot = ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS;
  656     if (!base)
  657         return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(arg2) | IMM(0), delay_slot);
  658     return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot);
  659 }
  660 
  661 static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
  662 {
  663     if (getput_arg_fast(compiler, flags, reg, arg, argw))
  664         return compiler->error;
  665     compiler->cache_arg = 0;
  666     compiler->cache_argw = 0;
  667     return getput_arg(compiler, flags, reg, arg, argw, 0, 0);
  668 }
  669 
  670 static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w)
  671 {
  672     if (getput_arg_fast(compiler, flags, reg, arg1, arg1w))
  673         return compiler->error;
  674     return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w);
  675 }
  676 
  677 static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
  678     sljit_s32 dst, sljit_sw dstw,
  679     sljit_s32 src1, sljit_sw src1w,
  680     sljit_s32 src2, sljit_sw src2w)
  681 {
  682     /* arg1 goes to TMP_REG1 or src reg
  683        arg2 goes to TMP_REG2, imm or src reg
  684        TMP_REG3 can be used for caching
   685        result goes to TMP_REG2, so storing the result can use TMP_REG1 and TMP_REG3. */
  686     sljit_s32 dst_r = TMP_REG2;
  687     sljit_s32 src1_r;
  688     sljit_sw src2_r = 0;
  689     sljit_s32 sugg_src2_r = TMP_REG2;
  690 
  691     if (!(flags & ALT_KEEP_CACHE)) {
  692         compiler->cache_arg = 0;
  693         compiler->cache_argw = 0;
  694     }
  695 
  696     if (dst != SLJIT_UNUSED) {
  697         if (FAST_IS_REG(dst)) {
  698             dst_r = dst;
  699             flags |= REG_DEST;
  700             if (op >= SLJIT_MOV && op <= SLJIT_MOV_P)
  701                 sugg_src2_r = dst_r;
  702         }
  703         else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, TMP_REG1, dst, dstw))
  704             flags |= SLOW_DEST;
  705     }
  706 
  707     if (flags & IMM_OP) {
  708         if ((src2 & SLJIT_IMM) && src2w) {
  709             if (src2w <= SIMM_MAX && src2w >= SIMM_MIN) {
  710                 flags |= SRC2_IMM;
  711                 src2_r = src2w;
  712             }
  713         }
  714         if (!(flags & SRC2_IMM) && (flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w) {
  715             if (src1w <= SIMM_MAX && src1w >= SIMM_MIN) {
  716                 flags |= SRC2_IMM;
  717                 src2_r = src1w;
  718 
  719                 /* And swap arguments. */
  720                 src1 = src2;
  721                 src1w = src2w;
  722                 src2 = SLJIT_IMM;
  723                 /* src2w = src2_r unneeded. */
  724             }
  725         }
  726     }
  727 
  728     /* Source 1. */
  729     if (FAST_IS_REG(src1))
  730         src1_r = src1;
  731     else if (src1 & SLJIT_IMM) {
  732         if (src1w) {
  733             FAIL_IF(load_immediate(compiler, TMP_REG1, src1w));
  734             src1_r = TMP_REG1;
  735         }
  736         else
  737             src1_r = 0;
  738     }
  739     else {
  740         if (getput_arg_fast(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w))
  741             FAIL_IF(compiler->error);
  742         else
  743             flags |= SLOW_SRC1;
  744         src1_r = TMP_REG1;
  745     }
  746 
  747     /* Source 2. */
  748     if (FAST_IS_REG(src2)) {
  749         src2_r = src2;
  750         flags |= REG2_SOURCE;
  751         if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOV_P)
  752             dst_r = src2_r;
  753     }
  754     else if (src2 & SLJIT_IMM) {
  755         if (!(flags & SRC2_IMM)) {
  756             if (src2w) {
  757                 FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w));
  758                 src2_r = sugg_src2_r;
  759             }
  760             else {
  761                 src2_r = 0;
  762                 if ((op >= SLJIT_MOV && op <= SLJIT_MOV_P) && (dst & SLJIT_MEM))
  763                     dst_r = 0;
  764             }
  765         }
  766     }
  767     else {
  768         if (getput_arg_fast(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w))
  769             FAIL_IF(compiler->error);
  770         else
  771             flags |= SLOW_SRC2;
  772         src2_r = sugg_src2_r;
  773     }
  774 
  775     if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
  776         SLJIT_ASSERT(src2_r == TMP_REG2);
  777         if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
  778             FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w));
  779             FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw));
  780         }
  781         else {
  782             FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w));
  783             FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw));
  784         }
  785     }
  786     else if (flags & SLOW_SRC1)
  787         FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw));
  788     else if (flags & SLOW_SRC2)
  789         FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw));
  790 
  791     FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r));
  792 
  793     if (dst & SLJIT_MEM) {
  794         if (!(flags & SLOW_DEST)) {
  795             getput_arg_fast(compiler, flags, dst_r, dst, dstw);
  796             return compiler->error;
  797         }
  798         return getput_arg(compiler, flags, dst_r, dst, dstw, 0, 0);
  799     }
  800 
  801     return SLJIT_SUCCESS;
  802 }
  803 
  804 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
  805 {
  806     CHECK_ERROR();
  807     CHECK(check_sljit_emit_op0(compiler, op));
  808 
  809     op = GET_OPCODE(op);
  810     switch (op) {
  811     case SLJIT_BREAKPOINT:
  812         return push_inst(compiler, TA, UNMOVABLE_INS);
  813     case SLJIT_NOP:
  814         return push_inst(compiler, NOP, UNMOVABLE_INS);
  815     case SLJIT_LMUL_UW:
  816     case SLJIT_LMUL_SW:
  817 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
  818         FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? UMUL : SMUL) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
  819         return push_inst(compiler, RDY | D(SLJIT_R1), DR(SLJIT_R1));
  820 #else
  821 #error "Implementation required"
  822 #endif
  823     case SLJIT_DIVMOD_UW:
  824     case SLJIT_DIVMOD_SW:
  825     case SLJIT_DIV_UW:
  826     case SLJIT_DIV_SW:
  827         SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
  828 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
  829         if ((op | 0x2) == SLJIT_DIV_UW)
  830             FAIL_IF(push_inst(compiler, WRY | S1(0), MOVABLE_INS));
  831         else {
  832             FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(SLJIT_R0) | IMM(31), DR(TMP_REG1)));
  833             FAIL_IF(push_inst(compiler, WRY | S1(TMP_REG1), MOVABLE_INS));
  834         }
  835         if (op <= SLJIT_DIVMOD_SW)
  836             FAIL_IF(push_inst(compiler, OR | D(TMP_REG2) | S1(0) | S2(SLJIT_R0), DR(TMP_REG2)));
  837         FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? UDIV : SDIV) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
  838         if (op >= SLJIT_DIV_UW)
  839             return SLJIT_SUCCESS;
  840         FAIL_IF(push_inst(compiler, SMUL | D(SLJIT_R1) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R1)));
  841         return push_inst(compiler, SUB | D(SLJIT_R1) | S1(TMP_REG2) | S2(SLJIT_R1), DR(SLJIT_R1));
  842 #else
  843 #error "Implementation required"
  844 #endif
  845     }
  846 
  847     return SLJIT_SUCCESS;
  848 }
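
/* Note on the SPARC-32 paths above: 32x32->64 multiplication uses umul/smul
   and reads the high word back from the Y register (rdy), while division
   requires Y to be set up first (wry with zero, or with the sign word
   produced by an arithmetic shift right by 31 in the signed case). Since
   there is no remainder instruction, the SLJIT_DIVMOD_* cases keep a copy of
   the dividend in TMP_REG2 and reconstruct the remainder afterwards as
   dividend - quotient * divisor (smul + sub). */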
  849 
  850 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
  851     sljit_s32 dst, sljit_sw dstw,
  852     sljit_s32 src, sljit_sw srcw)
  853 {
  854     sljit_s32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
  855 
  856     CHECK_ERROR();
  857     CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
  858     ADJUST_LOCAL_OFFSET(dst, dstw);
  859     ADJUST_LOCAL_OFFSET(src, srcw);
  860 
  861     if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
  862         return SLJIT_SUCCESS;
  863 
  864     op = GET_OPCODE(op);
  865     switch (op) {
  866     case SLJIT_MOV:
  867     case SLJIT_MOV_P:
  868         return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
  869 
  870     case SLJIT_MOV_U32:
  871         return emit_op(compiler, SLJIT_MOV_U32, flags | INT_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
  872 
  873     case SLJIT_MOV_S32:
  874         return emit_op(compiler, SLJIT_MOV_S32, flags | INT_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, srcw);
  875 
  876     case SLJIT_MOV_U8:
  877         return emit_op(compiler, SLJIT_MOV_U8, flags | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
  878 
  879     case SLJIT_MOV_S8:
  880         return emit_op(compiler, SLJIT_MOV_S8, flags | BYTE_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
  881 
  882     case SLJIT_MOV_U16:
  883         return emit_op(compiler, SLJIT_MOV_U16, flags | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
  884 
  885     case SLJIT_MOV_S16:
  886         return emit_op(compiler, SLJIT_MOV_S16, flags | HALF_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
  887 
  888     case SLJIT_NOT:
  889     case SLJIT_CLZ:
  890         return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
  891 
  892     case SLJIT_NEG:
  893         return emit_op(compiler, SLJIT_SUB, flags | IMM_OP, dst, dstw, SLJIT_IMM, 0, src, srcw);
  894     }
  895 
  896     return SLJIT_SUCCESS;
  897 }
  898 
  899 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
  900     sljit_s32 dst, sljit_sw dstw,
  901     sljit_s32 src1, sljit_sw src1w,
  902     sljit_s32 src2, sljit_sw src2w)
  903 {
  904     sljit_s32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
  905 
  906     CHECK_ERROR();
  907     CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
  908     ADJUST_LOCAL_OFFSET(dst, dstw);
  909     ADJUST_LOCAL_OFFSET(src1, src1w);
  910     ADJUST_LOCAL_OFFSET(src2, src2w);
  911 
  912     if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
  913         return SLJIT_SUCCESS;
  914 
  915     op = GET_OPCODE(op);
  916     switch (op) {
  917     case SLJIT_ADD:
  918     case SLJIT_ADDC:
  919     case SLJIT_MUL:
  920     case SLJIT_AND:
  921     case SLJIT_OR:
  922     case SLJIT_XOR:
  923         return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
  924 
  925     case SLJIT_SUB:
  926     case SLJIT_SUBC:
  927         return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
  928 
  929     case SLJIT_SHL:
  930     case SLJIT_LSHR:
  931     case SLJIT_ASHR:
  932 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
  933         if (src2 & SLJIT_IMM)
  934             src2w &= 0x1f;
  935 #else
  936         SLJIT_UNREACHABLE();
  937 #endif
  938         return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
  939     }
  940 
  941     return SLJIT_SUCCESS;
  942 }
  943 
  944 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
  945 {
  946     CHECK_REG_INDEX(check_sljit_get_register_index(reg));
  947     return reg_map[reg];
  948 }
  949 
  950 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
  951 {
  952     CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
  953     return freg_map[reg];
  954 }
  955 
  956 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
  957     void *instruction, sljit_s32 size)
  958 {
  959     CHECK_ERROR();
  960     CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
  961 
  962     return push_inst(compiler, *(sljit_ins*)instruction, UNMOVABLE_INS);
  963 }
  964 
  965 /* --------------------------------------------------------------------- */
  966 /*  Floating point operators                                             */
  967 /* --------------------------------------------------------------------- */
  968 
  969 #define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 7))
  970 #define SELECT_FOP(op, single, double) ((op & SLJIT_F32_OP) ? single : double)
  971 #define FLOAT_TMP_MEM_OFFSET (22 * sizeof(sljit_sw))
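
/* Worked example for the macros above (illustrative): SLJIT_F32_OP is 0x100
   (see the compile time assert below), so FLOAT_DATA(op) yields DOUBLE_DATA
   (0x10) for f64 operations and SINGLE_DATA (0x12) for f32 ones, selecting
   the matching rows of data_transfer_insts. FLOAT_TMP_MEM_OFFSET is a scratch
   stack slot used to move values between the integer and floating point
   register files, since the int/float conversions below go through memory. */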
  972 
  973 static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
  974     sljit_s32 dst, sljit_sw dstw,
  975     sljit_s32 src, sljit_sw srcw)
  976 {
  977     if (src & SLJIT_MEM) {
  978         FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
  979         src = TMP_FREG1;
  980     }
  981 
  982     FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOI, FDTOI) | FD(TMP_FREG1) | FS2(src), MOVABLE_INS));
  983 
  984     if (FAST_IS_REG(dst)) {
  985         FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
  986         return emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET);
  987     }
  988 
   989     /* Store the integer value from a floating point register. */
  990     return emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, dst, dstw, 0, 0);
  991 }
  992 
  993 static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
  994     sljit_s32 dst, sljit_sw dstw,
  995     sljit_s32 src, sljit_sw srcw)
  996 {
  997     sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
  998 
  999     if (src & SLJIT_IMM) {
 1000 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
 1001         if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
 1002             srcw = (sljit_s32)srcw;
 1003 #endif
 1004         FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
 1005         src = TMP_REG1;
 1006         srcw = 0;
 1007     }
 1008 
 1009     if (FAST_IS_REG(src)) {
 1010         FAIL_IF(emit_op_mem2(compiler, WORD_DATA, src, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
 1011         src = SLJIT_MEM1(SLJIT_SP);
 1012         srcw = FLOAT_TMP_MEM_OFFSET;
 1013     }
 1014 
 1015     FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
 1016     FAIL_IF(push_inst(compiler, SELECT_FOP(op, FITOS, FITOD) | FD(dst_r) | FS2(TMP_FREG1), MOVABLE_INS));
 1017 
 1018     if (dst & SLJIT_MEM)
 1019         return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
 1020     return SLJIT_SUCCESS;
 1021 }
 1022 
 1023 static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
 1024     sljit_s32 src1, sljit_sw src1w,
 1025     sljit_s32 src2, sljit_sw src2w)
 1026 {
 1027     if (src1 & SLJIT_MEM) {
 1028         FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
 1029         src1 = TMP_FREG1;
 1030     }
 1031 
 1032     if (src2 & SLJIT_MEM) {
 1033         FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0));
 1034         src2 = TMP_FREG2;
 1035     }
 1036 
 1037     return push_inst(compiler, SELECT_FOP(op, FCMPS, FCMPD) | FS1(src1) | FS2(src2), FCC_IS_SET | MOVABLE_INS);
 1038 }
 1039 
 1040 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
 1041     sljit_s32 dst, sljit_sw dstw,
 1042     sljit_s32 src, sljit_sw srcw)
 1043 {
 1044     sljit_s32 dst_r;
 1045 
 1046     CHECK_ERROR();
 1047     compiler->cache_arg = 0;
 1048     compiler->cache_argw = 0;
 1049 
 1050     SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
 1051     SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
 1052 
 1053     if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
 1054         op ^= SLJIT_F32_OP;
 1055 
 1056     dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
 1057 
 1058     if (src & SLJIT_MEM) {
 1059         FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, dst, dstw));
 1060         src = dst_r;
 1061     }
 1062 
 1063     switch (GET_OPCODE(op)) {
 1064     case SLJIT_MOV_F64:
 1065         if (src != dst_r) {
 1066             if (dst_r != TMP_FREG1) {
 1067                 FAIL_IF(push_inst(compiler, FMOVS | FD(dst_r) | FS2(src), MOVABLE_INS));
 1068                 if (!(op & SLJIT_F32_OP))
 1069                     FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
 1070             }
 1071             else
 1072                 dst_r = src;
 1073         }
 1074         break;
 1075     case SLJIT_NEG_F64:
 1076         FAIL_IF(push_inst(compiler, FNEGS | FD(dst_r) | FS2(src), MOVABLE_INS));
 1077         if (dst_r != src && !(op & SLJIT_F32_OP))
 1078             FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
 1079         break;
 1080     case SLJIT_ABS_F64:
 1081         FAIL_IF(push_inst(compiler, FABSS | FD(dst_r) | FS2(src), MOVABLE_INS));
 1082         if (dst_r != src && !(op & SLJIT_F32_OP))
 1083             FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
 1084         break;
 1085     case SLJIT_CONV_F64_FROM_F32:
 1086         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOD, FDTOS) | FD(dst_r) | FS2(src), MOVABLE_INS));
 1087         op ^= SLJIT_F32_OP;
 1088         break;
 1089     }
 1090 
 1091     if (dst & SLJIT_MEM)
 1092         FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_r, dst, dstw, 0, 0));
 1093     return SLJIT_SUCCESS;
 1094 }
 1095 
 1096 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
 1097     sljit_s32 dst, sljit_sw dstw,
 1098     sljit_s32 src1, sljit_sw src1w,
 1099     sljit_s32 src2, sljit_sw src2w)
 1100 {
 1101     sljit_s32 dst_r, flags = 0;
 1102 
 1103     CHECK_ERROR();
 1104     CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
 1105     ADJUST_LOCAL_OFFSET(dst, dstw);
 1106     ADJUST_LOCAL_OFFSET(src1, src1w);
 1107     ADJUST_LOCAL_OFFSET(src2, src2w);
 1108 
 1109     compiler->cache_arg = 0;
 1110     compiler->cache_argw = 0;
 1111 
 1112     dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG2;
 1113 
 1114     if (src1 & SLJIT_MEM) {
 1115         if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) {
 1116             FAIL_IF(compiler->error);
 1117             src1 = TMP_FREG1;
 1118         } else
 1119             flags |= SLOW_SRC1;
 1120     }
 1121 
 1122     if (src2 & SLJIT_MEM) {
 1123         if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) {
 1124             FAIL_IF(compiler->error);
 1125             src2 = TMP_FREG2;
 1126         } else
 1127             flags |= SLOW_SRC2;
 1128     }
 1129 
 1130     if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
 1131         if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
 1132             FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w));
 1133             FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
 1134         }
 1135         else {
 1136             FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
 1137             FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
 1138         }
 1139     }
 1140     else if (flags & SLOW_SRC1)
 1141         FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
 1142     else if (flags & SLOW_SRC2)
 1143         FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
 1144 
 1145     if (flags & SLOW_SRC1)
 1146         src1 = TMP_FREG1;
 1147     if (flags & SLOW_SRC2)
 1148         src2 = TMP_FREG2;
 1149 
 1150     switch (GET_OPCODE(op)) {
 1151     case SLJIT_ADD_F64:
 1152         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADDD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
 1153         break;
 1154 
 1155     case SLJIT_SUB_F64:
 1156         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUBD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
 1157         break;
 1158 
 1159     case SLJIT_MUL_F64:
 1160         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMULD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
 1161         break;
 1162 
 1163     case SLJIT_DIV_F64:
 1164         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIVD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
 1165         break;
 1166     }
 1167 
 1168     if (dst_r == TMP_FREG2)
 1169         FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0));
 1170 
 1171     return SLJIT_SUCCESS;
 1172 }
 1173 
 1174 #undef FLOAT_DATA
 1175 #undef SELECT_FOP
 1176 
 1177 /* --------------------------------------------------------------------- */
 1178 /*  Other instructions                                                   */
 1179 /* --------------------------------------------------------------------- */
 1180 
 1181 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
 1182 {
 1183     CHECK_ERROR();
 1184     CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
 1185     ADJUST_LOCAL_OFFSET(dst, dstw);
 1186 
 1187     if (FAST_IS_REG(dst))
 1188         return push_inst(compiler, OR | D(dst) | S1(0) | S2(TMP_LINK), DR(dst));
 1189 
 1190     /* Memory. */
 1191     return emit_op_mem(compiler, WORD_DATA, TMP_LINK, dst, dstw);
 1192 }
 1193 
 1194 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw)
 1195 {
 1196     CHECK_ERROR();
 1197     CHECK(check_sljit_emit_fast_return(compiler, src, srcw));
 1198     ADJUST_LOCAL_OFFSET(src, srcw);
 1199 
 1200     if (FAST_IS_REG(src))
 1201         FAIL_IF(push_inst(compiler, OR | D(TMP_LINK) | S1(0) | S2(src), DR(TMP_LINK)));
 1202     else
 1203         FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_LINK, src, srcw));
 1204 
 1205     FAIL_IF(push_inst(compiler, JMPL | D(0) | S1(TMP_LINK) | IMM(8), UNMOVABLE_INS));
 1206     return push_inst(compiler, NOP, UNMOVABLE_INS);
 1207 }
 1208 
 1209 /* --------------------------------------------------------------------- */
 1210 /*  Conditional instructions                                             */
 1211 /* --------------------------------------------------------------------- */
 1212 
 1213 SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
 1214 {
 1215     struct sljit_label *label;
 1216 
 1217     CHECK_ERROR_PTR();
 1218     CHECK_PTR(check_sljit_emit_label(compiler));
 1219 
 1220     if (compiler->last_label && compiler->last_label->size == compiler->size)
 1221         return compiler->last_label;
 1222 
 1223     label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label));
 1224     PTR_FAIL_IF(!label);
 1225     set_label(label, compiler);
 1226     compiler->delay_slot = UNMOVABLE_INS;
 1227     return label;
 1228 }
 1229 
 1230 static sljit_ins get_cc(sljit_s32 type)
 1231 {
 1232     switch (type) {
 1233     case SLJIT_EQUAL:
 1234     case SLJIT_MUL_NOT_OVERFLOW:
 1235     case SLJIT_NOT_EQUAL_F64: /* Unordered. */
 1236         return DA(0x1);
 1237 
 1238     case SLJIT_NOT_EQUAL:
 1239     case SLJIT_MUL_OVERFLOW:
 1240     case SLJIT_EQUAL_F64:
 1241         return DA(0x9);
 1242 
 1243     case SLJIT_LESS:
 1244     case SLJIT_GREATER_F64: /* Unordered. */
 1245         return DA(0x5);
 1246 
 1247     case SLJIT_GREATER_EQUAL:
 1248     case SLJIT_LESS_EQUAL_F64:
 1249         return DA(0xd);
 1250 
 1251     case SLJIT_GREATER:
 1252     case SLJIT_GREATER_EQUAL_F64: /* Unordered. */
 1253         return DA(0xc);
 1254 
 1255     case SLJIT_LESS_EQUAL:
 1256     case SLJIT_LESS_F64:
 1257         return DA(0x4);
 1258 
 1259     case SLJIT_SIG_LESS:
 1260         return DA(0x3);
 1261 
 1262     case SLJIT_SIG_GREATER_EQUAL:
 1263         return DA(0xb);
 1264 
 1265     case SLJIT_SIG_GREATER:
 1266         return DA(0xa);
 1267 
 1268     case SLJIT_SIG_LESS_EQUAL:
 1269         return DA(0x2);
 1270 
 1271     case SLJIT_OVERFLOW:
 1272     case SLJIT_UNORDERED_F64:
 1273         return DA(0x7);
 1274 
 1275     case SLJIT_NOT_OVERFLOW:
 1276     case SLJIT_ORDERED_F64:
 1277         return DA(0xf);
 1278 
 1279     default:
 1280         SLJIT_UNREACHABLE();
 1281         return DA(0x8);
 1282     }
 1283 }
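
/* Note: get_cc returns the 4 bit SPARC condition code already shifted into
   the cond field via DA(), ready to be OR-ed into a Bicc or FBfcc opcode.
   The jump emitter below uses get_cc(type ^ 1), i.e. the negated condition,
   because the short branch it emits skips over the long indirect jump
   sequence when the tested condition does not hold. */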
 1284 
 1285 SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
 1286 {
 1287     struct sljit_jump *jump;
 1288 
 1289     CHECK_ERROR_PTR();
 1290     CHECK_PTR(check_sljit_emit_jump(compiler, type));
 1291 
 1292     jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
 1293     PTR_FAIL_IF(!jump);
 1294     set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
 1295     type &= 0xff;
 1296 
 1297     if (type < SLJIT_EQUAL_F64) {
 1298         jump->flags |= IS_COND;
 1299         if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & ICC_IS_SET))
 1300             jump->flags |= IS_MOVABLE;
 1301 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
 1302         PTR_FAIL_IF(push_inst(compiler, BICC | get_cc(type ^ 1) | 5, UNMOVABLE_INS));
 1303 #else
 1304 #error "Implementation required"
 1305 #endif
 1306     }
 1307     else if (type < SLJIT_JUMP) {
 1308         jump->flags |= IS_COND;
 1309         if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & FCC_IS_SET))
 1310             jump->flags |= IS_MOVABLE;
 1311 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
 1312         PTR_FAIL_IF(push_inst(compiler, FBFCC | get_cc(type ^ 1) | 5, UNMOVABLE_INS));
 1313 #else
 1314 #error "Implementation required"
 1315 #endif
 1316     }
 1317     else {
 1318         if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS)
 1319             jump->flags |= IS_MOVABLE;
 1320         if (type >= SLJIT_FAST_CALL)
 1321             jump->flags |= IS_CALL;
 1322     }
 1323 
 1324     PTR_FAIL_IF(emit_const(compiler, TMP_REG1, 0));
 1325     PTR_FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(TMP_REG1) | IMM(0), UNMOVABLE_INS));
 1326     jump->addr = compiler->size;
 1327     PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
 1328 
 1329     return jump;
 1330 }
 1331 
 1332 SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
 1333     sljit_s32 arg_types)
 1334 {
 1335     CHECK_ERROR_PTR();
 1336     CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
 1337 
 1338     PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
 1339 
 1340 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
 1341         || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
 1342     compiler->skip_checks = 1;
 1343 #endif
 1344 
 1345     return sljit_emit_jump(compiler, type);
 1346 }
 1347 
 1348 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
 1349 {
 1350     struct sljit_jump *jump = NULL;
 1351     sljit_s32 src_r;
 1352 
 1353     CHECK_ERROR();
 1354     CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
 1355     ADJUST_LOCAL_OFFSET(src, srcw);
 1356 
 1357     if (FAST_IS_REG(src))
 1358         src_r = src;
 1359     else if (src & SLJIT_IMM) {
 1360         jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
 1361         FAIL_IF(!jump);
 1362         set_jump(jump, compiler, JUMP_ADDR);
 1363         jump->u.target = srcw;
 1364 
 1365         if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS)
 1366             jump->flags |= IS_MOVABLE;
 1367         if (type >= SLJIT_FAST_CALL)
 1368             jump->flags |= IS_CALL;
 1369 
 1370         FAIL_IF(emit_const(compiler, TMP_REG1, 0));
 1371         src_r = TMP_REG1;
 1372     }
 1373     else {
 1374         FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
 1375         src_r = TMP_REG1;
 1376     }
 1377 
 1378     FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(src_r) | IMM(0), UNMOVABLE_INS));
 1379     if (jump)
 1380         jump->addr = compiler->size;
 1381     return push_inst(compiler, NOP, UNMOVABLE_INS);
 1382 }
 1383 
 1384 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
 1385     sljit_s32 arg_types,
 1386     sljit_s32 src, sljit_sw srcw)
 1387 {
 1388     CHECK_ERROR();
 1389     CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
 1390 
 1391     if (src & SLJIT_MEM) {
 1392         ADJUST_LOCAL_OFFSET(src, srcw);
 1393         FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
 1394         src = TMP_REG1;
 1395     }
 1396 
 1397     FAIL_IF(call_with_args(compiler, arg_types, &src));
 1398 
 1399 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
 1400         || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
 1401     compiler->skip_checks = 1;
 1402 #endif
 1403 
 1404     return sljit_emit_ijump(compiler, type, src, srcw);
 1405 }
 1406 
 1407 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
 1408     sljit_s32 dst, sljit_sw dstw,
 1409     sljit_s32 type)
 1410 {
 1411     sljit_s32 reg, flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
 1412 
 1413     CHECK_ERROR();
 1414     CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
 1415     ADJUST_LOCAL_OFFSET(dst, dstw);
 1416 
 1417 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
 1418     op = GET_OPCODE(op);
 1419     reg = (op < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG2;
 1420 
 1421     compiler->cache_arg = 0;
 1422     compiler->cache_argw = 0;
 1423 
 1424     if (op >= SLJIT_ADD && (dst & SLJIT_MEM))
 1425         FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, dst, dstw, dst, dstw));
 1426 
 1427     type &= 0xff;
 1428     if (type < SLJIT_EQUAL_F64)
 1429         FAIL_IF(push_inst(compiler, BICC | get_cc(type) | 3, UNMOVABLE_INS));
 1430     else
 1431         FAIL_IF(push_inst(compiler, FBFCC | get_cc(type) | 3, UNMOVABLE_INS));
 1432 
 1433     FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(1), UNMOVABLE_INS));
 1434     FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(0), UNMOVABLE_INS));
 1435 
 1436     if (op >= SLJIT_ADD) {
 1437         flags |= CUMULATIVE_OP | IMM_OP | ALT_KEEP_CACHE;
 1438         if (dst & SLJIT_MEM)
 1439             return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, TMP_REG2, 0);
 1440         return emit_op(compiler, op, flags, dst, 0, dst, 0, TMP_REG2, 0);
 1441     }
 1442 
 1443     if (!(dst & SLJIT_MEM))
 1444         return SLJIT_SUCCESS;
 1445 
 1446     return emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw);
 1447 #else
 1448 #error "Implementation required"
 1449 #endif
 1450 }
 1451 
 1452 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
 1453     sljit_s32 dst_reg,
 1454     sljit_s32 src, sljit_sw srcw)
 1455 {
 1456     CHECK_ERROR();
 1457     CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
 1458 
 1459 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
  1460     return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
 1461 #else
 1462 #error "Implementation required"
 1463 #endif
 1464 }
 1465 
 1466 SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
 1467 {
 1468     sljit_s32 reg;
 1469     struct sljit_const *const_;
 1470 
 1471     CHECK_ERROR_PTR();
 1472     CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));
 1473     ADJUST_LOCAL_OFFSET(dst, dstw);
 1474 
 1475     const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const));
 1476     PTR_FAIL_IF(!const_);
 1477     set_const(const_, compiler);
 1478 
 1479     reg = FAST_IS_REG(dst) ? dst : TMP_REG2;
 1480 
 1481     PTR_FAIL_IF(emit_const(compiler, reg, init_value));
 1482 
 1483     if (dst & SLJIT_MEM)
 1484         PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw));
 1485     return const_;
 1486 }