/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/*
 * DEF(name, oargs, iargs, cargs, flags)
 *
 * name:  opcode name
 * oargs: number of output arguments
 * iargs: number of input arguments
 * cargs: number of constant arguments
 * flags: TCG_OPF_* properties of the opcode
 */
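
/*
 * This file is an X-macro list: the including file defines DEF before
 * including it.  A minimal sketch of one consumer (how the opcode enum
 * is typically built; simplified from tcg.h):
 *
 *   #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_##name,
 *   typedef enum TCGOpcode {
 *   #include "tcg-opc.h"
 *       NB_OPS,
 *   } TCGOpcode;
 *   #undef DEF
 */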

/* predefined ops */
DEF(discard, 1, 0, 0, TCG_OPF_NOT_PRESENT)
DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)

/* variable number of parameters */
DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT)

DEF(br, 0, 0, 1, TCG_OPF_BB_END)

#define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? TCG_OPF_NOT_PRESENT : 0)
#if TCG_TARGET_REG_BITS == 32
# define IMPL64 TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT
#else
# define IMPL64 TCG_OPF_64BIT
#endif
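
/*
 * How the IMPL macros work: IMPL(X) folds to TCG_OPF_NOT_PRESENT when
 * the target's TCG_TARGET_HAS_* flag is a compile-time zero, marking
 * the op as one the backend cannot emit.  E.g. with
 * TCG_TARGET_HAS_div_i32 defined as 0, div_i32 below becomes
 * not-present and must be expanded by generic code instead.  IMPL64
 * additionally marks every 64-bit op not-present on a 32-bit host.
 */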

DEF(mb, 0, 0, 1, 0)

DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
DEF(movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(setcond_i32, 1, 2, 1, 0)
DEF(movcond_i32, 1, 4, 1, IMPL(TCG_TARGET_HAS_movcond_i32))
/* load/store */
DEF(ld8u_i32, 1, 1, 1, 0)
DEF(ld8s_i32, 1, 1, 1, 0)
DEF(ld16u_i32, 1, 1, 1, 0)
DEF(ld16s_i32, 1, 1, 1, 0)
DEF(ld_i32, 1, 1, 1, 0)
DEF(st8_i32, 0, 2, 1, 0)
DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* arith */
DEF(add_i32, 1, 2, 0, 0)
DEF(sub_i32, 1, 2, 0, 0)
DEF(mul_i32, 1, 2, 0, 0)
DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
DEF(divu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
DEF(rem_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
DEF(remu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
DEF(div2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
DEF(divu2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
DEF(and_i32, 1, 2, 0, 0)
DEF(or_i32, 1, 2, 0, 0)
DEF(xor_i32, 1, 2, 0, 0)
/* shifts/rotates */
DEF(shl_i32, 1, 2, 0, 0)
DEF(shr_i32, 1, 2, 0, 0)
DEF(sar_i32, 1, 2, 0, 0)
DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32))
DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32))
DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32))
DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32))

DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END)

DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32))
DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32))
DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32))
DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32))
DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32))
DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32))
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | IMPL(TCG_TARGET_REG_BITS == 32))
DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32))

DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32))
DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32))
DEF(ext8u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32))
DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32))
DEF(bswap16_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap16_i32))
DEF(bswap32_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap32_i32))
DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32))
DEF(neg_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_neg_i32))
DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32))
DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32))
DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32))
DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32))
DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32))
DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32))
DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32))
DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))

DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
DEF(movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
DEF(setcond_i64, 1, 2, 1, IMPL64)
DEF(movcond_i64, 1, 4, 1, IMPL64 | IMPL(TCG_TARGET_HAS_movcond_i64))
/* load/store */
DEF(ld8u_i64, 1, 1, 1, IMPL64)
DEF(ld8s_i64, 1, 1, 1, IMPL64)
DEF(ld16u_i64, 1, 1, 1, IMPL64)
DEF(ld16s_i64, 1, 1, 1, IMPL64)
DEF(ld32u_i64, 1, 1, 1, IMPL64)
DEF(ld32s_i64, 1, 1, 1, IMPL64)
DEF(ld_i64, 1, 1, 1, IMPL64)
DEF(st8_i64, 0, 2, 1, IMPL64)
DEF(st16_i64, 0, 2, 1, IMPL64)
DEF(st32_i64, 0, 2, 1, IMPL64)
DEF(st_i64, 0, 2, 1, IMPL64)
/* arith */
DEF(add_i64, 1, 2, 0, IMPL64)
DEF(sub_i64, 1, 2, 0, IMPL64)
DEF(mul_i64, 1, 2, 0, IMPL64)
DEF(div_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
DEF(divu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
DEF(rem_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
DEF(remu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
DEF(div2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
DEF(divu2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
DEF(and_i64, 1, 2, 0, IMPL64)
DEF(or_i64, 1, 2, 0, IMPL64)
DEF(xor_i64, 1, 2, 0, IMPL64)
/* shifts/rotates */
DEF(shl_i64, 1, 2, 0, IMPL64)
DEF(shr_i64, 1, 2, 0, IMPL64)
DEF(sar_i64, 1, 2, 0, IMPL64)
DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))
DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64))
DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64))
DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64))

/* size changing ops */
DEF(ext_i32_i64, 1, 1, 0, IMPL64)
DEF(extu_i32_i64, 1, 1, 0, IMPL64)
DEF(extrl_i64_i32, 1, 1, 0,
    IMPL(TCG_TARGET_HAS_extrl_i64_i32)
    | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
DEF(extrh_i64_i32, 1, 1, 0,
    IMPL(TCG_TARGET_HAS_extrh_i64_i32)
    | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
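
/*
 * On a 32-bit host a 64-bit value already occupies a register pair,
 * so extrl/extrh degenerate to plain register moves; the ternary
 * above marks them TCG_OPF_NOT_PRESENT there rather than requiring
 * backend support.
 */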

DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | IMPL64)
DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64))
DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64))
DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64))
DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64))
DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64))
DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64))
DEF(bswap16_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64))
DEF(bswap32_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64))
DEF(bswap64_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64))
DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64))
DEF(neg_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_neg_i64))
DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64))
DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64))
DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64))
DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64))
DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64))
DEF(clz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_clz_i64))
DEF(ctz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctz_i64))
DEF(ctpop_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctpop_i64))

DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64))
DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64))
DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64))
DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64))
DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64))
DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64))

#define TLADDR_ARGS (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2)
#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
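
/*
 * Worked example: for a 64-bit guest on a 32-bit host,
 * TARGET_LONG_BITS = 64 > TCG_TARGET_REG_BITS = 32, so TLADDR_ARGS
 * and DATA64_ARGS are both 2 and each guest address or 64-bit datum
 * below occupies a register pair; on a 64-bit host both are 1.
 */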

/* QEMU specific */
DEF(insn_start, 0, 0, TLADDR_ARGS * TARGET_INSN_START_WORDS,
    TCG_OPF_NOT_PRESENT)
DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(goto_ptr, 0, 1, 0,
    TCG_OPF_BB_EXIT | TCG_OPF_BB_END | IMPL(TCG_TARGET_HAS_goto_ptr))

DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)

/* Host vector support. */
#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)
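
/*
 * All vector ops are gated on TCG_TARGET_MAYBE_vec via IMPLVEC: a
 * backend without vector support sees every op below as
 * TCG_OPF_NOT_PRESENT, and the generic expansion falls back to
 * integer operations.
 */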

DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)
DEF(dupi_vec, 1, 0, 1, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)

DEF(dup_vec, 1, 1, 0, IMPLVEC)
DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32))

DEF(ld_vec, 1, 1, 1, IMPLVEC)
DEF(st_vec, 0, 2, 1, IMPLVEC)
/* dup from memory: lets the backend load-and-duplicate directly,
   instead of forcing the value through a temp first.  */
DEF(dupm_vec, 1, 1, 1, IMPLVEC)

DEF(add_vec, 1, 2, 0, IMPLVEC)
DEF(sub_vec, 1, 2, 0, IMPLVEC)
DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec))
DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec))
DEF(abs_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_abs_vec))
DEF(ssadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(usadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(sssub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(ussub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(smin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
DEF(umin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
DEF(smax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
DEF(umax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))

DEF(and_vec, 1, 2, 0, IMPLVEC)
DEF(or_vec, 1, 2, 0, IMPLVEC)
DEF(xor_vec, 1, 2, 0, IMPLVEC)
DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec))
DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec))
DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))

DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
DEF(shri_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
DEF(sari_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))

DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))

DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
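
/*
 * The three shift groups above differ only in where the count comes
 * from: the i forms shift all lanes by an immediate, the s forms by a
 * single scalar count, and the v forms lane-by-lane by a count vector.
 */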

DEF(cmp_vec, 1, 2, 1, IMPLVEC)

DEF(bitsel_vec, 1, 3, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_bitsel_vec))
DEF(cmpsel_vec, 1, 4, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_cmpsel_vec))

DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)

#if TCG_TARGET_MAYBE_vec
#include "tcg-target.opc.h"
#endif

#undef TLADDR_ARGS
#undef DATA64_ARGS
#undef IMPL
#undef IMPL64
#undef IMPLVEC
#undef DEF