RISC-V: Add crypto vector api-testing cases.

Patch v8: Resubmit after fixing the RTL checking issue. Passed all the riscv regression tests.
Patch v7: Add a newline at the end of file.
Patch v6: Move intrinsic tests into rvv/base.
Patch v5: Rebase.
Patch v4: Add some RV32 vx constraint test cases.
Patch v3: Refine crypto vector api-testing cases.
Patch v2: Update march info according to the change of riscv-common.c.

This patch adds crypto vector api-testing cases based on
https://github.com/riscv-non-isa/rvv-intrinsic-doc/blob/eopc/vector-crypto/auto-generated/vector-crypto

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/rvv/base/zvbb-intrinsic.c: New test.
	* gcc.target/riscv/rvv/base/zvbb_vandn_vx_constraint.c: New test.
	* gcc.target/riscv/rvv/base/zvbc-intrinsic.c: New test.
	* gcc.target/riscv/rvv/base/zvbc_vx_constraint-1.c: New test.
	* gcc.target/riscv/rvv/base/zvbc_vx_constraint-2.c: New test.
	* gcc.target/riscv/rvv/base/zvkg-intrinsic.c: New test.
	* gcc.target/riscv/rvv/base/zvkned-intrinsic.c: New test.
	* gcc.target/riscv/rvv/base/zvknha-intrinsic.c: New test.
	* gcc.target/riscv/rvv/base/zvknhb-intrinsic.c: New test.
	* gcc.target/riscv/rvv/base/zvksed-intrinsic.c: New test.
	* gcc.target/riscv/rvv/base/zvksh-intrinsic.c: New test.
	* gcc.target/riscv/zvkb.c: New test.
parent e50a1ed3d3
commit 411b210797
12 changed files with 548 additions and 0 deletions
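
These are compile-only DejaGnu tests: each file is built with the -march/-mabi string in its dg-options line, and the generated assembly is matched against the scan-assembler-times patterns at the end of the file. Assuming the usual GCC testsuite flow (the exact glob passed to rvv.exp is an assumption and may need a path prefix), a single file can be exercised from the gcc build directory with, e.g., make check-gcc RUNTESTFLAGS="rvv.exp=zvbb-intrinsic.c".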
gcc/testsuite/gcc.target/riscv/rvv/base/zvbb-intrinsic.c (new file, 179 lines added)
@@ -0,0 +1,179 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvbb_zve64x -mabi=lp64d -Wno-psabi" } */
#include "riscv_vector.h"

vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
  return __riscv_vandn_vv_u8mf8(vs2, vs1, vl);
}

vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vandn_vx_u32m1(vs2, rs1, vl);
}

vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vandn_vv_u32m2_m(mask, vs2, vs1, vl);
}

vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vandn_vx_u16mf2_m(mask, vs2, rs1, vl);
}

vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vandn_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl);
}

vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl);
}

vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) {
  return __riscv_vbrev_v_u8m8(vs2, vl);
}

vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
  return __riscv_vbrev_v_u16m1_m(mask, vs2, vl);
}

vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vbrev_v_u32m4_tumu(mask, maskedoff, vs2, vl);
}

vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
  return __riscv_vbrev8_v_u16mf4(vs2, vl);
}

vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
  return __riscv_vbrev8_v_u32m1_m(mask, vs2, vl);
}

vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vbrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl);
}

vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16m4(vs2, vl);
}

vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8m4_m(mask, vs2, vl);
}

vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl);
}

vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
  return __riscv_vrol_vv_u8m8(vs2, vs1, vl);
}

vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) {
  return __riscv_vrol_vx_u16m4(vs2, rs1, vl);
}

vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_vrol_vv_u16mf2_m(mask, vs2, vs1, vl);
}

vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t rs1, size_t vl) {
  return __riscv_vrol_vx_u64m1_m(mask, vs2, rs1, vl);
}

vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
  return __riscv_vrol_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl);
}

vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t rs1, size_t vl) {
  return __riscv_vrol_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl);
}

vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
  return __riscv_vror_vv_u8m8(vs2, vs1, vl);
}

vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) {
  return __riscv_vror_vx_u32m2(vs2, rs1, vl);
}

vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_vror_vv_u16mf2_m(mask, vs2, vs1, vl);
}

vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) {
  return __riscv_vror_vx_u16m1_m(mask, vs2, rs1, vl);
}

vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_vror_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl);
}

vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t rs1, size_t vl) {
  return __riscv_vror_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl);
}

vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m2(vs2, vl);
}

vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m2_m(mask, vs2, vl);
}

vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
  return __riscv_vctz_v_u16mf4(vs2, vl);
}

vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
  return __riscv_vctz_v_u32m8_m(mask, vs2, vl);
}

vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) {
  return __riscv_vwsll_vx_u16mf4(vs2, rs1, vl);
}

vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
  return __riscv_vwsll_vv_u16m1(vs2, vs1, vl);
}

vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
  return __riscv_vwsll_vv_u32m2_m(mask, vs2, vs1, vl);
}

vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t vs2, size_t rs1, size_t vl) {
  return __riscv_vwsll_vx_u32m2_m(mask, vs2, rs1, vl);
}

vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
  return __riscv_vwsll_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl);
}

vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t vs2, size_t rs1, size_t vl) {
  return __riscv_vwsll_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl);
}

/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 26 } } */
/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 11 } } */
/* { dg-final { scan-assembler-times {vandn\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vandn\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vandn\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vandn\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vbrev\.v\s+v[0-9]+,\s*v[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vbrev\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vbrev8\.v\s+v[0-9]+,\s*v[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vbrev8\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vrev8\.v\s+v[0-9]+,\s*v[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vrev8\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vrol\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vrol\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vrol\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vrol\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vror\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vror\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vror\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vror\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vclz\.v\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vclz\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 1 } } */
/* { dg-final { scan-assembler-times {vctz\.v\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vctz\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0.t} 1 } } */
/* { dg-final { scan-assembler-times {vwsll\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vwsll\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vwsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vwsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 2 } } */
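A note on the suffix scheme driving the vsetvli counts above: intrinsics without a policy suffix are the plain tail-agnostic forms, _m adds a mask operand with mask-agnostic behavior, and _tumu takes a maskedoff operand whose tail and inactive elements are preserved, which is why the 11 tumu tests account for the tu,mu vsetvli matches. A minimal sketch of the three shapes for one zvbb operation (the function names below are illustrative, not part of the patch; same march string as the test above):

#include "riscv_vector.h"

/* Plain form: tail elements are agnostic, no mask. */
vuint32m1_t andn_plain(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vandn_vv_u32m1(vs2, vs1, vl);
}

/* _m: masked, inactive elements are agnostic. */
vuint32m1_t andn_masked(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vandn_vv_u32m1_m(mask, vs2, vs1, vl);
}

/* _tumu: tail and inactive elements are taken from maskedoff. */
vuint32m1_t andn_tumu(vbool32_t mask, vuint32m1_t maskedoff,
                      vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vandn_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl);
}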
gcc/testsuite/gcc.target/riscv/rvv/base/zvbb_vandn_vx_constraint.c (new file, 15 lines added)
@@ -0,0 +1,15 @@
/* { dg-do compile } */
/* { dg-options "-march=rv32gc_zvbb_zve64x -mabi=ilp32 -O3 -Wno-psabi" } */
#include "riscv_vector.h"

vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vandn_vx_u64m1(vs2, rs1, vl);
}

vuint64m1_t test_vandn_vx_u64m1_extend(vuint64m1_t vs2, size_t vl) {
  uint32_t rs1 = 0x12345678;
  return __riscv_vandn_vx_u64m1(vs2, rs1, vl);
}

/* { dg-final { scan-assembler-times {vandn\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 1 } } */
/* { dg-final { scan-assembler-times {vandn\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 1 } } */
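What this constraint test pins down: under rv32, the arbitrary uint64_t scalar in the first function cannot be held in a single 32-bit x-register, so the compiler must splat it into a vector and fall back to vandn.vv (exactly one .vv match), whereas the known 32-bit value in the second function extends cleanly to 64 bits in a register, so vandn.vx survives (exactly one .vx match).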
gcc/testsuite/gcc.target/riscv/rvv/base/zvbc-intrinsic.c (new file, 62 lines added)
@@ -0,0 +1,62 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvbc -mabi=lp64d -O2 -Wno-psabi" } */

#include "riscv_vector.h"

vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  return __riscv_vclmul_vv_u64m1(vs2, vs1, vl);
}

vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vclmul_vx_u64m1(vs2, rs1, vl);
}

vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_vclmul_vv_u64m2_m(mask, vs2, vs1, vl);
}

vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vclmul_vx_u64m2_m(mask, vs2, rs1, vl);
}

vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_vclmul_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl);
}

vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vclmul_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl);
}

vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_vclmulh_vv_u64m2(vs2, vs1, vl);
}

vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vclmulh_vx_u64m2(vs2, rs1, vl);
}

vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vclmulh_vx_u64m1_m(mask, vs2, rs1, vl);
}

vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_vclmulh_vv_u64m2_m(mask, vs2, vs1, vl);
}

vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_vclmulh_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl);
}

vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vclmulh_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl);
}

/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 8 } } */
/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*mu} 4 } } */
/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t} 2 } } */
/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 3 } } */
/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]+,\s*v0.t} 2 } } */
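For intuition about what these tests exercise: vclmul and vclmulh return the low and high 64-bit halves of a carry-less (GF(2) polynomial) product, the primitive behind GHASH and CRC. A scalar reference model of a single lane, included purely as an illustration (clmul_ref is a hypothetical helper, not part of the patch or of riscv_vector.h):

#include <stdint.h>

/* Carry-less multiply of a and b: *lo gets the low 64 bits of the
   128-bit polynomial product (what vclmul returns per element),
   *hi gets the high 64 bits (what vclmulh returns). */
static void clmul_ref(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi) {
  uint64_t l = 0, h = 0;
  for (int i = 0; i < 64; i++)
    if ((b >> i) & 1) {
      l ^= a << i;                 /* bits that stay in the low half */
      h ^= i ? a >> (64 - i) : 0;  /* bits shifted into the high half */
    }
  *lo = l;
  *hi = h;
}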
gcc/testsuite/gcc.target/riscv/rvv/base/zvbc_vx_constraint-1.c (new file, 14 lines added)
@@ -0,0 +1,14 @@
/* { dg-do compile } */
/* { dg-options "-march=rv32gc_zvbc -mabi=ilp32 -O3 -Wno-psabi" } */
#include "riscv_vector.h"

vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vclmul_vx_u64m1(vs2, rs1, vl);
}

vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl);
}

/* { dg-final { scan-assembler-times {vclmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 1 } } */
/* { dg-final { scan-assembler-times {vclmulh\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]} 1 } } */
gcc/testsuite/gcc.target/riscv/rvv/base/zvbc_vx_constraint-2.c (new file, 14 lines added)
@@ -0,0 +1,14 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvbc -mabi=lp64d -O3 -Wno-psabi" } */
#include "riscv_vector.h"

vuint64m1_t test_vclmul_vx_u64m1_extend(vuint64m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vclmul_vx_u64m1(vs2, rs1, vl);
}

vuint64m1_t test_vclmulh_vx_u64m1_extend(vuint64m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl);
}

/* { dg-final { scan-assembler-times {vclmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 1 } } */
/* { dg-final { scan-assembler-times {vclmulh\.vx\s+v[0-9]+,\s*v[0-9]+,\s*a[0-9]} 1 } } */
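Taken together, the two zvbc constraint tests bracket the same register-width rule as the zvbb one above: constraint-1 compiles for rv32, where a uint64_t scalar cannot travel in one x-register, so both .vx intrinsics are expected to lower to the .vv forms; constraint-2 compiles for rv64 with a uint32_t argument, which is legitimately extended to XLEN, so the .vx forms must survive.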
gcc/testsuite/gcc.target/riscv/rvv/base/zvkg-intrinsic.c (new file, 24 lines added)
@@ -0,0 +1,24 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvkg_zve64x -mabi=lp64d -O2 -Wno-psabi" } */

#include "riscv_vector.h"

vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vgmul_vv_u32mf2(vd, vs2, vl);
}

vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vgmul_vv_u32m1_tu(vd, vs2, vl);
}

vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl);
}

vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vghsh_vv_u32m4_tu(vd, vs2, vs1, vl);
}

/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 2 } } */
/* { dg-final { scan-assembler-times {vgmul\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vghsh\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
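A point that recurs in this and the remaining intrinsic files: the element-group crypto instructions (vgmul/vghsh here, and the vaes*, vsha2*, vsm4r and vsm3c families below) are destructive, unmasked operations on vd, so the intrinsics expose only a tail policy. That is why the _tu tests pair with a tu,ma vsetvli rather than the tu,mu seen for the maskable zvbb/zvbc ops.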
gcc/testsuite/gcc.target/riscv/rvv/base/zvkned-intrinsic.c (new file, 104 lines added)
@@ -0,0 +1,104 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvkned_zve64x -mabi=lp64d -O2 -Wno-psabi" } */
#include "riscv_vector.h"

vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl);
}

vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaesdf_vs_u32mf2_u32mf2(vd, vs2, vl);
}

vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl);
}

vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vaesdf_vs_u32m2_u32m2_tu(vd, vs2, vl);
}

vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesdm_vv_u32m1(vd, vs2, vl);
}

vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesdm_vs_u32m1_u32m4(vd, vs2, vl);
}

vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl);
}

vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesdm_vs_u32m1_u32m2_tu(vd, vs2, vl);
}

vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vaesef_vv_u32m2(vd, vs2, vl);
}

vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vaesef_vs_u32m2_u32m2(vd, vs2, vl);
}

vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
  return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl);
}

vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
  return __riscv_vaesef_vs_u32m4_u32m8_tu(vd, vs2, vl);
}

vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
  return __riscv_vaesem_vv_u32m8(vd, vs2, vl);
}

vuint32m8_t test_vaesem_vs_u32m8_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
  return __riscv_vaesem_vs_u32m8_u32m8(vd, vs2, vl);
}

vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl);
}

vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaesem_vs_u32mf2_u32m8_tu(vd, vs2, vl);
}

vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl);
}

vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaeskf1_vi_u32m1_tu(maskedoff, vs2, 0, vl);
}

vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vaeskf2_vi_u32m2(vd, vs2, 0, vl);
}

vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
  return __riscv_vaeskf2_vi_u32m4_tu(vd, vs2, 0, vl);
}

vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesz_vs_u32m1_u32m4(vd, vs2, vl);
}

vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesz_vs_u32m1_u32m8_tu(vd, vs2, vl);
}

/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 11 } } */
/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 11 } } */
/* { dg-final { scan-assembler-times {vaesdf\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vaesdf\.vs\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vaesdm\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vaesdm\.vs\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vaesef\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vaesef\.vs\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vaesem\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vaesem\.vs\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vaeskf1\.vi\s+v[0-9]+,\s*v[0-9]+,0} 2 } } */
/* { dg-final { scan-assembler-times {vaeskf2\.vi\s+v[0-9]+,\s*v[0-9]+,0} 2 } } */
/* { dg-final { scan-assembler-times {vaesz\.vs\s+v[0-9]+,\s*v[0-9]} 2 } } */
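On the double type suffixes in this file: the .vv forms apply the AES round within each element group of vd, while the .vs forms broadcast element group 0 of vs2 as the round key across every group of vd. The _vs_ intrinsics therefore name both the vs2 source type and the vd destination type, e.g. __riscv_vaesdm_vs_u32m1_u32m4 takes a u32m1 round key and a u32m4 state.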
gcc/testsuite/gcc.target/riscv/rvv/base/zvknha-intrinsic.c (new file, 33 lines added)
@@ -0,0 +1,33 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvknha_zve64x -mabi=lp64d -O2 -Wno-psabi" } */

#include "riscv_vector.h"

vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl);
}

vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vsha2cl_vv_u32m1_tu(vd, vs2, vs1, vl);
}

vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl);
}

vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vsha2ch_vv_u32m4_tu(vd, vs2, vs1, vl);
}

vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl);
}

vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_vsha2ms_vv_u32m8_tu(vd, vs2, vs1, vl);
}

/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 3 } } */
/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 3 } } */
/* { dg-final { scan-assembler-times {vsha2cl\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vsha2ch\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vsha2ms\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
gcc/testsuite/gcc.target/riscv/rvv/base/zvknhb-intrinsic.c (new file, 33 lines added)
@@ -0,0 +1,33 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvknhb -mabi=lp64d -O2 -Wno-psabi" } */

#include "riscv_vector.h"

vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl);
}

vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl);
}

vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, vl);
}

vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vsha2ch_vv_u32m2_tu(vd, vs2, vs1, vl);
}

vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl);
}

vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, vl);
}

/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 3 } } */
/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 3 } } */
/* { dg-final { scan-assembler-times {vsha2cl\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vsha2ch\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vsha2ms\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
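The zvknha and zvknhb files are deliberate near-duplicates: Zvknha provides the SHA-2 instructions only for SEW=32 (SHA-256), while Zvknhb additionally allows SEW=64 (SHA-512), hence the vuint64m8_t test above. The march string rv64gc_zvknhb also drops the explicit _zve64x, presumably because Zvknhb pulls in the required 64-bit vector baseline through GCC's implied-extension tables.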
gcc/testsuite/gcc.target/riscv/rvv/base/zvksed-intrinsic.c (new file, 33 lines added)
@@ -0,0 +1,33 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvksed_zve64x -mabi=lp64d -O2 -Wno-psabi" } */

#include "riscv_vector.h"

vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) {
  return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl);
}

vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vsm4k_vi_u32m1_tu(maskedoff, vs2, 0, vl);
}

vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vsm4r_vv_u32m2(vd, vs2, vl);
}

vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
  return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl);
}

vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vsm4r_vs_u32mf2_u32m4(vd, vs2, vl);
}

vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vsm4r_vs_u32m1_u32m8_tu(vd, vs2, vl);
}

/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 3 } } */
/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 3 } } */
/* { dg-final { scan-assembler-times {vsm4k\.vi\s+v[0-9]+,\s*v[0-9]+,0} 2 } } */
/* { dg-final { scan-assembler-times {vsm4r\.vv\s+v[0-9]+,\s*v[0-9]} 2 } } */
/* { dg-final { scan-assembler-times {vsm4r\.vs\s+v[0-9]+,\s*v[0-9]} 2 } } */
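In these SM4 tests the immediate handed to vsm4k (0 throughout, matching the ,0 in the scan patterns) selects which round-key group is generated; a single representative value is enough here, since the test checks instruction selection rather than the key schedule itself.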
gcc/testsuite/gcc.target/riscv/rvv/base/zvksh-intrinsic.c (new file, 24 lines added)
@@ -0,0 +1,24 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvksh_zve64x -mabi=lp64d -O2 -Wno-psabi" } */

#include "riscv_vector.h"

vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl);
}

vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl);
}

vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vsm3me_vv_u32m2(vs2, vs1, vl);
}

vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vsm3me_vv_u32m4_tu(maskedoff, vs2, vs1, vl);
}

/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 2 } } */
/* { dg-final { scan-assembler-times {vsm3c\.vi\s+v[0-9]+,\s*v[0-9]+,0} 2 } } */
/* { dg-final { scan-assembler-times {vsm3me\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
gcc/testsuite/gcc.target/riscv/zvkb.c (new file, 13 lines added)
@@ -0,0 +1,13 @@
/* { dg-do compile } */
/* { dg-options "-march=rv64gc_zvkb" { target { rv64 } } } */
/* { dg-options "-march=rv32gc_zvkb" { target { rv32 } } } */

#ifndef __riscv_zvkb
#error "Feature macro not defined"
#endif

int
foo (int a)
{
  return a;
}
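Unlike the intrinsic tests above, zvkb.c emits no vector code at all: it only checks that selecting Zvkb via -march defines the __riscv_zvkb feature-test macro on both rv32 and rv64, with a trivial function so there is something to compile.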