Revert "aarch64: Convert SVE gather patterns to compact syntax"

This reverts commit bb3c69058a.
Kyrylo Tkachov 2023-06-21 13:38:56 +01:00
parent 4b23d10ce8
commit 31cd5f9ae4
2 changed files with 191 additions and 275 deletions
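
For context, the reverted commit had converted these gather patterns from the
traditional define_insn style, where every match_operand carries its own
constraint string and a leading "@ template gives one output line per
alternative, to the compact syntax, where operands drop their inline
constraints and a {@ [cons: ...] ...} block pairs each alternative's
constraints with its output template (a ^ row reuses the template of the row
above; the removed blocks use it for the ?-marked tied-register alternatives).
A minimal sketch of the two styles, using a made-up add pattern rather than
anything from this commit:

;; Traditional syntax: inline constraints plus a "@ multi-alternative template.
(define_insn "*example_add"
  [(set (match_operand:SI 0 "register_operand" "=r, r")
        (plus:SI (match_operand:SI 1 "register_operand" "r, r")
                 (match_operand:SI 2 "aarch64_plus_operand" "r, I")))]
  ""
  "@
   add\t%w0, %w1, %w2
   add\t%w0, %w1, %2"
)

;; Compact syntax: constraints move out of the operands into one row per
;; alternative, next to the template they select.
(define_insn "*example_add"
  [(set (match_operand:SI 0 "register_operand")
        (plus:SI (match_operand:SI 1 "register_operand")
                 (match_operand:SI 2 "aarch64_plus_operand")))]
  ""
  {@ [cons: =0, 1, 2]
     [r, r, r] add\t%w0, %w1, %w2
     [r, r, I] add\t%w0, %w1, %2
  }
)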

gcc/config/aarch64/aarch64-sve.md

@@ -1418,79 +1418,64 @@
;; Predicated gather loads for 32-bit elements. Operand 3 is true for
;; unsigned extension and false for signed extension.
(define_insn "mask_gather_load<mode><v_int_container>"
[(set (match_operand:SVE_4 0 "register_operand")
[(set (match_operand:SVE_4 0 "register_operand" "=w, w, w, w, w, w")
(unspec:SVE_4
[(match_operand:VNx4BI 5 "register_operand")
(match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>")
(match_operand:VNx4SI 2 "register_operand")
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
[(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>" "Z, vgw, rk, rk, rk, rk")
(match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w")
(match_operand:DI 3 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5 ]
[&w, Z, w, Ui1, Ui1, Upl] ld1<Vesize>\t%0.s, %5/z, [%2.s]
[?w, Z, 0, Ui1, Ui1, Upl] ^
[&w, vgw, w, Ui1, Ui1, Upl] ld1<Vesize>\t%0.s, %5/z, [%2.s, #%1]
[?w, vgw, 0, Ui1, Ui1, Upl] ^
[&w, rk, w, Z, Ui1, Upl] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
[?w, rk, 0, Z, Ui1, Upl] ^
[&w, rk, w, Ui1, Ui1, Upl] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
[?w, rk, 0, Ui1, Ui1, Upl] ^
[&w, rk, w, Z, i, Upl] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
[?w, rk, 0, Z, i, Upl] ^
[&w, rk, w, Ui1, i, Upl] ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]
[?w, rk, 0, Ui1, i, Upl] ^
}
"@
ld1<Vesize>\t%0.s, %5/z, [%2.s]
ld1<Vesize>\t%0.s, %5/z, [%2.s, #%1]
ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
ld1<Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
)

;; Predicated gather loads for 64-bit elements. The value of operand 3
;; doesn't matter in this case.
(define_insn "mask_gather_load<mode><v_int_container>"
[(set (match_operand:SVE_2 0 "register_operand")
[(set (match_operand:SVE_2 0 "register_operand" "=w, w, w, w")
(unspec:SVE_2
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>")
(match_operand:VNx2DI 2 "register_operand")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>" "Z, vgd, rk, rk")
(match_operand:VNx2DI 2 "register_operand" "w, w, w, w")
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, Ui1, Ui1, i")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, Z, w, i, Ui1, Upl] ld1<Vesize>\t%0.d, %5/z, [%2.d]
[?w, Z, 0, i, Ui1, Upl] ^
[&w, vgd, w, i, Ui1, Upl] ld1<Vesize>\t%0.d, %5/z, [%2.d, #%1]
[?w, vgd, 0, i, Ui1, Upl] ^
[&w, rk, w, i, Ui1, Upl] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d]
[?w, rk, 0, i, Ui1, Upl] ^
[&w, rk, w, i, i, Upl] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]
[?w, rk, 0, i, i, Upl] ^
}
"@
ld1<Vesize>\t%0.d, %5/z, [%2.d]
ld1<Vesize>\t%0.d, %5/z, [%2.d, #%1]
ld1<Vesize>\t%0.d, %5/z, [%1, %2.d]
ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
)

;; Likewise, but with the offset being extended from 32 bits.
(define_insn_and_rewrite "*mask_gather_load<mode><v_int_container>_<su>xtw_unpacked"
[(set (match_operand:SVE_2 0 "register_operand")
[(set (match_operand:SVE_2 0 "register_operand" "=w, w")
(unspec:SVE_2
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "register_operand")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(unspec:VNx2DI
[(match_operand 6)
(ANY_EXTEND:VNx2DI
(match_operand:VNx2SI 2 "register_operand"))]
(match_operand:VNx2SI 2 "register_operand" "w, w"))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw]
ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw %p4]"
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1500,27 +1485,24 @@
;; Likewise, but with the offset being truncated to 32 bits and then
;; sign-extended.
(define_insn_and_rewrite "*mask_gather_load<mode><v_int_container>_sxtw"
[(set (match_operand:SVE_2 0 "register_operand")
[(set (match_operand:SVE_2 0 "register_operand" "=w, w")
(unspec:SVE_2
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "register_operand")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(unspec:VNx2DI
[(match_operand 6)
(sign_extend:VNx2DI
(truncate:VNx2SI
(match_operand:VNx2DI 2 "register_operand")))]
(match_operand:VNx2DI 2 "register_operand" "w, w")))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]"
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1530,24 +1512,21 @@
;; Likewise, but with the offset being truncated to 32 bits and then
;; zero-extended.
(define_insn "*mask_gather_load<mode><v_int_container>_uxtw"
[(set (match_operand:SVE_2 0 "register_operand")
[(set (match_operand:SVE_2 0 "register_operand" "=w, w")
(unspec:SVE_2
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "register_operand")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(and:VNx2DI
(match_operand:VNx2DI 2 "register_operand")
(match_operand:VNx2DI 2 "register_operand" "w, w")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>" "Ui1, i")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]"
)

;; -------------------------------------------------------------------------
@@ -1565,34 +1544,27 @@
;; Predicated extending gather loads for 32-bit elements. Operand 3 is
;; true for unsigned extension and false for signed extension.
(define_insn_and_rewrite "@aarch64_gather_load_<ANY_EXTEND:optab><SVE_4HSI:mode><SVE_4BHI:mode>"
[(set (match_operand:SVE_4HSI 0 "register_operand")
[(set (match_operand:SVE_4HSI 0 "register_operand" "=w, w, w, w, w, w")
(unspec:SVE_4HSI
[(match_operand:VNx4BI 6 "general_operand")
[(match_operand:VNx4BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm, UplDnm, UplDnm")
(ANY_EXTEND:SVE_4HSI
(unspec:SVE_4BHI
[(match_operand:VNx4BI 5 "register_operand")
(match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_4BHI:Vesize>")
(match_operand:VNx4SI 2 "register_operand")
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_4BHI:Vesize>")
[(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_4BHI:Vesize>" "Z, vg<SVE_4BHI:Vesize>, rk, rk, rk, rk")
(match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w")
(match_operand:DI 3 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
(match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_4BHI:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_4HSI:narrower_mask> & <SVE_4BHI:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4, 5, 6]
[&w, Z, w, Ui1, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s]
[?w, Z, 0, Ui1, Ui1, Upl, UplDnm] ^
[&w, vg<SVE_4BHI:Vesize>, w, Ui1, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s, #%1]
[?w, vg<SVE_4BHI:Vesize>, 0, Ui1, Ui1, Upl, UplDnm] ^
[&w, rk, w, Z, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
[?w, rk, 0, Z, Ui1, Upl, UplDnm] ^
[&w, rk, w, Ui1, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
[?w, rk, 0, Ui1, Ui1, Upl, UplDnm] ^
[&w, rk, w, Z, i, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
[?w, rk, 0, Z, i, Upl, UplDnm] ^
[&w, rk, w, Ui1, i, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]
[?w, rk, 0, Ui1, i, Upl, UplDnm] ^
}
"@
ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s]
ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s, #%1]
ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx4BImode);
@@ -1602,30 +1574,25 @@
;; Predicated extending gather loads for 64-bit elements. The value of
;; operand 3 doesn't matter in this case.
(define_insn_and_rewrite "@aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>"
[(set (match_operand:SVE_2HSDI 0 "register_operand")
[(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w, w, w")
(unspec:SVE_2HSDI
[(match_operand:VNx2BI 6 "general_operand")
[(match_operand:VNx2BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm")
(ANY_EXTEND:SVE_2HSDI
(unspec:SVE_2BHSI
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_2BHSI:Vesize>")
(match_operand:VNx2DI 2 "register_operand")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_2BHSI:Vesize>" "Z, vg<SVE_2BHSI:Vesize>, rk, rk")
(match_operand:VNx2DI 2 "register_operand" "w, w, w, w")
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, Ui1, Ui1, i")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4, 5, 6]
[&w, Z, w, i, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d]
[?w, Z, 0, i, Ui1, Upl, UplDnm] ^
[&w, vg<SVE_2BHSI:Vesize>, w, i, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d, #%1]
[?w, vg<SVE_2BHSI:Vesize>, 0, i, Ui1, Upl, UplDnm] ^
[&w, rk, w, i, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d]
[?w, rk, 0, i, Ui1, Upl, UplDnm] ^
[&w, rk, w, i, i, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]
[?w, rk, 0, i, i, Upl, UplDnm] ^
}
"@
ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d]
ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d, #%1]
ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d]
ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1634,30 +1601,27 @@
;; Likewise, but with the offset being extended from 32 bits.
(define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>_<ANY_EXTEND2:su>xtw_unpacked"
[(set (match_operand:SVE_2HSDI 0 "register_operand")
[(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w")
(unspec:SVE_2HSDI
[(match_operand 6)
(ANY_EXTEND:SVE_2HSDI
(unspec:SVE_2BHSI
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "aarch64_reg_or_zero")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk")
(unspec:VNx2DI
[(match_operand 7)
(ANY_EXTEND2:VNx2DI
(match_operand:VNx2SI 2 "register_operand"))]
(match_operand:VNx2SI 2 "register_operand" "w, w"))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, i")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw]
ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw %p4]"
"&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1668,31 +1632,28 @@
;; Likewise, but with the offset being truncated to 32 bits and then
;; sign-extended.
(define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>_sxtw"
[(set (match_operand:SVE_2HSDI 0 "register_operand")
[(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w")
(unspec:SVE_2HSDI
[(match_operand 6)
(ANY_EXTEND:SVE_2HSDI
(unspec:SVE_2BHSI
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "aarch64_reg_or_zero")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk")
(unspec:VNx2DI
[(match_operand 7)
(sign_extend:VNx2DI
(truncate:VNx2SI
(match_operand:VNx2DI 2 "register_operand")))]
(match_operand:VNx2DI 2 "register_operand" "w, w")))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, i")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]"
"&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1703,28 +1664,25 @@
;; Likewise, but with the offset being truncated to 32 bits and then
;; zero-extended.
(define_insn_and_rewrite "*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode><SVE_2BHSI:mode>_uxtw"
[(set (match_operand:SVE_2HSDI 0 "register_operand")
[(set (match_operand:SVE_2HSDI 0 "register_operand" "=w, w")
(unspec:SVE_2HSDI
[(match_operand 7)
(ANY_EXTEND:SVE_2HSDI
(unspec:SVE_2BHSI
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "aarch64_reg_or_zero")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk")
(and:VNx2DI
(match_operand:VNx2DI 2 "register_operand")
(match_operand:VNx2DI 2 "register_operand" "w, w")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_2BHSI:Vesize>" "Ui1, i")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]"
"&& !CONSTANT_P (operands[7])"
{
operands[7] = CONSTM1_RTX (VNx2BImode);
@@ -1742,83 +1700,68 @@
;; Predicated first-faulting gather loads for 32-bit elements. Operand
;; 3 is true for unsigned extension and false for signed extension.
(define_insn "@aarch64_ldff1_gather<mode>"
[(set (match_operand:SVE_FULL_S 0 "register_operand")
[(set (match_operand:SVE_FULL_S 0 "register_operand" "=w, w, w, w, w, w")
(unspec:SVE_FULL_S
[(match_operand:VNx4BI 5 "register_operand")
(match_operand:DI 1 "aarch64_sve_gather_offset_w")
(match_operand:VNx4SI 2 "register_operand")
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_w")
[(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_w" "Z, vgw, rk, rk, rk, rk")
(match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w")
(match_operand:DI 3 "const_int_operand" "i, i, Z, Ui1, Z, Ui1")
(match_operand:DI 4 "aarch64_gather_scale_operand_w" "Ui1, Ui1, Ui1, Ui1, i, i")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5 ]
[&w, Z, w, i, Ui1, Upl] ldff1w\t%0.s, %5/z, [%2.s]
[?w, Z, 0, i, Ui1, Upl] ^
[&w, vgw, w, i, Ui1, Upl] ldff1w\t%0.s, %5/z, [%2.s, #%1]
[?w, vgw, 0, i, Ui1, Upl] ^
[&w, rk, w, Z, Ui1, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw]
[?w, rk, 0, Z, Ui1, Upl] ^
[&w, rk, w, Ui1, Ui1, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw]
[?w, rk, 0, Ui1, Ui1, Upl] ^
[&w, rk, w, Z, i, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
[?w, rk, 0, Z, i, Upl] ^
[&w, rk, w, Ui1, i, Upl] ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4]
[?w, rk, 0, Ui1, i, Upl] ^
}
"@
ldff1w\t%0.s, %5/z, [%2.s]
ldff1w\t%0.s, %5/z, [%2.s, #%1]
ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw]
ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw]
ldff1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
ldff1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
)

;; Predicated first-faulting gather loads for 64-bit elements. The value
;; of operand 3 doesn't matter in this case.
(define_insn "@aarch64_ldff1_gather<mode>"
[(set (match_operand:SVE_FULL_D 0 "register_operand")
[(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w, w, w")
(unspec:SVE_FULL_D
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "aarch64_sve_gather_offset_d")
(match_operand:VNx2DI 2 "register_operand")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_d" "Z, vgd, rk, rk")
(match_operand:VNx2DI 2 "register_operand" "w, w, w, w")
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_d")
(match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, Ui1, Ui1, i")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5 ]
[&w, Z, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%2.d]
[?w, Z, 0, i, Ui1, Upl ] ^
[&w, vgd, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%2.d, #%1]
[?w, vgd, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, lsl %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ldff1d\t%0.d, %5/z, [%2.d]
ldff1d\t%0.d, %5/z, [%2.d, #%1]
ldff1d\t%0.d, %5/z, [%1, %2.d]
ldff1d\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
)

;; Likewise, but with the offset being sign-extended from 32 bits.
(define_insn_and_rewrite "*aarch64_ldff1_gather<mode>_sxtw"
[(set (match_operand:SVE_FULL_D 0 "register_operand")
[(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w")
(unspec:SVE_FULL_D
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "register_operand")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(unspec:VNx2DI
[(match_operand 6)
(sign_extend:VNx2DI
(truncate:VNx2SI
(match_operand:VNx2DI 2 "register_operand")))]
(match_operand:VNx2DI 2 "register_operand" "w, w")))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_d")
(match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, i")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw]
ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw %p4]"
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1827,25 +1770,22 @@
;; Likewise, but with the offset being zero-extended from 32 bits.
(define_insn "*aarch64_ldff1_gather<mode>_uxtw"
[(set (match_operand:SVE_FULL_D 0 "register_operand")
[(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w")
(unspec:SVE_FULL_D
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "register_operand")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(and:VNx2DI
(match_operand:VNx2DI 2 "register_operand")
(match_operand:VNx2DI 2 "register_operand" "w, w")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_d")
(match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, i")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw]
ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw %p4]"
)

;; -------------------------------------------------------------------------
@@ -1863,35 +1803,28 @@
;; Predicated extending first-faulting gather loads for 32-bit elements.
;; Operand 3 is true for unsigned extension and false for signed extension.
(define_insn_and_rewrite "@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx4_WIDE:mode><VNx4_NARROW:mode>"
[(set (match_operand:VNx4_WIDE 0 "register_operand")
[(set (match_operand:VNx4_WIDE 0 "register_operand" "=w, w, w, w, w, w")
(unspec:VNx4_WIDE
[(match_operand:VNx4BI 6 "general_operand")
[(match_operand:VNx4BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm, UplDnm, UplDnm")
(ANY_EXTEND:VNx4_WIDE
(unspec:VNx4_NARROW
[(match_operand:VNx4BI 5 "register_operand")
(match_operand:DI 1 "aarch64_sve_gather_offset_<VNx4_NARROW:Vesize>")
(match_operand:VNx4_WIDE 2 "register_operand")
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<VNx4_NARROW:Vesize>")
[(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_<VNx4_NARROW:Vesize>" "Z, vg<VNx4_NARROW:Vesize>, rk, rk, rk, rk")
(match_operand:VNx4_WIDE 2 "register_operand" "w, w, w, w, w, w")
(match_operand:DI 3 "const_int_operand" "i, i, Z, Ui1, Z, Ui1")
(match_operand:DI 4 "aarch64_gather_scale_operand_<VNx4_NARROW:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5, 6]
[&w, Z, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s]
[?w, Z, 0, i, Ui1, Upl, UplDnm] ^
[&w, vg<VNx4_NARROW:Vesize>, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s, #%1]
[?w, vg<VNx4_NARROW:Vesize>, 0, i, Ui1, Upl, UplDnm] ^
[&w, rk, w, Z, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
[?w, rk, 0, Z, Ui1, Upl, UplDnm] ^
[&w, rk, w, Ui1, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
[?w, rk, 0, Ui1, Ui1, Upl, UplDnm] ^
[&w, rk, w, Z, i, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
[?w, rk, 0, Z, i, Upl, UplDnm] ^
[&w, rk, w, Ui1, i, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]
[?w, rk, 0, Ui1, i, Upl, UplDnm] ^
}
"@
ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s]
ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s, #%1]
ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw]
ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw]
ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, sxtw %p4]
ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%1, %2.s, uxtw %p4]"
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx4BImode);
@@ -1901,31 +1834,26 @@
;; Predicated extending first-faulting gather loads for 64-bit elements.
;; The value of operand 3 doesn't matter in this case.
(define_insn_and_rewrite "@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>"
[(set (match_operand:VNx2_WIDE 0 "register_operand")
[(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w, w, w")
(unspec:VNx2_WIDE
[(match_operand:VNx2BI 6 "general_operand")
[(match_operand:VNx2BI 6 "general_operand" "UplDnm, UplDnm, UplDnm, UplDnm")
(ANY_EXTEND:VNx2_WIDE
(unspec:VNx2_NARROW
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "aarch64_sve_gather_offset_<VNx2_NARROW:Vesize>")
(match_operand:VNx2_WIDE 2 "register_operand")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_<VNx2_NARROW:Vesize>" "Z, vg<VNx2_NARROW:Vesize>, rk, rk")
(match_operand:VNx2_WIDE 2 "register_operand" "w, w, w, w")
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, Ui1, Ui1, i")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5, 6]
[&w, Z, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d]
[?w, Z, 0, i, Ui1, Upl, UplDnm] ^
[&w, vg<VNx2_NARROW:Vesize>, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d, #%1]
[?w, vg<VNx2_NARROW:Vesize>, 0, i, Ui1, Upl, UplDnm] ^
[&w, rk, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d]
[?w, rk, 0, i, Ui1, Upl, UplDnm] ^
[&w, rk, w, i, i, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]
[?w, rk, w, i, i, Upl, UplDnm] ^
}
"@
ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d]
ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d, #%1]
ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d]
ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, lsl %p4]"
"&& !CONSTANT_P (operands[6])"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1934,32 +1862,29 @@
;; Likewise, but with the offset being sign-extended from 32 bits.
(define_insn_and_rewrite "*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>_sxtw"
[(set (match_operand:VNx2_WIDE 0 "register_operand")
[(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w")
(unspec:VNx2_WIDE
[(match_operand 6)
(ANY_EXTEND:VNx2_WIDE
(unspec:VNx2_NARROW
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "aarch64_reg_or_zero")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk")
(unspec:VNx2DI
[(match_operand 7)
(sign_extend:VNx2DI
(truncate:VNx2SI
(match_operand:VNx2DI 2 "register_operand")))]
(match_operand:VNx2DI 2 "register_operand" "w, w")))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw %p4]"
"&& (!CONSTANT_P (operands[6]) || !CONSTANT_P (operands[7]))"
{
operands[6] = CONSTM1_RTX (VNx2BImode);
@@ -1969,29 +1894,26 @@
;; Likewise, but with the offset being zero-extended from 32 bits.
(define_insn_and_rewrite "*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode><VNx2_NARROW:mode>_uxtw"
[(set (match_operand:VNx2_WIDE 0 "register_operand")
[(set (match_operand:VNx2_WIDE 0 "register_operand" "=w, w")
(unspec:VNx2_WIDE
[(match_operand 7)
(ANY_EXTEND:VNx2_WIDE
(unspec:VNx2_NARROW
[(match_operand:VNx2BI 5 "register_operand")
(match_operand:DI 1 "aarch64_reg_or_zero")
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "aarch64_reg_or_zero" "rk, rk")
(and:VNx2DI
(match_operand:VNx2DI 2 "register_operand")
(match_operand:VNx2DI 2 "register_operand" "w, w")
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 3 "const_int_operand")
(match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>")
(match_operand:DI 4 "aarch64_gather_scale_operand_<VNx2_NARROW:Vesize>" "Ui1, i")
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
[&w, rk, w, i, i, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]
[?w, rk, 0, i, i, Upl ] ^
}
"@
ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw %p4]"
"&& !CONSTANT_P (operands[7])"
{
operands[7] = CONSTM1_RTX (VNx2BImode);

gcc/config/aarch64/aarch64-sve2.md

@@ -102,43 +102,37 @@
;; Non-extending loads.
(define_insn "@aarch64_gather_ldnt<mode>"
[(set (match_operand:SVE_FULL_SD 0 "register_operand")
[(set (match_operand:SVE_FULL_SD 0 "register_operand" "=w, w")
(unspec:SVE_FULL_SD
[(match_operand:<VPRED> 1 "register_operand")
(match_operand:DI 2 "aarch64_reg_or_zero")
(match_operand:<V_INT_EQUIV> 3 "register_operand")
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
(match_operand:DI 2 "aarch64_reg_or_zero" "Z, r")
(match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")
(mem:BLK (scratch))]
UNSPEC_LDNT1_GATHER))]
"TARGET_SVE2"
{@ [cons: =0, 1, 2, 3]
[&w, Upl, Z, w ] ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>]
[?w, Upl, Z, 0 ] ^
[&w, Upl, r, w ] ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>, %2]
[?w, Upl, r, 0 ] ^
}
"@
ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>]
ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>, %2]"
)

;; Extending loads.
(define_insn_and_rewrite "@aarch64_gather_ldnt_<ANY_EXTEND:optab><SVE_FULL_SDI:mode><SVE_PARTIAL_I:mode>"
[(set (match_operand:SVE_FULL_SDI 0 "register_operand")
[(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, w")
(unspec:SVE_FULL_SDI
[(match_operand:<SVE_FULL_SDI:VPRED> 4 "general_operand")
[(match_operand:<SVE_FULL_SDI:VPRED> 4 "general_operand" "UplDnm, UplDnm")
(ANY_EXTEND:SVE_FULL_SDI
(unspec:SVE_PARTIAL_I
[(match_operand:<SVE_FULL_SDI:VPRED> 1 "register_operand")
(match_operand:DI 2 "aarch64_reg_or_zero")
(match_operand:<SVE_FULL_SDI:V_INT_EQUIV> 3 "register_operand")
[(match_operand:<SVE_FULL_SDI:VPRED> 1 "register_operand" "Upl, Upl")
(match_operand:DI 2 "aarch64_reg_or_zero" "Z, r")
(match_operand:<SVE_FULL_SDI:V_INT_EQUIV> 3 "register_operand" "w, w")
(mem:BLK (scratch))]
UNSPEC_LDNT1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE2
&& (~<SVE_FULL_SDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4]
[&w, Upl, Z, w, UplDnm] ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>]
[?w, Upl, Z, 0, UplDnm] ^
[&w, Upl, r, w, UplDnm] ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>, %2]
[?w, Upl, r, 0, UplDnm] ^
}
"@
ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>]
ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>, %2]"
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (<SVE_FULL_SDI:VPRED>mode);