@@ -552,7 +552,7 @@ simde_vextq_f32(simde_float32x4_t a, simde_float32x4_t b, const int n)
         HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \
       simde_float32x4_from_private(simde_vextq_f32_r_); \
     }))
-#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
+#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_121064)
   #define simde_vextq_f32(a, b, n) (__extension__ ({ \
       simde_float32x4_private simde_vextq_f32_r_; \
       simde_vextq_f32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_float32x4_to_private(a).values, simde_float32x4_to_private(b).values, \
@@ -661,7 +661,7 @@ simde_vextq_s8(simde_int8x16_t a, simde_int8x16_t b, const int n)
         HEDLEY_STATIC_CAST(int8_t, ((n) + 14)), HEDLEY_STATIC_CAST(int8_t, ((n) + 15))); \
       simde_int8x16_from_private(simde_vextq_s8_r_); \
     }))
-#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
+#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_121064)
   #define simde_vextq_s8(a, b, n) (__extension__ ({ \
       simde_int8x16_private simde_vextq_s8_r_; \
       simde_vextq_s8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, simde_int8x16_to_private(a).values, simde_int8x16_to_private(b).values, \
@@ -719,7 +719,7 @@ simde_vextq_s16(simde_int16x8_t a, simde_int16x8_t b, const int n)
         HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \
       simde_int16x8_from_private(simde_vextq_s16_r_); \
     }))
-#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
+#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_121064)
   #define simde_vextq_s16(a, b, n) (__extension__ ({ \
       simde_int16x8_private simde_vextq_s16_r_; \
       simde_vextq_s16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, simde_int16x8_to_private(a).values, simde_int16x8_to_private(b).values, \
@@ -771,7 +771,7 @@ simde_vextq_s32(simde_int32x4_t a, simde_int32x4_t b, const int n)
         HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \
       simde_int32x4_from_private(simde_vextq_s32_r_); \
     }))
-#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
+#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_121064)
   #define simde_vextq_s32(a, b, n) (__extension__ ({ \
       simde_int32x4_private simde_vextq_s32_r_; \
       simde_vextq_s32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_int32x4_to_private(a).values, simde_int32x4_to_private(b).values, \
@@ -824,7 +824,7 @@ simde_vextq_s64(simde_int64x2_t a, simde_int64x2_t b, const int n)
         HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \
       simde_int64x2_from_private(simde_vextq_s64_r_); \
     }))
-#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
+#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_121064)
   #define simde_vextq_s64(a, b, n) (__extension__ ({ \
       simde_int64x2_private simde_vextq_s64_r_; \
       simde_vextq_s64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_int64x2_to_private(a).values, simde_int64x2_to_private(b).values, \
@@ -870,7 +870,7 @@ simde_vextq_u8(simde_uint8x16_t a, simde_uint8x16_t b, const int n)
 }
 #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
   #define simde_vextq_u8(a, b, n) simde_uint8x16_from_m128i(_mm_alignr_epi8(simde_uint8x16_to_m128i(b), simde_uint8x16_to_m128i(a), n * sizeof(uint8_t)))
-#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
+#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_121064)
   #define simde_vextq_u8(a, b, n) (__extension__ ({ \
       simde_uint8x16_private simde_vextq_u8_r_; \
       simde_vextq_u8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, simde_uint8x16_to_private(a).values, simde_uint8x16_to_private(b).values, \
@@ -918,7 +918,7 @@ simde_vextq_u16(simde_uint16x8_t a, simde_uint16x8_t b, const int n)
 }
 #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
   #define simde_vextq_u16(a, b, n) simde_uint16x8_from_m128i(_mm_alignr_epi8(simde_uint16x8_to_m128i(b), simde_uint16x8_to_m128i(a), n * sizeof(uint16_t)))
-#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
+#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_121064)
   #define simde_vextq_u16(a, b, n) (__extension__ ({ \
       simde_uint16x8_private simde_vextq_u16_r_; \
       simde_vextq_u16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, simde_uint16x8_to_private(a).values, simde_uint16x8_to_private(b).values, \
@@ -928,7 +928,7 @@ simde_vextq_u16(simde_uint16x8_t a, simde_uint16x8_t b, const int n)
         HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \
       simde_uint16x8_from_private(simde_vextq_u16_r_); \
     }))
-#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
+#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && !defined(SIMDE_BUG_GCC_121064)
   #define simde_vextq_u16(a, b, n) (__extension__ ({ \
       simde_uint16x8_private r_; \
       r_.values = __builtin_shufflevector( \
@@ -971,7 +971,7 @@ simde_vextq_u32(simde_uint32x4_t a, simde_uint32x4_t b, const int n)
 }
 #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
   #define simde_vextq_u32(a, b, n) simde_uint32x4_from_m128i(_mm_alignr_epi8(simde_uint32x4_to_m128i(b), simde_uint32x4_to_m128i(a), n * sizeof(uint32_t)))
-#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
+#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_121064)
   #define simde_vextq_u32(a, b, n) (__extension__ ({ \
       simde_uint32x4_private simde_vextq_u32_r_; \
       simde_vextq_u32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_uint32x4_to_private(a).values, simde_uint32x4_to_private(b).values, \
@@ -1017,7 +1017,7 @@ simde_vextq_u64(simde_uint64x2_t a, simde_uint64x2_t b, const int n)
 }
 #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
   #define simde_vextq_u64(a, b, n) simde_uint64x2_from_m128i(_mm_alignr_epi8(simde_uint64x2_to_m128i(b), simde_uint64x2_to_m128i(a), n * sizeof(uint64_t)))
-#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
+#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_121064)
   #define simde_vextq_u64(a, b, n) (__extension__ ({ \
       simde_uint64x2_private simde_vextq_u64_r_; \
       simde_vextq_u64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_uint64x2_to_private(a).values, simde_uint64x2_to_private(b).values, \
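The pattern is uniform across the 128-bit vext family: each shuffle-based fallback (SIMDE_SHUFFLE_VECTOR_ or __builtin_shufflevector) gains an extra !defined(SIMDE_BUG_GCC_121064) condition, so a compiler affected by GCC bug 121064 skips that branch and SIMDe falls through to the next available implementation, ultimately the portable per-lane loop (vextq_f32(a, b, 1), for example, yields { a[1], a[2], a[3], b[0] }). The diff does not show where SIMDE_BUG_GCC_121064 itself is defined; in SIMDe, SIMDE_BUG_* workaround macros are normally defined in simde/simde-common.h behind compiler version checks. A minimal sketch of such a definition, with the version bounds purely an assumption, might look like:

    /* Hypothetical sketch, not part of this diff: the version bound below is an
     * assumption; only the macro name is taken from the change above. */
    #if defined(HEDLEY_GCC_VERSION) && !HEDLEY_GCC_VERSION_CHECK(16,0,0)
      #define SIMDE_BUG_GCC_121064  /* assumed: GCC releases affected by PR 121064 */
    #endif

With the macro defined, the #elif branches above evaluate to false, trading the vectorized shuffle for the slower but correct scalar path on the affected compiler.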