@@ -9799,7 +9799,7 @@ pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t {
97999799 unsafe extern "unadjusted" {
98009800 #[cfg_attr(
98019801 target_arch = "aarch64",
9802-            link_name = "llvm.experimental.vector.insert.nxv4f32.v4f32"
9802+ link_name = "llvm.vector.insert.nxv4f32.v4f32"
98039803 )]
98049804 fn _svdupq_n_f32(op0: svfloat32_t, op1: float32x4_t, idx: i64) -> svfloat32_t;
98059805 }
@@ -9817,7 +9817,7 @@ pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t {
98179817 unsafe extern "unadjusted" {
98189818 #[cfg_attr(
98199819 target_arch = "aarch64",
9820-            link_name = "llvm.experimental.vector.insert.nxv4i32.v4i32"
9820+ link_name = "llvm.vector.insert.nxv4i32.v4i32"
98219821 )]
98229822 fn _svdupq_n_s32(op0: svint32_t, op1: int32x4_t, idx: i64) -> svint32_t;
98239823 }
@@ -9851,7 +9851,7 @@ pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t {
98519851 unsafe extern "unadjusted" {
98529852 #[cfg_attr(
98539853 target_arch = "aarch64",
9854-            link_name = "llvm.experimental.vector.insert.nxv2f64.v2f64"
9854+ link_name = "llvm.vector.insert.nxv2f64.v2f64"
98559855 )]
98569856 fn _svdupq_n_f64(op0: svfloat64_t, op1: float64x2_t, idx: i64) -> svfloat64_t;
98579857 }
@@ -9869,7 +9869,7 @@ pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t {
98699869 unsafe extern "unadjusted" {
98709870 #[cfg_attr(
98719871 target_arch = "aarch64",
9872-            link_name = "llvm.experimental.vector.insert.nxv2i64.v2i64"
9872+ link_name = "llvm.vector.insert.nxv2i64.v2i64"
98739873 )]
98749874 fn _svdupq_n_s64(op0: svint64_t, op1: int64x2_t, idx: i64) -> svint64_t;
98759875 }
@@ -9904,7 +9904,7 @@ pub fn svdupq_n_s16(
99049904 unsafe extern "unadjusted" {
99059905 #[cfg_attr(
99069906 target_arch = "aarch64",
9907-            link_name = "llvm.experimental.vector.insert.nxv8i16.v8i16"
9907+ link_name = "llvm.vector.insert.nxv8i16.v8i16"
99089908 )]
99099909 fn _svdupq_n_s16(op0: svint16_t, op1: int16x8_t, idx: i64) -> svint16_t;
99109910 }
@@ -9972,7 +9972,7 @@ pub fn svdupq_n_s8(
99729972 unsafe extern "unadjusted" {
99739973 #[cfg_attr(
99749974 target_arch = "aarch64",
9975-            link_name = "llvm.experimental.vector.insert.nxv16i8.v16i8"
9975+ link_name = "llvm.vector.insert.nxv16i8.v16i8"
99769976 )]
99779977 fn _svdupq_n_s8(op0: svint8_t, op1: int8x16_t, idx: i64) -> svint8_t;
99789978 }
@@ -35208,7 +35208,7 @@ pub fn svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t {
3520835208#[cfg_attr(test, assert_instr(rev))]
3520935209pub fn svrev_b8(op: svbool_t) -> svbool_t {
3521035210 unsafe extern "unadjusted" {
35211-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i1")]
35211+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse .nxv16i1")]
3521235212 fn _svrev_b8(op: svbool_t) -> svbool_t;
3521335213 }
3521435214 unsafe { _svrev_b8(op) }
@@ -35221,7 +35221,7 @@ pub fn svrev_b8(op: svbool_t) -> svbool_t {
3522135221#[cfg_attr(test, assert_instr(rev))]
3522235222pub fn svrev_b16(op: svbool_t) -> svbool_t {
3522335223 unsafe extern "unadjusted" {
35224-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i1")]
35224+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse .nxv8i1")]
3522535225 fn _svrev_b16(op: svbool8_t) -> svbool8_t;
3522635226 }
3522735227 unsafe { _svrev_b16(op.sve_into()).sve_into() }
@@ -35234,7 +35234,7 @@ pub fn svrev_b16(op: svbool_t) -> svbool_t {
3523435234#[cfg_attr(test, assert_instr(rev))]
3523535235pub fn svrev_b32(op: svbool_t) -> svbool_t {
3523635236 unsafe extern "unadjusted" {
35237-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i1")]
35237+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse .nxv4i1")]
3523835238 fn _svrev_b32(op: svbool4_t) -> svbool4_t;
3523935239 }
3524035240 unsafe { _svrev_b32(op.sve_into()).sve_into() }
@@ -35247,7 +35247,7 @@ pub fn svrev_b32(op: svbool_t) -> svbool_t {
3524735247#[cfg_attr(test, assert_instr(rev))]
3524835248pub fn svrev_b64(op: svbool_t) -> svbool_t {
3524935249 unsafe extern "unadjusted" {
35250-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i1")]
35250+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse .nxv2i1")]
3525135251 fn _svrev_b64(op: svbool2_t) -> svbool2_t;
3525235252 }
3525335253 unsafe { _svrev_b64(op.sve_into()).sve_into() }
@@ -35260,7 +35260,7 @@ pub fn svrev_b64(op: svbool_t) -> svbool_t {
3526035260#[cfg_attr(test, assert_instr(rev))]
3526135261pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t {
3526235262 unsafe extern "unadjusted" {
35263-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4f32")]
35263+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse .nxv4f32")]
3526435264 fn _svrev_f32(op: svfloat32_t) -> svfloat32_t;
3526535265 }
3526635266 unsafe { _svrev_f32(op) }
@@ -35273,7 +35273,7 @@ pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t {
3527335273#[cfg_attr(test, assert_instr(rev))]
3527435274pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t {
3527535275 unsafe extern "unadjusted" {
35276-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2f64")]
35276+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse .nxv2f64")]
3527735277 fn _svrev_f64(op: svfloat64_t) -> svfloat64_t;
3527835278 }
3527935279 unsafe { _svrev_f64(op) }
@@ -35286,7 +35286,7 @@ pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t {
3528635286#[cfg_attr(test, assert_instr(rev))]
3528735287pub fn svrev_s8(op: svint8_t) -> svint8_t {
3528835288 unsafe extern "unadjusted" {
35289-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i8")]
35289+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse .nxv16i8")]
3529035290 fn _svrev_s8(op: svint8_t) -> svint8_t;
3529135291 }
3529235292 unsafe { _svrev_s8(op) }
@@ -35299,7 +35299,7 @@ pub fn svrev_s8(op: svint8_t) -> svint8_t {
3529935299#[cfg_attr(test, assert_instr(rev))]
3530035300pub fn svrev_s16(op: svint16_t) -> svint16_t {
3530135301 unsafe extern "unadjusted" {
35302-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i16")]
35302+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse .nxv8i16")]
3530335303 fn _svrev_s16(op: svint16_t) -> svint16_t;
3530435304 }
3530535305 unsafe { _svrev_s16(op) }
@@ -35312,7 +35312,7 @@ pub fn svrev_s16(op: svint16_t) -> svint16_t {
3531235312#[cfg_attr(test, assert_instr(rev))]
3531335313pub fn svrev_s32(op: svint32_t) -> svint32_t {
3531435314 unsafe extern "unadjusted" {
35315-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i32")]
35315+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse .nxv4i32")]
3531635316 fn _svrev_s32(op: svint32_t) -> svint32_t;
3531735317 }
3531835318 unsafe { _svrev_s32(op) }
@@ -35325,7 +35325,7 @@ pub fn svrev_s32(op: svint32_t) -> svint32_t {
3532535325#[cfg_attr(test, assert_instr(rev))]
3532635326pub fn svrev_s64(op: svint64_t) -> svint64_t {
3532735327 unsafe extern "unadjusted" {
35328-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i64")]
35328+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse .nxv2i64")]
3532935329 fn _svrev_s64(op: svint64_t) -> svint64_t;
3533035330 }
3533135331 unsafe { _svrev_s64(op) }
0 commit comments