diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 7726ad309d8aff867a6de43d345efa7817107f91..0a83d99330deb2f4bde03055dcc94474b6f7d505 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -2026,6 +2026,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
   // The followings are intrinsics with scalar results generated AcrossVec vectors
   case AArch64::BI__builtin_neon_vaddlv_s8:
   case AArch64::BI__builtin_neon_vaddlv_s16:
+  case AArch64::BI__builtin_neon_vaddlv_s32:
   case AArch64::BI__builtin_neon_vaddlvq_s8:
   case AArch64::BI__builtin_neon_vaddlvq_s16:
   case AArch64::BI__builtin_neon_vaddlvq_s32:
@@ -2033,6 +2034,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
     AcrossVec = true; ExtendEle = true; s = "saddlv"; break;
   case AArch64::BI__builtin_neon_vaddlv_u8:
   case AArch64::BI__builtin_neon_vaddlv_u16:
+  case AArch64::BI__builtin_neon_vaddlv_u32:
   case AArch64::BI__builtin_neon_vaddlvq_u8:
   case AArch64::BI__builtin_neon_vaddlvq_u16:
   case AArch64::BI__builtin_neon_vaddlvq_u32:
@@ -2040,6 +2042,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
     AcrossVec = true; ExtendEle = true; s = "uaddlv"; break;
   case AArch64::BI__builtin_neon_vmaxv_s8:
   case AArch64::BI__builtin_neon_vmaxv_s16:
+  case AArch64::BI__builtin_neon_vmaxv_s32:
   case AArch64::BI__builtin_neon_vmaxvq_s8:
   case AArch64::BI__builtin_neon_vmaxvq_s16:
   case AArch64::BI__builtin_neon_vmaxvq_s32:
@@ -2047,6 +2050,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
     AcrossVec = true; ExtendEle = false; s = "smaxv"; break;
   case AArch64::BI__builtin_neon_vmaxv_u8:
   case AArch64::BI__builtin_neon_vmaxv_u16:
+  case AArch64::BI__builtin_neon_vmaxv_u32:
   case AArch64::BI__builtin_neon_vmaxvq_u8:
   case AArch64::BI__builtin_neon_vmaxvq_u16:
   case AArch64::BI__builtin_neon_vmaxvq_u32:
@@ -2054,6 +2058,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
     AcrossVec = true; ExtendEle = false; s = "umaxv"; break;
   case AArch64::BI__builtin_neon_vminv_s8:
   case AArch64::BI__builtin_neon_vminv_s16:
+  case AArch64::BI__builtin_neon_vminv_s32:
   case AArch64::BI__builtin_neon_vminvq_s8:
   case AArch64::BI__builtin_neon_vminvq_s16:
   case AArch64::BI__builtin_neon_vminvq_s32:
@@ -2061,6 +2066,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
     AcrossVec = true; ExtendEle = false; s = "sminv"; break;
   case AArch64::BI__builtin_neon_vminv_u8:
   case AArch64::BI__builtin_neon_vminv_u16:
+  case AArch64::BI__builtin_neon_vminv_u32:
   case AArch64::BI__builtin_neon_vminvq_u8:
   case AArch64::BI__builtin_neon_vminvq_u16:
   case AArch64::BI__builtin_neon_vminvq_u32:
@@ -2068,12 +2074,14 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
     AcrossVec = true; ExtendEle = false; s = "uminv"; break;
   case AArch64::BI__builtin_neon_vaddv_s8:
   case AArch64::BI__builtin_neon_vaddv_s16:
+  case AArch64::BI__builtin_neon_vaddv_s32:
   case AArch64::BI__builtin_neon_vaddvq_s8:
   case AArch64::BI__builtin_neon_vaddvq_s16:
   case AArch64::BI__builtin_neon_vaddvq_s32:
   case AArch64::BI__builtin_neon_vaddvq_s64:
   case AArch64::BI__builtin_neon_vaddv_u8:
   case AArch64::BI__builtin_neon_vaddv_u16:
+  case AArch64::BI__builtin_neon_vaddv_u32:
   case AArch64::BI__builtin_neon_vaddvq_u8:
   case AArch64::BI__builtin_neon_vaddvq_u16:
   case AArch64::BI__builtin_neon_vaddvq_u32:
diff --git a/test/CodeGen/aarch64-neon-intrinsics.c b/test/CodeGen/aarch64-neon-intrinsics.c
index 6e9b7f10b3620f97a317808932f42e914e0088df..48bb008ff2031505af0fc8fcc35f9a2cbf25a1fe 100644
--- a/test/CodeGen/aarch64-neon-intrinsics.c
+++ b/test/CodeGen/aarch64-neon-intrinsics.c
@@ -11723,3 +11723,51 @@ float64x1_t test_vrsqrts_f64(float64x1_t a, float64x1_t b) {
   return vrsqrts_f64(a, b);
   // CHECK: frsqrts d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
 }
+
+int32_t test_vminv_s32(int32x2_t a) {
+  // CHECK-LABEL: test_vminv_s32
+  return vminv_s32(a);
+  // CHECK: sminp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+}
+
+uint32_t test_vminv_u32(uint32x2_t a) {
+  // CHECK-LABEL: test_vminv_u32
+  return vminv_u32(a);
+  // CHECK: uminp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+}
+
+int32_t test_vmaxv_s32(int32x2_t a) {
+  // CHECK-LABEL: test_vmaxv_s32
+  return vmaxv_s32(a);
+  // CHECK: smaxp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+}
+
+uint32_t test_vmaxv_u32(uint32x2_t a) {
+  // CHECK-LABEL: test_vmaxv_u32
+  return vmaxv_u32(a);
+  // CHECK: umaxp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+}
+
+int32_t test_vaddv_s32(int32x2_t a) {
+  // CHECK-LABEL: test_vaddv_s32
+  return vaddv_s32(a);
+  // CHECK: addp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+}
+
+uint32_t test_vaddv_u32(uint32x2_t a) {
+  // CHECK-LABEL: test_vaddv_u32
+  return vaddv_u32(a);
+  // CHECK: addp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+}
+
+int64_t test_vaddlv_s32(int32x2_t a) {
+  // CHECK-LABEL: test_vaddlv_s32
+  return vaddlv_s32(a);
+  // CHECK: saddlp {{v[0-9]+}}.1d, {{v[0-9]+}}.2s
+}
+
+uint64_t test_vaddlv_u32(uint32x2_t a) {
+  // CHECK-LABEL: test_vaddlv_u32
+  return vaddlv_u32(a);
+  // CHECK: uaddlp {{v[0-9]+}}.1d, {{v[0-9]+}}.2s
+}