From: Sanjay Patel
Date: Thu, 6 Jun 2019 13:18:20 +0000 (+0000)
Subject: [InstCombine] add tests for loads of bitcasted vector pointer; NFC
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=3f8e6ddd08189b6966c399c90f7964e4bcf72a99;p=llvm

[InstCombine] add tests for loads of bitcasted vector pointer; NFC

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@362703 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/Transforms/InstCombine/load-bitcast-vec.ll b/test/Transforms/InstCombine/load-bitcast-vec.ll
new file mode 100644
index 00000000000..e6540ee7061
--- /dev/null
+++ b/test/Transforms/InstCombine/load-bitcast-vec.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+define float @matching_scalar(<4 x float>* dereferenceable(16) %p) {
+; CHECK-LABEL: @matching_scalar(
+; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
+; CHECK-NEXT:    [[R:%.*]] = load float, float* [[BC]], align 16
+; CHECK-NEXT:    ret float [[R]]
+;
+  %bc = bitcast <4 x float>* %p to float*
+  %r = load float, float* %bc, align 16
+  ret float %r
+}
+
+define i32 @nonmatching_scalar(<4 x float>* dereferenceable(16) %p) {
+; CHECK-LABEL: @nonmatching_scalar(
+; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i32*
+; CHECK-NEXT:    [[R:%.*]] = load i32, i32* [[BC]], align 16
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %bc = bitcast <4 x float>* %p to i32*
+  %r = load i32, i32* %bc, align 16
+  ret i32 %r
+}
+
+define i64 @larger_scalar(<4 x float>* dereferenceable(16) %p) {
+; CHECK-LABEL: @larger_scalar(
+; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i64*
+; CHECK-NEXT:    [[R:%.*]] = load i64, i64* [[BC]], align 16
+; CHECK-NEXT:    ret i64 [[R]]
+;
+  %bc = bitcast <4 x float>* %p to i64*
+  %r = load i64, i64* %bc, align 16
+  ret i64 %r
+}
+
+define i8 @smaller_scalar(<4 x float>* dereferenceable(16) %p) {
+; CHECK-LABEL: @smaller_scalar(
+; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i8*
+; CHECK-NEXT:    [[R:%.*]] = load i8, i8* [[BC]], align 16
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %bc = bitcast <4 x float>* %p to i8*
+  %r = load i8, i8* %bc, align 16
+  ret i8 %r
+}
+
+define i8 @smaller_scalar_less_aligned(<4 x float>* dereferenceable(16) %p) {
+; CHECK-LABEL: @smaller_scalar_less_aligned(
+; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i8*
+; CHECK-NEXT:    [[R:%.*]] = load i8, i8* [[BC]], align 4
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %bc = bitcast <4 x float>* %p to i8*
+  %r = load i8, i8* %bc, align 4
+  ret i8 %r
+}
+
+define float @matching_scalar_small_deref(<4 x float>* dereferenceable(15) %p) {
+; CHECK-LABEL: @matching_scalar_small_deref(
+; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
+; CHECK-NEXT:    [[R:%.*]] = load float, float* [[BC]], align 16
+; CHECK-NEXT:    ret float [[R]]
+;
+  %bc = bitcast <4 x float>* %p to float*
+  %r = load float, float* %bc, align 16
+  ret float %r
+}
+
+define float @matching_scalar_volatile(<4 x float>* dereferenceable(16) %p) {
+; CHECK-LABEL: @matching_scalar_volatile(
+; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
+; CHECK-NEXT:    [[R:%.*]] = load volatile float, float* [[BC]], align 16
+; CHECK-NEXT:    ret float [[R]]
+;
+  %bc = bitcast <4 x float>* %p to float*
+  %r = load volatile float, float* %bc, align 16
+  ret float %r
+}
+
+define float @nonvector(double* dereferenceable(16) %p) {
+; CHECK-LABEL: @nonvector(
+; CHECK-NEXT:    [[BC:%.*]] = bitcast double* [[P:%.*]] to float*
+; CHECK-NEXT:    [[R:%.*]] = load float, float* [[BC]], align 16
+; CHECK-NEXT:    ret float [[R]]
+;
+  %bc = bitcast double* %p to float*
+  %r = load float, float* %bc, align 16
+  ret float %r
+}