From 1d7abba8db3f6a6e59b1188c6cb420a1a055ef88 Mon Sep 17 00:00:00 2001
From: David Blaikie
Date: Fri, 27 Feb 2015 21:19:58 +0000
Subject: [PATCH] Update Clang tests to handle explicitly typed load changes in LLVM.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@230795 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CXX/expr/p10-0x.cpp | 22 +-
 test/CXX/special/class.copy/p15-inclass.cpp | 4 +-
 .../2010-07-14-overconservative-align.c | 2 +-
 test/CodeGen/24-bit.c | 2 +-
 test/CodeGen/aarch64-inline-asm.c | 8 +-
 test/CodeGen/aarch64-varargs.c | 50 ++--
 test/CodeGen/address-space-field1.c | 12 +-
 test/CodeGen/address-space.c | 10 +-
 test/CodeGen/alignment.c | 2 +-
 test/CodeGen/annotations-builtin.c | 2 +-
 test/CodeGen/arm-abi-vector.c | 18 +-
 test/CodeGen/arm-arguments.c | 6 +-
 test/CodeGen/arm-atomics-m.c | 2 +-
 test/CodeGen/arm-atomics.c | 4 +-
 test/CodeGen/arm-clear.c | 4 +-
 test/CodeGen/arm-vector-align.c | 2 +-
 test/CodeGen/arm64-abi-vector.c | 18 +-
 test/CodeGen/arm64-arguments.c | 58 ++--
 test/CodeGen/asm-inout.c | 2 +-
 test/CodeGen/asm-reg-var-local.c | 8 +-
 test/CodeGen/atomic-arm64.c | 8 +-
 test/CodeGen/atomic-ops.c | 54 ++--
 test/CodeGen/atomic_ops.c | 2 +-
 test/CodeGen/atomics-inlining.c | 28 +-
 test/CodeGen/attributes.c | 2 +-
 test/CodeGen/avx-builtins.c | 6 +-
 test/CodeGen/avx512f-builtins.c | 4 +-
 test/CodeGen/big-atomic-ops.c | 10 +-
 test/CodeGen/block-byref-aggr.c | 6 +-
 test/CodeGen/blocks-seq.c | 4 +-
 test/CodeGen/blocks.c | 2 +-
 test/CodeGen/bool_test.c | 2 +-
 test/CodeGen/builtin-assume.c | 4 +-
 test/CodeGen/c-strings.c | 2 +-
 test/CodeGen/c11atomics-ios.c | 48 +--
 test/CodeGen/c11atomics.c | 40 +--
 test/CodeGen/capture-complex-expr-in-block.c | 4 +-
 test/CodeGen/captured-statements-nested.c | 44 +--
 test/CodeGen/captured-statements.c | 12 +-
 test/CodeGen/complex-convert.c | 256 ++++++++--------
 test/CodeGen/compound-literal.c | 12 +-
 test/CodeGen/exceptions-seh-finally.c | 16 +-
 test/CodeGen/exceptions-seh-leave.c | 4 +-
 test/CodeGen/exceptions-seh.c | 16 +-
 test/CodeGen/exprs.c | 4 +-
 test/CodeGen/ext-vector-member-alignment.c | 8 +-
 test/CodeGen/extern-inline.c | 6 +-
 test/CodeGen/mips-varargs.c | 44 +--
 test/CodeGen/ms-anonymous-struct.c | 36 +--
 test/CodeGen/ms-inline-asm.c | 2 +-
 test/CodeGen/ms-inline-asm.cpp | 2 +-
 test/CodeGen/ms-intrinsics.c | 2 +-
 test/CodeGen/named_reg_global.c | 2 +-
 test/CodeGen/object-size.c | 2 +-
 test/CodeGen/packed-arrays.c | 30 +-
 test/CodeGen/packed-nest-unpacked.c | 4 +-
 test/CodeGen/packed-structure.c | 14 +-
 test/CodeGen/ppc-varargs-struct.c | 22 +-
 test/CodeGen/ppc64-align-long-double.c | 2 +-
 test/CodeGen/ppc64-align-struct.c | 12 +-
 test/CodeGen/ppc64-complex-parms.c | 32 +-
 test/CodeGen/ppc64-struct-onefloat.c | 8 +-
 test/CodeGen/ppc64-varargs-complex.c | 24 +-
 test/CodeGen/ppc64le-aggregates.c | 62 ++--
 test/CodeGen/ppc64le-varargs-complex.c | 24 +-
 test/CodeGen/pr12251.c | 2 +-
 test/CodeGen/redefine_extname.c | 2 +-
 test/CodeGen/sparcv9-abi.c | 16 +-
 test/CodeGen/sse-builtins.c | 20 +-
 test/CodeGen/systemz-inline-asm.c | 4 +-
 test/CodeGen/tbaa.cpp | 8 +-
 test/CodeGen/trapv.c | 12 +-
 test/CodeGen/unsigned-overflow.c | 32 +-
 test/CodeGen/unsigned-promotion.c | 48 +--
 test/CodeGen/variadic-gpfp-x86.c | 2 +-
 test/CodeGen/vla.c | 34 +--
 test/CodeGen/volatile-1.c | 44 +--
 test/CodeGen/volatile-2.c | 8 +-
 test/CodeGen/volatile-complex.c | 32 +-
 test/CodeGen/volatile.c | 116 ++++----
 test/CodeGen/x86-atomic-long_double.c | 160 +++++-----
 test/CodeGen/x86_64-arguments.c | 10 +-
 test/CodeGen/xcore-abi.c | 32 +-
 test/CodeGenCUDA/address-spaces.cu | 12 +-
 test/CodeGenCXX/2009-12-23-MissingSext.cpp | 8 +-
 .../CodeGenCXX/align-avx-complete-objects.cpp | 24 +-
 .../anonymous-union-member-initializer.cpp | 10 +-
 .../CodeGenCXX/apple-kext-indirect-call-2.cpp | 8 +-
 .../apple-kext-indirect-virtual-dtor-call.cpp | 4 +-
 test/CodeGenCXX/arm-vaarg.cpp | 6 +-
 test/CodeGenCXX/arm.cpp | 34 +--
 test/CodeGenCXX/arm64-constructor-return.cpp | 2 +-
 test/CodeGenCXX/arm64-empty-struct.cpp | 4 +-
 test/CodeGenCXX/bitfield.cpp | 80 ++---
 test/CodeGenCXX/blocks-cxx11.cpp | 10 +-
 test/CodeGenCXX/blocks.cpp | 6 +-
 test/CodeGenCXX/captured-statements.cpp | 4 +-
 test/CodeGenCXX/catch-undef-behavior.cpp | 10 +-
 test/CodeGenCXX/compound-literals.cpp | 6 +-
 test/CodeGenCXX/condition.cpp | 8 +-
 test/CodeGenCXX/conditional-gnu-ext.cpp | 4 +-
 test/CodeGenCXX/const-init-cxx11.cpp | 2 +-
 .../constructor-destructor-return-this.cpp | 2 +-
 test/CodeGenCXX/constructor-init.cpp | 4 +-
 test/CodeGenCXX/constructors.cpp | 2 +-
 .../CodeGenCXX/copy-constructor-synthesis.cpp | 8 +-
 .../cxx0x-initializer-stdinitializerlist.cpp | 2 +-
 .../cxx11-initializer-array-new.cpp | 4 +-
 .../cxx11-thread-local-reference.cpp | 2 +-
 test/CodeGenCXX/cxx11-thread-local.cpp | 18 +-
 test/CodeGenCXX/cxx1y-init-captures.cpp | 20 +-
 .../cxx1y-initializer-aggregate.cpp | 4 +-
 test/CodeGenCXX/deferred-global-init.cpp | 2 +-
 test/CodeGenCXX/delete-two-arg.cpp | 4 +-
 test/CodeGenCXX/delete.cpp | 10 +-
 test/CodeGenCXX/derived-to-base-conv.cpp | 2 +-
 ...ived-to-virtual-base-class-calls-final.cpp | 2 +-
 test/CodeGenCXX/destructors.cpp | 4 +-
 test/CodeGenCXX/eh.cpp | 20 +-
 test/CodeGenCXX/exceptions.cpp | 28 +-
 test/CodeGenCXX/global-init.cpp | 6 +-
 test/CodeGenCXX/homogeneous-aggregates.cpp | 2 +-
 test/CodeGenCXX/lambda-expressions.cpp | 22 +-
 test/CodeGenCXX/lvalue-bitcasts.cpp | 90 +++---
 test/CodeGenCXX/m64-ptr.cpp | 2 +-
 test/CodeGenCXX/mangle.cpp | 2 +-
 test/CodeGenCXX/member-expressions.cpp | 2 +-
 test/CodeGenCXX/member-function-pointers.cpp | 4 +-
 test/CodeGenCXX/microsoft-abi-byval-sret.cpp | 6 +-
 .../CodeGenCXX/microsoft-abi-byval-thunks.cpp | 4 +-
 .../CodeGenCXX/microsoft-abi-dynamic-cast.cpp | 24 +-
 test/CodeGenCXX/microsoft-abi-exceptions.cpp | 4 +-
 .../microsoft-abi-member-pointers.cpp | 50 ++--
 ...ft-abi-multiple-nonvirtual-inheritance.cpp | 26 +-
 .../microsoft-abi-sret-and-byval.cpp | 2 +-
 .../microsoft-abi-static-initializers.cpp | 8 +-
 test/CodeGenCXX/microsoft-abi-structors.cpp | 32 +-
 test/CodeGenCXX/microsoft-abi-thunks.cpp | 4 +-
 test/CodeGenCXX/microsoft-abi-typeid.cpp | 4 +-
 ...soft-abi-virtual-inheritance-vtordisps.cpp | 16 +-
 .../microsoft-abi-virtual-inheritance.cpp | 50 ++--
 .../microsoft-abi-virtual-member-pointers.cpp | 20 +-
 test/CodeGenCXX/ms-inline-asm-return.cpp | 6 +-
 test/CodeGenCXX/new-overflow.cpp | 6 +-
 test/CodeGenCXX/new.cpp | 12 +-
 test/CodeGenCXX/noexcept.cpp | 2 +-
 test/CodeGenCXX/partial-destruction.cpp | 2 +-
 test/CodeGenCXX/pod-member-memcpys.cpp | 4 +-
 test/CodeGenCXX/pointers-to-data-members.cpp | 4 +-
 test/CodeGenCXX/pr12251.cpp | 32 +-
 test/CodeGenCXX/pr20897.cpp | 12 +-
 test/CodeGenCXX/reference-cast.cpp | 6 +-
 test/CodeGenCXX/references.cpp | 12 +-
 test/CodeGenCXX/rvalue-references.cpp | 2 +-
 test/CodeGenCXX/static-data-member.cpp | 2 +-
 test/CodeGenCXX/static-init-pnacl.cpp | 2 +-
 test/CodeGenCXX/static-init.cpp | 8 +-
 .../static-local-in-local-class.cpp | 12 +-
 test/CodeGenCXX/temporaries.cpp | 6 +-
 test/CodeGenCXX/throw-expressions.cpp | 4 +-
 test/CodeGenCXX/thunks.cpp | 2 +-
 test/CodeGenCXX/uncopyable-args.cpp | 10 +-
 test/CodeGenCXX/unknown-anytype.cpp | 8 +-
 test/CodeGenCXX/vararg-non-pod-ms-compat.cpp | 2 +-
 test/CodeGenCXX/varargs.cpp | 2 +-
 test/CodeGenCXX/virtual-base-cast.cpp | 22 +-
 test/CodeGenCXX/vla-lambda-capturing.cpp | 38 +--
 test/CodeGenCXX/vla.cpp | 12 +-
 test/CodeGenCXX/volatile-1.cpp | 52 ++--
 test/CodeGenCXX/volatile.cpp | 4 +-
 .../CodeGenCXX/windows-itanium-exceptions.cpp | 2 +-
 test/CodeGenObjC/arc-blocks.m | 112 +++----
 test/CodeGenObjC/arc-foreach.m | 24 +-
 test/CodeGenObjC/arc-literals.m | 26 +-
 .../arc-loadweakretained-release.m | 8 +-
 test/CodeGenObjC/arc-precise-lifetime.m | 38 +--
 test/CodeGenObjC/arc-property.m | 34 +--
 test/CodeGenObjC/arc-related-result-type.m | 2 +-
 test/CodeGenObjC/arc-ternary-op.m | 26 +-
 test/CodeGenObjC/arc-unopt.m | 6 +-
 test/CodeGenObjC/arc-unoptimized-byref-var.m | 2 +-
 test/CodeGenObjC/arc-weak-property.m | 14 +-
 test/CodeGenObjC/arc.m | 278 +++++++++---------
 test/CodeGenObjC/arm64-int32-ivar.m | 2 +-
 test/CodeGenObjC/atomic-aggregate-property.m | 2 +-
 test/CodeGenObjC/autorelease.m | 4 +-
 test/CodeGenObjC/bitfield-access.m | 4 +-
 test/CodeGenObjC/block-6.m | 2 +-
 test/CodeGenObjC/blocks.m | 16 +-
 test/CodeGenObjC/boxing.m | 22 +-
 test/CodeGenObjC/category-super-class-meth.m | 4 +-
 .../debug-info-block-captured-self.m | 2 +-
 test/CodeGenObjC/exceptions.m | 4 +-
 test/CodeGenObjC/gc.m | 2 +-
 test/CodeGenObjC/id-isa-codegen.m | 4 +-
 .../CodeGenObjC/ivar-base-as-invariant-load.m | 6 +-
 test/CodeGenObjC/ivar-invariant.m | 8 +-
 test/CodeGenObjC/messages-2.m | 4 +-
 test/CodeGenObjC/ns_consume_null_check.m | 14 +-
 test/CodeGenObjC/objc-asm-attribute-test.m | 2 +-
 .../objc-container-subscripting-1.m | 16 +-
 test/CodeGenObjC/optimize-ivar-offset-load.m | 12 +-
 test/CodeGenObjC/property-array-type.m | 2 +-
 test/CodeGenObjC/property-type-mismatch.m | 2 +-
 test/CodeGenObjC/property.m | 26 +-
 test/CodeGenObjC/selector-ref-invariance.m | 2 +-
 test/CodeGenObjC/super-message-fragileabi.m | 2 +-
 test/CodeGenObjC/synchronized.m | 4 +-
 test/CodeGenObjC/tentative-cfconstantstring.m | 4 +-
 test/CodeGenObjCXX/arc-cxx11-member-init.mm | 4 +-
 test/CodeGenObjCXX/arc-exceptions.mm | 2 +-
 test/CodeGenObjCXX/arc-move.mm | 18 +-
 test/CodeGenObjCXX/arc-new-delete.mm | 6 +-
 test/CodeGenObjCXX/arc-pseudo-destructors.mm | 8 +-
 test/CodeGenObjCXX/arc-references.mm | 2 +-
 .../arc-special-member-functions.mm | 4 +-
 test/CodeGenObjCXX/arc.mm | 22 +-
 test/CodeGenObjCXX/exceptions-legacy.mm | 8 +-
 test/CodeGenObjCXX/gc.mm | 4 +-
 test/CodeGenObjCXX/lambda-expressions.mm | 2 +-
 test/CodeGenObjCXX/lvalue-reference-getter.mm | 4 +-
 test/CodeGenObjCXX/message-reference.mm | 2 +-
 test/CodeGenObjCXX/message.mm | 2 +-
 test/CodeGenObjCXX/property-lvalue-capture.mm | 8 +-
 .../property-object-reference-1.mm | 2 +-
 .../property-object-reference-2.mm | 8 +-
 .../property-object-reference.mm | 4 +-
 test/CodeGenObjCXX/property-reference.mm | 10 +-
 test/Modules/templates.mm | 4 +-
 test/OpenMP/atomic_read_codegen.c | 104 +++----
 test/OpenMP/atomic_write_codegen.c | 162 +++++-----
 test/OpenMP/for_codegen.cpp | 48 +--
 test/OpenMP/parallel_codegen.cpp | 28 +-
 test/OpenMP/parallel_firstprivate_codegen.cpp | 44 +--
 test/OpenMP/parallel_if_codegen.cpp | 6 +-
 test/OpenMP/parallel_num_threads_codegen.cpp | 2 +-
 test/OpenMP/parallel_private_codegen.cpp | 12 +-
 test/OpenMP/simd_codegen.cpp | 84 +++---
 test/OpenMP/threadprivate_codegen.cpp | 178 +++++------
 test/SemaCXX/linkage.cpp | 2 +-
 test/SemaObjC/debugger-support.m | 4 +-
 241 files changed, 2167 insertions(+), 2167 deletions(-)

diff --git
a/test/CXX/expr/p10-0x.cpp b/test/CXX/expr/p10-0x.cpp index 564df8843a..a42986c85f 100644 --- a/test/CXX/expr/p10-0x.cpp +++ b/test/CXX/expr/p10-0x.cpp @@ -10,26 +10,26 @@ volatile int& refcall(); // CHECK: define void @_Z2f1PViPV1S void f1(volatile int *x, volatile S* s) { // We should perform the load in these cases. - // CHECK: load volatile i32* + // CHECK: load volatile i32, i32* (*x); - // CHECK: load volatile i32* + // CHECK: load volatile i32, i32* __extension__ g1; - // CHECK: load volatile i32* + // CHECK: load volatile i32, i32* s->a; - // CHECK: load volatile i32* + // CHECK: load volatile i32, i32* g2.a; - // CHECK: load volatile i32* + // CHECK: load volatile i32, i32* s->*(&S::a); - // CHECK: load volatile i32* - // CHECK: load volatile i32* + // CHECK: load volatile i32, i32* + // CHECK: load volatile i32, i32* x[0], 1 ? x[0] : *x; - // CHECK: load volatile i32* - // CHECK: load volatile i32* - // CHECK: load volatile i32* + // CHECK: load volatile i32, i32* + // CHECK: load volatile i32, i32* + // CHECK: load volatile i32, i32* *x ?: *x; - // CHECK: load volatile i32* + // CHECK: load volatile i32, i32* ({ *x; }); // CHECK-NOT: load volatile diff --git a/test/CXX/special/class.copy/p15-inclass.cpp b/test/CXX/special/class.copy/p15-inclass.cpp index 30872cc309..7e716fd6c9 100644 --- a/test/CXX/special/class.copy/p15-inclass.cpp +++ b/test/CXX/special/class.copy/p15-inclass.cpp @@ -24,7 +24,7 @@ namespace PR11418 { // CHECK-NOT: 17 // CHECK: call void @_ZN7PR114186NonPODC1ERKS0_ // CHECK-NOT: 17 - // CHECK: load i32* + // CHECK: load i32, i32* // CHECK-NOT: 17 // CHECK: store i32 // CHECK-NOT: 17 @@ -34,7 +34,7 @@ namespace PR11418 { // CHECK-NOT: 17 // CHECK: call void @_ZN7PR114186NonPODC1EOS0_ // CHECK-NOT: 17 - // CHECK: load i32* + // CHECK: load i32, i32* // CHECK-NOT: 17 // CHECK: store i32 // CHECK-NOT: 17 diff --git a/test/CodeGen/2010-07-14-overconservative-align.c b/test/CodeGen/2010-07-14-overconservative-align.c index 5c8c056451..90e694d47b 100644 --- a/test/CodeGen/2010-07-14-overconservative-align.c +++ b/test/CodeGen/2010-07-14-overconservative-align.c @@ -9,6 +9,6 @@ struct s { void func (struct s *s) { - // CHECK: load %struct.s**{{.*}}align 8 + // CHECK: load %struct.s*, %struct.s**{{.*}}align 8 s->word = 0; } diff --git a/test/CodeGen/24-bit.c b/test/CodeGen/24-bit.c index 9dd0157fd3..ad3076a30a 100644 --- a/test/CodeGen/24-bit.c +++ b/test/CodeGen/24-bit.c @@ -9,6 +9,6 @@ static union ibtt2 void callee_ibt0f(union ibtt2 ibtp5); void test(void) { -// CHECK: = load i32* +// CHECK: = load i32, i32* callee_ibt0f(ibt15); } diff --git a/test/CodeGen/aarch64-inline-asm.c b/test/CodeGen/aarch64-inline-asm.c index c7ce3758fe..a1078f1bab 100644 --- a/test/CodeGen/aarch64-inline-asm.c +++ b/test/CodeGen/aarch64-inline-asm.c @@ -8,11 +8,11 @@ long var; void test_generic_constraints(int var32, long var64) { asm("add %0, %1, %1" : "=r"(var32) : "0"(var32)); -// CHECK: [[R32_ARG:%[a-zA-Z0-9]+]] = load i32* +// CHECK: [[R32_ARG:%[a-zA-Z0-9]+]] = load i32, i32* // CHECK: call i32 asm "add $0, $1, $1", "=r,0"(i32 [[R32_ARG]]) asm("add %0, %1, %1" : "=r"(var64) : "0"(var64)); -// CHECK: [[R32_ARG:%[a-zA-Z0-9]+]] = load i64* +// CHECK: [[R32_ARG:%[a-zA-Z0-9]+]] = load i64, i64* // CHECK: call i64 asm "add $0, $1, $1", "=r,0"(i64 [[R32_ARG]]) asm("ldr %0, %1" : "=r"(var32) : "m"(var)); @@ -25,11 +25,11 @@ float f; double d; void test_constraint_w() { asm("fadd %s0, %s1, %s1" : "=w"(f) : "w"(f)); -// CHECK: [[FLT_ARG:%[a-zA-Z_0-9]+]] = load float* @f +// CHECK: 
[[FLT_ARG:%[a-zA-Z_0-9]+]] = load float, float* @f // CHECK: call float asm "fadd ${0:s}, ${1:s}, ${1:s}", "=w,w"(float [[FLT_ARG]]) asm("fadd %d0, %d1, %d1" : "=w"(d) : "w"(d)); -// CHECK: [[DBL_ARG:%[a-zA-Z_0-9]+]] = load double* @d +// CHECK: [[DBL_ARG:%[a-zA-Z_0-9]+]] = load double, double* @d // CHECK: call double asm "fadd ${0:d}, ${1:d}, ${1:d}", "=w,w"(double [[DBL_ARG]]) } diff --git a/test/CodeGen/aarch64-varargs.c b/test/CodeGen/aarch64-varargs.c index ca0bbca01e..b1e74e44fc 100644 --- a/test/CodeGen/aarch64-varargs.c +++ b/test/CodeGen/aarch64-varargs.c @@ -11,7 +11,7 @@ va_list the_list; int simple_int(void) { // CHECK-LABEL: define i32 @simple_int return va_arg(the_list, int); -// CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 3) +// CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 3) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[GR_OFFS]], 0 // CHECK: br i1 [[EARLY_ONSTACK]], label %[[VAARG_ON_STACK:[a-z_.0-9]+]], label %[[VAARG_MAYBE_REG:[a-z_.0-9]+]] @@ -22,7 +22,7 @@ int simple_int(void) { // CHECK: br i1 [[INREG]], label %[[VAARG_IN_REG:[a-z_.0-9]+]], label %[[VAARG_ON_STACK]] // CHECK: [[VAARG_IN_REG]] -// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 1) +// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 1) // CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[GR_OFFS]] // CHECK-BE: [[REG_ADDR_VAL:%[0-9]+]] = ptrtoint i8* [[REG_ADDR]] to i64 // CHECK-BE: [[REG_ADDR_VAL_ALIGNED:%[a-z_0-9]*]] = add i64 [[REG_ADDR_VAL]], 4 @@ -31,7 +31,7 @@ int simple_int(void) { // CHECK: br label %[[VAARG_END:[a-z._0-9]+]] // CHECK: [[VAARG_ON_STACK]] -// CHECK: [[STACK:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) +// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) // CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[STACK]], i32 8 // CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) // CHECK-BE: [[STACK_VAL:%[0-9]+]] = ptrtoint i8* [[STACK]] to i64 @@ -42,14 +42,14 @@ int simple_int(void) { // CHECK: [[VAARG_END]] // CHECK: [[ADDR:%[a-z._0-9]+]] = phi i32* [ [[FROMREG_ADDR]], %[[VAARG_IN_REG]] ], [ [[FROMSTACK_ADDR]], %[[VAARG_ON_STACK]] ] -// CHECK: [[RESULT:%[a-z_0-9]+]] = load i32* [[ADDR]] +// CHECK: [[RESULT:%[a-z_0-9]+]] = load i32, i32* [[ADDR]] // CHECK: ret i32 [[RESULT]] } __int128 aligned_int(void) { // CHECK-LABEL: define i128 @aligned_int return va_arg(the_list, __int128); -// CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 3) +// CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 3) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[GR_OFFS]], 0 // CHECK: br i1 [[EARLY_ONSTACK]], label %[[VAARG_ON_STACK:[a-z_.0-9]+]], label %[[VAARG_MAYBE_REG:[a-z_.0-9]+]] @@ -62,13 +62,13 @@ __int128 aligned_int(void) { // CHECK: br i1 [[INREG]], label %[[VAARG_IN_REG:[a-z_.0-9]+]], label %[[VAARG_ON_STACK]] // CHECK: [[VAARG_IN_REG]] -// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 1) +// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** 
getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 1) // CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[ALIGNED_REGOFFS]] // CHECK: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR]] to i128* // CHECK: br label %[[VAARG_END:[a-z._0-9]+]] // CHECK: [[VAARG_ON_STACK]] -// CHECK: [[STACK:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) +// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) // CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint i8* [[STACK]] to i64 // CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15 // CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16 @@ -80,7 +80,7 @@ __int128 aligned_int(void) { // CHECK: [[VAARG_END]] // CHECK: [[ADDR:%[a-z._0-9]+]] = phi i128* [ [[FROMREG_ADDR]], %[[VAARG_IN_REG]] ], [ [[FROMSTACK_ADDR]], %[[VAARG_ON_STACK]] ] -// CHECK: [[RESULT:%[a-z_0-9]+]] = load i128* [[ADDR]] +// CHECK: [[RESULT:%[a-z_0-9]+]] = load i128, i128* [[ADDR]] // CHECK: ret i128 [[RESULT]] } @@ -91,7 +91,7 @@ struct bigstruct { struct bigstruct simple_indirect(void) { // CHECK-LABEL: define void @simple_indirect return va_arg(the_list, struct bigstruct); -// CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 3) +// CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 3) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[GR_OFFS]], 0 // CHECK: br i1 [[EARLY_ONSTACK]], label %[[VAARG_ON_STACK:[a-z_.0-9]+]], label %[[VAARG_MAYBE_REG:[a-z_.0-9]+]] @@ -103,13 +103,13 @@ struct bigstruct simple_indirect(void) { // CHECK: br i1 [[INREG]], label %[[VAARG_IN_REG:[a-z_.0-9]+]], label %[[VAARG_ON_STACK]] // CHECK: [[VAARG_IN_REG]] -// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 1) +// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 1) // CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[GR_OFFS]] // CHECK: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR]] to %struct.bigstruct** // CHECK: br label %[[VAARG_END:[a-z._0-9]+]] // CHECK: [[VAARG_ON_STACK]] -// CHECK: [[STACK:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) +// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) // CHECK-NOT: and i64 // CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[STACK]], i32 8 // CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) @@ -118,7 +118,7 @@ struct bigstruct simple_indirect(void) { // CHECK: [[VAARG_END]] // CHECK: [[ADDR:%[a-z._0-9]+]] = phi %struct.bigstruct** [ [[FROMREG_ADDR]], %[[VAARG_IN_REG]] ], [ [[FROMSTACK_ADDR]], %[[VAARG_ON_STACK]] ] -// CHECK: load %struct.bigstruct** [[ADDR]] +// CHECK: load %struct.bigstruct*, %struct.bigstruct** [[ADDR]] } struct aligned_bigstruct { @@ -129,7 +129,7 @@ struct aligned_bigstruct { struct aligned_bigstruct simple_aligned_indirect(void) { // CHECK-LABEL: define void @simple_aligned_indirect return va_arg(the_list, struct aligned_bigstruct); -// CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 3) +// CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr 
inbounds (%struct.__va_list* @the_list, i32 0, i32 3) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[GR_OFFS]], 0 // CHECK: br i1 [[EARLY_ONSTACK]], label %[[VAARG_ON_STACK:[a-z_.0-9]+]], label %[[VAARG_MAYBE_REG:[a-z_.0-9]+]] @@ -140,13 +140,13 @@ struct aligned_bigstruct simple_aligned_indirect(void) { // CHECK: br i1 [[INREG]], label %[[VAARG_IN_REG:[a-z_.0-9]+]], label %[[VAARG_ON_STACK]] // CHECK: [[VAARG_IN_REG]] -// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 1) +// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 1) // CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[GR_OFFS]] // CHECK: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR]] to %struct.aligned_bigstruct** // CHECK: br label %[[VAARG_END:[a-z._0-9]+]] // CHECK: [[VAARG_ON_STACK]] -// CHECK: [[STACK:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) +// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) // CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[STACK]], i32 8 // CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) // CHECK: [[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[STACK]] to %struct.aligned_bigstruct** @@ -154,13 +154,13 @@ struct aligned_bigstruct simple_aligned_indirect(void) { // CHECK: [[VAARG_END]] // CHECK: [[ADDR:%[a-z._0-9]+]] = phi %struct.aligned_bigstruct** [ [[FROMREG_ADDR]], %[[VAARG_IN_REG]] ], [ [[FROMSTACK_ADDR]], %[[VAARG_ON_STACK]] ] -// CHECK: load %struct.aligned_bigstruct** [[ADDR]] +// CHECK: load %struct.aligned_bigstruct*, %struct.aligned_bigstruct** [[ADDR]] } double simple_double(void) { // CHECK-LABEL: define double @simple_double return va_arg(the_list, double); -// CHECK: [[VR_OFFS:%[a-z_0-9]+]] = load i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 4) +// CHECK: [[VR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 4) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[VR_OFFS]], 0 // CHECK: br i1 [[EARLY_ONSTACK]], label %[[VAARG_ON_STACK:[a-z_.0-9]+]], label %[[VAARG_MAYBE_REG]] @@ -171,7 +171,7 @@ double simple_double(void) { // CHECK: br i1 [[INREG]], label %[[VAARG_IN_REG:[a-z_.0-9]+]], label %[[VAARG_ON_STACK]] // CHECK: [[VAARG_IN_REG]] -// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 2) +// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 2) // CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[VR_OFFS]] // CHECK-BE: [[REG_ADDR_VAL:%[0-9]+]] = ptrtoint i8* [[REG_ADDR]] to i64 // CHECK-BE: [[REG_ADDR_VAL_ALIGNED:%[a-z_0-9]*]] = add i64 [[REG_ADDR_VAL]], 8 @@ -180,7 +180,7 @@ double simple_double(void) { // CHECK: br label %[[VAARG_END:[a-z._0-9]+]] // CHECK: [[VAARG_ON_STACK]] -// CHECK: [[STACK:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) +// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) // CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[STACK]], i32 8 // CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) // CHECK: 
[[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[STACK]] to double* @@ -188,7 +188,7 @@ double simple_double(void) { // CHECK: [[VAARG_END]] // CHECK: [[ADDR:%[a-z._0-9]+]] = phi double* [ [[FROMREG_ADDR]], %[[VAARG_IN_REG]] ], [ [[FROMSTACK_ADDR]], %[[VAARG_ON_STACK]] ] -// CHECK: [[RESULT:%[a-z_0-9]+]] = load double* [[ADDR]] +// CHECK: [[RESULT:%[a-z_0-9]+]] = load double, double* [[ADDR]] // CHECK: ret double [[RESULT]] } @@ -199,7 +199,7 @@ struct hfa { struct hfa simple_hfa(void) { // CHECK-LABEL: define %struct.hfa @simple_hfa return va_arg(the_list, struct hfa); -// CHECK: [[VR_OFFS:%[a-z_0-9]+]] = load i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 4) +// CHECK: [[VR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 4) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[VR_OFFS]], 0 // CHECK: br i1 [[EARLY_ONSTACK]], label %[[VAARG_ON_STACK:[a-z_.0-9]+]], label %[[VAARG_MAYBE_REG:[a-z_.0-9]+]] @@ -210,25 +210,25 @@ struct hfa simple_hfa(void) { // CHECK: br i1 [[INREG]], label %[[VAARG_IN_REG:[a-z_.0-9]+]], label %[[VAARG_ON_STACK]] // CHECK: [[VAARG_IN_REG]] -// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 2) +// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 2) // CHECK: [[FIRST_REG:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[VR_OFFS]] // CHECK-LE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[FIRST_REG]], i32 0 // CHECK-BE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[FIRST_REG]], i32 12 // CHECK: [[EL_TYPED:%[a-z_0-9]+]] = bitcast i8* [[EL_ADDR]] to float* // CHECK: [[EL_TMPADDR:%[a-z_0-9]+]] = getelementptr inbounds [2 x float], [2 x float]* %[[TMP_HFA:[a-z_.0-9]+]], i32 0, i32 0 -// CHECK: [[EL:%[a-z_0-9]+]] = load float* [[EL_TYPED]] +// CHECK: [[EL:%[a-z_0-9]+]] = load float, float* [[EL_TYPED]] // CHECK: store float [[EL]], float* [[EL_TMPADDR]] // CHECK-LE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[FIRST_REG]], i32 16 // CHECK-BE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[FIRST_REG]], i32 28 // CHECK: [[EL_TYPED:%[a-z_0-9]+]] = bitcast i8* [[EL_ADDR]] to float* // CHECK: [[EL_TMPADDR:%[a-z_0-9]+]] = getelementptr inbounds [2 x float], [2 x float]* %[[TMP_HFA]], i32 0, i32 1 -// CHECK: [[EL:%[a-z_0-9]+]] = load float* [[EL_TYPED]] +// CHECK: [[EL:%[a-z_0-9]+]] = load float, float* [[EL_TYPED]] // CHECK: store float [[EL]], float* [[EL_TMPADDR]] // CHECK: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast [2 x float]* %[[TMP_HFA]] to %struct.hfa* // CHECK: br label %[[VAARG_END:[a-z_.0-9]+]] // CHECK: [[VAARG_ON_STACK]] -// CHECK: [[STACK:%[a-z_0-9]+]] = load i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) +// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) // CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[STACK]], i32 8 // CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list* @the_list, i32 0, i32 0) // CHECK: [[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[STACK]] to %struct.hfa* diff --git a/test/CodeGen/address-space-field1.c b/test/CodeGen/address-space-field1.c index f2bf010e4d..109c69201c 100644 --- a/test/CodeGen/address-space-field1.c +++ b/test/CodeGen/address-space-field1.c @@ -5,16 +5,16 @@ // CHECK: [[p2addr:%.*]] = alloca %struct.S addrspace(2)* // CHECK: store %struct.S addrspace(1)* %p1, %struct.S 
addrspace(1)** [[p1addr]] // CHECK: store %struct.S addrspace(2)* %p2, %struct.S addrspace(2)** [[p2addr]] -// CHECK: [[t0:%.*]] = load %struct.S addrspace(2)** [[p2addr]], align 8 +// CHECK: [[t0:%.*]] = load %struct.S addrspace(2)*, %struct.S addrspace(2)** [[p2addr]], align 8 // CHECK: [[t1:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(2)* [[t0]], i32 0, i32 1 -// CHECK: [[t2:%.*]] = load i32 addrspace(2)* [[t1]], align 4 -// CHECK: [[t3:%.*]] = load %struct.S addrspace(1)** [[p1addr]], align 8 +// CHECK: [[t2:%.*]] = load i32, i32 addrspace(2)* [[t1]], align 4 +// CHECK: [[t3:%.*]] = load %struct.S addrspace(1)*, %struct.S addrspace(1)** [[p1addr]], align 8 // CHECK: [[t4:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(1)* [[t3]], i32 0, i32 0 // CHECK: store i32 [[t2]], i32 addrspace(1)* [[t4]], align 4 -// CHECK: [[t5:%.*]] = load %struct.S addrspace(2)** [[p2addr]], align 8 +// CHECK: [[t5:%.*]] = load %struct.S addrspace(2)*, %struct.S addrspace(2)** [[p2addr]], align 8 // CHECK: [[t6:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(2)* [[t5]], i32 0, i32 0 -// CHECK: [[t7:%.*]] = load i32 addrspace(2)* [[t6]], align 4 -// CHECK: [[t8:%.*]] = load %struct.S addrspace(1)** [[p1addr]], align 8 +// CHECK: [[t7:%.*]] = load i32, i32 addrspace(2)* [[t6]], align 4 +// CHECK: [[t8:%.*]] = load %struct.S addrspace(1)*, %struct.S addrspace(1)** [[p1addr]], align 8 // CHECK: [[t9:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(1)* [[t8]], i32 0, i32 1 // CHECK: store i32 [[t7]], i32 addrspace(1)* [[t9]], align 4 // CHECK: ret void diff --git a/test/CodeGen/address-space.c b/test/CodeGen/address-space.c index 110406ecf2..61deb26253 100644 --- a/test/CodeGen/address-space.c +++ b/test/CodeGen/address-space.c @@ -7,11 +7,11 @@ int foo __attribute__((address_space(1))); int ban[10] __attribute__((address_space(1))); // CHECK-LABEL: define i32 @test1() -// CHECK: load i32 addrspace(1)* @foo +// CHECK: load i32, i32 addrspace(1)* @foo int test1() { return foo; } // CHECK-LABEL: define i32 @test2(i32 %i) -// CHECK: load i32 addrspace(1)* +// CHECK: load i32, i32 addrspace(1)* // CHECK-NEXT: ret i32 int test2(int i) { return ban[i]; } @@ -19,9 +19,9 @@ int test2(int i) { return ban[i]; } __attribute__((address_space(2))) int *A, *B; // CHECK-LABEL: define void @test3() -// CHECK: load i32 addrspace(2)** @B -// CHECK: load i32 addrspace(2)* -// CHECK: load i32 addrspace(2)** @A +// CHECK: load i32 addrspace(2)*, i32 addrspace(2)** @B +// CHECK: load i32, i32 addrspace(2)* +// CHECK: load i32 addrspace(2)*, i32 addrspace(2)** @A // CHECK: store i32 {{.*}}, i32 addrspace(2)* void test3() { *A = *B; diff --git a/test/CodeGen/alignment.c b/test/CodeGen/alignment.c index 04d6aaccc2..0a598010c8 100644 --- a/test/CodeGen/alignment.c +++ b/test/CodeGen/alignment.c @@ -23,7 +23,7 @@ int test1a(myint *p) { return *p; } // CHECK: @test1a( -// CHECK: load i32* {{.*}}, align 1 +// CHECK: load i32, i32* {{.*}}, align 1 // CHECK: ret i32 diff --git a/test/CodeGen/annotations-builtin.c b/test/CodeGen/annotations-builtin.c index 7938e49aa6..8a3b3ffcec 100644 --- a/test/CodeGen/annotations-builtin.c +++ b/test/CodeGen/annotations-builtin.c @@ -28,7 +28,7 @@ int main(int argc, char **argv) { // CHECK: call i64 @llvm.annotation.i64 int inta = __builtin_annotation(intfoo, "annotation_a"); -// CHECK: load i32* @intfoo +// CHECK: load i32, i32* @intfoo // CHECK-NEXT: call i32 @llvm.annotation.i32 // CHECK-NEXT: store diff --git a/test/CodeGen/arm-abi-vector.c 
b/test/CodeGen/arm-abi-vector.c index 213e516a0d..468acdf91c 100644 --- a/test/CodeGen/arm-abi-vector.c +++ b/test/CodeGen/arm-abi-vector.c @@ -25,7 +25,7 @@ double varargs_vec_2i(int fixed, ...) { // APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 8 // APCS-GNU: bitcast <2 x i32>* [[VAR_ALIGN]] to i8* // APCS-GNU: call void @llvm.memcpy -// APCS-GNU: load <2 x i32>* [[VAR_ALIGN]] +// APCS-GNU: load <2 x i32>, <2 x i32>* [[VAR_ALIGN]] va_list ap; double sum = fixed; va_start(ap, fixed); @@ -82,7 +82,7 @@ double varargs_vec_5c(int fixed, ...) { // APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 8 // APCS-GNU: bitcast <5 x i8>* [[VAR_ALIGN]] to i8* // APCS-GNU: call void @llvm.memcpy -// APCS-GNU: load <5 x i8>* [[VAR_ALIGN]] +// APCS-GNU: load <5 x i8>, <5 x i8>* [[VAR_ALIGN]] va_list ap; double sum = fixed; va_start(ap, fixed); @@ -109,14 +109,14 @@ double varargs_vec_9c(int fixed, ...) { // CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16 // CHECK: bitcast <9 x i8>* [[VAR_ALIGN]] to i8* // CHECK: call void @llvm.memcpy -// CHECK: load <9 x i8>* [[VAR_ALIGN]] +// CHECK: load <9 x i8>, <9 x i8>* [[VAR_ALIGN]] // APCS-GNU: varargs_vec_9c // APCS-GNU: alloca <9 x i8>, align 16 // APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <9 x i8> // APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 16 // APCS-GNU: bitcast <9 x i8>* [[VAR_ALIGN]] to i8* // APCS-GNU: call void @llvm.memcpy -// APCS-GNU: load <9 x i8>* [[VAR_ALIGN]] +// APCS-GNU: load <9 x i8>, <9 x i8>* [[VAR_ALIGN]] va_list ap; double sum = fixed; va_start(ap, fixed); @@ -138,12 +138,12 @@ double varargs_vec_19c(int fixed, ...) { // CHECK: varargs_vec_19c // CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP:%.*]], i32 4 // CHECK: [[VAR:%.*]] = bitcast i8* [[AP]] to i8** -// CHECK: [[VAR2:%.*]] = load i8** [[VAR]] +// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]] // CHECK: bitcast i8* [[VAR2]] to <19 x i8>* // APCS-GNU: varargs_vec_19c // APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP:%.*]], i32 4 // APCS-GNU: [[VAR:%.*]] = bitcast i8* [[AP]] to i8** -// APCS-GNU: [[VAR2:%.*]] = load i8** [[VAR]] +// APCS-GNU: [[VAR2:%.*]] = load i8*, i8** [[VAR]] // APCS-GNU: bitcast i8* [[VAR2]] to <19 x i8>* va_list ap; double sum = fixed; @@ -175,7 +175,7 @@ double varargs_vec_3s(int fixed, ...) { // APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 8 // APCS-GNU: bitcast <3 x i16>* [[VAR_ALIGN]] to i8* // APCS-GNU: call void @llvm.memcpy -// APCS-GNU: load <3 x i16>* [[VAR_ALIGN]] +// APCS-GNU: load <3 x i16>, <3 x i16>* [[VAR_ALIGN]] va_list ap; double sum = fixed; va_start(ap, fixed); @@ -202,14 +202,14 @@ double varargs_vec_5s(int fixed, ...) 
{ // CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16 // CHECK: bitcast <5 x i16>* [[VAR_ALIGN]] to i8* // CHECK: call void @llvm.memcpy -// CHECK: load <5 x i16>* [[VAR_ALIGN]] +// CHECK: load <5 x i16>, <5 x i16>* [[VAR_ALIGN]] // APCS-GNU: varargs_vec_5s // APCS-GNU: alloca <5 x i16>, align 16 // APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <5 x i16> // APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 16 // APCS-GNU: bitcast <5 x i16>* [[VAR_ALIGN]] to i8* // APCS-GNU: call void @llvm.memcpy -// APCS-GNU: load <5 x i16>* [[VAR_ALIGN]] +// APCS-GNU: load <5 x i16>, <5 x i16>* [[VAR_ALIGN]] va_list ap; double sum = fixed; va_start(ap, fixed); diff --git a/test/CodeGen/arm-arguments.c b/test/CodeGen/arm-arguments.c index e4a10fd9e2..6da5b88b93 100644 --- a/test/CodeGen/arm-arguments.c +++ b/test/CodeGen/arm-arguments.c @@ -185,7 +185,7 @@ void f34(struct s34 s); void g34(struct s34 *s) { f34(*s); } // AAPCS: @g34(%struct.s34* %s) // AAPCS: %[[a:.*]] = alloca [1 x i32] -// AAPCS: load [1 x i32]* %[[a]] +// AAPCS: load [1 x i32], [1 x i32]* %[[a]] // rdar://12596507 struct s35 @@ -210,11 +210,11 @@ float32x4_t f35(int i, s35_with_align s1, s35_with_align s2) { // APCS-GNU: %[[c:.*]] = bitcast %struct.s35* %0 to i8* // APCS-GNU: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[b]], i8* %[[c]] // APCS-GNU: %[[d:.*]] = bitcast %struct.s35* %[[a]] to <4 x float>* -// APCS-GNU: load <4 x float>* %[[d]], align 16 +// APCS-GNU: load <4 x float>, <4 x float>* %[[d]], align 16 // AAPCS-LABEL: define arm_aapcscc <4 x float> @f35(i32 %i, %struct.s35* byval align 16, %struct.s35* byval align 16) // AAPCS: %[[a:.*]] = alloca %struct.s35, align 16 // AAPCS: %[[b:.*]] = bitcast %struct.s35* %[[a]] to i8* // AAPCS: %[[c:.*]] = bitcast %struct.s35* %0 to i8* // AAPCS: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[b]], i8* %[[c]] // AAPCS: %[[d:.*]] = bitcast %struct.s35* %[[a]] to <4 x float>* -// AAPCS: load <4 x float>* %[[d]], align 16 +// AAPCS: load <4 x float>, <4 x float>* %[[d]], align 16 diff --git a/test/CodeGen/arm-atomics-m.c b/test/CodeGen/arm-atomics-m.c index 51e2d1d9eb..cd9e71e5d9 100644 --- a/test/CodeGen/arm-atomics-m.c +++ b/test/CodeGen/arm-atomics-m.c @@ -15,7 +15,7 @@ void test_presence(void) __atomic_fetch_add(&i, 1, memory_order_seq_cst); // CHECK: atomicrmw sub i32* {{.*}} seq_cst __atomic_fetch_sub(&i, 1, memory_order_seq_cst); - // CHECK: load atomic i32* {{.*}} seq_cst + // CHECK: load atomic i32, i32* {{.*}} seq_cst int r; __atomic_load(&i, &r, memory_order_seq_cst); // CHECK: store atomic i32 {{.*}} seq_cst diff --git a/test/CodeGen/arm-atomics.c b/test/CodeGen/arm-atomics.c index b54e277120..aa5a6ecd0c 100644 --- a/test/CodeGen/arm-atomics.c +++ b/test/CodeGen/arm-atomics.c @@ -17,7 +17,7 @@ void test_presence(void) __atomic_fetch_add(&i, 1, memory_order_seq_cst); // CHECK: atomicrmw sub i32* {{.*}} seq_cst __atomic_fetch_sub(&i, 1, memory_order_seq_cst); - // CHECK: load atomic i32* {{.*}} seq_cst + // CHECK: load atomic i32, i32* {{.*}} seq_cst int r; __atomic_load(&i, &r, memory_order_seq_cst); // CHECK: store atomic i32 {{.*}} seq_cst @@ -28,7 +28,7 @@ void test_presence(void) __atomic_fetch_add(&l, 1, memory_order_seq_cst); // CHECK: atomicrmw sub i64* {{.*}} seq_cst __atomic_fetch_sub(&l, 1, memory_order_seq_cst); - // CHECK: load atomic i64* {{.*}} seq_cst + // CHECK: load atomic i64, i64* {{.*}} seq_cst long long rl; __atomic_load(&l, &rl, memory_order_seq_cst); // CHECK: store atomic i64 {{.*}} seq_cst diff --git a/test/CodeGen/arm-clear.c 
b/test/CodeGen/arm-clear.c index 8ef3675641..566d5daf4d 100644 --- a/test/CodeGen/arm-clear.c +++ b/test/CodeGen/arm-clear.c @@ -3,7 +3,7 @@ void clear(void *ptr, void *ptr2) { // CHECK: clear - // CHECK: load i8** - // CHECK: load i8** + // CHECK: load i8*, i8** + // CHECK: load i8*, i8** __clear_cache(ptr, ptr2); } diff --git a/test/CodeGen/arm-vector-align.c b/test/CodeGen/arm-vector-align.c index 9e1ae5da11..15dd13e7c1 100644 --- a/test/CodeGen/arm-vector-align.c +++ b/test/CodeGen/arm-vector-align.c @@ -23,7 +23,7 @@ void t1(AlignedAddr *addr1, AlignedAddr *addr2) { // Radar 10538555: Make sure unaligned load/stores do not gain alignment. void t2(char *addr) { // CHECK: @t2 -// CHECK: load i32* %{{.*}}, align 1 +// CHECK: load i32, i32* %{{.*}}, align 1 int32x2_t vec = vld1_dup_s32(addr); // CHECK: store i32 %{{.*}}, i32* {{.*}}, align 1 vst1_lane_s32(addr, vec, 1); diff --git a/test/CodeGen/arm64-abi-vector.c b/test/CodeGen/arm64-abi-vector.c index afcc79084a..f4895c1cb2 100644 --- a/test/CodeGen/arm64-abi-vector.c +++ b/test/CodeGen/arm64-abi-vector.c @@ -99,7 +99,7 @@ double varargs_vec_19c(int fixed, ...) { // CHECK: varargs_vec_19c // CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8 // CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8** -// CHECK: [[VAR2:%.*]] = load i8** [[VAR]] +// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]] // CHECK: bitcast i8* [[VAR2]] to <19 x i8>* va_list ap; double sum = fixed; @@ -185,7 +185,7 @@ double varargs_vec_5i(int fixed, ...) { // CHECK: alloca <5 x i32>, align 16 // CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8 // CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8** -// CHECK: [[VAR2:%.*]] = load i8** [[VAR]] +// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]] // CHECK: bitcast i8* [[VAR2]] to <5 x i32>* va_list ap; double sum = fixed; @@ -207,7 +207,7 @@ double varargs_vec_3d(int fixed, ...) { // CHECK: alloca <3 x double>, align 16 // CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8 // CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8** -// CHECK: [[VAR2:%.*]] = load i8** [[VAR]] +// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]] // CHECK: bitcast i8* [[VAR2]] to <3 x double>* va_list ap; double sum = fixed; @@ -246,7 +246,7 @@ double varargs_vec(int fixed, ...) { __char19 c19 = va_arg(ap, __char19); // CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8 // CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8** -// CHECK: [[VAR2:%.*]] = load i8** [[VAR]] +// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]] // CHECK: bitcast i8* [[VAR2]] to <19 x i8>* sum = sum + c19.x + c19.y; __short3 s3 = va_arg(ap, __short3); @@ -268,13 +268,13 @@ double varargs_vec(int fixed, ...) 
{ __int5 i5 = va_arg(ap, __int5); // CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8 // CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8** -// CHECK: [[VAR2:%.*]] = load i8** [[VAR]] +// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]] // CHECK: bitcast i8* [[VAR2]] to <5 x i32>* sum = sum + i5.x + i5.y; __double3 d3 = va_arg(ap, __double3); // CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8 // CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8** -// CHECK: [[VAR2:%.*]] = load i8** [[VAR]] +// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]] // CHECK: bitcast i8* [[VAR2]] to <3 x double>* sum = sum + d3.x + d3.y; va_end(ap); @@ -339,7 +339,7 @@ double fixed_9c(__char9 *in) { __attribute__((noinline)) double args_vec_19c(int fixed, __char19 c19) { // CHECK: args_vec_19c -// CHECK: [[C19:%.*]] = load <19 x i8>* {{.*}}, align 16 +// CHECK: [[C19:%.*]] = load <19 x i8>, <19 x i8>* {{.*}}, align 16 double sum = fixed; sum = sum + c19.x + c19.y; return sum; @@ -401,7 +401,7 @@ double fixed_3i(__int3 *in) { __attribute__((noinline)) double args_vec_5i(int fixed, __int5 c5) { // CHECK: args_vec_5i -// CHECK: [[C5:%.*]] = load <5 x i32>* {{%.*}}, align 16 +// CHECK: [[C5:%.*]] = load <5 x i32>, <5 x i32>* {{%.*}}, align 16 double sum = fixed; sum = sum + c5.x + c5.y; return sum; @@ -416,7 +416,7 @@ double fixed_5i(__int5 *in) { __attribute__((noinline)) double args_vec_3d(int fixed, __double3 c3) { // CHECK: args_vec_3d // CHECK: [[CAST:%.*]] = bitcast <3 x double>* {{%.*}} to <4 x double>* -// CHECK: [[LOAD:%.*]] = load <4 x double>* [[CAST]] +// CHECK: [[LOAD:%.*]] = load <4 x double>, <4 x double>* [[CAST]] // CHECK: shufflevector <4 x double> [[LOAD]], <4 x double> undef, <3 x i32> double sum = fixed; sum = sum + c3.x + c3.y; diff --git a/test/CodeGen/arm64-arguments.c b/test/CodeGen/arm64-arguments.c index e3b13381e8..5c56fd4749 100644 --- a/test/CodeGen/arm64-arguments.c +++ b/test/CodeGen/arm64-arguments.c @@ -134,7 +134,7 @@ struct s34 { char c; }; void f34(struct s34 s); void g34(struct s34 *s) { f34(*s); } // CHECK: @g34(%struct.s34* %s) -// CHECK: %[[a:.*]] = load i8* %{{.*}} +// CHECK: %[[a:.*]] = load i8, i8* %{{.*}} // CHECK: zext i8 %[[a]] to i64 // CHECK: call void @f34(i64 %{{.*}}) @@ -200,9 +200,9 @@ float32x4_t f35(int i, s35_with_align s1, s35_with_align s2) { // CHECK: %s1 = alloca %struct.s35, align 16 // CHECK: %s2 = alloca %struct.s35, align 16 // CHECK: %[[a:.*]] = bitcast %struct.s35* %s1 to <4 x float>* -// CHECK: load <4 x float>* %[[a]], align 16 +// CHECK: load <4 x float>, <4 x float>* %[[a]], align 16 // CHECK: %[[b:.*]] = bitcast %struct.s35* %s2 to <4 x float>* -// CHECK: load <4 x float>* %[[b]], align 16 +// CHECK: load <4 x float>, <4 x float>* %[[b]], align 16 float32x4_t v = vaddq_f32(*(float32x4_t *)&s1, *(float32x4_t *)&s2); return v; @@ -222,9 +222,9 @@ int32x4_t f36(int i, s36_with_align s1, s36_with_align s2) { // CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1 // CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1 // CHECK: %[[a:.*]] = bitcast %struct.s36* %s1 to <4 x i32>* -// CHECK: load <4 x i32>* %[[a]], align 16 +// CHECK: load <4 x i32>, <4 x i32>* %[[a]], align 16 // CHECK: %[[b:.*]] = bitcast %struct.s36* %s2 to <4 x i32>* -// CHECK: load <4 x i32>* %[[b]], align 16 +// CHECK: load <4 x i32>, <4 x i32>* %[[b]], align 16 int32x4_t v = vaddq_s32(*(int32x4_t *)&s1, *(int32x4_t *)&s2); return v; @@ -239,9 +239,9 @@ typedef struct s37 s37_with_align; int32x4_t f37(int i, s37_with_align s1, s37_with_align s2) { 
// CHECK: define <4 x i32> @f37(i32 %i, %struct.s37* %s1, %struct.s37* %s2) // CHECK: %[[a:.*]] = bitcast %struct.s37* %s1 to <4 x i32>* -// CHECK: load <4 x i32>* %[[a]], align 16 +// CHECK: load <4 x i32>, <4 x i32>* %[[a]], align 16 // CHECK: %[[b:.*]] = bitcast %struct.s37* %s2 to <4 x i32>* -// CHECK: load <4 x i32>* %[[b]], align 16 +// CHECK: load <4 x i32>, <4 x i32>* %[[b]], align 16 int32x4_t v = vaddq_s32(*(int32x4_t *)&s1, *(int32x4_t *)&s2); return v; @@ -287,8 +287,8 @@ s38_no_align g38; s38_no_align g38_2; int caller38() { // CHECK: define i32 @caller38() -// CHECK: %[[a:.*]] = load i64* bitcast (%struct.s38* @g38 to i64*), align 1 -// CHECK: %[[b:.*]] = load i64* bitcast (%struct.s38* @g38_2 to i64*), align 1 +// CHECK: %[[a:.*]] = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 1 +// CHECK: %[[b:.*]] = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 1 // CHECK: call i32 @f38(i32 3, i64 %[[a]], i64 %[[b]]) return f38(3, g38, g38_2); } @@ -309,8 +309,8 @@ int f38_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, } int caller38_stack() { // CHECK: define i32 @caller38_stack() -// CHECK: %[[a:.*]] = load i64* bitcast (%struct.s38* @g38 to i64*), align 1 -// CHECK: %[[b:.*]] = load i64* bitcast (%struct.s38* @g38_2 to i64*), align 1 +// CHECK: %[[a:.*]] = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 1 +// CHECK: %[[b:.*]] = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 1 // CHECK: call i32 @f38_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i64 %[[a]], i64 %[[b]]) return f38_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g38, g38_2); } @@ -340,8 +340,8 @@ s39_with_align g39; s39_with_align g39_2; int caller39() { // CHECK: define i32 @caller39() -// CHECK: %[[a:.*]] = load i128* bitcast (%struct.s39* @g39 to i128*), align 1 -// CHECK: %[[b:.*]] = load i128* bitcast (%struct.s39* @g39_2 to i128*), align 1 +// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 1 +// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 1 // CHECK: call i32 @f39(i32 3, i128 %[[a]], i128 %[[b]]) return f39(3, g39, g39_2); } @@ -362,8 +362,8 @@ int f39_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, } int caller39_stack() { // CHECK: define i32 @caller39_stack() -// CHECK: %[[a:.*]] = load i128* bitcast (%struct.s39* @g39 to i128*), align 1 -// CHECK: %[[b:.*]] = load i128* bitcast (%struct.s39* @g39_2 to i128*), align 1 +// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 1 +// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 1 // CHECK: call i32 @f39_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %[[a]], i128 %[[b]]) return f39_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g39, g39_2); } @@ -395,8 +395,8 @@ s40_no_align g40; s40_no_align g40_2; int caller40() { // CHECK: define i32 @caller40() -// CHECK: %[[a:.*]] = load [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 1 -// CHECK: %[[b:.*]] = load [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 1 +// CHECK: %[[a:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 1 +// CHECK: %[[b:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 1 // CHECK: call i32 @f40(i32 3, [2 x i64] %[[a]], [2 x i64] %[[b]]) return f40(3, g40, g40_2); } @@ -417,8 +417,8 @@ int f40_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, } int 
caller40_stack() { // CHECK: define i32 @caller40_stack() -// CHECK: %[[a:.*]] = load [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 1 -// CHECK: %[[b:.*]] = load [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 1 +// CHECK: %[[a:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 1 +// CHECK: %[[b:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 1 // CHECK: call i32 @f40_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, [2 x i64] %[[a]], [2 x i64] %[[b]]) return f40_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g40, g40_2); } @@ -450,8 +450,8 @@ s41_with_align g41; s41_with_align g41_2; int caller41() { // CHECK: define i32 @caller41() -// CHECK: %[[a:.*]] = load i128* bitcast (%struct.s41* @g41 to i128*), align 1 -// CHECK: %[[b:.*]] = load i128* bitcast (%struct.s41* @g41_2 to i128*), align 1 +// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 1 +// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 1 // CHECK: call i32 @f41(i32 3, i128 %[[a]], i128 %[[b]]) return f41(3, g41, g41_2); } @@ -472,8 +472,8 @@ int f41_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, } int caller41_stack() { // CHECK: define i32 @caller41_stack() -// CHECK: %[[a:.*]] = load i128* bitcast (%struct.s41* @g41 to i128*), align 1 -// CHECK: %[[b:.*]] = load i128* bitcast (%struct.s41* @g41_2 to i128*), align 1 +// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 1 +// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 1 // CHECK: call i32 @f41_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %[[a]], i128 %[[b]]) return f41_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g41, g41_2); } @@ -626,7 +626,7 @@ struct HFA { float test_hfa(int n, ...) { // CHECK-LABEL: define float @test_hfa(i32 %n, ...) // CHECK: [[THELIST:%.*]] = alloca i8* -// CHECK: [[CURLIST:%.*]] = load i8** [[THELIST]] +// CHECK: [[CURLIST:%.*]] = load i8*, i8** [[THELIST]] // HFA is not indirect, so occupies its full 16 bytes on the stack. // CHECK: [[NEXTLIST:%.*]] = getelementptr i8, i8* [[CURLIST]], i32 16 @@ -652,7 +652,7 @@ struct TooBigHFA { float test_toobig_hfa(int n, ...) { // CHECK-LABEL: define float @test_toobig_hfa(i32 %n, ...) // CHECK: [[THELIST:%.*]] = alloca i8* -// CHECK: [[CURLIST:%.*]] = load i8** [[THELIST]] +// CHECK: [[CURLIST:%.*]] = load i8*, i8** [[THELIST]] // TooBigHFA is not actually an HFA, so gets passed indirectly. Only 8 bytes // of stack consumed. @@ -660,7 +660,7 @@ float test_toobig_hfa(int n, ...) { // CHECK: store i8* [[NEXTLIST]], i8** [[THELIST]] // CHECK: [[HFAPTRPTR:%.*]] = bitcast i8* [[CURLIST]] to i8** -// CHECK: [[HFAPTR:%.*]] = load i8** [[HFAPTRPTR]] +// CHECK: [[HFAPTR:%.*]] = load i8*, i8** [[HFAPTRPTR]] // CHECK: bitcast i8* [[HFAPTR]] to %struct.TooBigHFA* __builtin_va_list thelist; __builtin_va_start(thelist, n); @@ -675,7 +675,7 @@ struct HVA { int32x4_t test_hva(int n, ...) { // CHECK-LABEL: define <4 x i32> @test_hva(i32 %n, ...) // CHECK: [[THELIST:%.*]] = alloca i8* -// CHECK: [[CURLIST:%.*]] = load i8** [[THELIST]] +// CHECK: [[CURLIST:%.*]] = load i8*, i8** [[THELIST]] // HVA is not indirect, so occupies its full 16 bytes on the stack. but it // must be properly aligned. @@ -701,7 +701,7 @@ struct TooBigHVA { int32x4_t test_toobig_hva(int n, ...) { // CHECK-LABEL: define <4 x i32> @test_toobig_hva(i32 %n, ...) 
// CHECK: [[THELIST:%.*]] = alloca i8* -// CHECK: [[CURLIST:%.*]] = load i8** [[THELIST]] +// CHECK: [[CURLIST:%.*]] = load i8*, i8** [[THELIST]] // TooBigHVA is not actually an HVA, so gets passed indirectly. Only 8 bytes // of stack consumed. @@ -709,7 +709,7 @@ int32x4_t test_toobig_hva(int n, ...) { // CHECK: store i8* [[NEXTLIST]], i8** [[THELIST]] // CHECK: [[HVAPTRPTR:%.*]] = bitcast i8* [[CURLIST]] to i8** -// CHECK: [[HVAPTR:%.*]] = load i8** [[HVAPTRPTR]] +// CHECK: [[HVAPTR:%.*]] = load i8*, i8** [[HVAPTRPTR]] // CHECK: bitcast i8* [[HVAPTR]] to %struct.TooBigHVA* __builtin_va_list thelist; __builtin_va_start(thelist, n); diff --git a/test/CodeGen/asm-inout.c b/test/CodeGen/asm-inout.c index c7d1aeceb9..e5da5c5e93 100644 --- a/test/CodeGen/asm-inout.c +++ b/test/CodeGen/asm-inout.c @@ -12,7 +12,7 @@ void test1() { // CHECK: @test2 void test2() { // CHECK: [[REGCALLRESULT:%[a-zA-Z0-9\.]+]] = call i32* @foo() - // CHECK: load i32* [[REGCALLRESULT]] + // CHECK: load i32, i32* [[REGCALLRESULT]] // CHECK: call i32 asm // CHECK: store i32 {{%[a-zA-Z0-9\.]+}}, i32* [[REGCALLRESULT]] asm ("foobar" : "+r"(*foo())); diff --git a/test/CodeGen/asm-reg-var-local.c b/test/CodeGen/asm-reg-var-local.c index 44417d4a76..56dcab4cb9 100644 --- a/test/CodeGen/asm-reg-var-local.c +++ b/test/CodeGen/asm-reg-var-local.c @@ -16,11 +16,11 @@ int foo() { // CHECK: store i32 42, i32* [[A]] asm volatile("; %0 This asm uses rsi" : : "r"(a)); -// CHECK: [[TMP:%[a-zA-Z0-9]+]] = load i32* [[A]] +// CHECK: [[TMP:%[a-zA-Z0-9]+]] = load i32, i32* [[A]] // CHECK: call void asm sideeffect "; $0 This asm uses rsi", "{rsi},~{dirflag},~{fpsr},~{flags}"(i32 [[TMP]]) return a; -// CHECK: [[TMP1:%[a-zA-Z0-9]+]] = load i32* [[A]] +// CHECK: [[TMP1:%[a-zA-Z0-9]+]] = load i32, i32* [[A]] // CHECK: ret i32 [[TMP1]] } @@ -39,10 +39,10 @@ int earlyclobber() { // CHECK: store i32 42, i32* [[A]] asm volatile("; %0 This asm uses rsi" : : "r"(a)); -// CHECK: [[TMP:%[a-zA-Z0-9]+]] = load i32* [[A]] +// CHECK: [[TMP:%[a-zA-Z0-9]+]] = load i32, i32* [[A]] // CHECK: call void asm sideeffect "; $0 This asm uses rsi", "{rsi},~{dirflag},~{fpsr},~{flags}"(i32 [[TMP]]) return a; -// CHECK: [[TMP1:%[a-zA-Z0-9]+]] = load i32* [[A]] +// CHECK: [[TMP1:%[a-zA-Z0-9]+]] = load i32, i32* [[A]] // CHECK: ret i32 [[TMP1]] } diff --git a/test/CodeGen/atomic-arm64.c b/test/CodeGen/atomic-arm64.c index 147b570e21..98f27aba4f 100644 --- a/test/CodeGen/atomic-arm64.c +++ b/test/CodeGen/atomic-arm64.c @@ -24,7 +24,7 @@ extern _Atomic(pointer_quad_t) a_pointer_quad; // CHECK: define void @test0() // CHECK: [[TEMP:%.*]] = alloca i8, align 1 // CHECK-NEXT: store i8 1, i8* [[TEMP]] -// CHECK-NEXT: [[T0:%.*]] = load i8* [[TEMP]], align 1 +// CHECK-NEXT: [[T0:%.*]] = load i8, i8* [[TEMP]], align 1 // CHECK-NEXT: store atomic i8 [[T0]], i8* @a_bool seq_cst, align 1 void test0() { __c11_atomic_store(&a_bool, 1, memory_order_seq_cst); @@ -34,7 +34,7 @@ void test0() { // CHECK: [[TEMP:%.*]] = alloca float, align 4 // CHECK-NEXT: store float 3.000000e+00, float* [[TEMP]] // CHECK-NEXT: [[T0:%.*]] = bitcast float* [[TEMP]] to i32* -// CHECK-NEXT: [[T1:%.*]] = load i32* [[T0]], align 4 +// CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[T0]], align 4 // CHECK-NEXT: store atomic i32 [[T1]], i32* bitcast (float* @a_float to i32*) seq_cst, align 4 void test1() { __c11_atomic_store(&a_float, 3, memory_order_seq_cst); @@ -44,7 +44,7 @@ void test1() { // CHECK: [[TEMP:%.*]] = alloca i8*, align 8 // CHECK-NEXT: store i8* @a_bool, i8** [[TEMP]] // CHECK-NEXT: [[T0:%.*]] = bitcast 
i8** [[TEMP]] to i64* -// CHECK-NEXT: [[T1:%.*]] = load i64* [[T0]], align 8 +// CHECK-NEXT: [[T1:%.*]] = load i64, i64* [[T0]], align 8 // CHECK-NEXT: store atomic i64 [[T1]], i64* bitcast (i8** @a_pointer to i64*) seq_cst, align 8 void test2() { __c11_atomic_store(&a_pointer, &a_bool, memory_order_seq_cst); @@ -55,7 +55,7 @@ void test2() { // CHECK-NEXT: [[TEMP:%.*]] = alloca [[PAIR_T]], align 8 // CHECK: llvm.memcpy // CHECK-NEXT: [[T0:%.*]] = bitcast [[PAIR_T]]* [[TEMP]] to i128* -// CHECK-NEXT: [[T1:%.*]] = load i128* [[T0]], align 16 +// CHECK-NEXT: [[T1:%.*]] = load i128, i128* [[T0]], align 16 // CHECK-NEXT: store atomic i128 [[T1]], i128* bitcast ([[PAIR_T]]* @a_pointer_pair to i128*) seq_cst, align 16 void test3(pointer_pair_t pair) { __c11_atomic_store(&a_pointer_pair, pair, memory_order_seq_cst); diff --git a/test/CodeGen/atomic-ops.c b/test/CodeGen/atomic-ops.c index 559b135413..733c60eb85 100644 --- a/test/CodeGen/atomic-ops.c +++ b/test/CodeGen/atomic-ops.c @@ -13,13 +13,13 @@ int fi1(_Atomic(int) *i) { // CHECK-LABEL: @fi1 - // CHECK: load atomic i32* {{.*}} seq_cst + // CHECK: load atomic i32, i32* {{.*}} seq_cst return __c11_atomic_load(i, memory_order_seq_cst); } int fi1a(int *i) { // CHECK-LABEL: @fi1a - // CHECK: load atomic i32* {{.*}} seq_cst + // CHECK: load atomic i32, i32* {{.*}} seq_cst int v; __atomic_load(i, &v, memory_order_seq_cst); return v; @@ -27,13 +27,13 @@ int fi1a(int *i) { int fi1b(int *i) { // CHECK-LABEL: @fi1b - // CHECK: load atomic i32* {{.*}} seq_cst + // CHECK: load atomic i32, i32* {{.*}} seq_cst return __atomic_load_n(i, memory_order_seq_cst); } int fi1c(atomic_int *i) { // CHECK-LABEL: @fi1c - // CHECK: load atomic i32* {{.*}} seq_cst + // CHECK: load atomic i32, i32* {{.*}} seq_cst return atomic_load(i); } @@ -148,7 +148,7 @@ _Bool fi4c(atomic_int *i) { float ff1(_Atomic(float) *d) { // CHECK-LABEL: @ff1 - // CHECK: load atomic i32* {{.*}} monotonic + // CHECK: load atomic i32, i32* {{.*}} monotonic return __c11_atomic_load(d, memory_order_relaxed); } @@ -184,11 +184,11 @@ void fd2(struct S *a, struct S *b) { // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4 // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4 // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4 - // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S** [[A_ADDR]], align 4 - // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S** [[B_ADDR]], align 4 + // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4 + // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4 // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8* // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64* - // CHECK-NEXT: [[LOAD_B:%.*]] = load i64* [[COERCED_B]], align 4 + // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4 // CHECK-NEXT: call void @__atomic_store_8(i8* [[COERCED_A]], i64 [[LOAD_B]], // CHECK-NEXT: ret void __atomic_store(a, b, memory_order_seq_cst); @@ -202,12 +202,12 @@ void fd3(struct S *a, struct S *b, struct S *c) { // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4 // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4 // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4 - // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S** [[A_ADDR]], align 4 - // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S** [[B_ADDR]], align 4 - // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S** [[C_ADDR]], align 4 + 
// CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4 + // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4 + // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4 // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8* // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64* - // CHECK-NEXT: [[LOAD_B:%.*]] = load i64* [[COERCED_B]], align 4 + // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4 // CHECK-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(i8* [[COERCED_A]], i64 [[LOAD_B]], // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64* // CHECK-NEXT: store i64 [[CALL]], i64* [[COERCED_C]], align 4 @@ -223,13 +223,13 @@ _Bool fd4(struct S *a, struct S *b, struct S *c) { // CHECK: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4 // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4 // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4 - // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S** [[A_ADDR]], align 4 - // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S** [[B_ADDR]], align 4 - // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S** [[C_ADDR]], align 4 + // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4 + // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4 + // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4 // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8* // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i8* // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64* - // CHECK-NEXT: [[LOAD_C:%.*]] = load i64* [[COERCED_C]], align 4 + // CHECK-NEXT: [[LOAD_C:%.*]] = load i64, i64* [[COERCED_C]], align 4 // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(i8* [[COERCED_A]], i8* [[COERCED_B]], i64 [[LOAD_C]] // CHECK-NEXT: ret i1 [[CALL]] return __atomic_compare_exchange(a, b, c, 1, 5, 5); @@ -237,7 +237,7 @@ _Bool fd4(struct S *a, struct S *b, struct S *c) { int* fp1(_Atomic(int*) *p) { // CHECK-LABEL: @fp1 - // CHECK: load atomic i32* {{.*}} seq_cst + // CHECK: load atomic i32, i32* {{.*}} seq_cst return __c11_atomic_load(p, memory_order_seq_cst); } @@ -388,7 +388,7 @@ int structAtomicCmpExchange() { // CHECK: %[[call1:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2 // CHECK: %[[zext1:.*]] = zext i1 %[[call1]] to i8 // CHECK: store i8 %[[zext1]], i8* %[[x_mem]], align 1 - // CHECK: %[[x:.*]] = load i8* %[[x_mem]] + // CHECK: %[[x:.*]] = load i8, i8* %[[x_mem]] // CHECK: %[[x_bool:.*]] = trunc i8 %[[x]] to i1 // CHECK: %[[conv1:.*]] = zext i1 %[[x_bool]] to i32 @@ -558,11 +558,11 @@ int PR21643() { // CHECK: %[[atomictmp:.*]] = alloca i32, align 4 // CHECK: %[[atomicdst:.*]] = alloca i32, align 4 // CHECK: store i32 1, i32* %[[atomictmp]] - // CHECK: %[[one:.*]] = load i32* %[[atomictmp]], align 4 + // CHECK: %[[one:.*]] = load i32, i32* %[[atomictmp]], align 4 // CHECK: %[[old:.*]] = atomicrmw or i32 addrspace(257)* inttoptr (i32 776 to i32 addrspace(257)*), i32 %[[one]] monotonic // CHECK: %[[new:.*]] = or i32 %[[old]], %[[one]] // CHECK: store i32 %[[new]], i32* %[[atomicdst]], align 4 - // CHECK: %[[ret:.*]] = load i32* %[[atomicdst]], align 4 + // CHECK: %[[ret:.*]] = load i32, i32* %[[atomicdst]], align 4 // CHECK: ret i32 %[[ret]] 
} @@ -571,10 +571,10 @@ int PR17306_1(volatile _Atomic(int) *i) { // CHECK: %[[i_addr:.*]] = alloca i32 // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32 // CHECK-NEXT: store i32* %i, i32** %[[i_addr]] - // CHECK-NEXT: %[[addr:.*]] = load i32** %[[i_addr]] - // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32* %[[addr]] seq_cst + // CHECK-NEXT: %[[addr:.*]] = load i32*, i32** %[[i_addr]] + // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32, i32* %[[addr]] seq_cst // CHECK-NEXT: store i32 %[[res]], i32* %[[atomicdst]] - // CHECK-NEXT: %[[retval:.*]] = load i32* %[[atomicdst]] + // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]] // CHECK-NEXT: ret i32 %[[retval]] return __c11_atomic_load(i, memory_order_seq_cst); } @@ -587,14 +587,14 @@ int PR17306_2(volatile int *i, int value) { // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32 // CHECK-NEXT: store i32* %i, i32** %[[i_addr]] // CHECK-NEXT: store i32 %value, i32* %[[value_addr]] - // CHECK-NEXT: %[[i_lval:.*]] = load i32** %[[i_addr]] - // CHECK-NEXT: %[[value:.*]] = load i32* %[[value_addr]] + // CHECK-NEXT: %[[i_lval:.*]] = load i32*, i32** %[[i_addr]] + // CHECK-NEXT: %[[value:.*]] = load i32, i32* %[[value_addr]] // CHECK-NEXT: store i32 %[[value]], i32* %[[atomictmp]] - // CHECK-NEXT: %[[value_lval:.*]] = load i32* %[[atomictmp]] + // CHECK-NEXT: %[[value_lval:.*]] = load i32, i32* %[[atomictmp]] // CHECK-NEXT: %[[old_val:.*]] = atomicrmw volatile add i32* %[[i_lval]], i32 %[[value_lval]] seq_cst // CHECK-NEXT: %[[new_val:.*]] = add i32 %[[old_val]], %[[value_lval]] // CHECK-NEXT: store i32 %[[new_val]], i32* %[[atomicdst]] - // CHECK-NEXT: %[[retval:.*]] = load i32* %[[atomicdst]] + // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]] // CHECK-NEXT: ret i32 %[[retval]] return __atomic_add_fetch(i, value, memory_order_seq_cst); } diff --git a/test/CodeGen/atomic_ops.c b/test/CodeGen/atomic_ops.c index 050b543c47..1d36601c27 100644 --- a/test/CodeGen/atomic_ops.c +++ b/test/CodeGen/atomic_ops.c @@ -22,7 +22,7 @@ extern _Atomic _Bool b; _Bool bar() { // CHECK-LABEL: @bar -// CHECK: %[[load:.*]] = load atomic i8* @b seq_cst +// CHECK: %[[load:.*]] = load atomic i8, i8* @b seq_cst // CHECK: %[[tobool:.*]] = trunc i8 %[[load]] to i1 // CHECK: ret i1 %[[tobool]] return b; diff --git a/test/CodeGen/atomics-inlining.c b/test/CodeGen/atomics-inlining.c index 9cd280294f..9e6d57d7bc 100644 --- a/test/CodeGen/atomics-inlining.c +++ b/test/CodeGen/atomics-inlining.c @@ -44,11 +44,11 @@ void test1(void) { // ARM: call{{.*}} void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0) // PPC32-LABEL: define void @test1 -// PPC32: = load atomic i8* @c1 seq_cst +// PPC32: = load atomic i8, i8* @c1 seq_cst // PPC32: store atomic i8 {{.*}}, i8* @c1 seq_cst -// PPC32: = load atomic i16* @s1 seq_cst +// PPC32: = load atomic i16, i16* @s1 seq_cst // PPC32: store atomic i16 {{.*}}, i16* @s1 seq_cst -// PPC32: = load atomic i32* @i1 seq_cst +// PPC32: = load atomic i32, i32* @i1 seq_cst // PPC32: store atomic i32 {{.*}}, i32* @i1 seq_cst // PPC32: = call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*) // PPC32: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64 @@ -56,23 +56,23 @@ void test1(void) { // PPC32: call void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0) // PPC64-LABEL: define void @test1 -// PPC64: = load atomic i8* @c1 seq_cst +// 
PPC64: = load atomic i8, i8* @c1 seq_cst // PPC64: store atomic i8 {{.*}}, i8* @c1 seq_cst -// PPC64: = load atomic i16* @s1 seq_cst +// PPC64: = load atomic i16, i16* @s1 seq_cst // PPC64: store atomic i16 {{.*}}, i16* @s1 seq_cst -// PPC64: = load atomic i32* @i1 seq_cst +// PPC64: = load atomic i32, i32* @i1 seq_cst // PPC64: store atomic i32 {{.*}}, i32* @i1 seq_cst -// PPC64: = load atomic i64* @ll1 seq_cst +// PPC64: = load atomic i64, i64* @ll1 seq_cst // PPC64: store atomic i64 {{.*}}, i64* @ll1 seq_cst // PPC64: call void @__atomic_load(i64 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0) // PPC64: call void @__atomic_store(i64 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0) // MIPS32-LABEL: define void @test1 -// MIPS32: = load atomic i8* @c1 seq_cst +// MIPS32: = load atomic i8, i8* @c1 seq_cst // MIPS32: store atomic i8 {{.*}}, i8* @c1 seq_cst -// MIPS32: = load atomic i16* @s1 seq_cst +// MIPS32: = load atomic i16, i16* @s1 seq_cst // MIPS32: store atomic i16 {{.*}}, i16* @s1 seq_cst -// MIPS32: = load atomic i32* @i1 seq_cst +// MIPS32: = load atomic i32, i32* @i1 seq_cst // MIPS32: store atomic i32 {{.*}}, i32* @i1 seq_cst // MIPS32: call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*) // MIPS32: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64 @@ -80,13 +80,13 @@ void test1(void) { // MIPS32: call void @__atomic_store(i32 zeroext 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0) // MIPS64-LABEL: define void @test1 -// MIPS64: = load atomic i8* @c1 seq_cst +// MIPS64: = load atomic i8, i8* @c1 seq_cst // MIPS64: store atomic i8 {{.*}}, i8* @c1 seq_cst -// MIPS64: = load atomic i16* @s1 seq_cst +// MIPS64: = load atomic i16, i16* @s1 seq_cst // MIPS64: store atomic i16 {{.*}}, i16* @s1 seq_cst -// MIPS64: = load atomic i32* @i1 seq_cst +// MIPS64: = load atomic i32, i32* @i1 seq_cst // MIPS64: store atomic i32 {{.*}}, i32* @i1 seq_cst -// MIPS64: = load atomic i64* @ll1 seq_cst +// MIPS64: = load atomic i64, i64* @ll1 seq_cst // MIPS64: store atomic i64 {{.*}}, i64* @ll1 seq_cst // MIPS64: call void @__atomic_load(i64 zeroext 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0) // MIPS64: call void @__atomic_store(i64 zeroext 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0) diff --git a/test/CodeGen/attributes.c b/test/CodeGen/attributes.c index 5c9c90d7ce..4da3eca9e4 100644 --- a/test/CodeGen/attributes.c +++ b/test/CodeGen/attributes.c @@ -79,7 +79,7 @@ void (__attribute__((fastcall)) *fptr)(int); void t21(void) { fptr(10); } -// CHECK: [[FPTRVAR:%[a-z0-9]+]] = load void (i32)** @fptr +// CHECK: [[FPTRVAR:%[a-z0-9]+]] = load void (i32)*, void (i32)** @fptr // CHECK-NEXT: call x86_fastcallcc void [[FPTRVAR]](i32 inreg 10) diff --git a/test/CodeGen/avx-builtins.c b/test/CodeGen/avx-builtins.c index 28e11baeb7..99d063385c 100644 --- a/test/CodeGen/avx-builtins.c +++ b/test/CodeGen/avx-builtins.c @@ -10,17 +10,17 @@ // __m256 test__mm256_loadu_ps(void* p) { - // CHECK: load <8 x float>* %{{.*}}, align 1 + // CHECK: load <8 x float>, <8 x float>* %{{.*}}, align 1 return _mm256_loadu_ps(p); } __m256d test__mm256_loadu_pd(void* p) { - // CHECK: load <4 x double>* %{{.*}}, align 1 + // CHECK: load <4 x double>, <4 x double>* %{{.*}}, align 1 return 
_mm256_loadu_pd(p); } __m256i test__mm256_loadu_si256(void* p) { - // CHECK: load <4 x i64>* %{{.+}}, align 1 + // CHECK: load <4 x i64>, <4 x i64>* %{{.+}}, align 1 return _mm256_loadu_si256(p); } diff --git a/test/CodeGen/avx512f-builtins.c b/test/CodeGen/avx512f-builtins.c index f7f7df5417..89f8a535ee 100644 --- a/test/CodeGen/avx512f-builtins.c +++ b/test/CodeGen/avx512f-builtins.c @@ -106,14 +106,14 @@ void test_mm512_store_pd(void *p, __m512d a) __m512 test_mm512_loadu_ps(void *p) { // CHECK-LABEL: @test_mm512_loadu_ps - // CHECK: load <16 x float>* {{.*}}, align 1{{$}} + // CHECK: load <16 x float>, <16 x float>* {{.*}}, align 1{{$}} return _mm512_loadu_ps(p); } __m512d test_mm512_loadu_pd(void *p) { // CHECK-LABEL: @test_mm512_loadu_pd - // CHECK: load <8 x double>* {{.*}}, align 1{{$}} + // CHECK: load <8 x double>, <8 x double>* {{.*}}, align 1{{$}} return _mm512_loadu_pd(p); } diff --git a/test/CodeGen/big-atomic-ops.c b/test/CodeGen/big-atomic-ops.c index 74096616ac..28b7b5d708 100644 --- a/test/CodeGen/big-atomic-ops.c +++ b/test/CodeGen/big-atomic-ops.c @@ -16,13 +16,13 @@ typedef enum memory_order { int fi1(_Atomic(int) *i) { // CHECK: @fi1 - // CHECK: load atomic i32* {{.*}} seq_cst + // CHECK: load atomic i32, i32* {{.*}} seq_cst return __c11_atomic_load(i, memory_order_seq_cst); } int fi1a(int *i) { // CHECK: @fi1a - // CHECK: load atomic i32* {{.*}} seq_cst + // CHECK: load atomic i32, i32* {{.*}} seq_cst int v; __atomic_load(i, &v, memory_order_seq_cst); return v; @@ -30,7 +30,7 @@ int fi1a(int *i) { int fi1b(int *i) { // CHECK: @fi1b - // CHECK: load atomic i32* {{.*}} seq_cst + // CHECK: load atomic i32, i32* {{.*}} seq_cst return __atomic_load_n(i, memory_order_seq_cst); } @@ -113,7 +113,7 @@ _Bool fi4b(int *i) { float ff1(_Atomic(float) *d) { // CHECK: @ff1 - // CHECK: load atomic i32* {{.*}} monotonic + // CHECK: load atomic i32, i32* {{.*}} monotonic return __c11_atomic_load(d, memory_order_relaxed); } @@ -129,7 +129,7 @@ float ff3(_Atomic(float) *d) { int* fp1(_Atomic(int*) *p) { // CHECK: @fp1 - // CHECK: load atomic i64* {{.*}} seq_cst + // CHECK: load atomic i64, i64* {{.*}} seq_cst return __c11_atomic_load(p, memory_order_seq_cst); } diff --git a/test/CodeGen/block-byref-aggr.c b/test/CodeGen/block-byref-aggr.c index 2d162a9ce6..910f6da3cc 100644 --- a/test/CodeGen/block-byref-aggr.c +++ b/test/CodeGen/block-byref-aggr.c @@ -20,7 +20,7 @@ void test0() { // CHECK-NEXT: store i32 [[RESULT]], i32* [[T0]] // Check that we properly assign into the forwarding pointer. 
// CHECK-NEXT: [[A_FORWARDING:%.*]] = getelementptr inbounds [[BYREF]], [[BYREF]]* [[A]], i32 0, i32 1 -// CHECK-NEXT: [[T0:%.*]] = load [[BYREF]]** [[A_FORWARDING]] +// CHECK-NEXT: [[T0:%.*]] = load [[BYREF]]*, [[BYREF]]** [[A_FORWARDING]] // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[BYREF]], [[BYREF]]* [[T0]], i32 0, i32 4 // CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8* // CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8* @@ -46,14 +46,14 @@ void test1() { // CHECK-NEXT: store i32 [[RESULT]], i32* [[T0]] // Check that we properly assign into the forwarding pointer, first for b: // CHECK-NEXT: [[B_FORWARDING:%.*]] = getelementptr inbounds [[B_BYREF]], [[B_BYREF]]* [[B]], i32 0, i32 1 -// CHECK-NEXT: [[T0:%.*]] = load [[B_BYREF]]** [[B_FORWARDING]] +// CHECK-NEXT: [[T0:%.*]] = load [[B_BYREF]]*, [[B_BYREF]]** [[B_FORWARDING]] // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[B_BYREF]], [[B_BYREF]]* [[T0]], i32 0, i32 4 // CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8* // CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8* // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T2]], i8* [[T3]], i64 4, i32 4, i1 false) // Then for 'a': // CHECK-NEXT: [[A_FORWARDING:%.*]] = getelementptr inbounds [[A_BYREF]], [[A_BYREF]]* [[A]], i32 0, i32 1 -// CHECK-NEXT: [[T0:%.*]] = load [[A_BYREF]]** [[A_FORWARDING]] +// CHECK-NEXT: [[T0:%.*]] = load [[A_BYREF]]*, [[A_BYREF]]** [[A_FORWARDING]] // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[A_BYREF]], [[A_BYREF]]* [[T0]], i32 0, i32 4 // CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8* // CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8* diff --git a/test/CodeGen/blocks-seq.c b/test/CodeGen/blocks-seq.c index 7ee8860a86..a7851d66d5 100644 --- a/test/CodeGen/blocks-seq.c +++ b/test/CodeGen/blocks-seq.c @@ -2,10 +2,10 @@ // CHECK: [[Vi:%.+]] = alloca %struct.__block_byref_i, align 8 // CHECK: call i32 (...)* @rhs() // CHECK: [[V7:%.+]] = getelementptr inbounds %struct.__block_byref_i, %struct.__block_byref_i* [[Vi]], i32 0, i32 1 -// CHECK: load %struct.__block_byref_i** [[V7]] +// CHECK: load %struct.__block_byref_i*, %struct.__block_byref_i** [[V7]] // CHECK: call i32 (...)* @rhs() // CHECK: [[V11:%.+]] = getelementptr inbounds %struct.__block_byref_i, %struct.__block_byref_i* [[Vi]], i32 0, i32 1 -// CHECK: load %struct.__block_byref_i** [[V11]] +// CHECK: load %struct.__block_byref_i*, %struct.__block_byref_i** [[V11]] int rhs(); diff --git a/test/CodeGen/blocks.c b/test/CodeGen/blocks.c index 5871e8c242..2a81826911 100644 --- a/test/CodeGen/blocks.c +++ b/test/CodeGen/blocks.c @@ -72,7 +72,7 @@ void (^b)() = ^{}; int main() { (b?: ^{})(); } -// CHECK: [[ZERO:%.*]] = load void (...)** @b +// CHECK: [[ZERO:%.*]] = load void (...)*, void (...)** @b // CHECK-NEXT: [[TB:%.*]] = icmp ne void (...)* [[ZERO]], null // CHECK-NEXT: br i1 [[TB]], label [[CT:%.*]], label [[CF:%.*]] // CHECK: [[ONE:%.*]] = bitcast void (...)* [[ZERO]] to void ()* diff --git a/test/CodeGen/bool_test.c b/test/CodeGen/bool_test.c index cf62dba1df..b48da3748e 100644 --- a/test/CodeGen/bool_test.c +++ b/test/CodeGen/bool_test.c @@ -9,7 +9,7 @@ void f(_Bool *x, _Bool *y) { } // CHECK-LABEL: define void @f( -// CHECK: [[FROMMEM:%.*]] = load i32* % +// CHECK: [[FROMMEM:%.*]] = load i32, i32* % // CHECK: [[BOOLVAL:%.*]] = trunc i32 [[FROMMEM]] to i1 // CHECK: [[TOMEM:%.*]] = zext i1 [[BOOLVAL]] to i32 // CHECK: store i32 [[TOMEM]] diff --git a/test/CodeGen/builtin-assume.c b/test/CodeGen/builtin-assume.c index 
8411b729ab..8f83f17377 100644 --- a/test/CodeGen/builtin-assume.c +++ b/test/CodeGen/builtin-assume.c @@ -4,7 +4,7 @@ // CHECK-LABEL: @test1 int test1(int *a, int i) { // CHECK: store i32* %a, i32** [[A_ADDR:%.+]], align -// CHECK: [[A:%.+]] = load i32** [[A_ADDR]] +// CHECK: [[A:%.+]] = load i32*, i32** [[A_ADDR]] // CHECK: [[CMP:%.+]] = icmp ne i32* [[A]], null // CHECK: call void @llvm.assume(i1 [[CMP]]) #ifdef _MSC_VER @@ -14,7 +14,7 @@ int test1(int *a, int i) { #endif // Nothing is generated for an assume with side effects... -// CHECK-NOT: load i32** %i.addr +// CHECK-NOT: load i32*, i32** %i.addr // CHECK-NOT: call void @llvm.assume #ifdef _MSC_VER __assume(++i != 0) diff --git a/test/CodeGen/c-strings.c b/test/CodeGen/c-strings.c index 36934e81d5..71d4ba35f4 100644 --- a/test/CodeGen/c-strings.c +++ b/test/CodeGen/c-strings.c @@ -34,7 +34,7 @@ void f0() { void f1() { static char *x = "hello"; bar(x); - // CHECK: [[T1:%.*]] = load i8** @f1.x + // CHECK: [[T1:%.*]] = load i8*, i8** @f1.x // CHECK: call void @bar(i8* [[T1:%.*]]) } diff --git a/test/CodeGen/c11atomics-ios.c b/test/CodeGen/c11atomics-ios.c index 01d5acf315..a869982b17 100644 --- a/test/CodeGen/c11atomics-ios.c +++ b/test/CodeGen/c11atomics-ios.c @@ -13,22 +13,22 @@ void testFloat(_Atomic(float) *fp) { // CHECK-NEXT: [[F:%.*]] = alloca float // CHECK-NEXT: store float* {{%.*}}, float** [[FP]] -// CHECK-NEXT: [[T0:%.*]] = load float** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load float*, float** [[FP]] // CHECK-NEXT: store float 1.000000e+00, float* [[T0]], align 4 __c11_atomic_init(fp, 1.0f); // CHECK-NEXT: store float 2.000000e+00, float* [[X]], align 4 _Atomic(float) x = 2.0f; -// CHECK-NEXT: [[T0:%.*]] = load float** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load float*, float** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast float* [[T0]] to i32* -// CHECK-NEXT: [[T2:%.*]] = load atomic i32* [[T1]] seq_cst, align 4 +// CHECK-NEXT: [[T2:%.*]] = load atomic i32, i32* [[T1]] seq_cst, align 4 // CHECK-NEXT: [[T3:%.*]] = bitcast i32 [[T2]] to float // CHECK-NEXT: store float [[T3]], float* [[F]] float f = *fp; -// CHECK-NEXT: [[T0:%.*]] = load float* [[F]], align 4 -// CHECK-NEXT: [[T1:%.*]] = load float** [[FP]], align 4 +// CHECK-NEXT: [[T0:%.*]] = load float, float* [[F]], align 4 +// CHECK-NEXT: [[T1:%.*]] = load float*, float** [[FP]], align 4 // CHECK-NEXT: [[T2:%.*]] = bitcast float [[T0]] to i32 // CHECK-NEXT: [[T3:%.*]] = bitcast float* [[T1]] to i32* // CHECK-NEXT: store atomic i32 [[T2]], i32* [[T3]] seq_cst, align 4 @@ -46,7 +46,7 @@ void testComplexFloat(_Atomic(_Complex float) *fp) { // CHECK-NEXT: [[TMP1:%.*]] = alloca [[CF]], align 8 // CHECK-NEXT: store [[CF]]* -// CHECK-NEXT: [[P:%.*]] = load [[CF]]** [[FP]] +// CHECK-NEXT: [[P:%.*]] = load [[CF]]*, [[CF]]** [[FP]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[P]], i32 0, i32 0 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[P]], i32 0, i32 1 // CHECK-NEXT: store float 1.000000e+00, float* [[T0]] @@ -59,15 +59,15 @@ void testComplexFloat(_Atomic(_Complex float) *fp) { // CHECK-NEXT: store float 0.000000e+00, float* [[T1]] _Atomic(_Complex float) x = 2.0f; -// CHECK-NEXT: [[T0:%.*]] = load [[CF]]** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load [[CF]]*, [[CF]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[CF]]* [[T0]] to i64* -// CHECK-NEXT: [[T2:%.*]] = load atomic i64* [[T1]] seq_cst, align 8 +// CHECK-NEXT: [[T2:%.*]] = load atomic i64, i64* [[T1]] seq_cst, align 8 // CHECK-NEXT: [[T3:%.*]] = bitcast [[CF]]* [[TMP0]] to i64* // 
CHECK-NEXT: store i64 [[T2]], i64* [[T3]], align 8 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP0]], i32 0, i32 0 -// CHECK-NEXT: [[R:%.*]] = load float* [[T0]] +// CHECK-NEXT: [[R:%.*]] = load float, float* [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP0]], i32 0, i32 1 -// CHECK-NEXT: [[I:%.*]] = load float* [[T0]] +// CHECK-NEXT: [[I:%.*]] = load float, float* [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 0 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 1 // CHECK-NEXT: store float [[R]], float* [[T0]] @@ -75,16 +75,16 @@ void testComplexFloat(_Atomic(_Complex float) *fp) { _Complex float f = *fp; // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 0 -// CHECK-NEXT: [[R:%.*]] = load float* [[T0]] +// CHECK-NEXT: [[R:%.*]] = load float, float* [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 1 -// CHECK-NEXT: [[I:%.*]] = load float* [[T0]] -// CHECK-NEXT: [[DEST:%.*]] = load [[CF]]** [[FP]], align 4 +// CHECK-NEXT: [[I:%.*]] = load float, float* [[T0]] +// CHECK-NEXT: [[DEST:%.*]] = load [[CF]]*, [[CF]]** [[FP]], align 4 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP1]], i32 0, i32 0 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP1]], i32 0, i32 1 // CHECK-NEXT: store float [[R]], float* [[T0]] // CHECK-NEXT: store float [[I]], float* [[T1]] // CHECK-NEXT: [[T0:%.*]] = bitcast [[CF]]* [[TMP1]] to i64* -// CHECK-NEXT: [[T1:%.*]] = load i64* [[T0]], align 8 +// CHECK-NEXT: [[T1:%.*]] = load i64, i64* [[T0]], align 8 // CHECK-NEXT: [[T2:%.*]] = bitcast [[CF]]* [[DEST]] to i64* // CHECK-NEXT: store atomic i64 [[T1]], i64* [[T2]] seq_cst, align 8 *fp = f; @@ -101,7 +101,7 @@ void testStruct(_Atomic(S) *fp) { // CHECK-NEXT: [[TMP0:%.*]] = alloca [[S]], align 8 // CHECK-NEXT: store [[S]]* -// CHECK-NEXT: [[P:%.*]] = load [[S]]** [[FP]] +// CHECK-NEXT: [[P:%.*]] = load [[S]]*, [[S]]** [[FP]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 0 // CHECK-NEXT: store i16 1, i16* [[T0]], align 2 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 1 @@ -122,19 +122,19 @@ void testStruct(_Atomic(S) *fp) { // CHECK-NEXT: store i16 4, i16* [[T0]], align 2 _Atomic(S) x = (S){1,2,3,4}; -// CHECK-NEXT: [[T0:%.*]] = load [[S]]** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[T0]] to i64* -// CHECK-NEXT: [[T2:%.*]] = load atomic i64* [[T1]] seq_cst, align 8 +// CHECK-NEXT: [[T2:%.*]] = load atomic i64, i64* [[T1]] seq_cst, align 8 // CHECK-NEXT: [[T3:%.*]] = bitcast [[S]]* [[F]] to i64* // CHECK-NEXT: store i64 [[T2]], i64* [[T3]], align 2 S f = *fp; -// CHECK-NEXT: [[T0:%.*]] = load [[S]]** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[TMP0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [[S]]* [[F]] to i8* // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T1]], i8* [[T2]], i32 8, i32 2, i1 false) // CHECK-NEXT: [[T3:%.*]] = bitcast [[S]]* [[TMP0]] to i64* -// CHECK-NEXT: [[T4:%.*]] = load i64* [[T3]], align 8 +// CHECK-NEXT: [[T4:%.*]] = load i64, i64* [[T3]], align 8 // CHECK-NEXT: [[T5:%.*]] = bitcast [[S]]* [[T0]] to i64* // CHECK-NEXT: store atomic i64 [[T4]], i64* [[T5]] seq_cst, align 8 *fp = f; @@ -152,7 +152,7 @@ void 
testPromotedStruct(_Atomic(PS) *fp) { // CHECK-NEXT: [[TMP1:%.*]] = alloca [[APS]], align 8 // CHECK-NEXT: store [[APS]]* -// CHECK-NEXT: [[P:%.*]] = load [[APS]]** [[FP]] +// CHECK-NEXT: [[P:%.*]] = load [[APS]]*, [[APS]]** [[FP]] // CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[P]] to i8* // CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false) // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0 @@ -175,9 +175,9 @@ void testPromotedStruct(_Atomic(PS) *fp) { // CHECK-NEXT: store i16 3, i16* [[T1]], align 2 _Atomic(PS) x = (PS){1,2,3}; -// CHECK-NEXT: [[T0:%.*]] = load [[APS]]** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[APS]]* [[T0]] to i64* -// CHECK-NEXT: [[T2:%.*]] = load atomic i64* [[T1]] seq_cst, align 8 +// CHECK-NEXT: [[T2:%.*]] = load atomic i64, i64* [[T1]] seq_cst, align 8 // CHECK-NEXT: [[T3:%.*]] = bitcast [[APS]]* [[TMP0]] to i64* // CHECK-NEXT: store i64 [[T2]], i64* [[T3]], align 2 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP0]], i32 0, i32 0 @@ -186,7 +186,7 @@ void testPromotedStruct(_Atomic(PS) *fp) { // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T1]], i8* [[T2]], i32 6, i32 2, i1 false) PS f = *fp; -// CHECK-NEXT: [[T0:%.*]] = load [[APS]]** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[TMP1]] to i8* // CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T1]], i8 0, i32 8, i32 8, i1 false) // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP1]], i32 0, i32 0 @@ -194,7 +194,7 @@ void testPromotedStruct(_Atomic(PS) *fp) { // CHECK-NEXT: [[T3:%.*]] = bitcast [[PS]]* [[F]] to i8* // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T2]], i8* [[T3]], i32 6, i32 2, i1 false) // CHECK-NEXT: [[T4:%.*]] = bitcast [[APS]]* [[TMP1]] to i64* -// CHECK-NEXT: [[T5:%.*]] = load i64* [[T4]], align 8 +// CHECK-NEXT: [[T5:%.*]] = load i64, i64* [[T4]], align 8 // CHECK-NEXT: [[T6:%.*]] = bitcast [[APS]]* [[T0]] to i64* // CHECK-NEXT: store atomic i64 [[T5]], i64* [[T6]] seq_cst, align 8 *fp = f; diff --git a/test/CodeGen/c11atomics.c b/test/CodeGen/c11atomics.c index 29c8924861..a35eef9426 100644 --- a/test/CodeGen/c11atomics.c +++ b/test/CodeGen/c11atomics.c @@ -147,23 +147,23 @@ void testFloat(_Atomic(float) *fp) { // CHECK-NEXT: [[TMP1:%.*]] = alloca float // CHECK-NEXT: store float* {{%.*}}, float** [[FP]] -// CHECK-NEXT: [[T0:%.*]] = load float** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load float*, float** [[FP]] // CHECK-NEXT: store float 1.000000e+00, float* [[T0]], align 4 __c11_atomic_init(fp, 1.0f); // CHECK-NEXT: store float 2.000000e+00, float* [[X]], align 4 _Atomic(float) x = 2.0f; -// CHECK-NEXT: [[T0:%.*]] = load float** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load float*, float** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast float* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast float* [[TMP0]] to i8* // CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 4, i8* [[T1]], i8* [[T2]], i32 5) -// CHECK-NEXT: [[T3:%.*]] = load float* [[TMP0]], align 4 +// CHECK-NEXT: [[T3:%.*]] = load float, float* [[TMP0]], align 4 // CHECK-NEXT: store float [[T3]], float* [[F]] float f = *fp; -// CHECK-NEXT: [[T0:%.*]] = load float* [[F]], align 4 -// CHECK-NEXT: [[T1:%.*]] = load float** [[FP]], align 4 +// CHECK-NEXT: [[T0:%.*]] = load float, float* [[F]], align 4 +// CHECK-NEXT: [[T1:%.*]] = load float*, float** 
[[FP]], align 4 // CHECK-NEXT: store float [[T0]], float* [[TMP1]], align 4 // CHECK-NEXT: [[T2:%.*]] = bitcast float* [[T1]] to i8* // CHECK-NEXT: [[T3:%.*]] = bitcast float* [[TMP1]] to i8* @@ -182,7 +182,7 @@ void testComplexFloat(_Atomic(_Complex float) *fp) { // CHECK-NEXT: [[TMP1:%.*]] = alloca [[CF]], align 8 // CHECK-NEXT: store [[CF]]* -// CHECK-NEXT: [[P:%.*]] = load [[CF]]** [[FP]] +// CHECK-NEXT: [[P:%.*]] = load [[CF]]*, [[CF]]** [[FP]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[P]], i32 0, i32 0 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[P]], i32 0, i32 1 // CHECK-NEXT: store float 1.000000e+00, float* [[T0]] @@ -195,14 +195,14 @@ void testComplexFloat(_Atomic(_Complex float) *fp) { // CHECK-NEXT: store float 0.000000e+00, float* [[T1]] _Atomic(_Complex float) x = 2.0f; -// CHECK-NEXT: [[T0:%.*]] = load [[CF]]** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load [[CF]]*, [[CF]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[CF]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [[CF]]* [[TMP0]] to i8* // CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 8, i8* [[T1]], i8* [[T2]], i32 5) // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP0]], i32 0, i32 0 -// CHECK-NEXT: [[R:%.*]] = load float* [[T0]] +// CHECK-NEXT: [[R:%.*]] = load float, float* [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP0]], i32 0, i32 1 -// CHECK-NEXT: [[I:%.*]] = load float* [[T0]] +// CHECK-NEXT: [[I:%.*]] = load float, float* [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 0 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 1 // CHECK-NEXT: store float [[R]], float* [[T0]] @@ -210,10 +210,10 @@ void testComplexFloat(_Atomic(_Complex float) *fp) { _Complex float f = *fp; // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 0 -// CHECK-NEXT: [[R:%.*]] = load float* [[T0]] +// CHECK-NEXT: [[R:%.*]] = load float, float* [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[F]], i32 0, i32 1 -// CHECK-NEXT: [[I:%.*]] = load float* [[T0]] -// CHECK-NEXT: [[DEST:%.*]] = load [[CF]]** [[FP]], align 4 +// CHECK-NEXT: [[I:%.*]] = load float, float* [[T0]] +// CHECK-NEXT: [[DEST:%.*]] = load [[CF]]*, [[CF]]** [[FP]], align 4 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP1]], i32 0, i32 0 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP1]], i32 0, i32 1 // CHECK-NEXT: store float [[R]], float* [[T0]] @@ -236,7 +236,7 @@ void testStruct(_Atomic(S) *fp) { // CHECK-NEXT: [[TMP0:%.*]] = alloca [[S]], align 8 // CHECK-NEXT: store [[S]]* -// CHECK-NEXT: [[P:%.*]] = load [[S]]** [[FP]] +// CHECK-NEXT: [[P:%.*]] = load [[S]]*, [[S]]** [[FP]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 0 // CHECK-NEXT: store i16 1, i16* [[T0]], align 2 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 1 @@ -257,13 +257,13 @@ void testStruct(_Atomic(S) *fp) { // CHECK-NEXT: store i16 4, i16* [[T0]], align 2 _Atomic(S) x = (S){1,2,3,4}; -// CHECK-NEXT: [[T0:%.*]] = load [[S]]** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [[S]]* [[F]] to i8* // CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 8, i8* [[T1]], i8* [[T2]], i32 5) S f = *fp; -// CHECK-NEXT: [[T0:%.*]] = load [[S]]** 
[[FP]] +// CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[TMP0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [[S]]* [[F]] to i8* // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T1]], i8* [[T2]], i32 8, i32 2, i1 false) @@ -289,7 +289,7 @@ void testPromotedStruct(_Atomic(PS) *fp) { // CHECK-NEXT: [[TMP3:%.*]] = alloca [[APS]], align 8 // CHECK-NEXT: store [[APS]]* -// CHECK-NEXT: [[P:%.*]] = load [[APS]]** [[FP]] +// CHECK-NEXT: [[P:%.*]] = load [[APS]]*, [[APS]]** [[FP]] // CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[P]] to i8* // CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false) // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0 @@ -312,7 +312,7 @@ void testPromotedStruct(_Atomic(PS) *fp) { // CHECK-NEXT: store i16 3, i16* [[T1]], align 2 _Atomic(PS) x = (PS){1,2,3}; -// CHECK-NEXT: [[T0:%.*]] = load [[APS]]** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[APS]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [[APS]]* [[TMP0]] to i8* // CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 8, i8* [[T1]], i8* [[T2]], i32 5) @@ -322,7 +322,7 @@ void testPromotedStruct(_Atomic(PS) *fp) { // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T1]], i8* [[T2]], i32 6, i32 2, i1 false) PS f = *fp; -// CHECK-NEXT: [[T0:%.*]] = load [[APS]]** [[FP]] +// CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[TMP1]] to i8* // CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T1]], i8 0, i32 8, i32 8, i1 false) // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP1]], i32 0, i32 0 @@ -334,7 +334,7 @@ void testPromotedStruct(_Atomic(PS) *fp) { // CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 8, i8* [[T4]], i8* [[T5]], i32 5) *fp = f; -// CHECK-NEXT: [[T0:%.*]] = load [[APS]]** [[FP]], align 4 +// CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]], align 4 // CHECK-NEXT: [[T1:%.*]] = bitcast [[APS]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [[APS]]* [[TMP3]] to i8* // CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 8, i8* [[T1]], i8* [[T2]], i32 5) @@ -343,7 +343,7 @@ void testPromotedStruct(_Atomic(PS) *fp) { // CHECK-NEXT: [[T2:%.*]] = bitcast %struct.PS* [[T0]] to i8* // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[T1]], i8* [[T2]], i32 6, i32 2, i1 false) // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds %struct.PS, %struct.PS* [[TMP2]], i32 0, i32 0 -// CHECK-NEXT: [[T1:%.*]] = load i16* [[T0]], align 2 +// CHECK-NEXT: [[T1:%.*]] = load i16, i16* [[T0]], align 2 // CHECK-NEXT: [[T2:%.*]] = sext i16 [[T1]] to i32 // CHECK-NEXT: store i32 [[T2]], i32* [[A]], align 4 int a = ((PS)*fp).x; diff --git a/test/CodeGen/capture-complex-expr-in-block.c b/test/CodeGen/capture-complex-expr-in-block.c index 615f0e7bd4..20e078e661 100644 --- a/test/CodeGen/capture-complex-expr-in-block.c +++ b/test/CodeGen/capture-complex-expr-in-block.c @@ -15,6 +15,6 @@ int main () // CHECK-LABEL: define internal void @__main_block_invoke // CHECK: [[C1:%.*]] = alloca { double, double }, align 8 // CHECK: [[RP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[C1]], i32 0, i32 0 -// CHECK-NEXT: [[R:%.*]] = load double* [[RP]] +// CHECK-NEXT: [[R:%.*]] = load double, double* [[RP]] // CHECK-NEXT: [[IP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[C1]], i32 0, 
i32 1 -// CHECK-NEXT: [[I:%.*]] = load double* [[IP]] +// CHECK-NEXT: [[I:%.*]] = load double, double* [[IP]] diff --git a/test/CodeGen/captured-statements-nested.c b/test/CodeGen/captured-statements-nested.c index c90711b7ac..646424339b 100644 --- a/test/CodeGen/captured-statements-nested.c +++ b/test/CodeGen/captured-statements-nested.c @@ -32,59 +32,59 @@ void test_nest_captured_stmt(int param, int size, int param_arr[size]) { // CHECK1: define internal void @__captured_stmt{{.*}}([[T]] // CHECK1: [[PARAM_ARR_SIZE_REF:%.+]] = getelementptr inbounds [[T]], [[T]]* {{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 5 - // CHECK1: [[PARAM_ARR_SIZE:%.+]] = load [[SIZE_TYPE]]* [[PARAM_ARR_SIZE_REF]] + // CHECK1: [[PARAM_ARR_SIZE:%.+]] = load [[SIZE_TYPE]], [[SIZE_TYPE]]* [[PARAM_ARR_SIZE_REF]] // CHECK1: [[ARR_SIZE1_REF:%.+]] = getelementptr inbounds [[T]], [[T]]* {{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 8 - // CHECK1: [[ARR_SIZE1:%.+]] = load [[SIZE_TYPE]]* [[ARR_SIZE1_REF]] + // CHECK1: [[ARR_SIZE1:%.+]] = load [[SIZE_TYPE]], [[SIZE_TYPE]]* [[ARR_SIZE1_REF]] // CHECK1: [[ARR_SIZE2_REF:%.+]] = getelementptr inbounds [[T]], [[T]]* {{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 9 - // CHECK1: [[ARR_SIZE2:%.+]] = load [[SIZE_TYPE]]* [[ARR_SIZE2_REF]] + // CHECK1: [[ARR_SIZE2:%.+]] = load [[SIZE_TYPE]], [[SIZE_TYPE]]* [[ARR_SIZE2_REF]] // // CHECK1: getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 2 - // CHECK1-NEXT: load %struct.A** + // CHECK1-NEXT: load %struct.A*, %struct.A** // CHECK1-NEXT: getelementptr inbounds %struct.A, %struct.A* // CHECK1-NEXT: store i{{.+}} 1 // // CHECK1: getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK1-NEXT: load i{{[0-9]+}}** + // CHECK1-NEXT: load i{{[0-9]+}}*, i{{[0-9]+}}** // CHECK1-NEXT: store i{{[0-9]+}} 1 // // CHECK1: getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK1-NEXT: load i{{[0-9]+}}** + // CHECK1-NEXT: load i{{[0-9]+}}*, i{{[0-9]+}}** // CHECK1-NEXT: store i{{[0-9]+}} 1 // // CHECK1: getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 4 - // CHECK1-NEXT: load i{{[0-9]+}}** - // CHECK1-NEXT: load i{{[0-9]+}}* + // CHECK1-NEXT: load i{{[0-9]+}}*, i{{[0-9]+}}** + // CHECK1-NEXT: load i{{[0-9]+}}, i{{[0-9]+}}* // CHECK1-NEXT: getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 3 - // CHECK1-NEXT: load i{{[0-9]+}}*** - // CHECK1-NEXT: load i{{[0-9]+}}** + // CHECK1-NEXT: load i{{[0-9]+}}**, i{{[0-9]+}}*** + // CHECK1-NEXT: load i{{[0-9]+}}*, i{{[0-9]+}}** // CHECK1-NEXT: store i{{[0-9]+}} // // CHECK1: getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 2 - // CHECK1-NEXT: load %struct.A** + // CHECK1-NEXT: load %struct.A*, %struct.A** // CHECK1-NEXT: getelementptr inbounds %struct.A, %struct.A* // CHECK1-NEXT: store float // // CHECK1: getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 2 - // CHECK1-NEXT: load %struct.A** + // CHECK1-NEXT: load %struct.A*, %struct.A** // CHECK1-NEXT: getelementptr inbounds %struct.A, %struct.A* // CHECK1-NEXT: store i8 99 // // CHECK1: [[SIZE_ADDR_REF:%.*]] = getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{.+}} 0, i{{.+}} 7 - // CHECK1-DAG: [[SIZE_ADDR:%.*]] = load i{{.+}}** [[SIZE_ADDR_REF]] - // CHECK1-DAG: [[SIZE:%.*]] = load i{{.+}}* [[SIZE_ADDR]] + // CHECK1-DAG: [[SIZE_ADDR:%.*]] = load i{{.+}}*, i{{.+}}** [[SIZE_ADDR_REF]] + // CHECK1-DAG: [[SIZE:%.*]] = load i{{.+}}, i{{.+}}* [[SIZE_ADDR]] // CHECK1-DAG: [[PARAM_ARR_IDX:%.*]] = sub nsw i{{.+}} [[SIZE]], 1 // CHECK1-DAG: 
[[PARAM_ARR_ADDR_REF:%.*]] = getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{.+}} 0, i{{.+}} 6 - // CHECK1-DAG: [[PARAM_ARR_ADDR:%.*]] = load i{{.+}}*** [[PARAM_ARR_ADDR_REF]] - // CHECK1-DAG: [[PARAM_ARR:%.*]] = load i{{.+}}** [[PARAM_ARR_ADDR]] + // CHECK1-DAG: [[PARAM_ARR_ADDR:%.*]] = load i{{.+}}**, i{{.+}}*** [[PARAM_ARR_ADDR_REF]] + // CHECK1-DAG: [[PARAM_ARR:%.*]] = load i{{.+}}*, i{{.+}}** [[PARAM_ARR_ADDR]] // CHECK1-DAG: [[PARAM_ARR_SIZE_MINUS_1_ADDR:%.*]] = getelementptr inbounds i{{.+}}, i{{.+}}* [[PARAM_ARR]], i{{.*}} // CHECK1: store i{{.+}} 2, i{{.+}}* [[PARAM_ARR_SIZE_MINUS_1_ADDR]] // // CHECK1: [[Z_ADDR_REF:%.*]] = getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{.+}} 0, i{{.+}} 2 - // CHECK1-DAG: [[Z_ADDR:%.*]] = load %struct.A** [[Z_ADDR_REF]] + // CHECK1-DAG: [[Z_ADDR:%.*]] = load %struct.A*, %struct.A** [[Z_ADDR_REF]] // CHECK1-DAG: [[Z_A_ADDR:%.*]] = getelementptr inbounds %struct.A, %struct.A* [[Z_ADDR]], i{{.+}} 0, i{{.+}} 0 - // CHECK1-DAG: [[ARR_IDX_2:%.*]] = load i{{.+}}* [[Z_A_ADDR]] + // CHECK1-DAG: [[ARR_IDX_2:%.*]] = load i{{.+}}, i{{.+}}* [[Z_A_ADDR]] // CHECK1-DAG: [[ARR_ADDR_REF:%.*]] = getelementptr inbounds [[T]], [[T]]* {{.*}}, i{{.+}} 0, i{{.+}} 10 - // CHECK1-DAG: [[ARR_ADDR:%.*]] = load i{{.+}}** [[ARR_ADDR_REF]] + // CHECK1-DAG: [[ARR_ADDR:%.*]] = load i{{.+}}*, i{{.+}}** [[ARR_ADDR_REF]] // CHECK1-DAG: [[ARR_IDX_1:%.*]] = mul {{.*}} 10 // CHECK1-DAG: [[ARR_10_ADDR:%.*]] = getelementptr inbounds i{{.+}}, i{{.+}}* [[ARR_ADDR]], i{{.*}} [[ARR_IDX_1]] // CHECK1-DAG: [[ARR_10_Z_A_ADDR:%.*]] = getelementptr inbounds i{{.+}}, i{{.+}}* [[ARR_10_ADDR]], i{{.*}} @@ -143,12 +143,12 @@ void test_nest_block() { // CHECK2: [[CapA:%[0-9a-z_.]*]] = getelementptr inbounds {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 7 // // CHECK2: getelementptr inbounds %struct.anon{{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK2: load i{{[0-9]+}}** - // CHECK2: load i{{[0-9]+}}* + // CHECK2: load i{{[0-9]+}}*, i{{[0-9]+}}** + // CHECK2: load i{{[0-9]+}}, i{{[0-9]+}}* // CHECK2: store i{{[0-9]+}} {{.*}}, i{{[0-9]+}}* [[CapA]] // // CHECK2: [[CapC:%[0-9a-z_.]*]] = getelementptr inbounds {{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 8 - // CHECK2-NEXT: [[Val:%[0-9a-z_]*]] = load i{{[0-9]+}}* [[C]] + // CHECK2-NEXT: [[Val:%[0-9a-z_]*]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C]] // CHECK2-NEXT: store i{{[0-9]+}} [[Val]], i{{[0-9]+}}* [[CapC]] // // CHECK2: bitcast %struct.__block_byref_d* diff --git a/test/CodeGen/captured-statements.c b/test/CodeGen/captured-statements.c index 262b1638ea..64af3c0604 100644 --- a/test/CodeGen/captured-statements.c +++ b/test/CodeGen/captured-statements.c @@ -27,8 +27,8 @@ void test1() { // CHECK-1: define internal void @[[HelperName]](%struct.anon // CHECK-1: getelementptr inbounds %struct.anon{{.*}}, i32 0, i32 0 -// CHECK-1: load i32** -// CHECK-1: load i32* +// CHECK-1: load i32*, i32** +// CHECK-1: load i32, i32* // CHECK-1: add nsw i32 // CHECK-1: store i32 @@ -70,7 +70,7 @@ void test4(intptr_t size, intptr_t vla_arr[size]) { } // CHECK-3: test4([[INTPTR_T:i.+]] {{.*}}[[SIZE_ARG:%.+]], [[INTPTR_T]]* // CHECK-3: store [[INTPTR_T]] {{.*}}[[SIZE_ARG]], [[INTPTR_T]]* [[SIZE_ADDR:%.+]], - // CHECK-3: [[SIZE:%.+]] = load [[INTPTR_T]]* [[SIZE_ADDR]], + // CHECK-3: [[SIZE:%.+]] = load [[INTPTR_T]], [[INTPTR_T]]* [[SIZE_ADDR]], // CHECK-3: [[REF:%.+]] = getelementptr inbounds // CHECK-3: store [[INTPTR_T]] [[SIZE]], [[INTPTR_T]]* [[REF]] // CHECK-3: call void @__captured_stmt @@ -92,6 +92,6 @@ void dont_capture_global() { // CHECK-GLOBALS: define internal void 
@__captured_stmt[[HelperName]] // CHECK-GLOBALS-NOT: ret -// CHECK-GLOBALS: load i32* @global -// CHECK-GLOBALS: load i32* @ -// CHECK-GLOBALS: load i32* @e +// CHECK-GLOBALS: load i32, i32* @global +// CHECK-GLOBALS: load i32, i32* @ +// CHECK-GLOBALS: load i32, i32* @e diff --git a/test/CodeGen/complex-convert.c b/test/CodeGen/complex-convert.c index ac773f123f..0db2588405 100644 --- a/test/CodeGen/complex-convert.c +++ b/test/CodeGen/complex-convert.c @@ -33,108 +33,108 @@ void foo(signed char sc, unsigned char uc, signed long long sll, sc1 = csc; // CHECK: %[[VAR1:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC:[A-Za-z0-9.]+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR2:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR1]] + // CHECK-NEXT: %[[VAR2:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR1]] // CHECK-NEXT: store i[[CHSIZE]] %[[VAR2]], i[[CHSIZE]]* %[[SC1:[A-Za-z0-9.]+]], align [[CHALIGN]] sc1 = cuc; // CHECK-NEXT: %[[VAR3:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC:[A-Za-z0-9.]+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR4:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR3]] + // CHECK-NEXT: %[[VAR4:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR3]] // CHECK-NEXT: store i[[CHSIZE]] %[[VAR4]], i[[CHSIZE]]* %[[SC1]], align [[CHALIGN]] sc1 = csll; // CHECK-NEXT: %[[VAR5:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL:[A-Za-z0-9.]+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR5]] + // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR5]] // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = trunc i[[LLSIZE]] %[[VAR6]] to i[[CHSIZE]] // CHECK-NEXT: store i[[CHSIZE]] %[[VAR7]], i[[CHSIZE]]* %[[SC1]], align [[CHALIGN]] sc1 = cull; // CHECK-NEXT: %[[VAR8:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL:[A-Za-z0-9.]+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR9:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR8]] + // CHECK-NEXT: %[[VAR9:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR8]] // CHECK-NEXT: %[[VAR10:[A-Za-z0-9.]+]] = trunc i[[LLSIZE]] %[[VAR9]] to i[[CHSIZE]] // CHECK-NEXT: store i[[CHSIZE]] %[[VAR10]], i[[CHSIZE]]* %[[SC1]], align [[CHALIGN]] uc1 = csc; // CHECK-NEXT: %[[VAR11:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR12:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR11]] + // CHECK-NEXT: %[[VAR12:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR11]] // CHECK-NEXT: store i[[CHSIZE]] %[[VAR12]], i[[CHSIZE]]* %[[UC1:[A-Za-z0-9.]+]], align [[CHALIGN]] uc1 = cuc; // CHECK-NEXT: %[[VAR13:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR14:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR13]] + // CHECK-NEXT: %[[VAR14:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR13]] // CHECK-NEXT: store i[[CHSIZE]] %[[VAR14]], i[[CHSIZE]]* %[[UC1]], align [[CHALIGN]] uc1 = csll; // CHECK-NEXT: %[[VAR15:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR15]] + // 
CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR15]] // CHECK-NEXT: %[[VAR17:[A-Za-z0-9.]+]] = trunc i[[LLSIZE]] %[[VAR16]] to i[[CHSIZE]] // CHECK-NEXT: store i[[CHSIZE]] %[[VAR17]], i[[CHSIZE]]* %[[UC1]], align [[CHALIGN]] uc1 = cull; // CHECK-NEXT: %[[VAR18:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR19:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR18]] + // CHECK-NEXT: %[[VAR19:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR18]] // CHECK-NEXT: %[[VAR20:[A-Za-z0-9.]+]] = trunc i[[LLSIZE]] %[[VAR19]] to i[[CHSIZE]] // CHECK-NEXT: store i[[CHSIZE]] %[[VAR20]], i[[CHSIZE]]* %[[UC1]], align [[CHALIGN]] sll1 = csc; // CHECK-NEXT: %[[VAR21:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR22:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR21]] + // CHECK-NEXT: %[[VAR22:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR21]] // CHECK-NEXT: %[[VAR23:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR22]] to i[[LLSIZE]] // CHECK-NEXT: store i[[LLSIZE]] %[[VAR23]], i[[LLSIZE]]* %[[SLL1:[A-Za-z0-9]+]], align [[LLALIGN]] sll1 = cuc; // CHECK-NEXT: %[[VAR24:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR25:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR24]] + // CHECK-NEXT: %[[VAR25:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR24]] // CHECK-NEXT: %[[VAR26:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR25]] to i[[LLSIZE]] // CHECK-NEXT: store i[[LLSIZE]] %[[VAR26]], i[[LLSIZE]]* %[[SLL1]], align [[LLALIGN]] sll1 = csll; // CHECK-NEXT: %[[VAR27:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR28:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR27]] + // CHECK-NEXT: %[[VAR28:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR27]] // CHECK-NEXT: store i[[LLSIZE]] %[[VAR28]], i[[LLSIZE]]* %[[SLL1]], align [[LLALIGN]] sll1 = cull; // CHECK-NEXT: %[[VAR29:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR30:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR29]] + // CHECK-NEXT: %[[VAR30:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR29]] // CHECK-NEXT: store i[[LLSIZE]] %[[VAR30]], i[[LLSIZE]]* %[[SLL1]], align [[LLALIGN]] ull1 = csc; // CHECK-NEXT: %[[VAR31:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR32:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR31]] + // CHECK-NEXT: %[[VAR32:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR31]] // CHECK-NEXT: %[[VAR33:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR32]] to i[[LLSIZE]] // CHECK-NEXT: store i[[LLSIZE]] %[[VAR33]], i[[LLSIZE]]* %[[ULL1:[A-Za-z0-9]+]], align [[LLALIGN]] ull1 = cuc; // CHECK-NEXT: %[[VAR34:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR35:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR34]] + // CHECK-NEXT: %[[VAR35:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR34]] // CHECK-NEXT: %[[VAR36:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] 
%[[VAR35]] to i[[LLSIZE]] // CHECK-NEXT: store i[[LLSIZE]] %[[VAR36]], i[[LLSIZE]]* %[[ULL1]], align [[LLALIGN]] ull1 = csll; // CHECK-NEXT: %[[VAR37:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR38:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR37]] + // CHECK-NEXT: %[[VAR38:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR37]] // CHECK-NEXT: store i[[LLSIZE]] %[[VAR38]], i[[LLSIZE]]* %[[ULL1]], align [[LLALIGN]] ull1 = cull; // CHECK-NEXT: %[[VAR39:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR40:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR39]] + // CHECK-NEXT: %[[VAR40:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR39]] // CHECK-NEXT: store i[[LLSIZE]] %[[VAR40]], i[[LLSIZE]]* %[[ULL1]], align [[LLALIGN]] csc1 = sc; - // CHECK-NEXT: %[[VAR41:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR:[A-Za-z0-9.]+]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR41:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR:[A-Za-z0-9.]+]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR42:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC1:[A-Za-z0-9.]+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR43:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK-NEXT: store i[[CHSIZE]] %[[VAR41]], i[[CHSIZE]]* %[[VAR42]] // CHECK-NEXT: store i[[CHSIZE]] 0, i[[CHSIZE]]* %[[VAR43]] csc1 = uc; - // CHECK-NEXT: %[[VAR44:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR:[A-Za-z0-9.]+]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR44:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR:[A-Za-z0-9.]+]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR45:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR46:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK-NEXT: store i[[CHSIZE]] %[[VAR44]], i[[CHSIZE]]* %[[VAR45]] // CHECK-NEXT: store i[[CHSIZE]] 0, i[[CHSIZE]]* %[[VAR46]] csc1 = sll; - // CHECK-NEXT: %[[VAR47:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR:[A-Za-z0-9.]+]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR47:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR:[A-Za-z0-9.]+]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR48:[A-Za-z0-9.]+]] = trunc i[[LLSIZE]] %[[VAR47]] to i[[CHSIZE]] // CHECK-NEXT: %[[VAR49:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR50:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 @@ -142,7 +142,7 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[CHSIZE]] 0, i[[CHSIZE]]* %[[VAR50]] csc1 = ull; - // CHECK-NEXT: %[[VAR51:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR:[A-Za-z0-9.]+]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR51:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR:[A-Za-z0-9.]+]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR52:[A-Za-z0-9.]+]] = trunc i[[LLSIZE]] %[[VAR51]] to i[[CHSIZE]] // CHECK-NEXT: %[[VAR53:[A-Za-z0-9.]+]] = getelementptr 
inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR54:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 @@ -150,21 +150,21 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[CHSIZE]] 0, i[[CHSIZE]]* %[[VAR54]] cuc1 = sc; - // CHECK-NEXT: %[[VAR55:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR55:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR56:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC1:[A-Za-z0-9.]+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR57:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK-NEXT: store i[[CHSIZE]] %[[VAR55]], i[[CHSIZE]]* %[[VAR56]] // CHECK-NEXT: store i[[CHSIZE]] 0, i[[CHSIZE]]* %[[VAR57]] cuc1 = uc; - // CHECK-NEXT: %[[VAR58:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR58:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR59:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR60:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK-NEXT: store i[[CHSIZE]] %[[VAR58]], i[[CHSIZE]]* %[[VAR59]] // CHECK-NEXT: store i[[CHSIZE]] 0, i[[CHSIZE]]* %[[VAR60]] cuc1 = sll; - // CHECK-NEXT: %[[VAR61:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR61:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR62:[A-Za-z0-9.]+]] = trunc i[[LLSIZE]] %[[VAR61]] to i[[CHSIZE]] // CHECK-NEXT: %[[VAR63:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR64:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 @@ -172,7 +172,7 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[CHSIZE]] 0, i[[CHSIZE]]* %[[VAR64]] cuc1 = ull; - // CHECK-NEXT: %[[VAR65:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR65:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR66:[A-Za-z0-9.]+]] = trunc i[[LLSIZE]] %[[VAR65]] to i[[CHSIZE]] // CHECK-NEXT: %[[VAR67:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR68:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 @@ -180,7 +180,7 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[CHSIZE]] 0, i[[CHSIZE]]* %[[VAR68]] csll1 = sc; - // CHECK-NEXT: %[[VAR69:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR69:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: 
%[[VAR70:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR69]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR71:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1:[A-Za-z0-9.]+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR72:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 @@ -188,7 +188,7 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] 0, i[[LLSIZE]]* %[[VAR72]] csll1 = uc; - // CHECK-NEXT: %[[VAR73:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR73:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR74:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR73]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR75:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR76:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 @@ -196,21 +196,21 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] 0, i[[LLSIZE]]* %[[VAR76]] csll1 = sll; - // CHECK-NEXT: %[[VAR77:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR77:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR78:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR79:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK-NEXT: store i[[LLSIZE]] %[[VAR77]], i[[LLSIZE]]* %[[VAR78]] // CHECK-NEXT: store i[[LLSIZE]] 0, i[[LLSIZE]]* %[[VAR79]] csll1 = ull; - // CHECK-NEXT: %[[VAR77:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR77:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR78:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR79:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK-NEXT: store i[[LLSIZE]] %[[VAR77]], i[[LLSIZE]]* %[[VAR78]] // CHECK-NEXT: store i[[LLSIZE]] 0, i[[LLSIZE]]* %[[VAR79]] cull1 = sc; - // CHECK-NEXT: %[[VAR80:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR80:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR81:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR80]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR82:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1:[A-Za-z0-9.]+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR83:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 @@ -218,7 +218,7 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] 0, i[[LLSIZE]]* %[[VAR83]] cull1 = uc; - // CHECK-NEXT: %[[VAR84:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* 
%[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR84:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR85:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR84]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR86:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR87:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 @@ -226,26 +226,26 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] 0, i[[LLSIZE]]* %[[VAR87]] cull1 = sll; - // CHECK-NEXT: %[[VAR88:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR88:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR89:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR90:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK-NEXT: store i[[LLSIZE]] %[[VAR88]], i[[LLSIZE]]* %[[VAR89]] // CHECK-NEXT: store i[[LLSIZE]] 0, i[[LLSIZE]]* %[[VAR90]] cull1 = ull; - // CHECK-NEXT: %[[VAR91:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR91:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR92:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK-NEXT: %[[VAR93:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK-NEXT: store i[[LLSIZE]] %[[VAR91]], i[[LLSIZE]]* %[[VAR92]] // CHECK-NEXT: store i[[LLSIZE]] 0, i[[LLSIZE]]* %[[VAR93]] csc1 = sc + csc; - // CHECK-NEXT: %[[VAR94:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR94:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR95:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR94]] to i[[ARSIZE:[0-9]+]] // CHECK-NEXT: %[[VAR96:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR97:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR96]] + // CHECK-NEXT: %[[VAR97:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR96]] // CHECK-NEXT: %[[VAR98:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR99:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR98]] + // CHECK-NEXT: %[[VAR99:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR98]] // CHECK-NEXT: %[[VAR100:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR97]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR101:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR99]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR102:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR95]], %[[VAR100]] @@ -258,12 +258,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[CHSIZE]] %[[VAR105]], i[[CHSIZE]]* %[[VAR107]] cuc1 = sc + cuc; - // CHECK-NEXT: %[[VAR108:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] + // CHECK-NEXT: 
%[[VAR108:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR109:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR108]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR110:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR111:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR110]] + // CHECK-NEXT: %[[VAR111:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR110]] // CHECK-NEXT: %[[VAR112:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR113:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR112]] + // CHECK-NEXT: %[[VAR113:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR112]] // CHECK-NEXT: %[[VAR114:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR111]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR115:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR113]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR116:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR109]], %[[VAR114]] @@ -276,12 +276,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[CHSIZE]] %[[VAR119]], i[[CHSIZE]]* %[[VAR121]] csll1 = sc + csll; - // CHECK-NEXT: %[[VAR122:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR122:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR123:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR122]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR124:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR125:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR124]] + // CHECK-NEXT: %[[VAR125:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR124]] // CHECK-NEXT: %[[VAR126:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR127:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR126]] + // CHECK-NEXT: %[[VAR127:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR126]] // CHECK-NEXT: %[[VAR128:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR123]], %[[VAR125]] // CHECK-NEXT: %[[VAR129:[A-Za-z0-9.]+]] = add i[[LLSIZE]] 0, %[[VAR127]] // CHECK-NEXT: %[[VAR130:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -290,12 +290,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR129]], i[[LLSIZE]]* %[[VAR131]] cull1 = sc + cull; - // CHECK-NEXT: %[[VAR132:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR132:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR133:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR132]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR134:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR135:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR134]] + // CHECK-NEXT: %[[VAR135:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR134]] // CHECK-NEXT: %[[VAR136:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR137:[A-Za-z0-9.]+]] = load 
i[[LLSIZE]]* %[[VAR136]] + // CHECK-NEXT: %[[VAR137:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR136]] // CHECK-NEXT: %[[VAR138:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR133]], %[[VAR135]] // CHECK-NEXT: %[[VAR139:[A-Za-z0-9.]+]] = add i[[LLSIZE]] 0, %[[VAR137]] // CHECK-NEXT: %[[VAR140:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -304,12 +304,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR139]], i[[LLSIZE]]* %[[VAR141]] csc1 = uc + csc; - // CHECK-NEXT: %[[VAR142:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR142:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR143:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR142]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR144:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR145:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR144]] + // CHECK-NEXT: %[[VAR145:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR144]] // CHECK-NEXT: %[[VAR146:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR147:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR146]] + // CHECK-NEXT: %[[VAR147:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR146]] // CHECK-NEXT: %[[VAR148:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR145]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR149:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR147]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR150:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR143]], %[[VAR148]] @@ -322,12 +322,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[CHSIZE]] %[[VAR153]], i[[CHSIZE]]* %[[VAR155]] cuc1 = uc + cuc; - // CHECK-NEXT: %[[VAR156:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR156:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR157:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR156]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR158:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR159:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR158]] + // CHECK-NEXT: %[[VAR159:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR158]] // CHECK-NEXT: %[[VAR160:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR161:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR160]] + // CHECK-NEXT: %[[VAR161:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR160]] // CHECK-NEXT: %[[VAR162:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR159]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR163:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR161]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR164:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR157]], %[[VAR162]] @@ -340,12 +340,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[CHSIZE]] %[[VAR167]], i[[CHSIZE]]* %[[VAR169]] csll1 = uc + csll; - // CHECK-NEXT: %[[VAR170:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR170:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], 
align [[CHALIGN]] // CHECK-NEXT: %[[VAR171:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR170]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR172:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR173:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR172]] + // CHECK-NEXT: %[[VAR173:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR172]] // CHECK-NEXT: %[[VAR174:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR175:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR174]] + // CHECK-NEXT: %[[VAR175:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR174]] // CHECK-NEXT: %[[VAR176:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR171]], %[[VAR173]] // CHECK-NEXT: %[[VAR177:[A-Za-z0-9.]+]] = add i[[LLSIZE]] 0, %[[VAR175]] // CHECK-NEXT: %[[VAR178:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -354,12 +354,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR177]], i[[LLSIZE]]* %[[VAR179]] cull1 = uc + cull; - // CHECK-NEXT: %[[VAR180:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR180:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR181:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR180]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR182:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR183:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR182]] + // CHECK-NEXT: %[[VAR183:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR182]] // CHECK-NEXT: %[[VAR184:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR185:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR184]] + // CHECK-NEXT: %[[VAR185:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR184]] // CHECK-NEXT: %[[VAR186:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR181]], %[[VAR183]] // CHECK-NEXT: %[[VAR187:[A-Za-z0-9.]+]] = add i[[LLSIZE]] 0, %[[VAR185]] // CHECK-NEXT: %[[VAR188:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -368,11 +368,11 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR187]], i[[LLSIZE]]* %[[VAR189]] csll1 = sll + csc; - // CHECK-NEXT: %[[VAR190:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR190:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR191:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR192:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR191]] + // CHECK-NEXT: %[[VAR192:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR191]] // CHECK-NEXT: %[[VAR193:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR194:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR193]] + // CHECK-NEXT: %[[VAR194:[A-Za-z0-9.]+]] = load i[[CHSIZE]], 
i[[CHSIZE]]* %[[VAR193]] // CHECK-NEXT: %[[VAR195:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR192]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR196:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR194]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR197:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR190]], %[[VAR195]] @@ -383,11 +383,11 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR198]], i[[LLSIZE]]* %[[VAR200]] csll1 = sll + cuc; - // CHECK-NEXT: %[[VAR201:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR201:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR202:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR203:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR202]] + // CHECK-NEXT: %[[VAR203:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR202]] // CHECK-NEXT: %[[VAR204:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR205:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR204]] + // CHECK-NEXT: %[[VAR205:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR204]] // CHECK-NEXT: %[[VAR206:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR203]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR207:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR205]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR208:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR201]], %[[VAR206]] @@ -398,11 +398,11 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR209]], i[[LLSIZE]]* %[[VAR211]] csll1 = sll + csll; - // CHECK-NEXT: %[[VAR212:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR212:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR213:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR214:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR213]] + // CHECK-NEXT: %[[VAR214:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR213]] // CHECK-NEXT: %[[VAR215:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR216:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR215]] + // CHECK-NEXT: %[[VAR216:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR215]] // CHECK-NEXT: %[[VAR217:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR212]], %[[VAR214]] // CHECK-NEXT: %[[VAR218:[A-Za-z0-9.]+]] = add i[[LLSIZE]] 0, %[[VAR216]] // CHECK-NEXT: %[[VAR219:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -411,11 +411,11 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR218]], i[[LLSIZE]]* %[[VAR220]] csll1 = sll + cull; - // CHECK-NEXT: %[[VAR221:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR221:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR222:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR223:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* 
%[[VAR222]] + // CHECK-NEXT: %[[VAR223:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR222]] // CHECK-NEXT: %[[VAR224:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR225:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR224]] + // CHECK-NEXT: %[[VAR225:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR224]] // CHECK-NEXT: %[[VAR226:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR221]], %[[VAR223]] // CHECK-NEXT: %[[VAR227:[A-Za-z0-9.]+]] = add i[[LLSIZE]] 0, %[[VAR225]] // CHECK-NEXT: %[[VAR228:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -424,11 +424,11 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR227]], i[[LLSIZE]]* %[[VAR229]] csll1 = ull + csc; - // CHECK-NEXT: %[[VAR230:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR230:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR231:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR232:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR231]] + // CHECK-NEXT: %[[VAR232:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR231]] // CHECK-NEXT: %[[VAR233:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR234:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR233]] + // CHECK-NEXT: %[[VAR234:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR233]] // CHECK-NEXT: %[[VAR235:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR232]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR236:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR234]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR237:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR230]], %[[VAR235]] @@ -439,11 +439,11 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR238]], i[[LLSIZE]]* %[[VAR240]] cull1 = ull + cuc; - // CHECK-NEXT: %[[VAR241:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR241:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR242:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR243:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR242]] + // CHECK-NEXT: %[[VAR243:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR242]] // CHECK-NEXT: %[[VAR244:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR245:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR244]] + // CHECK-NEXT: %[[VAR245:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR244]] // CHECK-NEXT: %[[VAR246:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR243]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR247:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR245]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR248:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR241]], %[[VAR246]] @@ -454,11 +454,11 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR249]], i[[LLSIZE]]* %[[VAR251]] csll1 = ull + csll; - // CHECK-NEXT: 
%[[VAR252:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR252:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR253:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR254:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR253]] + // CHECK-NEXT: %[[VAR254:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR253]] // CHECK-NEXT: %[[VAR255:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR256:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR255]] + // CHECK-NEXT: %[[VAR256:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR255]] // CHECK-NEXT: %[[VAR257:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR252]], %[[VAR254]] // CHECK-NEXT: %[[VAR258:[A-Za-z0-9.]+]] = add i[[LLSIZE]] 0, %[[VAR256]] // CHECK-NEXT: %[[VAR259:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -467,11 +467,11 @@ void foo(signed char sc, unsigned char uc, signed long long sll, // CHECK-NEXT: store i[[LLSIZE]] %[[VAR258]], i[[LLSIZE]]* %[[VAR260]] cull1 = ull + cull; - // CHECK-NEXT: %[[VAR261:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR261:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR262:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR263:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR262]] + // CHECK-NEXT: %[[VAR263:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR262]] // CHECK-NEXT: %[[VAR264:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR265:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR264]] + // CHECK-NEXT: %[[VAR265:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR264]] // CHECK-NEXT: %[[VAR266:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR261]], %[[VAR263]] // CHECK-NEXT: %[[VAR267:[A-Za-z0-9.]+]] = add i[[LLSIZE]] 0, %[[VAR265]] // CHECK-NEXT: %[[VAR268:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -481,12 +481,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csc1 = csc + sc; // CHECK-NEXT: %[[VAR270:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR271:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR270]] + // CHECK-NEXT: %[[VAR271:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR270]] // CHECK-NEXT: %[[VAR272:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR273:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR272]] + // CHECK-NEXT: %[[VAR273:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR272]] // CHECK-NEXT: %[[VAR274:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR271]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR275:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR273]] to i[[ARSIZE]] - // CHECK-NEXT: %[[VAR276:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align 
[[CHALIGN]] + // CHECK-NEXT: %[[VAR276:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR277:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR276]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR278:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR274]], %[[VAR277]] // CHECK-NEXT: %[[VAR279:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR275]], 0 @@ -499,12 +499,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csc1 = csc + uc; // CHECK-NEXT: %[[VAR284:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR285:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR284]] + // CHECK-NEXT: %[[VAR285:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR284]] // CHECK-NEXT: %[[VAR286:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR287:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR286]] + // CHECK-NEXT: %[[VAR287:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR286]] // CHECK-NEXT: %[[VAR288:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR285]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR289:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR287]] to i[[ARSIZE]] - // CHECK-NEXT: %[[VAR290:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR290:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR291:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR290]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR292:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR288]], %[[VAR291]] // CHECK-NEXT: %[[VAR293:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR289]], 0 @@ -517,12 +517,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csll1 = csc + sll; // CHECK-NEXT: %[[VAR298:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR299:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR298]] + // CHECK-NEXT: %[[VAR299:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR298]] // CHECK-NEXT: %[[VAR300:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR301:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR300]] + // CHECK-NEXT: %[[VAR301:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR300]] // CHECK-NEXT: %[[VAR302:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR299]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR303:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR301]] to i[[LLSIZE]] - // CHECK-NEXT: %[[VAR304:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR304:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR305:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR302]], %[[VAR304]] // CHECK-NEXT: %[[VAR306:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR303]], 0 // CHECK-NEXT: %[[VAR307:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -532,12 +532,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csll1 = csc + ull; // CHECK-NEXT: %[[VAR309:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR310:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR309]] 
+ // CHECK-NEXT: %[[VAR310:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR309]] // CHECK-NEXT: %[[VAR311:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CSC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR312:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR311]] + // CHECK-NEXT: %[[VAR312:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR311]] // CHECK-NEXT: %[[VAR313:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR310]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR314:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR312]] to i[[LLSIZE]] - // CHECK-NEXT: %[[VAR315:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR315:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR316:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR313]], %[[VAR315]] // CHECK-NEXT: %[[VAR317:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR314]], 0 // CHECK-NEXT: %[[VAR318:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -547,12 +547,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csc1 = cuc + sc; // CHECK-NEXT: %[[VAR320:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR321:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR320]] + // CHECK-NEXT: %[[VAR321:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR320]] // CHECK-NEXT: %[[VAR322:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR323:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR322]] + // CHECK-NEXT: %[[VAR323:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR322]] // CHECK-NEXT: %[[VAR324:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR321]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR325:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR323]] to i[[ARSIZE]] - // CHECK-NEXT: %[[VAR326:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR326:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR327:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR326]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR328:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR324]], %[[VAR327]] // CHECK-NEXT: %[[VAR329:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR325]], 0 @@ -565,12 +565,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, cuc1 = cuc + uc; // CHECK-NEXT: %[[VAR334:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR335:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR334]] + // CHECK-NEXT: %[[VAR335:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR334]] // CHECK-NEXT: %[[VAR336:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR337:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR336]] + // CHECK-NEXT: %[[VAR337:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR336]] // CHECK-NEXT: %[[VAR338:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR335]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR339:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR337]] to i[[ARSIZE]] - // CHECK-NEXT: %[[VAR340:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: 
%[[VAR340:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR341:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR340]] to i[[ARSIZE]] // CHECK-NEXT: %[[VAR342:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR338]], %[[VAR341]] // CHECK-NEXT: %[[VAR343:[A-Za-z0-9.]+]] = add i[[ARSIZE]] %[[VAR339]], 0 @@ -583,12 +583,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csll1 = cuc + sll; // CHECK-NEXT: %[[VAR348:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR349:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR348]] + // CHECK-NEXT: %[[VAR349:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR348]] // CHECK-NEXT: %[[VAR350:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR351:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR350]] + // CHECK-NEXT: %[[VAR351:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR350]] // CHECK-NEXT: %[[VAR352:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR349]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR353:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR351]] to i[[LLSIZE]] - // CHECK-NEXT: %[[VAR354:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR354:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR355:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR352]], %[[VAR354]] // CHECK-NEXT: %[[VAR356:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR353]], 0 // CHECK-NEXT: %[[VAR357:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -598,12 +598,12 @@ void foo(signed char sc, unsigned char uc, signed long long sll, cull1 = cuc + ull; // CHECK-NEXT: %[[VAR357:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR358:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR357]] + // CHECK-NEXT: %[[VAR358:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR357]] // CHECK-NEXT: %[[VAR359:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[CHSIZE]], i[[CHSIZE]] }, { i[[CHSIZE]], i[[CHSIZE]] }* %[[CUC]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR360:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[VAR359]] + // CHECK-NEXT: %[[VAR360:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[VAR359]] // CHECK-NEXT: %[[VAR361:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR358]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR362:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR360]] to i[[LLSIZE]] - // CHECK-NEXT: %[[VAR363:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR363:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR364:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR361]], %[[VAR363]] // CHECK-NEXT: %[[VAR365:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR362]], 0 // CHECK-NEXT: %[[VAR366:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -613,10 +613,10 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csll1 = csll + sc; // CHECK-NEXT: %[[VAR368:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: 
%[[VAR369:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR368]] + // CHECK-NEXT: %[[VAR369:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR368]] // CHECK-NEXT: %[[VAR370:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR371:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR370]] - // CHECK-NEXT: %[[VAR372:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR371:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR370]] + // CHECK-NEXT: %[[VAR372:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR373:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR372]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR374:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR369]], %[[VAR373]] // CHECK-NEXT: %[[VAR375:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR371]], 0 @@ -627,10 +627,10 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csll1 = csll + uc; // CHECK-NEXT: %[[VAR378:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR379:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR378]] + // CHECK-NEXT: %[[VAR379:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR378]] // CHECK-NEXT: %[[VAR380:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR381:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR380]] - // CHECK-NEXT: %[[VAR382:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR381:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR380]] + // CHECK-NEXT: %[[VAR382:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR383:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR382]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR384:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR379]], %[[VAR383]] // CHECK-NEXT: %[[VAR385:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR381]], 0 @@ -641,10 +641,10 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csll1 = csll + sll; // CHECK-NEXT: %[[VAR388:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR389:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR388]] + // CHECK-NEXT: %[[VAR389:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR388]] // CHECK-NEXT: %[[VAR390:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR391:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR390]] - // CHECK-NEXT: %[[VAR392:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR391:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR390]] + // CHECK-NEXT: %[[VAR392:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR393:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR389]], %[[VAR392]] // CHECK-NEXT: %[[VAR394:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR391]], 0 // CHECK-NEXT: %[[VAR395:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -654,10 +654,10 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csll1 = csll + ull; // CHECK-NEXT: 
%[[VAR397:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR398:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR397]] + // CHECK-NEXT: %[[VAR398:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR397]] // CHECK-NEXT: %[[VAR399:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR400:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR399]] - // CHECK-NEXT: %[[VAR401:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR400:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR399]] + // CHECK-NEXT: %[[VAR401:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR402:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR398]], %[[VAR401]] // CHECK-NEXT: %[[VAR403:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR400]], 0 // CHECK-NEXT: %[[VAR404:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -667,10 +667,10 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csll1 = cull + sc; // CHECK-NEXT: %[[VAR406:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR407:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR406]] + // CHECK-NEXT: %[[VAR407:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR406]] // CHECK-NEXT: %[[VAR408:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR409:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR408]] - // CHECK-NEXT: %[[VAR410:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR409:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR408]] + // CHECK-NEXT: %[[VAR410:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[SCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR411:[A-Za-z0-9.]+]] = sext i[[CHSIZE]] %[[VAR410]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR412:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR407]], %[[VAR411]] // CHECK-NEXT: %[[VAR413:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR409]], 0 @@ -681,10 +681,10 @@ void foo(signed char sc, unsigned char uc, signed long long sll, cull1 = cull + uc; // CHECK-NEXT: %[[VAR416:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR417:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR416]] + // CHECK-NEXT: %[[VAR417:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR416]] // CHECK-NEXT: %[[VAR418:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR419:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR418]] - // CHECK-NEXT: %[[VAR420:[A-Za-z0-9.]+]] = load i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] + // CHECK-NEXT: %[[VAR419:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR418]] + // CHECK-NEXT: %[[VAR420:[A-Za-z0-9.]+]] = load i[[CHSIZE]], i[[CHSIZE]]* %[[UCADDR]], align [[CHALIGN]] // CHECK-NEXT: %[[VAR421:[A-Za-z0-9.]+]] = zext i[[CHSIZE]] %[[VAR420]] to i[[LLSIZE]] // CHECK-NEXT: %[[VAR422:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR417]], %[[VAR421]] // CHECK-NEXT: 
%[[VAR423:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR419]], 0 @@ -695,10 +695,10 @@ void foo(signed char sc, unsigned char uc, signed long long sll, csll1 = cull + sll; // CHECK-NEXT: %[[VAR426:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR427:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR426]] + // CHECK-NEXT: %[[VAR427:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR426]] // CHECK-NEXT: %[[VAR428:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR429:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR428]] - // CHECK-NEXT: %[[VAR430:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR429:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR428]] + // CHECK-NEXT: %[[VAR430:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[SLLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR431:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR427]], %[[VAR430]] // CHECK-NEXT: %[[VAR432:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR429]], 0 // CHECK-NEXT: %[[VAR433:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CSLL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -708,10 +708,10 @@ void foo(signed char sc, unsigned char uc, signed long long sll, cull1 = cull + ull; // CHECK-NEXT: %[[VAR435:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK-NEXT: %[[VAR436:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR435]] + // CHECK-NEXT: %[[VAR436:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR435]] // CHECK-NEXT: %[[VAR437:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK-NEXT: %[[VAR438:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[VAR437]] - // CHECK-NEXT: %[[VAR439:[A-Za-z0-9.]+]] = load i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] + // CHECK-NEXT: %[[VAR438:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[VAR437]] + // CHECK-NEXT: %[[VAR439:[A-Za-z0-9.]+]] = load i[[LLSIZE]], i[[LLSIZE]]* %[[ULLADDR]], align [[LLALIGN]] // CHECK-NEXT: %[[VAR440:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR436]], %[[VAR439]] // CHECK-NEXT: %[[VAR441:[A-Za-z0-9.]+]] = add i[[LLSIZE]] %[[VAR438]], 0 // CHECK-NEXT: %[[VAR442:[A-Za-z0-9.]+]] = getelementptr inbounds { i[[LLSIZE]], i[[LLSIZE]] }, { i[[LLSIZE]], i[[LLSIZE]] }* %[[CULL1]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 diff --git a/test/CodeGen/compound-literal.c b/test/CodeGen/compound-literal.c index 556e9edc25..85138bfaf5 100644 --- a/test/CodeGen/compound-literal.c +++ b/test/CodeGen/compound-literal.c @@ -20,11 +20,11 @@ void f() { // CHECK-NEXT: [[COMPOUNDLIT:%[a-zA-Z0-9.]+]] = alloca [[STRUCT]] // CHECK-NEXT: [[CX:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], [[STRUCT]]* [[COMPOUNDLIT]], i32 0, i32 0 // CHECK-NEXT: [[SY:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], [[STRUCT]]* [[S]], i32 0, i32 1 - // CHECK-NEXT: [[TMP:%[a-zA-Z0-9.]+]] = load i32* [[SY]] + // CHECK-NEXT: [[TMP:%[a-zA-Z0-9.]+]] = load i32, i32* [[SY]] // CHECK-NEXT: store i32 [[TMP]], i32* [[CX]] // CHECK-NEXT: [[CY:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], [[STRUCT]]* [[COMPOUNDLIT]], i32 0, i32 1 // CHECK-NEXT: [[SX:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], [[STRUCT]]* [[S]], i32 0, i32 0 - // 
CHECK-NEXT: [[TMP:%[a-zA-Z0-9.]+]] = load i32* [[SX]] + // CHECK-NEXT: [[TMP:%[a-zA-Z0-9.]+]] = load i32, i32* [[SX]] // CHECK-NEXT: store i32 [[TMP]], i32* [[CY]] // CHECK-NEXT: [[SI8:%[a-zA-Z0-9.]+]] = bitcast [[STRUCT]]* [[S]] to i8* // CHECK-NEXT: [[COMPOUNDLITI8:%[a-zA-Z0-9.]+]] = bitcast [[STRUCT]]* [[COMPOUNDLIT]] to i8* @@ -47,15 +47,15 @@ struct G g(int x, int y, int z) { // Evaluate the compound literal directly in the result value slot. // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[G]], [[G]]* [[RESULT]], i32 0, i32 0 - // CHECK-NEXT: [[T1:%.*]] = load i32* [[X]], align 4 + // CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[X]], align 4 // CHECK-NEXT: [[T2:%.*]] = trunc i32 [[T1]] to i16 // CHECK-NEXT: store i16 [[T2]], i16* [[T0]], align 2 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[G]], [[G]]* [[RESULT]], i32 0, i32 1 - // CHECK-NEXT: [[T1:%.*]] = load i32* [[Y]], align 4 + // CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[Y]], align 4 // CHECK-NEXT: [[T2:%.*]] = trunc i32 [[T1]] to i16 // CHECK-NEXT: store i16 [[T2]], i16* [[T0]], align 2 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[G]], [[G]]* [[RESULT]], i32 0, i32 2 - // CHECK-NEXT: [[T1:%.*]] = load i32* [[Z]], align 4 + // CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[Z]], align 4 // CHECK-NEXT: [[T2:%.*]] = trunc i32 [[T1]] to i16 // CHECK-NEXT: store i16 [[T2]], i16* [[T0]], align 2 return (struct G) { x, y, z }; @@ -63,6 +63,6 @@ struct G g(int x, int y, int z) { // CHECK-NEXT: [[T0:%.*]] = bitcast i48* [[COERCE_TEMP]] to i8* // CHECK-NEXT: [[T1:%.*]] = bitcast [[G]]* [[RESULT]] to i8* // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 6 - // CHECK-NEXT: [[T0:%.*]] = load i48* [[COERCE_TEMP]] + // CHECK-NEXT: [[T0:%.*]] = load i48, i48* [[COERCE_TEMP]] // CHECK-NEXT: ret i48 [[T0]] } diff --git a/test/CodeGen/exceptions-seh-finally.c b/test/CodeGen/exceptions-seh-finally.c index 1bb60d820a..eeadf0d5ec 100644 --- a/test/CodeGen/exceptions-seh-finally.c +++ b/test/CodeGen/exceptions-seh-finally.c @@ -22,7 +22,7 @@ void basic_finally(void) { // // CHECK: [[finally]] // CHECK: call void @cleanup() -// CHECK: load i8* %[[abnormal]] +// CHECK: load i8, i8* %[[abnormal]] // CHECK: icmp eq // CHECK: br i1 %{{.*}}, label %[[finallycont:[^ ]*]], label %[[resumecont:[^ ]*]] // @@ -97,10 +97,10 @@ void use_abnormal_termination(void) { // CHECK: br label %[[finally:[^ ]*]] // // CHECK: [[finally]] -// CHECK: load i8* %[[abnormal]] +// CHECK: load i8, i8* %[[abnormal]] // CHECK: zext i8 %{{.*}} to i32 // CHECK: store i32 %{{.*}}, i32* @crashed -// CHECK: load i8* %[[abnormal]] +// CHECK: load i8, i8* %[[abnormal]] // CHECK: icmp eq // CHECK: br i1 %{{.*}}, label %[[finallycont:[^ ]*]], label %[[resumecont:[^ ]*]] // @@ -209,7 +209,7 @@ int nested___finally___finally() { // CHECK-NEXT: br label %[[finallycont:[^ ]*]] // // CHECK: [[finallycont]] -// CHECK-NEXT: %[[dest:[^ ]*]] = load i32* % +// CHECK-NEXT: %[[dest:[^ ]*]] = load i32, i32* % // CHECK-NEXT: switch i32 %[[dest]] // CHECK-NEXT: i32 0, label %[[cleanupcont:[^ ]*]] // @@ -218,7 +218,7 @@ int nested___finally___finally() { // CHECK-NEXT: br label %[[return:[^ ]*]] // // CHECK: [[return]] -// CHECK-NEXT: %[[reg:[^ ]*]] = load i32* % +// CHECK-NEXT: %[[reg:[^ ]*]] = load i32, i32* % // CHECK-NEXT: ret i32 %[[reg]] int nested___finally___finally_with_eh_edge() { @@ -255,12 +255,12 @@ int nested___finally___finally_with_eh_edge() { // CHECK-NEXT: br label %[[outerfinally:[^ ]*]] // // CHECK: [[outerfinally]] -// CHECK-NEXT: 
%[[abnormallocal:[^ ]*]] = load i8* %[[abnormal]] +// CHECK-NEXT: %[[abnormallocal:[^ ]*]] = load i8, i8* %[[abnormal]] // CHECK-NEXT: %[[reg:[^ ]*]] = icmp eq i8 %[[abnormallocal]], 0 // CHECK-NEXT: br i1 %[[reg]], label %[[finallycont:[^ ]*]], label %[[finallyresume:[^ ]*]] // // CHECK: [[finallycont]] -// CHECK-NEXT: %[[dest:[^ ]*]] = load i32* % +// CHECK-NEXT: %[[dest:[^ ]*]] = load i32, i32* % // CHECK-NEXT: switch i32 %[[dest]] // CHECK-NEXT: i32 0, label %[[cleanupcont:[^ ]*]] // @@ -283,7 +283,7 @@ int nested___finally___finally_with_eh_edge() { // CHECK-NEXT: br label %[[ehresume:[^ ]*]] // // CHECK: [[return]] -// CHECK-NEXT: %[[reg:[^ ]*]] = load i32* % +// CHECK-NEXT: %[[reg:[^ ]*]] = load i32, i32* % // CHECK-NEXT: ret i32 %[[reg]] // // The ehresume block, not reachable either. diff --git a/test/CodeGen/exceptions-seh-leave.c b/test/CodeGen/exceptions-seh-leave.c index f88380b227..0d38439c9b 100644 --- a/test/CodeGen/exceptions-seh-leave.c +++ b/test/CodeGen/exceptions-seh-leave.c @@ -286,7 +286,7 @@ int nested___finally___except() { // CHECK-NEXT: br label %[[finally:[^ ]*]] // CHECK: [[finally]] -// CHECK-NEXT: %[[abnormallocal:[^ ]*]] = load i8* %[[abnormal]] +// CHECK-NEXT: %[[abnormallocal:[^ ]*]] = load i8, i8* %[[abnormal]] // CHECK-NEXT: %[[reg:[^ ]*]] = icmp eq i8 %[[abnormallocal]], 0 // CHECK-NEXT: br i1 %[[reg]], label %[[finallycont:[^ ]*]], label %[[finallyresume:[^ ]*]] @@ -345,7 +345,7 @@ int nested___finally___finally() { // CHECK-NEXT: br label %[[outerfinally:[^ ]*]] // CHECK: [[outerfinally]] -// CHECK-NEXT: %[[abnormallocal:[^ ]*]] = load i8* %[[abnormal]] +// CHECK-NEXT: %[[abnormallocal:[^ ]*]] = load i8, i8* %[[abnormal]] // CHECK-NEXT: %[[reg:[^ ]*]] = icmp eq i8 %[[abnormallocal]], 0 // CHECK-NEXT: br i1 %[[reg]], label %[[finallycont:[^ ]*]], label %[[finallyresume:[^ ]*]] diff --git a/test/CodeGen/exceptions-seh.c b/test/CodeGen/exceptions-seh.c index ebe97bedd2..2d9d4b1f01 100644 --- a/test/CodeGen/exceptions-seh.c +++ b/test/CodeGen/exceptions-seh.c @@ -32,7 +32,7 @@ int safe_div(int numerator, int denominator, int *res) { // CHECK: [[except]] // CHECK-NEXT: store i32 -42, i32* %[[success:[^ ]*]] // -// CHECK: %[[res:[^ ]*]] = load i32* %[[success]] +// CHECK: %[[res:[^ ]*]] = load i32, i32* %[[success]] // CHECK: ret i32 %[[res]] void j(void); @@ -57,7 +57,7 @@ int filter_expr_capture(void) { // CHECK-NEXT: catch i8* bitcast (i32 (i8*, i8*)* @"\01?filt$0@0@filter_expr_capture@@" to i8*) // CHECK: store i32 13, i32* %[[r]] // -// CHECK: %[[rv:[^ ]*]] = load i32* %[[r]] +// CHECK: %[[rv:[^ ]*]] = load i32, i32* %[[r]] // CHECK: ret i32 %[[rv]] // CHECK-LABEL: define internal i32 @"\01?filt$0@0@filter_expr_capture@@"(i8* %exception_pointers, i8* %frame_pointer) @@ -95,12 +95,12 @@ int nested_try(void) { // CHECK: store i8* %{{.*}}, i8** %[[ehptr_slot:[^ ]*]] // CHECK: store i32 %{{.*}}, i32* %[[sel_slot:[^ ]*]] // -// CHECK: load i32* %[[sel_slot]] +// CHECK: load i32, i32* %[[sel_slot]] // CHECK: call i32 @llvm.eh.typeid.for(i8* bitcast (i32 (i8*, i8*)* @"\01?filt$1@0@nested_try@@" to i8*)) // CHECK: icmp eq i32 // CHECK: br i1 // -// CHECK: load i32* %[[sel_slot]] +// CHECK: load i32, i32* %[[sel_slot]] // CHECK: call i32 @llvm.eh.typeid.for(i8* bitcast (i32 (i8*, i8*)* @"\01?filt$0@0@nested_try@@" to i8*)) // CHECK: icmp eq i32 // CHECK: br i1 @@ -109,7 +109,7 @@ int nested_try(void) { // CHECK: br label %[[outer_try_cont:[^ ]*]] // // CHECK: [[outer_try_cont]] -// CHECK: %[[r_load:[^ ]*]] = load i32* %[[r]] +// CHECK: %[[r_load:[^ ]*]] = load 
i32, i32* %[[r]] // CHECK: ret i32 %[[r_load]] // // CHECK: store i32 123, i32* %[[r]] @@ -130,7 +130,7 @@ void basic_finally(void) { } } // CHECK-LABEL: define void @basic_finally() -// CHECK: load i32* @g +// CHECK: load i32, i32* @g // CHECK: add i32 %{{.*}}, 1 // CHECK: store i32 %{{.*}}, i32* @g // @@ -141,7 +141,7 @@ void basic_finally(void) { // CHECK: br label %[[finally:[^ ]*]] // // CHECK: [[finally]] -// CHECK: load i32* @g +// CHECK: load i32, i32* @g // CHECK: add i32 %{{.*}}, -1 // CHECK: store i32 %{{.*}}, i32* @g // CHECK: icmp eq @@ -177,7 +177,7 @@ int except_return(void) { // CHECK: br label %[[retbb]] // // CHECK: [[retbb]] -// CHECK: %[[r:[^ ]*]] = load i32* %[[rv]] +// CHECK: %[[r:[^ ]*]] = load i32, i32* %[[rv]] // CHECK: ret i32 %[[r]] // CHECK: attributes #[[NOINLINE]] = { {{.*noinline.*}} } diff --git a/test/CodeGen/exprs.c b/test/CodeGen/exprs.c index 77b6781296..f7b6ab87b2 100644 --- a/test/CodeGen/exprs.c +++ b/test/CodeGen/exprs.c @@ -127,9 +127,9 @@ int f11(long X) { return A[X]; // CHECK: [[Xaddr:%[^ ]+]] = alloca i64, align 8 -// CHECK: load {{.*}}* [[Xaddr]] +// CHECK: load {{.*}}, {{.*}}* [[Xaddr]] // CHECK-NEXT: getelementptr inbounds [100 x i32], [100 x i32]* %A, i32 0, -// CHECK-NEXT: load i32* +// CHECK-NEXT: load i32, i32* } int f12() { diff --git a/test/CodeGen/ext-vector-member-alignment.c b/test/CodeGen/ext-vector-member-alignment.c index 49e69977ff..5f044b8a28 100644 --- a/test/CodeGen/ext-vector-member-alignment.c +++ b/test/CodeGen/ext-vector-member-alignment.c @@ -17,11 +17,11 @@ void func(struct struct1* p, float *a, float *b, float c) { // FIXME: We should be able to come up with a more aggressive alignment // estimate. // CHECK: @func - // CHECK: load <4 x float>* {{%.*}}, align 1 + // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 1 // CHECK: store <4 x float> {{%.*}}, <4 x float>* {{%.*}}, align 1 - // CHECK: load <4 x float>* {{%.*}}, align 1 - // CHECK: load <4 x float>* {{%.*}}, align 1 - // CHECK: load <4 x float>* {{%.*}}, align 1 + // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 1 + // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 1 + // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 1 // CHECK: store <4 x float> {{%.*}}, <4 x float>* {{%.*}}, align 1 // CHECK: ret void } diff --git a/test/CodeGen/extern-inline.c b/test/CodeGen/extern-inline.c index 77cb270191..2c0fde9f79 100644 --- a/test/CodeGen/extern-inline.c +++ b/test/CodeGen/extern-inline.c @@ -8,7 +8,7 @@ extern inline int f(int a) {return a;} int g(void) {return f(0);} // CHECK: call i32 @f int f(int b) {return 1+b;} -// CHECK: load i32* %{{.*}} +// CHECK: load i32, i32* %{{.*}} // CHECK: add nsw i32 1, %{{.*}} int h(void) {return f(1);} // CHECK: call i32 @f @@ -18,8 +18,8 @@ extern inline int f2(int a, int b) {return a+b;} int g2(void) {return f2(0,1);} // CHECK: call i32 @f2 static int f2(int a, int b) {return a*b;} -// CHECK: load i32* %{{.*}} -// CHECK: load i32* %{{.*}} +// CHECK: load i32, i32* %{{.*}} +// CHECK: load i32, i32* %{{.*}} // CHECK: mul nsw i32 %{{.*}}, %{{.*}} int h2(void) {return f2(1,2);} // CHECK: call i32 @f2 diff --git a/test/CodeGen/mips-varargs.c b/test/CodeGen/mips-varargs.c index 2cb044ad59..8fd1df60b7 100644 --- a/test/CodeGen/mips-varargs.c +++ b/test/CodeGen/mips-varargs.c @@ -29,9 +29,9 @@ int test_i32(char *fmt, ...) 
{ // ALL: call void @llvm.va_start(i8* [[VA1]]) // // O32: [[TMP0:%.+]] = bitcast i8** %va to i32** -// O32: [[AP_CUR:%.+]] = load i32** [[TMP0]], align [[PTRALIGN]] +// O32: [[AP_CUR:%.+]] = load i32*, i32** [[TMP0]], align [[PTRALIGN]] // NEW: [[TMP0:%.+]] = bitcast i8** %va to i64** -// NEW: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]] +// NEW: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]] // // O32: [[AP_NEXT:%.+]] = getelementptr i32, i32* [[AP_CUR]], i32 1 // NEW: [[AP_NEXT:%.+]] = getelementptr i64, i64* [[AP_CUR]], {{i32|i64}} 1 @@ -39,8 +39,8 @@ int test_i32(char *fmt, ...) { // O32: store i32* [[AP_NEXT]], i32** [[TMP0]], align [[PTRALIGN]] // NEW: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]] // -// O32: [[ARG1:%.+]] = load i32* [[AP_CUR]], align 4 -// NEW: [[TMP2:%.+]] = load i64* [[AP_CUR]], align 8 +// O32: [[ARG1:%.+]] = load i32, i32* [[AP_CUR]], align 4 +// NEW: [[TMP2:%.+]] = load i64, i64* [[AP_CUR]], align 8 // NEW: [[ARG1:%.+]] = trunc i64 [[TMP2]] to i32 // // ALL: call void @llvm.va_end(i8* [[VA1]]) @@ -65,9 +65,9 @@ int test_i32_2args(char *fmt, ...) { // ALL: call void @llvm.va_start(i8* [[VA1]]) // // O32: [[TMP0:%.+]] = bitcast i8** %va to i32** -// O32: [[AP_CUR:%.+]] = load i32** [[TMP0]], align [[PTRALIGN]] +// O32: [[AP_CUR:%.+]] = load i32*, i32** [[TMP0]], align [[PTRALIGN]] // NEW: [[TMP0:%.+]] = bitcast i8** %va to i64** -// NEW: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]] +// NEW: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]] // // O32: [[AP_NEXT1:%.+]] = getelementptr i32, i32* [[AP_CUR]], i32 1 // NEW: [[AP_NEXT1:%.+]] = getelementptr i64, i64* [[AP_CUR]], [[INTPTR_T:i32|i64]] 1 @@ -76,8 +76,8 @@ int test_i32_2args(char *fmt, ...) { // FIXME: N32 optimised this store out. Why only for this ABI? // N64: store i64* [[AP_NEXT1]], i64** [[TMP0]], align [[PTRALIGN]] // -// O32: [[ARG1:%.+]] = load i32* [[AP_CUR]], align 4 -// NEW: [[TMP3:%.+]] = load i64* [[AP_CUR]], align 8 +// O32: [[ARG1:%.+]] = load i32, i32* [[AP_CUR]], align 4 +// NEW: [[TMP3:%.+]] = load i64, i64* [[AP_CUR]], align 8 // NEW: [[ARG1:%.+]] = trunc i64 [[TMP3]] to i32 // // O32: [[AP_NEXT2:%.+]] = getelementptr i32, i32* [[AP_CUR]], i32 2 @@ -86,8 +86,8 @@ int test_i32_2args(char *fmt, ...) { // O32: store i32* [[AP_NEXT2]], i32** [[TMP0]], align [[PTRALIGN]] // NEW: store i64* [[AP_NEXT2]], i64** [[TMP0]], align [[PTRALIGN]] // -// O32: [[ARG2:%.+]] = load i32* [[AP_NEXT1]], align 4 -// NEW: [[TMP4:%.+]] = load i64* [[AP_NEXT1]], align 8 +// O32: [[ARG2:%.+]] = load i32, i32* [[AP_NEXT1]], align 4 +// NEW: [[TMP4:%.+]] = load i64, i64* [[AP_NEXT1]], align 8 // NEW: [[ARG2:%.+]] = trunc i64 [[TMP4]] to i32 // // ALL: call void @llvm.va_end(i8* [[VA1]]) @@ -111,9 +111,9 @@ long long test_i64(char *fmt, ...) { // ALL: [[VA1:%.+]] = bitcast i8** %va to i8* // ALL: call void @llvm.va_start(i8* [[VA1]]) // -// O32: [[AP_CUR:%.+]] = load i8** %va, align [[PTRALIGN]] +// O32: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]] // NEW: [[TMP0:%.+]] = bitcast i8** %va to i64** -// NEW: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]] +// NEW: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]] // // i64 is 8-byte aligned, while this is within O32's stack alignment there's no // guarantee that the offset is still 8-byte aligned after earlier reads. @@ -129,8 +129,8 @@ long long test_i64(char *fmt, ...) 
{ // O32: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]] // NEW: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]] // -// O32: [[ARG1:%.+]] = load i64* [[PTR3]], align 8 -// NEW: [[ARG1:%.+]] = load i64* [[AP_CUR]], align 8 +// O32: [[ARG1:%.+]] = load i64, i64* [[PTR3]], align 8 +// NEW: [[ARG1:%.+]] = load i64, i64* [[AP_CUR]], align 8 // // ALL: call void @llvm.va_end(i8* [[VA1]]) // ALL: ret i64 [[ARG1]] @@ -156,12 +156,12 @@ char *test_ptr(char *fmt, ...) { // ALL: call void @llvm.va_start(i8* [[VA1]]) // // O32: [[TMP0:%.+]] = bitcast i8** %va to i8*** -// O32: [[AP_CUR:%.+]] = load i8*** [[TMP0]], align [[PTRALIGN]] +// O32: [[AP_CUR:%.+]] = load i8**, i8*** [[TMP0]], align [[PTRALIGN]] // N32 differs because the vararg is not a N32 pointer. It's been promoted to 64-bit. // N32: [[TMP0:%.+]] = bitcast i8** %va to i64** -// N32: [[AP_CUR:%.+]] = load i64** [[TMP0]], align [[PTRALIGN]] +// N32: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]] // N64: [[TMP0:%.+]] = bitcast i8** %va to i8*** -// N64: [[AP_CUR:%.+]] = load i8*** [[TMP0]], align [[PTRALIGN]] +// N64: [[AP_CUR:%.+]] = load i8**, i8*** [[TMP0]], align [[PTRALIGN]] // // O32: [[AP_NEXT:%.+]] = getelementptr i8*, i8** [[AP_CUR]], i32 1 // N32 differs because the vararg is not a N32 pointer. It's been promoted to 64-bit. @@ -173,13 +173,13 @@ char *test_ptr(char *fmt, ...) { // N32: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]] // N64: store i8** [[AP_NEXT]], i8*** [[TMP0]], align [[PTRALIGN]] // -// O32: [[ARG1:%.+]] = load i8** [[AP_CUR]], align 4 +// O32: [[ARG1:%.+]] = load i8*, i8** [[AP_CUR]], align 4 // N32 differs because the vararg is not a N32 pointer. It's been promoted to // 64-bit so we must truncate the excess and bitcast to a N32 pointer. -// N32: [[TMP2:%.+]] = load i64* [[AP_CUR]], align 8 +// N32: [[TMP2:%.+]] = load i64, i64* [[AP_CUR]], align 8 // N32: [[TMP3:%.+]] = trunc i64 [[TMP2]] to i32 // N32: [[ARG1:%.+]] = inttoptr i32 [[TMP3]] to i8* -// N64: [[ARG1:%.+]] = load i8** [[AP_CUR]], align 8 +// N64: [[ARG1:%.+]] = load i8*, i8** [[AP_CUR]], align 8 // // ALL: call void @llvm.va_end(i8* [[VA1]]) // ALL: ret i8* [[ARG1]] @@ -200,7 +200,7 @@ int test_v4i32(char *fmt, ...) { // ALL: %va = alloca i8*, align [[PTRALIGN]] // ALL: [[VA1:%.+]] = bitcast i8** %va to i8* // ALL: call void @llvm.va_start(i8* [[VA1]]) -// ALL: [[AP_CUR:%.+]] = load i8** %va, align [[PTRALIGN]] +// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]] // // O32: [[PTR0:%.+]] = ptrtoint i8* [[AP_CUR]] to [[INTPTR_T:i32]] // N32: [[PTR0:%.+]] = ptrtoint i8* [[AP_CUR]] to [[INTPTR_T:i32]] @@ -218,7 +218,7 @@ int test_v4i32(char *fmt, ...) 
{ // ALL: [[PTR4:%.+]] = inttoptr [[INTPTR_T]] [[PTR2]] to i8* // ALL: [[AP_NEXT:%.+]] = getelementptr i8, i8* [[PTR4]], [[INTPTR_T]] 16 // ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]] -// ALL: [[PTR5:%.+]] = load <4 x i32>* [[PTR3]], align 16 +// ALL: [[PTR5:%.+]] = load <4 x i32>, <4 x i32>* [[PTR3]], align 16 // ALL: call void @llvm.va_end(i8* [[VA1]]) // ALL: [[VECEXT:%.+]] = extractelement <4 x i32> [[PTR5]], i32 0 // ALL: ret i32 [[VECEXT]] diff --git a/test/CodeGen/ms-anonymous-struct.c b/test/CodeGen/ms-anonymous-struct.c index 422ba55337..bf33406d77 100644 --- a/test/CodeGen/ms-anonymous-struct.c +++ b/test/CodeGen/ms-anonymous-struct.c @@ -28,32 +28,32 @@ void foo() // CHECK: getelementptr inbounds %struct.test, %struct.test* %var, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested2, %struct.nested2* %{{.*}}, i32 0, i32 0 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var.a; // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %var, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested2, %struct.nested2* %{{.*}}, i32 0, i32 2 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var.b; // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %var, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested2, %struct.nested2* %{{.*}}, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested1, %struct.nested1* %{{.*}}, i32 0, i32 0 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var.a1; // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %{{.*}}var, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested2, %struct.nested2* %{{.*}}, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested1, %struct.nested1* %{{.*}}, i32 0, i32 1 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var.b1; // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %var, i32 0, i32 0 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var.x; // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %var, i32 0, i32 2 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var.y; } @@ -61,39 +61,39 @@ void foo2(struct test* var) { // CHECK: alloca %struct.test*, align // CHECK-NEXT: store %struct.test* %var, %struct.test** %{{.*}}, align - // CHECK-NEXT: load %struct.test** %{{.*}}, align + // CHECK-NEXT: load %struct.test*, %struct.test** %{{.*}}, align // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %{{.*}}, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested2, %struct.nested2* %{{.*}}, i32 0, i32 0 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var->a; - // CHECK-NEXT: load %struct.test** %{{.*}}, align + // CHECK-NEXT: load %struct.test*, %struct.test** %{{.*}}, align // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %{{.*}}, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested2, %struct.nested2* %{{.*}}, i32 0, i32 2 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var->b; - // CHECK-NEXT: load %struct.test** %{{.*}}, align + // CHECK-NEXT: load %struct.test*, %struct.test** %{{.*}}, align // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %{{.*}}, i32 0, i32 1 // 
CHECK-NEXT: getelementptr inbounds %struct.nested2, %struct.nested2* %{{.*}}, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested1, %struct.nested1* %{{.*}}, i32 0, i32 0 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var->a1; - // CHECK-NEXT: load %struct.test** %{{.*}}, align + // CHECK-NEXT: load %struct.test*, %struct.test** %{{.*}}, align // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %{{.*}}, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested2, %struct.nested2* %{{.*}}, i32 0, i32 1 // CHECK-NEXT: getelementptr inbounds %struct.nested1, %struct.nested1* %{{.*}}, i32 0, i32 1 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var->b1; - // CHECK-NEXT: load %struct.test** %{{.*}}, align + // CHECK-NEXT: load %struct.test*, %struct.test** %{{.*}}, align // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %{{.*}}, i32 0, i32 0 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var->x; - // CHECK-NEXT: load %struct.test** %{{.*}}, align + // CHECK-NEXT: load %struct.test*, %struct.test** %{{.*}}, align // CHECK-NEXT: getelementptr inbounds %struct.test, %struct.test* %{{.*}}, i32 0, i32 2 - // CHECK-NEXT: load i32* %{{.*}}, align 4 + // CHECK-NEXT: load i32, i32* %{{.*}}, align 4 var->y; } diff --git a/test/CodeGen/ms-inline-asm.c b/test/CodeGen/ms-inline-asm.c index 59ff2023a4..a6f1b71b39 100644 --- a/test/CodeGen/ms-inline-asm.c +++ b/test/CodeGen/ms-inline-asm.c @@ -93,7 +93,7 @@ unsigned t10(void) { // CHECK: [[J:%[a-zA-Z0-9]+]] = alloca i32, align 4 // CHECK: store i32 1, i32* [[I]], align 4 // CHECK: call i32 asm sideeffect inteldialect "mov eax, dword ptr $2\0A\09mov dword ptr $0, eax", "=*m,={eax},*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}}, i32* %{{.*}}) -// CHECK: [[RET:%[a-zA-Z0-9]+]] = load i32* [[J]], align 4 +// CHECK: [[RET:%[a-zA-Z0-9]+]] = load i32, i32* [[J]], align 4 // CHECK: ret i32 [[RET]] } diff --git a/test/CodeGen/ms-inline-asm.cpp b/test/CodeGen/ms-inline-asm.cpp index 1575ff997a..123a0e3d6d 100644 --- a/test/CodeGen/ms-inline-asm.cpp +++ b/test/CodeGen/ms-inline-asm.cpp @@ -63,7 +63,7 @@ struct T4 { // CHECK-LABEL: define void @_ZN2T44testEv( void T4::test() { // CHECK: [[T0:%.*]] = alloca [[T4:%.*]]*, -// CHECK: [[THIS:%.*]] = load [[T4]]** [[T0]] +// CHECK: [[THIS:%.*]] = load [[T4]]*, [[T4]]** [[T0]] // CHECK: [[X:%.*]] = getelementptr inbounds [[T4]], [[T4]]* [[THIS]], i32 0, i32 0 __asm mov eax, x; __asm mov y, eax; diff --git a/test/CodeGen/ms-intrinsics.c b/test/CodeGen/ms-intrinsics.c index 4498b34bc4..9103622197 100644 --- a/test/CodeGen/ms-intrinsics.c +++ b/test/CodeGen/ms-intrinsics.c @@ -57,7 +57,7 @@ long test__readfsdword(unsigned long Offset) { // CHECK-I386: define i32 @test__readfsdword(i32 %Offset){{.*}}{ // CHECK-I386: [[PTR:%[0-9]+]] = inttoptr i32 %Offset to i32 addrspace(257)* -// CHECK-I386: [[VALUE:%[0-9]+]] = load volatile i32 addrspace(257)* [[PTR]], align 4 +// CHECK-I386: [[VALUE:%[0-9]+]] = load volatile i32, i32 addrspace(257)* [[PTR]], align 4 // CHECK-I386: ret i32 [[VALUE:%[0-9]+]] // CHECK-I386: } #endif diff --git a/test/CodeGen/named_reg_global.c b/test/CodeGen/named_reg_global.c index d888a3ff17..8f9a9c685d 100644 --- a/test/CodeGen/named_reg_global.c +++ b/test/CodeGen/named_reg_global.c @@ -21,7 +21,7 @@ unsigned long get_stack_pointer_addr() { // CHECK: declare{{.*}} i[[bits]] @llvm.read_register.i[[bits]](metadata) // CHECK: 
define{{.*}} void @set_stack_pointer_addr(i[[bits]] %addr) #0 { -// CHECK: [[sto:%[0-9]+]] = load i[[bits]]* % +// CHECK: [[sto:%[0-9]+]] = load i[[bits]], i[[bits]]* % // CHECK: call void @llvm.write_register.i[[bits]](metadata !0, i[[bits]] [[sto]]) // CHECK: ret void void set_stack_pointer_addr(unsigned long addr) { diff --git a/test/CodeGen/object-size.c b/test/CodeGen/object-size.c index 5a4dc99f36..19c074c80f 100644 --- a/test/CodeGen/object-size.c +++ b/test/CodeGen/object-size.c @@ -39,7 +39,7 @@ void test4() { // CHECK-LABEL: define void @test5 void test5() { - // CHECK: = load i8** @gp + // CHECK: = load i8*, i8** @gp // CHECK-NEXT:= call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false) strcpy(gp, "Hi there"); } diff --git a/test/CodeGen/packed-arrays.c b/test/CodeGen/packed-arrays.c index 8e748dfcfc..993d88e277 100644 --- a/test/CodeGen/packed-arrays.c +++ b/test/CodeGen/packed-arrays.c @@ -52,10 +52,10 @@ int align2_x0 = __alignof(((struct s2*) 0)->x[0]); int align3_x0 = __alignof(((struct s3*) 0)->x[0]); // CHECK-LABEL: define i32 @f0_a -// CHECK: load i32* %{{.*}}, align 1 +// CHECK: load i32, i32* %{{.*}}, align 1 // CHECK: } // CHECK-LABEL: define i32 @f0_b -// CHECK: load i32* %{{.*}}, align 4 +// CHECK: load i32, i32* %{{.*}}, align 4 // CHECK: } int f0_a(struct s0 *a) { return a->x[1]; @@ -67,19 +67,19 @@ int f0_b(struct s0 *a) { // Note that we are incompatible with GCC on this example. // // CHECK-LABEL: define i32 @f1_a -// CHECK: load i32* %{{.*}}, align 1 +// CHECK: load i32, i32* %{{.*}}, align 1 // CHECK: } // CHECK-LABEL: define i32 @f1_b -// CHECK: load i32* %{{.*}}, align 4 +// CHECK: load i32, i32* %{{.*}}, align 4 // CHECK: } // Note that we are incompatible with GCC on this example. // // CHECK-LABEL: define i32 @f1_c -// CHECK: load i32* %{{.*}}, align 4 +// CHECK: load i32, i32* %{{.*}}, align 4 // CHECK: } // CHECK-LABEL: define i32 @f1_d -// CHECK: load i32* %{{.*}}, align 1 +// CHECK: load i32, i32* %{{.*}}, align 1 // CHECK: } int f1_a(struct s1 *a) { return a->x[1]; @@ -95,16 +95,16 @@ int f1_d(struct s1 *a) { } // CHECK-LABEL: define i32 @f2_a -// CHECK: load i32* %{{.*}}, align 1 +// CHECK: load i32, i32* %{{.*}}, align 1 // CHECK: } // CHECK-LABEL: define i32 @f2_b -// CHECK: load i32* %{{.*}}, align 4 +// CHECK: load i32, i32* %{{.*}}, align 4 // CHECK: } // CHECK-LABEL: define i32 @f2_c -// CHECK: load i32* %{{.*}}, align 1 +// CHECK: load i32, i32* %{{.*}}, align 1 // CHECK: } // CHECK-LABEL: define i32 @f2_d -// CHECK: load i32* %{{.*}}, align 1 +// CHECK: load i32, i32* %{{.*}}, align 1 // CHECK: } int f2_a(struct s2 *a) { return a->x[1]; @@ -120,16 +120,16 @@ int f2_d(struct s2 *a) { } // CHECK-LABEL: define i32 @f3_a -// CHECK: load i32* %{{.*}}, align 1 +// CHECK: load i32, i32* %{{.*}}, align 1 // CHECK: } // CHECK-LABEL: define i32 @f3_b -// CHECK: load i32* %{{.*}}, align 4 +// CHECK: load i32, i32* %{{.*}}, align 4 // CHECK: } // CHECK-LABEL: define i32 @f3_c -// CHECK: load i32* %{{.*}}, align 1 +// CHECK: load i32, i32* %{{.*}}, align 1 // CHECK: } // CHECK-LABEL: define i32 @f3_d -// CHECK: load i32* %{{.*}}, align 1 +// CHECK: load i32, i32* %{{.*}}, align 1 // CHECK: } int f3_a(struct s3 *a) { return a->x[1]; @@ -147,7 +147,7 @@ int f3_d(struct s3 *a) { // Verify we don't claim things are overaligned. 
// // CHECK-LABEL: define double @f4 -// CHECK: load double* {{.*}}, align 8 +// CHECK: load double, double* {{.*}}, align 8 // CHECK: } extern double g4[5] __attribute__((aligned(16))); double f4() { diff --git a/test/CodeGen/packed-nest-unpacked.c b/test/CodeGen/packed-nest-unpacked.c index ea45660b7a..d8eb9c9a80 100644 --- a/test/CodeGen/packed-nest-unpacked.c +++ b/test/CodeGen/packed-nest-unpacked.c @@ -35,7 +35,7 @@ void test4() { // PR12395 int test5() { // CHECK: @test5 - // CHECK: load i32* getelementptr inbounds (%struct.Y* @g, i32 0, i32 1, i32 0, i64 0), align 1 + // CHECK: load i32, i32* getelementptr inbounds (%struct.Y* @g, i32 0, i32 1, i32 0, i64 0), align 1 return g.y.x[0]; } @@ -60,6 +60,6 @@ struct YBitfield gbitfield; unsigned test7() { // CHECK: @test7 - // CHECK: load i32* getelementptr inbounds (%struct.YBitfield* @gbitfield, i32 0, i32 1, i32 0), align 4 + // CHECK: load i32, i32* getelementptr inbounds (%struct.YBitfield* @gbitfield, i32 0, i32 1, i32 0), align 4 return gbitfield.y.b2; } diff --git a/test/CodeGen/packed-structure.c b/test/CodeGen/packed-structure.c index 24c3929314..8de31d6a81 100644 --- a/test/CodeGen/packed-structure.c +++ b/test/CodeGen/packed-structure.c @@ -17,7 +17,7 @@ int s0_align_y = __alignof(((struct s0*)0)->y); int s0_align = __alignof(struct s0); // CHECK-FUNCTIONS-LABEL: define i32 @s0_load_x -// CHECK-FUNCTIONS: [[s0_load_x:%.*]] = load i32* {{.*}}, align 4 +// CHECK-FUNCTIONS: [[s0_load_x:%.*]] = load i32, i32* {{.*}}, align 4 // CHECK-FUNCTIONS: ret i32 [[s0_load_x]] int s0_load_x(struct s0 *a) { return a->x; } // FIXME: This seems like it should be align 1. This is actually something which @@ -25,7 +25,7 @@ int s0_load_x(struct s0 *a) { return a->x; } // with align 1 (in 2363.1 at least). 
// // CHECK-FUNCTIONS-LABEL: define i32 @s0_load_y -// CHECK-FUNCTIONS: [[s0_load_y:%.*]] = load i32* {{.*}}, align 1 +// CHECK-FUNCTIONS: [[s0_load_y:%.*]] = load i32, i32* {{.*}}, align 1 // CHECK-FUNCTIONS: ret i32 [[s0_load_y]] int s0_load_y(struct s0 *a) { return a->y; } // CHECK-FUNCTIONS-LABEL: define void @s0_copy @@ -47,11 +47,11 @@ int s1_align_y = __alignof(((struct s1*)0)->y); int s1_align = __alignof(struct s1); // CHECK-FUNCTIONS-LABEL: define i32 @s1_load_x -// CHECK-FUNCTIONS: [[s1_load_x:%.*]] = load i32* {{.*}}, align 1 +// CHECK-FUNCTIONS: [[s1_load_x:%.*]] = load i32, i32* {{.*}}, align 1 // CHECK-FUNCTIONS: ret i32 [[s1_load_x]] int s1_load_x(struct s1 *a) { return a->x; } // CHECK-FUNCTIONS-LABEL: define i32 @s1_load_y -// CHECK-FUNCTIONS: [[s1_load_y:%.*]] = load i32* {{.*}}, align 1 +// CHECK-FUNCTIONS: [[s1_load_y:%.*]] = load i32, i32* {{.*}}, align 1 // CHECK-FUNCTIONS: ret i32 [[s1_load_y]] int s1_load_y(struct s1 *a) { return a->y; } // CHECK-FUNCTIONS-LABEL: define void @s1_copy @@ -75,11 +75,11 @@ int s2_align_y = __alignof(((struct s2*)0)->y); int s2_align = __alignof(struct s2); // CHECK-FUNCTIONS-LABEL: define i32 @s2_load_x -// CHECK-FUNCTIONS: [[s2_load_y:%.*]] = load i32* {{.*}}, align 2 +// CHECK-FUNCTIONS: [[s2_load_y:%.*]] = load i32, i32* {{.*}}, align 2 // CHECK-FUNCTIONS: ret i32 [[s2_load_y]] int s2_load_x(struct s2 *a) { return a->x; } // CHECK-FUNCTIONS-LABEL: define i32 @s2_load_y -// CHECK-FUNCTIONS: [[s2_load_y:%.*]] = load i32* {{.*}}, align 2 +// CHECK-FUNCTIONS: [[s2_load_y:%.*]] = load i32, i32* {{.*}}, align 2 // CHECK-FUNCTIONS: ret i32 [[s2_load_y]] int s2_load_y(struct s2 *a) { return a->y; } // CHECK-FUNCTIONS-LABEL: define void @s2_copy @@ -95,6 +95,6 @@ int s3_1 = __alignof(((struct s3*) 0)->anInt); // CHECK-FUNCTIONS-LABEL: define i32 @test3( int test3(struct s3 *ptr) { // CHECK-FUNCTIONS: [[PTR:%.*]] = getelementptr inbounds {{%.*}}, {{%.*}}* {{%.*}}, i32 0, i32 1 - // CHECK-FUNCTIONS-NEXT: load i32* [[PTR]], align 1 + // CHECK-FUNCTIONS-NEXT: load i32, i32* [[PTR]], align 1 return ptr->anInt; } diff --git a/test/CodeGen/ppc-varargs-struct.c b/test/CodeGen/ppc-varargs-struct.c index 208b181604..f5b012d3ce 100644 --- a/test/CodeGen/ppc-varargs-struct.c +++ b/test/CodeGen/ppc-varargs-struct.c @@ -28,11 +28,11 @@ void testva (int n, ...) // CHECK-PPC-NEXT: [[FOUR:%[0-9]+]] = inttoptr i32 [[THREE]] to i8** // CHECK-PPC-NEXT: [[FIVE:%[0-9]+]] = add i32 [[THREE]], 4 // CHECK-PPC-NEXT: [[SIX:%[0-9]+]] = inttoptr i32 [[FIVE]] to i8** -// CHECK-PPC-NEXT: [[GPR:%[a-z0-9]+]] = load i8* [[GPRPTR]] -// CHECK-PPC-NEXT: [[FPR:%[a-z0-9]+]] = load i8* [[TWO]] -// CHECK-PPC-NEXT: [[OVERFLOW_AREA:%[a-z_0-9]+]] = load i8** [[FOUR]] +// CHECK-PPC-NEXT: [[GPR:%[a-z0-9]+]] = load i8, i8* [[GPRPTR]] +// CHECK-PPC-NEXT: [[FPR:%[a-z0-9]+]] = load i8, i8* [[TWO]] +// CHECK-PPC-NEXT: [[OVERFLOW_AREA:%[a-z_0-9]+]] = load i8*, i8** [[FOUR]] // CHECK-PPC-NEXT: [[SEVEN:%[0-9]+]] = ptrtoint i8* [[OVERFLOW_AREA]] to i32 -// CHECK-PPC-NEXT: [[REGSAVE_AREA:%[a-z_0-9]+]] = load i8** [[SIX]] +// CHECK-PPC-NEXT: [[REGSAVE_AREA:%[a-z_0-9]+]] = load i8*, i8** [[SIX]] // CHECK-PPC-NEXT: [[EIGHT:%[0-9]+]] = ptrtoint i8* [[REGSAVE_AREA]] to i32 // CHECK-PPC-NEXT: [[COND:%[a-z0-9]+]] = icmp ult i8 [[GPR]], 8 // CHECK-PPC-NEXT: [[NINE:%[0-9]+]] = mul i8 [[GPR]], 4 @@ -56,7 +56,7 @@ void testva (int n, ...) 
// CHECK-PPC1:[[CONT]] // CHECK-PPC: [[VAARG_ADDR:%[a-z.0-9]+]] = phi %struct.x* [ [[TWELVE]], [[USING_REGS]] ], [ [[FOURTEEN]], [[USING_OVERFLOW]] ] // CHECK-PPC-NEXT: [[AGGRPTR:%[a-z0-9]+]] = bitcast %struct.x* [[VAARG_ADDR]] to i8** -// CHECK-PPC-NEXT: [[AGGR:%[a-z0-9]+]] = load i8** [[AGGRPTR]] +// CHECK-PPC-NEXT: [[AGGR:%[a-z0-9]+]] = load i8*, i8** [[AGGRPTR]] // CHECK-PPC-NEXT: [[SEVENTEEN:%[0-9]+]] = bitcast %struct.x* %t to i8* // CHECK-PPC-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[SEVENTEEN]], i8* [[AGGR]], i32 16, i32 8, i1 false) @@ -74,11 +74,11 @@ void testva (int n, ...) // CHECK-PPC-NEXT: [[TWENTYTWO:%[0-9]+]] = inttoptr i32 [[TWENTYONE]] to i8** // CHECK-PPC-NEXT: [[TWENTYTHREE:%[0-9]+]] = add i32 [[TWENTYONE]], 4 // CHECK-PPC-NEXT: [[TWENTYFOUR:%[0-9]+]] = inttoptr i32 [[TWENTYTHREE]] to i8** -// CHECK-PPC-NEXT: [[GPR1:%[a-z0-9]+]] = load i8* [[GPRPTR1]] -// CHECK-PPC-NEXT: [[FPR1:%[a-z0-9]+]] = load i8* [[TWENTY]] -// CHECK-PPC-NEXT: [[OVERFLOW_AREA1:%[a-z_0-9]+]] = load i8** [[TWENTYTWO]] +// CHECK-PPC-NEXT: [[GPR1:%[a-z0-9]+]] = load i8, i8* [[GPRPTR1]] +// CHECK-PPC-NEXT: [[FPR1:%[a-z0-9]+]] = load i8, i8* [[TWENTY]] +// CHECK-PPC-NEXT: [[OVERFLOW_AREA1:%[a-z_0-9]+]] = load i8*, i8** [[TWENTYTWO]] // CHECK-PPC-NEXT: [[TWENTYFIVE:%[0-9]+]] = ptrtoint i8* [[OVERFLOW_AREA1]] to i32 -// CHECK-PPC-NEXT: [[REGSAVE_AREA1:%[a-z_0-9]+]] = load i8** [[TWENTYFOUR]] +// CHECK-PPC-NEXT: [[REGSAVE_AREA1:%[a-z_0-9]+]] = load i8*, i8** [[TWENTYFOUR]] // CHECK-PPC-NEXT: [[TWENTYSIX:%[0-9]+]] = ptrtoint i8* [[REGSAVE_AREA1]] to i32 // CHECK-PPC-NEXT: [[COND1:%[a-z0-9]+]] = icmp ult i8 [[GPR1]], 8 // CHECK-PPC-NEXT: [[TWENTYSEVEN:%[0-9]+]] = mul i8 [[GPR1]], 4 @@ -101,12 +101,12 @@ void testva (int n, ...) // // CHECK-PPC1:[[CONT1]]: // CHECK-PPC: [[VAARG_ADDR1:%[a-z.0-9]+]] = phi i32* [ [[THIRTY]], [[USING_REGS1]] ], [ [[THIRTYTWO]], [[USING_OVERFLOW1]] ] -// CHECK-PPC-NEXT: [[THIRTYFIVE:%[0-9]+]] = load i32* [[VAARG_ADDR1]] +// CHECK-PPC-NEXT: [[THIRTYFIVE:%[0-9]+]] = load i32, i32* [[VAARG_ADDR1]] // CHECK-PPC-NEXT: store i32 [[THIRTYFIVE]], i32* %v, align 4 #ifdef __powerpc64__ __int128_t u = va_arg (ap, __int128_t); #endif // CHECK: bitcast i8* %{{[a-z.0-9]+}} to i128* -// CHECK-NEXT: load i128* %{{[0-9]+}} +// CHECK-NEXT: load i128, i128* %{{[0-9]+}} } diff --git a/test/CodeGen/ppc64-align-long-double.c b/test/CodeGen/ppc64-align-long-double.c index 6d07f7039d..2214e24620 100644 --- a/test/CodeGen/ppc64-align-long-double.c +++ b/test/CodeGen/ppc64-align-long-double.c @@ -13,4 +13,4 @@ long double test (struct S x) return x.b; } -// CHECK: %{{[0-9]}} = load ppc_fp128* %{{[a-zA-Z0-9]+}}, align 16 +// CHECK: %{{[0-9]}} = load ppc_fp128, ppc_fp128* %{{[a-zA-Z0-9]+}}, align 16 diff --git a/test/CodeGen/ppc64-align-struct.c b/test/CodeGen/ppc64-align-struct.c index 37dbe580af..8c4437a38d 100644 --- a/test/CodeGen/ppc64-align-struct.c +++ b/test/CodeGen/ppc64-align-struct.c @@ -49,7 +49,7 @@ void test7 (int x, struct test7 y) } // CHECK: define void @test1va(%struct.test1* noalias sret %agg.result, i32 signext %x, ...) -// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK: %[[NEXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i64 8 // CHECK: store i8* %[[NEXT]], i8** %ap // CHECK: bitcast i8* %[[CUR]] to %struct.test1* @@ -64,7 +64,7 @@ struct test1 test1va (int x, ...) } // CHECK: define void @test2va(%struct.test2* noalias sret %agg.result, i32 signext %x, ...) 
-// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64 // CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15 // CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16 @@ -83,7 +83,7 @@ struct test2 test2va (int x, ...) } // CHECK: define void @test3va(%struct.test3* noalias sret %agg.result, i32 signext %x, ...) -// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64 // CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15 // CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16 @@ -102,7 +102,7 @@ struct test3 test3va (int x, ...) } // CHECK: define void @test4va(%struct.test4* noalias sret %agg.result, i32 signext %x, ...) -// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK: %[[NEXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i64 16 // CHECK: store i8* %[[NEXT]], i8** %ap // CHECK: bitcast i8* %[[CUR]] to %struct.test4* @@ -117,7 +117,7 @@ struct test4 test4va (int x, ...) } // CHECK: define void @testva_longdouble(%struct.test_longdouble* noalias sret %agg.result, i32 signext %x, ...) -// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK: %[[NEXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i64 16 // CHECK: store i8* %[[NEXT]], i8** %ap // CHECK: bitcast i8* %[[CUR]] to %struct.test_longdouble* @@ -133,7 +133,7 @@ struct test_longdouble testva_longdouble (int x, ...) } // CHECK: define void @testva_vector(%struct.test_vector* noalias sret %agg.result, i32 signext %x, ...) -// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64 // CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15 // CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16 diff --git a/test/CodeGen/ppc64-complex-parms.c b/test/CodeGen/ppc64-complex-parms.c index f258d192f6..f5583a0742 100644 --- a/test/CodeGen/ppc64-complex-parms.c +++ b/test/CodeGen/ppc64-complex-parms.c @@ -63,9 +63,9 @@ void bar_float(void) { // CHECK: store float 2.000000e+00, float* %[[VAR2]] // CHECK: store float -2.500000e+00, float* %[[VAR3]] // CHECK: %[[VAR4:[A-Za-z0-9.]+]] = getelementptr { float, float }, { float, float }* %[[VAR1]], i32 0, i32 0 -// CHECK: %[[VAR5:[A-Za-z0-9.]+]] = load float* %[[VAR4]], align 1 +// CHECK: %[[VAR5:[A-Za-z0-9.]+]] = load float, float* %[[VAR4]], align 1 // CHECK: %[[VAR6:[A-Za-z0-9.]+]] = getelementptr { float, float }, { float, float }* %[[VAR1]], i32 0, i32 1 -// CHECK: %[[VAR7:[A-Za-z0-9.]+]] = load float* %[[VAR6]], align 1 +// CHECK: %[[VAR7:[A-Za-z0-9.]+]] = load float, float* %[[VAR6]], align 1 // CHECK: %{{[A-Za-z0-9.]+}} = call float @foo_float(float %[[VAR5]], float %[[VAR7]]) void bar_double(void) { @@ -79,9 +79,9 @@ void bar_double(void) { // CHECK: store double 2.000000e+00, double* %[[VAR12]] // CHECK: store double -2.500000e+00, double* %[[VAR13]] // CHECK: %[[VAR14:[A-Za-z0-9.]+]] = getelementptr { double, double }, { double, double }* %[[VAR11]], i32 0, i32 0 -// CHECK: %[[VAR15:[A-Za-z0-9.]+]] = load double* %[[VAR14]], align 1 +// CHECK: %[[VAR15:[A-Za-z0-9.]+]] = load double, double* %[[VAR14]], align 1 // CHECK: %[[VAR16:[A-Za-z0-9.]+]] = getelementptr { double, double }, { double, double }* %[[VAR11]], i32 0, i32 1 -// CHECK: %[[VAR17:[A-Za-z0-9.]+]] = load double* %[[VAR16]], align 1 +// CHECK: %[[VAR17:[A-Za-z0-9.]+]] = load double, double* %[[VAR16]], align 1 // CHECK: 
%{{[A-Za-z0-9.]+}} = call double @foo_double(double %[[VAR15]], double %[[VAR17]]) void bar_long_double(void) { @@ -95,9 +95,9 @@ void bar_long_double(void) { // CHECK: store ppc_fp128 0xM40000000000000000000000000000000, ppc_fp128* %[[VAR22]] // CHECK: store ppc_fp128 0xMC0040000000000000000000000000000, ppc_fp128* %[[VAR23]] // CHECK: %[[VAR24:[A-Za-z0-9.]+]] = getelementptr { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 0 -// CHECK: %[[VAR25:[A-Za-z0-9.]+]] = load ppc_fp128* %[[VAR24]], align 1 +// CHECK: %[[VAR25:[A-Za-z0-9.]+]] = load ppc_fp128, ppc_fp128* %[[VAR24]], align 1 // CHECK: %[[VAR26:[A-Za-z0-9.]+]] = getelementptr { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 1 -// CHECK: %[[VAR27:[A-Za-z0-9.]+]] = load ppc_fp128* %[[VAR26]], align 1 +// CHECK: %[[VAR27:[A-Za-z0-9.]+]] = load ppc_fp128, ppc_fp128* %[[VAR26]], align 1 // CHECK: %{{[A-Za-z0-9.]+}} = call ppc_fp128 @foo_long_double(ppc_fp128 %[[VAR25]], ppc_fp128 %[[VAR27]]) void bar_int(void) { @@ -111,9 +111,9 @@ void bar_int(void) { // CHECK: store i32 2, i32* %[[VAR32]] // CHECK: store i32 -3, i32* %[[VAR33]] // CHECK: %[[VAR34:[A-Za-z0-9.]+]] = getelementptr { i32, i32 }, { i32, i32 }* %[[VAR31]], i32 0, i32 0 -// CHECK: %[[VAR35:[A-Za-z0-9.]+]] = load i32* %[[VAR34]], align 1 +// CHECK: %[[VAR35:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR34]], align 1 // CHECK: %[[VAR36:[A-Za-z0-9.]+]] = getelementptr { i32, i32 }, { i32, i32 }* %[[VAR31]], i32 0, i32 1 -// CHECK: %[[VAR37:[A-Za-z0-9.]+]] = load i32* %[[VAR36]], align 1 +// CHECK: %[[VAR37:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR36]], align 1 // CHECK: %{{[A-Za-z0-9.]+}} = call signext i32 @foo_int(i32 %[[VAR35]], i32 %[[VAR37]]) void bar_short(void) { @@ -127,9 +127,9 @@ void bar_short(void) { // CHECK: store i16 2, i16* %[[VAR42]] // CHECK: store i16 -3, i16* %[[VAR43]] // CHECK: %[[VAR44:[A-Za-z0-9.]+]] = getelementptr { i16, i16 }, { i16, i16 }* %[[VAR41]], i32 0, i32 0 -// CHECK: %[[VAR45:[A-Za-z0-9.]+]] = load i16* %[[VAR44]], align 1 +// CHECK: %[[VAR45:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR44]], align 1 // CHECK: %[[VAR46:[A-Za-z0-9.]+]] = getelementptr { i16, i16 }, { i16, i16 }* %[[VAR41]], i32 0, i32 1 -// CHECK: %[[VAR47:[A-Za-z0-9.]+]] = load i16* %[[VAR46]], align 1 +// CHECK: %[[VAR47:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR46]], align 1 // CHECK: %{{[A-Za-z0-9.]+}} = call signext i16 @foo_short(i16 %[[VAR45]], i16 %[[VAR47]]) void bar_char(void) { @@ -143,9 +143,9 @@ void bar_char(void) { // CHECK: store i8 2, i8* %[[VAR52]] // CHECK: store i8 -3, i8* %[[VAR53]] // CHECK: %[[VAR54:[A-Za-z0-9.]+]] = getelementptr { i8, i8 }, { i8, i8 }* %[[VAR51]], i32 0, i32 0 -// CHECK: %[[VAR55:[A-Za-z0-9.]+]] = load i8* %[[VAR54]], align 1 +// CHECK: %[[VAR55:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR54]], align 1 // CHECK: %[[VAR56:[A-Za-z0-9.]+]] = getelementptr { i8, i8 }, { i8, i8 }* %[[VAR51]], i32 0, i32 1 -// CHECK: %[[VAR57:[A-Za-z0-9.]+]] = load i8* %[[VAR56]], align 1 +// CHECK: %[[VAR57:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR56]], align 1 // CHECK: %{{[A-Za-z0-9.]+}} = call signext i8 @foo_char(i8 %[[VAR55]], i8 %[[VAR57]]) void bar_long(void) { @@ -159,9 +159,9 @@ void bar_long(void) { // CHECK: store i64 2, i64* %[[VAR62]] // CHECK: store i64 -3, i64* %[[VAR63]] // CHECK: %[[VAR64:[A-Za-z0-9.]+]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAR61]], i32 0, i32 0 -// CHECK: %[[VAR65:[A-Za-z0-9.]+]] = load i64* %[[VAR64]], align 1 +// CHECK: %[[VAR65:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR64]], align 1 // 
CHECK: %[[VAR66:[A-Za-z0-9.]+]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAR61]], i32 0, i32 1 -// CHECK: %[[VAR67:[A-Za-z0-9.]+]] = load i64* %[[VAR66]], align 1 +// CHECK: %[[VAR67:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR66]], align 1 // CHECK: %{{[A-Za-z0-9.]+}} = call i64 @foo_long(i64 %[[VAR65]], i64 %[[VAR67]]) void bar_long_long(void) { @@ -175,9 +175,9 @@ void bar_long_long(void) { // CHECK: store i64 2, i64* %[[VAR72]] // CHECK: store i64 -3, i64* %[[VAR73]] // CHECK: %[[VAR74:[A-Za-z0-9.]+]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAR71]], i32 0, i32 0 -// CHECK: %[[VAR75:[A-Za-z0-9.]+]] = load i64* %[[VAR74]], align 1 +// CHECK: %[[VAR75:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR74]], align 1 // CHECK: %[[VAR76:[A-Za-z0-9.]+]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAR71]], i32 0, i32 1 -// CHECK: %[[VAR77:[A-Za-z0-9.]+]] = load i64* %[[VAR76]], align 1 +// CHECK: %[[VAR77:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR76]], align 1 // CHECK: %{{[A-Za-z0-9.]+}} = call i64 @foo_long_long(i64 %[[VAR75]], i64 %[[VAR77]]) // CHECK: attributes [[NUW]] = { nounwind{{.*}} } diff --git a/test/CodeGen/ppc64-struct-onefloat.c b/test/CodeGen/ppc64-struct-onefloat.c index 121172ff4f..d0ccfbe34a 100644 --- a/test/CodeGen/ppc64-struct-onefloat.c +++ b/test/CodeGen/ppc64-struct-onefloat.c @@ -36,14 +36,14 @@ void foo(void) // CHECK-LABEL: define void @foo // CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1, %struct.s1* %p1, i32 0, i32 0 -// CHECK: %{{[0-9]+}} = load float* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: %{{[0-9]+}} = load float, float* %{{[a-zA-Z0-9.]+}}, align 1 // CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2, %struct.s2* %p2, i32 0, i32 0 -// CHECK: %{{[0-9]+}} = load double* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: %{{[0-9]+}} = load double, double* %{{[a-zA-Z0-9.]+}}, align 1 // CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s4, %struct.s4* %p4, i32 0, i32 0 // CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1, %struct.s1* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0 -// CHECK: %{{[0-9]+}} = load float* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: %{{[0-9]+}} = load float, float* %{{[a-zA-Z0-9.]+}}, align 1 // CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s5, %struct.s5* %p5, i32 0, i32 0 // CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2, %struct.s2* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0 -// CHECK: %{{[0-9]+}} = load double* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: %{{[0-9]+}} = load double, double* %{{[a-zA-Z0-9.]+}}, align 1 // CHECK: call void @bar(float inreg %{{[0-9]+}}, double inreg %{{[0-9]+}}, float inreg %{{[0-9]+}}, double inreg %{{[0-9]+}}) // CHECK: ret void diff --git a/test/CodeGen/ppc64-varargs-complex.c b/test/CodeGen/ppc64-varargs-complex.c index 79491cc00c..f790629345 100644 --- a/test/CodeGen/ppc64-varargs-complex.c +++ b/test/CodeGen/ppc64-varargs-complex.c @@ -8,7 +8,7 @@ void testva (int n, ...) va_list ap; _Complex int i = va_arg(ap, _Complex int); - // CHECK: %[[VAR40:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]] + // CHECK: %[[VAR40:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]] // CHECK-NEXT: %[[VAR41:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR40]], i64 16 // CHECK-NEXT: store i8* %[[VAR41]], i8** %[[VAR100]] // CHECK-NEXT: %[[VAR1:[A-Za-z0-9.]+]] = ptrtoint i8* %[[VAR40]] to i64 @@ -16,15 +16,15 @@ void testva (int n, ...) 
// CHECK-NEXT: %[[VAR3:[A-Za-z0-9.]+]] = add i64 %[[VAR1]], 12 // CHECK-NEXT: %[[VAR4:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR2]] to i32* // CHECK-NEXT: %[[VAR5:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR3]] to i32* - // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i32* %[[VAR4]] - // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = load i32* %[[VAR5]] + // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR4]] + // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR5]] // CHECK-NEXT: %[[VAR8:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR0:[A-Za-z0-9.]+]], i32 0, i32 0 // CHECK-NEXT: %[[VAR9:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR0]], i32 0, i32 1 // CHECK-NEXT: store i32 %[[VAR6]], i32* %[[VAR8]] // CHECK-NEXT: store i32 %[[VAR7]], i32* %[[VAR9]] _Complex short s = va_arg(ap, _Complex short); - // CHECK: %[[VAR50:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]] + // CHECK: %[[VAR50:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]] // CHECK-NEXT: %[[VAR51:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR50]], i64 16 // CHECK-NEXT: store i8* %[[VAR51]], i8** %[[VAR100]] // CHECK: %[[VAR11:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64 @@ -32,15 +32,15 @@ void testva (int n, ...) // CHECK-NEXT: %[[VAR13:[A-Za-z0-9.]+]] = add i64 %[[VAR11]], 14 // CHECK-NEXT: %[[VAR14:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR12]] to i16* // CHECK-NEXT: %[[VAR15:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR13]] to i16* - // CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i16* %[[VAR14]] - // CHECK-NEXT: %[[VAR17:[A-Za-z0-9.]+]] = load i16* %[[VAR15]] + // CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR14]] + // CHECK-NEXT: %[[VAR17:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR15]] // CHECK-NEXT: %[[VAR18:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR10:[A-Za-z0-9.]+]], i32 0, i32 0 // CHECK-NEXT: %[[VAR19:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR10]], i32 0, i32 1 // CHECK-NEXT: store i16 %[[VAR16]], i16* %[[VAR18]] // CHECK-NEXT: store i16 %[[VAR17]], i16* %[[VAR19]] _Complex char c = va_arg(ap, _Complex char); - // CHECK: %[[VAR60:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]] + // CHECK: %[[VAR60:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]] // CHECK-NEXT: %[[VAR61:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR60]], i64 16 // CHECK-NEXT: store i8* %[[VAR61]], i8** %[[VAR100]] // CHECK: %[[VAR21:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64 @@ -48,15 +48,15 @@ void testva (int n, ...) 
// CHECK-NEXT: %[[VAR23:[A-Za-z0-9.]+]] = add i64 %[[VAR21]], 15 // CHECK-NEXT: %[[VAR24:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR22]] to i8* // CHECK-NEXT: %[[VAR25:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR23]] to i8* - // CHECK-NEXT: %[[VAR26:[A-Za-z0-9.]+]] = load i8* %[[VAR24]] - // CHECK-NEXT: %[[VAR27:[A-Za-z0-9.]+]] = load i8* %[[VAR25]] + // CHECK-NEXT: %[[VAR26:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR24]] + // CHECK-NEXT: %[[VAR27:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR25]] // CHECK-NEXT: %[[VAR28:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR20:[A-Za-z0-9.]+]], i32 0, i32 0 // CHECK-NEXT: %[[VAR29:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR20]], i32 0, i32 1 // CHECK-NEXT: store i8 %[[VAR26]], i8* %[[VAR28]] // CHECK-NEXT: store i8 %[[VAR27]], i8* %[[VAR29]] _Complex float f = va_arg(ap, _Complex float); - // CHECK: %[[VAR70:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]] + // CHECK: %[[VAR70:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]] // CHECK-NEXT: %[[VAR71:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR70]], i64 16 // CHECK-NEXT: store i8* %[[VAR71]], i8** %[[VAR100]] // CHECK: %[[VAR31:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64 @@ -64,8 +64,8 @@ void testva (int n, ...) // CHECK-NEXT: %[[VAR33:[A-Za-z0-9.]+]] = add i64 %[[VAR31]], 12 // CHECK-NEXT: %[[VAR34:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR32]] to float* // CHECK-NEXT: %[[VAR35:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR33]] to float* - // CHECK-NEXT: %[[VAR36:[A-Za-z0-9.]+]] = load float* %[[VAR34]] - // CHECK-NEXT: %[[VAR37:[A-Za-z0-9.]+]] = load float* %[[VAR35]] + // CHECK-NEXT: %[[VAR36:[A-Za-z0-9.]+]] = load float, float* %[[VAR34]] + // CHECK-NEXT: %[[VAR37:[A-Za-z0-9.]+]] = load float, float* %[[VAR35]] // CHECK-NEXT: %[[VAR38:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR30:[A-Za-z0-9.]+]], i32 0, i32 0 // CHECK-NEXT: %[[VAR39:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR30]], i32 0, i32 1 // CHECK-NEXT: store float %[[VAR36]], float* %[[VAR38]] diff --git a/test/CodeGen/ppc64le-aggregates.c b/test/CodeGen/ppc64le-aggregates.c index e193dcc3fa..60a41f67c6 100644 --- a/test/CodeGen/ppc64le-aggregates.c +++ b/test/CodeGen/ppc64le-aggregates.c @@ -54,49 +54,49 @@ struct fabc func_fabc(struct fabc x) { return x; } struct f2a2b func_f2a2b(struct f2a2b x) { return x; } // CHECK-LABEL: @call_f1 -// CHECK: %[[TMP:[^ ]+]] = load float* getelementptr inbounds (%struct.f1* @global_f1, i32 0, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load float, float* getelementptr inbounds (%struct.f1* @global_f1, i32 0, i32 0, i32 0), align 1 // CHECK: call [1 x float] @func_f1(float inreg %[[TMP]]) struct f1 global_f1; void call_f1(void) { global_f1 = func_f1(global_f1); } // CHECK-LABEL: @call_f2 -// CHECK: %[[TMP:[^ ]+]] = load [2 x float]* getelementptr inbounds (%struct.f2* @global_f2, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [2 x float], [2 x float]* getelementptr inbounds (%struct.f2* @global_f2, i32 0, i32 0), align 1 // CHECK: call [2 x float] @func_f2([2 x float] %[[TMP]]) struct f2 global_f2; void call_f2(void) { global_f2 = func_f2(global_f2); } // CHECK-LABEL: @call_f3 -// CHECK: %[[TMP:[^ ]+]] = load [3 x float]* getelementptr inbounds (%struct.f3* @global_f3, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [3 x float], [3 x float]* getelementptr inbounds (%struct.f3* @global_f3, i32 0, i32 0), align 1 // CHECK: call [3 x float] @func_f3([3 x float] 
%[[TMP]]) struct f3 global_f3; void call_f3(void) { global_f3 = func_f3(global_f3); } // CHECK-LABEL: @call_f4 -// CHECK: %[[TMP:[^ ]+]] = load [4 x float]* getelementptr inbounds (%struct.f4* @global_f4, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [4 x float], [4 x float]* getelementptr inbounds (%struct.f4* @global_f4, i32 0, i32 0), align 1 // CHECK: call [4 x float] @func_f4([4 x float] %[[TMP]]) struct f4 global_f4; void call_f4(void) { global_f4 = func_f4(global_f4); } // CHECK-LABEL: @call_f5 -// CHECK: %[[TMP:[^ ]+]] = load [5 x float]* getelementptr inbounds (%struct.f5* @global_f5, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [5 x float], [5 x float]* getelementptr inbounds (%struct.f5* @global_f5, i32 0, i32 0), align 1 // CHECK: call [5 x float] @func_f5([5 x float] %[[TMP]]) struct f5 global_f5; void call_f5(void) { global_f5 = func_f5(global_f5); } // CHECK-LABEL: @call_f6 -// CHECK: %[[TMP:[^ ]+]] = load [6 x float]* getelementptr inbounds (%struct.f6* @global_f6, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [6 x float], [6 x float]* getelementptr inbounds (%struct.f6* @global_f6, i32 0, i32 0), align 1 // CHECK: call [6 x float] @func_f6([6 x float] %[[TMP]]) struct f6 global_f6; void call_f6(void) { global_f6 = func_f6(global_f6); } // CHECK-LABEL: @call_f7 -// CHECK: %[[TMP:[^ ]+]] = load [7 x float]* getelementptr inbounds (%struct.f7* @global_f7, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [7 x float], [7 x float]* getelementptr inbounds (%struct.f7* @global_f7, i32 0, i32 0), align 1 // CHECK: call [7 x float] @func_f7([7 x float] %[[TMP]]) struct f7 global_f7; void call_f7(void) { global_f7 = func_f7(global_f7); } // CHECK-LABEL: @call_f8 -// CHECK: %[[TMP:[^ ]+]] = load [8 x float]* getelementptr inbounds (%struct.f8* @global_f8, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [8 x float], [8 x float]* getelementptr inbounds (%struct.f8* @global_f8, i32 0, i32 0), align 1 // CHECK: call [8 x float] @func_f8([8 x float] %[[TMP]]) struct f8 global_f8; void call_f8(void) { global_f8 = func_f8(global_f8); } @@ -105,19 +105,19 @@ void call_f8(void) { global_f8 = func_f8(global_f8); } // CHECK: %[[TMP1:[^ ]+]] = alloca [5 x i64] // CHECK: %[[TMP2:[^ ]+]] = bitcast [5 x i64]* %[[TMP1]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %[[TMP2]], i8* bitcast (%struct.f9* @global_f9 to i8*), i64 36, i32 1, i1 false) -// CHECK: %[[TMP3:[^ ]+]] = load [5 x i64]* %[[TMP1]] +// CHECK: %[[TMP3:[^ ]+]] = load [5 x i64], [5 x i64]* %[[TMP1]] // CHECK: call void @func_f9(%struct.f9* sret %{{[^ ]+}}, [5 x i64] %[[TMP3]]) struct f9 global_f9; void call_f9(void) { global_f9 = func_f9(global_f9); } // CHECK-LABEL: @call_fab -// CHECK: %[[TMP:[^ ]+]] = load [2 x float]* bitcast (%struct.fab* @global_fab to [2 x float]*) +// CHECK: %[[TMP:[^ ]+]] = load [2 x float], [2 x float]* bitcast (%struct.fab* @global_fab to [2 x float]*) // CHECK: call [2 x float] @func_fab([2 x float] %[[TMP]]) struct fab global_fab; void call_fab(void) { global_fab = func_fab(global_fab); } // CHECK-LABEL: @call_fabc -// CHECK: %[[TMP:[^ ]+]] = load [3 x float]* bitcast (%struct.fabc* @global_fabc to [3 x float]*) +// CHECK: %[[TMP:[^ ]+]] = load [3 x float], [3 x float]* bitcast (%struct.fabc* @global_fabc to [3 x float]*) // CHECK: call [3 x float] @func_fabc([3 x float] %[[TMP]]) struct fabc global_fabc; void call_fabc(void) { global_fabc = func_fabc(global_fabc); } @@ -172,49 +172,49 @@ struct vab func_vab(struct vab x) { return x; } struct vabc 
func_vabc(struct vabc x) { return x; } // CHECK-LABEL: @call_v1 -// CHECK: %[[TMP:[^ ]+]] = load <4 x i32>* getelementptr inbounds (%struct.v1* @global_v1, i32 0, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load <4 x i32>, <4 x i32>* getelementptr inbounds (%struct.v1* @global_v1, i32 0, i32 0, i32 0), align 1 // CHECK: call [1 x <4 x i32>] @func_v1(<4 x i32> inreg %[[TMP]]) struct v1 global_v1; void call_v1(void) { global_v1 = func_v1(global_v1); } // CHECK-LABEL: @call_v2 -// CHECK: %[[TMP:[^ ]+]] = load [2 x <4 x i32>]* getelementptr inbounds (%struct.v2* @global_v2, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [2 x <4 x i32>], [2 x <4 x i32>]* getelementptr inbounds (%struct.v2* @global_v2, i32 0, i32 0), align 1 // CHECK: call [2 x <4 x i32>] @func_v2([2 x <4 x i32>] %[[TMP]]) struct v2 global_v2; void call_v2(void) { global_v2 = func_v2(global_v2); } // CHECK-LABEL: @call_v3 -// CHECK: %[[TMP:[^ ]+]] = load [3 x <4 x i32>]* getelementptr inbounds (%struct.v3* @global_v3, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [3 x <4 x i32>], [3 x <4 x i32>]* getelementptr inbounds (%struct.v3* @global_v3, i32 0, i32 0), align 1 // CHECK: call [3 x <4 x i32>] @func_v3([3 x <4 x i32>] %[[TMP]]) struct v3 global_v3; void call_v3(void) { global_v3 = func_v3(global_v3); } // CHECK-LABEL: @call_v4 -// CHECK: %[[TMP:[^ ]+]] = load [4 x <4 x i32>]* getelementptr inbounds (%struct.v4* @global_v4, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [4 x <4 x i32>], [4 x <4 x i32>]* getelementptr inbounds (%struct.v4* @global_v4, i32 0, i32 0), align 1 // CHECK: call [4 x <4 x i32>] @func_v4([4 x <4 x i32>] %[[TMP]]) struct v4 global_v4; void call_v4(void) { global_v4 = func_v4(global_v4); } // CHECK-LABEL: @call_v5 -// CHECK: %[[TMP:[^ ]+]] = load [5 x <4 x i32>]* getelementptr inbounds (%struct.v5* @global_v5, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [5 x <4 x i32>], [5 x <4 x i32>]* getelementptr inbounds (%struct.v5* @global_v5, i32 0, i32 0), align 1 // CHECK: call [5 x <4 x i32>] @func_v5([5 x <4 x i32>] %[[TMP]]) struct v5 global_v5; void call_v5(void) { global_v5 = func_v5(global_v5); } // CHECK-LABEL: @call_v6 -// CHECK: %[[TMP:[^ ]+]] = load [6 x <4 x i32>]* getelementptr inbounds (%struct.v6* @global_v6, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [6 x <4 x i32>], [6 x <4 x i32>]* getelementptr inbounds (%struct.v6* @global_v6, i32 0, i32 0), align 1 // CHECK: call [6 x <4 x i32>] @func_v6([6 x <4 x i32>] %[[TMP]]) struct v6 global_v6; void call_v6(void) { global_v6 = func_v6(global_v6); } // CHECK-LABEL: @call_v7 -// CHECK: %[[TMP:[^ ]+]] = load [7 x <4 x i32>]* getelementptr inbounds (%struct.v7* @global_v7, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [7 x <4 x i32>], [7 x <4 x i32>]* getelementptr inbounds (%struct.v7* @global_v7, i32 0, i32 0), align 1 // CHECK: call [7 x <4 x i32>] @func_v7([7 x <4 x i32>] %[[TMP]]) struct v7 global_v7; void call_v7(void) { global_v7 = func_v7(global_v7); } // CHECK-LABEL: @call_v8 -// CHECK: %[[TMP:[^ ]+]] = load [8 x <4 x i32>]* getelementptr inbounds (%struct.v8* @global_v8, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [8 x <4 x i32>], [8 x <4 x i32>]* getelementptr inbounds (%struct.v8* @global_v8, i32 0, i32 0), align 1 // CHECK: call [8 x <4 x i32>] @func_v8([8 x <4 x i32>] %[[TMP]]) struct v8 global_v8; void call_v8(void) { global_v8 = func_v8(global_v8); } @@ -225,13 +225,13 @@ struct v9 global_v9; void call_v9(void) { global_v9 = func_v9(global_v9); } // 
CHECK-LABEL: @call_vab -// CHECK: %[[TMP:[^ ]+]] = load [2 x <4 x i32>]* bitcast (%struct.vab* @global_vab to [2 x <4 x i32>]*) +// CHECK: %[[TMP:[^ ]+]] = load [2 x <4 x i32>], [2 x <4 x i32>]* bitcast (%struct.vab* @global_vab to [2 x <4 x i32>]*) // CHECK: call [2 x <4 x i32>] @func_vab([2 x <4 x i32>] %[[TMP]]) struct vab global_vab; void call_vab(void) { global_vab = func_vab(global_vab); } // CHECK-LABEL: @call_vabc -// CHECK: %[[TMP:[^ ]+]] = load [3 x <4 x i32>]* bitcast (%struct.vabc* @global_vabc to [3 x <4 x i32>]*) +// CHECK: %[[TMP:[^ ]+]] = load [3 x <4 x i32>], [3 x <4 x i32>]* bitcast (%struct.vabc* @global_vabc to [3 x <4 x i32>]*) // CHECK: call [3 x <4 x i32>] @func_vabc([3 x <4 x i32>] %[[TMP]]) struct vabc global_vabc; void call_vabc(void) { global_vabc = func_vabc(global_vabc); } @@ -289,49 +289,49 @@ struct v3fab func_v3fab(struct v3fab x) { return x; } struct v3fabc func_v3fabc(struct v3fabc x) { return x; } // CHECK-LABEL: @call_v3f1 -// CHECK: %[[TMP:[^ ]+]] = load <3 x float>* getelementptr inbounds (%struct.v3f1* @global_v3f1, i32 0, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load <3 x float>, <3 x float>* getelementptr inbounds (%struct.v3f1* @global_v3f1, i32 0, i32 0, i32 0), align 1 // CHECK: call [1 x <3 x float>] @func_v3f1(<3 x float> inreg %[[TMP]]) struct v3f1 global_v3f1; void call_v3f1(void) { global_v3f1 = func_v3f1(global_v3f1); } // CHECK-LABEL: @call_v3f2 -// CHECK: %[[TMP:[^ ]+]] = load [2 x <3 x float>]* getelementptr inbounds (%struct.v3f2* @global_v3f2, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [2 x <3 x float>], [2 x <3 x float>]* getelementptr inbounds (%struct.v3f2* @global_v3f2, i32 0, i32 0), align 1 // CHECK: call [2 x <3 x float>] @func_v3f2([2 x <3 x float>] %[[TMP]]) struct v3f2 global_v3f2; void call_v3f2(void) { global_v3f2 = func_v3f2(global_v3f2); } // CHECK-LABEL: @call_v3f3 -// CHECK: %[[TMP:[^ ]+]] = load [3 x <3 x float>]* getelementptr inbounds (%struct.v3f3* @global_v3f3, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [3 x <3 x float>], [3 x <3 x float>]* getelementptr inbounds (%struct.v3f3* @global_v3f3, i32 0, i32 0), align 1 // CHECK: call [3 x <3 x float>] @func_v3f3([3 x <3 x float>] %[[TMP]]) struct v3f3 global_v3f3; void call_v3f3(void) { global_v3f3 = func_v3f3(global_v3f3); } // CHECK-LABEL: @call_v3f4 -// CHECK: %[[TMP:[^ ]+]] = load [4 x <3 x float>]* getelementptr inbounds (%struct.v3f4* @global_v3f4, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [4 x <3 x float>], [4 x <3 x float>]* getelementptr inbounds (%struct.v3f4* @global_v3f4, i32 0, i32 0), align 1 // CHECK: call [4 x <3 x float>] @func_v3f4([4 x <3 x float>] %[[TMP]]) struct v3f4 global_v3f4; void call_v3f4(void) { global_v3f4 = func_v3f4(global_v3f4); } // CHECK-LABEL: @call_v3f5 -// CHECK: %[[TMP:[^ ]+]] = load [5 x <3 x float>]* getelementptr inbounds (%struct.v3f5* @global_v3f5, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [5 x <3 x float>], [5 x <3 x float>]* getelementptr inbounds (%struct.v3f5* @global_v3f5, i32 0, i32 0), align 1 // CHECK: call [5 x <3 x float>] @func_v3f5([5 x <3 x float>] %[[TMP]]) struct v3f5 global_v3f5; void call_v3f5(void) { global_v3f5 = func_v3f5(global_v3f5); } // CHECK-LABEL: @call_v3f6 -// CHECK: %[[TMP:[^ ]+]] = load [6 x <3 x float>]* getelementptr inbounds (%struct.v3f6* @global_v3f6, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [6 x <3 x float>], [6 x <3 x float>]* getelementptr inbounds (%struct.v3f6* @global_v3f6, i32 0, i32 0), align 1 // CHECK: 
call [6 x <3 x float>] @func_v3f6([6 x <3 x float>] %[[TMP]]) struct v3f6 global_v3f6; void call_v3f6(void) { global_v3f6 = func_v3f6(global_v3f6); } // CHECK-LABEL: @call_v3f7 -// CHECK: %[[TMP:[^ ]+]] = load [7 x <3 x float>]* getelementptr inbounds (%struct.v3f7* @global_v3f7, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [7 x <3 x float>], [7 x <3 x float>]* getelementptr inbounds (%struct.v3f7* @global_v3f7, i32 0, i32 0), align 1 // CHECK: call [7 x <3 x float>] @func_v3f7([7 x <3 x float>] %[[TMP]]) struct v3f7 global_v3f7; void call_v3f7(void) { global_v3f7 = func_v3f7(global_v3f7); } // CHECK-LABEL: @call_v3f8 -// CHECK: %[[TMP:[^ ]+]] = load [8 x <3 x float>]* getelementptr inbounds (%struct.v3f8* @global_v3f8, i32 0, i32 0), align 1 +// CHECK: %[[TMP:[^ ]+]] = load [8 x <3 x float>], [8 x <3 x float>]* getelementptr inbounds (%struct.v3f8* @global_v3f8, i32 0, i32 0), align 1 // CHECK: call [8 x <3 x float>] @func_v3f8([8 x <3 x float>] %[[TMP]]) struct v3f8 global_v3f8; void call_v3f8(void) { global_v3f8 = func_v3f8(global_v3f8); } @@ -342,13 +342,13 @@ struct v3f9 global_v3f9; void call_v3f9(void) { global_v3f9 = func_v3f9(global_v3f9); } // CHECK-LABEL: @call_v3fab -// CHECK: %[[TMP:[^ ]+]] = load [2 x <3 x float>]* bitcast (%struct.v3fab* @global_v3fab to [2 x <3 x float>]*) +// CHECK: %[[TMP:[^ ]+]] = load [2 x <3 x float>], [2 x <3 x float>]* bitcast (%struct.v3fab* @global_v3fab to [2 x <3 x float>]*) // CHECK: call [2 x <3 x float>] @func_v3fab([2 x <3 x float>] %[[TMP]]) struct v3fab global_v3fab; void call_v3fab(void) { global_v3fab = func_v3fab(global_v3fab); } // CHECK-LABEL: @call_v3fabc -// CHECK: %[[TMP:[^ ]+]] = load [3 x <3 x float>]* bitcast (%struct.v3fabc* @global_v3fabc to [3 x <3 x float>]*) +// CHECK: %[[TMP:[^ ]+]] = load [3 x <3 x float>], [3 x <3 x float>]* bitcast (%struct.v3fabc* @global_v3fabc to [3 x <3 x float>]*) // CHECK: call [3 x <3 x float>] @func_v3fabc([3 x <3 x float>] %[[TMP]]) struct v3fabc global_v3fabc; void call_v3fabc(void) { global_v3fabc = func_v3fabc(global_v3fabc); } diff --git a/test/CodeGen/ppc64le-varargs-complex.c b/test/CodeGen/ppc64le-varargs-complex.c index 07d207650b..68dfa0b69f 100644 --- a/test/CodeGen/ppc64le-varargs-complex.c +++ b/test/CodeGen/ppc64le-varargs-complex.c @@ -8,60 +8,60 @@ void testva (int n, ...) 
va_list ap; _Complex int i = va_arg(ap, _Complex int); - // CHECK: %[[VAR40:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]] + // CHECK: %[[VAR40:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]] // CHECK-NEXT: %[[VAR41:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR40]], i64 16 // CHECK-NEXT: store i8* %[[VAR41]], i8** %[[VAR100]] // CHECK-NEXT: %[[VAR1:[A-Za-z0-9.]+]] = ptrtoint i8* %[[VAR40]] to i64 // CHECK-NEXT: %[[VAR3:[A-Za-z0-9.]+]] = add i64 %[[VAR1]], 8 // CHECK-NEXT: %[[VAR4:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR1]] to i32* // CHECK-NEXT: %[[VAR5:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR3]] to i32* - // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i32* %[[VAR4]] - // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = load i32* %[[VAR5]] + // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR4]] + // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR5]] // CHECK-NEXT: %[[VAR8:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR0:[A-Za-z0-9.]+]], i32 0, i32 0 // CHECK-NEXT: %[[VAR9:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR0]], i32 0, i32 1 // CHECK-NEXT: store i32 %[[VAR6]], i32* %[[VAR8]] // CHECK-NEXT: store i32 %[[VAR7]], i32* %[[VAR9]] _Complex short s = va_arg(ap, _Complex short); - // CHECK: %[[VAR50:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]] + // CHECK: %[[VAR50:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]] // CHECK-NEXT: %[[VAR51:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR50]], i64 16 // CHECK-NEXT: store i8* %[[VAR51]], i8** %[[VAR100]] // CHECK: %[[VAR11:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64 // CHECK-NEXT: %[[VAR13:[A-Za-z0-9.]+]] = add i64 %[[VAR11]], 8 // CHECK-NEXT: %[[VAR14:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR11]] to i16* // CHECK-NEXT: %[[VAR15:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR13]] to i16* - // CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i16* %[[VAR14]] - // CHECK-NEXT: %[[VAR17:[A-Za-z0-9.]+]] = load i16* %[[VAR15]] + // CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR14]] + // CHECK-NEXT: %[[VAR17:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR15]] // CHECK-NEXT: %[[VAR18:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR10:[A-Za-z0-9.]+]], i32 0, i32 0 // CHECK-NEXT: %[[VAR19:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR10]], i32 0, i32 1 // CHECK-NEXT: store i16 %[[VAR16]], i16* %[[VAR18]] // CHECK-NEXT: store i16 %[[VAR17]], i16* %[[VAR19]] _Complex char c = va_arg(ap, _Complex char); - // CHECK: %[[VAR60:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]] + // CHECK: %[[VAR60:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]] // CHECK-NEXT: %[[VAR61:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR60]], i64 16 // CHECK-NEXT: store i8* %[[VAR61]], i8** %[[VAR100]] // CHECK: %[[VAR21:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64 // CHECK-NEXT: %[[VAR23:[A-Za-z0-9.]+]] = add i64 %[[VAR21]], 8 // CHECK-NEXT: %[[VAR24:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR21]] to i8* // CHECK-NEXT: %[[VAR25:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR23]] to i8* - // CHECK-NEXT: %[[VAR26:[A-Za-z0-9.]+]] = load i8* %[[VAR24]] - // CHECK-NEXT: %[[VAR27:[A-Za-z0-9.]+]] = load i8* %[[VAR25]] + // CHECK-NEXT: %[[VAR26:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR24]] + // CHECK-NEXT: %[[VAR27:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR25]] // CHECK-NEXT: %[[VAR28:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR20:[A-Za-z0-9.]+]], i32 0, i32 0 // CHECK-NEXT: %[[VAR29:[A-Za-z0-9.]+]] = 
getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR20]], i32 0, i32 1 // CHECK-NEXT: store i8 %[[VAR26]], i8* %[[VAR28]] // CHECK-NEXT: store i8 %[[VAR27]], i8* %[[VAR29]] _Complex float f = va_arg(ap, _Complex float); - // CHECK: %[[VAR70:[A-Za-z0-9.]+]] = load i8** %[[VAR100:[A-Za-z0-9.]+]] + // CHECK: %[[VAR70:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]] // CHECK-NEXT: %[[VAR71:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR70]], i64 16 // CHECK-NEXT: store i8* %[[VAR71]], i8** %[[VAR100]] // CHECK: %[[VAR31:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64 // CHECK-NEXT: %[[VAR33:[A-Za-z0-9.]+]] = add i64 %[[VAR31]], 8 // CHECK-NEXT: %[[VAR34:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR31]] to float* // CHECK-NEXT: %[[VAR35:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR33]] to float* - // CHECK-NEXT: %[[VAR36:[A-Za-z0-9.]+]] = load float* %[[VAR34]] - // CHECK-NEXT: %[[VAR37:[A-Za-z0-9.]+]] = load float* %[[VAR35]] + // CHECK-NEXT: %[[VAR36:[A-Za-z0-9.]+]] = load float, float* %[[VAR34]] + // CHECK-NEXT: %[[VAR37:[A-Za-z0-9.]+]] = load float, float* %[[VAR35]] // CHECK-NEXT: %[[VAR38:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR30:[A-Za-z0-9.]+]], i32 0, i32 0 // CHECK-NEXT: %[[VAR39:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR30]], i32 0, i32 1 // CHECK-NEXT: store float %[[VAR36]], float* %[[VAR38]] diff --git a/test/CodeGen/pr12251.c b/test/CodeGen/pr12251.c index ea74cc6a59..dd5e4a1f92 100644 --- a/test/CodeGen/pr12251.c +++ b/test/CodeGen/pr12251.c @@ -6,6 +6,6 @@ enum e1 g1(enum e1 *x) { } // CHECK-LABEL: define i32 @g1 -// CHECK: load i32* %x, align 4 +// CHECK: load i32, i32* %x, align 4 // CHECK-NOT: range // CHECK: ret diff --git a/test/CodeGen/redefine_extname.c b/test/CodeGen/redefine_extname.c index e73a3ad8df..a91e5b836a 100644 --- a/test/CodeGen/redefine_extname.c +++ b/test/CodeGen/redefine_extname.c @@ -12,4 +12,4 @@ int fish() { return fake() + __PRAGMA_REDEFINE_EXTNAME + name; } // Check that the call to fake() is emitted as a call to real() // CHECK: call i32 @real() // Check that this also works with variables names -// CHECK: load i32* @alias +// CHECK: load i32, i32* @alias diff --git a/test/CodeGen/sparcv9-abi.c b/test/CodeGen/sparcv9-abi.c index 094d448121..bf447198cd 100644 --- a/test/CodeGen/sparcv9-abi.c +++ b/test/CodeGen/sparcv9-abi.c @@ -131,28 +131,28 @@ int f_variable(char *f, ...) 
{ va_start(ap, f); while ((c = *f++)) switch (c) { -// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 8 // CHECK-DAG: store i8* %[[NXT]], i8** %ap // CHECK-DAG: %[[EXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 4 // CHECK-DAG: %[[ADR:[^ ]+]] = bitcast i8* %[[EXT]] to i32* -// CHECK-DAG: load i32* %[[ADR]] +// CHECK-DAG: load i32, i32* %[[ADR]] // CHECK: br case 'i': s += va_arg(ap, int); break; -// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 8 // CHECK-DAG: store i8* %[[NXT]], i8** %ap // CHECK-DAG: %[[ADR:[^ ]+]] = bitcast i8* %[[CUR]] to i64* -// CHECK-DAG: load i64* %[[ADR]] +// CHECK-DAG: load i64, i64* %[[ADR]] // CHECK: br case 'l': s += va_arg(ap, long); break; -// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 8 // CHECK-DAG: store i8* %[[NXT]], i8** %ap // CHECK-DAG: %[[ADR:[^ ]+]] = bitcast i8* %[[CUR]] to %struct.tiny* @@ -161,7 +161,7 @@ int f_variable(char *f, ...) { s += va_arg(ap, struct tiny).a; break; -// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 16 // CHECK-DAG: store i8* %[[NXT]], i8** %ap // CHECK-DAG: %[[ADR:[^ ]+]] = bitcast i8* %[[CUR]] to %struct.small* @@ -170,11 +170,11 @@ int f_variable(char *f, ...) { s += *va_arg(ap, struct small).a; break; -// CHECK: %[[CUR:[^ ]+]] = load i8** %ap +// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap // CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 8 // CHECK-DAG: store i8* %[[NXT]], i8** %ap // CHECK-DAG: %[[IND:[^ ]+]] = bitcast i8* %[[CUR]] to %struct.medium** -// CHECK-DAG: %[[ADR:[^ ]+]] = load %struct.medium** %[[IND]] +// CHECK-DAG: %[[ADR:[^ ]+]] = load %struct.medium*, %struct.medium** %[[IND]] // CHECK: br case 'm': s += *va_arg(ap, struct medium).a; diff --git a/test/CodeGen/sse-builtins.c b/test/CodeGen/sse-builtins.c index 3feca7470e..6d66cca24f 100644 --- a/test/CodeGen/sse-builtins.c +++ b/test/CodeGen/sse-builtins.c @@ -36,7 +36,7 @@ __m128 test_sqrt_ss(__m128 x) { __m128 test_loadl_pi(__m128 x, void* y) { // CHECK: define {{.*}} @test_loadl_pi - // CHECK: load <2 x float>* {{.*}}, align 1{{$}} + // CHECK: load <2 x float>, <2 x float>* {{.*}}, align 1{{$}} // CHECK: shufflevector {{.*}} <4 x i32> return _mm_loadl_pi(x,y); @@ -44,7 +44,7 @@ __m128 test_loadl_pi(__m128 x, void* y) { __m128 test_loadh_pi(__m128 x, void* y) { // CHECK: define {{.*}} @test_loadh_pi - // CHECK: load <2 x float>* {{.*}}, align 1{{$}} + // CHECK: load <2 x float>, <2 x float>* {{.*}}, align 1{{$}} // CHECK: shufflevector {{.*}} <4 x i32> return _mm_loadh_pi(x,y); @@ -52,13 +52,13 @@ __m128 test_loadh_pi(__m128 x, void* y) { __m128 test_load_ss(void* y) { // CHECK: define {{.*}} @test_load_ss - // CHECK: load float* {{.*}}, align 1{{$}} + // CHECK: load float, float* {{.*}}, align 1{{$}} return _mm_load_ss(y); } __m128 test_load1_ps(void* y) { // CHECK: define {{.*}} @test_load1_ps - // CHECK: load float* {{.*}}, align 1{{$}} + // CHECK: load float, float* {{.*}}, align 1{{$}} return _mm_load1_ps(y); } @@ -70,31 +70,31 @@ void test_store_ss(__m128 x, void* y) { __m128d test_load1_pd(__m128 x, void* y) { // CHECK: define {{.*}} @test_load1_pd - // CHECK: load double* {{.*}}, align 1{{$}} + // CHECK: 
load double, double* {{.*}}, align 1{{$}} return _mm_load1_pd(y); } __m128d test_loadr_pd(__m128 x, void* y) { // CHECK: define {{.*}} @test_loadr_pd - // CHECK: load <2 x double>* {{.*}}, align 16{{$}} + // CHECK: load <2 x double>, <2 x double>* {{.*}}, align 16{{$}} return _mm_loadr_pd(y); } __m128d test_load_sd(void* y) { // CHECK: define {{.*}} @test_load_sd - // CHECK: load double* {{.*}}, align 1{{$}} + // CHECK: load double, double* {{.*}}, align 1{{$}} return _mm_load_sd(y); } __m128d test_loadh_pd(__m128d x, void* y) { // CHECK: define {{.*}} @test_loadh_pd - // CHECK: load double* {{.*}}, align 1{{$}} + // CHECK: load double, double* {{.*}}, align 1{{$}} return _mm_loadh_pd(x, y); } __m128d test_loadl_pd(__m128d x, void* y) { // CHECK: define {{.*}} @test_loadl_pd - // CHECK: load double* {{.*}}, align 1{{$}} + // CHECK: load double, double* {{.*}}, align 1{{$}} return _mm_loadl_pd(x, y); } @@ -131,7 +131,7 @@ void test_storel_pd(__m128d x, void* y) { __m128i test_loadl_epi64(void* y) { // CHECK: define {{.*}} @test_loadl_epi64 - // CHECK: load i64* {{.*}}, align 1{{$}} + // CHECK: load i64, i64* {{.*}}, align 1{{$}} return _mm_loadl_epi64(y); } diff --git a/test/CodeGen/systemz-inline-asm.c b/test/CodeGen/systemz-inline-asm.c index c9372333b1..92ed4bb032 100644 --- a/test/CodeGen/systemz-inline-asm.c +++ b/test/CodeGen/systemz-inline-asm.c @@ -124,8 +124,8 @@ long double test_f128(long double f, long double g) { asm("axbr %0, %2" : "=f" (f) : "0" (f), "f" (g)); return f; // CHECK: define void @test_f128(fp128* noalias nocapture sret [[DEST:%.*]], fp128* nocapture readonly, fp128* nocapture readonly) -// CHECK: %f = load fp128* %0 -// CHECK: %g = load fp128* %1 +// CHECK: %f = load fp128, fp128* %0 +// CHECK: %g = load fp128, fp128* %1 // CHECK: [[RESULT:%.*]] = tail call fp128 asm "axbr $0, $2", "=f,0,f"(fp128 %f, fp128 %g) // CHECK: store fp128 [[RESULT]], fp128* [[DEST]] } diff --git a/test/CodeGen/tbaa.cpp b/test/CodeGen/tbaa.cpp index 4a723f100e..2bff5d0ba0 100644 --- a/test/CodeGen/tbaa.cpp +++ b/test/CodeGen/tbaa.cpp @@ -203,9 +203,9 @@ struct five { char g13(struct five *a, struct five *b) { return a->b; // CHECK: define signext i8 @{{.*}}( -// CHECK: load i8* %{{.*}}, align 1, !tbaa [[TAG_char:!.*]] +// CHECK: load i8, i8* %{{.*}}, align 1, !tbaa [[TAG_char:!.*]] // PATH: define signext i8 @{{.*}}( -// PATH: load i8* %{{.*}}, align 1, !tbaa [[TAG_five_b:!.*]] +// PATH: load i8, i8* %{{.*}}, align 1, !tbaa [[TAG_five_b:!.*]] } struct six { @@ -216,9 +216,9 @@ struct six { }; char g14(struct six *a, struct six *b) { // CHECK: define signext i8 @{{.*}}( -// CHECK: load i8* %{{.*}}, align 1, !tbaa [[TAG_char]] +// CHECK: load i8, i8* %{{.*}}, align 1, !tbaa [[TAG_char]] // PATH: define signext i8 @{{.*}}( -// PATH: load i8* %{{.*}}, align 1, !tbaa [[TAG_six_b:!.*]] +// PATH: load i8, i8* %{{.*}}, align 1, !tbaa [[TAG_six_b:!.*]] return a->b; } diff --git a/test/CodeGen/trapv.c b/test/CodeGen/trapv.c index 51034108ee..04842408a0 100644 --- a/test/CodeGen/trapv.c +++ b/test/CodeGen/trapv.c @@ -6,14 +6,14 @@ int i, j, k; // CHECK-LABEL: define void @test0() void test0() { // -ftrapv doesn't affect unsigned arithmetic. 
- // CHECK: [[T1:%.*]] = load i32* @uj - // CHECK-NEXT: [[T2:%.*]] = load i32* @uk + // CHECK: [[T1:%.*]] = load i32, i32* @uj + // CHECK-NEXT: [[T2:%.*]] = load i32, i32* @uk // CHECK-NEXT: [[T3:%.*]] = add i32 [[T1]], [[T2]] // CHECK-NEXT: store i32 [[T3]], i32* @ui ui = uj + uk; - // CHECK: [[T1:%.*]] = load i32* @j - // CHECK-NEXT: [[T2:%.*]] = load i32* @k + // CHECK: [[T1:%.*]] = load i32, i32* @j + // CHECK-NEXT: [[T2:%.*]] = load i32, i32* @k // CHECK-NEXT: [[T3:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 [[T2]]) // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T3]], 0 // CHECK-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T3]], 1 @@ -28,7 +28,7 @@ void test1() { extern void opaque(int); opaque(i++); - // CHECK: [[T1:%.*]] = load i32* @i + // CHECK: [[T1:%.*]] = load i32, i32* @i // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1) // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 0 // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T2]], 1 @@ -42,7 +42,7 @@ void test2() { extern void opaque(int); opaque(++i); - // CHECK: [[T1:%.*]] = load i32* @i + // CHECK: [[T1:%.*]] = load i32, i32* @i // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1) // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 0 // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T2]], 1 diff --git a/test/CodeGen/unsigned-overflow.c b/test/CodeGen/unsigned-overflow.c index 01ed0bf8cb..c91be3356e 100644 --- a/test/CodeGen/unsigned-overflow.c +++ b/test/CodeGen/unsigned-overflow.c @@ -11,8 +11,8 @@ extern void opaqueint(unsigned int); // CHECK-LABEL: define void @testlongadd() void testlongadd() { - // CHECK: [[T1:%.*]] = load i64* @lj - // CHECK-NEXT: [[T2:%.*]] = load i64* @lk + // CHECK: [[T1:%.*]] = load i64, i64* @lj + // CHECK-NEXT: [[T2:%.*]] = load i64, i64* @lk // CHECK-NEXT: [[T3:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[T1]], i64 [[T2]]) // CHECK-NEXT: [[T4:%.*]] = extractvalue { i64, i1 } [[T3]], 0 // CHECK-NEXT: [[T5:%.*]] = extractvalue { i64, i1 } [[T3]], 1 @@ -23,8 +23,8 @@ void testlongadd() { // CHECK-LABEL: define void @testlongsub() void testlongsub() { - // CHECK: [[T1:%.*]] = load i64* @lj - // CHECK-NEXT: [[T2:%.*]] = load i64* @lk + // CHECK: [[T1:%.*]] = load i64, i64* @lj + // CHECK-NEXT: [[T2:%.*]] = load i64, i64* @lk // CHECK-NEXT: [[T3:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[T1]], i64 [[T2]]) // CHECK-NEXT: [[T4:%.*]] = extractvalue { i64, i1 } [[T3]], 0 // CHECK-NEXT: [[T5:%.*]] = extractvalue { i64, i1 } [[T3]], 1 @@ -35,8 +35,8 @@ void testlongsub() { // CHECK-LABEL: define void @testlongmul() void testlongmul() { - // CHECK: [[T1:%.*]] = load i64* @lj - // CHECK-NEXT: [[T2:%.*]] = load i64* @lk + // CHECK: [[T1:%.*]] = load i64, i64* @lj + // CHECK-NEXT: [[T2:%.*]] = load i64, i64* @lk // CHECK-NEXT: [[T3:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[T1]], i64 [[T2]]) // CHECK-NEXT: [[T4:%.*]] = extractvalue { i64, i1 } [[T3]], 0 // CHECK-NEXT: [[T5:%.*]] = extractvalue { i64, i1 } [[T3]], 1 @@ -48,7 +48,7 @@ void testlongmul() { void testlongpostinc() { opaquelong(li++); - // CHECK: [[T1:%.*]] = load i64* @li + // CHECK: [[T1:%.*]] = load i64, i64* @li // CHECK-NEXT: [[T2:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[T1]], i64 1) // CHECK-NEXT: [[T3:%.*]] = extractvalue { i64, i1 } [[T2]], 0 // CHECK-NEXT: [[T4:%.*]] = extractvalue { i64, i1 } [[T2]], 1 @@ -59,7 +59,7 @@ void 
testlongpostinc() { void testlongpreinc() { opaquelong(++li); - // CHECK: [[T1:%.*]] = load i64* @li + // CHECK: [[T1:%.*]] = load i64, i64* @li // CHECK-NEXT: [[T2:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[T1]], i64 1) // CHECK-NEXT: [[T3:%.*]] = extractvalue { i64, i1 } [[T2]], 0 // CHECK-NEXT: [[T4:%.*]] = extractvalue { i64, i1 } [[T2]], 1 @@ -69,8 +69,8 @@ void testlongpreinc() { // CHECK-LABEL: define void @testintadd() void testintadd() { - // CHECK: [[T1:%.*]] = load i32* @ij - // CHECK-NEXT: [[T2:%.*]] = load i32* @ik + // CHECK: [[T1:%.*]] = load i32, i32* @ij + // CHECK-NEXT: [[T2:%.*]] = load i32, i32* @ik // CHECK-NEXT: [[T3:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[T1]], i32 [[T2]]) // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T3]], 0 // CHECK-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T3]], 1 @@ -81,8 +81,8 @@ void testintadd() { // CHECK-LABEL: define void @testintsub() void testintsub() { - // CHECK: [[T1:%.*]] = load i32* @ij - // CHECK-NEXT: [[T2:%.*]] = load i32* @ik + // CHECK: [[T1:%.*]] = load i32, i32* @ij + // CHECK-NEXT: [[T2:%.*]] = load i32, i32* @ik // CHECK-NEXT: [[T3:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[T1]], i32 [[T2]]) // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T3]], 0 // CHECK-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T3]], 1 @@ -93,8 +93,8 @@ void testintsub() { // CHECK-LABEL: define void @testintmul() void testintmul() { - // CHECK: [[T1:%.*]] = load i32* @ij - // CHECK-NEXT: [[T2:%.*]] = load i32* @ik + // CHECK: [[T1:%.*]] = load i32, i32* @ij + // CHECK-NEXT: [[T2:%.*]] = load i32, i32* @ik // CHECK-NEXT: [[T3:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[T1]], i32 [[T2]]) // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T3]], 0 // CHECK-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T3]], 1 @@ -106,7 +106,7 @@ void testintmul() { void testintpostinc() { opaqueint(ii++); - // CHECK: [[T1:%.*]] = load i32* @ii + // CHECK: [[T1:%.*]] = load i32, i32* @ii // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[T1]], i32 1) // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 0 // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T2]], 1 @@ -117,7 +117,7 @@ void testintpostinc() { void testintpreinc() { opaqueint(++ii); - // CHECK: [[T1:%.*]] = load i32* @ii + // CHECK: [[T1:%.*]] = load i32, i32* @ii // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[T1]], i32 1) // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 0 // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T2]], 1 diff --git a/test/CodeGen/unsigned-promotion.c b/test/CodeGen/unsigned-promotion.c index 2c3415201c..4e7a4426a0 100644 --- a/test/CodeGen/unsigned-promotion.c +++ b/test/CodeGen/unsigned-promotion.c @@ -15,16 +15,16 @@ extern void opaquechar(unsigned char); // CHECKS-LABEL: define void @testshortadd() // CHECKU-LABEL: define void @testshortadd() void testshortadd() { - // CHECKS: load i16* @sj - // CHECKS: load i16* @sk + // CHECKS: load i16, i16* @sj + // CHECKS: load i16, i16* @sk // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]]) // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0 // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1 // CHECKS: call void @__ubsan_handle_add_overflow // - // CHECKU: [[T1:%.*]] = load i16* @sj + // CHECKU: [[T1:%.*]] = load i16, i16* @sj // CHECKU: [[T2:%.*]] = zext i16 [[T1]] - // CHECKU: 
[[T3:%.*]] = load i16* @sk + // CHECKU: [[T3:%.*]] = load i16, i16* @sk // CHECKU: [[T4:%.*]] = zext i16 [[T3]] // CHECKU-NOT: llvm.sadd // CHECKU-NOT: llvm.uadd @@ -37,16 +37,16 @@ void testshortadd() { // CHECKU-LABEL: define void @testshortsub() void testshortsub() { - // CHECKS: load i16* @sj - // CHECKS: load i16* @sk + // CHECKS: load i16, i16* @sj + // CHECKS: load i16, i16* @sk // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]]) // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0 // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1 // CHECKS: call void @__ubsan_handle_sub_overflow // - // CHECKU: [[T1:%.*]] = load i16* @sj + // CHECKU: [[T1:%.*]] = load i16, i16* @sj // CHECKU: [[T2:%.*]] = zext i16 [[T1]] - // CHECKU: [[T3:%.*]] = load i16* @sk + // CHECKU: [[T3:%.*]] = load i16, i16* @sk // CHECKU: [[T4:%.*]] = zext i16 [[T3]] // CHECKU-NOT: llvm.ssub // CHECKU-NOT: llvm.usub @@ -59,16 +59,16 @@ void testshortsub() { // CHECKU-LABEL: define void @testshortmul() void testshortmul() { - // CHECKS: load i16* @sj - // CHECKS: load i16* @sk + // CHECKS: load i16, i16* @sj + // CHECKS: load i16, i16* @sk // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]]) // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0 // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1 // CHECKS: call void @__ubsan_handle_mul_overflow // - // CHECKU: [[T1:%.*]] = load i16* @sj + // CHECKU: [[T1:%.*]] = load i16, i16* @sj // CHECKU: [[T2:%.*]] = zext i16 [[T1]] - // CHECKU: [[T3:%.*]] = load i16* @sk + // CHECKU: [[T3:%.*]] = load i16, i16* @sk // CHECKU: [[T4:%.*]] = zext i16 [[T3]] // CHECKU-NOT: llvm.smul // CHECKU-NOT: llvm.umul @@ -80,16 +80,16 @@ void testshortmul() { // CHECKU-LABEL: define void @testcharadd() void testcharadd() { - // CHECKS: load i8* @cj - // CHECKS: load i8* @ck + // CHECKS: load i8, i8* @cj + // CHECKS: load i8, i8* @ck // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]]) // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0 // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1 // CHECKS: call void @__ubsan_handle_add_overflow // - // CHECKU: [[T1:%.*]] = load i8* @cj + // CHECKU: [[T1:%.*]] = load i8, i8* @cj // CHECKU: [[T2:%.*]] = zext i8 [[T1]] - // CHECKU: [[T3:%.*]] = load i8* @ck + // CHECKU: [[T3:%.*]] = load i8, i8* @ck // CHECKU: [[T4:%.*]] = zext i8 [[T3]] // CHECKU-NOT: llvm.sadd // CHECKU-NOT: llvm.uadd @@ -102,16 +102,16 @@ void testcharadd() { // CHECKU-LABEL: define void @testcharsub() void testcharsub() { - // CHECKS: load i8* @cj - // CHECKS: load i8* @ck + // CHECKS: load i8, i8* @cj + // CHECKS: load i8, i8* @ck // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]]) // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0 // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1 // CHECKS: call void @__ubsan_handle_sub_overflow // - // CHECKU: [[T1:%.*]] = load i8* @cj + // CHECKU: [[T1:%.*]] = load i8, i8* @cj // CHECKU: [[T2:%.*]] = zext i8 [[T1]] - // CHECKU: [[T3:%.*]] = load i8* @ck + // CHECKU: [[T3:%.*]] = load i8, i8* @ck // CHECKU: [[T4:%.*]] = zext i8 [[T3]] // CHECKU-NOT: llvm.ssub // CHECKU-NOT: llvm.usub @@ -124,16 +124,16 @@ void testcharsub() { // CHECKU-LABEL: define void @testcharmul() void testcharmul() { - // CHECKS: load i8* @cj - // CHECKS: load i8* @ck + // CHECKS: load i8, 
i8* @cj + // CHECKS: load i8, i8* @ck // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]]) // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0 // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1 // CHECKS: call void @__ubsan_handle_mul_overflow // - // CHECKU: [[T1:%.*]] = load i8* @cj + // CHECKU: [[T1:%.*]] = load i8, i8* @cj // CHECKU: [[T2:%.*]] = zext i8 [[T1]] - // CHECKU: [[T3:%.*]] = load i8* @ck + // CHECKU: [[T3:%.*]] = load i8, i8* @ck // CHECKU: [[T4:%.*]] = zext i8 [[T3]] // CHECKU-NOT: llvm.smul // CHECKU-NOT: llvm.umul diff --git a/test/CodeGen/variadic-gpfp-x86.c b/test/CodeGen/variadic-gpfp-x86.c index 8577099cb6..854899b6a3 100644 --- a/test/CodeGen/variadic-gpfp-x86.c +++ b/test/CodeGen/variadic-gpfp-x86.c @@ -9,7 +9,7 @@ struct Bar { struct Bar foo(__builtin_va_list ap) { return __builtin_va_arg(ap, struct Bar); // CHECK: [[FPOP:%.*]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* {{.*}}, i32 0, i32 1 -// CHECK: [[FPO:%.*]] = load i32* [[FPOP]] +// CHECK: [[FPO:%.*]] = load i32, i32* [[FPOP]] // CHECK: [[FPVEC:%.*]] = getelementptr i8, i8* {{.*}}, i32 [[FPO]] // CHECK: bitcast i8* [[FPVEC]] to <2 x float>* } diff --git a/test/CodeGen/vla.c b/test/CodeGen/vla.c index e4ea7bdb2d..0f2e2cdc66 100644 --- a/test/CodeGen/vla.c +++ b/test/CodeGen/vla.c @@ -79,7 +79,7 @@ int test2(int n) { GLOB = 0; char b[1][n+3]; /* Variable length array. */ - // CHECK: [[tmp_1:%.*]] = load i32* @GLOB, align 4 + // CHECK: [[tmp_1:%.*]] = load i32, i32* @GLOB, align 4 // CHECK-NEXT: add nsw i32 [[tmp_1]], 1 __typeof__(b[GLOB++]) c; return GLOB; @@ -92,13 +92,13 @@ double test_PR8567(int n, double (*p)[n][5]) { // CHECK-NEXT: [[PV:%.*]] = alloca [5 x double]*, align 4 // CHECK-NEXT: store // CHECK-NEXT: store - // CHECK-NEXT: [[N:%.*]] = load i32* [[NV]], align 4 - // CHECK-NEXT: [[P:%.*]] = load [5 x double]** [[PV]], align 4 + // CHECK-NEXT: [[N:%.*]] = load i32, i32* [[NV]], align 4 + // CHECK-NEXT: [[P:%.*]] = load [5 x double]*, [5 x double]** [[PV]], align 4 // CHECK-NEXT: [[T0:%.*]] = mul nsw i32 1, [[N]] // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [5 x double], [5 x double]* [[P]], i32 [[T0]] // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [5 x double], [5 x double]* [[T1]], i32 2 // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [5 x double], [5 x double]* [[T2]], i32 0, i32 3 - // CHECK-NEXT: [[T4:%.*]] = load double* [[T3]] + // CHECK-NEXT: [[T4:%.*]] = load double, double* [[T3]] // CHECK-NEXT: ret double [[T4]] return p[1][2][3]; } @@ -112,17 +112,17 @@ int test4(unsigned n, char (*p)[n][n+1][6]) { // CHECK-NEXT: store [6 x i8]* // VLA captures. 
- // CHECK-NEXT: [[DIM0:%.*]] = load i32* [[N]], align 4 - // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4 + // CHECK-NEXT: [[DIM0:%.*]] = load i32, i32* [[N]], align 4 + // CHECK-NEXT: [[T0:%.*]] = load i32, i32* [[N]], align 4 // CHECK-NEXT: [[DIM1:%.*]] = add i32 [[T0]], 1 - // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]** [[P]], align 4 - // CHECK-NEXT: [[T1:%.*]] = load i32* [[N]], align 4 + // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]*, [6 x i8]** [[P]], align 4 + // CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[N]], align 4 // CHECK-NEXT: [[T2:%.*]] = udiv i32 [[T1]], 2 // CHECK-NEXT: [[T3:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]] // CHECK-NEXT: [[T4:%.*]] = mul nsw i32 [[T2]], [[T3]] // CHECK-NEXT: [[T5:%.*]] = getelementptr inbounds [6 x i8], [6 x i8]* [[T0]], i32 [[T4]] - // CHECK-NEXT: [[T6:%.*]] = load i32* [[N]], align 4 + // CHECK-NEXT: [[T6:%.*]] = load i32, i32* [[N]], align 4 // CHECK-NEXT: [[T7:%.*]] = udiv i32 [[T6]], 4 // CHECK-NEXT: [[T8:%.*]] = sub i32 0, [[T7]] // CHECK-NEXT: [[T9:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]] @@ -131,8 +131,8 @@ int test4(unsigned n, char (*p)[n][n+1][6]) { // CHECK-NEXT: store [6 x i8]* [[T11]], [6 x i8]** [[P2]], align 4 __typeof(p) p2 = (p + n/2) - n/4; - // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]** [[P2]], align 4 - // CHECK-NEXT: [[T1:%.*]] = load [6 x i8]** [[P]], align 4 + // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]*, [6 x i8]** [[P2]], align 4 + // CHECK-NEXT: [[T1:%.*]] = load [6 x i8]*, [6 x i8]** [[P]], align 4 // CHECK-NEXT: [[T2:%.*]] = ptrtoint [6 x i8]* [[T0]] to i32 // CHECK-NEXT: [[T3:%.*]] = ptrtoint [6 x i8]* [[T1]] to i32 // CHECK-NEXT: [[T4:%.*]] = sub i32 [[T2]], [[T3]] @@ -154,14 +154,14 @@ void test5(void) // CHECK-NEXT: store i32 0, i32* [[I]], align 4 (typeof(++i, (int (*)[i])a)){&a} += 0; - // CHECK-NEXT: [[Z:%.*]] = load i32* [[I]], align 4 + // CHECK-NEXT: [[Z:%.*]] = load i32, i32* [[I]], align 4 // CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[Z]], 1 // CHECK-NEXT: store i32 [[INC]], i32* [[I]], align 4 - // CHECK-NEXT: [[O:%.*]] = load i32* [[I]], align 4 + // CHECK-NEXT: [[O:%.*]] = load i32, i32* [[I]], align 4 // CHECK-NEXT: [[AR:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* [[A]], i32 0, i32 0 // CHECK-NEXT: [[T:%.*]] = bitcast [5 x i32]* [[A]] to i32* // CHECK-NEXT: store i32* [[T]], i32** [[CL]] - // CHECK-NEXT: [[TH:%.*]] = load i32** [[CL]] + // CHECK-NEXT: [[TH:%.*]] = load i32*, i32** [[CL]] // CHECK-NEXT: [[VLAIX:%.*]] = mul nsw i32 0, [[O]] // CHECK-NEXT: [[ADDPTR:%.*]] = getelementptr inbounds i32, i32* [[TH]], i32 [[VLAIX]] // CHECK-NEXT: store i32* [[ADDPTR]], i32** [[CL]] @@ -178,12 +178,12 @@ void test6(void) // CHECK-NEXT: [[CL:%.*]] = alloca i32**, align 4 // CHECK-NEXT: store i32 20, i32* [[N]], align 4 // CHECK-NEXT: store i32 0, i32* [[I]], align 4 - // CHECK-NEXT: [[Z:%.*]] = load i32* [[I]], align 4 + // CHECK-NEXT: [[Z:%.*]] = load i32, i32* [[I]], align 4 // CHECK-NEXT: [[O:%.*]] = bitcast i32*** [[A]] to i32** // CHECK-NEXT: store i32** [[O]], i32*** [[CL]] - // CHECK-NEXT: [[T:%.*]] = load i32*** [[CL]] + // CHECK-NEXT: [[T:%.*]] = load i32**, i32*** [[CL]] // CHECK-NEXT: [[IX:%.*]] = getelementptr inbounds i32*, i32** [[T]], i32 0 - // CHECK-NEXT: [[TH:%.*]] = load i32** [[IX]], align 4 + // CHECK-NEXT: [[TH:%.*]] = load i32*, i32** [[IX]], align 4 // CHECK-NEXT: [[F:%.*]] = mul nsw i32 1, [[Z]] // CHECK-NEXT: [[IX1:%.*]] = getelementptr inbounds i32, i32* [[TH]], i32 [[F]] // CHECK-NEXT: [[IX2:%.*]] = getelementptr inbounds i32, i32* [[IX1]], i32 5 diff --git a/test/CodeGen/volatile-1.c 
b/test/CodeGen/volatile-1.c index d1861d5458..df178a1081 100644 --- a/test/CodeGen/volatile-1.c +++ b/test/CodeGen/volatile-1.c @@ -24,30 +24,30 @@ int printf(const char *, ...); // CHECK-LABEL: define void @test() void test() { - // CHECK: load volatile [[INT]]* @i + // CHECK: load volatile [[INT]], [[INT]]* @i i; - // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 - // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 + // CHECK-NEXT: load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 + // CHECK-NEXT: load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 // CHECK-NEXT: sitofp [[INT]] (float)(ci); - // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 - // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 + // CHECK-NEXT: load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 + // CHECK-NEXT: load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 (void)ci; // CHECK-NEXT: bitcast // CHECK-NEXT: memcpy (void)a; - // CHECK-NEXT: [[R:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 - // CHECK-NEXT: [[I:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 + // CHECK-NEXT: [[R:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 + // CHECK-NEXT: [[I:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 // CHECK-NEXT: store volatile [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 (void)(ci=ci); - // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]]* @j + // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]], [[INT]]* @j // CHECK-NEXT: store volatile [[INT]] [[T]], [[INT]]* @i (void)(i=j); - // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 - // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 - // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 - // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 + // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 + // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 + // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 + // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 // Not sure why they're ordered this way. 
// CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]] // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]] @@ -55,16 +55,16 @@ void test() { // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 ci+=ci; - // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 - // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 - // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 - // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 + // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 + // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 + // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 + // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 // CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]] // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]] // CHECK-NEXT: store volatile [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 - // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 - // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 + // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0), align 4 + // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1), align 4 // These additions can be elided // CHECK-NEXT: add [[INT]] [[R]], [[R2]] // CHECK-NEXT: add [[INT]] [[I]], [[I2]] @@ -192,7 +192,7 @@ void test() { // CHECK-NEXT: store volatile // CHECK-NEXT: store volatile ci=ci=ci; - // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) // CHECK-NEXT: store volatile [[INT]] [[T]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) // CHECK-NEXT: store volatile [[INT]] [[T]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) __imag ci = __imag ci = __imag ci; @@ -316,9 +316,9 @@ void test1() { // CHECK: define {{.*}} @test2() int test2() { - // CHECK: load volatile i32* - // CHECK-NEXT: load volatile i32* - // CHECK-NEXT: load volatile i32* + // CHECK: load volatile i32, i32* + // CHECK-NEXT: load volatile i32, i32* + // CHECK-NEXT: load volatile i32, i32* // CHECK-NEXT: add i32 // CHECK-NEXT: add i32 // CHECK-NEXT: store volatile i32 diff --git a/test/CodeGen/volatile-2.c b/test/CodeGen/volatile-2.c index 18d0d318ef..9061bbc449 100644 --- a/test/CodeGen/volatile-2.c +++ b/test/CodeGen/volatile-2.c @@ -3,8 +3,8 @@ void test0() { // CHECK-LABEL: define void @test0() // CHECK: [[F:%.*]] = alloca float - // CHECK-NEXT: [[REAL:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @test0_v, i32 0, i32 0), align 4 - // CHECK-NEXT: load volatile float* 
getelementptr inbounds ({{.*}} @test0_v, i32 0, i32 1), align 4 + // CHECK-NEXT: [[REAL:%.*]] = load volatile float, float* getelementptr inbounds ({ float, float }* @test0_v, i32 0, i32 0), align 4 + // CHECK-NEXT: load volatile float, float* getelementptr inbounds ({{.*}} @test0_v, i32 0, i32 1), align 4 // CHECK-NEXT: store float [[REAL]], float* [[F]], align 4 // CHECK-NEXT: ret void extern volatile _Complex float test0_v; @@ -13,8 +13,8 @@ void test0() { void test1() { // CHECK-LABEL: define void @test1() - // CHECK: [[REAL:%.*]] = load volatile float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0), align 4 - // CHECK-NEXT: [[IMAG:%.*]] = load volatile float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1), align 4 + // CHECK: [[REAL:%.*]] = load volatile float, float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0), align 4 + // CHECK-NEXT: [[IMAG:%.*]] = load volatile float, float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1), align 4 // CHECK-NEXT: store volatile float [[REAL]], float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0), align 4 // CHECK-NEXT: store volatile float [[IMAG]], float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1), align 4 // CHECK-NEXT: ret void diff --git a/test/CodeGen/volatile-complex.c b/test/CodeGen/volatile-complex.c index fd5e52b8d7..daa103257c 100644 --- a/test/CodeGen/volatile-complex.c +++ b/test/CodeGen/volatile-complex.c @@ -14,11 +14,11 @@ volatile _Complex double cd32 __attribute__((aligned(32))); // CHECK-LABEL: define void @test_cf() void test_cf() { - // CHECK: load volatile float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 0), align 4 - // CHECK-NEXT: load volatile float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 1), align 4 + // CHECK: load volatile float, float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 0), align 4 + // CHECK-NEXT: load volatile float, float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 1), align 4 (void)(cf); - // CHECK-NEXT: [[R:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 0), align 4 - // CHECK-NEXT: [[I:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 1), align 4 + // CHECK-NEXT: [[R:%.*]] = load volatile float, float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 0), align 4 + // CHECK-NEXT: [[I:%.*]] = load volatile float, float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 1), align 4 // CHECK-NEXT: store volatile float [[R]], float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 0), align 4 // CHECK-NEXT: store volatile float [[I]], float* getelementptr inbounds ({ float, float }* @cf, i32 0, i32 1), align 4 (void)(cf=cf); @@ -27,11 +27,11 @@ void test_cf() { // CHECK-LABEL: define void @test_cd() void test_cd() { - // CHECK: load volatile double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 0), align 8 - // CHECK-NEXT: load volatile double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 1), align 8 + // CHECK: load volatile double, double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 0), align 8 + // CHECK-NEXT: load volatile double, double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 1), align 8 (void)(cd); - // CHECK-NEXT: [[R:%.*]] = load volatile double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 0), align 8 - // CHECK-NEXT: [[I:%.*]] = load volatile double* getelementptr inbounds ({ double, double }* @cd, i32 0, 
i32 1), align 8 + // CHECK-NEXT: [[R:%.*]] = load volatile double, double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 0), align 8 + // CHECK-NEXT: [[I:%.*]] = load volatile double, double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 1), align 8 // CHECK-NEXT: store volatile double [[R]], double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 0), align 8 // CHECK-NEXT: store volatile double [[I]], double* getelementptr inbounds ({ double, double }* @cd, i32 0, i32 1), align 8 (void)(cd=cd); @@ -40,11 +40,11 @@ void test_cd() { // CHECK-LABEL: define void @test_cf32() void test_cf32() { - // CHECK: load volatile float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 0), align 32 - // CHECK-NEXT: load volatile float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 1), align 4 + // CHECK: load volatile float, float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 0), align 32 + // CHECK-NEXT: load volatile float, float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 1), align 4 (void)(cf32); - // CHECK-NEXT: [[R:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 0), align 32 - // CHECK-NEXT: [[I:%.*]] = load volatile float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 1), align 4 + // CHECK-NEXT: [[R:%.*]] = load volatile float, float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 0), align 32 + // CHECK-NEXT: [[I:%.*]] = load volatile float, float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 1), align 4 // CHECK-NEXT: store volatile float [[R]], float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 0), align 32 // CHECK-NEXT: store volatile float [[I]], float* getelementptr inbounds ({ float, float }* @cf32, i32 0, i32 1), align 4 (void)(cf32=cf32); @@ -53,11 +53,11 @@ void test_cf32() { // CHECK-LABEL: define void @test_cd32() void test_cd32() { - // CHECK: load volatile double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 0), align 32 - // CHECK-NEXT: load volatile double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 1), align 8 + // CHECK: load volatile double, double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 0), align 32 + // CHECK-NEXT: load volatile double, double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 1), align 8 (void)(cd32); - // CHECK-NEXT: [[R:%.*]] = load volatile double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 0), align 32 - // CHECK-NEXT: [[I:%.*]] = load volatile double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 1), align 8 + // CHECK-NEXT: [[R:%.*]] = load volatile double, double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 0), align 32 + // CHECK-NEXT: [[I:%.*]] = load volatile double, double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 1), align 8 // CHECK-NEXT: store volatile double [[R]], double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 0), align 32 // CHECK-NEXT: store volatile double [[I]], double* getelementptr inbounds ({ double, double }* @cd32, i32 0, i32 1), align 8 (void)(cd32=cd32); diff --git a/test/CodeGen/volatile.c b/test/CodeGen/volatile.c index 3e891aa32c..52915f6f4d 100644 --- a/test/CodeGen/volatile.c +++ b/test/CodeGen/volatile.c @@ -41,67 +41,67 @@ int main() { // CHECK: [[I:%[a-zA-Z0-9_.]+]] = alloca i32 // load i=S; -// CHECK: load i32* @S +// CHECK: load i32, i32* @S // CHECK: store i32 
{{.*}}, i32* [[I]] i=vS; -// CHECK: load volatile i32* @vS +// CHECK: load volatile i32, i32* @vS // CHECK: store i32 {{.*}}, i32* [[I]] i=*pS; -// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load i32** @pS -// CHECK: load i32* [[PS_VAL]] +// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pS +// CHECK: load i32, i32* [[PS_VAL]] // CHECK: store i32 {{.*}}, i32* [[I]] i=*pvS; -// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load i32** @pvS -// CHECK: load volatile i32* [[PVS_VAL]] +// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pvS +// CHECK: load volatile i32, i32* [[PVS_VAL]] // CHECK: store i32 {{.*}}, i32* [[I]] i=A[2]; -// CHECK: load i32* getelementptr {{.*}} @A +// CHECK: load i32, i32* getelementptr {{.*}} @A // CHECK: store i32 {{.*}}, i32* [[I]] i=vA[2]; -// CHECK: load volatile i32* getelementptr {{.*}} @vA +// CHECK: load volatile i32, i32* getelementptr {{.*}} @vA // CHECK: store i32 {{.*}}, i32* [[I]] i=F.x; -// CHECK: load i32* getelementptr {{.*}} @F +// CHECK: load i32, i32* getelementptr {{.*}} @F // CHECK: store i32 {{.*}}, i32* [[I]] i=vF.x; -// CHECK: load volatile i32* getelementptr {{.*}} @vF +// CHECK: load volatile i32, i32* getelementptr {{.*}} @vF // CHECK: store i32 {{.*}}, i32* [[I]] i=F2.x; -// CHECK: load i32* getelementptr {{.*}} @F2 +// CHECK: load i32, i32* getelementptr {{.*}} @F2 // CHECK: store i32 {{.*}}, i32* [[I]] i=vF2.x; -// CHECK: load volatile i32* getelementptr {{.*}} @vF2 +// CHECK: load volatile i32, i32* getelementptr {{.*}} @vF2 // CHECK: store i32 {{.*}}, i32* [[I]] i=vpF2->x; -// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load {{%[a-zA-Z0-9_.]+}}** @vpF2 +// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load {{%[a-zA-Z0-9_.]+}}*, {{%[a-zA-Z0-9_.]+}}** @vpF2 // CHECK: [[ELT:%[a-zA-Z0-9_.]+]] = getelementptr {{.*}} [[VPF2_VAL]] -// CHECK: load volatile i32* [[ELT]] +// CHECK: load volatile i32, i32* [[ELT]] // CHECK: store i32 {{.*}}, i32* [[I]] i=F3.x.y; -// CHECK: load i32* getelementptr {{.*}} @F3 +// CHECK: load i32, i32* getelementptr {{.*}} @F3 // CHECK: store i32 {{.*}}, i32* [[I]] i=vF3.x.y; -// CHECK: load volatile i32* getelementptr {{.*}} @vF3 +// CHECK: load volatile i32, i32* getelementptr {{.*}} @vF3 // CHECK: store i32 {{.*}}, i32* [[I]] i=BF.x; -// CHECK-IT: load i8* getelementptr {{.*}} @BF -// CHECK-MS: load i32* getelementptr {{.*}} @BF +// CHECK-IT: load i8, i8* getelementptr {{.*}} @BF +// CHECK-MS: load i32, i32* getelementptr {{.*}} @BF // CHECK: store i32 {{.*}}, i32* [[I]] i=vBF.x; -// CHECK-IT: load volatile i8* getelementptr {{.*}} @vBF -// CHECK-MS: load volatile i32* getelementptr {{.*}} @vBF +// CHECK-IT: load volatile i8, i8* getelementptr {{.*}} @vBF +// CHECK-MS: load volatile i32, i32* getelementptr {{.*}} @vBF // CHECK: store i32 {{.*}}, i32* [[I]] i=V[3]; -// CHECK: load <4 x i32>* @V +// CHECK: load <4 x i32>, <4 x i32>* @V // CHECK: store i32 {{.*}}, i32* [[I]] i=vV[3]; -// CHECK: load volatile <4 x i32>* @vV +// CHECK: load volatile <4 x i32>, <4 x i32>* @vV // CHECK: store i32 {{.*}}, i32* [[I]] i=VE.yx[1]; -// CHECK: load <4 x i32>* @VE +// CHECK: load <4 x i32>, <4 x i32>* @VE // CHECK: store i32 {{.*}}, i32* [[I]] i=vVE.zy[1]; -// CHECK: load volatile <4 x i32>* @vVE +// CHECK: load volatile <4 x i32>, <4 x i32>* @vVE // CHECK: store i32 {{.*}}, i32* [[I]] i = aggFct().x; // Note: not volatile // N.b. 
Aggregate return is extremely target specific, all we can @@ -110,92 +110,92 @@ int main() { // CHECK-NOT: load volatile // CHECK: store i32 {{.*}}, i32* [[I]] i=vtS; -// CHECK: load volatile i32* @vtS +// CHECK: load volatile i32, i32* @vtS // CHECK: store i32 {{.*}}, i32* [[I]] // store S=i; -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* [[I]] // CHECK: store i32 {{.*}}, i32* @S vS=i; -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* [[I]] // CHECK: store volatile i32 {{.*}}, i32* @vS *pS=i; -// CHECK: load i32* [[I]] -// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load i32** @pS +// CHECK: load i32, i32* [[I]] +// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pS // CHECK: store i32 {{.*}}, i32* [[PS_VAL]] *pvS=i; -// CHECK: load i32* [[I]] -// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load i32** @pvS +// CHECK: load i32, i32* [[I]] +// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pvS // CHECK: store volatile i32 {{.*}}, i32* [[PVS_VAL]] A[2]=i; -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* [[I]] // CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @A vA[2]=i; -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* [[I]] // CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vA F.x=i; -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* [[I]] // CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @F vF.x=i; -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* [[I]] // CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF F2.x=i; -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* [[I]] // CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @F2 vF2.x=i; -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* [[I]] // CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF2 vpF2->x=i; -// CHECK: load i32* [[I]] -// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load {{%[a-zA-Z0-9._]+}}** @vpF2 +// CHECK: load i32, i32* [[I]] +// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load {{%[a-zA-Z0-9._]+}}*, {{%[a-zA-Z0-9._]+}}** @vpF2 // CHECK: [[ELT:%[a-zA-Z0-9_.]+]] = getelementptr {{.*}} [[VPF2_VAL]] // CHECK: store volatile i32 {{.*}}, i32* [[ELT]] vF3.x.y=i; -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* [[I]] // CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF3 BF.x=i; -// CHECK: load i32* [[I]] -// CHECK-IT: load i8* getelementptr {{.*}} @BF -// CHECK-MS: load i32* getelementptr {{.*}} @BF +// CHECK: load i32, i32* [[I]] +// CHECK-IT: load i8, i8* getelementptr {{.*}} @BF +// CHECK-MS: load i32, i32* getelementptr {{.*}} @BF // CHECK-IT: store i8 {{.*}}, i8* getelementptr {{.*}} @BF // CHECK-MS: store i32 {{.*}}, i32* getelementptr {{.*}} @BF vBF.x=i; -// CHECK: load i32* [[I]] -// CHECK-IT: load volatile i8* getelementptr {{.*}} @vBF -// CHECK-MS: load volatile i32* getelementptr {{.*}} @vBF +// CHECK: load i32, i32* [[I]] +// CHECK-IT: load volatile i8, i8* getelementptr {{.*}} @vBF +// CHECK-MS: load volatile i32, i32* getelementptr {{.*}} @vBF // CHECK-IT: store volatile i8 {{.*}}, i8* getelementptr {{.*}} @vBF // CHECK-MS: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vBF V[3]=i; -// CHECK: load i32* [[I]] -// CHECK: load <4 x i32>* @V +// CHECK: load i32, i32* [[I]] +// CHECK: load <4 x i32>, <4 x i32>* @V // CHECK: store <4 x i32> {{.*}}, <4 x i32>* @V vV[3]=i; -// CHECK: load i32* [[I]] -// CHECK: load volatile <4 x i32>* @vV +// CHECK: load i32, i32* [[I]] +// CHECK: load volatile <4 x i32>, <4 x i32>* @vV // CHECK: store volatile <4 x i32> {{.*}}, <4 x i32>* @vV vtS=i; -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* [[I]] 
// CHECK: store volatile i32 {{.*}}, i32* @vtS // other ops: ++S; -// CHECK: load i32* @S +// CHECK: load i32, i32* @S // CHECK: store i32 {{.*}}, i32* @S ++vS; -// CHECK: load volatile i32* @vS +// CHECK: load volatile i32, i32* @vS // CHECK: store volatile i32 {{.*}}, i32* @vS i+=S; -// CHECK: load i32* @S -// CHECK: load i32* [[I]] +// CHECK: load i32, i32* @S +// CHECK: load i32, i32* [[I]] // CHECK: store i32 {{.*}}, i32* [[I]] i+=vS; -// CHECK: load volatile i32* @vS -// CHECK: load i32* [[I]] +// CHECK: load volatile i32, i32* @vS +// CHECK: load i32, i32* [[I]] // CHECK: store i32 {{.*}}, i32* [[I]] ++vtS; -// CHECK: load volatile i32* @vtS +// CHECK: load volatile i32, i32* @vtS // CHECK: store volatile i32 {{.*}}, i32* @vtS (void)vF2; // From vF2 to a temporary diff --git a/test/CodeGen/x86-atomic-long_double.c b/test/CodeGen/x86-atomic-long_double.c index 8bcf591ac7..9857c67592 100644 --- a/test/CodeGen/x86-atomic-long_double.c +++ b/test/CodeGen/x86-atomic-long_double.c @@ -4,12 +4,12 @@ long double testinc(_Atomic long double *addr) { // CHECK-LABEL: @testinc // CHECK: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 8 - // CHECK: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 8 // CHECK: [[INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* - // CHECK: [[INT_VALUE:%.+]] = load atomic i128* [[INT_ADDR]] seq_cst, align 16 + // CHECK: [[INT_VALUE:%.+]] = load atomic i128, i128* [[INT_ADDR]] seq_cst, align 16 // CHECK: [[INT_LOAD_ADDR:%.+]] = bitcast x86_fp80* [[LD_ADDR:%.+]] to i128* // CHECK: store i128 [[INT_VALUE]], i128* [[INT_LOAD_ADDR]], align 16 - // CHECK: [[LD_VALUE:%.+]] = load x86_fp80* [[LD_ADDR]], align 16 + // CHECK: [[LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16 // CHECK: br label %[[ATOMIC_OP:.+]] // CHECK: [[ATOMIC_OP]] // CHECK: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -18,29 +18,29 @@ long double testinc(_Atomic long double *addr) { // CHECK: call void @llvm.memset.p0i8.i64(i8* [[OLD_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 16 // CHECK: [[OLD_INT_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i128* - // CHECK: [[OLD_INT:%.+]] = load i128* [[OLD_INT_ADDR]], align 16 + // CHECK: [[OLD_INT:%.+]] = load i128, i128* [[OLD_INT_ADDR]], align 16 // CHECK: [[NEW_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR:%.+]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[NEW_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[INC_VALUE]], x86_fp80* [[NEW_VALUE_ADDR]], align 16 // CHECK: [[NEW_INT_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR]] to i128* - // CHECK: [[NEW_INT:%.+]] = load i128* [[NEW_INT_ADDR]], align 16 + // CHECK: [[NEW_INT:%.+]] = load i128, i128* [[NEW_INT_ADDR]], align 16 // CHECK: [[OBJ_INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* // CHECK: [[RES:%.+]] = cmpxchg i128* [[OBJ_INT_ADDR]], i128 [[OLD_INT]], i128 [[NEW_INT]] seq_cst seq_cst // CHECK: [[OLD_VALUE:%.+]] = extractvalue { i128, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1 // CHECK: [[OLD_VALUE_RES_INT_PTR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_RES_PTR:%.+]] to i128* // CHECK: store i128 [[OLD_VALUE]], i128* [[OLD_VALUE_RES_INT_PTR]], align 16 - // CHECK: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 + // CHECK: [[LD_VALUE]] = load 
x86_fp80, x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK: [[ATOMIC_CONT]] // CHECK: ret x86_fp80 [[INC_VALUE]] // CHECK32-LABEL: @testinc // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4 - // CHECK32: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4 // CHECK32: [[VOID_PTR:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[TEMP_LD_PTR:%.+]] = bitcast x86_fp80* [[TEMP_LD_ADDR:%.+]] to i8* // CHECK32: call void @__atomic_load(i32 12, i8* [[VOID_PTR]], i8* [[TEMP_LD_PTR]], i32 5) - // CHECK32: [[LD_VALUE:%.+]] = load x86_fp80* [[TEMP_LD_ADDR]], align 4 + // CHECK32: [[LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[TEMP_LD_ADDR]], align 4 // CHECK32: br label %[[ATOMIC_OP:.+]] // CHECK32: [[ATOMIC_OP]] // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -55,7 +55,7 @@ long double testinc(_Atomic long double *addr) { // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* // CHECK32: [[DESIRED:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR]] to i8* // CHECK32: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i32 12, i8* [[OBJ]], i8* [[EXPECTED]], i8* [[DESIRED]], i32 5, i32 5) - // CHECK32: [[LD_VALUE:%.+]] = load x86_fp80* [[OLD_VALUE_ADDR]], align 4 + // CHECK32: [[LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK32: [[ATOMIC_CONT]] // CHECK32: ret x86_fp80 [[INC_VALUE]] @@ -66,12 +66,12 @@ long double testinc(_Atomic long double *addr) { long double testdec(_Atomic long double *addr) { // CHECK-LABEL: @testdec // CHECK: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 8 - // CHECK: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 8 // CHECK: [[INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* - // CHECK: [[INT_VALUE:%.+]] = load atomic i128* [[INT_ADDR]] seq_cst, align 16 + // CHECK: [[INT_VALUE:%.+]] = load atomic i128, i128* [[INT_ADDR]] seq_cst, align 16 // CHECK: [[INT_LOAD_ADDR:%.+]] = bitcast x86_fp80* [[LD_ADDR:%.+]] to i128* // CHECK: store i128 [[INT_VALUE]], i128* [[INT_LOAD_ADDR]], align 16 - // CHECK: [[ORIG_LD_VALUE:%.+]] = load x86_fp80* [[LD_ADDR]], align 16 + // CHECK: [[ORIG_LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16 // CHECK: br label %[[ATOMIC_OP:.+]] // CHECK: [[ATOMIC_OP]] // CHECK: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[ORIG_LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -80,29 +80,29 @@ long double testdec(_Atomic long double *addr) { // CHECK: call void @llvm.memset.p0i8.i64(i8* [[OLD_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 16 // CHECK: [[OLD_INT_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i128* - // CHECK: [[OLD_INT:%.+]] = load i128* [[OLD_INT_ADDR]], align 16 + // CHECK: [[OLD_INT:%.+]] = load i128, i128* [[OLD_INT_ADDR]], align 16 // CHECK: [[NEW_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR:%.+]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[NEW_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[DEC_VALUE]], x86_fp80* [[NEW_VALUE_ADDR]], align 16 // CHECK: [[NEW_INT_ADDR:%.+]] = bitcast 
x86_fp80* [[NEW_VALUE_ADDR]] to i128* - // CHECK: [[NEW_INT:%.+]] = load i128* [[NEW_INT_ADDR]], align 16 + // CHECK: [[NEW_INT:%.+]] = load i128, i128* [[NEW_INT_ADDR]], align 16 // CHECK: [[OBJ_INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* // CHECK: [[RES:%.+]] = cmpxchg i128* [[OBJ_INT_ADDR]], i128 [[OLD_INT]], i128 [[NEW_INT]] seq_cst seq_cst // CHECK: [[OLD_VALUE:%.+]] = extractvalue { i128, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1 // CHECK: [[OLD_VALUE_RES_INT_PTR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_RES_PTR:%.+]] to i128* // CHECK: store i128 [[OLD_VALUE]], i128* [[OLD_VALUE_RES_INT_PTR]], align 16 - // CHECK: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 + // CHECK: [[LD_VALUE]] = load x86_fp80, x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK: [[ATOMIC_CONT]] // CHECK: ret x86_fp80 [[ORIG_LD_VALUE]] // CHECK32-LABEL: @testdec // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4 - // CHECK32: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4 // CHECK32: [[VOID_PTR:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[TEMP_LD_PTR:%.+]] = bitcast x86_fp80* [[TEMP_LD_ADDR:%.+]] to i8* // CHECK32: call void @__atomic_load(i32 12, i8* [[VOID_PTR]], i8* [[TEMP_LD_PTR]], i32 5) - // CHECK32: [[ORIG_LD_VALUE:%.+]] = load x86_fp80* [[TEMP_LD_ADDR]], align 4 + // CHECK32: [[ORIG_LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[TEMP_LD_ADDR]], align 4 // CHECK32: br label %[[ATOMIC_OP:.+]] // CHECK32: [[ATOMIC_OP]] // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[ORIG_LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -117,7 +117,7 @@ long double testdec(_Atomic long double *addr) { // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* // CHECK32: [[DESIRED:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR]] to i8* // CHECK32: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i32 12, i8* [[OBJ]], i8* [[EXPECTED]], i8* [[DESIRED]], i32 5, i32 5) - // CHECK32: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_ADDR]], align 4 + // CHECK32: [[LD_VALUE]] = load x86_fp80, x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK32: [[ATOMIC_CONT]] // CHECK32: ret x86_fp80 [[ORIG_LD_VALUE]] @@ -129,12 +129,12 @@ long double testcompassign(_Atomic long double *addr) { *addr -= 25; // CHECK-LABEL: @testcompassign // CHECK: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 8 - // CHECK: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 8 // CHECK: [[INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* - // CHECK: [[INT_VALUE:%.+]] = load atomic i128* [[INT_ADDR]] seq_cst, align 16 + // CHECK: [[INT_VALUE:%.+]] = load atomic i128, i128* [[INT_ADDR]] seq_cst, align 16 // CHECK: [[INT_LOAD_ADDR:%.+]] = bitcast x86_fp80* [[LD_ADDR:%.+]] to i128* // CHECK: store i128 [[INT_VALUE]], i128* [[INT_LOAD_ADDR]], align 16 - // CHECK: [[LD_VALUE:%.+]] = load x86_fp80* [[LD_ADDR]], align 16 + // CHECK: [[LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16 // CHECK: br label %[[ATOMIC_OP:.+]] // CHECK: [[ATOMIC_OP]] // CHECK: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -143,35 +143,35 @@ long double 
testcompassign(_Atomic long double *addr) { // CHECK: call void @llvm.memset.p0i8.i64(i8* [[OLD_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 16 // CHECK: [[OLD_INT_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i128* - // CHECK: [[OLD_INT:%.+]] = load i128* [[OLD_INT_ADDR]], align 16 + // CHECK: [[OLD_INT:%.+]] = load i128, i128* [[OLD_INT_ADDR]], align 16 // CHECK: [[NEW_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR:%.+]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[NEW_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[SUB_VALUE]], x86_fp80* [[NEW_VALUE_ADDR]], align 16 // CHECK: [[NEW_INT_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR]] to i128* - // CHECK: [[NEW_INT:%.+]] = load i128* [[NEW_INT_ADDR]], align 16 + // CHECK: [[NEW_INT:%.+]] = load i128, i128* [[NEW_INT_ADDR]], align 16 // CHECK: [[OBJ_INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* // CHECK: [[RES:%.+]] = cmpxchg i128* [[OBJ_INT_ADDR]], i128 [[OLD_INT]], i128 [[NEW_INT]] seq_cst seq_cst // CHECK: [[OLD_VALUE:%.+]] = extractvalue { i128, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1 // CHECK: [[OLD_VALUE_RES_INT_PTR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_RES_PTR:%.+]] to i128* // CHECK: store i128 [[OLD_VALUE]], i128* [[OLD_VALUE_RES_INT_PTR]], align 16 - // CHECK: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 + // CHECK: [[LD_VALUE]] = load x86_fp80, x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK: [[ATOMIC_CONT]] - // CHECK: [[ADDR:%.+]] = load x86_fp80** %{{.+}}, align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** %{{.+}}, align 8 // CHECK: [[ADDR_INT:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* - // CHECK: [[INT_VAL:%.+]] = load atomic i128* [[ADDR_INT]] seq_cst, align 16 + // CHECK: [[INT_VAL:%.+]] = load atomic i128, i128* [[ADDR_INT]] seq_cst, align 16 // CHECK: [[INT_LD_TEMP:%.+]] = bitcast x86_fp80* [[LD_TEMP:%.+]] to i128* // CHECK: store i128 [[INT_VAL]], i128* [[INT_LD_TEMP:%.+]], align 16 - // CHECK: [[RET_VAL:%.+]] = load x86_fp80* [[LD_TEMP]], align 16 + // CHECK: [[RET_VAL:%.+]] = load x86_fp80, x86_fp80* [[LD_TEMP]], align 16 // CHECK: ret x86_fp80 [[RET_VAL]] // CHECK32-LABEL: @testcompassign // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4 - // CHECK32: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4 // CHECK32: [[VOID_PTR:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[TEMP_LD_PTR:%.+]] = bitcast x86_fp80* [[TEMP_LD_ADDR:%.+]] to i8* // CHECK32: call void @__atomic_load(i32 12, i8* [[VOID_PTR]], i8* [[TEMP_LD_PTR]], i32 5) - // CHECK32: [[LD_VALUE:%.+]] = load x86_fp80* [[TEMP_LD_ADDR]], align 4 + // CHECK32: [[LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[TEMP_LD_ADDR]], align 4 // CHECK32: br label %[[ATOMIC_OP:.+]] // CHECK32: [[ATOMIC_OP]] // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -186,14 +186,14 @@ long double testcompassign(_Atomic long double *addr) { // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* // CHECK32: [[DESIRED:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR]] to i8* // CHECK32: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i32 12, i8* [[OBJ]], i8* [[EXPECTED]], i8* 
[[DESIRED]], i32 5, i32 5) - // CHECK32: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_ADDR]], align 4 + // CHECK32: [[LD_VALUE]] = load x86_fp80, x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK32: [[ATOMIC_CONT]] - // CHECK32: [[ADDR:%.+]] = load x86_fp80** %{{.+}}, align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** %{{.+}}, align 4 // CHECK32: [[VOID_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[VOID_GET_ADDR:%.+]] = bitcast x86_fp80* [[GET_ADDR:%.+]] to i8* // CHECK32: call void @__atomic_load(i32 12, i8* [[VOID_ADDR]], i8* [[VOID_GET_ADDR]], i32 5) - // CHECK32: [[RET_VAL:%.+]] = load x86_fp80* [[GET_ADDR]], align 4 + // CHECK32: [[RET_VAL:%.+]] = load x86_fp80, x86_fp80* [[GET_ADDR]], align 4 // CHECK32: ret x86_fp80 [[RET_VAL]] return *addr; } @@ -201,17 +201,17 @@ long double testcompassign(_Atomic long double *addr) { long double testassign(_Atomic long double *addr) { // CHECK-LABEL: @testassign // CHECK: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 8 - // CHECK: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 8 // CHECK: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR:%.+]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[STORE_TEMP_VOID_PTR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 {{.+}}, x86_fp80* [[STORE_TEMP_PTR]], align 16 // CHECK: [[STORE_TEMP_INT_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR]] to i128* - // CHECK: [[STORE_TEMP_INT:%.+]] = load i128* [[STORE_TEMP_INT_PTR]], align 16 + // CHECK: [[STORE_TEMP_INT:%.+]] = load i128, i128* [[STORE_TEMP_INT_PTR]], align 16 // CHECK: [[ADDR_INT:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* // CHECK: store atomic i128 [[STORE_TEMP_INT]], i128* [[ADDR_INT]] seq_cst, align 16 // CHECK32-LABEL: @testassign // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4 - // CHECK32: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4 // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR:%.+]] to i8* // CHECK32: call void @llvm.memset.p0i8.i64(i8* [[STORE_TEMP_VOID_PTR]], i8 0, i64 12, i32 4, i1 false) // CHECK32: store x86_fp80 {{.+}}, x86_fp80* [[STORE_TEMP_PTR]], align 4 @@ -219,18 +219,18 @@ long double testassign(_Atomic long double *addr) { // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR]] to i8* // CHECK32: call void @__atomic_store(i32 12, i8* [[ADDR_VOID]], i8* [[STORE_TEMP_VOID_PTR]], i32 5) *addr = 115; - // CHECK: [[ADDR:%.+]] = load x86_fp80** %{{.+}}, align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** %{{.+}}, align 8 // CHECK: [[ADDR_INT:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* - // CHECK: [[INT_VAL:%.+]] = load atomic i128* [[ADDR_INT]] seq_cst, align 16 + // CHECK: [[INT_VAL:%.+]] = load atomic i128, i128* [[ADDR_INT]] seq_cst, align 16 // CHECK: [[INT_LD_TEMP:%.+]] = bitcast x86_fp80* [[LD_TEMP:%.+]] to i128* // CHECK: store i128 [[INT_VAL]], i128* [[INT_LD_TEMP:%.+]], align 16 - // CHECK: [[RET_VAL:%.+]] = load x86_fp80* [[LD_TEMP]], align 16 + // CHECK: [[RET_VAL:%.+]] = load x86_fp80, x86_fp80* [[LD_TEMP]], align 16 // CHECK: ret x86_fp80 [[RET_VAL]] - // CHECK32: [[ADDR:%.+]] = load x86_fp80** %{{.+}}, align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** %{{.+}}, align 4 // CHECK32: [[VOID_ADDR:%.+]] = 
bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[VOID_LD_TEMP:%.+]] = bitcast x86_fp80* [[LD_TEMP:%.+]] to i8* // CHECK32: call void @__atomic_load(i32 12, i8* [[VOID_ADDR]], i8* [[VOID_LD_TEMP]], i32 5) - // CHECK32: [[RET_VAL:%.+]] = load x86_fp80* [[LD_TEMP]], align 4 + // CHECK32: [[RET_VAL:%.+]] = load x86_fp80, x86_fp80* [[LD_TEMP]], align 4 // CHECK32: ret x86_fp80 [[RET_VAL]] return *addr; @@ -239,12 +239,12 @@ long double testassign(_Atomic long double *addr) { long double test_volatile_inc(volatile _Atomic long double *addr) { // CHECK-LABEL: @test_volatile_inc // CHECK: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 8 - // CHECK: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 8 // CHECK: [[INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* - // CHECK: [[INT_VALUE:%.+]] = load atomic volatile i128* [[INT_ADDR]] seq_cst, align 16 + // CHECK: [[INT_VALUE:%.+]] = load atomic volatile i128, i128* [[INT_ADDR]] seq_cst, align 16 // CHECK: [[INT_LOAD_ADDR:%.+]] = bitcast x86_fp80* [[LD_ADDR:%.+]] to i128* // CHECK: store i128 [[INT_VALUE]], i128* [[INT_LOAD_ADDR]], align 16 - // CHECK: [[LD_VALUE:%.+]] = load x86_fp80* [[LD_ADDR]], align 16 + // CHECK: [[LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16 // CHECK: br label %[[ATOMIC_OP:.+]] // CHECK: [[ATOMIC_OP]] // CHECK: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -253,29 +253,29 @@ long double test_volatile_inc(volatile _Atomic long double *addr) { // CHECK: call void @llvm.memset.p0i8.i64(i8* [[OLD_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 16 // CHECK: [[OLD_INT_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i128* - // CHECK: [[OLD_INT:%.+]] = load i128* [[OLD_INT_ADDR]], align 16 + // CHECK: [[OLD_INT:%.+]] = load i128, i128* [[OLD_INT_ADDR]], align 16 // CHECK: [[NEW_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR:%.+]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[NEW_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[INC_VALUE]], x86_fp80* [[NEW_VALUE_ADDR]], align 16 // CHECK: [[NEW_INT_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR]] to i128* - // CHECK: [[NEW_INT:%.+]] = load i128* [[NEW_INT_ADDR]], align 16 + // CHECK: [[NEW_INT:%.+]] = load i128, i128* [[NEW_INT_ADDR]], align 16 // CHECK: [[OBJ_INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* // CHECK: [[RES:%.+]] = cmpxchg volatile i128* [[OBJ_INT_ADDR]], i128 [[OLD_INT]], i128 [[NEW_INT]] seq_cst seq_cst // CHECK: [[OLD_VALUE:%.+]] = extractvalue { i128, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1 // CHECK: [[OLD_VALUE_RES_INT_PTR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_RES_PTR:%.+]] to i128* // CHECK: store i128 [[OLD_VALUE]], i128* [[OLD_VALUE_RES_INT_PTR]], align 16 - // CHECK: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 + // CHECK: [[LD_VALUE]] = load x86_fp80, x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK: [[ATOMIC_CONT]] // CHECK: ret x86_fp80 [[INC_VALUE]] // CHECK32-LABEL: @test_volatile_inc // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4 - // CHECK32: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4 // 
CHECK32: [[VOID_PTR:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[TEMP_LD_PTR:%.+]] = bitcast x86_fp80* [[TEMP_LD_ADDR:%.+]] to i8* // CHECK32: call void @__atomic_load(i32 12, i8* [[VOID_PTR]], i8* [[TEMP_LD_PTR]], i32 5) - // CHECK32: [[LD_VALUE:%.+]] = load x86_fp80* [[TEMP_LD_ADDR]], align 4 + // CHECK32: [[LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[TEMP_LD_ADDR]], align 4 // CHECK32: br label %[[ATOMIC_OP:.+]] // CHECK32: [[ATOMIC_OP]] // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -290,7 +290,7 @@ long double test_volatile_inc(volatile _Atomic long double *addr) { // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* // CHECK32: [[DESIRED:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR]] to i8* // CHECK32: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i32 12, i8* [[OBJ]], i8* [[EXPECTED]], i8* [[DESIRED]], i32 5, i32 5) - // CHECK32: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_ADDR]], align 4 + // CHECK32: [[LD_VALUE]] = load x86_fp80, x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK32: [[ATOMIC_CONT]] // CHECK32: ret x86_fp80 [[INC_VALUE]] @@ -300,12 +300,12 @@ long double test_volatile_inc(volatile _Atomic long double *addr) { long double test_volatile_dec(volatile _Atomic long double *addr) { // CHECK-LABEL: @test_volatile_dec // CHECK: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 8 - // CHECK: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 8 // CHECK: [[INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* - // CHECK: [[INT_VALUE:%.+]] = load atomic volatile i128* [[INT_ADDR]] seq_cst, align 16 + // CHECK: [[INT_VALUE:%.+]] = load atomic volatile i128, i128* [[INT_ADDR]] seq_cst, align 16 // CHECK: [[INT_LOAD_ADDR:%.+]] = bitcast x86_fp80* [[LD_ADDR:%.+]] to i128* // CHECK: store i128 [[INT_VALUE]], i128* [[INT_LOAD_ADDR]], align 16 - // CHECK: [[ORIG_LD_VALUE:%.+]] = load x86_fp80* [[LD_ADDR]], align 16 + // CHECK: [[ORIG_LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16 // CHECK: br label %[[ATOMIC_OP:.+]] // CHECK: [[ATOMIC_OP]] // CHECK: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[ORIG_LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -314,29 +314,29 @@ long double test_volatile_dec(volatile _Atomic long double *addr) { // CHECK: call void @llvm.memset.p0i8.i64(i8* [[OLD_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 16 // CHECK: [[OLD_INT_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i128* - // CHECK: [[OLD_INT:%.+]] = load i128* [[OLD_INT_ADDR]], align 16 + // CHECK: [[OLD_INT:%.+]] = load i128, i128* [[OLD_INT_ADDR]], align 16 // CHECK: [[NEW_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR:%.+]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[NEW_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[DEC_VALUE]], x86_fp80* [[NEW_VALUE_ADDR]], align 16 // CHECK: [[NEW_INT_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR]] to i128* - // CHECK: [[NEW_INT:%.+]] = load i128* [[NEW_INT_ADDR]], align 16 + // CHECK: [[NEW_INT:%.+]] = load i128, i128* [[NEW_INT_ADDR]], align 16 // CHECK: [[OBJ_INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* // CHECK: [[RES:%.+]] = cmpxchg volatile i128* [[OBJ_INT_ADDR]], i128 [[OLD_INT]], i128 [[NEW_INT]] 
seq_cst seq_cst // CHECK: [[OLD_VALUE:%.+]] = extractvalue { i128, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1 // CHECK: [[OLD_VALUE_RES_INT_PTR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_RES_PTR:%.+]] to i128* // CHECK: store i128 [[OLD_VALUE]], i128* [[OLD_VALUE_RES_INT_PTR]], align 16 - // CHECK: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 + // CHECK: [[LD_VALUE]] = load x86_fp80, x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK: [[ATOMIC_CONT]] // CHECK: ret x86_fp80 [[ORIG_LD_VALUE]] // CHECK32-LABEL: @test_volatile_dec // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4 - // CHECK32: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4 // CHECK32: [[VOID_PTR:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[TEMP_LD_PTR:%.+]] = bitcast x86_fp80* [[TEMP_LD_ADDR:%.+]] to i8* // CHECK32: call void @__atomic_load(i32 12, i8* [[VOID_PTR]], i8* [[TEMP_LD_PTR]], i32 5) - // CHECK32: [[ORIG_LD_VALUE:%.+]] = load x86_fp80* [[TEMP_LD_ADDR]], align 4 + // CHECK32: [[ORIG_LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[TEMP_LD_ADDR]], align 4 // CHECK32: br label %[[ATOMIC_OP:.+]] // CHECK32: [[ATOMIC_OP]] // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[ORIG_LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -351,7 +351,7 @@ long double test_volatile_dec(volatile _Atomic long double *addr) { // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* // CHECK32: [[DESIRED:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR]] to i8* // CHECK32: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i32 12, i8* [[OBJ]], i8* [[EXPECTED]], i8* [[DESIRED]], i32 5, i32 5) - // CHECK32: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_ADDR]], align 4 + // CHECK32: [[LD_VALUE]] = load x86_fp80, x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK32: [[ATOMIC_CONT]] // CHECK32: ret x86_fp80 [[ORIG_LD_VALUE]] @@ -362,12 +362,12 @@ long double test_volatile_compassign(volatile _Atomic long double *addr) { *addr -= 25; // CHECK-LABEL: @test_volatile_compassign // CHECK: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 8 - // CHECK: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 8 // CHECK: [[INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* - // CHECK: [[INT_VALUE:%.+]] = load atomic volatile i128* [[INT_ADDR]] seq_cst, align 16 + // CHECK: [[INT_VALUE:%.+]] = load atomic volatile i128, i128* [[INT_ADDR]] seq_cst, align 16 // CHECK: [[INT_LOAD_ADDR:%.+]] = bitcast x86_fp80* [[LD_ADDR:%.+]] to i128* // CHECK: store i128 [[INT_VALUE]], i128* [[INT_LOAD_ADDR]], align 16 - // CHECK: [[LD_VALUE:%.+]] = load x86_fp80* [[LD_ADDR]], align 16 + // CHECK: [[LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16 // CHECK: br label %[[ATOMIC_OP:.+]] // CHECK: [[ATOMIC_OP]] // CHECK: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -376,34 +376,34 @@ long double test_volatile_compassign(volatile _Atomic long double *addr) { // CHECK: call void @llvm.memset.p0i8.i64(i8* [[OLD_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 16 // CHECK: 
[[OLD_INT_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i128* - // CHECK: [[OLD_INT:%.+]] = load i128* [[OLD_INT_ADDR]], align 16 + // CHECK: [[OLD_INT:%.+]] = load i128, i128* [[OLD_INT_ADDR]], align 16 // CHECK: [[NEW_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR:%.+]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[NEW_VALUE_VOID_ADDR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[SUB_VALUE]], x86_fp80* [[NEW_VALUE_ADDR]], align 16 // CHECK: [[NEW_INT_ADDR:%.+]] = bitcast x86_fp80* [[NEW_VALUE_ADDR]] to i128* - // CHECK: [[NEW_INT:%.+]] = load i128* [[NEW_INT_ADDR]], align 16 + // CHECK: [[NEW_INT:%.+]] = load i128, i128* [[NEW_INT_ADDR]], align 16 // CHECK: [[OBJ_INT_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* // CHECK: [[RES:%.+]] = cmpxchg volatile i128* [[OBJ_INT_ADDR]], i128 [[OLD_INT]], i128 [[NEW_INT]] seq_cst seq_cst // CHECK: [[OLD_VALUE:%.+]] = extractvalue { i128, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1 // CHECK: [[OLD_VALUE_RES_INT_PTR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_RES_PTR:%.+]] to i128* // CHECK: store i128 [[OLD_VALUE]], i128* [[OLD_VALUE_RES_INT_PTR]], align 16 - // CHECK: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 + // CHECK: [[LD_VALUE]] = load x86_fp80, x86_fp80* [[OLD_VALUE_RES_PTR]], align 16 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK: [[ATOMIC_CONT]] - // CHECK: [[ADDR:%.+]] = load x86_fp80** %{{.+}}, align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** %{{.+}}, align 8 // CHECK: [[ADDR_INT:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* - // CHECK: [[INT_VAL:%.+]] = load atomic volatile i128* [[ADDR_INT]] seq_cst, align 16 + // CHECK: [[INT_VAL:%.+]] = load atomic volatile i128, i128* [[ADDR_INT]] seq_cst, align 16 // CHECK: [[INT_LD_TEMP:%.+]] = bitcast x86_fp80* [[LD_TEMP:%.+]] to i128* // CHECK: store i128 [[INT_VAL]], i128* [[INT_LD_TEMP:%.+]], align 16 - // CHECK: [[RET_VAL:%.+]] = load x86_fp80* [[LD_TEMP]], align 16 + // CHECK: [[RET_VAL:%.+]] = load x86_fp80, x86_fp80* [[LD_TEMP]], align 16 // CHECK32-LABEL: @test_volatile_compassign // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4 - // CHECK32: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4 // CHECK32: [[VOID_PTR:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[TEMP_LD_PTR:%.+]] = bitcast x86_fp80* [[TEMP_LD_ADDR:%.+]] to i8* // CHECK32: call void @__atomic_load(i32 12, i8* [[VOID_PTR]], i8* [[TEMP_LD_PTR]], i32 5) - // CHECK32: [[LD_VALUE:%.+]] = load x86_fp80* [[TEMP_LD_ADDR]], align 4 + // CHECK32: [[LD_VALUE:%.+]] = load x86_fp80, x86_fp80* [[TEMP_LD_ADDR]], align 4 // CHECK32: br label %[[ATOMIC_OP:.+]] // CHECK32: [[ATOMIC_OP]] // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] @@ -418,14 +418,14 @@ long double test_volatile_compassign(volatile _Atomic long double *addr) { // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* // CHECK32: [[DESIRED:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR]] to i8* // CHECK32: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i32 12, i8* [[OBJ]], i8* [[EXPECTED]], i8* [[DESIRED]], i32 5, i32 5) - // CHECK32: [[LD_VALUE]] = load x86_fp80* [[OLD_VALUE_ADDR]], align 4 + // CHECK32: [[LD_VALUE]] = load x86_fp80, x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: br i1 [[FAIL_SUCCESS]], 
label %[[ATOMIC_CONT:.+]], label %[[ATOMIC_OP]] // CHECK32: [[ATOMIC_CONT]] - // CHECK32: [[ADDR:%.+]] = load x86_fp80** %{{.+}}, align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** %{{.+}}, align 4 // CHECK32: [[VOID_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[VOID_GET_ADDR:%.+]] = bitcast x86_fp80* [[GET_ADDR:%.+]] to i8* // CHECK32: call void @__atomic_load(i32 12, i8* [[VOID_ADDR]], i8* [[VOID_GET_ADDR]], i32 5) - // CHECK32: [[RET_VAL:%.+]] = load x86_fp80* [[GET_ADDR]], align 4 + // CHECK32: [[RET_VAL:%.+]] = load x86_fp80, x86_fp80* [[GET_ADDR]], align 4 // CHECK32: ret x86_fp80 [[RET_VAL]] return *addr; } @@ -433,17 +433,17 @@ long double test_volatile_compassign(volatile _Atomic long double *addr) { long double test_volatile_assign(volatile _Atomic long double *addr) { // CHECK-LABEL: @test_volatile_assign // CHECK: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 8 - // CHECK: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 8 // CHECK: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR:%.+]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[STORE_TEMP_VOID_PTR]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 {{.+}}, x86_fp80* [[STORE_TEMP_PTR]], align 16 // CHECK: [[STORE_TEMP_INT_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR]] to i128* - // CHECK: [[STORE_TEMP_INT:%.+]] = load i128* [[STORE_TEMP_INT_PTR]], align 16 + // CHECK: [[STORE_TEMP_INT:%.+]] = load i128, i128* [[STORE_TEMP_INT_PTR]], align 16 // CHECK: [[ADDR_INT:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* // CHECK: store atomic volatile i128 [[STORE_TEMP_INT]], i128* [[ADDR_INT]] seq_cst, align 16 // CHECK32-LABEL: @test_volatile_assign // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4 - // CHECK32: [[ADDR:%.+]] = load x86_fp80** [[ADDR_ADDR]], align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4 // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR:%.+]] to i8* // CHECK32: call void @llvm.memset.p0i8.i64(i8* [[STORE_TEMP_VOID_PTR]], i8 0, i64 12, i32 4, i1 false) // CHECK32: store x86_fp80 {{.+}}, x86_fp80* [[STORE_TEMP_PTR]], align 4 @@ -451,18 +451,18 @@ long double test_volatile_assign(volatile _Atomic long double *addr) { // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR]] to i8* // CHECK32: call void @__atomic_store(i32 12, i8* [[ADDR_VOID]], i8* [[STORE_TEMP_VOID_PTR]], i32 5) *addr = 115; - // CHECK: [[ADDR:%.+]] = load x86_fp80** %{{.+}}, align 8 + // CHECK: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** %{{.+}}, align 8 // CHECK: [[ADDR_INT:%.+]] = bitcast x86_fp80* [[ADDR]] to i128* - // CHECK: [[INT_VAL:%.+]] = load atomic volatile i128* [[ADDR_INT]] seq_cst, align 16 + // CHECK: [[INT_VAL:%.+]] = load atomic volatile i128, i128* [[ADDR_INT]] seq_cst, align 16 // CHECK: [[INT_LD_TEMP:%.+]] = bitcast x86_fp80* [[LD_TEMP:%.+]] to i128* // CHECK: store i128 [[INT_VAL]], i128* [[INT_LD_TEMP:%.+]], align 16 - // CHECK: [[RET_VAL:%.+]] = load x86_fp80* [[LD_TEMP]], align 16 + // CHECK: [[RET_VAL:%.+]] = load x86_fp80, x86_fp80* [[LD_TEMP]], align 16 // CHECK: ret x86_fp80 [[RET_VAL]] - // CHECK32: [[ADDR:%.+]] = load x86_fp80** %{{.+}}, align 4 + // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** %{{.+}}, align 4 // CHECK32: [[VOID_ADDR:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[VOID_LD_TEMP:%.+]] = bitcast x86_fp80* [[LD_TEMP:%.+]] to i8* // 
CHECK32: call void @__atomic_load(i32 12, i8* [[VOID_ADDR]], i8* [[VOID_LD_TEMP]], i32 5) - // CHECK32: [[RET_VAL:%.+]] = load x86_fp80* [[LD_TEMP]], align 4 + // CHECK32: [[RET_VAL:%.+]] = load x86_fp80, x86_fp80* [[LD_TEMP]], align 4 // CHECK32: ret x86_fp80 [[RET_VAL]] return *addr; diff --git a/test/CodeGen/x86_64-arguments.c b/test/CodeGen/x86_64-arguments.c index f8cc765e0d..a0e30c6dea 100644 --- a/test/CodeGen/x86_64-arguments.c +++ b/test/CodeGen/x86_64-arguments.c @@ -402,8 +402,8 @@ void test49(double d, double e) { test49_helper(d, e); } // CHECK-LABEL: define void @test49( -// CHECK: [[T0:%.*]] = load double* -// CHECK-NEXT: [[T1:%.*]] = load double* +// CHECK: [[T0:%.*]] = load double, double* +// CHECK-NEXT: [[T1:%.*]] = load double, double* // CHECK-NEXT: call void (double, ...)* @test49_helper(double [[T0]], double [[T1]]) void test50_helper(); @@ -411,8 +411,8 @@ void test50(double d, double e) { test50_helper(d, e); } // CHECK-LABEL: define void @test50( -// CHECK: [[T0:%.*]] = load double* -// CHECK-NEXT: [[T1:%.*]] = load double* +// CHECK: [[T0:%.*]] = load double, double* +// CHECK-NEXT: [[T1:%.*]] = load double, double* // CHECK-NEXT: call void (double, double, ...)* bitcast (void (...)* @test50_helper to void (double, double, ...)*)(double [[T0]], double [[T1]]) struct test51_s { __uint128_t intval; }; @@ -424,7 +424,7 @@ void test51(struct test51_s *s, __builtin_va_list argList) { // CHECK: [[TMP_ADDR:%.*]] = alloca [[STRUCT_TEST51:%.*]], align 16 // CHECK: br i1 // CHECK: [[REG_SAVE_AREA_PTR:%.*]] = getelementptr inbounds {{.*}}, i32 0, i32 3 -// CHECK-NEXT: [[REG_SAVE_AREA:%.*]] = load i8** [[REG_SAVE_AREA_PTR]] +// CHECK-NEXT: [[REG_SAVE_AREA:%.*]] = load i8*, i8** [[REG_SAVE_AREA_PTR]] // CHECK-NEXT: [[VALUE_ADDR:%.*]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i32 {{.*}} // CHECK-NEXT: [[CASTED_VALUE_ADDR:%.*]] = bitcast i8* [[VALUE_ADDR]] to [[STRUCT_TEST51]] // CHECK-NEXT: [[CASTED_TMP_ADDR:%.*]] = bitcast [[STRUCT_TEST51]]* [[TMP_ADDR]] to i8* diff --git a/test/CodeGen/xcore-abi.c b/test/CodeGen/xcore-abi.c index f1e33dc05e..23fb441469 100644 --- a/test/CodeGen/xcore-abi.c +++ b/test/CodeGen/xcore-abi.c @@ -31,51 +31,51 @@ void testva (int n, ...) 
{ char* v1 = va_arg (ap, char*); f(v1); - // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]] + // CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]] // CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to i8** // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 4 // CHECK: store i8* [[IN]], i8** [[AP]] - // CHECK: [[V1:%[a-z0-9]+]] = load i8** [[P]] + // CHECK: [[V1:%[a-z0-9]+]] = load i8*, i8** [[P]] // CHECK: store i8* [[V1]], i8** [[V:%[a-z0-9]+]], align 4 - // CHECK: [[V2:%[a-z0-9]+]] = load i8** [[V]], align 4 + // CHECK: [[V2:%[a-z0-9]+]] = load i8*, i8** [[V]], align 4 // CHECK: call void @f(i8* [[V2]]) char v2 = va_arg (ap, char); // expected-warning{{second argument to 'va_arg' is of promotable type 'char'}} f(&v2); - // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]] + // CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]] // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 4 // CHECK: store i8* [[IN]], i8** [[AP]] - // CHECK: [[V1:%[a-z0-9]+]] = load i8* [[I]] + // CHECK: [[V1:%[a-z0-9]+]] = load i8, i8* [[I]] // CHECK: store i8 [[V1]], i8* [[V:%[a-z0-9]+]], align 1 // CHECK: call void @f(i8* [[V]]) int v3 = va_arg (ap, int); f(&v3); - // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]] + // CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]] // CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to i32* // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 4 // CHECK: store i8* [[IN]], i8** [[AP]] - // CHECK: [[V1:%[a-z0-9]+]] = load i32* [[P]] + // CHECK: [[V1:%[a-z0-9]+]] = load i32, i32* [[P]] // CHECK: store i32 [[V1]], i32* [[V:%[a-z0-9]+]], align 4 // CHECK: [[V2:%[a-z0-9]+]] = bitcast i32* [[V]] to i8* // CHECK: call void @f(i8* [[V2]]) long long int v4 = va_arg (ap, long long int); f(&v4); - // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]] + // CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]] // CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to i64* // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 8 // CHECK: store i8* [[IN]], i8** [[AP]] - // CHECK: [[V1:%[a-z0-9]+]] = load i64* [[P]] + // CHECK: [[V1:%[a-z0-9]+]] = load i64, i64* [[P]] // CHECK: store i64 [[V1]], i64* [[V:%[a-z0-9]+]], align 4 // CHECK:[[V2:%[a-z0-9]+]] = bitcast i64* [[V]] to i8* // CHECK: call void @f(i8* [[V2]]) struct x v5 = va_arg (ap, struct x); // typical aggregate type f(&v5); - // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]] + // CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]] // CHECK: [[I2:%[a-z0-9]+]] = bitcast i8* [[I]] to %struct.x** - // CHECK: [[P:%[a-z0-9]+]] = load %struct.x** [[I2]] + // CHECK: [[P:%[a-z0-9]+]] = load %struct.x*, %struct.x** [[I2]] // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 4 // CHECK: store i8* [[IN]], i8** [[AP]] // CHECK: [[V1:%[a-z0-9]+]] = bitcast %struct.x* [[V:%[a-z0-9]+]] to i8* @@ -86,9 +86,9 @@ void testva (int n, ...) { int* v6 = va_arg (ap, int[4]); // an unusual aggregate type f(v6); - // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]] + // CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]] // CHECK: [[I2:%[a-z0-9]+]] = bitcast i8* [[I]] to [4 x i32]** - // CHECK: [[P:%[a-z0-9]+]] = load [4 x i32]** [[I2]] + // CHECK: [[P:%[a-z0-9]+]] = load [4 x i32]*, [4 x i32]** [[I2]] // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 4 // CHECK: store i8* [[IN]], i8** [[AP]] // CHECK: [[V1:%[a-z0-9]+]] = bitcast [4 x i32]* [[V0:%[a-z0-9]+]] to i8* @@ -96,17 +96,17 @@ void testva (int n, ...) 
{ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[V1]], i8* [[P1]], i32 16, i32 4, i1 false) // CHECK: [[V2:%[a-z0-9]+]] = getelementptr inbounds [4 x i32], [4 x i32]* [[V0]], i32 0, i32 0 // CHECK: store i32* [[V2]], i32** [[V:%[a-z0-9]+]], align 4 - // CHECK: [[V3:%[a-z0-9]+]] = load i32** [[V]], align 4 + // CHECK: [[V3:%[a-z0-9]+]] = load i32*, i32** [[V]], align 4 // CHECK: [[V4:%[a-z0-9]+]] = bitcast i32* [[V3]] to i8* // CHECK: call void @f(i8* [[V4]]) double v7 = va_arg (ap, double); f(&v7); - // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]] + // CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]] // CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to double* // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 8 // CHECK: store i8* [[IN]], i8** [[AP]] - // CHECK: [[V1:%[a-z0-9]+]] = load double* [[P]] + // CHECK: [[V1:%[a-z0-9]+]] = load double, double* [[P]] // CHECK: store double [[V1]], double* [[V:%[a-z0-9]+]], align 4 // CHECK: [[V2:%[a-z0-9]+]] = bitcast double* [[V]] to i8* // CHECK: call void @f(i8* [[V2]]) diff --git a/test/CodeGenCUDA/address-spaces.cu b/test/CodeGenCUDA/address-spaces.cu index b80820683f..5788731288 100644 --- a/test/CodeGenCUDA/address-spaces.cu +++ b/test/CodeGenCUDA/address-spaces.cu @@ -27,25 +27,25 @@ struct MyStruct { // CHECK: @b = addrspace(3) global float 0.000000e+00 __device__ void foo() { - // CHECK: load i32* addrspacecast (i32 addrspace(1)* @i to i32*) + // CHECK: load i32, i32* addrspacecast (i32 addrspace(1)* @i to i32*) i++; - // CHECK: load i32* addrspacecast (i32 addrspace(4)* @j to i32*) + // CHECK: load i32, i32* addrspacecast (i32 addrspace(4)* @j to i32*) j++; - // CHECK: load i32* addrspacecast (i32 addrspace(3)* @k to i32*) + // CHECK: load i32, i32* addrspacecast (i32 addrspace(3)* @k to i32*) k++; static int li; - // CHECK: load i32* addrspacecast (i32 addrspace(1)* @_ZZ3foovE2li to i32*) + // CHECK: load i32, i32* addrspacecast (i32 addrspace(1)* @_ZZ3foovE2li to i32*) li++; __constant__ int lj; - // CHECK: load i32* addrspacecast (i32 addrspace(4)* @_ZZ3foovE2lj to i32*) + // CHECK: load i32, i32* addrspacecast (i32 addrspace(4)* @_ZZ3foovE2lj to i32*) lj++; __shared__ int lk; - // CHECK: load i32* addrspacecast (i32 addrspace(3)* @_ZZ3foovE2lk to i32*) + // CHECK: load i32, i32* addrspacecast (i32 addrspace(3)* @_ZZ3foovE2lk to i32*) lk++; } diff --git a/test/CodeGenCXX/2009-12-23-MissingSext.cpp b/test/CodeGenCXX/2009-12-23-MissingSext.cpp index 2b42367842..bff6ac7bc8 100644 --- a/test/CodeGenCXX/2009-12-23-MissingSext.cpp +++ b/test/CodeGenCXX/2009-12-23-MissingSext.cpp @@ -8,11 +8,11 @@ struct foo { }; int bar(struct foo p, int x) { // CHECK: bar -// CHECK: %[[val:.*]] = load i32* {{.*}} +// CHECK: %[[val:.*]] = load i32, i32* {{.*}} // CHECK-NEXT: ashr i32 %[[val]] -// CHECK: = load i32* {{.*}} -// CHECK: = load i32* {{.*}} -// CHECK: %[[val:.*]] = load i32* {{.*}} +// CHECK: = load i32, i32* {{.*}} +// CHECK: = load i32, i32* {{.*}} +// CHECK: %[[val:.*]] = load i32, i32* {{.*}} // CHECK-NEXT: ashr i32 %[[val]] x = (p.y > x ? 
x : p.y); return x; diff --git a/test/CodeGenCXX/align-avx-complete-objects.cpp b/test/CodeGenCXX/align-avx-complete-objects.cpp index 25f4ef1099..6ab17f5d65 100644 --- a/test/CodeGenCXX/align-avx-complete-objects.cpp +++ b/test/CodeGenCXX/align-avx-complete-objects.cpp @@ -16,14 +16,14 @@ volatile float TestAlign(void) // CHECK-NEXT: [[CALL:%.*]] = call noalias i8* @_Znwm(i64 32) // CHECK-NEXT: [[ZERO:%.*]] = bitcast i8* [[CALL]] to <8 x float>* // CHECK-NEXT: store <8 x float>* [[ZERO]], <8 x float>** [[P:%.*]], align 8 -// CHECK-NEXT: [[ONE:%.*]] = load <8 x float>** [[P]], align 8 -// CHECK-NEXT: [[TWO:%.*]] = load volatile <8 x float>* [[ONE]], align 16 -// CHECK-NEXT: [[THREE:%.*]] = load <8 x float>** [[P]], align 8 +// CHECK-NEXT: [[ONE:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8 +// CHECK-NEXT: [[TWO:%.*]] = load volatile <8 x float>, <8 x float>* [[ONE]], align 16 +// CHECK-NEXT: [[THREE:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8 // CHECK-NEXT: store volatile <8 x float> [[TWO]], <8 x float>* [[THREE]], align 16 -// CHECK-NEXT: [[FOUR:%.*]] = load <8 x float>** [[P]], align 8 -// CHECK-NEXT: [[FIVE:%.*]] = load volatile <8 x float>* [[FOUR]], align 16 +// CHECK-NEXT: [[FOUR:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8 +// CHECK-NEXT: [[FIVE:%.*]] = load volatile <8 x float>, <8 x float>* [[FOUR]], align 16 // CHECK-NEXT: store <8 x float> [[FIVE]], <8 x float>* [[R]], align 32 -// CHECK-NEXT: [[SIX:%.*]] = load <8 x float>* [[R]], align 32 +// CHECK-NEXT: [[SIX:%.*]] = load <8 x float>, <8 x float>* [[R]], align 32 // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x float> [[SIX]], i32 0 // CHECK-NEXT: ret float [[VECEXT]] @@ -45,13 +45,13 @@ volatile float TestAlign2(void) // CHECK-NEXT: [[CALL:%.*]] = call noalias i8* @_Znwm(i64 32) // CHECK-NEXT: [[ZERO:%.*]] = bitcast i8* [[CALL]] to <8 x float>* // CHECK-NEXT: store <8 x float>* [[ZERO]], <8 x float>** [[P:%.*]], align 8 -// CHECK-NEXT: [[ONE:%.*]] = load <8 x float>** [[P]], align 8 -// CHECK-NEXT: [[TWO:%.*]] = load volatile <8 x float>* [[ONE]], align 32 -// CHECK-NEXT: [[THREE:%.*]] = load <8 x float>** [[P]], align 8 +// CHECK-NEXT: [[ONE:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8 +// CHECK-NEXT: [[TWO:%.*]] = load volatile <8 x float>, <8 x float>* [[ONE]], align 32 +// CHECK-NEXT: [[THREE:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8 // CHECK-NEXT: store volatile <8 x float> [[TWO]], <8 x float>* [[THREE]], align 32 -// CHECK-NEXT: [[FOUR:%.*]] = load <8 x float>** [[P]], align 8 -// CHECK-NEXT: [[FIVE:%.*]] = load volatile <8 x float>* [[FOUR]], align 32 +// CHECK-NEXT: [[FOUR:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8 +// CHECK-NEXT: [[FIVE:%.*]] = load volatile <8 x float>, <8 x float>* [[FOUR]], align 32 // CHECK-NEXT: store <8 x float> [[FIVE]], <8 x float>* [[R]], align 32 -// CHECK-NEXT: [[SIX:%.*]] = load <8 x float>* [[R]], align 32 +// CHECK-NEXT: [[SIX:%.*]] = load <8 x float>, <8 x float>* [[R]], align 32 // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x float> [[SIX]], i32 0 // CHECK-NEXT: ret float [[VECEXT]] diff --git a/test/CodeGenCXX/anonymous-union-member-initializer.cpp b/test/CodeGenCXX/anonymous-union-member-initializer.cpp index 98e982ddf6..69fa61c215 100644 --- a/test/CodeGenCXX/anonymous-union-member-initializer.cpp +++ b/test/CodeGenCXX/anonymous-union-member-initializer.cpp @@ -82,7 +82,7 @@ namespace PR10512 { // CHECK-LABEL: define void @_ZN7PR105121AC2Ev // CHECK: [[THISADDR:%[a-zA-z0-9.]+]] = alloca 
[[A:%"struct[A-Za-z0-9:.]+"]] // CHECK-NEXT: store [[A]]* [[THIS:%[a-zA-z0-9.]+]], [[A]]** [[THISADDR]] - // CHECK-NEXT: [[THIS1:%[a-zA-z0-9.]+]] = load [[A]]** [[THISADDR]] + // CHECK-NEXT: [[THIS1:%[a-zA-z0-9.]+]] = load [[A]]*, [[A]]** [[THISADDR]] // CHECK-NEXT: ret void A::A() {} @@ -91,11 +91,11 @@ namespace PR10512 { // CHECK-NEXT: [[XADDR:%[a-zA-z0-9.]+]] = alloca i32 // CHECK-NEXT: store [[A]]* [[THIS:%[a-zA-z0-9.]+]], [[A]]** [[THISADDR]] // CHECK-NEXT: store i32 [[X:%[a-zA-z0-9.]+]], i32* [[XADDR]] - // CHECK-NEXT: [[THIS1:%[a-zA-z0-9.]+]] = load [[A]]** [[THISADDR]] + // CHECK-NEXT: [[THIS1:%[a-zA-z0-9.]+]] = load [[A]]*, [[A]]** [[THISADDR]] // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 0}} // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 0}} // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 0}} - // CHECK-NEXT: [[TMP:%[a-zA-z0-9.]+]] = load i32* [[XADDR]] + // CHECK-NEXT: [[TMP:%[a-zA-z0-9.]+]] = load i32, i32* [[XADDR]] // CHECK-NEXT: store i32 [[TMP]] // CHECK-NEXT: ret void A::A(int x) : x(x) { } @@ -105,11 +105,11 @@ namespace PR10512 { // CHECK-NEXT: [[XADDR:%[a-zA-z0-9.]+]] = alloca i64 // CHECK-NEXT: store [[A]]* [[THIS:%[a-zA-z0-9.]+]], [[A]]** [[THISADDR]] // CHECK-NEXT: store i64 [[X:%[a-zA-z0-9.]+]], i64* [[XADDR]] - // CHECK-NEXT: [[THIS1:%[a-zA-z0-9.]+]] = load [[A]]** [[THISADDR]] + // CHECK-NEXT: [[THIS1:%[a-zA-z0-9.]+]] = load [[A]]*, [[A]]** [[THISADDR]] // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 0}} // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 1}} // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 0}} - // CHECK-NEXT: [[TMP:%[a-zA-z0-9.]+]] = load i64* [[XADDR]] + // CHECK-NEXT: [[TMP:%[a-zA-z0-9.]+]] = load i64, i64* [[XADDR]] // CHECK-NEXT: [[CONV:%[a-zA-z0-9.]+]] = trunc i64 [[TMP]] to i32 // CHECK-NEXT: store i32 [[CONV]] // CHECK-NEXT: ret void diff --git a/test/CodeGenCXX/apple-kext-indirect-call-2.cpp b/test/CodeGenCXX/apple-kext-indirect-call-2.cpp index 68ecaf0d5f..70b5c4b300 100644 --- a/test/CodeGenCXX/apple-kext-indirect-call-2.cpp +++ b/test/CodeGenCXX/apple-kext-indirect-call-2.cpp @@ -18,7 +18,7 @@ struct B : virtual A { void B::VF() {} void FUNC(B* p) { -// CHECK: [[T1:%.*]] = load i8* (%struct.A*)** getelementptr inbounds (i8* (%struct.A*)** bitcast ([4 x i8*]* @_ZTV1A to i8* (%struct.A*)**), i64 2) +// CHECK: [[T1:%.*]] = load i8* (%struct.A*)*, i8* (%struct.A*)** getelementptr inbounds (i8* (%struct.A*)** bitcast ([4 x i8*]* @_ZTV1A to i8* (%struct.A*)**), i64 2) // CHECK-NEXT: [[T2:%.*]] = call i8* [[T1]] const char* c = p->A::abc(); } @@ -33,7 +33,7 @@ struct Derived : public Base { }; void FUNC1(Derived* p) { -// CHECK: [[U1:%.*]] = load i8* (%struct.Base*)** getelementptr inbounds (i8* (%struct.Base*)** bitcast ([4 x i8*]* @_ZTV4Base to i8* (%struct.Base*)**), i64 2) +// CHECK: [[U1:%.*]] = load i8* (%struct.Base*)*, i8* (%struct.Base*)** getelementptr inbounds (i8* (%struct.Base*)** bitcast ([4 x i8*]* @_ZTV4Base to i8* (%struct.Base*)**), i64 2) // CHECK-NEXT: [[U2:%.*]] = call i8* [[U1]] char* c = p->Base::abc(); } @@ -49,7 +49,7 @@ struct Derived2 : virtual Base2 { char* Derived2::efg(void) const { return 0; } void FUNC2(Derived2* p) { -// CHECK: [[V1:%.*]] = load i8* (%struct.Derived2*)** getelementptr inbounds (i8* (%struct.Derived2*)** bitcast ([5 x i8*]* @_ZTV8Derived2 to i8* (%struct.Derived2*)**), i64 3) +// CHECK: [[V1:%.*]] = load i8* (%struct.Derived2*)*, i8* (%struct.Derived2*)** getelementptr inbounds (i8* (%struct.Derived2*)** bitcast ([5 x i8*]* @_ZTV8Derived2 to i8* (%struct.Derived2*)**), 
i64 3) // CHECK-NEXT: [[V2:%.*]] = call i8* [[V1]] char* c = p->Derived2::efg(); } @@ -70,7 +70,7 @@ struct Sub : D1, D2 { char* D2::abc(void) const { return 0; } void FUNC3(Sub* p) { -// CHECK: [[W1:%.*]] = load i8* (%struct.D2*)** getelementptr inbounds (i8* (%struct.D2*)** bitcast ([5 x i8*]* @_ZTV2D2 to i8* (%struct.D2*)**), i64 3) +// CHECK: [[W1:%.*]] = load i8* (%struct.D2*)*, i8* (%struct.D2*)** getelementptr inbounds (i8* (%struct.D2*)** bitcast ([5 x i8*]* @_ZTV2D2 to i8* (%struct.D2*)**), i64 3) // CHECK-NEXT: [[W2:%.*]] = call i8* [[W1]] char* c = p->D2::abc(); } diff --git a/test/CodeGenCXX/apple-kext-indirect-virtual-dtor-call.cpp b/test/CodeGenCXX/apple-kext-indirect-virtual-dtor-call.cpp index e5d85c1eab..ae57ecc59c 100644 --- a/test/CodeGenCXX/apple-kext-indirect-virtual-dtor-call.cpp +++ b/test/CodeGenCXX/apple-kext-indirect-virtual-dtor-call.cpp @@ -12,10 +12,10 @@ void DELETE(B1 *pb1) { pb1->B1::~B1(); } // CHECK-LABEL: define void @_ZN2B1D0Ev -// CHECK: [[T1:%.*]] = load void (%struct.B1*)** getelementptr inbounds (void (%struct.B1*)** bitcast ([5 x i8*]* @_ZTV2B1 to void (%struct.B1*)**), i64 2) +// CHECK: [[T1:%.*]] = load void (%struct.B1*)*, void (%struct.B1*)** getelementptr inbounds (void (%struct.B1*)** bitcast ([5 x i8*]* @_ZTV2B1 to void (%struct.B1*)**), i64 2) // CHECK-NEXT: call void [[T1]](%struct.B1* [[T2:%.*]]) // CHECK-LABEL: define void @_Z6DELETEP2B1 -// CHECK: [[T3:%.*]] = load void (%struct.B1*)** getelementptr inbounds (void (%struct.B1*)** bitcast ([5 x i8*]* @_ZTV2B1 to void (%struct.B1*)**), i64 2) +// CHECK: [[T3:%.*]] = load void (%struct.B1*)*, void (%struct.B1*)** getelementptr inbounds (void (%struct.B1*)** bitcast ([5 x i8*]* @_ZTV2B1 to void (%struct.B1*)**), i64 2) // CHECK-NEXT: call void [[T3]](%struct.B1* [[T4:%.*]]) template diff --git a/test/CodeGenCXX/arm-vaarg.cpp b/test/CodeGenCXX/arm-vaarg.cpp index 9850fb342f..0220ac862e 100644 --- a/test/CodeGenCXX/arm-vaarg.cpp +++ b/test/CodeGenCXX/arm-vaarg.cpp @@ -9,7 +9,7 @@ int take_args(int a, ...) { // CHECK: call void @llvm.va_start emptyvar = __builtin_va_arg(l, Empty); -// CHECK: load i8** +// CHECK: load i8*, i8** // CHECK-NOT: getelementptr // CHECK: [[EMPTY_PTR:%[a-zA-Z0-9._]+]] = bitcast i8* {{%[a-zA-Z0-9._]+}} to %struct.Empty* @@ -17,10 +17,10 @@ int take_args(int a, ...) { // (e.g. it's at the very bottom of the stack and the next page is // invalid). This doesn't matter provided it's never loaded (there's no // well-defined way to tell), but it becomes a problem if we do try to use it. 
-// CHECK-NOT: load %struct.Empty* [[EMPTY_PTR]] +// CHECK-NOT: load %struct.Empty, %struct.Empty* [[EMPTY_PTR]] int i = __builtin_va_arg(l, int); -// CHECK: load i32* +// CHECK: load i32, i32* __builtin_va_end(l); return i; diff --git a/test/CodeGenCXX/arm.cpp b/test/CodeGenCXX/arm.cpp index c986773154..7d94cba7ff 100644 --- a/test/CodeGenCXX/arm.cpp +++ b/test/CodeGenCXX/arm.cpp @@ -56,14 +56,14 @@ namespace test1 { // CHECK: define linkonce_odr [[A]]* @_ZN5test11AC1Ei([[A]]* returned %this, i32 %i) unnamed_addr // CHECK: [[THIS:%.*]] = alloca [[A]]*, align 4 // CHECK: store [[A]]* {{.*}}, [[A]]** [[THIS]] - // CHECK: [[THIS1:%.*]] = load [[A]]** [[THIS]] + // CHECK: [[THIS1:%.*]] = load [[A]]*, [[A]]** [[THIS]] // CHECK: {{%.*}} = call [[A]]* @_ZN5test11AC2Ei( // CHECK: ret [[A]]* [[THIS1]] // CHECK: define linkonce_odr [[A]]* @_ZN5test11AD1Ev([[A]]* returned %this) unnamed_addr // CHECK: [[THIS:%.*]] = alloca [[A]]*, align 4 // CHECK: store [[A]]* {{.*}}, [[A]]** [[THIS]] - // CHECK: [[THIS1:%.*]] = load [[A]]** [[THIS]] + // CHECK: [[THIS1:%.*]] = load [[A]]*, [[A]]** [[THIS]] // CHECK: {{%.*}} = call [[A]]* @_ZN5test11AD2Ev( // CHECK: ret [[A]]* [[THIS1]] } @@ -117,7 +117,7 @@ namespace test3 { void b(int n) { // CHECK-LABEL: define void @_ZN5test31bEi( - // CHECK: [[N:%.*]] = load i32* + // CHECK: [[N:%.*]] = load i32, i32* // CHECK: @llvm.umul.with.overflow.i32(i32 [[N]], i32 4) // CHECK: @llvm.uadd.with.overflow.i32(i32 {{.*}}, i32 8) // CHECK: [[OR:%.*]] = or i1 @@ -138,7 +138,7 @@ namespace test3 { void d(int n) { // CHECK-LABEL: define void @_ZN5test31dEi( - // CHECK: [[N:%.*]] = load i32* + // CHECK: [[N:%.*]] = load i32, i32* // CHECK: @llvm.umul.with.overflow.i32(i32 [[N]], i32 80) // CHECK: [[NE:%.*]] = mul i32 [[N]], 20 // CHECK: @llvm.uadd.with.overflow.i32(i32 {{.*}}, i32 8) @@ -190,7 +190,7 @@ namespace test4 { void b(int n) { // CHECK-LABEL: define void @_ZN5test41bEi( - // CHECK: [[N:%.*]] = load i32* + // CHECK: [[N:%.*]] = load i32, i32* // CHECK: @llvm.umul.with.overflow.i32(i32 [[N]], i32 4) // CHECK: @llvm.uadd.with.overflow.i32(i32 {{.*}}, i32 8) // CHECK: [[SZ:%.*]] = select @@ -210,7 +210,7 @@ namespace test4 { void d(int n) { // CHECK-LABEL: define void @_ZN5test41dEi( - // CHECK: [[N:%.*]] = load i32* + // CHECK: [[N:%.*]] = load i32, i32* // CHECK: @llvm.umul.with.overflow.i32(i32 [[N]], i32 80) // CHECK: [[NE:%.*]] = mul i32 [[N]], 20 // CHECK: @llvm.uadd.with.overflow.i32(i32 {{.*}}, i32 8) @@ -226,7 +226,7 @@ namespace test4 { // CHECK: [[ALLOC:%.*]] = getelementptr inbounds {{.*}}, i64 -8 // CHECK: getelementptr inbounds {{.*}}, i64 4 // CHECK: bitcast - // CHECK: [[T0:%.*]] = load i32* + // CHECK: [[T0:%.*]] = load i32, i32* // CHECK: [[T1:%.*]] = mul i32 4, [[T0]] // CHECK: [[T2:%.*]] = add i32 [[T1]], 8 // CHECK: call void @_ZN5test41AdaEPvm(i8* [[ALLOC]], i32 [[T2]]) @@ -238,7 +238,7 @@ namespace test4 { // CHECK: [[ALLOC:%.*]] = getelementptr inbounds {{.*}}, i64 -8 // CHECK: getelementptr inbounds {{.*}}, i64 4 // CHECK: bitcast - // CHECK: [[T0:%.*]] = load i32* + // CHECK: [[T0:%.*]] = load i32, i32* // CHECK: [[T1:%.*]] = mul i32 4, [[T0]] // CHECK: [[T2:%.*]] = add i32 [[T1]], 8 // CHECK: call void @_ZN5test41AdaEPvm(i8* [[ALLOC]], i32 [[T2]]) @@ -256,7 +256,7 @@ namespace test5 { void test(A *a) { // CHECK: [[PTR:%.*]] = alloca [[A:%.*]]*, align 4 // CHECK-NEXT: store [[A]]* {{.*}}, [[A]]** [[PTR]], align 4 - // CHECK-NEXT: [[TMP:%.*]] = load [[A]]** [[PTR]], align 4 + // CHECK-NEXT: [[TMP:%.*]] = load [[A]]*, [[A]]** [[PTR]], align 4 // 
CHECK-NEXT: call [[A]]* @_ZN5test51AD1Ev([[A]]* [[TMP]]) // CHECK-NEXT: ret void a->~A(); @@ -272,13 +272,13 @@ namespace test6 { void test(A *a) { // CHECK: [[AVAR:%.*]] = alloca [[A:%.*]]*, align 4 // CHECK-NEXT: store [[A]]* {{.*}}, [[A]]** [[AVAR]], align 4 - // CHECK-NEXT: [[V:%.*]] = load [[A]]** [[AVAR]], align 4 + // CHECK-NEXT: [[V:%.*]] = load [[A]]*, [[A]]** [[AVAR]], align 4 // CHECK-NEXT: [[ISNULL:%.*]] = icmp eq [[A]]* [[V]], null // CHECK-NEXT: br i1 [[ISNULL]] // CHECK: [[T0:%.*]] = bitcast [[A]]* [[V]] to void ([[A]]*)*** - // CHECK-NEXT: [[T1:%.*]] = load void ([[A]]*)*** [[T0]] + // CHECK-NEXT: [[T1:%.*]] = load void ([[A]]*)**, void ([[A]]*)*** [[T0]] // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds void ([[A]]*)*, void ([[A]]*)** [[T1]], i64 1 - // CHECK-NEXT: [[T3:%.*]] = load void ([[A]]*)** [[T2]] + // CHECK-NEXT: [[T3:%.*]] = load void ([[A]]*)*, void ([[A]]*)** [[T2]] // CHECK-NEXT: call void [[T3]]([[A]]* [[V]]) // CHECK-NEXT: br label // CHECK: ret void @@ -293,7 +293,7 @@ namespace test7 { // CHECK-LABEL: define void @_ZN5test74testEv() void test() { - // CHECK: [[T0:%.*]] = load atomic i8* bitcast (i32* @_ZGVZN5test74testEvE1x to i8*) acquire, align 1 + // CHECK: [[T0:%.*]] = load atomic i8, i8* bitcast (i32* @_ZGVZN5test74testEvE1x to i8*) acquire, align 1 // CHECK-NEXT: [[T1:%.*]] = and i8 [[T0]], 1 // CHECK-NEXT: [[T2:%.*]] = icmp eq i8 [[T1]], 0 // CHECK-NEXT: br i1 [[T2]] @@ -328,7 +328,7 @@ namespace test8 { // CHECK-LABEL: define void @_ZN5test84testEv() void test() { - // CHECK: [[T0:%.*]] = load atomic i8* bitcast (i32* @_ZGVZN5test84testEvE1x to i8*) acquire, align 1 + // CHECK: [[T0:%.*]] = load atomic i8, i8* bitcast (i32* @_ZGVZN5test84testEvE1x to i8*) acquire, align 1 // CHECK-NEXT: [[T1:%.*]] = and i8 [[T0]], 1 // CHECK-NEXT: [[T2:%.*]] = icmp eq i8 [[T1]], 0 // CHECK-NEXT: br i1 [[T2]] @@ -374,7 +374,7 @@ namespace test9 { } // CHECK: define [[TEST9:%.*]]* @_ZN5test97testNewEj(i32 // CHECK: [[N_VAR:%.*]] = alloca i32, align 4 -// CHECK: [[N:%.*]] = load i32* [[N_VAR]], align 4 +// CHECK: [[N:%.*]] = load i32, i32* [[N_VAR]], align 4 // CHECK-NEXT: [[T0:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[N]], i32 16) // CHECK-NEXT: [[O0:%.*]] = extractvalue { i32, i1 } [[T0]], 1 // CHECK-NEXT: [[T1:%.*]] = extractvalue { i32, i1 } [[T0]], 0 @@ -396,14 +396,14 @@ namespace test9 { delete[] array; } // CHECK-LABEL: define void @_ZN5test910testDeleteEPNS_1AE( -// CHECK: [[BEGIN:%.*]] = load [[TEST9]]** +// CHECK: [[BEGIN:%.*]] = load [[TEST9]]*, [[TEST9]]** // CHECK-NEXT: [[T0:%.*]] = icmp eq [[TEST9]]* [[BEGIN]], null // CHECK-NEXT: br i1 [[T0]], // CHECK: [[T0:%.*]] = bitcast [[TEST9]]* [[BEGIN]] to i8* // CHECK-NEXT: [[ALLOC:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 -16 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8, i8* [[ALLOC]], i64 4 // CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to i32* -// CHECK-NEXT: [[N:%.*]] = load i32* [[T1]] +// CHECK-NEXT: [[N:%.*]] = load i32, i32* [[T1]] // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds [[TEST9]], [[TEST9]]* [[BEGIN]], i32 [[N]] // CHECK-NEXT: [[T0:%.*]] = icmp eq [[TEST9]]* [[BEGIN]], [[END]] // CHECK-NEXT: br i1 [[T0]], diff --git a/test/CodeGenCXX/arm64-constructor-return.cpp b/test/CodeGenCXX/arm64-constructor-return.cpp index 0d5b3b3825..3adc1b7a28 100644 --- a/test/CodeGenCXX/arm64-constructor-return.cpp +++ b/test/CodeGenCXX/arm64-constructor-return.cpp @@ -15,5 +15,5 @@ S::S() { // CHECK: %struct.S* @_ZN1SC1Ev(%struct.S* returned %this) // CHECK: 
[[THISADDR:%[a-zA-z0-9.]+]] = alloca %struct.S* // CHECK: store %struct.S* %this, %struct.S** [[THISADDR]] -// CHECK: [[THIS1:%.*]] = load %struct.S** [[THISADDR]] +// CHECK: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THISADDR]] // CHECK: ret %struct.S* [[THIS1]] diff --git a/test/CodeGenCXX/arm64-empty-struct.cpp b/test/CodeGenCXX/arm64-empty-struct.cpp index 6fa4e95295..6053c4af42 100644 --- a/test/CodeGenCXX/arm64-empty-struct.cpp +++ b/test/CodeGenCXX/arm64-empty-struct.cpp @@ -9,7 +9,7 @@ int take_args(int a, ...) { // CHECK: call void @llvm.va_start emptyvar = __builtin_va_arg(l, Empty); -// CHECK: load i8** +// CHECK: load i8*, i8** // CHECK-NOT: getelementptr // CHECK: [[EMPTY_PTR:%[a-zA-Z0-9._]+]] = bitcast i8* {{%[a-zA-Z0-9._]+}} to %struct.Empty* @@ -17,7 +17,7 @@ int take_args(int a, ...) { // (e.g. it's at the very bottom of the stack and the next page is // invalid). This doesn't matter provided it's never loaded (there's no // well-defined way to tell), but it becomes a problem if we do try to use it. -// CHECK-NOT: load %struct.Empty* [[EMPTY_PTR]] +// CHECK-NOT: load %struct.Empty, %struct.Empty* [[EMPTY_PTR]] int i = __builtin_va_arg(l, int); // CHECK: va_arg i8** {{%[a-zA-Z0-9._]+}}, i32 diff --git a/test/CodeGenCXX/bitfield.cpp b/test/CodeGenCXX/bitfield.cpp index c05a0faca4..7f55b4daa2 100644 --- a/test/CodeGenCXX/bitfield.cpp +++ b/test/CodeGenCXX/bitfield.cpp @@ -22,13 +22,13 @@ namespace N0 { unsigned read00(S* s) { // CHECK-X86-64-LABEL: define i32 @_ZN2N06read00 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-X86-64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-X86-64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-X86-64: %[[and:.*]] = and i64 %[[val]], 16383 // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 // CHECK-X86-64: ret i32 %[[trunc]] // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N06read00 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-PPC64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-PPC64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 50 // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[shr]] to i32 // CHECK-PPC64: ret i32 %[[trunc]] @@ -37,14 +37,14 @@ namespace N0 { unsigned read01(S* s) { // CHECK-X86-64-LABEL: define i32 @_ZN2N06read01 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-X86-64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-X86-64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 14 // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 3 // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 // CHECK-X86-64: ret i32 %[[trunc]] // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N06read01 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-PPC64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-PPC64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 48 // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 3 // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 @@ -54,14 +54,14 @@ namespace N0 { unsigned read20(S* s) { // CHECK-X86-64-LABEL: define i32 @_ZN2N06read20 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-X86-64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-X86-64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 16 // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 63 // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 // CHECK-X86-64: ret i32 
%[[trunc]] // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N06read20 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-PPC64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-PPC64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 42 // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 63 // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 @@ -71,14 +71,14 @@ namespace N0 { unsigned read21(S* s) { // CHECK-X86-64-LABEL: define i32 @_ZN2N06read21 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-X86-64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-X86-64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 22 // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 3 // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 // CHECK-X86-64: ret i32 %[[trunc]] // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N06read21 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-PPC64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-PPC64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 40 // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 3 // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 @@ -88,14 +88,14 @@ namespace N0 { unsigned read30(S* s) { // CHECK-X86-64-LABEL: define i32 @_ZN2N06read30 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-X86-64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-X86-64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 24 // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 1073741823 // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 // CHECK-X86-64: ret i32 %[[trunc]] // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N06read30 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-PPC64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-PPC64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 10 // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 1073741823 // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 @@ -105,14 +105,14 @@ namespace N0 { unsigned read31(S* s) { // CHECK-X86-64-LABEL: define i32 @_ZN2N06read31 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-X86-64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-X86-64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 54 // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 3 // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 // CHECK-X86-64: ret i32 %[[trunc]] // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N06read31 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-PPC64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-PPC64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 8 // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 3 // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 @@ -122,14 +122,14 @@ namespace N0 { unsigned read70(S* s) { // CHECK-X86-64-LABEL: define i32 @_ZN2N06read70 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64* - // CHECK-X86-64: %[[val:.*]] = load i64* %[[ptr]] + // CHECK-X86-64: %[[val:.*]] = load i64, i64* %[[ptr]] // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 56 // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 63 // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32 // CHECK-X86-64: ret i32 %[[trunc]] // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N06read70 
 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64*
- // CHECK-PPC64: %[[val:.*]] = load i64* %[[ptr]]
+ // CHECK-PPC64: %[[val:.*]] = load i64, i64* %[[ptr]]
 // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 2
 // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 63
 // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
@@ -139,13 +139,13 @@ namespace N0 {
 unsigned read71(S* s) {
 // CHECK-X86-64-LABEL: define i32 @_ZN2N06read71
 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64*
- // CHECK-X86-64: %[[val:.*]] = load i64* %[[ptr]]
+ // CHECK-X86-64: %[[val:.*]] = load i64, i64* %[[ptr]]
 // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 62
 // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[shr]] to i32
 // CHECK-X86-64: ret i32 %[[trunc]]
 // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N06read71
 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i64*
- // CHECK-PPC64: %[[val:.*]] = load i64* %[[ptr]]
+ // CHECK-PPC64: %[[val:.*]] = load i64, i64* %[[ptr]]
 // CHECK-PPC64: %[[and:.*]] = and i64 %[[val]], 3
 // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
 // CHECK-PPC64: ret i32 %[[trunc]]
@@ -168,13 +168,13 @@ namespace N1 {
 unsigned read(S* s) {
 // CHECK-X86-64-LABEL: define i32 @_ZN2N14read
 // CHECK-X86-64: %[[ptr:.*]] = getelementptr inbounds %{{.*}}, %{{.*}}* %{{.*}}, i32 0, i32 1
- // CHECK-X86-64: %[[val:.*]] = load i8* %[[ptr]]
+ // CHECK-X86-64: %[[val:.*]] = load i8, i8* %[[ptr]]
 // CHECK-X86-64: %[[and:.*]] = and i8 %[[val]], 1
 // CHECK-X86-64: %[[ext:.*]] = zext i8 %[[and]] to i32
 // CHECK-X86-64: ret i32 %[[ext]]
 // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N14read
 // CHECK-PPC64: %[[ptr:.*]] = getelementptr inbounds %{{.*}}, %{{.*}}* %{{.*}}, i32 0, i32 1
- // CHECK-PPC64: %[[val:.*]] = load i8* %[[ptr]]
+ // CHECK-PPC64: %[[val:.*]] = load i8, i8* %[[ptr]]
 // CHECK-PPC64: %[[shr:.*]] = lshr i8 %[[val]], 7
 // CHECK-PPC64: %[[ext:.*]] = zext i8 %[[shr]] to i32
 // CHECK-PPC64: ret i32 %[[ext]]
@@ -184,7 +184,7 @@ namespace N1 {
 // CHECK-X86-64-LABEL: define void @_ZN2N15write
 // CHECK-X86-64: %[[ptr:.*]] = getelementptr inbounds %{{.*}}, %{{.*}}* %{{.*}}, i32 0, i32 1
 // CHECK-X86-64: %[[x_trunc:.*]] = trunc i32 %{{.*}} to i8
- // CHECK-X86-64: %[[old:.*]] = load i8* %[[ptr]]
+ // CHECK-X86-64: %[[old:.*]] = load i8, i8* %[[ptr]]
 // CHECK-X86-64: %[[x_and:.*]] = and i8 %[[x_trunc]], 1
 // CHECK-X86-64: %[[old_and:.*]] = and i8 %[[old]], -2
 // CHECK-X86-64: %[[new:.*]] = or i8 %[[old_and]], %[[x_and]]
@@ -192,7 +192,7 @@ namespace N1 {
 // CHECK-PPC64-LABEL: define void @_ZN2N15write
 // CHECK-PPC64: %[[ptr:.*]] = getelementptr inbounds %{{.*}}, %{{.*}}* %{{.*}}, i32 0, i32 1
 // CHECK-PPC64: %[[x_trunc:.*]] = trunc i32 %{{.*}} to i8
- // CHECK-PPC64: %[[old:.*]] = load i8* %[[ptr]]
+ // CHECK-PPC64: %[[old:.*]] = load i8, i8* %[[ptr]]
 // CHECK-PPC64: %[[x_and:.*]] = and i8 %[[x_trunc]], 1
 // CHECK-PPC64: %[[x_shl:.*]] = shl i8 %[[x_and]], 7
 // CHECK-PPC64: %[[old_and:.*]] = and i8 %[[old]], 127
@@ -212,12 +212,12 @@ namespace N2 {
 unsigned read(S* s) {
 // CHECK-X86-64-LABEL: define i32 @_ZN2N24read
 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
- // CHECK-X86-64: %[[val:.*]] = load i32* %[[ptr]]
+ // CHECK-X86-64: %[[val:.*]] = load i32, i32* %[[ptr]]
 // CHECK-X86-64: %[[and:.*]] = and i32 %[[val]], 16777215
 // CHECK-X86-64: ret i32 %[[and]]
 // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N24read
 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
- // CHECK-PPC64: %[[val:.*]] = load i32* %[[ptr]]
+ // CHECK-PPC64: %[[val:.*]] = load i32, i32* %[[ptr]]
 // CHECK-PPC64: %[[shr:.*]] = lshr i32 %[[val]], 8
 // CHECK-PPC64: ret i32 %[[shr]]
 return s->b;
@@ -225,14 +225,14 @@ namespace N2 {
 void write(S* s, unsigned x) {
 // CHECK-X86-64-LABEL: define void @_ZN2N25write
 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
- // CHECK-X86-64: %[[old:.*]] = load i32* %[[ptr]]
+ // CHECK-X86-64: %[[old:.*]] = load i32, i32* %[[ptr]]
 // CHECK-X86-64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215
 // CHECK-X86-64: %[[old_and:.*]] = and i32 %[[old]], -16777216
 // CHECK-X86-64: %[[new:.*]] = or i32 %[[old_and]], %[[x_and]]
 // CHECK-X86-64: store i32 %[[new]], i32* %[[ptr]]
 // CHECK-PPC64-LABEL: define void @_ZN2N25write
 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
- // CHECK-PPC64: %[[old:.*]] = load i32* %[[ptr]]
+ // CHECK-PPC64: %[[old:.*]] = load i32, i32* %[[ptr]]
 // CHECK-PPC64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215
 // CHECK-PPC64: %[[x_shl:.*]] = shl i32 %[[x_and]], 8
 // CHECK-PPC64: %[[old_and:.*]] = and i32 %[[old]], 255
@@ -251,12 +251,12 @@ namespace N3 {
 unsigned read(S* s) {
 // CHECK-X86-64-LABEL: define i32 @_ZN2N34read
 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
- // CHECK-X86-64: %[[val:.*]] = load i32* %[[ptr]]
+ // CHECK-X86-64: %[[val:.*]] = load i32, i32* %[[ptr]]
 // CHECK-X86-64: %[[and:.*]] = and i32 %[[val]], 16777215
 // CHECK-X86-64: ret i32 %[[and]]
 // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N34read
 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
- // CHECK-PPC64: %[[val:.*]] = load i32* %[[ptr]]
+ // CHECK-PPC64: %[[val:.*]] = load i32, i32* %[[ptr]]
 // CHECK-PPC64: %[[shr:.*]] = lshr i32 %[[val]], 8
 // CHECK-PPC64: ret i32 %[[shr]]
 return s->b;
@@ -264,14 +264,14 @@ namespace N3 {
 void write(S* s, unsigned x) {
 // CHECK-X86-64-LABEL: define void @_ZN2N35write
 // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
- // CHECK-X86-64: %[[old:.*]] = load i32* %[[ptr]]
+ // CHECK-X86-64: %[[old:.*]] = load i32, i32* %[[ptr]]
 // CHECK-X86-64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215
 // CHECK-X86-64: %[[old_and:.*]] = and i32 %[[old]], -16777216
 // CHECK-X86-64: %[[new:.*]] = or i32 %[[old_and]], %[[x_and]]
 // CHECK-X86-64: store i32 %[[new]], i32* %[[ptr]]
 // CHECK-PPC64-LABEL: define void @_ZN2N35write
 // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
- // CHECK-PPC64: %[[old:.*]] = load i32* %[[ptr]]
+ // CHECK-PPC64: %[[old:.*]] = load i32, i32* %[[ptr]]
 // CHECK-PPC64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215
 // CHECK-PPC64: %[[x_shl:.*]] = shl i32 %[[x_and]], 8
 // CHECK-PPC64: %[[old_and:.*]] = and i32 %[[old]], 255
@@ -303,13 +303,13 @@ namespace N4 {
 // CHECK-X86-64-LABEL: define i32 @_ZN2N44read
 // CHECK-X86-64: %[[gep:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %{{.*}}, i32 0, i32 1
 // CHECK-X86-64: %[[ptr:.*]] = bitcast [3 x i8]* %[[gep]] to i24*
- // CHECK-X86-64: %[[val:.*]] = load i24* %[[ptr]]
+ // CHECK-X86-64: %[[val:.*]] = load i24, i24* %[[ptr]]
 // CHECK-X86-64: %[[ext:.*]] = zext i24 %[[val]] to i32
 // CHECK-X86-64: ret i32 %[[ext]]
 // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N44read
 // CHECK-PPC64: %[[gep:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %{{.*}}, i32 0, i32 1
 // CHECK-PPC64: %[[ptr:.*]] = bitcast [3 x i8]* %[[gep]] to i24*
- // CHECK-PPC64: %[[val:.*]] = load i24* %[[ptr]]
+ // CHECK-PPC64: %[[val:.*]] = load i24, i24* %[[ptr]]
 // CHECK-PPC64: %[[ext:.*]] = zext i24 %[[val]] to i32
 // CHECK-PPC64: ret i32 %[[ext]]
 return s->b;
@@ -344,12 +344,12 @@ namespace N5 {
 unsigned read(U* u) {
// CHECK-X86-64-LABEL: define i32 @_ZN2N54read // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32* - // CHECK-X86-64: %[[val:.*]] = load i32* %[[ptr]] + // CHECK-X86-64: %[[val:.*]] = load i32, i32* %[[ptr]] // CHECK-X86-64: %[[and:.*]] = and i32 %[[val]], 16777215 // CHECK-X86-64: ret i32 %[[and]] // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N54read // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32* - // CHECK-PPC64: %[[val:.*]] = load i32* %[[ptr]] + // CHECK-PPC64: %[[val:.*]] = load i32, i32* %[[ptr]] // CHECK-PPC64: %[[shr:.*]] = lshr i32 %[[val]], 8 // CHECK-PPC64: ret i32 %[[shr]] return u->y.b; @@ -357,14 +357,14 @@ namespace N5 { void write(U* u, unsigned x) { // CHECK-X86-64-LABEL: define void @_ZN2N55write // CHECK-X86-64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32* - // CHECK-X86-64: %[[old:.*]] = load i32* %[[ptr]] + // CHECK-X86-64: %[[old:.*]] = load i32, i32* %[[ptr]] // CHECK-X86-64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215 // CHECK-X86-64: %[[old_and:.*]] = and i32 %[[old]], -16777216 // CHECK-X86-64: %[[new:.*]] = or i32 %[[old_and]], %[[x_and]] // CHECK-X86-64: store i32 %[[new]], i32* %[[ptr]] // CHECK-PPC64-LABEL: define void @_ZN2N55write // CHECK-PPC64: %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32* - // CHECK-PPC64: %[[old:.*]] = load i32* %[[ptr]] + // CHECK-PPC64: %[[old:.*]] = load i32, i32* %[[ptr]] // CHECK-PPC64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215 // CHECK-PPC64: %[[x_shl:.*]] = shl i32 %[[x_and]], 8 // CHECK-PPC64: %[[old_and:.*]] = and i32 %[[old]], 255 @@ -389,19 +389,19 @@ namespace N6 { unsigned read(S* s) { // CHECK-X86-64-LABEL: define i32 @_ZN2N64read // CHECK-X86-64: %[[ptr1:.*]] = bitcast {{.*}}* %{{.*}} to i24* - // CHECK-X86-64: %[[val1:.*]] = load i24* %[[ptr1]] + // CHECK-X86-64: %[[val1:.*]] = load i24, i24* %[[ptr1]] // CHECK-X86-64: %[[ext1:.*]] = zext i24 %[[val1]] to i32 // CHECK-X86-64: %[[ptr2:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %{{.*}}, i32 0, i32 1 - // CHECK-X86-64: %[[val2:.*]] = load i8* %[[ptr2]] + // CHECK-X86-64: %[[val2:.*]] = load i8, i8* %[[ptr2]] // CHECK-X86-64: %[[ext2:.*]] = zext i8 %[[val2]] to i32 // CHECK-X86-64: %[[add:.*]] = add nsw i32 %[[ext1]], %[[ext2]] // CHECK-X86-64: ret i32 %[[add]] // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N64read // CHECK-PPC64: %[[ptr1:.*]] = bitcast {{.*}}* %{{.*}} to i24* - // CHECK-PPC64: %[[val1:.*]] = load i24* %[[ptr1]] + // CHECK-PPC64: %[[val1:.*]] = load i24, i24* %[[ptr1]] // CHECK-PPC64: %[[ext1:.*]] = zext i24 %[[val1]] to i32 // CHECK-PPC64: %[[ptr2:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %{{.*}}, i32 0, i32 1 - // CHECK-PPC64: %[[val2:.*]] = load i8* %[[ptr2]] + // CHECK-PPC64: %[[val2:.*]] = load i8, i8* %[[ptr2]] // CHECK-PPC64: %[[ext2:.*]] = zext i8 %[[val2]] to i32 // CHECK-PPC64: %[[add:.*]] = add nsw i32 %[[ext1]], %[[ext2]] // CHECK-PPC64: ret i32 %[[add]] @@ -453,13 +453,13 @@ namespace N7 { // CHECK-X86-64-LABEL: define i32 @_ZN2N74read // CHECK-X86-64: %[[gep:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %{{.*}}, i32 0, i32 1 // CHECK-X86-64: %[[ptr:.*]] = bitcast [3 x i8]* %[[gep]] to i24* - // CHECK-X86-64: %[[val:.*]] = load i24* %[[ptr]] + // CHECK-X86-64: %[[val:.*]] = load i24, i24* %[[ptr]] // CHECK-X86-64: %[[ext:.*]] = zext i24 %[[val]] to i32 // CHECK-X86-64: ret i32 %[[ext]] // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N74read // CHECK-PPC64: %[[gep:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %{{.*}}, i32 0, i32 1 // CHECK-PPC64: %[[ptr:.*]] = bitcast [3 x i8]* %[[gep]] to i24* - // CHECK-PPC64: 
%[[val:.*]] = load i24* %[[ptr]] + // CHECK-PPC64: %[[val:.*]] = load i24, i24* %[[ptr]] // CHECK-PPC64: %[[ext:.*]] = zext i24 %[[val]] to i32 // CHECK-PPC64: ret i32 %[[ext]] return s->b; diff --git a/test/CodeGenCXX/blocks-cxx11.cpp b/test/CodeGenCXX/blocks-cxx11.cpp index 7d7074dffe..79aa9891bf 100644 --- a/test/CodeGenCXX/blocks-cxx11.cpp +++ b/test/CodeGenCXX/blocks-cxx11.cpp @@ -51,7 +51,7 @@ namespace test_complex_int { // CHECK: store i32 500, // CHECK-NEXT: store i32 0, // CHECK-NEXT: [[COERCE:%.*]] = bitcast - // CHECK-NEXT: [[CVAL:%.*]] = load i64* [[COERCE]] + // CHECK-NEXT: [[CVAL:%.*]] = load i64, i64* [[COERCE]] // CHECK-NEXT: call void @_Z13takeItByValueICiEvT_(i64 [[CVAL]]) } } @@ -70,14 +70,14 @@ namespace test_complex_int_ref_mutable { void test() { const _Complex int &x = y; takeABlock(^{ takeItByValue(x); }); - // CHECK: [[R:%.*]] = load i32* getelementptr inbounds ({ i32, i32 }* @_ZN28test_complex_int_ref_mutable1yE, i32 0, i32 0) - // CHECK-NEXT: [[I:%.*]] = load i32* getelementptr inbounds ({ i32, i32 }* @_ZN28test_complex_int_ref_mutable1yE, i32 0, i32 1) + // CHECK: [[R:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }* @_ZN28test_complex_int_ref_mutable1yE, i32 0, i32 0) + // CHECK-NEXT: [[I:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }* @_ZN28test_complex_int_ref_mutable1yE, i32 0, i32 1) // CHECK-NEXT: [[RSLOT:%.*]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[CSLOT:%.*]], i32 0, i32 0 // CHECK-NEXT: [[ISLOT:%.*]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[CSLOT]], i32 0, i32 1 // CHECK-NEXT: store i32 [[R]], i32* [[RSLOT]] // CHECK-NEXT: store i32 [[I]], i32* [[ISLOT]] // CHECK-NEXT: [[COERCE:%.*]] = bitcast { i32, i32 }* [[CSLOT]] to i64* - // CHECK-NEXT: [[CVAL:%.*]] = load i64* [[COERCE]], + // CHECK-NEXT: [[CVAL:%.*]] = load i64, i64* [[COERCE]], // CHECK-NEXT: call void @_Z13takeItByValueICiEvT_(i64 [[CVAL]]) } } @@ -102,7 +102,7 @@ namespace test_block_in_lambda { } // CHECK-LABEL: define internal void @"_ZZN20test_block_in_lambda4testENS_1AEENK3$_0clEv"( // CHECK: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]], align 8 - // CHECK: [[THIS:%.*]] = load [[LAMBDA_T:%.*]]** + // CHECK: [[THIS:%.*]] = load [[LAMBDA_T:%.*]]*, [[LAMBDA_T:%.*]]** // CHECK: [[TO_DESTROY:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[LAMBDA_T]], [[LAMBDA_T]]* [[THIS]], i32 0, i32 0 diff --git a/test/CodeGenCXX/blocks.cpp b/test/CodeGenCXX/blocks.cpp index f5f79a079b..5b7c7e6e46 100644 --- a/test/CodeGenCXX/blocks.cpp +++ b/test/CodeGenCXX/blocks.cpp @@ -122,7 +122,7 @@ namespace test4 { // CHECK-LABEL: define internal void @___ZN5test44testEv_block_invoke // CHECK: [[TMP:%.*]] = alloca [[A:%.*]], align 1 // CHECK-NEXT: store i8* [[BLOCKDESC:%.*]], i8** {{.*}}, align 8 - // CHECK-NEXT: load i8** + // CHECK-NEXT: load i8*, i8** // CHECK-NEXT: bitcast i8* [[BLOCKDESC]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* // CHECK: call void @_ZN5test41AC1Ev([[A]]* [[TMP]]) // CHECK-NEXT: call void @_ZN5test43fooENS_1AE([[A]]* [[TMP]]) @@ -157,7 +157,7 @@ namespace test5 { // CHECK-NEXT: store i8 [[T0]], i8* [[COND]], align 1 // CHECK-NEXT: call void @_ZN5test51AC1Ev([[A]]* [[X]]) // CHECK-NEXT: [[CLEANUP_ADDR:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 - // CHECK-NEXT: [[T0:%.*]] = load i8* [[COND]], align 1 + // 
CHECK-NEXT: [[T0:%.*]] = load i8, i8* [[COND]], align 1 // CHECK-NEXT: [[T1:%.*]] = trunc i8 [[T0]] to i1 // CHECK-NEXT: store i1 false, i1* [[CLEANUP_ACTIVE]] // CHECK-NEXT: br i1 [[T1]], @@ -173,7 +173,7 @@ namespace test5 { // CHECK-NEXT: store // CHECK-NEXT: load // CHECK-NEXT: call void @_ZN5test511doWithBlockEU13block_pointerFvvE( - // CHECK-NEXT: [[T0:%.*]] = load i1* [[CLEANUP_ACTIVE]] + // CHECK-NEXT: [[T0:%.*]] = load i1, i1* [[CLEANUP_ACTIVE]] // CHECK-NEXT: br i1 [[T0]] // CHECK: call void @_ZN5test51AD1Ev([[A]]* [[CLEANUP_ADDR]]) // CHECK-NEXT: br label diff --git a/test/CodeGenCXX/captured-statements.cpp b/test/CodeGenCXX/captured-statements.cpp index 7f35ea70e8..56fe4c61c9 100644 --- a/test/CodeGenCXX/captured-statements.cpp +++ b/test/CodeGenCXX/captured-statements.cpp @@ -63,8 +63,8 @@ void test2(int x) { // // CHECK-2: define internal void @[[HelperName]] // CHECK-2: getelementptr inbounds %[[Capture]], %[[Capture]]* - // CHECK-2: load i32** - // CHECK-2: load i32* + // CHECK-2: load i32*, i32** + // CHECK-2: load i32, i32* } void test3(int x) { diff --git a/test/CodeGenCXX/catch-undef-behavior.cpp b/test/CodeGenCXX/catch-undef-behavior.cpp index 160ee58dbd..d0491a9dc9 100644 --- a/test/CodeGenCXX/catch-undef-behavior.cpp +++ b/test/CodeGenCXX/catch-undef-behavior.cpp @@ -57,7 +57,7 @@ void member_access(S *p) { // (1b) Check that 'p' actually points to an 'S'. // CHECK: %[[VPTRADDR:.*]] = bitcast {{.*}} to i64* - // CHECK-NEXT: %[[VPTR:.*]] = load i64* %[[VPTRADDR]] + // CHECK-NEXT: %[[VPTR:.*]] = load i64, i64* %[[VPTRADDR]] // // hash_16_bytes: // @@ -82,7 +82,7 @@ void member_access(S *p) { // // CHECK-NEXT: %[[IDX:.*]] = and i64 %{{.*}}, 127 // CHECK-NEXT: getelementptr inbounds [128 x i64], [128 x i64]* @__ubsan_vptr_type_cache, i32 0, i64 %[[IDX]] - // CHECK-NEXT: %[[CACHEVAL:.*]] = load i64* + // CHECK-NEXT: %[[CACHEVAL:.*]] = load i64, i64* // CHECK-NEXT: icmp eq i64 %[[CACHEVAL]], %[[HASH]] // CHECK-NEXT: br i1 @@ -116,7 +116,7 @@ void member_access(S *p) { // (3b) Check that 'p' actually points to an 'S' - // CHECK: load i64* + // CHECK: load i64, i64* // CHECK-NEXT: xor i64 {{-4030275160588942838|2562089159}}, // [...] 
// CHECK: getelementptr inbounds [128 x i64], [128 x i64]* @__ubsan_vptr_type_cache, i32 0, i64 % @@ -399,13 +399,13 @@ void indirect_function_call(void (*p)(int)) { // Signature check // CHECK-NEXT: [[SIGPTR:%[0-9]*]] = getelementptr <{ i32, i8* }>, <{ i32, i8* }>* [[PTR]], i32 0, i32 0 - // CHECK-NEXT: [[SIG:%[0-9]*]] = load i32* [[SIGPTR]] + // CHECK-NEXT: [[SIG:%[0-9]*]] = load i32, i32* [[SIGPTR]] // CHECK-NEXT: [[SIGCMP:%[0-9]*]] = icmp eq i32 [[SIG]], 1413876459 // CHECK-NEXT: br i1 [[SIGCMP]] // RTTI pointer check // CHECK: [[RTTIPTR:%[0-9]*]] = getelementptr <{ i32, i8* }>, <{ i32, i8* }>* [[PTR]], i32 0, i32 1 - // CHECK-NEXT: [[RTTI:%[0-9]*]] = load i8** [[RTTIPTR]] + // CHECK-NEXT: [[RTTI:%[0-9]*]] = load i8*, i8** [[RTTIPTR]] // CHECK-NEXT: [[RTTICMP:%[0-9]*]] = icmp eq i8* [[RTTI]], bitcast ({ i8*, i8* }* @_ZTIFviE to i8*) // CHECK-NEXT: br i1 [[RTTICMP]] p(42); diff --git a/test/CodeGenCXX/compound-literals.cpp b/test/CodeGenCXX/compound-literals.cpp index 0e565b7655..2b77d2490a 100644 --- a/test/CodeGenCXX/compound-literals.cpp +++ b/test/CodeGenCXX/compound-literals.cpp @@ -20,7 +20,7 @@ int f() { // CHECK-NEXT: [[X:%[a-z0-9]+]] = getelementptr inbounds {{.*}} [[LVALUE]], i32 0, i32 1 // CHECK-NEXT: call %struct.X* @_ZN1XC1EPKc({{.*}}[[X]] // CHECK-NEXT: [[I:%[a-z0-9]+]] = getelementptr inbounds {{.*}} [[LVALUE]], i32 0, i32 0 - // CHECK-NEXT: [[RESULT:%[a-z0-9]+]] = load i32* + // CHECK-NEXT: [[RESULT:%[a-z0-9]+]] = load i32, i32* // CHECK-NEXT: call %struct.Y* @_ZN1YD1Ev // CHECK-NEXT: ret i32 [[RESULT]] return ((Y){17, "seventeen"}).i; @@ -31,9 +31,9 @@ int g() { // CHECK: store [2 x i32]* %{{[a-z0-9.]+}}, [2 x i32]** [[V:%[a-z0-9.]+]] const int (&v)[2] = (int [2]) {1,2}; - // CHECK: [[A:%[a-z0-9.]+]] = load [2 x i32]** [[V]] + // CHECK: [[A:%[a-z0-9.]+]] = load [2 x i32]*, [2 x i32]** [[V]] // CHECK-NEXT: [[A0ADDR:%[a-z0-9.]+]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A]], i32 0, {{.*}} 0 - // CHECK-NEXT: [[A0:%[a-z0-9.]+]] = load i32* [[A0ADDR]] + // CHECK-NEXT: [[A0:%[a-z0-9.]+]] = load i32, i32* [[A0ADDR]] // CHECK-NEXT: ret i32 [[A0]] return v[0]; } diff --git a/test/CodeGenCXX/condition.cpp b/test/CodeGenCXX/condition.cpp index 452f1c3c9b..fbba07769d 100644 --- a/test/CodeGenCXX/condition.cpp +++ b/test/CodeGenCXX/condition.cpp @@ -116,7 +116,7 @@ void while_destruct(int z) { // Cleanup. // CHECK: call void @_ZN1XD1Ev - // CHECK-NEXT: [[DEST:%.*]] = load i32* [[CLEANUPDEST]] + // CHECK-NEXT: [[DEST:%.*]] = load i32, i32* [[CLEANUPDEST]] // CHECK-NEXT: switch i32 [[DEST]] } @@ -163,7 +163,7 @@ void for_destruct(int z) { z = 23; // %for.inc: - // CHECK: [[TMP:%.*]] = load i32* [[Z]] + // CHECK: [[TMP:%.*]] = load i32, i32* [[Z]] // CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP]], 1 // CHECK-NEXT: store i32 [[INC]], i32* [[Z]] // CHECK-NEXT: store i32 0, i32* [[CLEANUPDEST]] @@ -172,7 +172,7 @@ void for_destruct(int z) { // %cleanup: Destroys X. 
// CHECK: call void @_ZN1XD1Ev - // CHECK-NEXT: [[YDESTTMP:%.*]] = load i32* [[CLEANUPDEST]] + // CHECK-NEXT: [[YDESTTMP:%.*]] = load i32, i32* [[CLEANUPDEST]] // CHECK-NEXT: switch i32 [[YDESTTMP]] // 0 -> %cleanup.cont, default -> %cleanup1 @@ -207,7 +207,7 @@ void for_destruct(int z) { // %for.inc11: // CHECK: call void @_Z4getXv - // CHECK-NEXT: load i32* [[I]] + // CHECK-NEXT: load i32, i32* [[I]] // CHECK-NEXT: add // CHECK-NEXT: store // CHECK-NEXT: call void @_ZN1XD1Ev diff --git a/test/CodeGenCXX/conditional-gnu-ext.cpp b/test/CodeGenCXX/conditional-gnu-ext.cpp index 3a61a63cf4..174c67b82f 100644 --- a/test/CodeGenCXX/conditional-gnu-ext.cpp +++ b/test/CodeGenCXX/conditional-gnu-ext.cpp @@ -80,7 +80,7 @@ namespace test3 { // CHECK-LABEL: define void @_ZN5test35test0ERNS_1BE( // CHECK: [[X:%.*]] = alloca [[B:%.*]]*, // CHECK-NEXT: store [[B]]* {{%.*}}, [[B]]** [[X]] - // CHECK-NEXT: [[T0:%.*]] = load [[B]]** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load [[B]]*, [[B]]** [[X]] // CHECK-NEXT: [[BOOL:%.*]] = call zeroext i1 @_ZN5test31BcvbEv([[B]]* [[T0]]) // CHECK-NEXT: br i1 [[BOOL]] // CHECK: call void @_ZN5test31BC1ERKS0_([[B]]* [[RESULT:%.*]], [[B]]* dereferenceable({{[0-9]+}}) [[T0]]) @@ -112,7 +112,7 @@ namespace test3 { // CHECK-LABEL: define void @_ZN5test35test2ERNS_1BE( // CHECK: [[X:%.*]] = alloca [[B]]*, // CHECK-NEXT: store [[B]]* {{%.*}}, [[B]]** [[X]] - // CHECK-NEXT: [[T0:%.*]] = load [[B]]** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load [[B]]*, [[B]]** [[X]] // CHECK-NEXT: [[BOOL:%.*]] = call zeroext i1 @_ZN5test31BcvbEv([[B]]* [[T0]]) // CHECK-NEXT: br i1 [[BOOL]] // CHECK: call void @_ZN5test31BcvNS_1AEEv([[A:%.*]]* sret [[RESULT:%.*]], [[B]]* [[T0]]) diff --git a/test/CodeGenCXX/const-init-cxx11.cpp b/test/CodeGenCXX/const-init-cxx11.cpp index 2992488c0d..e12e336813 100644 --- a/test/CodeGenCXX/const-init-cxx11.cpp +++ b/test/CodeGenCXX/const-init-cxx11.cpp @@ -497,7 +497,7 @@ namespace Unreferenced { // We must not emit a load of 'p' here, since it's not odr-used. int q = *p; // CHECK-NOT: _ZN12Unreferenced1pE - // CHECK: = load i32* @_ZN12Unreferenced1nE + // CHECK: = load i32, i32* @_ZN12Unreferenced1nE // CHECK-NEXT: store i32 {{.*}}, i32* @_ZN12Unreferenced1qE // CHECK-NOT: _ZN12Unreferenced1pE diff --git a/test/CodeGenCXX/constructor-destructor-return-this.cpp b/test/CodeGenCXX/constructor-destructor-return-this.cpp index 6a7c98dc82..893e3a079f 100644 --- a/test/CodeGenCXX/constructor-destructor-return-this.cpp +++ b/test/CodeGenCXX/constructor-destructor-return-this.cpp @@ -130,7 +130,7 @@ void test_destructor() { // Verify that virtual calls to destructors are not marked with a 'returned' // this parameter at the call site... 
// CHECKARM: [[VFN:%.*]] = getelementptr inbounds %class.E* (%class.E*)*, %class.E* (%class.E*)** -// CHECKARM: [[THUNK:%.*]] = load %class.E* (%class.E*)** [[VFN]] +// CHECKARM: [[THUNK:%.*]] = load %class.E* (%class.E*)*, %class.E* (%class.E*)** [[VFN]] // CHECKARM: call %class.E* [[THUNK]](%class.E* % // ...but static calls create declarations with 'returned' this diff --git a/test/CodeGenCXX/constructor-init.cpp b/test/CodeGenCXX/constructor-init.cpp index c9ea4e11d9..ec9373ea70 100644 --- a/test/CodeGenCXX/constructor-init.cpp +++ b/test/CodeGenCXX/constructor-init.cpp @@ -96,9 +96,9 @@ namespace InitVTable { // CHECK-LABEL: define void @_ZN10InitVTable1BC2Ev(%"struct.InitVTable::B"* %this) unnamed_addr // CHECK: [[T0:%.*]] = bitcast [[B:%.*]]* [[THIS:%.*]] to i32 (...)*** // CHECK-NEXT: store i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*]* @_ZTVN10InitVTable1BE, i64 0, i64 2) to i32 (...)**), i32 (...)*** [[T0]] - // CHECK: [[VTBL:%.*]] = load i32 ([[B]]*)*** {{%.*}} + // CHECK: [[VTBL:%.*]] = load i32 ([[B]]*)**, i32 ([[B]]*)*** {{%.*}} // CHECK-NEXT: [[FNP:%.*]] = getelementptr inbounds i32 ([[B]]*)*, i32 ([[B]]*)** [[VTBL]], i64 0 - // CHECK-NEXT: [[FN:%.*]] = load i32 ([[B]]*)** [[FNP]] + // CHECK-NEXT: [[FN:%.*]] = load i32 ([[B]]*)*, i32 ([[B]]*)** [[FNP]] // CHECK-NEXT: [[ARG:%.*]] = call i32 [[FN]]([[B]]* [[THIS]]) // CHECK-NEXT: call void @_ZN10InitVTable1AC2Ei({{.*}}* {{%.*}}, i32 [[ARG]]) // CHECK-NEXT: [[T0:%.*]] = bitcast [[B]]* [[THIS]] to i32 (...)*** diff --git a/test/CodeGenCXX/constructors.cpp b/test/CodeGenCXX/constructors.cpp index b99c5a1941..ecbe5bb45b 100644 --- a/test/CodeGenCXX/constructors.cpp +++ b/test/CodeGenCXX/constructors.cpp @@ -106,6 +106,6 @@ namespace test1 { struct B { B(); int x; A a[0]; }; B::B() {} // CHECK-LABEL: define void @_ZN5test11BC2Ev( - // CHECK: [[THIS:%.*]] = load [[B:%.*]]** + // CHECK: [[THIS:%.*]] = load [[B:%.*]]*, [[B:%.*]]** // CHECK-NEXT: ret void } diff --git a/test/CodeGenCXX/copy-constructor-synthesis.cpp b/test/CodeGenCXX/copy-constructor-synthesis.cpp index 1a66547330..9c5a7f4b46 100644 --- a/test/CodeGenCXX/copy-constructor-synthesis.cpp +++ b/test/CodeGenCXX/copy-constructor-synthesis.cpp @@ -137,9 +137,9 @@ void f(B b1) { } // CHECK: define linkonce_odr dereferenceable({{[0-9]+}}) [[A:%.*]]* @_ZN12rdar138169401AaSERKS0_( -// CHECK: [[THIS:%.*]] = load [[A]]** +// CHECK: [[THIS:%.*]] = load [[A]]*, [[A]]** // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[A]], [[A]]* [[THIS]], i32 0, i32 1 -// CHECK-NEXT: [[OTHER:%.*]] = load [[A]]** +// CHECK-NEXT: [[OTHER:%.*]] = load [[A]]*, [[A]]** // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[A]], [[A]]* [[OTHER]], i32 0, i32 1 // CHECK-NEXT: [[T4:%.*]] = bitcast i16* [[T0]] to i8* // CHECK-NEXT: [[T5:%.*]] = bitcast i16* [[T2]] to i8* @@ -164,11 +164,11 @@ void f(B b1) { // CHECK: call void @_ZN6PR66281TD1Ev // CHECK-LABEL: define linkonce_odr void @_ZN12rdar138169401AC2ERKS0_( -// CHECK: [[THIS:%.*]] = load [[A]]** +// CHECK: [[THIS:%.*]] = load [[A]]*, [[A]]** // CHECK-NEXT: [[T0:%.*]] = bitcast [[A]]* [[THIS]] to i32 (...)*** // CHECK-NEXT: store i32 (...)** bitcast (i8** getelementptr inbounds ([4 x i8*]* @_ZTVN12rdar138169401AE, i64 0, i64 2) to i32 (...)**), i32 (...)*** [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[A]], [[A]]* [[THIS]], i32 0, i32 1 -// CHECK-NEXT: [[OTHER:%.*]] = load [[A]]** +// CHECK-NEXT: [[OTHER:%.*]] = load [[A]]*, [[A]]** // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[A]], [[A]]* [[OTHER]], i32 0, i32 1 // 
CHECK-NEXT: [[T4:%.*]] = bitcast i16* [[T0]] to i8* // CHECK-NEXT: [[T5:%.*]] = bitcast i16* [[T2]] to i8* diff --git a/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp b/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp index 0ddc8e42a7..d68ba7c753 100644 --- a/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp +++ b/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp @@ -372,7 +372,7 @@ namespace partly_constant { // // Second init list array (non-constant). // CHECK: store i32 4, i32* getelementptr inbounds ({{.*}}* @[[PARTLY_CONSTANT_SECOND]], i64 0, i64 0) - // CHECK: load i32* @_ZN15partly_constant1kE + // CHECK: load i32, i32* @_ZN15partly_constant1kE // CHECK: store i32 {{.*}}, i32* getelementptr inbounds ({{.*}}* @[[PARTLY_CONSTANT_SECOND]], i64 0, i64 1) // // Second init list. diff --git a/test/CodeGenCXX/cxx11-initializer-array-new.cpp b/test/CodeGenCXX/cxx11-initializer-array-new.cpp index 798cabfb09..2beb44ecf3 100644 --- a/test/CodeGenCXX/cxx11-initializer-array-new.cpp +++ b/test/CodeGenCXX/cxx11-initializer-array-new.cpp @@ -46,7 +46,7 @@ void *q = new S[n][3]{ { 1, 2, 3 }, { 4, 5, 6 } }; // CHECK-LABEL: define // -// CHECK: load i32* @n +// CHECK: load i32, i32* @n // CHECK: call {{.*}} @llvm.umul.with.overflow.i64(i64 %[[N:.*]], i64 12) // CHECK: %[[ELTS:.*]] = mul i64 %[[N]], 3 // CHECK: call {{.*}} @llvm.uadd.with.overflow.i64(i64 %{{.*}}, i64 8) @@ -106,7 +106,7 @@ void *r = new T[n][3]{ { 1, 2, 3 }, { 4, 5, 6 } }; // CHECK-LABEL: define // -// CHECK: load i32* @n +// CHECK: load i32, i32* @n // CHECK: call {{.*}} @llvm.umul.with.overflow.i64(i64 %[[N:.*]], i64 12) // CHECK: %[[ELTS:.*]] = mul i64 %[[N]], 3 // diff --git a/test/CodeGenCXX/cxx11-thread-local-reference.cpp b/test/CodeGenCXX/cxx11-thread-local-reference.cpp index 4143164d91..c3e165a416 100644 --- a/test/CodeGenCXX/cxx11-thread-local-reference.cpp +++ b/test/CodeGenCXX/cxx11-thread-local-reference.cpp @@ -19,7 +19,7 @@ int &g() { return r; } // CHECK: define weak_odr hidden i32* @_ZTW1r() { // CHECK: call void @_ZTH1r() -// CHECK: load i32** @r, align 8 +// CHECK: load i32*, i32** @r, align 8 // CHECK: ret i32* %{{.*}} // CHECK-LABEL: define internal void @__tls_init() diff --git a/test/CodeGenCXX/cxx11-thread-local.cpp b/test/CodeGenCXX/cxx11-thread-local.cpp index a6f010626c..9b16319088 100644 --- a/test/CodeGenCXX/cxx11-thread-local.cpp +++ b/test/CodeGenCXX/cxx11-thread-local.cpp @@ -57,7 +57,7 @@ int e = V::m; // CHECK-LABEL: define i32 @_Z1fv() int f() { - // CHECK: %[[GUARD:.*]] = load i8* @_ZGVZ1fvE1n, align 1 + // CHECK: %[[GUARD:.*]] = load i8, i8* @_ZGVZ1fvE1n, align 1 // CHECK: %[[NEED_INIT:.*]] = icmp eq i8 %[[GUARD]], 0 // CHECK: br i1 %[[NEED_INIT]] @@ -67,13 +67,13 @@ int f() { // CHECK: br label static thread_local int n = g(); - // CHECK: load i32* @_ZZ1fvE1n, align 4 + // CHECK: load i32, i32* @_ZZ1fvE1n, align 4 return n; } // CHECK: define {{.*}} @[[C_INIT:.*]]() // CHECK: call i32* @_ZTW1b() -// CHECK-NEXT: load i32* %{{.*}}, align 4 +// CHECK-NEXT: load i32, i32* %{{.*}}, align 4 // CHECK-NEXT: store i32 %{{.*}}, i32* @c, align 4 // CHECK-LABEL: define weak_odr hidden i32* @_ZTW1b() @@ -94,7 +94,7 @@ int f() { // CHECK: define {{.*}} @[[E_INIT:.*]]() // CHECK: call i32* @_ZTWN1VIiE1mE() -// CHECK-NEXT: load i32* %{{.*}}, align 4 +// CHECK-NEXT: load i32, i32* %{{.*}}, align 4 // CHECK-NEXT: store i32 %{{.*}}, i32* @e, align 4 // CHECK-LABEL: define weak_odr hidden i32* @_ZTWN1VIiE1mE() @@ -107,19 +107,19 @@ struct T { ~T(); }; // CHECK-LABEL: define void 
@_Z8tls_dtorv() void tls_dtor() { - // CHECK: load i8* @_ZGVZ8tls_dtorvE1s + // CHECK: load i8, i8* @_ZGVZ8tls_dtorvE1s // CHECK: call void @_ZN1SC1Ev(%struct.S* @_ZZ8tls_dtorvE1s) // CHECK: call i32 @__cxa_thread_atexit({{.*}}@_ZN1SD1Ev {{.*}} @_ZZ8tls_dtorvE1s{{.*}} @__dso_handle // CHECK: store i8 1, i8* @_ZGVZ8tls_dtorvE1s static thread_local S s; - // CHECK: load i8* @_ZGVZ8tls_dtorvE1t + // CHECK: load i8, i8* @_ZGVZ8tls_dtorvE1t // CHECK-NOT: _ZN1T // CHECK: call i32 @__cxa_thread_atexit({{.*}}@_ZN1TD1Ev {{.*}}@_ZZ8tls_dtorvE1t{{.*}} @__dso_handle // CHECK: store i8 1, i8* @_ZGVZ8tls_dtorvE1t static thread_local T t; - // CHECK: load i8* @_ZGVZ8tls_dtorvE1u + // CHECK: load i8, i8* @_ZGVZ8tls_dtorvE1u // CHECK: call void @_ZN1SC1Ev(%struct.S* @_ZGRZ8tls_dtorvE1u_) // CHECK: call i32 @__cxa_thread_atexit({{.*}}@_ZN1SD1Ev {{.*}} @_ZGRZ8tls_dtorvE1u_{{.*}} @__dso_handle // CHECK: store i8 1, i8* @_ZGVZ8tls_dtorvE1u @@ -154,7 +154,7 @@ void set_anon_i() { // CHECK-LABEL: define internal i32* @_ZTWN12_GLOBAL__N_16anon_iE() // CHECK: define {{.*}} @[[V_M_INIT:.*]]() -// CHECK: load i8* bitcast (i64* @_ZGVN1VIiE1mE to i8*) +// CHECK: load i8, i8* bitcast (i64* @_ZGVN1VIiE1mE to i8*) // CHECK: %[[V_M_INITIALIZED:.*]] = icmp eq i8 %{{.*}}, 0 // CHECK: br i1 %[[V_M_INITIALIZED]], // need init: @@ -169,7 +169,7 @@ void set_anon_i() { // CHECK: define {{.*}}@__tls_init() -// CHECK: load i8* @__tls_guard +// CHECK: load i8, i8* @__tls_guard // CHECK: %[[NEED_TLS_INIT:.*]] = icmp eq i8 %{{.*}}, 0 // CHECK: store i8 1, i8* @__tls_guard // CHECK: br i1 %[[NEED_TLS_INIT]], diff --git a/test/CodeGenCXX/cxx1y-init-captures.cpp b/test/CodeGenCXX/cxx1y-init-captures.cpp index 5794dac068..dcfe4d4729 100644 --- a/test/CodeGenCXX/cxx1y-init-captures.cpp +++ b/test/CodeGenCXX/cxx1y-init-captures.cpp @@ -32,9 +32,9 @@ void g() { // CHECK-LABEL: define internal i32 @"_ZZ1gvENK3$_1clEv"( // CHECK: getelementptr inbounds {{.*}}, i32 0, i32 0 -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: getelementptr inbounds {{.*}}, i32 0, i32 1 -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: add nsw i32 @@ -50,7 +50,7 @@ int h(int a) { // // Initialize init-capture 'c(a)' by copy. // CHECK: getelementptr inbounds {{.*}}, {{.*}}* %[[OUTER]], i32 0, i32 1 - // CHECK: load i32* %[[A_ADDR]], + // CHECK: load i32, i32* %[[A_ADDR]], // CHECK: store i32 // // CHECK: call i32 @"_ZZ1hiENK3$_2clEv"({{.*}}* %[[OUTER]]) @@ -61,7 +61,7 @@ int h(int a) { // CHECK: store {{.*}}, {{.*}}** %[[OUTER_ADDR]], // // Capture outer 'c' by reference. - // CHECK: %[[OUTER:.*]] = load {{.*}}** %[[OUTER_ADDR]] + // CHECK: %[[OUTER:.*]] = load {{.*}}*, {{.*}}** %[[OUTER_ADDR]] // CHECK: getelementptr inbounds {{.*}}, {{.*}}* %[[INNER]], i32 0, i32 0 // CHECK-NEXT: getelementptr inbounds {{.*}}, {{.*}}* %[[OUTER]], i32 0, i32 1 // CHECK-NEXT: store i32* % @@ -69,8 +69,8 @@ int h(int a) { // Capture outer 'b' by copy. 
 // CHECK: getelementptr inbounds {{.*}}, {{.*}}* %[[INNER]], i32 0, i32 1
 // CHECK-NEXT: getelementptr inbounds {{.*}}, {{.*}}* %[[OUTER]], i32 0, i32 0
- // CHECK-NEXT: load i32** %
- // CHECK-NEXT: load i32* %
+ // CHECK-NEXT: load i32*, i32** %
+ // CHECK-NEXT: load i32, i32* %
 // CHECK-NEXT: store i32
 //
 // CHECK: call i32 @"_ZZZ1hiENK3$_2clEvENKUlvE_clEv"({{.*}}* %[[INNER]])
@@ -81,16 +81,16 @@ int h(int a) {
 // CHECK-LABEL: define internal i32 @"_ZZZ1hiENK3$_2clEvENKUlvE_clEv"(
 // CHECK: %[[INNER_ADDR:.*]] = alloca
 // CHECK: store {{.*}}, {{.*}}** %[[INNER_ADDR]],
- // CHECK: %[[INNER:.*]] = load {{.*}}** %[[INNER_ADDR]]
+ // CHECK: %[[INNER:.*]] = load {{.*}}*, {{.*}}** %[[INNER_ADDR]]
 //
 // Load capture of 'b'
 // CHECK: getelementptr inbounds {{.*}}, {{.*}}* %[[INNER]], i32 0, i32 1
- // CHECK: load i32* %
+ // CHECK: load i32, i32* %
 //
 // Load capture of 'c'
 // CHECK: getelementptr inbounds {{.*}}, {{.*}}* %[[INNER]], i32 0, i32 0
- // CHECK: load i32** %
- // CHECK: load i32* %
+ // CHECK: load i32*, i32** %
+ // CHECK: load i32, i32* %
 //
 // CHECK: add nsw i32
 return b + c;
diff --git a/test/CodeGenCXX/cxx1y-initializer-aggregate.cpp b/test/CodeGenCXX/cxx1y-initializer-aggregate.cpp
index 6872a38ef7..eedf71479b 100644
--- a/test/CodeGenCXX/cxx1y-initializer-aggregate.cpp
+++ b/test/CodeGenCXX/cxx1y-initializer-aggregate.cpp
@@ -51,8 +51,8 @@ C n{};
 // CHECK: store i32 0, i32* getelementptr inbounds ({{.*}} @a, i32 0, i32 0)
 // CHECK: store i8* {{.*}} @[[STR_A]]{{.*}}, i8** getelementptr inbounds ({{.*}} @a, i32 0, i32 1)
-// CHECK: load i32* getelementptr inbounds ({{.*}} @a, i32 0, i32 0)
-// CHECK: load i8** getelementptr inbounds ({{.*}} @a, i32 0, i32 1)
+// CHECK: load i32, i32* getelementptr inbounds ({{.*}} @a, i32 0, i32 0)
+// CHECK: load i8*, i8** getelementptr inbounds ({{.*}} @a, i32 0, i32 1)
 // CHECK: getelementptr inbounds i8, i8* %{{.*}}, {{.*}} %{{.*}}
 // CHECK: store i8 %{{.*}}, i8* getelementptr inbounds ({{.*}} @a, i32 0, i32 2)
 // CHECK: call i32 @_ZN1A1fEv({{.*}} @a)
diff --git a/test/CodeGenCXX/deferred-global-init.cpp b/test/CodeGenCXX/deferred-global-init.cpp
index f64f507ac9..920037c253 100644
--- a/test/CodeGenCXX/deferred-global-init.cpp
+++ b/test/CodeGenCXX/deferred-global-init.cpp
@@ -8,7 +8,7 @@ void* bar() { return a; }
 // CHECK: @_ZL1a = internal global i8* null
 // CHECK-LABEL: define internal void @__cxx_global_var_init
-// CHECK: load i8** @foo
+// CHECK: load i8*, i8** @foo
 // CHECK: ret void
 // CHECK-LABEL: define internal void @_GLOBAL__sub_I_deferred_global_init.cpp
diff --git a/test/CodeGenCXX/delete-two-arg.cpp b/test/CodeGenCXX/delete-two-arg.cpp
index 0c14aaad44..e5a4cfa3ee 100644
--- a/test/CodeGenCXX/delete-two-arg.cpp
+++ b/test/CodeGenCXX/delete-two-arg.cpp
@@ -40,13 +40,13 @@ namespace test2 {
 void test(A *p) {
 // CHECK: [[P:%.*]] = alloca [[A]]*, align 4
 // CHECK-NEXT: store [[A]]* {{%.*}}, [[A]]** [[P]], align 4
- // CHECK-NEXT: [[T0:%.*]] = load [[A]]** [[P]], align 4
+ // CHECK-NEXT: [[T0:%.*]] = load [[A]]*, [[A]]** [[P]], align 4
 // CHECK-NEXT: [[T1:%.*]] = icmp eq [[A]]* [[T0]], null
 // CHECK-NEXT: br i1 [[T1]],
 // CHECK: [[T2:%.*]] = bitcast [[A]]* [[T0]] to i8*
 // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 -4
 // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i32*
- // CHECK-NEXT: [[T5:%.*]] = load i32* [[T4]]
+ // CHECK-NEXT: [[T5:%.*]] = load i32, i32* [[T4]]
 // CHECK-NEXT: call void @_ZdaPv(i8* [[T3]])
 // CHECK-NEXT: br label
 ::delete[] p;
diff --git a/test/CodeGenCXX/delete.cpp b/test/CodeGenCXX/delete.cpp
index 8e10c65f2c..ff448f808d 100644
--- a/test/CodeGenCXX/delete.cpp
+++ b/test/CodeGenCXX/delete.cpp
@@ -74,7 +74,7 @@ namespace test1 {
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[A]]* [[BEGIN]] to i8*
 // CHECK-NEXT: [[ALLOC:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 -8
 // CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[ALLOC]] to i64*
- // CHECK-NEXT: [[COUNT:%.*]] = load i64* [[T1]]
+ // CHECK-NEXT: [[COUNT:%.*]] = load i64, i64* [[T1]]
 // CHECK: [[END:%.*]] = getelementptr inbounds [[A]], [[A]]* [[BEGIN]], i64 [[COUNT]]
 // CHECK-NEXT: [[ISEMPTY:%.*]] = icmp eq [[A]]* [[BEGIN]], [[END]]
 // CHECK-NEXT: br i1 [[ISEMPTY]],
@@ -116,17 +116,17 @@ namespace test4 {
 // Load the offset-to-top from the vtable and apply it.
 // This has to be done first because the dtor can mess it up.
 // CHECK: [[T0:%.*]] = bitcast [[X:%.*]]* [[XP:%.*]] to i64**
- // CHECK-NEXT: [[VTABLE:%.*]] = load i64** [[T0]]
+ // CHECK-NEXT: [[VTABLE:%.*]] = load i64*, i64** [[T0]]
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i64, i64* [[VTABLE]], i64 -2
- // CHECK-NEXT: [[OFFSET:%.*]] = load i64* [[T0]], align 8
+ // CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* [[T0]], align 8
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[X]]* [[XP]] to i8*
 // CHECK-NEXT: [[ALLOCATED:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
 // Load the complete-object destructor (not the deleting destructor)
 // and call it.
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[X:%.*]]* [[XP:%.*]] to void ([[X]]*)***
- // CHECK-NEXT: [[VTABLE:%.*]] = load void ([[X]]*)*** [[T0]]
+ // CHECK-NEXT: [[VTABLE:%.*]] = load void ([[X]]*)**, void ([[X]]*)*** [[T0]]
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds void ([[X]]*)*, void ([[X]]*)** [[VTABLE]], i64 0
- // CHECK-NEXT: [[DTOR:%.*]] = load void ([[X]]*)** [[T0]]
+ // CHECK-NEXT: [[DTOR:%.*]] = load void ([[X]]*)*, void ([[X]]*)** [[T0]]
 // CHECK-NEXT: call void [[DTOR]]([[X]]* [[OBJ:%.*]])
 // Call the global operator delete.
// CHECK-NEXT: call void @_ZdlPv(i8* [[ALLOCATED]]) [[NUW:#[0-9]+]] diff --git a/test/CodeGenCXX/derived-to-base-conv.cpp b/test/CodeGenCXX/derived-to-base-conv.cpp index f4ef0e5277..402fa44e58 100644 --- a/test/CodeGenCXX/derived-to-base-conv.cpp +++ b/test/CodeGenCXX/derived-to-base-conv.cpp @@ -79,7 +79,7 @@ void test2(Test2b &x) { // CHECK: [[X:%.*]] = alloca [[B:%.*]]*, align 8 // CHECK-NEXT: [[Y:%.*]] = alloca [[A:%.*]]*, align 8 // CHECK-NEXT: store [[B]]* {{%.*}}, [[B]]** [[X]], align 8 - // CHECK-NEXT: [[T0:%.*]] = load [[B]]** [[X]], align 8 + // CHECK-NEXT: [[T0:%.*]] = load [[B]]*, [[B]]** [[X]], align 8 // CHECK-NEXT: [[T1:%.*]] = bitcast [[B]]* [[T0]] to [[A]]* // CHECK-NEXT: store [[A]]* [[T1]], [[A]]** [[Y]], align 8 // CHECK-NEXT: ret void diff --git a/test/CodeGenCXX/derived-to-virtual-base-class-calls-final.cpp b/test/CodeGenCXX/derived-to-virtual-base-class-calls-final.cpp index d859283703..dd64e81230 100644 --- a/test/CodeGenCXX/derived-to-virtual-base-class-calls-final.cpp +++ b/test/CodeGenCXX/derived-to-virtual-base-class-calls-final.cpp @@ -11,6 +11,6 @@ struct D final : virtual C { // CHECK-LABEL: define dereferenceable({{[0-9]+}}) %struct.B* @_Z1fR1D B &f(D &d) { - // CHECK-NOT: load i8** + // CHECK-NOT: load i8*, i8** return d; } diff --git a/test/CodeGenCXX/destructors.cpp b/test/CodeGenCXX/destructors.cpp index 47073fcc37..dcdba04303 100644 --- a/test/CodeGenCXX/destructors.cpp +++ b/test/CodeGenCXX/destructors.cpp @@ -254,12 +254,12 @@ namespace test4 { // CHECK5: [[X:%.*]] = alloca i32 // CHECK5-NEXT: [[A:%.*]] = alloca // CHECK5: br label - // CHECK5: [[TMP:%.*]] = load i32* [[X]] + // CHECK5: [[TMP:%.*]] = load i32, i32* [[X]] // CHECK5-NEXT: [[CMP:%.*]] = icmp ne i32 [[TMP]], 0 // CHECK5-NEXT: br i1 // CHECK5: call void @_ZN5test41AD1Ev( // CHECK5: br label - // CHECK5: [[TMP:%.*]] = load i32* [[X]] + // CHECK5: [[TMP:%.*]] = load i32, i32* [[X]] // CHECK5: [[TMP2:%.*]] = add nsw i32 [[TMP]], -1 // CHECK5: store i32 [[TMP2]], i32* [[X]] // CHECK5: br label diff --git a/test/CodeGenCXX/eh.cpp b/test/CodeGenCXX/eh.cpp index ba51ec02b4..77655f0b9f 100644 --- a/test/CodeGenCXX/eh.cpp +++ b/test/CodeGenCXX/eh.cpp @@ -123,14 +123,14 @@ namespace test7 { // CHECK-NEXT: [[SELECTOR:%.*]] = extractvalue { i8*, i32 } [[CAUGHTVAL]], 1 // CHECK-NEXT: store i32 [[SELECTOR]], i32* [[SELECTORVAR]] // CHECK-NEXT: br label -// CHECK: [[SELECTOR:%.*]] = load i32* [[SELECTORVAR]] +// CHECK: [[SELECTOR:%.*]] = load i32, i32* [[SELECTORVAR]] // CHECK-NEXT: [[T0:%.*]] = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) // CHECK-NEXT: icmp eq i32 [[SELECTOR]], [[T0]] // CHECK-NEXT: br i1 -// CHECK: [[T0:%.*]] = load i8** [[CAUGHTEXNVAR]] +// CHECK: [[T0:%.*]] = load i8*, i8** [[CAUGHTEXNVAR]] // CHECK-NEXT: [[T1:%.*]] = call i8* @__cxa_begin_catch(i8* [[T0]]) // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to i32* -// CHECK-NEXT: [[T3:%.*]] = load i32* [[T2]] +// CHECK-NEXT: [[T3:%.*]] = load i32, i32* [[T2]] // CHECK-NEXT: store i32 [[T3]], i32* {{%.*}}, align 4 // CHECK-NEXT: invoke void @__cxa_rethrow catch (int) { @@ -145,7 +145,7 @@ namespace test7 { // CHECK-NEXT: store i32 [[SELECTOR]], i32* [[SELECTORVAR]] // CHECK-NEXT: call void @__cxa_end_catch() // CHECK-NEXT: br label -// CHECK: load i8** [[CAUGHTEXNVAR]] +// CHECK: load i8*, i8** [[CAUGHTEXNVAR]] // CHECK-NEXT: call i8* @__cxa_begin_catch // CHECK-NEXT: call void @__cxa_end_catch catch (...) 
{ @@ -221,7 +221,7 @@ namespace test10 { } catch (int i) { // CHECK: call i8* @__cxa_begin_catch // CHECK-NEXT: bitcast - // CHECK-NEXT: load i32* + // CHECK-NEXT: load i32, i32* // CHECK-NEXT: store i32 // CHECK-NEXT: call void @__cxa_end_catch() [[NUW:#[0-9]+]] } catch (B a) { @@ -251,7 +251,7 @@ namespace test11 { // CHECK: invoke void @_ZN6test116opaqueEv() opaque(); } catch (int**&p) { - // CHECK: [[EXN:%.*]] = load i8** + // CHECK: [[EXN:%.*]] = load i8*, i8** // CHECK-NEXT: call i8* @__cxa_begin_catch(i8* [[EXN]]) [[NUW]] // CHECK-NEXT: [[ADJ1:%.*]] = getelementptr i8, i8* [[EXN]], i32 32 // CHECK-NEXT: [[ADJ2:%.*]] = bitcast i8* [[ADJ1]] to i32*** @@ -272,7 +272,7 @@ namespace test11 { // CHECK-NEXT: invoke void @_ZN6test116opaqueEv() opaque(); } catch (A*&p) { - // CHECK: [[EXN:%.*]] = load i8** [[EXNSLOT]] + // CHECK: [[EXN:%.*]] = load i8*, i8** [[EXNSLOT]] // CHECK-NEXT: [[ADJ1:%.*]] = call i8* @__cxa_begin_catch(i8* [[EXN]]) [[NUW]] // CHECK-NEXT: [[ADJ2:%.*]] = bitcast i8* [[ADJ1]] to [[A]]* // CHECK-NEXT: store [[A]]* [[ADJ2]], [[A]]** [[TMP]] @@ -384,7 +384,7 @@ namespace test15 { int x = 10; while (true) { - // CHECK: load i32* [[X]] + // CHECK: load i32, i32* [[X]] // CHECK-NEXT: [[COND:%.*]] = invoke zeroext i1 @_ZN6test156opaqueEi // CHECK: br i1 [[COND]] if (opaque(x)) @@ -438,9 +438,9 @@ namespace test16 { // CHECK: invoke void @_ZN6test161AD1Ev([[A]]* [[TEMP]]) // CHECK: ret void - // CHECK: [[T0:%.*]] = load i1* [[EXN_ACTIVE]] + // CHECK: [[T0:%.*]] = load i1, i1* [[EXN_ACTIVE]] // CHECK-NEXT: br i1 [[T0]] - // CHECK: [[T1:%.*]] = load i8** [[EXN_SAVE]] + // CHECK: [[T1:%.*]] = load i8*, i8** [[EXN_SAVE]] // CHECK-NEXT: call void @__cxa_free_exception(i8* [[T1]]) // CHECK-NEXT: br label } diff --git a/test/CodeGenCXX/exceptions.cpp b/test/CodeGenCXX/exceptions.cpp index ff2facdf40..e8f6c7996a 100644 --- a/test/CodeGenCXX/exceptions.cpp +++ b/test/CodeGenCXX/exceptions.cpp @@ -58,12 +58,12 @@ namespace test1 { // CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]* // CHECK-NEXT: invoke void @_ZN5test11BC1Ev([[B:%.*]]* [[T0:%.*]]) // CHECK: [[T1:%.*]] = getelementptr inbounds [[B]], [[B]]* [[T0]], i32 0, i32 0 - // CHECK-NEXT: [[T2:%.*]] = load i32* [[T1]], align 4 + // CHECK-NEXT: [[T2:%.*]] = load i32, i32* [[T1]], align 4 // CHECK-NEXT: invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 [[T2]]) // CHECK: store i1 false, i1* [[ACTIVE]] // CHECK-NEXT: invoke void @_ZN5test11BD1Ev([[B]]* [[T0]]) // CHECK: ret [[A]]* [[CAST]] - // CHECK: [[ISACTIVE:%.*]] = load i1* [[ACTIVE]] + // CHECK: [[ISACTIVE:%.*]] = load i1, i1* [[ACTIVE]] // CHECK-NEXT: br i1 [[ISACTIVE]] // CHECK: call void @_ZdlPv(i8* [[NEW]]) return new A(B().x); @@ -88,7 +88,7 @@ namespace test1 { // CHECK: store i1 false, i1* [[ACTIVE]] // CHECK-NEXT: invoke void @_ZN5test11BD1Ev([[B]]* [[T0]]) // CHECK: ret [[A]]* [[CAST]] - // CHECK: [[ISACTIVE:%.*]] = load i1* [[ACTIVE]] + // CHECK: [[ISACTIVE:%.*]] = load i1, i1* [[ACTIVE]] // CHECK-NEXT: br i1 [[ISACTIVE]] // CHECK: call void @_ZdlPv(i8* [[NEW]]) return new A(B()); @@ -109,7 +109,7 @@ namespace test1 { // CHECK-NEXT: invoke void @_ZN5test11BD1Ev([[B]]* [[T2]]) // CHECK: invoke void @_ZN5test11BD1Ev([[B]]* [[T0]]) // CHECK: ret [[A]]* [[CAST]] - // CHECK: [[ISACTIVE:%.*]] = load i1* [[ACTIVE]] + // CHECK: [[ISACTIVE:%.*]] = load i1, i1* [[ACTIVE]] // CHECK-NEXT: br i1 [[ISACTIVE]] // CHECK: call void @_ZdlPv(i8* [[NEW]]) return new A(B(), B()); @@ -137,11 +137,11 @@ namespace test1 { // CHECK: store i1 false, i1* [[ACTIVE]] // CHECK-NEXT: store 
[[A]]* [[CAST]], [[A]]** [[X]], align 8 // CHECK: invoke void @_ZN5test15makeBEv([[B:%.*]]* sret [[T2:%.*]]) - // CHECK: [[RET:%.*]] = load [[A]]** [[X]], align 8 + // CHECK: [[RET:%.*]] = load [[A]]*, [[A]]** [[X]], align 8 // CHECK: invoke void @_ZN5test11BD1Ev([[B]]* [[T2]]) // CHECK: invoke void @_ZN5test11BD1Ev([[B]]* [[T0]]) // CHECK: ret [[A]]* [[RET]] - // CHECK: [[ISACTIVE:%.*]] = load i1* [[ACTIVE]] + // CHECK: [[ISACTIVE:%.*]] = load i1, i1* [[ACTIVE]] // CHECK-NEXT: br i1 [[ISACTIVE]] // CHECK: call void @_ZdlPv(i8* [[NEW]]) A *x; @@ -228,10 +228,10 @@ namespace test3 { // CHECK: ret [[A]]* [[RESULT]] // in the EH path: - // CHECK: [[ISACTIVE:%.*]] = load i1* [[CLEANUPACTIVE]] + // CHECK: [[ISACTIVE:%.*]] = load i1, i1* [[CLEANUPACTIVE]] // CHECK-NEXT: br i1 [[ISACTIVE]] - // CHECK: [[V0:%.*]] = load i8** [[SAVED0]] - // CHECK-NEXT: [[V1:%.*]] = load i8** [[SAVED1]] + // CHECK: [[V0:%.*]] = load i8*, i8** [[SAVED0]] + // CHECK-NEXT: [[V1:%.*]] = load i8*, i8** [[SAVED1]] // CHECK-NEXT: invoke void @_ZN5test31AdlEPvS1_d(i8* [[V0]], i8* [[V1]], double [[CONST]]) } } @@ -275,7 +275,7 @@ namespace test5 { // CHECK-NEXT: [[A:%.*]] = alloca [[A_T:%.*]], align 1 // CHECK-NEXT: [[T:%.*]] = alloca [[T_T:%.*]], align 1 // CHECK-NEXT: invoke void @_ZN5test53fooEv() - // CHECK: [[EXN:%.*]] = load i8** [[EXNSLOT]] + // CHECK: [[EXN:%.*]] = load i8*, i8** [[EXNSLOT]] // CHECK-NEXT: [[ADJ:%.*]] = call i8* @__cxa_get_exception_ptr(i8* [[EXN]]) // CHECK-NEXT: [[SRC:%.*]] = bitcast i8* [[ADJ]] to [[A_T]]* // CHECK-NEXT: invoke void @_ZN5test51TC1Ev([[T_T]]* [[T]]) @@ -375,12 +375,12 @@ namespace test7 { // CHECK-NEXT: store [[B]]* // Destroy the inner A object. - // CHECK-NEXT: load i1* [[INNER_A]] + // CHECK-NEXT: load i1, i1* [[INNER_A]] // CHECK-NEXT: br i1 // CHECK: invoke void @_ZN5test71AD1Ev( // Destroy the outer A object. - // CHECK: load i1* [[OUTER_A]] + // CHECK: load i1, i1* [[OUTER_A]] // CHECK-NEXT: br i1 // CHECK: invoke void @_ZN5test71AD1Ev( @@ -450,7 +450,7 @@ namespace test10 { // CHECK-LABEL: define void @_ZN6test101CD1Ev( // CHECK: invoke void @_ZN6test107cleanupEv() // CHECK: call i8* @__cxa_begin_catch - // CHECK-NEXT: load i8* @_ZN6test108suppressE, align 1 + // CHECK-NEXT: load i8, i8* @_ZN6test108suppressE, align 1 // CHECK-NEXT: trunc // CHECK-NEXT: br i1 // CHECK: call void @__cxa_end_catch() @@ -478,7 +478,7 @@ namespace test11 { throw 0; } // CHECK-LABEL: define void @_ZN6test111CC2Ev( - // CHECK: [[THIS:%.*]] = load [[C:%.*]]** {{%.*}} + // CHECK: [[THIS:%.*]] = load [[C:%.*]]*, [[C:%.*]]** {{%.*}} // Construct single. // CHECK-NEXT: [[SINGLE:%.*]] = getelementptr inbounds [[C]], [[C]]* [[THIS]], i32 0, i32 0 // CHECK-NEXT: call void @_ZN6test111AC1Ev([[A:%.*]]* [[SINGLE]]) diff --git a/test/CodeGenCXX/global-init.cpp b/test/CodeGenCXX/global-init.cpp index 9c5b03a928..cc8ec89a81 100644 --- a/test/CodeGenCXX/global-init.cpp +++ b/test/CodeGenCXX/global-init.cpp @@ -77,7 +77,7 @@ namespace test4 { extern int foo(); // This needs an initialization function and guard variables. - // CHECK: load i8* bitcast (i64* @_ZGVN5test41xE + // CHECK: load i8, i8* bitcast (i64* @_ZGVN5test41xE // CHECK: [[CALL:%.*]] = call i32 @_ZN5test43fooEv // CHECK-NEXT: store i32 [[CALL]], i32* @_ZN5test41xE // CHECK-NEXT: store i64 1, i64* @_ZGVN5test41xE @@ -187,11 +187,11 @@ namespace test7 { // At the end of the file, we check that y is initialized before z. 
// CHECK: define internal void [[TEST1_Z_INIT:@.*]]() -// CHECK: load i32* @_ZN5test1L1yE +// CHECK: load i32, i32* @_ZN5test1L1yE // CHECK-NEXT: xor // CHECK-NEXT: store i32 {{.*}}, i32* @_ZN5test1L1zE // CHECK: define internal void [[TEST1_Y_INIT:@.*]]() -// CHECK: load i32* @_ZN5test1L1xE +// CHECK: load i32, i32* @_ZN5test1L1xE // CHECK-NEXT: sub // CHECK-NEXT: store i32 {{.*}}, i32* @_ZN5test1L1yE diff --git a/test/CodeGenCXX/homogeneous-aggregates.cpp b/test/CodeGenCXX/homogeneous-aggregates.cpp index 94813f3578..fbbb1ebed4 100644 --- a/test/CodeGenCXX/homogeneous-aggregates.cpp +++ b/test/CodeGenCXX/homogeneous-aggregates.cpp @@ -78,7 +78,7 @@ void call_D5(D5 *p) { // Check the call site. // // ARM64-LABEL: define void @_Z7call_D5P2D5(%struct.D5* %p) -// ARM64: load [3 x double]* +// ARM64: load [3 x double], [3 x double]* // ARM64: call %struct.D5 @_Z7func_D52D5([3 x double] %{{.*}}) struct Empty { }; diff --git a/test/CodeGenCXX/lambda-expressions.cpp b/test/CodeGenCXX/lambda-expressions.cpp index 7fb22988fa..d00ebd0bef 100644 --- a/test/CodeGenCXX/lambda-expressions.cpp +++ b/test/CodeGenCXX/lambda-expressions.cpp @@ -19,11 +19,11 @@ int a() { return []{ return 1; }(); } int b(int x) { return [x]{return x;}(); } // CHECK-LABEL: define i32 @_Z1bi // CHECK: store i32 -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: store i32 // CHECK: call i32 @"_ZZ1biENK3$_1clEv" // CHECK-LABEL: define internal i32 @"_ZZ1biENK3$_1clEv" -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: ret i32 int c(int x) { return [&x]{return x;}(); } @@ -32,8 +32,8 @@ int c(int x) { return [&x]{return x;}(); } // CHECK: store i32* // CHECK: call i32 @"_ZZ1ciENK3$_2clEv" // CHECK-LABEL: define internal i32 @"_ZZ1ciENK3$_2clEv" -// CHECK: load i32** -// CHECK: load i32* +// CHECK: load i32*, i32** +// CHECK: load i32, i32* // CHECK: ret i32 struct D { D(); D(const D&); int x; }; @@ -45,8 +45,8 @@ int d(int x) { D y[10]; [x,y] { return y[x].x; }(); } // CHECK: call void @_ZN1DC1ERKS_ // CHECK: call i32 @"_ZZ1diENK3$_3clEv" // CHECK-LABEL: define internal i32 @"_ZZ1diENK3$_3clEv" -// CHECK: load i32* -// CHECK: load i32* +// CHECK: load i32, i32* +// CHECK: load i32, i32* // CHECK: ret i32 struct E { E(); E(const E&); ~E(); int x; }; @@ -60,7 +60,7 @@ int e(E a, E b, bool cond) { [a,b,cond](){ return (cond ? 
a : b).x; }(); } // CHECK-LABEL: define internal i32 @"_ZZ1e1ES_bENK3$_4clEv" // CHECK: trunc i8 -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: ret i32 void f() { @@ -76,12 +76,12 @@ int g() { int &r = k; // CHECK-LABEL: define internal i32 @"_ZZ1gvENK3$_6clEv"( // CHECK-NOT: } - // CHECK: load i32* @_ZL1k, + // CHECK: load i32, i32* @_ZL1k, return [] { return r; } (); }; // PR14773 -// CHECK: [[ARRVAL:%[0-9a-zA-Z]*]] = load i32* getelementptr inbounds ([0 x i32]* @_ZZ14staticarrayrefvE5array, i32 0, i64 0), align 4 +// CHECK: [[ARRVAL:%[0-9a-zA-Z]*]] = load i32, i32* getelementptr inbounds ([0 x i32]* @_ZZ14staticarrayrefvE5array, i32 0, i64 0), align 4 // CHECK-NEXT: store i32 [[ARRVAL]] void staticarrayref(){ static int array[] = {}; @@ -104,8 +104,8 @@ int *PR22071_fun() { // CHECK-LABEL: define internal i32 @"_ZZ1fvEN3$_58__invokeEii" // CHECK: store i32 // CHECK-NEXT: store i32 -// CHECK-NEXT: load i32* -// CHECK-NEXT: load i32* +// CHECK-NEXT: load i32, i32* +// CHECK-NEXT: load i32, i32* // CHECK-NEXT: call i32 @"_ZZ1fvENK3$_5clEii" // CHECK-NEXT: ret i32 diff --git a/test/CodeGenCXX/lvalue-bitcasts.cpp b/test/CodeGenCXX/lvalue-bitcasts.cpp index 86355b27ab..c9997bf417 100644 --- a/test/CodeGenCXX/lvalue-bitcasts.cpp +++ b/test/CodeGenCXX/lvalue-bitcasts.cpp @@ -5,90 +5,90 @@ struct Y { X x; }; // CHECK-LABEL: define void @_Z21reinterpret_cast_testRiRfR1X void reinterpret_cast_test(int &ir, float &fr, X &xr) { - // CHECK: load float** + // CHECK: load float*, float** // CHECK: bitcast float* - // CHECK: load i32* + // CHECK: load i32, i32* ir = reinterpret_cast(fr); // CHECK: load // CHECK: {{bitcast.*to i32\*}} - // CHECK: load i32* + // CHECK: load i32, i32* ir = reinterpret_cast(xr); // CHECK: load i32 // CHECK: {{bitcast.*to float\*}} - // CHECK: load float* + // CHECK: load float, float* fr = reinterpret_cast(ir); // CHECK: load // CHECK: {{bitcast.*to float\*}} - // CHECK: load float* + // CHECK: load float, float* fr = reinterpret_cast(xr); - // CHECK: load i32** + // CHECK: load i32*, i32** // CHECK: bitcast i32* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 xr = reinterpret_cast(ir); - // CHECK: load float** + // CHECK: load float*, float** // CHECK: bitcast float* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 xr = reinterpret_cast(fr); _Complex float cf; _Complex float &cfr = cf; - // CHECK: load i32** + // CHECK: load i32*, i32** // CHECK: bitcast i32* - // CHECK: load float* - // CHECK: load float* + // CHECK: load float, float* + // CHECK: load float, float* cfr = reinterpret_cast<_Complex float&>(ir); - // CHECK: load float** + // CHECK: load float*, float** // CHECK: bitcast float* - // CHECK: load float* - // CHECK: load float* + // CHECK: load float, float* + // CHECK: load float, float* cfr = reinterpret_cast<_Complex float&>(fr); // CHECK: bitcast - // CHECK: load float* - // CHECK: load float* + // CHECK: load float, float* + // CHECK: load float, float* cfr = reinterpret_cast<_Complex float&>(xr); // CHECK: ret void } // CHECK-LABEL: define void @_Z6c_castRiRfR1X void c_cast(int &ir, float &fr, X &xr) { - // CHECK: load float** + // CHECK: load float*, float** // CHECK: bitcast float* - // CHECK: load i32* + // CHECK: load i32, i32* ir = (int&)fr; // CHECK: load // CHECK: {{bitcast.*to i32\*}} - // CHECK: load i32* + // CHECK: load i32, i32* ir = (int&)xr; // CHECK: load i32 // CHECK: {{bitcast.*to float\*}} - // CHECK: load float* + // CHECK: load float, float* fr = (float&)ir; // CHECK: load // CHECK: {{bitcast.*to float\*}} - // CHECK: load 
float* + // CHECK: load float, float* fr = (float&)xr; - // CHECK: load i32** + // CHECK: load i32*, i32** // CHECK: bitcast i32* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 xr = (X&)ir; - // CHECK: load float** + // CHECK: load float*, float** // CHECK: bitcast float* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 xr = (X&)fr; _Complex float cf; _Complex float &cfr = cf; - // CHECK: load i32** + // CHECK: load i32*, i32** // CHECK: bitcast i32* - // CHECK: load float* - // CHECK: load float* + // CHECK: load float, float* + // CHECK: load float, float* cfr = (_Complex float&)ir; - // CHECK: load float** + // CHECK: load float*, float** // CHECK: bitcast float* - // CHECK: load float* - // CHECK: load float* + // CHECK: load float, float* + // CHECK: load float, float* cfr = (_Complex float&)fr; // CHECK: bitcast - // CHECK: load float* - // CHECK: load float* + // CHECK: load float, float* + // CHECK: load float, float* cfr = (_Complex float&)xr; // CHECK: ret void } @@ -98,46 +98,46 @@ void functional_cast(int &ir, float &fr, X &xr) { typedef int &intref; typedef float &floatref; typedef X &Xref; - // CHECK: load float** + // CHECK: load float*, float** // CHECK: bitcast float* - // CHECK: load i32* + // CHECK: load i32, i32* ir = intref(fr); // CHECK: load // CHECK: {{bitcast.*to i32\*}} - // CHECK: load i32* + // CHECK: load i32, i32* ir = intref(xr); // CHECK: load i32 // CHECK: {{bitcast.*to float\*}} - // CHECK: load float* + // CHECK: load float, float* fr = floatref(ir); // CHECK: load // CHECK: {{bitcast.*to float\*}} - // CHECK: load float* + // CHECK: load float, float* fr = floatref(xr); - // CHECK: load i32** + // CHECK: load i32*, i32** // CHECK: bitcast i32* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 xr = Xref(ir); - // CHECK: load float** + // CHECK: load float*, float** // CHECK: bitcast float* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64 xr = Xref(fr); typedef _Complex float &complex_float_ref; _Complex float cf; _Complex float &cfr = cf; - // CHECK: load i32** + // CHECK: load i32*, i32** // CHECK: bitcast i32* - // CHECK: load float* - // CHECK: load float* + // CHECK: load float, float* + // CHECK: load float, float* cfr = complex_float_ref(ir); - // CHECK: load float** + // CHECK: load float*, float** // CHECK: bitcast float* - // CHECK: load float* - // CHECK: load float* + // CHECK: load float, float* + // CHECK: load float, float* cfr = complex_float_ref(fr); // CHECK: bitcast - // CHECK: load float* - // CHECK: load float* + // CHECK: load float, float* + // CHECK: load float, float* cfr = complex_float_ref(xr); // CHECK: ret void } diff --git a/test/CodeGenCXX/m64-ptr.cpp b/test/CodeGenCXX/m64-ptr.cpp index 29916bf32a..50ba6aea52 100644 --- a/test/CodeGenCXX/m64-ptr.cpp +++ b/test/CodeGenCXX/m64-ptr.cpp @@ -12,7 +12,7 @@ public: void foo(StringRef X); void bar(StringRef &A) { // CHECK: @_Z3barR9StringRef -// CHECK: load i8** +// CHECK: load i8*, i8** foo(A); // CHECK: ret void } diff --git a/test/CodeGenCXX/mangle.cpp b/test/CodeGenCXX/mangle.cpp index 630a251c20..34f091d036 100644 --- a/test/CodeGenCXX/mangle.cpp +++ b/test/CodeGenCXX/mangle.cpp @@ -522,7 +522,7 @@ namespace test14 { static int a(), x; }; // CHECK-LABEL: define i32 @_ZN6test141S1aEv - // CHECK: load i32* @_ZN6test141S1xE + // CHECK: load i32, i32* @_ZN6test141S1xE int S::a() { return S::x; } } } diff --git a/test/CodeGenCXX/member-expressions.cpp b/test/CodeGenCXX/member-expressions.cpp index 48502727be..bbfa51f88c 100644 --- a/test/CodeGenCXX/member-expressions.cpp +++ 
b/test/CodeGenCXX/member-expressions.cpp @@ -80,7 +80,7 @@ namespace test4 { // CHECK-NEXT: getelementptr // CHECK-NEXT: bitcast // CHECK-NEXT: getelementptr - // CHECK-NEXT: load i32* + // CHECK-NEXT: load i32, i32* return c_ptr->B::x; } } diff --git a/test/CodeGenCXX/member-function-pointers.cpp b/test/CodeGenCXX/member-function-pointers.cpp index 8ae57b2cdb..7ffe4cd9d2 100644 --- a/test/CodeGenCXX/member-function-pointers.cpp +++ b/test/CodeGenCXX/member-function-pointers.cpp @@ -43,14 +43,14 @@ void f() { // CODE-LP64: store volatile { i64, i64 } zeroinitializer, { i64, i64 }* @vpa vpa = 0; - // CODE-LP64: [[TMP:%.*]] = load { i64, i64 }* @pa, align 8 + // CODE-LP64: [[TMP:%.*]] = load { i64, i64 }, { i64, i64 }* @pa, align 8 // CODE-LP64: [[TMPADJ:%.*]] = extractvalue { i64, i64 } [[TMP]], 1 // CODE-LP64: [[ADJ:%.*]] = add nsw i64 [[TMPADJ]], 16 // CODE-LP64: [[RES:%.*]] = insertvalue { i64, i64 } [[TMP]], i64 [[ADJ]], 1 // CODE-LP64: store { i64, i64 } [[RES]], { i64, i64 }* @pc, align 8 pc = pa; - // CODE-LP64: [[TMP:%.*]] = load { i64, i64 }* @pc, align 8 + // CODE-LP64: [[TMP:%.*]] = load { i64, i64 }, { i64, i64 }* @pc, align 8 // CODE-LP64: [[TMPADJ:%.*]] = extractvalue { i64, i64 } [[TMP]], 1 // CODE-LP64: [[ADJ:%.*]] = sub nsw i64 [[TMPADJ]], 16 // CODE-LP64: [[RES:%.*]] = insertvalue { i64, i64 } [[TMP]], i64 [[ADJ]], 1 diff --git a/test/CodeGenCXX/microsoft-abi-byval-sret.cpp b/test/CodeGenCXX/microsoft-abi-byval-sret.cpp index 8f42e01ecc..57ac79500a 100644 --- a/test/CodeGenCXX/microsoft-abi-byval-sret.cpp +++ b/test/CodeGenCXX/microsoft-abi-byval-sret.cpp @@ -21,7 +21,7 @@ A B::foo(A x) { // CHECK-LABEL: define x86_thiscallcc %struct.A* @"\01?foo@B@@QAE?AUA@@U2@@Z" // CHECK: (%struct.B* %this, <{ %struct.A*, %struct.A }>* inalloca) // CHECK: getelementptr inbounds <{ %struct.A*, %struct.A }>, <{ %struct.A*, %struct.A }>* %{{.*}}, i32 0, i32 0 -// CHECK: load %struct.A** +// CHECK: load %struct.A*, %struct.A** // CHECK: ret %struct.A* A B::bar(A x) { @@ -31,7 +31,7 @@ A B::bar(A x) { // CHECK-LABEL: define %struct.A* @"\01?bar@B@@QAA?AUA@@U2@@Z" // CHECK: (<{ %struct.B*, %struct.A*, %struct.A }>* inalloca) // CHECK: getelementptr inbounds <{ %struct.B*, %struct.A*, %struct.A }>, <{ %struct.B*, %struct.A*, %struct.A }>* %{{.*}}, i32 0, i32 1 -// CHECK: load %struct.A** +// CHECK: load %struct.A*, %struct.A** // CHECK: ret %struct.A* A B::baz(A x) { @@ -41,7 +41,7 @@ A B::baz(A x) { // CHECK-LABEL: define x86_stdcallcc %struct.A* @"\01?baz@B@@QAG?AUA@@U2@@Z" // CHECK: (<{ %struct.B*, %struct.A*, %struct.A }>* inalloca) // CHECK: getelementptr inbounds <{ %struct.B*, %struct.A*, %struct.A }>, <{ %struct.B*, %struct.A*, %struct.A }>* %{{.*}}, i32 0, i32 1 -// CHECK: load %struct.A** +// CHECK: load %struct.A*, %struct.A** // CHECK: ret %struct.A* A B::qux(A x) { diff --git a/test/CodeGenCXX/microsoft-abi-byval-thunks.cpp b/test/CodeGenCXX/microsoft-abi-byval-thunks.cpp index f496bbf4b2..8ae85c0b66 100644 --- a/test/CodeGenCXX/microsoft-abi-byval-thunks.cpp +++ b/test/CodeGenCXX/microsoft-abi-byval-thunks.cpp @@ -46,7 +46,7 @@ C::C() {} // force emission // CHECK32-LABEL: define linkonce_odr x86_stdcallcc void @"\01?foo@C@stdcall_thunk@@W3AGXUAgg@2@@Z" // CHECK32: (<{ %"struct.stdcall_thunk::C"*, %"struct.stdcall_thunk::Agg" }>* inalloca) // CHECK32: %[[this_slot:[^ ]*]] = getelementptr inbounds <{ %"struct.stdcall_thunk::C"*, %"struct.stdcall_thunk::Agg" }>, <{ %"struct.stdcall_thunk::C"*, %"struct.stdcall_thunk::Agg" }>* %0, i32 0, i32 0 -// CHECK32: load 
%"struct.stdcall_thunk::C"** %[[this_slot]] +// CHECK32: load %"struct.stdcall_thunk::C"*, %"struct.stdcall_thunk::C"** %[[this_slot]] // CHECK32: getelementptr i8, i8* %{{.*}}, i32 -4 // CHECK32: store %"struct.stdcall_thunk::C"* %{{.*}}, %"struct.stdcall_thunk::C"** %[[this_slot]] // CHECK32: musttail call x86_stdcallcc void @"\01?foo@C@stdcall_thunk@@UAGXUAgg@2@@Z" @@ -78,7 +78,7 @@ C::C() {} // force emission // CHECK32-LABEL: define linkonce_odr %"struct.sret_thunk::Agg"* @"\01?foo@C@sret_thunk@@W3AA?AUAgg@2@U32@@Z" // CHECK32: (<{ %"struct.sret_thunk::C"*, %"struct.sret_thunk::Agg"*, %"struct.sret_thunk::Agg" }>* inalloca) // CHECK32: %[[this_slot:[^ ]*]] = getelementptr inbounds <{ %"struct.sret_thunk::C"*, %"struct.sret_thunk::Agg"*, %"struct.sret_thunk::Agg" }>, <{ %"struct.sret_thunk::C"*, %"struct.sret_thunk::Agg"*, %"struct.sret_thunk::Agg" }>* %0, i32 0, i32 0 -// CHECK32: load %"struct.sret_thunk::C"** %[[this_slot]] +// CHECK32: load %"struct.sret_thunk::C"*, %"struct.sret_thunk::C"** %[[this_slot]] // CHECK32: getelementptr i8, i8* %{{.*}}, i32 -4 // CHECK32: store %"struct.sret_thunk::C"* %{{.*}}, %"struct.sret_thunk::C"** %[[this_slot]] // CHECK32: %[[rv:[^ ]*]] = musttail call %"struct.sret_thunk::Agg"* @"\01?foo@C@sret_thunk@@UAA?AUAgg@2@U32@@Z" diff --git a/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp b/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp index 9037417dbc..0fef6255ad 100644 --- a/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp +++ b/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp @@ -21,9 +21,9 @@ T* test2(A* x) { return &dynamic_cast(*x); } // CHECK-LABEL: define %struct.T* @"\01?test2@@YAPAUT@@PAUA@@@Z"(%struct.A* %x) // CHECK: [[CAST:%.*]] = bitcast %struct.A* %x to i8* // CHECK-NEXT: [[VBPTRPTR:%.*]] = getelementptr inbounds %struct.A, %struct.A* %x, i32 0, i32 0 -// CHECK-NEXT: [[VBTBL:%.*]] = load i32** [[VBPTRPTR]], align 4 +// CHECK-NEXT: [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4 // CHECK-NEXT: [[VBOFFP:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1 -// CHECK-NEXT: [[VBOFFS:%.*]] = load i32* [[VBOFFP]], align 4 +// CHECK-NEXT: [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4 // CHECK-NEXT: [[ADJ:%.*]] = getelementptr inbounds i8, i8* [[CAST]], i32 [[VBOFFS]] // CHECK-NEXT: [[CALL:%.*]] = tail call i8* @__RTDynamicCast(i8* [[ADJ]], i32 [[VBOFFS]], i8* bitcast (%rtti.TypeDescriptor7* @"\01??_R0?AUA@@@8" to i8*), i8* bitcast (%rtti.TypeDescriptor7* @"\01??_R0?AUT@@@8" to i8*), i32 1) // CHECK-NEXT: [[RET:%.*]] = bitcast i8* [[CALL]] to %struct.T* @@ -34,9 +34,9 @@ T* test3(B* x) { return &dynamic_cast(*x); } // CHECK: [[VOIDP:%.*]] = getelementptr inbounds %struct.B, %struct.B* %x, i32 0, i32 0, i32 0 // CHECK-NEXT: [[VBPTR:%.*]] = getelementptr inbounds i8, i8* [[VOIDP]], i32 4 // CHECK-NEXT: [[VBPTRPTR:%.*]] = bitcast i8* [[VBPTR:%.*]] to i32** -// CHECK-NEXT: [[VBTBL:%.*]] = load i32** [[VBPTRPTR]], align 4 +// CHECK-NEXT: [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4 // CHECK-NEXT: [[VBOFFP:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1 -// CHECK-NEXT: [[VBOFFS:%.*]] = load i32* [[VBOFFP]], align 4 +// CHECK-NEXT: [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4 // CHECK-NEXT: [[DELTA:%.*]] = add nsw i32 [[VBOFFS]], 4 // CHECK-NEXT: [[ADJ:%.*]] = getelementptr inbounds i8, i8* [[VOIDP]], i32 [[DELTA]] // CHECK-NEXT: [[CALL:%.*]] = tail call i8* @__RTDynamicCast(i8* [[ADJ]], i32 [[DELTA]], i8* bitcast (%rtti.TypeDescriptor7* @"\01??_R0?AUB@@@8" to i8*), i8* bitcast (%rtti.TypeDescriptor7* 
@"\01??_R0?AUT@@@8" to i8*), i32 1) @@ -56,9 +56,9 @@ T* test5(A* x) { return dynamic_cast(x); } // CHECK-NEXT: br i1 [[CHECK]] // CHECK: [[VOIDP:%.*]] = bitcast %struct.A* %x to i8* // CHECK-NEXT: [[VBPTRPTR:%.*]] = getelementptr inbounds %struct.A, %struct.A* %x, i32 0, i32 0 -// CHECK-NEXT: [[VBTBL:%.*]] = load i32** [[VBPTRPTR]], align 4 +// CHECK-NEXT: [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4 // CHECK-NEXT: [[VBOFFP:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1 -// CHECK-NEXT: [[VBOFFS:%.*]] = load i32* [[VBOFFP]], align 4 +// CHECK-NEXT: [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4 // CHECK-NEXT: [[ADJ:%.*]] = getelementptr inbounds i8, i8* [[VOIDP]], i32 [[VBOFFS]] // CHECK-NEXT: [[CALL:%.*]] = tail call i8* @__RTDynamicCast(i8* [[ADJ]], i32 [[VBOFFS]], i8* bitcast (%rtti.TypeDescriptor7* @"\01??_R0?AUA@@@8" to i8*), i8* bitcast (%rtti.TypeDescriptor7* @"\01??_R0?AUT@@@8" to i8*), i32 0) // CHECK-NEXT: [[RES:%.*]] = bitcast i8* [[CALL]] to %struct.T* @@ -73,9 +73,9 @@ T* test6(B* x) { return dynamic_cast(x); } // CHECK: [[CAST:%.*]] = getelementptr inbounds %struct.B, %struct.B* %x, i32 0, i32 0, i32 0 // CHECK-NEXT: [[VBPTR:%.*]] = getelementptr inbounds i8, i8* [[CAST]], i32 4 // CHECK-NEXT: [[VBPTRPTR:%.*]] = bitcast i8* [[VBPTR]] to i32** -// CHECK-NEXT: [[VBTBL:%.*]] = load i32** [[VBPTRPTR]], align 4 +// CHECK-NEXT: [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4 // CHECK-NEXT: [[VBOFFP:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1 -// CHECK-NEXT: [[VBOFFS:%.*]] = load i32* [[VBOFFP]], align 4 +// CHECK-NEXT: [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4 // CHECK-NEXT: [[DELTA:%.*]] = add nsw i32 [[VBOFFS]], 4 // CHECK-NEXT: [[ADJ:%.*]] = getelementptr inbounds i8, i8* [[CAST]], i32 [[DELTA]] // CHECK-NEXT: [[CALL:%.*]] = tail call i8* @__RTDynamicCast(i8* [[ADJ]], i32 [[DELTA]], i8* bitcast (%rtti.TypeDescriptor7* @"\01??_R0?AUB@@@8" to i8*), i8* bitcast (%rtti.TypeDescriptor7* @"\01??_R0?AUT@@@8" to i8*), i32 0) @@ -96,9 +96,9 @@ void* test8(A* x) { return dynamic_cast(x); } // CHECK-NEXT: br i1 [[CHECK]] // CHECK: [[VOIDP:%.*]] = bitcast %struct.A* %x to i8* // CHECK-NEXT: [[VBPTRPTR:%.*]] = getelementptr inbounds %struct.A, %struct.A* %x, i32 0, i32 0 -// CHECK-NEXT: [[VBTBL:%.*]] = load i32** [[VBPTRPTR]], align 4 +// CHECK-NEXT: [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4 // CHECK-NEXT: [[VBOFFP:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1 -// CHECK-NEXT: [[VBOFFS:%.*]] = load i32* [[VBOFFP]], align 4 +// CHECK-NEXT: [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4 // CHECK-NEXT: [[ADJ:%.*]] = getelementptr inbounds i8, i8* [[VOIDP]], i32 [[VBOFFS]] // CHECK-NEXT: [[RES:%.*]] = tail call i8* @__RTCastToVoid(i8* [[ADJ]]) // CHECK-NEXT: br label @@ -112,9 +112,9 @@ void* test9(B* x) { return dynamic_cast(x); } // CHECK: [[CAST:%.*]] = getelementptr inbounds %struct.B, %struct.B* %x, i32 0, i32 0, i32 0 // CHECK-NEXT: [[VBPTR:%.*]] = getelementptr inbounds i8, i8* [[CAST]], i32 4 // CHECK-NEXT: [[VBPTRPTR:%.*]] = bitcast i8* [[VBPTR]] to i32** -// CHECK-NEXT: [[VBTBL:%.*]] = load i32** [[VBPTRPTR]], align 4 +// CHECK-NEXT: [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4 // CHECK-NEXT: [[VBOFFP:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1 -// CHECK-NEXT: [[VBOFFS:%.*]] = load i32* [[VBOFFP]], align 4 +// CHECK-NEXT: [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4 // CHECK-NEXT: [[DELTA:%.*]] = add nsw i32 [[VBOFFS]], 4 // CHECK-NEXT: [[ADJ:%.*]] = getelementptr 
inbounds i8, i8* [[CAST]], i32 [[DELTA]] // CHECK-NEXT: [[CALL:%.*]] = tail call i8* @__RTCastToVoid(i8* [[ADJ]]) diff --git a/test/CodeGenCXX/microsoft-abi-exceptions.cpp b/test/CodeGenCXX/microsoft-abi-exceptions.cpp index d6b61fa48b..5d51131416 100644 --- a/test/CodeGenCXX/microsoft-abi-exceptions.cpp +++ b/test/CodeGenCXX/microsoft-abi-exceptions.cpp @@ -61,7 +61,7 @@ int HasDeactivatedCleanups() { // WIN32: ret i32 // // Conditionally destroy arg1. -// WIN32: %[[cond:.*]] = load i1* %[[isactive]] +// WIN32: %[[cond:.*]] = load i1, i1* %[[isactive]] // WIN32: br i1 %[[cond]] // WIN32: invoke x86_thiscallcc void @"\01??1A@@QAE@XZ"(%struct.A* %[[arg1]]) // WIN32: } @@ -125,7 +125,7 @@ int HasConditionalDeactivatedCleanups(bool cond) { // WIN32: ret i32 // // Somewhere in the landing pad soup, we conditionally destroy arg1. -// WIN32: %[[isactive:.*]] = load i1* %[[arg1_cond]] +// WIN32: %[[isactive:.*]] = load i1, i1* %[[arg1_cond]] // WIN32: br i1 %[[isactive]] // WIN32: invoke x86_thiscallcc void @"\01??1A@@QAE@XZ" // WIN32: } diff --git a/test/CodeGenCXX/microsoft-abi-member-pointers.cpp b/test/CodeGenCXX/microsoft-abi-member-pointers.cpp index cf4217972b..77843592b7 100644 --- a/test/CodeGenCXX/microsoft-abi-member-pointers.cpp +++ b/test/CodeGenCXX/microsoft-abi-member-pointers.cpp @@ -212,7 +212,7 @@ void podMemPtrs() { // CHECK: %[[memptr:.*]] = alloca i32, align 4 // CHECK-NEXT: store i32 0, i32* %[[memptr]], align 4 // CHECK-NEXT: store i32 4, i32* %[[memptr]], align 4 -// CHECK-NEXT: %[[memptr_val:.*]] = load i32* %[[memptr]], align 4 +// CHECK-NEXT: %[[memptr_val:.*]] = load i32, i32* %[[memptr]], align 4 // CHECK-NEXT: %{{.*}} = icmp ne i32 %[[memptr_val]], -1 // CHECK-NEXT: br i1 %{{.*}}, label %{{.*}}, label %{{.*}} // CHECK: store i32 -1, i32* %[[memptr]], align 4 @@ -232,7 +232,7 @@ void polymorphicMemPtrs() { // CHECK: %[[memptr:.*]] = alloca i32, align 4 // CHECK-NEXT: store i32 4, i32* %[[memptr]], align 4 // CHECK-NEXT: store i32 8, i32* %[[memptr]], align 4 -// CHECK-NEXT: %[[memptr_val:.*]] = load i32* %[[memptr]], align 4 +// CHECK-NEXT: %[[memptr_val:.*]] = load i32, i32* %[[memptr]], align 4 // CHECK-NEXT: %{{.*}} = icmp ne i32 %[[memptr_val]], 0 // CHECK-NEXT: br i1 %{{.*}}, label %{{.*}}, label %{{.*}} // CHECK: store i32 0, i32* %[[memptr]], align 4 @@ -243,9 +243,9 @@ void polymorphicMemPtrs() { bool nullTestDataUnspecified(int Unspecified::*mp) { return mp; // CHECK: define zeroext i1 @"\01?nullTestDataUnspecified@@YA_NPQUnspecified@@H@Z"{{.*}} { -// CHECK: %{{.*}} = load { i32, i32, i32 }* %{{.*}}, align 8 +// CHECK: %{{.*}} = load { i32, i32, i32 }, { i32, i32, i32 }* %{{.*}}, align 8 // CHECK: store { i32, i32, i32 } {{.*}} align 8 -// CHECK: %[[mp:.*]] = load { i32, i32, i32 }* %{{.*}}, align 8 +// CHECK: %[[mp:.*]] = load { i32, i32, i32 }, { i32, i32, i32 }* %{{.*}}, align 8 // CHECK: %[[mp0:.*]] = extractvalue { i32, i32, i32 } %[[mp]], 0 // CHECK: %[[cmp0:.*]] = icmp ne i32 %[[mp0]], 0 // CHECK: %[[mp1:.*]] = extractvalue { i32, i32, i32 } %[[mp]], 1 @@ -265,9 +265,9 @@ bool nullTestDataUnspecified(int Unspecified::*mp) { bool nullTestFunctionUnspecified(void (Unspecified::*mp)()) { return mp; // CHECK: define zeroext i1 @"\01?nullTestFunctionUnspecified@@YA_NP8Unspecified@@AEXXZ@Z"{{.*}} { -// CHECK: %{{.*}} = load { i8*, i32, i32, i32 }* %{{.*}}, align 8 +// CHECK: %{{.*}} = load { i8*, i32, i32, i32 }, { i8*, i32, i32, i32 }* %{{.*}}, align 8 // CHECK: store { i8*, i32, i32, i32 } {{.*}} align 8 -// CHECK: %[[mp:.*]] = load { i8*, i32, i32, i32 }* 
%{{.*}}, align 8 +// CHECK: %[[mp:.*]] = load { i8*, i32, i32, i32 }, { i8*, i32, i32, i32 }* %{{.*}}, align 8 // CHECK: %[[mp0:.*]] = extractvalue { i8*, i32, i32, i32 } %[[mp]], 0 // CHECK: %[[cmp0:.*]] = icmp ne i8* %[[mp0]], null // CHECK: ret i1 %[[cmp0]] @@ -279,21 +279,21 @@ int loadDataMemberPointerVirtual(Virtual *o, int Virtual::*memptr) { // Test that we can unpack this aggregate member pointer and load the member // data pointer. // CHECK: define i32 @"\01?loadDataMemberPointerVirtual@@YAHPAUVirtual@@PQ1@H@Z"{{.*}} { -// CHECK: %[[o:.*]] = load %{{.*}}** %{{.*}}, align 4 -// CHECK: %[[memptr:.*]] = load { i32, i32 }* %{{.*}}, align 8 +// CHECK: %[[o:.*]] = load %{{.*}}*, %{{.*}}** %{{.*}}, align 4 +// CHECK: %[[memptr:.*]] = load { i32, i32 }, { i32, i32 }* %{{.*}}, align 8 // CHECK: %[[memptr0:.*]] = extractvalue { i32, i32 } %[[memptr:.*]], 0 // CHECK: %[[memptr1:.*]] = extractvalue { i32, i32 } %[[memptr:.*]], 1 // CHECK: %[[v6:.*]] = bitcast %{{.*}}* %[[o]] to i8* // CHECK: %[[vbptr:.*]] = getelementptr inbounds i8, i8* %[[v6]], i32 0 // CHECK: %[[vbptr_a:.*]] = bitcast i8* %[[vbptr]] to i32** -// CHECK: %[[vbtable:.*]] = load i32** %[[vbptr_a:.*]] +// CHECK: %[[vbtable:.*]] = load i32*, i32** %[[vbptr_a:.*]] // CHECK: %[[memptr1_shr:.*]] = ashr exact i32 %[[memptr1]], 2 // CHECK: %[[v7:.*]] = getelementptr inbounds i32, i32* %[[vbtable]], i32 %[[memptr1_shr]] -// CHECK: %[[vbase_offs:.*]] = load i32* %[[v7]] +// CHECK: %[[vbase_offs:.*]] = load i32, i32* %[[v7]] // CHECK: %[[v10:.*]] = getelementptr inbounds i8, i8* %[[vbptr]], i32 %[[vbase_offs]] // CHECK: %[[offset:.*]] = getelementptr inbounds i8, i8* %[[v10]], i32 %[[memptr0]] // CHECK: %[[v11:.*]] = bitcast i8* %[[offset]] to i32* -// CHECK: %[[v12:.*]] = load i32* %[[v11]] +// CHECK: %[[v12:.*]] = load i32, i32* %[[v11]] // CHECK: ret i32 %[[v12]] // CHECK: } @@ -308,8 +308,8 @@ int loadDataMemberPointerUnspecified(Unspecified *o, int Unspecified::*memptr) { // Test that we can unpack this aggregate member pointer and load the member // data pointer. 
// CHECK: define i32 @"\01?loadDataMemberPointerUnspecified@@YAHPAUUnspecified@@PQ1@H@Z"{{.*}} { -// CHECK: %[[o:.*]] = load %{{.*}}** %{{.*}}, align 4 -// CHECK: %[[memptr:.*]] = load { i32, i32, i32 }* %{{.*}}, align 8 +// CHECK: %[[o:.*]] = load %{{.*}}*, %{{.*}}** %{{.*}}, align 4 +// CHECK: %[[memptr:.*]] = load { i32, i32, i32 }, { i32, i32, i32 }* %{{.*}}, align 8 // CHECK: %[[memptr0:.*]] = extractvalue { i32, i32, i32 } %[[memptr:.*]], 0 // CHECK: %[[memptr1:.*]] = extractvalue { i32, i32, i32 } %[[memptr:.*]], 1 // CHECK: %[[memptr2:.*]] = extractvalue { i32, i32, i32 } %[[memptr:.*]], 2 @@ -320,17 +320,17 @@ int loadDataMemberPointerUnspecified(Unspecified *o, int Unspecified::*memptr) { // CHECK: [[vadjust]] // CHECK: %[[vbptr:.*]] = getelementptr inbounds i8, i8* %[[base]], i32 %[[memptr1]] // CHECK: %[[vbptr_a:.*]] = bitcast i8* %[[vbptr]] to i32** -// CHECK: %[[vbtable:.*]] = load i32** %[[vbptr_a:.*]] +// CHECK: %[[vbtable:.*]] = load i32*, i32** %[[vbptr_a:.*]] // CHECK: %[[memptr2_shr:.*]] = ashr exact i32 %[[memptr2]], 2 // CHECK: %[[v7:.*]] = getelementptr inbounds i32, i32* %[[vbtable]], i32 %[[memptr2_shr]] -// CHECK: %[[vbase_offs:.*]] = load i32* %[[v7]] +// CHECK: %[[vbase_offs:.*]] = load i32, i32* %[[v7]] // CHECK: %[[base_adj:.*]] = getelementptr inbounds i8, i8* %[[vbptr]], i32 %[[vbase_offs]] // // CHECK: [[skip]] // CHECK: %[[new_base:.*]] = phi i8* [ %[[base]], %{{.*}} ], [ %[[base_adj]], %[[vadjust]] ] // CHECK: %[[offset:.*]] = getelementptr inbounds i8, i8* %[[new_base]], i32 %[[memptr0]] // CHECK: %[[v11:.*]] = bitcast i8* %[[offset]] to i32* -// CHECK: %[[v12:.*]] = load i32* %[[v11]] +// CHECK: %[[v12:.*]] = load i32, i32* %[[v11]] // CHECK: ret i32 %[[v12]] // CHECK: } } @@ -371,10 +371,10 @@ void callMemberPointerVirtualBase(Virtual *o, void (Virtual::*memptr)()) { // CHECK: %[[memptr2:.*]] = extractvalue { i8*, i32, i32 } %{{.*}}, 2 // CHECK: %[[vbptr:.*]] = getelementptr inbounds i8, i8* %{{.*}}, i32 0 // CHECK: %[[vbptr_a:.*]] = bitcast i8* %[[vbptr]] to i32** -// CHECK: %[[vbtable:.*]] = load i32** %[[vbptr_a:.*]] +// CHECK: %[[vbtable:.*]] = load i32*, i32** %[[vbptr_a:.*]] // CHECK: %[[memptr2_shr:.*]] = ashr exact i32 %[[memptr2]], 2 // CHECK: %[[v7:.*]] = getelementptr inbounds i32, i32* %[[vbtable]], i32 %[[memptr2_shr]] -// CHECK: %[[vbase_offs:.*]] = load i32* %[[v7]] +// CHECK: %[[vbase_offs:.*]] = load i32, i32* %[[v7]] // CHECK: %[[v10:.*]] = getelementptr inbounds i8, i8* %[[vbptr]], i32 %[[vbase_offs]] // CHECK: %[[this_adjusted:.*]] = getelementptr inbounds i8, i8* %[[v10]], i32 %[[memptr1]] // CHECK: %[[fptr:.*]] = bitcast i8* %[[memptr0]] to void ({{.*}}) @@ -485,7 +485,7 @@ void (Multiple::*convertB2FuncToMultiple(void (B2::*mp)()))() { return mp; // CHECK: define i64 @"\01?convertB2FuncToMultiple@@YAP8Multiple@@AEXXZP8B2@@AEXXZ@Z"{{.*}} { // CHECK: store -// CHECK: %[[mp:.*]] = load i8** %{{.*}}, align 4 +// CHECK: %[[mp:.*]] = load i8*, i8** %{{.*}}, align 4 // CHECK: icmp ne i8* %[[mp]], null // CHECK: br i1 %{{.*}} label %{{.*}}, label %{{.*}} // @@ -509,7 +509,7 @@ void (B2::*convertMultipleFuncToB2(void (Multiple::*mp)()))() { // // CHECK: define i32 @"\01?convertMultipleFuncToB2@@YAP8B2@@AEXXZP8Multiple@@AEXXZ@Z"{{.*}} { // CHECK: store -// CHECK: %[[src:.*]] = load { i8*, i32 }* %{{.*}}, align 8 +// CHECK: %[[src:.*]] = load { i8*, i32 }, { i8*, i32 }* %{{.*}}, align 8 // CHECK: extractvalue { i8*, i32 } %[[src]], 0 // CHECK: icmp ne i8* %{{.*}}, null // CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}} @@ -534,7 +534,7 
@@ void (D::*convertCToD(void (C::*mp)()))() {
return mp;
// CHECK: define void @"\01?convertCToD@Test1@@YAP8D@1@AEXXZP8C@1@AEXXZ@Z"{{.*}} {
// CHECK: store
-// CHECK: load { i8*, i32, i32 }* %{{.*}}, align 8
+// CHECK: load { i8*, i32, i32 }, { i8*, i32, i32 }* %{{.*}}, align 8
// CHECK: extractvalue { i8*, i32, i32 } %{{.*}}, 0
// CHECK: icmp ne i8* %{{.*}}, null
// CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
@@ -577,7 +577,7 @@ int A::*reinterpret(int B::*mp) {
int A::*reinterpret(int C::*mp) {
return reinterpret_cast<int A::*>(mp);
// CHECK: define i32 @"\01?reinterpret@Test2@@YAPQA@1@HPQC@1@H@Z"{{.*}} {
-// CHECK: %[[mp:.*]] = load i32*
+// CHECK: %[[mp:.*]] = load i32, i32*
// CHECK: %[[cmp:.*]] = icmp ne i32 %[[mp]], 0
// CHECK: select i1 %[[cmp]], i32 %[[mp]], i32 -1
// CHECK: }
@@ -596,8 +596,8 @@ struct A {
int *load_data(A *a, int A::*mp) {
return &(a->*mp);
// CHECK-LABEL: define i32* @"\01?load_data@Test3@@YAPAHPAUA@1@PQ21@H@Z"{{.*}} {
-// CHECK: %[[a:.*]] = load %"struct.Test3::A"** %{{.*}}, align 4
-// CHECK: %[[mp:.*]] = load i32* %{{.*}}, align 4
+// CHECK: %[[a:.*]] = load %"struct.Test3::A"*, %"struct.Test3::A"** %{{.*}}, align 4
+// CHECK: %[[mp:.*]] = load i32, i32* %{{.*}}, align 4
// CHECK: %[[a_i8:.*]] = bitcast %"struct.Test3::A"* %[[a]] to i8*
// CHECK: getelementptr inbounds i8, i8* %[[a_i8]], i32 %[[mp]]
// CHECK: }
@@ -620,7 +620,7 @@ void (C::*getmp())() {
// CHECK-LABEL: define linkonce_odr x86_thiscallcc void @"\01??_9C@Test4@@$BA@AE"(%"struct.Test4::C"* %this, ...) {{.*}} comdat
// CHECK-NOT: getelementptr
-// CHECK: load void (%"struct.Test4::C"*, ...)*** %{{.*}}
+// CHECK: load void (%"struct.Test4::C"*, ...)**, void (%"struct.Test4::C"*, ...)*** %{{.*}}
// CHECK: getelementptr inbounds void (%"struct.Test4::C"*, ...)*, void (%"struct.Test4::C"*, ...)** %{{.*}}, i64 0
// CHECK-NOT: getelementptr
// CHECK: musttail call x86_thiscallcc void (%"struct.Test4::C"*, ...)* %
diff --git a/test/CodeGenCXX/microsoft-abi-multiple-nonvirtual-inheritance.cpp b/test/CodeGenCXX/microsoft-abi-multiple-nonvirtual-inheritance.cpp
index 60b40ae7e0..34cb85ec28 100644
--- a/test/CodeGenCXX/microsoft-abi-multiple-nonvirtual-inheritance.cpp
+++ b/test/CodeGenCXX/microsoft-abi-multiple-nonvirtual-inheritance.cpp
@@ -26,9 +26,9 @@ void call_left_no_override(ChildNoOverride *child) {
// Only need to cast 'this' to Left*.
// CHECK: %[[LEFT:.*]] = bitcast %struct.ChildNoOverride* %[[CHILD]] to %struct.Left* // CHECK: %[[VFPTR:.*]] = bitcast %struct.Left* %[[LEFT]] to void (%struct.Left*)*** -// CHECK: %[[VFTABLE:.*]] = load void (%struct.Left*)*** %[[VFPTR]] +// CHECK: %[[VFTABLE:.*]] = load void (%struct.Left*)**, void (%struct.Left*)*** %[[VFPTR]] // CHECK: %[[VFUN:.*]] = getelementptr inbounds void (%struct.Left*)*, void (%struct.Left*)** %[[VFTABLE]], i64 0 -// CHECK: %[[VFUN_VALUE:.*]] = load void (%struct.Left*)** %[[VFUN]] +// CHECK: %[[VFUN_VALUE:.*]] = load void (%struct.Left*)*, void (%struct.Left*)** %[[VFUN]] // CHECK: call x86_thiscallcc void %[[VFUN_VALUE]](%struct.Left* %[[LEFT]]) // CHECK: ret } @@ -41,7 +41,7 @@ void ChildOverride::left() { // CHECK: store %struct.ChildOverride* %[[THIS]], %struct.ChildOverride** %[[THIS_ADDR]], align 4 foo(this); -// CHECK: %[[THIS:.*]] = load %struct.ChildOverride** %[[THIS_ADDR]] +// CHECK: %[[THIS:.*]] = load %struct.ChildOverride*, %struct.ChildOverride** %[[THIS_ADDR]] // CHECK: %[[THIS_i8:.*]] = bitcast %struct.ChildOverride* %[[THIS]] to i8* // CHECK: call void @foo(i8* %[[THIS_i8]]) // CHECK: ret @@ -53,9 +53,9 @@ void call_left_override(ChildOverride *child) { child->left(); // CHECK: %[[VFPTR:.*]] = bitcast %struct.ChildOverride* %[[CHILD]] to void (%struct.ChildOverride*)*** -// CHECK: %[[VFTABLE:.*]] = load void (%struct.ChildOverride*)*** %[[VFPTR]] +// CHECK: %[[VFTABLE:.*]] = load void (%struct.ChildOverride*)**, void (%struct.ChildOverride*)*** %[[VFPTR]] // CHECK: %[[VFUN:.*]] = getelementptr inbounds void (%struct.ChildOverride*)*, void (%struct.ChildOverride*)** %[[VFTABLE]], i64 0 -// CHECK: %[[VFUN_VALUE:.*]] = load void (%struct.ChildOverride*)** %[[VFUN]] +// CHECK: %[[VFUN_VALUE:.*]] = load void (%struct.ChildOverride*)*, void (%struct.ChildOverride*)** %[[VFUN]] // // CHECK: call x86_thiscallcc void %[[VFUN_VALUE]](%struct.ChildOverride* %[[CHILD]]) // CHECK: ret @@ -74,9 +74,9 @@ void call_right_no_override(ChildNoOverride *child) { // CHECK: %[[RIGHT:.*]] = bitcast i8* %[[RIGHT_i8]] to %struct.Right* // // CHECK: %[[VFPTR:.*]] = bitcast %struct.Right* %[[RIGHT]] to void (%struct.Right*)*** -// CHECK: %[[VFTABLE:.*]] = load void (%struct.Right*)*** %[[VFPTR]] +// CHECK: %[[VFTABLE:.*]] = load void (%struct.Right*)**, void (%struct.Right*)*** %[[VFPTR]] // CHECK: %[[VFUN:.*]] = getelementptr inbounds void (%struct.Right*)*, void (%struct.Right*)** %[[VFTABLE]], i64 0 -// CHECK: %[[VFUN_VALUE:.*]] = load void (%struct.Right*)** %[[VFUN]] +// CHECK: %[[VFUN_VALUE:.*]] = load void (%struct.Right*)*, void (%struct.Right*)** %[[VFUN]] // CHECK: call x86_thiscallcc void %[[VFUN_VALUE]](%struct.Right* %[[RIGHT]]) // CHECK: ret } @@ -93,7 +93,7 @@ void ChildOverride::right() { // CHECK: store %struct.ChildOverride* %[[THIS]], %struct.ChildOverride** %[[THIS_ADDR]], align 4 foo(this); -// CHECK: %[[THIS:.*]] = load %struct.ChildOverride** %[[THIS_ADDR]] +// CHECK: %[[THIS:.*]] = load %struct.ChildOverride*, %struct.ChildOverride** %[[THIS_ADDR]] // CHECK: %[[THIS_PARAM:.*]] = bitcast %struct.ChildOverride* %[[THIS]] to i8* // CHECK: call void @foo(i8* %[[THIS_PARAM]]) // CHECK: ret @@ -111,9 +111,9 @@ void call_right_override(ChildOverride *child) { // // CHECK: %[[VFPTR_i8:.*]] = getelementptr inbounds i8, i8* %[[CHILD_i8]], i32 4 // CHECK: %[[VFPTR:.*]] = bitcast i8* %[[VFPTR_i8]] to void (i8*)*** -// CHECK: %[[VFTABLE:.*]] = load void (i8*)*** %[[VFPTR]] +// CHECK: %[[VFTABLE:.*]] = load void (i8*)**, void (i8*)*** %[[VFPTR]] // CHECK: 
%[[VFUN:.*]] = getelementptr inbounds void (i8*)*, void (i8*)** %[[VFTABLE]], i64 0 -// CHECK: %[[VFUN_VALUE:.*]] = load void (i8*)** %[[VFUN]] +// CHECK: %[[VFUN_VALUE:.*]] = load void (i8*)*, void (i8*)** %[[VFUN]] // // CHECK: %[[CHILD_i8:.*]] = bitcast %struct.ChildOverride* %[[CHILD]] to i8* // CHECK: %[[RIGHT:.*]] = getelementptr inbounds i8, i8* %[[CHILD_i8]], i32 4 @@ -135,7 +135,7 @@ void GrandchildOverride::right() { // CHECK: store %struct.GrandchildOverride* %[[THIS]], %struct.GrandchildOverride** %[[THIS_ADDR]], align 4 foo(this); -// CHECK: %[[THIS:.*]] = load %struct.GrandchildOverride** %[[THIS_ADDR]] +// CHECK: %[[THIS:.*]] = load %struct.GrandchildOverride*, %struct.GrandchildOverride** %[[THIS_ADDR]] // CHECK: %[[THIS_PARAM:.*]] = bitcast %struct.GrandchildOverride* %[[THIS]] to i8* // CHECK: call void @foo(i8* %[[THIS_PARAM]]) // CHECK: ret @@ -161,7 +161,7 @@ void emit_ctors() { ChildOverride co; // CHECK: define {{.*}} @"\01??0ChildOverride@@QAE@XZ" - // CHECK: %[[THIS:.*]] = load %struct.ChildOverride** + // CHECK: %[[THIS:.*]] = load %struct.ChildOverride*, %struct.ChildOverride** // CHECK: %[[VFPTR:.*]] = bitcast %struct.ChildOverride* %[[THIS]] to i32 (...)*** // CHECK: store i32 (...)** bitcast ([1 x i8*]* @"\01??_7ChildOverride@@6BLeft@@@" to i32 (...)**), i32 (...)*** %[[VFPTR]] // CHECK: %[[THIS_i8:.*]] = bitcast %struct.ChildOverride* %[[THIS]] to i8* @@ -172,7 +172,7 @@ void emit_ctors() { GrandchildOverride gc; // CHECK: define {{.*}} @"\01??0GrandchildOverride@@QAE@XZ" - // CHECK: %[[THIS:.*]] = load %struct.GrandchildOverride** + // CHECK: %[[THIS:.*]] = load %struct.GrandchildOverride*, %struct.GrandchildOverride** // CHECK: %[[VFPTR:.*]] = bitcast %struct.GrandchildOverride* %[[THIS]] to i32 (...)*** // CHECK: store i32 (...)** bitcast ([1 x i8*]* @"\01??_7GrandchildOverride@@6BLeft@@@" to i32 (...)**), i32 (...)*** %[[VFPTR]] // CHECK: %[[THIS_i8:.*]] = bitcast %struct.GrandchildOverride* %[[THIS]] to i8* diff --git a/test/CodeGenCXX/microsoft-abi-sret-and-byval.cpp b/test/CodeGenCXX/microsoft-abi-sret-and-byval.cpp index dc90cd15d6..a4eaa1cfbc 100644 --- a/test/CodeGenCXX/microsoft-abi-sret-and-byval.cpp +++ b/test/CodeGenCXX/microsoft-abi-sret-and-byval.cpp @@ -338,7 +338,7 @@ void fn2(FnPtr1 a, SmallWithDtor b) { fn1(a, b); }; // WIN32: %[[gep1:[^ ]*]] = getelementptr inbounds [[argmem_ty]], [[argmem_ty]]* %[[argmem]], i32 0, i32 1 // WIN32: %[[bc1:[^ ]*]] = bitcast %struct.SmallWithDtor* %[[gep1]] to i8* // WIN32: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[bc1]], i8* {{.*}}, i32 4, i32 4, i1 false) -// WIN32: %[[a2:[^ ]*]] = load void [[dst_ty]]* %[[a1]], align 4 +// WIN32: %[[a2:[^ ]*]] = load void [[dst_ty]], void [[dst_ty]]* %[[a1]], align 4 // WIN32: %[[gep2:[^ ]*]] = getelementptr inbounds [[argmem_ty]], [[argmem_ty]]* %[[argmem]], i32 0, i32 0 // WIN32: %[[addr:[^ ]*]] = bitcast {}** %[[gep2]] to void [[dst_ty]]* // WIN32: store void [[dst_ty]] %[[a2]], void [[dst_ty]]* %[[addr]], align 4 diff --git a/test/CodeGenCXX/microsoft-abi-static-initializers.cpp b/test/CodeGenCXX/microsoft-abi-static-initializers.cpp index 08735926ff..e57b83081a 100644 --- a/test/CodeGenCXX/microsoft-abi-static-initializers.cpp +++ b/test/CodeGenCXX/microsoft-abi-static-initializers.cpp @@ -52,7 +52,7 @@ void StaticLocal() { } // CHECK-LABEL: define void @"\01?StaticLocal@@YAXXZ"() -// CHECK: load i32* @"\01?$S1@?1??StaticLocal@@YAXXZ@4IA" +// CHECK: load i32, i32* @"\01?$S1@?1??StaticLocal@@YAXXZ@4IA" // CHECK: store i32 {{.*}}, i32* 
@"\01?$S1@?1??StaticLocal@@YAXXZ@4IA" // CHECK: ret @@ -94,7 +94,7 @@ void MultipleStatics() { static S S35; } // CHECK-LABEL: define void @"\01?MultipleStatics@@YAXXZ"() -// CHECK: load i32* @"\01?$S1@?1??MultipleStatics@@YAXXZ@4IA" +// CHECK: load i32, i32* @"\01?$S1@?1??MultipleStatics@@YAXXZ@4IA" // CHECK: and i32 {{.*}}, 1 // CHECK: and i32 {{.*}}, 2 // CHECK: and i32 {{.*}}, 4 @@ -102,7 +102,7 @@ void MultipleStatics() { // CHECK: and i32 {{.*}}, 16 // ... // CHECK: and i32 {{.*}}, -2147483648 -// CHECK: load i32* @"\01?$S1@?1??MultipleStatics@@YAXXZ@4IA1" +// CHECK: load i32, i32* @"\01?$S1@?1??MultipleStatics@@YAXXZ@4IA1" // CHECK: and i32 {{.*}}, 1 // CHECK: and i32 {{.*}}, 2 // CHECK: and i32 {{.*}}, 4 @@ -144,7 +144,7 @@ inline S &getS() { } // CHECK-LABEL: define linkonce_odr dereferenceable({{[0-9]+}}) %struct.S* @"\01?getS@@YAAAUS@@XZ"() {{.*}} comdat -// CHECK: load i32* @"\01??_B?1??getS@@YAAAUS@@XZ@51" +// CHECK: load i32, i32* @"\01??_B?1??getS@@YAAAUS@@XZ@51" // CHECK: and i32 {{.*}}, 1 // CHECK: icmp ne i32 {{.*}}, 0 // CHECK: br i1 diff --git a/test/CodeGenCXX/microsoft-abi-structors.cpp b/test/CodeGenCXX/microsoft-abi-structors.cpp index 9f953c36b5..27f031d732 100644 --- a/test/CodeGenCXX/microsoft-abi-structors.cpp +++ b/test/CodeGenCXX/microsoft-abi-structors.cpp @@ -22,7 +22,7 @@ void no_constructor_destructor_infinite_recursion() { // CHECK: define linkonce_odr x86_thiscallcc %"class.basic::A"* @"\01??0A@basic@@QAE@XZ"(%"class.basic::A"* returned %this) {{.*}} comdat {{.*}} { // CHECK: [[THIS_ADDR:%[.0-9A-Z_a-z]+]] = alloca %"class.basic::A"*, align 4 // CHECK-NEXT: store %"class.basic::A"* %this, %"class.basic::A"** [[THIS_ADDR]], align 4 -// CHECK-NEXT: [[T1:%[.0-9A-Z_a-z]+]] = load %"class.basic::A"** [[THIS_ADDR]] +// CHECK-NEXT: [[T1:%[.0-9A-Z_a-z]+]] = load %"class.basic::A"*, %"class.basic::A"** [[THIS_ADDR]] // CHECK-NEXT: ret %"class.basic::A"* [[T1]] // CHECK-NEXT: } } @@ -49,7 +49,7 @@ struct C { // DTORS: define linkonce_odr x86_thiscallcc i8* @"\01??_GC@basic@@UAEPAXI@Z"(%"struct.basic::C"* %this, i32 %should_call_delete) {{.*}} comdat {{.*}} { // DTORS: store i32 %should_call_delete, i32* %[[SHOULD_DELETE_VAR:[0-9a-z._]+]], align 4 // DTORS: store i8* %{{.*}}, i8** %[[RETVAL:[0-9a-z._]+]] -// DTORS: %[[SHOULD_DELETE_VALUE:[0-9a-z._]+]] = load i32* %[[SHOULD_DELETE_VAR]] +// DTORS: %[[SHOULD_DELETE_VALUE:[0-9a-z._]+]] = load i32, i32* %[[SHOULD_DELETE_VAR]] // DTORS: call x86_thiscallcc void @"\01??1C@basic@@UAE@XZ"(%"struct.basic::C"* %[[THIS:[0-9a-z]+]]) // DTORS-NEXT: %[[CONDITION:[0-9]+]] = icmp eq i32 %[[SHOULD_DELETE_VALUE]], 0 // DTORS-NEXT: br i1 %[[CONDITION]], label %[[CONTINUE_LABEL:[0-9a-z._]+]], label %[[CALL_DELETE_LABEL:[0-9a-z._]+]] @@ -60,7 +60,7 @@ struct C { // DTORS-NEXT: br label %[[CONTINUE_LABEL]] // // DTORS: [[CONTINUE_LABEL]] -// DTORS-NEXT: %[[RET:.*]] = load i8** %[[RETVAL]] +// DTORS-NEXT: %[[RET:.*]] = load i8*, i8** %[[RETVAL]] // DTORS-NEXT: ret i8* %[[RET]] // Check that we do the mangling correctly on x64. 
@@ -82,11 +82,11 @@ void check_vftable_offset() { void call_complete_dtor(C *obj_ptr) { // CHECK: define void @"\01?call_complete_dtor@basic@@YAXPAUC@1@@Z"(%"struct.basic::C"* %obj_ptr) obj_ptr->~C(); -// CHECK: %[[OBJ_PTR_VALUE:.*]] = load %"struct.basic::C"** %{{.*}}, align 4 +// CHECK: %[[OBJ_PTR_VALUE:.*]] = load %"struct.basic::C"*, %"struct.basic::C"** %{{.*}}, align 4 // CHECK-NEXT: %[[PVTABLE:.*]] = bitcast %"struct.basic::C"* %[[OBJ_PTR_VALUE]] to i8* (%"struct.basic::C"*, i32)*** -// CHECK-NEXT: %[[VTABLE:.*]] = load i8* (%"struct.basic::C"*, i32)*** %[[PVTABLE]] +// CHECK-NEXT: %[[VTABLE:.*]] = load i8* (%"struct.basic::C"*, i32)**, i8* (%"struct.basic::C"*, i32)*** %[[PVTABLE]] // CHECK-NEXT: %[[PVDTOR:.*]] = getelementptr inbounds i8* (%"struct.basic::C"*, i32)*, i8* (%"struct.basic::C"*, i32)** %[[VTABLE]], i64 0 -// CHECK-NEXT: %[[VDTOR:.*]] = load i8* (%"struct.basic::C"*, i32)** %[[PVDTOR]] +// CHECK-NEXT: %[[VDTOR:.*]] = load i8* (%"struct.basic::C"*, i32)*, i8* (%"struct.basic::C"*, i32)** %[[PVDTOR]] // CHECK-NEXT: call x86_thiscallcc i8* %[[VDTOR]](%"struct.basic::C"* %[[OBJ_PTR_VALUE]], i32 0) // CHECK-NEXT: ret void } @@ -94,14 +94,14 @@ void call_complete_dtor(C *obj_ptr) { void call_deleting_dtor(C *obj_ptr) { // CHECK: define void @"\01?call_deleting_dtor@basic@@YAXPAUC@1@@Z"(%"struct.basic::C"* %obj_ptr) delete obj_ptr; -// CHECK: %[[OBJ_PTR_VALUE:.*]] = load %"struct.basic::C"** %{{.*}}, align 4 +// CHECK: %[[OBJ_PTR_VALUE:.*]] = load %"struct.basic::C"*, %"struct.basic::C"** %{{.*}}, align 4 // CHECK: br i1 {{.*}}, label %[[DELETE_NULL:.*]], label %[[DELETE_NOTNULL:.*]] // CHECK: [[DELETE_NOTNULL]] // CHECK-NEXT: %[[PVTABLE:.*]] = bitcast %"struct.basic::C"* %[[OBJ_PTR_VALUE]] to i8* (%"struct.basic::C"*, i32)*** -// CHECK-NEXT: %[[VTABLE:.*]] = load i8* (%"struct.basic::C"*, i32)*** %[[PVTABLE]] +// CHECK-NEXT: %[[VTABLE:.*]] = load i8* (%"struct.basic::C"*, i32)**, i8* (%"struct.basic::C"*, i32)*** %[[PVTABLE]] // CHECK-NEXT: %[[PVDTOR:.*]] = getelementptr inbounds i8* (%"struct.basic::C"*, i32)*, i8* (%"struct.basic::C"*, i32)** %[[VTABLE]], i64 0 -// CHECK-NEXT: %[[VDTOR:.*]] = load i8* (%"struct.basic::C"*, i32)** %[[PVDTOR]] +// CHECK-NEXT: %[[VDTOR:.*]] = load i8* (%"struct.basic::C"*, i32)*, i8* (%"struct.basic::C"*, i32)** %[[PVDTOR]] // CHECK-NEXT: call x86_thiscallcc i8* %[[VDTOR]](%"struct.basic::C"* %[[OBJ_PTR_VALUE]], i32 1) // CHECK: ret void } @@ -109,14 +109,14 @@ void call_deleting_dtor(C *obj_ptr) { void call_deleting_dtor_and_global_delete(C *obj_ptr) { // CHECK: define void @"\01?call_deleting_dtor_and_global_delete@basic@@YAXPAUC@1@@Z"(%"struct.basic::C"* %obj_ptr) ::delete obj_ptr; -// CHECK: %[[OBJ_PTR_VALUE:.*]] = load %"struct.basic::C"** %{{.*}}, align 4 +// CHECK: %[[OBJ_PTR_VALUE:.*]] = load %"struct.basic::C"*, %"struct.basic::C"** %{{.*}}, align 4 // CHECK: br i1 {{.*}}, label %[[DELETE_NULL:.*]], label %[[DELETE_NOTNULL:.*]] // CHECK: [[DELETE_NOTNULL]] // CHECK-NEXT: %[[PVTABLE:.*]] = bitcast %"struct.basic::C"* %[[OBJ_PTR_VALUE]] to i8* (%"struct.basic::C"*, i32)*** -// CHECK-NEXT: %[[VTABLE:.*]] = load i8* (%"struct.basic::C"*, i32)*** %[[PVTABLE]] +// CHECK-NEXT: %[[VTABLE:.*]] = load i8* (%"struct.basic::C"*, i32)**, i8* (%"struct.basic::C"*, i32)*** %[[PVTABLE]] // CHECK-NEXT: %[[PVDTOR:.*]] = getelementptr inbounds i8* (%"struct.basic::C"*, i32)*, i8* (%"struct.basic::C"*, i32)** %[[VTABLE]], i64 0 -// CHECK-NEXT: %[[VDTOR:.*]] = load i8* (%"struct.basic::C"*, i32)** %[[PVDTOR]] +// CHECK-NEXT: %[[VDTOR:.*]] = load i8* 
(%"struct.basic::C"*, i32)*, i8* (%"struct.basic::C"*, i32)** %[[PVDTOR]] // CHECK-NEXT: %[[CALL:.*]] = call x86_thiscallcc i8* %[[VDTOR]](%"struct.basic::C"* %[[OBJ_PTR_VALUE]], i32 0) // CHECK-NEXT: call void @"\01??3@YAXPAX@Z"(i8* %[[CALL]]) // CHECK: ret void @@ -158,7 +158,7 @@ C::~C() { // CHECK: (%"struct.dtor_in_second_nvbase::C"* %this) // No this adjustment! // CHECK-NOT: getelementptr -// CHECK: load %"struct.dtor_in_second_nvbase::C"** %{{.*}} +// CHECK: load %"struct.dtor_in_second_nvbase::C"*, %"struct.dtor_in_second_nvbase::C"** %{{.*}} // Now we this-adjust before calling ~B. // CHECK: bitcast %"struct.dtor_in_second_nvbase::C"* %{{.*}} to i8* // CHECK: getelementptr inbounds i8, i8* %{{.*}}, i64 4 @@ -240,7 +240,7 @@ C::C() { // CHECK: define x86_thiscallcc %"struct.constructors::C"* @"\01??0C@constructors@@QAE@XZ"(%"struct.constructors::C"* returned %this, i32 %is_most_derived) // TODO: make sure this works in the Release build too; // CHECK: store i32 %is_most_derived, i32* %[[IS_MOST_DERIVED_VAR:.*]], align 4 - // CHECK: %[[IS_MOST_DERIVED_VAL:.*]] = load i32* %[[IS_MOST_DERIVED_VAR]] + // CHECK: %[[IS_MOST_DERIVED_VAL:.*]] = load i32, i32* %[[IS_MOST_DERIVED_VAR]] // CHECK: %[[SHOULD_CALL_VBASE_CTORS:.*]] = icmp ne i32 %[[IS_MOST_DERIVED_VAL]], 0 // CHECK: br i1 %[[SHOULD_CALL_VBASE_CTORS]], label %[[INIT_VBASES:.*]], label %[[SKIP_VBASES:.*]] // @@ -275,7 +275,7 @@ struct D : C { D::D() { // CHECK: define x86_thiscallcc %"struct.constructors::D"* @"\01??0D@constructors@@QAE@XZ"(%"struct.constructors::D"* returned %this, i32 %is_most_derived) unnamed_addr // CHECK: store i32 %is_most_derived, i32* %[[IS_MOST_DERIVED_VAR:.*]], align 4 - // CHECK: %[[IS_MOST_DERIVED_VAL:.*]] = load i32* %[[IS_MOST_DERIVED_VAR]] + // CHECK: %[[IS_MOST_DERIVED_VAL:.*]] = load i32, i32* %[[IS_MOST_DERIVED_VAR]] // CHECK: %[[SHOULD_CALL_VBASE_CTORS:.*]] = icmp ne i32 %[[IS_MOST_DERIVED_VAL]], 0 // CHECK: br i1 %[[SHOULD_CALL_VBASE_CTORS]], label %[[INIT_VBASES:.*]], label %[[SKIP_VBASES:.*]] // @@ -302,7 +302,7 @@ struct E : virtual C { E::E() { // CHECK: define x86_thiscallcc %"struct.constructors::E"* @"\01??0E@constructors@@QAE@XZ"(%"struct.constructors::E"* returned %this, i32 %is_most_derived) unnamed_addr // CHECK: store i32 %is_most_derived, i32* %[[IS_MOST_DERIVED_VAR:.*]], align 4 - // CHECK: %[[IS_MOST_DERIVED_VAL:.*]] = load i32* %[[IS_MOST_DERIVED_VAR]] + // CHECK: %[[IS_MOST_DERIVED_VAL:.*]] = load i32, i32* %[[IS_MOST_DERIVED_VAR]] // CHECK: %[[SHOULD_CALL_VBASE_CTORS:.*]] = icmp ne i32 %[[IS_MOST_DERIVED_VAL]], 0 // CHECK: br i1 %[[SHOULD_CALL_VBASE_CTORS]], label %[[INIT_VBASES:.*]], label %[[SKIP_VBASES:.*]] // diff --git a/test/CodeGenCXX/microsoft-abi-thunks.cpp b/test/CodeGenCXX/microsoft-abi-thunks.cpp index 7b5eb9f4e8..8cbea5c4de 100644 --- a/test/CodeGenCXX/microsoft-abi-thunks.cpp +++ b/test/CodeGenCXX/microsoft-abi-thunks.cpp @@ -129,9 +129,9 @@ I::I() {} // Emits vftable and forces thunk generation. 
// CODEGEN: %[[ORIG_RET_i8:.*]] = bitcast %struct.F* %[[ORIG_RET]] to i8* // CODEGEN: %[[VBPTR_i8:.*]] = getelementptr inbounds i8, i8* %[[ORIG_RET_i8]], i32 4 // CODEGEN: %[[VBPTR:.*]] = bitcast i8* %[[VBPTR_i8]] to i32** -// CODEGEN: %[[VBTABLE:.*]] = load i32** %[[VBPTR]] +// CODEGEN: %[[VBTABLE:.*]] = load i32*, i32** %[[VBPTR]] // CODEGEN: %[[VBASE_OFFSET_PTR:.*]] = getelementptr inbounds i32, i32* %[[VBTABLE]], i32 2 -// CODEGEN: %[[VBASE_OFFSET:.*]] = load i32* %[[VBASE_OFFSET_PTR]] +// CODEGEN: %[[VBASE_OFFSET:.*]] = load i32, i32* %[[VBASE_OFFSET_PTR]] // CODEGEN: %[[RES_i8:.*]] = getelementptr inbounds i8, i8* %[[VBPTR_i8]], i32 %[[VBASE_OFFSET]] // CODEGEN: %[[RES:.*]] = bitcast i8* %[[RES_i8]] to %struct.F* // CODEGEN: phi %struct.F* {{.*}} %[[RES]] diff --git a/test/CodeGenCXX/microsoft-abi-typeid.cpp b/test/CodeGenCXX/microsoft-abi-typeid.cpp index ed8cd66b46..60c31ab470 100644 --- a/test/CodeGenCXX/microsoft-abi-typeid.cpp +++ b/test/CodeGenCXX/microsoft-abi-typeid.cpp @@ -32,9 +32,9 @@ const std::type_info* test3_typeid() { return &typeid(*fn()); } // CHECK-NEXT: unreachable // CHECK: [[THIS:%.*]] = bitcast %struct.A* [[CALL]] to i8* // CHECK-NEXT: [[VBTBLP:%.*]] = getelementptr inbounds %struct.A, %struct.A* [[CALL]], i32 0, i32 0 -// CHECK-NEXT: [[VBTBL:%.*]] = load i32** [[VBTBLP]], align 4 +// CHECK-NEXT: [[VBTBL:%.*]] = load i32*, i32** [[VBTBLP]], align 4 // CHECK-NEXT: [[VBSLOT:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1 -// CHECK-NEXT: [[VBASE_OFFS:%.*]] = load i32* [[VBSLOT]], align 4 +// CHECK-NEXT: [[VBASE_OFFS:%.*]] = load i32, i32* [[VBSLOT]], align 4 // CHECK-NEXT: [[ADJ:%.*]] = getelementptr inbounds i8, i8* [[THIS]], i32 [[VBASE_OFFS]] // CHECK-NEXT: [[RT:%.*]] = tail call i8* @__RTtypeid(i8* [[ADJ]]) // CHECK-NEXT: [[RET:%.*]] = bitcast i8* [[RT]] to %struct.type_info* diff --git a/test/CodeGenCXX/microsoft-abi-virtual-inheritance-vtordisps.cpp b/test/CodeGenCXX/microsoft-abi-virtual-inheritance-vtordisps.cpp index 8de65bf03b..204da8db15 100644 --- a/test/CodeGenCXX/microsoft-abi-virtual-inheritance-vtordisps.cpp +++ b/test/CodeGenCXX/microsoft-abi-virtual-inheritance-vtordisps.cpp @@ -24,22 +24,22 @@ struct D : virtual C { D::D() {} // Forces vftable emission. 
// CHECK-LABEL: define linkonce_odr x86_thiscallcc void @"\01?f@D@@$4PPPPPPPM@A@AEXXZ" -// CHECK: %[[ECX:.*]] = load %struct.D** %{{.*}} +// CHECK: %[[ECX:.*]] = load %struct.D*, %struct.D** %{{.*}} // CHECK: %[[ECX_i8:.*]] = bitcast %struct.D* %[[ECX]] to i8* // CHECK: %[[VTORDISP_PTR_i8:.*]] = getelementptr i8, i8* %[[ECX_i8]], i32 -4 // CHECK: %[[VTORDISP_PTR:.*]] = bitcast i8* %[[VTORDISP_PTR_i8]] to i32* -// CHECK: %[[VTORDISP:.*]] = load i32* %[[VTORDISP_PTR]] +// CHECK: %[[VTORDISP:.*]] = load i32, i32* %[[VTORDISP_PTR]] // CHECK: %[[VTORDISP_NEG:.*]] = sub i32 0, %[[VTORDISP]] // CHECK: %[[ADJUSTED_i8:.*]] = getelementptr i8, i8* %[[ECX_i8]], i32 %[[VTORDISP_NEG]] // CHECK: call x86_thiscallcc void @"\01?f@D@@UAEXXZ"(i8* %[[ADJUSTED_i8]]) // CHECK: ret void // CHECK-LABEL: define linkonce_odr x86_thiscallcc void @"\01?f@D@@$4PPPPPPPI@3AEXXZ" -// CHECK: %[[ECX:.*]] = load %struct.D** %{{.*}} +// CHECK: %[[ECX:.*]] = load %struct.D*, %struct.D** %{{.*}} // CHECK: %[[ECX_i8:.*]] = bitcast %struct.D* %[[ECX]] to i8* // CHECK: %[[VTORDISP_PTR_i8:.*]] = getelementptr i8, i8* %[[ECX_i8]], i32 -8 // CHECK: %[[VTORDISP_PTR:.*]] = bitcast i8* %[[VTORDISP_PTR_i8]] to i32* -// CHECK: %[[VTORDISP:.*]] = load i32* %[[VTORDISP_PTR]] +// CHECK: %[[VTORDISP:.*]] = load i32, i32* %[[VTORDISP_PTR]] // CHECK: %[[VTORDISP_NEG:.*]] = sub i32 0, %[[VTORDISP]] // CHECK: %[[VTORDISP_ADJUSTED_i8:.*]] = getelementptr i8, i8* %[[ECX_i8]], i32 %[[VTORDISP_NEG]] // CHECK: %[[ADJUSTED_i8:.*]] = getelementptr i8, i8* %[[VTORDISP_ADJUSTED_i8]], i32 -4 @@ -64,18 +64,18 @@ struct G : virtual F, virtual E { G::G() {} // Forces vftable emission. // CHECK-LABEL: define linkonce_odr x86_thiscallcc void @"\01?f@E@@$R4BA@M@PPPPPPPM@7AEXXZ"(i8*) -// CHECK: %[[ECX:.*]] = load %struct.E** %{{.*}} +// CHECK: %[[ECX:.*]] = load %struct.E*, %struct.E** %{{.*}} // CHECK: %[[ECX_i8:.*]] = bitcast %struct.E* %[[ECX]] to i8* // CHECK: %[[VTORDISP_PTR_i8:.*]] = getelementptr i8, i8* %[[ECX_i8]], i32 -4 // CHECK: %[[VTORDISP_PTR:.*]] = bitcast i8* %[[VTORDISP_PTR_i8]] to i32* -// CHECK: %[[VTORDISP:.*]] = load i32* %[[VTORDISP_PTR]] +// CHECK: %[[VTORDISP:.*]] = load i32, i32* %[[VTORDISP_PTR]] // CHECK: %[[VTORDISP_NEG:.*]] = sub i32 0, %[[VTORDISP]] // CHECK: %[[VTORDISP_ADJUSTED_i8:.*]] = getelementptr i8, i8* %[[ECX_i8]], i32 %[[VTORDISP_NEG]] // CHECK: %[[VBPTR_i8:.*]] = getelementptr inbounds i8, i8* %[[VTORDISP_ADJUSTED_i8]], i32 -16 // CHECK: %[[VBPTR:.*]] = bitcast i8* %[[VBPTR_i8]] to i32** -// CHECK: %[[VBTABLE:.*]] = load i32** %[[VBPTR]] +// CHECK: %[[VBTABLE:.*]] = load i32*, i32** %[[VBPTR]] // CHECK: %[[VBOFFSET_PTR:.*]] = getelementptr inbounds i32, i32* %[[VBTABLE]], i32 3 -// CHECK: %[[VBASE_OFFSET:.*]] = load i32* %[[VBOFFSET_PTR]] +// CHECK: %[[VBASE_OFFSET:.*]] = load i32, i32* %[[VBOFFSET_PTR]] // CHECK: %[[VBASE:.*]] = getelementptr inbounds i8, i8* %[[VBPTR_i8]], i32 %[[VBASE_OFFSET]] // CHECK: %[[ARG_i8:.*]] = getelementptr i8, i8* %[[VBASE]], i32 8 // CHECK: call x86_thiscallcc void @"\01?f@E@@UAEXXZ"(i8* %[[ARG_i8]]) diff --git a/test/CodeGenCXX/microsoft-abi-virtual-inheritance.cpp b/test/CodeGenCXX/microsoft-abi-virtual-inheritance.cpp index 666d793e1a..b868d1f0b5 100644 --- a/test/CodeGenCXX/microsoft-abi-virtual-inheritance.cpp +++ b/test/CodeGenCXX/microsoft-abi-virtual-inheritance.cpp @@ -21,7 +21,7 @@ struct B : virtual VBase { B::B() { // CHECK-LABEL: define x86_thiscallcc %struct.B* @"\01??0B@@QAE@XZ" - // CHECK: %[[THIS:.*]] = load %struct.B** + // CHECK: %[[THIS:.*]] = load %struct.B*, 
%struct.B** // CHECK: br i1 %{{.*}}, label %[[INIT_VBASES:.*]], label %[[SKIP_VBASES:.*]] // Don't check the INIT_VBASES case as it's covered by the ctor tests. @@ -57,7 +57,7 @@ B::~B() { // CHECK: %[[THIS_i8:.*]] = getelementptr inbounds i8, i8* %[[THIS_PARAM_i8]], i32 -8 // CHECK: %[[THIS:.*]] = bitcast i8* %[[THIS_i8]] to %struct.B* // CHECK: store %struct.B* %[[THIS]], %struct.B** %[[THIS_ADDR:.*]], align 4 - // CHECK: %[[THIS:.*]] = load %struct.B** %[[THIS_ADDR]] + // CHECK: %[[THIS:.*]] = load %struct.B*, %struct.B** %[[THIS_ADDR]] // Restore the vfptr that could have been changed by a subclass. // CHECK: %[[THIS_i8:.*]] = bitcast %struct.B* %[[THIS]] to i8* @@ -85,7 +85,7 @@ B::~B() { // CHECK: ret // CHECK2-LABEL: define linkonce_odr x86_thiscallcc void @"\01??_DB@@UAE@XZ"(%struct.B* - // CHECK2: %[[THIS:.*]] = load %struct.B** {{.*}} + // CHECK2: %[[THIS:.*]] = load %struct.B*, %struct.B** {{.*}} // CHECK2: %[[THIS_i8:.*]] = bitcast %struct.B* %[[THIS]] to i8* // CHECK2: %[[B_i8:.*]] = getelementptr i8, i8* %[[THIS_i8]], i32 8 // CHECK2: %[[B:.*]] = bitcast i8* %[[B_i8]] to %struct.B* @@ -101,7 +101,7 @@ B::~B() { // CHECK2: %[[THIS_i8:.*]] = getelementptr inbounds i8, i8* %[[THIS_PARAM_i8:.*]], i32 -8 // CHECK2: %[[THIS:.*]] = bitcast i8* %[[THIS_i8]] to %struct.B* // CHECK2: store %struct.B* %[[THIS]], %struct.B** %[[THIS_ADDR:.*]], align 4 - // CHECK2: %[[THIS:.*]] = load %struct.B** %[[THIS_ADDR]] + // CHECK2: %[[THIS:.*]] = load %struct.B*, %struct.B** %[[THIS_ADDR]] // CHECK2: call x86_thiscallcc void @"\01??_DB@@UAE@XZ"(%struct.B* %[[THIS]]) // ... // CHECK2: ret @@ -119,13 +119,13 @@ void B::foo() { // CHECK: store %struct.B* %[[THIS]], %struct.B** %[[THIS_ADDR]], align 4 field = 42; -// CHECK: %[[THIS:.*]] = load %struct.B** %[[THIS_ADDR]] +// CHECK: %[[THIS:.*]] = load %struct.B*, %struct.B** %[[THIS_ADDR]] // CHECK: %[[THIS8:.*]] = bitcast %struct.B* %[[THIS]] to i8* // CHECK: %[[VBPTR:.*]] = getelementptr inbounds i8, i8* %[[THIS8]], i32 0 // CHECK: %[[VBPTR8:.*]] = bitcast i8* %[[VBPTR]] to i32** -// CHECK: %[[VBTABLE:.*]] = load i32** %[[VBPTR8]] +// CHECK: %[[VBTABLE:.*]] = load i32*, i32** %[[VBPTR8]] // CHECK: %[[VBENTRY:.*]] = getelementptr inbounds i32, i32* %[[VBTABLE]], i32 1 -// CHECK: %[[VBOFFSET32:.*]] = load i32* %[[VBENTRY]] +// CHECK: %[[VBOFFSET32:.*]] = load i32, i32* %[[VBENTRY]] // CHECK: %[[VBOFFSET:.*]] = add nsw i32 0, %[[VBOFFSET32]] // CHECK: %[[THIS8:.*]] = bitcast %struct.B* %[[THIS]] to i8* // CHECK: %[[VBASE_i8:.*]] = getelementptr inbounds i8, i8* %[[THIS8]], i32 %[[VBOFFSET]] @@ -147,22 +147,22 @@ void call_vbase_bar(B *obj) { // CHECK: %[[OBJ_i8:.*]] = bitcast %struct.B* %[[OBJ]] to i8* // CHECK: %[[VBPTR:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 0 // CHECK: %[[VBPTR8:.*]] = bitcast i8* %[[VBPTR]] to i32** -// CHECK: %[[VBTABLE:.*]] = load i32** %[[VBPTR8]] +// CHECK: %[[VBTABLE:.*]] = load i32*, i32** %[[VBPTR8]] // CHECK: %[[VBENTRY:.*]] = getelementptr inbounds i32, i32* %[[VBTABLE]], i32 1 -// CHECK: %[[VBOFFSET32:.*]] = load i32* %[[VBENTRY]] +// CHECK: %[[VBOFFSET32:.*]] = load i32, i32* %[[VBENTRY]] // CHECK: %[[VBOFFSET:.*]] = add nsw i32 0, %[[VBOFFSET32]] // CHECK: %[[VBASE_i8:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 %[[VBOFFSET]] // CHECK: %[[VFPTR:.*]] = bitcast i8* %[[VBASE_i8]] to void (i8*)*** -// CHECK: %[[VFTABLE:.*]] = load void (i8*)*** %[[VFPTR]] +// CHECK: %[[VFTABLE:.*]] = load void (i8*)**, void (i8*)*** %[[VFPTR]] // CHECK: %[[VFUN:.*]] = getelementptr inbounds void (i8*)*, void (i8*)** 
%[[VFTABLE]], i64 2 -// CHECK: %[[VFUN_VALUE:.*]] = load void (i8*)** %[[VFUN]] +// CHECK: %[[VFUN_VALUE:.*]] = load void (i8*)*, void (i8*)** %[[VFUN]] // // CHECK: %[[OBJ_i8:.*]] = bitcast %struct.B* %[[OBJ]] to i8* // CHECK: %[[VBPTR:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 0 // CHECK: %[[VBPTR8:.*]] = bitcast i8* %[[VBPTR]] to i32** -// CHECK: %[[VBTABLE:.*]] = load i32** %[[VBPTR8]] +// CHECK: %[[VBTABLE:.*]] = load i32*, i32** %[[VBPTR8]] // CHECK: %[[VBENTRY:.*]] = getelementptr inbounds i32, i32* %[[VBTABLE]], i32 1 -// CHECK: %[[VBOFFSET32:.*]] = load i32* %[[VBENTRY]] +// CHECK: %[[VBOFFSET32:.*]] = load i32, i32* %[[VBENTRY]] // CHECK: %[[VBOFFSET:.*]] = add nsw i32 0, %[[VBOFFSET32]] // CHECK: %[[VBASE:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 %[[VBOFFSET]] // @@ -179,22 +179,22 @@ void delete_B(B *obj) { // CHECK: %[[OBJ_i8:.*]] = bitcast %struct.B* %[[OBJ]] to i8* // CHECK: %[[VBPTR:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 0 // CHECK: %[[VBPTR8:.*]] = bitcast i8* %[[VBPTR]] to i32** -// CHECK: %[[VBTABLE:.*]] = load i32** %[[VBPTR8]] +// CHECK: %[[VBTABLE:.*]] = load i32*, i32** %[[VBPTR8]] // CHECK: %[[VBENTRY:.*]] = getelementptr inbounds i32, i32* %[[VBTABLE]], i32 1 -// CHECK: %[[VBOFFSET32:.*]] = load i32* %[[VBENTRY]] +// CHECK: %[[VBOFFSET32:.*]] = load i32, i32* %[[VBENTRY]] // CHECK: %[[VBOFFSET:.*]] = add nsw i32 0, %[[VBOFFSET32]] // CHECK: %[[VBASE_i8:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 %[[VBOFFSET]] // CHECK: %[[VFPTR:.*]] = bitcast i8* %[[VBASE_i8]] to i8* (%struct.B*, i32)*** -// CHECK: %[[VFTABLE:.*]] = load i8* (%struct.B*, i32)*** %[[VFPTR]] +// CHECK: %[[VFTABLE:.*]] = load i8* (%struct.B*, i32)**, i8* (%struct.B*, i32)*** %[[VFPTR]] // CHECK: %[[VFUN:.*]] = getelementptr inbounds i8* (%struct.B*, i32)*, i8* (%struct.B*, i32)** %[[VFTABLE]], i64 0 -// CHECK: %[[VFUN_VALUE:.*]] = load i8* (%struct.B*, i32)** %[[VFUN]] +// CHECK: %[[VFUN_VALUE:.*]] = load i8* (%struct.B*, i32)*, i8* (%struct.B*, i32)** %[[VFUN]] // // CHECK: %[[OBJ_i8:.*]] = bitcast %struct.B* %[[OBJ]] to i8* // CHECK: %[[VBPTR:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 0 // CHECK: %[[VBPTR8:.*]] = bitcast i8* %[[VBPTR]] to i32** -// CHECK: %[[VBTABLE:.*]] = load i32** %[[VBPTR8]] +// CHECK: %[[VBTABLE:.*]] = load i32*, i32** %[[VBPTR8]] // CHECK: %[[VBENTRY:.*]] = getelementptr inbounds i32, i32* %[[VBTABLE]], i32 1 -// CHECK: %[[VBOFFSET32:.*]] = load i32* %[[VBENTRY]] +// CHECK: %[[VBOFFSET32:.*]] = load i32, i32* %[[VBENTRY]] // CHECK: %[[VBOFFSET:.*]] = add nsw i32 0, %[[VBOFFSET32]] // CHECK: %[[VBASE_i8:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 %[[VBOFFSET]] // CHECK: %[[VBASE:.*]] = bitcast i8* %[[VBASE_i8]] to %struct.B* @@ -288,7 +288,7 @@ D::~D() { // CHECK: %[[THIS_i8:.*]] = getelementptr inbounds i8, i8* %[[ARG_i8]], i32 -24 // CHECK: %[[THIS:.*]] = bitcast i8* %[[THIS_i8]] to %"struct.diamond::D"* // CHECK: store %"struct.diamond::D"* %[[THIS]], %"struct.diamond::D"** %[[THIS_VAL:.*]], align 4 - // CHECK: %[[THIS:.*]] = load %"struct.diamond::D"** %[[THIS_VAL]] + // CHECK: %[[THIS:.*]] = load %"struct.diamond::D"*, %"struct.diamond::D"** %[[THIS_VAL]] // CHECK: %[[D_i8:.*]] = bitcast %"struct.diamond::D"* %[[THIS]] to i8* // CHECK: %[[C_i8:.*]] = getelementptr inbounds i8, i8* %[[D_i8]], i64 4 // CHECK: %[[C:.*]] = bitcast i8* %[[C_i8]] to %"struct.diamond::C"* @@ -362,7 +362,7 @@ void D::bar() { C::foo(); // Shouldn't need any vbtable lookups. 
All we have to do is adjust to C*, // then compensate for the adjustment performed in the C::foo() prologue. - // CHECK-NOT: load i8** + // CHECK-NOT: load i8*, i8** // CHECK: %[[OBJ_i8:.*]] = bitcast %"struct.test3::D"* %{{.*}} to i8* // CHECK: %[[C_i8:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 8 // CHECK: %[[C:.*]] = bitcast i8* %[[C_i8]] to %"struct.test3::C"* @@ -408,9 +408,9 @@ void destroy(C *obj) { delete obj; // CHECK: %[[VPTR:.*]] = bitcast %"struct.test4::C"* %[[OBJ:.*]] to i8* (%"struct.test4::C"*, i32)*** - // CHECK: %[[VFTABLE:.*]] = load i8* (%"struct.test4::C"*, i32)*** %[[VPTR]] + // CHECK: %[[VFTABLE:.*]] = load i8* (%"struct.test4::C"*, i32)**, i8* (%"struct.test4::C"*, i32)*** %[[VPTR]] // CHECK: %[[VFTENTRY:.*]] = getelementptr inbounds i8* (%"struct.test4::C"*, i32)*, i8* (%"struct.test4::C"*, i32)** %[[VFTABLE]], i64 0 - // CHECK: %[[VFUN:.*]] = load i8* (%"struct.test4::C"*, i32)** %[[VFTENTRY]] + // CHECK: %[[VFUN:.*]] = load i8* (%"struct.test4::C"*, i32)*, i8* (%"struct.test4::C"*, i32)** %[[VFTENTRY]] // CHECK: call x86_thiscallcc i8* %[[VFUN]](%"struct.test4::C"* %[[OBJ]], i32 1) // CHECK: ret } @@ -443,9 +443,9 @@ void destroy(E *obj) { // CHECK: %[[OBJ_i8:.*]] = bitcast %"struct.test4::E"* %[[OBJ:.*]] to i8* // CHECK: %[[B_i8:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 4 // CHECK: %[[VPTR:.*]] = bitcast i8* %[[B_i8]] to i8* (%"struct.test4::E"*, i32)*** - // CHECK: %[[VFTABLE:.*]] = load i8* (%"struct.test4::E"*, i32)*** %[[VPTR]] + // CHECK: %[[VFTABLE:.*]] = load i8* (%"struct.test4::E"*, i32)**, i8* (%"struct.test4::E"*, i32)*** %[[VPTR]] // CHECK: %[[VFTENTRY:.*]] = getelementptr inbounds i8* (%"struct.test4::E"*, i32)*, i8* (%"struct.test4::E"*, i32)** %[[VFTABLE]], i64 0 - // CHECK: %[[VFUN:.*]] = load i8* (%"struct.test4::E"*, i32)** %[[VFTENTRY]] + // CHECK: %[[VFUN:.*]] = load i8* (%"struct.test4::E"*, i32)*, i8* (%"struct.test4::E"*, i32)** %[[VFTENTRY]] // CHECK: %[[OBJ_i8:.*]] = bitcast %"struct.test4::E"* %[[OBJ]] to i8* // CHECK: %[[B_i8:.*]] = getelementptr inbounds i8, i8* %[[OBJ_i8]], i32 4 // FIXME: in fact, the call should take i8* and the bitcast is redundant. diff --git a/test/CodeGenCXX/microsoft-abi-virtual-member-pointers.cpp b/test/CodeGenCXX/microsoft-abi-virtual-member-pointers.cpp index 27e3db1fc4..0479f6aa69 100644 --- a/test/CodeGenCXX/microsoft-abi-virtual-member-pointers.cpp +++ b/test/CodeGenCXX/microsoft-abi-virtual-member-pointers.cpp @@ -66,7 +66,7 @@ void f() { // CHECK32-NOT: unnamed_addr // CHECK32: comdat // CHECK32: [[VPTR:%.*]] = getelementptr inbounds void (%struct.C*, ...)*, void (%struct.C*, ...)** %{{.*}}, i64 0 -// CHECK32: [[CALLEE:%.*]] = load void (%struct.C*, ...)** [[VPTR]] +// CHECK32: [[CALLEE:%.*]] = load void (%struct.C*, ...)*, void (%struct.C*, ...)** [[VPTR]] // CHECK32: musttail call x86_thiscallcc void (%struct.C*, ...)* [[CALLEE]](%struct.C* %{{.*}}, ...) // CHECK32-NEXT: ret void // CHECK32: } @@ -76,7 +76,7 @@ void f() { // CHECK64-NOT: unnamed_addr // CHECK64: comdat // CHECK64: [[VPTR:%.*]] = getelementptr inbounds void (%struct.C*, ...)*, void (%struct.C*, ...)** %{{.*}}, i64 0 -// CHECK64: [[CALLEE:%.*]] = load void (%struct.C*, ...)** [[VPTR]] +// CHECK64: [[CALLEE:%.*]] = load void (%struct.C*, ...)*, void (%struct.C*, ...)** [[VPTR]] // CHECK64: musttail call void (%struct.C*, ...)* [[CALLEE]](%struct.C* %{{.*}}, ...) 
// CHECK64-NEXT: ret void // CHECK64: } @@ -85,7 +85,7 @@ void f() { // CHECK32-LABEL: define linkonce_odr x86_thiscallcc void @"\01??_9C@@$B3AE"(%struct.C* %this, ...) // CHECK32: #[[ATTR]] comdat // CHECK32: [[VPTR:%.*]] = getelementptr inbounds void (%struct.C*, ...)*, void (%struct.C*, ...)** %{{.*}}, i64 1 -// CHECK32: [[CALLEE:%.*]] = load void (%struct.C*, ...)** [[VPTR]] +// CHECK32: [[CALLEE:%.*]] = load void (%struct.C*, ...)*, void (%struct.C*, ...)** [[VPTR]] // CHECK32: musttail call x86_thiscallcc void (%struct.C*, ...)* [[CALLEE]](%struct.C* %{{.*}}, ...) // CHECK32-NEXT: ret void // CHECK32: } @@ -93,7 +93,7 @@ void f() { // CHECK64-LABEL: define linkonce_odr void @"\01??_9C@@$B7AA"(%struct.C* %this, ...) // CHECK64: #[[ATTR]] comdat // CHECK64: [[VPTR:%.*]] = getelementptr inbounds void (%struct.C*, ...)*, void (%struct.C*, ...)** %{{.*}}, i64 1 -// CHECK64: [[CALLEE:%.*]] = load void (%struct.C*, ...)** [[VPTR]] +// CHECK64: [[CALLEE:%.*]] = load void (%struct.C*, ...)*, void (%struct.C*, ...)** [[VPTR]] // CHECK64: musttail call void (%struct.C*, ...)* [[CALLEE]](%struct.C* %{{.*}}, ...) // CHECK64-NEXT: ret void // CHECK64: } @@ -102,7 +102,7 @@ void f() { // CHECK32-LABEL: define linkonce_odr x86_thiscallcc void @"\01??_9C@@$B7AE"(%struct.C* %this, ...) // CHECK32: #[[ATTR]] comdat // CHECK32: [[VPTR:%.*]] = getelementptr inbounds void (%struct.C*, ...)*, void (%struct.C*, ...)** %{{.*}}, i64 2 -// CHECK32: [[CALLEE:%.*]] = load void (%struct.C*, ...)** [[VPTR]] +// CHECK32: [[CALLEE:%.*]] = load void (%struct.C*, ...)*, void (%struct.C*, ...)** [[VPTR]] // CHECK32: musttail call x86_thiscallcc void (%struct.C*, ...)* [[CALLEE]](%struct.C* %{{.*}}, ...) // CHECK32-NEXT: ret void // CHECK32: } @@ -110,7 +110,7 @@ void f() { // CHECK64-LABEL: define linkonce_odr void @"\01??_9C@@$BBA@AA"(%struct.C* %this, ...) // CHECK64: #[[ATTR]] comdat // CHECK64: [[VPTR:%.*]] = getelementptr inbounds void (%struct.C*, ...)*, void (%struct.C*, ...)** %{{.*}}, i64 2 -// CHECK64: [[CALLEE:%.*]] = load void (%struct.C*, ...)** [[VPTR]] +// CHECK64: [[CALLEE:%.*]] = load void (%struct.C*, ...)*, void (%struct.C*, ...)** [[VPTR]] // CHECK64: musttail call void (%struct.C*, ...)* [[CALLEE]](%struct.C* %{{.*}}, ...) // CHECK64-NEXT: ret void // CHECK64: } @@ -119,7 +119,7 @@ void f() { // CHECK32-LABEL: define internal x86_thiscallcc void @"\01??_9D@?A@@$BA@AE"(%"struct.(anonymous namespace)::D"* %this, ...) // CHECK32: #[[ATTR]] // CHECK32: [[VPTR:%.*]] = getelementptr inbounds void (%"struct.(anonymous namespace)::D"*, ...)*, void (%"struct.(anonymous namespace)::D"*, ...)** %{{.*}}, i64 0 -// CHECK32: [[CALLEE:%.*]] = load void (%"struct.(anonymous namespace)::D"*, ...)** [[VPTR]] +// CHECK32: [[CALLEE:%.*]] = load void (%"struct.(anonymous namespace)::D"*, ...)*, void (%"struct.(anonymous namespace)::D"*, ...)** [[VPTR]] // CHECK32: musttail call x86_thiscallcc void (%"struct.(anonymous namespace)::D"*, ...)* [[CALLEE]](%"struct.(anonymous namespace)::D"* %{{.*}}, ...) // CHECK32-NEXT: ret void // CHECK32: } @@ -127,7 +127,7 @@ void f() { // CHECK64-LABEL: define internal void @"\01??_9D@?A@@$BA@AA"(%"struct.(anonymous namespace)::D"* %this, ...) 
// CHECK64: #[[ATTR]] // CHECK64: [[VPTR:%.*]] = getelementptr inbounds void (%"struct.(anonymous namespace)::D"*, ...)*, void (%"struct.(anonymous namespace)::D"*, ...)** %{{.*}}, i64 0 -// CHECK64: [[CALLEE:%.*]] = load void (%"struct.(anonymous namespace)::D"*, ...)** [[VPTR]] +// CHECK64: [[CALLEE:%.*]] = load void (%"struct.(anonymous namespace)::D"*, ...)*, void (%"struct.(anonymous namespace)::D"*, ...)** [[VPTR]] // CHECK64: musttail call void (%"struct.(anonymous namespace)::D"*, ...)* [[CALLEE]](%"struct.(anonymous namespace)::D"* %{{.*}}, ...) // CHECK64-NEXT: ret void // CHECK64: } @@ -136,14 +136,14 @@ void f() { // and returning a struct. // CHECK32-LABEL: define linkonce_odr x86_thiscallcc void @"\01??_9C@@$BM@AE"(%struct.C* %this, ...) {{.*}} comdat // CHECK32: [[VPTR:%.*]] = getelementptr inbounds void (%struct.C*, ...)*, void (%struct.C*, ...)** %{{.*}}, i64 3 -// CHECK32: [[CALLEE:%.*]] = load void (%struct.C*, ...)** [[VPTR]] +// CHECK32: [[CALLEE:%.*]] = load void (%struct.C*, ...)*, void (%struct.C*, ...)** [[VPTR]] // CHECK32: musttail call x86_thiscallcc void (%struct.C*, ...)* [[CALLEE]](%struct.C* %{{.*}}, ...) // CHECK32-NEXT: ret void // CHECK32: } // // CHECK64-LABEL: define linkonce_odr void @"\01??_9C@@$BBI@AA"(%struct.C* %this, ...) {{.*}} comdat // CHECK64: [[VPTR:%.*]] = getelementptr inbounds void (%struct.C*, ...)*, void (%struct.C*, ...)** %{{.*}}, i64 3 -// CHECK64: [[CALLEE:%.*]] = load void (%struct.C*, ...)** [[VPTR]] +// CHECK64: [[CALLEE:%.*]] = load void (%struct.C*, ...)*, void (%struct.C*, ...)** [[VPTR]] // CHECK64: musttail call void (%struct.C*, ...)* [[CALLEE]](%struct.C* %{{.*}}, ...) // CHECK64: ret void // CHECK64: } diff --git a/test/CodeGenCXX/ms-inline-asm-return.cpp b/test/CodeGenCXX/ms-inline-asm-return.cpp index 26fc426e5f..1219c617dd 100644 --- a/test/CodeGenCXX/ms-inline-asm-return.cpp +++ b/test/CodeGenCXX/ms-inline-asm-return.cpp @@ -58,7 +58,7 @@ bool f_i1() { // CHECK: %[[r:[^ ]*]] = call i32 asm sideeffect inteldialect "mov eax, $$1\0A\09mov edx, $$1", "={eax},~{eax},{{.*}}" // CHECK: %[[r_i8:[^ ]*]] = trunc i32 %[[r]] to i8 // CHECK: store i8 %[[r_i8]], i8* %{{.*}} -// CHECK: %[[r_i1:[^ ]*]] = load i1* %{{.*}} +// CHECK: %[[r_i1:[^ ]*]] = load i1, i1* %{{.*}} // CHECK: ret i1 %[[r_i1]] struct FourChars { @@ -72,7 +72,7 @@ FourChars f_s4() { // CHECK-LABEL: define i32 @f_s4() // CHECK: %[[r:[^ ]*]] = call i32 asm sideeffect inteldialect "mov eax, $$0x01010101", "={eax},~{eax},{{.*}}" // CHECK: store i32 %[[r]], i32* %{{.*}} -// CHECK: %[[r_i32:[^ ]*]] = load i32* %{{.*}} +// CHECK: %[[r_i32:[^ ]*]] = load i32, i32* %{{.*}} // CHECK: ret i32 %[[r_i32]] struct EightChars { @@ -87,7 +87,7 @@ EightChars f_s8() { // CHECK-LABEL: define i64 @f_s8() // CHECK: %[[r:[^ ]*]] = call i64 asm sideeffect inteldialect "mov eax, $$0x01010101\0A\09mov edx, $$0x01010101", "=A,~{eax},{{.*}}" // CHECK: store i64 %[[r]], i64* %{{.*}} -// CHECK: %[[r_i64:[^ ]*]] = load i64* %{{.*}} +// CHECK: %[[r_i64:[^ ]*]] = load i64, i64* %{{.*}} // CHECK: ret i64 %[[r_i64]] } // extern "C" diff --git a/test/CodeGenCXX/new-overflow.cpp b/test/CodeGenCXX/new-overflow.cpp index 68f89c35b6..9057e049b7 100644 --- a/test/CodeGenCXX/new-overflow.cpp +++ b/test/CodeGenCXX/new-overflow.cpp @@ -103,7 +103,7 @@ namespace test5 { typedef A elt; // CHECK: define [[A:%.*]]* @_ZN5test54testEi(i32 - // CHECK: [[N:%.*]] = load i32* + // CHECK: [[N:%.*]] = load i32, i32* // CHECK-NEXT: [[T0:%.*]] = icmp slt i32 [[N]], 0 // CHECK-NEXT: [[T1:%.*]] = select i1 [[T0]], i32 -1, 
i32 [[N]] // CHECK-NEXT: call noalias i8* @_Znaj(i32 [[T1]]) @@ -168,7 +168,7 @@ namespace test8 { typedef A elt; // CHECK: define [[A:%.*]]* @_ZN5test84testEx(i64 - // CHECK: [[N:%.*]] = load i64* + // CHECK: [[N:%.*]] = load i64, i64* // CHECK-NEXT: [[T0:%.*]] = icmp uge i64 [[N]], 4294967296 // CHECK-NEXT: [[T1:%.*]] = trunc i64 [[N]] to i32 // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[T1]], i32 4) @@ -193,7 +193,7 @@ namespace test9 { typedef A elt; // CHECK: define [[A:%.*]]* @_ZN5test94testEy(i64 - // CHECK: [[N:%.*]] = load i64* + // CHECK: [[N:%.*]] = load i64, i64* // CHECK-NEXT: [[T0:%.*]] = icmp uge i64 [[N]], 4294967296 // CHECK-NEXT: [[T1:%.*]] = trunc i64 [[N]] to i32 // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[T1]], i32 4) diff --git a/test/CodeGenCXX/new.cpp b/test/CodeGenCXX/new.cpp index 3b03fad1de..59b899f262 100644 --- a/test/CodeGenCXX/new.cpp +++ b/test/CodeGenCXX/new.cpp @@ -196,7 +196,7 @@ namespace test15 { struct A { A(); ~A(); }; // CHECK-LABEL: define void @_ZN6test156test0aEPv( - // CHECK: [[P:%.*]] = load i8** + // CHECK: [[P:%.*]] = load i8*, i8** // CHECK-NOT: icmp eq i8* [[P]], null // CHECK-NOT: br i1 // CHECK: [[T0:%.*]] = bitcast i8* [[P]] to [[A:%.*]]* @@ -206,7 +206,7 @@ namespace test15 { } // CHECK-LABEL: define void @_ZN6test156test0bEPv( - // CHECK: [[P0:%.*]] = load i8** + // CHECK: [[P0:%.*]] = load i8*, i8** // CHECK: [[P:%.*]] = call i8* @_ZnwmPvb(i64 1, i8* [[P0]] // CHECK-NEXT: icmp eq i8* [[P]], null // CHECK-NEXT: br i1 @@ -217,7 +217,7 @@ namespace test15 { } // CHECK-LABEL: define void @_ZN6test156test1aEPv( - // CHECK: [[P:%.*]] = load i8** + // CHECK: [[P:%.*]] = load i8*, i8** // CHECK-NOT: icmp eq i8* [[P]], null // CHECK-NOT: br i1 // CHECK: [[BEGIN:%.*]] = bitcast i8* [[P]] to [[A:%.*]]* @@ -233,7 +233,7 @@ namespace test15 { } // CHECK-LABEL: define void @_ZN6test156test1bEPv( - // CHECK: [[P0:%.*]] = load i8** + // CHECK: [[P0:%.*]] = load i8*, i8** // CHECK: [[P:%.*]] = call i8* @_ZnamPvb(i64 13, i8* [[P0]] // CHECK-NEXT: icmp eq i8* [[P]], null // CHECK-NEXT: br i1 @@ -253,11 +253,11 @@ namespace test15 { // TODO: it's okay if all these size calculations get dropped. // FIXME: maybe we should try to throw on overflow? // CHECK-LABEL: define void @_ZN6test155test2EPvi( - // CHECK: [[N:%.*]] = load i32* + // CHECK: [[N:%.*]] = load i32, i32* // CHECK-NEXT: [[T0:%.*]] = sext i32 [[N]] to i64 // CHECK-NEXT: [[T1:%.*]] = icmp slt i64 [[T0]], 0 // CHECK-NEXT: [[T2:%.*]] = select i1 [[T1]], i64 -1, i64 [[T0]] - // CHECK-NEXT: [[P:%.*]] = load i8** + // CHECK-NEXT: [[P:%.*]] = load i8*, i8** // CHECK: [[BEGIN:%.*]] = bitcast i8* [[P]] to [[A:%.*]]* // CHECK-NEXT: [[ISEMPTY:%.*]] = icmp eq i64 [[T0]], 0 // CHECK-NEXT: br i1 [[ISEMPTY]], diff --git a/test/CodeGenCXX/noexcept.cpp b/test/CodeGenCXX/noexcept.cpp index dd4cfda1d8..9d90484c1c 100644 --- a/test/CodeGenCXX/noexcept.cpp +++ b/test/CodeGenCXX/noexcept.cpp @@ -39,7 +39,7 @@ namespace test0 { // CHECK-NEXT: call void @__clang_call_terminate(i8* [[T1]]) // CHECK-NEXT: unreachable // The terminate handler chained to by the cleanup lpad. 
-// CHECK: [[T0:%.*]] = load i8** [[EXN]] +// CHECK: [[T0:%.*]] = load i8*, i8** [[EXN]] // CHECK-NEXT: call void @__clang_call_terminate(i8* [[T0]]) // CHECK-NEXT: unreachable diff --git a/test/CodeGenCXX/partial-destruction.cpp b/test/CodeGenCXX/partial-destruction.cpp index 9e86b4b99c..01e289450d 100644 --- a/test/CodeGenCXX/partial-destruction.cpp +++ b/test/CodeGenCXX/partial-destruction.cpp @@ -52,7 +52,7 @@ namespace test0 { // Partial destroy for initialization. // CHECK: landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) // CHECK-NEXT: cleanup - // CHECK: [[PARTIAL_END:%.*]] = load [[A]]** [[ENDVAR]] + // CHECK: [[PARTIAL_END:%.*]] = load [[A]]*, [[A]]** [[ENDVAR]] // CHECK-NEXT: [[T0:%.*]] = icmp eq [[A]]* [[E_BEGIN]], [[PARTIAL_END]] // CHECK-NEXT: br i1 [[T0]], // CHECK: [[E_AFTER:%.*]] = phi [[A]]* [ [[PARTIAL_END]], {{%.*}} ], [ [[E_CUR:%.*]], {{%.*}} ] diff --git a/test/CodeGenCXX/pod-member-memcpys.cpp b/test/CodeGenCXX/pod-member-memcpys.cpp index 3c05847685..97d203fde2 100644 --- a/test/CodeGenCXX/pod-member-memcpys.cpp +++ b/test/CodeGenCXX/pod-member-memcpys.cpp @@ -145,7 +145,7 @@ CALL_AO(PackedMembers) // VolatileMember copy-assignment: // CHECK-LABEL: define linkonce_odr dereferenceable({{[0-9]+}}) %struct.VolatileMember* @_ZN14VolatileMemberaSERKS_(%struct.VolatileMember* %this, %struct.VolatileMember* dereferenceable({{[0-9]+}})) // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}}) -// CHECK: load volatile i32* {{.*}}, align 4 +// CHECK: load volatile i32, i32* {{.*}}, align 4 // CHECK: store volatile i32 {{.*}}, align 4 // CHECK: call dereferenceable({{[0-9]+}}) %struct.NonPOD* @_ZN6NonPODaSERKS_ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}}) @@ -227,7 +227,7 @@ CALL_CC(VolatileMember) // VolatileMember copy-constructor: // CHECK-LABEL: define linkonce_odr void @_ZN14VolatileMemberC2ERKS_(%struct.VolatileMember* %this, %struct.VolatileMember* dereferenceable({{[0-9]+}})) // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}}) -// CHECK: load volatile i32* {{.*}}, align 4 +// CHECK: load volatile i32, i32* {{.*}}, align 4 // CHECK: store volatile i32 {{.*}}, align 4 // CHECK: call void @_ZN6NonPODC1ERKS_ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}}) diff --git a/test/CodeGenCXX/pointers-to-data-members.cpp b/test/CodeGenCXX/pointers-to-data-members.cpp index b39133111e..bb1b64e0a7 100644 --- a/test/CodeGenCXX/pointers-to-data-members.cpp +++ b/test/CodeGenCXX/pointers-to-data-members.cpp @@ -75,14 +75,14 @@ void f() { // CHECK: store i64 -1, i64* @_ZN5Casts2paE pa = 0; - // CHECK-NEXT: [[TMP:%.*]] = load i64* @_ZN5Casts2paE, align 8 + // CHECK-NEXT: [[TMP:%.*]] = load i64, i64* @_ZN5Casts2paE, align 8 // CHECK-NEXT: [[ADJ:%.*]] = add nsw i64 [[TMP]], 4 // CHECK-NEXT: [[ISNULL:%.*]] = icmp eq i64 [[TMP]], -1 // CHECK-NEXT: [[RES:%.*]] = select i1 [[ISNULL]], i64 [[TMP]], i64 [[ADJ]] // CHECK-NEXT: store i64 [[RES]], i64* @_ZN5Casts2pcE pc = pa; - // CHECK-NEXT: [[TMP:%.*]] = load i64* @_ZN5Casts2pcE, align 8 + // CHECK-NEXT: [[TMP:%.*]] = load i64, i64* @_ZN5Casts2pcE, align 8 // CHECK-NEXT: [[ADJ:%.*]] = sub nsw i64 [[TMP]], 4 // CHECK-NEXT: [[ISNULL:%.*]] = icmp eq i64 [[TMP]], -1 // CHECK-NEXT: [[RES:%.*]] = select i1 [[ISNULL]], i64 [[TMP]], i64 [[ADJ]] diff --git a/test/CodeGenCXX/pr12251.cpp b/test/CodeGenCXX/pr12251.cpp index 5b1ef9a31d..49e61cae37 100644 --- a/test/CodeGenCXX/pr12251.cpp +++ b/test/CodeGenCXX/pr12251.cpp @@ 
-5,12 +5,12 @@ bool f(bool *x) { return *x; } // CHECK-LABEL: define zeroext i1 @_Z1fPb -// CHECK: load i8* %{{[^ ]*}}, align 1, !range [[RANGE_i8_0_2:![^ ]*]] +// CHECK: load i8, i8* %{{[^ ]*}}, align 1, !range [[RANGE_i8_0_2:![^ ]*]] // Only enum-tests follow. Ensure that after the bool test, no further range // metadata shows up when strict enums are disabled. // NO-STRICT-ENUMS-LABEL: define zeroext i1 @_Z1fPb -// NO-STRICT-ENUMS: load i8* %{{[^ ]*}}, align 1, !range +// NO-STRICT-ENUMS: load i8, i8* %{{[^ ]*}}, align 1, !range // NO-STRICT-ENUMS-NOT: !range enum e1 { }; @@ -32,70 +32,70 @@ e3 g3(e3 *x) { return *x; } // CHECK-LABEL: define i32 @_Z2g3P2e3 -// CHECK: load i32* %x, align 4, !range [[RANGE_i32_0_32:![^ ]*]] +// CHECK: load i32, i32* %x, align 4, !range [[RANGE_i32_0_32:![^ ]*]] enum e4 { e4_a = -16}; e4 g4(e4 *x) { return *x; } // CHECK-LABEL: define i32 @_Z2g4P2e4 -// CHECK: load i32* %x, align 4, !range [[RANGE_i32_m16_16:![^ ]*]] +// CHECK: load i32, i32* %x, align 4, !range [[RANGE_i32_m16_16:![^ ]*]] enum e5 { e5_a = -16, e5_b = 16}; e5 g5(e5 *x) { return *x; } // CHECK-LABEL: define i32 @_Z2g5P2e5 -// CHECK: load i32* %x, align 4, !range [[RANGE_i32_m32_32:![^ ]*]] +// CHECK: load i32, i32* %x, align 4, !range [[RANGE_i32_m32_32:![^ ]*]] enum e6 { e6_a = -1 }; e6 g6(e6 *x) { return *x; } // CHECK-LABEL: define i32 @_Z2g6P2e6 -// CHECK: load i32* %x, align 4, !range [[RANGE_i32_m1_1:![^ ]*]] +// CHECK: load i32, i32* %x, align 4, !range [[RANGE_i32_m1_1:![^ ]*]] enum e7 { e7_a = -16, e7_b = 2}; e7 g7(e7 *x) { return *x; } // CHECK-LABEL: define i32 @_Z2g7P2e7 -// CHECK: load i32* %x, align 4, !range [[RANGE_i32_m16_16]] +// CHECK: load i32, i32* %x, align 4, !range [[RANGE_i32_m16_16]] enum e8 { e8_a = -17}; e8 g8(e8 *x) { return *x; } // CHECK-LABEL: define i32 @_Z2g8P2e8 -// CHECK: load i32* %x, align 4, !range [[RANGE_i32_m32_32:![^ ]*]] +// CHECK: load i32, i32* %x, align 4, !range [[RANGE_i32_m32_32:![^ ]*]] enum e9 { e9_a = 17}; e9 g9(e9 *x) { return *x; } // CHECK-LABEL: define i32 @_Z2g9P2e9 -// CHECK: load i32* %x, align 4, !range [[RANGE_i32_0_32]] +// CHECK: load i32, i32* %x, align 4, !range [[RANGE_i32_0_32]] enum e10 { e10_a = -16, e10_b = 32}; e10 g10(e10 *x) { return *x; } // CHECK-LABEL: define i32 @_Z3g10P3e10 -// CHECK: load i32* %x, align 4, !range [[RANGE_i32_m64_64:![^ ]*]] +// CHECK: load i32, i32* %x, align 4, !range [[RANGE_i32_m64_64:![^ ]*]] enum e11 {e11_a = 4294967296 }; enum e11 g11(enum e11 *x) { return *x; } // CHECK-LABEL: define i64 @_Z3g11P3e11 -// CHECK: load i64* %x, align {{[84]}}, !range [[RANGE_i64_0_2pow33:![^ ]*]] +// CHECK: load i64, i64* %x, align {{[84]}}, !range [[RANGE_i64_0_2pow33:![^ ]*]] enum e12 {e12_a = 9223372036854775808U }; enum e12 g12(enum e12 *x) { return *x; } // CHECK-LABEL: define i64 @_Z3g12P3e12 -// CHECK: load i64* %x, align {{[84]}} +// CHECK: load i64, i64* %x, align {{[84]}} // CHECK-NOT: range // CHECK: ret @@ -104,7 +104,7 @@ e13 g13(e13 *x) { return *x; } // CHECK-LABEL: define signext i8 @_Z3g13P3e13 -// CHECK: load i8* %x, align 1 +// CHECK: load i8, i8* %x, align 1 // CHECK-NOT: range // CHECK: ret @@ -113,7 +113,7 @@ e14 g14(e14 *x) { return *x; } // CHECK-LABEL: define i32 @_Z3g14P3e14 -// CHECK: load i32* %x, align 4 +// CHECK: load i32, i32* %x, align 4 // CHECK-NOT: range // CHECK: ret @@ -122,7 +122,7 @@ e15 g15(e15 *x) { return *x; } // CHECK-LABEL: define i32 @_Z3g15P3e15 -// CHECK: load i32* %x, align 4 +// CHECK: load i32, i32* %x, align 4 // CHECK-NOT: range // CHECK: ret @@ -131,7 
+131,7 @@ e16 g16(e16 *x) { return *x; } // CHECK-LABEL: define i32 @_Z3g16P3e16 -// CHECK: load i32* %x, align 4 +// CHECK: load i32, i32* %x, align 4 // CHECK-NOT: range // CHECK: ret diff --git a/test/CodeGenCXX/pr20897.cpp b/test/CodeGenCXX/pr20897.cpp index 4989224acd..f8d6f4acd7 100644 --- a/test/CodeGenCXX/pr20897.cpp +++ b/test/CodeGenCXX/pr20897.cpp @@ -4,13 +4,13 @@ struct Base {}; // __declspec(dllexport) causes us to export the implicit constructor. struct __declspec(dllexport) Derived : virtual Base { // CHECK-LABEL: define weak_odr dllexport x86_thiscallcc %struct.Derived* @"\01??0Derived@@QAE@ABU0@@Z" -// CHECK: %[[this:.*]] = load %struct.Derived** {{.*}} +// CHECK: %[[this:.*]] = load %struct.Derived*, %struct.Derived** {{.*}} // CHECK-NEXT: store %struct.Derived* %[[this]], %struct.Derived** %[[retval:.*]] // CHECK: %[[dest_a_gep:.*]] = getelementptr inbounds %struct.Derived, %struct.Derived* %[[this]], i32 0, i32 1 -// CHECK-NEXT: %[[src_load:.*]] = load %struct.Derived** {{.*}} +// CHECK-NEXT: %[[src_load:.*]] = load %struct.Derived*, %struct.Derived** {{.*}} // CHECK-NEXT: %[[src_a_gep:.*]] = getelementptr inbounds %struct.Derived, %struct.Derived* %[[src_load:.*]], i32 0, i32 1 // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %[[dest_a_gep]], i8* %[[src_a_gep]], i64 1, i32 4, i1 false) -// CHECK-NEXT: %[[dest_this:.*]] = load %struct.Derived** %[[retval]] +// CHECK-NEXT: %[[dest_this:.*]] = load %struct.Derived*, %struct.Derived** %[[retval]] // CHECK-NEXT: ret %struct.Derived* %[[dest_this]] bool a : 1; bool b : 1; @@ -19,15 +19,15 @@ struct __declspec(dllexport) Derived : virtual Base { // __declspec(dllexport) causes us to export the implicit copy constructor. struct __declspec(dllexport) Derived2 : virtual Base { // CHECK-LABEL: define weak_odr dllexport x86_thiscallcc %struct.Derived2* @"\01??0Derived2@@QAE@ABU0@@Z" -// CHECK: %[[this:.*]] = load %struct.Derived2** {{.*}} +// CHECK: %[[this:.*]] = load %struct.Derived2*, %struct.Derived2** {{.*}} // CHECK-NEXT: store %struct.Derived2* %[[this]], %struct.Derived2** %[[retval:.*]] // CHECK: %[[dest_a_gep:.*]] = getelementptr inbounds %struct.Derived2, %struct.Derived2* %[[this]], i32 0, i32 1 -// CHECK-NEXT: %[[src_load:.*]] = load %struct.Derived2** {{.*}} +// CHECK-NEXT: %[[src_load:.*]] = load %struct.Derived2*, %struct.Derived2** {{.*}} // CHECK-NEXT: %[[src_a_gep:.*]] = getelementptr inbounds %struct.Derived2, %struct.Derived2* %[[src_load:.*]], i32 0, i32 1 // CHECK-NEXT: %[[dest_a_bitcast:.*]] = bitcast [1 x i32]* %[[dest_a_gep]] to i8* // CHECK-NEXT: %[[src_a_bitcast:.*]] = bitcast [1 x i32]* %[[src_a_gep]] to i8* // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[dest_a_bitcast]], i8* %[[src_a_bitcast]], i32 4, i32 4, i1 false) -// CHECK-NEXT: %[[dest_this:.*]] = load %struct.Derived2** %[[retval]] +// CHECK-NEXT: %[[dest_this:.*]] = load %struct.Derived2*, %struct.Derived2** %[[retval]] // CHECK-NEXT: ret %struct.Derived2* %[[dest_this]] int Array[1]; }; diff --git a/test/CodeGenCXX/reference-cast.cpp b/test/CodeGenCXX/reference-cast.cpp index c4be5b78c2..02498a36a1 100644 --- a/test/CodeGenCXX/reference-cast.cpp +++ b/test/CodeGenCXX/reference-cast.cpp @@ -174,10 +174,10 @@ unsigned pr10592(const int &v) { // CHECK: [[VADDR:%[a-zA-Z0-9.]+]] = alloca i32* // CHECK-NEXT: [[REFTMP:%[a-zA-Z0-9.]+]] = alloca i32 // CHECK-NEXT: store i32* [[V:%[a-zA-Z0-9.]+]], i32** [[VADDR]] - // CHECK-NEXT: [[VADDR_1:%[a-zA-Z0-9.]+]] = load i32** [[VADDR]] - // CHECK-NEXT: [[VVAL:%[a-zA-Z0-9.]+]] = load i32* 
[[VADDR_1]] + // CHECK-NEXT: [[VADDR_1:%[a-zA-Z0-9.]+]] = load i32*, i32** [[VADDR]] + // CHECK-NEXT: [[VVAL:%[a-zA-Z0-9.]+]] = load i32, i32* [[VADDR_1]] // CHECK-NEXT: store i32 [[VVAL]], i32* [[REFTMP]] - // CHECK-NEXT: [[VVAL_I:%[a-zA-Z0-9.]+]] = load i32* [[REFTMP]] + // CHECK-NEXT: [[VVAL_I:%[a-zA-Z0-9.]+]] = load i32, i32* [[REFTMP]] // CHECK-NEXT: ret i32 [[VVAL_I]] return static_cast(v); } diff --git a/test/CodeGenCXX/references.cpp b/test/CodeGenCXX/references.cpp index 454c306a13..090afb67d3 100644 --- a/test/CodeGenCXX/references.cpp +++ b/test/CodeGenCXX/references.cpp @@ -1,16 +1,16 @@ // RUN: not %clang_cc1 -triple x86_64-apple-darwin -verify -emit-llvm -o - %s | FileCheck %s void t1() { // CHECK-LABEL: define void @_Z2t1v - // CHECK: [[REFLOAD:%.*]] = load i32** @a, align 8 - // CHECK: load i32* [[REFLOAD]], align 4 + // CHECK: [[REFLOAD:%.*]] = load i32*, i32** @a, align 8 + // CHECK: load i32, i32* [[REFLOAD]], align 4 extern int& a; int b = a; } void t2(int& a) { // CHECK-LABEL: define void @_Z2t2Ri - // CHECK: [[REFLOAD2:%.*]] = load i32** {{.*}}, align 8 - // CHECK: load i32* [[REFLOAD2]], align 4 + // CHECK: [[REFLOAD2:%.*]] = load i32*, i32** {{.*}}, align 8 + // CHECK: load i32, i32* [[REFLOAD2]], align 4 int b = a; } @@ -307,6 +307,6 @@ namespace N6 { extern struct x {char& x;}y; int a() { return y.x; } // CHECK-LABEL: define i32 @_ZN2N61aEv - // CHECK: [[REFLOAD3:%.*]] = load i8** getelementptr inbounds (%"struct.N6::x"* @_ZN2N61yE, i32 0, i32 0), align 8 - // CHECK: load i8* [[REFLOAD3]], align 1 + // CHECK: [[REFLOAD3:%.*]] = load i8*, i8** getelementptr inbounds (%"struct.N6::x"* @_ZN2N61yE, i32 0, i32 0), align 8 + // CHECK: load i8, i8* [[REFLOAD3]], align 1 } diff --git a/test/CodeGenCXX/rvalue-references.cpp b/test/CodeGenCXX/rvalue-references.cpp index 64da32b4fd..47e5745552 100644 --- a/test/CodeGenCXX/rvalue-references.cpp +++ b/test/CodeGenCXX/rvalue-references.cpp @@ -96,7 +96,7 @@ namespace test1 { // CHECK-LABEL: define void @_ZN5test11BC2Ei( // CHECK: [[T0:%.*]] = call dereferenceable({{[0-9]+}}) i32* @_ZN5test14moveERi( - // CHECK-NEXT: [[T1:%.*]] = load i32* [[T0]] + // CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[T0]] // CHECK-NEXT: call void @_ZN5test11AC1Ei({{.*}}, i32 [[T1]]) // CHECK-NEXT: ret void B::B(int i) : a(move(i)) {} diff --git a/test/CodeGenCXX/static-data-member.cpp b/test/CodeGenCXX/static-data-member.cpp index d41ac8fb35..69d59b2528 100644 --- a/test/CodeGenCXX/static-data-member.cpp +++ b/test/CodeGenCXX/static-data-member.cpp @@ -67,7 +67,7 @@ namespace test3 { // CHECK-LABEL: define internal void @__cxx_global_var_init1() {{.*}} comdat($_ZN5test31AIiE1xE) // MACHO-LABEL: define internal void @__cxx_global_var_init1() // MACHO-NOT: comdat - // CHECK: [[GUARDBYTE:%.*]] = load i8* bitcast (i64* @_ZGVN5test31AIiE1xE to i8*) + // CHECK: [[GUARDBYTE:%.*]] = load i8, i8* bitcast (i64* @_ZGVN5test31AIiE1xE to i8*) // CHECK-NEXT: [[UNINITIALIZED:%.*]] = icmp eq i8 [[GUARDBYTE]], 0 // CHECK-NEXT: br i1 [[UNINITIALIZED]] // CHECK: [[TMP:%.*]] = call i32 @_ZN5test33fooEv() diff --git a/test/CodeGenCXX/static-init-pnacl.cpp b/test/CodeGenCXX/static-init-pnacl.cpp index de35ec3847..ba06420431 100644 --- a/test/CodeGenCXX/static-init-pnacl.cpp +++ b/test/CodeGenCXX/static-init-pnacl.cpp @@ -9,6 +9,6 @@ int f(); void g() { static int a = f(); } -// CHECK: [[LOAD:%.*]] = load atomic i8* bitcast (i64* @_ZGVZ1gvE1a to i8*) acquire +// CHECK: [[LOAD:%.*]] = load atomic i8, i8* bitcast (i64* @_ZGVZ1gvE1a to i8*) acquire // CHECK-NEXT: [[GUARD:%.*]] = 
icmp eq i8 [[LOAD]], 0 // CHECK-NEXT: br i1 [[GUARD]] diff --git a/test/CodeGenCXX/static-init.cpp b/test/CodeGenCXX/static-init.cpp index acac84d599..f26f0d02ec 100644 --- a/test/CodeGenCXX/static-init.cpp +++ b/test/CodeGenCXX/static-init.cpp @@ -16,7 +16,7 @@ struct A { }; void f() { - // CHECK: load atomic i8* bitcast (i64* @_ZGVZ1fvE1a to i8*) acquire, align 1 + // CHECK: load atomic i8, i8* bitcast (i64* @_ZGVZ1fvE1a to i8*) acquire, align 1 // CHECK: call i32 @__cxa_guard_acquire // CHECK: call void @_ZN1AC1Ev // CHECK: call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.A*)* @_ZN1AD1Ev to void (i8*)*), i8* getelementptr inbounds (%struct.A* @_ZZ1fvE1a, i32 0, i32 0), i8* @__dso_handle) @@ -106,14 +106,14 @@ namespace test2 { static int x = foo(); } // CHECK-LABEL: define void @_ZN5test21BC2Ev - // CHECK: load atomic i8* bitcast (i64* @_ZGVZN5test21BC1EvE1x to i8*) acquire, + // CHECK: load atomic i8, i8* bitcast (i64* @_ZGVZN5test21BC1EvE1x to i8*) acquire, // CHECK: call i32 @__cxa_guard_acquire(i64* @_ZGVZN5test21BC1EvE1x) // CHECK: [[T0:%.*]] = call i32 @_ZN5test23fooEv() // CHECK: store i32 [[T0]], i32* @_ZZN5test21BC1EvE1x, // CHECK: call void @__cxa_guard_release(i64* @_ZGVZN5test21BC1EvE1x) // CHECK-LABEL: define void @_ZN5test21BC1Ev - // CHECK: load atomic i8* bitcast (i64* @_ZGVZN5test21BC1EvE1x to i8*) acquire, + // CHECK: load atomic i8, i8* bitcast (i64* @_ZGVZN5test21BC1EvE1x to i8*) acquire, // CHECK: call i32 @__cxa_guard_acquire(i64* @_ZGVZN5test21BC1EvE1x) // CHECK: [[T0:%.*]] = call i32 @_ZN5test23fooEv() // CHECK: store i32 [[T0]], i32* @_ZZN5test21BC1EvE1x, @@ -125,7 +125,7 @@ namespace test2 { static int y = foo(); } // CHECK-LABEL: define void @_ZN5test21BD2Ev( - // CHECK: load atomic i8* bitcast (i64* @_ZGVZN5test21BD1EvE1y to i8*) acquire, + // CHECK: load atomic i8, i8* bitcast (i64* @_ZGVZN5test21BD1EvE1y to i8*) acquire, // CHECK: call i32 @__cxa_guard_acquire(i64* @_ZGVZN5test21BD1EvE1y) // CHECK: [[T0:%.*]] = call i32 @_ZN5test23fooEv() // CHECK: store i32 [[T0]], i32* @_ZZN5test21BD1EvE1y, diff --git a/test/CodeGenCXX/static-local-in-local-class.cpp b/test/CodeGenCXX/static-local-in-local-class.cpp index 9c13ff1c45..a70afcdc38 100644 --- a/test/CodeGenCXX/static-local-in-local-class.cpp +++ b/test/CodeGenCXX/static-local-in-local-class.cpp @@ -56,7 +56,7 @@ int f() { return x()(); } } // CHECK-LABEL: define internal i32 @"_ZZNK14pr18020_lambda3$_0clEvENKUlvE_clEv" -// CHECK: load i32* @"_ZZNK14pr18020_lambda3$_0clEvE2l1" +// CHECK: load i32, i32* @"_ZZNK14pr18020_lambda3$_0clEvE2l1" namespace pr18020_constexpr { // Taking the address of l1 in a constant expression used to crash. @@ -71,7 +71,7 @@ int f() { return x()(); } } // CHECK-LABEL: define internal i32 @"_ZZNK17pr18020_constexpr3$_1clEvENKUlvE_clEv" -// CHECK: load i32** @"_ZZZNK17pr18020_constexpr3$_1clEvENKUlvE_clEvE2l2" +// CHECK: load i32*, i32** @"_ZZZNK17pr18020_constexpr3$_1clEvENKUlvE_clEvE2l2" // Lambda-less reduction that references l1 before emitting it. This didn't // crash if you put it in a namespace. @@ -88,7 +88,7 @@ static pr18020_class x; int pr18020_f() { return x()(); } // CHECK-LABEL: define linkonce_odr i32 @_ZZN13pr18020_classclEvEN1UclEv -// CHECK: load i32* @_ZZN13pr18020_classclEvE2l1 +// CHECK: load i32, i32* @_ZZN13pr18020_classclEvE2l1 // In this test case, the function containing the static local will not be // emitted because it is unneeded. 
However, the operator call of the inner class @@ -104,7 +104,7 @@ extern "C" int call_deduced_return_operator() { // CHECK-LABEL: define i32 @call_deduced_return_operator() // CHECK: call i32* @_ZZL14deduced_returnvEN1SclEv( -// CHECK: load i32* % +// CHECK: load i32, i32* % // CHECK: ret i32 % // CHECK-LABEL: define internal i32* @_ZZL14deduced_returnvEN1SclEv(%struct.S* %this) @@ -124,7 +124,7 @@ extern "C" int call_block_deduced_return() { // CHECK-LABEL: define i32 @call_block_deduced_return() // CHECK: call i32* @_ZZZL20block_deduced_returnvEUb_EN1SclEv( -// CHECK: load i32* % +// CHECK: load i32, i32* % // CHECK: ret i32 % // CHECK-LABEL: define internal i32* @_ZZZL20block_deduced_returnvEUb_EN1SclEv(%struct.S.6* %this) #0 align 2 { @@ -142,7 +142,7 @@ label: void *global_label = decltype(static_local_label(0))::get(); // CHECK-LABEL: define linkonce_odr i8* @_ZZ18static_local_labelPvEN1S3getEv() -// CHECK: %[[lbl:[^ ]*]] = load i8** @_ZZ18static_local_labelPvE1q +// CHECK: %[[lbl:[^ ]*]] = load i8*, i8** @_ZZ18static_local_labelPvE1q // CHECK: ret i8* %[[lbl]] auto global_lambda = []() { diff --git a/test/CodeGenCXX/temporaries.cpp b/test/CodeGenCXX/temporaries.cpp index e587241c74..c537124c46 100644 --- a/test/CodeGenCXX/temporaries.cpp +++ b/test/CodeGenCXX/temporaries.cpp @@ -531,7 +531,7 @@ namespace Elision { struct C { operator A() const; }; void test6(const C *x) { // CHECK: [[T0:%.*]] = alloca [[A]], align 8 - // CHECK: [[X:%.*]] = load [[C]]** {{%.*}}, align 8 + // CHECK: [[X:%.*]] = load [[C]]*, [[C]]** {{%.*}}, align 8 // CHECK-NEXT: call void @_ZNK7Elision1CcvNS_1AEEv([[A]]* sret [[T0]], [[C]]* [[X]]) // CHECK-NEXT: call void @_ZNK7Elision1A3fooEv([[A]]* [[T0]]) // CHECK-NEXT: call void @_ZN7Elision1AD1Ev([[A]]* [[T0]]) @@ -557,11 +557,11 @@ namespace PR8623 { // CHECK: call void @_ZN6PR86231AC1Ei([[A]]* [[TMP]], i32 3) // CHECK-NEXT: store i1 true, i1* [[RCONS]] // CHECK-NEXT: br label - // CHECK: load i1* [[RCONS]] + // CHECK: load i1, i1* [[RCONS]] // CHECK-NEXT: br i1 // CHECK: call void @_ZN6PR86231AD1Ev([[A]]* [[TMP]]) // CHECK-NEXT: br label - // CHECK: load i1* [[LCONS]] + // CHECK: load i1, i1* [[LCONS]] // CHECK-NEXT: br i1 // CHECK: call void @_ZN6PR86231AD1Ev([[A]]* [[TMP]]) // CHECK-NEXT: br label diff --git a/test/CodeGenCXX/throw-expressions.cpp b/test/CodeGenCXX/throw-expressions.cpp index 4dd5322fba..3fe20388f2 100644 --- a/test/CodeGenCXX/throw-expressions.cpp +++ b/test/CodeGenCXX/throw-expressions.cpp @@ -34,7 +34,7 @@ int test5(bool x, bool y, int z) { // CHECK: br i1 // // y.true: -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: br label // // y.false: @@ -58,7 +58,7 @@ int test6(bool x, bool y, int z) { // CHECK: br i1 // // y.true: -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: br label // // y.false: diff --git a/test/CodeGenCXX/thunks.cpp b/test/CodeGenCXX/thunks.cpp index f32f93562b..2287d65a28 100644 --- a/test/CodeGenCXX/thunks.cpp +++ b/test/CodeGenCXX/thunks.cpp @@ -227,7 +227,7 @@ namespace Test8 { void C::helper(NonPOD var) {} // CHECK-LABEL: define void @_ZThn8_N5Test81C3barENS_6NonPODE( - // CHECK-NOT: load [[NONPODTYPE]]* + // CHECK-NOT: load [[NONPODTYPE]], [[NONPODTYPE]]* // CHECK-NOT: memcpy // CHECK: ret void void C::bar(NonPOD var) {} diff --git a/test/CodeGenCXX/uncopyable-args.cpp b/test/CodeGenCXX/uncopyable-args.cpp index 77996f656e..814cb6215d 100644 --- a/test/CodeGenCXX/uncopyable-args.cpp +++ b/test/CodeGenCXX/uncopyable-args.cpp @@ -12,7 +12,7 @@ void bar() { } // CHECK-LABEL: define void 
@_ZN7trivial3barEv() // CHECK: alloca %"struct.trivial::A" -// CHECK: load i8** +// CHECK: load i8*, i8** // CHECK: call void @_ZN7trivial3fooENS_1AE(i8* %{{.*}}) // CHECK-LABEL: declare void @_ZN7trivial3fooENS_1AE(i8*) @@ -33,7 +33,7 @@ void bar() { // CHECK-LABEL: define void @_ZN12default_ctor3barEv() // CHECK: alloca %"struct.default_ctor::A" // CHECK: call void @_Z{{.*}}C1Ev( -// CHECK: load i8** +// CHECK: load i8*, i8** // CHECK: call void @_ZN12default_ctor3fooENS_1AE(i8* %{{.*}}) // CHECK-LABEL: declare void @_ZN12default_ctor3fooENS_1AE(i8*) @@ -136,7 +136,7 @@ void bar() { } // CHECK-LABEL: define void @_ZN14copy_defaulted3barEv() // CHECK: call void @_Z{{.*}}C1Ev( -// CHECK: load i8** +// CHECK: load i8*, i8** // CHECK: call void @_ZN14copy_defaulted3fooENS_1AE(i8* %{{.*}}) // CHECK-LABEL: declare void @_ZN14copy_defaulted3fooENS_1AE(i8*) @@ -156,7 +156,7 @@ void bar() { } // CHECK-LABEL: define void @_ZN14move_defaulted3barEv() // CHECK: call void @_Z{{.*}}C1Ev( -// CHECK: load i8** +// CHECK: load i8*, i8** // CHECK: call void @_ZN14move_defaulted3fooENS_1AE(i8* %{{.*}}) // CHECK-LABEL: declare void @_ZN14move_defaulted3fooENS_1AE(i8*) @@ -175,7 +175,7 @@ void bar() { } // CHECK-LABEL: define void @_ZN17trivial_defaulted3barEv() // CHECK: call void @_Z{{.*}}C1Ev( -// CHECK: load i8** +// CHECK: load i8*, i8** // CHECK: call void @_ZN17trivial_defaulted3fooENS_1AE(i8* %{{.*}}) // CHECK-LABEL: declare void @_ZN17trivial_defaulted3fooENS_1AE(i8*) diff --git a/test/CodeGenCXX/unknown-anytype.cpp b/test/CodeGenCXX/unknown-anytype.cpp index e6f887bea0..fe10b13daa 100644 --- a/test/CodeGenCXX/unknown-anytype.cpp +++ b/test/CodeGenCXX/unknown-anytype.cpp @@ -12,7 +12,7 @@ int test0() { extern __unknown_anytype test0_any; - // COMMON: load i32* @test0_any + // COMMON: load i32, i32* @test0_any return (int) test0_any; } @@ -38,7 +38,7 @@ float test2a() { float test3() { extern __unknown_anytype test3_any; - // COMMON: [[FN:%.*]] = load float (i32)** @test3_any, + // COMMON: [[FN:%.*]] = load float (i32)*, float (i32)** @test3_any, // COMMON: call float [[FN]](i32 5) return ((float(*)(int)) test3_any)(5); } @@ -48,8 +48,8 @@ namespace test4 { extern __unknown_anytype test4_any2; int test() { - // COMMON: load i32* @_ZN5test410test4_any1E - // COMMON: load i8* @_ZN5test410test4_any2E + // COMMON: load i32, i32* @_ZN5test410test4_any1E + // COMMON: load i8, i8* @_ZN5test410test4_any2E return (int) test4_any1 + (char) test4_any2; } } diff --git a/test/CodeGenCXX/vararg-non-pod-ms-compat.cpp b/test/CodeGenCXX/vararg-non-pod-ms-compat.cpp index e916707c80..450860e1e6 100644 --- a/test/CodeGenCXX/vararg-non-pod-ms-compat.cpp +++ b/test/CodeGenCXX/vararg-non-pod-ms-compat.cpp @@ -19,7 +19,7 @@ void test(X x) { // X64: %[[agg:[^ ]*]] = alloca %struct.X // X64: %[[valptr:[^ ]*]] = getelementptr %struct.X, %struct.X* %[[agg]], i32 0, i32 0 - // X64: %[[val:[^ ]*]] = load i32* %[[valptr]] + // X64: %[[val:[^ ]*]] = load i32, i32* %[[valptr]] // X64: call void (...)* @"\01?vararg@@YAXZZ"(i32 %[[val]]) // CHECK-NOT: llvm.trap diff --git a/test/CodeGenCXX/varargs.cpp b/test/CodeGenCXX/varargs.cpp index 31bbee9863..3159dc68af 100644 --- a/test/CodeGenCXX/varargs.cpp +++ b/test/CodeGenCXX/varargs.cpp @@ -37,7 +37,7 @@ namespace test1 { // CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[X]] to i8* // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 8, i32 4, i1 false) // CHECK-NEXT: [[T0:%.*]] = bitcast [[A]]* [[TMP]] to i64* - // CHECK-NEXT: [[T1:%.*]] = load i64* [[T0]], align 1 
+ // CHECK-NEXT: [[T1:%.*]] = load i64, i64* [[T0]], align 1 // CHECK-NEXT: call void (...)* @_ZN5test13fooEz(i64 [[T1]]) // CHECK-NEXT: ret void } diff --git a/test/CodeGenCXX/virtual-base-cast.cpp b/test/CodeGenCXX/virtual-base-cast.cpp index 16704e34c4..554e80d3cf 100644 --- a/test/CodeGenCXX/virtual-base-cast.cpp +++ b/test/CodeGenCXX/virtual-base-cast.cpp @@ -15,15 +15,15 @@ A* a() { return x; } // CHECK: @_Z1av() [[NUW:#[0-9]+]] // CHECK: [[VBASEOFFSETPTRA:%[a-zA-Z0-9\.]+]] = getelementptr i8, i8* {{.*}}, i64 -16 // CHECK: [[CASTVBASEOFFSETPTRA:%[a-zA-Z0-9\.]+]] = bitcast i8* [[VBASEOFFSETPTRA]] to i32* -// CHECK: load i32* [[CASTVBASEOFFSETPTRA]] +// CHECK: load i32, i32* [[CASTVBASEOFFSETPTRA]] // CHECK: } // MSVC: @"\01?a@@YAPAUA@@XZ"() [[NUW:#[0-9]+]] { // MSVC: %[[vbptr_off:.*]] = getelementptr inbounds i8, i8* {{.*}}, i32 0 // MSVC: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to i32** -// MSVC: %[[vbtable:.*]] = load i32** %[[vbptr]] +// MSVC: %[[vbtable:.*]] = load i32*, i32** %[[vbptr]] // MSVC: %[[entry:.*]] = getelementptr inbounds i32, i32* {{.*}}, i32 1 -// MSVC: %[[offset:.*]] = load i32* %[[entry]] +// MSVC: %[[offset:.*]] = load i32, i32* %[[entry]] // MSVC: add nsw i32 0, %[[offset]] // MSVC: } @@ -31,16 +31,16 @@ B* b() { return x; } // CHECK: @_Z1bv() [[NUW]] // CHECK: [[VBASEOFFSETPTRA:%[a-zA-Z0-9\.]+]] = getelementptr i8, i8* {{.*}}, i64 -20 // CHECK: [[CASTVBASEOFFSETPTRA:%[a-zA-Z0-9\.]+]] = bitcast i8* [[VBASEOFFSETPTRA]] to i32* -// CHECK: load i32* [[CASTVBASEOFFSETPTRA]] +// CHECK: load i32, i32* [[CASTVBASEOFFSETPTRA]] // CHECK: } // Same as 'a' except we use a different vbtable offset. // MSVC: @"\01?b@@YAPAUB@@XZ"() [[NUW:#[0-9]+]] { // MSVC: %[[vbptr_off:.*]] = getelementptr inbounds i8, i8* {{.*}}, i32 0 // MSVC: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to i32** -// MSVC: %[[vbtable:.*]] = load i32** %[[vbptr]] +// MSVC: %[[vbtable:.*]] = load i32*, i32** %[[vbptr]] // MSVC: %[[entry:.*]] = getelementptr inbounds i32, i32* {{.*}}, i32 2 -// MSVC: %[[offset:.*]] = load i32* %[[entry]] +// MSVC: %[[offset:.*]] = load i32, i32* %[[entry]] // MSVC: add nsw i32 0, %[[offset]] // MSVC: } @@ -49,7 +49,7 @@ BB* c() { return x; } // CHECK: @_Z1cv() [[NUW]] // CHECK: [[VBASEOFFSETPTRC:%[a-zA-Z0-9\.]+]] = getelementptr i8, i8* {{.*}}, i64 -24 // CHECK: [[CASTVBASEOFFSETPTRC:%[a-zA-Z0-9\.]+]] = bitcast i8* [[VBASEOFFSETPTRC]] to i32* -// CHECK: [[VBASEOFFSETC:%[a-zA-Z0-9\.]+]] = load i32* [[CASTVBASEOFFSETPTRC]] +// CHECK: [[VBASEOFFSETC:%[a-zA-Z0-9\.]+]] = load i32, i32* [[CASTVBASEOFFSETPTRC]] // CHECK: add i32 [[VBASEOFFSETC]], 8 // CHECK: } @@ -57,9 +57,9 @@ BB* c() { return x; } // MSVC: @"\01?c@@YAPAUBB@@XZ"() [[NUW:#[0-9]+]] { // MSVC: %[[vbptr_off:.*]] = getelementptr inbounds i8, i8* {{.*}}, i32 0 // MSVC: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to i32** -// MSVC: %[[vbtable:.*]] = load i32** %[[vbptr]] +// MSVC: %[[vbtable:.*]] = load i32*, i32** %[[vbptr]] // MSVC: %[[entry:.*]] = getelementptr inbounds i32, i32* {{.*}}, i32 4 -// MSVC: %[[offset:.*]] = load i32* %[[entry]] +// MSVC: %[[offset:.*]] = load i32, i32* %[[entry]] // MSVC: add nsw i32 0, %[[offset]] // MSVC: } @@ -76,9 +76,9 @@ BB* d() { return y; } // MSVC: @"\01?d@@YAPAUBB@@XZ"() [[NUW:#[0-9]+]] { // MSVC: %[[vbptr_off:.*]] = getelementptr inbounds i8, i8* {{.*}}, i32 4 // MSVC: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to i32** -// MSVC: %[[vbtable:.*]] = load i32** %[[vbptr]] +// MSVC: %[[vbtable:.*]] = load i32*, i32** %[[vbptr]] // MSVC: %[[entry:.*]] = getelementptr inbounds i32, 
i32* {{.*}}, i32 4 -// MSVC: %[[offset:.*]] = load i32* %[[entry]] +// MSVC: %[[offset:.*]] = load i32, i32* %[[entry]] // MSVC: add nsw i32 4, %[[offset]] // MSVC: } diff --git a/test/CodeGenCXX/vla-lambda-capturing.cpp b/test/CodeGenCXX/vla-lambda-capturing.cpp index ededbb7eee..27d263b727 100644 --- a/test/CodeGenCXX/vla-lambda-capturing.cpp +++ b/test/CodeGenCXX/vla-lambda-capturing.cpp @@ -15,7 +15,7 @@ typedef __INTPTR_TYPE__ intptr_t; // CHECK: define void [[G:@.+]]( // CHECK: [[N_ADDR:%.+]] = alloca [[INTPTR_T]] // CHECK: store [[INTPTR_T]] %{{.+}}, [[INTPTR_T]]* [[N_ADDR]] -// CHECK: [[N_VAL:%.+]] = load [[INTPTR_T]]* [[N_ADDR]] +// CHECK: [[N_VAL:%.+]] = load [[INTPTR_T]], [[INTPTR_T]]* [[N_ADDR]] // CHECK: [[CAP_EXPR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE1]], [[CAP_TYPE1]]* [[CAP_ARG:%.+]], i{{.+}} 0, i{{.+}} 0 // CHECK: store [[INTPTR_T]] [[N_VAL]], [[INTPTR_T]]* [[CAP_EXPR_REF]] // CHECK: [[CAP_BUFFER_ADDR:%.+]] = getelementptr inbounds [[CAP_TYPE1]], [[CAP_TYPE1]]* [[CAP_ARG]], i{{.+}} 0, i{{.+}} 1 @@ -32,11 +32,11 @@ void g(intptr_t n) { } // CHECK: void [[G_LAMBDA]]([[CAP_TYPE1]]* -// CHECK: [[THIS:%.+]] = load [[CAP_TYPE1]]** +// CHECK: [[THIS:%.+]] = load [[CAP_TYPE1]]*, [[CAP_TYPE1]]** // CHECK: [[N_ADDR:%.+]] = getelementptr inbounds [[CAP_TYPE1]], [[CAP_TYPE1]]* [[THIS]], i{{.+}} 0, i{{.+}} 0 -// CHECK: [[N:%.+]] = load [[INTPTR_T]]* [[N_ADDR]] +// CHECK: [[N:%.+]] = load [[INTPTR_T]], [[INTPTR_T]]* [[N_ADDR]] // CHECK: [[BUFFER_ADDR:%.+]] = getelementptr inbounds [[CAP_TYPE1]], [[CAP_TYPE1]]* [[THIS]], i{{.+}} 0, i{{.+}} 1 -// CHECK: [[BUFFER:%.+]] = load [[INTPTR_T]]** [[BUFFER_ADDR]] +// CHECK: [[BUFFER:%.+]] = load [[INTPTR_T]]*, [[INTPTR_T]]** [[BUFFER_ADDR]] // CHECK: call i{{.+}}* @llvm.stacksave() // CHECK: alloca [[INTPTR_T]], [[INTPTR_T]] [[N]] // CHECK: call void @llvm.stackrestore( @@ -112,9 +112,9 @@ int main() { // CHECK: ret void // CHECK: define linkonce_odr void [[F_INT_LAMBDA]]([[CAP_TYPE2]]* -// CHECK: [[THIS:%.+]] = load [[CAP_TYPE2]]** +// CHECK: [[THIS:%.+]] = load [[CAP_TYPE2]]*, [[CAP_TYPE2]]** // CHECK: [[SIZE_REF:%.+]] = getelementptr inbounds [[CAP_TYPE2]], [[CAP_TYPE2]]* [[THIS]], i{{.+}} 0, i{{.+}} 0 -// CHECK: [[SIZE:%.+]] = load [[INTPTR_T]]* [[SIZE_REF]] +// CHECK: [[SIZE:%.+]] = load [[INTPTR_T]], [[INTPTR_T]]* [[SIZE_REF]] // CHECK: call i{{.+}}* @llvm.stacksave() // CHECK: alloca [[INTPTR_T]], [[INTPTR_T]] [[SIZE]] // CHECK: call void @llvm.stackrestore( @@ -122,49 +122,49 @@ int main() { // CHECK: define {{.*}} void [[B_INT_LAMBDA]]([[CAP_TYPE3]]* // CHECK: [[SIZE2_REF:%.+]] = getelementptr inbounds [[CAP_TYPE3]], [[CAP_TYPE3]]* [[THIS:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 -// CHECK: [[SIZE2:%.+]] = load i{{[0-9]+}}* [[SIZE2_REF]] +// CHECK: [[SIZE2:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIZE2_REF]] // CHECK: [[SIZE1_REF:%.+]] = getelementptr inbounds [[CAP_TYPE3]], [[CAP_TYPE3]]* [[THIS]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 -// CHECK: [[SIZE1:%.+]] = load i{{[0-9]+}}* [[SIZE1_REF]] +// CHECK: [[SIZE1:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIZE1_REF]] // CHECK: [[N_ADDR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE3]], [[CAP_TYPE3]]* [[THIS]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 -// CHECK: [[N_ADDR:%.+]] = load [[INTPTR_T]]** [[N_ADDR_REF]] -// CHECK: [[N:%.+]] = load [[INTPTR_T]]* [[N_ADDR]] +// CHECK: [[N_ADDR:%.+]] = load [[INTPTR_T]]*, [[INTPTR_T]]** [[N_ADDR_REF]] +// CHECK: [[N:%.+]] = load [[INTPTR_T]], [[INTPTR_T]]* [[N_ADDR]] // CHECK: [[BUFFER1_ADDR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE3]], [[CAP_TYPE3]]* [[THIS]], 
i{{[0-9]+}} 0, i{{[0-9]+}} 3 -// CHECK: [[BUFFER1_ADDR:%.+]] = load [[INTPTR_T]]** [[BUFFER1_ADDR_REF]] +// CHECK: [[BUFFER1_ADDR:%.+]] = load [[INTPTR_T]]*, [[INTPTR_T]]** [[BUFFER1_ADDR_REF]] // CHECK: [[ELEM_OFFSET:%.+]] = mul {{.*}} i{{[0-9]+}} [[N]], [[SIZE1]] // CHECK: [[ELEM_ADDR:%.+]] = getelementptr inbounds [[INTPTR_T]], [[INTPTR_T]]* [[BUFFER1_ADDR]], i{{[0-9]+}} [[ELEM_OFFSET]] // CHECK: [[SIZEOF:%.+]] = mul {{.*}} i{{[0-9]+}} {{[0-9]+}}, [[SIZE1]] // CHECK: [[N_ADDR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE3]], [[CAP_TYPE3]]* [[THIS]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 -// CHECK: [[N_ADDR:%.+]] = load [[INTPTR_T]]** [[N_ADDR_REF]] +// CHECK: [[N_ADDR:%.+]] = load [[INTPTR_T]]*, [[INTPTR_T]]** [[N_ADDR_REF]] // CHECK: store [[INTPTR_T]] {{%.+}}, [[INTPTR_T]]* [[N_ADDR]] // CHECK: [[N_ADDR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE4]], [[CAP_TYPE4]]* [[CAP:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK: [[N_ADDR_REF_ORIG:%.+]] = getelementptr inbounds [[CAP_TYPE3]], [[CAP_TYPE3]]* [[THIS]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 -// CHECK: [[N_ADDR_ORIG:%.+]] = load [[INTPTR_T]]** [[N_ADDR_REF_ORIG]] +// CHECK: [[N_ADDR_ORIG:%.+]] = load [[INTPTR_T]]*, [[INTPTR_T]]** [[N_ADDR_REF_ORIG]] // CHECK: store [[INTPTR_T]]* [[N_ADDR_ORIG]], [[INTPTR_T]]** [[N_ADDR_REF]] // CHECK: [[SIZE1_REF:%.+]] = getelementptr inbounds [[CAP_TYPE4]], [[CAP_TYPE4]]* [[CAP]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK: store i{{[0-9]+}} [[SIZE1]], i{{[0-9]+}}* [[SIZE1_REF]] // CHECK: [[BUFFER2_ADDR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE4]], [[CAP_TYPE4]]* [[CAP]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 // CHECK: [[BUFFER2_ADDR_REF_ORIG:%.+]] = getelementptr inbounds [[CAP_TYPE3]], [[CAP_TYPE3]]* [[THIS]], i{{[0-9]+}} 0, i{{[0-9]+}} 4 -// CHECK: [[BUFFER2_ADDR_ORIG:%.+]] = load [[INTPTR_T]]** [[BUFFER2_ADDR_REF_ORIG]] +// CHECK: [[BUFFER2_ADDR_ORIG:%.+]] = load [[INTPTR_T]]*, [[INTPTR_T]]** [[BUFFER2_ADDR_REF_ORIG]] // CHECK: store [[INTPTR_T]]* [[BUFFER2_ADDR_ORIG]], [[INTPTR_T]]** [[BUFFER2_ADDR_REF]] // CHECK: [[SIZE2_REF:%.+]] = getelementptr inbounds [[CAP_TYPE4]], [[CAP_TYPE4]]* [[CAP]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 // CHECK: store i{{[0-9]+}} [[SIZE2]], i{{[0-9]+}}* [[SIZE2_REF]] // CHECK: [[BUFFER1_ADDR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE4]], [[CAP_TYPE4]]* [[CAP]], i{{[0-9]+}} 0, i{{[0-9]+}} 4 // CHECK: [[BUFFER1_ADDR_REF_ORIG:%.+]] = getelementptr inbounds [[CAP_TYPE3]], [[CAP_TYPE3]]* [[THIS]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 -// CHECK: [[BUFFER1_ADDR_ORIG:%.+]] = load [[INTPTR_T]]** [[BUFFER1_ADDR_REF_ORIG]] +// CHECK: [[BUFFER1_ADDR_ORIG:%.+]] = load [[INTPTR_T]]*, [[INTPTR_T]]** [[BUFFER1_ADDR_REF_ORIG]] // CHECK: store [[INTPTR_T]]* [[BUFFER1_ADDR_ORIG]], [[INTPTR_T]]** [[BUFFER1_ADDR_REF]] // CHECK: call void [[B_INT_LAMBDA_LAMBDA:@.+]]([[CAP_TYPE4]]* [[CAP]]) // CHECK: ret void // CHECK: define {{.*}} void [[B_INT_LAMBDA_LAMBDA]]([[CAP_TYPE4]]* // CHECK: [[SIZE1_REF:%.+]] = getelementptr inbounds [[CAP_TYPE4]], [[CAP_TYPE4]]* [[THIS:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 -// CHECK: [[SIZE1:%.+]] = load i{{[0-9]+}}* [[SIZE1_REF]] +// CHECK: [[SIZE1:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIZE1_REF]] // CHECK: [[SIZE2_REF:%.+]] = getelementptr inbounds [[CAP_TYPE4]], [[CAP_TYPE4]]* [[THIS]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 -// CHECK: [[SIZE2:%.+]] = load i{{[0-9]+}}* [[SIZE2_REF]] +// CHECK: [[SIZE2:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIZE2_REF]] // CHECK: [[BUFFER2_ADDR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE4]], [[CAP_TYPE4]]* [[THIS]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 -// CHECK: 
[[BUFFER2_ADDR:%.+]] = load [[INTPTR_T]]** [[BUFFER2_ADDR_REF]] +// CHECK: [[BUFFER2_ADDR:%.+]] = load [[INTPTR_T]]*, [[INTPTR_T]]** [[BUFFER2_ADDR_REF]] // CHECK: [[SIZEOF_BUFFER2:%.+]] = mul {{.*}} i{{[0-9]+}} {{[0-9]+}}, [[SIZE1]] // CHECK: [[BUFFER1_ADDR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE4]], [[CAP_TYPE4]]* [[THIS]], i{{[0-9]+}} 0, i{{[0-9]+}} 4 -// CHECK: [[BUFFER1_ADDR:%.+]] = load [[INTPTR_T]]** [[BUFFER1_ADDR_REF]] +// CHECK: [[BUFFER1_ADDR:%.+]] = load [[INTPTR_T]]*, [[INTPTR_T]]** [[BUFFER1_ADDR_REF]] // CHECK: [[MUL:%.+]] = mul {{.*}} i{{[0-9]+}} [[SIZE2]], [[SIZE1]] // CHECK: mul {{.*}} i{{[0-9]+}} {{[0-9]+}}, [[MUL]] // CHECK: ret void diff --git a/test/CodeGenCXX/vla.cpp b/test/CodeGenCXX/vla.cpp index dac2135522..4e22bba7d7 100644 --- a/test/CodeGenCXX/vla.cpp +++ b/test/CodeGenCXX/vla.cpp @@ -25,30 +25,30 @@ void test0(void *array, int n) { // CHECK-NEXT: store i32 // Capture the bounds. - // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4 + // CHECK-NEXT: [[T0:%.*]] = load i32, i32* [[N]], align 4 // CHECK-NEXT: [[DIM0:%.*]] = zext i32 [[T0]] to i64 - // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4 + // CHECK-NEXT: [[T0:%.*]] = load i32, i32* [[N]], align 4 // CHECK-NEXT: [[T1:%.*]] = add nsw i32 [[T0]], 1 // CHECK-NEXT: [[DIM1:%.*]] = zext i32 [[T1]] to i64 typedef short array_t[n][n+1]; - // CHECK-NEXT: [[T0:%.*]] = load i8** [[ARRAY]], align 8 + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[ARRAY]], align 8 // CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to i16* // CHECK-NEXT: store i16* [[T1]], i16** [[REF]], align 8 array_t &ref = *(array_t*) array; - // CHECK-NEXT: [[T0:%.*]] = load i16** [[REF]] + // CHECK-NEXT: [[T0:%.*]] = load i16*, i16** [[REF]] // CHECK-NEXT: [[T1:%.*]] = mul nsw i64 1, [[DIM1]] // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i16, i16* [[T0]], i64 [[T1]] // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i16, i16* [[T2]], i64 2 // CHECK-NEXT: store i16 3, i16* [[T3]] ref[1][2] = 3; - // CHECK-NEXT: [[T0:%.*]] = load i16** [[REF]] + // CHECK-NEXT: [[T0:%.*]] = load i16*, i16** [[REF]] // CHECK-NEXT: [[T1:%.*]] = mul nsw i64 4, [[DIM1]] // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i16, i16* [[T0]], i64 [[T1]] // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i16, i16* [[T2]], i64 5 - // CHECK-NEXT: [[T4:%.*]] = load i16* [[T3]] + // CHECK-NEXT: [[T4:%.*]] = load i16, i16* [[T3]] // CHECK-NEXT: store i16 [[T4]], i16* [[S]], align 2 short s = ref[4][5]; diff --git a/test/CodeGenCXX/volatile-1.cpp b/test/CodeGenCXX/volatile-1.cpp index 20389364b5..f4b545b59a 100644 --- a/test/CodeGenCXX/volatile-1.cpp +++ b/test/CodeGenCXX/volatile-1.cpp @@ -26,8 +26,8 @@ void test() { i; (float)(ci); - // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) - // CHECK-NEXT: load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) // CHECK-NEXT: sitofp [[INT]] // These are not uses in C++: @@ -37,20 +37,20 @@ void test() { (void)a; (void)(ci=ci); - // CHECK-NEXT: [[R:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) - // CHECK-NEXT: [[I:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: 
[[I:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) // CHECK-NEXT: store volatile [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) (void)(i=j); - // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]]* @j + // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]], [[INT]]* @j // CHECK-NEXT: store volatile [[INT]] [[T]], [[INT]]* @i ci+=ci; - // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) - // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) - // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) - // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) // Not sure why they're ordered this way. // CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]] // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]] @@ -59,18 +59,18 @@ void test() { // Note that C++ requires an extra load volatile over C from the LHS of the '+'. (ci += ci) + ci; - // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) - // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) - // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) - // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) // CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]] // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]] // CHECK-NEXT: store volatile [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) // CHECK-NEXT: store volatile [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) - // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) - // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) - // CHECK-NEXT: [[R2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) - // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R1:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I1:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: 
[[R2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I2:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) // These additions can be elided. // CHECK-NEXT: add [[INT]] [[R1]], [[R2]] // CHECK-NEXT: add [[INT]] [[I1]], [[I2]] @@ -160,17 +160,17 @@ void test() { // CHECK-NEXT: store volatile i=i,k; - // CHECK-NEXT: load volatile [[INT]]* @i + // CHECK-NEXT: load volatile [[INT]], [[INT]]* @i // CHECK-NEXT: store volatile {{.*}}, [[INT]]* @i (i=j,k=j); - // CHECK-NEXT: load volatile [[INT]]* @j + // CHECK-NEXT: load volatile [[INT]], [[INT]]* @j // CHECK-NEXT: store volatile {{.*}}, [[INT]]* @i - // CHECK-NEXT: load volatile [[INT]]* @j + // CHECK-NEXT: load volatile [[INT]], [[INT]]* @j // CHECK-NEXT: store volatile {{.*}}, [[INT]]* @k (i=j,k); - // CHECK-NEXT: load volatile [[INT]]* @j + // CHECK-NEXT: load volatile [[INT]], [[INT]]* @j // CHECK-NEXT: store volatile {{.*}}, [[INT]]* @i (i,j); @@ -225,9 +225,9 @@ void test() { // CHECK-NEXT: store volatile __imag ci = __imag ci = __imag ci; - // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) // CHECK-NEXT: store volatile [[INT]] [[T]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) - // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[T:%.*]] = load volatile [[INT]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) // CHECK-NEXT: store volatile [[INT]] [[T]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) __real (i = j); @@ -339,12 +339,12 @@ void test() { // CHECK-NEXT: add (i,j)=k; - // CHECK-NEXT: load volatile [[INT]]* @k + // CHECK-NEXT: load volatile [[INT]], [[INT]]* @k // CHECK-NEXT: store volatile {{.*}}, [[INT]]* @j (j=k,i)=i; - // CHECK-NEXT: load volatile [[INT]]* @i - // CHECK-NEXT: load volatile [[INT]]* @k + // CHECK-NEXT: load volatile [[INT]], [[INT]]* @i + // CHECK-NEXT: load volatile [[INT]], [[INT]]* @k // CHECK-NEXT: store volatile {{.*}}, [[INT]]* @j // CHECK-NEXT: store volatile {{.*}}, [[INT]]* @i diff --git a/test/CodeGenCXX/volatile.cpp b/test/CodeGenCXX/volatile.cpp index ff1cb97b16..ea7429f291 100644 --- a/test/CodeGenCXX/volatile.cpp +++ b/test/CodeGenCXX/volatile.cpp @@ -13,7 +13,7 @@ namespace test0 { // CHECK-LABEL: define void @_ZN5test04testENS_1AE( void test(A t) { - // CHECK: [[ARR:%.*]] = load [[A:%.*]]** @_ZN5test05arrayE, align 8 + // CHECK: [[ARR:%.*]] = load [[A:%.*]]*, [[A:%.*]]** @_ZN5test05arrayE, align 8 // CHECK-NEXT: [[IDX:%.*]] = getelementptr inbounds [[A]], [[A]]* [[ARR]], i64 0 // CHECK-NEXT: [[TMP:%.*]] = call dereferenceable({{[0-9]+}}) [[A]]* @_ZNV5test01AaSERVKS0_([[A]]* [[IDX]], [[A]]* dereferenceable({{[0-9]+}}) [[T:%.*]]) // CHECK-NEXT: ret void @@ -26,7 +26,7 @@ namespace test1 { // CHECK-LABEL: define void @_ZN5test14testEv() void test() { - // CHECK: [[TMP:%.*]] = load i32** @_ZN5test11xE, align 8 + // CHECK: [[TMP:%.*]] = load i32*, i32** @_ZN5test11xE, align 8 // CHECK-NEXT: ret void *x; } diff --git a/test/CodeGenCXX/windows-itanium-exceptions.cpp b/test/CodeGenCXX/windows-itanium-exceptions.cpp index e2c4190ac9..3694d2c813 100644 --- a/test/CodeGenCXX/windows-itanium-exceptions.cpp +++ b/test/CodeGenCXX/windows-itanium-exceptions.cpp @@ -36,7 +36,7 @@ void attempt() { // CHECK: store i32 %2, i32* 
%ehselector.slot // CHECK: br label %catch // CHECK: catch: -// CHECK: %exn = load i8** %exn.slot +// CHECK: %exn = load i8*, i8** %exn.slot // CHECK: %3 = call {{.*}}i8* @__cxa_begin_catch(i8* %{{2|exn}}) // CHECK: call {{.*}}void @__cxa_end_catch() // CHECK: br label %try.cont diff --git a/test/CodeGenObjC/arc-blocks.m b/test/CodeGenObjC/arc-blocks.m index 3a06d00a4b..76106bb2fb 100644 --- a/test/CodeGenObjC/arc-blocks.m +++ b/test/CodeGenObjC/arc-blocks.m @@ -30,34 +30,34 @@ void test2(id x) { // CHECK-NEXT: store i8* [[PARM]], i8** [[X]] // CHECK-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 -// CHECK-NEXT: [[T0:%.*]] = load i8** [[X]], +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]], // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[SLOT]], // CHECK-NEXT: bitcast // CHECK-NEXT: call void @test2_helper( -// CHECK-NEXT: [[T0:%.*]] = load i8** [[SLOTREL]] +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[SLOTREL]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]], !clang.imprecise_release -// CHECK-NEXT: [[T0:%.*]] = load i8** [[X]] +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: ret void extern void test2_helper(id (^)(void)); test2_helper(^{ return x; }); // CHECK-LABEL: define internal void @__copy_helper_block_ -// CHECK: [[T0:%.*]] = load i8** +// CHECK: [[T0:%.*]] = load i8*, i8** // CHECK-NEXT: [[SRC:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* -// CHECK-NEXT: [[T0:%.*]] = load i8** +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** // CHECK-NEXT: [[DST:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[SRC]], i32 0, i32 5 -// CHECK-NEXT: [[T1:%.*]] = load i8** [[T0]] +// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[T0]] // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) [[NUW]] // CHECK-NEXT: ret void // CHECK-LABEL: define internal void @__destroy_helper_block_ -// CHECK: [[T0:%.*]] = load i8** +// CHECK: [[T0:%.*]] = load i8*, i8** // CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[T1]], i32 0, i32 5 -// CHECK-NEXT: [[T3:%.*]] = load i8** [[T2]] +// CHECK-NEXT: [[T3:%.*]] = load i8*, i8** [[T2]] // CHECK-NEXT: call void @objc_release(i8* [[T3]]) // CHECK-NEXT: ret void } @@ -76,26 +76,26 @@ void test3(void (^sink)(id*)) { // CHECK-NEXT: store void (i8**)* {{%.*}}, void (i8**)** [[SINK]] // CHECK-NEXT: store i8* null, i8** [[STRONG]] - // CHECK-NEXT: load void (i8**)** [[SINK]] + // CHECK-NEXT: load void (i8**)*, void (i8**)** [[SINK]] // CHECK-NEXT: bitcast // CHECK-NEXT: getelementptr // CHECK-NEXT: [[BLOCK:%.*]] = bitcast - // CHECK-NEXT: [[V:%.*]] = load i8** [[STRONG]] + // CHECK-NEXT: [[V:%.*]] = load i8*, i8** [[STRONG]] // CHECK-NEXT: store i8* [[V]], i8** [[TEMP]] - // CHECK-NEXT: [[F0:%.*]] = load i8** + // CHECK-NEXT: [[F0:%.*]] = load i8*, i8** // CHECK-NEXT: [[F1:%.*]] = bitcast i8* [[F0]] to void (i8*, i8**)* // CHECK-NEXT: call void [[F1]](i8* [[BLOCK]], i8** [[TEMP]]) - // CHECK-NEXT: [[T0:%.*]] = load i8** [[TEMP]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[TEMP]] // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) // CHECK-NEXT: call void (...)* @clang.arc.use(i8* [[V]]) [[NUW]] - // 
CHECK-NEXT: [[T2:%.*]] = load i8** [[STRONG]] + // CHECK-NEXT: [[T2:%.*]] = load i8*, i8** [[STRONG]] // CHECK-NEXT: store i8* [[T1]], i8** [[STRONG]] // CHECK-NEXT: call void @objc_release(i8* [[T2]]) - // CHECK-NEXT: [[T0:%.*]] = load i8** [[STRONG]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[STRONG]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) - // CHECK-NEXT: load void (i8**)** [[SINK]] + // CHECK-NEXT: load void (i8**)*, void (i8**)** [[SINK]] // CHECK-NEXT: bitcast // CHECK-NEXT: call void @objc_release // CHECK-NEXT: ret void @@ -126,27 +126,27 @@ void test4(void) { // CHECK: call void @test4_helper( // CHECK: [[T0:%.*]] = bitcast [[BYREF_T]]* [[VAR]] to i8* // CHECK-NEXT: call void @_Block_object_dispose(i8* [[T0]], i32 8) - // CHECK-NEXT: [[T0:%.*]] = load i8** [[SLOT]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[SLOT]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK: ret void // CHECK-LABEL: define internal void @__Block_byref_object_copy_ // CHECK: [[T0:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* {{%.*}}, i32 0, i32 6 - // CHECK-NEXT: load i8** + // CHECK-NEXT: load i8*, i8** // CHECK-NEXT: bitcast i8* {{%.*}} to [[BYREF_T]]* // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* {{%.*}}, i32 0, i32 6 - // CHECK-NEXT: [[T2:%.*]] = load i8** [[T1]] + // CHECK-NEXT: [[T2:%.*]] = load i8*, i8** [[T1]] // CHECK-NEXT: store i8* [[T2]], i8** [[T0]] // CHECK-NEXT: store i8* null, i8** [[T1]] // CHECK-LABEL: define internal void @__Block_byref_object_dispose_ // CHECK: [[T0:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* {{%.*}}, i32 0, i32 6 - // CHECK-NEXT: [[T1:%.*]] = load i8** [[T0]] + // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[T0]] // CHECK-NEXT: call void @objc_release(i8* [[T1]]) // CHECK-LABEL: define internal void @__test4_block_invoke // CHECK: [[SLOT:%.*]] = getelementptr inbounds {{.*}}, i32 0, i32 6 - // CHECK-NEXT: [[T0:%.*]] = load i8** [[SLOT]], align 8 + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[SLOT]], align 8 // CHECK-NEXT: store i8* null, i8** [[SLOT]], // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK-NEXT: ret void @@ -174,7 +174,7 @@ void test5(void) { // 0x40800000 - has signature but no copy/dispose, as well as BLOCK_HAS_EXTENDED_LAYOUT // CHECK: store i32 -1073741824, i32* // CHECK: [[CAPTURE:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 - // CHECK-NEXT: [[T0:%.*]] = load i8** [[VAR]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[VAR]] // CHECK-NEXT: store i8* [[T0]], i8** [[CAPTURE]] // CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] to // CHECK: call void @test5_helper @@ -211,7 +211,7 @@ void test6(void) { // CHECK-LABEL: define internal void @__Block_byref_object_copy_ // CHECK: [[T0:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* {{%.*}}, i32 0, i32 6 - // CHECK-NEXT: load i8** + // CHECK-NEXT: load i8*, i8** // CHECK-NEXT: bitcast i8* {{%.*}} to [[BYREF_T]]* // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* {{%.*}}, i32 0, i32 6 // CHECK-NEXT: call void @objc_moveWeak(i8** [[T0]], i8** [[T1]]) @@ -286,14 +286,14 @@ void test7(void) { // CHECK-NEXT: store // CHECK: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 -// CHECK-NEXT: [[T1:%.*]] = load [[TEST8]]** [[SELF]], +// CHECK-NEXT: [[T1:%.*]] = load [[TEST8]]*, [[TEST8]]** [[SELF]], // CHECK-NEXT: [[T2:%.*]] = 
bitcast [[TEST8]]* [[T1]] to i8* // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_retain(i8* [[T2]]) // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to [[TEST8]]* // CHECK-NEXT: store [[TEST8]]* [[T4]], [[TEST8]]** [[T0]] // CHECK-NEXT: bitcast [[BLOCK_T]]* [[BLOCK]] to // CHECK: call void @test8_helper( -// CHECK-NEXT: [[T1:%.*]] = load [[TEST8]]** [[D0]] +// CHECK-NEXT: [[T1:%.*]] = load [[TEST8]]*, [[TEST8]]** [[D0]] // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST8]]* [[T1]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T2]]) // CHECK: ret void @@ -312,7 +312,7 @@ id test9(void) { }(); // CHECK-LABEL: define i8* @test9( -// CHECK: load i8** getelementptr +// CHECK: load i8*, i8** getelementptr // CHECK-NEXT: bitcast i8* // CHECK-NEXT: call i8* // CHECK-NEXT: tail call i8* @objc_autoreleaseReturnValue @@ -340,9 +340,9 @@ void test10a(void) { // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainBlock(i8* [[T0]]) // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to void ()* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* [[BYREF]], i32 0, i32 1 - // CHECK-NEXT: [[T4:%.*]] = load [[BYREF_T]]** [[T3]] + // CHECK-NEXT: [[T4:%.*]] = load [[BYREF_T]]*, [[BYREF_T]]** [[T3]] // CHECK-NEXT: [[T5:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* [[T4]], i32 0, i32 6 - // CHECK-NEXT: [[T6:%.*]] = load void ()** [[T5]], align 8 + // CHECK-NEXT: [[T6:%.*]] = load void ()*, void ()** [[T5]], align 8 // CHECK-NEXT: store void ()* {{%.*}}, void ()** [[T5]], align 8 // CHECK-NEXT: [[T7:%.*]] = bitcast void ()* [[T6]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T7]]) @@ -351,7 +351,7 @@ void test10a(void) { // CHECK-NEXT: [[SLOT:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* [[BYREF]], i32 0, i32 6 // CHECK-NEXT: [[T0:%.*]] = bitcast [[BYREF_T]]* [[BYREF]] to i8* // CHECK-NEXT: call void @_Block_object_dispose(i8* [[T0]], i32 8) - // CHECK-NEXT: [[T1:%.*]] = load void ()** [[SLOT]] + // CHECK-NEXT: [[T1:%.*]] = load void ()*, void ()** [[SLOT]] // CHECK-NEXT: [[T2:%.*]] = bitcast void ()* [[T1]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T2]]) // CHECK: ret void @@ -363,13 +363,13 @@ void test10a(void) { // BLOCK_FIELD_IS_BLOCK as long as we don't pass BLOCK_BYREF_CALLER. 
// CHECK-LABEL: define internal void @__Block_byref_object_copy -// CHECK: [[D0:%.*]] = load i8** {{%.*}} +// CHECK: [[D0:%.*]] = load i8*, i8** {{%.*}} // CHECK-NEXT: [[D1:%.*]] = bitcast i8* [[D0]] to [[BYREF_T]]* // CHECK-NEXT: [[D2:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* [[D1]], i32 0, i32 6 -// CHECK-NEXT: [[S0:%.*]] = load i8** {{%.*}} +// CHECK-NEXT: [[S0:%.*]] = load i8*, i8** {{%.*}} // CHECK-NEXT: [[S1:%.*]] = bitcast i8* [[S0]] to [[BYREF_T]]* // CHECK-NEXT: [[S2:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* [[S1]], i32 0, i32 6 -// CHECK-NEXT: [[T0:%.*]] = load void ()** [[S2]], align 8 +// CHECK-NEXT: [[T0:%.*]] = load void ()*, void ()** [[S2]], align 8 // CHECK-NEXT: [[T1:%.*]] = bitcast void ()* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainBlock(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to void ()* @@ -377,10 +377,10 @@ void test10a(void) { // CHECK: ret void // CHECK-LABEL: define internal void @__Block_byref_object_dispose -// CHECK: [[T0:%.*]] = load i8** {{%.*}} +// CHECK: [[T0:%.*]] = load i8*, i8** {{%.*}} // CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to [[BYREF_T]]* // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* [[T1]], i32 0, i32 6 -// CHECK-NEXT: [[T3:%.*]] = load void ()** [[T2]] +// CHECK-NEXT: [[T3:%.*]] = load void ()*, void ()** [[T2]] // CHECK-NEXT: [[T4:%.*]] = bitcast void ()* [[T3]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T4]]) // CHECK-NEXT: ret void @@ -405,9 +405,9 @@ void test10b(void) { // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainBlock(i8* [[T0]]) // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to void ()* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* [[BYREF]], i32 0, i32 1 - // CHECK-NEXT: [[T4:%.*]] = load [[BYREF_T]]** [[T3]] + // CHECK-NEXT: [[T4:%.*]] = load [[BYREF_T]]*, [[BYREF_T]]** [[T3]] // CHECK-NEXT: [[T5:%.*]] = getelementptr inbounds [[BYREF_T]], [[BYREF_T]]* [[T4]], i32 0, i32 6 - // CHECK-NEXT: [[T6:%.*]] = load void ()** [[T5]], align 8 + // CHECK-NEXT: [[T6:%.*]] = load void ()*, void ()** [[T5]], align 8 // CHECK-NEXT: store void ()* {{%.*}}, void ()** [[T5]], align 8 // CHECK-NEXT: [[T7:%.*]] = bitcast void ()* [[T6]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T7]]) @@ -415,7 +415,7 @@ void test10b(void) { // Destroy at end of function. 
// CHECK-NEXT: [[T0:%.*]] = bitcast [[BYREF_T]]* [[BYREF]] to i8* // CHECK-NEXT: call void @_Block_object_dispose(i8* [[T0]], i32 8) - // CHECK-NEXT: [[T1:%.*]] = load void ()** [[SLOT]] + // CHECK-NEXT: [[T1:%.*]] = load void ()*, void ()** [[SLOT]] // CHECK-NEXT: [[T2:%.*]] = bitcast void ()* [[T1]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T2]]) // CHECK: ret void @@ -454,7 +454,7 @@ void test11b(void) { // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to void ()* // CHECK-NEXT: [[T4:%.*]] = bitcast void ()* [[T3]] to i8* // CHECK-NEXT: store i8* [[T4]], i8** [[B]], align 8 - // CHECK-NEXT: [[T5:%.*]] = load i8** [[B]] + // CHECK-NEXT: [[T5:%.*]] = load i8*, i8** [[B]] // CHECK-NEXT: call void @objc_release(i8* [[T5]]) // CHECK: ret void } @@ -495,14 +495,14 @@ void test13(id x) { // CHECK-NEXT: [[T0:%.*]] = call i8* @objc_retain(i8* {{%.*}}) // CHECK-NEXT: store i8* [[T0]], i8** [[X]], align 8 // CHECK-NEXT: [[CLEANUP_ADDR:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 - // CHECK-NEXT: [[T0:%.*]] = load i8** [[X]], align 8 + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]], align 8 // CHECK-NEXT: [[T1:%.*]] = icmp ne i8* [[T0]], null // CHECK-NEXT: store i1 false, i1* [[CLEANUP_ACTIVE]] // CHECK-NEXT: br i1 [[T1]], // CHECK-NOT: br // CHECK: [[CAPTURE:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 - // CHECK-NEXT: [[T0:%.*]] = load i8** [[X]], align 8 + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]], align 8 // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[CAPTURE]], align 8 // CHECK-NEXT: store i1 true, i1* [[CLEANUP_ACTIVE]] @@ -514,19 +514,19 @@ void test13(id x) { // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainBlock(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to void ()* // CHECK-NEXT: store void ()* [[T3]], void ()** [[B]], align 8 - // CHECK-NEXT: [[T0:%.*]] = load void ()** [[B]], align 8 + // CHECK-NEXT: [[T0:%.*]] = load void ()*, void ()** [[B]], align 8 // CHECK-NEXT: call void @test13_use(void ()* [[T0]]) - // CHECK-NEXT: [[T0:%.*]] = load void ()** [[B]] + // CHECK-NEXT: [[T0:%.*]] = load void ()*, void ()** [[B]] // CHECK-NEXT: [[T1:%.*]] = bitcast void ()* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) - // CHECK-NEXT: [[T0:%.*]] = load i1* [[CLEANUP_ACTIVE]] + // CHECK-NEXT: [[T0:%.*]] = load i1, i1* [[CLEANUP_ACTIVE]] // CHECK-NEXT: br i1 [[T0]] - // CHECK: [[T0:%.*]] = load i8** [[CLEANUP_ADDR]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[CLEANUP_ADDR]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK-NEXT: br label - // CHECK: [[T0:%.*]] = load i8** [[X]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK-NEXT: ret void } @@ -580,7 +580,7 @@ id (^test17(id self, int which))(void) { // CHECK-NOT: objc_retain // CHECK-NOT: objc_release // CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B0]], i32 0, i32 5 -// CHECK-NEXT: [[T1:%.*]] = load i8** [[SELF]], align +// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[SELF]], align // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) // CHECK-NEXT: store i8* [[T2]], i8** [[T0]], // CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK]]* [[B0]] to i8* ()* @@ -588,7 +588,7 @@ id (^test17(id self, int which))(void) { // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainBlock(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8* ()* // CHECK-NEXT: store i8* ()* [[T3]], i8* ()** [[RET]] 
-// CHECK-NEXT: [[T0:%.*]] = load i8** [[DESTROY]] +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[DESTROY]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK-NEXT: store i32 // CHECK-NEXT: br label @@ -598,7 +598,7 @@ id (^test17(id self, int which))(void) { // CHECK-NOT: objc_retain // CHECK-NOT: objc_release // CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B1]], i32 0, i32 5 -// CHECK-NEXT: [[T1:%.*]] = load i8** [[SELF]], align +// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[SELF]], align // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) // CHECK-NEXT: store i8* [[T2]], i8** [[T0]], // CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK]]* [[B1]] to i8* ()* @@ -606,7 +606,7 @@ id (^test17(id self, int which))(void) { // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainBlock(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8* ()* // CHECK-NEXT: store i8* ()* [[T3]], i8* ()** [[RET]] -// CHECK-NEXT: [[T0:%.*]] = load i8** [[DESTROY]] +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[DESTROY]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK-NEXT: store i32 // CHECK-NEXT: br label @@ -619,7 +619,7 @@ void test18(id x) { // CHECK-UNOPT-NEXT: call void @objc_storeStrong(i8** [[X]], // CHECK-UNOPT-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-UNOPT: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 -// CHECK-UNOPT-NEXT: [[T0:%.*]] = load i8** [[X]], +// CHECK-UNOPT-NEXT: [[T0:%.*]] = load i8*, i8** [[X]], // CHECK-UNOPT-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) // CHECK-UNOPT-NEXT: store i8* [[T1]], i8** [[SLOT]], // CHECK-UNOPT-NEXT: bitcast @@ -631,19 +631,19 @@ void test18(id x) { test18_helper(^{ return x; }); // CHECK-UNOPT-LABEL: define internal void @__copy_helper_block_ -// CHECK-UNOPT: [[T0:%.*]] = load i8** +// CHECK-UNOPT: [[T0:%.*]] = load i8*, i8** // CHECK-UNOPT-NEXT: [[SRC:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* -// CHECK-UNOPT-NEXT: [[T0:%.*]] = load i8** +// CHECK-UNOPT-NEXT: [[T0:%.*]] = load i8*, i8** // CHECK-UNOPT-NEXT: [[DST:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* // CHECK-UNOPT-NEXT: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[SRC]], i32 0, i32 5 // CHECK-UNOPT-NEXT: [[T1:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[DST]], i32 0, i32 5 -// CHECK-UNOPT-NEXT: [[T2:%.*]] = load i8** [[T0]] +// CHECK-UNOPT-NEXT: [[T2:%.*]] = load i8*, i8** [[T0]] // CHECK-UNOPT-NEXT: store i8* null, i8** [[T1]] // CHECK-UNOPT-NEXT: call void @objc_storeStrong(i8** [[T1]], i8* [[T2]]) [[NUW]] // CHECK-UNOPT-NEXT: ret void // CHECK-UNOPT-LABEL: define internal void @__destroy_helper_block_ -// CHECK-UNOPT: [[T0:%.*]] = load i8** +// CHECK-UNOPT: [[T0:%.*]] = load i8*, i8** // CHECK-UNOPT-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* // CHECK-UNOPT-NEXT: [[T2:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[T1]], i32 0, i32 5 // CHECK-UNOPT-NEXT: call void @objc_storeStrong(i8** [[T2]], i8* null) @@ -665,7 +665,7 @@ void test19(void (^b)(void)) { // Block setup. We skip most of this. Note the bare retain. 
// CHECK-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 -// CHECK-NEXT: [[T0:%.*]] = load void ()** [[B]], +// CHECK-NEXT: [[T0:%.*]] = load void ()*, void ()** [[B]], // CHECK-NEXT: [[T1:%.*]] = bitcast void ()* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to void ()* @@ -677,12 +677,12 @@ void test19(void (^b)(void)) { test19_sink(^(int x) { b(); }); // Block teardown. -// CHECK-NEXT: [[T0:%.*]] = load void ()** [[SLOTREL]] +// CHECK-NEXT: [[T0:%.*]] = load void ()*, void ()** [[SLOTREL]] // CHECK-NEXT: [[T1:%.*]] = bitcast void ()* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) // Local cleanup. -// CHECK-NEXT: [[T0:%.*]] = load void ()** [[B]] +// CHECK-NEXT: [[T0:%.*]] = load void ()*, void ()** [[B]] // CHECK-NEXT: [[T1:%.*]] = bitcast void ()* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) diff --git a/test/CodeGenObjC/arc-foreach.m b/test/CodeGenObjC/arc-foreach.m index fd10ae0afe..17067a0a58 100644 --- a/test/CodeGenObjC/arc-foreach.m +++ b/test/CodeGenObjC/arc-foreach.m @@ -40,13 +40,13 @@ void test0(NSArray *array) { // CHECK-LP64-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 64, i32 8, i1 false) // Evaluate the collection expression and retain. -// CHECK-LP64-NEXT: [[T0:%.*]] = load [[ARRAY_T]]** [[ARRAY]], align 8 +// CHECK-LP64-NEXT: [[T0:%.*]] = load [[ARRAY_T]]*, [[ARRAY_T]]** [[ARRAY]], align 8 // CHECK-LP64-NEXT: [[T1:%.*]] = bitcast [[ARRAY_T]]* [[T0]] to i8* // CHECK-LP64-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) // CHECK-LP64-NEXT: [[SAVED_ARRAY:%.*]] = bitcast i8* [[T2]] to [[ARRAY_T]]* // Call the enumeration method. 
-// CHECK-LP64-NEXT: [[T0:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK-LP64-NEXT: [[T0:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-LP64-NEXT: [[T1:%.*]] = bitcast [[ARRAY_T]]* [[SAVED_ARRAY]] to i8* // CHECK-LP64-NEXT: [[SIZE:%.*]] = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, [[STATE_T]]*, [16 x i8*]*, i64)*)(i8* [[T1]], i8* [[T0]], [[STATE_T]]* [[STATE]], [16 x i8*]* [[BUFFER]], i64 16) @@ -55,21 +55,21 @@ void test0(NSArray *array) { // CHECK-LP64-NEXT: br i1 [[T0]] // CHECK-LP64: [[T0:%.*]] = getelementptr inbounds [[STATE_T]], [[STATE_T]]* [[STATE]], i32 0, i32 1 -// CHECK-LP64-NEXT: [[T1:%.*]] = load i8*** [[T0]] +// CHECK-LP64-NEXT: [[T1:%.*]] = load i8**, i8*** [[T0]] // CHECK-LP64-NEXT: [[T2:%.*]] = getelementptr i8*, i8** [[T1]], i64 -// CHECK-LP64-NEXT: [[T3:%.*]] = load i8** [[T2]] +// CHECK-LP64-NEXT: [[T3:%.*]] = load i8*, i8** [[T2]] // CHECK-LP64-NEXT: store i8* [[T3]], i8** [[X]] // CHECK-LP64: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-LP64: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 -// CHECK-LP64-NEXT: [[T1:%.*]] = load i8** [[X]] +// CHECK-LP64-NEXT: [[T1:%.*]] = load i8*, i8** [[X]] // CHECK-LP64-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) // CHECK-LP64-NEXT: store i8* [[T2]], i8** [[T0]] // CHECK-LP64-NEXT: [[T1:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] // CHECK-LP64: call void @use_block( // CHECK-LP64-NEXT: call void @objc_storeStrong(i8** [[D0]], i8* null) -// CHECK-LP64: [[T0:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK-LP64: [[T0:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-LP64-NEXT: [[T1:%.*]] = bitcast [[ARRAY_T]]* [[SAVED_ARRAY]] to i8* // CHECK-LP64-NEXT: [[SIZE:%.*]] = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, [[STATE_T]]*, [16 x i8*]*, i64)*)(i8* [[T1]], i8* [[T0]], [[STATE_T]]* [[STATE]], [16 x i8*]* [[BUFFER]], i64 16) @@ -86,7 +86,7 @@ void test0(NSArray *array) { // CHECK-LP64: [[BLOCK:%.*]] = bitcast i8* {{%.*}} to [[BLOCK_T]]* // CHECK-LP64-NOT: ret // CHECK-LP64: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 -// CHECK-LP64-NEXT: [[T2:%.*]] = load i8** [[T0]], align 8 +// CHECK-LP64-NEXT: [[T2:%.*]] = load i8*, i8** [[T0]], align 8 // CHECK-LP64-NEXT: call void @use(i8* [[T2]]) void test1(NSArray *array) { @@ -103,9 +103,9 @@ void test1(NSArray *array) { // CHECK-LP64-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]], // CHECK-LP64: [[T0:%.*]] = getelementptr inbounds [[STATE_T]], [[STATE_T]]* [[STATE]], i32 0, i32 1 -// CHECK-LP64-NEXT: [[T1:%.*]] = load i8*** [[T0]] +// CHECK-LP64-NEXT: [[T1:%.*]] = load i8**, i8*** [[T0]] // CHECK-LP64-NEXT: [[T2:%.*]] = getelementptr i8*, i8** [[T1]], i64 -// CHECK-LP64-NEXT: [[T3:%.*]] = load i8** [[T2]] +// CHECK-LP64-NEXT: [[T3:%.*]] = load i8*, i8** [[T2]] // CHECK-LP64-NEXT: call i8* @objc_initWeak(i8** [[X]], i8* [[T3]]) // CHECK-LP64: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 @@ -135,7 +135,7 @@ void test2(Test2 *a) { // CHECK-LP64-NEXT: [[COLL:%.*]] = bitcast i8* [[T2]] to [[ARRAY_T]]* // Make sure it's not immediately released before starting the iteration. 
-// CHECK-LP64-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK-LP64-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-LP64-NEXT: [[T0:%.*]] = bitcast [[ARRAY_T]]* [[COLL]] to i8* // CHECK-LP64-NEXT: @objc_msgSend @@ -163,11 +163,11 @@ void test3(NSArray *array) { // CHECK-LP64-LABEL: define void @test3( // CHECK-LP64: [[ARRAY:%.*]] = alloca [[ARRAY_T]]*, align 8 // CHECK-LP64-NEXT: [[X:%.*]] = alloca i8*, align 8 - // CHECK-LP64: [[T0:%.*]] = load i8** [[X]], align 8 + // CHECK-LP64: [[T0:%.*]] = load i8*, i8** [[X]], align 8 // CHECK-LP64-NEXT: [[T1:%.*]] = icmp ne i8* [[T0]], null // CHECK-LP64-NEXT: br i1 [[T1]], // CHECK-LP64: br label [[L:%[^ ]+]] - // CHECK-LP64: [[T0:%.*]] = load i8** [[X]], align 8 + // CHECK-LP64: [[T0:%.*]] = load i8*, i8** [[X]], align 8 // CHECK-LP64-NEXT: call void @use(i8* [[T0]]) // CHECK-LP64-NEXT: br label [[L]] } diff --git a/test/CodeGenObjC/arc-literals.m b/test/CodeGenObjC/arc-literals.m index 95c598dc4c..cce4a79de9 100644 --- a/test/CodeGenObjC/arc-literals.m +++ b/test/CodeGenObjC/arc-literals.m @@ -44,14 +44,14 @@ void test_array(id a, id b) { // Constructing the array // CHECK: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS:%[A-Za-z0-9]+]], i32 0, i32 0 - // CHECK-NEXT: [[V0:%.*]] = load i8** [[A]], + // CHECK-NEXT: [[V0:%.*]] = load i8*, i8** [[A]], // CHECK-NEXT: store i8* [[V0]], i8** [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i32 0, i32 1 - // CHECK-NEXT: [[V1:%.*]] = load i8** [[B]], + // CHECK-NEXT: [[V1:%.*]] = load i8*, i8** [[B]], // CHECK-NEXT: store i8* [[V1]], i8** [[T0]] - // CHECK-NEXT: [[T0:%.*]] = load [[CLASS_T:%.*]]** @"OBJC_CLASSLIST - // CHECK-NEXT: [[SEL:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES + // CHECK-NEXT: [[T0:%.*]] = load [[CLASS_T:%.*]]*, [[CLASS_T:%.*]]** @"OBJC_CLASSLIST + // CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T1:%.*]] = bitcast [[CLASS_T]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [2 x i8*]* [[OBJECTS]] to i8** // CHECK-NEXT: [[T3:%.*]] = call i8* bitcast ({{.*@objc_msgSend.*}})(i8* [[T1]], i8* [[SEL]], i8** [[T2]], i64 2) @@ -80,21 +80,21 @@ void test_dictionary(id k1, id o1, id k2, id o2) { // Constructing the arrays // CHECK: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[KEYS:%[A-Za-z0-9]+]], i32 0, i32 0 - // CHECK-NEXT: [[V0:%.*]] = load i8** [[K1]], + // CHECK-NEXT: [[V0:%.*]] = load i8*, i8** [[K1]], // CHECK-NEXT: store i8* [[V0]], i8** [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS:%[A-Za-z0-9]+]], i32 0, i32 0 - // CHECK-NEXT: [[V1:%.*]] = load i8** [[O1]], + // CHECK-NEXT: [[V1:%.*]] = load i8*, i8** [[O1]], // CHECK-NEXT: store i8* [[V1]], i8** [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[KEYS]], i32 0, i32 1 - // CHECK-NEXT: [[V2:%.*]] = load i8** [[K2]], + // CHECK-NEXT: [[V2:%.*]] = load i8*, i8** [[K2]], // CHECK-NEXT: store i8* [[V2]], i8** [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i32 0, i32 1 - // CHECK-NEXT: [[V3:%.*]] = load i8** [[O2]], + // CHECK-NEXT: [[V3:%.*]] = load i8*, i8** [[O2]], // CHECK-NEXT: store i8* [[V3]], i8** [[T0]] // Constructing the dictionary - // CHECK-NEXT: [[T0:%.*]] = load [[CLASS_T:%.*]]** @"OBJC_CLASSLIST - // CHECK-NEXT: [[SEL:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES + // CHECK-NEXT: [[T0:%.*]] = load [[CLASS_T:%.*]]*, [[CLASS_T:%.*]]** @"OBJC_CLASSLIST + // CHECK-NEXT: [[SEL:%.*]] 
= load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T1:%.*]] = bitcast [[CLASS_T]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [2 x i8*]* [[OBJECTS]] to i8** // CHECK-NEXT: [[T3:%.*]] = bitcast [2 x i8*]* [[KEYS]] to i8** @@ -127,7 +127,7 @@ void test_property(B *b) { // CHECK: [[T0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[OBJECTS:%.*]], i32 0, i32 0 // Invoke 'prop' - // CHECK: [[SEL:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES + // CHECK: [[SEL:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T1:%.*]] = bitcast // CHECK-NEXT: [[T2:%.*]] = call [[B:%.*]]* bitcast ({{.*}} @objc_msgSend to {{.*}})(i8* [[T1]], i8* [[SEL]]) // CHECK-NEXT: [[T3:%.*]] = bitcast [[B]]* [[T2]] to i8* @@ -139,8 +139,8 @@ void test_property(B *b) { // CHECK-NEXT: store i8* [[V1]], i8** [[T0]] // Invoke arrayWithObjects:count: - // CHECK-NEXT: [[T0:%.*]] = load [[CLASS_T]]** @"OBJC_CLASSLIST - // CHECK-NEXT: [[SEL:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES + // CHECK-NEXT: [[T0:%.*]] = load [[CLASS_T]]*, [[CLASS_T]]** @"OBJC_CLASSLIST + // CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T1:%.*]] = bitcast [[CLASS_T]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [1 x i8*]* [[OBJECTS]] to i8** // CHECK-NEXT: [[T3:%.*]] = call i8* bitcast ({{.*}} @objc_msgSend to {{.*}}(i8* [[T1]], i8* [[SEL]], i8** [[T2]], i64 1) diff --git a/test/CodeGenObjC/arc-loadweakretained-release.m b/test/CodeGenObjC/arc-loadweakretained-release.m index 4db67a97de..5abc8d9d4a 100644 --- a/test/CodeGenObjC/arc-loadweakretained-release.m +++ b/test/CodeGenObjC/arc-loadweakretained-release.m @@ -30,7 +30,7 @@ int main (int argc, const char * argv[]) { // CHECK: [[SIXTEEN:%.*]] = call i8* @objc_loadWeakRetained(i8** {{%.*}}) // CHECK-NEXT: [[SEVENTEEN:%.*]] = bitcast i8* [[SIXTEEN]] to {{%.*}} -// CHECK-NEXT: [[EIGHTEEN:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_6 +// CHECK-NEXT: [[EIGHTEEN:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_6 // CHECK-NEXT: [[NINETEEN:%.*]] = bitcast %0* [[SEVENTEEN]] to i8* // CHECK-NEXT: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend // CHECK-NEXT: [[TWENTY:%.*]] = bitcast %0* [[SEVENTEEN]] to i8* @@ -64,13 +64,13 @@ void test1(int cond) { // CHECK: call void @test34_sink( // CHECK-NEXT: [[ICRISNULL1:%.*]] = icmp eq i8** [[COND1]], null // CHECK-NEXT: br i1 [[ICRISNULL1]], label [[ICRDONE:%.*]], label [[ICRWRITEBACK:%.*]] -// CHECK: [[TWO:%.*]] = load i8** [[INCRTEMP]] +// CHECK: [[TWO:%.*]] = load i8*, i8** [[INCRTEMP]] // CHECK-NEXT: [[THREE:%.*]] = call i8* @objc_storeWeak( // CHECK-NEXT br label [[ICRDONE]] -// CHECK: [[CLEANUPISACTIVE:%.*]] = load i1* [[CONDCLEANUP]] +// CHECK: [[CLEANUPISACTIVE:%.*]] = load i1, i1* [[CONDCLEANUP]] // CHECK-NEXT: br i1 [[CLEANUPISACTIVE]], label [[CLEASNUPACTION:%.*]], label [[CLEANUPDONE:%.*]] -// CHECK: [[FOUR:%.*]] = load i8** [[CONDCLEANUPSAVE]] +// CHECK: [[FOUR:%.*]] = load i8*, i8** [[CONDCLEANUPSAVE]] // CHECK-NEXT: call void @objc_release(i8* [[FOUR]]) // CHECK-NEXT: br label // CHECK: call void @objc_destroyWeak(i8** [[WEAK]]) diff --git a/test/CodeGenObjC/arc-precise-lifetime.m b/test/CodeGenObjC/arc-precise-lifetime.m index ba2502aa11..68ca42dc66 100644 --- a/test/CodeGenObjC/arc-precise-lifetime.m +++ b/test/CodeGenObjC/arc-precise-lifetime.m @@ -10,12 +10,12 @@ void test0() { // CHECK-NEXT: [[CALL:%.*]] = call i8* @test0_helper() // CHECK-NEXT: store i8* [[CALL]], i8** [[X]] - // CHECK-NEXT: [[T1:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** 
[[X]] // CHECK-NEXT: store i8* null, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW:#[0-9]+]] // CHECK-NOT: clang.imprecise_release - // CHECK-NEXT: [[T1:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW:#[0-9]+]] // CHECK-NOT: clang.imprecise_release @@ -37,15 +37,15 @@ void test1a(void) { // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[TEST1]]* // CHECK-NEXT: store [[TEST1]]* [[T3]] - // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]** + // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]*, [[TEST1]]** // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST1]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainAutorelease(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[TEST1]]* - // CHECK-NEXT: [[T4:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: [[T4:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[T5:%.*]] = bitcast [[TEST1]]* [[T3]] to i8* // CHECK-NEXT: [[T6:%.*]] = call i8* bitcast // CHECK-NEXT: store i8* [[T6]], i8** - // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]** + // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]*, [[TEST1]]** // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST1]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: ret void @@ -60,12 +60,12 @@ void test1b(void) { // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[TEST1]]* // CHECK-NEXT: store [[TEST1]]* [[T3]] - // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]** - // CHECK-NEXT: [[T1:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]*, [[TEST1]]** + // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST1]]* [[T0]] to i8* // CHECK-NEXT: [[T3:%.*]] = call i8* bitcast // CHECK-NEXT: store i8* [[T3]], i8** - // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]** + // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]*, [[TEST1]]** // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST1]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW]] // CHECK-NOT: clang.imprecise_release @@ -80,15 +80,15 @@ void test1c(void) { // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[TEST1]]* // CHECK-NEXT: store [[TEST1]]* [[T3]] - // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]** + // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]*, [[TEST1]]** // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST1]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainAutorelease(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[TEST1]]* - // CHECK-NEXT: [[T4:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: [[T4:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[T5:%.*]] = bitcast [[TEST1]]* [[T3]] to i8* // CHECK-NEXT: [[T6:%.*]] = call i8* bitcast // CHECK-NEXT: store i8* [[T6]], i8** - // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]** + // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]*, [[TEST1]]** // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST1]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: ret void @@ -102,15 +102,15 @@ void test1d(void) { // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T1]]) // CHECK-NEXT: 
[[T3:%.*]] = bitcast i8* [[T2]] to [[TEST1]]* // CHECK-NEXT: store [[TEST1]]* [[T3]] - // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]** + // CHECK-NEXT: [[T0:%.*]] = load [[TEST1]]*, [[TEST1]]** // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST1]]* [[T0]] to i8* // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_retainAutorelease // CHECK-NEXT: [[SIX:%.*]] = bitcast i8* [[T3]] to [[TEST1]]* - // CHECK-NEXT: [[SEVEN:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: [[SEVEN:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[EIGHT:%.*]] = bitcast [[TEST1]]* [[SIX]] to i8* // CHECK-NEXT: [[CALL1:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* [[EIGHT]], i8* [[SEVEN]]) // CHECK-NEXT: store i8* [[CALL1]], i8** - // CHECK-NEXT: [[NINE:%.*]] = load [[TEST1]]** + // CHECK-NEXT: [[NINE:%.*]] = load [[TEST1]]*, [[TEST1]]** // CHECK-NEXT: [[TEN:%.*]] = bitcast [[TEST1]]* [[NINE]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[TEN]]) // CHECK-NEXT: ret void @@ -132,17 +132,17 @@ void test2(Test2 *x) { // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[TEST2]]* // CHECK-NEXT: store [[TEST2]]* [[T2]], [[TEST2]]** [[X]], - // CHECK-NEXT: [[T0:%.*]] = load [[TEST2]]** [[X]], - // CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test2.ivar" + // CHECK-NEXT: [[T0:%.*]] = load [[TEST2]]*, [[TEST2]]** [[X]], + // CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test2.ivar" // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST2]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8, i8* [[T1]], i64 [[OFFSET]] // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8** - // CHECK-NEXT: [[T4:%.*]] = load i8** [[T3]], + // CHECK-NEXT: [[T4:%.*]] = load i8*, i8** [[T3]], // CHECK-NEXT: store i8* null, i8** [[T3]], // CHECK-NEXT: call void @objc_release(i8* [[T4]]) [[NUW]] // CHECK-NOT: imprecise - // CHECK-NEXT: [[T0:%.*]] = load [[TEST2]]** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load [[TEST2]]*, [[TEST2]]** [[X]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST2]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW]], !clang.imprecise_release @@ -155,7 +155,7 @@ void test3(PRECISE_LIFETIME id x) { // CHECK-NEXT: [[T0:%.*]] = call i8* @objc_retain(i8* {{%.*}}) [[NUW]] // CHECK-NEXT: store i8* [[T0]], i8** [[X]], - // CHECK-NEXT: [[T0:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]] // CHECK-NOT: imprecise_release diff --git a/test/CodeGenObjC/arc-property.m b/test/CodeGenObjC/arc-property.m index c54abbbcf2..b8dc18e872 100644 --- a/test/CodeGenObjC/arc-property.m +++ b/test/CodeGenObjC/arc-property.m @@ -23,18 +23,18 @@ struct S1 { Class isa; }; @end // The getter should be a simple load. // CHECK: define internal [[S1:%.*]]* @"\01-[Test1 pointer]"( -// CHECK: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test1.pointer" +// CHECK: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test1.pointer" // CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST1:%.*]]* {{%.*}} to i8* // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]] // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[S1]]** -// CHECK-NEXT: [[T3:%.*]] = load [[S1]]** [[T2]], align 8 +// CHECK-NEXT: [[T3:%.*]] = load [[S1]]*, [[S1]]** [[T2]], align 8 // CHECK-NEXT: ret [[S1]]* [[T3]] // The setter should be using objc_setProperty. 
// CHECK: define internal void @"\01-[Test1 setPointer:]"( // CHECK: [[T0:%.*]] = bitcast [[TEST1]]* {{%.*}} to i8* -// CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test1.pointer" -// CHECK-NEXT: [[T1:%.*]] = load [[S1]]** {{%.*}} +// CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test1.pointer" +// CHECK-NEXT: [[T1:%.*]] = load [[S1]]*, [[S1]]** {{%.*}} // CHECK-NEXT: [[T2:%.*]] = bitcast [[S1]]* [[T1]] to i8* // CHECK-NEXT: call void @objc_setProperty(i8* [[T0]], i8* {{%.*}}, i64 [[OFFSET]], i8* [[T2]], i1 zeroext false, i1 zeroext false) // CHECK-NEXT: ret void @@ -56,9 +56,9 @@ static Class theGlobalClass; } @end // CHECK: define internal void @"\01-[Test2 test]"( -// CHECK: [[T0:%.*]] = load i8** @theGlobalClass, align 8 -// CHECK-NEXT: [[T1:%.*]] = load [[TEST2:%.*]]** -// CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test2._theClass" +// CHECK: [[T0:%.*]] = load i8*, i8** @theGlobalClass, align 8 +// CHECK-NEXT: [[T1:%.*]] = load [[TEST2:%.*]]*, [[TEST2:%.*]]** +// CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test2._theClass" // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST2]]* [[T1]] to i8* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 [[OFFSET]] // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i8** @@ -66,20 +66,20 @@ static Class theGlobalClass; // CHECK-NEXT: ret void // CHECK: define internal i8* @"\01-[Test2 theClass]"( -// CHECK: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test2._theClass" +// CHECK: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test2._theClass" // CHECK-NEXT: [[T0:%.*]] = tail call i8* @objc_getProperty(i8* {{.*}}, i8* {{.*}}, i64 [[OFFSET]], i1 zeroext true) // CHECK-NEXT: ret i8* [[T0]] // CHECK: define internal void @"\01-[Test2 setTheClass:]"( // CHECK: [[T0:%.*]] = bitcast [[TEST2]]* {{%.*}} to i8* -// CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test2._theClass" -// CHECK-NEXT: [[T1:%.*]] = load i8** {{%.*}} +// CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test2._theClass" +// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** {{%.*}} // CHECK-NEXT: call void @objc_setProperty(i8* [[T0]], i8* {{%.*}}, i64 [[OFFSET]], i8* [[T1]], i1 zeroext true, i1 zeroext true) // CHECK-NEXT: ret void // CHECK: define internal void @"\01-[Test2 .cxx_destruct]"( -// CHECK: [[T0:%.*]] = load [[TEST2]]** -// CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test2._theClass" +// CHECK: [[T0:%.*]] = load [[TEST2]]*, [[TEST2]]** +// CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test2._theClass" // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST2]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8, i8* [[T1]], i64 [[OFFSET]] // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8** @@ -100,17 +100,17 @@ void test3(Test3 *t) { // CHECK: [[T:%.*]] = alloca [[TEST3]]*, // CHECK-NEXT: [[X:%.*]] = alloca i8*, // Property access. -// CHECK: [[T0:%.*]] = load [[TEST3]]** [[T]], -// CHECK-NEXT: [[SEL:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES +// CHECK: [[T0:%.*]] = load [[TEST3]]*, [[TEST3]]** [[T]], +// CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST3]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* bitcast ({{.*}} @objc_msgSend to {{.*}})(i8* [[T1]], i8* [[SEL]]) // CHECK-NEXT: store i8* [[T2]], i8** [[X]], // Message send. 
-// CHECK-NEXT: [[T0:%.*]] = load [[TEST3]]** [[T]], -// CHECK-NEXT: [[SEL:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES +// CHECK-NEXT: [[T0:%.*]] = load [[TEST3]]*, [[TEST3]]** [[T]], +// CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST3]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* bitcast ({{.*}} @objc_msgSend to {{.*}})(i8* [[T1]], i8* [[SEL]]) -// CHECK-NEXT: [[T3:%.*]] = load i8** [[X]], +// CHECK-NEXT: [[T3:%.*]] = load i8*, i8** [[X]], // CHECK-NEXT: store i8* [[T2]], i8** [[X]], // CHECK-NEXT: call void @objc_release(i8* [[T3]]) // Epilogue. diff --git a/test/CodeGenObjC/arc-related-result-type.m b/test/CodeGenObjC/arc-related-result-type.m index 72d2871879..02d380368b 100644 --- a/test/CodeGenObjC/arc-related-result-type.m +++ b/test/CodeGenObjC/arc-related-result-type.m @@ -13,7 +13,7 @@ void test0(Test0 *val) { // CHECK-NEXT: bitcast // CHECK-NEXT: bitcast // CHECK-NEXT: call void @objc_storeStrong( -// CHECK-NEXT: load [[TEST0]]** [[VAL]], +// CHECK-NEXT: load [[TEST0]]*, [[TEST0]]** [[VAL]], // CHECK-NEXT: load // CHECK-NEXT: bitcast // CHECK-NEXT: [[T0:%.*]] = call i8* bitcast ( diff --git a/test/CodeGenObjC/arc-ternary-op.m b/test/CodeGenObjC/arc-ternary-op.m index 217db80194..3e96e40757 100644 --- a/test/CodeGenObjC/arc-ternary-op.m +++ b/test/CodeGenObjC/arc-ternary-op.m @@ -10,7 +10,7 @@ void test0(_Bool cond) { // CHECK-NEXT: [[RELCOND:%.*]] = alloca i1 // CHECK-NEXT: zext // CHECK-NEXT: store - // CHECK-NEXT: [[T0:%.*]] = load i8* [[COND]] + // CHECK-NEXT: [[T0:%.*]] = load i8, i8* [[COND]] // CHECK-NEXT: [[T1:%.*]] = trunc i8 [[T0]] to i1 // CHECK-NEXT: store i1 false, i1* [[RELCOND]] // CHECK-NEXT: br i1 [[T1]], @@ -22,12 +22,12 @@ void test0(_Bool cond) { // CHECK: [[T0:%.*]] = phi i8* [ null, {{%.*}} ], [ [[CALL]], {{%.*}} ] // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) [[NUW:#[0-9]+]] // CHECK-NEXT: store i8* [[T1]], i8** [[X]], - // CHECK-NEXT: [[REL:%.*]] = load i1* [[RELCOND]] + // CHECK-NEXT: [[REL:%.*]] = load i1, i1* [[RELCOND]] // CHECK-NEXT: br i1 [[REL]], - // CHECK: [[T0:%.*]] = load i8** [[RELVAL]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[RELVAL]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]] // CHECK-NEXT: br label - // CHECK: [[T0:%.*]] = load i8** [[X]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]] // CHECK-NEXT: ret void id x = (cond ? 
0 : test0_helper()); @@ -52,28 +52,28 @@ void test1(int cond) { // CHECK-NEXT: store i8* null, i8** [[STRONG]] // CHECK-NEXT: call i8* @objc_initWeak(i8** [[WEAK]], i8* null) - // CHECK-NEXT: [[T0:%.*]] = load i32* [[COND]] + // CHECK-NEXT: [[T0:%.*]] = load i32, i32* [[COND]] // CHECK-NEXT: [[T1:%.*]] = icmp ne i32 [[T0]], 0 // CHECK: [[ARG:%.*]] = phi i8** // CHECK-NEXT: [[T0:%.*]] = icmp eq i8** [[ARG]], null // CHECK-NEXT: [[T1:%.*]] = select i1 [[T0]], i8** null, i8** [[TEMP1]] // CHECK-NEXT: br i1 [[T0]], - // CHECK: [[T0:%.*]] = load i8** [[ARG]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[ARG]] // CHECK-NEXT: store i8* [[T0]], i8** [[TEMP1]] // CHECK-NEXT: br label // CHECK: [[W:%.*]] = phi i8* [ [[T0]], {{%.*}} ], [ undef, {{%.*}} ] // CHECK-NEXT: call void @test1_sink(i8** [[T1]]) // CHECK-NEXT: [[T0:%.*]] = icmp eq i8** [[ARG]], null // CHECK-NEXT: br i1 [[T0]], - // CHECK: [[T0:%.*]] = load i8** [[TEMP1]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[TEMP1]] // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) // CHECK-NEXT: call void (...)* @clang.arc.use(i8* [[W]]) [[NUW]] - // CHECK-NEXT: [[T2:%.*]] = load i8** [[ARG]] + // CHECK-NEXT: [[T2:%.*]] = load i8*, i8** [[ARG]] // CHECK-NEXT: store i8* [[T1]], i8** [[ARG]] // CHECK-NEXT: call void @objc_release(i8* [[T2]]) // CHECK-NEXT: br label - // CHECK: [[T0:%.*]] = load i32* [[COND]] + // CHECK: [[T0:%.*]] = load i32, i32* [[COND]] // CHECK-NEXT: [[T1:%.*]] = icmp ne i32 [[T0]], 0 // CHECK: [[ARG:%.*]] = phi i8** // CHECK-NEXT: [[T0:%.*]] = icmp eq i8** [[ARG]], null @@ -88,7 +88,7 @@ void test1(int cond) { // CHECK: call void @test1_sink(i8** [[T1]]) // CHECK-NEXT: [[T0:%.*]] = icmp eq i8** [[ARG]], null // CHECK-NEXT: br i1 [[T0]], - // CHECK: [[T0:%.*]] = load i8** [[TEMP2]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[TEMP2]] // CHECK-NEXT: call i8* @objc_storeWeak(i8** [[ARG]], i8* [[T0]]) // CHECK-NEXT: br label @@ -112,7 +112,7 @@ void test2(int cond) { // CHECK: [[CLEANUP_SAVE:%.*]] = alloca i8* // CHECK: [[RUN_CLEANUP:%.*]] = alloca i1 // Evaluate condition; cleanup disabled by default. - // CHECK: [[T0:%.*]] = load i32* [[COND]], + // CHECK: [[T0:%.*]] = load i32, i32* [[COND]], // CHECK-NEXT: icmp ne i32 [[T0]], 0 // CHECK-NEXT: store i1 false, i1* [[RUN_CLEANUP]] // CHECK-NEXT: br i1 @@ -126,9 +126,9 @@ void test2(int cond) { // CHECK: [[T0:%.*]] = phi i8* [ [[T1]], {{%.*}} ], [ null, {{%.*}} ] // CHECK-NEXT: [[RESULT:%.*]] = call i8* @objc_retain(i8* [[T0]]) // Leaving full-expression; run conditional cleanup. 
- // CHECK-NEXT: [[T0:%.*]] = load i1* [[RUN_CLEANUP]] + // CHECK-NEXT: [[T0:%.*]] = load i1, i1* [[RUN_CLEANUP]] // CHECK-NEXT: br i1 [[T0]] - // CHECK: [[T0:%.*]] = load i8** [[CLEANUP_SAVE]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[CLEANUP_SAVE]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK-NEXT: br label // And way down at the end of the loop: diff --git a/test/CodeGenObjC/arc-unopt.m b/test/CodeGenObjC/arc-unopt.m index c0e67dfd3b..f80514d6e4 100644 --- a/test/CodeGenObjC/arc-unopt.m +++ b/test/CodeGenObjC/arc-unopt.m @@ -7,7 +7,7 @@ Test0 *test0(void) { extern Test0 *test0_helper; return test0_helper; - // CHECK: [[LD:%.*]] = load [[TEST0:%.*]]** @test0_helper + // CHECK: [[LD:%.*]] = load [[TEST0:%.*]]*, [[TEST0:%.*]]** @test0_helper // CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST0]]* [[LD]] to i8* // CHECK-NEXT: [[T1:%.*]] = tail call i8* @objc_retainAutoreleaseReturnValue(i8* [[T0]]) // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[TEST0]]* @@ -18,7 +18,7 @@ id test1(void) { extern id test1_helper; return test1_helper; - // CHECK: [[LD:%.*]] = load i8** @test1_helper + // CHECK: [[LD:%.*]] = load i8*, i8** @test1_helper // CHECK-NEXT: [[T0:%.*]] = tail call i8* @objc_retainAutoreleaseReturnValue(i8* [[LD]]) // CHECK-NEXT: ret i8* [[T0]] } @@ -60,7 +60,7 @@ void test5(void) { // CHECK-NEXT: [[Y:%.*]] = alloca [[TEST5:%.*]]*, // CHECK-NEXT: store [[TEST5]]* null, [[TEST5]]** [[X]], // CHECK-NEXT: store [[TEST5]]* null, [[TEST5]]** [[Y]], -// CHECK-NEXT: [[T0:%.*]] = load [[TEST5]]** [[Y]], +// CHECK-NEXT: [[T0:%.*]] = load [[TEST5]]*, [[TEST5]]** [[Y]], // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST5]]** [[X]] to i8** // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST5]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_storeStrong(i8** [[T1]], i8* [[T2]]) diff --git a/test/CodeGenObjC/arc-unoptimized-byref-var.m b/test/CodeGenObjC/arc-unoptimized-byref-var.m index be1ae07767..9d856d659a 100644 --- a/test/CodeGenObjC/arc-unoptimized-byref-var.m +++ b/test/CodeGenObjC/arc-unoptimized-byref-var.m @@ -6,7 +6,7 @@ void test19() { // CHECK-UNOPT-LABEL: define internal void @__Block_byref_object_copy // CHECK-UNOPT: [[X:%.*]] = getelementptr inbounds [[BYREF_T:%.*]], [[BYREF_T:%.*]]* [[VAR:%.*]], i32 0, i32 6 // CHECK-UNOPT: [[X2:%.*]] = getelementptr inbounds [[BYREF_T:%.*]], [[BYREF_T:%.*]]* [[VAR1:%.*]], i32 0, i32 6 -// CHECK-UNOPT-NEXT: [[SIX:%.*]] = load i8** [[X2]], align 8 +// CHECK-UNOPT-NEXT: [[SIX:%.*]] = load i8*, i8** [[X2]], align 8 // CHECK-UNOPT-NEXT: store i8* null, i8** [[X]], align 8 // CHECK-UNOPT-NEXT: call void @objc_storeStrong(i8** [[X]], i8* [[SIX]]) [[NUW:#[0-9]+]] // CHECK-UNOPT-NEXT: call void @objc_storeStrong(i8** [[X2]], i8* null) [[NUW]] diff --git a/test/CodeGenObjC/arc-weak-property.m b/test/CodeGenObjC/arc-weak-property.m index 21221b3d6d..d04032bc0d 100644 --- a/test/CodeGenObjC/arc-weak-property.m +++ b/test/CodeGenObjC/arc-weak-property.m @@ -16,8 +16,8 @@ // CHECK-NEXT: [[CMD:%.*]] = alloca i8*, // CHECK-NEXT: store [[WPT]]* {{%.*}}, [[WPT]]** [[SELF]] // CHECK-NEXT: store i8* {{%.*}}, i8** [[CMD]] -// CHECK-NEXT: [[T0:%.*]] = load [[WPT]]** [[SELF]] -// CHECK-NEXT: [[T1:%.*]] = load i64* @"OBJC_IVAR_$_WeakPropertyTest.PROP" +// CHECK-NEXT: [[T0:%.*]] = load [[WPT]]*, [[WPT]]** [[SELF]] +// CHECK-NEXT: [[T1:%.*]] = load i64, i64* @"OBJC_IVAR_$_WeakPropertyTest.PROP" // CHECK-NEXT: [[T2:%.*]] = bitcast [[WPT]]* [[T0]] to i8* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 [[T1]] // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to 
i8** @@ -32,9 +32,9 @@ // CHECK-NEXT: store [[WPT]]* {{%.*}}, [[WPT]]** [[SELF]] // CHECK-NEXT: store i8* {{%.*}}, i8** [[CMD]] // CHECK-NEXT: store i8* {{%.*}}, i8** [[PROP]] -// CHECK-NEXT: [[V:%.*]] = load i8** [[PROP]] -// CHECK-NEXT: [[T0:%.*]] = load [[WPT]]** [[SELF]] -// CHECK-NEXT: [[T1:%.*]] = load i64* @"OBJC_IVAR_$_WeakPropertyTest.PROP" +// CHECK-NEXT: [[V:%.*]] = load i8*, i8** [[PROP]] +// CHECK-NEXT: [[T0:%.*]] = load [[WPT]]*, [[WPT]]** [[SELF]] +// CHECK-NEXT: [[T1:%.*]] = load i64, i64* @"OBJC_IVAR_$_WeakPropertyTest.PROP" // CHECK-NEXT: [[T2:%.*]] = bitcast [[WPT]]* [[T0]] to i8* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 [[T1]] // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i8** @@ -46,8 +46,8 @@ // CHECK-NEXT: [[CMD:%.*]] = alloca i8*, // CHECK-NEXT: store [[WPT]]* {{%.*}}, [[WPT]]** [[SELF]] // CHECK-NEXT: store i8* {{%.*}}, i8** [[CMD]] -// CHECK-NEXT: [[T0:%.*]] = load [[WPT]]** [[SELF]] -// CHECK-NEXT: [[T1:%.*]] = load i64* @"OBJC_IVAR_$_WeakPropertyTest.PROP" +// CHECK-NEXT: [[T0:%.*]] = load [[WPT]]*, [[WPT]]** [[SELF]] +// CHECK-NEXT: [[T1:%.*]] = load i64, i64* @"OBJC_IVAR_$_WeakPropertyTest.PROP" // CHECK-NEXT: [[T2:%.*]] = bitcast [[WPT]]* [[T0]] to i8* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 [[T1]] // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i8** diff --git a/test/CodeGenObjC/arc.m b/test/CodeGenObjC/arc.m index 89cdda8667..148d8e1bd0 100644 --- a/test/CodeGenObjC/arc.m +++ b/test/CodeGenObjC/arc.m @@ -37,7 +37,7 @@ void test0(id x) { // CHECK: [[X:%.*]] = alloca i8* // CHECK-NEXT: [[PARM:%.*]] = call i8* @objc_retain(i8* {{.*}}) // CHECK-NEXT: store i8* [[PARM]], i8** [[X]] - // CHECK-NEXT: [[TMP:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[TMP:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[TMP]]) // CHECK-NEXT: ret void } @@ -50,12 +50,12 @@ id test1(id x) { // CHECK-NEXT: [[PARM:%.*]] = call i8* @objc_retain(i8* {{%.*}}) // CHECK-NEXT: store i8* [[PARM]], i8** [[X]] // CHECK-NEXT: store i8* null, i8** [[Y]] - // CHECK-NEXT: [[T0:%.*]] = load i8** [[Y]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[Y]] // CHECK-NEXT: [[RET:%.*]] = call i8* @objc_retain(i8* [[T0]]) // CHECK-NEXT: store i32 - // CHECK-NEXT: [[T0:%.*]] = load i8** [[Y]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[Y]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) - // CHECK-NEXT: [[T1:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T1]]) // CHECK-NEXT: [[T1:%.*]] = tail call i8* @objc_autoreleaseReturnValue(i8* [[RET]]) // CHECK-NEXT: ret i8* [[T1]] @@ -103,8 +103,8 @@ void test3_unelided() { Test3 *x; // Call to +alloc. 
- // CHECK-NEXT: load {{.*}}* @"OBJC_CLASSLIST_REFERENCES_ - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: load {{.*}}, {{.*}}* @"OBJC_CLASSLIST_REFERENCES_ + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: bitcast // CHECK-NEXT: [[ALLOC:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend // CHECK-NEXT: bitcast @@ -112,14 +112,14 @@ void test3_unelided() { // CHECK-NEXT: call void @objc_release(i8* [Test3 alloc]; - // CHECK-NEXT: [[T0:%.*]] = load [[TEST3]]** [[X]] - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: [[T0:%.*]] = load [[TEST3]]*, [[TEST3]]** [[X]] + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST3]]* [[T0]] to i8* // CHECK-NEXT: [[COPY:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend {{.*}})(i8* [[T1]], // CHECK-NEXT: call void @objc_release(i8* [[COPY]]) [[NUW:#[0-9]+]] [x copy]; - // CHECK-NEXT: [[T0:%.*]] = load [[TEST3]]** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load [[TEST3]]*, [[TEST3]]** [[X]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST3]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW]] // CHECK-NEXT: ret void @@ -132,14 +132,14 @@ void test3() { id x = [[Test3 alloc] initWith: 5]; // Call to +alloc. - // CHECK-NEXT: load {{.*}}* @"OBJC_CLASSLIST_REFERENCES_ - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: load {{.*}}, {{.*}}* @"OBJC_CLASSLIST_REFERENCES_ + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: bitcast // CHECK-NEXT: [[ALLOC:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend // CHECK-NEXT: bitcast // Call to -initWith: with elided retain of consumed argument. - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: bitcast // CHECK-NEXT: [[INIT:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* // CHECK-NEXT: bitcast @@ -148,19 +148,19 @@ void test3() { // CHECK-NEXT: store i8* [[INIT]], i8** [[X]] // Call to -copy. - // CHECK-NEXT: [[V:%.*]] = load i8** [[X]] - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: [[V:%.*]] = load i8*, i8** [[X]] + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[COPY:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend {{.*}})(i8* [[V]], // Assignment to x. - // CHECK-NEXT: [[TMP:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[TMP:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: store i8* [[COPY]], i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[TMP]]) [[NUW]] x = [x copy]; // Cleanup for x. - // CHECK-NEXT: [[TMP:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[TMP:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[TMP]]) [[NUW]] // CHECK-NEXT: ret void @@ -169,14 +169,14 @@ void test3() { // CHECK-LABEL: define i8* @test4() id test4() { // Call to +alloc. - // CHECK: load {{.*}}* @"OBJC_CLASSLIST_REFERENCES_ - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK: load {{.*}}, {{.*}}* @"OBJC_CLASSLIST_REFERENCES_ + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: bitcast // CHECK-NEXT: [[ALLOC:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend // CHECK-NEXT: [[ALLOC:%.*]] = bitcast // Call to -initWith: with elided retain of consumed argument. 
- // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[ALLOC:%.*]] = bitcast // CHECK-NEXT: [[INIT:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* [[ALLOC]], @@ -209,32 +209,32 @@ void test5(Test5 *x, id y) { // CHECK-NEXT: call i8* @objc_retain // CHECK-NEXT: store - // CHECK-NEXT: load [[TEST5]]** [[X]] - // CHECK-NEXT: load i64* @"OBJC_IVAR_$_Test5.var" + // CHECK-NEXT: load [[TEST5]]*, [[TEST5]]** [[X]] + // CHECK-NEXT: load i64, i64* @"OBJC_IVAR_$_Test5.var" // CHECK-NEXT: bitcast // CHECK-NEXT: getelementptr // CHECK-NEXT: [[VAR:%.*]] = bitcast - // CHECK-NEXT: [[TMP:%.*]] = load i8** [[VAR]] + // CHECK-NEXT: [[TMP:%.*]] = load i8*, i8** [[VAR]] // CHECK-NEXT: store i8* null, i8** [[VAR]] // CHECK-NEXT: call void @objc_release(i8* [[TMP]]) [[NUW]] x->var = 0; - // CHECK-NEXT: [[YVAL:%.*]] = load i8** [[Y]] - // CHECK-NEXT: load [[TEST5]]** [[X]] - // CHECK-NEXT: load i64* @"OBJC_IVAR_$_Test5.var" + // CHECK-NEXT: [[YVAL:%.*]] = load i8*, i8** [[Y]] + // CHECK-NEXT: load [[TEST5]]*, [[TEST5]]** [[X]] + // CHECK-NEXT: load i64, i64* @"OBJC_IVAR_$_Test5.var" // CHECK-NEXT: bitcast // CHECK-NEXT: getelementptr // CHECK-NEXT: [[VAR:%.*]] = bitcast // CHECK-NEXT: [[T0:%.*]] = call i8* @objc_retain(i8* [[YVAL]]) [[NUW]] - // CHECK-NEXT: [[TMP:%.*]] = load i8** [[VAR]] + // CHECK-NEXT: [[TMP:%.*]] = load i8*, i8** [[VAR]] // CHECK-NEXT: store i8* [[T0]], i8** [[VAR]] // CHECK-NEXT: call void @objc_release(i8* [[TMP]]) [[NUW]] x->var = y; // Epilogue. - // CHECK-NEXT: [[TMP:%.*]] = load i8** [[Y]] + // CHECK-NEXT: [[TMP:%.*]] = load i8*, i8** [[Y]] // CHECK-NEXT: call void @objc_release(i8* [[TMP]]) [[NUW]] - // CHECK-NEXT: [[T0:%.*]] = load [[TEST5]]** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load [[TEST5]]*, [[TEST5]]** [[X]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST5]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW]] // CHECK-NEXT: ret void @@ -246,7 +246,7 @@ void test6() { // CHECK: [[X:%.*]] = alloca i8* // CHECK-NEXT: [[CALL:%.*]] = call i8* @test6_helper() // CHECK-NEXT: store i8* [[CALL]], i8** [[X]] - // CHECK-NEXT: [[T1:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: ret void id x = test6_helper(); @@ -257,10 +257,10 @@ void test7_helper(id __attribute__((ns_consumed))); void test7() { // CHECK: [[X:%.*]] = alloca i8* // CHECK-NEXT: store i8* null, i8** [[X]] - // CHECK-NEXT: [[T0:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) [[NUW]] // CHECK-NEXT: call void @test7_helper(i8* [[T1]]) - // CHECK-NEXT: [[T1:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: ret void id x; @@ -288,14 +288,14 @@ void test10() { // CHECK: [[X:%.*]] = alloca [[TEST10:%.*]]*, align // CHECK-NEXT: [[Y:%.*]] = alloca i8*, align // CHECK-NEXT: store [[TEST10]]* null, [[TEST10]]** [[X]] - // CHECK-NEXT: load [[TEST10]]** [[X]], align - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_{{[0-9]*}} + // CHECK-NEXT: load [[TEST10]]*, [[TEST10]]** [[X]], align + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_{{[0-9]*}} // CHECK-NEXT: bitcast // CHECK-NEXT: [[T0:%.*]] = call [[TEST10]]* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend // 
CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST10]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T1]]) // CHECK-NEXT: [[V:%.*]] = bitcast i8* [[T2]] to [[TEST10]]* - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_{{[0-9]*}} + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_{{[0-9]*}} // CHECK-NEXT: bitcast // CHECK-NEXT: [[T0:%.*]] = call [[TEST10]]* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST10]]* [[T0]] to i8* @@ -305,9 +305,9 @@ void test10() { // CHECK-NEXT: store i8* [[T4]], i8** [[Y]] // CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST10]]* [[V]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T0]]) - // CHECK-NEXT: [[T0:%.*]] = load i8** [[Y]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[Y]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) - // CHECK-NEXT: [[T0:%.*]] = load [[TEST10]]** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load [[TEST10]]*, [[TEST10]]** [[X]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST10]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) // CHECK-NEXT: ret void @@ -318,10 +318,10 @@ void test11(id (*f)(void) __attribute__((ns_returns_retained))) { // CHECK: [[F:%.*]] = alloca i8* ()*, align // CHECK-NEXT: [[X:%.*]] = alloca i8*, align // CHECK-NEXT: store i8* ()* {{%.*}}, i8* ()** [[F]], align - // CHECK-NEXT: [[T0:%.*]] = load i8* ()** [[F]], align + // CHECK-NEXT: [[T0:%.*]] = load i8* ()*, i8* ()** [[F]], align // CHECK-NEXT: [[T1:%.*]] = call i8* [[T0]]() // CHECK-NEXT: store i8* [[T1]], i8** [[X]], align - // CHECK-NEXT: [[T3:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T3:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T3]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: ret void id x = f(); @@ -350,7 +350,7 @@ void test12(void) { // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_loadWeakRetained(i8** [[X]]) // CHECK-NEXT: store i8* [[T2]], i8** [[Y]], align - // CHECK-NEXT: [[T4:%.*]] = load i8** [[Y]] + // CHECK-NEXT: [[T4:%.*]] = load i8*, i8** [[Y]] // CHECK-NEXT: call void @objc_release(i8* [[T4]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: call void @objc_destroyWeak(i8** [[X]]) // CHECK: ret void @@ -365,25 +365,25 @@ void test13(void) { typedef void fnty(id __attribute__((ns_consumed))); extern fnty *test13_func; - // CHECK-NEXT: [[FN:%.*]] = load void (i8*)** @test13_func, align - // CHECK-NEXT: [[X_VAL:%.*]] = load i8** [[X]], align + // CHECK-NEXT: [[FN:%.*]] = load void (i8*)*, void (i8*)** @test13_func, align + // CHECK-NEXT: [[X_VAL:%.*]] = load i8*, i8** [[X]], align // CHECK-NEXT: [[X_TMP:%.*]] = call i8* @objc_retain(i8* [[X_VAL]]) [[NUW]] // CHECK-NEXT: call void [[FN]](i8* [[X_TMP]]) test13_func(x); extern fnty ^test13_block; - // CHECK-NEXT: [[TMP:%.*]] = load void (i8*)** @test13_block, align + // CHECK-NEXT: [[TMP:%.*]] = load void (i8*)*, void (i8*)** @test13_block, align // CHECK-NEXT: [[BLOCK:%.*]] = bitcast void (i8*)* [[TMP]] to [[BLOCKTY:%.*]]* // CHECK-NEXT: [[BLOCK_FN_PTR:%.*]] = getelementptr inbounds [[BLOCKTY]], [[BLOCKTY]]* [[BLOCK]], i32 0, i32 3 // CHECK-NEXT: [[BLOCK_OPAQUE:%.*]] = bitcast [[BLOCKTY]]* [[BLOCK]] to i8* - // CHECK-NEXT: [[X_VAL:%.*]] = load i8** [[X]], align + // CHECK-NEXT: [[X_VAL:%.*]] = load i8*, i8** [[X]], align // CHECK-NEXT: [[X_TMP:%.*]] = call i8* @objc_retain(i8* [[X_VAL]]) [[NUW]] - // CHECK-NEXT: [[BLOCK_FN_TMP:%.*]] = load i8** [[BLOCK_FN_PTR]] + // CHECK-NEXT: [[BLOCK_FN_TMP:%.*]] = load i8*, i8** [[BLOCK_FN_PTR]] // CHECK-NEXT: [[BLOCK_FN:%.*]] = bitcast i8* 
[[BLOCK_FN_TMP]] to void (i8*, i8*)* // CHECK-NEXT: call void [[BLOCK_FN]](i8* [[BLOCK_OPAQUE]], i8* [[X_TMP]]) test13_block(x); - // CHECK-NEXT: [[T0:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]] // CHECK-NEXT: ret void } @@ -406,17 +406,17 @@ void test13(void) { // CHECK-NEXT: alloca // CHECK-NEXT: store [[TEST16]]* {{%.*}}, [[TEST16]]** [[SELF]], align // CHECK-NEXT: store i8* {{%.*}}, i8** [[CMD]] - // CHECK-NEXT: [[BASE:%.*]] = load [[TEST16]]** [[SELF]] + // CHECK-NEXT: [[BASE:%.*]] = load [[TEST16]]*, [[TEST16]]** [[SELF]] // Call super. // CHECK-NEXT: [[BASE2:%.*]] = bitcast [[TEST16]]* [[BASE]] to i8* // CHECK-NEXT: [[T0:%.*]] = getelementptr // CHECK-NEXT: store i8* [[BASE2]], i8** [[T0]] - // CHECK-NEXT: load {{%.*}}** @"OBJC_CLASSLIST_SUP_REFS_$_ + // CHECK-NEXT: load {{%.*}}*, {{%.*}}** @"OBJC_CLASSLIST_SUP_REFS_$_ // CHECK-NEXT: bitcast // CHECK-NEXT: getelementptr // CHECK-NEXT: store - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: call void bitcast (i8* ({{.*}})* @objc_msgSendSuper2 to void ( // CHECK-NEXT: ret void } @@ -427,17 +427,17 @@ void test13(void) { // CHECK-NEXT: [[CMD:%.*]] = alloca i8*, align // CHECK-NEXT: store [[TEST16]]* {{%.*}}, [[TEST16]]** [[SELF]], align // CHECK-NEXT: store i8* {{%.*}}, i8** [[CMD]] - // CHECK-NEXT: [[BASE:%.*]] = load [[TEST16]]** [[SELF]] + // CHECK-NEXT: [[BASE:%.*]] = load [[TEST16]]*, [[TEST16]]** [[SELF]] // Destroy y. - // CHECK-NEXT: [[Y_OFF:%.*]] = load i64* @"OBJC_IVAR_$_Test16.y" + // CHECK-NEXT: [[Y_OFF:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test16.y" // CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST16]]* [[BASE]] to i8* // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[Y_OFF]] // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to i8** // CHECK-NEXT: call void @objc_storeStrong(i8** [[T2]], i8* null) [[NUW]] // Destroy z. - // CHECK-NEXT: [[Z_OFF:%.*]] = load i64* @"OBJC_IVAR_$_Test16.z" + // CHECK-NEXT: [[Z_OFF:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test16.z" // CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST16]]* [[BASE]] to i8* // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[Z_OFF]] // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to i8** @@ -471,7 +471,7 @@ void test19() { // CHECK-NEXT: [[CALL:%.*]] = call i8* @test19_helper() // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[CALL]]) [[NUW]] // CHECK-NEXT: [[SLOT:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[X]], i32 0, i64 2 - // CHECK-NEXT: [[T0:%.*]] = load i8** [[SLOT]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[SLOT]] // CHECK-NEXT: store i8* [[T1]], i8** [[SLOT]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]] @@ -481,7 +481,7 @@ void test19() { // CHECK: [[AFTER:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[NEXT:%.*]], {{%.*}} ] // CHECK-NEXT: [[CUR:%.*]] = getelementptr inbounds i8*, i8** [[AFTER]], i64 -1 - // CHECK-NEXT: [[T0:%.*]] = load i8** [[CUR]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[CUR]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: [[EQ:%.*]] = icmp eq i8** [[CUR]], [[BEGIN]] // CHECK-NEXT: br i1 [[EQ]], @@ -498,7 +498,7 @@ void test20(unsigned n) { id x[n]; // Capture the VLA size. 
- // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4 + // CHECK-NEXT: [[T0:%.*]] = load i32, i32* [[N]], align 4 // CHECK-NEXT: [[DIM:%.*]] = zext i32 [[T0]] to i64 // Save the stack pointer. @@ -520,12 +520,12 @@ void test20(unsigned n) { // CHECK: [[AFTER:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[CUR:%.*]], {{%.*}} ] // CHECK-NEXT: [[CUR:%.*]] = getelementptr inbounds i8*, i8** [[AFTER]], i64 -1 - // CHECK-NEXT: [[T0:%.*]] = load i8** [[CUR]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[CUR]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: [[EQ:%.*]] = icmp eq i8** [[CUR]], [[VLA]] // CHECK-NEXT: br i1 [[EQ]], - // CHECK: [[T0:%.*]] = load i8** [[SAVED_STACK]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[SAVED_STACK]] // CHECK-NEXT: call void @llvm.stackrestore(i8* [[T0]]) // CHECK-NEXT: ret void } @@ -539,7 +539,7 @@ void test21(unsigned n) { id x[2][n][3]; // Capture the VLA size. - // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4 + // CHECK-NEXT: [[T0:%.*]] = load i32, i32* [[N]], align 4 // CHECK-NEXT: [[DIM:%.*]] = zext i32 [[T0]] to i64 // CHECK-NEXT: [[T0:%.*]] = call i8* @llvm.stacksave() @@ -566,12 +566,12 @@ void test21(unsigned n) { // CHECK: [[AFTER:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[CUR:%.*]], {{%.*}} ] // CHECK-NEXT: [[CUR:%.*]] = getelementptr inbounds i8*, i8** [[AFTER]], i64 -1 - // CHECK-NEXT: [[T0:%.*]] = load i8** [[CUR]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[CUR]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: [[EQ:%.*]] = icmp eq i8** [[CUR]], [[BEGIN]] // CHECK-NEXT: br i1 [[EQ]], - // CHECK: [[T0:%.*]] = load i8** [[SAVED_STACK]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[SAVED_STACK]] // CHECK-NEXT: call void @llvm.stackrestore(i8* [[T0]]) // CHECK-NEXT: ret void } @@ -593,8 +593,8 @@ void test21(unsigned n) { @interface Test26 { id x[4]; } @end @implementation Test26 @end // CHECK: define internal void @"\01-[Test26 .cxx_destruct]"( -// CHECK: [[SELF:%.*]] = load [[TEST26:%.*]]** -// CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test26.x" +// CHECK: [[SELF:%.*]] = load [[TEST26:%.*]]*, [[TEST26:%.*]]** +// CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test26.x" // CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST26]]* [[SELF]] to i8* // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]] // CHECK-NEXT: [[X:%.*]] = bitcast i8* [[T1]] to [4 x i8*]* @@ -620,13 +620,13 @@ void test21(unsigned n) { // CHECK-NEXT: [[DEST:%.*]] = alloca i32 // CHECK-NEXT: store [[TEST27]]* {{%.*}}, [[TEST27]]** [[SELF]] // CHECK-NEXT: store i8* {{%.*}}, i8** [[CMD]] -// CHECK-NEXT: [[T0:%.*]] = load [[TEST27]]** [[SELF]] +// CHECK-NEXT: [[T0:%.*]] = load [[TEST27]]*, [[TEST27]]** [[SELF]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST27]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[TEST27]]* // CHECK-NEXT: [[RET:%.*]] = bitcast [[TEST27]]* [[T3]] to i8* // CHECK-NEXT: store i32 {{[0-9]+}}, i32* [[DEST]] -// CHECK-NEXT: [[T0:%.*]] = load [[TEST27]]** [[SELF]] +// CHECK-NEXT: [[T0:%.*]] = load [[TEST27]]*, [[TEST27]]** [[SELF]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST27]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) // CHECK-NEXT: ret i8* [[RET]] @@ -641,8 +641,8 @@ void test21(unsigned n) { @synthesize prop; @end // CHECK: define internal void @"\01-[Test28 .cxx_destruct]" -// CHECK: [[SELF:%.*]] = load [[TEST28:%.*]]** -// 
CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test28.prop" +// CHECK: [[SELF:%.*]] = load [[TEST28:%.*]]*, [[TEST28:%.*]]** +// CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test28.prop" // CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST28]]* [[SELF]] to i8* // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]] // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to i8** @@ -668,14 +668,14 @@ static id _test29_allocator = 0; // Evaluate arguments. Note that the send argument is evaluated // before the zeroing of self. -// CHECK-NEXT: [[T0:%.*]] = load [[TEST29]]** [[SELF]], align 8 -// CHECK-NEXT: [[T1:%.*]] = load i8** @_test29_allocator, align 8 +// CHECK-NEXT: [[T0:%.*]] = load [[TEST29]]*, [[TEST29]]** [[SELF]], align 8 +// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** @_test29_allocator, align 8 // Implicit null of 'self', i.e. direct transfer of ownership. // CHECK-NEXT: store [[TEST29]]* null, [[TEST29]]** [[SELF]] // Actual message send. -// CHECK-NEXT: [[T2:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK-NEXT: [[T2:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[T3:%.*]] = bitcast [[TEST29]]* [[T0]] to i8* // CHECK-NEXT: [[CALL:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* [[T3]], i8* [[T2]], i8* [[T1]]) @@ -694,7 +694,7 @@ static id _test29_allocator = 0; // CHECK-NEXT: store i32 1, i32* [[CLEANUP]] // Cleanup. -// CHECK-NEXT: [[T0:%.*]] = load [[TEST29]]** [[SELF]] +// CHECK-NEXT: [[T0:%.*]] = load [[TEST29]]*, [[TEST29]]** [[SELF]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST29]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW]], !clang.imprecise_release @@ -716,8 +716,8 @@ static id _test29_allocator = 0; // Evaluate arguments. Note that the send argument is evaluated // before the zeroing of self. -// CHECK-NEXT: [[T0:%.*]] = load [[TEST29]]** [[SELF]] -// CHECK-NEXT: [[T1:%.*]] = load i8** [[ALLOCATOR]], align 8 +// CHECK-NEXT: [[T0:%.*]] = load [[TEST29]]*, [[TEST29]]** [[SELF]] +// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[ALLOCATOR]], align 8 // Implicit null of 'self', i.e. direct transfer of ownership. // CHECK-NEXT: store [[TEST29]]* null, [[TEST29]]** [[SELF]] @@ -736,13 +736,13 @@ static id _test29_allocator = 0; // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST29]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) [[NUW]] // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[TEST29]]* -// CHECK-NEXT: [[T4:%.*]] = load [[TEST29]]** [[SELF]], align +// CHECK-NEXT: [[T4:%.*]] = load [[TEST29]]*, [[TEST29]]** [[SELF]], align // CHECK-NEXT: store [[TEST29]]* [[T3]], [[TEST29]]** [[SELF]], align // CHECK-NEXT: [[T5:%.*]] = bitcast [[TEST29]]* [[T4]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T5]]) // Return statement. -// CHECK-NEXT: [[T0:%.*]] = load [[TEST29]]** [[SELF]] +// CHECK-NEXT: [[T0:%.*]] = load [[TEST29]]*, [[TEST29]]** [[SELF]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST29]]* [[T0]] to i8* // CHECK-NEXT: [[T0:%.*]] = call i8* @objc_retain(i8* [[T1]]) [[NUW]] // CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to [[TEST29]]* @@ -750,10 +750,10 @@ static id _test29_allocator = 0; // CHECK-NEXT: store i32 1, i32* [[CLEANUP]] // Cleanup. 
-// CHECK-NEXT: [[T0:%.*]] = load i8** [[ALLOCATOR]] +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[ALLOCATOR]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]], !clang.imprecise_release -// CHECK-NEXT: [[T0:%.*]] = load [[TEST29]]** [[SELF]] +// CHECK-NEXT: [[T0:%.*]] = load [[TEST29]]*, [[TEST29]]** [[SELF]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST29]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [[NUW]], !clang.imprecise_release @@ -781,25 +781,25 @@ char *helper; // CHECK-NEXT: store // Call. -// CHECK-NEXT: [[T0:%.*]] = load [[TEST30]]** [[SELF]] -// CHECK-NEXT: [[T1:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK-NEXT: [[T0:%.*]] = load [[TEST30]]*, [[TEST30]]** [[SELF]] +// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST30]]* [[T0]] to i8* // CHECK-NEXT: [[CALL:%.*]] = call [[TEST30_HELPER:%.*]]* bitcast {{.*}} @objc_msgSend {{.*}}(i8* [[T2]], i8* [[T1]]) // Assignment. // CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST30_HELPER]]* [[CALL]] to i8* -// CHECK-NEXT: [[T1:%.*]] = load [[TEST30]]** [[SELF]] -// CHECK-NEXT: [[IVAR:%.*]] = load i64* @"OBJC_IVAR_$_Test30.helper" +// CHECK-NEXT: [[T1:%.*]] = load [[TEST30]]*, [[TEST30]]** [[SELF]] +// CHECK-NEXT: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test30.helper" // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST30]]* [[T1]] to i8* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 [[IVAR]] // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i8** -// CHECK-NEXT#: [[T5:%.*]] = load i8** [[T4]] +// CHECK-NEXT#: [[T5:%.*]] = load i8*, i8** [[T4]] // CHECK-NEXT#: [[T6:%.*]] = call i8* @objc_retain(i8* [[T0]]) // CHECK-NEXT#: call void @objc_release(i8* [[T5]]) // CHECK-NEXT: store i8* [[T0]], i8** [[T4]] // Return. -// CHECK-NEXT: [[T0:%.*]] = load [[TEST30]]** [[SELF]] +// CHECK-NEXT: [[T0:%.*]] = load [[TEST30]]*, [[TEST30]]** [[SELF]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST30]]* [[T0]] to i8* // CHECK-NEXT: [[T0:%.*]] = call i8* @objc_retain(i8* [[T1]]) // CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to [[TEST30]]* @@ -807,7 +807,7 @@ char *helper; // CHECK-NEXT: store i32 1 // Cleanup. 
-// CHECK-NEXT: [[T0:%.*]] = load [[TEST30]]** [[SELF]] +// CHECK-NEXT: [[T0:%.*]] = load [[TEST30]]*, [[TEST30]]** [[SELF]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST30]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) @@ -865,59 +865,59 @@ void test33(Test33 *ptr) { // CHECK-NEXT: store // CHECK-NEXT: store [[A_T]]* null, [[A_T]]** [[A]] - // CHECK-NEXT: load [[TEST33]]** [[PTR]] - // CHECK-NEXT: [[W0:%.*]] = load [[A_T]]** [[A]] + // CHECK-NEXT: load [[TEST33]]*, [[TEST33]]** [[PTR]] + // CHECK-NEXT: [[W0:%.*]] = load [[A_T]]*, [[A_T]]** [[A]] // CHECK-NEXT: store [[A_T]]* [[W0]], [[A_T]]** [[TEMP0]] - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: bitcast // CHECK-NEXT: objc_msgSend{{.*}}, [[A_T]]** [[TEMP0]]) - // CHECK-NEXT: [[T0:%.*]] = load [[A_T]]** [[TEMP0]] + // CHECK-NEXT: [[T0:%.*]] = load [[A_T]]*, [[A_T]]** [[TEMP0]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[A_T]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A_T]]* // CHECK-NEXT: call void (...)* @clang.arc.use([[A_T]]* [[W0]]) [[NUW]] - // CHECK-NEXT: [[T4:%.*]] = load [[A_T]]** [[A]] + // CHECK-NEXT: [[T4:%.*]] = load [[A_T]]*, [[A_T]]** [[A]] // CHECK-NEXT: store [[A_T]]* [[T3]], [[A_T]]** [[A]] // CHECK-NEXT: [[T5:%.*]] = bitcast [[A_T]]* [[T4]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T5]]) - // CHECK-NEXT: load [[TEST33]]** [[PTR]] - // CHECK-NEXT: [[W0:%.*]] = load [[A_T]]** [[A]] + // CHECK-NEXT: load [[TEST33]]*, [[TEST33]]** [[PTR]] + // CHECK-NEXT: [[W0:%.*]] = load [[A_T]]*, [[A_T]]** [[A]] // CHECK-NEXT: store [[A_T]]* [[W0]], [[A_T]]** [[TEMP1]] - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: bitcast // CHECK-NEXT: objc_msgSend{{.*}}, [[A_T]]** [[TEMP1]]) - // CHECK-NEXT: [[T0:%.*]] = load [[A_T]]** [[TEMP1]] + // CHECK-NEXT: [[T0:%.*]] = load [[A_T]]*, [[A_T]]** [[TEMP1]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[A_T]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A_T]]* // CHECK-NEXT: call void (...)* @clang.arc.use([[A_T]]* [[W0]]) [[NUW]] - // CHECK-NEXT: [[T4:%.*]] = load [[A_T]]** [[A]] + // CHECK-NEXT: [[T4:%.*]] = load [[A_T]]*, [[A_T]]** [[A]] // CHECK-NEXT: store [[A_T]]* [[T3]], [[A_T]]** [[A]] // CHECK-NEXT: [[T5:%.*]] = bitcast [[A_T]]* [[T4]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T5]]) - // CHECK-NEXT: load [[TEST33]]** [[PTR]] - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: load [[TEST33]]*, [[TEST33]]** [[PTR]] + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: bitcast // CHECK-NEXT: objc_msgSend{{.*}}, [[A_T]]** [[A]]) - // CHECK-NEXT: load [[TEST33]]** [[PTR]] - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: load [[TEST33]]*, [[TEST33]]** [[PTR]] + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: bitcast // CHECK-NEXT: objc_msgSend{{.*}}, [[A_T]]** [[A]]) // 'out' - // CHECK-NEXT: load [[TEST33]]** [[PTR]] + // CHECK-NEXT: load [[TEST33]]*, [[TEST33]]** [[PTR]] // CHECK-NEXT: store [[A_T]]* null, [[A_T]]** [[TEMP2]] - // CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: bitcast // CHECK-NEXT: objc_msgSend{{.*}}, [[A_T]]** [[TEMP2]]) - // CHECK-NEXT: [[T0:%.*]] = load [[A_T]]** 
[[TEMP2]] + // CHECK-NEXT: [[T0:%.*]] = load [[A_T]]*, [[A_T]]** [[TEMP2]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[A_T]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A_T]]* - // CHECK-NEXT: [[T4:%.*]] = load [[A_T]]** [[A]] + // CHECK-NEXT: [[T4:%.*]] = load [[A_T]]*, [[A_T]]** [[A]] // CHECK-NEXT: store [[A_T]]* [[T3]], [[A_T]]** [[A]] // CHECK-NEXT: [[T5:%.*]] = bitcast [[A_T]]* [[T4]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T5]]) @@ -941,7 +941,7 @@ void test36(id x) { // CHECK: call i8* @objc_retain id array[3] = { @"A", x, @"y" }; - // CHECK: [[T0:%.*]] = load i8** [[X]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: store i8* null, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) x = 0; @@ -965,22 +965,22 @@ void test37(void) { // CHECK-NEXT: [[TEMP:%.*]] = alloca i8* // CHECK-NEXT: store [[TEST37]]* null, [[TEST37]]** [[VAR]] - // CHECK-NEXT: [[W0:%.*]] = load [[TEST37]]** [[VAR]] + // CHECK-NEXT: [[W0:%.*]] = load [[TEST37]]*, [[TEST37]]** [[VAR]] // CHECK-NEXT: [[W1:%.*]] = bitcast [[TEST37]]* [[W0]] to i8* // CHECK-NEXT: store i8* [[W1]], i8** [[TEMP]] // CHECK-NEXT: call void @test37_helper(i8** [[TEMP]]) - // CHECK-NEXT: [[T0:%.*]] = load i8** [[TEMP]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[TEMP]] // CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to [[TEST37]]* // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST37]]* [[T1]] to i8* // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_retain(i8* [[T2]]) // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to [[TEST37]]* // CHECK-NEXT: call void (...)* @clang.arc.use(i8* [[W1]]) [[NUW]] - // CHECK-NEXT: [[T5:%.*]] = load [[TEST37]]** [[VAR]] + // CHECK-NEXT: [[T5:%.*]] = load [[TEST37]]*, [[TEST37]]** [[VAR]] // CHECK-NEXT: store [[TEST37]]* [[T4]], [[TEST37]]** [[VAR]] // CHECK-NEXT: [[T6:%.*]] = bitcast [[TEST37]]* [[T5]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T6]]) - // CHECK-NEXT: [[T0:%.*]] = load [[TEST37]]** [[VAR]] + // CHECK-NEXT: [[T0:%.*]] = load [[TEST37]]*, [[TEST37]]** [[VAR]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST37]]* [[T0]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T1]]) // CHECK-NEXT: ret void @@ -1015,7 +1015,7 @@ void test46(__weak id *wp, __weak volatile id *wvp) { // CHECK: [[T0:%.*]] = call i8* @test46_helper() // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) - // CHECK-NEXT: [[T2:%.*]] = load i8*** {{%.*}}, align 8 + // CHECK-NEXT: [[T2:%.*]] = load i8**, i8*** {{%.*}}, align 8 // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_storeWeak(i8** [[T2]], i8* [[T1]]) // CHECK-NEXT: [[T4:%.*]] = call i8* @objc_retain(i8* [[T3]]) // CHECK-NEXT: store i8* [[T4]], i8** @@ -1024,7 +1024,7 @@ void test46(__weak id *wp, __weak volatile id *wvp) { // CHECK: [[T0:%.*]] = call i8* @test46_helper() // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) - // CHECK-NEXT: [[T2:%.*]] = load i8*** {{%.*}}, align 8 + // CHECK-NEXT: [[T2:%.*]] = load i8**, i8*** {{%.*}}, align 8 // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_storeWeak(i8** [[T2]], i8* [[T1]]) // CHECK-NEXT: [[T4:%.*]] = call i8* @objc_retain(i8* [[T3]]) // CHECK-NEXT: store i8* [[T4]], i8** @@ -1042,14 +1042,14 @@ void test47(void) { // CHECK-NEXT: store i8* null, i8** [[X]] // CHECK-NEXT: [[CALL:%.*]] = call i8* @test47_helper() // CHECK-NEXT: [[T0:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[CALL]]) - // CHECK-NEXT: [[T1:%.*]] = load i8** [[X]] + // CHECK-NEXT: 
[[T1:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: store i8* [[T0]], i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T1]]) // CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T0]]) - // CHECK-NEXT: [[T3:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T3:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: store i8* [[T2]], i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T3]]) - // CHECK-NEXT: [[T4:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T4:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T4]]) // CHECK-NEXT: ret void } @@ -1117,10 +1117,10 @@ id test52(void) { // CHECK: [[X:%.*]] = alloca i32 // CHECK-NEXT: [[TMPALLOCA:%.*]] = alloca i8* // CHECK-NEXT: store i32 5, i32* [[X]], -// CHECK-NEXT: [[T0:%.*]] = load i32* [[X]], +// CHECK-NEXT: [[T0:%.*]] = load i32, i32* [[X]], // CHECK-NEXT: [[T1:%.*]] = call i8* @test52_helper(i32 [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[TMPALLOCA]] -// CHECK-NEXT: [[T2:%.*]] = load i8** [[TMPALLOCA]] +// CHECK-NEXT: [[T2:%.*]] = load i8*, i8** [[TMPALLOCA]] // CHECK-NEXT: [[T3:%.*]] = tail call i8* @objc_autoreleaseReturnValue(i8* [[T2]]) // CHECK-NEXT: ret i8* [[T3]] } @@ -1137,15 +1137,15 @@ void test53(void) { // CHECK-NEXT: [[T0:%.*]] = call i8* @test53_helper() // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[Y]], -// CHECK-NEXT: [[T0:%.*]] = load i8** [[Y]], +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[Y]], // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[TMPALLOCA]] -// CHECK-NEXT: [[T2:%.*]] = load i8** [[Y]] +// CHECK-NEXT: [[T2:%.*]] = load i8*, i8** [[Y]] // CHECK-NEXT: call void @objc_release(i8* [[T2]]) -// CHECK-NEXT: [[T3:%.*]] = load i8** [[TMPALLOCA]] +// CHECK-NEXT: [[T3:%.*]] = load i8*, i8** [[TMPALLOCA]] // CHECK-NEXT: store i8* [[T3]], i8** [[X]], -// CHECK-NEXT: load i8** [[X]], -// CHECK-NEXT: [[T0:%.*]] = load i8** [[X]] +// CHECK-NEXT: load i8*, i8** [[X]], +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK-NEXT: ret void } @@ -1195,7 +1195,7 @@ void test56_test(void) { // CHECK: [[X:%.*]] = alloca i8*, align 8 // CHECK: [[T0:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)( // CHECK-NEXT: store i8* [[T0]], i8** [[X]] - // CHECK-NEXT: [[T0:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK-NEXT: ret void } @@ -1210,17 +1210,17 @@ void test56_test(void) { @synthesize strong, weak, unsafe; @end // CHECK: define internal i8* @"\01-[Test57 strong]"( -// CHECK: [[T0:%.*]] = load [[TEST57:%.*]]** {{%.*}} -// CHECK-NEXT: [[T1:%.*]] = load i64* @"OBJC_IVAR_$_Test57.strong" +// CHECK: [[T0:%.*]] = load [[TEST57:%.*]]*, [[TEST57:%.*]]** {{%.*}} +// CHECK-NEXT: [[T1:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test57.strong" // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST57]]* [[T0]] to i8* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 [[T1]] // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i8** -// CHECK-NEXT: [[T5:%.*]] = load i8** [[T4]] +// CHECK-NEXT: [[T5:%.*]] = load i8*, i8** [[T4]] // CHECK-NEXT: ret i8* [[T5]] // CHECK: define internal i8* @"\01-[Test57 weak]"( -// CHECK: [[T0:%.*]] = load [[TEST57]]** {{%.*}} -// CHECK-NEXT: [[T1:%.*]] = load i64* @"OBJC_IVAR_$_Test57.weak" +// CHECK: [[T0:%.*]] = load [[TEST57]]*, [[TEST57]]** {{%.*}} +// CHECK-NEXT: [[T1:%.*]] = load i64, i64* 
@"OBJC_IVAR_$_Test57.weak" // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST57]]* [[T0]] to i8* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 [[T1]] // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i8** @@ -1229,12 +1229,12 @@ void test56_test(void) { // CHECK-NEXT: ret i8* [[T6]] // CHECK: define internal i8* @"\01-[Test57 unsafe]"( -// CHECK: [[T0:%.*]] = load [[TEST57]]** {{%.*}} -// CHECK-NEXT: [[T1:%.*]] = load i64* @"OBJC_IVAR_$_Test57.unsafe" +// CHECK: [[T0:%.*]] = load [[TEST57]]*, [[TEST57]]** {{%.*}} +// CHECK-NEXT: [[T1:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test57.unsafe" // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST57]]* [[T0]] to i8* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 [[T1]] // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i8** -// CHECK-NEXT: [[T5:%.*]] = load i8** [[T4]] +// CHECK-NEXT: [[T5:%.*]] = load i8*, i8** [[T4]] // CHECK-NEXT: ret i8* [[T5]] // rdar://problem/9842343 @@ -1270,23 +1270,23 @@ void test61(void) { // CHECK-NEXT: [[T0:%.*]] = call i8* @test61_make() // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) - // CHECK-NEXT: [[T2:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ - // CHECK-NEXT: [[T3:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: [[T2:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: [[T3:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[T4:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* [[T1]], i8* [[T3]], i8* [[T2]]) // CHECK-NEXT: call void @objc_release(i8* [[T1]]) [test61_make() performSelector: @selector(test61_void)]; // CHECK-NEXT: [[T0:%.*]] = call i8* @test61_make() // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) - // CHECK-NEXT: [[T2:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ - // CHECK-NEXT: [[T3:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: [[T2:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ + // CHECK-NEXT: [[T3:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[T4:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* [[T1]], i8* [[T3]], i8* [[T2]]) // CHECK-NEXT: [[T5:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T4]]) // CHECK-NEXT: store i8* [[T5]], i8** [[Y]] // CHECK-NEXT: call void @objc_release(i8* [[T1]]) id y = [test61_make() performSelector: @selector(test61_id)]; - // CHECK-NEXT: [[T0:%.*]] = load i8** [[Y]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[Y]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK-NEXT: ret void } @@ -1303,12 +1303,12 @@ void test62(void) { // CHECK-NEXT: store i32 0, i32* [[I]], align 4 // CHECK-NEXT: br label - // CHECK: [[T0:%.*]] = load i32* [[I]], align 4 + // CHECK: [[T0:%.*]] = load i32, i32* [[I]], align 4 // CHECK-NEXT: [[T1:%.*]] = icmp ne i32 [[T0]], 20 // CHECK-NEXT: br i1 [[T1]], for (unsigned i = 0; i != 20; ++i) { - // CHECK: [[T0:%.*]] = load i32* [[I]], align 4 + // CHECK: [[T0:%.*]] = load i32, i32* [[I]], align 4 // CHECK-NEXT: [[T1:%.*]] = icmp ne i32 [[T0]], 0 // CHECK-NEXT: store i1 false, i1* [[CLEANUP_REQUIRED]] // CHECK-NEXT: br i1 [[T1]], @@ -1319,9 +1319,9 @@ void test62(void) { // CHECK-NEXT: [[T2:%.*]] = icmp ne i8* [[T1]], null // CHECK-NEXT: br label // CHECK: [[COND:%.*]] = phi i1 [ false, {{%.*}} ], [ [[T2]], {{%.*}} ] - // CHECK-NEXT: [[T0:%.*]] = load i1* [[CLEANUP_REQUIRED]] + // CHECK-NEXT: [[T0:%.*]] = load i1, i1* [[CLEANUP_REQUIRED]] // CHECK-NEXT: br i1 
[[T0]], - // CHECK: [[T0:%.*]] = load i8** [[CLEANUP_VALUE]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[CLEANUP_VALUE]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) // CHECK-NEXT: br label // CHECK: br i1 [[COND]] @@ -1332,7 +1332,7 @@ void test62(void) { test62_body(); } - // CHECK: [[T0:%.*]] = load i32* [[I]], align 4 + // CHECK: [[T0:%.*]] = load i32, i32* [[I]], align 4 // CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], 1 // CHECK-NEXT: store i32 [[T1]], i32* [[I]] // CHECK-NEXT: br label @@ -1372,7 +1372,7 @@ void test66(void) { // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[TEST66]]* // CHECK-NEXT: [[T4:%.*]] = call i8* @test66_arg() // CHECK-NEXT: [[T5:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T4]]) -// CHECK-NEXT: [[T6:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES +// CHECK-NEXT: [[T6:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T7:%.*]] = bitcast [[TEST66]]* [[T3]] to i8* // CHECK-NEXT: [[SIX:%.*]] = icmp eq i8* [[T7]], null // CHECK-NEXT: br i1 [[SIX]], label [[NULINIT:%.*]], label [[CALL:%.*]] @@ -1404,7 +1404,7 @@ void test68(void) { // CHECK-NEXT: [[T0:%.*]] = call i8* @test67_helper() // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[CL]], align 8 -// CHECK-NEXT: [[T2:%.*]] = load i8** [[CL]] +// CHECK-NEXT: [[T2:%.*]] = load i8*, i8** [[CL]] // CHECK-NEXT: call void @objc_release(i8* [[T2]]) // CHECK-NEXT: ret void @@ -1415,7 +1415,7 @@ void test68(void) { @end // CHECK: define internal i8* @"\01-[Test69 foo]"( // CHECK: [[SELF:%.*]] = alloca [[TEST69:%.*]]*, align 8 -// CHECK: [[T0:%.*]] = load [[TEST69]]** [[SELF]], align 8 +// CHECK: [[T0:%.*]] = load [[TEST69]]*, [[TEST69]]** [[SELF]], align 8 // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST69]]* [[T0]] to i8* // CHECK-NEXT: ret i8* [[T1]] diff --git a/test/CodeGenObjC/arm64-int32-ivar.m b/test/CodeGenObjC/arm64-int32-ivar.m index 8f8c34a44a..5f2acd6926 100644 --- a/test/CodeGenObjC/arm64-int32-ivar.m +++ b/test/CodeGenObjC/arm64-int32-ivar.m @@ -11,7 +11,7 @@ @end @implementation I -// CHECK: [[IVAR:%.*]] = load i32* @"OBJC_IVAR_$_I.IVAR2" +// CHECK: [[IVAR:%.*]] = load i32, i32* @"OBJC_IVAR_$_I.IVAR2" // CHECK: [[CONV:%.*]] = sext i32 [[IVAR]] to i64 - (id) METH { return IVAR2; } @end diff --git a/test/CodeGenObjC/atomic-aggregate-property.m b/test/CodeGenObjC/atomic-aggregate-property.m index 1b9cb8f8e8..3bef0a0ed7 100644 --- a/test/CodeGenObjC/atomic-aggregate-property.m +++ b/test/CodeGenObjC/atomic-aggregate-property.m @@ -29,7 +29,7 @@ struct s2 {}; @synthesize a; @end // CHECK-LP64: define internal double @"\01-[A x]"( -// CHECK-LP64: load atomic i64* {{%.*}} unordered, align 8 +// CHECK-LP64: load atomic i64, i64* {{%.*}} unordered, align 8 // CHECK-LP64: define internal void @"\01-[A setX:]"( // CHECK-LP64: store atomic i64 {{%.*}}, i64* {{%.*}} unordered, align 8 diff --git a/test/CodeGenObjC/autorelease.m b/test/CodeGenObjC/autorelease.m index 4b81017201..6bb80fd2bd 100644 --- a/test/CodeGenObjC/autorelease.m +++ b/test/CodeGenObjC/autorelease.m @@ -26,7 +26,7 @@ @end // CHECK: call i8* @objc_autoreleasePoolPush -// CHECK: [[T:%.*]] = load i8** [[A:%.*]] +// CHECK: [[T:%.*]] = load i8*, i8** [[A:%.*]] // CHECK: call void @objc_autoreleasePoolPop // rdar://13660038 @@ -42,7 +42,7 @@ int tryTo(int (*f)(void)) { // CHECK-LABEL: define i32 @tryTo(i32 ()* // CHECK: [[RET:%.*]] = alloca i32, // CHECK: [[T0:%.*]] = call i8* @objc_autoreleasePoolPush() -// CHECK-NEXT: [[T1:%.*]] = load i32 ()** {{%.*}}, +// 
CHECK-NEXT: [[T1:%.*]] = load i32 ()*, i32 ()** {{%.*}}, // CHECK-NEXT: [[T2:%.*]] = invoke i32 [[T1]]() // CHECK: store i32 [[T2]], i32* [[RET]] // CHECK: invoke void @objc_autoreleasePoolPop(i8* [[T0]]) diff --git a/test/CodeGenObjC/bitfield-access.m b/test/CodeGenObjC/bitfield-access.m index 2b8039df9e..3f33afd9ce 100644 --- a/test/CodeGenObjC/bitfield-access.m +++ b/test/CodeGenObjC/bitfield-access.m @@ -15,7 +15,7 @@ // end of the structure. // // CHECK-I386-LABEL: define i32 @f0( -// CHECK-I386: [[t0_0:%.*]] = load i8* {{.*}}, align 1 +// CHECK-I386: [[t0_0:%.*]] = load i8, i8* {{.*}}, align 1 // CHECK-I386: lshr i8 [[t0_0]], 7 // CHECK-I386: } int f0(I0 *a) { @@ -27,7 +27,7 @@ int f0(I0 *a) { // CHECK-ARM-LABEL: define i32 @f1( // CHECK-ARM: [[t1_ptr:%.*]] = getelementptr // CHECK-ARM: [[t1_base:%.*]] = bitcast i8* [[t1_ptr]] to i40* -// CHECK-ARM: [[t1_0:%.*]] = load i40* [[t1_base]], align 1 +// CHECK-ARM: [[t1_0:%.*]] = load i40, i40* [[t1_base]], align 1 // CHECK-ARM: [[t1_1:%.*]] = lshr i40 [[t1_0]], 1 // CHECK-ARM: [[t1_2:%.*]] = and i40 [[t1_1]], // CHECK-ARM: trunc i40 [[t1_2]] to i32 diff --git a/test/CodeGenObjC/block-6.m b/test/CodeGenObjC/block-6.m index 5921bb3928..7867e62187 100644 --- a/test/CodeGenObjC/block-6.m +++ b/test/CodeGenObjC/block-6.m @@ -10,7 +10,7 @@ void MYFUNC() { // CHECK: [[T1:%.*]] = bitcast i8* ()* // CHECK: [[FORWARDING:%.*]] = getelementptr inbounds [[OBSERVER_T]], [[OBSERVER_T]]* [[OBSERVER_SLOT]], i32 0, i32 1 -// CHECK-NEXT: [[T0:%.*]] = load [[OBSERVER_T]]** [[FORWARDING]] +// CHECK-NEXT: [[T0:%.*]] = load [[OBSERVER_T]]*, [[OBSERVER_T]]** [[FORWARDING]] // CHECK-NEXT: [[OBSERVER:%.*]] = getelementptr inbounds [[OBSERVER_T]], [[OBSERVER_T]]* [[T0]], i32 0, i32 6 // CHECK-NEXT: store i8* [[T1]], i8** [[OBSERVER]] __block id observer = ^{ return observer; }; diff --git a/test/CodeGenObjC/blocks.m b/test/CodeGenObjC/blocks.m index bbaa42ab55..fa86e363f3 100644 --- a/test/CodeGenObjC/blocks.m +++ b/test/CodeGenObjC/blocks.m @@ -73,7 +73,7 @@ void test2(Test2 *x) { // Actually capture the value. // CHECK-NEXT: [[T6:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 6 - // CHECK-NEXT: [[CAPTURE:%.*]] = load [[TEST2]]** [[X]] + // CHECK-NEXT: [[CAPTURE:%.*]] = load [[TEST2]]*, [[TEST2]]** [[X]] // CHECK-NEXT: store [[TEST2]]* [[CAPTURE]], [[TEST2]]** [[T6]] // Then we initialize the block, blah blah blah. @@ -95,12 +95,12 @@ void test2(Test2 *x) { // CHECK: [[BLOCK:%.*]] = bitcast i8* {{%.*}} to [[BLOCK_T]]* // CHECK-NOT: bitcast // CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 -// CHECK-NEXT: [[T1:%.*]] = load i8** [[T0]] +// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[T0]] // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[WEAK_T]]{{.*}}* // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [[WEAK_T]]{{.*}}, [[WEAK_T]]{{.*}}* [[T2]], i32 0, i32 1 -// CHECK-NEXT: [[T4:%.*]] = load [[WEAK_T]]{{.*}}** [[T3]] +// CHECK-NEXT: [[T4:%.*]] = load [[WEAK_T]]{{.*}}*, [[WEAK_T]]{{.*}}** [[T3]] // CHECK-NEXT: [[WEAKX:%.*]] = getelementptr inbounds [[WEAK_T]]{{.*}}, [[WEAK_T]]{{.*}}* [[T4]], i32 0, i32 6 -// CHECK-NEXT: [[T0:%.*]] = load [[TEST2]]** [[WEAKX]], align 4 +// CHECK-NEXT: [[T0:%.*]] = load [[TEST2]]*, [[TEST2]]** [[WEAKX]], align 4 // rdar://problem/12722954 // Make sure that ... is appropriately positioned in a block call. 
@@ -110,11 +110,11 @@ void test3(void (^block)(int, ...)) { // CHECK-LABEL: define void @test3( // CHECK: [[BLOCK:%.*]] = alloca void (i32, ...)*, align 4 // CHECK-NEXT: store void (i32, ...)* -// CHECK-NEXT: [[T0:%.*]] = load void (i32, ...)** [[BLOCK]], align 4 +// CHECK-NEXT: [[T0:%.*]] = load void (i32, ...)*, void (i32, ...)** [[BLOCK]], align 4 // CHECK-NEXT: [[T1:%.*]] = bitcast void (i32, ...)* [[T0]] to [[BLOCK_T:%.*]]* // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[T1]], i32 0, i32 3 // CHECK-NEXT: [[T3:%.*]] = bitcast [[BLOCK_T]]* [[T1]] to i8* -// CHECK-NEXT: [[T4:%.*]] = load i8** [[T2]] +// CHECK-NEXT: [[T4:%.*]] = load i8*, i8** [[T2]] // CHECK-NEXT: [[T5:%.*]] = bitcast i8* [[T4]] to void (i8*, i32, ...)* // CHECK-NEXT: call void (i8*, i32, ...)* [[T5]](i8* [[T3]], i32 0, i32 1, i32 2, i32 3) // CHECK-NEXT: ret void @@ -125,11 +125,11 @@ void test4(void (^block)()) { // CHECK-LABEL: define void @test4( // CHECK: [[BLOCK:%.*]] = alloca void (...)*, align 4 // CHECK-NEXT: store void (...)* -// CHECK-NEXT: [[T0:%.*]] = load void (...)** [[BLOCK]], align 4 +// CHECK-NEXT: [[T0:%.*]] = load void (...)*, void (...)** [[BLOCK]], align 4 // CHECK-NEXT: [[T1:%.*]] = bitcast void (...)* [[T0]] to [[BLOCK_T:%.*]]* // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[T1]], i32 0, i32 3 // CHECK-NEXT: [[T3:%.*]] = bitcast [[BLOCK_T]]* [[T1]] to i8* -// CHECK-NEXT: [[T4:%.*]] = load i8** [[T2]] +// CHECK-NEXT: [[T4:%.*]] = load i8*, i8** [[T2]] // CHECK-NEXT: [[T5:%.*]] = bitcast i8* [[T4]] to void (i8*, i32, i32, i32, i32)* // CHECK-NEXT: call void [[T5]](i8* [[T3]], i32 0, i32 1, i32 2, i32 3) // CHECK-NEXT: ret void diff --git a/test/CodeGenObjC/boxing.m b/test/CodeGenObjC/boxing.m index 33dc4e698f..1d39f7bf72 100644 --- a/test/CodeGenObjC/boxing.m +++ b/test/CodeGenObjC/boxing.m @@ -67,29 +67,29 @@ typedef signed char BOOL; // CHECK: [[stringWithUTF8StringSEL:@.*]] = private externally_initialized global i8* getelementptr inbounds ([22 x i8]* [[stringWithUTF8StringMeth]] int main() { - // CHECK: load i8** [[WithIntSEL]] + // CHECK: load i8*, i8** [[WithIntSEL]] int i; @(i); - // CHECK: load i8** [[WithCharSEL]] + // CHECK: load i8*, i8** [[WithCharSEL]] signed char sc; @(sc); - // CHECK: load i8** [[WithBoolSEL]] + // CHECK: load i8*, i8** [[WithBoolSEL]] BOOL b; @(b); - // CHECK: load i8** [[WithBoolSEL]] + // CHECK: load i8*, i8** [[WithBoolSEL]] typeof(b) b2; @(b2); - // CHECK: load i8** [[WithBoolSEL]] + // CHECK: load i8*, i8** [[WithBoolSEL]] typedef const typeof(b) MyBOOL; MyBOOL b3; @(b3); - // CHECK: load i8** [[WithBoolSEL]] + // CHECK: load i8*, i8** [[WithBoolSEL]] @((BOOL)i); - // CHECK: load i8** [[WithIntegerSEL]] + // CHECK: load i8*, i8** [[WithIntegerSEL]] @((NSInteger)i); - // CHECK: load i8** [[WithUnsignedIntegerSEL]] + // CHECK: load i8*, i8** [[WithUnsignedIntegerSEL]] @((NSUInteger)i); - // CHECK: load i8** [[stringWithUTF8StringSEL]] + // CHECK: load i8*, i8** [[stringWithUTF8StringSEL]] const char *s; @(s); typedef enum : NSInteger { Red, Green, Blue } Color; - // CHECK: load i8** [[WithIntegerSEL]] + // CHECK: load i8*, i8** [[WithIntegerSEL]] @(Red); Color col = Red; - // CHECK: load i8** [[WithIntegerSEL]] + // CHECK: load i8*, i8** [[WithIntegerSEL]] @(col); } diff --git a/test/CodeGenObjC/category-super-class-meth.m b/test/CodeGenObjC/category-super-class-meth.m index 575a4dc7c7..d773b272c5 100644 --- a/test/CodeGenObjC/category-super-class-meth.m +++ 
b/test/CodeGenObjC/category-super-class-meth.m @@ -22,8 +22,8 @@ @end // CHECK: define internal i8* @"\01+[Sub2(Category) copy] -// CHECK: [[ONE:%.*]] = load %struct._class_t** @"OBJC_CLASSLIST_SUP_REFS_$_3" +// CHECK: [[ONE:%.*]] = load %struct._class_t*, %struct._class_t** @"OBJC_CLASSLIST_SUP_REFS_$_3" // CHECK: [[TWO:%.*]] = bitcast %struct._class_t* [[ONE]] to i8* // CHECK: [[THREE:%.*]] = getelementptr inbounds %struct._objc_super, %struct._objc_super* [[OBJC_SUPER:%.*]], i32 0, i32 1 // CHECK: store i8* [[TWO]], i8** [[THREE]] -// CHECK: [[FOUR:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK: [[FOUR:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ diff --git a/test/CodeGenObjC/debug-info-block-captured-self.m b/test/CodeGenObjC/debug-info-block-captured-self.m index f0ca1af34c..70c948824a 100644 --- a/test/CodeGenObjC/debug-info-block-captured-self.m +++ b/test/CodeGenObjC/debug-info-block-captured-self.m @@ -54,7 +54,7 @@ typedef enum { // CHECK: %[[MEM1:.*]] = alloca i8*, align 8 // CHECK-NEXT: %[[MEM2:.*]] = alloca i8*, align 8 // CHECK: store i8* [[BLOCK_DESC:%.*]], i8** %[[MEM1]], align 8 -// CHECK: %[[TMP0:.*]] = load i8** %[[MEM1]] +// CHECK: %[[TMP0:.*]] = load i8*, i8** %[[MEM1]] // CHECK: call void @llvm.dbg.value(metadata i8* %[[TMP0]], i64 0, metadata ![[BDMD:[0-9]+]], metadata !{{.*}}) // CHECK: call void @llvm.dbg.declare(metadata i8* [[BLOCK_DESC]], metadata ![[BDMD:[0-9]+]], metadata !{{.*}}) // CHECK: %[[TMP1:.*]] = bitcast diff --git a/test/CodeGenObjC/exceptions.m b/test/CodeGenObjC/exceptions.m index 92f68292ff..d4790c73f4 100644 --- a/test/CodeGenObjC/exceptions.m +++ b/test/CodeGenObjC/exceptions.m @@ -58,13 +58,13 @@ int f2() { // CHECK-NEXT: call void asm sideeffect "", "*m,*m"(i32* [[X]] // CHECK-NEXT: call void @foo() // CHECK-NEXT: call void @objc_exception_try_exit - // CHECK-NEXT: [[T:%.*]] = load i32* [[X]] + // CHECK-NEXT: [[T:%.*]] = load i32, i32* [[X]] foo(); } @catch (id) { // Landing pad. Note that we elide the re-enter. // CHECK: call void asm sideeffect "", "=*m,=*m"(i32* [[X]] // CHECK-NEXT: call i8* @objc_exception_extract - // CHECK-NEXT: [[T1:%.*]] = load i32* [[X]] + // CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[X]] // CHECK-NEXT: [[T2:%.*]] = add nsw i32 [[T1]], -1 // This store is dead. 
diff --git a/test/CodeGenObjC/gc.m b/test/CodeGenObjC/gc.m index 729cf107a9..6b6d293557 100644 --- a/test/CodeGenObjC/gc.m +++ b/test/CodeGenObjC/gc.m @@ -8,7 +8,7 @@ void test0(void) { // CHECK: [[T0:%.*]] = call i8* @test0_helper() // CHECK-NEXT: store i8* [[T0]], i8** [[X:%.*]], align 8 // CHECK-NEXT: call i8* @test0_helper() - // CHECK-NEXT: [[T0:%.*]] = load i8** [[X]], align 8 + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]], align 8 // CHECK-NEXT: call void asm sideeffect "", "r"(i8* [[T0]]) [[NUW:#[0-9]+]] // CHECK-NEXT: ret void } diff --git a/test/CodeGenObjC/id-isa-codegen.m b/test/CodeGenObjC/id-isa-codegen.m index fa0f0b097a..20f344c539 100644 --- a/test/CodeGenObjC/id-isa-codegen.m +++ b/test/CodeGenObjC/id-isa-codegen.m @@ -64,10 +64,10 @@ id Test2() { ((id)cat)->isa = dynamicSubclass; } @end -// CHECK-LP64: %{{.*}} = load i8** % +// CHECK-LP64: %{{.*}} = load i8*, i8** % // CHECK-NEXT: %{{.*}} = bitcast i8* %{{.*}} to i8** // CHECK-NEXT: store i8* %{{.*}}, i8** %{{.*}} -// CHECK-LP32: %{{.*}} = load i8** % +// CHECK-LP32: %{{.*}} = load i8*, i8** % // CHECK-NEXT: %{{.*}} = bitcast i8* %{{.*}} to i8** // CHECK-NEXT: store i8* %{{.*}}, i8** %{{.*}} diff --git a/test/CodeGenObjC/ivar-base-as-invariant-load.m b/test/CodeGenObjC/ivar-base-as-invariant-load.m index 061fea31a5..4a17eb16f6 100644 --- a/test/CodeGenObjC/ivar-base-as-invariant-load.m +++ b/test/CodeGenObjC/ivar-base-as-invariant-load.m @@ -23,7 +23,7 @@ @end -// CHECK: [[T1:%.*]] = load i64* @"OBJC_IVAR_$_A._flags", !invariant.load ![[MD_NUM:[0-9]+]] -// CHECK: [[T2:%.*]] = load i64* @"OBJC_IVAR_$_A._flags", !invariant.load ![[MD_NUM]] -// CHECK: [[T3:%.*]] = load i64* @"OBJC_IVAR_$_A._flags", !invariant.load ![[MD_NUM]] +// CHECK: [[T1:%.*]] = load i64, i64* @"OBJC_IVAR_$_A._flags", !invariant.load ![[MD_NUM:[0-9]+]] +// CHECK: [[T2:%.*]] = load i64, i64* @"OBJC_IVAR_$_A._flags", !invariant.load ![[MD_NUM]] +// CHECK: [[T3:%.*]] = load i64, i64* @"OBJC_IVAR_$_A._flags", !invariant.load ![[MD_NUM]] // diff --git a/test/CodeGenObjC/ivar-invariant.m b/test/CodeGenObjC/ivar-invariant.m index ef17ffc12f..b9c5bec930 100644 --- a/test/CodeGenObjC/ivar-invariant.m +++ b/test/CodeGenObjC/ivar-invariant.m @@ -29,7 +29,7 @@ @end // CHECK: define internal i8* @"\01-[Derived init]" -// CHECK: [[IVAR:%.*]] = load i64* @"OBJC_IVAR_$_Derived.member", !invariant.load +// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_Derived.member", !invariant.load void * variant_load_1(int i) { void *ptr; @@ -41,7 +41,7 @@ void * variant_load_1(int i) { } // CHECK-LABEL: define i8* @variant_load_1(i32 %i) -// CHECK: [[IVAR:%.*]] = load i64* @"OBJC_IVAR_$_Derived.member"{{$}} +// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_Derived.member"{{$}} @interface Container : Derived @end @implementation Container @@ -52,7 +52,7 @@ void * variant_load_1(int i) { @end // CHECK: define internal i8* @"\01-[Container invariant_load_1]" -// CHECK: [[IVAR:%.*]] = load i64* @"OBJC_IVAR_$_Derived.member", !invariant.load +// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_Derived.member", !invariant.load @interface ForBlock { @@ -62,7 +62,7 @@ void * variant_load_1(int i) { @end // CHECK-LABEL: define internal i8* @block_block_invoke -// CHECK: load i64* @"OBJC_IVAR_$_ForBlock.foo" +// CHECK: load i64, i64* @"OBJC_IVAR_$_ForBlock.foo" id (^block)(ForBlock*) = ^(ForBlock* a) { return a->foo; }; diff --git a/test/CodeGenObjC/messages-2.m b/test/CodeGenObjC/messages-2.m index ce6624ad5a..4f98fc7287 100644 --- a/test/CodeGenObjC/messages-2.m +++ 
b/test/CodeGenObjC/messages-2.m @@ -150,7 +150,7 @@ typedef struct { void test0(A *x) { // CHECK: [[X:%.*]] = alloca [[A]]* // CHECK-NEXT: [[POINT:%.*]] = alloca [[POINT_T:%.*]], - // CHECK: [[T0:%.*]] = load [[A]]** [[X]] + // CHECK: [[T0:%.*]] = load [[A]]*, [[A]]** [[X]] // CHECK: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8* // CHECK-NEXT: icmp eq i8* [[T1]], null // CHECK-NEXT: br i1 @@ -162,7 +162,7 @@ void test0(A *x) { // CHECK-NF: [[X:%.*]] = alloca [[A]]* // CHECK-NF-NEXT: [[POINT:%.*]] = alloca [[POINT_T:%.*]], - // CHECK-NF: [[T0:%.*]] = load [[A]]** [[X]] + // CHECK-NF: [[T0:%.*]] = load [[A]]*, [[A]]** [[X]] // CHECK-NF: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8* // CHECK-NF-NEXT: icmp eq i8* [[T1]], null // CHECK-NF-NEXT: br i1 diff --git a/test/CodeGenObjC/ns_consume_null_check.m b/test/CodeGenObjC/ns_consume_null_check.m index 34fed8839e..777659f268 100644 --- a/test/CodeGenObjC/ns_consume_null_check.m +++ b/test/CodeGenObjC/ns_consume_null_check.m @@ -21,7 +21,7 @@ void test0(void) { // CHECK-NEXT: [[SIX:%.*]] = bitcast // CHECK-NEXT: [[SEVEN:%.*]] = icmp eq i8* [[SIX]], null // CHECK-NEXT: br i1 [[SEVEN]], label [[NULLINIT:%.*]], label [[CALL_LABEL:%.*]] -// CHECK: [[FN:%.*]] = load i8** getelementptr inbounds +// CHECK: [[FN:%.*]] = load i8*, i8** getelementptr inbounds // CHECK-NEXT: [[EIGHT:%.*]] = bitcast i8* [[FN]] // CHECK-NEXT: [[CALL:%.*]] = call signext i8 [[EIGHT]] // CHECK-NEXT: br label [[CONT:%.*]] @@ -43,13 +43,13 @@ void test1(void) { // Various initializations. // CHECK: [[T0:%.*]] = call i8* bitcast ( // CHECK-NEXT: store i8* [[T0]], i8** [[OBJ]] -// CHECK-NEXT: [[T0:%.*]] = load i8** [[OBJ]] +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[OBJ]] // CHECK-NEXT: call i8* @objc_initWeak(i8** [[WEAKOBJ]], i8* [[T0]]) [[NUW]] // Okay, start the message-send. -// CHECK-NEXT: [[T0:%.*]] = load [[MYOBJECT:%.*]]** @x -// CHECK-NEXT: [[ARG:%.*]] = load i8** [[OBJ]] +// CHECK-NEXT: [[T0:%.*]] = load [[MYOBJECT:%.*]]*, [[MYOBJECT:%.*]]** @x +// CHECK-NEXT: [[ARG:%.*]] = load i8*, i8** [[OBJ]] // CHECK-NEXT: [[ARG_RETAINED:%.*]] = call i8* @objc_retain(i8* [[ARG]]) -// CHECK-NEXT: load i8** @ +// CHECK-NEXT: load i8*, i8** @ // CHECK-NEXT: [[SELF:%.*]] = bitcast [[MYOBJECT]]* [[T0]] to i8* // Null check. // CHECK-NEXT: [[T0:%.*]] = icmp eq i8* [[SELF]], null @@ -60,9 +60,9 @@ void test1(void) { // CHECK: [[T0:%.*]] = bitcast { float, float }* [[COERCE:%.*]] to <2 x float>* // CHECK-NEXT: store <2 x float> [[CALL]], <2 x float>* [[T0]], // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[COERCE]], i32 0, i32 0 -// CHECK-NEXT: [[REALCALL:%.*]] = load float* [[T0]] +// CHECK-NEXT: [[REALCALL:%.*]] = load float, float* [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[COERCE]], i32 0, i32 1 -// CHECK-NEXT: [[IMAGCALL:%.*]] = load float* [[T0]] +// CHECK-NEXT: [[IMAGCALL:%.*]] = load float, float* [[T0]] // CHECK-NEXT: br label [[CONT:%.*]]{{$}} // Null path. 
// CHECK: call void @objc_release(i8* [[ARG_RETAINED]]) [[NUW]] diff --git a/test/CodeGenObjC/objc-asm-attribute-test.m b/test/CodeGenObjC/objc-asm-attribute-test.m index 12903b522e..589b08ae0e 100644 --- a/test/CodeGenObjC/objc-asm-attribute-test.m +++ b/test/CodeGenObjC/objc-asm-attribute-test.m @@ -51,4 +51,4 @@ id Test16877359() { // CHECK: @"OBJC_METACLASS_$_MySecretNamespace.Message" = global %struct._class_t // CHECK: @"OBJC_CLASS_$_foo" = external global %struct._class_t // CHECK: define internal i8* @"\01-[Message MyMethod]" -// CHECK: [[IVAR:%.*]] = load i64* @"OBJC_IVAR_$_MySecretNamespace.Message.MyIVAR" +// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_MySecretNamespace.Message.MyIVAR" diff --git a/test/CodeGenObjC/objc-container-subscripting-1.m b/test/CodeGenObjC/objc-container-subscripting-1.m index 9ddfd39f63..20d7d525f1 100644 --- a/test/CodeGenObjC/objc-container-subscripting-1.m +++ b/test/CodeGenObjC/objc-container-subscripting-1.m @@ -19,15 +19,15 @@ int main() { id oldObject = array[10]; // CHECK: [[ARR:%.*]] = load {{%.*}} [[array:%.*]], align 8 -// CHECK-NEXT: [[SEL:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[ARRC:%.*]] = bitcast {{%.*}} [[ARR]] to i8* // CHECK-NEXT: [[CALL:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i32)*)(i8* [[ARRC]], i8* [[SEL]], i32 10) // CHECK-NEXT: store i8* [[CALL]], i8** [[OLDOBJ:%.*]], align 8 val = (array[10] = oldObject); // CHECK: [[THREE:%.*]] = load {{%.*}} [[array:%.*]], align 8 -// CHECK-NEXT: [[FOUR:%.*]] = load i8** [[oldObject:%.*]], align 8 -// CHECK-NEXT: [[FIVE:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_2 +// CHECK-NEXT: [[FOUR:%.*]] = load i8*, i8** [[oldObject:%.*]], align 8 +// CHECK-NEXT: [[FIVE:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_2 // CHECK-NEXT: [[SIX:%.*]] = bitcast {{%.*}} [[THREE]] to i8* // CHECK-NEXT: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, i32)*)(i8* [[SIX]], i8* [[FIVE]], i8* [[FOUR]], i32 10) // CHECK-NEXT: store i8* [[FOUR]], i8** [[val:%.*]] @@ -37,8 +37,8 @@ int main() { id newObject; oldObject = dictionary[key]; // CHECK: [[SEVEN:%.*]] = load {{%.*}} [[DICTIONARY:%.*]], align 8 -// CHECK-NEXT: [[EIGHT:%.*]] = load i8** [[KEY:%.*]], align 8 -// CHECK-NEXT: [[TEN:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_4 +// CHECK-NEXT: [[EIGHT:%.*]] = load i8*, i8** [[KEY:%.*]], align 8 +// CHECK-NEXT: [[TEN:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_4 // CHECK-NEXT: [[ELEVEN:%.*]] = bitcast {{%.*}} [[SEVEN]] to i8* // CHECK-NEXT: [[CALL1:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, i8*)*)(i8* [[ELEVEN]], i8* [[TEN]], i8* [[EIGHT]]) // CHECK-NEXT: store i8* [[CALL1]], i8** [[oldObject:%.*]], align 8 @@ -46,9 +46,9 @@ int main() { val = (dictionary[key] = newObject); // CHECK: [[TWELVE:%.*]] = load {{%.*}} [[DICTIONARY]], align 8 -// CHECK-NEXT: [[THIRTEEN:%.*]] = load i8** [[KEY]], align 8 -// CHECK-NEXT: [[FOURTEEN:%.*]] = load i8** [[NEWOBJECT:%.*]], align 8 -// CHECK-NEXT: [[SIXTEEN:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_6 +// CHECK-NEXT: [[THIRTEEN:%.*]] = load i8*, i8** [[KEY]], align 8 +// CHECK-NEXT: [[FOURTEEN:%.*]] = load i8*, i8** [[NEWOBJECT:%.*]], align 8 +// CHECK-NEXT: [[SIXTEEN:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_6 // CHECK-NEXT: [[SEVENTEEN:%.*]] = bitcast {{%.*}} [[TWELVE]] to i8* // CHECK-NEXT: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, 
i8*, i8*)*)(i8* [[SEVENTEEN]], i8* [[SIXTEEN]], i8* [[FOURTEEN]], i8* [[THIRTEEN]]) // CHECK-NEXT: store i8* [[FOURTEEN]], i8** [[val:%.*]] diff --git a/test/CodeGenObjC/optimize-ivar-offset-load.m b/test/CodeGenObjC/optimize-ivar-offset-load.m index 880f79d321..0317c09403 100644 --- a/test/CodeGenObjC/optimize-ivar-offset-load.m +++ b/test/CodeGenObjC/optimize-ivar-offset-load.m @@ -26,11 +26,11 @@ extern void foo(int); } } @end -// CHECK: [[IVAR:%.*]] = load i64* @"OBJC_IVAR_$_SampleClass._value", align 8 +// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_SampleClass._value", align 8 // CHECK: [[THREE:%.*]] = bitcast [[ONE:%.*]]* [[CALL:%.*]] to i8* // CHECK: [[ADDPTR:%.*]] = getelementptr inbounds i8, i8* [[THREE]], i64 [[IVAR]] // CHECK: [[FOUR:%.*]] = bitcast i8* [[ADDPTR]] to i32* -// CHECK: [[FIVE:%.*]] = load i32* [[FOUR]], align 4 +// CHECK: [[FIVE:%.*]] = load i32, i32* [[FOUR]], align 4 // CHECK: tail call void @foo(i32 [[FIVE]]) @implementation SampleClass @@ -44,8 +44,8 @@ extern void foo(int); } } @end -// CHECK: [[ZERO:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_, align 8, !invariant.load -// CHECK: [[IVAR:%.*]] = load i64* @"OBJC_IVAR_$_SampleClass._value", align 8, !invariant.load +// CHECK: [[ZERO:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_, align 8, !invariant.load +// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_SampleClass._value", align 8, !invariant.load @interface Sample : SampleClass @end @@ -59,6 +59,6 @@ extern void foo(int); } } @end -// CHECK: [[ZERO:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_, align 8, !invariant.load -// CHECK: [[IVAR:%.*]] = load i64* @"OBJC_IVAR_$_SampleClass._value", align 8, !invariant.load +// CHECK: [[ZERO:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_, align 8, !invariant.load +// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_SampleClass._value", align 8, !invariant.load diff --git a/test/CodeGenObjC/property-array-type.m b/test/CodeGenObjC/property-array-type.m index a4df54dde7..ea757db0f6 100644 --- a/test/CodeGenObjC/property-array-type.m +++ b/test/CodeGenObjC/property-array-type.m @@ -26,5 +26,5 @@ typedef struct _GLKMatrix4 GLKMatrix4; // CHECK: [[M:%.*]] = getelementptr inbounds %struct._GLKMatrix4, %struct._GLKMatrix4* [[TMP:%.*]], i32 0, i32 0 // CHECK: [[ARRAYDECAY:%.*]] = getelementptr inbounds [16 x float], [16 x float]* [[M]], i32 0, i32 0 -// CHECK: [[SIX:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES +// CHECK: [[SIX:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, float*)*)(i8* [[SEVEN:%.*]], i8* [[SIX]], float* [[ARRAYDECAY]]) diff --git a/test/CodeGenObjC/property-type-mismatch.m b/test/CodeGenObjC/property-type-mismatch.m index b5618cb513..12bab9ff8d 100644 --- a/test/CodeGenObjC/property-type-mismatch.m +++ b/test/CodeGenObjC/property-type-mismatch.m @@ -13,5 +13,5 @@ void bar(Foo *x) { // CHECK: [[C1:%.*]] = call float bitcast (i8* (i8*, i8*, ...)* @objc_msgSend // CHECK: [[I:%.*]] = fadd float [[C1]], 1.000000e+00 // CHECK: [[CONV:%.*]] = fptosi float [[I]] to i32 -// CHECK: [[T3:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_2 +// CHECK: [[T3:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_2 // CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend diff --git a/test/CodeGenObjC/property.m b/test/CodeGenObjC/property.m index 0f63a0fd14..ee113780b6 100644 --- a/test/CodeGenObjC/property.m +++ b/test/CodeGenObjC/property.m @@ -58,21 +58,21 @@ int printf(const char *, ...); A *test2_helper(void); void test2() 
{ // CHECK: [[BASE:%.*]] = call [[A:%.*]]* @test2_helper() - // CHECK-NEXT: [[SEL:%.*]] = load i8** + // CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** // CHECK-NEXT: [[BASETMP:%.*]] = bitcast [[A]]* [[BASE]] to i8* // CHECK-NEXT: [[LD:%.*]] = call i32 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32 (i8*, i8*)*)(i8* [[BASETMP]], i8* [[SEL]]) // CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LD]], 1 - // CHECK-NEXT: [[SEL:%.*]] = load i8** + // CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** // CHECK-NEXT: [[BASETMP:%.*]] = bitcast [[A]]* [[BASE]] to i8* // CHECK-NEXT: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i32)*)(i8* [[BASETMP]], i8* [[SEL]], i32 [[ADD]]) test2_helper().dyn++; // CHECK: [[BASE:%.*]] = call [[A]]* @test2_helper() - // CHECK-NEXT: [[SEL:%.*]] = load i8** + // CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** // CHECK-NEXT: [[BASETMP:%.*]] = bitcast [[A]]* [[BASE]] to i8* // CHECK-NEXT: [[LD:%.*]] = call i32 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i32 (i8*, i8*)*)(i8* [[BASETMP]], i8* [[SEL]]) // CHECK-NEXT: [[ADD:%.*]] = mul nsw i32 [[LD]], 10 - // CHECK-NEXT: [[SEL:%.*]] = load i8** + // CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** // CHECK-NEXT: [[BASETMP:%.*]] = bitcast [[A]]* [[BASE]] to i8* // CHECK-NEXT: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i32)*)(i8* [[BASETMP]], i8* [[SEL]], i32 [[ADD]]) test2_helper().dyn *= 10; @@ -135,34 +135,34 @@ void test7(Test7 *t) { // CHECK: define void @test7([[TEST7:%.*]]* // CHECK: [[T:%.*]] = alloca [[TEST7]]*, // CHECK-NEXT: store -// CHECK-NEXT: [[T0:%.*]] = load [[TEST7]]** [[T]], align -// CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES +// CHECK-NEXT: [[T0:%.*]] = load [[TEST7]]*, [[TEST7]]** [[T]], align +// CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST7]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call zeroext i8 bitcast // CHECK-NEXT: [[T3:%.*]] = zext i8 [[T2]] to i32 // CHECK-NEXT: [[T4:%.*]] = and i32 [[T3]], 2 // CHECK-NEXT: [[T5:%.*]] = trunc i32 [[T4]] to i8 -// CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES +// CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T6:%.*]] = bitcast [[TEST7]]* [[T0]] to i8* // CHECK-NEXT: call void bitcast -// CHECK-NEXT: [[T0:%.*]] = load [[TEST7]]** [[T]], align -// CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES +// CHECK-NEXT: [[T0:%.*]] = load [[TEST7]]*, [[TEST7]]** [[T]], align +// CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST7]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call zeroext i8 bitcast // CHECK-NEXT: [[T3:%.*]] = zext i8 [[T2]] to i32 // CHECK-NEXT: [[T4:%.*]] = or i32 [[T3]], 5 // CHECK-NEXT: [[T5:%.*]] = trunc i32 [[T4]] to i8 -// CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES +// CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T6:%.*]] = bitcast [[TEST7]]* [[T0]] to i8* // CHECK-NEXT: call void bitcast -// CHECK-NEXT: [[T0:%.*]] = load [[TEST7]]** [[T]], align -// CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES +// CHECK-NEXT: [[T0:%.*]] = load [[TEST7]]*, [[TEST7]]** [[T]], align +// CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST7]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call zeroext i8 bitcast // CHECK-NEXT: [[T3:%.*]] = zext i8 [[T2]] to i32 // CHECK-NEXT: [[T4:%.*]] = xor i32 [[T3]], 8 // CHECK-NEXT: [[T5:%.*]] = trunc i32 [[T4]] to i8 -// CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES +// CHECK-NEXT: load i8*, i8** 
@OBJC_SELECTOR_REFERENCES // CHECK-NEXT: [[T6:%.*]] = bitcast [[TEST7]]* [[T0]] to i8* // CHECK-NEXT: call void bitcast // CHECK-NEXT: ret void diff --git a/test/CodeGenObjC/selector-ref-invariance.m b/test/CodeGenObjC/selector-ref-invariance.m index 599bc3aa4c..5758a1cd7f 100644 --- a/test/CodeGenObjC/selector-ref-invariance.m +++ b/test/CodeGenObjC/selector-ref-invariance.m @@ -3,7 +3,7 @@ // rdar://6027699 void test(id x) { -// CHECK: load i8** @OBJC_SELECTOR_REFERENCES_, !invariant.load +// CHECK: load i8*, i8** @OBJC_SELECTOR_REFERENCES_, !invariant.load // CHECK: @objc_msgSend [x foo]; } diff --git a/test/CodeGenObjC/super-message-fragileabi.m b/test/CodeGenObjC/super-message-fragileabi.m index 2c917d7fd6..8c89d7f6db 100644 --- a/test/CodeGenObjC/super-message-fragileabi.m +++ b/test/CodeGenObjC/super-message-fragileabi.m @@ -24,7 +24,7 @@ { ; } -// CHECK: load %struct._objc_class** getelementptr inbounds (%struct._objc_class* @OBJC_CLASS_BetterTable, i32 0, i32 1) +// CHECK: load %struct._objc_class*, %struct._objc_class** getelementptr inbounds (%struct._objc_class* @OBJC_CLASS_BetterTable, i32 0, i32 1) return self; } diff --git a/test/CodeGenObjC/synchronized.m b/test/CodeGenObjC/synchronized.m index 015e55b8f8..212d98a6bc 100644 --- a/test/CodeGenObjC/synchronized.m +++ b/test/CodeGenObjC/synchronized.m @@ -39,7 +39,7 @@ void foo(id a) { // CHECK: unreachable // CHECK: call void @objc_exception_try_exit - // CHECK: [[T:%.*]] = load i8** [[SYNC]] + // CHECK: [[T:%.*]] = load i8*, i8** [[SYNC]] // CHECK-NEXT: call i32 @objc_sync_exit // CHECK: ret void return; @@ -59,7 +59,7 @@ int f0(id a) { @synchronized((x++, a)) { } - // CHECK: [[T:%.*]] = load i32* [[X]] + // CHECK: [[T:%.*]] = load i32, i32* [[X]] // CHECK: ret i32 [[T]] return x; } diff --git a/test/CodeGenObjC/tentative-cfconstantstring.m b/test/CodeGenObjC/tentative-cfconstantstring.m index 2b2601a54f..f81b95d207 100644 --- a/test/CodeGenObjC/tentative-cfconstantstring.m +++ b/test/CodeGenObjC/tentative-cfconstantstring.m @@ -35,8 +35,8 @@ static inline void _inlineFunction() { // CHECK: @_unnamed_cfstring_{{.*}} = private constant %struct.NSConstantString { i32* getelementptr inbounds ([24 x i32]* @__CFConstantStringClassReference, i32 0, i32 0) // CHECK-LABEL: define internal void @_inlineFunction() -// CHECK: [[ZERO:%.*]] = load %struct._class_t** @"OBJC_CLASSLIST_REFERENCES_ -// CHECK-NEXT: [[ONE:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK: [[ZERO:%.*]] = load %struct._class_t*, %struct._class_t** @"OBJC_CLASSLIST_REFERENCES_ +// CHECK-NEXT: [[ONE:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[TWO:%.*]] = bitcast %struct._class_t* [[ZERO]] to i8* // CHECK-NEXT: call void (i8*, i8*, [[T:%.*]]*, ...)* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, [[T:%.*]]*, ...)*)(i8* [[TWO]], i8* [[ONE]], [[T:%.*]]* bitcast (%struct.NSConstantString* @_unnamed_cfstring_{{.*}} to [[T:%.*]]*)) // CHECK-NEXT: ret void diff --git a/test/CodeGenObjCXX/arc-cxx11-member-init.mm b/test/CodeGenObjCXX/arc-cxx11-member-init.mm index 0169612462..89683ba3a8 100644 --- a/test/CodeGenObjCXX/arc-cxx11-member-init.mm +++ b/test/CodeGenObjCXX/arc-cxx11-member-init.mm @@ -23,8 +23,8 @@ class XClipboardDataSet @end // CHECK: [[mClipData:%.*]] = getelementptr inbounds %class.XClipboardDataSet, %class.XClipboardDataSet* -// CHECK: [[ZERO:%.*]] = load %struct._class_t** @"OBJC_CLASSLIST_REFERENCES_$_" -// CHECK: [[ONE:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK: [[ZERO:%.*]] = load 
%struct._class_t*, %struct._class_t** @"OBJC_CLASSLIST_REFERENCES_$_" +// CHECK: [[ONE:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK: [[TWO:%.*]] = bitcast %struct._class_t* [[ZERO]] to i8* // CHECK: [[CALL:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* [[TWO]], i8* [[ONE]]) // CHECK: [[THREE:%.*]] = bitcast i8* [[CALL]] to [[T:%.*]]* diff --git a/test/CodeGenObjCXX/arc-exceptions.mm b/test/CodeGenObjCXX/arc-exceptions.mm index 4b1ab4b579..cc2206d1a9 100644 --- a/test/CodeGenObjCXX/arc-exceptions.mm +++ b/test/CodeGenObjCXX/arc-exceptions.mm @@ -94,7 +94,7 @@ namespace test4 { throw 0; } // CHECK-LABEL: define void @_ZN5test41AC2Ev( - // CHECK: [[THIS:%.*]] = load [[A:%.*]]** {{%.*}} + // CHECK: [[THIS:%.*]] = load [[A:%.*]]*, [[A:%.*]]** {{%.*}} // Construct single. // CHECK-NEXT: [[SINGLE:%.*]] = getelementptr inbounds [[A]], [[A]]* [[THIS]], i32 0, i32 0 // CHECK-NEXT: store i8* null, i8** [[SINGLE]], align 8 diff --git a/test/CodeGenObjCXX/arc-move.mm b/test/CodeGenObjCXX/arc-move.mm index d7b9f55d5f..dc670492f1 100644 --- a/test/CodeGenObjCXX/arc-move.mm +++ b/test/CodeGenObjCXX/arc-move.mm @@ -2,9 +2,9 @@ // define void @_Z11simple_moveRU8__strongP11objc_objectS2_ void simple_move(__strong id &x, __strong id &y) { - // CHECK: = load i8** + // CHECK: = load i8*, i8** // CHECK: store i8* null - // CHECK: = load i8** + // CHECK: = load i8*, i8** // CHECK: store i8* // CHECK-NEXT: call void @objc_release x = static_cast<__strong id&&>(y); @@ -34,10 +34,10 @@ typename remove_reference::type&& move(T &&x) { // CHECK-LABEL: define void @_Z12library_moveRU8__strongP11objc_objectS2_ void library_move(__strong id &x, __strong id &y) { // CHECK: call dereferenceable({{[0-9]+}}) i8** @_Z4moveIRU8__strongP11objc_objectEON16remove_referenceIT_E4typeEOS5_ - // CHECK: load i8** + // CHECK: load i8*, i8** // CHECK: store i8* null, i8** - // CHECK: load i8*** - // CHECK-NEXT: load i8** + // CHECK: load i8**, i8*** + // CHECK-NEXT: load i8*, i8** // CHECK-NEXT: store i8* // CHECK-NEXT: call void @objc_release // CHECK-NEXT: ret void @@ -48,7 +48,7 @@ void library_move(__strong id &x, __strong id &y) { void library_move(__strong id &y) { // CHECK: [[Y:%[a-zA-Z0-9]+]] = call dereferenceable({{[0-9]+}}) i8** @_Z4moveIRU8__strongP11objc_objectEON16remove_referenceIT_E4typeEOS5_ // Load the object - // CHECK-NEXT: [[OBJ:%[a-zA-Z0-9]+]] = load i8** [[Y]] + // CHECK-NEXT: [[OBJ:%[a-zA-Z0-9]+]] = load i8*, i8** [[Y]] // Null out y // CHECK-NEXT: store i8* null, i8** [[Y]] // Initialize x with the object @@ -57,7 +57,7 @@ void library_move(__strong id &y) { // CHECK-NEXT: store i32 17 int i = 17; - // CHECK-NEXT: [[OBJ:%[a-zA-Z0-9]+]] = load i8** [[X]] + // CHECK-NEXT: [[OBJ:%[a-zA-Z0-9]+]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @objc_release(i8* [[OBJ]]) // CHECK-NEXT: ret void } @@ -66,10 +66,10 @@ void library_move(__strong id &y) { void const_move(const __strong id &x) { // CHECK: [[Y:%.*]] = alloca i8*, // CHECK: [[X:%.*]] = call dereferenceable({{[0-9]+}}) i8** @_Z4moveIRKU8__strongP11objc_objectEON16remove_referenceIT_E4typeEOS5_( - // CHECK-NEXT: [[T0:%.*]] = load i8** [[X]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[Y]] - // CHECK-NEXT: [[T0:%.*]] = load i8** [[Y]] + // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[Y]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) id y = move(x); } diff --git a/test/CodeGenObjCXX/arc-new-delete.mm 
b/test/CodeGenObjCXX/arc-new-delete.mm index abae76bbf9..9a61f183c6 100644 --- a/test/CodeGenObjCXX/arc-new-delete.mm +++ b/test/CodeGenObjCXX/arc-new-delete.mm @@ -57,7 +57,7 @@ void test_array_new() { // CHECK-LABEL: define void @_Z11test_deletePU8__strongP11objc_objectPU6__weakS0_ void test_delete(__strong id *sptr, __weak id *wptr) { // CHECK: br i1 - // CHECK: load i8** + // CHECK: load i8*, i8** // CHECK-NEXT: call void @objc_release // CHECK: call void @_ZdlPv delete sptr; @@ -72,7 +72,7 @@ void test_delete(__strong id *sptr, __weak id *wptr) { // CHECK-LABEL: define void @_Z17test_array_deletePU8__strongP11objc_objectPU6__weakS0_ void test_array_delete(__strong id *sptr, __weak id *wptr) { // CHECK: icmp eq i8** [[BEGIN:%.*]], null - // CHECK: [[LEN:%.*]] = load i64* {{%.*}} + // CHECK: [[LEN:%.*]] = load i64, i64* {{%.*}} // CHECK: [[END:%.*]] = getelementptr inbounds i8*, i8** [[BEGIN]], i64 [[LEN]] // CHECK-NEXT: icmp eq i8** [[BEGIN]], [[END]] // CHECK: [[PAST:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[CUR:%.*]], @@ -83,7 +83,7 @@ void test_array_delete(__strong id *sptr, __weak id *wptr) { delete [] sptr; // CHECK: icmp eq i8** [[BEGIN:%.*]], null - // CHECK: [[LEN:%.*]] = load i64* {{%.*}} + // CHECK: [[LEN:%.*]] = load i64, i64* {{%.*}} // CHECK: [[END:%.*]] = getelementptr inbounds i8*, i8** [[BEGIN]], i64 [[LEN]] // CHECK-NEXT: icmp eq i8** [[BEGIN]], [[END]] // CHECK: [[PAST:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[CUR:%.*]], diff --git a/test/CodeGenObjCXX/arc-pseudo-destructors.mm b/test/CodeGenObjCXX/arc-pseudo-destructors.mm index d21844addf..b79d9a3447 100644 --- a/test/CodeGenObjCXX/arc-pseudo-destructors.mm +++ b/test/CodeGenObjCXX/arc-pseudo-destructors.mm @@ -2,16 +2,16 @@ // CHECK-LABEL: define void @_Z28test_objc_object_pseudo_dtorPU8__strongP11objc_objectPU6__weakS0_ void test_objc_object_pseudo_dtor(__strong id *ptr, __weak id *wptr) { - // CHECK: load i8*** - // CHECK-NEXT: load i8** + // CHECK: load i8**, i8*** + // CHECK-NEXT: load i8*, i8** // CHECK-NEXT: call void @objc_release ptr->~id(); // CHECK: call void @objc_destroyWeak(i8** {{%.*}}) wptr->~id(); - // CHECK: load i8*** - // CHECK-NEXT: load i8** + // CHECK: load i8**, i8*** + // CHECK-NEXT: load i8*, i8** // CHECK-NEXT: call void @objc_release (*ptr).~id(); diff --git a/test/CodeGenObjCXX/arc-references.mm b/test/CodeGenObjCXX/arc-references.mm index 0acb6d5659..2bd21c31b4 100644 --- a/test/CodeGenObjCXX/arc-references.mm +++ b/test/CodeGenObjCXX/arc-references.mm @@ -67,7 +67,7 @@ void test5(__strong id &x) { // CHECK-NEXT: store [[A]]* [[OBJ_A]], [[A]]** [[REFTMP:%[a-zA-Z0-9]+]] // CHECK-NEXT: call void @_Z4sinkOU8__strongP1A sink(x); - // CHECK-NEXT: [[OBJ_A:%[a-zA-Z0-9]+]] = load [[A]]** [[REFTMP]] + // CHECK-NEXT: [[OBJ_A:%[a-zA-Z0-9]+]] = load [[A]]*, [[A]]** [[REFTMP]] // CHECK-NEXT: [[OBJ_ID:%[a-zA-Z0-9]+]] = bitcast [[A]]* [[OBJ_A]] to i8* // CHECK-NEXT: call void @objc_release // CHECK-NEXT: store i32 17, i32 diff --git a/test/CodeGenObjCXX/arc-special-member-functions.mm b/test/CodeGenObjCXX/arc-special-member-functions.mm index e3d13a6079..df3c22916f 100644 --- a/test/CodeGenObjCXX/arc-special-member-functions.mm +++ b/test/CodeGenObjCXX/arc-special-member-functions.mm @@ -93,12 +93,12 @@ void test_ObjCBlockMember_copy_assign(ObjCBlockMember m1, ObjCBlockMember m2) { // Implicitly-generated copy assignment operator for ObjCBlockMember // CHECK: define linkonce_odr dereferenceable({{[0-9]+}}) {{%.*}}* @_ZN15ObjCBlockMemberaSERKS_( // CHECK: [[T0:%.*]] = getelementptr inbounds 
[[T:%.*]], [[T:%.*]]* {{%.*}}, i32 0, i32 0 -// CHECK-NEXT: [[T1:%.*]] = load i32 (i32)** [[T0]], align 8 +// CHECK-NEXT: [[T1:%.*]] = load i32 (i32)*, i32 (i32)** [[T0]], align 8 // CHECK-NEXT: [[T2:%.*]] = bitcast i32 (i32)* [[T1]] to i8* // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_retainBlock(i8* [[T2]]) // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i32 (i32)* // CHECK-NEXT: [[T5:%.*]] = getelementptr inbounds [[T]], [[T]]* {{%.*}}, i32 0, i32 0 -// CHECK-NEXT: [[T6:%.*]] = load i32 (i32)** [[T5]], align 8 +// CHECK-NEXT: [[T6:%.*]] = load i32 (i32)*, i32 (i32)** [[T5]], align 8 // CHECK-NEXT: store i32 (i32)* [[T4]], i32 (i32)** [[T5]] // CHECK-NEXT: [[T7:%.*]] = bitcast i32 (i32)* [[T6]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T7]]) diff --git a/test/CodeGenObjCXX/arc.mm b/test/CodeGenObjCXX/arc.mm index 6b42a4ca48..c66417ba24 100644 --- a/test/CodeGenObjCXX/arc.mm +++ b/test/CodeGenObjCXX/arc.mm @@ -21,7 +21,7 @@ void test0(__weak id *wp, __weak volatile id *wvp) { // CHECK: [[T0:%.*]] = call i8* @_Z12test0_helperv() // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) - // CHECK-NEXT: [[T2:%.*]] = load i8*** {{%.*}}, align 8 + // CHECK-NEXT: [[T2:%.*]] = load i8**, i8*** {{%.*}}, align 8 // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_storeWeak(i8** [[T2]], i8* [[T1]]) // CHECK-NEXT: [[T4:%.*]] = call i8* @objc_retain(i8* [[T3]]) // CHECK-NEXT: store i8* [[T4]], i8** @@ -30,7 +30,7 @@ void test0(__weak id *wp, __weak volatile id *wvp) { // CHECK: [[T0:%.*]] = call i8* @_Z12test0_helperv() // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) - // CHECK-NEXT: [[T2:%.*]] = load i8*** {{%.*}}, align 8 + // CHECK-NEXT: [[T2:%.*]] = load i8**, i8*** {{%.*}}, align 8 // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_storeWeak(i8** [[T2]], i8* [[T1]]) // CHECK-NEXT: [[T4:%.*]] = call i8* @objc_loadWeakRetained(i8** [[T2]]) // CHECK-NEXT: store i8* [[T4]], i8** @@ -67,28 +67,28 @@ void test34(int cond) { // CHECK-NEXT: store i8* null, i8** [[STRONG]] // CHECK-NEXT: call i8* @objc_initWeak(i8** [[WEAK]], i8* null) - // CHECK-NEXT: [[T0:%.*]] = load i32* [[COND]] + // CHECK-NEXT: [[T0:%.*]] = load i32, i32* [[COND]] // CHECK-NEXT: [[T1:%.*]] = icmp ne i32 [[T0]], 0 // CHECK: [[ARG:%.*]] = phi i8** // CHECK-NEXT: [[T0:%.*]] = icmp eq i8** [[ARG]], null // CHECK-NEXT: [[T1:%.*]] = select i1 [[T0]], i8** null, i8** [[TEMP1]] // CHECK-NEXT: br i1 [[T0]], - // CHECK: [[T0:%.*]] = load i8** [[ARG]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[ARG]] // CHECK-NEXT: store i8* [[T0]], i8** [[TEMP1]] // CHECK-NEXT: br label // CHECK: [[W0:%.*]] = phi i8* [ [[T0]], {{%.*}} ], [ undef, {{%.*}} ] // CHECK: call void @_Z11test34_sinkPU15__autoreleasingP11objc_object(i8** [[T1]]) // CHECK-NEXT: [[T0:%.*]] = icmp eq i8** [[ARG]], null // CHECK-NEXT: br i1 [[T0]], - // CHECK: [[T0:%.*]] = load i8** [[TEMP1]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[TEMP1]] // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) // CHECK-NEXT: call void (...)* @clang.arc.use(i8* [[W0]]) - // CHECK-NEXT: [[T2:%.*]] = load i8** [[ARG]] + // CHECK-NEXT: [[T2:%.*]] = load i8*, i8** [[ARG]] // CHECK-NEXT: store i8* [[T1]], i8** [[ARG]] // CHECK-NEXT: call void @objc_release(i8* [[T2]]) // CHECK-NEXT: br label - // CHECK: [[T0:%.*]] = load i32* [[COND]] + // CHECK: [[T0:%.*]] = load i32, i32* [[COND]] // CHECK-NEXT: [[T1:%.*]] = icmp ne i32 [[T0]], 0 // CHECK: [[ARG:%.*]] = phi i8** // CHECK-NEXT: [[T0:%.*]] = icmp eq i8** [[ARG]], null @@ -103,7 +103,7 @@ void 
test34(int cond) { // CHECK: call void @_Z11test34_sinkPU15__autoreleasingP11objc_object(i8** [[T1]]) // CHECK-NEXT: [[T0:%.*]] = icmp eq i8** [[ARG]], null // CHECK-NEXT: br i1 [[T0]], - // CHECK: [[T0:%.*]] = load i8** [[TEMP2]] + // CHECK: [[T0:%.*]] = load i8*, i8** [[TEMP2]] // CHECK-NEXT: call i8* @objc_storeWeak(i8** [[ARG]], i8* [[T0]]) // CHECK-NEXT: br label @@ -203,7 +203,7 @@ template void test37(Test37 *a); // CHECK-NEXT: [[COLL:%.*]] = bitcast i8* [[T2]] to [[NSARRAY]]* // Make sure it's not immediately released before starting the iteration. -// CHECK-NEXT: load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK-NEXT: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK-NEXT: [[T0:%.*]] = bitcast [[NSARRAY]]* [[COLL]] to i8* // CHECK-NEXT: @objc_msgSend @@ -291,9 +291,9 @@ template void test40_helper(); // CHECK: [[X:%.*]] = alloca i8* // CHECK-NEXT: [[TEMP:%.*]] = alloca i8* // CHECK-NEXT: store i8* null, i8** [[X]] -// CHECK: [[T0:%.*]] = load i8** [[X]] +// CHECK: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: store i8* [[T0]], i8** [[TEMP]] // CHECK: @objc_msgSend -// CHECK-NEXT: [[T0:%.*]] = load i8** [[TEMP]] +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[TEMP]] // CHECK-NEXT: call i8* @objc_retain(i8* [[T0]]) diff --git a/test/CodeGenObjCXX/exceptions-legacy.mm b/test/CodeGenObjCXX/exceptions-legacy.mm index f1ac44df30..f6cd29647e 100644 --- a/test/CodeGenObjCXX/exceptions-legacy.mm +++ b/test/CodeGenObjCXX/exceptions-legacy.mm @@ -26,7 +26,7 @@ void test0(id obj) { // Leave the @synchronized. The reload of obj here is unnecessary. // CHECK: call void @objc_exception_try_exit([[BUF_T]]* [[BUF]]) -// CHECK-NEXT: [[T0:%.*]] = load i8** +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** // CHECK-NEXT: call i32 @objc_sync_exit(i8* [[T0]]) // CHECK-NEXT: ret void @@ -34,12 +34,12 @@ void test0(id obj) { // CHECK: [[T0:%.*]] = landingpad // CHECK-NEXT: cleanup // CHECK-NEXT: call void @objc_exception_try_exit([[BUF_T]]* [[BUF]]) -// CHECK-NEXT: [[T0:%.*]] = load i8** +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** // CHECK-NEXT: call i32 @objc_sync_exit(i8* [[T0]]) // CHECK-NEXT: resume // ObjC EH "cleanup". -// CHECK: [[T0:%.*]] = load i8** +// CHECK: [[T0:%.*]] = load i8*, i8** // CHECK-NEXT: call i32 @objc_sync_exit(i8* [[T0]]) // CHECK-NEXT: [[T0:%.*]] = call i8* @objc_exception_extract([[BUF_T]]* [[BUF]]) // CHECK-NEXT: call void @objc_exception_throw(i8* [[T0]]) @@ -75,6 +75,6 @@ void test1(id obj, bool *failed) { // CHECK-NEXT: resume // Catch handler. Reload of 'failed' address is unnecessary. 
-// CHECK: [[T0:%.*]] = load i8** +// CHECK: [[T0:%.*]] = load i8*, i8** // CHECK-NEXT: store i8 1, i8* [[T0]], // CHECK-NEXT: br label diff --git a/test/CodeGenObjCXX/gc.mm b/test/CodeGenObjCXX/gc.mm index 0f96d9e264..2e2ad0f41e 100644 --- a/test/CodeGenObjCXX/gc.mm +++ b/test/CodeGenObjCXX/gc.mm @@ -12,9 +12,9 @@ namespace test0 { // CHECK-LABEL: define void @_ZN5test01AC2Ev( // CHECK: [[THIS:%.*]] = alloca [[TEST0:%.*]]*, align 8 // CHECK-NEXT: store -// CHECK-NEXT: [[T0:%.*]] = load [[TEST0]]** [[THIS]] +// CHECK-NEXT: [[T0:%.*]] = load [[TEST0]]*, [[TEST0]]** [[THIS]] // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[TEST0]], [[TEST0]]* [[T0]], i32 0, i32 0 -// CHECK-NEXT: [[T2:%.*]] = load i8** @_ZN5test01xE +// CHECK-NEXT: [[T2:%.*]] = load i8*, i8** @_ZN5test01xE // CHECK-NEXT: call i8* @objc_assign_strongCast(i8* [[T2]], i8** [[T1]]) // CHECK-NEXT: ret void } diff --git a/test/CodeGenObjCXX/lambda-expressions.mm b/test/CodeGenObjCXX/lambda-expressions.mm index 175b68344b..4d9b8d0bd3 100644 --- a/test/CodeGenObjCXX/lambda-expressions.mm +++ b/test/CodeGenObjCXX/lambda-expressions.mm @@ -65,7 +65,7 @@ void take_block(void (^block)()) { block(); } // Check lines for BlockInLambda test below // ARC-LABEL: define internal i32 @___ZZN13BlockInLambda1X1fEvENKUlvE_clEv_block_invoke // ARC: [[Y:%.*]] = getelementptr inbounds %"struct.BlockInLambda::X", %"struct.BlockInLambda::X"* {{.*}}, i32 0, i32 1 -// ARC-NEXT: [[YVAL:%.*]] = load i32* [[Y]], align 4 +// ARC-NEXT: [[YVAL:%.*]] = load i32, i32* [[Y]], align 4 // ARC-NEXT: ret i32 [[YVAL]] typedef int (^fptr)(); diff --git a/test/CodeGenObjCXX/lvalue-reference-getter.mm b/test/CodeGenObjCXX/lvalue-reference-getter.mm index 87c132080a..3547ebe917 100644 --- a/test/CodeGenObjCXX/lvalue-reference-getter.mm +++ b/test/CodeGenObjCXX/lvalue-reference-getter.mm @@ -22,7 +22,7 @@ static SetSection gSetSection; @end // CHECK: [[SELF:%.*]] = alloca [[T6:%.*]]*, align -// CHECK: [[T0:%.*]] = load {{.*}}* [[SELF]], align -// CHECK: [[T1:%.*]] = load {{.*}}* @OBJC_SELECTOR_REFERENCES_ +// CHECK: [[T0:%.*]] = load {{.*}}, {{.*}}* [[SELF]], align +// CHECK: [[T1:%.*]] = load {{.*}}, {{.*}}* @OBJC_SELECTOR_REFERENCES_ // CHECK: [[C:%.*]] = call dereferenceable({{[0-9]+}}) %struct.SetSection* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend // CHECK: call dereferenceable({{[0-9]+}}) i32* @_ZN10SetSection2atEi(%struct.SetSection* [[C]] diff --git a/test/CodeGenObjCXX/message-reference.mm b/test/CodeGenObjCXX/message-reference.mm index 6b341f866b..7a1b96e028 100644 --- a/test/CodeGenObjCXX/message-reference.mm +++ b/test/CodeGenObjCXX/message-reference.mm @@ -16,5 +16,5 @@ @end // CHECK: [[T:%.*]] = call dereferenceable({{[0-9]+}}) i32* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend -// CHECK: [[U:%.*]] = load i32* [[T]] +// CHECK: [[U:%.*]] = load i32, i32* [[T]] // CHECK [[V:%.*]] = icmp eq i32 [[U]], 0 diff --git a/test/CodeGenObjCXX/message.mm b/test/CodeGenObjCXX/message.mm index af3d81f16c..b294244875 100644 --- a/test/CodeGenObjCXX/message.mm +++ b/test/CodeGenObjCXX/message.mm @@ -17,7 +17,7 @@ namespace test0 { template void foo(); // CHECK-LABEL: define weak_odr void @_ZN5test03fooIiEEvv() // CHECK: [[T0:%.*]] = call [[TEST0:%.*]]* @_ZN5test01AcvP5Test0Ev( - // CHECK-NEXT: [[T1:%.*]] = load i8** + // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** // CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST0]]* [[T0]] to i8* // CHECK-NEXT: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*)*)(i8* [[T2]], i8* [[T1]]) // CHECK-NEXT: ret void diff --git 
a/test/CodeGenObjCXX/property-lvalue-capture.mm b/test/CodeGenObjCXX/property-lvalue-capture.mm index 1242246b24..26c6db6b44 100644 --- a/test/CodeGenObjCXX/property-lvalue-capture.mm +++ b/test/CodeGenObjCXX/property-lvalue-capture.mm @@ -24,10 +24,10 @@ typedef Quad2 Quad2d; } @end -// CHECK: [[TWO:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_, !invariant.load ![[MD_NUM:[0-9]+]] +// CHECK: [[TWO:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_, !invariant.load ![[MD_NUM:[0-9]+]] // CHECK: [[THREE:%.*]] = bitcast [[ONET:%.*]]* [[ONE:%.*]] to i8* // CHECK: [[CALL:%.*]] = call nonnull %struct.Quad2* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %struct.Quad2* (i8*, i8*)*)(i8* [[THREE]], i8* [[TWO]]) -// CHECK: [[FOUR:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_2, !invariant.load ![[MD_NUM]] +// CHECK: [[FOUR:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_2, !invariant.load ![[MD_NUM]] // CHECK: [[FIVE:%.*]] = bitcast [[ONET]]* [[ZERO:%.*]] to i8* // CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %struct.Quad2*)*)(i8* [[FIVE]], i8* [[FOUR]], %struct.Quad2* nonnull [[CALL]]) @@ -46,8 +46,8 @@ void test(C *c, const A &a) { const A &result = c.prop = a; } -// CHECK: [[ONE1:%.*]] = load %struct.A** [[AADDR:%.*]], align 8 -// CHECK: [[TWO1:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_5, !invariant.load ![[MD_NUM]] +// CHECK: [[ONE1:%.*]] = load %struct.A*, %struct.A** [[AADDR:%.*]], align 8 +// CHECK: [[TWO1:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_5, !invariant.load ![[MD_NUM]] // CHECK: [[THREE1:%.*]] = bitcast [[TWOT:%.*]]* [[ZERO1:%.*]] to i8* // CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %struct.A*)*)(i8* [[THREE1]], i8* [[TWO1]], %struct.A* dereferenceable({{[0-9]+}}) [[ONE1]]) // CHECK: store %struct.A* [[ONE1]], %struct.A** [[RESULT:%.*]], align 8 diff --git a/test/CodeGenObjCXX/property-object-reference-1.mm b/test/CodeGenObjCXX/property-object-reference-1.mm index 79bf9e4f31..4eb7713290 100644 --- a/test/CodeGenObjCXX/property-object-reference-1.mm +++ b/test/CodeGenObjCXX/property-object-reference-1.mm @@ -26,6 +26,6 @@ struct TCPPObject // CHECK: [[cppObjectaddr:%.*]] = alloca %struct.TCPPObject*, align 8 // CHECK: store %struct.TCPPObject* [[cppObject:%.*]], %struct.TCPPObject** [[cppObjectaddr]], align 8 -// CHECK: [[THREE:%.*]] = load %struct.TCPPObject** [[cppObjectaddr]], align 8 +// CHECK: [[THREE:%.*]] = load %struct.TCPPObject*, %struct.TCPPObject** [[cppObjectaddr]], align 8 // CHECK: [[FOUR:%.*]] = bitcast %struct.TCPPObject* [[THREE]] to i8* // CHECK: call void @objc_copyStruct(i8* [[TWO:%.*]], i8* [[FOUR]], i64 256, i1 zeroext true, i1 zeroext false) diff --git a/test/CodeGenObjCXX/property-object-reference-2.mm b/test/CodeGenObjCXX/property-object-reference-2.mm index 20949f71af..87cebc1083 100644 --- a/test/CodeGenObjCXX/property-object-reference-2.mm +++ b/test/CodeGenObjCXX/property-object-reference-2.mm @@ -30,8 +30,8 @@ struct TCPPObject @end // CHECK-LABEL: define internal void @__copy_helper_atomic_property_( -// CHECK: [[TWO:%.*]] = load %struct.TCPPObject** [[ADDR:%.*]], align 8 -// CHECK: [[THREE:%.*]] = load %struct.TCPPObject** [[ADDR1:%.*]], align 8 +// CHECK: [[TWO:%.*]] = load %struct.TCPPObject*, %struct.TCPPObject** [[ADDR:%.*]], align 8 +// CHECK: [[THREE:%.*]] = load %struct.TCPPObject*, %struct.TCPPObject** [[ADDR1:%.*]], align 8 // CHECK: [[CALL:%.*]] = call i32 @_Z7DEFAULTv() // CHECK: call void @_ZN10TCPPObjectC1ERKS_i(%struct.TCPPObject* [[TWO]], %struct.TCPPObject* 
dereferenceable({{[0-9]+}}) [[THREE]], i32 [[CALL]]) // CHECK: ret void @@ -44,8 +44,8 @@ struct TCPPObject // CHECK: ret void // CHECK-LABEL: define internal void @__assign_helper_atomic_property_( -// CHECK: [[TWO:%.*]] = load %struct.TCPPObject** [[ADDR:%.*]], align 8 -// CHECK: [[THREE:%.*]] = load %struct.TCPPObject** [[ADDR1:%.*]], align 8 +// CHECK: [[TWO:%.*]] = load %struct.TCPPObject*, %struct.TCPPObject** [[ADDR:%.*]], align 8 +// CHECK: [[THREE:%.*]] = load %struct.TCPPObject*, %struct.TCPPObject** [[ADDR1:%.*]], align 8 // CHECK: [[CALL:%.*]] = call dereferenceable({{[0-9]+}}) %struct.TCPPObject* @_ZN10TCPPObjectaSERKS_(%struct.TCPPObject* [[TWO]], %struct.TCPPObject* dereferenceable({{[0-9]+}}) [[THREE]]) // CHECK: ret void diff --git a/test/CodeGenObjCXX/property-object-reference.mm b/test/CodeGenObjCXX/property-object-reference.mm index 691e6fff8d..766b3d32ca 100644 --- a/test/CodeGenObjCXX/property-object-reference.mm +++ b/test/CodeGenObjCXX/property-object-reference.mm @@ -25,11 +25,11 @@ static Foo gFoo; @end // CHECK: [[T0:%.*]] = load {{%.*}} [[S0:%.*]] -// CHECK: load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK: [[T2:%.*]] = bitcast {{%.*}} [[T0]] to i8* // CHECK: @objc_msgSend // CHECK: [[R0:%.*]] = load {{%.*}} [[U0:%.*]] -// CHECK: load i8** @OBJC_SELECTOR_REFERENCES_ +// CHECK: load i8*, i8** @OBJC_SELECTOR_REFERENCES_ // CHECK: [[R2:%.*]] = bitcast {{%.*}} [[R0]] to i8* // CHECK: @objc_msgSend diff --git a/test/CodeGenObjCXX/property-reference.mm b/test/CodeGenObjCXX/property-reference.mm index 849b606267..db7f5a6215 100644 --- a/test/CodeGenObjCXX/property-reference.mm +++ b/test/CodeGenObjCXX/property-reference.mm @@ -42,7 +42,7 @@ namespace test1 { @end // CHECK: define internal dereferenceable({{[0-9]+}}) [[A:%.*]]* @"\01-[Test1 prop1]"( // CHECK: [[SELF:%.*]] = alloca [[TEST1:%.*]]*, align 8 -// CHECK: [[T0:%.*]] = load [[TEST1]]** [[SELF]] +// CHECK: [[T0:%.*]] = load [[TEST1]]*, [[TEST1]]** [[SELF]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST1]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8, i8* [[T1]], i64 0 // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to [[A]]* @@ -68,10 +68,10 @@ template void test2(Test2*); // CHECK: [[X:%.*]] = alloca i32, // CHECK: @objc_msgSend // CHECK: store i32 {{%.*}}, i32* [[X]], -// CHECK: load i32* [[X]], +// CHECK: load i32, i32* [[X]], // CHECK: @objc_msgSend // CHECK: @objc_msgSend -// CHECK: load i32* [[X]], +// CHECK: load i32, i32* [[X]], // CHECK-NEXT: add nsw // CHECK: @objc_msgSend // CHECK-NEXT: ret void @@ -87,10 +87,10 @@ template void test3(Test2*); // CHECK: [[X:%.*]] = alloca i32, // CHECK: @objc_msgSend // CHECK: store i32 {{%.*}}, i32* [[X]], -// CHECK: load i32* [[X]], +// CHECK: load i32, i32* [[X]], // CHECK: @objc_msgSend // CHECK: @objc_msgSend -// CHECK: load i32* [[X]], +// CHECK: load i32, i32* [[X]], // CHECK-NEXT: add nsw // CHECK: @objc_msgSend // CHECK-NEXT: ret void diff --git a/test/Modules/templates.mm b/test/Modules/templates.mm index d60b873d0f..b850a69122 100644 --- a/test/Modules/templates.mm +++ b/test/Modules/templates.mm @@ -86,8 +86,8 @@ unsigned testMixedStruct() { // CHECK: call void @_Z10useListIntR4ListIiE(%[[ListInt]]* dereferenceable({{[0-9]+}}) %[[r]]) useListInt(r); - // CHECK: load i32* bitcast (i8* getelementptr inbounds (i8* bitcast ({{.*}}* @list_left to i8*), i64 8) to i32*) - // CHECK: load i32* bitcast (i8* getelementptr inbounds (i8* bitcast ({{.*}}* @list_right to i8*), i64 8) to i32*) + // 
CHECK: load i32, i32* bitcast (i8* getelementptr inbounds (i8* bitcast ({{.*}}* @list_left to i8*), i64 8) to i32*) + // CHECK: load i32, i32* bitcast (i8* getelementptr inbounds (i8* bitcast ({{.*}}* @list_right to i8*), i64 8) to i32*) return list_left.*size_right + list_right.*size_left; } diff --git a/test/OpenMP/atomic_read_codegen.c b/test/OpenMP/atomic_read_codegen.c index c9bbf1bfd0..57d539f45e 100644 --- a/test/OpenMP/atomic_read_codegen.c +++ b/test/OpenMP/atomic_read_codegen.c @@ -75,64 +75,64 @@ float2 float2x; register int rix __asm__("0"); int main() { -// CHECK: load atomic i8* +// CHECK: load atomic i8, i8* // CHECK: store i8 #pragma omp atomic read bv = bx; -// CHECK: load atomic i8* +// CHECK: load atomic i8, i8* // CHECK: store i8 #pragma omp atomic read cv = cx; -// CHECK: load atomic i8* +// CHECK: load atomic i8, i8* // CHECK: store i8 #pragma omp atomic read ucv = ucx; -// CHECK: load atomic i16* +// CHECK: load atomic i16, i16* // CHECK: store i16 #pragma omp atomic read sv = sx; -// CHECK: load atomic i16* +// CHECK: load atomic i16, i16* // CHECK: store i16 #pragma omp atomic read usv = usx; -// CHECK: load atomic i32* +// CHECK: load atomic i32, i32* // CHECK: store i32 #pragma omp atomic read iv = ix; -// CHECK: load atomic i32* +// CHECK: load atomic i32, i32* // CHECK: store i32 #pragma omp atomic read uiv = uix; -// CHECK: load atomic i64* +// CHECK: load atomic i64, i64* // CHECK: store i64 #pragma omp atomic read lv = lx; -// CHECK: load atomic i64* +// CHECK: load atomic i64, i64* // CHECK: store i64 #pragma omp atomic read ulv = ulx; -// CHECK: load atomic i64* +// CHECK: load atomic i64, i64* // CHECK: store i64 #pragma omp atomic read llv = llx; -// CHECK: load atomic i64* +// CHECK: load atomic i64, i64* // CHECK: store i64 #pragma omp atomic read ullv = ullx; -// CHECK: load atomic i32* bitcast (float* +// CHECK: load atomic i32, i32* bitcast (float* // CHECK: bitcast i32 {{.*}} to float // CHECK: store float #pragma omp atomic read fv = fx; -// CHECK: load atomic i64* bitcast (double* +// CHECK: load atomic i64, i64* bitcast (double* // CHECK: bitcast i64 {{.*}} to double // CHECK: store double #pragma omp atomic read dv = dx; -// CHECK: [[LD:%.+]] = load atomic i128* bitcast (x86_fp80* +// CHECK: [[LD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128* // CHECK: store i128 [[LD]], i128* [[BITCAST]] -// CHECK: [[LD:%.+]] = load x86_fp80* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* [[LDTEMP]] // CHECK: store x86_fp80 [[LD]] #pragma omp atomic read ldv = ldx; @@ -152,33 +152,33 @@ int main() { // CHECK: store double #pragma omp atomic seq_cst read cdv = cdx; -// CHECK: load atomic i64* +// CHECK: load atomic i64, i64* // CHECK: store i8 #pragma omp atomic read bv = ulx; -// CHECK: load atomic i8* +// CHECK: load atomic i8, i8* // CHECK: store i8 #pragma omp atomic read cv = bx; -// CHECK: load atomic i8* +// CHECK: load atomic i8, i8* // CHECK: call{{.*}} @__kmpc_flush( // CHECK: store i8 #pragma omp atomic read, seq_cst ucv = cx; -// CHECK: load atomic i64* +// CHECK: load atomic i64, i64* // CHECK: store i16 #pragma omp atomic read sv = ulx; -// CHECK: load atomic i64* +// CHECK: load atomic i64, i64* // CHECK: store i16 #pragma omp atomic read usv = lx; -// CHECK: load atomic i32* +// CHECK: load atomic i32, i32* // CHECK: call{{.*}} @__kmpc_flush( // CHECK: store i32 #pragma omp atomic seq_cst, read iv = uix; -// CHECK: load atomic i32* +// CHECK: load atomic i32, 
i32* // CHECK: store i32 #pragma omp atomic read uiv = ix; @@ -186,15 +186,15 @@ int main() { // CHECK: store i64 #pragma omp atomic read lv = cix; -// CHECK: load atomic i32* +// CHECK: load atomic i32, i32* // CHECK: store i64 #pragma omp atomic read ulv = fx; -// CHECK: load atomic i64* +// CHECK: load atomic i64, i64* // CHECK: store i64 #pragma omp atomic read llv = dx; -// CHECK: load atomic i128* +// CHECK: load atomic i128, i128* // CHECK: store i64 #pragma omp atomic read ullv = ldx; @@ -202,40 +202,40 @@ int main() { // CHECK: store float #pragma omp atomic read fv = cix; -// CHECK: load atomic i16* +// CHECK: load atomic i16, i16* // CHECK: store double #pragma omp atomic read dv = sx; -// CHECK: load atomic i8* +// CHECK: load atomic i8, i8* // CHECK: store x86_fp80 #pragma omp atomic read ldv = bx; -// CHECK: load atomic i8* +// CHECK: load atomic i8, i8* // CHECK: store i32 // CHECK: store i32 #pragma omp atomic read civ = bx; -// CHECK: load atomic i16* +// CHECK: load atomic i16, i16* // CHECK: store float // CHECK: store float #pragma omp atomic read cfv = usx; -// CHECK: load atomic i64* +// CHECK: load atomic i64, i64* // CHECK: store double // CHECK: store double #pragma omp atomic read cdv = llx; -// CHECK: [[I128VAL:%.+]] = load atomic i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic +// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic // CHECK: [[I128PTR:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128* // CHECK: store i128 [[I128VAL]], i128* [[I128PTR]] -// CHECK: [[LD:%.+]] = load <4 x i32>* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]] // CHECK: extractelement <4 x i32> [[LD]] // CHECK: store i8 #pragma omp atomic read bv = int4x[0]; -// CHECK: [[LD:%.+]] = load atomic i32* bitcast (i8* getelementptr (i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic +// CHECK: [[LD:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic // CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]] -// CHECK: [[LD:%.+]] = load i32* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1 // CHECK: ashr i32 [[SHL]], 1 // CHECK: store x86_fp80 @@ -243,29 +243,29 @@ int main() { ldv = bfx.a; // CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8* // CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8* bitcast (%struct.BitFields_packed* @bfx_packed to i8*), i64 4), i8* [[LDTEMP_VOID_PTR]], i32 0) -// CHECK: [[LD:%.+]] = load i32* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1 // CHECK: ashr i32 [[SHL]], 1 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx_packed.a; -// CHECK: [[LD:%.+]] = load atomic i32* getelementptr inbounds (%struct.BitFields2* @bfx2, i32 0, i32 0) monotonic +// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2* @bfx2, i32 0, i32 0) monotonic // CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]] -// CHECK: [[LD:%.+]] = load i32* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: ashr i32 [[LD]], 31 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx2.a; -// CHECK: [[LD:%.+]] = load atomic i8* getelementptr (i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic +// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr (i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic // CHECK: 
store i8 [[LD]], i8* [[LDTEMP:%.+]] -// CHECK: [[LD:%.+]] = load i8* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: ashr i8 [[LD]], 7 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx2_packed.a; -// CHECK: [[LD:%.+]] = load atomic i32* getelementptr inbounds (%struct.BitFields3* @bfx3, i32 0, i32 0) monotonic +// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3* @bfx3, i32 0, i32 0) monotonic // CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]] -// CHECK: [[LD:%.+]] = load i32* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i32 [[LD]], 7 // CHECK: ashr i32 [[SHL]], 18 // CHECK: store x86_fp80 @@ -273,51 +273,51 @@ int main() { ldv = bfx3.a; // CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i24* [[LDTEMP:%.+]] to i8* // CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8* bitcast (%struct.BitFields3_packed* @bfx3_packed to i8*), i64 1), i8* [[LDTEMP_VOID_PTR]], i32 0) -// CHECK: [[LD:%.+]] = load i24* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load i24, i24* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i24 [[LD]], 7 // CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10 // CHECK: sext i24 [[ASHR]] to i32 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx3_packed.a; -// CHECK: [[LD:%.+]] = load atomic i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic +// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic // CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]] -// CHECK: [[LD:%.+]] = load i64* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i64 [[LD]], 47 // CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 63 // CHECK: trunc i64 [[ASHR]] to i32 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx4.a; -// CHECK: [[LD:%.+]] = load atomic i8* getelementptr inbounds (%struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic +// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic // CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]] -// CHECK: [[LD:%.+]] = load i8* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7 // CHECK: [[ASHR:%.+]] = ashr i8 [[SHL]], 7 // CHECK: sext i8 [[ASHR]] to i32 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx4_packed.a; -// CHECK: [[LD:%.+]] = load atomic i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic +// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic // CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]] -// CHECK: [[LD:%.+]] = load i64* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i64 [[LD]], 40 // CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 57 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx4.b; -// CHECK: [[LD:%.+]] = load atomic i8* getelementptr inbounds (%struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic +// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic // CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]] -// CHECK: [[LD:%.+]] = load i8* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1 // CHECK: sext i8 [[ASHR]] to i64 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx4_packed.b; -// CHECK: [[LD:%.+]] = load atomic i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic +// 
CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic // CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64* // CHECK: store i64 [[LD]], i64* [[BITCAST]] -// CHECK: [[LD:%.+]] = load <2 x float>* [[LDTEMP]] +// CHECK: [[LD:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]] // CHECK: extractelement <2 x float> [[LD]] // CHECK: store i64 #pragma omp atomic read diff --git a/test/OpenMP/atomic_write_codegen.c b/test/OpenMP/atomic_write_codegen.c index 17eba07449..b1c5627466 100644 --- a/test/OpenMP/atomic_write_codegen.c +++ b/test/OpenMP/atomic_write_codegen.c @@ -75,71 +75,71 @@ float2 float2x; register int rix __asm__("0"); int main() { -// CHECK: load i8* +// CHECK: load i8, i8* // CHECK: store atomic i8 #pragma omp atomic write bx = bv; -// CHECK: load i8* +// CHECK: load i8, i8* // CHECK: store atomic i8 #pragma omp atomic write cx = cv; -// CHECK: load i8* +// CHECK: load i8, i8* // CHECK: store atomic i8 #pragma omp atomic write ucx = ucv; -// CHECK: load i16* +// CHECK: load i16, i16* // CHECK: store atomic i16 #pragma omp atomic write sx = sv; -// CHECK: load i16* +// CHECK: load i16, i16* // CHECK: store atomic i16 #pragma omp atomic write usx = usv; -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: store atomic i32 #pragma omp atomic write ix = iv; -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: store atomic i32 #pragma omp atomic write uix = uiv; -// CHECK: load i64* +// CHECK: load i64, i64* // CHECK: store atomic i64 #pragma omp atomic write lx = lv; -// CHECK: load i64* +// CHECK: load i64, i64* // CHECK: store atomic i64 #pragma omp atomic write ulx = ulv; -// CHECK: load i64* +// CHECK: load i64, i64* // CHECK: store atomic i64 #pragma omp atomic write llx = llv; -// CHECK: load i64* +// CHECK: load i64, i64* // CHECK: store atomic i64 #pragma omp atomic write ullx = ullv; -// CHECK: load float* +// CHECK: load float, float* // CHECK: bitcast float {{.*}} to i32 // CHECK: store atomic i32 {{.*}}, i32* bitcast (float* #pragma omp atomic write fx = fv; -// CHECK: load double* +// CHECK: load double, double* // CHECK: bitcast double {{.*}} to i64 // CHECK: store atomic i64 {{.*}}, i64* bitcast (double* #pragma omp atomic write dx = dv; -// CHECK: [[LD:%.+]] = load x86_fp80* +// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[LD]], x86_fp80* [[LDTEMP]] // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128* -// CHECK: [[LD:%.+]] = load i128* [[BITCAST]] +// CHECK: [[LD:%.+]] = load i128, i128* [[BITCAST]] // CHECK: store atomic i128 [[LD]], i128* bitcast (x86_fp80* #pragma omp atomic write ldx = ldv; -// CHECK: [[REAL_VAL:%.+]] = load i32* getelementptr inbounds ({ i32, i32 }* @{{.*}}, i32 0, i32 0) -// CHECK: [[IMG_VAL:%.+]] = load i32* getelementptr inbounds ({ i32, i32 }* @{{.*}}, i32 0, i32 1) +// CHECK: [[REAL_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }* @{{.*}}, i32 0, i32 0) +// CHECK: [[IMG_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }* @{{.*}}, i32 0, i32 1) // CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0 // CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1 // CHECK: store i32 [[REAL_VAL]], i32* [[TEMP_REAL_REF]] @@ -148,8 +148,8 @@ int main() { // CHECK: 
call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.*}} to i8*), i8* [[BITCAST]], i32 0) #pragma omp atomic write cix = civ; -// CHECK: [[REAL_VAL:%.+]] = load float* getelementptr inbounds ({ float, float }* @{{.*}}, i32 0, i32 0) -// CHECK: [[IMG_VAL:%.+]] = load float* getelementptr inbounds ({ float, float }* @{{.*}}, i32 0, i32 1) +// CHECK: [[REAL_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }* @{{.*}}, i32 0, i32 0) +// CHECK: [[IMG_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }* @{{.*}}, i32 0, i32 1) // CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0 // CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1 // CHECK: store float [[REAL_VAL]], float* [[TEMP_REAL_REF]] @@ -158,8 +158,8 @@ int main() { // CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ float, float }* @{{.*}} to i8*), i8* [[BITCAST]], i32 0) #pragma omp atomic write cfx = cfv; -// CHECK: [[REAL_VAL:%.+]] = load double* getelementptr inbounds ({ double, double }* @{{.*}}, i32 0, i32 0) -// CHECK: [[IMG_VAL:%.+]] = load double* getelementptr inbounds ({ double, double }* @{{.*}}, i32 0, i32 1) +// CHECK: [[REAL_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }* @{{.*}}, i32 0, i32 0) +// CHECK: [[IMG_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }* @{{.*}}, i32 0, i32 1) // CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0 // CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1 // CHECK: store double [[REAL_VAL]], double* [[TEMP_REAL_REF]] @@ -169,37 +169,37 @@ int main() { // CHECK: call{{.*}} @__kmpc_flush( #pragma omp atomic seq_cst write cdx = cdv; -// CHECK: load i8* +// CHECK: load i8, i8* // CHECK: store atomic i64 #pragma omp atomic write ulx = bv; -// CHECK: load i8* +// CHECK: load i8, i8* // CHECK: store atomic i8 #pragma omp atomic write bx = cv; -// CHECK: load i8* +// CHECK: load i8, i8* // CHECK: store atomic i8 // CHECK: call{{.*}} @__kmpc_flush( #pragma omp atomic write, seq_cst cx = ucv; -// CHECK: load i16* +// CHECK: load i16, i16* // CHECK: store atomic i64 #pragma omp atomic write ulx = sv; -// CHECK: load i16* +// CHECK: load i16, i16* // CHECK: store atomic i64 #pragma omp atomic write lx = usv; -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: store atomic i32 // CHECK: call{{.*}} @__kmpc_flush( #pragma omp atomic seq_cst, write uix = iv; -// CHECK: load i32* +// CHECK: load i32, i32* // CHECK: store atomic i32 #pragma omp atomic write ix = uiv; -// CHECK: load i64* +// CHECK: load i64, i64* // CHECK: [[VAL:%.+]] = trunc i64 %{{.*}} to i32 // CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0 // CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1 @@ -209,25 +209,25 @@ int main() { // CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* [[BITCAST]], i32 0) #pragma omp atomic write cix = lv; -// CHECK: load i64* +// CHECK: load i64, i64* // CHECK: store atomic i32 %{{.+}}, i32* bitcast (float* #pragma omp atomic write fx = ulv; -// CHECK: load i64* +// CHECK: load i64, i64* // CHECK: store atomic i64 %{{.+}}, i64* bitcast (double* #pragma omp atomic write dx = llv; -// 
CHECK: load i64* +// CHECK: load i64, i64* // CHECK: [[VAL:%.+]] = uitofp i64 %{{.+}} to x86_fp80 // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[VAL]], x86_fp80* [[TEMP]] // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128* -// CHECK: [[VAL:%.+]] = load i128* [[BITCAST]] +// CHECK: [[VAL:%.+]] = load i128, i128* [[BITCAST]] // CHECK: store atomic i128 [[VAL]], i128* bitcast (x86_fp80* #pragma omp atomic write ldx = ullv; -// CHECK: load float* +// CHECK: load float, float* // CHECK: [[VAL:%.+]] = fptosi float %{{.*}} to i32 // CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0 // CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1 @@ -237,43 +237,43 @@ int main() { // CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* [[BITCAST]], i32 0) #pragma omp atomic write cix = fv; -// CHECK: load double* +// CHECK: load double, double* // CHECK: store atomic i16 #pragma omp atomic write sx = dv; -// CHECK: load x86_fp80* +// CHECK: load x86_fp80, x86_fp80* // CHECK: store atomic i8 #pragma omp atomic write bx = ldv; -// CHECK: load i32* getelementptr inbounds ({ i32, i32 }* @{{.+}}, i32 0, i32 0) -// CHECK: load i32* getelementptr inbounds ({ i32, i32 }* @{{.+}}, i32 0, i32 1) +// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }* @{{.+}}, i32 0, i32 0) +// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }* @{{.+}}, i32 0, i32 1) // CHECK: icmp ne i32 %{{.+}}, 0 // CHECK: icmp ne i32 %{{.+}}, 0 // CHECK: or i1 // CHECK: store atomic i8 #pragma omp atomic write bx = civ; -// CHECK: load float* getelementptr inbounds ({ float, float }* @{{.*}}, i32 0, i32 0) +// CHECK: load float, float* getelementptr inbounds ({ float, float }* @{{.*}}, i32 0, i32 0) // CHECK: store atomic i16 #pragma omp atomic write usx = cfv; -// CHECK: load double* getelementptr inbounds ({ double, double }* @{{.+}}, i32 0, i32 0) +// CHECK: load double, double* getelementptr inbounds ({ double, double }* @{{.+}}, i32 0, i32 0) // CHECK: store atomic i64 #pragma omp atomic write llx = cdv; -// CHECK: [[IDX:%.+]] = load i16* @{{.+}} -// CHECK: load i8* +// CHECK: [[IDX:%.+]] = load i16, i16* @{{.+}} +// CHECK: load i8, i8* // CHECK: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32 -// CHECK: [[I128VAL:%.+]] = load atomic i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic +// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic // CHECK: [[LD:%.+]] = bitcast i128 [[I128VAL]] to <4 x i32> // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_VEC_VAL:%.+]] = phi <4 x i32> [ [[LD]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: store <4 x i32> [[OLD_VEC_VAL]], <4 x i32>* [[LDTEMP:%.+]], -// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>* [[LDTEMP]] +// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]] // CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[VEC_ITEM_VAL]], i16 [[IDX]] // CHECK: store <4 x i32> [[NEW_VEC_VAL]], <4 x i32>* [[LDTEMP]] -// CHECK: [[NEW_VEC_VAL:%.+]] = load <4 x i32>* [[LDTEMP]] +// CHECK: [[NEW_VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]] // CHECK: [[OLD_I128:%.+]] = bitcast <4 x i32> [[OLD_VEC_VAL]] to i128 // CHECK: [[NEW_I128:%.+]] = bitcast <4 x i32> [[NEW_VEC_VAL]] to i128 // 
CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (<4 x i32>* [[DEST]] to i128*), i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic @@ -284,9 +284,9 @@ int main() { // CHECK: [[EXIT]] #pragma omp atomic write int4x[sv] = bv; -// CHECK: load x86_fp80* @{{.+}} +// CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 -// CHECK: [[PREV_VALUE:%.+]] = load atomic i32* bitcast (i8* getelementptr (i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic +// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] @@ -294,7 +294,7 @@ int main() { // CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -2147483648 // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]] -// CHECK: [[NEW_BF_VALUE:%.+]] = load i32* [[LDTEMP]] +// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (i8* getelementptr (i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1 @@ -302,11 +302,11 @@ int main() { // CHECK: [[EXIT]] #pragma omp atomic write bfx.a = ldv; -// CHECK: load x86_fp80* @{{.+}} +// CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 // CHECK: [[BITCAST:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8* // CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST]], i32 0) -// CHECK: [[PREV_VALUE:%.+]] = load i32* [[LDTEMP]] +// CHECK: [[PREV_VALUE:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] @@ -314,20 +314,20 @@ int main() { // CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -2147483648 // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]] -// CHECK: [[NEW_BF_VALUE:%.+]] = load i32* [[LDTEMP]] +// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP_OLD_BF_ADDR:%.+]], // CHECK: store i32 [[NEW_BF_VALUE]], i32* [[TEMP_NEW_BF_ADDR:%.+]], // CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* [[TEMP_OLD_BF_ADDR]] to i8* // CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* [[TEMP_NEW_BF_ADDR]] to i8* // CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 4, i8* getelementptr (i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0) -// CHECK: [[FAILED_OLD_VAL]] = load i32* [[TEMP_OLD_BF_ADDR]] +// CHECK: [[FAILED_OLD_VAL]] = load i32, i32* [[TEMP_OLD_BF_ADDR]] // CHECK: br i1 [[FAIL_SUCCESS]], label %[[CONT]], label %[[EXIT:.+]] // CHECK: [[EXIT]] #pragma omp atomic write bfx_packed.a = ldv; -// CHECK: load x86_fp80* @{{.+}} +// CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 -// CHECK: [[PREV_VALUE:%.+]] = load atomic i32* getelementptr inbounds (%struct.BitFields2* @{{.+}}, i32 0, i32 0) 
monotonic +// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2* @{{.+}}, i32 0, i32 0) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] @@ -336,7 +336,7 @@ int main() { // CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, 2147483647 // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]] -// CHECK: [[NEW_BF_VALUE:%.+]] = load i32* [[LDTEMP]] +// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields2* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1 @@ -344,9 +344,9 @@ int main() { // CHECK: [[EXIT]] #pragma omp atomic write bfx2.a = ldv; -// CHECK: load x86_fp80* @{{.+}} +// CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 -// CHECK: [[PREV_VALUE:%.+]] = load atomic i8* getelementptr (i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic +// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr (i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] @@ -356,7 +356,7 @@ int main() { // CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]] -// CHECK: [[NEW_BF_VALUE:%.+]] = load i8* [[LDTEMP]] +// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr (i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1 @@ -364,9 +364,9 @@ int main() { // CHECK: [[EXIT]] #pragma omp atomic write bfx2_packed.a = ldv; -// CHECK: load x86_fp80* @{{.+}} +// CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 -// CHECK: [[PREV_VALUE:%.+]] = load atomic i32* getelementptr inbounds (%struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic +// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] @@ -375,7 +375,7 @@ int main() { // CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385 // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]] -// CHECK: [[NEW_BF_VALUE:%.+]] = load i32* [[LDTEMP]] +// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields3* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1 @@ -383,12 +383,12 @@ int main() { // CHECK: [[EXIT]] #pragma omp atomic write bfx3.a = ldv; -// CHECK: load x86_fp80* @{{.+}} +// 
CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 // CHECK: [[LDTEMP:%.+]] = bitcast i32* %{{.+}} to i24* // CHECK: [[BITCAST:%.+]] = bitcast i24* %{{.+}} to i8* // CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST]], i32 0) -// CHECK: [[PREV_VALUE:%.+]] = load i24* [[LDTEMP]] +// CHECK: [[PREV_VALUE:%.+]] = load i24, i24* [[LDTEMP]] // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i24 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] @@ -398,7 +398,7 @@ int main() { // CHECK: [[BF_CLEAR:%.+]] = and i24 %{{.+}}, -131065 // CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i24 %{{.+}}, i24* [[LDTEMP:%.+]] -// CHECK: [[NEW_BF_VALUE:%.+]] = load i24* [[LDTEMP]] +// CHECK: [[NEW_BF_VALUE:%.+]] = load i24, i24* [[LDTEMP]] // CHECK: [[TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* %{{.+}} to i24* // CHECK: store i24 [[OLD_BF_VALUE]], i24* [[TEMP_OLD_BF_ADDR]] // CHECK: [[TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* %{{.+}} to i24* @@ -406,14 +406,14 @@ int main() { // CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i24* [[TEMP_OLD_BF_ADDR]] to i8* // CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i24* [[TEMP_NEW_BF_ADDR]] to i8* // CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 3, i8* getelementptr (i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0) -// CHECK: [[FAILED_OLD_VAL]] = load i24* [[TEMP_OLD_BF_ADDR]] +// CHECK: [[FAILED_OLD_VAL]] = load i24, i24* [[TEMP_OLD_BF_ADDR]] // CHECK: br i1 [[FAIL_SUCCESS]], label %[[CONT]], label %[[EXIT:.+]] // CHECK: [[EXIT]] #pragma omp atomic write bfx3_packed.a = ldv; -// CHECK: load x86_fp80* @{{.+}} +// CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 -// CHECK: [[PREV_VALUE:%.+]] = load atomic i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic +// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] @@ -423,7 +423,7 @@ int main() { // CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -65537 // CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]] -// CHECK: [[NEW_BF_VALUE:%.+]] = load i64* [[LDTEMP]] +// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1 @@ -431,9 +431,9 @@ int main() { // CHECK: [[EXIT]] #pragma omp atomic write bfx4.a = ldv; -// CHECK: load x86_fp80* @{{.+}} +// CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 -// CHECK: [[PREV_VALUE:%.+]] = load atomic i8* getelementptr inbounds (%struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic +// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], 
%[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] @@ -442,7 +442,7 @@ int main() { // CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, -2 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]] -// CHECK: [[NEW_BF_VALUE:%.+]] = load i8* [[LDTEMP]] +// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1 @@ -450,9 +450,9 @@ int main() { // CHECK: [[EXIT]] #pragma omp atomic write bfx4_packed.a = ldv; -// CHECK: load x86_fp80* @{{.+}} +// CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64 -// CHECK: [[PREV_VALUE:%.+]] = load atomic i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic +// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] @@ -461,7 +461,7 @@ int main() { // CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -16646145 // CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]] -// CHECK: [[NEW_BF_VALUE:%.+]] = load i64* [[LDTEMP]] +// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1 @@ -469,9 +469,9 @@ int main() { // CHECK: [[EXIT]] #pragma omp atomic write bfx4.b = ldv; -// CHECK: load x86_fp80* @{{.+}} +// CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64 -// CHECK: [[PREV_VALUE:%.+]] = load atomic i8* getelementptr inbounds (%struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic +// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] @@ -481,7 +481,7 @@ int main() { // CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 1 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]] -// CHECK: [[NEW_BF_VALUE:%.+]] = load i8* [[LDTEMP]] +// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1 @@ -489,18 +489,18 @@ int main() { // CHECK: [[EXIT]] #pragma omp atomic write bfx4_packed.b = ldv; -// CHECK: load i64* +// CHECK: load i64, i64* // CHECK: [[VEC_ITEM_VAL:%.+]] = uitofp i64 %{{.+}} to float -// CHECK: [[I64VAL:%.+]] = load atomic i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic +// CHECK: [[I64VAL:%.+]] = load atomic i64, i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic // CHECK: [[LD:%.+]] = bitcast i64 
[[I64VAL]] to <2 x float> // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_VEC_VAL:%.+]] = phi <2 x float> [ [[LD]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: store <2 x float> [[OLD_VEC_VAL]], <2 x float>* [[LDTEMP:%.+]], -// CHECK: [[VEC_VAL:%.+]] = load <2 x float>* [[LDTEMP]] +// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]] // CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0 // CHECK: store <2 x float> [[NEW_VEC_VAL]], <2 x float>* [[LDTEMP]] -// CHECK: [[NEW_VEC_VAL:%.+]] = load <2 x float>* [[LDTEMP]] +// CHECK: [[NEW_VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]] // CHECK: [[OLD_I64:%.+]] = bitcast <2 x float> [[OLD_VEC_VAL]] to i64 // CHECK: [[NEW_I64:%.+]] = bitcast <2 x float> [[NEW_VEC_VAL]] to i64 // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (<2 x float>* [[DEST]] to i64*), i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic diff --git a/test/OpenMP/for_codegen.cpp b/test/OpenMP/for_codegen.cpp index 3193d84b6a..78d8c84b3e 100644 --- a/test/OpenMP/for_codegen.cpp +++ b/test/OpenMP/for_codegen.cpp @@ -13,22 +13,22 @@ void without_schedule_clause(float *a, float *b, float *c, float *d) { #pragma omp for // CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1) // UB = min(UB, GlobalUB) -// CHECK-NEXT: [[UB:%.+]] = load i32* [[OMP_UB]] +// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]] // CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4571423 // CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]] // CHECK: [[UBRESULT:%.+]] = phi i32 [ 4571423, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ] // CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]] -// CHECK-NEXT: [[LB:%.+]] = load i32* [[OMP_LB]] +// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]] // CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]] // Loop header -// CHECK: [[IV:%.+]] = load i32* [[OMP_IV]] -// CHECK-NEXT: [[UB:%.+]] = load i32* [[OMP_UB]] +// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]] +// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]] // CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]] // CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]] for (int i = 33; i < 32000000; i += 7) { // CHECK: [[LOOP1_BODY]] // Start of body: calculate i from IV: -// CHECK: [[IV1_1:%.+]] = load i32* [[OMP_IV]] +// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]] // CHECK-NEXT: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 7 // CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 33, [[CALC_I_1]] // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]] @@ -36,7 +36,7 @@ void without_schedule_clause(float *a, float *b, float *c, float *d) { // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} a[i] = b[i] * c[i] * d[i]; -// CHECK: [[IV1_2:%.+]] = load i32* [[OMP_IV]]{{.*}} +// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}} // CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1 // CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]] // CHECK-NEXT: br label %{{.+}} @@ -53,22 +53,22 @@ void static_not_chunked(float *a, float *b, float *c, float *d) { #pragma omp for schedule(static) // CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* 
[[OMP_ST:%[^,]+]], i32 1, i32 1) // UB = min(UB, GlobalUB) -// CHECK-NEXT: [[UB:%.+]] = load i32* [[OMP_UB]] +// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]] // CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4571423 // CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]] // CHECK: [[UBRESULT:%.+]] = phi i32 [ 4571423, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ] // CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]] -// CHECK-NEXT: [[LB:%.+]] = load i32* [[OMP_LB]] +// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]] // CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]] // Loop header -// CHECK: [[IV:%.+]] = load i32* [[OMP_IV]] -// CHECK-NEXT: [[UB:%.+]] = load i32* [[OMP_UB]] +// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]] +// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]] // CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]] // CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]] for (int i = 32000000; i > 33; i += -7) { // CHECK: [[LOOP1_BODY]] // Start of body: calculate i from IV: -// CHECK: [[IV1_1:%.+]] = load i32* [[OMP_IV]] +// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]] // CHECK-NEXT: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 7 // CHECK-NEXT: [[CALC_I_2:%.+]] = sub nsw i32 32000000, [[CALC_I_1]] // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]] @@ -76,7 +76,7 @@ void static_not_chunked(float *a, float *b, float *c, float *d) { // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} a[i] = b[i] * c[i] * d[i]; -// CHECK: [[IV1_2:%.+]] = load i32* [[OMP_IV]]{{.*}} +// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}} // CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1 // CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]] // CHECK-NEXT: br label %{{.+}} @@ -93,30 +93,30 @@ void static_chunked(float *a, float *b, float *c, float *d) { #pragma omp for schedule(static, 5) // CHECK: call void @__kmpc_for_static_init_4u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32 33, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 5) // UB = min(UB, GlobalUB) -// CHECK: [[UB:%.+]] = load i32* [[OMP_UB]] +// CHECK: [[UB:%.+]] = load i32, i32* [[OMP_UB]] // CHECK-NEXT: [[UBCMP:%.+]] = icmp ugt i32 [[UB]], 16908288 // CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]] // CHECK: [[UBRESULT:%.+]] = phi i32 [ 16908288, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ] // CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]] -// CHECK-NEXT: [[LB:%.+]] = load i32* [[OMP_LB]] +// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]] // CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]] // Outer loop header -// CHECK: [[O_IV:%.+]] = load i32* [[OMP_IV]] -// CHECK-NEXT: [[O_UB:%.+]] = load i32* [[OMP_UB]] +// CHECK: [[O_IV:%.+]] = load i32, i32* [[OMP_IV]] +// CHECK-NEXT: [[O_UB:%.+]] = load i32, i32* [[OMP_UB]] // CHECK-NEXT: [[O_CMP:%.+]] = icmp ule i32 [[O_IV]], [[O_UB]] // CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]] // Loop header // CHECK: [[O_LOOP1_BODY]] -// CHECK: [[IV:%.+]] = load i32* [[OMP_IV]] -// CHECK-NEXT: [[UB:%.+]] = load i32* [[OMP_UB]] +// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]] +// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]] // CHECK-NEXT: [[CMP:%.+]] = icmp ule i32 [[IV]], [[UB]] // CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]] for (unsigned i = 131071; i <= 2147483647; i += 127) 
{ // CHECK: [[LOOP1_BODY]] // Start of body: calculate i from IV: -// CHECK: [[IV1_1:%.+]] = load i32* [[OMP_IV]] +// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]] // CHECK-NEXT: [[CALC_I_1:%.+]] = mul i32 [[IV1_1]], 127 // CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 131071, [[CALC_I_1]] // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]] @@ -124,19 +124,19 @@ void static_chunked(float *a, float *b, float *c, float *d) { // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} a[i] = b[i] * c[i] * d[i]; -// CHECK: [[IV1_2:%.+]] = load i32* [[OMP_IV]]{{.*}} +// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}} // CHECK-NEXT: [[ADD1_2:%.+]] = add i32 [[IV1_2]], 1 // CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]] // CHECK-NEXT: br label %{{.+}} } // CHECK: [[LOOP1_END]] // Update the counters, adding stride -// CHECK: [[LB:%.+]] = load i32* [[OMP_LB]] -// CHECK-NEXT: [[ST:%.+]] = load i32* [[OMP_ST]] +// CHECK: [[LB:%.+]] = load i32, i32* [[OMP_LB]] +// CHECK-NEXT: [[ST:%.+]] = load i32, i32* [[OMP_ST]] // CHECK-NEXT: [[ADD_LB:%.+]] = add i32 [[LB]], [[ST]] // CHECK-NEXT: store i32 [[ADD_LB]], i32* [[OMP_LB]] -// CHECK-NEXT: [[UB:%.+]] = load i32* [[OMP_UB]] -// CHECK-NEXT: [[ST:%.+]] = load i32* [[OMP_ST]] +// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]] +// CHECK-NEXT: [[ST:%.+]] = load i32, i32* [[OMP_ST]] // CHECK-NEXT: [[ADD_UB:%.+]] = add i32 [[UB]], [[ST]] // CHECK-NEXT: store i32 [[ADD_UB]], i32* [[OMP_UB]] diff --git a/test/OpenMP/parallel_codegen.cpp b/test/OpenMP/parallel_codegen.cpp index a72b4f8669..14450c2961 100644 --- a/test/OpenMP/parallel_codegen.cpp +++ b/test/OpenMP/parallel_codegen.cpp @@ -40,7 +40,7 @@ int main (int argc, char **argv) { // CHECK-NEXT: store i32* {{%[a-z0-9.]+}}, i32** [[ARGC_REF]] // CHECK-NEXT: [[BITCAST:%.+]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8* // CHECK-NEXT: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...)* @__kmpc_fork_call(%ident_t* [[DEF_LOC_2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), i8* [[BITCAST]]) -// CHECK-NEXT: [[ARGV:%.+]] = load i8*** {{%[a-z0-9.]+}} +// CHECK-NEXT: [[ARGV:%.+]] = load i8**, i8*** {{%[a-z0-9.]+}} // CHECK-NEXT: [[RET:%.+]] = call {{[a-z]*[ ]?i32}} [[TMAIN:@.+tmain.+]](i8** [[ARGV]]) // CHECK-NEXT: ret i32 [[RET]] // CHECK-NEXT: } @@ -56,7 +56,7 @@ int main (int argc, char **argv) { // CHECK-DEBUG-NEXT: store i8* getelementptr inbounds ([{{.+}} x i8]* [[LOC1]], i32 0, i32 0), i8** [[KMPC_LOC_PSOURCE_REF]] // CHECK-DEBUG-NEXT: [[BITCAST:%.+]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8* // CHECK-DEBUG-NEXT: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...)* @__kmpc_fork_call(%ident_t* [[LOC_2_ADDR]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. 
to void (i32*, i32*, ...)*), i8* [[BITCAST]]) -// CHECK-DEBUG-NEXT: [[ARGV:%.+]] = load i8*** {{%[a-z0-9.]+}} +// CHECK-DEBUG-NEXT: [[ARGV:%.+]] = load i8**, i8*** {{%[a-z0-9.]+}} // CHECK-DEBUG-NEXT: [[RET:%.+]] = call i32 [[TMAIN:@.+tmain.+]](i8** [[ARGV]]) // CHECK-DEBUG-NEXT: ret i32 [[RET]] // CHECK-DEBUG-NEXT: } @@ -64,10 +64,10 @@ int main (int argc, char **argv) { // CHECK-LABEL: define internal void @.omp_outlined.(i32* %.global_tid., i32* %.bound_tid., %struct.anon* %__context) // CHECK: [[CONTEXT_ADDR:%.+]] = alloca %struct.anon* // CHECK: store %struct.anon* %__context, %struct.anon** [[CONTEXT_ADDR]] -// CHECK: [[CONTEXT_PTR:%.+]] = load %struct.anon** [[CONTEXT_ADDR]] +// CHECK: [[CONTEXT_PTR:%.+]] = load %struct.anon*, %struct.anon** [[CONTEXT_ADDR]] // CHECK-NEXT: [[ARGC_PTR_REF:%.+]] = getelementptr inbounds %struct.anon, %struct.anon* [[CONTEXT_PTR]], i32 0, i32 0 -// CHECK-NEXT: [[ARGC_REF:%.+]] = load i32** [[ARGC_PTR_REF]] -// CHECK-NEXT: [[ARGC:%.+]] = load i32* [[ARGC_REF]] +// CHECK-NEXT: [[ARGC_REF:%.+]] = load i32*, i32** [[ARGC_PTR_REF]] +// CHECK-NEXT: [[ARGC:%.+]] = load i32, i32* [[ARGC_REF]] // CHECK-NEXT: invoke void [[FOO:@.+foo.+]](i32{{[ ]?[a-z]*}} [[ARGC]]) // CHECK: ret void // CHECK: call void @{{.+terminate.*|abort}}( @@ -76,10 +76,10 @@ int main (int argc, char **argv) { // CHECK-DEBUG-LABEL: define internal void @.omp_outlined.(i32* %.global_tid., i32* %.bound_tid., %struct.anon* %__context) // CHECK-DEBUG: [[CONTEXT_ADDR:%.+]] = alloca %struct.anon* // CHECK-DEBUG: store %struct.anon* %__context, %struct.anon** [[CONTEXT_ADDR]] -// CHECK-DEBUG: [[CONTEXT_PTR:%.+]] = load %struct.anon** [[CONTEXT_ADDR]] +// CHECK-DEBUG: [[CONTEXT_PTR:%.+]] = load %struct.anon*, %struct.anon** [[CONTEXT_ADDR]] // CHECK-DEBUG-NEXT: [[ARGC_PTR_REF:%.+]] = getelementptr inbounds %struct.anon, %struct.anon* [[CONTEXT_PTR]], i32 0, i32 0 -// CHECK-DEBUG-NEXT: [[ARGC_REF:%.+]] = load i32** [[ARGC_PTR_REF]] -// CHECK-DEBUG-NEXT: [[ARGC:%.+]] = load i32* [[ARGC_REF]] +// CHECK-DEBUG-NEXT: [[ARGC_REF:%.+]] = load i32*, i32** [[ARGC_PTR_REF]] +// CHECK-DEBUG-NEXT: [[ARGC:%.+]] = load i32, i32* [[ARGC_REF]] // CHECK-DEBUG-NEXT: invoke void [[FOO:@.+foo.+]](i32 [[ARGC]]) // CHECK-DEBUG: ret void // CHECK-DEBUG: call void @{{.+terminate.*|abort}}( @@ -117,10 +117,10 @@ int main (int argc, char **argv) { // CHECK-LABEL: define internal void @.omp_outlined.1(i32* %.global_tid., i32* %.bound_tid., %struct.anon.0* %__context) // CHECK: [[CONTEXT_ADDR:%.+]] = alloca %struct.anon.0* // CHECK: store %struct.anon.0* %__context, %struct.anon.0** [[CONTEXT_ADDR]] -// CHECK: [[CONTEXT_PTR:%.+]] = load %struct.anon.0** [[CONTEXT_ADDR]] +// CHECK: [[CONTEXT_PTR:%.+]] = load %struct.anon.0*, %struct.anon.0** [[CONTEXT_ADDR]] // CHECK-NEXT: [[ARGC_PTR_REF:%.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* [[CONTEXT_PTR]], i32 0, i32 0 -// CHECK-NEXT: [[ARGC_REF:%.+]] = load i8**** [[ARGC_PTR_REF]] -// CHECK-NEXT: [[ARGC:%.+]] = load i8*** [[ARGC_REF]] +// CHECK-NEXT: [[ARGC_REF:%.+]] = load i8***, i8**** [[ARGC_PTR_REF]] +// CHECK-NEXT: [[ARGC:%.+]] = load i8**, i8*** [[ARGC_REF]] // CHECK-NEXT: invoke void [[FOO1:@.+foo.+]](i8** [[ARGC]]) // CHECK: ret void // CHECK: call void @{{.+terminate.*|abort}}( @@ -129,10 +129,10 @@ int main (int argc, char **argv) { // CHECK-DEBUG-LABEL: define internal void @.omp_outlined.1(i32* %.global_tid., i32* %.bound_tid., %struct.anon.0* %__context) // CHECK-DEBUG: [[CONTEXT_ADDR:%.+]] = alloca %struct.anon.0* // CHECK-DEBUG: store %struct.anon.0* 
%__context, %struct.anon.0** [[CONTEXT_ADDR]] -// CHECK-DEBUG: [[CONTEXT_PTR:%.+]] = load %struct.anon.0** [[CONTEXT_ADDR]] +// CHECK-DEBUG: [[CONTEXT_PTR:%.+]] = load %struct.anon.0*, %struct.anon.0** [[CONTEXT_ADDR]] // CHECK-DEBUG-NEXT: [[ARGC_PTR_REF:%.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* [[CONTEXT_PTR]], i32 0, i32 0 -// CHECK-DEBUG-NEXT: [[ARGC_REF:%.+]] = load i8**** [[ARGC_PTR_REF]] -// CHECK-DEBUG-NEXT: [[ARGC:%.+]] = load i8*** [[ARGC_REF]] +// CHECK-DEBUG-NEXT: [[ARGC_REF:%.+]] = load i8***, i8**** [[ARGC_PTR_REF]] +// CHECK-DEBUG-NEXT: [[ARGC:%.+]] = load i8**, i8*** [[ARGC_REF]] // CHECK-DEBUG-NEXT: invoke void [[FOO1:@.+foo.+]](i8** [[ARGC]]) // CHECK-DEBUG: ret void // CHECK-DEBUG: call void @{{.+terminate.*|abort}}( diff --git a/test/OpenMP/parallel_firstprivate_codegen.cpp b/test/OpenMP/parallel_firstprivate_codegen.cpp index 8a59bf9937..7adf8e294c 100644 --- a/test/OpenMP/parallel_firstprivate_codegen.cpp +++ b/test/OpenMP/parallel_firstprivate_codegen.cpp @@ -64,10 +64,10 @@ int main() { // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]]) // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, // LAMBDA: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]], - // LAMBDA: [[ARG:%.+]] = load %{{.+}}** [[ARG_REF]] + // LAMBDA: [[ARG:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_REF]] // LAMBDA: [[G_REF_ADDR:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}** [[G_REF_ADDR]] - // LAMBDA: [[G_VAL:%.+]] = load volatile i{{[0-9]+}}* [[G_REF]] + // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_REF_ADDR]] + // LAMBDA: [[G_VAL:%.+]] = load volatile i{{[0-9]+}}, i{{[0-9]+}}* [[G_REF]] // LAMBDA: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G_PRIVATE_ADDR]] // LAMBDA: call i32 @__kmpc_cancel_barrier( g = 1; @@ -79,9 +79,9 @@ int main() { // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]]) // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]], g = 2; - // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}** [[ARG_PTR_REF]] + // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]] // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}** [[G_PTR_REF]] + // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]] // LAMBDA: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]] }(); } @@ -102,10 +102,10 @@ int main() { // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]]) // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, // BLOCKS: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]], - // BLOCKS: [[ARG:%.+]] = load %{{.+}}** [[ARG_REF]] + // BLOCKS: [[ARG:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_REF]] // BLOCKS: [[G_REF_ADDR:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // BLOCKS: [[G_REF:%.+]] = load i{{[0-9]+}}** [[G_REF_ADDR]] - // BLOCKS: [[G_VAL:%.+]] = load volatile i{{[0-9]+}}* [[G_REF]] + // BLOCKS: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_REF_ADDR]] + // BLOCKS: [[G_VAL:%.+]] = load volatile i{{[0-9]+}}, i{{[0-9]+}}* [[G_REF]] // BLOCKS: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G_PRIVATE_ADDR]] // BLOCKS: call i32 @__kmpc_cancel_barrier( g = 1; @@ -156,11 +156,11 @@ int main() { // CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]], // 
CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], // CHECK: [[T_VAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 1 -// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}** [[T_VAR_PTR_REF]], -// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}* [[T_VAR_REF]], +// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR_PTR_REF]], +// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]], // CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_PRIV]], // CHECK: [[VEC_PTR_REF:%.+]] = getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0 -// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]** [[VEC_PTR_REF:%.+]], +// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** [[VEC_PTR_REF:%.+]], // CHECK: br label %[[VEC_PRIV_INIT:.+]] // CHECK: [[VEC_PRIV_INIT]] // CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8* @@ -169,7 +169,7 @@ int main() { // CHECK: br label %[[VEC_PRIV_INIT_END:.+]] // CHECK: [[VEC_PRIV_INIT_END]] // CHECK: [[S_ARR_REF_PTR:%.+]] = getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 2 -// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]** [[S_ARR_REF_PTR]], +// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[S_ARR_REF_PTR]], // CHECK: br label %[[S_ARR_PRIV_INIT:.+]] // CHECK: [[S_ARR_PRIV_INIT]] // CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -186,12 +186,12 @@ int main() { // CHECK: br label %[[S_ARR_PRIV_INIT_END:.+]] // CHECK: [[S_ARR_PRIV_INIT_END]] // CHECK: [[VAR_REF_PTR:%.+]] = getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 3 -// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]** [[VAR_REF_PTR]], +// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[VAR_REF_PTR]], // CHECK: call {{.*}} [[ST_TY_DEFAULT_CONSTR]]([[ST_TY]]* [[ST_TY_TEMP:%.+]]) // CHECK: call {{.*}} [[S_FLOAT_TY_COPY_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]], [[S_FLOAT_TY]]* {{.*}} [[VAR_REF]], [[ST_TY]]* [[ST_TY_TEMP]]) // CHECK: call {{.*}} [[ST_TY_DESTR]]([[ST_TY]]* [[ST_TY_TEMP]]) -// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}** [[GTID_ADDR_ADDR]] -// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}* [[GTID_REF]] +// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] +// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] // CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]]) // CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) // CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* @@ -211,11 +211,11 @@ int main() { // CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], // CHECK: [[T_VAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 1 -// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}** [[T_VAR_PTR_REF]], -// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}* [[T_VAR_REF]], +// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR_PTR_REF]], +// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]], // CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_PRIV]], // CHECK: [[VEC_PTR_REF:%.+]] = getelementptr 
inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0 -// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]** [[VEC_PTR_REF:%.+]], +// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** [[VEC_PTR_REF:%.+]], // CHECK: br label %[[VEC_PRIV_INIT:.+]] // CHECK: [[VEC_PRIV_INIT]] // CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8* @@ -224,7 +224,7 @@ int main() { // CHECK: br label %[[VEC_PRIV_INIT_END:.+]] // CHECK: [[VEC_PRIV_INIT_END]] // CHECK: [[S_ARR_REF_PTR:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 2 -// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]** [[S_ARR_REF_PTR]], +// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** [[S_ARR_REF_PTR]], // CHECK: br label %[[S_ARR_PRIV_INIT:.+]] // CHECK: [[S_ARR_PRIV_INIT]] // CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -241,12 +241,12 @@ int main() { // CHECK: br label %[[S_ARR_PRIV_INIT_END:.+]] // CHECK: [[S_ARR_PRIV_INIT_END]] // CHECK: [[VAR_REF_PTR:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 3 -// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]** [[VAR_REF_PTR]], +// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_REF_PTR]], // CHECK: call {{.*}} [[ST_TY_DEFAULT_CONSTR]]([[ST_TY]]* [[ST_TY_TEMP:%.+]]) // CHECK: call {{.*}} [[S_INT_TY_COPY_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]], [[S_INT_TY]]* {{.*}} [[VAR_REF]], [[ST_TY]]* [[ST_TY_TEMP]]) // CHECK: call {{.*}} [[ST_TY_DESTR]]([[ST_TY]]* [[ST_TY_TEMP]]) -// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}** [[GTID_ADDR_ADDR]] -// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}* [[GTID_REF]] +// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] +// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] // CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]]) // CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]]) // CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* diff --git a/test/OpenMP/parallel_if_codegen.cpp b/test/OpenMP/parallel_if_codegen.cpp index 44c874f1fb..256f410bfb 100644 --- a/test/OpenMP/parallel_if_codegen.cpp +++ b/test/OpenMP/parallel_if_codegen.cpp @@ -25,10 +25,10 @@ void gtid_test() { // CHECK: define internal void [[GTID_TEST_REGION1]](i{{.+}}* [[GTID_PARAM:%.+]], i // CHECK: store i{{[0-9]+}}* [[GTID_PARAM]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]], -// CHECK: [[GTID_ADDR:%.+]] = load i{{[0-9]+}}** [[GTID_ADDR_REF]] -// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}* [[GTID_ADDR]] +// CHECK: [[GTID_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]] +// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_ADDR]] // CHECK: call void @__kmpc_serialized_parallel(%{{.+}}* @{{.+}}, i{{.+}} [[GTID]]) -// CHECK: [[GTID_ADDR:%.+]] = load i{{[0-9]+}}** [[GTID_ADDR_REF]] +// CHECK: [[GTID_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]] // CHECK: call void [[GTID_TEST_REGION2:@.+]](i{{[0-9]+}}* [[GTID_ADDR]] // CHECK: call void @__kmpc_end_serialized_parallel(%{{.+}}* @{{.+}}, i{{.+}} [[GTID]]) // CHECK: ret void diff --git a/test/OpenMP/parallel_num_threads_codegen.cpp b/test/OpenMP/parallel_num_threads_codegen.cpp index c095e430b6..693880058a 100644 --- a/test/OpenMP/parallel_num_threads_codegen.cpp +++ b/test/OpenMP/parallel_num_threads_codegen.cpp @@ 
-49,7 +49,7 @@ int main() { // CHECK: store i8 [[S_CHAR_OP]], i8* [[A_ADDR]] // CHECK: call void @__kmpc_push_num_threads([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 [[GTID]], i32 2) // CHECK: call void {{.*}}* @__kmpc_fork_call( -// CHECK: [[A_VAL:%.+]] = load i8* [[A_ADDR]] +// CHECK: [[A_VAL:%.+]] = load i8, i8* [[A_ADDR]] // CHECK: [[RES:%.+]] = sext i8 [[A_VAL]] to i32 // CHECK: call void @__kmpc_push_num_threads([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 [[GTID]], i32 [[RES]]) // CHECK: call void {{.*}}* @__kmpc_fork_call( diff --git a/test/OpenMP/parallel_private_codegen.cpp b/test/OpenMP/parallel_private_codegen.cpp index b14e5c3f5b..7094e4f1be 100644 --- a/test/OpenMP/parallel_private_codegen.cpp +++ b/test/OpenMP/parallel_private_codegen.cpp @@ -64,9 +64,9 @@ int main() { // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]]) // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]], g = 2; - // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}** [[ARG_PTR_REF]] + // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]] // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}** [[G_PTR_REF]] + // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]] // LAMBDA: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]] }(); } @@ -143,8 +143,8 @@ int main() { // CHECK-NOT: [[T_VAR_PRIV]] // CHECK-NOT: [[VEC_PRIV]] // CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) -// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}** [[GTID_ADDR_REF]] -// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}* [[GTID_REF]] +// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]] +// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] // CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]]) // CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) // CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* @@ -171,8 +171,8 @@ int main() { // CHECK-NOT: [[T_VAR_PRIV]] // CHECK-NOT: [[VEC_PRIV]] // CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]]) -// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}** [[GTID_ADDR_REF]] -// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}* [[GTID_REF]] +// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]] +// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] // CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]]) // CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]]) // CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* diff --git a/test/OpenMP/simd_codegen.cpp b/test/OpenMP/simd_codegen.cpp index fac1427c30..db92e0e4d6 100644 --- a/test/OpenMP/simd_codegen.cpp +++ b/test/OpenMP/simd_codegen.cpp @@ -11,13 +11,13 @@ void simple(float *a, float *b, float *c, float *d) { #pragma omp simd // CHECK: store i32 0, i32* [[OMP_IV:%[^,]+]] -// CHECK: [[IV:%.+]] = load i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID:[0-9]+]] +// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID:[0-9]+]] // CHECK-NEXT: [[CMP:%.+]] = icmp slt i32 [[IV]], 6 // CHECK-NEXT: br i1 [[CMP]], label %[[SIMPLE_LOOP1_BODY:.+]], label %[[SIMPLE_LOOP1_END:[^,]+]] for (int i = 3; i < 32; i += 5) { // CHECK: [[SIMPLE_LOOP1_BODY]] // Start of body: calculate i from IV: -// CHECK: [[IV1_1:%.+]] = load i32* 
[[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] +// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] // CHECK: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 5 // CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 3, [[CALC_I_1]] // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] @@ -25,7 +25,7 @@ void simple(float *a, float *b, float *c, float *d) { // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] a[i] = b[i] * c[i] * d[i]; -// CHECK: [[IV1_2:%.+]] = load i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] +// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] // CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1 // CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] // br label %{{.+}}, !llvm.loop !{{.+}} @@ -35,19 +35,19 @@ void simple(float *a, float *b, float *c, float *d) { #pragma omp simd // CHECK: store i32 0, i32* [[OMP_IV2:%[^,]+]] -// CHECK: [[IV2:%.+]] = load i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]] +// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]] // CHECK-NEXT: [[CMP2:%.+]] = icmp slt i32 [[IV2]], 9 // CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]] for (int i = 10; i > 1; i--) { // CHECK: [[SIMPLE_LOOP2_BODY]] // Start of body: calculate i from IV: -// CHECK: [[IV2_0:%.+]] = load i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] // FIXME: It is interesting, why the following "mul 1" was not constant folded? 
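A note on the arithmetic these simd checks pin down: the loop counter is not carried across iterations; it is recomputed in each body from a zero-based canonical induction variable. A minimal runnable C++ sketch of that mapping for the first two loops in simd_codegen.cpp follows; it is illustrative only and not part of the patch, but the trip counts (6 and 9) and strides are the same constants the CHECK lines match.

#include <cassert>

int main() {
  // for (int i = 3; i < 32; i += 5) runs 6 times; i is recovered as 3 + 5 * iv,
  // the "mul nsw ... 5" / "add nsw i32 3" pair in the checks above.
  for (int iv = 0; iv < 6; ++iv) {
    int i = 3 + 5 * iv;
    assert(i >= 3 && i < 32);
  }
  // for (int i = 10; i > 1; i--) runs 9 times; i is recovered as 10 - 1 * iv,
  // the "mul nsw ... 1" / "sub nsw i32 10" pair in the checks for the second loop.
  for (int iv = 0; iv < 9; ++iv) {
    int i = 10 - 1 * iv;
    assert(i > 1 && i <= 10);
  }
  return 0;
}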
// CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1 // CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]] // CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] a[i]++; -// CHECK: [[IV2_2:%.+]] = load i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] // CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1 // CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] // br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]] @@ -57,18 +57,18 @@ void simple(float *a, float *b, float *c, float *d) { #pragma omp simd // CHECK: store i64 0, i64* [[OMP_IV3:%[^,]+]] -// CHECK: [[IV3:%.+]] = load i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID:[0-9]+]] +// CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID:[0-9]+]] // CHECK-NEXT: [[CMP3:%.+]] = icmp ult i64 [[IV3]], 4 // CHECK-NEXT: br i1 [[CMP3]], label %[[SIMPLE_LOOP3_BODY:.+]], label %[[SIMPLE_LOOP3_END:[^,]+]] for (unsigned long long it = 2000; it >= 600; it-=400) { // CHECK: [[SIMPLE_LOOP3_BODY]] // Start of body: calculate it from IV: -// CHECK: [[IV3_0:%.+]] = load i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] +// CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] // CHECK-NEXT: [[LC_IT_1:%.+]] = mul i64 [[IV3_0]], 400 // CHECK-NEXT: [[LC_IT_2:%.+]] = sub i64 2000, [[LC_IT_1]] // CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] a[it]++; -// CHECK: [[IV3_2:%.+]] = load i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] +// CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] // CHECK-NEXT: [[ADD3_2:%.+]] = add i64 [[IV3_2]], 1 // CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] } @@ -77,19 +77,19 @@ void simple(float *a, float *b, float *c, float *d) { #pragma omp simd // CHECK: store i32 0, i32* [[OMP_IV4:%[^,]+]] -// CHECK: [[IV4:%.+]] = load i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID:[0-9]+]] +// CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID:[0-9]+]] // CHECK-NEXT: [[CMP4:%.+]] = icmp slt i32 [[IV4]], 4 // CHECK-NEXT: br i1 [[CMP4]], label %[[SIMPLE_LOOP4_BODY:.+]], label %[[SIMPLE_LOOP4_END:[^,]+]] for (short it = 6; it <= 20; it-=-4) { // CHECK: [[SIMPLE_LOOP4_BODY]] // Start of body: calculate it from IV: -// CHECK: [[IV4_0:%.+]] = load i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]] +// CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]] // CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV4_0]], 4 // CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 6, [[LC_IT_1]] // CHECK-NEXT: [[LC_IT_3:%.+]] = trunc i32 [[LC_IT_2]] to i16 // CHECK-NEXT: store i16 [[LC_IT_3]], i16* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]] -// CHECK: [[IV4_2:%.+]] = load i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]] +// CHECK: [[IV4_2:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]] // CHECK-NEXT: [[ADD4_2:%.+]] = add nsw i32 [[IV4_2]], 1 // CHECK-NEXT: 
store i32 [[ADD4_2]], i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]] } @@ -98,19 +98,19 @@ void simple(float *a, float *b, float *c, float *d) { #pragma omp simd // CHECK: store i32 0, i32* [[OMP_IV5:%[^,]+]] -// CHECK: [[IV5:%.+]] = load i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID:[0-9]+]] +// CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID:[0-9]+]] // CHECK-NEXT: [[CMP5:%.+]] = icmp slt i32 [[IV5]], 26 // CHECK-NEXT: br i1 [[CMP5]], label %[[SIMPLE_LOOP5_BODY:.+]], label %[[SIMPLE_LOOP5_END:[^,]+]] for (unsigned char it = 'z'; it >= 'a'; it+=-1) { // CHECK: [[SIMPLE_LOOP5_BODY]] // Start of body: calculate it from IV: -// CHECK: [[IV5_0:%.+]] = load i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]] +// CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]] // CHECK-NEXT: [[IV5_1:%.+]] = mul nsw i32 [[IV5_0]], 1 // CHECK-NEXT: [[LC_IT_1:%.+]] = sub nsw i32 122, [[IV5_1]] // CHECK-NEXT: [[LC_IT_2:%.+]] = trunc i32 [[LC_IT_1]] to i8 // CHECK-NEXT: store i8 [[LC_IT_2]], i8* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]] -// CHECK: [[IV5_2:%.+]] = load i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]] +// CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]] // CHECK-NEXT: [[ADD5_2:%.+]] = add nsw i32 [[IV5_2]], 1 // CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]] } @@ -122,18 +122,18 @@ void simple(float *a, float *b, float *c, float *d) { // // CHECK: store i32 0, i32* [[OMP_IV6:%[^,]+]] -// CHECK: [[IV6:%.+]] = load i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID:[0-9]+]] +// CHECK: [[IV6:%.+]] = load i32, i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID:[0-9]+]] // CHECK-NEXT: [[CMP6:%.+]] = icmp slt i32 [[IV6]], -8 // CHECK-NEXT: br i1 [[CMP6]], label %[[SIMPLE_LOOP6_BODY:.+]], label %[[SIMPLE_LOOP6_END:[^,]+]] for (int i=100; i<10; i+=10) { // CHECK: [[SIMPLE_LOOP6_BODY]] // Start of body: calculate i from IV: -// CHECK: [[IV6_0:%.+]] = load i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID]] +// CHECK: [[IV6_0:%.+]] = load i32, i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID]] // CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV6_0]], 10 // CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 100, [[LC_IT_1]] // CHECK-NEXT: store i32 [[LC_IT_2]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID]] -// CHECK: [[IV6_2:%.+]] = load i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID]] +// CHECK: [[IV6_2:%.+]] = load i32, i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID]] // CHECK-NEXT: [[ADD6_2:%.+]] = add nsw i32 [[IV6_2]], 1 // CHECK-NEXT: store i32 [[ADD6_2]], i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID]] } @@ -149,28 +149,28 @@ void simple(float *a, float *b, float *c, float *d) { // CHECK: [[SIMPLE_IF7_THEN]] // CHECK: br label %[[SIMD_LOOP7_COND:[^,]+]] // CHECK: [[SIMD_LOOP7_COND]] -// CHECK-NEXT: [[IV7:%.+]] = load i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID:[0-9]+]] +// CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID:[0-9]+]] // CHECK-NEXT: [[CMP7:%.+]] = icmp slt i64 [[IV7]], 6 // CHECK-NEXT: br i1 
[[CMP7]], label %[[SIMPLE_LOOP7_BODY:.+]], label %[[SIMPLE_LOOP7_END:[^,]+]] for (long long i = -10; i < 10; i += 3) { // CHECK: [[SIMPLE_LOOP7_BODY]] // Start of body: calculate i from IV: -// CHECK: [[IV7_0:%.+]] = load i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] +// CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] // CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV7_0]], 3 // CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]] // CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] A = i; -// CHECK: [[IV7_2:%.+]] = load i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] +// CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] // CHECK-NEXT: [[ADD7_2:%.+]] = add nsw i64 [[IV7_2]], 1 // CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] } // CHECK: [[SIMPLE_LOOP7_END]] // Separated last iteration. -// CHECK: [[IV7_4:%.+]] = load i64* [[OMP_IV7]] +// CHECK: [[IV7_4:%.+]] = load i64, i64* [[OMP_IV7]] // CHECK-NEXT: [[LC_FIN_1:%.+]] = mul nsw i64 [[IV7_4]], 3 // CHECK-NEXT: [[LC_FIN_2:%.+]] = add nsw i64 -10, [[LC_FIN_1]] // CHECK-NEXT: store i64 [[LC_FIN_2]], i64* [[ADDR_I:%[^,]+]] -// CHECK: [[LOAD_I:%.+]] = load i64* [[ADDR_I]] +// CHECK: [[LOAD_I:%.+]] = load i64, i64* [[ADDR_I]] // CHECK-NEXT: [[CONV_I:%.+]] = trunc i64 [[LOAD_I]] to i32 // // CHECK: br label %[[SIMPLE_IF7_END]] @@ -197,24 +197,24 @@ int templ1(T a, T *z) { // CHECK-LABEL: define {{.*i32}} @{{.*}}templ1{{.*}}(float {{.+}}, float* {{.+}}) // CHECK: store i64 0, i64* [[T1_OMP_IV:[^,]+]] // ... -// CHECK: [[IV:%.+]] = load i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID:[0-9]+]] +// CHECK: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID:[0-9]+]] // CHECK-NEXT: [[CMP1:%.+]] = icmp slt i64 [[IV]], 16 // CHECK-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]] // CHECK: [[T1_BODY]] // Loop counters i and j updates: -// CHECK: [[IV1:%.+]] = load i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] +// CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] // CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4 // CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1 // CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]] // CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32 // CHECK-NEXT: store i32 [[I_2]], i32* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] -// CHECK: [[IV2:%.+]] = load i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] +// CHECK: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] // CHECK-NEXT: [[J_1:%.+]] = srem i64 [[IV2]], 4 // CHECK-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2 // CHECK-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]] // CHECK-NEXT: store i64 [[J_2_ADD0]], i64* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] // simd.for.inc: -// CHECK: [[IV3:%.+]] = load i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] +// CHECK: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] // CHECK-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1 // CHECK-NEXT: store i64 [[INC]], i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] // CHECK-NEXT: br label {{%.+}} @@ -269,15 +269,15 @@ void 
iter_simple(IterDouble ia, IterDouble ib, IterDouble ic) { // CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}} #pragma omp simd -// CHECK: [[IV:%.+]] = load i32* [[IT_OMP_IV]]{{.+}} !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID:[0-9]+]] -// CHECK-NEXT: [[LAST_IT:%.+]] = load i32* [[OMP_LAST_IT]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] +// CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}} !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID:[0-9]+]] +// CHECK-NEXT: [[LAST_IT:%.+]] = load i32, i32* [[OMP_LAST_IT]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] // CHECK-NEXT: [[NUM_IT:%.+]] = add nsw i32 [[LAST_IT]], 1 // CHECK-NEXT: [[CMP:%.+]] = icmp slt i32 [[IV]], [[NUM_IT]] // CHECK-NEXT: br i1 [[CMP]], label %[[IT_BODY:[^,]+]], label %[[IT_END:[^,]+]] for (IterDouble i = ia; i < ib; ++i) { // CHECK: [[IT_BODY]] // Start of body: calculate i from index: -// CHECK: [[IV1:%.+]] = load i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] +// CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] // Call of operator+ (i, IV). // CHECK: {{%.+}} = call {{.+}} @{{.*}}IterDouble{{.*}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] // ... loop body ... @@ -288,7 +288,7 @@ void iter_simple(IterDouble ia, IterDouble ib, IterDouble ic) { // CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]], !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] ++ic; // -// CHECK: [[IV2:%.+]] = load i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] +// CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] // CHECK-NEXT: [[ADD2:%.+]] = add nsw i32 [[IV2]], 1 // CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] // br label %{{.*}}, !llvm.loop ![[ITER_LOOP_ID]] @@ -308,7 +308,7 @@ void collapsed(float *a, float *b, float *c, float *d) { // #pragma omp simd collapse(4) -// CHECK: [[IV:%.+]] = load i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID:[0-9]+]] +// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID:[0-9]+]] // CHECK-NEXT: [[CMP:%.+]] = icmp ult i32 [[IV]], 120 // CHECK-NEXT: br i1 [[CMP]], label %[[COLL1_BODY:[^,]+]], label %[[COLL1_END:[^,]+]] for (i = 1; i < 3; i++) // 2 iterations @@ -318,25 +318,25 @@ void collapsed(float *a, float *b, float *c, float *d) { { // CHECK: [[COLL1_BODY]] // Start of body: calculate i from index: -// CHECK: [[IV1:%.+]] = load i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] +// CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] // Calculation of the loop counters values. 
// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60 // CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1 // CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]] // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]] -// CHECK: [[IV1_2:%.+]] = load i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] +// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] // CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20 // CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3 // CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1 // CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]] // CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]] -// CHECK: [[IV1_3:%.+]] = load i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] +// CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] // CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5 // CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4 // CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1 // CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]] // CHECK-NEXT: store i32 [[CALC_K_3]], i32* [[LC_K:.+]] -// CHECK: [[IV1_4:%.+]] = load i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] +// CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] // CHECK-NEXT: [[CALC_L_1:%.+]] = urem i32 [[IV1_4]], 5 // CHECK-NEXT: [[CALC_L_1_MUL1:%.+]] = mul i32 [[CALC_L_1]], 1 // CHECK-NEXT: [[CALC_L_2:%.+]] = add i32 4, [[CALC_L_1_MUL1]] @@ -347,7 +347,7 @@ void collapsed(float *a, float *b, float *c, float *d) { // CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] float res = b[j] * c[k]; a[i] = res * d[l]; -// CHECK: [[IV2:%.+]] = load i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] +// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] // CHECK-NEXT: [[ADD2:%.+]] = add i32 [[IV2]], 1 // CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] // br label %{{[^,]+}}, !llvm.loop ![[COLL1_LOOP_ID]] @@ -371,8 +371,8 @@ void widened(float *a, float *b, float *c, float *d) { // #pragma omp simd collapse(2) -// CHECK: [[IV:%.+]] = load i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID:[0-9]+]] -// CHECK-NEXT: [[LI:%.+]] = load i64* [[OMP_LI:%[^,]+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]] +// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID:[0-9]+]] +// CHECK-NEXT: [[LI:%.+]] = load i64, i64* [[OMP_LI:%[^,]+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]] // CHECK-NEXT: [[NUMIT:%.+]] = add nsw i64 [[LI]], 1 // CHECK-NEXT: [[CMP:%.+]] = icmp slt i64 [[IV]], [[NUMIT]] // CHECK-NEXT: br i1 [[CMP]], label %[[WIDE1_BODY:[^,]+]], label %[[WIDE1_END:[^,]+]] @@ -381,17 +381,17 @@ void widened(float *a, float *b, float *c, float *d) { { // CHECK: [[WIDE1_BODY]] // Start of body: calculate i from index: -// CHECK: [[IV1:%.+]] = load i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]] +// CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]] // Calculation of the loop counters values... 
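The collapse(4) checks above verify that the single linear induction variable in [0, 120) is split back into the four collapsed counters with udiv/urem by the partial trip counts 60, 20 and 5. A short runnable C++ sketch of that decomposition, reusing the test's bounds (illustrative only, not test content):

#include <cassert>

int main() {
  // 2 * 3 * 4 * 5 = 120 logical iterations for the collapsed nest.
  for (unsigned iv = 0; iv < 120; ++iv) {
    unsigned i = 1 + iv / 60;        // udiv 60, add 1
    unsigned j = 2 + (iv / 20) % 3;  // udiv 20, urem 3, add 2
    unsigned k = 3 + (iv / 5) % 4;   // udiv 5, urem 4, add 3
    unsigned l = 4 + iv % 5;         // urem 5, add 4
    assert(i < 3 && j < 5 && k < 7 && l < 9);
  }
  return 0;
}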
// CHECK: store i32 {{[^,]+}}, i32* [[LC_I:.+]] -// CHECK: [[IV1_2:%.+]] = load i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]] +// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]] // CHECK: store i16 {{[^,]+}}, i16* [[LC_J:.+]] // ... loop body ... // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]] float res = b[j] * c[j]; a[i] = res * d[i]; -// CHECK: [[IV2:%.+]] = load i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]] +// CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]] // CHECK-NEXT: [[ADD2:%.+]] = add nsw i64 [[IV2]], 1 // CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]] // br label %{{[^,]+}}, !llvm.loop ![[WIDE1_LOOP_ID]] diff --git a/test/OpenMP/threadprivate_codegen.cpp b/test/OpenMP/threadprivate_codegen.cpp index 0c079b16d2..cd4a4d3a84 100644 --- a/test/OpenMP/threadprivate_codegen.cpp +++ b/test/OpenMP/threadprivate_codegen.cpp @@ -164,15 +164,15 @@ static S1 gs1(5); // CHECK: define {{.*}} [[S1_DTOR:@.*]]([[S1]]* {{.*}}) // CHECK: define internal {{.*}}i8* [[GS1_CTOR:@\.__kmpc_global_ctor_\..*]](i8*) // CHECK: store i8* %0, i8** [[ARG_ADDR:%.*]], -// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]] +// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]] // CHECK: [[RES:%.*]] = bitcast i8* [[ARG]] to [[S1]]* // CHECK-NEXT: call {{.*}} [[S1_CTOR]]([[S1]]* [[RES]], {{.*}} 5) -// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]] +// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]] // CHECK: ret i8* [[ARG]] // CHECK-NEXT: } // CHECK: define internal {{.*}}void [[GS1_DTOR:@\.__kmpc_global_dtor_\..*]](i8*) // CHECK: store i8* %0, i8** [[ARG_ADDR:%.*]], -// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]] +// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]] // CHECK: [[RES:%.*]] = bitcast i8* [[ARG]] to [[S1]]* // CHECK-NEXT: call {{.*}} [[S1_DTOR]]([[S1]]* [[RES]]) // CHECK-NEXT: ret void @@ -188,16 +188,16 @@ static S1 gs1(5); // CHECK-DEBUG: call {{.*}}void @__kmpc_threadprivate_register([[IDENT]]* [[KMPC_LOC_ADDR]], i8* bitcast ([[S1]]* [[GS1]] to i8*), i8* (i8*)* [[GS1_CTOR:@\.__kmpc_global_ctor_\..*]], i8* (i8*, i8*)* null, void (i8*)* [[GS1_DTOR:@\.__kmpc_global_dtor_\..*]]) // CHECK-DEBUG: define internal {{.*}}i8* [[GS1_CTOR]](i8*) // CHECK-DEBUG: store i8* %0, i8** [[ARG_ADDR:%.*]], -// CHECK-DEBUG: [[ARG:%.+]] = load i8** [[ARG_ADDR]] +// CHECK-DEBUG: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]] // CHECK-DEBUG: [[RES:%.*]] = bitcast i8* [[ARG]] to [[S1]]* // CHECK-DEBUG-NEXT: call {{.*}} [[S1_CTOR:@.+]]([[S1]]* [[RES]], {{.*}} 5) -// CHECK-DEBUG: [[ARG:%.+]] = load i8** [[ARG_ADDR]] +// CHECK-DEBUG: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]] // CHECK-DEBUG: ret i8* [[ARG]] // CHECK-DEBUG-NEXT: } // CHECK-DEBUG: define {{.*}} [[S1_CTOR]]([[S1]]* {{.*}}, // CHECK-DEBUG: define internal {{.*}}void [[GS1_DTOR]](i8*) // CHECK-DEBUG: store i8* %0, i8** [[ARG_ADDR:%.*]], -// CHECK-DEBUG: [[ARG:%.+]] = load i8** [[ARG_ADDR]] +// CHECK-DEBUG: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]] // CHECK-DEBUG: [[RES:%.*]] = bitcast i8* [[ARG]] to [[S1]]* // CHECK-DEBUG-NEXT: call {{.*}} [[S1_DTOR:@.+]]([[S1]]* [[RES]]) // CHECK-DEBUG-NEXT: ret void @@ -216,7 +216,7 @@ S1 arr_x[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } }; #pragma omp threadprivate(arr_x) // CHECK: define internal {{.*}}i8* 
[[ARR_X_CTOR:@\.__kmpc_global_ctor_\..*]](i8*) // CHECK: store i8* %0, i8** [[ARG_ADDR:%.*]], -// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]] +// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]] // CHECK: [[RES:%.*]] = bitcast i8* [[ARG]] to [2 x [3 x [[S1]]]]* // CHECK: [[ARR1:%.*]] = getelementptr inbounds [2 x [3 x [[S1]]]], [2 x [3 x [[S1]]]]* [[RES]], i{{.*}} 0, i{{.*}} 0 // CHECK: [[ARR:%.*]] = getelementptr inbounds [3 x [[S1]]], [3 x [[S1]]]* [[ARR1]], i{{.*}} 0, i{{.*}} 0 @@ -232,12 +232,12 @@ S1 arr_x[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } }; // CHECK: invoke {{.*}} [[S1_CTOR]]([[S1]]* [[ARR_ELEMENT]], [[INT]] {{.*}}5) // CHECK: [[ARR_ELEMENT2:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[ARR_ELEMENT]], i{{.*}} 1 // CHECK: invoke {{.*}} [[S1_CTOR]]([[S1]]* [[ARR_ELEMENT2]], [[INT]] {{.*}}6) -// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]] +// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]] // CHECK: ret i8* [[ARG]] // CHECK: } // CHECK: define internal {{.*}}void [[ARR_X_DTOR:@\.__kmpc_global_dtor_\..*]](i8*) // CHECK: store i8* %0, i8** [[ARG_ADDR:%.*]], -// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]] +// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]] // CHECK: [[ARR_BEGIN:%.*]] = bitcast i8* [[ARG]] to [[S1]]* // CHECK-NEXT: [[ARR_CUR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[ARR_BEGIN]], i{{.*}} 6 // CHECK-NEXT: br label %[[ARR_LOOP:.*]] @@ -308,7 +308,7 @@ int main() { // CHECK: [[GS1_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[S1]]* [[GS1]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[GS1]].cache.) // CHECK-NEXT: [[GS1_ADDR:%.*]] = bitcast i8* [[GS1_TEMP_ADDR]] to [[S1]]* // CHECK-NEXT: [[GS1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[GS1_ADDR]], i{{.*}} 0, i{{.*}} 0 -// CHECK-NEXT: [[GS1_A:%.*]] = load [[INT]]* [[GS1_A_ADDR]] +// CHECK-NEXT: [[GS1_A:%.*]] = load [[INT]], [[INT]]* [[GS1_A_ADDR]] // CHECK-NEXT: invoke {{.*}} [[SMAIN_CTOR:.*]]([[SMAIN]]* [[SM]], [[INT]] {{.*}}[[GS1_A]]) // CHECK: call {{.*}}void @__cxa_guard_release // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4 @@ -322,28 +322,28 @@ int main() { // CHECK-DEBUG: [[GS1_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[S1]]* [[GS1]] to i8*), i{{.*}} {{[0-9]+}}, i8*** // CHECK-DEBUG-NEXT: [[GS1_ADDR:%.*]] = bitcast i8* [[GS1_TEMP_ADDR]] to [[S1]]* // CHECK-DEBUG-NEXT: [[GS1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[GS1_ADDR]], i{{.*}} 0, i{{.*}} 0 -// CHECK-DEBUG-NEXT: [[GS1_A:%.*]] = load [[INT]]* [[GS1_A_ADDR]] +// CHECK-DEBUG-NEXT: [[GS1_A:%.*]] = load [[INT]], [[INT]]* [[GS1_A_ADDR]] // CHECK-DEBUG-NEXT: invoke {{.*}} [[SMAIN_CTOR:.*]]([[SMAIN]]* [[SM]], [[INT]] {{.*}}[[GS1_A]]) // CHECK-DEBUG: call {{.*}}void @__cxa_guard_release #pragma omp threadprivate(sm) // CHECK: [[STATIC_S_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[S3]]* [[STATIC_S]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[STATIC_S]].cache.) 
// CHECK-NEXT: [[STATIC_S_ADDR:%.*]] = bitcast i8* [[STATIC_S_TEMP_ADDR]] to [[S3]]* // CHECK-NEXT: [[STATIC_S_A_ADDR:%.*]] = getelementptr inbounds [[S3]], [[S3]]* [[STATIC_S_ADDR]], i{{.*}} 0, i{{.*}} 0 - // CHECK-NEXT: [[STATIC_S_A:%.*]] = load [[INT]]* [[STATIC_S_A_ADDR]] + // CHECK-NEXT: [[STATIC_S_A:%.*]] = load [[INT]], [[INT]]* [[STATIC_S_A_ADDR]] // CHECK-NEXT: store [[INT]] [[STATIC_S_A]], [[INT]]* [[RES_ADDR:[^,]+]] // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4 // CHECK-DEBUG-NEXT: store i8* getelementptr inbounds ([{{.*}} x i8]* [[LOC5]], i{{.*}} 0, i{{.*}} 0), i8** [[KMPC_LOC_ADDR_PSOURCE]] // CHECK-DEBUG-NEXT: [[STATIC_S_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[S3]]* [[STATIC_S]] to i8*), i{{.*}} {{[0-9]+}}, i8*** // CHECK-DEBUG-NEXT: [[STATIC_S_ADDR:%.*]] = bitcast i8* [[STATIC_S_TEMP_ADDR]] to [[S3]]* // CHECK-DEBUG-NEXT: [[STATIC_S_A_ADDR:%.*]] = getelementptr inbounds [[S3]], [[S3]]* [[STATIC_S_ADDR]], i{{.*}} 0, i{{.*}} 0 - // CHECK-DEBUG-NEXT: [[STATIC_S_A:%.*]] = load [[INT]]* [[STATIC_S_A_ADDR]] + // CHECK-DEBUG-NEXT: [[STATIC_S_A:%.*]] = load [[INT]], [[INT]]* [[STATIC_S_A_ADDR]] // CHECK-DEBUG-NEXT: store [[INT]] [[STATIC_S_A]], [[INT]]* [[RES_ADDR:[^,]+]] Res = Static::s.a; // CHECK: [[SM_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[SMAIN]]* [[SM]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[SM]].cache.) // CHECK-NEXT: [[SM_ADDR:%.*]] = bitcast i8* [[SM_TEMP_ADDR]] to [[SMAIN]]* // CHECK-NEXT: [[SM_A_ADDR:%.*]] = getelementptr inbounds [[SMAIN]], [[SMAIN]]* [[SM_ADDR]], i{{.*}} 0, i{{.*}} 0 - // CHECK-NEXT: [[SM_A:%.*]] = load [[INT]]* [[SM_A_ADDR]] - // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-NEXT: [[SM_A:%.*]] = load [[INT]], [[INT]]* [[SM_A_ADDR]] + // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[SM_A]] // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4 @@ -351,16 +351,16 @@ int main() { // CHECK-DEBUG-NEXT: [[SM_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[SMAIN]]* [[SM]] to i8*), i{{.*}} {{[0-9]+}}, i8*** // CHECK-DEBUG-NEXT: [[SM_ADDR:%.*]] = bitcast i8* [[SM_TEMP_ADDR]] to [[SMAIN]]* // CHECK-DEBUG-NEXT: [[SM_A_ADDR:%.*]] = getelementptr inbounds [[SMAIN]], [[SMAIN]]* [[SM_ADDR]], i{{.*}} 0, i{{.*}} 0 - // CHECK-DEBUG-NEXT: [[SM_A:%.*]] = load [[INT]]* [[SM_A_ADDR]] - // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-DEBUG-NEXT: [[SM_A:%.*]] = load [[INT]], [[INT]]* [[SM_A_ADDR]] + // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[SM_A]] // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] Res += sm.a; // CHECK: [[GS1_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[S1]]* [[GS1]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[GS1]].cache.) 
// CHECK-NEXT: [[GS1_ADDR:%.*]] = bitcast i8* [[GS1_TEMP_ADDR]] to [[S1]]* // CHECK-NEXT: [[GS1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[GS1_ADDR]], i{{.*}} 0, i{{.*}} 0 - // CHECK-NEXT: [[GS1_A:%.*]] = load [[INT]]* [[GS1_A_ADDR]] - // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-NEXT: [[GS1_A:%.*]] = load [[INT]], [[INT]]* [[GS1_A_ADDR]] + // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS1_A]] // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4 @@ -368,25 +368,25 @@ int main() { // CHECK-DEBUG-NEXT: [[GS1_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[S1]]* [[GS1]] to i8*), i{{.*}} {{[0-9]+}}, i8*** // CHECK-DEBUG-NEXT: [[GS1_ADDR:%.*]] = bitcast i8* [[GS1_TEMP_ADDR]] to [[S1]]* // CHECK-DEBUG-NEXT: [[GS1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[GS1_ADDR]], i{{.*}} 0, i{{.*}} 0 - // CHECK-DEBUG-NEXT: [[GS1_A:%.*]] = load [[INT]]* [[GS1_A_ADDR]] - // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-DEBUG-NEXT: [[GS1_A:%.*]] = load [[INT]], [[INT]]* [[GS1_A_ADDR]] + // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS1_A]] // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] Res += gs1.a; - // CHECK: [[GS2_A:%.*]] = load [[INT]]* getelementptr inbounds ([[S2]]* [[GS2]], i{{.*}} 0, i{{.*}} 0) - // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK: [[GS2_A:%.*]] = load [[INT]], [[INT]]* getelementptr inbounds ([[S2]]* [[GS2]], i{{.*}} 0, i{{.*}} 0) + // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS2_A]] // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] - // CHECK-DEBUG: [[GS2_A:%.*]] = load [[INT]]* getelementptr inbounds ([[S2]]* [[GS2]], i{{.*}} 0, i{{.*}} 0) - // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-DEBUG: [[GS2_A:%.*]] = load [[INT]], [[INT]]* getelementptr inbounds ([[S2]]* [[GS2]], i{{.*}} 0, i{{.*}} 0) + // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS2_A]] // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] Res += gs2.a; // CHECK: [[GS3_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[S5]]* [[GS3]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[GS3]].cache.) 
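Every threadprivate access checked in this file has the same shape: call __kmpc_threadprivate_cached to get this thread's copy, bitcast the returned i8* to the variable's type, index to the member, then read it with the explicitly typed load. A hedged FileCheck-style sketch of that shape; the type, global and check lines below are invented for illustration and do not appear in threadprivate_codegen.cpp:

struct S6 { int a; };     // hypothetical type, analogous to S1/S5 above
static S6 gs6;            // hypothetical threadprivate global
#pragma omp threadprivate(gs6)

int read_gs6() {
  // CHECK: [[RAW:%.+]] = call {{.*}}i8* @__kmpc_threadprivate_cached(
  // CHECK: [[OBJ:%.+]] = bitcast i8* [[RAW]] to %struct.S6*
  // CHECK: [[A_ADDR:%.+]] = getelementptr inbounds %struct.S6, %struct.S6* [[OBJ]], i{{.*}} 0, i{{.*}} 0
  // CHECK: {{%.+}} = load i32, i32* [[A_ADDR]]
  return gs6.a;
}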
// CHECK-NEXT: [[GS3_ADDR:%.*]] = bitcast i8* [[GS3_TEMP_ADDR]] to [[S5]]* // CHECK-NEXT: [[GS3_A_ADDR:%.*]] = getelementptr inbounds [[S5]], [[S5]]* [[GS3_ADDR]], i{{.*}} 0, i{{.*}} 0 - // CHECK-NEXT: [[GS3_A:%.*]] = load [[INT]]* [[GS3_A_ADDR]] - // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-NEXT: [[GS3_A:%.*]] = load [[INT]], [[INT]]* [[GS3_A_ADDR]] + // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS3_A]] // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4 @@ -394,8 +394,8 @@ int main() { // CHECK-DEBUG-NEXT: [[GS3_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[S5]]* [[GS3]] to i8*), i{{.*}} {{[0-9]+}}, i8*** // CHECK-DEBUG-NEXT: [[GS3_ADDR:%.*]] = bitcast i8* [[GS3_TEMP_ADDR]] to [[S5]]* // CHECK-DEBUG-NEXT: [[GS3_A_ADDR:%.*]] = getelementptr inbounds [[S5]], [[S5]]* [[GS3_ADDR]], i{{.*}} 0, i{{.*}} 0 - // CHECK-DEBUG-NEXT: [[GS3_A:%.*]] = load [[INT]]* [[GS3_A_ADDR]] - // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-DEBUG-NEXT: [[GS3_A:%.*]] = load [[INT]], [[INT]]* [[GS3_A_ADDR]] + // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS3_A]] // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] Res += gs3.a; @@ -404,8 +404,8 @@ int main() { // CHECK-NEXT: [[ARR_X_1_ADDR:%.*]] = getelementptr inbounds [2 x [3 x [[S1]]]], [2 x [3 x [[S1]]]]* [[ARR_X_ADDR]], i{{.*}} 0, i{{.*}} 1 // CHECK-NEXT: [[ARR_X_1_1_ADDR:%.*]] = getelementptr inbounds [3 x [[S1]]], [3 x [[S1]]]* [[ARR_X_1_ADDR]], i{{.*}} 0, i{{.*}} 1 // CHECK-NEXT: [[ARR_X_1_1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[ARR_X_1_1_ADDR]], i{{.*}} 0, i{{.*}} 0 - // CHECK-NEXT: [[ARR_X_1_1_A:%.*]] = load [[INT]]* [[ARR_X_1_1_A_ADDR]] - // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-NEXT: [[ARR_X_1_1_A:%.*]] = load [[INT]], [[INT]]* [[ARR_X_1_1_A_ADDR]] + // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ARR_X_1_1_A]] // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4 @@ -415,48 +415,48 @@ int main() { // CHECK-DEBUG-NEXT: [[ARR_X_1_ADDR:%.*]] = getelementptr inbounds [2 x [3 x [[S1]]]], [2 x [3 x [[S1]]]]* [[ARR_X_ADDR]], i{{.*}} 0, i{{.*}} 1 // CHECK-DEBUG-NEXT: [[ARR_X_1_1_ADDR:%.*]] = getelementptr inbounds [3 x [[S1]]], [3 x [[S1]]]* [[ARR_X_1_ADDR]], i{{.*}} 0, i{{.*}} 1 // CHECK-DEBUG-NEXT: [[ARR_X_1_1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[ARR_X_1_1_ADDR]], i{{.*}} 0, i{{.*}} 0 - // CHECK-DEBUG-NEXT: [[ARR_X_1_1_A:%.*]] = load [[INT]]* [[ARR_X_1_1_A_ADDR]] - // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-DEBUG-NEXT: [[ARR_X_1_1_A:%.*]] = load [[INT]], [[INT]]* [[ARR_X_1_1_A_ADDR]] + // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ARR_X_1_1_A]] // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] Res += arr_x[1][1].a; // CHECK: [[ST_INT_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* 
[[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[INT]]* [[ST_INT_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[ST_INT_ST]].cache.) // CHECK-NEXT: [[ST_INT_ST_ADDR:%.*]] = bitcast i8* [[ST_INT_ST_TEMP_ADDR]] to [[INT]]* - // CHECK-NEXT: [[ST_INT_ST_VAL:%.*]] = load [[INT]]* [[ST_INT_ST_ADDR]] - // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-NEXT: [[ST_INT_ST_VAL:%.*]] = load [[INT]], [[INT]]* [[ST_INT_ST_ADDR]] + // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ST_INT_ST_VAL]] // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4 // CHECK-DEBUG-NEXT: store i8* getelementptr inbounds ([{{.*}} x i8]* [[LOC10]], i{{.*}} 0, i{{.*}} 0), i8** [[KMPC_LOC_ADDR_PSOURCE]] // CHECK-DEBUG-NEXT: [[ST_INT_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[INT]]* [[ST_INT_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8*** // CHECK-DEBUG-NEXT: [[ST_INT_ST_ADDR:%.*]] = bitcast i8* [[ST_INT_ST_TEMP_ADDR]] to [[INT]]* - // CHECK-DEBUG-NEXT: [[ST_INT_ST_VAL:%.*]] = load [[INT]]* [[ST_INT_ST_ADDR]] - // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]] + // CHECK-DEBUG-NEXT: [[ST_INT_ST_VAL:%.*]] = load [[INT]], [[INT]]* [[ST_INT_ST_ADDR]] + // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]] // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ST_INT_ST_VAL]] // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]] Res += ST::st; // CHECK: [[ST_FLOAT_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast (float* [[ST_FLOAT_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[ST_FLOAT_ST]].cache.) 
 // CHECK-NEXT: [[ST_FLOAT_ST_ADDR:%.*]] = bitcast i8* [[ST_FLOAT_ST_TEMP_ADDR]] to float*
- // CHECK-NEXT: [[ST_FLOAT_ST_VAL:%.*]] = load float* [[ST_FLOAT_ST_ADDR]]
+ // CHECK-NEXT: [[ST_FLOAT_ST_VAL:%.*]] = load float, float* [[ST_FLOAT_ST_ADDR]]
 // CHECK-NEXT: [[FLOAT_TO_INT_CONV:%.*]] = fptosi float [[ST_FLOAT_ST_VAL]] to [[INT]]
- // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[FLOAT_TO_INT_CONV]]
 // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4
 // CHECK-DEBUG-NEXT: store i8* getelementptr inbounds ([{{.*}} x i8]* [[LOC11]], i{{.*}} 0, i{{.*}} 0), i8** [[KMPC_LOC_ADDR_PSOURCE]]
 // CHECK-DEBUG-NEXT: [[ST_FLOAT_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast (float* [[ST_FLOAT_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8***
 // CHECK-DEBUG-NEXT: [[ST_FLOAT_ST_ADDR:%.*]] = bitcast i8* [[ST_FLOAT_ST_TEMP_ADDR]] to float*
- // CHECK-DEBUG-NEXT: [[ST_FLOAT_ST_VAL:%.*]] = load float* [[ST_FLOAT_ST_ADDR]]
+ // CHECK-DEBUG-NEXT: [[ST_FLOAT_ST_VAL:%.*]] = load float, float* [[ST_FLOAT_ST_ADDR]]
 // CHECK-DEBUG-NEXT: [[FLOAT_TO_INT_CONV:%.*]] = fptosi float [[ST_FLOAT_ST_VAL]] to [[INT]]
- // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[FLOAT_TO_INT_CONV]]
 // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 Res += static_cast<int>(ST<float>::st);
 // CHECK: [[ST_S4_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[S4]]* [[ST_S4_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[ST_S4_ST]].cache.)
 // CHECK-NEXT: [[ST_S4_ST_ADDR:%.*]] = bitcast i8* [[ST_S4_ST_TEMP_ADDR]] to [[S4]]*
 // CHECK-NEXT: [[ST_S4_ST_A_ADDR:%.*]] = getelementptr inbounds [[S4]], [[S4]]* [[ST_S4_ST_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-NEXT: [[ST_S4_ST_A:%.*]] = load [[INT]]* [[ST_S4_ST_A_ADDR]]
- // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-NEXT: [[ST_S4_ST_A:%.*]] = load [[INT]], [[INT]]* [[ST_S4_ST_A_ADDR]]
+ // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ST_S4_ST_A]]
 // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4
@@ -464,14 +464,14 @@ int main() {
 // CHECK-DEBUG-NEXT: [[ST_S4_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[S4]]* [[ST_S4_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8***
 // CHECK-DEBUG-NEXT: [[ST_S4_ST_ADDR:%.*]] = bitcast i8* [[ST_S4_ST_TEMP_ADDR]] to [[S4]]*
 // CHECK-DEBUG-NEXT: [[ST_S4_ST_A_ADDR:%.*]] = getelementptr inbounds [[S4]], [[S4]]* [[ST_S4_ST_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-DEBUG-NEXT: [[ST_S4_ST_A:%.*]] = load [[INT]]* [[ST_S4_ST_A_ADDR]]
- // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG-NEXT: [[ST_S4_ST_A:%.*]] = load [[INT]], [[INT]]* [[ST_S4_ST_A_ADDR]]
+ // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ST_S4_ST_A]]
 // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 Res += ST<S4>::st.a;
- // CHECK: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: ret [[INT]] [[RES]]
- // CHECK-DEBUG: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: ret [[INT]] [[RES]]
 return Res;
 }
@@ -480,20 +480,20 @@ int main() {
 // CHECK: define internal {{.*}}i8* [[SM_CTOR]](i8*)
 // CHECK: [[THREAD_NUM:%.+]] = call {{.*}}i32 @__kmpc_global_thread_num([[IDENT]]* [[DEFAULT_LOC]])
 // CHECK: store i8* %0, i8** [[ARG_ADDR:%.*]],
-// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]]
+// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]]
 // CHECK: [[RES:%.*]] = bitcast i8* [[ARG]] to [[SMAIN]]*
 // CHECK: [[GS1_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[S1]]* [[GS1]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[GS1]].cache.)
 // CHECK-NEXT: [[GS1_ADDR:%.*]] = bitcast i8* [[GS1_TEMP_ADDR]] to [[S1]]*
 // CHECK-NEXT: [[GS1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[GS1_ADDR]], i{{.*}} 0, i{{.*}} 0
-// CHECK-NEXT: [[GS1_A:%.*]] = load [[INT]]* [[GS1_A_ADDR]]
+// CHECK-NEXT: [[GS1_A:%.*]] = load [[INT]], [[INT]]* [[GS1_A_ADDR]]
 // CHECK-NEXT: call {{.*}} [[SMAIN_CTOR:@.+]]([[SMAIN]]* [[RES]], [[INT]] {{.*}}[[GS1_A]])
-// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]]
+// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]]
 // CHECK-NEXT: ret i8* [[ARG]]
 // CHECK-NEXT: }
 // CHECK: define {{.*}} [[SMAIN_CTOR]]([[SMAIN]]* {{.*}},
 // CHECK: define internal {{.*}}void [[SM_DTOR]](i8*)
 // CHECK: store i8* %0, i8** [[ARG_ADDR:%.*]],
-// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]]
+// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]]
 // CHECK: [[RES:%.*]] = bitcast i8* [[ARG]] to [[SMAIN]]*
 // CHECK-NEXT: call {{.*}} [[SMAIN_DTOR:@.+]]([[SMAIN]]* [[RES]])
 // CHECK-NEXT: ret void
@@ -505,16 +505,16 @@ int main() {
 // CHECK-DEBUG-NEXT: store i8* getelementptr inbounds ([{{.*}} x i8]* [[LOC3]], i{{.*}} 0, i{{.*}} 0), i8** [[KMPC_LOC_ADDR_PSOURCE]]
 // CHECK-DEBUG-NEXT: [[THREAD_NUM:%.+]] = call {{.*}}i32 @__kmpc_global_thread_num([[IDENT]]* [[KMPC_LOC_ADDR]])
 // CHECK-DEBUG: store i8* %0, i8** [[ARG_ADDR:%.*]],
-// CHECK-DEBUG: [[ARG:%.+]] = load i8** [[ARG_ADDR]]
+// CHECK-DEBUG: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]]
 // CHECK-DEBUG: [[RES:%.*]] = bitcast i8* [[ARG]] to [[SMAIN]]*
 // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4
 // CHECK-DEBUG-NEXT: store i8* getelementptr inbounds ([{{.*}} x i8]* [[LOC3]], i{{.*}} 0, i{{.*}} 0), i8** [[KMPC_LOC_ADDR_PSOURCE]]
 // CHECK-DEBUG: [[GS1_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[S1]]* [[GS1]] to i8*), i{{.*}} {{[0-9]+}}, i8***
 // CHECK-DEBUG-NEXT: [[GS1_ADDR:%.*]] = bitcast i8* [[GS1_TEMP_ADDR]] to [[S1]]*
 // CHECK-DEBUG-NEXT: [[GS1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[GS1_ADDR]], i{{.*}} 0, i{{.*}} 0
-// CHECK-DEBUG-NEXT: [[GS1_A:%.*]] = load [[INT]]* [[GS1_A_ADDR]]
+// CHECK-DEBUG-NEXT: [[GS1_A:%.*]] = load [[INT]], [[INT]]* [[GS1_A_ADDR]]
 // CHECK-DEBUG-NEXT: call {{.*}} [[SMAIN_CTOR:@.+]]([[SMAIN]]* [[RES]], [[INT]] {{.*}}[[GS1_A]])
-// CHECK-DEBUG: [[ARG:%.+]] = load i8** [[ARG_ADDR]]
+// CHECK-DEBUG: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]]
 // CHECK-DEBUG-NEXT: ret i8* [[ARG]]
 // CHECK-DEBUG-NEXT: }
 // CHECK-DEBUG: define {{.*}} [[SMAIN_CTOR]]([[SMAIN]]* {{.*}},
@@ -535,7 +535,7 @@ int foobar() {
 // CHECK: [[STATIC_S_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[S3]]* [[STATIC_S]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[STATIC_S]].cache.)
 // CHECK-NEXT: [[STATIC_S_ADDR:%.*]] = bitcast i8* [[STATIC_S_TEMP_ADDR]] to [[S3]]*
 // CHECK-NEXT: [[STATIC_S_A_ADDR:%.*]] = getelementptr inbounds [[S3]], [[S3]]* [[STATIC_S_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-NEXT: [[STATIC_S_A:%.*]] = load [[INT]]* [[STATIC_S_A_ADDR]]
+ // CHECK-NEXT: [[STATIC_S_A:%.*]] = load [[INT]], [[INT]]* [[STATIC_S_A_ADDR]]
 // CHECK-NEXT: store [[INT]] [[STATIC_S_A]], [[INT]]* [[RES_ADDR:[^,]+]]
 // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4
 // CHECK-DEBUG-NEXT: store i8* getelementptr inbounds ([{{.*}} x i8]* [[LOC13]], i{{.*}} 0, i{{.*}} 0), i8** [[KMPC_LOC_ADDR_PSOURCE]]
@@ -545,14 +545,14 @@ int foobar() {
 // CHECK-DEBUG-NEXT: [[STATIC_S_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[S3]]* [[STATIC_S]] to i8*), i{{.*}} {{[0-9]+}}, i8***
 // CHECK-DEBUG-NEXT: [[STATIC_S_ADDR:%.*]] = bitcast i8* [[STATIC_S_TEMP_ADDR]] to [[S3]]*
 // CHECK-DEBUG-NEXT: [[STATIC_S_A_ADDR:%.*]] = getelementptr inbounds [[S3]], [[S3]]* [[STATIC_S_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-DEBUG-NEXT: [[STATIC_S_A:%.*]] = load [[INT]]* [[STATIC_S_A_ADDR]]
+ // CHECK-DEBUG-NEXT: [[STATIC_S_A:%.*]] = load [[INT]], [[INT]]* [[STATIC_S_A_ADDR]]
 // CHECK-DEBUG-NEXT: store [[INT]] [[STATIC_S_A]], [[INT]]* [[RES_ADDR:[^,]+]]
 Res = Static::s.a;
 // CHECK: [[GS1_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[S1]]* [[GS1]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[GS1]].cache.)
 // CHECK-NEXT: [[GS1_ADDR:%.*]] = bitcast i8* [[GS1_TEMP_ADDR]] to [[S1]]*
 // CHECK-NEXT: [[GS1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[GS1_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-NEXT: [[GS1_A:%.*]] = load [[INT]]* [[GS1_A_ADDR]]
- // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-NEXT: [[GS1_A:%.*]] = load [[INT]], [[INT]]* [[GS1_A_ADDR]]
+ // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS1_A]]
 // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4
@@ -560,25 +560,25 @@ int foobar() {
 // CHECK-DEBUG-NEXT: [[GS1_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[S1]]* [[GS1]] to i8*), i{{.*}} {{[0-9]+}}, i8***
 // CHECK-DEBUG-NEXT: [[GS1_ADDR:%.*]] = bitcast i8* [[GS1_TEMP_ADDR]] to [[S1]]*
 // CHECK-DEBUG-NEXT: [[GS1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[GS1_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-DEBUG-NEXT: [[GS1_A:%.*]] = load [[INT]]* [[GS1_A_ADDR]]
- // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG-NEXT: [[GS1_A:%.*]] = load [[INT]], [[INT]]* [[GS1_A_ADDR]]
+ // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS1_A]]
 // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 Res += gs1.a;
- // CHECK: [[GS2_A:%.*]] = load [[INT]]* getelementptr inbounds ([[S2]]* [[GS2]], i{{.*}} 0, i{{.*}} 0)
- // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK: [[GS2_A:%.*]] = load [[INT]], [[INT]]* getelementptr inbounds ([[S2]]* [[GS2]], i{{.*}} 0, i{{.*}} 0)
+ // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS2_A]]
 // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
- // CHECK-DEBUG: [[GS2_A:%.*]] = load [[INT]]* getelementptr inbounds ([[S2]]* [[GS2]], i{{.*}} 0, i{{.*}} 0)
- // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG: [[GS2_A:%.*]] = load [[INT]], [[INT]]* getelementptr inbounds ([[S2]]* [[GS2]], i{{.*}} 0, i{{.*}} 0)
+ // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS2_A]]
 // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 Res += gs2.a;
 // CHECK: [[GS3_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[S5]]* [[GS3]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[GS3]].cache.)
 // CHECK-NEXT: [[GS3_ADDR:%.*]] = bitcast i8* [[GS3_TEMP_ADDR]] to [[S5]]*
 // CHECK-NEXT: [[GS3_A_ADDR:%.*]] = getelementptr inbounds [[S5]], [[S5]]* [[GS3_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-NEXT: [[GS3_A:%.*]] = load [[INT]]* [[GS3_A_ADDR]]
- // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-NEXT: [[GS3_A:%.*]] = load [[INT]], [[INT]]* [[GS3_A_ADDR]]
+ // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS3_A]]
 // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4
@@ -586,8 +586,8 @@ int foobar() {
 // CHECK-DEBUG-NEXT: [[GS3_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[S5]]* [[GS3]] to i8*), i{{.*}} {{[0-9]+}}, i8***
 // CHECK-DEBUG-NEXT: [[GS3_ADDR:%.*]] = bitcast i8* [[GS3_TEMP_ADDR]] to [[S5]]*
 // CHECK-DEBUG-NEXT: [[GS3_A_ADDR:%.*]] = getelementptr inbounds [[S5]], [[S5]]* [[GS3_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-DEBUG-NEXT: [[GS3_A:%.*]] = load [[INT]]* [[GS3_A_ADDR]]
- // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG-NEXT: [[GS3_A:%.*]] = load [[INT]], [[INT]]* [[GS3_A_ADDR]]
+ // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[GS3_A]]
 // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 Res += gs3.a;
@@ -596,8 +596,8 @@ int foobar() {
 // CHECK-NEXT: [[ARR_X_1_ADDR:%.*]] = getelementptr inbounds [2 x [3 x [[S1]]]], [2 x [3 x [[S1]]]]* [[ARR_X_ADDR]], i{{.*}} 0, i{{.*}} 1
 // CHECK-NEXT: [[ARR_X_1_1_ADDR:%.*]] = getelementptr inbounds [3 x [[S1]]], [3 x [[S1]]]* [[ARR_X_1_ADDR]], i{{.*}} 0, i{{.*}} 1
 // CHECK-NEXT: [[ARR_X_1_1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[ARR_X_1_1_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-NEXT: [[ARR_X_1_1_A:%.*]] = load [[INT]]* [[ARR_X_1_1_A_ADDR]]
- // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-NEXT: [[ARR_X_1_1_A:%.*]] = load [[INT]], [[INT]]* [[ARR_X_1_1_A_ADDR]]
+ // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ARR_X_1_1_A]]
 // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4
@@ -607,48 +607,48 @@ int foobar() {
 // CHECK-DEBUG-NEXT: [[ARR_X_1_ADDR:%.*]] = getelementptr inbounds [2 x [3 x [[S1]]]], [2 x [3 x [[S1]]]]* [[ARR_X_ADDR]], i{{.*}} 0, i{{.*}} 1
 // CHECK-DEBUG-NEXT: [[ARR_X_1_1_ADDR:%.*]] = getelementptr inbounds [3 x [[S1]]], [3 x [[S1]]]* [[ARR_X_1_ADDR]], i{{.*}} 0, i{{.*}} 1
 // CHECK-DEBUG-NEXT: [[ARR_X_1_1_A_ADDR:%.*]] = getelementptr inbounds [[S1]], [[S1]]* [[ARR_X_1_1_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-DEBUG-NEXT: [[ARR_X_1_1_A:%.*]] = load [[INT]]* [[ARR_X_1_1_A_ADDR]]
- // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG-NEXT: [[ARR_X_1_1_A:%.*]] = load [[INT]], [[INT]]* [[ARR_X_1_1_A_ADDR]]
+ // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ARR_X_1_1_A]]
 // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 Res += arr_x[1][1].a;
 // CHECK: [[ST_INT_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[INT]]* [[ST_INT_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[ST_INT_ST]].cache.)
 // CHECK-NEXT: [[ST_INT_ST_ADDR:%.*]] = bitcast i8* [[ST_INT_ST_TEMP_ADDR]] to [[INT]]*
- // CHECK-NEXT: [[ST_INT_ST_VAL:%.*]] = load [[INT]]* [[ST_INT_ST_ADDR]]
- // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-NEXT: [[ST_INT_ST_VAL:%.*]] = load [[INT]], [[INT]]* [[ST_INT_ST_ADDR]]
+ // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ST_INT_ST_VAL]]
 // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4
 // CHECK-DEBUG-NEXT: store i8* getelementptr inbounds ([{{.*}} x i8]* [[LOC17]], i{{.*}} 0, i{{.*}} 0), i8** [[KMPC_LOC_ADDR_PSOURCE]]
 // CHECK-DEBUG-NEXT: [[ST_INT_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[INT]]* [[ST_INT_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8***
 // CHECK-DEBUG-NEXT: [[ST_INT_ST_ADDR:%.*]] = bitcast i8* [[ST_INT_ST_TEMP_ADDR]] to [[INT]]*
- // CHECK-DEBUG-NEXT: [[ST_INT_ST_VAL:%.*]] = load [[INT]]* [[ST_INT_ST_ADDR]]
- // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG-NEXT: [[ST_INT_ST_VAL:%.*]] = load [[INT]], [[INT]]* [[ST_INT_ST_ADDR]]
+ // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ST_INT_ST_VAL]]
 // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 Res += ST<int>::st;
 // CHECK: [[ST_FLOAT_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast (float* [[ST_FLOAT_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[ST_FLOAT_ST]].cache.)
 // CHECK-NEXT: [[ST_FLOAT_ST_ADDR:%.*]] = bitcast i8* [[ST_FLOAT_ST_TEMP_ADDR]] to float*
- // CHECK-NEXT: [[ST_FLOAT_ST_VAL:%.*]] = load float* [[ST_FLOAT_ST_ADDR]]
+ // CHECK-NEXT: [[ST_FLOAT_ST_VAL:%.*]] = load float, float* [[ST_FLOAT_ST_ADDR]]
 // CHECK-NEXT: [[FLOAT_TO_INT_CONV:%.*]] = fptosi float [[ST_FLOAT_ST_VAL]] to [[INT]]
- // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[FLOAT_TO_INT_CONV]]
 // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4
 // CHECK-DEBUG-NEXT: store i8* getelementptr inbounds ([{{.*}} x i8]* [[LOC18]], i{{.*}} 0, i{{.*}} 0), i8** [[KMPC_LOC_ADDR_PSOURCE]]
 // CHECK-DEBUG-NEXT: [[ST_FLOAT_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast (float* [[ST_FLOAT_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8***
 // CHECK-DEBUG-NEXT: [[ST_FLOAT_ST_ADDR:%.*]] = bitcast i8* [[ST_FLOAT_ST_TEMP_ADDR]] to float*
- // CHECK-DEBUG-NEXT: [[ST_FLOAT_ST_VAL:%.*]] = load float* [[ST_FLOAT_ST_ADDR]]
+ // CHECK-DEBUG-NEXT: [[ST_FLOAT_ST_VAL:%.*]] = load float, float* [[ST_FLOAT_ST_ADDR]]
 // CHECK-DEBUG-NEXT: [[FLOAT_TO_INT_CONV:%.*]] = fptosi float [[ST_FLOAT_ST_VAL]] to [[INT]]
- // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[FLOAT_TO_INT_CONV]]
 // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 Res += static_cast<int>(ST<float>::st);
 // CHECK: [[ST_S4_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[DEFAULT_LOC]], i32 [[THREAD_NUM]], i8* bitcast ([[S4]]* [[ST_S4_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8*** [[ST_S4_ST]].cache.)
 // CHECK-NEXT: [[ST_S4_ST_ADDR:%.*]] = bitcast i8* [[ST_S4_ST_TEMP_ADDR]] to [[S4]]*
 // CHECK-NEXT: [[ST_S4_ST_A_ADDR:%.*]] = getelementptr inbounds [[S4]], [[S4]]* [[ST_S4_ST_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-NEXT: [[ST_S4_ST_A:%.*]] = load [[INT]]* [[ST_S4_ST_A_ADDR]]
- // CHECK-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-NEXT: [[ST_S4_ST_A:%.*]] = load [[INT]], [[INT]]* [[ST_S4_ST_A_ADDR]]
+ // CHECK-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ST_S4_ST_A]]
 // CHECK-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 // CHECK-DEBUG: [[KMPC_LOC_ADDR_PSOURCE:%.*]] = getelementptr inbounds [[IDENT]], [[IDENT]]* [[KMPC_LOC_ADDR]], i{{.*}} 0, i{{.*}} 4
@@ -656,14 +656,14 @@ int foobar() {
 // CHECK-DEBUG-NEXT: [[ST_S4_ST_TEMP_ADDR:%.*]] = call {{.*}}i8* @__kmpc_threadprivate_cached([[IDENT]]* [[KMPC_LOC_ADDR]], i32 [[THREAD_NUM]], i8* bitcast ([[S4]]* [[ST_S4_ST]] to i8*), i{{.*}} {{[0-9]+}}, i8***
 // CHECK-DEBUG-NEXT: [[ST_S4_ST_ADDR:%.*]] = bitcast i8* [[ST_S4_ST_TEMP_ADDR]] to [[S4]]*
 // CHECK-DEBUG-NEXT: [[ST_S4_ST_A_ADDR:%.*]] = getelementptr inbounds [[S4]], [[S4]]* [[ST_S4_ST_ADDR]], i{{.*}} 0, i{{.*}} 0
- // CHECK-DEBUG-NEXT: [[ST_S4_ST_A:%.*]] = load [[INT]]* [[ST_S4_ST_A_ADDR]]
- // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG-NEXT: [[ST_S4_ST_A:%.*]] = load [[INT]], [[INT]]* [[ST_S4_ST_A_ADDR]]
+ // CHECK-DEBUG-NEXT: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: [[ADD:%.*]] = add {{.*}} [[INT]] [[RES]], [[ST_S4_ST_A]]
 // CHECK-DEBUG-NEXT: store [[INT]] [[ADD]], [[INT]]* [[RES:.+]]
 Res += ST<S4>::st.a;
- // CHECK: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-NEXT: ret [[INT]] [[RES]]
- // CHECK-DEBUG: [[RES:%.*]] = load [[INT]]* [[RES_ADDR]]
+ // CHECK-DEBUG: [[RES:%.*]] = load [[INT]], [[INT]]* [[RES_ADDR]]
 // CHECK-DEBUG-NEXT: ret [[INT]] [[RES]]
 return Res;
 }
@@ -672,16 +672,16 @@ int foobar() {
 // CHECK: call {{.*}}void @__kmpc_threadprivate_register([[IDENT]]* [[DEFAULT_LOC]], i8* bitcast ([[S4]]* [[ST_S4_ST]] to i8*), i8* (i8*)* [[ST_S4_ST_CTOR:@\.__kmpc_global_ctor_\..+]], i8* (i8*, i8*)* null, void (i8*)* [[ST_S4_ST_DTOR:@\.__kmpc_global_dtor_\..+]])
 // CHECK: define internal {{.*}}i8* [[ST_S4_ST_CTOR]](i8*)
 // CHECK: store i8* %0, i8** [[ARG_ADDR:%.*]],
-// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]]
+// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]]
 // CHECK: [[RES:%.*]] = bitcast i8* [[ARG]] to [[S4]]*
 // CHECK-NEXT: call {{.*}} [[S4_CTOR:@.+]]([[S4]]* [[RES]], {{.*}} 23)
-// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]]
+// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]]
 // CHECK-NEXT: ret i8* [[ARG]]
 // CHECK-NEXT: }
 // CHECK: define {{.*}} [[S4_CTOR]]([[S4]]* {{.*}},
 // CHECK: define internal {{.*}}void [[ST_S4_ST_DTOR]](i8*)
 // CHECK: store i8* %0, i8** [[ARG_ADDR:%.*]],
-// CHECK: [[ARG:%.+]] = load i8** [[ARG_ADDR]]
+// CHECK: [[ARG:%.+]] = load i8*, i8** [[ARG_ADDR]]
 // CHECK: [[RES:%.*]] = bitcast i8* [[ARG]] to [[S4]]*
 // CHECK-NEXT: call {{.*}} [[S4_DTOR:@.+]]([[S4]]* [[RES]])
 // CHECK-NEXT: ret void
diff --git a/test/SemaCXX/linkage.cpp b/test/SemaCXX/linkage.cpp
index 79c7835867..aa595948c6 100644
--- a/test/SemaCXX/linkage.cpp
+++ b/test/SemaCXX/linkage.cpp
@@ -83,7 +83,7 @@ extern "C" {
 // Test both for mangling in the code generation and warnings from use
 // of internal, undefined names via -Werror.
 // CHECK: call i32 @g(
- // CHECK: load i32* @a,
+ // CHECK: load i32, i32* @a,
 return g() + a;
 }
 };
diff --git a/test/SemaObjC/debugger-support.m b/test/SemaObjC/debugger-support.m
index 5dbc3eebfa..b67353f6f7 100644
--- a/test/SemaObjC/debugger-support.m
+++ b/test/SemaObjC/debugger-support.m
@@ -8,7 +8,7 @@ void test0(id x) {
 // CHECK: [[X:%.*]] = alloca i8*, align 8
 // CHECK-NEXT: [[RESULT:%.*]] = alloca [[A:%.*]], align 4
 // CHECK-NEXT: store i8* {{%.*}}, i8** [[X]],
- // CHECK-NEXT: [[T0:%.*]] = load i8** [[X]],
- // CHECK-NEXT: [[T1:%.*]] = load i8** @OBJC_SELECTOR_REFERENCES_
+ // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]],
+ // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_
 // CHECK-NEXT: [[T2:%.*]] = call { i64, i64 } bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to { i64, i64 } (i8*, i8*)*)(i8* [[T0]], i8* [[T1]])
 }
-- 
2.40.0
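
Note for readers of this patch: the hunks above only update FileCheck patterns; the underlying LLVM IR change is that a load instruction now spells its result type explicitly before the pointer operand instead of deriving it from the pointer's pointee type. A minimal sketch of the two spellings in the typed-pointer IR of that era follows; the @sample function and %p name are illustrative only and do not come from any test in this patch.

define i32 @sample(i32* %p) {
entry:
  ; old spelling, accepted before the LLVM change this patch tracks:
  ;   %v = load i32* %p
  ; new spelling, which the updated CHECK lines now expect:
  %v = load i32, i32* %p
  ret i32 %v
}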