From d1a05a83f81b0f61861f3e8fd8ffe693cbd376b2 Mon Sep 17 00:00:00 2001 From: Vedant Kumar Date: Fri, 17 Mar 2017 17:53:26 +0000 Subject: [PATCH] [Bitcode] Add compatibility test for the 4.0 release Fork off compatibility.ll for the 4.0 release. The *.bc file in this commit was produced using a Release build of the release_40 branch. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@298109 91177308-0d34-0410-b5e6-96231b3b80d8 --- test/Bitcode/compatibility-4.0.ll | 1689 ++++++++++++++++++++++++++ test/Bitcode/compatibility-4.0.ll.bc | Bin 0 -> 16400 bytes 2 files changed, 1689 insertions(+) create mode 100644 test/Bitcode/compatibility-4.0.ll create mode 100644 test/Bitcode/compatibility-4.0.ll.bc diff --git a/test/Bitcode/compatibility-4.0.ll b/test/Bitcode/compatibility-4.0.ll new file mode 100644 index 00000000000..fa7a9b1e1e0 --- /dev/null +++ b/test/Bitcode/compatibility-4.0.ll @@ -0,0 +1,1689 @@ +; Bitcode compatibility test for llvm 4.0.0 +; +; N.b: This is 4.0-compatible IR. The CHECK lines occasionally differ from +; the IR used to generate the bitcode, and may need to be updated. + +; RUN: llvm-dis < %s.bc | FileCheck %s + +target datalayout = "E" +; CHECK: target datalayout = "E" + +target triple = "x86_64-apple-macosx10.10.0" +; CHECK: target triple = "x86_64-apple-macosx10.10.0" + +;; Module-level assembly +module asm "beep boop" +; CHECK: module asm "beep boop" + +;; Comdats +$comdat.any = comdat any +; CHECK: $comdat.any = comdat any +$comdat.exactmatch = comdat exactmatch +; CHECK: $comdat.exactmatch = comdat exactmatch +$comdat.largest = comdat largest +; CHECK: $comdat.largest = comdat largest +$comdat.noduplicates = comdat noduplicates +; CHECK: $comdat.noduplicates = comdat noduplicates +$comdat.samesize = comdat samesize +; CHECK: $comdat.samesize = comdat samesize + +;; Constants +@const.true = constant i1 true +; CHECK: @const.true = constant i1 true +@const.false = constant i1 false +; CHECK: @const.false = constant i1 false +@const.int = constant i32 zeroinitializer +; CHECK: @const.int = constant i32 0 +@const.float = constant double 0.0 +; CHECK: @const.float = constant double 0.0 +@const.null = constant i8* null +; CHECK: @const.null = constant i8* null +%const.struct.type = type { i32, i8 } +%const.struct.type.packed = type <{ i32, i8 }> +@const.struct = constant %const.struct.type { i32 -1, i8 undef } +; CHECK: @const.struct = constant %const.struct.type { i32 -1, i8 undef } +@const.struct.packed = constant %const.struct.type.packed <{ i32 -1, i8 1 }> +; CHECK: @const.struct.packed = constant %const.struct.type.packed <{ i32 -1, i8 1 }> + +; CHECK: @constant.array.i8 = constant [3 x i8] c"\00\01\00" +@constant.array.i8 = constant [3 x i8] [i8 -0, i8 1, i8 0] +; CHECK: @constant.array.i16 = constant [3 x i16] [i16 0, i16 1, i16 0] +@constant.array.i16 = constant [3 x i16] [i16 -0, i16 1, i16 0] +; CHECK: @constant.array.i32 = constant [3 x i32] [i32 0, i32 1, i32 0] +@constant.array.i32 = constant [3 x i32] [i32 -0, i32 1, i32 0] +; CHECK: @constant.array.i64 = constant [3 x i64] [i64 0, i64 1, i64 0] +@constant.array.i64 = constant [3 x i64] [i64 -0, i64 1, i64 0] +; CHECK: @constant.array.f16 = constant [3 x half] [half 0xH8000, half 0xH3C00, half 0xH0000] +@constant.array.f16 = constant [3 x half] [half -0.0, half 1.0, half 0.0] +; CHECK: @constant.array.f32 = constant [3 x float] [float -0.000000e+00, float 1.000000e+00, float 0.000000e+00] +@constant.array.f32 = constant [3 x float] [float -0.0, float 1.0, float 0.0] +; CHECK: @constant.array.f64 = 
constant [3 x double] [double -0.000000e+00, double 1.000000e+00, double 0.000000e+00] +@constant.array.f64 = constant [3 x double] [double -0.0, double 1.0, double 0.0] + +; CHECK: @constant.vector.i8 = constant <3 x i8> +@constant.vector.i8 = constant <3 x i8> +; CHECK: @constant.vector.i16 = constant <3 x i16> +@constant.vector.i16 = constant <3 x i16> +; CHECK: @constant.vector.i32 = constant <3 x i32> +@constant.vector.i32 = constant <3 x i32> +; CHECK: @constant.vector.i64 = constant <3 x i64> +@constant.vector.i64 = constant <3 x i64> +; CHECK: @constant.vector.f16 = constant <3 x half> +@constant.vector.f16 = constant <3 x half> +; CHECK: @constant.vector.f32 = constant <3 x float> +@constant.vector.f32 = constant <3 x float> +; CHECK: @constant.vector.f64 = constant <3 x double> +@constant.vector.f64 = constant <3 x double> + +;; Global Variables +; Format: [@ =] [Linkage] [Visibility] [DLLStorageClass] +; [ThreadLocal] [(unnamed_addr|local_unnamed_addr)] [AddrSpace] [ExternallyInitialized] +; [] +; [, section "name"] [, comdat [($name)]] [, align ] + +; Global Variables -- Simple +@g1 = global i32 0 +; CHECK: @g1 = global i32 0 +@g2 = constant i32 0 +; CHECK: @g2 = constant i32 0 + +; Global Variables -- Linkage +@g.private = private global i32 0 +; CHECK: @g.private = private global i32 0 +@g.internal = internal global i32 0 +; CHECK: @g.internal = internal global i32 0 +@g.available_externally = available_externally global i32 0 +; CHECK: @g.available_externally = available_externally global i32 0 +@g.linkonce = linkonce global i32 0 +; CHECK: @g.linkonce = linkonce global i32 0 +@g.weak = weak global i32 0 +; CHECK: @g.weak = weak global i32 0 +@g.common = common global i32 0 +; CHECK: @g.common = common global i32 0 +@g.appending = appending global [4 x i8] c"test" +; CHECK: @g.appending = appending global [4 x i8] c"test" +@g.extern_weak = extern_weak global i32 +; CHECK: @g.extern_weak = extern_weak global i32 +@g.linkonce_odr = linkonce_odr global i32 0 +; CHECK: @g.linkonce_odr = linkonce_odr global i32 0 +@g.weak_odr = weak_odr global i32 0 +; CHECK: @g.weak_odr = weak_odr global i32 0 +@g.external = external global i32 +; CHECK: @g.external = external global i32 + +; Global Variables -- Visibility +@g.default = default global i32 0 +; CHECK: @g.default = global i32 0 +@g.hidden = hidden global i32 0 +; CHECK: @g.hidden = hidden global i32 0 +@g.protected = protected global i32 0 +; CHECK: @g.protected = protected global i32 0 + +; Global Variables -- DLLStorageClass +@g.dlldefault = default global i32 0 +; CHECK: @g.dlldefault = global i32 0 +@g.dllimport = external dllimport global i32 +; CHECK: @g.dllimport = external dllimport global i32 +@g.dllexport = dllexport global i32 0 +; CHECK: @g.dllexport = dllexport global i32 0 + +; Global Variables -- ThreadLocal +@g.notthreadlocal = global i32 0 +; CHECK: @g.notthreadlocal = global i32 0 +@g.generaldynamic = thread_local global i32 0 +; CHECK: @g.generaldynamic = thread_local global i32 0 +@g.localdynamic = thread_local(localdynamic) global i32 0 +; CHECK: @g.localdynamic = thread_local(localdynamic) global i32 0 +@g.initialexec = thread_local(initialexec) global i32 0 +; CHECK: @g.initialexec = thread_local(initialexec) global i32 0 +@g.localexec = thread_local(localexec) global i32 0 +; CHECK: @g.localexec = thread_local(localexec) global i32 0 + +; Global Variables -- unnamed_addr and local_unnamed_addr +@g.unnamed_addr = unnamed_addr global i32 0 +; CHECK: @g.unnamed_addr = unnamed_addr global i32 0 
+@g.local_unnamed_addr = local_unnamed_addr global i32 0 +; CHECK: @g.local_unnamed_addr = local_unnamed_addr global i32 0 + +; Global Variables -- AddrSpace +@g.addrspace = addrspace(1) global i32 0 +; CHECK: @g.addrspace = addrspace(1) global i32 0 + +; Global Variables -- ExternallyInitialized +@g.externally_initialized = external externally_initialized global i32 +; CHECK: @g.externally_initialized = external externally_initialized global i32 + +; Global Variables -- section +@g.section = global i32 0, section "_DATA" +; CHECK: @g.section = global i32 0, section "_DATA" + +; Global Variables -- comdat +@comdat.any = global i32 0, comdat +; CHECK: @comdat.any = global i32 0, comdat +@comdat.exactmatch = global i32 0, comdat +; CHECK: @comdat.exactmatch = global i32 0, comdat +@comdat.largest = global i32 0, comdat +; CHECK: @comdat.largest = global i32 0, comdat +@comdat.noduplicates = global i32 0, comdat +; CHECK: @comdat.noduplicates = global i32 0, comdat +@comdat.samesize = global i32 0, comdat +; CHECK: @comdat.samesize = global i32 0, comdat + +; Force two globals from different comdats into sections with the same name. +$comdat1 = comdat any +$comdat2 = comdat any +@g.comdat1 = global i32 0, section "SharedSection", comdat($comdat1) +; CHECK: @g.comdat1 = global i32 0, section "SharedSection", comdat($comdat1) +@g.comdat2 = global i32 0, section "SharedSection", comdat($comdat2) +; CHECK: @g.comdat2 = global i32 0, section "SharedSection", comdat($comdat2) + +; Global Variables -- align +@g.align = global i32 0, align 4 +; CHECK: @g.align = global i32 0, align 4 + +; Global Variables -- Intrinsics +%pri.func.data = type { i32, void ()*, i8* } +@g.used1 = global i32 0 +@g.used2 = global i32 0 +@g.used3 = global i8 0 +declare void @g.f1() +@llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata" +; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata" +@llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata" +; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata" +@llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata" +; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata" +@llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata" +; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata" + +;; Aliases +; Format: @ = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal] +; [unnamed_addr] alias @ + +; Aliases -- Linkage +@a.private = private alias i32, i32* @g.private +; CHECK: @a.private = private alias i32, i32* @g.private +@a.internal = internal alias i32, i32* @g.internal +; CHECK: @a.internal = internal alias i32, i32* @g.internal +@a.linkonce = linkonce alias i32, i32* @g.linkonce +; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce +@a.weak = weak alias i32, i32* @g.weak +; CHECK: @a.weak = weak alias i32, i32* @g.weak +@a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr +; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr +@a.weak_odr = weak_odr alias i32, i32* @g.weak_odr +; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr +@a.external = 
external alias i32, i32* @g1 +; CHECK: @a.external = alias i32, i32* @g1 + +; Aliases -- Visibility +@a.default = default alias i32, i32* @g.default +; CHECK: @a.default = alias i32, i32* @g.default +@a.hidden = hidden alias i32, i32* @g.hidden +; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden +@a.protected = protected alias i32, i32* @g.protected +; CHECK: @a.protected = protected alias i32, i32* @g.protected + +; Aliases -- DLLStorageClass +@a.dlldefault = default alias i32, i32* @g.dlldefault +; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault +@a.dllimport = dllimport alias i32, i32* @g1 +; CHECK: @a.dllimport = dllimport alias i32, i32* @g1 +@a.dllexport = dllexport alias i32, i32* @g.dllexport +; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport + +; Aliases -- ThreadLocal +@a.notthreadlocal = alias i32, i32* @g.notthreadlocal +; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal +@a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic +; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic +@a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic +; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic +@a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec +; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec +@a.localexec = thread_local(localexec) alias i32, i32* @g.localexec +; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec + +; Aliases -- unnamed_addr and local_unnamed_addr +@a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr +; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr +@a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr +; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr + +;; IFunc +; Format @ = [Linkage] [Visibility] ifunc , +; * @ + +; IFunc -- Linkage +@ifunc.external = external ifunc void (), i8* ()* @ifunc_resolver +; CHECK: @ifunc.external = ifunc void (), i8* ()* @ifunc_resolver +@ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver +; CHECK: @ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver +@ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver +; CHECK: @ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver + +; IFunc -- Visibility +@ifunc.default = default ifunc void (), i8* ()* @ifunc_resolver +; CHECK: @ifunc.default = ifunc void (), i8* ()* @ifunc_resolver +@ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver +; CHECK: @ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver +@ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver +; CHECK: @ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver + +define i8* @ifunc_resolver() { +entry: + ret i8* null +} + +;; Functions +; Format: define [linkage] [visibility] [DLLStorageClass] +; [cconv] [ret attrs] +; @ ([argument list]) +; [(unnamed_addr|local_unnamed_addr)] [fn Attrs] [section "name"] [comdat [($name)]] +; [align N] [gc] [prefix Constant] [prologue Constant] +; [personality Constant] { ... 
} + +; Functions -- Simple +declare void @f1 () +; CHECK: declare void @f1() + +define void @f2 () { +; CHECK: define void @f2() +entry: + ret void +} + +; Functions -- linkage +define private void @f.private() { +; CHECK: define private void @f.private() +entry: + ret void +} +define internal void @f.internal() { +; CHECK: define internal void @f.internal() +entry: + ret void +} +define available_externally void @f.available_externally() { +; CHECK: define available_externally void @f.available_externally() +entry: + ret void +} +define linkonce void @f.linkonce() { +; CHECK: define linkonce void @f.linkonce() +entry: + ret void +} +define weak void @f.weak() { +; CHECK: define weak void @f.weak() +entry: + ret void +} +define linkonce_odr void @f.linkonce_odr() { +; CHECK: define linkonce_odr void @f.linkonce_odr() +entry: + ret void +} +define weak_odr void @f.weak_odr() { +; CHECK: define weak_odr void @f.weak_odr() +entry: + ret void +} +declare external void @f.external() +; CHECK: declare void @f.external() +declare extern_weak void @f.extern_weak() +; CHECK: declare extern_weak void @f.extern_weak() + +; Functions -- visibility +declare default void @f.default() +; CHECK: declare void @f.default() +declare hidden void @f.hidden() +; CHECK: declare hidden void @f.hidden() +declare protected void @f.protected() +; CHECK: declare protected void @f.protected() + +; Functions -- DLLStorageClass +declare dllimport void @f.dllimport() +; CHECK: declare dllimport void @f.dllimport() +declare dllexport void @f.dllexport() +; CHECK: declare dllexport void @f.dllexport() + +; Functions -- cconv (Calling conventions) +declare ccc void @f.ccc() +; CHECK: declare void @f.ccc() +declare fastcc void @f.fastcc() +; CHECK: declare fastcc void @f.fastcc() +declare coldcc void @f.coldcc() +; CHECK: declare coldcc void @f.coldcc() +declare cc10 void @f.cc10() +; CHECK: declare ghccc void @f.cc10() +declare ghccc void @f.ghccc() +; CHECK: declare ghccc void @f.ghccc() +declare cc11 void @f.cc11() +; CHECK: declare cc11 void @f.cc11() +declare webkit_jscc void @f.webkit_jscc() +; CHECK: declare webkit_jscc void @f.webkit_jscc() +declare anyregcc void @f.anyregcc() +; CHECK: declare anyregcc void @f.anyregcc() +declare preserve_mostcc void @f.preserve_mostcc() +; CHECK: declare preserve_mostcc void @f.preserve_mostcc() +declare preserve_allcc void @f.preserve_allcc() +; CHECK: declare preserve_allcc void @f.preserve_allcc() +declare cc64 void @f.cc64() +; CHECK: declare x86_stdcallcc void @f.cc64() +declare x86_stdcallcc void @f.x86_stdcallcc() +; CHECK: declare x86_stdcallcc void @f.x86_stdcallcc() +declare cc65 void @f.cc65() +; CHECK: declare x86_fastcallcc void @f.cc65() +declare x86_fastcallcc void @f.x86_fastcallcc() +; CHECK: declare x86_fastcallcc void @f.x86_fastcallcc() +declare cc66 void @f.cc66() +; CHECK: declare arm_apcscc void @f.cc66() +declare arm_apcscc void @f.arm_apcscc() +; CHECK: declare arm_apcscc void @f.arm_apcscc() +declare cc67 void @f.cc67() +; CHECK: declare arm_aapcscc void @f.cc67() +declare arm_aapcscc void @f.arm_aapcscc() +; CHECK: declare arm_aapcscc void @f.arm_aapcscc() +declare cc68 void @f.cc68() +; CHECK: declare arm_aapcs_vfpcc void @f.cc68() +declare arm_aapcs_vfpcc void @f.arm_aapcs_vfpcc() +; CHECK: declare arm_aapcs_vfpcc void @f.arm_aapcs_vfpcc() +declare cc69 void @f.cc69() +; CHECK: declare msp430_intrcc void @f.cc69() +declare msp430_intrcc void @f.msp430_intrcc() +; CHECK: declare msp430_intrcc void @f.msp430_intrcc() +declare cc70 void @f.cc70() +; CHECK: 
declare x86_thiscallcc void @f.cc70() +declare x86_thiscallcc void @f.x86_thiscallcc() +; CHECK: declare x86_thiscallcc void @f.x86_thiscallcc() +declare cc71 void @f.cc71() +; CHECK: declare ptx_kernel void @f.cc71() +declare ptx_kernel void @f.ptx_kernel() +; CHECK: declare ptx_kernel void @f.ptx_kernel() +declare cc72 void @f.cc72() +; CHECK: declare ptx_device void @f.cc72() +declare ptx_device void @f.ptx_device() +; CHECK: declare ptx_device void @f.ptx_device() +declare cc75 void @f.cc75() +; CHECK: declare spir_func void @f.cc75() +declare spir_func void @f.spir_func() +; CHECK: declare spir_func void @f.spir_func() +declare cc76 void @f.cc76() +; CHECK: declare spir_kernel void @f.cc76() +declare spir_kernel void @f.spir_kernel() +; CHECK: declare spir_kernel void @f.spir_kernel() +declare cc77 void @f.cc77() +; CHECK: declare intel_ocl_bicc void @f.cc77() +declare intel_ocl_bicc void @f.intel_ocl_bicc() +; CHECK: declare intel_ocl_bicc void @f.intel_ocl_bicc() +declare cc78 void @f.cc78() +; CHECK: declare x86_64_sysvcc void @f.cc78() +declare x86_64_sysvcc void @f.x86_64_sysvcc() +; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc() +declare cc79 void @f.cc79() +; CHECK: declare x86_64_win64cc void @f.cc79() +declare x86_64_win64cc void @f.x86_64_win64cc() +; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc() +declare cc80 void @f.cc80() +; CHECK: declare x86_vectorcallcc void @f.cc80() +declare x86_vectorcallcc void @f.x86_vectorcallcc() +; CHECK: declare x86_vectorcallcc void @f.x86_vectorcallcc() +declare cc81 void @f.cc81() +; CHECK: declare hhvmcc void @f.cc81() +declare hhvmcc void @f.hhvmcc() +; CHECK: declare hhvmcc void @f.hhvmcc() +declare cc82 void @f.cc82() +; CHECK: declare hhvm_ccc void @f.cc82() +declare hhvm_ccc void @f.hhvm_ccc() +; CHECK: declare hhvm_ccc void @f.hhvm_ccc() +declare cc83 void @f.cc83() +; CHECK: declare x86_intrcc void @f.cc83() +declare x86_intrcc void @f.x86_intrcc() +; CHECK: declare x86_intrcc void @f.x86_intrcc() +declare cc84 void @f.cc84() +; CHECK: declare avr_intrcc void @f.cc84() +declare avr_intrcc void @f.avr_intrcc() +; CHECK: declare avr_intrcc void @f.avr_intrcc() +declare cc85 void @f.cc85() +; CHECK: declare avr_signalcc void @f.cc85() +declare avr_signalcc void @f.avr_signalcc() +; CHECK: declare avr_signalcc void @f.avr_signalcc() +declare cc87 void @f.cc87() +; CHECK: declare amdgpu_vs void @f.cc87() +declare amdgpu_vs void @f.amdgpu_vs() +; CHECK: declare amdgpu_vs void @f.amdgpu_vs() +declare cc88 void @f.cc88() +; CHECK: declare amdgpu_gs void @f.cc88() +declare amdgpu_gs void @f.amdgpu_gs() +; CHECK: declare amdgpu_gs void @f.amdgpu_gs() +declare cc89 void @f.cc89() +; CHECK: declare amdgpu_ps void @f.cc89() +declare amdgpu_ps void @f.amdgpu_ps() +; CHECK: declare amdgpu_ps void @f.amdgpu_ps() +declare cc90 void @f.cc90() +; CHECK: declare amdgpu_cs void @f.cc90() +declare amdgpu_cs void @f.amdgpu_cs() +; CHECK: declare amdgpu_cs void @f.amdgpu_cs() +declare cc91 void @f.cc91() +; CHECK: declare amdgpu_kernel void @f.cc91() +declare amdgpu_kernel void @f.amdgpu_kernel() +; CHECK: declare amdgpu_kernel void @f.amdgpu_kernel() +declare cc1023 void @f.cc1023() +; CHECK: declare cc1023 void @f.cc1023() + +; Functions -- ret attrs (Return attributes) +declare zeroext i64 @f.zeroext() +; CHECK: declare zeroext i64 @f.zeroext() +declare signext i64 @f.signext() +; CHECK: declare signext i64 @f.signext() +declare inreg i32* @f.inreg() +; CHECK: declare inreg i32* @f.inreg() +declare noalias i32* @f.noalias() +; CHECK: 
declare noalias i32* @f.noalias() +declare nonnull i32* @f.nonnull() +; CHECK: declare nonnull i32* @f.nonnull() +declare dereferenceable(4) i32* @f.dereferenceable4() +; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4() +declare dereferenceable(8) i32* @f.dereferenceable8() +; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8() +declare dereferenceable(16) i32* @f.dereferenceable16() +; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16() +declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null() +; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null() +declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null() +; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null() +declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null() +; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null() + +; Functions -- Parameter attributes +declare void @f.param.zeroext(i8 zeroext) +; CHECK: declare void @f.param.zeroext(i8 zeroext) +declare void @f.param.signext(i8 signext) +; CHECK: declare void @f.param.signext(i8 signext) +declare void @f.param.inreg(i8 inreg) +; CHECK: declare void @f.param.inreg(i8 inreg) +declare void @f.param.byval({ i8, i8 }* byval) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +declare void @f.param.inalloca(i8* inalloca) +; CHECK: declare void @f.param.inalloca(i8* inalloca) +declare void @f.param.sret(i8* sret) +; CHECK: declare void @f.param.sret(i8* sret) +declare void @f.param.noalias(i8* noalias) +; CHECK: declare void @f.param.noalias(i8* noalias) +declare void @f.param.nocapture(i8* nocapture) +; CHECK: declare void @f.param.nocapture(i8* nocapture) +declare void @f.param.nest(i8* nest) +; CHECK: declare void @f.param.nest(i8* nest) +declare i8* @f.param.returned(i8* returned) +; CHECK: declare i8* @f.param.returned(i8* returned) +declare void @f.param.nonnull(i8* nonnull) +; CHECK: declare void @f.param.nonnull(i8* nonnull) +declare void @f.param.dereferenceable(i8* dereferenceable(4)) +; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4)) +declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4)) +; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4)) + +; Functions -- unnamed_addr and local_unnamed_addr +declare void @f.unnamed_addr() unnamed_addr +; CHECK: declare void @f.unnamed_addr() unnamed_addr +declare void @f.local_unnamed_addr() local_unnamed_addr +; CHECK: declare void @f.local_unnamed_addr() local_unnamed_addr + +; Functions -- fn Attrs (Function attributes) +declare void @f.alignstack4() alignstack(4) +; CHECK: declare void @f.alignstack4() #0 +declare void @f.alignstack8() alignstack(8) +; CHECK: declare void @f.alignstack8() #1 +declare void @f.alwaysinline() alwaysinline +; CHECK: declare void @f.alwaysinline() #2 +declare void @f.cold() cold +; CHECK: declare void @f.cold() #3 +declare void @f.convergent() convergent +; CHECK: declare void @f.convergent() #4 +declare void @f.inlinehint() inlinehint +; CHECK: declare void @f.inlinehint() #5 +declare void @f.jumptable() unnamed_addr jumptable +; CHECK: declare void @f.jumptable() unnamed_addr #6 +declare void @f.minsize() minsize +; CHECK: declare void @f.minsize() #7 +declare void @f.naked() naked +; CHECK: declare void @f.naked() #8 +declare void @f.nobuiltin() nobuiltin +; CHECK: declare void @f.nobuiltin() #9 +declare void @f.noduplicate() noduplicate +; CHECK: declare void @f.noduplicate() 
#10 +declare void @f.noimplicitfloat() noimplicitfloat +; CHECK: declare void @f.noimplicitfloat() #11 +declare void @f.noinline() noinline +; CHECK: declare void @f.noinline() #12 +declare void @f.nonlazybind() nonlazybind +; CHECK: declare void @f.nonlazybind() #13 +declare void @f.noredzone() noredzone +; CHECK: declare void @f.noredzone() #14 +declare void @f.noreturn() noreturn +; CHECK: declare void @f.noreturn() #15 +declare void @f.nounwind() nounwind +; CHECK: declare void @f.nounwind() #16 +declare void @f.optnone() noinline optnone +; CHECK: declare void @f.optnone() #17 +declare void @f.optsize() optsize +; CHECK: declare void @f.optsize() #18 +declare void @f.readnone() readnone +; CHECK: declare void @f.readnone() #19 +declare void @f.readonly() readonly +; CHECK: declare void @f.readonly() #20 +declare void @f.returns_twice() returns_twice +; CHECK: declare void @f.returns_twice() #21 +declare void @f.safestack() safestack +; CHECK: declare void @f.safestack() #22 +declare void @f.sanitize_address() sanitize_address +; CHECK: declare void @f.sanitize_address() #23 +declare void @f.sanitize_memory() sanitize_memory +; CHECK: declare void @f.sanitize_memory() #24 +declare void @f.sanitize_thread() sanitize_thread +; CHECK: declare void @f.sanitize_thread() #25 +declare void @f.ssp() ssp +; CHECK: declare void @f.ssp() #26 +declare void @f.sspreq() sspreq +; CHECK: declare void @f.sspreq() #27 +declare void @f.sspstrong() sspstrong +; CHECK: declare void @f.sspstrong() #28 +declare void @f.thunk() "thunk" +; CHECK: declare void @f.thunk() #29 +declare void @f.uwtable() uwtable +; CHECK: declare void @f.uwtable() #30 +declare void @f.kvpair() "cpu"="cortex-a8" +; CHECK:declare void @f.kvpair() #31 +declare void @f.norecurse() norecurse +; CHECK: declare void @f.norecurse() #32 +declare void @f.inaccessiblememonly() inaccessiblememonly +; CHECK: declare void @f.inaccessiblememonly() #33 +declare void @f.inaccessiblemem_or_argmemonly() inaccessiblemem_or_argmemonly +; CHECK: declare void @f.inaccessiblemem_or_argmemonly() #34 + +; Functions -- section +declare void @f.section() section "80" +; CHECK: declare void @f.section() section "80" + +; Functions -- comdat +define void @f.comdat_any() comdat($comdat.any) { +; CHECK: define void @f.comdat_any() comdat($comdat.any) +entry: + ret void +} +define void @f.comdat_exactmatch() comdat($comdat.exactmatch) { +; CHECK: define void @f.comdat_exactmatch() comdat($comdat.exactmatch) +entry: + ret void +} +define void @f.comdat_largest() comdat($comdat.largest) { +; CHECK: define void @f.comdat_largest() comdat($comdat.largest) +entry: + ret void +} +define void @f.comdat_noduplicates() comdat($comdat.noduplicates) { +; CHECK: define void @f.comdat_noduplicates() comdat($comdat.noduplicates) +entry: + ret void +} +define void @f.comdat_samesize() comdat($comdat.samesize) { +; CHECK: define void @f.comdat_samesize() comdat($comdat.samesize) +entry: + ret void +} + +; Functions -- align +declare void @f.align2() align 2 +; CHECK: declare void @f.align2() align 2 +declare void @f.align4() align 4 +; CHECK: declare void @f.align4() align 4 +declare void @f.align8() align 8 +; CHECK: declare void @f.align8() align 8 + +; Functions -- GC +declare void @f.gcshadow() gc "shadow-stack" +; CHECK: declare void @f.gcshadow() gc "shadow-stack" + +; Functions -- Prefix data +declare void @f.prefixi32() prefix i32 1684365668 +; CHECK: declare void @f.prefixi32() prefix i32 1684365668 +declare void @f.prefixarray() prefix [4 x i32] [i32 0, i32 1, i32 2, 
i32 3] +; CHECK: declare void @f.prefixarray() prefix [4 x i32] [i32 0, i32 1, i32 2, i32 3] + +; Functions -- Prologue data +declare void @f.prologuei32() prologue i32 1684365669 +; CHECK: declare void @f.prologuei32() prologue i32 1684365669 +declare void @f.prologuearray() prologue [4 x i32] [i32 0, i32 1, i32 2, i32 3] +; CHECK: declare void @f.prologuearray() prologue [4 x i32] [i32 0, i32 1, i32 2, i32 3] + +; Functions -- Personality constant +declare void @llvm.donothing() nounwind readnone +; CHECK: declare void @llvm.donothing() #35 +define void @f.no_personality() personality i8 3 { +; CHECK: define void @f.no_personality() personality i8 3 + invoke void @llvm.donothing() to label %normal unwind label %exception +exception: + %cleanup = landingpad i8 cleanup + br label %normal +normal: + ret void +} + +declare i32 @f.personality_handler() +; CHECK: declare i32 @f.personality_handler() +define void @f.personality() personality i32 ()* @f.personality_handler { +; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler + invoke void @llvm.donothing() to label %normal unwind label %exception +exception: + %cleanup = landingpad i32 cleanup + br label %normal +normal: + ret void +} + +;; Atomic Memory Ordering Constraints +define void @atomics(i32* %word) { + %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic + ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic + %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic + ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic + %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic + ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic + %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic + ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic + %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic + ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic + %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic + ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic + %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic + ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic + %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic + ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic + %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic + ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic + %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic + ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic + %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic + ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic + %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic + ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic + %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic + ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic + %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic + ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic + %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic + ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic + %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic + ; CHECK: %atomicrmw.max = 
atomicrmw max i32* %word, i32 19 monotonic + %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic + ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic + %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic + ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic + %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic + ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic + fence acquire + ; CHECK: fence acquire + fence release + ; CHECK: fence release + fence acq_rel + ; CHECK: fence acq_rel + fence singlethread seq_cst + ; CHECK: fence singlethread seq_cst + + %ld.1 = load atomic i32, i32* %word monotonic, align 4 + ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4 + %ld.2 = load atomic volatile i32, i32* %word acquire, align 8 + ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8 + %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16 + ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16 + + store atomic i32 23, i32* %word monotonic, align 4 + ; CHECK: store atomic i32 23, i32* %word monotonic, align 4 + store atomic volatile i32 24, i32* %word monotonic, align 4 + ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4 + store atomic volatile i32 25, i32* %word singlethread monotonic, align 4 + ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4 + ret void +} + +;; Fast Math Flags +define void @fastmathflags(float %op1, float %op2) { + %f.nnan = fadd nnan float %op1, %op2 + ; CHECK: %f.nnan = fadd nnan float %op1, %op2 + %f.ninf = fadd ninf float %op1, %op2 + ; CHECK: %f.ninf = fadd ninf float %op1, %op2 + %f.nsz = fadd nsz float %op1, %op2 + ; CHECK: %f.nsz = fadd nsz float %op1, %op2 + %f.arcp = fadd arcp float %op1, %op2 + ; CHECK: %f.arcp = fadd arcp float %op1, %op2 + %f.fast = fadd fast float %op1, %op2 + ; CHECK: %f.fast = fadd fast float %op1, %op2 + ret void +} + +; Check various fast math flags and floating-point types on calls. + +declare float @fmf1() +declare double @fmf2() +declare <4 x double> @fmf3() + +; CHECK-LABEL: fastMathFlagsForCalls( +define void @fastMathFlagsForCalls(float %f, double %d1, <4 x double> %d2) { + %call.fast = call fast float @fmf1() + ; CHECK: %call.fast = call fast float @fmf1() + + ; Throw in some other attributes to make sure those stay in the right places. 
+ + %call.nsz.arcp = notail call nsz arcp double @fmf2() + ; CHECK: %call.nsz.arcp = notail call nsz arcp double @fmf2() + + %call.nnan.ninf = tail call nnan ninf fastcc <4 x double> @fmf3() + ; CHECK: %call.nnan.ninf = tail call nnan ninf fastcc <4 x double> @fmf3() + + ret void +} + +;; Type System +%opaquety = type opaque +define void @typesystem() { + %p0 = bitcast i8* null to i32 (i32)* + ; CHECK: %p0 = bitcast i8* null to i32 (i32)* + %p1 = bitcast i8* null to void (i8*)* + ; CHECK: %p1 = bitcast i8* null to void (i8*)* + %p2 = bitcast i8* null to i32 (i8*, ...)* + ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)* + %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)* + ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)* + %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)* + ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)* + %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)* + ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)* + + %t0 = alloca i1942652 + ; CHECK: %t0 = alloca i1942652 + %t1 = alloca half + ; CHECK: %t1 = alloca half + %t2 = alloca float + ; CHECK: %t2 = alloca float + %t3 = alloca double + ; CHECK: %t3 = alloca double + %t4 = alloca fp128 + ; CHECK: %t4 = alloca fp128 + %t5 = alloca x86_fp80 + ; CHECK: %t5 = alloca x86_fp80 + %t6 = alloca ppc_fp128 + ; CHECK: %t6 = alloca ppc_fp128 + %t7 = alloca x86_mmx + ; CHECK: %t7 = alloca x86_mmx + %t8 = alloca %opaquety* + ; CHECK: %t8 = alloca %opaquety* + + ret void +} + +declare void @llvm.token(token) +; CHECK: declare void @llvm.token(token) + +;; Inline Assembler Expressions +define void @inlineasm(i32 %arg) { + call i32 asm "bswap $0", "=r,r"(i32 %arg) + ; CHECK: call i32 asm "bswap $0", "=r,r"(i32 %arg) + call i32 asm sideeffect "blt $1, $2, $3", "=r,r,rm"(i32 %arg, i32 %arg) + ; CHECK: call i32 asm sideeffect "blt $1, $2, $3", "=r,r,rm"(i32 %arg, i32 %arg) + ret void +} + +;; Instructions + +; Instructions -- Terminators +define void @instructions.terminators(i8 %val) personality i32 -10 { + br i1 false, label %iftrue, label %iffalse + ; CHECK: br i1 false, label %iftrue, label %iffalse + br label %iftrue + ; CHECK: br label %iftrue +iftrue: + ret void + ; CHECK: ret void +iffalse: + + switch i8 %val, label %defaultdest [ + ; CHECK: switch i8 %val, label %defaultdest [ + i8 0, label %defaultdest.0 + ; CHECK: i8 0, label %defaultdest.0 + i8 1, label %defaultdest.1 + ; CHECK: i8 1, label %defaultdest.1 + i8 2, label %defaultdest.2 + ; CHECK: i8 2, label %defaultdest.2 + ] + ; CHECK: ] +defaultdest: + ret void +defaultdest.0: + ret void +defaultdest.1: + ret void +defaultdest.2: + + indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2] + ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2] + indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2] + ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2] + + invoke fastcc void @f.fastcc() + ; CHECK: invoke fastcc void @f.fastcc() + to label %defaultdest unwind label %exc + ; CHECK: to label %defaultdest unwind label %exc +exc: + %cleanup = landingpad i32 cleanup + + resume i32 undef + ; CHECK: resume i32 undef + unreachable + ; CHECK: unreachable + + ret void +} + +define i32 @instructions.win_eh.1() personality i32 -3 { +entry: + %arg1 = alloca i32 + %arg2 = alloca i32 + invoke void @f.ccc() to label 
%normal unwind label %catchswitch1 + invoke void @f.ccc() to label %normal unwind label %catchswitch2 + invoke void @f.ccc() to label %normal unwind label %catchswitch3 + +catchswitch1: + %cs1 = catchswitch within none [label %catchpad1] unwind to caller + +catchpad1: + catchpad within %cs1 [] + br label %normal + ; CHECK: catchpad within %cs1 [] + ; CHECK-NEXT: br label %normal + +catchswitch2: + %cs2 = catchswitch within none [label %catchpad2] unwind to caller + +catchpad2: + catchpad within %cs2 [i32* %arg1] + br label %normal + ; CHECK: catchpad within %cs2 [i32* %arg1] + ; CHECK-NEXT: br label %normal + +catchswitch3: + %cs3 = catchswitch within none [label %catchpad3] unwind label %cleanuppad1 + +catchpad3: + catchpad within %cs3 [i32* %arg1, i32* %arg2] + br label %normal + ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2] + ; CHECK-NEXT: br label %normal + +cleanuppad1: + %clean.1 = cleanuppad within none [] + unreachable + ; CHECK: %clean.1 = cleanuppad within none [] + ; CHECK-NEXT: unreachable + +normal: + ret i32 0 +} +; +define i32 @instructions.win_eh.2() personality i32 -4 { +entry: + invoke void @f.ccc() to label %invoke.cont unwind label %catchswitch + +invoke.cont: + invoke void @f.ccc() to label %continue unwind label %cleanup + +cleanup: + %clean = cleanuppad within none [] + ; CHECK: %clean = cleanuppad within none [] + cleanupret from %clean unwind to caller + ; CHECK: cleanupret from %clean unwind to caller + +catchswitch: + %cs = catchswitch within none [label %catchpad] unwind label %terminate + +catchpad: + %catch = catchpad within %cs [] + br label %body + ; CHECK: %catch = catchpad within %cs [] + ; CHECK-NEXT: br label %body + +body: + invoke void @f.ccc() [ "funclet"(token %catch) ] + to label %continue unwind label %terminate.inner + catchret from %catch to label %return + ; CHECK: catchret from %catch to label %return + +return: + ret i32 0 + +terminate.inner: + cleanuppad within %catch [] + unreachable + ; CHECK: cleanuppad within %catch [] + ; CHECK-NEXT: unreachable + +terminate: + cleanuppad within none [] + unreachable + ; CHECK: cleanuppad within none [] + ; CHECK-NEXT: unreachable + +continue: + ret i32 0 +} + +; Instructions -- Binary Operations +define void @instructions.binops(i8 %op1, i8 %op2) { + ; nuw x nsw + add i8 %op1, %op2 + ; CHECK: add i8 %op1, %op2 + add nuw i8 %op1, %op2 + ; CHECK: add nuw i8 %op1, %op2 + add nsw i8 %op1, %op2 + ; CHECK: add nsw i8 %op1, %op2 + add nuw nsw i8 %op1, %op2 + ; CHECK: add nuw nsw i8 %op1, %op2 + sub i8 %op1, %op2 + ; CHECK: sub i8 %op1, %op2 + sub nuw i8 %op1, %op2 + ; CHECK: sub nuw i8 %op1, %op2 + sub nsw i8 %op1, %op2 + ; CHECK: sub nsw i8 %op1, %op2 + sub nuw nsw i8 %op1, %op2 + ; CHECK: sub nuw nsw i8 %op1, %op2 + mul i8 %op1, %op2 + ; CHECK: mul i8 %op1, %op2 + mul nuw i8 %op1, %op2 + ; CHECK: mul nuw i8 %op1, %op2 + mul nsw i8 %op1, %op2 + ; CHECK: mul nsw i8 %op1, %op2 + mul nuw nsw i8 %op1, %op2 + ; CHECK: mul nuw nsw i8 %op1, %op2 + + ; exact + udiv i8 %op1, %op2 + ; CHECK: udiv i8 %op1, %op2 + udiv exact i8 %op1, %op2 + ; CHECK: udiv exact i8 %op1, %op2 + sdiv i8 %op1, %op2 + ; CHECK: sdiv i8 %op1, %op2 + sdiv exact i8 %op1, %op2 + ; CHECK: sdiv exact i8 %op1, %op2 + + ; none + urem i8 %op1, %op2 + ; CHECK: urem i8 %op1, %op2 + srem i8 %op1, %op2 + ; CHECK: srem i8 %op1, %op2 + + ret void +} + +; Instructions -- Bitwise Binary Operations +define void @instructions.bitwise_binops(i8 %op1, i8 %op2) { + ; nuw x nsw + shl i8 %op1, %op2 + ; CHECK: shl i8 %op1, %op2 + shl nuw i8 %op1, %op2 + ; 
CHECK: shl nuw i8 %op1, %op2 + shl nsw i8 %op1, %op2 + ; CHECK: shl nsw i8 %op1, %op2 + shl nuw nsw i8 %op1, %op2 + ; CHECK: shl nuw nsw i8 %op1, %op2 + + ; exact + lshr i8 %op1, %op2 + ; CHECK: lshr i8 %op1, %op2 + lshr exact i8 %op1, %op2 + ; CHECK: lshr exact i8 %op1, %op2 + ashr i8 %op1, %op2 + ; CHECK: ashr i8 %op1, %op2 + ashr exact i8 %op1, %op2 + ; CHECK: ashr exact i8 %op1, %op2 + + ; none + and i8 %op1, %op2 + ; CHECK: and i8 %op1, %op2 + or i8 %op1, %op2 + ; CHECK: or i8 %op1, %op2 + xor i8 %op1, %op2 + ; CHECK: xor i8 %op1, %op2 + + ret void +} + +; Instructions -- Vector Operations +define void @instructions.vectorops(<4 x float> %vec, <4 x float> %vec2) { + extractelement <4 x float> %vec, i8 0 + ; CHECK: extractelement <4 x float> %vec, i8 0 + insertelement <4 x float> %vec, float 3.500000e+00, i8 0 + ; CHECK: insertelement <4 x float> %vec, float 3.500000e+00, i8 0 + shufflevector <4 x float> %vec, <4 x float> %vec2, <2 x i32> zeroinitializer + ; CHECK: shufflevector <4 x float> %vec, <4 x float> %vec2, <2 x i32> zeroinitializer + + ret void +} + +; Instructions -- Aggregate Operations +define void @instructions.aggregateops({ i8, i32 } %up, <{ i8, i32 }> %p, + [3 x i8] %arr, { i8, { i32 }} %n, + <2 x i8*> %pvec, <2 x i64> %offsets) { + extractvalue { i8, i32 } %up, 0 + ; CHECK: extractvalue { i8, i32 } %up, 0 + extractvalue <{ i8, i32 }> %p, 1 + ; CHECK: extractvalue <{ i8, i32 }> %p, 1 + extractvalue [3 x i8] %arr, 2 + ; CHECK: extractvalue [3 x i8] %arr, 2 + extractvalue { i8, { i32 } } %n, 1, 0 + ; CHECK: extractvalue { i8, { i32 } } %n, 1, 0 + + insertvalue { i8, i32 } %up, i8 1, 0 + ; CHECK: insertvalue { i8, i32 } %up, i8 1, 0 + insertvalue <{ i8, i32 }> %p, i32 2, 1 + ; CHECK: insertvalue <{ i8, i32 }> %p, i32 2, 1 + insertvalue [3 x i8] %arr, i8 0, 0 + ; CHECK: insertvalue [3 x i8] %arr, i8 0, 0 + insertvalue { i8, { i32 } } %n, i32 0, 1, 0 + ; CHECK: insertvalue { i8, { i32 } } %n, i32 0, 1, 0 + + %up.ptr = alloca { i8, i32 } + %p.ptr = alloca <{ i8, i32 }> + %arr.ptr = alloca [3 x i8] + %n.ptr = alloca { i8, { i32 } } + + getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0 + ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0 + getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1 + ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1 + getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2 + ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2 + getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1 + ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1 + getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0 + ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0 + getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets + ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets + + ret void +} + +; Instructions -- Memory Access and Addressing Operations +!7 = !{i32 1} +!8 = !{} +!9 = !{i64 4} +define void @instructions.memops(i32** %base) { + alloca i32, i8 4, align 4 + ; CHECK: alloca i32, i8 4, align 4 + alloca inalloca i32, i8 4, align 4 + ; CHECK: alloca inalloca i32, i8 4, align 4 + + load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9 + ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9 + load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal 
!8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9 + ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9 + + store i32* null, i32** %base, align 4, !nontemporal !8 + ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8 + store volatile i32* null, i32** %base, align 4, !nontemporal !8 + ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8 + + ret void +} + +; Instructions -- Conversion Operations +define void @instructions.conversions() { + trunc i32 -1 to i1 + ; CHECK: trunc i32 -1 to i1 + zext i32 -1 to i64 + ; CHECK: zext i32 -1 to i64 + sext i32 -1 to i64 + ; CHECK: sext i32 -1 to i64 + fptrunc float undef to half + ; CHECK: fptrunc float undef to half + fpext half undef to float + ; CHECK: fpext half undef to float + fptoui float undef to i32 + ; CHECK: fptoui float undef to i32 + fptosi float undef to i32 + ; CHECK: fptosi float undef to i32 + uitofp i32 1 to float + ; CHECK: uitofp i32 1 to float + sitofp i32 -1 to float + ; CHECK: sitofp i32 -1 to float + ptrtoint i8* null to i64 + ; CHECK: ptrtoint i8* null to i64 + inttoptr i64 0 to i8* + ; CHECK: inttoptr i64 0 to i8* + bitcast i32 0 to i32 + ; CHECK: bitcast i32 0 to i32 + addrspacecast i32* null to i32 addrspace(1)* + ; CHECK: addrspacecast i32* null to i32 addrspace(1)* + + ret void +} + +; Instructions -- Other Operations +define void @instructions.other(i32 %op1, i32 %op2, half %fop1, half %fop2) { +entry: + icmp eq i32 %op1, %op2 + ; CHECK: icmp eq i32 %op1, %op2 + icmp ne i32 %op1, %op2 + ; CHECK: icmp ne i32 %op1, %op2 + icmp ugt i32 %op1, %op2 + ; CHECK: icmp ugt i32 %op1, %op2 + icmp uge i32 %op1, %op2 + ; CHECK: icmp uge i32 %op1, %op2 + icmp ult i32 %op1, %op2 + ; CHECK: icmp ult i32 %op1, %op2 + icmp ule i32 %op1, %op2 + ; CHECK: icmp ule i32 %op1, %op2 + icmp sgt i32 %op1, %op2 + ; CHECK: icmp sgt i32 %op1, %op2 + icmp sge i32 %op1, %op2 + ; CHECK: icmp sge i32 %op1, %op2 + icmp slt i32 %op1, %op2 + ; CHECK: icmp slt i32 %op1, %op2 + icmp sle i32 %op1, %op2 + ; CHECK: icmp sle i32 %op1, %op2 + + fcmp false half %fop1, %fop2 + ; CHECK: fcmp false half %fop1, %fop2 + fcmp oeq half %fop1, %fop2 + ; CHECK: fcmp oeq half %fop1, %fop2 + fcmp ogt half %fop1, %fop2 + ; CHECK: fcmp ogt half %fop1, %fop2 + fcmp oge half %fop1, %fop2 + ; CHECK: fcmp oge half %fop1, %fop2 + fcmp olt half %fop1, %fop2 + ; CHECK: fcmp olt half %fop1, %fop2 + fcmp ole half %fop1, %fop2 + ; CHECK: fcmp ole half %fop1, %fop2 + fcmp one half %fop1, %fop2 + ; CHECK: fcmp one half %fop1, %fop2 + fcmp ord half %fop1, %fop2 + ; CHECK: fcmp ord half %fop1, %fop2 + fcmp ueq half %fop1, %fop2 + ; CHECK: fcmp ueq half %fop1, %fop2 + fcmp ugt half %fop1, %fop2 + ; CHECK: fcmp ugt half %fop1, %fop2 + fcmp uge half %fop1, %fop2 + ; CHECK: fcmp uge half %fop1, %fop2 + fcmp ult half %fop1, %fop2 + ; CHECK: fcmp ult half %fop1, %fop2 + fcmp ule half %fop1, %fop2 + ; CHECK: fcmp ule half %fop1, %fop2 + fcmp une half %fop1, %fop2 + ; CHECK: fcmp une half %fop1, %fop2 + fcmp uno half %fop1, %fop2 + ; CHECK: fcmp uno half %fop1, %fop2 + fcmp true half %fop1, %fop2 + ; CHECK: fcmp true half %fop1, %fop2 + + br label %exit +L1: + %v1 = add i32 %op1, %op2 + br label %exit +L2: + %v2 = add i32 %op1, %op2 + br label %exit +exit: + phi i32 [ %v1, %L1 ], [ %v2, %L2 ], [ %op1, %entry ] + ; CHECK: phi i32 [ %v1, %L1 ], [ %v2, %L2 ], [ %op1, %entry ] + + select i1 true, i32 0, i32 1 + ; CHECK: select i1 true, i32 0, i32 1 + select <2 x 
i1> , <2 x i8> , <2 x i8> + ; CHECK: select <2 x i1> , <2 x i8> , <2 x i8> + + call void @f.nobuiltin() builtin + ; CHECK: call void @f.nobuiltin() #40 + + call fastcc noalias i32* @f.noalias() noinline + ; CHECK: call fastcc noalias i32* @f.noalias() #12 + tail call ghccc nonnull i32* @f.nonnull() minsize + ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7 + + ret void +} + +define void @instructions.call_musttail(i8* inalloca %val) { + musttail call void @f.param.inalloca(i8* inalloca %val) + ; CHECK: musttail call void @f.param.inalloca(i8* inalloca %val) + + ret void +} + +define void @instructions.call_notail() { + notail call void @f1() + ; CHECK: notail call void @f1() + + ret void +} + +define void @instructions.landingpad() personality i32 -2 { + invoke void @llvm.donothing() to label %proceed unwind label %catch1 + invoke void @llvm.donothing() to label %proceed unwind label %catch2 + invoke void @llvm.donothing() to label %proceed unwind label %catch3 + invoke void @llvm.donothing() to label %proceed unwind label %catch4 + +catch1: + landingpad i32 + ; CHECK: landingpad i32 + cleanup + ; CHECK: cleanup + br label %proceed + +catch2: + landingpad i32 + ; CHECK: landingpad i32 + cleanup + ; CHECK: cleanup + catch i32* null + ; CHECK: catch i32* null + br label %proceed + +catch3: + landingpad i32 + ; CHECK: landingpad i32 + cleanup + ; CHECK: cleanup + catch i32* null + ; CHECK: catch i32* null + catch i32* null + ; CHECK: catch i32* null + br label %proceed + +catch4: + landingpad i32 + ; CHECK: landingpad i32 + filter [2 x i32] zeroinitializer + ; CHECK: filter [2 x i32] zeroinitializer + br label %proceed + +proceed: + ret void +} + +;; Intrinsic Functions + +; Intrinsic Functions -- Variable Argument Handling +declare void @llvm.va_start(i8*) +declare void @llvm.va_copy(i8*, i8*) +declare void @llvm.va_end(i8*) +define void @instructions.va_arg(i8* %v, ...) 
{ + %ap = alloca i8* + %ap2 = bitcast i8** %ap to i8* + + call void @llvm.va_start(i8* %ap2) + ; CHECK: call void @llvm.va_start(i8* %ap2) + + va_arg i8* %ap2, i32 + ; CHECK: va_arg i8* %ap2, i32 + + call void @llvm.va_copy(i8* %v, i8* %ap2) + ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2) + + call void @llvm.va_end(i8* %ap2) + ; CHECK: call void @llvm.va_end(i8* %ap2) + + ret void +} + +; Intrinsic Functions -- Accurate Garbage Collection +declare void @llvm.gcroot(i8**, i8*) +declare i8* @llvm.gcread(i8*, i8**) +declare void @llvm.gcwrite(i8*, i8*, i8**) +define void @intrinsics.gc() gc "shadow-stack" { + %ptrloc = alloca i8* + call void @llvm.gcroot(i8** %ptrloc, i8* null) + ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null) + + call i8* @llvm.gcread(i8* null, i8** %ptrloc) + ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc) + + %ref = alloca i8 + call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc) + ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc) + + ret void +} + +; Intrinsic Functions -- Code Generation +declare i8* @llvm.returnaddress(i32) +declare i8* @llvm.frameaddress(i32) +declare i32 @llvm.read_register.i32(metadata) +declare i64 @llvm.read_register.i64(metadata) +declare void @llvm.write_register.i32(metadata, i32) +declare void @llvm.write_register.i64(metadata, i64) +declare i8* @llvm.stacksave() +declare void @llvm.stackrestore(i8*) +declare void @llvm.prefetch(i8*, i32, i32, i32) +declare void @llvm.pcmarker(i32) +declare i64 @llvm.readcyclecounter() +declare void @llvm.clear_cache(i8*, i8*) +declare void @llvm.instrprof_increment(i8*, i64, i32, i32) + +!10 = !{!"rax"} +define void @intrinsics.codegen() { + call i8* @llvm.returnaddress(i32 1) + ; CHECK: call i8* @llvm.returnaddress(i32 1) + call i8* @llvm.frameaddress(i32 1) + ; CHECK: call i8* @llvm.frameaddress(i32 1) + + call i32 @llvm.read_register.i32(metadata !10) + ; CHECK: call i32 @llvm.read_register.i32(metadata !10) + call i64 @llvm.read_register.i64(metadata !10) + ; CHECK: call i64 @llvm.read_register.i64(metadata !10) + call void @llvm.write_register.i32(metadata !10, i32 0) + ; CHECK: call void @llvm.write_register.i32(metadata !10, i32 0) + call void @llvm.write_register.i64(metadata !10, i64 0) + ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0) + + %stack = call i8* @llvm.stacksave() + ; CHECK: %stack = call i8* @llvm.stacksave() + call void @llvm.stackrestore(i8* %stack) + ; CHECK: call void @llvm.stackrestore(i8* %stack) + + call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0) + ; CHECK: call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0) + + call void @llvm.pcmarker(i32 1) + ; CHECK: call void @llvm.pcmarker(i32 1) + + call i64 @llvm.readcyclecounter() + ; CHECK: call i64 @llvm.readcyclecounter() + + call void @llvm.clear_cache(i8* null, i8* null) + ; CHECK: call void @llvm.clear_cache(i8* null, i8* null) + + call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0) + ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0) + + ret void +} + +declare void @llvm.localescape(...) +declare i8* @llvm.localrecover(i8* %func, i8* %fp, i32 %idx) +define void @intrinsics.localescape() { + %static.alloca = alloca i32 + call void (...) @llvm.localescape(i32* %static.alloca) + ; CHECK: call void (...) 
@llvm.localescape(i32* %static.alloca) + + call void @intrinsics.localrecover() + + ret void +} +define void @intrinsics.localrecover() { + %func = bitcast void ()* @intrinsics.localescape to i8* + %fp = call i8* @llvm.frameaddress(i32 1) + call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0) + ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0) + + ret void +} + +; We need this function to provide `uses' for some metadata tests. +define void @misc.metadata() { + call void @f1(), !srcloc !11 + call void @f1(), !srcloc !12 + call void @f1(), !srcloc !13 + call void @f1(), !srcloc !14 + ret void +} + +declare void @op_bundle_callee_0() +declare void @op_bundle_callee_1(i32,i32) + +define void @call_with_operand_bundle0(i32* %ptr) { +; CHECK-LABEL: call_with_operand_bundle0( + entry: + %l = load i32, i32* %ptr + %x = add i32 42, 1 + call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ] +; CHECK: call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ] + ret void +} + +define void @call_with_operand_bundle1(i32* %ptr) { +; CHECK-LABEL: call_with_operand_bundle1( + entry: + %l = load i32, i32* %ptr + %x = add i32 42, 1 + + call void @op_bundle_callee_0() + call void @op_bundle_callee_0() [ "foo"() ] + call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ] +; CHECK: @op_bundle_callee_0(){{$}} +; CHECK-NEXT: call void @op_bundle_callee_0() [ "foo"() ] +; CHECK-NEXT: call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ] + ret void +} + +define void @call_with_operand_bundle2(i32* %ptr) { +; CHECK-LABEL: call_with_operand_bundle2( + entry: + call void @op_bundle_callee_0() [ "foo"() ] +; CHECK: call void @op_bundle_callee_0() [ "foo"() ] + ret void +} + +define void @call_with_operand_bundle3(i32* %ptr) { +; CHECK-LABEL: call_with_operand_bundle3( + entry: + %l = load i32, i32* %ptr + %x = add i32 42, 1 + call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ] +; CHECK: call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ] + ret void +} + +define void @call_with_operand_bundle4(i32* %ptr) { +; CHECK-LABEL: call_with_operand_bundle4( + entry: + %l = load i32, i32* %ptr + %x = add i32 42, 1 + call void @op_bundle_callee_1(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ] +; CHECK: call void @op_bundle_callee_1(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ] + ret void +} + +; Invoke versions of the above tests: + + +define void @invoke_with_operand_bundle0(i32* %ptr) personality i8 3 { +; CHECK-LABEL: @invoke_with_operand_bundle0( + entry: + %l = load i32, i32* %ptr + %x = add i32 42, 1 + invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ] to label %normal unwind label %exception +; CHECK: invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ] + +exception: + %cleanup = landingpad i8 cleanup + br label %normal +normal: + ret void +} + +define void @invoke_with_operand_bundle1(i32* %ptr) personality i8 3 { +; CHECK-LABEL: @invoke_with_operand_bundle1( + entry: + %l = load i32, i32* %ptr + %x = add i32 42, 1 + + invoke void @op_bundle_callee_0() to label 
+; Invoke versions of the above tests:
+
+
+define void @invoke_with_operand_bundle0(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle0(
+ entry:
+  %l = load i32, i32* %ptr
+  %x = add i32 42, 1
+  invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ] to label %normal unwind label %exception
+; CHECK: invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ]
+
+exception:
+  %cleanup = landingpad i8 cleanup
+  br label %normal
+normal:
+  ret void
+}
+
+define void @invoke_with_operand_bundle1(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle1(
+ entry:
+  %l = load i32, i32* %ptr
+  %x = add i32 42, 1
+
+  invoke void @op_bundle_callee_0() to label %normal unwind label %exception
+; CHECK: invoke void @op_bundle_callee_0(){{$}}
+
+exception:
+  %cleanup = landingpad i8 cleanup
+  br label %normal
+
+normal:
+  invoke void @op_bundle_callee_0() [ "foo"() ] to label %normal1 unwind label %exception1
+; CHECK: invoke void @op_bundle_callee_0() [ "foo"() ]
+
+exception1:
+  %cleanup1 = landingpad i8 cleanup
+  br label %normal1
+
+normal1:
+  invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ] to label %normal2 unwind label %exception2
+; CHECK: invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+
+exception2:
+  %cleanup2 = landingpad i8 cleanup
+  br label %normal2
+
+normal2:
+  ret void
+}
+
+define void @invoke_with_operand_bundle2(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle2(
+ entry:
+  invoke void @op_bundle_callee_0() [ "foo"() ] to label %normal unwind label %exception
+; CHECK: invoke void @op_bundle_callee_0() [ "foo"() ]
+
+exception:
+  %cleanup = landingpad i8 cleanup
+  br label %normal
+normal:
+  ret void
+}
+
+define void @invoke_with_operand_bundle3(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle3(
+ entry:
+  %l = load i32, i32* %ptr
+  %x = add i32 42, 1
+  invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ] to label %normal unwind label %exception
+; CHECK: invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+
+exception:
+  %cleanup = landingpad i8 cleanup
+  br label %normal
+normal:
+  ret void
+}
+
+define void @invoke_with_operand_bundle4(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle4(
+ entry:
+  %l = load i32, i32* %ptr
+  %x = add i32 42, 1
+  invoke void @op_bundle_callee_1(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+          to label %normal unwind label %exception
+; CHECK: invoke void @op_bundle_callee_1(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+
+exception:
+  %cleanup = landingpad i8 cleanup
+  br label %normal
+normal:
+  ret void
+}
+
+declare void @vaargs_func(...)
+define void @invoke_with_operand_bundle_vaarg(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle_vaarg(
+ entry:
+  %l = load i32, i32* %ptr
+  %x = add i32 42, 1
+  invoke void (...) @vaargs_func(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+          to label %normal unwind label %exception
+; CHECK: invoke void (...) @vaargs_func(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+
+exception:
+  %cleanup = landingpad i8 cleanup
+  br label %normal
+normal:
+  ret void
+}
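+
+; Illustrative sketch (editorial, not part of the checked-in 4.0 test): the
+; invoke tests above use "personality i8 3", an arbitrary constant that only
+; has to survive the bitcode round trip.  A frontend would normally install a
+; real personality routine instead, e.g. (assuming the Itanium C++ personality):
+;
+;   declare i32 @__gxx_personality_v0(...)
+;   declare void @may_throw()
+;   define void @personality_example() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+;     invoke void @may_throw() to label %cont unwind label %lpad
+;   cont:
+;     ret void
+;   lpad:
+;     %lp = landingpad { i8*, i32 } cleanup
+;     resume { i8*, i32 } %lp
+;   }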
+
+
+declare void @f.writeonly() writeonly
+; CHECK: declare void @f.writeonly() #39
+
+;; Constant Expressions
+
+define i8** @constexpr() {
+  ; CHECK: ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
+  ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
+}
+
+; CHECK: attributes #0 = { alignstack=4 }
+; CHECK: attributes #1 = { alignstack=8 }
+; CHECK: attributes #2 = { alwaysinline }
+; CHECK: attributes #3 = { cold }
+; CHECK: attributes #4 = { convergent }
+; CHECK: attributes #5 = { inlinehint }
+; CHECK: attributes #6 = { jumptable }
+; CHECK: attributes #7 = { minsize }
+; CHECK: attributes #8 = { naked }
+; CHECK: attributes #9 = { nobuiltin }
+; CHECK: attributes #10 = { noduplicate }
+; CHECK: attributes #11 = { noimplicitfloat }
+; CHECK: attributes #12 = { noinline }
+; CHECK: attributes #13 = { nonlazybind }
+; CHECK: attributes #14 = { noredzone }
+; CHECK: attributes #15 = { noreturn }
+; CHECK: attributes #16 = { nounwind }
+; CHECK: attributes #17 = { noinline optnone }
+; CHECK: attributes #18 = { optsize }
+; CHECK: attributes #19 = { readnone }
+; CHECK: attributes #20 = { readonly }
+; CHECK: attributes #21 = { returns_twice }
+; CHECK: attributes #22 = { safestack }
+; CHECK: attributes #23 = { sanitize_address }
+; CHECK: attributes #24 = { sanitize_memory }
+; CHECK: attributes #25 = { sanitize_thread }
+; CHECK: attributes #26 = { ssp }
+; CHECK: attributes #27 = { sspreq }
+; CHECK: attributes #28 = { sspstrong }
+; CHECK: attributes #29 = { "thunk" }
+; CHECK: attributes #30 = { uwtable }
+; CHECK: attributes #31 = { "cpu"="cortex-a8" }
+; CHECK: attributes #32 = { norecurse }
+; CHECK: attributes #33 = { inaccessiblememonly }
+; CHECK: attributes #34 = { inaccessiblemem_or_argmemonly }
+; CHECK: attributes #35 = { nounwind readnone }
+; CHECK: attributes #36 = { argmemonly nounwind readonly }
+; CHECK: attributes #37 = { argmemonly nounwind }
+; CHECK: attributes #38 = { nounwind readonly }
+; CHECK: attributes #39 = { writeonly }
+; CHECK: attributes #40 = { builtin }
+
+;; Metadata
+
+; Metadata -- Module flags
+!llvm.module.flags = !{!0, !1, !2, !4, !5, !6}
+; CHECK: !llvm.module.flags = !{!0, !1, !2, !4, !5, !6}
+
+!0 = !{i32 1, !"mod1", i32 0}
+; CHECK: !0 = !{i32 1, !"mod1", i32 0}
+!1 = !{i32 2, !"mod2", i32 0}
+; CHECK: !1 = !{i32 2, !"mod2", i32 0}
+!2 = !{i32 3, !"mod3", !3}
+; CHECK: !2 = !{i32 3, !"mod3", !3}
+!3 = !{!"mod6", !0}
+; CHECK: !3 = !{!"mod6", !0}
+!4 = !{i32 4, !"mod4", i32 0}
+; CHECK: !4 = !{i32 4, !"mod4", i32 0}
+!5 = !{i32 5, !"mod5", !0}
+; CHECK: !5 = !{i32 5, !"mod5", !0}
+!6 = !{i32 6, !"mod6", !0}
+; CHECK: !6 = !{i32 6, !"mod6", !0}
+
+; Metadata -- Check `distinct'
+!11 = distinct !{}
+; CHECK: !11 = distinct !{}
+!12 = distinct !{}
+; CHECK: !12 = distinct !{}
+!13 = !{!11}
+; CHECK: !13 = !{!11}
+!14 = !{!12}
+; CHECK: !14 = !{!12}
diff --git a/test/Bitcode/compatibility-4.0.ll.bc b/test/Bitcode/compatibility-4.0.ll.bc
new file mode 100644
index 0000000000000000000000000000000000000000..a2988ff95402376140f39be5e9566379398e3d1e
GIT binary patch
literal 16400
zcmeHueOOahw)Z|chvWbOPDs!o0Z#xeR;mXG8Wi<}1R6DJ_)u}`j0vLER&4wjs~u}k
zK7gpGAlAmV(}8NOc4qFNR%)$R6F^H9713J!m<}lQ+Nrhp)uFvs-@VUCkaT+Ix%YkE
z_j%qw-u;-9{keW?t-aRT>+Il{-ofDj76ttDP69vz0G6Zoewhh7LSSEslu4dE=jia; z=O;d6z|v6{P)h;08wEhPg9aklmcx(4o>Zf2B_@lh^sKFzOFArsj7=ammT+{gC}(SU z4k1fdXi4SNuylvpSgoM-dG7Z>Y3vgWs*l*t>@?K_(F`GDU z!$`6NlCtRP#4Qzx|M?geb*np~{CiZ>SF7XP-=dm??PP7JG6|G6*l#|9wb#H!3*P+` z>I77r`#V&TfQT`ar59Ck%M!Y&i|_2}WZgbgi{kd%l}=O}3KrrzF{(`hr^W9|1%hu! zxMCloPLLhv&germ42rf$AV`(b9Rfl2vSp8-Aim8fE{PD9?~WkigyrTq_ZVUMo-x!? zVY#i0{!&=ZK_+cNXIB}0P3Q#DG1PLQv$u@C;Rm|HwhKVsM*AK==S%e^62#gh>3uHp zL09BAU=$D9x!ek=sMD4E3Vn#2HuD4vLjK z7Uj^>$lak>rh3nC;-ahTP0Av7EXk>xMmC0GW7Ri@6JNTz-l904i*Mf#4;?C z1cp`D?|AVY@=o{%vWJ)lXvlU|DI0NVTNh`W!s^BiQ_}AwwkoT)$fQzo51Fn@E+Tf| ziBC8Au-MmGu6hHV!A@CFeXgC}9lfZ!Fq9-;P}=D!bZ%jJ^*Orw-4FJ0iLG;Gvt*@& z@sHAUvA89(ie0pz`p(^VM<*%CVjt@zM0t!dE|1!5CG1aEnmon$oE@>hKgY^SJB)u; zq>Ds@2e-@;?{MPF|LADLe6Q=o-CWN}n~h^vmRDzKS^ASi6-j-`vNGcSqQ8)eS1!yJ z$4qOOd1iHebB;=-BF!bSr3A5P9}Hq1LR1LyD3Idd zz~ouFE-qygZ83+O!ViDw;B+zRQO5IO5=G0L^0w)B-%jfKbY&E|ZIp4oiZi~CwXRIy z%uk7o52*jZ=nbBRG!a+4_n=fH65StHMU%~>BhQ+gSE?CPMFZHV5s4<-EX1#WeHjJz z`5rd5^5sR6<)q{lo6}~iT&v6oJ*PdQA78E4wmajt;S*iW+IIbT7e1j1A6I1;Kd<_- zKE740y6KDPRI7TrRVS?pH?5{z)AioSZ`sIhHfm{CSrXgx|?zmb`jW;@;NdPf@x9raM)c7y8%E$V7wB} z0{k&;ktC6?iU0?9x_=DS0U){fD*ApXUl$>=;Fg7eGV$vIV;pQYIAw?-0D~u3#W>oZ zg5LxH-pD+J1qlJr=_7Og0ILX6Q8f`mW2gzoIJDqWU>vUWGlxd40&}R!1i)e4{e&@g zp8yWw0V9CZunHtlXfMw+FR+IA6$Y&JvxvOnh8X*~LAG4Whaq!ioz>4ra%ehVEUiwM z=mZMWvje9C4WPW+h9LY!y5lX*X(ZaT)*$}QS2+oYMJH8;e)PMlIbe_NvFWt2M7J@@&ArvT`52c4fLDDILXmUA$ zF~q0S3=PO%ATv~05TZdH7(;txPYGgGS>8X0w7oM(rCcy*buef_5U4pA)D#RlA_!C) z4ElZ$D2NG?v@RHQW)NsYFlccQC^j+(bWt$qpMyZX!JvnNL1zbn_6CDmfpk2YBSA#(Dxj~>@FzE6iP~E5? z(Dq=^!$F{y!9kMV3IcUL=HH}v@hY-k0R$V6RG%;bhypE69W@kSjDO5qT^Ypz+TXIY zHcMFkT?}57BP_~MS@@_8-6oIfMUDF36YJl^K?6nXsz?(i{A`r2DJqvIRJP8z+ zK80`=f6LN(9l~k+EvDxVgj4xj%DZk61>jpAU!-e{7>U9=Un;V9LAU_8KA`}J0^ksC z1eDGX4y99YpiWo?1I7F>78LngOpiK*2C4p*@@_SR8|-fhZ~@8t!yvyG{xAptY6)Sm zx*raO!FE3p6bD$a#NQIapw{0KqHesu#Yf$KS$Q}wRP+G204oAbmmg0E*C5SLMqq{s zoCpG9tA7whSE#gDe^ZFqOyDdL_<24sSCBEn-xNT11%slO7gPk$hG5W6VT=#y5H!x; z6hLjkpf3gH3ZQ|*M*wXJ28HJxU;L5=j8TyEW`K|&X+XIGXhSgQ%YnH9Xu!+_(4Jt> z3I4f$NrOQFCyXKm254gZzC39me*DEN9HaggCuq;#GV|ve$7eLCOJD~085d+k2QVta ze!&V4D%cM{0NB|ApF#`V3iB_7ZEiMv@XFx_Z-pU$L>AjMP&xjE0JM+PClmz${Ttms zcr>jiKcv3}`DqAvKkSsQnG3x&P%5$;K)_d#_;Uu*obgOxn!wDH7R-^t+5RTeb1n$3 zuV34si|4a{=DG!~PAFNR9FneC{bVi_|43j2e(T?v_oM~&7O*ea>8*Y?U_r2j_TtG* zrpG#Y747f;H7tp~t$iVsJ|C)z%QQ($pmHXq06Cr*b4tP)OOdTJyz=m<=$ z$i5HVYN)>ngK7c?dY_Q>o1)(~df{G7^iPH1aU#G_aN!Hna|x1tA`+PWbUF2wQ2?uc5+csQI6pv^+fbGv{<+@j_meq) z{h!sI6&kc~1V(@-+@g#(P=3B&Fyrkn{l3HZNY~7QtOiO&_6LC~{MIMTeA0qaJpH5v z%_HVX%hI}YfXOifU88Px5Y7812QK7(6MJ{h6URXwHvsXK1+6Qh*?yFNB8%dAihmgD zZCL+IV2MlwvmsqZBI6GrkCWtvLvv`;5f&$$c7qo6qLg_Bg#jm&9?eJMp!xi10w*wH z7J7lo09w&O1Stq-2VAlT(16QwgSRnwBBLez2@aQ_Ehf;C3y>BDmw4f}G6=;2^-m&{ zGB`p(HoVBrU6YTiz`Fp{ZpZ=#H8wnE1#mY3)6+Kq6#_g(Z5SQKGJ8EZs{pzr*JpbZ zj(l)$|Jq0U|1xjI4}HfMUb*tjo8dPcAK$opXX9_)IX|yS^XkLlZ_K;?a$Lgh_m3}L z_2xS-@4FNoM}*am2B6dqav)@Sf+smv1}~vPhd+CQ(2MriWguFK(GD3H2fyq?XlE># z!gfDpPEWyp#dg1MexVhE;?gIEz%*(7J~0jUl>7;LRFm>WpkshfQnzWJm?CtPf~AN% zQFWVz7VIE&1plOnA6^4D=c7eou`dD^l{ai244Zsx+ePxp~D zC5}${s%hqYae4|Ky=TL?J+7xy?1?GPoRsnsdP5G_rv%_dsE-ViQ?+!f7Amj_ZoSsk zT$RH~)~uGS@GO(w08!t<8xt-vLzs73>NPf3SG8W#*z$aH}tFE+JB&$*+tNKD9*_XM>d1U1>CAzO! 
zzVV=gLKr)t1VY%F2~>m9b|Eqi%z!yXMFqczMJ| zb<>gX(kFvXiNESgXwoHgs8v_(30J$|#Z&y>v4n-nd~Jh%+}qCaZRTXZAePFPS4&oV z@=f=vqmTv?UPw4;Rozs_f9Z=~hb16})$zSnaZ$Nb+txh3K|cX1U<5wKsSVj zgW`YLOQmV>lMX)_@G}N}H1Ly|MCKfV7jkRU9klxt>0m}Bk)n@b$5*h!b4uDQqcf!I zxi*?QBXy>;8PcitHioIlXl|!AR}sTglE#erWXv0lWA~41Odp)l*l^(E{Hhc7#Koc- zXjd?xfN*LjEmnio3w#tfyuT~Ots_BxF++I&oT?PniNIbs51nt}VarqCzDiyxwt4gu z;Py1=v)Y>-H&iF;BW~)D+1*gb3;G%)z0n)_U4w;!X2Bm|R$93JkQ2HObXtl}D zw89Dr!(yca<5&d_m4O27{~!N9xb=>S|8lObVBnh-HdxuSXw%cWtLdsvu~;k)MUv|t9* zMdpPKvhZD{?^&=snt-zwhRqVXmb`BX5!1wcvG{q6qpD(+zmEYq%tFiR?SS-o?04qi z{GhU!K;L=F;t=#*p)BSXolq7_`0?0cBR?Kn97+Q?9$P$x26}N5C5_^`gz3@F(PXn|A08E0P3T+ptZKiWtir3*9 z=deh|+rzl2wC&#+PH5ZxBNLsP#wQJY?upUWWZR0+%I@2Fn=L1NZXL$a#cThqE;>mZ0vug-pJPguQo%|c~y<`sE}Svp;+t0!RBp@Xn% z>35>0qPITbm~(?h)25Gn;bd*uJyFxPw~la3_bk};$vN1y^m|cL3rrUUp3ys4z%P++ zYO=n->1#~XKhSS5;!+?n3~j~ZT5;c*@{rBi27CgC#kOE^@Y%{g+kEFFcfDftuN}P~ z@T#@Y@+PLl0%%hFlA0nA^XP1VdkAWTE$8)>$zxCBV*>JpQZuoV`Mg(hb6K?)UC11e zHc^rTgebCBNu-S|-=q9`Iel$yvs!geR%uh6SAwuaSGq)-{t`oC%<~kvpt;pM3oH`tDi8|M(1Lw z(aS{Q0+TEksA0;EZqdV=y6%lzdOrQu{-*--DwOcBVCIruGAqkV?#lZhu8$o?j1mhUQ7a7jW}8thuhNU^l#G~rFP!@9zr zwwxh!X;bT@@zk?ia+1VAt4_e76?8JUJKlxPUi6waXfz+o*S(5Y0aKHDPHVF2K@1~e zLMuzvwYJWey`(o;wYA*TwCoR#b6Odm`g}F&iDEh{{gtcF`YRW9k77ZM^|`|UGvvJ* zx{+5)6my|*6hjug3PR4AWDCC&IqAP%gdJOvg)5qJz@|TkE1-y5fg=$&Qyw70Y)GGm587{&3KOjdPSJ41)iPh767cd>HxBuCzShpcgr? zQRORPKRqt7au;>s7$W;q|%P~6!*ipt(S+N^n$ky|m^oQ=Ll(dI3C!=rxG~k4KkQ;b+Uyl&vF?`3>5BD%OEfDj{Hz$@Mc5ad9h?CSX(AR+ zf^@zR1KUzo_7AaQ`KQPKT5o@YY#Sk3xi#9;i zp+8=(9A8=Y)bDZ*%@1v2wlgGXj;O@M%@M6K_y|pek16T!u`vfe+6v$!Vm^FKUjZLm zobb_Ji)y%ZEIWg_O6i(xPDMm>M6E$O)?Ao&)trCA=A0Sd9P!5M(y^8|(yvxqPuZN? z)0!jRx+ooMdn@B=`?6M>^NR@w2z@!G^rrgEC6t(%%1Bp|2AbRrOk=%GLt2|w;>xvF zri68iNQX~#>x3Ii3u8#}+J>P?Vq(MvTG{Jc2UU&EjsgB5yhgwvq&QP}7W?RbHJ#*gMf zoFXr(z+$C^`(QK=gMnDvrrQC#`2J-0Ehi@P`bN{CA+aAP0l>#IGEFi%4TWxpVnuH> z-{tP_Qo*i!VkZR-5UodaY64^0qUn3$N{R#+CG-Iu3;Xya5S=ZRw(OcDs>McBz|fsP z&#neAVn1&|f#@IcN*JCbNJ9c&a`+ATHMO8bVXmz~ICI&s@2R#-mEC{v-nK-!m=ZHO zgLj#u_yhu}2Ie;ad6jp5++o&?JY1lufZzQztCa9?a@e!I`UY{57`{4K*r@Ls#6FV? 
zAyz3>Xmo;C-!8_H10%W>qb%8lA8H+tv@9PHg~dwy{|3vS_aiiD{JbwBKS@My)8ZBk zhxO&w)gaCh=FcCvs9PLN%^(amu|c0Mi1DA^$VPwlCvW6O=_P!E@z@)AC|V+LNkxCd zjXcipqq56B&QD>)9z9b6FPBJhQca{?mAc%omcv}+X<_b7UzT6w@o(!@EzlA~9If2u z%4+eYNmg*(tPlq-jxrnNbPD8FkPYn!(wPgzJORu%xi1GAI|5tL5Ds@&&`rBh&Zr$x zsT-m72*pYZc`aQ3ZIf@aU)De2=_NL_ctm$N{KGN09_a?|nPN+QI2H0aiar81e7%R* zx@?pup*bVD7;i=0WvUp~p<}@U zFAj`hLS8Psx1ssT<|0FWE4B-6BQTI$qS7;-u5geyBFPfkCBsGV&d!6+^V(=XZNZVD z$7L6mek<}VxFz!Oo@a#<(4BN3KK>$YWf~1A!kN`u{;|_Cpn;d9xGlFiTAb7086uU4 z5a0g_4_zTceP7ivRZ0rlmQU5g`vt-tunuh*u+o6PvHh3)1!Q;EEB~jm4}us0+6WNp zHCLx7SFuBqU<-gEl6!gxq%U?t-MW2)1N#4K`bTP4gfoMPOx9*=>I+|CB{Dy#;I>$meNH zVaUk!)Q3yYD&~B`+Ae(p5|J=7UL~GIcOVzUqdERgWd0v~e45N3G7!f~L!H8Jcoza~ z$TT0&ZW7v1(-EHqW$JvKKsGi|XI}^L@yojUZ|@bdv;PZwh29+kw({Sy#l;k|7i5bL z|K?#RLH)VCK%4a2OJ~4d4*pm6^1sQ0cKw|Hb36=u7e=w9WxR+bog;pi)*Et|T$||H zNRI-st``2!#lqpAXSnPsRrus><%{2zY}mE_gVYJFc=jQD{2qLq2hVcj+ST~jMfjLv zJkyA4w0K4ep032xNSx_AviZt~(og#MKm&1>(JJ(QGNzQMy+Y-ZiNvL)K)FisKp->vGc05=40Y*H|YN z(_t($ghJoj`jYze4Ar_n{~g;d`&sAFvbpB&Niwx8Klh<`Kj{cbb(}ZW&I20_#mgb^ zln20AdJ&t_9p_jHobxZe2nzd*2k3}p^y$)!nl1)serO3H<+!P-{wnrkgt>d{na5O3 zkH+gTc_PBJrwY=JYA{D47GvEnitH+!hBGa_crz9Iytl9~io&W!tMl}8YeyI%sJz`Y zk;(-(psL=(W^_+t31;1OO~k>}MwK~cVh3=r#^~r&LeES;V5yMAJUaf(vCWSMb=*8e zcakN2l60wI{$;eT=73h53-Sp|V~28L2Q3$+w&apx1=^ew*9h<3s9O2tA4izi){t<2 zS{@i0V`3i3Wzn*kEoZeI$z+A9S}}3KG`4}d2x~_TB-G!+gt}C?ymlwG=~ZpTF?!QA zjp)vJed)sdZd`T`_a+%%o0;vfBy8%Ie_=43eo4la6in@oI+Zhbi+ozekS@i<&ow|V zoy16MbkCGOHk^S}(>xpIBWpRuWx}5!SB-3^VG8kxzx&9mL#(aSKeKVj`Y|cxTQqW6 zrniLg@YEw3QaiV3T3sM~tIXLUNuDv+m^4i`3|-&sblwYxZg>RWf(5Twksat25>!Tnn5`RS?qN}vEO9exkc2gh;SMm zXDK-Y6{Ip{< zxhlRv0k<<`x%#8B7tG!Bjy#$nNeo4neNA#?hN)jy>p~u~rJg0;{Jw}f%K3Zo+VaOh z=Iz5}vJjbU6AOGf52xQBdttxQ0xf>KI53 z*FF3&H_UkEo}?%vwPiFol|#K&_9J#`e^J#-qzseJ$_S0j0JVS9;tx{Yz~m}z#_}P! 
z(`3whcwQ#+T57P#-BaD?s4q&l!P?mA=gp@|4u}-IY9{rYeZt-`#pO{&U_Vl7kR+DUtym|opeobxhBhf zo{AE`ki%nbiisTtSADIhK4u3Qje9JKK(A+KxmqI^?dlNFmCoQ^dH(A#=t0kig(tF7Zekh zO*340I0#E=r2R3URbZZZ7Y^?TOW_3GOJ2tc1{#I^#rN3dxx>M!yU(?*e0WB0U^5HtvUxoGoI?OR0x|yN~sZsU2ll`V&c%1-OqSv-!Ot(yiYyttCW_XYhNB@ z$jh!Y#`X&S|Gc4x`eN5xBIn>=qS$o;h0TUyFkF4wp>fr-6@Sy7UuhtiROoUaq|{AT zt4}M`n_T49(OTR}ZCVqSTK-aQt3i9(vEwxsouz|iUdqcDT*-TPdk-D2(7e8r`q}<| z^!u=6ZW0y3K2tkFoLU~mIPaJrCoSI19r(rRjq8&ugxS-?$>l=8T##A*lFaTfy_Oj2 z820kaCUgE-NEe#2r)zecPD0yslwe%0QrFkw6v#o*eD!GyC%Oo|XsFEF z`>e}ul*@i_Z~o&-Zmqtk4`m7Wrvg1Rrkk&T6)i3e`tL<9?WP`y@p|^ww1KDY;E&3d zn7SvOe=yQ;f!g>LPaZL}^Dg$Wd`N#H0RzO{jBT1}V84<}ME_x_&>)-Dq2ub3_oLGxr$<{mEX;zWfJ^Tirr$~0M9HM*Ok-v$j?&}_yYgrkQ+3|6*gKO?113?j zd`fnXB&bx^I}$P{8RZz5vFdZN0g6f9OLcRm#z51IkgZG;;Q>2sJY8NxYIZ{kOLmhZ7}9xh|^`?lU!uJF$8 zGvcI3J=xemNqz9J#A|$1)%jGBK2fI77 z0TsL58avU9wTg)FJ1BXA>5a$tO!?>u#`}Ydl2Tp+>Rc!MO+XW!XgsR+u)NI~GcL&s zgp6+ulvXt3$~#cA-Y+g9>3SP~LK=KnRt=9MKE{+fF*NHBSDFVZbIXOpF^E(0;nMhx z-S}BO%AHpIR&jJ0>9ADtE6zSw0)MY`c>i%@h*xp6OA{lDHDt`O@O{@Wr%=b{`6!JiGsyAAExm%BfgU_byD1~y zIREAfZdJ42cjbd=lNA0$G?UCL8|1LYEe1ury9|}dV=`On+Ocae#^happlXCV?}`DiR6T^)M|X7sYHrLPpAbc;WE z;4iTBo|nDv>S2~In>-y)ZFODBEfI1xB|N!}87LK9wI2nJmIvB%yS~F3qvid+c$TWV zXRH`_n8Mw9Rm=*$XT;H6-KwDtC?}8Um7lh#H~3TI;yN$Ne4mr0%U&}m-5OcVmaC2b zo>~zl)hAEj*v<-TA3Ub>B=CL0)N*%x(_M+1R(=K<>oK_)nKKl+H$3^aXbO`_@w-a@ z$!X=aYOv2mr~1J#mK$&qg$=EK#;)Fk&R+HOq#qpTiww!I65A4FYF8T*KZA+9@Zw-S z30-dNbOTwGEE|--Dopurtp@o_dD&WH_t0mLla5UiDx~o0LI&*xf2#iwMW?h_-hs&{ z|5C)w^p-R|$-<;Qt!(#ZQMY1nhe_69bkjfdzk@DRHuydI_0oOtJ<%U&PQ<|%n|r3{ zt+Mas{jWvg@U`gaU7dXP^_szWra)Gs4jhWd)Rz{)G#I_G{SCfyzAc&>0&3Jh!8~ny z&?a4N{9083?aF-RjJ0o@LE3nSZ`!Wx;V`854GC1(0By--X-3>MJ^9j7nDd;;$egb4 zoDbhQ9p>}jm$^ml&?iQx64Xy3TIE^~4+1_x-;@N-8ggA{B+?m{C*d4seSg!+xzrmE z6zIKB6M;fu-M831*`R)-;xp+pNjqL(3(@;DOp_drD+3x$asLLKilQVTuzZiTLHv2x zv94^-L2bp1%m^>l49%!?`eYat;qTU^4$&8I2Ff`e%B*yU!7%aN>$A#XoJe)j%FBg) uRrkBb{*-iD5$d=!q5QQwKmO(VG1uhR>CR=|{J!(L%mIHh3$GdY^1lGF4>v0S literal 0 HcmV?d00001 -- 2.50.1