svn commit: r327639 - in vendor/llvm/dist-release_60: . cmake include/llvm/CodeGen include/llvm/IR include/llvm/Support include/llvm/Transforms/Scalar lib/Analysis lib/CodeGen lib/CodeGen/GlobalISe...
Dimitry Andric
dim at FreeBSD.org
Sat Jan 6 21:34:30 UTC 2018
Author: dim
Date: Sat Jan 6 21:34:26 2018
New Revision: 327639
URL: https://svnweb.freebsd.org/changeset/base/327639
Log:
Vendor import of llvm release_60 branch r321788:
https://llvm.org/svn/llvm-project/llvm/branches/release_60@321788
Added:
vendor/llvm/dist-release_60/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/minmax-of-minmax.ll
vendor/llvm/dist-release_60/test/CodeGen/Mips/constraint-c-err.ll
vendor/llvm/dist-release_60/test/CodeGen/Mips/constraint-c.ll
vendor/llvm/dist-release_60/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir
vendor/llvm/dist-release_60/test/CodeGen/PowerPC/pr35688.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/pr35765.ll
vendor/llvm/dist-release_60/test/MC/ELF/metadata-declaration-errors.s (contents, props changed)
vendor/llvm/dist-release_60/test/Transforms/InstCombine/fmul-sqrt.ll
vendor/llvm/dist-release_60/test/Transforms/InstSimplify/bitreverse-fold.ll
vendor/llvm/dist-release_60/test/Transforms/InstSimplify/exp-intrinsic.ll
vendor/llvm/dist-release_60/test/Transforms/InstSimplify/exp2-intrinsic.ll
vendor/llvm/dist-release_60/test/Transforms/InstSimplify/fold-intrinsics.ll
vendor/llvm/dist-release_60/test/Transforms/InstSimplify/log-intrinsic.ll
vendor/llvm/dist-release_60/test/Transforms/InstSimplify/log2-intrinsic.ll
vendor/llvm/dist-release_60/test/Transforms/SimplifyCFG/pr35774.ll
Deleted:
vendor/llvm/dist-release_60/test/Transforms/InstCombine/bitreverse-fold.ll
Modified:
vendor/llvm/dist-release_60/CMakeLists.txt
vendor/llvm/dist-release_60/cmake/config-ix.cmake
vendor/llvm/dist-release_60/include/llvm/CodeGen/TargetPassConfig.h
vendor/llvm/dist-release_60/include/llvm/IR/Function.h
vendor/llvm/dist-release_60/include/llvm/IR/IntrinsicsAMDGPU.td
vendor/llvm/dist-release_60/include/llvm/Support/CommandLine.h
vendor/llvm/dist-release_60/include/llvm/Support/TargetRegistry.h
vendor/llvm/dist-release_60/include/llvm/Transforms/Scalar/LoopPassManager.h
vendor/llvm/dist-release_60/lib/Analysis/InstructionSimplify.cpp
vendor/llvm/dist-release_60/lib/Analysis/ScalarEvolution.cpp
vendor/llvm/dist-release_60/lib/Analysis/ScalarEvolutionExpander.cpp
vendor/llvm/dist-release_60/lib/Analysis/ValueTracking.cpp
vendor/llvm/dist-release_60/lib/CodeGen/CodeGenPrepare.cpp
vendor/llvm/dist-release_60/lib/CodeGen/GlobalISel/IRTranslator.cpp
vendor/llvm/dist-release_60/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
vendor/llvm/dist-release_60/lib/CodeGen/LLVMTargetMachine.cpp
vendor/llvm/dist-release_60/lib/CodeGen/LiveDebugVariables.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/FastISel.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
vendor/llvm/dist-release_60/lib/CodeGen/TargetPassConfig.cpp
vendor/llvm/dist-release_60/lib/CodeGen/WinEHPrepare.cpp
vendor/llvm/dist-release_60/lib/IR/BasicBlock.cpp
vendor/llvm/dist-release_60/lib/IR/Verifier.cpp
vendor/llvm/dist-release_60/lib/MC/MCParser/ELFAsmParser.cpp
vendor/llvm/dist-release_60/lib/Passes/PassBuilder.cpp
vendor/llvm/dist-release_60/lib/Support/CommandLine.cpp
vendor/llvm/dist-release_60/lib/Target/AArch64/AArch64ISelLowering.cpp
vendor/llvm/dist-release_60/lib/Target/AArch64/AArch64RegisterInfo.td
vendor/llvm/dist-release_60/lib/Target/AArch64/AArch64TargetMachine.cpp
vendor/llvm/dist-release_60/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
vendor/llvm/dist-release_60/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
vendor/llvm/dist-release_60/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
vendor/llvm/dist-release_60/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/AMDGPU/MIMGInstructions.td
vendor/llvm/dist-release_60/lib/Target/AMDGPU/SIISelLowering.cpp
vendor/llvm/dist-release_60/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
vendor/llvm/dist-release_60/lib/Target/AMDGPU/SIMachineFunctionInfo.h
vendor/llvm/dist-release_60/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
vendor/llvm/dist-release_60/lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h
vendor/llvm/dist-release_60/lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h
vendor/llvm/dist-release_60/lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h
vendor/llvm/dist-release_60/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
vendor/llvm/dist-release_60/lib/Target/Hexagon/HexagonPatterns.td
vendor/llvm/dist-release_60/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/Lanai/MCTargetDesc/LanaiAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/Mips/MipsISelLowering.cpp
vendor/llvm/dist-release_60/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCISelLowering.cpp
vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCInstrInfo.cpp
vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCInstrInfo.h
vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCMIPeephole.cpp
vendor/llvm/dist-release_60/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
vendor/llvm/dist-release_60/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/RISCV/RISCVISelLowering.cpp
vendor/llvm/dist-release_60/lib/Target/RISCV/RISCVInstrInfoC.td
vendor/llvm/dist-release_60/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp
vendor/llvm/dist-release_60/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
vendor/llvm/dist-release_60/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
vendor/llvm/dist-release_60/lib/Target/X86/X86FixupBWInsts.cpp
vendor/llvm/dist-release_60/lib/Target/X86/X86ISelLowering.cpp
vendor/llvm/dist-release_60/lib/Target/X86/X86ISelLowering.h
vendor/llvm/dist-release_60/lib/Target/X86/X86InstrAVX512.td
vendor/llvm/dist-release_60/lib/Target/X86/X86InstrMMX.td
vendor/llvm/dist-release_60/lib/Transforms/Coroutines/CoroSplit.cpp
vendor/llvm/dist-release_60/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/CallSiteSplitting.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/GVNSink.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/IndVarSimplify.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/JumpThreading.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/LoopDeletion.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/LoopStrengthReduce.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/LoopUnswitch.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/SCCP.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
vendor/llvm/dist-release_60/lib/Transforms/Scalar/StructurizeCFG.cpp
vendor/llvm/dist-release_60/lib/Transforms/Utils/BasicBlockUtils.cpp
vendor/llvm/dist-release_60/lib/Transforms/Utils/BreakCriticalEdges.cpp
vendor/llvm/dist-release_60/lib/Transforms/Utils/CallPromotionUtils.cpp
vendor/llvm/dist-release_60/lib/Transforms/Utils/CloneFunction.cpp
vendor/llvm/dist-release_60/lib/Transforms/Utils/Local.cpp
vendor/llvm/dist-release_60/lib/Transforms/Utils/LoopUnroll.cpp
vendor/llvm/dist-release_60/lib/Transforms/Utils/LoopUnrollRuntime.cpp
vendor/llvm/dist-release_60/lib/Transforms/Utils/LoopUtils.cpp
vendor/llvm/dist-release_60/lib/Transforms/Utils/SSAUpdater.cpp
vendor/llvm/dist-release_60/lib/Transforms/Utils/SimplifyCFG.cpp
vendor/llvm/dist-release_60/lib/Transforms/Vectorize/LoopVectorize.cpp
vendor/llvm/dist-release_60/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir
vendor/llvm/dist-release_60/test/CodeGen/AArch64/aarch64_f16_be.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/and-mask-removal.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-abi.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-abi_align.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-elf-constpool.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-elf-globals.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-br.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-call.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-fcmp.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-materialize.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-noconvert.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-fast-isel.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/arm64-vcvt_f.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/br-cond-not-merge.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/cmpxchg-O0.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/cxx-tlscc.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/fast-isel-atomic.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/fast-isel-sp-adjust.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/i128-fast-isel-fallback.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/ldst-paired-aliasing.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/preferred-alignment.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/swift-return.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/swifterror.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/swiftself.ll
vendor/llvm/dist-release_60/test/CodeGen/AArch64/tailcall-fastisel.ll
vendor/llvm/dist-release_60/test/CodeGen/AMDGPU/indirect-addressing-si.ll
vendor/llvm/dist-release_60/test/CodeGen/AMDGPU/llvm.amdgcn.image.ll
vendor/llvm/dist-release_60/test/CodeGen/AMDGPU/llvm.amdgcn.s.waitcnt.ll
vendor/llvm/dist-release_60/test/CodeGen/Hexagon/autohvx/vext-128b.ll
vendor/llvm/dist-release_60/test/CodeGen/Hexagon/autohvx/vext-64b.ll
vendor/llvm/dist-release_60/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
vendor/llvm/dist-release_60/test/CodeGen/PowerPC/duplicate-returns-for-tailcall.ll
vendor/llvm/dist-release_60/test/CodeGen/PowerPC/ppc64-sibcall.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx-splat.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx-vbroadcast.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512-calling-conv.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512-cvt.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512-ext.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512-insert-extract.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512-skx-insert-subvec.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512-vec-cmp.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/bitcast-and-setcc-128.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/bitcast-and-setcc-256.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/bitcast-setcc-128.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/bitcast-setcc-256.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/bitcast-setcc-512.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/broadcastm-lowering.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/build-vector-128.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/build-vector-256.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/cast-vsel.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/cvtv2f32.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/fixup-bw-inst.mir
vendor/llvm/dist-release_60/test/CodeGen/X86/memset-nonzero.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/oddshuffles.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/pr33349.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/psubus.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/setcc-wide-types.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vec_fp_to_int.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vec_set-H.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-compare-results.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-pcmp.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-shift-ashr-128.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-shift-lshr-128.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-shift-shl-128.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-shuffle-128-v16.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-shuffle-128-v8.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-shuffle-256-v16.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-shuffle-256-v32.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-shuffle-v1.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/vector-trunc.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/widened-broadcast.ll
vendor/llvm/dist-release_60/test/CodeGen/X86/x86-interleaved-access.ll
vendor/llvm/dist-release_60/test/DebugInfo/AArch64/asan-stack-vars.ll
vendor/llvm/dist-release_60/test/DebugInfo/AArch64/frameindices.ll
vendor/llvm/dist-release_60/test/DebugInfo/AArch64/line-header.ll
vendor/llvm/dist-release_60/test/DebugInfo/AArch64/prologue_end.ll
vendor/llvm/dist-release_60/test/MC/AMDGPU/flat-gfx9.s
vendor/llvm/dist-release_60/test/MC/Disassembler/AMDGPU/flat_gfx9.txt
vendor/llvm/dist-release_60/test/MC/X86/intel-syntax-error.s
vendor/llvm/dist-release_60/test/MC/X86/intel-syntax.s
vendor/llvm/dist-release_60/test/MC/X86/x86_64-asm-match.s
vendor/llvm/dist-release_60/test/Other/loop-pm-invalidation.ll
vendor/llvm/dist-release_60/test/Other/new-pass-manager.ll
vendor/llvm/dist-release_60/test/Other/new-pm-defaults.ll
vendor/llvm/dist-release_60/test/Other/new-pm-thinlto-defaults.ll
vendor/llvm/dist-release_60/test/Transforms/InstCombine/bswap-fold.ll
vendor/llvm/dist-release_60/test/Transforms/InstCombine/call.ll
vendor/llvm/dist-release_60/test/Transforms/InstCombine/extractelement.ll
vendor/llvm/dist-release_60/test/Transforms/InstCombine/intrinsics.ll
vendor/llvm/dist-release_60/test/Transforms/InstCombine/udiv-simplify.ll
vendor/llvm/dist-release_60/test/Transforms/InstCombine/vec_demanded_elts.ll
vendor/llvm/dist-release_60/test/Transforms/InstCombine/vector_insertelt_shuffle.ll
vendor/llvm/dist-release_60/test/Transforms/InstSimplify/extract-element.ll
vendor/llvm/dist-release_60/test/Transforms/InstSimplify/insertelement.ll
vendor/llvm/dist-release_60/test/Transforms/LoopRotate/pr35210.ll
vendor/llvm/dist-release_60/test/Transforms/LoopSimplify/unreachable-loop-pred.ll
vendor/llvm/dist-release_60/test/tools/llvm-objcopy/symbol-copy.test
vendor/llvm/dist-release_60/tools/dsymutil/DwarfLinker.cpp
vendor/llvm/dist-release_60/tools/llvm-dwp/llvm-dwp.cpp
vendor/llvm/dist-release_60/tools/llvm-mc/llvm-mc.cpp
vendor/llvm/dist-release_60/tools/llvm-objcopy/Object.cpp
vendor/llvm/dist-release_60/tools/llvm-objcopy/Object.h
vendor/llvm/dist-release_60/unittests/DebugInfo/DWARF/DwarfGenerator.cpp
vendor/llvm/dist-release_60/unittests/IR/BasicBlockTest.cpp
vendor/llvm/dist-release_60/unittests/Support/CommandLineTest.cpp
vendor/llvm/dist-release_60/utils/TableGen/CodeGenDAGPatterns.cpp
Modified: vendor/llvm/dist-release_60/CMakeLists.txt
==============================================================================
--- vendor/llvm/dist-release_60/CMakeLists.txt Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/CMakeLists.txt Sat Jan 6 21:34:26 2018 (r327639)
@@ -27,7 +27,7 @@ if(NOT DEFINED LLVM_VERSION_PATCH)
set(LLVM_VERSION_PATCH 0)
endif()
if(NOT DEFINED LLVM_VERSION_SUFFIX)
- set(LLVM_VERSION_SUFFIX svn)
+ set(LLVM_VERSION_SUFFIX "")
endif()
if (NOT PACKAGE_VERSION)
Modified: vendor/llvm/dist-release_60/cmake/config-ix.cmake
==============================================================================
--- vendor/llvm/dist-release_60/cmake/config-ix.cmake Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/cmake/config-ix.cmake Sat Jan 6 21:34:26 2018 (r327639)
@@ -640,7 +640,8 @@ endif()
string(REPLACE " " ";" LLVM_BINDINGS_LIST "${LLVM_BINDINGS}")
function(find_python_module module)
- string(TOUPPER ${module} module_upper)
+ string(REPLACE "." "_" module_name ${module})
+ string(TOUPPER ${module_name} module_upper)
set(FOUND_VAR PY_${module_upper}_FOUND)
execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" "import ${module}"
@@ -658,13 +659,16 @@ endfunction()
set (PYTHON_MODULES
pygments
+ # Some systems still don't have pygments.lexers.c_cpp which was introduced in
+ # version 2.0 in 2014...
+ pygments.lexers.c_cpp
yaml
)
foreach(module ${PYTHON_MODULES})
find_python_module(${module})
endforeach()
-if(PY_PYGMENTS_FOUND AND PY_YAML_FOUND)
+if(PY_PYGMENTS_FOUND AND PY_PYGMENTS_LEXERS_C_CPP_FOUND AND PY_YAML_FOUND)
set (LLVM_HAVE_OPT_VIEWER_MODULES 1)
else()
set (LLVM_HAVE_OPT_VIEWER_MODULES 0)
Modified: vendor/llvm/dist-release_60/include/llvm/CodeGen/TargetPassConfig.h
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/CodeGen/TargetPassConfig.h Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/include/llvm/CodeGen/TargetPassConfig.h Sat Jan 6 21:34:26 2018 (r327639)
@@ -325,9 +325,9 @@ class TargetPassConfig : public ImmutablePass { (publi
virtual bool isGlobalISelEnabled() const;
/// Check whether or not GlobalISel should abort on error.
- /// When this is disable, GlobalISel will fall back on SDISel instead of
+ /// When this is disabled, GlobalISel will fall back on SDISel instead of
/// erroring out.
- virtual bool isGlobalISelAbortEnabled() const;
+ bool isGlobalISelAbortEnabled() const;
/// Check whether or not a diagnostic should be emitted when GlobalISel
/// uses the fallback path. In other words, it will emit a diagnostic
Modified: vendor/llvm/dist-release_60/include/llvm/IR/Function.h
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/IR/Function.h Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/include/llvm/IR/Function.h Sat Jan 6 21:34:26 2018 (r327639)
@@ -218,6 +218,7 @@ class Function : public GlobalObject, public ilist_nod
Attribute::get(getContext(), Kind, Val));
}
+ /// @brief Add function attributes to this function.
void addFnAttr(Attribute Attr) {
addAttribute(AttributeList::FunctionIndex, Attr);
}
@@ -268,6 +269,8 @@ class Function : public GlobalObject, public ilist_nod
bool hasFnAttribute(Attribute::AttrKind Kind) const {
return AttributeSets.hasFnAttribute(Kind);
}
+
+ /// @brief Return true if the function has the attribute.
bool hasFnAttribute(StringRef Kind) const {
return AttributeSets.hasFnAttribute(Kind);
}
@@ -276,6 +279,8 @@ class Function : public GlobalObject, public ilist_nod
Attribute getFnAttribute(Attribute::AttrKind Kind) const {
return getAttribute(AttributeList::FunctionIndex, Kind);
}
+
+ /// @brief Return the attribute for the given attribute kind.
Attribute getFnAttribute(StringRef Kind) const {
return getAttribute(AttributeList::FunctionIndex, Kind);
}
@@ -342,10 +347,12 @@ class Function : public GlobalObject, public ilist_nod
return getAttributes().hasParamAttribute(ArgNo, Kind);
}
+ /// @brief gets the attribute from the list of attributes.
Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
return AttributeSets.getAttribute(i, Kind);
}
+ /// @brief gets the attribute from the list of attributes.
Attribute getAttribute(unsigned i, StringRef Kind) const {
return AttributeSets.getAttribute(i, Kind);
}
Modified: vendor/llvm/dist-release_60/include/llvm/IR/IntrinsicsAMDGPU.td
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/IR/IntrinsicsAMDGPU.td Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/include/llvm/IR/IntrinsicsAMDGPU.td Sat Jan 6 21:34:26 2018 (r327639)
@@ -304,7 +304,8 @@ class AMDGPUImageLoad<bit NoMem = 0> : Intrinsic <
llvm_i1_ty, // slc(imm)
llvm_i1_ty, // lwe(imm)
llvm_i1_ty], // da(imm)
- !if(NoMem, [IntrNoMem], [IntrReadMem])>;
+ !if(NoMem, [IntrNoMem], [IntrReadMem]), "",
+ !if(NoMem, [], [SDNPMemOperand])>;
def int_amdgcn_image_load : AMDGPUImageLoad;
def int_amdgcn_image_load_mip : AMDGPUImageLoad;
@@ -320,7 +321,7 @@ class AMDGPUImageStore : Intrinsic <
llvm_i1_ty, // slc(imm)
llvm_i1_ty, // lwe(imm)
llvm_i1_ty], // da(imm)
- []>;
+ [IntrWriteMem], "", [SDNPMemOperand]>;
def int_amdgcn_image_store : AMDGPUImageStore;
def int_amdgcn_image_store_mip : AMDGPUImageStore;
@@ -336,7 +337,8 @@ class AMDGPUImageSample<bit NoMem = 0> : Intrinsic <
llvm_i1_ty, // slc(imm)
llvm_i1_ty, // lwe(imm)
llvm_i1_ty], // da(imm)
- !if(NoMem, [IntrNoMem], [IntrReadMem])>;
+ !if(NoMem, [IntrNoMem], [IntrReadMem]), "",
+ !if(NoMem, [], [SDNPMemOperand])>;
// Basic sample
def int_amdgcn_image_sample : AMDGPUImageSample;
@@ -428,7 +430,7 @@ class AMDGPUImageAtomic : Intrinsic <
llvm_i1_ty, // r128(imm)
llvm_i1_ty, // da(imm)
llvm_i1_ty], // slc(imm)
- []>;
+ [], "", [SDNPMemOperand]>;
def int_amdgcn_image_atomic_swap : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_add : AMDGPUImageAtomic;
@@ -451,7 +453,7 @@ def int_amdgcn_image_atomic_cmpswap : Intrinsic <
llvm_i1_ty, // r128(imm)
llvm_i1_ty, // da(imm)
llvm_i1_ty], // slc(imm)
- []>;
+ [], "", [SDNPMemOperand]>;
class AMDGPUBufferLoad : Intrinsic <
[llvm_anyfloat_ty],
@@ -460,7 +462,7 @@ class AMDGPUBufferLoad : Intrinsic <
llvm_i32_ty, // offset(SGPR/VGPR/imm)
llvm_i1_ty, // glc(imm)
llvm_i1_ty], // slc(imm)
- [IntrReadMem]>;
+ [IntrReadMem], "", [SDNPMemOperand]>;
def int_amdgcn_buffer_load_format : AMDGPUBufferLoad;
def int_amdgcn_buffer_load : AMDGPUBufferLoad;
@@ -472,7 +474,7 @@ class AMDGPUBufferStore : Intrinsic <
llvm_i32_ty, // offset(SGPR/VGPR/imm)
llvm_i1_ty, // glc(imm)
llvm_i1_ty], // slc(imm)
- [IntrWriteMem]>;
+ [IntrWriteMem], "", [SDNPMemOperand]>;
def int_amdgcn_buffer_store_format : AMDGPUBufferStore;
def int_amdgcn_buffer_store : AMDGPUBufferStore;
@@ -487,7 +489,7 @@ def int_amdgcn_tbuffer_load : Intrinsic <
llvm_i32_ty, // nfmt(imm)
llvm_i1_ty, // glc(imm)
llvm_i1_ty], // slc(imm)
- []>;
+ [IntrReadMem], "", [SDNPMemOperand]>;
def int_amdgcn_tbuffer_store : Intrinsic <
[],
@@ -501,7 +503,7 @@ def int_amdgcn_tbuffer_store : Intrinsic <
llvm_i32_ty, // nfmt(imm)
llvm_i1_ty, // glc(imm)
llvm_i1_ty], // slc(imm)
- []>;
+ [IntrWriteMem], "", [SDNPMemOperand]>;
class AMDGPUBufferAtomic : Intrinsic <
[llvm_i32_ty],
@@ -510,7 +512,7 @@ class AMDGPUBufferAtomic : Intrinsic <
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(SGPR/VGPR/imm)
llvm_i1_ty], // slc(imm)
- []>;
+ [], "", [SDNPMemOperand]>;
def int_amdgcn_buffer_atomic_swap : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_add : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_sub : AMDGPUBufferAtomic;
@@ -529,7 +531,7 @@ def int_amdgcn_buffer_atomic_cmpswap : Intrinsic<
llvm_i32_ty, // vindex(VGPR)
llvm_i32_ty, // offset(SGPR/VGPR/imm)
llvm_i1_ty], // slc(imm)
- []>;
+ [], "", [SDNPMemOperand]>;
// Uses that do not set the done bit should set IntrWriteMem on the
// call site.
Modified: vendor/llvm/dist-release_60/include/llvm/Support/CommandLine.h
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/Support/CommandLine.h Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/include/llvm/Support/CommandLine.h Sat Jan 6 21:34:26 2018 (r327639)
@@ -1862,6 +1862,33 @@ using TokenizerCallback = void (*)(StringRef Source, S
SmallVectorImpl<const char *> &NewArgv,
bool MarkEOLs);
+/// Tokenizes content of configuration file.
+///
+/// \param [in] Source The string representing content of config file.
+/// \param [in] Saver Delegates back to the caller for saving parsed strings.
+/// \param [out] NewArgv All parsed strings are appended to NewArgv.
+/// \param [in] MarkEOLs Added for compatibility with TokenizerCallback.
+///
+/// It works like TokenizeGNUCommandLine with ability to skip comment lines.
+///
+void tokenizeConfigFile(StringRef Source, StringSaver &Saver,
+ SmallVectorImpl<const char *> &NewArgv,
+ bool MarkEOLs = false);
+
+/// Reads command line options from the given configuration file.
+///
+/// \param [in] CfgFileName Path to configuration file.
+/// \param [in] Saver Objects that saves allocated strings.
+/// \param [out] Argv Array to which the read options are added.
+/// \return true if the file was successfully read.
+///
+/// It reads content of the specified file, tokenizes it and expands "@file"
+/// commands resolving file names in them relative to the directory where
+/// CfgFilename resides.
+///
+bool readConfigFile(StringRef CfgFileName, StringSaver &Saver,
+ SmallVectorImpl<const char *> &Argv);
+
/// \brief Expand response files on a command line recursively using the given
/// StringSaver and tokenization strategy. Argv should contain the command line
/// before expansion and will be modified in place. If requested, Argv will
Modified: vendor/llvm/dist-release_60/include/llvm/Support/TargetRegistry.h
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/Support/TargetRegistry.h Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/include/llvm/Support/TargetRegistry.h Sat Jan 6 21:34:26 2018 (r327639)
@@ -123,8 +123,8 @@ class Target { (public)
using AsmPrinterCtorTy = AsmPrinter *(*)(
TargetMachine &TM, std::unique_ptr<MCStreamer> &&Streamer);
using MCAsmBackendCtorTy = MCAsmBackend *(*)(const Target &T,
+ const MCSubtargetInfo &STI,
const MCRegisterInfo &MRI,
- const Triple &TT, StringRef CPU,
const MCTargetOptions &Options);
using MCAsmParserCtorTy = MCTargetAsmParser *(*)(
const MCSubtargetInfo &STI, MCAsmParser &P, const MCInstrInfo &MII,
@@ -381,15 +381,12 @@ class Target { (public)
}
/// createMCAsmBackend - Create a target specific assembly parser.
- ///
- /// \param TheTriple The target triple string.
- MCAsmBackend *createMCAsmBackend(const MCRegisterInfo &MRI,
- StringRef TheTriple, StringRef CPU,
- const MCTargetOptions &Options)
- const {
+ MCAsmBackend *createMCAsmBackend(const MCSubtargetInfo &STI,
+ const MCRegisterInfo &MRI,
+ const MCTargetOptions &Options) const {
if (!MCAsmBackendCtorFn)
return nullptr;
- return MCAsmBackendCtorFn(*this, MRI, Triple(TheTriple), CPU, Options);
+ return MCAsmBackendCtorFn(*this, STI, MRI, Options);
}
/// createMCAsmParser - Create a target specific assembly parser.
@@ -1106,10 +1103,10 @@ template <class MCAsmBackendImpl> struct RegisterMCAsm
}
private:
- static MCAsmBackend *Allocator(const Target &T, const MCRegisterInfo &MRI,
- const Triple &TheTriple, StringRef CPU,
+ static MCAsmBackend *Allocator(const Target &T, const MCSubtargetInfo &STI,
+ const MCRegisterInfo &MRI,
const MCTargetOptions &Options) {
- return new MCAsmBackendImpl(T, MRI, TheTriple, CPU);
+ return new MCAsmBackendImpl(T, STI, MRI);
}
};
Modified: vendor/llvm/dist-release_60/include/llvm/Transforms/Scalar/LoopPassManager.h
==============================================================================
--- vendor/llvm/dist-release_60/include/llvm/Transforms/Scalar/LoopPassManager.h Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/include/llvm/Transforms/Scalar/LoopPassManager.h Sat Jan 6 21:34:26 2018 (r327639)
@@ -264,7 +264,8 @@ template <typename LoopPassT>
class FunctionToLoopPassAdaptor
: public PassInfoMixin<FunctionToLoopPassAdaptor<LoopPassT>> {
public:
- explicit FunctionToLoopPassAdaptor(LoopPassT Pass) : Pass(std::move(Pass)) {
+ explicit FunctionToLoopPassAdaptor(LoopPassT Pass, bool DebugLogging = false)
+ : Pass(std::move(Pass)), LoopCanonicalizationFPM(DebugLogging) {
LoopCanonicalizationFPM.addPass(LoopSimplifyPass());
LoopCanonicalizationFPM.addPass(LCSSAPass());
}
@@ -384,8 +385,8 @@ class FunctionToLoopPassAdaptor (private)
/// adaptor.
template <typename LoopPassT>
FunctionToLoopPassAdaptor<LoopPassT>
-createFunctionToLoopPassAdaptor(LoopPassT Pass) {
- return FunctionToLoopPassAdaptor<LoopPassT>(std::move(Pass));
+createFunctionToLoopPassAdaptor(LoopPassT Pass, bool DebugLogging = false) {
+ return FunctionToLoopPassAdaptor<LoopPassT>(std::move(Pass), DebugLogging);
}
/// \brief Pass for printing a loop's contents as textual IR.
Modified: vendor/llvm/dist-release_60/lib/Analysis/InstructionSimplify.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/Analysis/InstructionSimplify.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/Analysis/InstructionSimplify.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -826,7 +826,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1,
MaxRecurse))
return V;
- // Mul distributes over Add. Try some generic simplifications based on this.
+ // Mul distributes over Add. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
Q, MaxRecurse))
return V;
@@ -3838,12 +3838,13 @@ Value *llvm::SimplifyInsertElementInst(Value *Vec, Val
// Fold into undef if index is out of bounds.
if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
uint64_t NumElements = cast<VectorType>(Vec->getType())->getNumElements();
-
if (CI->uge(NumElements))
return UndefValue::get(Vec->getType());
}
- // TODO: We should also fold if index is iteslf an undef.
+ // If index is undef, it might be out of bounds (see above case)
+ if (isa<UndefValue>(Idx))
+ return UndefValue::get(Vec->getType());
return nullptr;
}
@@ -3896,10 +3897,13 @@ static Value *SimplifyExtractElementInst(Value *Vec, V
// If extracting a specified index from the vector, see if we can recursively
// find a previously computed scalar that was inserted into the vector.
- if (auto *IdxC = dyn_cast<ConstantInt>(Idx))
- if (IdxC->getValue().ule(Vec->getType()->getVectorNumElements()))
- if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
- return Elt;
+ if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
+ if (IdxC->getValue().uge(Vec->getType()->getVectorNumElements()))
+ // definitely out of bounds, thus undefined result
+ return UndefValue::get(Vec->getType()->getVectorElementType());
+ if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
+ return Elt;
+ }
// An undef extract index can be arbitrarily chosen to be an out-of-range
// index value, which would result in the instruction being undef.
@@ -4489,26 +4493,53 @@ static Value *SimplifyIntrinsic(Function *F, IterTy Ar
}
}
+ Value *IIOperand = *ArgBegin;
+ Value *X;
switch (IID) {
case Intrinsic::fabs: {
- if (SignBitMustBeZero(*ArgBegin, Q.TLI))
- return *ArgBegin;
+ if (SignBitMustBeZero(IIOperand, Q.TLI))
+ return IIOperand;
return nullptr;
}
case Intrinsic::bswap: {
- Value *IIOperand = *ArgBegin;
- Value *X = nullptr;
// bswap(bswap(x)) -> x
if (match(IIOperand, m_BSwap(m_Value(X))))
return X;
return nullptr;
}
case Intrinsic::bitreverse: {
- Value *IIOperand = *ArgBegin;
- Value *X = nullptr;
// bitreverse(bitreverse(x)) -> x
if (match(IIOperand, m_BitReverse(m_Value(X))))
return X;
+ return nullptr;
+ }
+ case Intrinsic::exp: {
+ // exp(log(x)) -> x
+ if (Q.CxtI->isFast() &&
+ match(IIOperand, m_Intrinsic<Intrinsic::log>(m_Value(X))))
+ return X;
+ return nullptr;
+ }
+ case Intrinsic::exp2: {
+ // exp2(log2(x)) -> x
+ if (Q.CxtI->isFast() &&
+ match(IIOperand, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
+ return X;
+ return nullptr;
+ }
+ case Intrinsic::log: {
+ // log(exp(x)) -> x
+ if (Q.CxtI->isFast() &&
+ match(IIOperand, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
+ return X;
+ return nullptr;
+ }
+ case Intrinsic::log2: {
+ // log2(exp2(x)) -> x
+ if (Q.CxtI->isFast() &&
+ match(IIOperand, m_Intrinsic<Intrinsic::exp2>(m_Value(X)))) {
+ return X;
+ }
return nullptr;
}
default:
Modified: vendor/llvm/dist-release_60/lib/Analysis/ScalarEvolution.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/Analysis/ScalarEvolution.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/Analysis/ScalarEvolution.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -2358,7 +2358,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImp
FoundMatch = true;
}
if (FoundMatch)
- return getAddExpr(Ops, Flags);
+ return getAddExpr(Ops, Flags, Depth + 1);
// Check for truncates. If all the operands are truncated from the same
// type, see if factoring out the truncate would permit the result to be
@@ -6402,9 +6402,8 @@ PushLoopPHIs(const Loop *L, SmallVectorImpl<Instructio
BasicBlock *Header = L->getHeader();
// Push all Loop-header PHIs onto the Worklist stack.
- for (BasicBlock::iterator I = Header->begin();
- PHINode *PN = dyn_cast<PHINode>(I); ++I)
- Worklist.push_back(PN);
+ for (PHINode &PN : Header->phis())
+ Worklist.push_back(&PN);
}
const ScalarEvolution::BackedgeTakenInfo &
@@ -7638,12 +7637,9 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHI
if (!Latch)
return nullptr;
- for (auto &I : *Header) {
- PHINode *PHI = dyn_cast<PHINode>(&I);
- if (!PHI) break;
- auto *StartCST = getOtherIncomingValue(PHI, Latch);
- if (!StartCST) continue;
- CurrentIterVals[PHI] = StartCST;
+ for (PHINode &PHI : Header->phis()) {
+ if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
+ CurrentIterVals[&PHI] = StartCST;
}
if (!CurrentIterVals.count(PN))
return RetVal = nullptr;
@@ -7720,13 +7716,9 @@ const SCEV *ScalarEvolution::computeExitCountExhaustiv
BasicBlock *Latch = L->getLoopLatch();
assert(Latch && "Should follow from NumIncomingValues == 2!");
- for (auto &I : *Header) {
- PHINode *PHI = dyn_cast<PHINode>(&I);
- if (!PHI)
- break;
- auto *StartCST = getOtherIncomingValue(PHI, Latch);
- if (!StartCST) continue;
- CurrentIterVals[PHI] = StartCST;
+ for (PHINode &PHI : Header->phis()) {
+ if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
+ CurrentIterVals[&PHI] = StartCST;
}
if (!CurrentIterVals.count(PN))
return getCouldNotCompute();
Modified: vendor/llvm/dist-release_60/lib/Analysis/ScalarEvolutionExpander.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/Analysis/ScalarEvolutionExpander.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/Analysis/ScalarEvolutionExpander.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -1154,16 +1154,11 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddR
IVIncInsertLoop &&
SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
- for (auto &I : *L->getHeader()) {
- auto *PN = dyn_cast<PHINode>(&I);
- // Found first non-phi, the rest of instructions are also not Phis.
- if (!PN)
- break;
-
- if (!SE.isSCEVable(PN->getType()))
+ for (PHINode &PN : L->getHeader()->phis()) {
+ if (!SE.isSCEVable(PN.getType()))
continue;
- const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
+ const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
if (!PhiSCEV)
continue;
@@ -1175,16 +1170,16 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddR
continue;
Instruction *TempIncV =
- cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
+ cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
// Check whether we can reuse this PHI node.
if (LSRMode) {
- if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
+ if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
continue;
if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
continue;
} else {
- if (!isNormalAddRecExprPHI(PN, TempIncV, L))
+ if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
continue;
}
@@ -1193,7 +1188,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddR
IncV = TempIncV;
TruncTy = nullptr;
InvertStep = false;
- AddRecPhiMatch = PN;
+ AddRecPhiMatch = &PN;
break;
}
@@ -1203,7 +1198,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddR
canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
// Record the phi node. But don't stop we might find an exact match
// later.
- AddRecPhiMatch = PN;
+ AddRecPhiMatch = &PN;
IncV = TempIncV;
TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
}
@@ -1863,12 +1858,8 @@ SCEVExpander::replaceCongruentIVs(Loop *L, const Domin
const TargetTransformInfo *TTI) {
// Find integer phis in order of increasing width.
SmallVector<PHINode*, 8> Phis;
- for (auto &I : *L->getHeader()) {
- if (auto *PN = dyn_cast<PHINode>(&I))
- Phis.push_back(PN);
- else
- break;
- }
+ for (PHINode &PN : L->getHeader()->phis())
+ Phis.push_back(&PN);
if (TTI)
std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
Modified: vendor/llvm/dist-release_60/lib/Analysis/ValueTracking.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/Analysis/ValueTracking.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/Analysis/ValueTracking.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -2264,9 +2264,9 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
// ashr X, C -> adds C sign bits. Vectors too.
const APInt *ShAmt;
if (match(U->getOperand(1), m_APInt(ShAmt))) {
- unsigned ShAmtLimited = ShAmt->getZExtValue();
- if (ShAmtLimited >= TyBits)
+ if (ShAmt->uge(TyBits))
break; // Bad shift.
+ unsigned ShAmtLimited = ShAmt->getZExtValue();
Tmp += ShAmtLimited;
if (Tmp > TyBits) Tmp = TyBits;
}
@@ -2277,9 +2277,9 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
if (match(U->getOperand(1), m_APInt(ShAmt))) {
// shl destroys sign bits.
Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+ if (ShAmt->uge(TyBits) || // Bad shift.
+ ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
Tmp2 = ShAmt->getZExtValue();
- if (Tmp2 >= TyBits || // Bad shift.
- Tmp2 >= Tmp) break; // Shifted all sign bits out.
return Tmp - Tmp2;
}
break;
@@ -4161,6 +4161,81 @@ static SelectPatternResult matchClamp(CmpInst::Predica
return {SPF_UNKNOWN, SPNB_NA, false};
}
+/// Recognize variations of:
+/// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
+static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
+ Value *CmpLHS, Value *CmpRHS,
+ Value *TrueVal, Value *FalseVal) {
+ // TODO: Allow FP min/max with nnan/nsz.
+ assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
+
+ Value *A, *B;
+ SelectPatternResult L = matchSelectPattern(TrueVal, A, B);
+ if (!SelectPatternResult::isMinOrMax(L.Flavor))
+ return {SPF_UNKNOWN, SPNB_NA, false};
+
+ Value *C, *D;
+ SelectPatternResult R = matchSelectPattern(FalseVal, C, D);
+ if (L.Flavor != R.Flavor)
+ return {SPF_UNKNOWN, SPNB_NA, false};
+
+ // Match the compare to the min/max operations of the select operands.
+ switch (L.Flavor) {
+ case SPF_SMIN:
+ if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ std::swap(CmpLHS, CmpRHS);
+ }
+ if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
+ break;
+ return {SPF_UNKNOWN, SPNB_NA, false};
+ case SPF_SMAX:
+ if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ std::swap(CmpLHS, CmpRHS);
+ }
+ if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
+ break;
+ return {SPF_UNKNOWN, SPNB_NA, false};
+ case SPF_UMIN:
+ if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ std::swap(CmpLHS, CmpRHS);
+ }
+ if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
+ break;
+ return {SPF_UNKNOWN, SPNB_NA, false};
+ case SPF_UMAX:
+ if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ std::swap(CmpLHS, CmpRHS);
+ }
+ if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
+ break;
+ return {SPF_UNKNOWN, SPNB_NA, false};
+ default:
+ llvm_unreachable("Bad flavor while matching min/max");
+ }
+
+ // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
+ if (CmpLHS == A && CmpRHS == C && D == B)
+ return {L.Flavor, SPNB_NA, false};
+
+ // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
+ if (CmpLHS == A && CmpRHS == D && C == B)
+ return {L.Flavor, SPNB_NA, false};
+
+ // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
+ if (CmpLHS == B && CmpRHS == C && D == A)
+ return {L.Flavor, SPNB_NA, false};
+
+ // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
+ if (CmpLHS == B && CmpRHS == D && C == A)
+ return {L.Flavor, SPNB_NA, false};
+
+ return {SPF_UNKNOWN, SPNB_NA, false};
+}
+
/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
Value *CmpLHS, Value *CmpRHS,
@@ -4174,6 +4249,10 @@ static SelectPatternResult matchMinMax(CmpInst::Predic
if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
return SPR;
+ SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
+ if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
+ return SPR;
+
if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
return {SPF_UNKNOWN, SPNB_NA, false};
Modified: vendor/llvm/dist-release_60/lib/CodeGen/CodeGenPrepare.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/CodeGenPrepare.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/CodeGen/CodeGenPrepare.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -633,16 +633,10 @@ bool CodeGenPrepare::isMergingEmptyBlockProfitable(Bas
if (DestBBPred == BB)
continue;
- bool HasAllSameValue = true;
- BasicBlock::const_iterator DestBBI = DestBB->begin();
- while (const PHINode *DestPN = dyn_cast<PHINode>(DestBBI++)) {
- if (DestPN->getIncomingValueForBlock(BB) !=
- DestPN->getIncomingValueForBlock(DestBBPred)) {
- HasAllSameValue = false;
- break;
- }
- }
- if (HasAllSameValue)
+ if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
+ return DestPN.getIncomingValueForBlock(BB) ==
+ DestPN.getIncomingValueForBlock(DestBBPred);
+ }))
SameIncomingValueBBs.insert(DestBBPred);
}
@@ -672,9 +666,8 @@ bool CodeGenPrepare::canMergeBlocks(const BasicBlock *
// We only want to eliminate blocks whose phi nodes are used by phi nodes in
// the successor. If there are more complex condition (e.g. preheaders),
// don't mess around with them.
- BasicBlock::const_iterator BBI = BB->begin();
- while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
- for (const User *U : PN->users()) {
+ for (const PHINode &PN : BB->phis()) {
+ for (const User *U : PN.users()) {
const Instruction *UI = cast<Instruction>(U);
if (UI->getParent() != DestBB || !isa<PHINode>(UI))
return false;
@@ -713,10 +706,9 @@ bool CodeGenPrepare::canMergeBlocks(const BasicBlock *
for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
if (BBPreds.count(Pred)) { // Common predecessor?
- BBI = DestBB->begin();
- while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
- const Value *V1 = PN->getIncomingValueForBlock(Pred);
- const Value *V2 = PN->getIncomingValueForBlock(BB);
+ for (const PHINode &PN : DestBB->phis()) {
+ const Value *V1 = PN.getIncomingValueForBlock(Pred);
+ const Value *V2 = PN.getIncomingValueForBlock(BB);
// If V2 is a phi node in BB, look up what the mapped value will be.
if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
@@ -759,11 +751,9 @@ void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBl
// Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
// to handle the new incoming edges it is about to have.
- PHINode *PN;
- for (BasicBlock::iterator BBI = DestBB->begin();
- (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
+ for (PHINode &PN : DestBB->phis()) {
// Remove the incoming value for BB, and remember it.
- Value *InVal = PN->removeIncomingValue(BB, false);
+ Value *InVal = PN.removeIncomingValue(BB, false);
// Two options: either the InVal is a phi node defined in BB or it is some
// value that dominates BB.
@@ -771,17 +761,17 @@ void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBl
if (InValPhi && InValPhi->getParent() == BB) {
// Add all of the input values of the input PHI as inputs of this phi.
for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
- PN->addIncoming(InValPhi->getIncomingValue(i),
- InValPhi->getIncomingBlock(i));
+ PN.addIncoming(InValPhi->getIncomingValue(i),
+ InValPhi->getIncomingBlock(i));
} else {
// Otherwise, add one instance of the dominating value for each edge that
// we will be adding.
if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
- PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
+ PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
} else {
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
- PN->addIncoming(InVal, *PI);
+ PN.addIncoming(InVal, *PI);
}
}
}
@@ -6497,22 +6487,16 @@ bool CodeGenPrepare::splitBranchCondition(Function &F)
std::swap(TBB, FBB);
// Replace the old BB with the new BB.
- for (auto &I : *TBB) {
- PHINode *PN = dyn_cast<PHINode>(&I);
- if (!PN)
- break;
+ for (PHINode &PN : TBB->phis()) {
int i;
- while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
- PN->setIncomingBlock(i, TmpBB);
+ while ((i = PN.getBasicBlockIndex(&BB)) >= 0)
+ PN.setIncomingBlock(i, TmpBB);
}
// Add another incoming edge form the new BB.
- for (auto &I : *FBB) {
- PHINode *PN = dyn_cast<PHINode>(&I);
- if (!PN)
- break;
- auto *Val = PN->getIncomingValueForBlock(&BB);
- PN->addIncoming(Val, TmpBB);
+ for (PHINode &PN : FBB->phis()) {
+ auto *Val = PN.getIncomingValueForBlock(&BB);
+ PN.addIncoming(Val, TmpBB);
}
// Update the branch weights (from SelectionDAGBuilder::
Modified: vendor/llvm/dist-release_60/lib/CodeGen/GlobalISel/IRTranslator.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/GlobalISel/IRTranslator.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/CodeGen/GlobalISel/IRTranslator.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -815,7 +815,14 @@ bool IRTranslator::translateCall(const User &U, Machin
if (CI.isInlineAsm())
return translateInlineAsm(CI, MIRBuilder);
- if (!F || !F->isIntrinsic()) {
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ if (F && F->isIntrinsic()) {
+ ID = F->getIntrinsicID();
+ if (TII && ID == Intrinsic::not_intrinsic)
+ ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
+ }
+
+ if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
SmallVector<unsigned, 8> Args;
for (auto &Arg: CI.arg_operands())
@@ -826,10 +833,6 @@ bool IRTranslator::translateCall(const User &U, Machin
return getOrCreateVReg(*CI.getCalledValue());
});
}
-
- Intrinsic::ID ID = F->getIntrinsicID();
- if (TII && ID == Intrinsic::not_intrinsic)
- ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
Modified: vendor/llvm/dist-release_60/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/GlobalISel/LegalizerHelper.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/CodeGen/GlobalISel/LegalizerHelper.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -813,7 +813,21 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned Type
unsigned Zero = MRI.createGenericVirtualRegister(Ty);
MIRBuilder.buildConstant(Zero, 0);
- MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero);
+
+ // For *signed* multiply, overflow is detected by checking:
+ // (hi != (lo >> bitwidth-1))
+ if (Opcode == TargetOpcode::G_SMULH) {
+ unsigned Shifted = MRI.createGenericVirtualRegister(Ty);
+ unsigned ShiftAmt = MRI.createGenericVirtualRegister(Ty);
+ MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1);
+ MIRBuilder.buildInstr(TargetOpcode::G_ASHR)
+ .addDef(Shifted)
+ .addUse(Res)
+ .addUse(ShiftAmt);
+ MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted);
+ } else {
+ MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero);
+ }
MI.eraseFromParent();
return Legalized;
}
Modified: vendor/llvm/dist-release_60/lib/CodeGen/LLVMTargetMachine.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/LLVMTargetMachine.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/CodeGen/LLVMTargetMachine.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -136,8 +136,7 @@ bool LLVMTargetMachine::addAsmPrinter(PassManagerBase
MCE = getTarget().createMCCodeEmitter(MII, MRI, Context);
MCAsmBackend *MAB =
- getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU,
- Options.MCOptions);
+ getTarget().createMCAsmBackend(STI, MRI, Options.MCOptions);
auto FOut = llvm::make_unique<formatted_raw_ostream>(Out);
MCStreamer *S = getTarget().createAsmStreamer(
Context, std::move(FOut), Options.MCOptions.AsmVerbose,
@@ -151,8 +150,7 @@ bool LLVMTargetMachine::addAsmPrinter(PassManagerBase
// emission fails.
MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(MII, MRI, Context);
MCAsmBackend *MAB =
- getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU,
- Options.MCOptions);
+ getTarget().createMCAsmBackend(STI, MRI, Options.MCOptions);
if (!MCE || !MAB)
return true;
@@ -225,17 +223,16 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerB
// Create the code emitter for the target if it exists. If not, .o file
// emission fails.
+ const MCSubtargetInfo &STI = *getMCSubtargetInfo();
const MCRegisterInfo &MRI = *getMCRegisterInfo();
MCCodeEmitter *MCE =
getTarget().createMCCodeEmitter(*getMCInstrInfo(), MRI, *Ctx);
MCAsmBackend *MAB =
- getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU,
- Options.MCOptions);
+ getTarget().createMCAsmBackend(STI, MRI, Options.MCOptions);
if (!MCE || !MAB)
return true;
const Triple &T = getTargetTriple();
- const MCSubtargetInfo &STI = *getMCSubtargetInfo();
std::unique_ptr<MCStreamer> AsmStreamer(getTarget().createMCObjectStreamer(
T, *Ctx, std::unique_ptr<MCAsmBackend>(MAB), Out,
std::unique_ptr<MCCodeEmitter>(MCE), STI, Options.MCOptions.MCRelaxAll,
Modified: vendor/llvm/dist-release_60/lib/CodeGen/LiveDebugVariables.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/LiveDebugVariables.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/CodeGen/LiveDebugVariables.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -242,8 +242,11 @@ class UserValue { (public)
// We are storing a MachineOperand outside a MachineInstr.
locations.back().clearParent();
// Don't store def operands.
- if (locations.back().isReg())
+ if (locations.back().isReg()) {
+ if (locations.back().isDef())
+ locations.back().setIsDead(false);
locations.back().setIsUse();
+ }
return locations.size() - 1;
}
Modified: vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -3850,7 +3850,6 @@ bool DAGCombiner::SearchForAndLoads(SDNode *N,
return false;
}
case ISD::ZERO_EXTEND:
- case ISD::ANY_EXTEND:
case ISD::AssertZext: {
unsigned ActiveBits = Mask->getAPIntValue().countTrailingOnes();
EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
@@ -13783,30 +13782,30 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
}
}
- // Deal with elidable overlapping chained stores.
- if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain))
- if (OptLevel != CodeGenOpt::None && ST->isUnindexed() &&
- ST1->isUnindexed() && !ST1->isVolatile() && ST1->hasOneUse() &&
- !ST1->getBasePtr().isUndef() && !ST->isVolatile()) {
- BaseIndexOffset STBasePtr = BaseIndexOffset::match(ST->getBasePtr(), DAG);
- BaseIndexOffset ST1BasePtr =
- BaseIndexOffset::match(ST1->getBasePtr(), DAG);
- unsigned STBytes = ST->getMemoryVT().getStoreSize();
- unsigned ST1Bytes = ST1->getMemoryVT().getStoreSize();
- int64_t PtrDiff;
- // If this is a store who's preceeding store to a subset of the same
- // memory and no one other node is chained to that store we can
- // effectively drop the store. Do not remove stores to undef as they may
- // be used as data sinks.
+ if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) {
+ if (ST->isUnindexed() && !ST->isVolatile() && ST1->isUnindexed() &&
+ !ST1->isVolatile() && ST1->getBasePtr() == Ptr &&
+ ST->getMemoryVT() == ST1->getMemoryVT()) {
+ // If this is a store followed by a store with the same value to the same
+ // location, then the store is dead/noop.
+ if (ST1->getValue() == Value) {
+ // The store is dead, remove it.
+ return Chain;
+ }
- if (((ST->getBasePtr() == ST1->getBasePtr()) &&
- (ST->getValue() == ST1->getValue())) ||
- (STBasePtr.equalBaseIndex(ST1BasePtr, DAG, PtrDiff) &&
- (0 <= PtrDiff) && (PtrDiff + ST1Bytes <= STBytes))) {
+ // If this is a store whose preceding store to the same location
+ // and no other node is chained to that store we can effectively
+ // drop the store. Do not remove stores to undef as they may be used as
+ // data sinks.
+ if (OptLevel != CodeGenOpt::None && ST1->hasOneUse() &&
+ !ST1->getBasePtr().isUndef()) {
+ // ST1 is fully overwritten and can be elided. Combine with its chain
+ // value.
CombineTo(ST1, ST1->getChain());
- return SDValue(N, 0);
+ return SDValue();
}
}
+ }
// If this is an FP_ROUND or TRUNC followed by a store, fold this into a
// truncating store. We can do this even if this is already a truncstore.
Modified: vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/FastISel.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/FastISel.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/FastISel.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -2051,11 +2051,9 @@ bool FastISel::handlePHINodesInSuccessorBlocks(const B
// At this point we know that there is a 1-1 correspondence between LLVM PHI
// nodes and Machine PHI nodes, but the incoming operands have not been
// emitted yet.
- for (BasicBlock::const_iterator I = SuccBB->begin();
- const auto *PN = dyn_cast<PHINode>(I); ++I) {
-
+ for (const PHINode &PN : SuccBB->phis()) {
// Ignore dead phi's.
- if (PN->use_empty())
+ if (PN.use_empty())
continue;
// Only handle legal types. Two interesting things to note here. First,
@@ -2064,7 +2062,7 @@ bool FastISel::handlePHINodesInSuccessorBlocks(const B
// own moves. Second, this check is necessary because FastISel doesn't
// use CreateRegs to create registers, so it always creates
// exactly one register for each non-void instruction.
- EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
+ EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
// Handle integer promotions, though, because they're common and easy.
if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
@@ -2073,11 +2071,11 @@ bool FastISel::handlePHINodesInSuccessorBlocks(const B
}
}
- const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
+ const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
// Set the DebugLoc for the copy. Prefer the location of the operand
// if there is one; use the location of the PHI otherwise.
- DbgLoc = PN->getDebugLoc();
+ DbgLoc = PN.getDebugLoc();
if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
DbgLoc = Inst->getDebugLoc();
Modified: vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
==============================================================================
--- vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp Sat Jan 6 21:19:52 2018 (r327638)
+++ vendor/llvm/dist-release_60/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp Sat Jan 6 21:34:26 2018 (r327639)
@@ -257,20 +257,20 @@ void FunctionLoweringInfo::set(const Function &fn, Mac
// Create Machine PHI nodes for LLVM PHI nodes, lowering them as
// appropriate.
- for (BasicBlock::const_iterator I = BB.begin();
- const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
- if (PN->use_empty()) continue;
+ for (const PHINode &PN : BB.phis()) {
+ if (PN.use_empty())
+ continue;
// Skip empty types
- if (PN->getType()->isEmptyTy())
+ if (PN.getType()->isEmptyTy())
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-src-all
mailing list