svn commit: r326460 - in vendor/llvm/dist: . docs include/llvm/Analysis include/llvm/CodeGen include/llvm/IR include/llvm/Support lib/AsmParser lib/Bitcode/Reader lib/CodeGen lib/CodeGen/AsmPrinter...
Dimitry Andric
dim at FreeBSD.org
Sat Dec 2 12:46:28 UTC 2017
Author: dim
Date: Sat Dec 2 12:46:23 2017
New Revision: 326460
URL: https://svnweb.freebsd.org/changeset/base/326460
Log:
Vendor import of llvm release_50 branch r319231:
https://llvm.org/svn/llvm-project/llvm/branches/release_50@319231
Added:
vendor/llvm/dist/test/Bitcode/upgrade-section-name.ll
vendor/llvm/dist/test/CodeGen/AArch64/cmp-frameindex.ll
vendor/llvm/dist/test/CodeGen/AMDGPU/hazard.mir
vendor/llvm/dist/test/CodeGen/ARM/no-fpscr-liveness.ll
vendor/llvm/dist/test/CodeGen/ARM/pr32578.ll
vendor/llvm/dist/test/CodeGen/AVR/atomics/load-store-16-unexpected-register-bug.ll
vendor/llvm/dist/test/CodeGen/AVR/branch-relaxation-long.ll
vendor/llvm/dist/test/CodeGen/AVR/clear-bss.ll
vendor/llvm/dist/test/CodeGen/AVR/copy-data-to-ram.ll
vendor/llvm/dist/test/CodeGen/AVR/std-ldd-immediate-overflow.ll
vendor/llvm/dist/test/CodeGen/Mips/dsp-spill-reload.ll
vendor/llvm/dist/test/CodeGen/Mips/msa/emergency-spill.mir
vendor/llvm/dist/test/CodeGen/X86/pr34605.ll
vendor/llvm/dist/test/DebugInfo/Sparc/subreg.ll
vendor/llvm/dist/test/DebugInfo/cross-cu-scope.ll
vendor/llvm/dist/test/LTO/Resolution/X86/function-alias-non-prevailing.ll
vendor/llvm/dist/test/Linker/Inputs/only-needed-compiler-used.ll
vendor/llvm/dist/test/Linker/Inputs/only-needed-ctors.ll
vendor/llvm/dist/test/Linker/Inputs/only-needed-dtors.ll
vendor/llvm/dist/test/Linker/Inputs/only-needed-used.ll
vendor/llvm/dist/test/Linker/only-needed-compiler-used.ll
vendor/llvm/dist/test/Linker/only-needed-ctors1.ll
vendor/llvm/dist/test/Linker/only-needed-ctors2.ll
vendor/llvm/dist/test/Linker/only-needed-dtors1.ll
vendor/llvm/dist/test/Linker/only-needed-dtors2.ll
vendor/llvm/dist/test/Linker/only-needed-used.ll
vendor/llvm/dist/test/MC/Mips/macro-aliases-invalid-wrong-error.s (contents, props changed)
vendor/llvm/dist/test/MC/Mips/macro-aliases.s (contents, props changed)
vendor/llvm/dist/test/MC/Mips/mt/invalid-wrong-error.s (contents, props changed)
vendor/llvm/dist/test/MC/Mips/mt/mftr-mttr-aliases-invalid-wrong-error.s (contents, props changed)
vendor/llvm/dist/test/MC/Mips/mt/mftr-mttr-aliases-invalid.s (contents, props changed)
vendor/llvm/dist/test/MC/Mips/mt/mftr-mttr-aliases.s (contents, props changed)
vendor/llvm/dist/test/MC/Mips/mt/mftr-mttr-reserved-valid.s (contents, props changed)
vendor/llvm/dist/test/MC/Mips/tls-symbols.s (contents, props changed)
vendor/llvm/dist/test/Transforms/SimplifyCFG/gepcost.ll
Deleted:
vendor/llvm/dist/test/CodeGen/AArch64/thread-pointer.ll
Modified:
vendor/llvm/dist/CMakeLists.txt
vendor/llvm/dist/docs/CMake.rst
vendor/llvm/dist/include/llvm/Analysis/TargetTransformInfoImpl.h
vendor/llvm/dist/include/llvm/CodeGen/MachineRegisterInfo.h
vendor/llvm/dist/include/llvm/IR/AutoUpgrade.h
vendor/llvm/dist/include/llvm/Support/FormatVariadic.h
vendor/llvm/dist/lib/AsmParser/LLParser.cpp
vendor/llvm/dist/lib/Bitcode/Reader/BitcodeReader.cpp
vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfDebug.h
vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
vendor/llvm/dist/lib/CodeGen/LiveIntervalAnalysis.cpp
vendor/llvm/dist/lib/CodeGen/MachineRegisterInfo.cpp
vendor/llvm/dist/lib/CodeGen/MachineVerifier.cpp
vendor/llvm/dist/lib/IR/AutoUpgrade.cpp
vendor/llvm/dist/lib/IR/ConstantFold.cpp
vendor/llvm/dist/lib/Linker/IRMover.cpp
vendor/llvm/dist/lib/Linker/LinkModules.cpp
vendor/llvm/dist/lib/Support/Host.cpp
vendor/llvm/dist/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp
vendor/llvm/dist/lib/Target/AArch64/AArch64InstrInfo.cpp
vendor/llvm/dist/lib/Target/AArch64/AArch64InstrInfo.td
vendor/llvm/dist/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
vendor/llvm/dist/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
vendor/llvm/dist/lib/Target/ARM/ARMAsmPrinter.cpp
vendor/llvm/dist/lib/Target/ARM/ARMCallLowering.cpp
vendor/llvm/dist/lib/Target/ARM/ARMExpandPseudoInsts.cpp
vendor/llvm/dist/lib/Target/ARM/ARMFastISel.cpp
vendor/llvm/dist/lib/Target/ARM/ARMFrameLowering.cpp
vendor/llvm/dist/lib/Target/ARM/ARMInstrInfo.td
vendor/llvm/dist/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
vendor/llvm/dist/lib/Target/ARM/ARMSubtarget.h
vendor/llvm/dist/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
vendor/llvm/dist/lib/Target/AVR/AVRExpandPseudoInsts.cpp
vendor/llvm/dist/lib/Target/AVR/AVRISelLowering.cpp
vendor/llvm/dist/lib/Target/AVR/AVRISelLowering.h
vendor/llvm/dist/lib/Target/AVR/AVRInstrInfo.cpp
vendor/llvm/dist/lib/Target/AVR/AVRInstrInfo.h
vendor/llvm/dist/lib/Target/AVR/AVRInstrInfo.td
vendor/llvm/dist/lib/Target/AVR/AVRRegisterInfo.cpp
vendor/llvm/dist/lib/Target/AVR/AVRTargetMachine.cpp
vendor/llvm/dist/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.cpp
vendor/llvm/dist/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.h
vendor/llvm/dist/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
vendor/llvm/dist/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp
vendor/llvm/dist/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
vendor/llvm/dist/lib/Target/Mips/MicroMipsDSPInstrInfo.td
vendor/llvm/dist/lib/Target/Mips/MipsDSPInstrInfo.td
vendor/llvm/dist/lib/Target/Mips/MipsFrameLowering.cpp
vendor/llvm/dist/lib/Target/Mips/MipsMTInstrFormats.td
vendor/llvm/dist/lib/Target/Mips/MipsMTInstrInfo.td
vendor/llvm/dist/lib/Target/Mips/MipsSEFrameLowering.cpp
vendor/llvm/dist/lib/Target/Mips/MipsSEInstrInfo.cpp
vendor/llvm/dist/lib/Target/Mips/MipsSchedule.td
vendor/llvm/dist/lib/Target/Mips/MipsScheduleGeneric.td
vendor/llvm/dist/lib/Target/Mips/MipsTargetStreamer.h
vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp
vendor/llvm/dist/test/Bitcode/upgrade-module-flag.ll
vendor/llvm/dist/test/CodeGen/AArch64/arm64-memset-inline.ll
vendor/llvm/dist/test/CodeGen/AArch64/falkor-hwpf-fix.mir
vendor/llvm/dist/test/CodeGen/AArch64/fastcc.ll
vendor/llvm/dist/test/CodeGen/AArch64/ldst-opt.ll
vendor/llvm/dist/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
vendor/llvm/dist/test/CodeGen/ARM/armv4.ll
vendor/llvm/dist/test/CodeGen/ARM/debug-segmented-stacks.ll
vendor/llvm/dist/test/CodeGen/ARM/segmented-stacks-dynamic.ll
vendor/llvm/dist/test/CodeGen/ARM/segmented-stacks.ll
vendor/llvm/dist/test/CodeGen/AVR/atomics/load16.ll
vendor/llvm/dist/test/CodeGen/AVR/call.ll
vendor/llvm/dist/test/CodeGen/AVR/directmem.ll
vendor/llvm/dist/test/CodeGen/AVR/load.ll
vendor/llvm/dist/test/CodeGen/AVR/pseudo/LDWRdPtr-same-src-dst.mir
vendor/llvm/dist/test/CodeGen/AVR/pseudo/LDWRdPtr.mir
vendor/llvm/dist/test/CodeGen/AVR/pseudo/LDWRdPtrPd.mir
vendor/llvm/dist/test/CodeGen/AVR/pseudo/LDWRdPtrPi.mir
vendor/llvm/dist/test/CodeGen/AVR/varargs.ll
vendor/llvm/dist/test/CodeGen/Mips/msa/frameindex.ll
vendor/llvm/dist/test/CodeGen/X86/fp128-cast.ll
vendor/llvm/dist/test/MC/Disassembler/Mips/mt/valid-r2-el.txt
vendor/llvm/dist/test/MC/Disassembler/Mips/mt/valid-r2.txt
vendor/llvm/dist/test/MC/Mips/mt/invalid.s
vendor/llvm/dist/test/MC/Mips/mt/valid.s
vendor/llvm/dist/test/Transforms/InstCombine/gep-vector.ll
vendor/llvm/dist/unittests/Support/FormatVariadicTest.cpp
vendor/llvm/dist/unittests/Support/Host.cpp
vendor/llvm/dist/utils/release/merge-request.sh
Modified: vendor/llvm/dist/CMakeLists.txt
==============================================================================
--- vendor/llvm/dist/CMakeLists.txt Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/CMakeLists.txt Sat Dec 2 12:46:23 2017 (r326460)
@@ -26,7 +26,7 @@ if(NOT DEFINED LLVM_VERSION_MINOR)
set(LLVM_VERSION_MINOR 0)
endif()
if(NOT DEFINED LLVM_VERSION_PATCH)
- set(LLVM_VERSION_PATCH 0)
+ set(LLVM_VERSION_PATCH 1)
endif()
if(NOT DEFINED LLVM_VERSION_SUFFIX)
set(LLVM_VERSION_SUFFIX "")
@@ -207,10 +207,6 @@ include(VersionFromVCS)
option(LLVM_APPEND_VC_REV
"Embed the version control system revision id in LLVM" ON)
-
-if( LLVM_APPEND_VC_REV )
- add_version_info_from_vcs(PACKAGE_VERSION)
-endif()
set(PACKAGE_NAME LLVM)
set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
Modified: vendor/llvm/dist/docs/CMake.rst
==============================================================================
--- vendor/llvm/dist/docs/CMake.rst Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/docs/CMake.rst Sat Dec 2 12:46:23 2017 (r326460)
@@ -248,9 +248,10 @@ LLVM-specific variables
**LLVM_APPEND_VC_REV**:BOOL
Embed version control revision info (svn revision number or Git revision id).
- This is used among other things in the LLVM version string (stored in the
- PACKAGE_VERSION macro). For this to work cmake must be invoked before the
- build. Defaults to ON.
+ The version info is provided by the ``LLVM_REVISION`` macro in
+ ``llvm/include/llvm/Support/VCSRevision.h``. Developers using git who don't
+ need revision info can disable this option to avoid re-linking most binaries
+ after a branch switch. Defaults to ON.
**LLVM_ENABLE_THREADS**:BOOL
Build with threads support, if available. Defaults to ON.
Modified: vendor/llvm/dist/include/llvm/Analysis/TargetTransformInfoImpl.h
==============================================================================
--- vendor/llvm/dist/include/llvm/Analysis/TargetTransformInfoImpl.h Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/include/llvm/Analysis/TargetTransformInfoImpl.h Sat Dec 2 12:46:23 2017 (r326460)
@@ -652,6 +652,12 @@ class TargetTransformInfoImplCRTPBase : public TargetT
auto GTI = gep_type_begin(PointeeType, Operands);
Type *TargetType;
+
+ // Handle the case where the GEP instruction has a single operand,
+ // the base pointer, in which case TargetType is a nullptr.
+ if (Operands.empty())
+ return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;
+
for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
TargetType = GTI.getIndexedType();
// We assume that the cost of Scalar GEP with constant index and the
Modified: vendor/llvm/dist/include/llvm/CodeGen/MachineRegisterInfo.h
==============================================================================
--- vendor/llvm/dist/include/llvm/CodeGen/MachineRegisterInfo.h Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/include/llvm/CodeGen/MachineRegisterInfo.h Sat Dec 2 12:46:23 2017 (r326460)
@@ -807,6 +807,14 @@ class MachineRegisterInfo { (public)
return getReservedRegs().test(PhysReg);
}
+ /// Returns true when the given register unit is considered reserved.
+ ///
+ /// Register units are considered reserved when for at least one of their
+ /// root registers, the root register and all super registers are reserved.
+ /// This currently iterates the register hierarchy and may be slower than
+ /// expected.
+ bool isReservedRegUnit(unsigned Unit) const;
+
/// isAllocatable - Returns true when PhysReg belongs to an allocatable
/// register class and it hasn't been reserved.
///
Modified: vendor/llvm/dist/include/llvm/IR/AutoUpgrade.h
==============================================================================
--- vendor/llvm/dist/include/llvm/IR/AutoUpgrade.h Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/include/llvm/IR/AutoUpgrade.h Sat Dec 2 12:46:23 2017 (r326460)
@@ -51,6 +51,8 @@ namespace llvm {
/// module is modified.
bool UpgradeModuleFlags(Module &M);
+ void UpgradeSectionAttributes(Module &M);
+
/// If the given TBAA tag uses the scalar TBAA format, create a new node
/// corresponding to the upgrade to the struct-path aware TBAA format.
/// Otherwise return the \p TBAANode itself.
Modified: vendor/llvm/dist/include/llvm/Support/FormatVariadic.h
==============================================================================
--- vendor/llvm/dist/include/llvm/Support/FormatVariadic.h Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/include/llvm/Support/FormatVariadic.h Sat Dec 2 12:46:23 2017 (r326460)
@@ -94,6 +94,15 @@ class formatv_object_base { (public)
Adapters.reserve(ParamCount);
}
+ formatv_object_base(formatv_object_base const &rhs) = delete;
+
+ formatv_object_base(formatv_object_base &&rhs)
+ : Fmt(std::move(rhs.Fmt)),
+ Adapters(), // Adapters are initialized by formatv_object
+ Replacements(std::move(rhs.Replacements)) {
+ Adapters.reserve(rhs.Adapters.size());
+ };
+
void format(raw_ostream &S) const {
for (auto &R : Replacements) {
if (R.Type == ReplacementType::Empty)
@@ -147,6 +156,14 @@ template <typename Tuple> class formatv_object : publi
formatv_object(StringRef Fmt, Tuple &&Params)
: formatv_object_base(Fmt, std::tuple_size<Tuple>::value),
Parameters(std::move(Params)) {
+ Adapters = apply_tuple(create_adapters(), Parameters);
+ }
+
+ formatv_object(formatv_object const &rhs) = delete;
+
+ formatv_object(formatv_object &&rhs)
+ : formatv_object_base(std::move(rhs)),
+ Parameters(std::move(rhs.Parameters)) {
Adapters = apply_tuple(create_adapters(), Parameters);
}
};
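
As an aside, a minimal sketch (my own illustration, not part of the commit, assuming only the in-tree llvm::formatv and raw_ostream APIs; the values formatted are arbitrary) of why the move constructors added above matter: a formatv result can now be stored in a local or returned from a helper and formatted later, while copying remains deleted.

    // Sketch only: relies on the formatv_object move constructor added above.
    #include "llvm/Support/FormatVariadic.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      // Move-constructs the formatv_object into a local; the copy ctor is deleted.
      auto Msg = llvm::formatv("merged r{0} into {1}", 319231, "release_50");
      llvm::outs() << Msg << "\n";       // lazy: formats when streamed
      llvm::outs() << Msg.str() << "\n"; // eager: materializes a std::string
      return 0;
    }
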
Modified: vendor/llvm/dist/lib/AsmParser/LLParser.cpp
==============================================================================
--- vendor/llvm/dist/lib/AsmParser/LLParser.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/AsmParser/LLParser.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -240,6 +240,7 @@ bool LLParser::ValidateEndOfModule() {
UpgradeDebugInfo(*M);
UpgradeModuleFlags(*M);
+ UpgradeSectionAttributes(*M);
if (!Slots)
return false;
Modified: vendor/llvm/dist/lib/Bitcode/Reader/BitcodeReader.cpp
==============================================================================
--- vendor/llvm/dist/lib/Bitcode/Reader/BitcodeReader.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Bitcode/Reader/BitcodeReader.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -264,7 +264,7 @@ Expected<bool> hasObjCCategoryInModule(BitstreamCursor
if (convertToString(Record, 0, S))
return error("Invalid record");
// Check for the i386 and other (x86_64, ARM) conventions
- if (S.find("__DATA, __objc_catlist") != std::string::npos ||
+ if (S.find("__DATA,__objc_catlist") != std::string::npos ||
S.find("__OBJC,__category") != std::string::npos)
return true;
break;
Modified: vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -621,6 +621,7 @@ void DwarfCompileUnit::constructAbstractSubprogramScop
auto *SP = cast<DISubprogram>(Scope->getScopeNode());
DIE *ContextDIE;
+ DwarfCompileUnit *ContextCU = this;
if (includeMinimalInlineScopes())
ContextDIE = &getUnitDie();
@@ -631,18 +632,23 @@ void DwarfCompileUnit::constructAbstractSubprogramScop
else if (auto *SPDecl = SP->getDeclaration()) {
ContextDIE = &getUnitDie();
getOrCreateSubprogramDIE(SPDecl);
- } else
+ } else {
ContextDIE = getOrCreateContextDIE(resolve(SP->getScope()));
+ // The scope may be shared with a subprogram that has already been
+ // constructed in another CU, in which case we need to construct this
+ // subprogram in the same CU.
+ ContextCU = DD->lookupCU(ContextDIE->getUnitDie());
+ }
// Passing null as the associated node because the abstract definition
// shouldn't be found by lookup.
- AbsDef = &createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, nullptr);
- applySubprogramAttributesToDefinition(SP, *AbsDef);
+ AbsDef = &ContextCU->createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, nullptr);
+ ContextCU->applySubprogramAttributesToDefinition(SP, *AbsDef);
- if (!includeMinimalInlineScopes())
- addUInt(*AbsDef, dwarf::DW_AT_inline, None, dwarf::DW_INL_inlined);
- if (DIE *ObjectPointer = createAndAddScopeChildren(Scope, *AbsDef))
- addDIEEntry(*AbsDef, dwarf::DW_AT_object_pointer, *ObjectPointer);
+ if (!ContextCU->includeMinimalInlineScopes())
+ ContextCU->addUInt(*AbsDef, dwarf::DW_AT_inline, None, dwarf::DW_INL_inlined);
+ if (DIE *ObjectPointer = ContextCU->createAndAddScopeChildren(Scope, *AbsDef))
+ ContextCU->addDIEEntry(*AbsDef, dwarf::DW_AT_object_pointer, *ObjectPointer);
}
DIE *DwarfCompileUnit::constructImportedEntityDIE(
Modified: vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfDebug.h
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfDebug.h Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfDebug.h Sat Dec 2 12:46:23 2017 (r326460)
@@ -283,7 +283,7 @@ class DwarfDebug : public DebugHandlerBase {
// 0, referencing the comp_dir of all the type units that use it.
MCDwarfDwoLineTable SplitTypeUnitFileTable;
/// @}
-
+
/// True iff there are multiple CUs in this module.
bool SingleCU;
bool IsDarwin;
@@ -562,6 +562,9 @@ class DwarfDebug : public DebugHandlerBase {
bool isLexicalScopeDIENull(LexicalScope *Scope);
bool hasDwarfPubSections(bool includeMinimalInlineScopes) const;
+
+ /// Find the matching DwarfCompileUnit for the given CU DIE.
+ DwarfCompileUnit *lookupCU(const DIE *Die) { return CUDieMap.lookup(Die); }
};
} // End of namespace llvm
Modified: vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfExpression.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/CodeGen/AsmPrinter/DwarfExpression.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -131,13 +131,12 @@ bool DwarfExpression::addMachineReg(const TargetRegist
// Intersection between the bits we already emitted and the bits
// covered by this subregister.
- SmallBitVector Intersection(RegSize, false);
- Intersection.set(Offset, Offset + Size);
- Intersection ^= Coverage;
+ SmallBitVector CurSubReg(RegSize, false);
+ CurSubReg.set(Offset, Offset + Size);
// If this sub-register has a DWARF number and we haven't covered
// its range, emit a DWARF piece for it.
- if (Reg >= 0 && Intersection.any()) {
+ if (Reg >= 0 && CurSubReg.test(Coverage)) {
// Emit a piece for any gap in the coverage.
if (Offset > CurPos)
DwarfRegs.push_back({-1, Offset - CurPos, nullptr});
Modified: vendor/llvm/dist/lib/CodeGen/LiveIntervalAnalysis.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/LiveIntervalAnalysis.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/CodeGen/LiveIntervalAnalysis.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -269,8 +269,9 @@ void LiveIntervals::computeRegUnitRange(LiveRange &LR,
// may share super-registers. That's OK because createDeadDefs() is
// idempotent. It is very rare for a register unit to have multiple roots, so
// uniquing super-registers is probably not worthwhile.
- bool IsReserved = true;
+ bool IsReserved = false;
for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
+ bool IsRootReserved = true;
for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
Super.isValid(); ++Super) {
unsigned Reg = *Super;
@@ -279,9 +280,12 @@ void LiveIntervals::computeRegUnitRange(LiveRange &LR,
// A register unit is considered reserved if all its roots and all their
// super registers are reserved.
if (!MRI->isReserved(Reg))
- IsReserved = false;
+ IsRootReserved = false;
}
+ IsReserved |= IsRootReserved;
}
+ assert(IsReserved == MRI->isReservedRegUnit(Unit) &&
+ "reserved computation mismatch");
// Now extend LR to reach all uses.
// Ignore uses of reserved registers. We only track defs of those.
@@ -924,7 +928,7 @@ class LiveIntervals::HMEditor { (public)
// kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
// flags, and postRA passes will use a live register utility instead.
LiveRange *getRegUnitLI(unsigned Unit) {
- if (UpdateFlags)
+ if (UpdateFlags && !MRI.isReservedRegUnit(Unit))
return &LIS.getRegUnit(Unit);
return LIS.getCachedRegUnit(Unit);
}
Modified: vendor/llvm/dist/lib/CodeGen/MachineRegisterInfo.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/MachineRegisterInfo.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/CodeGen/MachineRegisterInfo.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -601,3 +601,21 @@ void MachineRegisterInfo::setCalleeSavedRegs(ArrayRef<
UpdatedCSRs.push_back(0);
IsUpdatedCSRsInitialized = true;
}
+
+bool MachineRegisterInfo::isReservedRegUnit(unsigned Unit) const {
+ const TargetRegisterInfo *TRI = getTargetRegisterInfo();
+ for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
+ bool IsRootReserved = true;
+ for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
+ Super.isValid(); ++Super) {
+ unsigned Reg = *Super;
+ if (!isReserved(Reg)) {
+ IsRootReserved = false;
+ break;
+ }
+ }
+ if (IsRootReserved)
+ return true;
+ }
+ return false;
+}
Modified: vendor/llvm/dist/lib/CodeGen/MachineVerifier.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/MachineVerifier.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/CodeGen/MachineVerifier.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -1316,6 +1316,8 @@ void MachineVerifier::checkLiveness(const MachineOpera
// Check the cached regunit intervals.
if (TargetRegisterInfo::isPhysicalRegister(Reg) && !isReserved(Reg)) {
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
+ if (MRI->isReservedRegUnit(*Units))
+ continue;
if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
}
Modified: vendor/llvm/dist/lib/IR/AutoUpgrade.cpp
==============================================================================
--- vendor/llvm/dist/lib/IR/AutoUpgrade.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/IR/AutoUpgrade.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -2271,6 +2271,24 @@ bool llvm::UpgradeModuleFlags(Module &M) {
}
}
}
+ // Upgrade Objective-C Image Info Section. Remove the whitespace in the
+ // section name so that llvm-lto will not complain about mismatching
+ // module flags that are functionally the same.
+ if (ID->getString() == "Objective-C Image Info Section") {
+ if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
+ SmallVector<StringRef, 4> ValueComp;
+ Value->getString().split(ValueComp, " ");
+ if (ValueComp.size() != 1) {
+ std::string NewValue;
+ for (auto &S : ValueComp)
+ NewValue += S.str();
+ Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
+ MDString::get(M.getContext(), NewValue)};
+ ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
+ Changed = true;
+ }
+ }
+ }
}
// "Objective-C Class Properties" is recently added for Objective-C. We
@@ -2285,6 +2303,35 @@ bool llvm::UpgradeModuleFlags(Module &M) {
}
return Changed;
+}
+
+void llvm::UpgradeSectionAttributes(Module &M) {
+ auto TrimSpaces = [](StringRef Section) -> std::string {
+ SmallVector<StringRef, 5> Components;
+ Section.split(Components, ',');
+
+ SmallString<32> Buffer;
+ raw_svector_ostream OS(Buffer);
+
+ for (auto Component : Components)
+ OS << ',' << Component.trim();
+
+ return OS.str().substr(1);
+ };
+
+ for (auto &GV : M.globals()) {
+ if (!GV.hasSection())
+ continue;
+
+ StringRef Section = GV.getSection();
+
+ if (!Section.startswith("__DATA, __objc_catlist"))
+ continue;
+
+ // __DATA, __objc_catlist, regular, no_dead_strip
+ // __DATA,__objc_catlist,regular,no_dead_strip
+ GV.setSection(TrimSpaces(Section));
+ }
}
static bool isOldLoopArgument(Metadata *MD) {
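
For illustration, a self-contained restatement of the TrimSpaces helper above (a sketch assuming only StringRef, SmallString, and raw_svector_ostream from the LLVM support libraries; the helper name and main() driver are mine), showing the normalization UpgradeSectionAttributes applies to the i386-style section string:

    // Sketch only: mirrors the TrimSpaces lambda in UpgradeSectionAttributes.
    // "__DATA, __objc_catlist, regular, no_dead_strip"
    //   becomes "__DATA,__objc_catlist,regular,no_dead_strip".
    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/raw_ostream.h"

    static std::string trimSectionSpaces(llvm::StringRef Section) {
      llvm::SmallVector<llvm::StringRef, 5> Components;
      Section.split(Components, ',');
      llvm::SmallString<64> Buffer;
      llvm::raw_svector_ostream OS(Buffer);
      for (llvm::StringRef Component : Components)
        OS << ',' << Component.trim();   // re-join without spaces
      return OS.str().substr(1).str();   // drop the leading comma
    }

    int main() {
      llvm::outs() << trimSectionSpaces("__DATA, __objc_catlist, regular, no_dead_strip")
                   << "\n";
      return 0;
    }
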
Modified: vendor/llvm/dist/lib/IR/ConstantFold.cpp
==============================================================================
--- vendor/llvm/dist/lib/IR/ConstantFold.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/IR/ConstantFold.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -2199,6 +2199,9 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *Pointe
Unknown = true;
continue;
}
+ if (!isa<ConstantInt>(Idxs[i - 1]))
+ // FIXME: add support for constant vector indices.
+ continue;
if (InRangeIndex && i == *InRangeIndex + 1) {
// If an index is marked inrange, we cannot apply this canonicalization to
// the following index, as that will cause the inrange index to point to
Modified: vendor/llvm/dist/lib/Linker/IRMover.cpp
==============================================================================
--- vendor/llvm/dist/lib/Linker/IRMover.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Linker/IRMover.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -640,6 +640,10 @@ GlobalValue *IRLinker::copyGlobalValueProto(const Glob
} else {
if (ForDefinition)
NewGV = copyGlobalAliasProto(cast<GlobalAlias>(SGV));
+ else if (SGV->getValueType()->isFunctionTy())
+ NewGV =
+ Function::Create(cast<FunctionType>(TypeMap.get(SGV->getValueType())),
+ GlobalValue::ExternalLinkage, SGV->getName(), &DstM);
else
NewGV = new GlobalVariable(
DstM, TypeMap.get(SGV->getValueType()),
Modified: vendor/llvm/dist/lib/Linker/LinkModules.cpp
==============================================================================
--- vendor/llvm/dist/lib/Linker/LinkModules.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Linker/LinkModules.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -329,8 +329,18 @@ bool ModuleLinker::shouldLinkFromSource(bool &LinkFrom
bool ModuleLinker::linkIfNeeded(GlobalValue &GV) {
GlobalValue *DGV = getLinkedToGlobal(&GV);
- if (shouldLinkOnlyNeeded() && !(DGV && DGV->isDeclaration()))
- return false;
+ if (shouldLinkOnlyNeeded()) {
+ // Always import variables with appending linkage.
+ if (!GV.hasAppendingLinkage()) {
+ // Don't import globals unless they are referenced by the destination
+ // module.
+ if (!DGV)
+ return false;
+ // Don't import globals that are already defined in the destination module
+ if (!DGV->isDeclaration())
+ return false;
+ }
+ }
if (DGV && !GV.hasLocalLinkage() && !GV.hasAppendingLinkage()) {
auto *DGVar = dyn_cast<GlobalVariable>(DGV);
Modified: vendor/llvm/dist/lib/Support/Host.cpp
==============================================================================
--- vendor/llvm/dist/lib/Support/Host.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Support/Host.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -208,6 +208,7 @@ StringRef sys::detail::getHostCPUNameForARM(
.Case("0x06f", "krait") // APQ8064
.Case("0x201", "kryo")
.Case("0x205", "kryo")
+ .Case("0xc00", "falkor")
.Default("generic");
return "generic";
Modified: vendor/llvm/dist/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -220,27 +220,27 @@ static Optional<LoadInfo> getLoadInfo(const MachineIns
default:
return None;
+ case AArch64::LD1i64:
+ case AArch64::LD2i64:
+ DestRegIdx = 0;
+ BaseRegIdx = 3;
+ OffsetIdx = -1;
+ IsPrePost = false;
+ break;
+
case AArch64::LD1i8:
case AArch64::LD1i16:
case AArch64::LD1i32:
- case AArch64::LD1i64:
case AArch64::LD2i8:
case AArch64::LD2i16:
case AArch64::LD2i32:
- case AArch64::LD2i64:
case AArch64::LD3i8:
case AArch64::LD3i16:
case AArch64::LD3i32:
+ case AArch64::LD3i64:
case AArch64::LD4i8:
case AArch64::LD4i16:
case AArch64::LD4i32:
- DestRegIdx = 0;
- BaseRegIdx = 3;
- OffsetIdx = -1;
- IsPrePost = false;
- break;
-
- case AArch64::LD3i64:
case AArch64::LD4i64:
DestRegIdx = -1;
BaseRegIdx = 3;
@@ -264,23 +264,16 @@ static Optional<LoadInfo> getLoadInfo(const MachineIns
case AArch64::LD1Rv4s:
case AArch64::LD1Rv8h:
case AArch64::LD1Rv16b:
- case AArch64::LD1Twov1d:
- case AArch64::LD1Twov2s:
- case AArch64::LD1Twov4h:
- case AArch64::LD1Twov8b:
- case AArch64::LD2Twov2s:
- case AArch64::LD2Twov4s:
- case AArch64::LD2Twov8b:
- case AArch64::LD2Rv1d:
- case AArch64::LD2Rv2s:
- case AArch64::LD2Rv4s:
- case AArch64::LD2Rv8b:
DestRegIdx = 0;
BaseRegIdx = 1;
OffsetIdx = -1;
IsPrePost = false;
break;
+ case AArch64::LD1Twov1d:
+ case AArch64::LD1Twov2s:
+ case AArch64::LD1Twov4h:
+ case AArch64::LD1Twov8b:
case AArch64::LD1Twov2d:
case AArch64::LD1Twov4s:
case AArch64::LD1Twov8h:
@@ -301,10 +294,17 @@ static Optional<LoadInfo> getLoadInfo(const MachineIns
case AArch64::LD1Fourv4s:
case AArch64::LD1Fourv8h:
case AArch64::LD1Fourv16b:
+ case AArch64::LD2Twov2s:
+ case AArch64::LD2Twov4s:
+ case AArch64::LD2Twov8b:
case AArch64::LD2Twov2d:
case AArch64::LD2Twov4h:
case AArch64::LD2Twov8h:
case AArch64::LD2Twov16b:
+ case AArch64::LD2Rv1d:
+ case AArch64::LD2Rv2s:
+ case AArch64::LD2Rv4s:
+ case AArch64::LD2Rv8b:
case AArch64::LD2Rv2d:
case AArch64::LD2Rv4h:
case AArch64::LD2Rv8h:
@@ -345,32 +345,32 @@ static Optional<LoadInfo> getLoadInfo(const MachineIns
IsPrePost = false;
break;
+ case AArch64::LD1i64_POST:
+ case AArch64::LD2i64_POST:
+ DestRegIdx = 1;
+ BaseRegIdx = 4;
+ OffsetIdx = 5;
+ IsPrePost = true;
+ break;
+
case AArch64::LD1i8_POST:
case AArch64::LD1i16_POST:
case AArch64::LD1i32_POST:
- case AArch64::LD1i64_POST:
case AArch64::LD2i8_POST:
case AArch64::LD2i16_POST:
case AArch64::LD2i32_POST:
- case AArch64::LD2i64_POST:
case AArch64::LD3i8_POST:
case AArch64::LD3i16_POST:
case AArch64::LD3i32_POST:
+ case AArch64::LD3i64_POST:
case AArch64::LD4i8_POST:
case AArch64::LD4i16_POST:
case AArch64::LD4i32_POST:
- DestRegIdx = 1;
- BaseRegIdx = 4;
- OffsetIdx = 5;
- IsPrePost = false;
- break;
-
- case AArch64::LD3i64_POST:
case AArch64::LD4i64_POST:
DestRegIdx = -1;
BaseRegIdx = 4;
OffsetIdx = 5;
- IsPrePost = false;
+ IsPrePost = true;
break;
case AArch64::LD1Onev1d_POST:
@@ -389,23 +389,16 @@ static Optional<LoadInfo> getLoadInfo(const MachineIns
case AArch64::LD1Rv4s_POST:
case AArch64::LD1Rv8h_POST:
case AArch64::LD1Rv16b_POST:
- case AArch64::LD1Twov1d_POST:
- case AArch64::LD1Twov2s_POST:
- case AArch64::LD1Twov4h_POST:
- case AArch64::LD1Twov8b_POST:
- case AArch64::LD2Twov2s_POST:
- case AArch64::LD2Twov4s_POST:
- case AArch64::LD2Twov8b_POST:
- case AArch64::LD2Rv1d_POST:
- case AArch64::LD2Rv2s_POST:
- case AArch64::LD2Rv4s_POST:
- case AArch64::LD2Rv8b_POST:
DestRegIdx = 1;
BaseRegIdx = 2;
OffsetIdx = 3;
- IsPrePost = false;
+ IsPrePost = true;
break;
+ case AArch64::LD1Twov1d_POST:
+ case AArch64::LD1Twov2s_POST:
+ case AArch64::LD1Twov4h_POST:
+ case AArch64::LD1Twov8b_POST:
case AArch64::LD1Twov2d_POST:
case AArch64::LD1Twov4s_POST:
case AArch64::LD1Twov8h_POST:
@@ -426,10 +419,17 @@ static Optional<LoadInfo> getLoadInfo(const MachineIns
case AArch64::LD1Fourv4s_POST:
case AArch64::LD1Fourv8h_POST:
case AArch64::LD1Fourv16b_POST:
+ case AArch64::LD2Twov2s_POST:
+ case AArch64::LD2Twov4s_POST:
+ case AArch64::LD2Twov8b_POST:
case AArch64::LD2Twov2d_POST:
case AArch64::LD2Twov4h_POST:
case AArch64::LD2Twov8h_POST:
case AArch64::LD2Twov16b_POST:
+ case AArch64::LD2Rv1d_POST:
+ case AArch64::LD2Rv2s_POST:
+ case AArch64::LD2Rv4s_POST:
+ case AArch64::LD2Rv8b_POST:
case AArch64::LD2Rv2d_POST:
case AArch64::LD2Rv4h_POST:
case AArch64::LD2Rv8h_POST:
@@ -467,7 +467,7 @@ static Optional<LoadInfo> getLoadInfo(const MachineIns
DestRegIdx = -1;
BaseRegIdx = 2;
OffsetIdx = 3;
- IsPrePost = false;
+ IsPrePost = true;
break;
case AArch64::LDRBBroW:
@@ -572,8 +572,12 @@ static Optional<LoadInfo> getLoadInfo(const MachineIns
IsPrePost = true;
break;
- case AArch64::LDPDi:
+ case AArch64::LDNPDi:
+ case AArch64::LDNPQi:
+ case AArch64::LDNPSi:
case AArch64::LDPQi:
+ case AArch64::LDPDi:
+ case AArch64::LDPSi:
DestRegIdx = -1;
BaseRegIdx = 2;
OffsetIdx = 3;
@@ -581,7 +585,6 @@ static Optional<LoadInfo> getLoadInfo(const MachineIns
break;
case AArch64::LDPSWi:
- case AArch64::LDPSi:
case AArch64::LDPWi:
case AArch64::LDPXi:
DestRegIdx = 0;
@@ -592,18 +595,18 @@ static Optional<LoadInfo> getLoadInfo(const MachineIns
case AArch64::LDPQpost:
case AArch64::LDPQpre:
+ case AArch64::LDPDpost:
+ case AArch64::LDPDpre:
+ case AArch64::LDPSpost:
+ case AArch64::LDPSpre:
DestRegIdx = -1;
BaseRegIdx = 3;
OffsetIdx = 4;
IsPrePost = true;
break;
- case AArch64::LDPDpost:
- case AArch64::LDPDpre:
case AArch64::LDPSWpost:
case AArch64::LDPSWpre:
- case AArch64::LDPSpost:
- case AArch64::LDPSpre:
case AArch64::LDPWpost:
case AArch64::LDPWpre:
case AArch64::LDPXpost:
@@ -687,9 +690,14 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineF
if (!TII->isStridedAccess(MI))
continue;
- LoadInfo LdI = *getLoadInfo(MI);
- unsigned OldTag = *getTag(TRI, MI, LdI);
- auto &OldCollisions = TagMap[OldTag];
+ Optional<LoadInfo> OptLdI = getLoadInfo(MI);
+ if (!OptLdI)
+ continue;
+ LoadInfo LdI = *OptLdI;
+ Optional<unsigned> OptOldTag = getTag(TRI, MI, LdI);
+ if (!OptOldTag)
+ continue;
+ auto &OldCollisions = TagMap[*OptOldTag];
if (OldCollisions.size() <= 1)
continue;
Modified: vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -9347,11 +9347,20 @@ static SDValue replaceZeroVectorStore(SelectionDAG &DA
return SDValue();
}
- // Use WZR/XZR here to prevent DAGCombiner::MergeConsecutiveStores from
- // undoing this transformation.
- SDValue SplatVal = VT.getVectorElementType().getSizeInBits() == 32
- ? DAG.getRegister(AArch64::WZR, MVT::i32)
- : DAG.getRegister(AArch64::XZR, MVT::i64);
+ // Use a CopyFromReg WZR/XZR here to prevent
+ // DAGCombiner::MergeConsecutiveStores from undoing this transformation.
+ SDLoc DL(&St);
+ unsigned ZeroReg;
+ EVT ZeroVT;
+ if (VT.getVectorElementType().getSizeInBits() == 32) {
+ ZeroReg = AArch64::WZR;
+ ZeroVT = MVT::i32;
+ } else {
+ ZeroReg = AArch64::XZR;
+ ZeroVT = MVT::i64;
+ }
+ SDValue SplatVal =
+ DAG.getCopyFromReg(DAG.getEntryNode(), DL, ZeroReg, ZeroVT);
return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
}
Modified: vendor/llvm/dist/lib/Target/AArch64/AArch64InstrInfo.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/AArch64/AArch64InstrInfo.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/AArch64/AArch64InstrInfo.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -940,6 +940,12 @@ bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const {
+ // The first operand can be a frame index where we'd normally expect a
+ // register.
+ assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands");
+ if (!MI.getOperand(1).isReg())
+ return false;
+
switch (MI.getOpcode()) {
default:
break;
Modified: vendor/llvm/dist/lib/Target/AArch64/AArch64InstrInfo.td
==============================================================================
--- vendor/llvm/dist/lib/Target/AArch64/AArch64InstrInfo.td Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/AArch64/AArch64InstrInfo.td Sat Dec 2 12:46:23 2017 (r326460)
@@ -441,8 +441,7 @@ def MSRpstateImm1 : MSRpstateImm0_1;
def MSRpstateImm4 : MSRpstateImm0_15;
// The thread pointer (on Linux, at least, where this has been implemented) is
-// TPIDR_EL0. Add pseudo op so we can mark it as not having any side effects.
-let hasSideEffects = 0 in
+// TPIDR_EL0.
def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
[(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
Modified: vendor/llvm/dist/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -167,6 +167,9 @@ AArch64RedundantCopyElimination::knownRegValInBlock(
// CMP is an alias for SUBS with a dead destination register.
case AArch64::SUBSWri:
case AArch64::SUBSXri: {
+ // Sometimes the first operand is a FrameIndex. Bail if that happens.
+ if (!PredI.getOperand(1).isReg())
+ return None;
MCPhysReg SrcReg = PredI.getOperand(1).getReg();
// Must not be a symbolic immediate.
Modified: vendor/llvm/dist/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/AMDGPU/GCNHazardRecognizer.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/AMDGPU/GCNHazardRecognizer.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -218,12 +218,17 @@ void GCNHazardRecognizer::RecedeCycle() {
int GCNHazardRecognizer::getWaitStatesSince(
function_ref<bool(MachineInstr *)> IsHazard) {
- int WaitStates = -1;
+ int WaitStates = 0;
for (MachineInstr *MI : EmittedInstrs) {
+ if (MI) {
+ if (IsHazard(MI))
+ return WaitStates;
+
+ unsigned Opcode = MI->getOpcode();
+ if (Opcode == AMDGPU::DBG_VALUE || Opcode == AMDGPU::IMPLICIT_DEF)
+ continue;
+ }
++WaitStates;
- if (!MI || !IsHazard(MI))
- continue;
- return WaitStates;
}
return std::numeric_limits<int>::max();
}
Modified: vendor/llvm/dist/lib/Target/ARM/ARMAsmPrinter.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/ARMAsmPrinter.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/ARM/ARMAsmPrinter.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -1276,6 +1276,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr
// Add 's' bit operand (always reg0 for this)
.addReg(0));
+ assert(Subtarget->hasV4TOps());
EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::BX)
.addReg(MI->getOperand(0).getReg()));
return;
@@ -1896,6 +1897,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr
.addImm(ARMCC::AL)
.addReg(0));
+ assert(Subtarget->hasV4TOps());
EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::BX)
.addReg(ScratchReg)
// Predicate.
Modified: vendor/llvm/dist/lib/Target/ARM/ARMCallLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/ARMCallLowering.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/ARM/ARMCallLowering.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -251,7 +251,9 @@ bool ARMCallLowering::lowerReturn(MachineIRBuilder &MI
const Value *Val, unsigned VReg) const {
assert(!Val == !VReg && "Return value without a vreg");
- auto Ret = MIRBuilder.buildInstrNoInsert(ARM::BX_RET).add(predOps(ARMCC::AL));
+ auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
+ unsigned Opcode = ST.getReturnOpcode();
+ auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(predOps(ARMCC::AL));
if (!lowerReturnVal(MIRBuilder, Val, VReg, Ret))
return false;
Modified: vendor/llvm/dist/lib/Target/ARM/ARMExpandPseudoInsts.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/ARMExpandPseudoInsts.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/ARM/ARMExpandPseudoInsts.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -1030,8 +1030,11 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
if (STI->isThumb())
MIB.add(predOps(ARMCC::AL));
} else if (RetOpcode == ARM::TCRETURNri) {
+ unsigned Opcode =
+ STI->isThumb() ? ARM::tTAILJMPr
+ : (STI->hasV4TOps() ? ARM::TAILJMPr : ARM::TAILJMPr4);
BuildMI(MBB, MBBI, dl,
- TII.get(STI->isThumb() ? ARM::tTAILJMPr : ARM::TAILJMPr))
+ TII.get(Opcode))
.addReg(JumpTarget.getReg(), RegState::Kill);
}
Modified: vendor/llvm/dist/lib/Target/ARM/ARMFastISel.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/ARMFastISel.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/ARM/ARMFastISel.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -1332,6 +1332,8 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *
if (AddrReg == 0) return false;
unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
+ assert(isThumb2 || Subtarget->hasV4TOps());
+
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc)).addReg(AddrReg));
@@ -2168,9 +2170,8 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
RetRegs.push_back(VA.getLocReg());
}
- unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(RetOpc));
+ TII.get(Subtarget->getReturnOpcode()));
AddOptionalDefs(MIB);
for (unsigned R : RetRegs)
MIB.addReg(R, RegState::Implicit);
Modified: vendor/llvm/dist/lib/Target/ARM/ARMFrameLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/ARMFrameLowering.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/ARM/ARMFrameLowering.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -479,7 +479,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &M
if (DPRCSSize > 0) {
// Since vpush register list cannot have gaps, there may be multiple vpush
// instructions in the prologue.
- while (MBBI->getOpcode() == ARM::VSTMDDB_UPD) {
+ while (MBBI != MBB.end() && MBBI->getOpcode() == ARM::VSTMDDB_UPD) {
DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(*MBBI));
LastPush = MBBI++;
}
@@ -2397,9 +2397,8 @@ void ARMFrameLowering::adjustForSegmentedStacks(
BuildMI(AllocMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
- // bx lr - Return from this function.
- Opcode = Thumb ? ARM::tBX_RET : ARM::BX_RET;
- BuildMI(AllocMBB, DL, TII.get(Opcode)).add(predOps(ARMCC::AL));
+ // Return from this function.
+ BuildMI(AllocMBB, DL, TII.get(ST->getReturnOpcode())).add(predOps(ARMCC::AL));
// Restore SR0 and SR1 in case of __morestack() was not called.
// pop {SR0, SR1}
Modified: vendor/llvm/dist/lib/Target/ARM/ARMInstrInfo.td
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/ARMInstrInfo.td Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/ARM/ARMInstrInfo.td Sat Dec 2 12:46:23 2017 (r326460)
@@ -2425,7 +2425,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarr
def TAILJMPr : ARMPseudoExpand<(outs), (ins tcGPR:$dst),
4, IIC_Br, [],
(BX GPR:$dst)>, Sched<[WriteBr]>,
- Requires<[IsARM]>;
+ Requires<[IsARM, HasV4T]>;
}
// Secure Monitor Call is a system instruction.
@@ -5586,6 +5586,12 @@ def Int_eh_sjlj_dispatchsetup : PseudoInst<(outs), (in
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in
def MOVPCRX : ARMPseudoExpand<(outs), (ins GPR:$dst),
4, IIC_Br, [(brind GPR:$dst)],
+ (MOVr PC, GPR:$dst, (ops 14, zero_reg), zero_reg)>,
+ Requires<[IsARM, NoV4T]>, Sched<[WriteBr]>;
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in
+ def TAILJMPr4 : ARMPseudoExpand<(outs), (ins GPR:$dst),
+ 4, IIC_Br, [],
(MOVr PC, GPR:$dst, (ops 14, zero_reg), zero_reg)>,
Requires<[IsARM, NoV4T]>, Sched<[WriteBr]>;
Modified: vendor/llvm/dist/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/ARMLoadStoreOptimizer.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/ARM/ARMLoadStoreOptimizer.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -1909,6 +1909,7 @@ bool ARMLoadStoreOpt::CombineMovBx(MachineBasicBlock &
for (auto Use : Prev->uses())
if (Use.isKill()) {
+ assert(STI->hasV4TOps());
BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::tBX))
.addReg(Use.getReg(), RegState::Kill)
.add(predOps(ARMCC::AL))
Modified: vendor/llvm/dist/lib/Target/ARM/ARMSubtarget.h
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/ARMSubtarget.h Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/ARM/ARMSubtarget.h Sat Dec 2 12:46:23 2017 (r326460)
@@ -729,6 +729,17 @@ class ARMSubtarget : public ARMGenSubtargetInfo { (pub
/// True if fast-isel is used.
bool useFastISel() const;
+
+ /// Returns the correct return opcode for the current feature set.
+ /// Use BX if available to allow mixing thumb/arm code, but fall back
+ /// to plain mov pc,lr on ARMv4.
+ unsigned getReturnOpcode() const {
+ if (isThumb())
+ return ARM::tBX_RET;
+ if (hasV4TOps())
+ return ARM::BX_RET;
+ return ARM::MOVPCLR;
+ }
};
} // end namespace llvm
Modified: vendor/llvm/dist/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -142,9 +142,9 @@ std::string ARM_MC::ParseARMTriple(const Triple &TT, S
if (isThumb) {
if (ARMArchFeature.empty())
- ARMArchFeature = "+thumb-mode";
+ ARMArchFeature = "+thumb-mode,+v4t";
else
- ARMArchFeature += ",+thumb-mode";
+ ARMArchFeature += ",+thumb-mode,+v4t";
}
if (TT.isOSNaCl()) {
Modified: vendor/llvm/dist/lib/Target/AVR/AVRExpandPseudoInsts.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/AVR/AVRExpandPseudoInsts.cpp Sat Dec 2 07:29:24 2017 (r326459)
+++ vendor/llvm/dist/lib/Target/AVR/AVRExpandPseudoInsts.cpp Sat Dec 2 12:46:23 2017 (r326460)
@@ -583,8 +583,8 @@ bool AVRExpandPseudo::expand<AVR::LDWRdPtr>(Block &MBB
unsigned TmpReg = 0; // 0 for no temporary register
unsigned SrcReg = MI.getOperand(1).getReg();
bool SrcIsKill = MI.getOperand(1).isKill();
- OpLo = AVR::LDRdPtr;
- OpHi = AVR::LDDRdPtrQ;
+ OpLo = AVR::LDRdPtrPi;
+ OpHi = AVR::LDRdPtr;
TRI->splitReg(DstReg, DstLoReg, DstHiReg);
// Use a temporary register if src and dst registers are the same.
@@ -597,6 +597,7 @@ bool AVRExpandPseudo::expand<AVR::LDWRdPtr>(Block &MBB
// Load low byte.
auto MIBLO = buildMI(MBB, MBBI, OpLo)
.addReg(CurDstLoReg, RegState::Define)
+ .addReg(SrcReg, RegState::Define)
.addReg(SrcReg);
// Push low byte onto stack if necessary.
@@ -606,8 +607,7 @@ bool AVRExpandPseudo::expand<AVR::LDWRdPtr>(Block &MBB
// Load high byte.
auto MIBHI = buildMI(MBB, MBBI, OpHi)
.addReg(CurDstHiReg, RegState::Define)
- .addReg(SrcReg, getKillRegState(SrcIsKill))
- .addImm(1);
+ .addReg(SrcReg, getKillRegState(SrcIsKill));
if (TmpReg) {
// Move the high byte into the final destination.
@@ -699,7 +699,9 @@ bool AVRExpandPseudo::expand<AVR::LDDWRdPtrQ>(Block &M
OpHi = AVR::LDDRdPtrQ;
TRI->splitReg(DstReg, DstLoReg, DstHiReg);
- assert(Imm <= 63 && "Offset is out of range");
+ // Since we add 1 to the Imm value for the high byte below, and 63 is the highest Imm value
+ // allowed for the instruction, 62 is the limit here.
+ assert(Imm <= 62 && "Offset is out of range");
// Use a temporary register if src and dst registers are the same.
if (DstReg == SrcReg)
@@ -741,7 +743,50 @@ bool AVRExpandPseudo::expand<AVR::LDDWRdPtrQ>(Block &M
template <>
bool AVRExpandPseudo::expand<AVR::LPMWRdZ>(Block &MBB, BlockIt MBBI) {
- llvm_unreachable("wide LPM is unimplemented");
+ MachineInstr &MI = *MBBI;
+ unsigned OpLo, OpHi, DstLoReg, DstHiReg;
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned TmpReg = 0; // 0 for no temporary register
+ unsigned SrcReg = MI.getOperand(1).getReg();
+ bool SrcIsKill = MI.getOperand(1).isKill();
+ OpLo = AVR::LPMRdZPi;
+ OpHi = AVR::LPMRdZ;
+ TRI->splitReg(DstReg, DstLoReg, DstHiReg);
+
+ // Use a temporary register if src and dst registers are the same.
+ if (DstReg == SrcReg)
+ TmpReg = scavengeGPR8(MI);
+
+ unsigned CurDstLoReg = (DstReg == SrcReg) ? TmpReg : DstLoReg;
+ unsigned CurDstHiReg = (DstReg == SrcReg) ? TmpReg : DstHiReg;
+
+ // Load low byte.
+ auto MIBLO = buildMI(MBB, MBBI, OpLo)
+ .addReg(CurDstLoReg, RegState::Define)
+ .addReg(SrcReg);
+
+ // Push low byte onto stack if necessary.
+ if (TmpReg)
+ buildMI(MBB, MBBI, AVR::PUSHRr).addReg(TmpReg);
+
+ // Load high byte.
+ auto MIBHI = buildMI(MBB, MBBI, OpHi)
+ .addReg(CurDstHiReg, RegState::Define)
+ .addReg(SrcReg, getKillRegState(SrcIsKill));
+
+ if (TmpReg) {
+ // Move the high byte into the final destination.
+ buildMI(MBB, MBBI, AVR::MOVRdRr).addReg(DstHiReg).addReg(TmpReg);
+
+ // Move the low byte from the scratch space into the final destination.
+ buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
+ }
+
+ MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+
+ MI.eraseFromParent();
+ return true;
}
template <>
@@ -1074,7 +1119,9 @@ bool AVRExpandPseudo::expand<AVR::STDWPtrQRr>(Block &M
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***