svn commit: r343794 - in vendor/llvm/dist-release_80: cmake/modules docs include/llvm/Support include/llvm/Transforms/Utils lib/CodeGen lib/CodeGen/AsmPrinter lib/CodeGen/SelectionDAG lib/DebugInfo...
Dimitry Andric
dim@FreeBSD.org
Tue Feb 5 18:39:05 UTC 2019
Author: dim
Date: Tue Feb 5 18:38:58 2019
New Revision: 343794
URL: https://svnweb.freebsd.org/changeset/base/343794
Log:
Vendor import of llvm release_80 branch r353167:
https://llvm.org/svn/llvm-project/llvm/branches/release_80@353167
Added:
vendor/llvm/dist-release_80/test/CodeGen/AArch64/build-vector-extract.ll
vendor/llvm/dist-release_80/test/CodeGen/AArch64/eh_recoverfp.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/reloc-jalr.ll
vendor/llvm/dist-release_80/test/DebugInfo/COFF/types-empty-member-fn.ll
vendor/llvm/dist-release_80/test/Transforms/FunctionImport/Inputs/comdat.ll
vendor/llvm/dist-release_80/test/Transforms/FunctionImport/comdat.ll
vendor/llvm/dist-release_80/test/Transforms/LoopTransformWarning/enable_and_isvectorized.ll
vendor/llvm/dist-release_80/test/Transforms/LoopVectorize/no_switch_disable_vectorization.ll
Modified:
vendor/llvm/dist-release_80/cmake/modules/AddLLVM.cmake
vendor/llvm/dist-release_80/docs/ReleaseNotes.rst
vendor/llvm/dist-release_80/include/llvm/Support/JSON.h
vendor/llvm/dist-release_80/include/llvm/Transforms/Utils/FunctionImportUtils.h
vendor/llvm/dist-release_80/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
vendor/llvm/dist-release_80/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
vendor/llvm/dist-release_80/lib/CodeGen/MachineInstr.cpp
vendor/llvm/dist-release_80/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
vendor/llvm/dist-release_80/lib/DebugInfo/DWARF/DWARFDebugLoc.cpp
vendor/llvm/dist-release_80/lib/IR/AutoUpgrade.cpp
vendor/llvm/dist-release_80/lib/Support/JSON.cpp
vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64SpeculationHardening.cpp
vendor/llvm/dist-release_80/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp
vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp
vendor/llvm/dist-release_80/lib/Target/Mips/MicroMips32r6InstrInfo.td
vendor/llvm/dist-release_80/lib/Target/Mips/MicroMipsInstrInfo.td
vendor/llvm/dist-release_80/lib/Target/Mips/Mips32r6InstrInfo.td
vendor/llvm/dist-release_80/lib/Target/Mips/MipsAsmPrinter.cpp
vendor/llvm/dist-release_80/lib/Target/Mips/MipsFastISel.cpp
vendor/llvm/dist-release_80/lib/Target/Mips/MipsISelLowering.cpp
vendor/llvm/dist-release_80/lib/Target/Mips/MipsISelLowering.h
vendor/llvm/dist-release_80/lib/Target/Mips/MipsInstrInfo.cpp
vendor/llvm/dist-release_80/lib/Target/Mips/MipsInstrInfo.td
vendor/llvm/dist-release_80/lib/Target/Mips/MipsMCInstLower.cpp
vendor/llvm/dist-release_80/lib/Target/X86/X86DiscriminateMemOps.cpp
vendor/llvm/dist-release_80/lib/Target/X86/X86InsertPrefetch.cpp
vendor/llvm/dist-release_80/lib/Transforms/Utils/FunctionImportUtils.cpp
vendor/llvm/dist-release_80/lib/Transforms/Utils/LoopUtils.cpp
vendor/llvm/dist-release_80/test/CodeGen/AArch64/speculation-hardening-loads.ll
vendor/llvm/dist-release_80/test/CodeGen/AArch64/speculation-hardening.ll
vendor/llvm/dist-release_80/test/CodeGen/AArch64/speculation-hardening.mir
vendor/llvm/dist-release_80/test/CodeGen/Mips/cconv/vector.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/gprestore.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/llvm-ir/sdiv.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/llvm-ir/srem.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/llvm-ir/udiv.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/llvm-ir/urem.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/long-call-attr.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/long-call-mcount.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/msa/f16-llvm-ir.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/o32_cc_byval.ll
vendor/llvm/dist-release_80/test/CodeGen/Mips/shrink-wrapping.ll
vendor/llvm/dist-release_80/test/CodeGen/X86/debug-loclists.ll
vendor/llvm/dist-release_80/test/CodeGen/X86/discriminate-mem-ops.ll
vendor/llvm/dist-release_80/test/CodeGen/X86/insert-prefetch-inline.ll
vendor/llvm/dist-release_80/test/CodeGen/X86/insert-prefetch-invalid-instr.ll
vendor/llvm/dist-release_80/test/CodeGen/X86/insert-prefetch.ll
vendor/llvm/dist-release_80/test/DebugInfo/Mips/dwarfdump-tls.ll
vendor/llvm/dist-release_80/test/DebugInfo/X86/dwarfdump-debug-loclists.test
vendor/llvm/dist-release_80/test/tools/llvm-dwarfdump/X86/debug_loclists_startx_length.s
Modified: vendor/llvm/dist-release_80/cmake/modules/AddLLVM.cmake
==============================================================================
--- vendor/llvm/dist-release_80/cmake/modules/AddLLVM.cmake Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/cmake/modules/AddLLVM.cmake Tue Feb 5 18:38:58 2019 (r343794)
@@ -1280,7 +1280,6 @@ function(get_llvm_lit_path base_dir file_name)
cmake_parse_arguments(ARG "ALLOW_EXTERNAL" "" "" ${ARGN})
if (ARG_ALLOW_EXTERNAL)
- set(LLVM_DEFAULT_EXTERNAL_LIT "${LLVM_EXTERNAL_LIT}")
set (LLVM_EXTERNAL_LIT "" CACHE STRING "Command used to spawn lit")
if ("${LLVM_EXTERNAL_LIT}" STREQUAL "")
set(LLVM_EXTERNAL_LIT "${LLVM_DEFAULT_EXTERNAL_LIT}")
Modified: vendor/llvm/dist-release_80/docs/ReleaseNotes.rst
==============================================================================
--- vendor/llvm/dist-release_80/docs/ReleaseNotes.rst Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/docs/ReleaseNotes.rst Tue Feb 5 18:38:58 2019 (r343794)
@@ -48,6 +48,12 @@ Non-comprehensive list of changes in this release
functionality. See `Writing an LLVM Pass
<WritingAnLLVMPass.html#setting-up-the-build-environment>`_.
+* For MinGW, references to data variables that might need to be imported
+ from a dll are accessed via a stub, to allow the linker to convert it to
+ a dllimport if needed.
+
+* Added support for labels as offsets in ``.reloc`` directive.
+
.. NOTE
If you would like to document a larger change, then you can add a
subsection about it right here. You can copy the following boilerplate
@@ -62,18 +68,45 @@ Changes to the LLVM IR
----------------------
+Changes to the AArch64 Target
+-----------------------------
+
+* Added support for the ``.arch_extension`` assembler directive, just like
+ on ARM.
+
+
Changes to the ARM Backend
--------------------------
During this release ...
+Changes to the Hexagon Target
+--------------------------
+
+* Added support for Hexagon/HVX V66 ISA.
+
Changes to the MIPS Target
--------------------------
- During this release ...
+* Improved support of GlobalISel instruction selection framework.
+* Implemented emission of ``R_MIPS_JALR`` and ``R_MICROMIPS_JALR``
+ relocations. These relocations provide hints to a linker for optimization
+ of jumps to protected symbols.
+* ORC JIT has been supported for MIPS and MIPS64 architectures.
+
+* Assembler now suggests alternative MIPS instruction mnemonics when
+ an invalid one is specified.
+
+* Improved support for MIPS N32 ABI.
+
+* Added new instructions (``pll.ps``, ``plu.ps``, ``cvt.s.pu``,
+ ``cvt.s.pl``, ``cvt.ps``, ``sigrie``).
+
+* Numerous bug fixes and code cleanups.
+
Changes to the PowerPC Target
-----------------------------
@@ -123,7 +156,16 @@ Changes to the DAG infrastructure
External Open Source Projects Using LLVM 8
==========================================
-* A project...
+Zig Programming Language
+------------------------
+
+`Zig <https://ziglang.org>`_ is a system programming language intended to be
+an alternative to C. It provides high level features such as generics, compile
+time function execution, and partial evaluation, while exposing low level LLVM
+IR features such as aliases and intrinsics. Zig uses Clang to provide automatic
+import of .h symbols, including inline functions and simple macros. Zig uses
+LLD combined with lazily building compiler-rt to provide out-of-the-box
+cross-compiling for all supported targets.
Additional Information
Modified: vendor/llvm/dist-release_80/include/llvm/Support/JSON.h
==============================================================================
--- vendor/llvm/dist-release_80/include/llvm/Support/JSON.h Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/include/llvm/Support/JSON.h Tue Feb 5 18:38:58 2019 (r343794)
@@ -481,6 +481,7 @@ class Value { (private)
mutable llvm::AlignedCharArrayUnion<bool, double, int64_t, llvm::StringRef,
std::string, json::Array, json::Object>
Union;
+ friend bool operator==(const Value &, const Value &);
};
bool operator==(const Value &, const Value &);
Modified: vendor/llvm/dist-release_80/include/llvm/Transforms/Utils/FunctionImportUtils.h
==============================================================================
--- vendor/llvm/dist-release_80/include/llvm/Transforms/Utils/FunctionImportUtils.h Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/include/llvm/Transforms/Utils/FunctionImportUtils.h Tue Feb 5 18:38:58 2019 (r343794)
@@ -44,6 +44,11 @@ class FunctionImportGlobalProcessing {
/// to promote any non-renamable values.
SmallPtrSet<GlobalValue *, 8> Used;
+ /// Keep track of any COMDATs that require renaming (because COMDAT
+ /// leader was promoted and renamed). Maps from original COMDAT to one
+ /// with new name.
+ DenseMap<const Comdat *, Comdat *> RenamedComdats;
+
/// Check if we should promote the given local value to global scope.
bool shouldPromoteLocalToGlobal(const GlobalValue *SGV);
Modified: vendor/llvm/dist-release_80/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -1836,7 +1836,10 @@ TypeIndex CodeViewDebug::lowerTypeMemberFunction(const
unsigned Index = 0;
SmallVector<TypeIndex, 8> ArgTypeIndices;
- TypeIndex ReturnTypeIndex = getTypeIndex(ReturnAndArgs[Index++]);
+ TypeIndex ReturnTypeIndex = TypeIndex::Void();
+ if (ReturnAndArgs.size() > Index) {
+ ReturnTypeIndex = getTypeIndex(ReturnAndArgs[Index++]);
+ }
// If the first argument is a pointer type and this isn't a static method,
// treat it as the special 'this' parameter, which is encoded separately from
Modified: vendor/llvm/dist-release_80/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/CodeGen/AsmPrinter/DwarfDebug.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/CodeGen/AsmPrinter/DwarfDebug.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -1956,8 +1956,10 @@ void DebugLocEntry::finalize(const AsmPrinter &AP,
void DwarfDebug::emitDebugLocEntryLocation(const DebugLocStream::Entry &Entry) {
// Emit the size.
Asm->OutStreamer->AddComment("Loc expr size");
- Asm->emitInt16(DebugLocs.getBytes(Entry).size());
-
+ if (getDwarfVersion() >= 5)
+ Asm->EmitULEB128(DebugLocs.getBytes(Entry).size());
+ else
+ Asm->emitInt16(DebugLocs.getBytes(Entry).size());
// Emit the entry.
APByteStreamer Streamer(*Asm);
emitDebugLocEntry(Streamer, Entry);
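
In DWARF v5 the "Loc expr size" field is encoded as a ULEB128 instead of the fixed 16-bit length used by earlier versions; Asm->EmitULEB128 performs the encoding here. A minimal standalone sketch of the same scheme, for illustration only (not LLVM's implementation):

    #include <cstdint>
    #include <vector>

    // ULEB128: emit 7 payload bits per byte, low-order group first, and set
    // the high bit on every byte except the last.
    std::vector<uint8_t> encodeULEB128(uint64_t Value) {
      std::vector<uint8_t> Out;
      do {
        uint8_t Byte = Value & 0x7f;
        Value >>= 7;
        if (Value != 0)
          Byte |= 0x80; // more bytes follow
        Out.push_back(Byte);
      } while (Value != 0);
      return Out; // e.g. 300 encodes as {0xAC, 0x02}
    }

Lengths below 128 therefore fit in a single byte, where the pre-v5 field always took two.
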
Modified: vendor/llvm/dist-release_80/lib/CodeGen/MachineInstr.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/CodeGen/MachineInstr.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/CodeGen/MachineInstr.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -225,12 +225,13 @@ void MachineInstr::addOperand(MachineFunction &MF, con
}
#ifndef NDEBUG
- bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
+ bool isDebugOp = Op.getType() == MachineOperand::MO_Metadata ||
+ Op.getType() == MachineOperand::MO_MCSymbol;
// OpNo now points as the desired insertion point. Unless this is a variadic
// instruction, only implicit regs are allowed beyond MCID->getNumOperands().
// RegMask operands go between the explicit and implicit operands.
assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
- OpNo < MCID->getNumOperands() || isMetaDataOp) &&
+ OpNo < MCID->getNumOperands() || isDebugOp) &&
"Trying to add an operand to a machine instr that is already done!");
#endif
Modified: vendor/llvm/dist-release_80/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -16214,23 +16214,29 @@ static SDValue reduceBuildVecToShuffleWithZero(SDNode
// The build vector contains some number of undef elements and exactly
// one other element. That other element must be a zero-extended scalar
// extracted from a vector at a constant index to turn this into a shuffle.
+ // Also, require that the build vector does not implicitly truncate/extend
+ // its elements.
// TODO: This could be enhanced to allow ANY_EXTEND as well as ZERO_EXTEND.
+ EVT VT = BV->getValueType(0);
SDValue Zext = BV->getOperand(ZextElt);
if (Zext.getOpcode() != ISD::ZERO_EXTEND || !Zext.hasOneUse() ||
Zext.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
- !isa<ConstantSDNode>(Zext.getOperand(0).getOperand(1)))
+ !isa<ConstantSDNode>(Zext.getOperand(0).getOperand(1)) ||
+ Zext.getValueSizeInBits() != VT.getScalarSizeInBits())
return SDValue();
- // The zero-extend must be a multiple of the source size.
+ // The zero-extend must be a multiple of the source size, and we must be
+ // building a vector of the same size as the source of the extract element.
SDValue Extract = Zext.getOperand(0);
unsigned DestSize = Zext.getValueSizeInBits();
unsigned SrcSize = Extract.getValueSizeInBits();
- if (DestSize % SrcSize != 0)
+ if (DestSize % SrcSize != 0 ||
+ Extract.getOperand(0).getValueSizeInBits() != VT.getSizeInBits())
return SDValue();
// Create a shuffle mask that will combine the extracted element with zeros
// and undefs.
- int ZextRatio = DestSize / SrcSize;
+ int ZextRatio = DestSize / SrcSize;
int NumMaskElts = NumBVOps * ZextRatio;
SmallVector<int, 32> ShufMask(NumMaskElts, -1);
for (int i = 0; i != NumMaskElts; ++i) {
@@ -16260,7 +16266,7 @@ static SDValue reduceBuildVecToShuffleWithZero(SDNode
SDValue ZeroVec = DAG.getConstant(0, DL, VecVT);
SDValue Shuf = DAG.getVectorShuffle(VecVT, DL, Extract.getOperand(0), ZeroVec,
ShufMask);
- return DAG.getBitcast(BV->getValueType(0), Shuf);
+ return DAG.getBitcast(VT, Shuf);
}
// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
Modified: vendor/llvm/dist-release_80/lib/DebugInfo/DWARF/DWARFDebugLoc.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/DebugInfo/DWARF/DWARFDebugLoc.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/DebugInfo/DWARF/DWARFDebugLoc.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -184,7 +184,8 @@ DWARFDebugLoclists::parseOneLocationList(DataExtractor
}
if (Kind != dwarf::DW_LLE_base_address) {
- unsigned Bytes = Data.getU16(Offset);
+ unsigned Bytes =
+ Version >= 5 ? Data.getULEB128(Offset) : Data.getU16(Offset);
// A single location description describing the location of the object...
StringRef str = Data.getData().substr(*Offset, Bytes);
*Offset += Bytes;
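
This is the consumer-side counterpart of the DwarfDebug.cpp change above: for version 5 location lists the expression length is read with Data.getULEB128, otherwise as the old 16-bit field. A matching decoder sketch, again purely illustrative (DataExtractor already provides this):

    #include <cstdint>

    // Accumulate 7 bits per byte, least-significant group first, until a
    // byte with the continuation (high) bit clear is reached.
    uint64_t decodeULEB128(const uint8_t *Buf, unsigned &BytesRead) {
      uint64_t Result = 0;
      unsigned Shift = 0;
      BytesRead = 0;
      uint8_t Byte;
      do {
        Byte = Buf[BytesRead++];
        Result |= uint64_t(Byte & 0x7f) << Shift;
        Shift += 7;
      } while (Byte & 0x80);
      return Result; // {0xAC, 0x02} decodes back to 300
    }
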
Modified: vendor/llvm/dist-release_80/lib/IR/AutoUpgrade.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/IR/AutoUpgrade.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/IR/AutoUpgrade.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -469,6 +469,11 @@ static bool UpgradeX86IntrinsicFunction(Function *F, S
}
}
+ if (Name == "seh.recoverfp") {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
+ return true;
+ }
+
return false;
}
@@ -542,10 +547,6 @@ static bool UpgradeIntrinsicFunction1(Function *F, Fun
}
if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
- return true;
- }
- if (Name == "x86.seh.recoverfp") {
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
return true;
}
break;
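
The net effect is that the old x86-specific spelling keeps working: modules calling llvm.x86.seh.recoverfp are auto-upgraded to the target-independent llvm.eh.recoverfp, which the new AArch64 eh_recoverfp.ll test also exercises. Assuming the usual pointer-in/pointer-out form of the intrinsic, the rewrite amounts to:

    ; before auto-upgrade (old bitcode)
    %fp = call i8* @llvm.x86.seh.recoverfp(i8* %fn, i8* %frame)
    ; after auto-upgrade
    %fp = call i8* @llvm.eh.recoverfp(i8* %fn, i8* %frame)
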
Modified: vendor/llvm/dist-release_80/lib/Support/JSON.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Support/JSON.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Support/JSON.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -182,6 +182,12 @@ bool operator==(const Value &L, const Value &R) {
case Value::Boolean:
return *L.getAsBoolean() == *R.getAsBoolean();
case Value::Number:
+ // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=323
+ // The same integer must convert to the same double, per the standard.
+ // However we see 64-vs-80-bit precision comparisons with gcc-7 -O3 -m32.
+ // So we avoid floating point promotion for exact comparisons.
+ if (L.Type == Value::T_Integer || R.Type == Value::T_Integer)
+ return L.getAsInteger() == R.getAsInteger();
return *L.getAsNumber() == *R.getAsNumber();
case Value::String:
return *L.getAsString() == *R.getAsString();
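
The new early-out keeps integer-valued Numbers on an exact comparison path. GCC bug 323 is specifically about x87 excess precision, but routing exact equality through double is fragile in general; once values pass 2^53 a double cannot even distinguish adjacent 64-bit integers. A small self-contained illustration of that broader hazard (not the gcc-7 -m32 case itself):

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t A = (1LL << 53) + 1;
      int64_t B = (1LL << 53);
      assert(A != B);  // exact integer comparison distinguishes them
      // 2^53 + 1 is not representable as a double and rounds to 2^53, so a
      // comparison promoted to double would call the two values equal.
      assert(static_cast<double>(A) == static_cast<double>(B));
      return 0;
    }
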
Modified: vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64SpeculationHardening.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64SpeculationHardening.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/AArch64/AArch64SpeculationHardening.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -103,6 +103,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
@@ -146,25 +147,31 @@ class AArch64SpeculationHardening : public MachineFunc
BitVector RegsAlreadyMasked;
bool functionUsesHardeningRegister(MachineFunction &MF) const;
- bool instrumentControlFlow(MachineBasicBlock &MBB);
+ bool instrumentControlFlow(MachineBasicBlock &MBB,
+ bool &UsesFullSpeculationBarrier);
bool endsWithCondControlFlow(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
AArch64CC::CondCode &CondCode) const;
void insertTrackingCode(MachineBasicBlock &SplitEdgeBB,
AArch64CC::CondCode &CondCode, DebugLoc DL) const;
- void insertSPToRegTaintPropagation(MachineBasicBlock *MBB,
+ void insertSPToRegTaintPropagation(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const;
- void insertRegToSPTaintPropagation(MachineBasicBlock *MBB,
+ void insertRegToSPTaintPropagation(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned TmpReg) const;
+ void insertFullSpeculationBarrier(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) const;
bool slhLoads(MachineBasicBlock &MBB);
bool makeGPRSpeculationSafe(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
MachineInstr &MI, unsigned Reg);
- bool lowerSpeculationSafeValuePseudos(MachineBasicBlock &MBB);
+ bool lowerSpeculationSafeValuePseudos(MachineBasicBlock &MBB,
+ bool UsesFullSpeculationBarrier);
bool expandSpeculationSafeValue(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI);
+ MachineBasicBlock::iterator MBBI,
+ bool UsesFullSpeculationBarrier);
bool insertCSDB(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
DebugLoc DL);
};
@@ -207,15 +214,19 @@ bool AArch64SpeculationHardening::endsWithCondControlF
return true;
}
+void AArch64SpeculationHardening::insertFullSpeculationBarrier(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) const {
+ // A full control flow speculation barrier consists of (DSB SYS + ISB)
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::DSB)).addImm(0xf);
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::ISB)).addImm(0xf);
+}
+
void AArch64SpeculationHardening::insertTrackingCode(
MachineBasicBlock &SplitEdgeBB, AArch64CC::CondCode &CondCode,
DebugLoc DL) const {
if (UseControlFlowSpeculationBarrier) {
- // insert full control flow speculation barrier (DSB SYS + ISB)
- BuildMI(SplitEdgeBB, SplitEdgeBB.begin(), DL, TII->get(AArch64::ISB))
- .addImm(0xf);
- BuildMI(SplitEdgeBB, SplitEdgeBB.begin(), DL, TII->get(AArch64::DSB))
- .addImm(0xf);
+ insertFullSpeculationBarrier(SplitEdgeBB, SplitEdgeBB.begin(), DL);
} else {
BuildMI(SplitEdgeBB, SplitEdgeBB.begin(), DL, TII->get(AArch64::CSELXr))
.addDef(MisspeculatingTaintReg)
@@ -227,7 +238,7 @@ void AArch64SpeculationHardening::insertTrackingCode(
}
bool AArch64SpeculationHardening::instrumentControlFlow(
- MachineBasicBlock &MBB) {
+ MachineBasicBlock &MBB, bool &UsesFullSpeculationBarrier) {
LLVM_DEBUG(dbgs() << "Instrument control flow tracking on MBB: " << MBB);
bool Modified = false;
@@ -263,55 +274,105 @@ bool AArch64SpeculationHardening::instrumentControlFlo
}
// Perform correct code generation around function calls and before returns.
- {
- SmallVector<MachineInstr *, 4> ReturnInstructions;
- SmallVector<MachineInstr *, 4> CallInstructions;
+ // The below variables record the return/terminator instructions and the call
+ // instructions respectively; including which register is available as a
+ // temporary register just before the recorded instructions.
+ SmallVector<std::pair<MachineInstr *, unsigned>, 4> ReturnInstructions;
+ SmallVector<std::pair<MachineInstr *, unsigned>, 4> CallInstructions;
+ // if a temporary register is not available for at least one of the
+ // instructions for which we need to transfer taint to the stack pointer, we
+ // need to insert a full speculation barrier.
+ // TmpRegisterNotAvailableEverywhere tracks that condition.
+ bool TmpRegisterNotAvailableEverywhere = false;
- for (MachineInstr &MI : MBB) {
- if (MI.isReturn())
- ReturnInstructions.push_back(&MI);
- else if (MI.isCall())
- CallInstructions.push_back(&MI);
- }
+ RegScavenger RS;
+ RS.enterBasicBlock(MBB);
- Modified |=
- (ReturnInstructions.size() > 0) || (CallInstructions.size() > 0);
+ for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); I++) {
+ MachineInstr &MI = *I;
+ if (!MI.isReturn() && !MI.isCall())
+ continue;
- for (MachineInstr *Return : ReturnInstructions)
- insertRegToSPTaintPropagation(Return->getParent(), Return, AArch64::X17);
- for (MachineInstr *Call : CallInstructions) {
+ // The RegScavenger represents registers available *after* the MI
+ // instruction pointed to by RS.getCurrentPosition().
+ // We need to have a register that is available *before* the MI is executed.
+ if (I != MBB.begin())
+ RS.forward(std::prev(I));
+ // FIXME: The below just finds *a* unused register. Maybe code could be
+ // optimized more if this looks for the register that isn't used for the
+ // longest time around this place, to enable more scheduling freedom. Not
+ // sure if that would actually result in a big performance difference
+ // though. Maybe RegisterScavenger::findSurvivorBackwards has some logic
+ // already to do this - but it's unclear if that could easily be used here.
+ unsigned TmpReg = RS.FindUnusedReg(&AArch64::GPR64commonRegClass);
+ LLVM_DEBUG(dbgs() << "RS finds "
+ << ((TmpReg == 0) ? "no register " : "register ");
+ if (TmpReg != 0) dbgs() << printReg(TmpReg, TRI) << " ";
+ dbgs() << "to be available at MI " << MI);
+ if (TmpReg == 0)
+ TmpRegisterNotAvailableEverywhere = true;
+ if (MI.isReturn())
+ ReturnInstructions.push_back({&MI, TmpReg});
+ else if (MI.isCall())
+ CallInstructions.push_back({&MI, TmpReg});
+ }
+
+ if (TmpRegisterNotAvailableEverywhere) {
+ // When a temporary register is not available everywhere in this basic
+ // basic block where a propagate-taint-to-sp operation is needed, just
+ // emit a full speculation barrier at the start of this basic block, which
+ // renders the taint/speculation tracking in this basic block unnecessary.
+ insertFullSpeculationBarrier(MBB, MBB.begin(),
+ (MBB.begin())->getDebugLoc());
+ UsesFullSpeculationBarrier = true;
+ Modified = true;
+ } else {
+ for (auto MI_Reg : ReturnInstructions) {
+ assert(MI_Reg.second != 0);
+ LLVM_DEBUG(
+ dbgs()
+ << " About to insert Reg to SP taint propagation with temp register "
+ << printReg(MI_Reg.second, TRI)
+ << " on instruction: " << *MI_Reg.first);
+ insertRegToSPTaintPropagation(MBB, MI_Reg.first, MI_Reg.second);
+ Modified = true;
+ }
+
+ for (auto MI_Reg : CallInstructions) {
+ assert(MI_Reg.second != 0);
+ LLVM_DEBUG(dbgs() << " About to insert Reg to SP and back taint "
+ "propagation with temp register "
+ << printReg(MI_Reg.second, TRI)
+ << " around instruction: " << *MI_Reg.first);
// Just after the call:
- MachineBasicBlock::iterator i = Call;
- i++;
- insertSPToRegTaintPropagation(Call->getParent(), i);
+ insertSPToRegTaintPropagation(
+ MBB, std::next((MachineBasicBlock::iterator)MI_Reg.first));
// Just before the call:
- insertRegToSPTaintPropagation(Call->getParent(), Call, AArch64::X17);
+ insertRegToSPTaintPropagation(MBB, MI_Reg.first, MI_Reg.second);
+ Modified = true;
}
}
-
return Modified;
}
void AArch64SpeculationHardening::insertSPToRegTaintPropagation(
- MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI) const {
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
// If full control flow speculation barriers are used, emit a control flow
// barrier to block potential miss-speculation in flight coming in to this
// function.
if (UseControlFlowSpeculationBarrier) {
- // insert full control flow speculation barrier (DSB SYS + ISB)
- BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::DSB)).addImm(0xf);
- BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::ISB)).addImm(0xf);
+ insertFullSpeculationBarrier(MBB, MBBI, DebugLoc());
return;
}
// CMP SP, #0 === SUBS xzr, SP, #0
- BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::SUBSXri))
+ BuildMI(MBB, MBBI, DebugLoc(), TII->get(AArch64::SUBSXri))
.addDef(AArch64::XZR)
.addUse(AArch64::SP)
.addImm(0)
.addImm(0); // no shift
// CSETM x16, NE === CSINV x16, xzr, xzr, EQ
- BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::CSINVXr))
+ BuildMI(MBB, MBBI, DebugLoc(), TII->get(AArch64::CSINVXr))
.addDef(MisspeculatingTaintReg)
.addUse(AArch64::XZR)
.addUse(AArch64::XZR)
@@ -319,7 +380,7 @@ void AArch64SpeculationHardening::insertSPToRegTaintPr
}
void AArch64SpeculationHardening::insertRegToSPTaintPropagation(
- MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
unsigned TmpReg) const {
// If full control flow speculation barriers are used, there will not be
// miss-speculation when returning from this function, and therefore, also
@@ -328,19 +389,19 @@ void AArch64SpeculationHardening::insertRegToSPTaintPr
return;
// mov Xtmp, SP === ADD Xtmp, SP, #0
- BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::ADDXri))
+ BuildMI(MBB, MBBI, DebugLoc(), TII->get(AArch64::ADDXri))
.addDef(TmpReg)
.addUse(AArch64::SP)
.addImm(0)
.addImm(0); // no shift
// and Xtmp, Xtmp, TaintReg === AND Xtmp, Xtmp, TaintReg, #0
- BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::ANDXrs))
+ BuildMI(MBB, MBBI, DebugLoc(), TII->get(AArch64::ANDXrs))
.addDef(TmpReg, RegState::Renamable)
.addUse(TmpReg, RegState::Kill | RegState::Renamable)
.addUse(MisspeculatingTaintReg, RegState::Kill)
.addImm(0);
// mov SP, Xtmp === ADD SP, Xtmp, #0
- BuildMI(*MBB, MBBI, DebugLoc(), TII->get(AArch64::ADDXri))
+ BuildMI(MBB, MBBI, DebugLoc(), TII->get(AArch64::ADDXri))
.addDef(AArch64::SP)
.addUse(TmpReg, RegState::Kill)
.addImm(0)
@@ -484,7 +545,8 @@ bool AArch64SpeculationHardening::slhLoads(MachineBasi
/// \brief If MBBI references a pseudo instruction that should be expanded
/// here, do the expansion and return true. Otherwise return false.
bool AArch64SpeculationHardening::expandSpeculationSafeValue(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) {
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ bool UsesFullSpeculationBarrier) {
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
bool Is64Bit = true;
@@ -499,7 +561,7 @@ bool AArch64SpeculationHardening::expandSpeculationSaf
// Just remove the SpeculationSafe pseudo's if control flow
// miss-speculation isn't happening because we're already inserting barriers
// to guarantee that.
- if (!UseControlFlowSpeculationBarrier) {
+ if (!UseControlFlowSpeculationBarrier && !UsesFullSpeculationBarrier) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
// Mark this register and all its aliasing registers as needing to be
@@ -537,7 +599,7 @@ bool AArch64SpeculationHardening::insertCSDB(MachineBa
}
bool AArch64SpeculationHardening::lowerSpeculationSafeValuePseudos(
- MachineBasicBlock &MBB) {
+ MachineBasicBlock &MBB, bool UsesFullSpeculationBarrier) {
bool Modified = false;
RegsNeedingCSDBBeforeUse.reset();
@@ -572,15 +634,16 @@ bool AArch64SpeculationHardening::lowerSpeculationSafe
break;
}
- if (NeedToEmitBarrier)
+ if (NeedToEmitBarrier && !UsesFullSpeculationBarrier)
Modified |= insertCSDB(MBB, MBBI, DL);
- Modified |= expandSpeculationSafeValue(MBB, MBBI);
+ Modified |=
+ expandSpeculationSafeValue(MBB, MBBI, UsesFullSpeculationBarrier);
MBBI = NMBBI;
}
- if (RegsNeedingCSDBBeforeUse.any())
+ if (RegsNeedingCSDBBeforeUse.any() && !UsesFullSpeculationBarrier)
Modified |= insertCSDB(MBB, MBBI, DL);
return Modified;
@@ -609,7 +672,7 @@ bool AArch64SpeculationHardening::runOnMachineFunction
Modified |= slhLoads(MBB);
}
- // 2.a Add instrumentation code to function entry and exits.
+ // 2. Add instrumentation code to function entry and exits.
LLVM_DEBUG(
dbgs()
<< "***** AArch64SpeculationHardening - track control flow *****\n");
@@ -620,17 +683,15 @@ bool AArch64SpeculationHardening::runOnMachineFunction
EntryBlocks.push_back(LPI.LandingPadBlock);
for (auto Entry : EntryBlocks)
insertSPToRegTaintPropagation(
- Entry, Entry->SkipPHIsLabelsAndDebug(Entry->begin()));
+ *Entry, Entry->SkipPHIsLabelsAndDebug(Entry->begin()));
- // 2.b Add instrumentation code to every basic block.
- for (auto &MBB : MF)
- Modified |= instrumentControlFlow(MBB);
-
- LLVM_DEBUG(dbgs() << "***** AArch64SpeculationHardening - Lowering "
- "SpeculationSafeValue Pseudos *****\n");
- // Step 3: Lower SpeculationSafeValue pseudo instructions.
- for (auto &MBB : MF)
- Modified |= lowerSpeculationSafeValuePseudos(MBB);
+ // 3. Add instrumentation code to every basic block.
+ for (auto &MBB : MF) {
+ bool UsesFullSpeculationBarrier = false;
+ Modified |= instrumentControlFlow(MBB, UsesFullSpeculationBarrier);
+ Modified |=
+ lowerSpeculationSafeValuePseudos(MBB, UsesFullSpeculationBarrier);
+ }
return Modified;
}
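
Summarizing the refactoring: rather than hard-coding X17, the pass now walks each block with a RegScavenger and records, for every call and return, a scratch GPR that is free immediately before it; only when no scratch register can be found somewhere it is needed does it fall back to a full DSB+ISB barrier at the top of the block, which makes per-block taint tracking unnecessary there. With a scavenged register, the code inserted before a return is the sequence spelled out in the comments above; assuming x1 was the register found and x16 holds the mis-speculation mask, it reads roughly:

    mov  x1, sp
    and  x1, x1, x16
    mov  sp, x1
    ret

Around a call the same register-to-SP propagation is emitted just before the call, and the SP-to-register direction (cmp sp, #0; csetm x16, ne) just after it.
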
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/AsmParser/MipsAsmParser.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/AsmParser/MipsAsmParser.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -65,10 +65,7 @@ class MCInstrInfo;
} // end namespace llvm
-static cl::opt<bool>
-EmitJalrReloc("mips-jalr-reloc", cl::Hidden,
- cl::desc("MIPS: Emit R_{MICRO}MIPS_JALR relocation with jalr"),
- cl::init(true));
+extern cl::opt<bool> EmitJalrReloc;
namespace {
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -15,6 +15,13 @@
using namespace llvm;
+// Note: this option is defined here to be visible from libLLVMMipsAsmParser
+// and libLLVMMipsCodeGen
+cl::opt<bool>
+EmitJalrReloc("mips-jalr-reloc", cl::Hidden,
+ cl::desc("MIPS: Emit R_{MICRO}MIPS_JALR relocation with jalr"),
+ cl::init(true));
+
namespace {
static const MCPhysReg O32IntRegs[4] = {Mips::A0, Mips::A1, Mips::A2, Mips::A3};
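
Moving the definition here leaves a single cl::opt instance that both libLLVMMipsAsmParser (previous hunk) and the CodeGen files below reference via "extern cl::opt<bool> EmitJalrReloc;". The hints are on by default and can be disabled on the llc command line, e.g. (everything other than -mips-jalr-reloc is just an illustrative invocation):

    llc -mtriple=mips-linux-gnu -relocation-model=pic -mips-jalr-reloc=false foo.ll
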
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h Tue Feb 5 18:38:58 2019 (r343794)
@@ -89,7 +89,10 @@ namespace MipsII {
MO_GOT_HI16,
MO_GOT_LO16,
MO_CALL_HI16,
- MO_CALL_LO16
+ MO_CALL_LO16,
+
+ /// Helper operand used to generate R_MIPS_JALR
+ MO_JALR
};
enum {
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -614,8 +614,9 @@ getExprOpValue(const MCExpr *Expr, SmallVectorImpl<MCF
llvm_unreachable("Unhandled fixup kind!");
break;
case MipsMCExpr::MEK_DTPREL:
- llvm_unreachable("MEK_DTPREL is used for TLS DIEExpr only");
- break;
+ // MEK_DTPREL is used for marking TLS DIEExpr only
+ // and contains a regular sub-expression.
+ return getExprOpValue(MipsExpr->getSubExpr(), Fixups, STI);
case MipsMCExpr::MEK_CALL_HI16:
FixupKind = Mips::fixup_Mips_CALL_HI16;
break;
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -44,8 +44,10 @@ void MipsMCExpr::printImpl(raw_ostream &OS, const MCAs
llvm_unreachable("MEK_None and MEK_Special are invalid");
break;
case MEK_DTPREL:
- llvm_unreachable("MEK_DTPREL is used for TLS DIEExpr only");
- break;
+ // MEK_DTPREL is used for marking TLS DIEExpr only
+ // and contains a regular sub-expression.
+ getSubExpr()->print(OS, MAI, true);
+ return;
case MEK_CALL_HI16:
OS << "%call_hi";
break;
@@ -161,7 +163,9 @@ MipsMCExpr::evaluateAsRelocatableImpl(MCValue &Res,
case MEK_Special:
llvm_unreachable("MEK_None and MEK_Special are invalid");
case MEK_DTPREL:
- llvm_unreachable("MEK_DTPREL is used for TLS DIEExpr only");
+ // MEK_DTPREL is used for marking TLS DIEExpr only
+ // and contains a regular sub-expression.
+ return getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup);
case MEK_DTPREL_HI:
case MEK_DTPREL_LO:
case MEK_GOT:
@@ -249,9 +253,6 @@ void MipsMCExpr::fixELFSymbolsInTLSFixups(MCAssembler
case MEK_Special:
llvm_unreachable("MEK_None and MEK_Special are invalid");
break;
- case MEK_DTPREL:
- llvm_unreachable("MEK_DTPREL is used for TLS DIEExpr only");
- break;
case MEK_CALL_HI16:
case MEK_CALL_LO16:
case MEK_GOT:
@@ -274,6 +275,7 @@ void MipsMCExpr::fixELFSymbolsInTLSFixups(MCAssembler
if (const MipsMCExpr *E = dyn_cast<const MipsMCExpr>(getSubExpr()))
E->fixELFSymbolsInTLSFixups(Asm);
break;
+ case MEK_DTPREL:
case MEK_DTPREL_HI:
case MEK_DTPREL_LO:
case MEK_TLSLDM:
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MicroMips32r6InstrInfo.td
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MicroMips32r6InstrInfo.td Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MicroMips32r6InstrInfo.td Tue Feb 5 18:38:58 2019 (r343794)
@@ -460,6 +460,7 @@ class JALRC16_MMR6_DESC_BASE<string opstr, RegisterOpe
let isCall = 1;
let hasDelaySlot = 0;
let Defs = [RA];
+ let hasPostISelHook = 1;
}
class JALRC16_MMR6_DESC : JALRC16_MMR6_DESC_BASE<"jalr", GPR32Opnd>;
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MicroMipsInstrInfo.td
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MicroMipsInstrInfo.td Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MicroMipsInstrInfo.td Tue Feb 5 18:38:58 2019 (r343794)
@@ -426,6 +426,7 @@ class JumpLinkRegMM16<string opstr, RegisterOperand RO
let isCall = 1;
let hasDelaySlot = 1;
let Defs = [RA];
+ let hasPostISelHook = 1;
}
// 16-bit Jump Reg
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/Mips32r6InstrInfo.td
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/Mips32r6InstrInfo.td Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/Mips32r6InstrInfo.td Tue Feb 5 18:38:58 2019 (r343794)
@@ -1105,7 +1105,7 @@ def : MipsPat<(select i32:$cond, immz, i32:$f),
// Pseudo instructions
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, hasDelaySlot = 1,
- hasExtraSrcRegAllocReq = 1, isCTI = 1, Defs = [AT] in {
+ hasExtraSrcRegAllocReq = 1, isCTI = 1, Defs = [AT], hasPostISelHook = 1 in {
class TailCallRegR6<Instruction JumpInst, Register RT, RegisterOperand RO> :
PseudoSE<(outs), (ins RO:$rs), [(MipsTailCall RO:$rs)], II_JR>,
PseudoInstExpansion<(JumpInst RT:$rt, RO:$rs)>;
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MipsAsmPrinter.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MipsAsmPrinter.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MipsAsmPrinter.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -68,6 +68,8 @@ using namespace llvm;
#define DEBUG_TYPE "mips-asm-printer"
+extern cl::opt<bool> EmitJalrReloc;
+
MipsTargetStreamer &MipsAsmPrinter::getTargetStreamer() const {
return static_cast<MipsTargetStreamer &>(*OutStreamer->getTargetStreamer());
}
@@ -148,6 +150,40 @@ void MipsAsmPrinter::emitPseudoIndirectBranch(MCStream
EmitToStreamer(OutStreamer, TmpInst0);
}
+// If there is an MO_JALR operand, insert:
+//
+// .reloc tmplabel, R_{MICRO}MIPS_JALR, symbol
+// tmplabel:
+//
+// This is an optimization hint for the linker which may then replace
+// an indirect call with a direct branch.
+static void emitDirectiveRelocJalr(const MachineInstr &MI,
+ MCContext &OutContext,
+ TargetMachine &TM,
+ MCStreamer &OutStreamer,
+ const MipsSubtarget &Subtarget) {
+ for (unsigned int I = MI.getDesc().getNumOperands(), E = MI.getNumOperands();
+ I < E; ++I) {
+ MachineOperand MO = MI.getOperand(I);
+ if (MO.isMCSymbol() && (MO.getTargetFlags() & MipsII::MO_JALR)) {
+ MCSymbol *Callee = MO.getMCSymbol();
+ if (Callee && !Callee->getName().empty()) {
+ MCSymbol *OffsetLabel = OutContext.createTempSymbol();
+ const MCExpr *OffsetExpr =
+ MCSymbolRefExpr::create(OffsetLabel, OutContext);
+ const MCExpr *CaleeExpr =
+ MCSymbolRefExpr::create(Callee, OutContext);
+ OutStreamer.EmitRelocDirective
+ (*OffsetExpr,
+ Subtarget.inMicroMipsMode() ? "R_MICROMIPS_JALR" : "R_MIPS_JALR",
+ CaleeExpr, SMLoc(), *TM.getMCSubtargetInfo());
+ OutStreamer.EmitLabel(OffsetLabel);
+ return;
+ }
+ }
+ }
+}
+
void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MipsTargetStreamer &TS = getTargetStreamer();
unsigned Opc = MI->getOpcode();
@@ -205,6 +241,11 @@ void MipsAsmPrinter::EmitInstruction(const MachineInst
case Mips::PATCHABLE_TAIL_CALL:
LowerPATCHABLE_TAIL_CALL(*MI);
return;
+ }
+
+ if (EmitJalrReloc &&
+ (MI->isReturn() || MI->isCall() || MI->isIndirectBranch())) {
+ emitDirectiveRelocJalr(*MI, OutContext, TM, *OutStreamer, *Subtarget);
}
MachineBasicBlock::const_instr_iterator I = MI->getIterator();
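
Tying the MIPS pieces together: instruction selection attaches an MO_JALR MCSymbol operand naming the callee to JALR/tail-call instructions (see the MipsISelLowering.cpp and MipsFastISel.cpp hunks below), and this printer hook turns that operand into a .reloc directive placed immediately before the call. For an indirect PIC call through $25 the output ends up looking roughly like this (label and register purely illustrative):

    .reloc .Ltmp0, R_MIPS_JALR, foo
    .Ltmp0:
    jalr  $25

which gives the linker enough information to turn the indirect call into a direct one when foo turns out to be non-preemptible.
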
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MipsFastISel.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MipsFastISel.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MipsFastISel.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -56,6 +56,7 @@
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
@@ -75,6 +76,8 @@
using namespace llvm;
+extern cl::opt<bool> EmitJalrReloc;
+
namespace {
class MipsFastISel final : public FastISel {
@@ -1550,6 +1553,16 @@ bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI
MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
CLI.Call = MIB;
+
+ if (EmitJalrReloc && !Subtarget->inMips16Mode()) {
+ // Attach callee address to the instruction, let asm printer emit
+ // .reloc R_MIPS_JALR.
+ if (Symbol)
+ MIB.addSym(Symbol, MipsII::MO_JALR);
+ else
+ MIB.addSym(FuncInfo.MF->getContext().getOrCreateSymbol(
+ Addr.getGlobalValue()->getName()), MipsII::MO_JALR);
+ }
// Finish off the call including any return values.
return finishCall(CLI, RetVT, NumBytes);
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MipsISelLowering.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MipsISelLowering.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MipsISelLowering.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -57,6 +57,7 @@
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
@@ -91,6 +92,8 @@ NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
cl::desc("MIPS: Don't trap on integer division by zero."),
cl::init(false));
+extern cl::opt<bool> EmitJalrReloc;
+
static const MCPhysReg Mips64DPRegs[8] = {
Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
@@ -2879,6 +2882,54 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
Ops.push_back(InFlag);
}
+void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
+ SDNode *Node) const {
+ switch (MI.getOpcode()) {
+ default:
+ return;
+ case Mips::JALR:
+ case Mips::JALRPseudo:
+ case Mips::JALR64:
+ case Mips::JALR64Pseudo:
+ case Mips::JALR16_MM:
+ case Mips::JALRC16_MMR6:
+ case Mips::TAILCALLREG:
+ case Mips::TAILCALLREG64:
+ case Mips::TAILCALLR6REG:
+ case Mips::TAILCALL64R6REG:
+ case Mips::TAILCALLREG_MM:
+ case Mips::TAILCALLREG_MMR6: {
+ if (!EmitJalrReloc ||
+ Subtarget.inMips16Mode() ||
+ !isPositionIndependent() ||
+ Node->getNumOperands() < 1 ||
+ Node->getOperand(0).getNumOperands() < 2) {
+ return;
+ }
+ // We are after the callee address, set by LowerCall().
+ // If added to MI, asm printer will emit .reloc R_MIPS_JALR for the
+ // symbol.
+ const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
+ StringRef Sym;
+ if (const GlobalAddressSDNode *G =
+ dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
+ Sym = G->getGlobal()->getName();
+ }
+ else if (const ExternalSymbolSDNode *ES =
+ dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
+ Sym = ES->getSymbol();
+ }
+
+ if (Sym.empty())
+ return;
+
+ MachineFunction *MF = MI.getParent()->getParent();
+ MCSymbol *S = MF->getContext().getOrCreateSymbol(Sym);
+ MI.addOperand(MachineOperand::CreateMCSymbol(S, MipsII::MO_JALR));
+ }
+ }
+}
+
/// LowerCall - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
SDValue
@@ -2930,7 +2981,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLowe
// the maximum out going argument area (including the reserved area), and
// preallocates the stack space on entrance to the caller.
//
- // FIXME: We should do the same for efficency and space.
+ // FIXME: We should do the same for efficiency and space.
// Note: The check on the calling convention below must match
// MipsABIInfo::GetCalleeAllocdArgSizeInBytes().
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MipsISelLowering.h
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MipsISelLowering.h Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MipsISelLowering.h Tue Feb 5 18:38:58 2019 (r343794)
@@ -341,6 +341,9 @@ class TargetRegisterClass;
EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;
+ void AdjustInstrPostInstrSelection(MachineInstr &MI,
+ SDNode *Node) const override;
+
void HandleByVal(CCState *, unsigned &, unsigned) const override;
unsigned getRegisterByName(const char* RegName, EVT VT,
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MipsInstrInfo.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MipsInstrInfo.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MipsInstrInfo.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -653,6 +653,16 @@ MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
MIB.addImm(0);
+ // If I has an MCSymbol operand (used by asm printer, to emit R_MIPS_JALR),
+ // add it to the new instruction.
+ for (unsigned J = I->getDesc().getNumOperands(), E = I->getNumOperands();
+ J < E; ++J) {
+ const MachineOperand &MO = I->getOperand(J);
+ if (MO.isMCSymbol() && (MO.getTargetFlags() & MipsII::MO_JALR))
+ MIB.addSym(MO.getMCSymbol(), MipsII::MO_JALR);
+ }
+
+
} else {
for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) {
if (BranchWithZeroOperand && (unsigned)ZeroOperandPosition == J)
@@ -825,7 +835,8 @@ MipsInstrInfo::getSerializableDirectMachineOperandTarg
{MO_GOT_HI16, "mips-got-hi16"},
{MO_GOT_LO16, "mips-got-lo16"},
{MO_CALL_HI16, "mips-call-hi16"},
- {MO_CALL_LO16, "mips-call-lo16"}
+ {MO_CALL_LO16, "mips-call-lo16"},
+ {MO_JALR, "mips-jalr"}
};
return makeArrayRef(Flags);
}
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MipsInstrInfo.td
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MipsInstrInfo.td Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MipsInstrInfo.td Tue Feb 5 18:38:58 2019 (r343794)
@@ -1623,11 +1623,15 @@ let isCall=1, hasDelaySlot=1, isCTI=1, Defs = [RA] in
class JumpLinkRegPseudo<RegisterOperand RO, Instruction JALRInst,
Register RetReg, RegisterOperand ResRO = RO>:
PseudoSE<(outs), (ins RO:$rs), [(MipsJmpLink RO:$rs)], II_JALR>,
- PseudoInstExpansion<(JALRInst RetReg, ResRO:$rs)>;
+ PseudoInstExpansion<(JALRInst RetReg, ResRO:$rs)> {
+ let hasPostISelHook = 1;
+ }
class JumpLinkReg<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [], II_JALR, FrmR, opstr>;
+ [], II_JALR, FrmR, opstr> {
+ let hasPostISelHook = 1;
+ }
class BGEZAL_FT<string opstr, DAGOperand opnd,
RegisterOperand RO> :
@@ -1646,7 +1650,9 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarr
class TailCallReg<Instruction JumpInst, RegisterOperand RO> :
PseudoSE<(outs), (ins RO:$rs), [(MipsTailCall RO:$rs)], II_JR>,
- PseudoInstExpansion<(JumpInst RO:$rs)>;
+ PseudoInstExpansion<(JumpInst RO:$rs)> {
+ let hasPostISelHook = 1;
+ }
}
class BAL_BR_Pseudo<Instruction RealInst, DAGOperand opnd> :
Modified: vendor/llvm/dist-release_80/lib/Target/Mips/MipsMCInstLower.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/Mips/MipsMCInstLower.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/Mips/MipsMCInstLower.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -117,6 +117,8 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const Ma
case MipsII::MO_CALL_LO16:
TargetKind = MipsMCExpr::MEK_CALL_LO16;
break;
+ case MipsII::MO_JALR:
+ return MCOperand();
}
switch (MOTy) {
Modified: vendor/llvm/dist-release_80/lib/Target/X86/X86DiscriminateMemOps.cpp
==============================================================================
--- vendor/llvm/dist-release_80/lib/Target/X86/X86DiscriminateMemOps.cpp Tue Feb 5 18:22:21 2019 (r343793)
+++ vendor/llvm/dist-release_80/lib/Target/X86/X86DiscriminateMemOps.cpp Tue Feb 5 18:38:58 2019 (r343794)
@@ -27,6 +27,14 @@ using namespace llvm;
#define DEBUG_TYPE "x86-discriminate-memops"
+static cl::opt<bool> EnableDiscriminateMemops(
+ DEBUG_TYPE, cl::init(false),
+ cl::desc("Generate unique debug info for each instruction with a memory "
+ "operand. Should be enabled for profile-drived cache prefetching, "
+ "both in the build of the binary being profiled, as well as in "
+ "the build of the binary consuming the profile."),
+ cl::Hidden);
+
namespace {
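
The new hidden EnableDiscriminateMemops flag defaults to off; per its description it should be enabled both in the build being profiled and in the build consuming the prefetch profile, for example with something like (invocation illustrative):

    llc -x86-discriminate-memops foo.ll
    # or through the clang driver:
    clang -O2 -mllvm -x86-discriminate-memops foo.c
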
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***