//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>
#include <utility>

using namespace llvm;

namespace llvm {
class MCSubtargetInfo;
}

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace
// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.

/* *** */

MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)),
      BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false),
      IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) {
  VersionInfo.Major = 0; // Major version == 0 for "none specified"
  DarwinTargetVariantVersionInfo.Major = 0;
}

MCAssembler::~MCAssembler() = default;

void MCAssembler::reset() {
  Sections.clear();
  Symbols.clear();
  IndirectSymbols.clear();
  DataRegions.clear();
  LinkerOptions.clear();
  FileNames.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;
  RelaxAll = false;
  SubsectionsViaSymbols = false;
  IncrementalLinkerCompatible = false;
  ELFHeaderEFlags = 0;
  LOHContainer.reset();
  VersionInfo.Major = 0;
  VersionInfo.SDKVersion = VersionTuple();
  DarwinTargetVariantVersionInfo.Major = 0;
  DarwinTargetVariantVersionInfo.SDKVersion = VersionTuple();

  // reset objects owned by us
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
  if (getWriterPtr())
    getWriterPtr()->reset();
  getLOHContainer().reset();
}

bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}

bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
    return false;

  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbolRefExpr *Ref = V.getSymA();
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}

bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
  // Non-temporary labels should always be visible to the linker.
  if (!Symbol.isTemporary())
    return true;

  if (Symbol.isUsedInReloc())
    return true;

  return false;
}

const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const {
  // Linker visible symbols define atoms.
  if (isSymbolLinkerVisible(S))
    return &S;

  // Absolute and undefined symbols have no defining atom.
  if (!S.isInSection())
    return nullptr;

  // Non-linker visible symbols in sections which can't be atomized have no
  // defining atom.
  if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols(
          *S.getFragment()->getParent()))
    return nullptr;

  // Otherwise, return the atom for the containing fragment.
  return S.getFragment()->getAtom();
}

bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
                                const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, uint64_t &Value,
                                bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  assert(getBackendPtr() && "Expected assembler backend");
  bool IsTarget = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                  MCFixupKindInfo::FKF_IsTarget;

  if (IsTarget)
    return getBackend().evaluateTargetFixup(*this, Layout, Fixup, DF, Target,
                                            Value, WasForced);

  unsigned FixupFlags = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags;
  bool IsPCRel = FixupFlags & MCFixupKindInfo::FKF_IsPCRel;
|  |  | 
|  | bool IsResolved = false; | 
|  | if (IsPCRel) { | 
|  | if (Target.getSymB()) { | 
|  | IsResolved = false; | 
|  | } else if (!Target.getSymA()) { | 
|  | IsResolved = false; | 
|  | } else { | 
|  | const MCSymbolRefExpr *A = Target.getSymA(); | 
|  | const MCSymbol &SA = A->getSymbol(); | 
|  | if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) { | 
|  | IsResolved = false; | 
|  | } else if (auto *Writer = getWriterPtr()) { | 
|  | IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) || | 
|  | Writer->isSymbolRefDifferenceFullyResolvedImpl( | 
|  | *this, SA, *DF, false, true); | 
|  | } | 
|  | } | 
|  | } else { | 
|  | IsResolved = Target.isAbsolute(); | 
|  | } | 
|  |  | 
|  | Value = Target.getConstant(); | 
|  |  | 
|  | if (const MCSymbolRefExpr *A = Target.getSymA()) { | 
|  | const MCSymbol &Sym = A->getSymbol(); | 
|  | if (Sym.isDefined()) | 
|  | Value += Layout.getSymbolOffset(Sym); | 
|  | } | 
|  | if (const MCSymbolRefExpr *B = Target.getSymB()) { | 
|  | const MCSymbol &Sym = B->getSymbol(); | 
|  | if (Sym.isDefined()) | 
|  | Value -= Layout.getSymbolOffset(Sym); | 
|  | } | 
|  |  | 
|  | bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags & | 
|  | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits; | 
|  | assert((ShouldAlignPC ? IsPCRel : true) && | 
|  | "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!"); | 
|  |  | 
|  | if (IsPCRel) { | 
|  | uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset(); | 
|  |  | 
|  | // A number of ARM fixups in Thumb mode require that the effective PC | 
|  | // address be determined as the 32-bit aligned version of the actual offset. | 
|  | if (ShouldAlignPC) Offset &= ~0x3; | 
|  | Value -= Offset; | 
|  | } | 
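  // Worked example: a PC-relative fixup at overall offset 8 (fragment offset
  // plus fixup offset) against a defined symbol at section offset 20 with a
  // zero addend gives Value = 20 - 8 = 12 at this point; any remaining
  // target-specific adjustment happens in the backend's applyFixup().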

  // Let the backend force a relocation if needed.
  if (IsResolved && getBackend().shouldForceRelocation(*this, Fixup, Target)) {
    IsResolved = false;
    WasForced = true;
  }

  return IsResolved;
}

uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateAsAbsolute(NumValues, Layout)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Nops:
    return cast<MCNopsFragment>(F).getNumBytes();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = offsetToAlignment(Offset, AF.getAlignment());
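    // For example, offsetToAlignment(7, Align(4)) == 1: one byte of padding
    // advances offset 7 to the next 4-byte boundary at 8.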

    // Insert extra nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign() hook.
    if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be a multiple of the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment().value();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
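    // For example, ".org 0x100" evaluated while this fragment starts at
    // section offset 0xF0 yields a 0x10-byte fragment.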
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_PseudoProbe:
    return cast<MCPseudoProbeAddrFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}

void MCAsmLayout::layoutFragment(MCFragment *F) {
  MCFragment *Prev = F->getPrevNode();

  // We should never try to recompute something which is valid.
  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
  // We should never try to compute the fragment layout if its predecessor
  // isn't valid.
  assert((!Prev || isFragmentValid(Prev)) &&
         "Attempt to compute fragment before its predecessor!");

  assert(!F->IsBeingLaidOut && "Already being laid out!");
  F->IsBeingLaidOut = true;

  ++stats::FragmentLayouts;

  // Compute fragment offset and size.
  if (Prev)
    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
  else
    F->Offset = 0;
  F->IsBeingLaidOut = false;
  LastValidFragment[F->getParent()] = F;

  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    |
  //                    F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // When the -mc-relax-all flag is used, we optimize bundling by writing the
  // padding directly into fragments when the instructions are emitted inside
  // the streamer. When the fragment is larger than the bundle size, we need to
  // ensure that it's bundle aligned. This means that if we end up with
  // multiple fragments, we must emit bundle padding between fragments.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
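  //
  // For example, with a 16-byte bundle and a 6-byte instruction fragment at
  // offset 12, the instruction would cross the boundary at 16, so 4 bytes of
  // bundle padding are inserted and the fragment's offset becomes 16.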
  //
  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
    assert(isa<MCEncodedFragment>(F) &&
           "Only MCEncodedFragment implementations have instructions");
    MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
    uint64_t FSize = Assembler.computeFragmentSize(*this, *EF);

    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize())
      report_fatal_error("Fragment can't be larger than a bundle size");

    uint64_t RequiredBundlePadding =
        computeBundlePadding(Assembler, EF, EF->Offset, FSize);
    if (RequiredBundlePadding > UINT8_MAX)
      report_fatal_error("Padding cannot exceed 255 bytes");
    EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
    EF->Offset += RequiredBundlePadding;
  }
}

void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) {
  bool New = !Symbol.isRegistered();
  if (Created)
    *Created = New;
  if (New) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
}

void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    const MCSubtargetInfo *STI = EF.getSubtargetInfo();
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary, STI))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding, STI))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}

/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCAsmLayout &Layout, const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);

  support::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) exists only to feed the assert at the
  // end of the function.
  uint64_t Start = OS.tell();
  (void)Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // If we are aligning with nops, ask the target to emit the right nop
    // sequence for the entire Count bytes.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
        report_fatal_error("unable to write nop sequence of " + Twine(Count) +
                           " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << cast<MCDataFragment>(F).getContents();
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << cast<MCRelaxableFragment>(F).getContents();
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OS << cast<MCCompactEncodedInstFragment>(F).getContents();
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as a byte vector to reduce the number of writes
    // done. As such, do the endian conversion here.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index = Endian == support::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];

    // Set ChunkSize to the largest multiple of VSize that fits in Data.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    const unsigned ChunkSize = VSize * NumPerChunk;
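    // For example, with VSize = 2, V = 0xABCD, and little-endian output, Data
    // now holds CD AB repeated eight times and ChunkSize is 16.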

    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // Emit the remainder, if any.
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }

  case MCFragment::FT_Nops: {
    ++stats::EmittedNopsFragments;
    const MCNopsFragment &NF = cast<MCNopsFragment>(F);

    int64_t NumBytes = NF.getNumBytes();
    int64_t ControlledNopLength = NF.getControlledNopLength();
    int64_t MaximumNopLength =
        Asm.getBackend().getMaximumNopSize(*NF.getSubtargetInfo());

    assert(NumBytes > 0 && "Expected positive NOPs fragment size");
    assert(ControlledNopLength >= 0 && "Expected non-negative NOP size");

    if (ControlledNopLength > MaximumNopLength) {
      Asm.getContext().reportError(NF.getLoc(),
                                   "illegal NOP size " +
                                       std::to_string(ControlledNopLength) +
                                       ". (expected within [0, " +
                                       std::to_string(MaximumNopLength) + "])");
      // Clamp the NOP length as reportError does not stop the execution
      // immediately.
      ControlledNopLength = MaximumNopLength;
    }

    // Use the maximum value if the size of each NOP is not specified.
    if (!ControlledNopLength)
      ControlledNopLength = MaximumNopLength;

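    // For example, NumBytes = 10 with ControlledNopLength = 4 emits nop
    // sequences of 4, 4, and 2 bytes.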
    while (NumBytes) {
      uint64_t NumBytesToEmit =
          (uint64_t)std::min(NumBytes, ControlledNopLength);
      assert(NumBytesToEmit && "try to emit empty NOP instruction");
      if (!Asm.getBackend().writeNopData(OS, NumBytesToEmit,
                                         NF.getSubtargetInfo())) {
        report_fatal_error("unable to write nop sequence of the remaining " +
                           Twine(NumBytesToEmit) + " bytes");
        break;
      }
      NumBytes -= NumBytesToEmit;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << LF.getContents();
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    const MCBoundaryAlignFragment &BF = cast<MCBoundaryAlignFragment>(F);
    if (!Asm.getBackend().writeNopData(OS, FragmentSize, BF.getSubtargetInfo()))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << CF.getContents();
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << DRF.getContents();
    break;
  }
  case MCFragment::FT_PseudoProbe: {
    const MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(F);
    OS << PF.getContents();
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}

void MCAssembler::writeSectionData(raw_ostream &OS, const MCSection *Sec,
                                   const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use
        // standard directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          getContext().reportError(SMLoc(), Sec->getVirtualSectionKind() +
                                                " section '" + Sec->getName() +
                                                "' cannot have fixups");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            getContext().reportError(SMLoc(),
                                     Sec->getVirtualSectionKind() +
                                         " section '" + Sec->getName() +
                                         "' cannot have non-zero initializers");
            break;
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      case MCFragment::FT_Org:
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, Layout, F);

  assert(getContext().hadError() ||
         OS.tell() - Start == Layout.getSectionAddressSize(Sec));
}

std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
                         const MCFixup &Fixup) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool WasForced;
  bool IsResolved =
      evaluateFixup(Layout, Fixup, &F, Target, FixedValue, WasForced);
  if (!IsResolved) {
    // The fixup was unresolved, we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}

void MCAssembler::layout(MCAsmLayout &Layout) {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Create dummy fragments and assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    // Create dummy fragments to eliminate any empty sections, this simplifies
    // layout.
    if (Sec.getFragmentList().empty())
      new MCDataFragment(&Sec);

    Sec.setOrdinal(SectionIndex++);
  }

  // Assign layout order indices to sections and fragments.
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
    MCSection *Sec = Layout.getSectionOrder()[i];
    Sec->setLayoutOrder(i);

    unsigned FragmentIndex = 0;
    for (MCFragment &Frag : *Sec)
      Frag.setLayoutOrder(FragmentIndex++);
  }

  // Layout until everything fits.
  while (layoutOnce(Layout)) {
    if (getContext().hadError())
      return;
    // Size of fragments in one section can depend on the size of fragments in
    // another. If any fragment has changed size, we have to re-layout (and
    // as a result possibly further relax) all.
    for (MCSection &Sec : *this)
      Layout.invalidateFragmentsFrom(&*Sec.begin());
  }

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  finishLayout(Layout);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this, Layout);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      const MCSubtargetInfo *STI = nullptr;

      // Process MCAlignFragment and MCEncodedFragmentWithFixups here.
      switch (Frag.getKind()) {
      default:
        continue;
      case MCFragment::FT_Align: {
        MCAlignFragment &AF = cast<MCAlignFragment>(Frag);
        // Insert a fixup for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign() hook.
        if (Sec.useCodeAlign() && AF.hasEmitNops())
          getBackend().shouldInsertFixupForCodeAlign(*this, Layout, AF);
        continue;
      }
      case MCFragment::FT_Data: {
        MCDataFragment &DF = cast<MCDataFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        STI = DF.getSubtargetInfo();
        assert(!DF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_Relaxable: {
        MCRelaxableFragment &RF = cast<MCRelaxableFragment>(Frag);
        Fixups = RF.getFixups();
        Contents = RF.getContents();
        STI = RF.getSubtargetInfo();
        assert(!RF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_CVDefRange: {
        MCCVDefRangeFragment &CF = cast<MCCVDefRangeFragment>(Frag);
        Fixups = CF.getFixups();
        Contents = CF.getContents();
        break;
      }
      case MCFragment::FT_Dwarf: {
        MCDwarfLineAddrFragment &DF = cast<MCDwarfLineAddrFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_DwarfFrame: {
        MCDwarfCallFrameFragment &DF = cast<MCDwarfCallFrameFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_PseudoProbe: {
        MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(Frag);
        Fixups = PF.getFixups();
        Contents = PF.getContents();
        break;
      }
      }
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Layout, Frag, Fixup);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved, STI);
      }
    }
  }
}

void MCAssembler::Finish() {
  // Create the layout object.
  MCAsmLayout Layout(*this);
  layout(Layout);

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject(*this, Layout);
}

bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF,
                                       const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool WasForced;
  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value, WasForced);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF,
                                                   Layout, WasForced);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
                                          const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F, Layout))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
                                   MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(&F, Layout))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.

  MCInst Relaxed = F.getInst();
  getBackend().relaxInstruction(Relaxed, *F.getSubtargetInfo());

  // Encode the new instruction.
  //
  // FIXME-PERF: If it matters, we could let the target do this. It can
  // probably do so more efficiently in many cases.
  SmallVector<MCFixup, 4> Fixups;
  SmallString<256> Code;
  raw_svector_ostream VecOS(Code);
  getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, *F.getSubtargetInfo());

  // Update the fragment.
  F.setInst(Relaxed);
  F.getContents() = Code;
  F.getFixups() = Fixups;

  return true;
}

bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
  uint64_t OldSize = LF.getContents().size();
  int64_t Value;
  bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout);
  if (!Abs)
    report_fatal_error("sleb128 and uleb128 expressions must be absolute");
  SmallString<8> &Data = LF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  // The compiler can generate EH table assembly that is impossible to assemble
  // without either adding padding to an LEB fragment or adding extra padding
  // to a later alignment fragment. To accommodate such tables, relaxation can
  // only increase an LEB fragment size here, not decrease it. See PR35809.
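  // Passing OldSize as the pad-to argument below makes the encoder emit
  // redundant continuation bytes as needed, so the new encoding is never
  // shorter than the previous iteration's.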
  if (LF.isSigned())
    encodeSLEB128(Value, OSE, OldSize);
  else
    encodeULEB128(Value, OSE, OldSize);
  return OldSize != LF.getContents().size();
}

/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
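///
/// For example, StartAddr = 30, Size = 4, BoundaryAlignment = 32: the branch
/// occupies bytes 30..33, which straddle the 32-byte boundary, so this
/// returns true.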
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}

/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
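///
/// For example, StartAddr = 28, Size = 4, BoundaryAlignment = 32: the branch
/// ends exactly at the 32-byte boundary, so this returns true.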
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}

/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}

bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout,
                                     MCBoundaryAlignFragment &BF) {
  // A BoundaryAlignFragment that doesn't need to align any fragment should
  // not be relaxed.
  if (!BF.getLastFragment())
    return false;

  uint64_t AlignedOffset = Layout.getFragmentOffset(&BF);
  uint64_t AlignedSize = 0;
  for (const MCFragment *F = BF.getLastFragment(); F != &BF;
       F = F->getPrevNode())
    AlignedSize += computeFragmentSize(Layout, *F);

  Align BoundaryAlignment = BF.getAlignment();
  uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
                         ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
                         : 0U;
  if (NewSize == BF.getSize())
    return false;
  BF.setSize(NewSize);
  Layout.invalidateFragmentsFrom(&BF);
  return true;
}

bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
                                     MCDwarfLineAddrFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfLineAddr(DF, Layout, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created a line delta with an invalid expression");
  (void)Abs;
  int64_t LineDelta = DF.getLineDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  DF.getFixups().clear();

  MCDwarfLineAddr::Encode(Context, getDWARFLinetableParams(), LineDelta,
                          AddrDelta, OSE);
  return OldSize != Data.size();
}

bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
                                              MCDwarfCallFrameFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfCFA(DF, Layout, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created call frame with an invalid expression");
  (void)Abs;
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  DF.getFixups().clear();

  MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE);
  return OldSize != Data.size();
}

bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout,
                                         MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeInlineLineTable(Layout, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout,
                                  MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(Layout, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxPseudoProbeAddr(MCAsmLayout &Layout,
                                       MCPseudoProbeAddrFragment &PF) {
  uint64_t OldSize = PF.getContents().size();
  int64_t AddrDelta;
  bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created a pseudo probe with an invalid expression");
  (void)Abs;
  SmallVectorImpl<char> &Data = PF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  PF.getFixups().clear();

  // AddrDelta is a signed integer, so emit it as SLEB128.
  encodeSLEB128(AddrDelta, OSE, OldSize);
  return OldSize != Data.size();
}

bool MCAssembler::relaxFragment(MCAsmLayout &Layout, MCFragment &F) {
  switch (F.getKind()) {
  default:
    return false;
  case MCFragment::FT_Relaxable:
    assert(!getRelaxAll() &&
           "Did not expect a MCRelaxableFragment in RelaxAll mode");
    return relaxInstruction(Layout, cast<MCRelaxableFragment>(F));
  case MCFragment::FT_Dwarf:
    return relaxDwarfLineAddr(Layout, cast<MCDwarfLineAddrFragment>(F));
  case MCFragment::FT_DwarfFrame:
    return relaxDwarfCallFrameFragment(Layout,
                                       cast<MCDwarfCallFrameFragment>(F));
  case MCFragment::FT_LEB:
    return relaxLEB(Layout, cast<MCLEBFragment>(F));
  case MCFragment::FT_BoundaryAlign:
    return relaxBoundaryAlign(Layout, cast<MCBoundaryAlignFragment>(F));
  case MCFragment::FT_CVInlineLines:
    return relaxCVInlineLineTable(Layout, cast<MCCVInlineLineTableFragment>(F));
  case MCFragment::FT_CVDefRange:
    return relaxCVDefRange(Layout, cast<MCCVDefRangeFragment>(F));
  case MCFragment::FT_PseudoProbe:
    return relaxPseudoProbeAddr(Layout, cast<MCPseudoProbeAddrFragment>(F));
  }
}

bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) {
  // Holds the first fragment which needed relaxing during this layout. It will
  // remain NULL if none were relaxed.
  // When a fragment is relaxed, all the fragments following it should get
  // invalidated because their offset is going to change.
  MCFragment *FirstRelaxedFragment = nullptr;

  // Attempt to relax all the fragments in the section.
  for (MCFragment &Frag : Sec) {
    // Check if this is a fragment that needs relaxation.
    bool RelaxedFrag = relaxFragment(Layout, Frag);
    if (RelaxedFrag && !FirstRelaxedFragment)
      FirstRelaxedFragment = &Frag;
  }
  if (FirstRelaxedFragment) {
    Layout.invalidateFragmentsFrom(FirstRelaxedFragment);
    return true;
  }
  return false;
}

bool MCAssembler::layoutOnce(MCAsmLayout &Layout) {
  ++stats::RelaxationSteps;

  bool WasRelaxed = false;
  for (MCSection &Sec : *this) {
    while (layoutSectionOnce(Layout, Sec))
      WasRelaxed = true;
  }

  return WasRelaxed;
}

void MCAssembler::finishLayout(MCAsmLayout &Layout) {
  assert(getBackendPtr() && "Expected assembler backend");
  // The layout is done. Mark every fragment as valid.
  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
    MCSection &Section = *Layout.getSectionOrder()[i];
    Layout.getFragmentOffset(&*Section.getFragmentList().rbegin());
    computeFragmentSize(Layout, *Section.getFragmentList().rbegin());
  }
  getBackend().finishLayout(*this, Layout);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCAssembler::dump() const {
  raw_ostream &OS = errs();

  OS << "<MCAssembler\n";
  OS << "  Sections:[\n    ";
  for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
    if (it != begin()) OS << ",\n    ";
    it->dump();
  }
  OS << "],\n";
  OS << "  Symbols:[";

  for (const_symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie;
       ++it) {
    if (it != symbol_begin()) OS << ",\n           ";
    OS << "(";
    it->dump();
    OS << ", Index:" << it->getIndex() << ", ";
    OS << ")";
  }
  OS << "]>\n";
}
#endif