/// initialized.
uint64_t Offset;
- /// EffectiveSize - The compute size of this section. This is ~0 until
- /// initialized.
- uint64_t EffectiveSize;
-
/// LayoutOrder - The layout order of this fragment.
unsigned LayoutOrder;
/// cannot be satisfied in this width then this fragment is ignored.
unsigned MaxBytesToEmit;
+  /// Size - The current estimated size of this fragment; updated as layout
+  /// is relaxed.
+ unsigned Size;
+
/// EmitNops - Flag to indicate that (optimal) NOPs should be emitted instead
/// of using the provided value. The exact interpretation of this flag is
/// target dependent.
unsigned _MaxBytesToEmit, MCSectionData *SD = 0)
: MCFragment(FT_Align, SD), Alignment(_Alignment),
Value(_Value),ValueSize(_ValueSize),
- MaxBytesToEmit(_MaxBytesToEmit), EmitNops(false) {}
+ MaxBytesToEmit(_MaxBytesToEmit), Size(0), EmitNops(false) {}
/// @name Accessors
/// @{
unsigned getValueSize() const { return ValueSize; }
+ unsigned getSize() const { return Size; }
+
+ void setSize(unsigned Size_) { Size = Size_; }
+
unsigned getMaxBytesToEmit() const { return MaxBytesToEmit; }
bool hasEmitNops() const { return EmitNops; }
const MCInstFragment *IF,
const MCAsmLayout &Layout) const;
- /// Compute the effective fragment size assuming it is layed out at the given
- /// \arg SectionAddress and \arg FragmentOffset.
- uint64_t ComputeFragmentSize(const MCFragment &F,
- uint64_t FragmentOffset) const;
-
/// LayoutOnce - Perform one layout iteration and return true if any offsets
/// were adjusted.
bool LayoutOnce(const MCObjectWriter &Writer, MCAsmLayout &Layout);
bool RelaxDwarfLineAddr(const MCObjectWriter &Writer, MCAsmLayout &Layout,
MCDwarfLineAddrFragment &DF);
+ bool RelaxAlignment(const MCObjectWriter &Writer, MCAsmLayout &Layout,
+ MCAlignFragment &DF);
+
/// FinishLayout - Finalize a layout, including fragment lowering.
void FinishLayout(MCAsmLayout &Layout);
MCFragment &F, const MCFixup &Fixup);
public:
+  /// Compute the effective size of the given fragment. Layout-dependent
+  /// fragments (such as alignment) report the size cached during relaxation.
+ uint64_t ComputeFragmentSize(const MCFragment &F) const;
+
/// Find the symbol which defines the atom containing the given symbol, or
/// null if there is no such symbol.
const MCSymbolData *getAtom(const MCSymbolData *Symbol) const;
if (!isFragmentUpToDate(F))
return;
- // Otherwise, reset the last valid fragment to the predecessor of the
- // invalidated fragment.
+ // Otherwise, reset the last valid fragment to this fragment.
const MCSectionData &SD = *F->getParent();
- LastValidFragment[&SD] = F->getPrevNode();
+ LastValidFragment[&SD] = F;
}
void MCAsmLayout::EnsureValid(const MCFragment *F) const {
}
}
-uint64_t MCAsmLayout::getFragmentEffectiveSize(const MCFragment *F) const {
- EnsureValid(F);
- assert(F->EffectiveSize != ~UINT64_C(0) && "Address not set!");
- return F->EffectiveSize;
-}
-
uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const {
EnsureValid(F);
assert(F->Offset != ~UINT64_C(0) && "Address not set!");
uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const {
  // The size is the last fragment's end offset.
  const MCFragment &F = SD->getFragmentList().back();
-  return getFragmentOffset(&F) + getFragmentEffectiveSize(&F);
+  // Sizes are no longer cached on the fragment (EffectiveSize was removed);
+  // recompute the last fragment's size on demand from the assembler.
+  return getFragmentOffset(&F) + getAssembler().ComputeFragmentSize(F);
}
uint64_t MCAsmLayout::getSectionFileSize(const MCSectionData *SD) const {
}
MCFragment::MCFragment(FragmentType _Kind, MCSectionData *_Parent)
- : Kind(_Kind), Parent(_Parent), Atom(0), Offset(~UINT64_C(0)),
- EffectiveSize(~UINT64_C(0))
+ : Kind(_Kind), Parent(_Parent), Atom(0), Offset(~UINT64_C(0))
{
if (Parent)
Parent->getFragmentList().push_back(this);
return IsResolved;
}
-uint64_t MCAssembler::ComputeFragmentSize(const MCFragment &F,
- uint64_t FragmentOffset) const {
+uint64_t MCAssembler::ComputeFragmentSize(const MCFragment &F) const {
switch (F.getKind()) {
case MCFragment::FT_Data:
return cast<MCDataFragment>(F).getContents().size();
case MCFragment::FT_LEB:
return cast<MCLEBFragment>(F).getContents().size();
- case MCFragment::FT_Align: {
- const MCAlignFragment &AF = cast<MCAlignFragment>(F);
-
- uint64_t Size = OffsetToAlignment(FragmentOffset, AF.getAlignment());
-
- // Honor MaxBytesToEmit.
- if (Size > AF.getMaxBytesToEmit())
- return 0;
-
- return Size;
- }
+ case MCFragment::FT_Align:
+ return cast<MCAlignFragment>(F).getSize();
case MCFragment::FT_Org:
return cast<MCOrgFragment>(F).getSize();
// Compute fragment offset and size.
uint64_t Offset = 0;
if (Prev)
- Offset += Prev->Offset + Prev->EffectiveSize;
+ Offset += Prev->Offset + getAssembler().ComputeFragmentSize(*Prev);
F->Offset = Offset;
- F->EffectiveSize = getAssembler().ComputeFragmentSize(*F, F->Offset);
LastValidFragment[F->getParent()] = F;
}
++stats::EmittedFragments;
// FIXME: Embed in fragments instead?
- uint64_t FragmentSize = Layout.getFragmentEffectiveSize(&F);
+ uint64_t FragmentSize = Asm.ComputeFragmentSize(F);
switch (F.getKind()) {
case MCFragment::FT_Align: {
MCAlignFragment &AF = cast<MCAlignFragment>(F);
return OldSize != Data.size();
}
+/// RelaxAlignment - Recompute the padding size of an alignment fragment from
+/// its current layout offset, caching it on the fragment. Returns true if the
+/// size changed, which forces another layout iteration.
+///
+/// \arg Writer is unused here but keeps the signature uniform with the other
+/// Relax* hooks dispatched from LayoutOnce.
+bool MCAssembler::RelaxAlignment(const MCObjectWriter &Writer,
+                                 MCAsmLayout &Layout,
+                                 MCAlignFragment &AF) {
+  // Padding needed to reach the next aligned boundary from this offset.
+  unsigned Offset = Layout.getFragmentOffset(&AF);
+  unsigned Size = OffsetToAlignment(Offset, AF.getAlignment());
+  // Honor MaxBytesToEmit: if the required padding exceeds it, emit nothing.
+  if (Size > AF.getMaxBytesToEmit())
+    Size = 0;
+  unsigned OldSize = AF.getSize();
+  AF.setSize(Size);
+  return OldSize != Size;
+}
+
bool MCAssembler::LayoutOnce(const MCObjectWriter &Writer,
MCAsmLayout &Layout) {
++stats::RelaxationSteps;
switch(it2->getKind()) {
default:
break;
+ case MCFragment::FT_Align:
+ relaxedFrag = RelaxAlignment(Writer, Layout,
+ *cast<MCAlignFragment>(it2));
+ break;
case MCFragment::FT_Inst:
relaxedFrag = RelaxInstruction(Writer, Layout,
*cast<MCInstFragment>(it2));
}
OS << "<MCFragment " << (void*) this << " LayoutOrder:" << LayoutOrder
- << " Offset:" << Offset << " EffectiveSize:" << EffectiveSize << ">";
+ << " Offset:" << Offset << ">";
switch (getKind()) {
case MCFragment::FT_Align: {
// FIXME: This is a horrible way of checking the output, we need an llvm-mc
// based 'otool'.
-// FIXME: PR8467.
-// There is an unnecessary relaxation here. After the first jmp slides,
-// the .align size could be recomputed so that the second jump will be in range
-// for a 1-byte jump. For performance reasons, this is not currently done.
+// This is a case where llvm-mc computes a better layout than Darwin 'as'. The
+// issue is that after the first jmp slides, the .align size must be
+// recomputed -- otherwise the second jump will appear to be out-of-range for a
+// 1-byte jump.
// CHECK: # Section 0
// CHECK: (('section_name', '__text\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
// CHECK: ('segment_name', '__TEXT\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
// CHECK: ('address', 0)
-// CHECK: ('size', 322)
+// CHECK: ('size', 306)
// CHECK: ('offset', 324)
// CHECK: ('alignment', 4)
// CHECK: ('reloc_offset', 0)