+
  // Emit the on-disk payload for one function-name key. For each
  // (function hash, record) pair in the per-name map we write: the hash,
  // the number of counters, the raw counter values, and finally the
  // serialized value-profile data, all as little-endian uint64_t except
  // the value-profile blob which is written in ValueProfDataEndianness.
  static void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V,
                       offset_type) {
    using namespace llvm::support;
    endian::Writer<little> LE(Out);
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;

      LE.write<uint64_t>(ProfileData.first); // Function hash
      LE.write<uint64_t>(ProfRecord.Counts.size());
      for (uint64_t I : ProfRecord.Counts)
        LE.write<uint64_t>(I);

      // Write value data. Note the size is captured *before* the byte swap:
      // swapBytesFromHost mutates the buffer in place, presumably including
      // the header fields getSize() reads, so the order here matters.
      std::unique_ptr<ValueProfData> VDataPtr =
          ValueProfData::serializeFrom(ProfileData.second);
      uint32_t S = VDataPtr->getSize();
      VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
      Out.write((const char *)VDataPtr.get(), S);
    }
  }
+};
+}
+
// Internal interface for testing purposes only: overrides the byte order
// used when serializing value profile data (see EmitData above), so tests
// can exercise the non-host-endian code path.
void InstrProfWriter::setValueProfDataEndianness(
    support::endianness Endianness) {
  ValueProfDataEndianness = Endianness;
}
+
+std::error_code InstrProfWriter::addRecord(InstrProfRecord &&I,
+ uint64_t Weight) {
+ auto &ProfileDataMap = FunctionData[I.Name];
+
+ bool NewFunc;
+ ProfilingData::iterator Where;
+ std::tie(Where, NewFunc) =
+ ProfileDataMap.insert(std::make_pair(I.Hash, InstrProfRecord()));
+ InstrProfRecord &Dest = Where->second;
+
+ instrprof_error Result = instrprof_error::success;
+ if (NewFunc) {
+ // We've never seen a function with this name and hash, add it.
+ Dest = std::move(I);
+ // Fix up the name to avoid dangling reference.
+ Dest.Name = FunctionData.find(Dest.Name)->getKey();
+ if (Weight > 1)
+ Result = Dest.scale(Weight);
+ } else {
+ // We're updating a function we've seen before.
+ Result = Dest.merge(I, Weight);
+ }
+
+ Dest.sortValueData();
+
+ // We keep track of the max function count as we go for simplicity.
+ // Update this statistic no matter the result of the merge.
+ if (Dest.Counts[0] > MaxFunctionCount)
+ MaxFunctionCount = Dest.Counts[0];
+
+ return Result;
+}
+
+std::pair<uint64_t, uint64_t> InstrProfWriter::writeImpl(raw_ostream &OS) {
+ OnDiskChainedHashTableGenerator<InstrProfRecordTrait> Generator;
+
+ // Populate the hash table generator.
+ for (const auto &I : FunctionData)
+ Generator.insert(I.getKey(), &I.getValue());
+
+ using namespace llvm::support;
+ endian::Writer<little> LE(OS);
+
+ // Write the header.
+ IndexedInstrProf::Header Header;
+ Header.Magic = IndexedInstrProf::Magic;
+ Header.Version = IndexedInstrProf::Version;
+ Header.MaxFunctionCount = MaxFunctionCount;
+ Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
+ Header.HashOffset = 0;
+ int N = sizeof(IndexedInstrProf::Header) / sizeof(uint64_t);
+
+ // Only write out all the fields execpt 'HashOffset'. We need
+ // to remember the offset of that field to allow back patching
+ // later.
+ for (int I = 0; I < N - 1; I++)
+ LE.write<uint64_t>(reinterpret_cast<uint64_t *>(&Header)[I]);
+
+ // Save a space to write the hash table start location.
+ uint64_t HashTableStartLoc = OS.tell();
+ // Reserve the space for HashOffset field.
+ LE.write<uint64_t>(0);
+ // Write the hash table.
+ uint64_t HashTableStart = Generator.Emit(OS);
+
+ return std::make_pair(HashTableStartLoc, HashTableStart);
+}
+
+void InstrProfWriter::write(raw_fd_ostream &OS) {
+ // Write the hash table.
+ auto TableStart = writeImpl(OS);
+
+ // Go back and fill in the hash table start.
+ using namespace support;
+ OS.seek(TableStart.first);
+ // Now patch the HashOffset field previously reserved.
+ endian::Writer<little>(OS).write<uint64_t>(TableStart.second);