[llvm-profdata] Add merge() to InstrProfRecord
[oota-llvm.git] / lib / ProfileData / InstrProfWriter.cpp
index 320860a4cb410b6bf9c5a23d95b270dfe1cc392b..465a4ea9e00a4dce0d1d9b753866555e2dd38468 100644 (file)
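With this change the writer keys profile records by (function name, function hash) and combines duplicate records through the new InstrProfRecord::merge() instead of summing counters inline. A minimal usage sketch of the new interface, not part of this diff and assuming InstrProfRecord's (Name, Hash, Counts) constructor from llvm/ProfileData/InstrProf.h:

    InstrProfWriter Writer;
    // Two records for the same name and hash: the second is merged into the
    // first by InstrProfRecord::merge(), which sums the counters element-wise.
    Writer.addRecord(InstrProfRecord("foo", 0x1234, {1, 2, 3}));
    std::error_code EC =
        Writer.addRecord(InstrProfRecord("foo", 0x1234, {4, 5, 6}));
    assert(!EC && "mismatched hash/counts or counter overflow");
    std::unique_ptr<MemoryBuffer> Profile = Writer.writeBuffer();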
 //===----------------------------------------------------------------------===//
 
 #include "llvm/ProfileData/InstrProfWriter.h"
-#include "llvm/Support/Endian.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/OnDiskHashTable.h"
 
 using namespace llvm;
 
-error_code InstrProfWriter::addFunctionCounts(StringRef FunctionName,
-                                              uint64_t FunctionHash,
-                                              ArrayRef<uint64_t> Counters) {
-  auto Where = FunctionData.find(FunctionName);
-  if (Where == FunctionData.end()) {
-    // If this is the first time we've seen this function, just add it.
-    FunctionData[FunctionName] = {FunctionHash, Counters};
-    return instrprof_error::success;;
+namespace {
+static support::endianness ValueProfDataEndianness = support::little;
+
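+// Trait for the on-disk chained hash table (used by the
+// OnDiskChainedHashTableGenerator in writeImpl below): the key is the
+// function name, and the data is the set of (function hash, counters,
+// value profile data) records collected under that name.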
+class InstrProfRecordTrait {
+public:
+  typedef StringRef key_type;
+  typedef StringRef key_type_ref;
+
+  typedef const InstrProfWriter::ProfilingData *const data_type;
+  typedef const InstrProfWriter::ProfilingData *const data_type_ref;
+
+  typedef uint64_t hash_value_type;
+  typedef uint64_t offset_type;
+
+  static hash_value_type ComputeHash(key_type_ref K) {
+    return IndexedInstrProf::ComputeHash(IndexedInstrProf::HashType, K);
   }
 
-  auto &Data = Where->getValue();
-  // We can only add to existing functions if they match, so we check the hash
-  // and number of counters.
-  if (Data.Hash != FunctionHash)
-    return instrprof_error::hash_mismatch;
-  if (Data.Counts.size() != Counters.size())
-    return instrprof_error::count_mismatch;
-  // These match, add up the counters.
-  for (size_t I = 0, E = Counters.size(); I < E; ++I) {
-    if (Data.Counts[I] + Counters[I] < Data.Counts[I])
-      return instrprof_error::counter_overflow;
-    Data.Counts[I] += Counters[I];
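+  // Compute the on-disk byte sizes of the key (the function name) and of
+  // its data: one (hash, counter vector, value profile data) block per
+  // function hash recorded under this name.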
+  static std::pair<offset_type, offset_type>
+  EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
+
+    offset_type N = K.size();
+    LE.write<offset_type>(N);
+
+    offset_type M = 0;
+    for (const auto &ProfileData : *V) {
+      const InstrProfRecord &ProfRecord = ProfileData.second;
+      M += sizeof(uint64_t); // The function hash
+      M += sizeof(uint64_t); // The size of the Counts vector
+      M += ProfRecord.Counts.size() * sizeof(uint64_t);
+
+      // Value data
+      M += ValueProfData::getSize(ProfileData.second);
+    }
+    LE.write<offset_type>(M);
+
+    return std::make_pair(N, M);
   }
-  return instrprof_error::success;
+
+  static void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N) {
+    Out.write(K.data(), N);
+  }
+
+  static void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V,
+                       offset_type) {
+    using namespace llvm::support;
+    endian::Writer<little> LE(Out);
+    for (const auto &ProfileData : *V) {
+      const InstrProfRecord &ProfRecord = ProfileData.second;
+
+      LE.write<uint64_t>(ProfileData.first); // Function hash
+      LE.write<uint64_t>(ProfRecord.Counts.size());
+      for (uint64_t I : ProfRecord.Counts)
+        LE.write<uint64_t>(I);
+
+      // Write value data
+      std::unique_ptr<ValueProfData> VDataPtr =
+          ValueProfData::serializeFrom(ProfileData.second);
+      uint32_t S = VDataPtr->getSize();
+      VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
+      Out.write((const char *)VDataPtr.get(), S);
+    }
+  }
+};
+} // end anonymous namespace
+
+// Internal interface for testing purposes only.
+void InstrProfWriter::setValueProfDataEndianness(
+    support::endianness Endianness) {
+  ValueProfDataEndianness = Endianness;
+}
+
+void InstrProfWriter::updateStringTableReferences(InstrProfRecord &I) {
+  I.updateStrings(&StringTable);
 }
 
-void InstrProfWriter::write(raw_ostream &OS) {
-  // Write out the counts for each function.
-  for (const auto &I : FunctionData) {
-    StringRef Name = I.getKey();
-    uint64_t Hash = I.getValue().Hash;
-    const std::vector<uint64_t> &Counts = I.getValue().Counts;
-
-    OS << Name << "\n" << Hash << "\n" << Counts.size() << "\n";
-    for (uint64_t Count : Counts)
-      OS << Count << "\n";
-    OS << "\n";
+std::error_code InstrProfWriter::addRecord(InstrProfRecord &&I) {
+  updateStringTableReferences(I);
+  auto &ProfileDataMap = FunctionData[I.Name];
+
+  bool NewFunc;
+  ProfilingData::iterator Where;
+  std::tie(Where, NewFunc) =
+      ProfileDataMap.insert(std::make_pair(I.Hash, InstrProfRecord()));
+  InstrProfRecord &Dest = Where->second;
+  if (NewFunc) {
+    // We've never seen a function with this name and hash, add it.
+    Dest = std::move(I);
+  } else {
+    // We're updating a function we've seen before.
+    instrprof_error MergeResult = Dest.merge(I);
+    if (MergeResult != instrprof_error::success) {
+      return MergeResult;
+    }
   }
+
+  // We keep track of the max function count as we go for simplicity.
+  if (Dest.Counts[0] > MaxFunctionCount)
+    MaxFunctionCount = Dest.Counts[0];
+
+  return instrprof_error::success;
+}
+
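+// Write the header and the on-disk hash table to OS. Returns the stream
+// offset of the reserved 'HashOffset' slot and the start offset of the hash
+// table, so that callers can back-patch the header.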
+std::pair<uint64_t, uint64_t> InstrProfWriter::writeImpl(raw_ostream &OS) {
+  OnDiskChainedHashTableGenerator<InstrProfRecordTrait> Generator;
+
+  // Populate the hash table generator.
+  for (const auto &I : FunctionData)
+    Generator.insert(I.getKey(), &I.getValue());
+
+  using namespace llvm::support;
+  endian::Writer<little> LE(OS);
+
+  // Write the header.
+  IndexedInstrProf::Header Header;
+  Header.Magic = IndexedInstrProf::Magic;
+  Header.Version = IndexedInstrProf::Version;
+  Header.MaxFunctionCount = MaxFunctionCount;
+  Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
+  Header.HashOffset = 0;
+  int N = sizeof(IndexedInstrProf::Header) / sizeof(uint64_t);
+
+  // Write out all the header fields except 'HashOffset'. We need to
+  // remember the offset of that field to allow back-patching it later.
+  for (int I = 0; I < N - 1; I++)
+    LE.write<uint64_t>(reinterpret_cast<uint64_t *>(&Header)[I]);
+
+  // Remember where the 'HashOffset' field will go so it can be back-patched.
+  uint64_t HashTableStartLoc = OS.tell();
+  // Reserve the space for the HashOffset field.
+  LE.write<uint64_t>(0);
+  // Write the hash table.
+  uint64_t HashTableStart = Generator.Emit(OS);
+
+  return std::make_pair(HashTableStartLoc, HashTableStart);
+}
+
+void InstrProfWriter::write(raw_fd_ostream &OS) {
+  // Write the hash table.
+  auto TableStart = writeImpl(OS);
+
+  // Go back and fill in the hash table start.
+  using namespace support;
+  OS.seek(TableStart.first);
+  // Now patch the HashOffset field previously reserved.
+  endian::Writer<little>(OS).write<uint64_t>(TableStart.second);
+}
+
+std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() {
+  std::string Data;
+  llvm::raw_string_ostream OS(Data);
+  // Write the hash table.
+  auto TableStart = writeImpl(OS);
+  OS.flush();
+
+  // Go back and fill in the hash table start. The string stream cannot seek,
+  // so patch the reserved HashOffset slot by overwriting its bytes in the
+  // backing string, swapped to the on-disk (little-endian) byte order.
+  using namespace support;
+  uint64_t Bytes = endian::byte_swap<uint64_t, little>(TableStart.second);
+  Data.replace(TableStart.first, sizeof(uint64_t), (const char *)&Bytes,
+               sizeof(uint64_t));
+
+  // Return this in an aligned memory buffer.
+  return MemoryBuffer::getMemBufferCopy(Data);
 }