diff --git a/API/hermes/TraceInterpreter.cpp b/API/hermes/TraceInterpreter.cpp index 031967c1a8a..303c421950d 100644 --- a/API/hermes/TraceInterpreter.cpp +++ b/API/hermes/TraceInterpreter.cpp @@ -1592,7 +1592,7 @@ bool TraceInterpreter::ifObjectAddToDefs( std::string TraceInterpreter::printStats() { if (options_.forceGCBeforeStats) { - rt_.instrumentation().collectGarbage(); + rt_.instrumentation().collectGarbage("forced for stats"); } std::string stats = rt_.instrumentation().getRecordedGCStats(); ::hermes::vm::instrumentation::PerfEvents::endAndInsertStats(stats); diff --git a/API/hermes/hermes.cpp b/API/hermes/hermes.cpp index bfdb6d1983c..fb046fa45b6 100644 --- a/API/hermes/hermes.cpp +++ b/API/hermes/hermes.cpp @@ -512,13 +512,17 @@ class HermesRuntimeImpl final : public HermesRuntime, } // Overridden from jsi::Instrumentation - void collectGarbage() override { - runtime_.getHeap().collect(); + void collectGarbage(std::string cause) override { + runtime_.collect(std::move(cause)); } // Overridden from jsi::Instrumentation - void startTrackingHeapObjectStackTraces() override { - runtime_.enableAllocationLocationTracker(); + void startTrackingHeapObjectStackTraces( + std::function)> fragmentCallback) override { + runtime_.enableAllocationLocationTracker(std::move(fragmentCallback)); } // Overridden from jsi::Instrumentation diff --git a/API/jsi/jsi/decorator.h b/API/jsi/jsi/decorator.h index 46e7414abbe..9110145e177 100644 --- a/API/jsi/jsi/decorator.h +++ b/API/jsi/jsi/decorator.h @@ -331,12 +331,17 @@ class RuntimeDecorator : public Base, private jsi::Instrumentation { return plain().instrumentation().getHeapInfo(includeExpensive); } - void collectGarbage() override { - plain().instrumentation().collectGarbage(); + void collectGarbage(std::string cause) override { + plain().instrumentation().collectGarbage(std::move(cause)); } - void startTrackingHeapObjectStackTraces() override { - plain().instrumentation().startTrackingHeapObjectStackTraces(); + void startTrackingHeapObjectStackTraces( + std::function)> callback) override { + plain().instrumentation().startTrackingHeapObjectStackTraces( + std::move(callback)); } void stopTrackingHeapObjectStackTraces() override { diff --git a/API/jsi/jsi/instrumentation.h b/API/jsi/jsi/instrumentation.h index 04c76ce2594..0a9f48abb52 100644 --- a/API/jsi/jsi/instrumentation.h +++ b/API/jsi/jsi/instrumentation.h @@ -7,8 +7,10 @@ #pragma once +#include #include #include +#include #include #include @@ -49,12 +51,27 @@ class JSI_EXPORT Instrumentation { virtual std::unordered_map getHeapInfo( bool includeExpensive) = 0; - /// perform a full garbage collection - virtual void collectGarbage() = 0; + /// Perform a full garbage collection. + /// \param cause The cause of this collection, as it should be reported in + /// logs. + virtual void collectGarbage(std::string cause) = 0; + + /// A HeapStatsUpdate is a tuple of the fragment index, the number of objects + /// in that fragment, and the number of bytes used by those objects. + /// A "fragment" is a view of all objects allocated within a time slice. + using HeapStatsUpdate = std::tuple; /// Start capturing JS stack-traces for all JS heap allocated objects. These /// can be accessed via \c ::createSnapshotToFile(). - virtual void startTrackingHeapObjectStackTraces() = 0; + /// \param fragmentCallback If present, invoke this callback every so often + /// with the most recently seen object ID, and a list of fragments that have + /// been updated. 
This callback will be invoked on the same thread that the + /// runtime is using. + virtual void startTrackingHeapObjectStackTraces( + std::function stats)> fragmentCallback) = 0; /// Stop capture JS stack-traces for JS heap allocated objects. virtual void stopTrackingHeapObjectStackTraces() = 0; diff --git a/API/jsi/jsi/jsi.cpp b/API/jsi/jsi/jsi.cpp index e4a7e431fca..1054057e1fd 100644 --- a/API/jsi/jsi/jsi.cpp +++ b/API/jsi/jsi/jsi.cpp @@ -97,9 +97,13 @@ Instrumentation& Runtime::instrumentation() { return std::unordered_map{}; } - void collectGarbage() override {} + void collectGarbage(std::string) override {} - void startTrackingHeapObjectStackTraces() override {} + void startTrackingHeapObjectStackTraces( + std::function)>) override {} void stopTrackingHeapObjectStackTraces() override {} void createSnapshotToFile(const std::string&) override { diff --git a/include/hermes/Support/SamplingThread.h b/include/hermes/Support/SamplingThread.h deleted file mode 100644 index 2a375951abd..00000000000 --- a/include/hermes/Support/SamplingThread.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -#ifndef HERMES_SUPPORT_SAMPLINGTHREAD_H -#define HERMES_SUPPORT_SAMPLINGTHREAD_H - -#include -#include -#include -#include -#include -#include - -namespace hermes { - -/// Manages a thread that samples an atomic T at a given interval. -/// Despite managing another thread itself, this class is not thread-safe. -template -class SamplingThread { - public: - using Clock = std::chrono::steady_clock; - using TimePoint = std::chrono::time_point; - using Duration = Clock::duration; - using Samples = std::vector>; - - /// Creates a new instance - /// \p value The value to be sampled. - /// \p interval The approximate period of time to wait between samples. - SamplingThread(const std::atomic &value, Duration interval) - : mExit_(), - value_(value), - interval_(interval), - startTime_(Clock::now()), - samples_(), - sampler_(&SamplingThread::run, this) {} - - ~SamplingThread() { - if (isRunning()) { - (void)stop(); - } - } - - /// \return True if and only if the sampling thread is still running. - bool isRunning() const { - return sampler_.joinable(); - } - - /// When sampling began. - TimePoint startTime() const { - return startTime_; - } - - /// Stop sampling and \return the samples collected. The thread must be - /// running when calling this method. - Samples stop() { - assert(isRunning() && "Can only stop thread once."); - { - ExitGuard g(mExit_); - stop_ = true; - } - exitMonitor_.notify_one(); - sampler_.join(); - return std::move(samples_); - } - - private: - using ExitGuard = std::unique_lock; - - /// Used to co-ordinate the shutdown of the sampling thread. - bool stop_{false}; - - /// Guards stop_ - std::mutex mExit_; - std::condition_variable exitMonitor_; - - const std::atomic &value_; - Duration interval_; - const std::chrono::time_point startTime_; - Samples samples_; - std::thread sampler_; - - /// The code to run in the sampling thread. 
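
A usage sketch of the new heap-timeline API, added here for orientation; it is not part of the patch. The hunks above drop the template arguments of the callback type, so this assumes it is std::function<void(uint64_t, std::chrono::microseconds, std::vector<HeapStatsUpdate>)> with HeapStatsUpdate a std::tuple of (fragment index, object count, byte count), matching the fuller spelling in the GCBase changes below. The include paths and the helper name sampleHeapTimeline are assumptions.

#include <chrono>
#include <cstdint>
#include <tuple>
#include <vector>

#include <jsi/instrumentation.h>
#include <jsi/jsi.h>

// Start the heap timeline, decode the periodic fragment updates, force a GC
// with a logging cause, then stop tracking.
void sampleHeapTimeline(facebook::jsi::Runtime &rt) {
  auto &inst = rt.instrumentation();
  inst.startTrackingHeapObjectStackTraces(
      [](uint64_t lastSeenObjectID,
         std::chrono::microseconds timestamp,
         std::vector<facebook::jsi::Instrumentation::HeapStatsUpdate> stats) {
        // Each update describes one fragment that changed since the last
        // flush: its index, its live object count, and its live byte count.
        for (const auto &update : stats) {
          const auto fragmentIndex = std::get<0>(update);
          const auto objectCount = std::get<1>(update);
          const auto byteCount = std::get<2>(update);
          (void)fragmentIndex;
          (void)objectCount;
          (void)byteCount;
        }
        (void)lastSeenObjectID;
        (void)timestamp;
      });
  // The cause string only feeds GC logging/analytics; any label works.
  inst.collectGarbage("example");
  inst.stopTrackingHeapObjectStackTraces();
}
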
- void run() { - ExitGuard l(mExit_); - while (!stop_) { - samples_.emplace_back( - Clock::now(), value_.load(std::memory_order_relaxed)); - exitMonitor_.wait_for(l, interval_); - } - } -}; - -} // namespace hermes - -#endif // HERMES_SUPPORT_SAMPLINGTHREAD_H diff --git a/include/hermes/VM/GCBase.h b/include/hermes/VM/GCBase.h index befd9d4a153..63837392d8d 100644 --- a/include/hermes/VM/GCBase.h +++ b/include/hermes/VM/GCBase.h @@ -15,7 +15,6 @@ #include "hermes/Public/GCTripwireContext.h" #include "hermes/Support/CheckedMalloc.h" #include "hermes/Support/OSCompat.h" -#include "hermes/Support/SamplingThread.h" #include "hermes/Support/StatsAccumulator.h" #include "hermes/VM/BuildMetadata.h" #include "hermes/VM/CellKind.h" @@ -98,8 +97,9 @@ class Deserializer; /// void creditExternalMemory(GCCell *alloc, uint32_t size); /// void debitExternalMemory(GCCell *alloc, uint32_t size); /// -/// Force a garbage collection cycle. -/// void collect(); +/// Force a garbage collection cycle. The provided cause will be used in +/// logging. +/// void collect(std::string cause); /// /// The maximum size of any one allocation allowable by the GC in any state. /// static constexpr uint32_t maxAllocationSize(); @@ -189,6 +189,9 @@ class Deserializer; /// class GCBase { public: + static const char kNaturalCauseForAnalytics[]; + static const char kHandleSanCauseForAnalytics[]; + /// An interface enabling the garbage collector to mark roots and free /// symbols. struct GCCallbacks { @@ -352,8 +355,7 @@ class GCBase { #endif /// When enabled, every allocation gets an attached stack-trace and an - /// object ID, and the latter is periodically sampled by the GCs IDTracker. - /// When disabled old allocations continue to be tracked but + /// object ID. When disabled old allocations continue to be tracked but /// no new allocations get a stack-trace. struct AllocationLocationTracker final { explicit inline AllocationLocationTracker(GCBase *gc); @@ -361,37 +363,87 @@ class GCBase { /// Returns true if tracking is enabled for new allocations. inline bool isEnabled() const; /// Must be called by GC implementations whenever a new allocation is made. - inline void newAlloc(const void *ptr); + void newAlloc(const void *ptr, uint32_t sz); /// Must be called by GC implementations whenever an allocation is moved. - inline void moveAlloc(const void *oldPtr, const void *newPtr); + void moveAlloc(const void *oldPtr, const void *newPtr); /// Must be called by GC implementations whenever an allocation is freed. - inline void freeAlloc(const void *ptr); + void freeAlloc(const void *ptr, uint32_t sz); /// Returns data needed to reconstruct the JS stack used to create the /// specified allocation. inline StackTracesTreeNode *getStackTracesTreeNodeForAlloc( const void *ptr) const; - /// Enable location tracking and ID sampling. - void enable(); + /// A Fragment is a time bound for when objects are allocated. Any + /// allocations that occur before the lastSeenObjectID_ are in this + /// fragment. Allocations increment the numObjects_ and numBytes_. Free'd + /// cells from this fragment decrement numObjects_ and numBytes_. + struct Fragment { + HeapSnapshot::NodeID lastSeenObjectID_; + std::chrono::microseconds timestamp_; + /// Number of objects still alive in this fragment. Incremented when + /// objects are created, decremented when objects are destroyed. + uint64_t numObjects_; + /// Total size of objects still alive in this fragment. 
+ uint64_t numBytes_; + /// If true, one of numObjects or numBytes changed since the last flush. + bool touchedSinceLastFlush_; + }; + + /// This must match the definition in jsi::Instrumentation to avoid + /// unnecessary copying. + using HeapStatsUpdate = std::tuple; + + /// Enable location tracking. + void enable(std::function)> callback); /// Disable location tracking - turns \c newAlloc() into a no-op. Existing /// allocations continue to be tracked. void disable(); + const std::vector &fragments() const { + return fragments_; + } + #ifdef HERMESVM_SERIALIZE void serialize(Serializer &s) const; void deserialize(Deserializer &d); #endif private: + /// Flush out heap profiler data to the callback after a new kFlushThreshold + /// bytes are allocated. + static constexpr uint64_t kFlushThreshold = 128 * (1 << 10); /// Associates allocations at their current location with their stack trace /// data. llvh::DenseMap stackMap_; /// We need access to the GCBase to collect the current stack when nodes are - /// allocated and to sample IDs for the allocation timeline. + /// allocated. GCBase *gc_; /// Indicates if tracking of new allocations is enabled. bool enabled_{false}; + /// Time when the profiler was started. + std::chrono::steady_clock::time_point startTime_; + /// This should be called periodically whenever the last seen object ID is + /// updated. + std::function< + void(uint64_t, std::chrono::microseconds, std::vector)> + fragmentCallback_; + /// All samples that have been flushed. Only needs the last object ID to be + /// written to the file. + std::vector fragments_; + + /// Updates the last fragment to have the current last ID and timestamp, + /// then calls fragmentCallback_ with both the new fragment and any changed + /// fragments from freeAlloc. + void flushCallback(); + + /// Find the fragment corresponding to the given id. + /// \return fragments_.back() if none exists (it's the currently active + /// fragment). + Fragment &findFragmentForID(HeapSnapshot::NodeID id); }; class IDTracker final { @@ -439,9 +491,6 @@ class GCBase { } }; - /// Thread to periodically sample the last assigned ID. - using Sampler = SamplingThread; - explicit IDTracker(); /// Return true if IDs are being tracked. @@ -451,6 +500,10 @@ class GCBase { /// If one does not yet exist, start tracking it. inline HeapSnapshot::NodeID getObjectID(const void *cell); + /// Same as \c getObjectID, except it asserts if the cell doesn't have an + /// ID. + inline HeapSnapshot::NodeID getObjectIDMustExist(const void *cell); + /// Get the unique object id of the symbol with the given index \b /// symIdx. If one does not yet exist, start tracking it. inline HeapSnapshot::NodeID getObjectID(uint32_t symIdx); @@ -489,13 +542,9 @@ class GCBase { template inline void forEachID(F callback); - /// Begin periodically sampling the most recently assigned object ID. - /// Restarts sampling from scratch if it was already in progress. - void beginSamplingLastID(Sampler::Duration duration); - - /// End sampling and return all collected samples. Returns empty values - /// if sampling was not in progress. - std::pair endSamplingLastID(); + /// Get the current last ID. All other existing IDs are less than or equal + /// to this one. + inline HeapSnapshot::NodeID lastID() const; #ifdef HERMESVM_SERIALIZE /// Serialize this IDTracker to the output stream. @@ -522,10 +571,9 @@ class GCBase { /// recycled so that snapshots don't confuse two objects with each other. 
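
To make the new data structure concrete, here is a compact, self-contained model of the fragment bookkeeping described above; it is illustrative only. The names, the use of 0 as the NoID sentinel, and the standalone layout are assumptions; the real logic is in GCBase::AllocationLocationTracker (newAlloc, freeAlloc, findFragmentForID, flushCallback) later in this patch.

#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {

struct Fragment {
  uint64_t lastSeenObjectID; // Largest ID covered, once the fragment is closed.
  uint64_t numObjects;       // Objects from this fragment that are still live.
  uint64_t numBytes;         // Bytes from this fragment that are still live.
};

constexpr uint64_t kNoID = 0;                   // Stand-in for ReservedObjectID::NoID.
constexpr uint64_t kFlushThreshold = 128 << 10; // 128 KiB, as in this patch.

struct Timeline {
  // The last fragment is always the "open" one (lastSeenObjectID == kNoID).
  std::vector<Fragment> fragments{Fragment{kNoID, 0, 0}};
  uint64_t lastAssignedID = 0;

  // A freed object belongs to the first closed fragment whose upper ID bound
  // covers it; if none does, it belongs to the open fragment.
  Fragment &findFragmentForID(uint64_t id) {
    for (size_t i = 0; i + 1 < fragments.size(); ++i)
      if (fragments[i].lastSeenObjectID >= id)
        return fragments[i];
    return fragments.back();
  }

  // New allocations always land in the open fragment; crossing the flush
  // threshold closes it, which is when fragmentCallback would fire.
  void onAlloc(uint64_t id, uint64_t sz) {
    lastAssignedID = id;
    fragments.back().numObjects++;
    fragments.back().numBytes += sz;
    if (fragments.back().numBytes >= kFlushThreshold)
      flush();
  }

  // Frees decrement whichever fragment originally owned the object, so closed
  // fragments shrink over time and are re-reported as updated at the next
  // flush.
  void onFree(uint64_t id, uint64_t sz) {
    Fragment &f = findFragmentForID(id);
    f.numObjects--;
    f.numBytes -= sz;
  }

  // Close the open fragment and start a new, empty one.
  void flush() {
    fragments.back().lastSeenObjectID = lastAssignedID;
    fragments.push_back(Fragment{kNoID, 0, 0});
  }
};

} // namespace sketch
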
/// NOTE: Need to ensure that this starts on an odd number, so check if /// the first non-reserved ID is odd, if not add one. - std::atomic nextID_{ - static_cast( - ReservedObjectID::FirstNonReservedID) | - 1}; + HeapSnapshot::NodeID lastID_{static_cast( + ReservedObjectID::FirstNonReservedID) | + 1}; /// Map of object pointers to IDs. Only populated once the first heap /// snapshot is requested, or the first time the memory profiler is turned @@ -540,9 +588,6 @@ class GCBase { /// Map of numeric values to IDs. Used to give numbers in the heap a unique /// node. llvh::DenseMap numberIDMap_; - - /// Thread that periodically samples the last assigned ID. - std::unique_ptr sampler_; }; #ifndef NDEBUG @@ -1504,6 +1549,13 @@ inline HeapSnapshot::NodeID GCBase::IDTracker::getObjectID(const void *cell) { return objID; } +inline HeapSnapshot::NodeID GCBase::IDTracker::getObjectIDMustExist( + const void *cell) { + auto iter = objectIDMap_.find(cell); + assert(iter != objectIDMap_.end() && "cell must already have an ID"); + return iter->second; +} + inline HeapSnapshot::NodeID GCBase::IDTracker::getObjectID(uint32_t symIdx) { auto iter = symbolIDMap_.find(symIdx); if (iter != symbolIDMap_.end()) { @@ -1570,16 +1622,18 @@ inline void GCBase::IDTracker::forEachID(F callback) { } } +inline HeapSnapshot::NodeID GCBase::IDTracker::lastID() const { + return lastID_; +} + inline HeapSnapshot::NodeID GCBase::IDTracker::nextObjectID() { - // fetch_add returns the old value. - uint64_t before = nextID_.fetch_add(kIDStep); // This must be unique for most features that rely on it, check for overflow. if (LLVM_UNLIKELY( - before >= + lastID_ >= std::numeric_limits::max() - kIDStep)) { hermes_fatal("Ran out of object IDs"); } - return before + kIDStep; + return lastID_ += kIDStep; } inline HeapSnapshot::NodeID GCBase::IDTracker::nextNativeID() { @@ -1602,44 +1656,6 @@ inline bool GCBase::AllocationLocationTracker::isEnabled() const { return enabled_; } -inline void GCBase::AllocationLocationTracker::newAlloc(const void *ptr) { - // Note we always get the current IP even if allocation tracking is not - // enabled as it allows us to assert this feature works across many tests. - // Note it's not very slow, it's slower than the non-virtual version - // in Runtime though. - const auto *ip = gc_->gcCallbacks_->getCurrentIPSlow(); - if (enabled_) { - // This is stateful and causes the object to have an ID assigned. - gc_->getIDTracker().getObjectID(ptr); - if (auto node = gc_->gcCallbacks_->getCurrentStackTracesTreeNode(ip)) { - stackMap_.try_emplace(ptr, node); - } - } -} - -inline void GCBase::AllocationLocationTracker::moveAlloc( - const void *oldPtr, - const void *newPtr) { - if (oldPtr == newPtr) { - // This can happen in old generations when compacting to the same location. 
- return; - } - auto oldIt = stackMap_.find(oldPtr); - if (oldIt == stackMap_.end()) { - return; - } - const auto oldStackTracesTreeNode = oldIt->second; - assert( - stackMap_.count(newPtr) == 0 && - "Moving to a location that is already tracked"); - stackMap_.erase(oldIt); - stackMap_[newPtr] = oldStackTracesTreeNode; -} - -inline void GCBase::AllocationLocationTracker::freeAlloc(const void *ptr) { - stackMap_.erase(ptr); -} - inline StackTracesTreeNode * GCBase::AllocationLocationTracker::getStackTracesTreeNodeForAlloc( const void *ptr) const { diff --git a/include/hermes/VM/GenGCNC.h b/include/hermes/VM/GenGCNC.h index f77b96dd0d8..c42e07c87b2 100644 --- a/include/hermes/VM/GenGCNC.h +++ b/include/hermes/VM/GenGCNC.h @@ -250,7 +250,7 @@ class GenGC final : public GCBase { /// \p canEffectiveOOM Indicates whether the GC can declare effective OOM as a /// result of this collection. - void collect(bool canEffectiveOOM = false); + void collect(std::string cause, bool canEffectiveOOM = false); static constexpr uint32_t minAllocationSize() { // NCGen doesn't enforce a minimum allocation requirement. @@ -589,6 +589,7 @@ class GenGC final : public GCBase { CollectionSection( GenGC *gc, const char *name, + std::string cause, OptValue gcCallbacksOpt = llvh::None); ~CollectionSection(); @@ -612,6 +613,7 @@ class GenGC final : public GCBase { private: GenGC *gc_; GCCycle cycle_; + std::string cause_; TimePoint wallStart_; std::chrono::microseconds cpuStart_; size_t gcUsedBefore_; @@ -928,7 +930,7 @@ inline void *GenGC::alloc(uint32_t sz) { // MallocGC already implements that well though, and it is complicated and // slow to implement that for NCGen. So instead do a full collection which // is almost as good. - collect(); + collect("handle-san"); } #ifdef HERMESVM_GC_GENERATIONAL_MARKSWEEPCOMPACT @@ -970,7 +972,7 @@ inline void *GenGC::allocLongLived(uint32_t size) { // MallocGC already implements that well though, and it is complicated and // slow to implement that for NCGen. So instead do a full collection which // is almost as good. - collect(); + collect("handle-san"); } AllocResult res; if (allocContextFromYG_) { diff --git a/include/hermes/VM/HadesGC.h b/include/hermes/VM/HadesGC.h index 2c9009e67ee..c803713f47c 100644 --- a/include/hermes/VM/HadesGC.h +++ b/include/hermes/VM/HadesGC.h @@ -110,7 +110,7 @@ class HadesGC final : public GCBase { /// Force a garbage collection cycle. /// (Part of general GC API defined in GCBase.h). - void collect(); + void collect(std::string cause); /// Run the finalizers for all heap objects. void finalizeAll(); @@ -561,11 +561,12 @@ class HadesGC final : public GCBase { /// Perform a YG garbage collection. All live objects in YG will be evacuated /// to the OG. + /// \param cause The cause of the GC, used for logging. /// \param forceOldGenCollection If true, always start an old gen collection /// if one is not already active. /// \post The YG is completely empty, and all bytes are available for new /// allocations. - void youngGenCollection(bool forceOldGenCollection); + void youngGenCollection(std::string cause, bool forceOldGenCollection); /// In the "no GC before TTI" mode, move the Young Gen heap segment to the /// Old Gen without scanning for garbage. @@ -574,7 +575,7 @@ class HadesGC final : public GCBase { /// Perform an OG garbage collection. All live objects in OG will be left /// untouched, all unreachable objects will be placed into a free list that /// can be used by \c oldGenAlloc. 
- void oldGenCollection(); + void oldGenCollection(std::string cause); /// If there's an OG collection going on, wait for it to complete. This /// function is synchronous and will block the caller if the GC background diff --git a/include/hermes/VM/MallocGC.h b/include/hermes/VM/MallocGC.h index 033013065c3..9a464a4a435 100644 --- a/include/hermes/VM/MallocGC.h +++ b/include/hermes/VM/MallocGC.h @@ -168,7 +168,7 @@ class MallocGC final : public GCBase { /// Checks if a requested \p size can fit in the heap. If it can't, a /// collection occurs. If it still can't after the collection, OOM is /// declared. - void collectBeforeAlloc(uint32_t size); + void collectBeforeAlloc(std::string cause, uint32_t size); /// Same as above, but tries to allocate in a long lived area of the heap. /// Use this when the object is known to last for a long period of time. @@ -185,7 +185,7 @@ class MallocGC final : public GCBase { /// Collect all of the dead objects and symbols in the heap. Also invalidate /// weak pointers that point to dead objects. - void collect(); + void collect(std::string cause); static constexpr uint32_t minAllocationSize() { // MallocGC imposes no limit on individual allocations. @@ -325,10 +325,12 @@ template inline void *MallocGC::alloc(uint32_t size) { assert(noAllocLevel_ == 0 && "no alloc allowed right now"); size = heapAlignSize(size); + if (shouldSanitizeHandles()) { + collectBeforeAlloc(kHandleSanCauseForAnalytics, size); + } // Use subtraction to prevent overflow. - if (LLVM_UNLIKELY( - shouldSanitizeHandles() || size > sizeLimit_ - allocatedBytes_)) { - collectBeforeAlloc(size); + if (LLVM_UNLIKELY(size > sizeLimit_ - allocatedBytes_)) { + collectBeforeAlloc(kNaturalCauseForAnalytics, size); } // Add space for the header. auto *header = new (checkedMalloc(size + sizeof(CellHeader))) CellHeader(); @@ -340,18 +342,6 @@ inline void *MallocGC::alloc(uint32_t size) { totalAllocatedBytes_ += size; #ifndef NDEBUG ++numAllocatedObjects_; -#endif -#if !defined(HERMES_ENABLE_ALLOCATION_LOCATION_TRACES) && !defined(NDEBUG) - // If allocation location tracking is enabled we implicitly call - // getCurrentIP() via newAlloc() below. Even if this isn't enabled, we always - // call getCurrentIPSlow() in a debug build as this has the effect of - // asserting the IP is correctly set (not invalidated) at this point. This - // allows us to leverage our whole test-suite to find missing cases of - // CAPTURE_IP* macros in the interpreter loop. - (void)gcCallbacks_->getCurrentIPSlow(); -#endif -#ifdef HERMES_ENABLE_ALLOCATION_LOCATION_TRACES - getAllocationLocationTracker().newAlloc(mem); #endif return mem; } diff --git a/include/hermes/VM/Runtime.h b/include/hermes/VM/Runtime.h index f3c3aa5f6d8..ee514221307 100644 --- a/include/hermes/VM/Runtime.h +++ b/include/hermes/VM/Runtime.h @@ -247,8 +247,8 @@ class Runtime : public HandleRootOwner, void ttiReached(); /// Force a garbage collection cycle. - void collect() { - heap_.collect(); + void collect(std::string cause) { + heap_.collect(std::move(cause)); } /// Potentially move the heap if handle sanitization is on. @@ -1336,7 +1336,15 @@ class Runtime : public HandleRootOwner, /// Enable allocation location tracking. Only works with /// HERMES_ENABLE_ALLOCATION_LOCATION_TRACES. - void enableAllocationLocationTracker(); + void enableAllocationLocationTracker() { + enableAllocationLocationTracker(nullptr); + } + void enableAllocationLocationTracker( + std::function)> + fragmentCallback); /// Disable allocation location tracking for new objects. 
Old objects tagged /// with stack traces continue to be tracked until they are freed. @@ -1605,7 +1613,7 @@ inline void *Runtime::alloc(uint32_t sz) { #endif void *ptr = heap_.alloc(sz); #ifdef HERMES_ENABLE_ALLOCATION_LOCATION_TRACES - heap_.getAllocationLocationTracker().newAlloc(ptr); + heap_.getAllocationLocationTracker().newAlloc(ptr, sz); #endif return ptr; } @@ -1623,7 +1631,7 @@ inline void *Runtime::allocLongLived(uint32_t size) { #endif void *ptr = heap_.allocLongLived(size); #ifdef HERMES_ENABLE_ALLOCATION_LOCATION_TRACES - heap_.getAllocationLocationTracker().newAlloc(ptr); + heap_.getAllocationLocationTracker().newAlloc(ptr, size); #endif return ptr; } diff --git a/include/hermes/VM/StackTracesTree-NoRuntime.h b/include/hermes/VM/StackTracesTree-NoRuntime.h index 694d52f70ad..5ce09eff8e3 100644 --- a/include/hermes/VM/StackTracesTree-NoRuntime.h +++ b/include/hermes/VM/StackTracesTree-NoRuntime.h @@ -21,6 +21,7 @@ #define HERMES_ENABLE_ALLOCATION_LOCATION_TRACES #endif +#include "hermes/Public/DebuggerTypes.h" #include "hermes/Support/OptValue.h" #include "hermes/Support/StringSetVector.h" @@ -43,31 +44,36 @@ struct StackTracesTreeNode { /// nodes that would appear at the same "location". struct SourceLoc { StringSetVector::size_type scriptName; + ::facebook::hermes::debugger::ScriptID scriptID; int32_t lineNo; int32_t columnNo; SourceLoc( StringSetVector::size_type scriptName, + ::facebook::hermes::debugger::ScriptID scriptID, int32_t lineNo, int32_t columnNo) - : scriptName(scriptName), lineNo(lineNo), columnNo(columnNo){}; + : scriptName(scriptName), + scriptID(scriptID), + lineNo(lineNo), + columnNo(columnNo) {} unsigned hash() const { - return scriptName ^ columnNo ^ lineNo; + return scriptName ^ scriptID ^ columnNo ^ lineNo; }; bool operator==(const SourceLoc &r) const { - return scriptName == r.scriptName && lineNo == r.lineNo && - columnNo == r.columnNo; + return scriptName == r.scriptName && scriptID == r.scriptID && + lineNo == r.lineNo && columnNo == r.columnNo; } }; /// Utility class for use with \c llvh::DenseMap . struct SourceLocMapInfo { static inline SourceLoc getEmptyKey() { - return {SIZE_MAX, -1, -1}; + return {SIZE_MAX, 0, -1, -1}; } static inline SourceLoc getTombstoneKey() { - return {SIZE_MAX - 1, -1, -1}; + return {SIZE_MAX - 1, 0, -1, -1}; } static unsigned getHashValue(const SourceLoc &v) { return v.hash(); diff --git a/include/hermes/VM/StackTracesTree.h b/include/hermes/VM/StackTracesTree.h index a06c216bd25..37db3bb9cdd 100644 --- a/include/hermes/VM/StackTracesTree.h +++ b/include/hermes/VM/StackTracesTree.h @@ -93,24 +93,24 @@ struct StackTracesTree { std::shared_ptr strings_; /// Pre-computed string IDs - const StringSetVector::size_type invalidFunctionID_; - const StringSetVector::size_type invalidScriptNameID_; + const StringSetVector::size_type rootFunctionID_; + const StringSetVector::size_type rootScriptNameID_; const StringSetVector::size_type nativeFunctionID_; const StringSetVector::size_type anonymousFunctionID_; /// Every node in the try gets an ID which is used when writing out snapshot /// data for Chrome. - size_t nextNodeID_{0}; + size_t nextNodeID_{1}; /// The root of the tree is a sentinel which is always present and does not /// represent a valid code location. 
std::unique_ptr root_{new StackTracesTreeNode( nextNodeID_++, nullptr, /* parent */ - {invalidScriptNameID_, -1, -1}, + {rootScriptNameID_, 0, 0, 0}, nullptr, /* codeBlock */ nullptr, /* ip */ - invalidFunctionID_)}; + rootFunctionID_)}; /// Current head of the tree, typically representing the last known call-site /// in bytecode execution. diff --git a/lib/ConsoleHost/ConsoleHost.cpp b/lib/ConsoleHost/ConsoleHost.cpp index df68e4128fe..f8dc961e165 100644 --- a/lib/ConsoleHost/ConsoleHost.cpp +++ b/lib/ConsoleHost/ConsoleHost.cpp @@ -432,7 +432,7 @@ bool executeHBCBytecodeImpl( statSampler->stop().printJSON(llvh::errs()); if (options.forceGCBeforeStats) { - runtime->collect(); + runtime->collect("forced for stats"); } printStats(runtime.get(), llvh::errs()); } diff --git a/lib/VM/GCBase.cpp b/lib/VM/GCBase.cpp index 423e32a685f..6577980a7b9 100644 --- a/lib/VM/GCBase.cpp +++ b/lib/VM/GCBase.cpp @@ -35,6 +35,9 @@ using llvh::format; namespace hermes { namespace vm { +const char GCBase::kNaturalCauseForAnalytics[] = "natural"; +const char GCBase::kHandleSanCauseForAnalytics[] = "handle-san"; + GCBase::GCBase( MetadataTable metaTable, GCCallbacks *gcCallbacks, @@ -506,16 +509,9 @@ void GCBase::createSnapshot(GC *gc, llvh::raw_ostream &os) { snap.emitAllocationTraceInfo(); snap.beginSection(HeapSnapshot::Section::Samples); - GCBase::IDTracker::Sampler::TimePoint startTime; - GCBase::IDTracker::Sampler::Samples samples; - std::tie(startTime, samples) = gc->getIDTracker().endSamplingLastID(); - for (auto sample : samples) { - uint64_t lastID = sample.second; - uint64_t timestampMicros = - std::chrono::duration_cast( - sample.first - startTime) - .count(); - json.emitValues({timestampMicros, lastID}); + for (const auto &fragment : getAllocationLocationTracker().fragments()) { + json.emitValues({static_cast(fragment.timestamp_.count()), + fragment.lastSeenObjectID_}); } snap.endSection(HeapSnapshot::Section::Samples); @@ -675,6 +671,7 @@ void GCBase::printStats(JSONEmitter &json) { json.emitKeyValue("runtimeDescription", event.runtimeDescription); json.emitKeyValue("gcKind", event.gcKind); json.emitKeyValue("collectionType", event.collectionType); + json.emitKeyValue("cause", event.cause); json.emitKeyValue("duration", event.duration.count()); json.emitKeyValue("cpuDuration", event.cpuDuration.count()); json.emitKeyValue("preAllocated", event.preAllocated); @@ -834,7 +831,7 @@ llvh::raw_ostream &operator<<(llvh::raw_ostream &os, const SizeFormatObj &sfo) { GCBase::GCCallbacks::~GCCallbacks() {} GCBase::IDTracker::IDTracker() { - assert(nextID_ % 2 == 1 && "First JS object ID isn't odd"); + assert(lastID_ % 2 == 1 && "First JS object ID isn't odd"); } #ifdef HERMESVM_SERIALIZE @@ -853,7 +850,7 @@ void GCBase::AllocationLocationTracker::deserialize(Deserializer &d) { } void GCBase::IDTracker::serialize(Serializer &s) const { - s.writeInt(nextID_); + s.writeInt(lastID_); s.writeInt(objectIDMap_.size()); for (auto it = objectIDMap_.begin(); it != objectIDMap_.end(); it++) { s.writeRelocation(it->first); @@ -862,7 +859,7 @@ void GCBase::IDTracker::serialize(Serializer &s) const { } void GCBase::IDTracker::deserialize(Deserializer &d) { - nextID_ = d.readInt(); + lastID_ = d.readInt(); size_t size = d.readInt(); for (size_t i = 0; i < size; i++) { // Heap must have been deserialized before this function. 
All deserialized @@ -899,39 +896,169 @@ HeapSnapshot::NodeID GCBase::IDTracker::getNumberID(double num) { return numberRef = nextNumberID(); } -void GCBase::IDTracker::beginSamplingLastID(Sampler::Duration duration) { - endSamplingLastID(); - sampler_ = llvh::make_unique(nextID_, duration); -} - -std::pair< - GCBase::IDTracker::Sampler::TimePoint, - GCBase::IDTracker::Sampler::Samples> -GCBase::IDTracker::endSamplingLastID() { - if (sampler_) { - std::pair result = - make_pair(sampler_->startTime(), sampler_->stop()); - sampler_.reset(); - return result; - } else { - return std::make_pair(Sampler::TimePoint{}, Sampler::Samples{}); - } -} - -void GCBase::AllocationLocationTracker::enable() { +void GCBase::AllocationLocationTracker::enable( + std::function< + void(uint64_t, std::chrono::microseconds, std::vector)> + callback) { + assert(!enabled_ && "Shouldn't enable twice"); enabled_ = true; - GC *gc = reinterpret_cast(gc_); + GC *gc = static_cast(gc_); // For correct visualization of the allocation timeline, it's necessary that // objects in the heap snapshot that existed before sampling was enabled have // numerically lower IDs than those allocated during sampling. We ensure this // by assigning IDs to everything here. - gc->forAllObjs([gc](GCCell *cell) { gc->getIDTracker().getObjectID(cell); }); - gc->getIDTracker().beginSamplingLastID(std::chrono::milliseconds(50)); + uint64_t numObjects = 0; + uint64_t numBytes = 0; + gc->forAllObjs([gc, &numObjects, &numBytes](GCCell *cell) { + numObjects++; + numBytes += cell->getAllocatedSize(); + gc->getIDTracker().getObjectID(cell); + }); + fragmentCallback_ = std::move(callback); + startTime_ = std::chrono::steady_clock::now(); + fragments_.clear(); + // The first fragment has all objects that were live before the profiler was + // enabled. + // The ID and timestamp will be filled out via flushCallback. + fragments_.emplace_back(Fragment{ + static_cast(IDTracker::ReservedObjectID::NoID), + std::chrono::microseconds(), + numObjects, + numBytes, + // Say the fragment is touched here so it is written out + // automatically by flushCallback. + true}); + // Immediately flush the first fragment. + flushCallback(); } void GCBase::AllocationLocationTracker::disable() { - gc_->getIDTracker().endSamplingLastID(); + flushCallback(); enabled_ = false; + fragmentCallback_ = nullptr; +} + +void GCBase::AllocationLocationTracker::newAlloc(const void *ptr, uint32_t sz) { + // Note we always get the current IP even if allocation tracking is not + // enabled as it allows us to assert this feature works across many tests. + // Note it's not very slow, it's slower than the non-virtual version + // in Runtime though. + const auto *ip = gc_->gcCallbacks_->getCurrentIPSlow(); + if (enabled_) { + // This is stateful and causes the object to have an ID assigned. 
+ const auto id = gc_->getIDTracker().getObjectID(ptr); + HERMES_SLOW_ASSERT( + &findFragmentForID(id) == &fragments_.back() && + "Should only ever be allocating into the newest fragment"); + (void)id; + Fragment &lastFrag = fragments_.back(); + HERMES_SLOW_ASSERT( + lastFrag.lastSeenObjectID_ == + static_cast( + IDTracker::ReservedObjectID::NoID) && + "Last fragment should not have an ID assigned yet"); + lastFrag.numObjects_++; + lastFrag.numBytes_ += sz; + lastFrag.touchedSinceLastFlush_ = true; + if (lastFrag.numBytes_ >= kFlushThreshold) { + flushCallback(); + } + if (auto node = gc_->gcCallbacks_->getCurrentStackTracesTreeNode(ip)) { + auto itAndDidInsert = stackMap_.try_emplace(ptr, node); + assert(itAndDidInsert.second && "Failed to create a new node"); + (void)itAndDidInsert; + } + } +} + +void GCBase::AllocationLocationTracker::moveAlloc( + const void *oldPtr, + const void *newPtr) { + if (oldPtr == newPtr) { + // This can happen in old generations when compacting to the same location. + return; + } + auto oldIt = stackMap_.find(oldPtr); + if (oldIt == stackMap_.end()) { + // This can happen if the tracker is turned on between collections, and + // something is being moved that didn't have a stack entry. + return; + } + const auto oldStackTracesTreeNode = oldIt->second; + assert( + stackMap_.count(newPtr) == 0 && + "Moving to a location that is already tracked"); + stackMap_.erase(oldIt); + stackMap_[newPtr] = oldStackTracesTreeNode; +} + +void GCBase::AllocationLocationTracker::freeAlloc( + const void *ptr, + uint32_t sz) { + stackMap_.erase(ptr); + if (!enabled_) { + // Fragments won't exist if the heap profiler isn't enabled. + return; + } + const auto id = gc_->getIDTracker().getObjectIDMustExist(ptr); + Fragment &frag = findFragmentForID(id); + assert( + frag.numObjects_ >= 1 && "Num objects decremented too much for fragment"); + frag.numObjects_--; + assert(frag.numBytes_ >= sz && "Num bytes decremented too much for fragment"); + frag.numBytes_ -= sz; + frag.touchedSinceLastFlush_ = true; +} + +GCBase::AllocationLocationTracker::Fragment & +GCBase::AllocationLocationTracker::findFragmentForID(HeapSnapshot::NodeID id) { + assert(fragments_.size() >= 1 && "Must have at least one fragment available"); + for (auto it = fragments_.begin(); it != fragments_.end() - 1; ++it) { + if (it->lastSeenObjectID_ >= id) { + return *it; + } + } + // Since no previous fragments matched, it must be the last fragment. + return fragments_.back(); +} + +void GCBase::AllocationLocationTracker::flushCallback() { + Fragment &lastFrag = fragments_.back(); + const auto lastID = gc_->getIDTracker().lastID(); + const auto duration = std::chrono::duration_cast( + std::chrono::steady_clock::now() - startTime_); + assert( + lastFrag.lastSeenObjectID_ == + static_cast( + IDTracker::ReservedObjectID::NoID) && + "Last fragment should not have an ID assigned yet"); + // In case a flush happens without any allocations occurring, don't add a new + // fragment. + if (lastFrag.touchedSinceLastFlush_) { + lastFrag.lastSeenObjectID_ = lastID; + lastFrag.timestamp_ = duration; + // Place an empty fragment at the end, for any new allocs. + fragments_.emplace_back(Fragment{ + static_cast(IDTracker::ReservedObjectID::NoID), + std::chrono::microseconds(), + 0, + 0, + false}); + } + if (fragmentCallback_) { + std::vector updatedFragments; + // Don't include the last fragment, which is newly created (or has no + // objects in it). 
+ for (size_t i = 0; i < fragments_.size() - 1; ++i) { + auto &fragment = fragments_[i]; + if (fragment.touchedSinceLastFlush_) { + updatedFragments.emplace_back( + i, fragment.numObjects_, fragment.numBytes_); + fragment.touchedSinceLastFlush_ = false; + } + } + fragmentCallback_(lastID, duration, std::move(updatedFragments)); + } } llvh::Optional GCBase::getSnapshotID(HermesValue val) { diff --git a/lib/VM/HeapSnapshot.cpp b/lib/VM/HeapSnapshot.cpp index 361a2096170..95c0c7ec308 100644 --- a/lib/VM/HeapSnapshot.cpp +++ b/lib/VM/HeapSnapshot.cpp @@ -344,10 +344,10 @@ void HeapSnapshot::emitAllocationTraceInfo() { struct FuncHashMapInfo { static StackTracesTreeNode::SourceLoc getEmptyKey() { - return {SIZE_MAX, -1, -1}; + return {SIZE_MAX, 0, -1, -1}; } static inline StackTracesTreeNode::SourceLoc getTombstoneKey() { - return {SIZE_MAX - 1, -1, -1}; + return {SIZE_MAX - 1, 0, -1, -1}; } static unsigned getHashValue(const StackTracesTreeNode::SourceLoc &v) { return v.hash(); @@ -383,7 +383,8 @@ void HeapSnapshot::emitAllocationTraceInfo() { json_.emitValue(functionId); // "function_id" json_.emitValue(curNode->name); // "name" json_.emitValue(curNode->sourceLoc.scriptName); // "script_name" - json_.emitValue(curNode->sourceLoc.scriptName); // "script_id" + json_.emitValue(curNode->sourceLoc.scriptID); // "script_id" + // These should be emitted as 1-based, not 0-based like locations. json_.emitValue(curNode->sourceLoc.lineNo); // "line" json_.emitValue(curNode->sourceLoc.columnNo); // "column" for (auto child : curNode->getChildren()) { @@ -393,10 +394,7 @@ void HeapSnapshot::emitAllocationTraceInfo() { endSection(Section::TraceFunctionInfos); beginSection(Section::TraceTree); - // Start from the nodes below the sentinel node as this is always invalid - for (auto child : stackTracesTree_->getRootNode()->getChildren()) { - nodeStack.push(child); - } + nodeStack.push(stackTracesTree_->getRootNode()); while (!nodeStack.empty()) { auto curNode = nodeStack.top(); nodeStack.pop(); diff --git a/lib/VM/JSError.cpp b/lib/VM/JSError.cpp index 15d3d4213a2..222c4b3afaf 100644 --- a/lib/VM/JSError.cpp +++ b/lib/VM/JSError.cpp @@ -429,7 +429,7 @@ ExecutionStatus JSError::recordStackTrace( // which have savedCodeBlock == nullptr in order to allow proper returns in // the interpreter. StackFramePtr prev = cf->getPreviousFrame(); - if (prev && prev != framesEnd) { + if (prev != framesEnd) { if (CodeBlock *parentCB = prev->getCalleeCodeBlock()) { savedCodeBlock = parentCB; } diff --git a/lib/VM/JSLib/GlobalObject.cpp b/lib/VM/JSLib/GlobalObject.cpp index 7b2227406fd..6803bf7c4bc 100644 --- a/lib/VM/JSLib/GlobalObject.cpp +++ b/lib/VM/JSLib/GlobalObject.cpp @@ -238,7 +238,7 @@ CallResult parseFloat(void *, Runtime *runtime, NativeArgs args) { /// Customized global function. gc() forces a GC collect. CallResult gc(void *, Runtime *runtime, NativeArgs) { - runtime->collect(); + runtime->collect("forced"); return HermesValue::encodeUndefinedValue(); } diff --git a/lib/VM/Runtime.cpp b/lib/VM/Runtime.cpp index 2c2e46e3ac3..3ea525474d2 100644 --- a/lib/VM/Runtime.cpp +++ b/lib/VM/Runtime.cpp @@ -1906,7 +1906,7 @@ HiddenClass *Runtime::resolveHiddenClassId(ClassId classId) { #ifdef HERMESVM_SERIALIZE void Runtime::serialize(Serializer &s) { // Full GC here. 
- heap_.collect(); + heap_.collect("serialize"); s.writeCurrentOffset(); heap_.serializeWeakRefs(s); @@ -2227,12 +2227,17 @@ StackTracesTreeNode *Runtime::getCurrentStackTracesTreeNode( return stackTracesTree_->getStackTrace(this, codeBlock, ip); } -void Runtime::enableAllocationLocationTracker() { +void Runtime::enableAllocationLocationTracker( + std::function)> + fragmentCallback) { if (!stackTracesTree_) { stackTracesTree_ = make_unique(); } stackTracesTree_->syncWithRuntimeStack(this); - heap_.getAllocationLocationTracker().enable(); + heap_.getAllocationLocationTracker().enable(std::move(fragmentCallback)); } void Runtime::disableAllocationLocationTracker(bool clearExistingTree) { @@ -2266,7 +2271,11 @@ StackTracesTreeNode *Runtime::getCurrentStackTracesTreeNode( return nullptr; } -void Runtime::enableAllocationLocationTracker() {} +void Runtime::enableAllocationLocationTracker( + std::function)>) {} void Runtime::disableAllocationLocationTracker(bool) {} diff --git a/lib/VM/StackTracesTree.cpp b/lib/VM/StackTracesTree.cpp index 7d1e8cfa7ad..8c185b61ef5 100644 --- a/lib/VM/StackTracesTree.cpp +++ b/lib/VM/StackTracesTree.cpp @@ -84,8 +84,8 @@ void StackTracesTreeNode::addMapping( StackTracesTree::StackTracesTree() : strings_(std::make_shared()), - invalidFunctionID_(strings_->insert("(invalid function name)")), - invalidScriptNameID_(strings_->insert("(invalid script name)")), + rootFunctionID_(strings_->insert("(root)")), + rootScriptNameID_(strings_->insert("")), nativeFunctionID_(strings_->insert("(native)")), anonymousFunctionID_(strings_->insert("(anonymous)")), head_(root_.get()) {} @@ -93,29 +93,58 @@ StackTracesTree::StackTracesTree() void StackTracesTree::syncWithRuntimeStack(Runtime *runtime) { head_ = root_.get(); - // Copy the frame pointers into a vector so we can iterate over them in - // reverse. - std::vector frames( - runtime->getStackFrames().begin(), runtime->getStackFrames().end()); + const StackFramePtr framesEnd = *runtime->getStackFrames().end(); + std::vector> stack; - auto frameIt = frames.rbegin(); - if (frameIt == frames.rend()) { + // Walk the current stack, and call pushCallStack for each JS frame (not + // native frames). The current frame is not included, because any allocs after + // this point will call pushCallStack which will get the most recent IP. Each + // stack frame tracks information about the caller. + for (StackFramePtr cf : runtime->getStackFrames()) { + CodeBlock *savedCodeBlock = cf.getSavedCodeBlock(); + const Inst *savedIP = cf.getSavedIP(); + // Go up one frame and get the callee code block but use the current + // frame's saved IP. This also allows us to account for bound functions, + // which have savedCodeBlock == nullptr in order to allow proper returns in + // the interpreter. + StackFramePtr prev = cf.getPreviousFrame(); + if (prev != framesEnd) { + if (CodeBlock *parentCB = prev.getCalleeCodeBlock()) { + assert( + !savedCodeBlock || + savedCodeBlock == parentCB && + "If savedCodeBlock is non-null, it should match the parent's " + "callee code block"); + savedCodeBlock = parentCB; + } + } else { + // The last frame is the entry into the global function, use the callee + // code block instead of the caller. + // TODO: This leaves an extra global call frame that doesn't make any + // sense laying around. But that matches the behavior of enabling from the + // beginning. When a fix for the non-synced version is found, remove this + // branch as well. 
+ savedCodeBlock = cf.getCalleeCodeBlock(); + savedIP = savedCodeBlock->getOffsetPtr(0); + } + stack.emplace_back(savedCodeBlock, savedIP); + } + + // If the stack is empty, avoid the rend - 1 comparison issue by returning + // early. + if (stack.empty()) { return; } - // Stack frames tells us the current CodeBlock and the _previous_ IP. So we - // treat the first frame specially using the IP of the first bytecode in - // the CodeBlock. - const CodeBlock *codeBlock = (*frameIt)->getCalleeCodeBlock(); - pushCallStack(runtime, codeBlock, codeBlock->getOffsetPtr(0)); - ++frameIt; - // The rend() - 1 is to skip the last frame for now as the only way to - // enable allocationLocationTracker is via a native call triggered from JS. In - // future we may need to change this depending on how and when tracking is - // enabled. - for (; codeBlock && frameIt < frames.rend() - 1; ++frameIt) { - const Inst *ip = (*frameIt)->getSavedIP(); - pushCallStack(runtime, codeBlock, ip); - codeBlock = (*frameIt)->getCalleeCodeBlock(); + + // Iterate over the stack in reverse to push calls. The final frame is ignored + // because that is the native frame where enableAllocationLocationTracker is + // called, which isn't poppped. + for (auto it = stack.rbegin(); it != stack.rend() - 1; ++it) { + // Check that both the code block and ip are non-null, which means it was a + // JS frame, and not a native frame. + if (it->first && it->second) { + pushCallStack(runtime, it->first, it->second); + } } } @@ -142,6 +171,7 @@ StackTracesTreeNode::SourceLoc StackTracesTree::computeSourceLoc( // report unknown. RuntimeModule *runtimeModule = codeBlock->getRuntimeModule(); std::string scriptName; + auto scriptID = runtimeModule->getScriptID(); int32_t lineNo, columnNo; if (location) { scriptName = runtimeModule->getBytecode()->getDebugInfo()->getFilenameByID( @@ -154,7 +184,7 @@ StackTracesTreeNode::SourceLoc StackTracesTree::computeSourceLoc( lineNo = -1; columnNo = -1; } - return {strings_->insert(scriptName), lineNo, columnNo}; + return {strings_->insert(scriptName), scriptID, lineNo, columnNo}; } void StackTracesTree::pushCallStack( diff --git a/lib/VM/gcs/AlignedHeapSegment.cpp b/lib/VM/gcs/AlignedHeapSegment.cpp index 8c0347f47f7..9e245e03839 100644 --- a/lib/VM/gcs/AlignedHeapSegment.cpp +++ b/lib/VM/gcs/AlignedHeapSegment.cpp @@ -297,8 +297,10 @@ void AlignedHeapSegment::deleteDeadObjectIDs(GC *gc) { forAllObjs([&markBits, &idTracker, &allocationLocationTracker]( const GCCell *cell) { if (!markBits.at(markBits.addressToIndex(cell))) { + // The allocation tracker needs to use the ID, so this needs to come + // before untrackObject. 
+ allocationLocationTracker.freeAlloc(cell, cell->getAllocatedSize()); idTracker.untrackObject(cell); - allocationLocationTracker.freeAlloc(cell); } }); } diff --git a/lib/VM/gcs/GenGCNC.cpp b/lib/VM/gcs/GenGCNC.cpp index 67fbfbce6fd..babc1ca805f 100644 --- a/lib/VM/gcs/GenGCNC.cpp +++ b/lib/VM/gcs/GenGCNC.cpp @@ -295,7 +295,7 @@ void GenGC::ttiReached() { } } -void GenGC::collect(bool canEffectiveOOM) { +void GenGC::collect(std::string cause, bool canEffectiveOOM) { assert(noAllocLevel_ == 0 && "no GC allowed right now"); if (canEffectiveOOM && ++consecFullGCs_ >= oomThreshold_) oom(make_error_code(OOMError::Effective)); @@ -341,7 +341,8 @@ void GenGC::collect(bool canEffectiveOOM) { << ") garbage collection # " << numGCs() << "\n"); { - CollectionSection fullCollection(this, "Full collection", gcCallbacks_); + CollectionSection fullCollection( + this, "Full collection", std::move(cause), gcCallbacks_); fullCollection.addArg("fullGCUsedBefore", usedBefore); fullCollection.addArg("fullGCSizeBefore", sizeBefore); @@ -1486,10 +1487,12 @@ void GenGC::printFullCollectionStats(JSONEmitter &json) const { GenGC::CollectionSection::CollectionSection( GenGC *gc, const char *name, + std::string cause, OptValue gcCallbacksOpt) : PerfSection(name, gc->getName().c_str()), gc_(gc), cycle_(gc, gcCallbacksOpt, name), + cause_(std::move(cause)), wallStart_(steady_clock::now()), cpuStart_(oscompat::thread_cpu_time()), gcUsedBefore_(gc->usedDirect()), @@ -1550,6 +1553,7 @@ void GenGC::CollectionSection::recordGCStats( gc_->getName(), "gengc", cycle_.extraInfo(), + std::move(cause_), std::chrono::duration_cast( wallEnd - wallStart_), std::chrono::duration_cast(cpuEnd - cpuStart_), diff --git a/lib/VM/gcs/HadesGC.cpp b/lib/VM/gcs/HadesGC.cpp index c681d7b5536..67bae9630cc 100644 --- a/lib/VM/gcs/HadesGC.cpp +++ b/lib/VM/gcs/HadesGC.cpp @@ -202,8 +202,8 @@ class HadesGC::CollectionStats final { using TimePoint = std::chrono::time_point; using Duration = std::chrono::microseconds; - CollectionStats(HadesGC *gc, std::string extraInfo = "") - : gc_{gc}, extraInfo_{std::move(extraInfo)} {} + CollectionStats(HadesGC *gc, std::string cause, std::string extraInfo = "") + : gc_{gc}, cause_{std::move(cause)}, extraInfo_{std::move(extraInfo)} {} ~CollectionStats(); /// Record the allocated bytes in the heap and its size before a collection @@ -251,6 +251,7 @@ class HadesGC::CollectionStats final { private: HadesGC *gc_; + std::string cause_; std::string extraInfo_; TimePoint beginTime_{}; TimePoint endTime_{}; @@ -265,6 +266,7 @@ HadesGC::CollectionStats::~CollectionStats() { gc_->getName(), kHeapNameForAnalytics, extraInfo_, + std::move(cause_), std::chrono::duration_cast( endTime_ - beginTime_), std::chrono::duration_cast(cpuDuration_), @@ -900,7 +902,7 @@ void HadesGC::printStats(JSONEmitter &json) { json.closeDict(); } -void HadesGC::collect() { +void HadesGC::collect(std::string cause) { { // Wait for any existing collections to finish before starting a new one. std::lock_guard lk{gcMutex_}; @@ -908,7 +910,7 @@ void HadesGC::collect() { } // This function should block until a collection finishes. // YG needs to be empty in order to do an OG collection. - youngGenCollection(/*forceOldGenCollection*/ true); + youngGenCollection(std::move(cause), /*forceOldGenCollection*/ true); // Wait for the collection to complete. 
std::lock_guard lk{gcMutex_}; waitForCollectionToFinish(); @@ -953,7 +955,7 @@ void HadesGC::waitForCollectionToFinish() { lk.release(); } -void HadesGC::oldGenCollection() { +void HadesGC::oldGenCollection(std::string cause) { // Full collection: // * Mark all live objects by iterating through a worklist. // * Sweep dead objects onto the free lists. @@ -975,7 +977,8 @@ void HadesGC::oldGenCollection() { // any) in addition to creating a new CollectionStats. It is desirable to // call the destructor here so that the analytics callback is invoked from the // mutator thread. - ogCollectionStats_ = llvh::make_unique(this, "old"); + ogCollectionStats_ = + llvh::make_unique(this, std::move(cause), "old"); // NOTE: Leave CPU time as zero if the collection isn't concurrent, as the // times aren't useful. auto cpuTimeStart = oscompat::thread_cpu_time(); @@ -1232,16 +1235,18 @@ void HadesGC::sweep() { if (HeapSegment::getCellMarkBit(cell)) { return; } - sweptBytes += cell->getAllocatedSize(); + const auto sz = cell->getAllocatedSize(); + sweptBytes += sz; // Cell is dead, run its finalizer first if it has one. cell->getVT()->finalizeIfExists(cell, this); oldGen_.addCellToFreelist(cell); if (isTracking) { - // There is no race condition here, because the object has already been - // determined to be dead, so nothing can be accessing it, or asking for - // its ID. + // FIXME: There could be a race condition here if newAlloc is being + // called at the same time and using a shared data structure with + // freeAlloc. + // freeAlloc relies on the ID, so call it before untrackObject. + getAllocationLocationTracker().freeAlloc(cell, sz); getIDTracker().untrackObject(cell); - getAllocationLocationTracker().freeAlloc(cell); } }); @@ -1536,12 +1541,14 @@ void *HadesGC::allocWork(uint32_t sz) { // more interesting aspect of Hades which is concurrent background // collections. So instead, do a youngGenCollection which force-starts an // oldGenCollection if one is not already running. - youngGenCollection(/*forceOldGenCollection*/ true); + youngGenCollection( + kHandleSanCauseForAnalytics, /*forceOldGenCollection*/ true); } AllocResult res = youngGen().youngGenBumpAlloc(sz); if (LLVM_UNLIKELY(!res.success)) { // Failed to alloc in young gen, do a young gen collection. - youngGenCollection(/*forceOldGenCollection*/ false); + youngGenCollection( + kNaturalCauseForAnalytics, /*forceOldGenCollection*/ false); res = youngGen().youngGenBumpAlloc(sz); assert(res.success && "Should never fail to allocate"); } @@ -1706,8 +1713,10 @@ GCCell *HadesGC::OldGen::search(uint32_t sz) { return nullptr; } -void HadesGC::youngGenCollection(bool forceOldGenCollection) { - CollectionStats stats{this, "young"}; +void HadesGC::youngGenCollection( + std::string cause, + bool forceOldGenCollection) { + CollectionStats stats{this, cause, "young"}; auto cpuTimeStart = oscompat::thread_cpu_time(); stats.setBeginTime(); stats.setBeforeSizes(youngGen().used(), youngGen().size()); @@ -1770,9 +1779,11 @@ void HadesGC::youngGenCollection(bool forceOldGenCollection) { auto *fptr = cell->getMarkedForwardingPointer(); ptr += reinterpret_cast(fptr)->getAllocatedSize(); } else { - ptr += cell->getAllocatedSize(); + const auto sz = cell->getAllocatedSize(); + ptr += sz; + // Have to call freeAlloc before untrackObject. 
+ getAllocationLocationTracker().freeAlloc(cell, sz); getIDTracker().untrackObject(cell); - getAllocationLocationTracker().freeAlloc(cell); } } } @@ -1801,7 +1812,7 @@ void HadesGC::youngGenCollection(bool forceOldGenCollection) { ygAverageSurvivalRatio_.update(stats.survivalRatio()); if (concurrentPhase_ == Phase::None) { if (forceOldGenCollection) { - oldGenCollection(); + oldGenCollection(std::move(cause)); } else { // If the OG is sufficiently full after the collection finishes, begin // an OG collection. @@ -1810,7 +1821,7 @@ void HadesGC::youngGenCollection(bool forceOldGenCollection) { constexpr double kCollectionThreshold = 0.75; double allocatedRatio = static_cast(totalAllocated) / totalBytes; if (allocatedRatio >= kCollectionThreshold) { - oldGenCollection(); + oldGenCollection(kNaturalCauseForAnalytics); } } } else if (oldGenMarker_) { diff --git a/lib/VM/gcs/MallocGC.cpp b/lib/VM/gcs/MallocGC.cpp index 6039265356c..53c09f29d68 100644 --- a/lib/VM/gcs/MallocGC.cpp +++ b/lib/VM/gcs/MallocGC.cpp @@ -195,7 +195,7 @@ MallocGC::~MallocGC() { } } -void MallocGC::collectBeforeAlloc(uint32_t size) { +void MallocGC::collectBeforeAlloc(std::string cause, uint32_t size) { const auto growSizeLimit = [this, size](gcheapsize_t sizeLimit) { // Either double the size limit, or increase to size, at a max of maxSize_. return std::min(maxSize_, std::max(sizeLimit * 2, size)); @@ -220,7 +220,7 @@ void MallocGC::collectBeforeAlloc(uint32_t size) { #endif // Do a collection if the sanitization of handles is requested or if there // is memory pressure. - collect(); + collect(std::move(cause)); // While we still can't fill the allocation, keep growing. while (allocatedBytes_ >= sizeLimit_ - size) { if (sizeLimit_ == maxSize_) { @@ -253,7 +253,7 @@ void MallocGC::clearUnmarkedPropertyMaps() { } #endif -void MallocGC::collect() { +void MallocGC::collect(std::string cause) { assert(noAllocLevel_ == 0 && "no GC allowed right now"); using std::chrono::steady_clock; LLVM_DEBUG(llvh::dbgs() << "Beginning collection"); @@ -299,10 +299,8 @@ void MallocGC::collect() { assert(!header->isMarked() && "Live pointer left in dead heap section"); #endif GCCell *cell = header->data(); -#ifndef NDEBUG // Extract before running any potential finalizers. const auto freedSize = cell->getAllocatedSize(); -#endif // Run the finalizer if it exists and the cell is actually dead. if (!header->isMarked()) { cell->getVT()->finalizeIfExists(cell, this); @@ -312,14 +310,11 @@ void MallocGC::collect() { ++numFinalizedObjects_; } #endif - } - if (idTracker_.isTrackingIDs() || - allocationLocationTracker_.isEnabled()) { - idTracker_.untrackObject(cell); - allocationLocationTracker_.freeAlloc(cell); - } - if (allocationLocationTracker_.isEnabled()) { - allocationLocationTracker_.freeAlloc(cell); + // Pointers that aren't marked now weren't moved, and are dead instead. 
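
The ordering constraint repeated in these sweep hunks (freeAlloc before untrackObject) exists because freeAlloc looks up the object's existing ID to find the fragment it was charged to; once the ID is untracked, getObjectIDMustExist would assert, and a plain getObjectID would mint a fresh ID and decrement the wrong fragment. A small sketch of the pattern, under the assumption that the in-tree headers are available and that GC names the build's concrete collector; the helper name is hypothetical.

#include "hermes/VM/GC.h"
#include "hermes/VM/GCCell.h"

namespace hermes {
namespace vm {

// Hypothetical dead-cell handler mirroring the sweep loops in this patch.
void untrackDeadCell(GC *gc, const GCCell *cell) {
  const auto sz = cell->getAllocatedSize();
  // Consume the ID while it still exists...
  gc->getAllocationLocationTracker().freeAlloc(cell, sz);
  // ...then it is safe to drop the ID itself.
  gc->getIDTracker().untrackObject(cell);
}

} // namespace vm
} // namespace hermes
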
+ if (isTrackingIDs()) { + allocationLocationTracker_.freeAlloc(cell, freedSize); + idTracker_.untrackObject(cell); + } } #ifndef NDEBUG // Before free'ing, fill with a dead value for debugging @@ -373,6 +368,7 @@ void MallocGC::collect() { getName(), "malloc", "full", + std::move(cause), std::chrono::duration_cast( wallEnd - wallStart), std::chrono::duration_cast(cpuEnd - cpuStart), diff --git a/lib/VM/gcs/OldGenNC.cpp b/lib/VM/gcs/OldGenNC.cpp index 716b6381eab..eca226d435b 100644 --- a/lib/VM/gcs/OldGenNC.cpp +++ b/lib/VM/gcs/OldGenNC.cpp @@ -613,7 +613,7 @@ void OldGen::recordLevelAfterCompaction( AllocResult OldGen::fullCollectThenAlloc( uint32_t allocSize, HasFinalizer hasFinalizer) { - gc_->collect(/* canEffectiveOOM */ true); + gc_->collect(GCBase::kNaturalCauseForAnalytics, /* canEffectiveOOM */ true); { AllocResult res = allocRaw(allocSize, hasFinalizer); if (LLVM_LIKELY(res.success)) { diff --git a/lib/VM/gcs/YoungGenNC.cpp b/lib/VM/gcs/YoungGenNC.cpp index 8f3780d4562..e7cbfbdfa96 100644 --- a/lib/VM/gcs/YoungGenNC.cpp +++ b/lib/VM/gcs/YoungGenNC.cpp @@ -250,7 +250,7 @@ AllocResult YoungGen::fullCollectThenAlloc( uint32_t allocSize, HasFinalizer hasFinalizer, bool fixedSizeAlloc) { - gc_->collect(/* canEffectiveOOM */ true); + gc_->collect(GCBase::kNaturalCauseForAnalytics, /* canEffectiveOOM */ true); { AllocResult res = allocRaw(allocSize, hasFinalizer); if (LLVM_LIKELY(res.success)) { @@ -302,7 +302,10 @@ YoungGen::recordAllocSizes() { void YoungGen::collect() { assert(gc_->noAllocLevel_ == 0 && "no GC allowed right now"); GenGC::CollectionSection ygCollection( - gc_, "YoungGen collection", gc_->getGCCallbacks()); + gc_, + "YoungGen collection", + GCBase::kNaturalCauseForAnalytics, + gc_->getGCCallbacks()); #ifdef HERMES_EXTRA_DEBUG /// Protect the card table boundary table, to detect corrupting mutator @@ -373,17 +376,19 @@ void YoungGen::collect() { nextGen_->youngGenTransitiveClosure(toScan, acceptor); } - if (gc_->getIDTracker().isTrackingIDs()) { - PerfSection fixupTrackedObjectsSystraceRegion("updateIDTracker"); - updateIDTracker(); - } - + // Have to delete allocation tracker before the ID tracker, because the + // allocation tracker uses the ID tracker. if (gc_->getAllocationLocationTracker().isEnabled()) { PerfSection updateAllocationLocationTrackerSystraceRegion( "updateAllocationLocationTracker"); updateAllocationLocationTracker(); } + if (gc_->getIDTracker().isTrackingIDs()) { + PerfSection fixupTrackedObjectsSystraceRegion("updateIDTracker"); + updateIDTracker(); + } + // We've now determined reachability; find weak refs to young-gen // pointers that have become unreachable. auto updateWeakRefsStart = steady_clock::now(); @@ -590,21 +595,24 @@ void YoungGen::updateTrackers() { GCCell *cell = reinterpret_cast(ptr); if (cell->hasMarkedForwardingPointer()) { auto *fptr = cell->getMarkedForwardingPointer(); - if (idTracker) { - gc_->getIDTracker().moveObject(cell, fptr); - } if (allocationLocationTracker) { gc_->getAllocationLocationTracker().moveAlloc(cell, fptr); } + if (idTracker) { + gc_->getIDTracker().moveObject(cell, fptr); + } ptr += reinterpret_cast(fptr)->getAllocatedSize(); } else { - ptr += cell->getAllocatedSize(); + const auto sz = cell->getAllocatedSize(); + ptr += sz; + // The allocation tracker needs to use the ID, so this needs to come + // before untrackObject. 
+      if (allocationLocationTracker) {
+        gc_->getAllocationLocationTracker().freeAlloc(cell, sz);
+      }
       if (idTracker) {
         gc_->getIDTracker().untrackObject(cell);
       }
-      if (allocationLocationTracker) {
-        gc_->getAllocationLocationTracker().freeAlloc(cell);
-      }
     }
   }
 }
diff --git a/public/hermes/Public/GCConfig.h b/public/hermes/Public/GCConfig.h
index e67d83597d6..381e893cc16 100644
--- a/public/hermes/Public/GCConfig.h
+++ b/public/hermes/Public/GCConfig.h
@@ -33,6 +33,7 @@ struct GCAnalyticsEvent {
   std::string runtimeDescription;
   std::string gcKind;
   std::string collectionType;
+  std::string cause;
   std::chrono::milliseconds duration;
   std::chrono::milliseconds cpuDuration;
   uint64_t preAllocated;
diff --git a/unittests/API/HeapSnapshotAPITest.cpp b/unittests/API/HeapSnapshotAPITest.cpp
new file mode 100644
index 00000000000..4da9c32f3ac
--- /dev/null
+++ b/unittests/API/HeapSnapshotAPITest.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#ifdef HERMES_ENABLE_DEBUGGER
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+using namespace facebook::jsi;
+using namespace facebook::hermes;
+
+class HeapSnapshotAPITest : public ::testing::TestWithParam<bool> {
+ public:
+  HeapSnapshotAPITest()
+      : rt(makeHermesRuntime(
+            ::hermes::vm::RuntimeConfig::Builder()
+                .withGCConfig(::hermes::vm::GCConfig::Builder()
+                                  .withAllocationLocationTrackerFromStart(
+                                      trackingFromBeginning())
+                                  .build())
+                .withES6Proxy(true)
+                .build())) {}
+
+ protected:
+  Value eval(const char *code) {
+    return rt->evaluateJavaScript(
+        std::make_unique<StringBuffer>(code), "test.js");
+  }
+
+  bool trackingFromBeginning() const {
+    return GetParam();
+  }
+
+  void startTrackingHeapObjects() {
+    if (!trackingFromBeginning()) {
+      rt->instrumentation().startTrackingHeapObjectStackTraces(nullptr);
+    }
+  }
+
+  void stopTrackingHeapObjects() {
+    rt->instrumentation().stopTrackingHeapObjectStackTraces();
+  }
+
+  std::shared_ptr<HermesRuntime> rt;
+};
+
+static std::string functionInfoToString(
+    int idx,
+    const folly::dynamic &traceFunctionInfos,
+    const folly::dynamic &strings) {
+  auto it = traceFunctionInfos.begin() + idx * 6;
+  auto functionID = it->asInt();
+  auto name = strings.at((it + 1)->asInt()).asString();
+  auto scriptName = strings.at((it + 2)->asInt()).asString();
+  auto scriptID = (it + 3)->asInt();
+  auto line = (it + 4)->asInt();
+  auto col = (it + 5)->asInt();
+
+  std::ostringstream os;
+  os << name << "(" << functionID << ") @ " << scriptName << "(" << scriptID
+     << "):" << line << ":" << col << "";
+  return os.str();
+}
+
+struct ChromeStackTreeNode {
+  ChromeStackTreeNode(ChromeStackTreeNode *parent, int traceFunctionInfosId)
+      : parent_(parent), traceFunctionInfosId_(traceFunctionInfosId) {}
+
+  /// Recursively builds up a tree of trace nodes, and inserts a pair of (trace
+  /// node id, pointer to node in tree) into \p idNodeMap.
+  /// WARN: The return value of this function keeps the node pointers alive. It
+  /// must outlive \p idNodeMap or else \p idNodeMap will have dangling
+  /// pointers to the nodes.
+  static std::vector<std::unique_ptr<ChromeStackTreeNode>> parse(
+      const folly::dynamic &traceNodes,
+      ChromeStackTreeNode *parent,
+      std::map<int, ChromeStackTreeNode *> &idNodeMap) {
+    std::vector<std::unique_ptr<ChromeStackTreeNode>> res;
+    for (auto node = traceNodes.begin(); node != traceNodes.end(); node += 5) {
+      auto id = node->asInt();
+      auto functionInfoIndex = (node + 1)->asInt();
+      folly::dynamic children = *(node + 4);
+      auto treeNode =
+          std::make_unique<ChromeStackTreeNode>(parent, functionInfoIndex);
+      idNodeMap.emplace(id, treeNode.get());
+      treeNode->children_ = parse(children, treeNode.get(), idNodeMap);
+      res.emplace_back(std::move(treeNode));
+    }
+    return res;
+  };
+
+  std::string buildStackTrace(
+      const folly::dynamic &traceFunctionInfos,
+      const folly::dynamic &strings) {
+    std::string res =
+        parent_ ? parent_->buildStackTrace(traceFunctionInfos, strings) : "";
+    res += "\n" +
+        functionInfoToString(
+            traceFunctionInfosId_, traceFunctionInfos, strings);
+    return res;
+  };
+
+ private:
+  ChromeStackTreeNode *parent_;
+  int traceFunctionInfosId_;
+  std::vector<std::unique_ptr<ChromeStackTreeNode>> children_;
+};
+
+TEST_P(HeapSnapshotAPITest, HeapTimeline) {
+  startTrackingHeapObjects();
+  facebook::jsi::Function alloc = eval("function alloc() { return {}; }; alloc")
+                                      .asObject(*rt)
+                                      .asFunction(*rt);
+  facebook::jsi::Object obj = alloc.call(*rt).asObject(*rt);
+  const uint64_t objID = rt->getUniqueID(obj);
+
+  std::ostringstream os;
+  rt->instrumentation().collectGarbage("test");
+  rt->instrumentation().createSnapshotToStream(os);
+  stopTrackingHeapObjects();
+
+  const std::string heapTimeline = os.str();
+  folly::dynamic json = folly::parseJson(heapTimeline);
+  ASSERT_TRUE(json.isObject());
+  auto it = json.find("strings");
+  ASSERT_NE(it, json.items().end());
+  auto strings = it->second;
+  it = json.find("nodes");
+  ASSERT_NE(it, json.items().end());
+  auto nodes = it->second;
+  it = json.find("trace_tree");
+  ASSERT_NE(it, json.items().end());
+  auto traceTree = it->second;
+  it = json.find("trace_function_infos");
+  ASSERT_NE(it, json.items().end());
+  auto traceFunctionInfos = it->second;
+
+  // The root node should be the only thing at the top of the tree. There are 5
+  // fields per single node, and the last field is a children array.
+  EXPECT_EQ(traceTree.size(), 5)
+      << "There should never be more than a single 5-tuple at the beginning of "
+         "the trace tree";
+
+  // Search nodes for the objID.
+  const auto nodeTupleSize = 6;
+  const auto nodeIDFieldIndex = 2;
+  const auto nodeTraceIDFieldIndex = 5;
+  uint64_t traceNodeID = 0;
+  ASSERT_EQ(nodes.size() % nodeTupleSize, 0)
+      << "Nodes array must consist of tuples";
+  for (auto node = nodes.begin(); node != nodes.end(); node += nodeTupleSize) {
+    if (static_cast<uint64_t>((node + nodeIDFieldIndex)->asInt()) == objID) {
+      traceNodeID = (node + nodeTraceIDFieldIndex)->asInt();
+      EXPECT_NE(traceNodeID, 0ul) << "Object in node graph has a zero trace ID";
+      break;
+    }
+  }
+  ASSERT_NE(traceNodeID, 0ul) << "Object not found in nodes graph";
+
+  // Now use the trace node ID to locate the corresponding stack.
+  std::map<int, ChromeStackTreeNode *> idNodeMap;
+  auto roots = ChromeStackTreeNode::parse(traceTree, nullptr, idNodeMap);
+  (void)roots;
+  auto stackTreeNode = idNodeMap.find(traceNodeID);
+  ASSERT_NE(stackTreeNode, idNodeMap.end());
+  EXPECT_EQ(
+      stackTreeNode->second->buildStackTrace(traceFunctionInfos, strings),
+      R"#(
+(root)(0) @ (0):0:0
+global(1) @ test.js(1):1:1
+alloc(2) @ test.js(1):1:27)#");
+}
+
+INSTANTIATE_TEST_CASE_P(
+    WithOrWithoutAllocationTracker,
+    HeapSnapshotAPITest,
+    ::testing::Bool());
+
+#endif
diff --git a/unittests/Support/CMakeLists.txt b/unittests/Support/CMakeLists.txt
index 1609388c944..25a669caf42 100644
--- a/unittests/Support/CMakeLists.txt
+++ b/unittests/Support/CMakeLists.txt
@@ -19,7 +19,6 @@ set(SupportSources
   RegexTest.cpp
   SNPrintfBufTest.cpp
   SourceErrorManagerTest.cpp
-  SamplingThreadTest.cpp
   StatsAccumulatorTest.cpp
   StringKindTest.cpp
   StringSetVectorTest.cpp
diff --git a/unittests/Support/SamplingThreadTest.cpp b/unittests/Support/SamplingThreadTest.cpp
deleted file mode 100644
index fa2c34193a2..00000000000
--- a/unittests/Support/SamplingThreadTest.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-#include
-
-#include "hermes/Support/SamplingThread.h"
-
-namespace {
-
-TEST(SamplingThreadTest, Counter) {
-  constexpr int kStart = 100;
-  constexpr int kStep = 2;
-  // Chosen to make the test take O(100ms) on my test machine,
-  constexpr int kIterations = 10 * 1000 * 1000;
-
-  std::atomic<int> counter{kStart};
-  hermes::SamplingThread<int> sampler{counter, std::chrono::milliseconds(1)};
-
-  for (unsigned i = 0; i < kIterations; ++i) {
-    counter.fetch_add(kStep, std::memory_order_relaxed);
-  }
-
-  auto samples = sampler.stop();
-  for (unsigned i = 0; i < samples.size(); ++i) {
-    // Sampled values should be even numbers within the expected range.
-    int v = samples[i].second;
-    EXPECT_LE(kStart, v);
-    EXPECT_LE(v, counter.load());
-    EXPECT_EQ(0, v % kStep);
-
-    // Both the timestamps and observed counts should progress monotonically.
-    if (i > 0) {
-      EXPECT_LE(samples[i - 1].first, samples[i].first);
-      EXPECT_LE(samples[i - 1].second, samples[i].second);
-    }
-  }
-}
-
-} // namespace
diff --git a/unittests/VMRuntime/ArrayStorageTest.cpp b/unittests/VMRuntime/ArrayStorageTest.cpp
index 2d9988f4bdc..fc0d18aa2be 100644
--- a/unittests/VMRuntime/ArrayStorageTest.cpp
+++ b/unittests/VMRuntime/ArrayStorageTest.cpp
@@ -122,7 +122,7 @@ TEST_F(ArrayStorageTest, AllowTrimming) {
   // Now force some GCs to happen.
   for (auto i = 0; i < 2; i++) {
-    runtime->collect();
+    runtime->collect("test");
   }
   // The array should be trimmed.
diff --git a/unittests/VMRuntime/DecoratedObjectTest.cpp b/unittests/VMRuntime/DecoratedObjectTest.cpp
index b000d115daf..ff2cb603d5c 100644
--- a/unittests/VMRuntime/DecoratedObjectTest.cpp
+++ b/unittests/VMRuntime/DecoratedObjectTest.cpp
@@ -35,15 +35,15 @@ TEST_F(DecoratedObjectTest, DecoratedObjectFinalizerRunsOnce) {
         runtime,
         Handle::vmcast(&runtime->objectPrototype),
         llvh::make_unique(counter)));
-    runtime->getHeap().collect();
+    runtime->collect("test");
    // should not have been finalized yet
     EXPECT_EQ(0, *counter);
   }
   // should finalize once
-  runtime->getHeap().collect();
+  runtime->collect("test");
   EXPECT_EQ(1, *counter);
-  runtime->getHeap().collect();
-  runtime->getHeap().collect();
+  runtime->collect("test");
+  runtime->collect("test");
   EXPECT_EQ(1, *counter);
 }
@@ -61,7 +61,7 @@ TEST_F(DecoratedObjectTest, ChangeDecoration) {
     // Old decoration was deallocated.
     EXPECT_EQ(1, *counter);
   }
-  runtime->getHeap().collect();
+  runtime->collect("test");
   // Old and new deallocated.
   EXPECT_EQ(2, *counter);
 }
@@ -74,7 +74,7 @@ TEST_F(DecoratedObjectTest, NullDecoration) {
         runtime, Handle::vmcast(&runtime->objectPrototype), nullptr));
     EXPECT_EQ(nullptr, handle->getDecoration());
   }
-  runtime->getHeap().collect();
+  runtime->collect("test");
 }
 TEST_F(DecoratedObjectTest, AdditionalSlots) {
@@ -92,7 +92,7 @@ TEST_F(DecoratedObjectTest, AdditionalSlots) {
   EXPECT_NE(strRes, ExecutionStatus::EXCEPTION);
   DecoratedObject::setAdditionalSlotValue(*handle, runtime, 1, *strRes);
   // Verify slot values survive GC.
-  runtime->getHeap().collect();
+  runtime->collect("test");
   EXPECT_EQ(
       DecoratedObject::getAdditionalSlotValue(*handle, runtime, 0).getNumber(),
       10);
diff --git a/unittests/VMRuntime/ExternalMemAccountingTest.cpp b/unittests/VMRuntime/ExternalMemAccountingTest.cpp
index 7877d423cd4..fe3a7a04b58 100644
--- a/unittests/VMRuntime/ExternalMemAccountingTest.cpp
+++ b/unittests/VMRuntime/ExternalMemAccountingTest.cpp
@@ -128,7 +128,7 @@ TEST_P(ExtMemTests, ExtMemInOldByAllocTest) {
     roots.push_back(YoungGenCell::create(rt));
     rt.pointerRoots.push_back(&roots.back());
   }
-  gc.collect();
+  rt.collect();
   size_t ygs0 = gc.numYoungGCs();
   EXPECT_EQ(1, gc.numFullGCs());
   // The OG now has 2 free segments.
@@ -152,7 +152,7 @@ TEST_P(ExtMemTests, ExtMemInOldByAllocTest) {
   rt.pointerRoots.push_back(&roots.back());
   // Get it in the old generation.
-  gc.collect();
+  rt.collect();
   EXPECT_EQ(ygs0 + 1, gc.numYoungGCs());
   EXPECT_EQ(2, gc.numFullGCs());
@@ -197,7 +197,7 @@ TEST_P(ExtMemTests, ExtMemInOldDirectTest) {
     roots.push_back(YoungGenCell::create(rt));
     rt.pointerRoots.push_back(&roots.back());
   }
-  gc.collect();
+  rt.collect();
   size_t ygs0 = gc.numYoungGCs();
   EXPECT_EQ(1, gc.numFullGCs());
   // The OG now has 2 free segments.
@@ -245,7 +245,6 @@ TEST(ExtMemNonParamTests, ExtMemDoesNotBreakFullGC) {
   auto runtime = DummyRuntime::create(getMetadataTable(), gcConfig);
   DummyRuntime &rt = *runtime;
-  auto &gc = rt.gc;
   using SegmentSizeCell = EmptyCell;
@@ -275,7 +274,7 @@ TEST(ExtMemNonParamTests, ExtMemDoesNotBreakFullGC) {
   // test any results here, beyond the GC completing successfully.
   // (At one point, there was a bug that caused it to OOM in this
   // situation -- this is a regression test.)
-  gc.collect();
+  rt.collect();
 }
 TEST(ExtMemNonParamDeathTest, SaturateYoungGen) {
diff --git a/unittests/VMRuntime/GCBasicsTest.cpp b/unittests/VMRuntime/GCBasicsTest.cpp
index 0d5968a5a43..0845991a67c 100644
--- a/unittests/VMRuntime/GCBasicsTest.cpp
+++ b/unittests/VMRuntime/GCBasicsTest.cpp
@@ -118,7 +118,7 @@ TEST_F(GCBasicsTest, SmokeTest) {
   ASSERT_EQ(0u, info.allocatedBytes);
   // Collect an empty heap.
-  gc.collect();
+  rt.collect();
   gc.getHeapInfo(info);
   gc.getDebugHeapInfo(debugInfo);
   ASSERT_EQ(0u, debugInfo.numAllocatedObjects);
@@ -140,7 +140,7 @@ TEST_F(GCBasicsTest, SmokeTest) {
   ASSERT_EQ(sizeof(Dummy), info.allocatedBytes);
   // Now free the unreachable object.
-  gc.collect();
+  rt.collect();
   gc.getHeapInfo(info);
   gc.getDebugHeapInfo(debugInfo);
   ASSERT_EQ(0u, debugInfo.numAllocatedObjects);
@@ -164,7 +164,7 @@ TEST_F(GCBasicsTest, SmokeTest) {
   // Make only the second object reachable and collect.
   rt.pointerRoots.push_back(&o2);
-  gc.collect();
+  rt.collect();
   gc.getHeapInfo(info);
   gc.getDebugHeapInfo(debugInfo);
   ASSERT_EQ(1u, debugInfo.numAllocatedObjects);
@@ -205,7 +205,7 @@ TEST_F(GCBasicsTest, MovedObjectTest) {
   a1->values()[0].set(HermesValue::encodeObjectValue(a1), &gc);
   a1->values()[1].set(HermesValue::encodeObjectValue(a2), &gc);
-  gc.collect();
+  rt.collect();
   totalAlloc -= heapAlignSize(Array::allocSize(0));
   gc.getHeapInfo(info);
   gc.getDebugHeapInfo(debugInfo);
@@ -306,7 +306,7 @@ TEST_F(GCBasicsTest, WeakRefTest) {
   a1 = nullptr;
   mtx.unlock();
-  gc.collect();
+  rt.collect();
   gc.getDebugHeapInfo(debugInfo);
   mtx.lock();
   EXPECT_EQ(1u, debugInfo.numAllocatedObjects);
@@ -319,7 +319,7 @@ TEST_F(GCBasicsTest, WeakRefTest) {
   // Make the slot unreachable and test that it is freed.
   mtx.unlock();
   rt.markExtraWeak = [&](WeakRefAcceptor &acceptor) { acceptor.accept(wr2); };
-  gc.collect();
+  rt.collect();
   mtx.lock();
   ASSERT_EQ(WeakSlotState::Free, wr1.unsafeGetSlot()->state());
@@ -469,7 +469,7 @@ TEST_F(GCBasicsTest, TestIDPersistsAcrossCollections) {
   GCScope scope{&rt};
   auto handle = rt.makeHandle(Dummy::create(rt));
   const auto idBefore = rt.getHeap().getObjectID(*handle);
-  rt.getHeap().collect();
+  rt.collect();
   const auto idAfter = rt.getHeap().getObjectID(*handle);
   EXPECT_EQ(idBefore, idAfter);
 }
@@ -478,7 +478,7 @@ TEST_F(GCBasicsTest, TestIDDeathInYoung) {
   GCScope scope{&rt};
   rt.getHeap().getObjectID(Dummy::create(rt));
-  rt.getHeap().collect();
+  rt.collect();
   // ~DummyRuntime will verify all pointers in ID map.
 }
@@ -489,7 +489,7 @@ TEST(GCCallbackTest, TestCallbackInvoked) {
   GCConfig config = GCConfig::Builder().withCallback(cb).build();
   auto rt =
       Runtime::create(RuntimeConfig::Builder().withGCConfig(config).build());
-  rt->collect();
+  rt->collect("test");
   // Hades will record the YG and OG collections as separate events.
 #ifndef HERMESVM_GC_HADES
   EXPECT_EQ(2, ev.size());
diff --git a/unittests/VMRuntime/GCFinalizerTest.cpp b/unittests/VMRuntime/GCFinalizerTest.cpp
index a469cf60c14..4ca0369bb74 100644
--- a/unittests/VMRuntime/GCFinalizerTest.cpp
+++ b/unittests/VMRuntime/GCFinalizerTest.cpp
@@ -83,12 +83,11 @@ TEST(GCFinalizerTest, NoDeadFinalizables) {
   int finalized = 0;
   auto runtime = DummyRuntime::create(getMetadataTable(), kTestGCConfigSmall);
   DummyRuntime &rt = *runtime;
-  auto &gc = rt.gc;
   DummyCell::create(rt);
   GCCell *r = FinalizerCell::create(rt, &finalized);
   rt.pointerRoots.push_back(&r);
-  gc.collect();
+  rt.collect();
   ASSERT_EQ(0, finalized);
 }
@@ -97,12 +96,11 @@ TEST(GCFinalizerTest, FinalizablesOnly) {
   int finalized = 0;
   auto runtime = DummyRuntime::create(getMetadataTable(), kTestGCConfigSmall);
   DummyRuntime &rt = *runtime;
-  auto &gc = rt.gc;
   FinalizerCell::create(rt, &finalized);
   GCCell *r = FinalizerCell::create(rt, &finalized);
   rt.pointerRoots.push_back(&r);
-  gc.collect();
+  rt.collect();
   ASSERT_EQ(1, finalized);
 }
@@ -111,7 +109,6 @@ TEST(GCFinalizerTest, MultipleCollect) {
   int finalized = 0;
   auto runtime = DummyRuntime::create(getMetadataTable(), kTestGCConfigSmall);
   DummyRuntime &rt = *runtime;
-  auto &gc = rt.gc;
   FinalizerCell::create(rt, &finalized);
   DummyCell::create(rt);
@@ -120,12 +117,12 @@ TEST(GCFinalizerTest, MultipleCollect) {
   GCCell *r2 = DummyCell::create(rt);
   rt.pointerRoots.push_back(&r1);
   rt.pointerRoots.push_back(&r2);
-  gc.collect();
+  rt.collect();
   ASSERT_EQ(2, finalized);
   rt.pointerRoots.clear();
-  gc.collect();
+  rt.collect();
   ASSERT_EQ(3, finalized);
 }
@@ -134,7 +131,6 @@ TEST(GCFinalizerTest, FinalizeAllOnRuntimeDestructDummyRuntime) {
   int finalized = 0;
   {
     auto rt = DummyRuntime::create(getMetadataTable(), kTestGCConfigSmall);
-    auto &gc = rt->gc;
     GCCell *r1 = FinalizerCell::create(*rt, &finalized);
     GCCell *r2 = FinalizerCell::create(*rt, &finalized);
@@ -143,8 +139,8 @@ TEST(GCFinalizerTest, FinalizeAllOnRuntimeDestructDummyRuntime) {
     // Collect once to get the objects into the old gen, then a second time
     // to get their mark bits set in their stable locations.
-    gc.collect();
-    gc.collect();
+    rt->collect();
+    rt->collect();
     ASSERT_EQ(0, finalized);
     // Now: does destructing the runtime with set mark bits run all the
@@ -158,7 +154,6 @@ TEST(GCFinalizerTest, FinalizeAllOnRuntimeDestructRealRuntime) {
   std::shared_ptr<Runtime> rt{Runtime::create(kTestRTConfig)};
   {
     GCScope gcScope(rt.get());
-    auto &gc = rt->getHeap();
     auto r1 = rt->makeHandle(
         HermesValue::encodeObjectValue(FinalizerCell::create(*rt, &finalized)));
@@ -167,8 +162,8 @@ TEST(GCFinalizerTest, FinalizeAllOnRuntimeDestructRealRuntime) {
     // Collect once to get the objects into the old gen, then a second time
     // to get their mark bits set in their stable locations.
-    gc.collect();
-    gc.collect();
+    rt->collect("test");
+    rt->collect("test");
     ASSERT_EQ(0, finalized);
     (void)r1;
     (void)r2;
diff --git a/unittests/VMRuntime/GCFragmentationNCTest.cpp b/unittests/VMRuntime/GCFragmentationNCTest.cpp
index fa74dfe8474..36a22b86d25 100644
--- a/unittests/VMRuntime/GCFragmentationNCTest.cpp
+++ b/unittests/VMRuntime/GCFragmentationNCTest.cpp
@@ -95,7 +95,7 @@ TEST(GCFragmentationNCTest, Test) {
   {
     // (3) Force a full collection to make sure all the allocated cells so far
     // end up in the old generation.
-    gc.collect();
+    rt.collect();
   }
   // The end of the old generation now looks like this:
@@ -173,7 +173,7 @@ TEST(GCFragmentationNCTest, ExternalMemoryTest) {
   {
     // (3) Force a full collection to make sure all the allocated cells so far
     // end up in the old generation. The external memory charge should also
     // be transfered.
-    gc.collect();
+    rt.collect();
     EXPECT_EQ(1, gc.numFullGCs());
   }
diff --git a/unittests/VMRuntime/GCHeapExtentsInCrashManagerTest.cpp b/unittests/VMRuntime/GCHeapExtentsInCrashManagerTest.cpp
index b1e63987ff6..cd39a5fb677 100644
--- a/unittests/VMRuntime/GCHeapExtentsInCrashManagerTest.cpp
+++ b/unittests/VMRuntime/GCHeapExtentsInCrashManagerTest.cpp
@@ -181,7 +181,7 @@ TEST(GCHeapExtentsInCrashManagerTest, HeapExtentsCorrect) {
     roots.pop_back();
     rt.pointerRoots.pop_back();
   }
-  rt.gc.collect();
+  rt.collect();
   EXPECT_EQ(2, testCrashMgr->customData().size());
diff --git a/unittests/VMRuntime/GCMarkWeakTest.cpp b/unittests/VMRuntime/GCMarkWeakTest.cpp
index c07fab3fcac..7ace32f86ab 100644
--- a/unittests/VMRuntime/GCMarkWeakTest.cpp
+++ b/unittests/VMRuntime/GCMarkWeakTest.cpp
@@ -87,7 +87,7 @@ TEST(GCMarkWeakTest, MarkWeak) {
   GCCell *g = TestCell::create(rt, &numMarkWeakCalls);
   rt.pointerRoots.push_back(&g);
-  gc.collect();
+  rt.collect();
   {
     WeakRefLock lk{gc.weakRefMutex()};
@@ -100,7 +100,7 @@ TEST(GCMarkWeakTest, MarkWeak) {
   }
   rt.pointerRoots.pop_back();
-  gc.collect();
+  rt.collect();
   // The weak ref is live at the beginning of the collection, but not by the
   // end, so the call in updateReferences isn't run, nor the second
   // checkHeapWellFormed.
diff --git a/unittests/VMRuntime/GCOOMNCTest.cpp b/unittests/VMRuntime/GCOOMNCTest.cpp
index 90830ab2a61..a3327f3020c 100644
--- a/unittests/VMRuntime/GCOOMNCTest.cpp
+++ b/unittests/VMRuntime/GCOOMNCTest.cpp
@@ -120,19 +120,19 @@ TEST(GCEffectiveOOMDeathTest, UnitTest) {
   auto &gc = rt.gc;
   // tick...
-  gc.collect(/* canEffectiveOOM */ true);
+  gc.collect("test", /* canEffectiveOOM */ true);
   // tick...
-  gc.collect(/* canEffectiveOOM */ true);
+  gc.collect("test", /* canEffectiveOOM */ true);
   // ...
-  gc.collect(/* canEffectiveOOM */ false);
+  gc.collect("test", /* canEffectiveOOM */ false);
   // ...
-  gc.collect(/* canEffectiveOOM */ false);
+  gc.collect("test", /* canEffectiveOOM */ false);
   // ...BOOM!
-  EXPECT_OOM(gc.collect(/* canEffectiveOOM */ true));
+  EXPECT_OOM(gc.collect("test", /* canEffectiveOOM */ true));
 }
 TEST(GCEffectiveOOMDeathTest, IntegrationTest) {
diff --git a/unittests/VMRuntime/GCObjectIterationTest.cpp b/unittests/VMRuntime/GCObjectIterationTest.cpp
index 95220ba9c76..500fe487fa8 100644
--- a/unittests/VMRuntime/GCObjectIterationTest.cpp
+++ b/unittests/VMRuntime/GCObjectIterationTest.cpp
@@ -41,7 +41,7 @@ TEST(GCObjectIterationTest, ForAllObjsGetsAllObjects) {
   GCCell *largeCell1 = LargeCell::create(rt);
   rt.pointerRoots.push_back(&largeCell1);
   // Should move both to the old gen, in separate segments.
-  gc.collect();
+  rt.collect();
   // A smaller size, in the young generation.
   constexpr size_t kSmallSize = 80;
   using SmallCell = EmptyCell<kSmallSize>;
diff --git a/unittests/VMRuntime/GCReturnUnusedMemoryNCTest.cpp b/unittests/VMRuntime/GCReturnUnusedMemoryNCTest.cpp
index f157a4c8a3b..a3af746b3c5 100644
--- a/unittests/VMRuntime/GCReturnUnusedMemoryNCTest.cpp
+++ b/unittests/VMRuntime/GCReturnUnusedMemoryNCTest.cpp
@@ -86,7 +86,7 @@ TEST(GCReturnUnusedMemoryNCTest, CollectReturnsFreeMemory) {
   rt.pointerRoots.erase(rt.pointerRoots.begin());
   // Collect should return the unused memory back to the OS.
-  gc.collect();
+  rt.collect();
   size_t collected = gcRegionFootprint(gc);
   ASSERT_NE(collected, FAILED);
diff --git a/unittests/VMRuntime/GCSanitizeHandlesTest.cpp b/unittests/VMRuntime/GCSanitizeHandlesTest.cpp
index 023d11125ba..514b00a053a 100644
--- a/unittests/VMRuntime/GCSanitizeHandlesTest.cpp
+++ b/unittests/VMRuntime/GCSanitizeHandlesTest.cpp
@@ -142,7 +142,7 @@ TEST(GCSanitizeHandlesTest, MovesAfterCollect) {
   Handle dummy = runtime->makeHandle(DummyObject::create(*runtime));
-  runtime->gc.collect();
+  runtime->collect();
   TH.testHandleMoves(dummy);
 }
diff --git a/unittests/VMRuntime/GCSizingTest.cpp b/unittests/VMRuntime/GCSizingTest.cpp
index 57706700e6d..e363889feb3 100644
--- a/unittests/VMRuntime/GCSizingTest.cpp
+++ b/unittests/VMRuntime/GCSizingTest.cpp
@@ -128,7 +128,7 @@ TEST_F(GCSizingTest, TestOccupancyTarget) {
           headFollower->values()[kPrevFieldIndex].getObject());
     }
     if ((i % kGCFrequency) == 0) {
-      gc.collect();
+      rt.collect();
       GCBase::HeapInfo info;
       gc.getHeapInfo(info);
@@ -185,7 +185,7 @@ TEST_F(GCSizingTest, TestHeapShrinks) {
   tmpArr = nullptr;
   // Now do an explicit full GC, so we will resize when we have 50MB live.
-  gc.collect();
+  rt.collect();
   GCBase::HeapInfo info;
   gc.getHeapInfo(info);
@@ -201,11 +201,11 @@ TEST_F(GCSizingTest, TestHeapShrinks) {
   // Now do 5 full GCs; the heap size should start to approach 2X the live data;
   // we'll allow 3X to account for convergence, so 30 MB.
-  gc.collect();
-  gc.collect();
-  gc.collect();
-  gc.collect();
-  gc.collect();
+  rt.collect();
+  rt.collect();
+  rt.collect();
+  rt.collect();
+  rt.collect();
   gc.getHeapInfo(info);
   gcheapsize_t expectedSmallHeap = 30 * 1000000;
@@ -252,7 +252,7 @@ TEST(GCSizingMinHeapTest, TestHeapDoesNotShrinkPastMinSize) {
   tmpArr = nullptr;
   // Now do an explicit full GC, so we will resize when we have 50MB live.
-  gc.collect();
+  rt.collect();
   GCBase::HeapInfo info;
   gc.getHeapInfo(info);
@@ -270,7 +270,7 @@ TEST(GCSizingMinHeapTest, TestHeapDoesNotShrinkPastMinSize) {
   // live data, or 20MB. But we've specified the minimum heap at
   // 40MB, so it should not shrink beyond that.
   for (unsigned i = 0; i < 10; i++) {
-    gc.collect();
+    rt.collect();
   }
   gc.getHeapInfo(info);
diff --git a/unittests/VMRuntime/HeapSnapshotTest.cpp b/unittests/VMRuntime/HeapSnapshotTest.cpp
index cbae94c4029..d71aa0d8855 100644
--- a/unittests/VMRuntime/HeapSnapshotTest.cpp
+++ b/unittests/VMRuntime/HeapSnapshotTest.cpp
@@ -366,7 +366,7 @@ static JSONObject *
 takeSnapshot(GC &gc, JSONFactory &factory, const char *file, int line) {
   std::string result("");
   llvh::raw_string_ostream str(result);
-  gc.collect();
+  gc.collect("snapshot");
   gc.createSnapshot(str);
   str.flush();
@@ -804,6 +804,12 @@ struct ChromeStackTreeNode {
       ChromeStackTreeNode *parent,
       std::map<int, ChromeStackTreeNode *> &idNodeMap) {
     std::vector<std::unique_ptr<ChromeStackTreeNode>> res;
+    if (!parent) {
+      assert(
+          traceNodes.size() == 5 &&
+          "Allocation trace should only have a"
+          "single root node");
+    }
     for (size_t i = 0; i < traceNodes.size(); i += 5) {
       auto id = llvh::cast<JSONNumber>(traceNodes[i])->getValue();
       auto functionInfoIndex =
@@ -895,10 +901,11 @@ baz();
   EXPECT_STREQ(
       fooStackStr.c_str(),
       R"#(
-global(1) @ test.js(4):2:1
-global(2) @ test.js(4):11:4
-baz(7) @ test.js(4):9:19
-foo(8) @ test.js(4):3:20)#");
+(root)(0) @ (0):0:0
+global(1) @ test.js(1):2:1
+global(2) @ test.js(1):11:4
+baz(7) @ test.js(1):9:19
+foo(8) @ test.js(1):3:20)#");
   auto barAllocNode = FIND_NODE_FOR_ID(barObjID, nodes, strings);
   auto barStackTreeNode = idNodeMap.find(barAllocNode.traceNodeID);
@@ -908,10 +915,11 @@ foo(8) @ test.js(4):3:20)#");
   ASSERT_STREQ(
       barStackStr.c_str(),
       R"#(
-global(1) @ test.js(4):2:1
-global(2) @ test.js(4):11:4
-baz(3) @ test.js(4):9:31
-bar(4) @ test.js(4):6:20)#");
+(root)(0) @ (0):0:0
+global(1) @ test.js(1):2:1
+global(2) @ test.js(1):11:4
+baz(3) @ test.js(1):9:31
+bar(4) @ test.js(1):6:20)#");
 }
 #endif // HERMES_ENABLE_DEBUGGER
diff --git a/unittests/VMRuntime/HiddenClassTest.cpp b/unittests/VMRuntime/HiddenClassTest.cpp
index adfe4135ad2..cd42eeaa348 100644
--- a/unittests/VMRuntime/HiddenClassTest.cpp
+++ b/unittests/VMRuntime/HiddenClassTest.cpp
@@ -23,7 +23,7 @@ using HiddenClassTest = LargeHeapRuntimeTestFixture;
 TEST_F(HiddenClassTest, SmokeTest) {
   GCScope gcScope{runtime, "HiddenClassTest.SmokeTest", 48};
-  runtime->collect();
+  runtime->collect("test");
   auto aHnd = *runtime->getIdentifierTable().getSymbolHandle(
       runtime, createUTF16Ref(u"a"));
@@ -253,7 +253,7 @@ TEST_F(HiddenClassTest, UpdatePropertyFlagsWithoutTransitionsTest) {
   GCScope gcScope{
       runtime, "HiddenClassTest.UpdatePropertyFlagsWithoutTransitionsTest", 48};
-  runtime->collect();
+  runtime->collect("test");
   auto aHnd = *runtime->getIdentifierTable().getSymbolHandle(
       runtime, createUTF16Ref(u"a"));
diff --git a/unittests/VMRuntime/InstrumentationAPITest.cpp b/unittests/VMRuntime/InstrumentationAPITest.cpp
index 1e935b6d141..a42312924e1 100644
--- a/unittests/VMRuntime/InstrumentationAPITest.cpp
+++ b/unittests/VMRuntime/InstrumentationAPITest.cpp
@@ -52,7 +52,7 @@ TEST(InstrumentationAPITest, RunCallbackWhenCollecting) {
               .build())
           .build());
   DummyRuntime &runtime = *rt;
-  runtime.gc.collect();
+  runtime.collect();
   EXPECT_TRUE(triggeredTripwire);
 }
@@ -70,7 +70,7 @@ TEST(InstrumentationAPITest, DontRunCallbackWhenCollecting_underSizeLimit) {
               .build())
          .build());
   DummyRuntime &runtime = *rt;
-  runtime.gc.collect();
+  runtime.collect();
   EXPECT_FALSE(triggeredTripwire);
 }
@@ -108,11 +108,11 @@ TEST(InstrumentationAPITest, RunCallbackAfterAllocatingMemoryOverLimit) {
               .build())
          .build());
   DummyRuntime &runtime = *rt;
-  runtime.gc.collect();
+  runtime.collect();
   EXPECT_FALSE(triggeredTripwire);
   GCCell *cell =
       Dummy::create(runtime);
   runtime.pointerRoots.push_back(&cell);
-  runtime.gc.collect();
+  runtime.collect();
   EXPECT_TRUE(triggeredTripwire);
 }
@@ -130,11 +130,11 @@ TEST(InstrumentationAPITest, DontRunCallbackAfterAllocatingMemoryUnderLimit) {
               .build())
          .build());
   DummyRuntime &runtime = *rt;
-  runtime.gc.collect();
+  runtime.collect();
   EXPECT_FALSE(triggeredTripwire);
   GCCell *cell = Dummy::create(runtime);
   runtime.pointerRoots.push_back(&cell);
-  runtime.gc.collect();
+  runtime.collect();
   EXPECT_FALSE(triggeredTripwire);
 }
diff --git a/unittests/VMRuntime/NativeFrameTest.cpp b/unittests/VMRuntime/NativeFrameTest.cpp
index 817d0e0e386..a29807239cb 100644
--- a/unittests/VMRuntime/NativeFrameTest.cpp
+++ b/unittests/VMRuntime/NativeFrameTest.cpp
@@ -42,14 +42,14 @@ TEST_F(NativeFrameTest, PoisonedStackTest) {
       runtime, 0, nullptr, false, HermesValue::encodeUndefinedValue()};
   ASSERT_FALSE(frame.overflowed());
   // We should not die after this because there were no arguments.
-  runtime->collect();
+  runtime->collect("test");
   // Now make a frame with arguments.
   ScopedNativeCallFrame frame2{
       runtime, 1, nullptr, false, HermesValue::encodeUndefinedValue()};
   ASSERT_FALSE(frame2.overflowed());
   // The frame should be poisoned; ensure we die after a GC.
-  EXPECT_DEATH(runtime->collect(), "Invalid");
+  EXPECT_DEATH(runtime->collect("test"), "Invalid");
 }
 #endif
diff --git a/unittests/VMRuntime/SegmentedArrayTest.cpp b/unittests/VMRuntime/SegmentedArrayTest.cpp
index a921c68da0d..0919a2871e7 100644
--- a/unittests/VMRuntime/SegmentedArrayTest.cpp
+++ b/unittests/VMRuntime/SegmentedArrayTest.cpp
@@ -76,7 +76,7 @@ TEST_F(SegmentedArrayTest, AllowTrimming) {
   // Now force some GCs to happen.
   for (auto i = 0; i < 2; i++) {
-    runtime->collect();
+    runtime->collect("test");
   }
   // The array should be trimmed.
diff --git a/unittests/VMRuntime/StackTracesTreeTest.cpp b/unittests/VMRuntime/StackTracesTreeTest.cpp
index 3d5d66d91d5..e2400f34ce7 100644
--- a/unittests/VMRuntime/StackTracesTreeTest.cpp
+++ b/unittests/VMRuntime/StackTracesTreeTest.cpp
@@ -20,7 +20,7 @@ namespace unittest {
 namespace stacktracestreetest {
 namespace {
-struct StackTracesTreeTest : RuntimeTestFixtureBase {
+struct StackTracesTreeTest : public RuntimeTestFixtureBase {
   explicit StackTracesTreeTest()
       : RuntimeTestFixtureBase(
            RuntimeConfig::Builder(kTestRTConfigBuilder)
                .withES6Proxy(true)
                .withGCConfig(GCConfig::Builder(kTestGCConfigBuilder)
                                  .withAllocationLocationTrackerFromStart(true)
                                  .build())
                .build()) {}
+
+  explicit StackTracesTreeTest(const RuntimeConfig &config)
+      : RuntimeTestFixtureBase(config) {}
+
   ::testing::AssertionResult eval(const std::string &code) {
     hbc::CompileFlags flags;
     // Ideally none of this should require debug info, so let's ensure it
@@ -78,7 +81,84 @@ struct StackTracesTreeTest : RuntimeTestFixtureBase {
         << trimmedRes.str().c_str());
   };
 };
-}; // namespace
+
+// Used to inject a no-op function into JS.
+static CallResult<HermesValue> noop(void *, Runtime *runtime, NativeArgs) {
+  return HermesValue::encodeUndefinedValue();
+}
+
+static CallResult<HermesValue>
+enableAllocationLocationTracker(void *, Runtime *runtime, NativeArgs) {
+  runtime->enableAllocationLocationTracker();
+  return HermesValue::encodeUndefinedValue();
+}
+
+struct StackTracesTreeParameterizedTest
+    : public StackTracesTreeTest,
+      public ::testing::WithParamInterface<bool> {
+  StackTracesTreeParameterizedTest()
+      : StackTracesTreeTest(
+            RuntimeConfig::Builder(kTestRTConfigBuilder)
+                .withES6Proxy(true)
+                .withGCConfig(GCConfig::Builder(kTestGCConfigBuilder)
+                                  .withAllocationLocationTrackerFromStart(
+                                      trackerOnByDefault())
+                                  .build())
+                .build()) {}
+
+  bool trackerOnByDefault() const {
+    // If GetParam() is true, then allocation tracking is enabled from the
+    // start. If GetParam() is false, then allocation tracking begins when
+    // enableAllocationLocationTracker is called.
+    return GetParam();
+  }
+
+  /// Delete the existing tree and reset all state related to allocations.
+  void resetTree() {
+    // Calling this should clear all existing StackTracesTree data.
+    runtime->disableAllocationLocationTracker(true);
+    ASSERT_FALSE(runtime->getStackTracesTree());
+    // If the tracker was on by default, after cleaning it should be
+    // re-enabled, so the function doesn't need to be called.
+    if (trackerOnByDefault()) {
+      runtime->enableAllocationLocationTracker();
+    }
+  }
+
+  void SetUp() override {
+    // Add a JS function 'enableAllocationLocationTracker'
+    // The stack traces for objects allocated after the call to
+    // enableAllocationLocationTracker should be identical.
+    SymbolID enableAllocationLocationTrackerSym;
+    {
+      vm::GCScope gcScope(runtime);
+      enableAllocationLocationTrackerSym =
+          vm::stringToSymbolID(
+              runtime,
+              vm::StringPrimitive::createNoThrow(
+                  runtime, "enableAllocationLocationTracker"))
+              ->getHermesValue()
+              .getSymbol();
+    }
+
+    ASSERT_FALSE(isException(JSObject::putNamed_RJS(
+        runtime->getGlobal(),
+        runtime,
+        enableAllocationLocationTrackerSym,
+        runtime->makeHandle(
+            *NativeFunction::createWithoutPrototype(
+                runtime,
+                nullptr,
+                trackerOnByDefault() ? noop : enableAllocationLocationTracker,
+                enableAllocationLocationTrackerSym,
+                0)))));
+  }
+
+  // No need for a tear-down, because the runtime destructor will clear all
+  // memory.
+};
+
+} // namespace
 static std::string stackTraceToJSON(StackTracesTree &tree) {
   auto &stringTable = *tree.getStringTable();
@@ -123,15 +203,15 @@ bar test.js:1:34
 foo test.js:1:66
 global test.js:1:75
 global test.js:1:1
-(invalid function name) (invalid script name):-1:-1
+(root) :0:0
 )#");
   const auto expectedTree = llvh::StringRef(R"#(
 {
-  "name": "(invalid function name)",
-  "scriptName": "(invalid script name)",
-  "line": -1,
-  "col": -1,
+  "name": "(root)",
+  "scriptName": "",
+  "line": 0,
+  "col": 0,
   "children": [
     {
       "name": "global",
@@ -195,12 +275,29 @@ global test.js:1:1
       stackTraceToJSON(*stackTracesTree).c_str(), expectedTree.str().c_str());
 }
-TEST_F(StackTracesTreeTest, TraceThroughNamedAnon) {
+TEST_P(StackTracesTreeParameterizedTest, GlobalScopeAlloc) {
+  // Not only should the trace be correct but the stack trace should be
+  // popped back down to the root. This is implicitly checked by
+  // ASSERT_RUN_TRACE.
+  ASSERT_RUN_TRACE(
+      R"#(
+enableAllocationLocationTracker();
+new Object();
+)#",
+      R"#(
+global test.js:3:11
+global test.js:2:1
+(root) :0:0
+  )#");
+}
+
+TEST_P(StackTracesTreeParameterizedTest, TraceThroughNamedAnon) {
   ASSERT_RUN_TRACE(
       R"#(
 function foo() {
   function bar() {
     var anonVar = function() {
+      enableAllocationLocationTracker();
       return new Object();
     }
     return anonVar();
@@ -210,74 +307,78 @@ function foo() {
 foo();
 )#",
       R"#(
-anonVar test.js:5:24
-bar test.js:7:19
-foo test.js:9:13
-global test.js:11:4
+anonVar test.js:6:24
+bar test.js:8:19
+foo test.js:10:13
+global test.js:12:4
 global test.js:2:1
-(invalid function name) (invalid script name):-1:-1
+(root) :0:0
 )#");
 }
-TEST_F(StackTracesTreeTest, TraceThroughAnon) {
+TEST_P(StackTracesTreeParameterizedTest, TraceThroughAnon) {
   ASSERT_RUN_TRACE(
       R"#(
 function foo() {
   return (function() {
+    enableAllocationLocationTracker();
     return new Object();
   })();
 }
 foo();
 )#",
      R"#(
-(anonymous) test.js:4:22
-foo test.js:5:5
-global test.js:7:4
+(anonymous) test.js:5:22
+foo test.js:6:5
+global test.js:8:4
 global test.js:2:1
-(invalid function name) (invalid script name):-1:-1
+(root) :0:0
 )#");
 }
-TEST_F(StackTracesTreeTest, TraceThroughAssignedFunction) {
+TEST_P(StackTracesTreeParameterizedTest, TraceThroughAssignedFunction) {
   ASSERT_RUN_TRACE(
       R"#(
 function foo() {
+  enableAllocationLocationTracker();
   return new Object();
 }
 var bar = foo;
 bar();
 )#",
      R"#(
-foo test.js:3:20
-global test.js:6:4
+foo test.js:4:20
+global test.js:7:4
 global test.js:2:1
-(invalid function name) (invalid script name):-1:-1
+(root) :0:0
 )#");
 }
-TEST_F(StackTracesTreeTest, TraceThroughGetter) {
+TEST_P(StackTracesTreeParameterizedTest, TraceThroughGetter) {
   ASSERT_RUN_TRACE(
       R"#(
 const obj = {
   get foo() {
+    enableAllocationLocationTracker();
    return new Object();
   }
 }
 obj.foo;
 )#",
      R"#(
-get foo test.js:4:22
-global test.js:7:4
+get foo test.js:5:22
+global test.js:8:4
 global test.js:2:1
-(invalid function name) (invalid script name):-1:-1
+(root) :0:0
 )#");
 }
-TEST_F(StackTracesTreeTest, TraceThroughProxy) {
+TEST_P(StackTracesTreeParameterizedTest, TraceThroughProxy) {
   ASSERT_RUN_TRACE(
       R"#(
 const handler = {
   get: function(obj, prop) {
+    enableAllocationLocationTracker();
     return new Object();
   }
 };
@@ -285,44 +386,54 @@ const p = new Proxy({}, handler);
 p.something;
 )#",
      R"#(
-get test.js:4:22
-global test.js:8:2
+get test.js:5:22
+global test.js:9:2
 global test.js:2:1
-(invalid function name) (invalid script name):-1:-1
+(root) :0:0
 )#");
 }
-TEST_F(StackTracesTreeTest, TraceThroughEval) {
+TEST_P(StackTracesTreeParameterizedTest, TraceThroughEval) {
   ASSERT_RUN_TRACE(
       R"#(
-function returnit() { return new Object(); }
+function returnit() {
+  enableAllocationLocationTracker();
+  return new Object();
+}
 eval("returnit()");
 )#",
      R"#(
-returnit test.js:2:40
+returnit test.js:4:20
 eval JavaScript:1:9
-global test.js:3:5
+global test.js:6:5
 global test.js:2:1
-(invalid function name) (invalid script name):-1:-1
+(root) :0:0
 )#");
 }
-TEST_F(StackTracesTreeTest, TraceThroughBoundFunctions) {
-  ASSERT_FALSE(eval("function foo() { return new Object(); }"));
+TEST_P(StackTracesTreeParameterizedTest, TraceThroughBoundFunctions) {
+  ASSERT_FALSE(eval(
+      R"#(
+function foo() {
+  enableAllocationLocationTracker();
+  return new Object();
+})#"));
   ASSERT_RUN_TRACE("foo.bind(null)()", R"#(
-foo eval.js:1:35
+foo eval.js:4:20
 global test.js:1:15
 global test.js:1:1
-(invalid function name) (invalid script name):-1:-1
+(root) :0:0
 )#");
+  resetTree();
ASSERT_RUN_TRACE("foo.bind(null).bind(null)()", R"#( -foo eval.js:1:35 +foo eval.js:4:20 global test.js:1:26 global test.js:1:1 -(invalid function name) (invalid script name):-1:-1 +(root) :0:0 )#"); + resetTree(); ASSERT_RUN_TRACE( R"#( @@ -331,6 +442,7 @@ function chain1() { } function chain2() { + enableAllocationLocationTracker(); return new Object(); } @@ -339,35 +451,40 @@ var chain2bound = chain2.bind(null); chain1.bind(null)(); )#", R"#( -chain2 test.js:7:20 +chain2 test.js:8:20 chain1 test.js:3:21 -global test.js:12:18 +global test.js:13:18 global test.js:2:1 -(invalid function name) (invalid script name):-1:-1 +(root) :0:0 )#"); + resetTree(); } -TEST_F(StackTracesTreeTest, TraceThroughNative) { +TEST_P(StackTracesTreeParameterizedTest, TraceThroughNative) { ASSERT_RUN_TRACE( R"#( -function foo(x) { return new Object(); } +function foo(x) { + enableAllocationLocationTracker(); + return new Object(); +} ([0].map(foo))[0]; )#", R"#( -foo test.js:2:36 -global test.js:3:9 +foo test.js:4:20 +global test.js:6:9 global test.js:2:1 -(invalid function name) (invalid script name):-1:-1 +(root) :0:0 )#"); } -TEST_F(StackTracesTreeTest, UnwindOnThrow) { +TEST_P(StackTracesTreeParameterizedTest, UnwindOnThrow) { // This relies on ASSERT_RUN_TRACE implicitly checking the stack is cleared ASSERT_RUN_TRACE( R"#( function foo() { try { function throws() { + enableAllocationLocationTracker(); throw new Error(); } ([0].map(throws.bind(null)))[0]; @@ -379,105 +496,91 @@ function foo() { foo(); )#", R"#( -throws test.js:5:22 -foo test.js:7:13 -global test.js:13:4 +throws test.js:6:22 +foo test.js:8:13 +global test.js:14:4 global test.js:2:1 -(invalid function name) (invalid script name):-1:-1 +(root) :0:0 )#"); -} + resetTree(); -static CallResult -enableAllocationLocationTacker(void *, Runtime *runtime, NativeArgs) { - runtime->enableAllocationLocationTracker(); - return HermesValue::encodeUndefinedValue(); -} - -TEST_F(StackTracesTreeTest, EnableMidwayThroughExecution) { - // Add a JS function 'enableAllocationLocationTracker' - SymbolID enableAllocationLocationTackerSym; - { - vm::GCScope gcScope(runtime); - enableAllocationLocationTackerSym = - vm::stringToSymbolID( - runtime, - vm::StringPrimitive::createNoThrow( - runtime, "enableAllocationLocationTracker")) - ->getHermesValue() - .getSymbol(); - } - - ASSERT_FALSE(isException(JSObject::putNamed_RJS( - runtime->getGlobal(), - runtime, - enableAllocationLocationTackerSym, - runtime->makeHandle( - *NativeFunction::createWithoutPrototype( - runtime, - nullptr, - enableAllocationLocationTacker, - Predefined::getSymbolID(Predefined::emptyString), - 0))))); - - // Calling this should clear all existing StackTracesTree data. - runtime->disableAllocationLocationTracker(true); - ASSERT_FALSE(runtime->getStackTracesTree()); - - // Not only should the trace be correct but the stack trace should be - // popped back down to the root. This is implicitly checked by - // ASSERT_RUN_TRACE. + // Test catching multiple blocks up. 
   ASSERT_RUN_TRACE(
       R"#(
-enableAllocationLocationTracker();
-new Object();
+function thrower() {
+  enableAllocationLocationTracker();
+  throw new Error();
+}
+function layerOne() { return thrower(); }
+function layerTwo() { return layerOne(); }
+function tryAlloc() {
+  try {
+    layerTwo();
+  } catch (e) {
+    return e;
+  }
+}
+tryAlloc();
 )#",
       R"#(
-global test.js:3:11
+thrower test.js:4:18
+layerOne test.js:6:37
+layerTwo test.js:7:38
+tryAlloc test.js:10:13
+global test.js:15:9
 global test.js:2:1
-(invalid function name) (invalid script name):-1:-1
+(root) :0:0
 )#");
+}
-  runtime->disableAllocationLocationTracker(true);
-  ASSERT_TRUE(!runtime->getStackTracesTree());
-
+TEST_P(StackTracesTreeParameterizedTest, MultipleNativeLayers) {
+  // Multiple map and bind layers.
   ASSERT_RUN_TRACE(
       R"#(
 function foo() {
-  return new Object();
-}
-function bar() {
   enableAllocationLocationTracker();
-  return foo();
+  return new Object();
 }
-bar();
+([0].map(foo.bind(null)))[0];
 )#",
      R"#(
-foo test.js:3:20
-bar test.js:7:13
-global test.js:9:4
+foo test.js:4:20
+global test.js:6:9
 global test.js:2:1
-(invalid function name) (invalid script name):-1:-1
-  )#");
-
-  runtime->disableAllocationLocationTracker(true);
-  ASSERT_TRUE(!runtime->getStackTracesTree());
+(root) :0:0
+  )#");
+  resetTree();
+  // Multiple Function.prototype.apply layers.
   ASSERT_RUN_TRACE(
       R"#(
 function foo() {
   enableAllocationLocationTracker();
   return new Object();
 }
-([0].map(foo.bind(null)))[0];
+function secondLayerApply() { return foo.apply(null, []); }
+function layered() { return secondLayerApply(); }
+function fooApply() { return layered.apply(null, []); }
+fooApply();
 )#",
      R"#(
 foo test.js:4:20
-global test.js:6:9
+secondLayerApply test.js:6:47
+layered test.js:7:45
+fooApply test.js:8:43
+global test.js:9:9
 global test.js:2:1
-(invalid function name) (invalid script name):-1:-1
+(root) :0:0
 )#");
+  resetTree();
 }
+// Test with the allocation location tracker on and off.
+INSTANTIATE_TEST_CASE_P(
+    WithOrWithoutAllocationTracker,
+    StackTracesTreeParameterizedTest,
+    ::testing::Bool());
+
 TEST_F(StackTracesTreeTest, MultipleAllocationsMergeInTree) {
   ASSERT_FALSE(eval(R"#(
@@ -497,10 +600,10 @@ function baz() {
   const auto expectedTree = llvh::StringRef(R"#(
 {
-  "name": "(invalid function name)",
-  "scriptName": "(invalid script name)",
-  "line": -1,
-  "col": -1,
+  "name": "(root)",
+  "scriptName": "",
+  "line": 0,
+  "col": 0,
   "children": [
     {
       "name": "global",
diff --git a/unittests/VMRuntime/TestHelpers.h b/unittests/VMRuntime/TestHelpers.h
index 7de7610cf87..b1767c34483 100644
--- a/unittests/VMRuntime/TestHelpers.h
+++ b/unittests/VMRuntime/TestHelpers.h
@@ -311,6 +311,10 @@ struct DummyRuntime final : public HandleRootOwner,
     return gc;
   }
+  void collect() {
+    gc.collect("test");
+  }
+
   void markRoots(RootAcceptor &acceptor, bool) override;
   void markWeakRoots(WeakRootAcceptor &weakAcceptor) override;
diff --git a/unittests/VMRuntime/WeakValueMapTest.cpp b/unittests/VMRuntime/WeakValueMapTest.cpp
index 42915cfd6cc..03c97c6cc96 100644
--- a/unittests/VMRuntime/WeakValueMapTest.cpp
+++ b/unittests/VMRuntime/WeakValueMapTest.cpp
@@ -24,7 +24,7 @@ namespace {
 using WeakValueMapTest = LargeHeapRuntimeTestFixture;
 TEST_F(WeakValueMapTest, SmokeTest) {
-  runtime->collect();
+  runtime->collect("test");
   WeakValueMap wvp{};
@@ -82,7 +82,7 @@ TEST_F(WeakValueMapTest, SmokeTest) {
   // Make sure no temporary handles exist.
   gcScope.flushToMarker(marker);
-  runtime->collect();
+  runtime->collect("test");
 #ifndef HERMESVM_GC_HADES
   // Hades doesn't support DebugHeapInfo yet.
   GCBase::DebugHeapInfo debugInfo;
@@ -99,7 +99,7 @@ TEST_F(WeakValueMapTest, SmokeTest) {
   // Make sure no temporary handles exist.
   gcScope.flushToMarker(marker);
-  runtime->collect();
+  runtime->collect("test");
 #ifndef HERMESVM_GC_HADES
   // Hades doesn't support debugInfo yet.
   runtime->getHeap().getDebugHeapInfo(debugInfo);