Skip to content

Commit

Permalink
Switch CoreCLR finalizer loop to C#
Browse files Browse the repository at this point in the history
  • Loading branch information
jkotas committed Jun 15, 2024
1 parent dd8d97e commit 3d5877f
Show file tree
Hide file tree
Showing 19 changed files with 174 additions and 354 deletions.
18 changes: 18 additions & 0 deletions src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs
Original file line number Diff line number Diff line change
Expand Up @@ -303,6 +303,24 @@ public static int GetGeneration(WeakReference wo)
//
public static int MaxGeneration => GetMaxGeneration();

// QCall into the VM that dequeues the next object pending finalization.
// On success the object is written into 'target' and the entry point of its
// Finalize method is returned; returns null when the finalization queue is
// drained (or the finalizer thread has been asked to quit — see the
// corresponding GCInterface_GetNextFinalizableObject / FinalizerThread logic).
[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "GCInterface_GetNextFinalizableObject")]
private static unsafe partial void* GetNextFinalizeableObject(ObjectHandleOnStack target);

/// <summary>
/// Drains the finalization queue: repeatedly fetches the next finalizable
/// object together with its Finalize entry point from the VM and invokes it,
/// until the runtime reports there is nothing left to run.
/// </summary>
/// <returns>The number of finalizers that were executed.</returns>
private static unsafe uint RunFinalizers()
{
    uint finalizedCount = 0;

    object? pending = null;
    void* finalizerFnPtr;
    while ((finalizerFnPtr = GetNextFinalizeableObject(ObjectHandleOnStack.Create(ref pending))) != null)
    {
        // Invoke the object's Finalize method through the unmanaged
        // function pointer handed back by the runtime.
        ((delegate*<object, void>)finalizerFnPtr)(pending!);
        finalizedCount++;

        // Drop the reference so the just-finalized object is not kept
        // alive across the next fetch.
        pending = null;
    }

    return finalizedCount;
}

[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "GCInterface_WaitForPendingFinalizers")]
private static partial void _WaitForPendingFinalizers();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -361,14 +361,39 @@ internal static int OptimalMaxSpinWaitsPerSpinIteration
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal void ResetThreadPoolThread()
internal void ResetFinalizerThread()
{
Debug.Assert(this == CurrentThread);
Debug.Assert(IsThreadPoolThread);

if (_mayNeedResetForThreadPool)
{
ResetThreadPoolThreadSlow();
ResetFinalizerThreadSlow();
}
}

[MethodImpl(MethodImplOptions.NoInlining)]
private void ResetFinalizerThreadSlow()
{
    // Slow path of ResetFinalizerThread: restore the finalizer thread's
    // expected name, background status and priority, which user finalizer
    // code may have changed.
    Debug.Assert(this == CurrentThread);
    Debug.Assert(_mayNeedResetForThreadPool);

    _mayNeedResetForThreadPool = false;

    const string FinalizerThreadName = ".NET Finalizer";

    // Touch each property setter only when the current value actually
    // differs, to avoid redundant work on this hot reset path.
    if (Name != FinalizerThreadName)
        Name = FinalizerThreadName;

    if (!IsBackground)
        IsBackground = true;

    if (Priority != ThreadPriority.Highest)
        Priority = ThreadPriority.Highest;
}
}
Expand Down
26 changes: 0 additions & 26 deletions src/coreclr/vm/amd64/CallDescrWorkerAMD64.asm
Original file line number Diff line number Diff line change
Expand Up @@ -6,32 +6,6 @@ include <AsmConstants.inc>

extern CallDescrWorkerUnwindFrameChainHandler:proc

;;
;; EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);
;;
;; Invokes a single finalizer by calling directly into managed code,
;; bypassing CallDescrWorker. Always returns 0 (RAX is cleared).
;;
NESTED_ENTRY FastCallFinalizeWorker, _TEXT, CallDescrWorkerUnwindFrameChainHandler
alloc_stack 28h ;; alloc callee scratch and align the stack
END_PROLOGUE

;
; RCX: already contains obj*
; RDX: address of finalizer method to call
;

; !!!!!!!!!
; NOTE: you cannot tail call here because we must have the CallDescrWorkerUnwindFrameChainHandler
; personality routine on the stack.
; !!!!!!!!!
call rdx
xor rax, rax ; zero the return value

; epilog
add rsp, 28h
ret


NESTED_END FastCallFinalizeWorker, _TEXT

;;extern "C" void CallDescrWorkerInternal(CallDescrData * pCallDescrData);

NESTED_ENTRY CallDescrWorkerInternal, _TEXT, CallDescrWorkerUnwindFrameChainHandler
Expand Down
27 changes: 0 additions & 27 deletions src/coreclr/vm/amd64/calldescrworkeramd64.S
Original file line number Diff line number Diff line change
Expand Up @@ -10,33 +10,6 @@

//extern CallDescrWorkerUnwindFrameChainHandler:proc

//
// EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);
//
// System V AMD64 variant: invokes a single finalizer by calling directly
// into managed code, bypassing CallDescrWorker. Always returns 0 (RAX).
//
NESTED_ENTRY FastCallFinalizeWorker, _TEXT, NoHandler
push_nonvol_reg rbp
mov rbp, rsp
END_PROLOGUE

//
// RDI: already contains obj*
// RSI: address of finalizer method to call
//

// !!!!!!!!!
// NOTE: you cannot tail call here because we must have the CallDescrWorkerUnwindFrameChainHandler
// personality routine on the stack.
// !!!!!!!!!
call rsi
xor rax, rax // zero the return value

// epilog
pop_nonvol_reg rbp
ret


NESTED_END FastCallFinalizeWorker, _TEXT

//extern "C" void CallDescrWorkerInternal(CallDescrData * pCallDescrData);

NESTED_ENTRY CallDescrWorkerInternal, _TEXT, NoHandler
Expand Down
1 change: 0 additions & 1 deletion src/coreclr/vm/amd64/cgencpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ class ComCallMethodDesc;
// functions implemented in AMD64 assembly
//
EXTERN_C void SinglecastDelegateInvokeStub();
EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);

#define COMMETHOD_PREPAD 16 // # extra bytes to allocate in addition to sizeof(ComCallMethodDesc)
#define COMMETHOD_CALL_PRESTUB_SIZE 6 // 32-bit indirect relative call
Expand Down
8 changes: 2 additions & 6 deletions src/coreclr/vm/callhelpers.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -40,12 +40,8 @@ void AssertMulticoreJitAllowedModule(PCODE pTarget)
// out of managed code. Instead, we rely on explicit cleanup like CLRException::HandlerState::CleanupTry
// or UMThunkUnwindFrameChainHandler.
//
// So most callers should call through CallDescrWorkerWithHandler (or a wrapper like MethodDesc::Call)
// and get the platform-appropriate exception handling. A few places try to optimize by calling direct
// to managed methods (see ArrayInitializeWorker or FastCallFinalize). This sort of thing is
// dangerous. You have to worry about marking yourself as a legal managed caller and you have to
// worry about how exceptions will be handled on a FEATURE_EH_FUNCLETS plan. It is generally only suitable
// for X86.
// So all callers should call through CallDescrWorkerWithHandler (or a wrapper like MethodDesc::Call)
// and get the platform-appropriate exception handling.

//*******************************************************************************
void CallDescrWorkerWithHandler(
Expand Down
25 changes: 25 additions & 0 deletions src/coreclr/vm/comutilnative.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -856,6 +856,31 @@ extern "C" void QCALLTYPE GCInterface_Collect(INT32 generation, INT32 mode)
END_QCALL;
}

// QCall backing GC.GetNextFinalizeableObject on the managed side.
// Dequeues the next object pending finalization into pObj and returns the
// code address of its Finalize method, or NULL when there is nothing to run.
extern "C" void* QCALLTYPE GCInterface_GetNextFinalizableObject(QCall::ObjectHandleOnStack pObj)
{
QCALL_CONTRACT;

PCODE funcPtr = NULL;

BEGIN_QCALL;

// OBJECTREFs may only be created/dereferenced in cooperative GC mode.
GCX_COOP();

OBJECTREF target = FinalizerThread::GetNextFinalizableObject();

if (target != NULL)
{
// Publish the object to the caller through the stack-rooted handle.
pObj.Set(target);

MethodTable* pMT = target->GetMethodTable();

// Resolve the Finalize entry point from the object's actual type using
// the slot of the canonical Object.Finalize method.
funcPtr = pMT->GetRestoredSlot(g_pObjectFinalizerMD->GetSlot());
}

END_QCALL;

return (void*)funcPtr;
}

/*==========================WaitForPendingFinalizers============================
**Action: Run all Finalizers that haven't been run.
Expand Down
2 changes: 2 additions & 0 deletions src/coreclr/vm/comutilnative.h
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,8 @@ extern "C" INT64 QCALLTYPE GCInterface_GetTotalMemory();

extern "C" void QCALLTYPE GCInterface_Collect(INT32 generation, INT32 mode);

extern "C" void* QCALLTYPE GCInterface_GetNextFinalizableObject(QCall::ObjectHandleOnStack pObj);

extern "C" void QCALLTYPE GCInterface_WaitForPendingFinalizers();
#ifdef FEATURE_BASICFREEZE
extern "C" void* QCALLTYPE GCInterface_RegisterFrozenSegment(void *pSection, SIZE_T sizeSection);
Expand Down
3 changes: 1 addition & 2 deletions src/coreclr/vm/corelib.h
Original file line number Diff line number Diff line change
Expand Up @@ -884,8 +884,7 @@ DEFINE_METHOD(VALUE_TYPE, EQUALS, Equals,

DEFINE_CLASS(GC, System, GC)
DEFINE_METHOD(GC, KEEP_ALIVE, KeepAlive, SM_Obj_RetVoid)
DEFINE_METHOD(GC, COLLECT, Collect, SM_RetVoid)
DEFINE_METHOD(GC, WAIT_FOR_PENDING_FINALIZERS, WaitForPendingFinalizers, SM_RetVoid)
DEFINE_METHOD(GC, RUN_FINALIZERS, RunFinalizers, SM_RetUInt)

DEFINE_CLASS_U(System, WeakReference, WeakReferenceObject)
DEFINE_FIELD_U(_taggedHandle, WeakReferenceObject, m_taggedHandle)
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/vm/exceptionhandling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8518,7 +8518,7 @@ extern "C" bool QCALLTYPE SfiNext(StackFrameIterator* pThis, uint* uExCollideCla
}
else
{
// TODO-NewEH: Currently there are two other cases of internal VM->managed transitions. The FastCallFinalize and COMToCLRDispatchHelperWithStack
// TODO-NewEH: Currently there is one other case of internal VM->managed transition: COMToCLRDispatchHelperWithStack.
// Either add handling for it here as well or rewrite all these perf critical places in C#, so that CallDescrWorker is the only path that
// needs to be handled here.
size_t CallDescrWorkerInternalReturnAddress = (size_t)CallDescrWorkerInternal + CallDescrWorkerInternalReturnAddressOffset;
Expand Down
125 changes: 92 additions & 33 deletions src/coreclr/vm/finalizerthread.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -52,34 +52,108 @@ BOOL FinalizerThread::HaveExtraWorkForFinalizer()
return GetFinalizerThread()->HaveExtraWorkForFinalizer();
}

void CallFinalizer(Object* obj)
// Special-cases "finalization" of a System.Threading.Thread object: instead of
// running a managed finalizer, detaches the exposed managed object from the
// internal Thread (when appropriate) and marks the internal Thread as
// finalized so its cleanup can be batched under the ThreadStoreLock later.
static void CallFinalizerOnThreadObject(OBJECTREF obj)
{
STATIC_CONTRACT_MODE_COOPERATIVE;

THREADBASEREF refThis = (THREADBASEREF)obj;
Thread* thread = refThis->GetInternal();

// Prevent multiple calls to Finalize
// Objects can be resurrected after being finalized. However, there is no
// race condition here. We always check whether an exposed thread object is
// still attached to the internal Thread object, before proceeding.
if (thread)
{
refThis->ResetStartHelper();

// During process shutdown, we finalize even reachable objects. But if we break
// the link between the System.Thread and the internal Thread object, the runtime
// may not work correctly. In particular, we won't be able to transition between
// contexts and domains to finalize other objects. Since the runtime doesn't
// require that Threads finalize during shutdown, we need to disable this. If
// we wait until phase 2 of shutdown finalization (when the EE is suspended and
// will never resume) then we can simply skip the side effects of Thread
// finalization.
if ((g_fEEShutDown & ShutDown_Finalize2) == 0)
{
// Only detach the exposed object when it is not the thread currently
// running this finalizer (i.e. the underlying thread is truly dead).
if (GetThreadNULLOk() != thread)
{
refThis->ClearInternal();
}

thread->SetThreadState(Thread::TS_Finalized);
Thread::SetCleanupNeededForFinalizedThread();
}
}
}

OBJECTREF FinalizerThread::GetNextFinalizableObject()
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;

MethodTable *pMT = obj->GetMethodTable();
STRESS_LOG2(LF_GC, LL_INFO1000, "Finalizing object %p MT %pT\n", obj, pMT);
LOG((LF_GC, LL_INFO1000, "Finalizing " LOG_OBJECT_CLASS(obj)));
Again:
if (fQuitFinalizer)
return NULL;

_ASSERTE(GetThread()->PreemptiveGCDisabled());
OBJECTREF obj = ObjectToOBJECTREF(GCHeapUtilities::GetGCHeap()->GetNextFinalizable());
if (obj == NULL)
return NULL;

if (!((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN))
MethodTable *pMT = obj->GetMethodTable();
STRESS_LOG2(LF_GC, LL_INFO1000, "Finalizing object %p MT %pT\n", OBJECTREFToObject(obj), pMT);
LOG((LF_GC, LL_INFO1000, "Finalizing " LOG_OBJECT_CLASS(OBJECTREFToObject(obj))));

if ((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
{
_ASSERTE(pMT->HasFinalizer());
//reset the bit so the object can be put on the list
//with RegisterForFinalization
obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
goto Again;
}

_ASSERTE(pMT->HasFinalizer());

#ifdef FEATURE_EVENT_TRACE
ETW::GCLog::SendFinalizeObjectEvent(pMT, obj);
ETW::GCLog::SendFinalizeObjectEvent(pMT, OBJECTREFToObject(obj));
#endif // FEATURE_EVENT_TRACE

MethodTable::CallFinalizer(obj);
// Check for precise init class constructors that have failed, if any have failed, then we didn't run the
// constructor for the object, and running the finalizer for the object would violate the CLI spec by running
// instance code without having successfully run the precise-init class constructor.
if (pMT->HasPreciseInitCctors())
{
MethodTable *pMTCur = pMT;
do
{
if ((!pMTCur->GetClass()->IsBeforeFieldInit()) && pMTCur->IsInitError())
{
// Precise init Type Initializer for type failed... do not run finalizer
goto Again;
}

pMTCur = pMTCur->GetParentMethodTable();
}
while (pMTCur != NULL);
}
else

if (pMT == g_pThreadClass)
{
//reset the bit so the object can be put on the list
//with RegisterForFinalization
obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
// Finalizing Thread object requires ThreadStoreLock. It is expensive if
// we keep taking ThreadStoreLock. This is very bad if we have high retiring
// rate of Thread objects.
// To avoid taking ThreadStoreLock multiple times, we mark Thread with TS_Finalized
// and clean up a batch of them when we take ThreadStoreLock next time.

// To avoid possible hierarchy requirement between critical finalizers, we call cleanup
// code directly.
CallFinalizerOnThreadObject(obj);
goto Again;
}

return obj;
}

void FinalizerThread::FinalizeAllObjects()
Expand All @@ -90,28 +164,13 @@ void FinalizerThread::FinalizeAllObjects()

FireEtwGCFinalizersBegin_V1(GetClrInstanceId());

unsigned int fcount = 0;

Object* fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
PREPARE_NONVIRTUAL_CALLSITE(METHOD__GC__RUN_FINALIZERS);
DECLARE_ARGHOLDER_ARRAY(args, 0);

Thread *pThread = GetThread();

// Finalize everyone
while (fobj && !fQuitFinalizer)
{
fcount++;
uint32_t count;
CALL_MANAGED_METHOD(count, uint32_t, args);

CallFinalizer(fobj);

// thread abort could be injected by the debugger,
// but should not be allowed to "leak" out of expression evaluation
_ASSERTE(!GetFinalizerThread()->IsAbortRequested());

pThread->InternalReset();

fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
}
FireEtwGCFinalizersEnd_V1(fcount, GetClrInstanceId());
FireEtwGCFinalizersEnd_V1(count, GetClrInstanceId());
}

void FinalizerThread::WaitForFinalizerEvent (CLREvent *event)
Expand Down
2 changes: 2 additions & 0 deletions src/coreclr/vm/finalizerthread.h
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,8 @@ class FinalizerThread

static BOOL HaveExtraWorkForFinalizer();

static OBJECTREF GetNextFinalizableObject();

static void RaiseShutdownEvents()
{
WRAPPER_NO_CONTRACT;
Expand Down
1 change: 1 addition & 0 deletions src/coreclr/vm/metasig.h
Original file line number Diff line number Diff line change
Expand Up @@ -215,6 +215,7 @@ DEFINE_METASIG(SM(Obj_ArrObject_RetVoid, j a(j), v))
DEFINE_METASIG(SM(Obj_IntPtr_Obj_RetVoid, j I j, v))
DEFINE_METASIG(SM(RetUIntPtr, _, U))
DEFINE_METASIG(SM(RetIntPtr, _, I))
DEFINE_METASIG(SM(RetUInt, _, K))
DEFINE_METASIG(SM(RetBool, _, F))
DEFINE_METASIG(SM(IntPtr_RetStr, I, s))
DEFINE_METASIG(SM(IntPtr_RetBool, I, F))
Expand Down
Loading

0 comments on commit 3d5877f

Please sign in to comment.