diff --git a/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs
index 6ec67cbbab2e34..998dcc8340c7e7 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs
@@ -303,6 +303,27 @@ public static int GetGeneration(WeakReference wo)
         //
         public static int MaxGeneration => GetMaxGeneration();
 
+        [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "GCInterface_GetNextFinalizableObject")]
+        private static unsafe partial void* GetNextFinalizableObject(ObjectHandleOnStack target);
+
+        private static unsafe uint RunFinalizers()
+        {
+            Thread currentThread = Thread.CurrentThread;
+
+            uint count = 0;
+            while (true)
+            {
+                object? target = null;
+                void* fptr = GetNextFinalizableObject(ObjectHandleOnStack.Create(ref target));
+                if (fptr == null)
+                    break;
+                ((delegate*<object, void>)fptr)(target!);
+                currentThread.ResetFinalizerThread();
+                count++;
+            }
+            return count;
+        }
+
         [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "GCInterface_WaitForPendingFinalizers")]
         private static partial void _WaitForPendingFinalizers();
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs
index 3f85dab6d443a6..45a24e61b28901 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs
@@ -361,14 +361,39 @@ internal static int OptimalMaxSpinWaitsPerSpinIteration
         }
 
         [MethodImpl(MethodImplOptions.AggressiveInlining)]
-        internal void ResetThreadPoolThread()
+        internal void ResetFinalizerThread()
         {
             Debug.Assert(this == CurrentThread);
-            Debug.Assert(IsThreadPoolThread);
 
             if (_mayNeedResetForThreadPool)
             {
-                ResetThreadPoolThreadSlow();
+                ResetFinalizerThreadSlow();
+            }
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        private void ResetFinalizerThreadSlow()
+        {
+            Debug.Assert(this == CurrentThread);
+            Debug.Assert(_mayNeedResetForThreadPool);
+
+            _mayNeedResetForThreadPool = false;
+
+            const string FinalizerThreadName = ".NET Finalizer";
+
+            if (Name != FinalizerThreadName)
+            {
+                Name = FinalizerThreadName;
+            }
+
+            if (!IsBackground)
+            {
+                IsBackground = true;
+            }
+
+            if (Priority != ThreadPriority.Highest)
+            {
+                Priority = ThreadPriority.Highest;
             }
         }
     }
diff --git a/src/coreclr/vm/amd64/CallDescrWorkerAMD64.asm b/src/coreclr/vm/amd64/CallDescrWorkerAMD64.asm
index af0ca575b05661..e51353eb429889 100644
--- a/src/coreclr/vm/amd64/CallDescrWorkerAMD64.asm
+++ b/src/coreclr/vm/amd64/CallDescrWorkerAMD64.asm
@@ -6,32 +6,6 @@ include
 
 extern CallDescrWorkerUnwindFrameChainHandler:proc
 
-;;
-;; EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);
-;;
-NESTED_ENTRY FastCallFinalizeWorker, _TEXT, CallDescrWorkerUnwindFrameChainHandler
-        alloc_stack     28h     ;; alloc callee scratch and align the stack
-        END_PROLOGUE
-
-        ;
-        ; RCX: already contains obj*
-        ; RDX: address of finalizer method to call
-        ;
-
-        ; !!!!!!!!!
-        ; NOTE: you cannot tail call here because we must have the CallDescrWorkerUnwindFrameChainHandler
-        ; personality routine on the stack.
-        ; !!!!!!!!!
-        call    rdx
-        xor     rax, rax
-
-        ; epilog
-        add     rsp, 28h
-        ret
-
-
-NESTED_END FastCallFinalizeWorker, _TEXT
-
 ;;extern "C" void CallDescrWorkerInternal(CallDescrData * pCallDescrData);
 NESTED_ENTRY CallDescrWorkerInternal, _TEXT, CallDescrWorkerUnwindFrameChainHandler
diff --git a/src/coreclr/vm/amd64/calldescrworkeramd64.S b/src/coreclr/vm/amd64/calldescrworkeramd64.S
index 329979c120a647..963134eb22be86 100644
--- a/src/coreclr/vm/amd64/calldescrworkeramd64.S
+++ b/src/coreclr/vm/amd64/calldescrworkeramd64.S
@@ -10,33 +10,6 @@
 
 //extern CallDescrWorkerUnwindFrameChainHandler:proc
 
-//
-// EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);
-//
-NESTED_ENTRY FastCallFinalizeWorker, _TEXT, NoHandler
-        push_nonvol_reg rbp
-        mov     rbp, rsp
-        END_PROLOGUE
-
-        //
-        // RDI: already contains obj*
-        // RSI: address of finalizer method to call
-        //
-
-        // !!!!!!!!!
-        // NOTE: you cannot tail call here because we must have the CallDescrWorkerUnwindFrameChainHandler
-        // personality routine on the stack.
-        // !!!!!!!!!
-        call    rsi
-        xor     rax, rax
-
-        // epilog
-        pop_nonvol_reg rbp
-        ret
-
-
-NESTED_END FastCallFinalizeWorker, _TEXT
-
 //extern "C" void CallDescrWorkerInternal(CallDescrData * pCallDescrData);
 NESTED_ENTRY CallDescrWorkerInternal, _TEXT, NoHandler
diff --git a/src/coreclr/vm/amd64/cgencpu.h b/src/coreclr/vm/amd64/cgencpu.h
index b5c5ef3a8c159e..75d31ea1a4ad0e 100644
--- a/src/coreclr/vm/amd64/cgencpu.h
+++ b/src/coreclr/vm/amd64/cgencpu.h
@@ -35,7 +35,6 @@ class ComCallMethodDesc;
 // functions implemented in AMD64 assembly
 //
 EXTERN_C void SinglecastDelegateInvokeStub();
-EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);
 
 #define COMMETHOD_PREPAD             16   // # extra bytes to allocate in addition to sizeof(ComCallMethodDesc)
 #define COMMETHOD_CALL_PRESTUB_SIZE  6    // 32-bit indirect relative call
diff --git a/src/coreclr/vm/callhelpers.cpp b/src/coreclr/vm/callhelpers.cpp
index bc426b9c40b16b..3125c21ea3d13d 100644
--- a/src/coreclr/vm/callhelpers.cpp
+++ b/src/coreclr/vm/callhelpers.cpp
@@ -40,12 +40,8 @@ void AssertMulticoreJitAllowedModule(PCODE pTarget)
 // out of managed code. Instead, we rely on explicit cleanup like CLRException::HandlerState::CleanupTry
 // or UMThunkUnwindFrameChainHandler.
 //
-// So most callers should call through CallDescrWorkerWithHandler (or a wrapper like MethodDesc::Call)
-// and get the platform-appropriate exception handling. A few places try to optimize by calling direct
-// to managed methods (see ArrayInitializeWorker or FastCallFinalize). This sort of thing is
-// dangerous. You have to worry about marking yourself as a legal managed caller and you have to
-// worry about how exceptions will be handled on a FEATURE_EH_FUNCLETS plan. It is generally only suitable
-// for X86.
+// So all callers should call through CallDescrWorkerWithHandler (or a wrapper like MethodDesc::Call)
+// and get the platform-appropriate exception handling.
 //*******************************************************************************
 void CallDescrWorkerWithHandler(
diff --git a/src/coreclr/vm/comutilnative.cpp b/src/coreclr/vm/comutilnative.cpp
index dccd4393c893e3..883eff937eac4d 100644
--- a/src/coreclr/vm/comutilnative.cpp
+++ b/src/coreclr/vm/comutilnative.cpp
@@ -856,6 +856,31 @@ extern "C" void QCALLTYPE GCInterface_Collect(INT32 generation, INT32 mode)
     END_QCALL;
 }
 
+extern "C" void* QCALLTYPE GCInterface_GetNextFinalizableObject(QCall::ObjectHandleOnStack pObj)
+{
+    QCALL_CONTRACT;
+
+    PCODE funcPtr = 0;
+
+    BEGIN_QCALL;
+
+    GCX_COOP();
+
+    OBJECTREF target = FinalizerThread::GetNextFinalizableObject();
+
+    if (target != NULL)
+    {
+        pObj.Set(target);
+
+        MethodTable* pMT = target->GetMethodTable();
+
+        funcPtr = pMT->GetRestoredSlot(g_pObjectFinalizerMD->GetSlot());
+    }
+
+    END_QCALL;
+
+    return (void*)funcPtr;
+}
 
 /*==========================WaitForPendingFinalizers============================
 **Action: Run all Finalizers that haven't been run.
diff --git a/src/coreclr/vm/comutilnative.h b/src/coreclr/vm/comutilnative.h
index 4b83e3ea9dad4c..4a559fe68aa3d0 100644
--- a/src/coreclr/vm/comutilnative.h
+++ b/src/coreclr/vm/comutilnative.h
@@ -198,6 +198,8 @@ extern "C" INT64 QCALLTYPE GCInterface_GetTotalMemory();
 
 extern "C" void QCALLTYPE GCInterface_Collect(INT32 generation, INT32 mode);
 
+extern "C" void* QCALLTYPE GCInterface_GetNextFinalizableObject(QCall::ObjectHandleOnStack pObj);
+
 extern "C" void QCALLTYPE GCInterface_WaitForPendingFinalizers();
 
 #ifdef FEATURE_BASICFREEZE
 extern "C" void* QCALLTYPE GCInterface_RegisterFrozenSegment(void *pSection, SIZE_T sizeSection);
diff --git a/src/coreclr/vm/corelib.h b/src/coreclr/vm/corelib.h
index df42e52eab9527..a9e32f399a07a7 100644
--- a/src/coreclr/vm/corelib.h
+++ b/src/coreclr/vm/corelib.h
@@ -884,8 +884,7 @@ DEFINE_METHOD(VALUE_TYPE, EQUALS, Equals,
 
 DEFINE_CLASS(GC, System, GC)
 DEFINE_METHOD(GC, KEEP_ALIVE, KeepAlive, SM_Obj_RetVoid)
-DEFINE_METHOD(GC, COLLECT, Collect, SM_RetVoid)
-DEFINE_METHOD(GC, WAIT_FOR_PENDING_FINALIZERS, WaitForPendingFinalizers, SM_RetVoid)
+DEFINE_METHOD(GC, RUN_FINALIZERS, RunFinalizers, SM_RetUInt)
 
 DEFINE_CLASS_U(System, WeakReference, WeakReferenceObject)
 DEFINE_FIELD_U(_taggedHandle, WeakReferenceObject, m_taggedHandle)
diff --git a/src/coreclr/vm/exceptionhandling.cpp b/src/coreclr/vm/exceptionhandling.cpp
index c7cc10a94de743..1967b9f835b73b 100644
--- a/src/coreclr/vm/exceptionhandling.cpp
+++ b/src/coreclr/vm/exceptionhandling.cpp
@@ -8518,8 +8518,8 @@ extern "C" bool QCALLTYPE SfiNext(StackFrameIterator* pThis, uint* uExCollideCla
             }
         }
         else
         {
-            // TODO-NewEH: Currently there are two other cases of internal VM->managed transitions. The FastCallFinalize and COMToCLRDispatchHelperWithStack
-            // Either add handling those here as well or rewrite all these perf critical places in C#, so that CallDescrWorker is the only path that
+            // TODO-NewEH: Currently there is one other case of an internal VM->managed transition: COMToCLRDispatchHelperWithStack.
+            // Either add handling for it here as well or rewrite it in C#, so that CallDescrWorker is the only path that
             // needs to be handled here.
             size_t CallDescrWorkerInternalReturnAddress = (size_t)CallDescrWorkerInternal + CallDescrWorkerInternalReturnAddressOffset;
             if (GetIP(pThis->m_crawl.GetRegisterSet()->pCallerContext) == CallDescrWorkerInternalReturnAddress)
diff --git a/src/coreclr/vm/finalizerthread.cpp b/src/coreclr/vm/finalizerthread.cpp
index 9381d6e29676a6..4efa84d21d90c1 100644
--- a/src/coreclr/vm/finalizerthread.cpp
+++ b/src/coreclr/vm/finalizerthread.cpp
@@ -52,34 +52,108 @@ BOOL FinalizerThread::HaveExtraWorkForFinalizer()
     return GetFinalizerThread()->HaveExtraWorkForFinalizer();
 }
 
-void CallFinalizer(Object* obj)
+static void CallFinalizerOnThreadObject(OBJECTREF obj)
+{
+    STATIC_CONTRACT_MODE_COOPERATIVE;
+
+    THREADBASEREF refThis = (THREADBASEREF)obj;
+    Thread* thread = refThis->GetInternal();
+
+    // Prevent multiple calls to Finalize
+    // Objects can be resurrected after being finalized. However, there is no
+    // race condition here. We always check whether an exposed thread object is
+    // still attached to the internal Thread object, before proceeding.
+    if (thread)
+    {
+        refThis->ResetStartHelper();
+
+        // During process shutdown, we finalize even reachable objects. But if we break
+        // the link between the System.Thread and the internal Thread object, the runtime
+        // may not work correctly. In particular, we won't be able to transition between
+        // contexts and domains to finalize other objects. Since the runtime doesn't
+        // require that Threads finalize during shutdown, we need to disable this. If
+        // we wait until phase 2 of shutdown finalization (when the EE is suspended and
+        // will never resume) then we can simply skip the side effects of Thread
+        // finalization.
+        if ((g_fEEShutDown & ShutDown_Finalize2) == 0)
+        {
+            if (GetThreadNULLOk() != thread)
+            {
+                refThis->ClearInternal();
+            }
+
+            thread->SetThreadState(Thread::TS_Finalized);
+            Thread::SetCleanupNeededForFinalizedThread();
+        }
+    }
+}
+
+OBJECTREF FinalizerThread::GetNextFinalizableObject()
 {
     STATIC_CONTRACT_THROWS;
     STATIC_CONTRACT_GC_TRIGGERS;
     STATIC_CONTRACT_MODE_COOPERATIVE;
 
-    MethodTable *pMT = obj->GetMethodTable();
-    STRESS_LOG2(LF_GC, LL_INFO1000, "Finalizing object %p MT %pT\n", obj, pMT);
-    LOG((LF_GC, LL_INFO1000, "Finalizing " LOG_OBJECT_CLASS(obj)));
+Again:
+    if (fQuitFinalizer)
+        return NULL;
 
-    _ASSERTE(GetThread()->PreemptiveGCDisabled());
+    OBJECTREF obj = ObjectToOBJECTREF(GCHeapUtilities::GetGCHeap()->GetNextFinalizable());
+    if (obj == NULL)
+        return NULL;
 
-    if (!((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN))
+    MethodTable *pMT = obj->GetMethodTable();
+    STRESS_LOG2(LF_GC, LL_INFO1000, "Finalizing object %p MT %pT\n", OBJECTREFToObject(obj), pMT);
+    LOG((LF_GC, LL_INFO1000, "Finalizing " LOG_OBJECT_CLASS(OBJECTREFToObject(obj))));
+
+    if ((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
     {
-        _ASSERTE(pMT->HasFinalizer());
+        //reset the bit so the object can be put on the list
+        //with RegisterForFinalization
+        obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
+        goto Again;
+    }
+
+    _ASSERTE(pMT->HasFinalizer());
 
 #ifdef FEATURE_EVENT_TRACE
-        ETW::GCLog::SendFinalizeObjectEvent(pMT, obj);
+    ETW::GCLog::SendFinalizeObjectEvent(pMT, OBJECTREFToObject(obj));
 #endif // FEATURE_EVENT_TRACE
 
-        MethodTable::CallFinalizer(obj);
+    // Check for precise init class constructors that have failed, if any have failed, then we didn't run the
+    // constructor for the object, and running the finalizer for the object would violate the CLI spec by running
+    // instance code without having successfully run the precise-init class constructor.
+    if (pMT->HasPreciseInitCctors())
+    {
+        MethodTable *pMTCur = pMT;
+        do
+        {
+            if ((!pMTCur->GetClass()->IsBeforeFieldInit()) && pMTCur->IsInitError())
+            {
+                // Precise init Type Initializer for type failed... do not run finalizer
+                goto Again;
+            }
+
+            pMTCur = pMTCur->GetParentMethodTable();
+        }
+        while (pMTCur != NULL);
     }
-    else
+
+    if (pMT == g_pThreadClass)
     {
-        //reset the bit so the object can be put on the list
-        //with RegisterForFinalization
-        obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
+        // Finalizing Thread object requires ThreadStoreLock. It is expensive if
+        // we keep taking ThreadStoreLock. This is very bad if we have high retiring
+        // rate of Thread objects.
+        // To avoid taking ThreadStoreLock multiple times, we mark Thread with TS_Finalized
+        // and clean up a batch of them when we take ThreadStoreLock next time.
+
+        // To avoid possible hierarchy requirement between critical finalizers, we call cleanup
+        // code directly.
+        CallFinalizerOnThreadObject(obj);
+        goto Again;
     }
+
+    return obj;
 }
 
 void FinalizerThread::FinalizeAllObjects()
@@ -90,28 +164,13 @@ void FinalizerThread::FinalizeAllObjects()
 
     FireEtwGCFinalizersBegin_V1(GetClrInstanceId());
 
-    unsigned int fcount = 0;
-
-    Object* fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
+    PREPARE_NONVIRTUAL_CALLSITE(METHOD__GC__RUN_FINALIZERS);
+    DECLARE_ARGHOLDER_ARRAY(args, 0);
 
-    Thread *pThread = GetThread();
-
-    // Finalize everyone
-    while (fobj && !fQuitFinalizer)
-    {
-        fcount++;
+    uint32_t count;
+    CALL_MANAGED_METHOD(count, uint32_t, args);
 
-        CallFinalizer(fobj);
-
-        // thread abort could be injected by the debugger,
-        // but should not be allowed to "leak" out of expression evaluation
-        _ASSERTE(!GetFinalizerThread()->IsAbortRequested());
-
-        pThread->InternalReset();
-
-        fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
-    }
-    FireEtwGCFinalizersEnd_V1(fcount, GetClrInstanceId());
+    FireEtwGCFinalizersEnd_V1(count, GetClrInstanceId());
 }
 
 void FinalizerThread::WaitForFinalizerEvent (CLREvent *event)
diff --git a/src/coreclr/vm/finalizerthread.h b/src/coreclr/vm/finalizerthread.h
index 5beabd3dbd3103..5ea3cca02bebfe 100644
--- a/src/coreclr/vm/finalizerthread.h
+++ b/src/coreclr/vm/finalizerthread.h
@@ -48,6 +48,8 @@ class FinalizerThread
 
     static BOOL HaveExtraWorkForFinalizer();
 
+    static OBJECTREF GetNextFinalizableObject();
+
     static void RaiseShutdownEvents()
     {
         WRAPPER_NO_CONTRACT;
diff --git a/src/coreclr/vm/metasig.h b/src/coreclr/vm/metasig.h
index 35e6472b4b8daf..073c843a02561f 100644
--- a/src/coreclr/vm/metasig.h
+++ b/src/coreclr/vm/metasig.h
@@ -215,6 +215,7 @@ DEFINE_METASIG(SM(Obj_ArrObject_RetVoid, j a(j), v))
 DEFINE_METASIG(SM(Obj_IntPtr_Obj_RetVoid, j I j, v))
 DEFINE_METASIG(SM(RetUIntPtr, _, U))
 DEFINE_METASIG(SM(RetIntPtr, _, I))
+DEFINE_METASIG(SM(RetUInt, _, K))
 DEFINE_METASIG(SM(RetBool, _, F))
 DEFINE_METASIG(SM(IntPtr_RetStr, I, s))
 DEFINE_METASIG(SM(IntPtr_RetBool, I, F))
diff --git a/src/coreclr/vm/methodtable.cpp b/src/coreclr/vm/methodtable.cpp
index 59e791d96552ca..a3e9641bc73e1a 100644
--- a/src/coreclr/vm/methodtable.cpp
+++ b/src/coreclr/vm/methodtable.cpp
@@ -3955,186 +3955,6 @@ OBJECTREF MethodTable::FastBox(void** data)
     return ref;
 }
 
-#if TARGET_X86 || TARGET_AMD64
-//==========================================================================================
-static void FastCallFinalize(Object *obj, PCODE funcPtr, BOOL fCriticalCall)
-{
-    STATIC_CONTRACT_THROWS;
-    STATIC_CONTRACT_GC_TRIGGERS;
-    STATIC_CONTRACT_MODE_COOPERATIVE;
-
-    BEGIN_CALL_TO_MANAGEDEX(fCriticalCall ? EEToManagedCriticalCall : EEToManagedDefault);
-
-#if defined(TARGET_X86)
-
-#ifdef TARGET_WINDOWS
-    __asm
-    {
-        mov     ecx, [obj]
-        call    [funcPtr]
-        INDEBUG(nop)            // Mark the fact that we can call managed code
-    }
-#else
-    __asm
-    (
-        "mov %%ecx, %[obj]\n\t"
-        "call *%[funcPtr]\n\t"
-        INDEBUG("nop\n\t")
-        :
-        : [obj] "m" (obj), [funcPtr] "m" (funcPtr)
-        : "ecx"
-    );
-#endif
-
-
-#else // TARGET_X86
-
-    FastCallFinalizeWorker(obj, funcPtr);
-
-#endif // TARGET_X86
-
-    END_CALL_TO_MANAGED();
-}
-
-#endif // TARGET_X86 || TARGET_AMD64
-
-void CallFinalizerOnThreadObject(Object *obj)
-{
-    STATIC_CONTRACT_MODE_COOPERATIVE;
-
-    THREADBASEREF refThis = (THREADBASEREF)ObjectToOBJECTREF(obj);
-    Thread* thread = refThis->GetInternal();
-
-    // Prevent multiple calls to Finalize
-    // Objects can be resurrected after being finalized. However, there is no
-    // race condition here. We always check whether an exposed thread object is
-    // still attached to the internal Thread object, before proceeding.
-    if (thread)
-    {
-        refThis->ResetStartHelper();
-
-        // During process shutdown, we finalize even reachable objects. But if we break
-        // the link between the System.Thread and the internal Thread object, the runtime
-        // may not work correctly. In particular, we won't be able to transition between
-        // contexts and domains to finalize other objects. Since the runtime doesn't
-        // require that Threads finalize during shutdown, we need to disable this. If
-        // we wait until phase 2 of shutdown finalization (when the EE is suspended and
-        // will never resume) then we can simply skip the side effects of Thread
-        // finalization.
-        if ((g_fEEShutDown & ShutDown_Finalize2) == 0)
-        {
-            if (GetThreadNULLOk() != thread)
-            {
-                refThis->ClearInternal();
-            }
-
-            thread->SetThreadState(Thread::TS_Finalized);
-            Thread::SetCleanupNeededForFinalizedThread();
-        }
-    }
-}
-
-//==========================================================================================
-// From the GC finalizer thread, invoke the Finalize() method on an object.
-void MethodTable::CallFinalizer(Object *obj)
-{
-    CONTRACTL
-    {
-        THROWS;
-        GC_TRIGGERS;
-        MODE_COOPERATIVE;
-        PRECONDITION(obj->GetMethodTable()->HasFinalizer());
-    }
-    CONTRACTL_END;
-
-    MethodTable *pMT = obj->GetMethodTable();
-
-    // Check for precise init class constructors that have failed, if any have failed, then we didn't run the
-    // constructor for the object, and running the finalizer for the object would violate the CLI spec by running
-    // instance code without having successfully run the precise-init class constructor.
-    if (pMT->HasPreciseInitCctors())
-    {
-        MethodTable *pMTCur = pMT;
-        do
-        {
-            if ((!pMTCur->GetClass()->IsBeforeFieldInit()) && pMTCur->IsInitError())
-            {
-                // Precise init Type Initializer for type failed... do not run finalizer
-                return;
-            }
-
-            pMTCur = pMTCur->GetParentMethodTable();
-        }
-        while (pMTCur != NULL);
-    }
-
-    if (pMT == g_pThreadClass)
-    {
-        // Finalizing Thread object requires ThreadStoreLock. It is expensive if
-        // we keep taking ThreadStoreLock. This is very bad if we have high retiring
-        // rate of Thread objects.
-        // To avoid taking ThreadStoreLock multiple times, we mark Thread with TS_Finalized
-        // and clean up a batch of them when we take ThreadStoreLock next time.
-
-        // To avoid possible hierarchy requirement between critical finalizers, we call cleanup
-        // code directly.
-        CallFinalizerOnThreadObject(obj);
-        return;
-    }
-
-
-    // Determine if the object has a critical or normal finalizer.
-    BOOL fCriticalFinalizer = pMT->HasCriticalFinalizer();
-
-    // There's no reason to actually set up a frame here. If we crawl out of the
-    // Finalize() method on this thread, we will see FRAME_TOP which indicates
-    // that the crawl should terminate. This is analogous to how KickOffThread()
-    // starts new threads in the runtime.
-    PCODE funcPtr = pMT->GetRestoredSlot(g_pObjectFinalizerMD->GetSlot());
-
-#ifdef STRESS_LOG
-    if (fCriticalFinalizer)
-    {
-        STRESS_LOG1(LF_GCALLOC, LL_INFO100, "Finalizing CriticalFinalizer %pM\n",
-                    pMT);
-    }
-#endif
-
-#if defined(TARGET_X86) || defined(TARGET_AMD64)
-
-#ifdef DEBUGGING_SUPPORTED
-    if (CORDebuggerTraceCall())
-        g_pDebugInterface->TraceCall((const BYTE *) funcPtr);
-#endif // DEBUGGING_SUPPORTED
-
-    FastCallFinalize(obj, funcPtr, fCriticalFinalizer);
-
-#else // defined(TARGET_X86) || defined(TARGET_AMD64)
-
-    PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(funcPtr);
-
-    DECLARE_ARGHOLDER_ARRAY(args, 1);
-
-    args[ARGNUM_0] = PTR_TO_ARGHOLDER(obj);
-
-    if (fCriticalFinalizer)
-    {
-        CRITICAL_CALLSITE;
-    }
-
-    CALL_MANAGED_METHOD_NORET(args);
-
-#endif // (defined(TARGET_X86) && defined(TARGET_AMD64)
-
-#ifdef STRESS_LOG
-    if (fCriticalFinalizer)
-    {
-        STRESS_LOG1(LF_GCALLOC, LL_INFO100, "Finalized CriticalFinalizer %pM without exception\n",
-                    pMT);
-    }
-#endif
-}
-
 //==========================================================================
 // If the MethodTable doesn't yet know the Exposed class that represents it via
 // Reflection, acquire that class now. Regardless, return it to the caller.
@@ -8694,4 +8514,4 @@ void MethodTable::GetStaticsOffsets(StaticsOffsetType offsetType, bool fGenericS
         *dwNonGCOffset = (uint32_t)sizeof(TADDR) * 2;
         *dwGCOffset = (uint32_t)sizeof(TADDR) * 2;
     }
-}
\ No newline at end of file
+}
diff --git a/src/coreclr/vm/object.h b/src/coreclr/vm/object.h
index c2bf72c1dee02b..412711f8a33bc8 100644
--- a/src/coreclr/vm/object.h
+++ b/src/coreclr/vm/object.h
@@ -1346,12 +1346,6 @@ class ThreadBaseObject : public Object
         m_StartHelper = NULL;
     }
 
-    void ResetName()
-    {
-        LIMITED_METHOD_CONTRACT;
-        m_Name = NULL;
-    }
-
     void SetPriority(INT32 priority)
     {
         LIMITED_METHOD_CONTRACT;
diff --git a/src/coreclr/vm/qcallentrypoints.cpp b/src/coreclr/vm/qcallentrypoints.cpp
index 2c719586aed988..e76ecb82fdbd38 100644
--- a/src/coreclr/vm/qcallentrypoints.cpp
+++ b/src/coreclr/vm/qcallentrypoints.cpp
@@ -253,6 +253,7 @@ static const Entry s_QCall[] =
     DllImportEntry(GCInterface_GetTotalMemory)
     DllImportEntry(GCInterface_Collect)
     DllImportEntry(GCInterface_ReRegisterForFinalize)
+    DllImportEntry(GCInterface_GetNextFinalizableObject)
    DllImportEntry(GCInterface_WaitForPendingFinalizers)
    DllImportEntry(GCInterface_AddMemoryPressure)
    DllImportEntry(GCInterface_RemoveMemoryPressure)
diff --git a/src/coreclr/vm/threads.cpp b/src/coreclr/vm/threads.cpp
index 48cff3c9f84002..d9c52e8343fa55 100644
--- a/src/coreclr/vm/threads.cpp
+++ b/src/coreclr/vm/threads.cpp
@@ -7708,81 +7708,6 @@ UINT64 Thread::GetTotalCount(SIZE_T threadLocalCountOffset, UINT64 *overflowCoun
     return total;
 }
 
-INT32 Thread::ResetManagedThreadObject(INT32 nPriority)
-{
-    CONTRACTL {
-        NOTHROW;
-        GC_TRIGGERS;
-    }
-    CONTRACTL_END;
-
-    GCX_COOP();
-    return ResetManagedThreadObjectInCoopMode(nPriority);
-}
-
-INT32 Thread::ResetManagedThreadObjectInCoopMode(INT32 nPriority)
-{
-    CONTRACTL {
-        NOTHROW;
-        GC_NOTRIGGER;
-        MODE_COOPERATIVE;
-    }
-    CONTRACTL_END;
-
-    THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
-    if (pObject != NULL)
-    {
-        pObject->ResetName();
-        nPriority = pObject->GetPriority();
-    }
-
-    return nPriority;
-}
-
-void Thread::InternalReset(BOOL fNotFinalizerThread, BOOL fThreadObjectResetNeeded, BOOL fResetAbort)
-{
-    CONTRACTL {
-        NOTHROW;
-        if(!fNotFinalizerThread || fThreadObjectResetNeeded) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
-    }
-    CONTRACTL_END;
-
-    _ASSERTE (this == GetThread());
-
-    INT32 nPriority = ThreadNative::PRIORITY_NORMAL;
-
-    if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread())
-    {
-        nPriority = ThreadNative::PRIORITY_HIGHEST;
-    }
-
-    if(fThreadObjectResetNeeded)
-    {
-        nPriority = ResetManagedThreadObject(nPriority);
-    }
-
-    if (fResetAbort && IsAbortRequested()) {
-        UnmarkThreadForAbort();
-    }
-
-    if (IsThreadPoolThread() && fThreadObjectResetNeeded)
-    {
-        SetBackground(TRUE);
-        if (nPriority != ThreadNative::PRIORITY_NORMAL)
-        {
-            SetThreadPriority(THREAD_PRIORITY_NORMAL);
-        }
-    }
-    else if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread())
-    {
-        SetBackground(TRUE);
-        if (nPriority != ThreadNative::PRIORITY_HIGHEST)
-        {
-            SetThreadPriority(THREAD_PRIORITY_HIGHEST);
-        }
-    }
-}
-
 DeadlockAwareLock::DeadlockAwareLock(const char *description)
   : m_pHoldingThread(NULL)
 #ifdef _DEBUG
diff --git a/src/coreclr/vm/threads.h b/src/coreclr/vm/threads.h
index 9d8f68fbddbeb7..04db79521817c7 100644
--- a/src/coreclr/vm/threads.h
+++ b/src/coreclr/vm/threads.h
@@ -671,10 +671,6 @@ class Thread
         // There are cases during managed debugging when we can run into this situation
     };
 
-    void InternalReset (BOOL fNotFinalizerThread=FALSE, BOOL fThreadObjectResetNeeded=TRUE, BOOL fResetAbort=TRUE);
-    INT32 ResetManagedThreadObject(INT32 nPriority);
-    INT32 ResetManagedThreadObjectInCoopMode(INT32 nPriority);
-
 public:
 
     HRESULT DetachThread(BOOL fDLLThreadDetach);
diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/Thread.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/Thread.cs
index 3ef77076a01988..7af434b641a34c 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Threading/Thread.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Threading/Thread.cs
@@ -404,7 +404,6 @@ internal void SetThreadPoolWorkerThreadName()
             }
         }
 
-#if !CORECLR
         [MethodImpl(MethodImplOptions.AggressiveInlining)]
         internal void ResetThreadPoolThread()
         {
@@ -416,7 +415,6 @@ internal void ResetThreadPoolThread()
                 ResetThreadPoolThreadSlow();
            }
        }
-#endif
 
        [MethodImpl(MethodImplOptions.NoInlining)]
        private void ResetThreadPoolThreadSlow()
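Reviewer note (not part of the patch): the sketch below is hypothetical test scaffolding (the class and member names are invented) that uses only public GC and threading APIs. It is one way to sanity-check the behavior this change must preserve: GC.WaitForPendingFinalizers() still blocks until the finalizer thread, now driven by the managed GC.RunFinalizers() loop shown above, has drained the finalizer queue.

using System;
using System.Runtime.CompilerServices;
using System.Threading;

internal static class FinalizationSmokeTest // hypothetical name
{
    private static int s_finalized;
    private static string? s_finalizerThreadName;

    private sealed class HasFinalizer
    {
        ~HasFinalizer()
        {
            // Runs on the finalizer thread (normally named ".NET Finalizer").
            s_finalizerThreadName ??= Thread.CurrentThread.Name;
            Interlocked.Increment(ref s_finalized);
        }
    }

    // Keep the allocations out of Main's frame so the JIT does not extend their lifetime.
    [MethodImpl(MethodImplOptions.NoInlining)]
    private static void Allocate(int count)
    {
        for (int i = 0; i < count; i++)
        {
            _ = new HasFinalizer();
        }
    }

    private static void Main()
    {
        Allocate(1000);

        GC.Collect();
        GC.WaitForPendingFinalizers(); // returns only after the finalizer queue has been drained

        // The exact count can vary (debug builds may keep some objects reachable), but it should be well above zero.
        Console.WriteLine($"Finalized {s_finalized} objects on thread '{s_finalizerThreadName}'.");
    }
}

A related observable property of the new design: RunFinalizers calls currentThread.ResetFinalizerThread() after every finalizer, so a finalizer that renames the thread, lowers its priority, or flips it to a foreground thread no longer leaks that state into subsequent finalizers.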