This repository has been archived by the owner on Jan 23, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 2.7k
/
threads.h
6686 lines (5566 loc) · 223 KB
/
threads.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// THREADS.H -
//
//
//
// Currently represents a logical and physical COM+ thread. Later, these concepts will be separated.
//
//
// #SuspendingTheRuntime
//
// One of the primary differences between runtime code (managed code), and traditional (unmanaged code) is
// the existence of the GC heap (see file:gc.cpp#Overview). For the GC to do its job, it must be able to
// traverse all references to the GC heap, including ones on the stack of every thread, as well as any in
// hardware registers. While it is simple to state this requirement, it has long reaching effects, because
// properly accounting for all GC heap references ALL the time turns out to be quite hard. When we make a
// bookkeeping mistake, a GC reference is not reported at GC time, which means it will not be updated when the
// GC happens. Since memory in the GC heap can move, this can cause the pointer to point at 'random' places
// in the GC heap, causing data corruption. This is a 'GC Hole', and is very bad. We have special modes (see
// code:EEConfig.GetGCStressLevel) called GCStress to help find such issues.
//
// In order to find all GC references on the stacks we need to ensure that no thread is manipulating a GC
// reference at the time of the scan. This is the job of code:Thread.SuspendRuntime. Logically it suspends
// every thread in the process. Unfortunately it can not literally simply call the OS SuspendThread API on
// all threads. The reason is that the other threads MIGHT hold important locks (for example there is a lock
// that is taken when unmanaged heap memory is requested, or when a DLL is loaded). In general process
// global structures in the OS will be protected by locks, and if you suspend a thread it might hold that
// lock. If you happen to need that OS service (eg you might need to allocated unmanaged memory), then
// deadlock will occur (as you wait on the suspended thread, that never wakes up).
//
// Luckily, we don't need to actually suspend the threads, we just need to insure that all GC references on
// the stack are stable. This is where the concept of cooperative mode and preemptive mode (a bad name) come
// from.
//
// #CooperativeMode
//
// The runtime keeps a table of all threads that have ever run managed code in the code:ThreadStore table.
// The ThreadStore table holds a list of Thread objects (see code:#ThreadClass). This object holds all
// information about managed threads. Cooperative mode is defined as the mode the thread is in when the field
// code:Thread.m_fPreemptiveGCDisabled is non-zero. When this field is zero the thread is said to be in
// Preemptive mode (named because if you preempt the thread in this mode, it is guaranteed to be in a place
// where a GC can occur).
//
// When a thread is in cooperative mode, it is basically saying that it is potentially modifying GC
// references, and so the runtime must Cooperate with it to get to a 'GC Safe' location where the GC
// references can be enumerated. This is the mode that a thread is in MOST times when it is running managed
// code (in fact if the EIP is in JIT compiled code, there is only one place where you are NOT in cooperative
// mode (Inlined PINVOKE transition code)). Conversely, any time non-runtime unmanaged code is running, the
// thread MUST NOT be in cooperative mode (you risk deadlock otherwise). Only code in mscorwks.dll might be
// running in either cooperative or preemptive mode.
//
// It is easier to describe the invariant associated with being in Preemptive mode. When the thread is in
// preemptive mode (when code:Thread.m_fPreemptiveGCDisabled is zero), the thread guarantees two things
//
// * That it not currently running code that manipulates GC references.
// * That it has set the code:Thread.m_pFrame pointer in the code:Thread to be a subclass of the class
// code:Frame which marks the location on the stack where the last managed method frame is. This
// allows the GC to start crawling the stack from there (essentially skip over the unmanaged frames).
// * That the thread will not reenter managed code if the global variable code:g_TrapReturningThreads is
// set (it will call code:Thread.RareDisablePreemptiveGC first which will block if a suspension is
// in progress)
//
// The basic idea is that the suspension logic in code:Thread.SuspendRuntime first sets the global variable
// code:g_TrapReturningThreads and then checks if each thread in the ThreadStore is in Cooperative mode. If a
// thread is NOT in cooperative mode, the logic simply skips the thread, because it knows that the thread
// will stop itself before reentering managed code (because code:g_TrapReturningThreads is set). This avoids
// the deadlock problem mentioned earlier, because threads that are running unmanaged code are allowed to
// run. Enumeration of GC references starts at the first managed frame (pointed at by code:Thread.m_pFrame).
//
// When a thread is in cooperative mode, it means that GC references might be being manipulated. There are
// two important possibilities
//
// * The CPU is running JIT compiled code
// * The CPU is running code elsewhere (which should only be in mscorwks.dll, because everywhere else a
// transition to preemptive mode should have happened first)
//
// * #PartiallyInteruptibleCode
// * #FullyInteruptibleCode
//
// If the Instruction pointer (x86/x64: EIP, ARM: R15/PC) is in JIT compiled code, we can detect this because we have tables that
// map the ranges of every method back to their code:MethodDesc (this is the code:ICodeManager interface). In
// addition to knowing the method, these tables also point at 'GCInfo' that tell for that method which stack
// locations and which registers hold GC references at any particular instruction pointer. If the method is
// what is called FullyInterruptible, then we have information for any possible instruction pointer in the
// method and we can simply stop the thread (however we have to do this carefully TODO explain).
//
// However for most methods, we only keep GC information for particular EIP's, in particular we keep track of
// GC reference liveness only at call sites. Thus not every location is 'GC Safe' (that is we can enumerate
// all references, but must be 'driven' to a GC safe location).
//
// We drive threads to GC safe locations by hijacking. This is a term for updating the return address on the
// stack so that we gain control when a method returns. If we find that we are in JITTed code but NOT at a GC
// safe location, then we find the return address for the method and modify it to cause the runtime to stop.
// We then let the method run. Hopefully the method quickly returns, and hits our hijack, and we are now at a
// GC-safe location (all call sites are GC-safe). If not we repeat the procedure (possibly moving the
// hijack). At some point a method returns, and we get control. For methods that have loops that don't make
// calls, we are forced to make the method FullyInterruptible, so we can be sure to stop the method.
//
// This leaves only the case where we are in cooperative modes, but not in JIT compiled code (we should be in
// clr.dll). In this case we simply let the thread run. The idea is that code in clr.dll makes the
// promise that it will not do ANYTHING that will block (which includes taking a lock), while in cooperative
// mode, or do anything that might take a long time without polling to see if a GC is needed. Thus this code
// 'cooperates' to insure that GCs can happen in a timely fashion.
//
// If you need to switch the GC mode of the current thread, look for the GCX_COOP() and GCX_PREEMP() macros.
//
#ifndef __threads_h__
#define __threads_h__
#include "vars.hpp"
#include "util.hpp"
#include "eventstore.hpp"
#include "argslot.h"
#include "regdisp.h"
#include "mscoree.h"
#include "gcheaputilities.h"
#include "gchandleutilities.h"
#include "gcinfotypes.h"
#include <clrhost.h>
class Thread;
class ThreadStore;
class MethodDesc;
struct PendingSync;
class AppDomain;
class NDirect;
class Frame;
class ThreadBaseObject;
class AppDomainStack;
class LoadLevelLimiter;
class DomainFile;
class DeadlockAwareLock;
struct HelperMethodFrameCallerList;
class ThreadLocalIBCInfo;
class EECodeInfo;
class DebuggerPatchSkip;
class FaultingExceptionFrame;
enum BinderMethodID : int;
class CRWLock;
struct LockEntry;
class PendingTypeLoadHolder;
class PrepareCodeConfig;
class NativeCodeVersion;
struct ThreadLocalBlock;
typedef DPTR(struct ThreadLocalBlock) PTR_ThreadLocalBlock;
typedef DPTR(PTR_ThreadLocalBlock) PTR_PTR_ThreadLocalBlock;
typedef void(*ADCallBackFcnType)(LPVOID);
#include "stackwalktypes.h"
#include "log.h"
#include "stackingallocator.h"
#include "excep.h"
#include "synch.h"
#include "exstate.h"
#include "threaddebugblockinginfo.h"
#include "interoputil.h"
#include "eventtrace.h"
#ifdef FEATURE_PERFTRACING
class EventPipeBufferList;
#endif // FEATURE_PERFTRACING
struct TLMTableEntry;
typedef DPTR(struct TLMTableEntry) PTR_TLMTableEntry;
typedef DPTR(struct ThreadLocalModule) PTR_ThreadLocalModule;
class ThreadStaticHandleTable;
struct ThreadLocalModule;
class Module;
// Per-thread container for thread-static storage. Each thread owns one
// ThreadLocalBlock; it holds a growable table of per-module ThreadLocalModule
// (TLM) entries, plus the handle table that keeps GC-visible thread statics
// alive via pinning handles.
struct ThreadLocalBlock
{
    friend class ClrDataAccess;

private:
    PTR_TLMTableEntry   m_pTLMTable;     // Table of ThreadLocalModules
    SIZE_T              m_TLMTableSize;  // Current size of table
    SpinLock            m_TLMTableLock;  // Spinlock used to synchronize growing the table and freeing TLM by other threads

    // Each ThreadLocalBlock has its own ThreadStaticHandleTable. The ThreadStaticHandleTable works
    // by allocating Object arrays on the GC heap and keeping them alive with pinning handles.
    //
    // We use the ThreadStaticHandleTable to allocate space for GC thread statics. A GC thread
    // static is thread static that is either a reference type or a value type whose layout
    // contains a pointer to a reference type.
    ThreadStaticHandleTable * m_pThreadStaticHandleTable;

    // Need to keep a list of the pinning handles we've created
    // so they can be cleaned up when the thread dies
    ObjectHandleList          m_PinningHandleList;

public:
#ifndef DACCESS_COMPILE
    // Records a pinning handle so FreePinningHandles can destroy it later.
    void AddPinningHandleToList(OBJECTHANDLE oh);
    // Destroys every handle recorded in m_PinningHandleList.
    void FreePinningHandles();
    void AllocateThreadStaticHandles(Module * pModule, ThreadLocalModule * pThreadLocalModule);
    // NOTE(review): ppLazyAllocate appears to cache the allocation for the
    // caller — confirm the exact contract against the definition in threads.cpp.
    OBJECTHANDLE AllocateStaticFieldObjRefPtrs(int nRequested, OBJECTHANDLE* ppLazyAllocate = NULL);
    void InitThreadStaticHandleTable();
    void AllocateThreadStaticBoxes(MethodTable* pMT);
#endif

public: // used by code generators
    // Offset of the TLM table pointer, for generated code that indexes it directly.
    static SIZE_T GetOffsetOfModuleSlotsPointer() { return offsetof(ThreadLocalBlock, m_pTLMTable); }

public:
#ifndef DACCESS_COMPILE
    ThreadLocalBlock()
        : m_pTLMTable(NULL), m_TLMTableSize(0), m_pThreadStaticHandleTable(NULL)
    {
        m_TLMTableLock.Init(LOCK_TYPE_DEFAULT);
    }

    void FreeTLM(SIZE_T i, BOOL isThreadShuttingDown);
    void FreeTable();
    void EnsureModuleIndex(ModuleIndex index);
#endif

    void SetModuleSlot(ModuleIndex index, PTR_ThreadLocalModule pLocalModule);
    PTR_ThreadLocalModule GetTLMIfExists(ModuleIndex index);
    PTR_ThreadLocalModule GetTLMIfExists(MethodTable* pMT);

#ifdef DACCESS_COMPILE
    void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif
};
#ifdef CROSSGEN_COMPILE
#include "asmconstants.h"
// Minimal stub of the Thread class used for the CROSSGEN_COMPILE build (see
// the #ifdef above). Crossgen does not run managed code, so nearly every
// member is a no-op or returns a fixed value; the members that carry real
// state are the ones the type loader and code generators use (thread-local
// block, load-level limiter, pending type load, NC state bits).
class Thread
{
    friend class ThreadStatics;

    ThreadLocalBlock m_ThreadLocalBlock;

public:
    // Stack-range checks are meaningless in this build; always claim "yes".
    BOOL IsAddressInStack (PTR_VOID addr) const { return TRUE; }
    static BOOL IsAddressInCurrentStack (PTR_VOID addr) { return TRUE; }

    StackingAllocator* m_stackLocalAllocator = NULL;
    bool CheckCanUseStackAlloc() { return true; }

private:
    LoadLevelLimiter *m_pLoadLimiter;

public:
    LoadLevelLimiter *GetLoadLevelLimiter()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pLoadLimiter;
    }

    void SetLoadLevelLimiter(LoadLevelLimiter *limiter)
    {
        LIMITED_METHOD_CONTRACT;
        m_pLoadLimiter = limiter;
    }

    // No managed frame chain exists in this build.
    PTR_Frame GetFrame() { return NULL; }
    void SetFrame(Frame *pFrame) { }

    DWORD CatchAtSafePoint() { return 0; }
    DWORD CatchAtSafePointOpportunistic() { return 0; }

    static void ObjectRefProtected(const OBJECTREF* ref) { }
    static void ObjectRefNew(const OBJECTREF* ref) { }

    // GC-mode transitions are no-ops here.
    void EnablePreemptiveGC() { }
    void DisablePreemptiveGC() { }

    inline void IncLockCount() { }
    inline void DecLockCount() { }

    static LPVOID GetStaticFieldAddress(FieldDesc *pFD) { return NULL; }

    PTR_AppDomain GetDomain() { return ::GetAppDomain(); }

    DWORD GetThreadId() { return 0; }

    inline DWORD GetOverridesCount() { return 0; }
    inline BOOL CheckThreadWideSpecialFlag(DWORD flags) { return 0; }

    BOOL PreemptiveGCDisabled() { return false; }
    void PulseGCMode() { }

    OBJECTREF GetThrowable() { return NULL; }
    OBJECTREF LastThrownObject() { return NULL; }

    static BOOL Debug_AllowCallout() { return TRUE; }

    static void IncForbidSuspendThread() { }
    static void DecForbidSuspendThread() { }
    typedef StateHolder<Thread::IncForbidSuspendThread, Thread::DecForbidSuspendThread> ForbidSuspendThreadHolder;

    // Field offsets baked into generated code. The Thread_m_* constants come
    // from asmconstants.h (included above) and must fit in a signed byte.
    static BYTE GetOffsetOfCurrentFrame()
    {
        LIMITED_METHOD_CONTRACT;
        size_t ofs = Thread_m_pFrame;
        _ASSERTE(FitsInI1(ofs));
        return (BYTE)ofs;
    }

    static BYTE GetOffsetOfGCFlag()
    {
        LIMITED_METHOD_CONTRACT;
        size_t ofs = Thread_m_fPreemptiveGCDisabled;
        _ASSERTE(FitsInI1(ofs));
        return (BYTE)ofs;
    }

    void SetLoadingFile(DomainFile *pFile)
    {
    }

    typedef Holder<Thread *, DoNothing, DoNothing> LoadingFileHolder;

    // Intentionally empty in this build: no thread states are defined.
    enum ThreadState
    {
    };

    BOOL HasThreadState(ThreadState ts)
    {
        LIMITED_METHOD_CONTRACT;
        return ((DWORD)m_State & ts);
    }

    BOOL HasThreadStateOpportunistic(ThreadState ts)
    {
        LIMITED_METHOD_CONTRACT;
        // Unsynchronized read; callers accept a possibly-stale answer.
        return m_State.LoadWithoutBarrier() & ts;
    }

    Volatile<ThreadState> m_State;

    enum ThreadStateNoConcurrency
    {
        TSNC_OwnsSpinLock         = 0x00000400, // The thread owns a spinlock.

        TSNC_LoadsTypeViolation   = 0x40000000, // Use by type loader to break deadlocks caused by type load level ordering violations
    };

    ThreadStateNoConcurrency m_StateNC;

    void SetThreadStateNC(ThreadStateNoConcurrency tsnc)
    {
        LIMITED_METHOD_CONTRACT;
        m_StateNC = (ThreadStateNoConcurrency)((DWORD)m_StateNC | tsnc);
    }

    void ResetThreadStateNC(ThreadStateNoConcurrency tsnc)
    {
        LIMITED_METHOD_CONTRACT;
        m_StateNC = (ThreadStateNoConcurrency)((DWORD)m_StateNC & ~tsnc);
    }

    BOOL HasThreadStateNC(ThreadStateNoConcurrency tsnc)
    {
        LIMITED_METHOD_DAC_CONTRACT;
        return ((DWORD)m_StateNC & tsnc);
    }

    PendingTypeLoadHolder* m_pPendingTypeLoad;

#ifndef DACCESS_COMPILE
    PendingTypeLoadHolder* GetPendingTypeLoad()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pPendingTypeLoad;
    }

    void SetPendingTypeLoad(PendingTypeLoadHolder* pPendingTypeLoad)
    {
        LIMITED_METHOD_CONTRACT;
        m_pPendingTypeLoad = pPendingTypeLoad;
    }
#endif

    // Profiler callback state is not tracked in this build.
    void SetProfilerCallbackFullState(DWORD dwFullState)
    {
        LIMITED_METHOD_CONTRACT;
    }

    DWORD SetProfilerCallbackStateFlags(DWORD dwFlags)
    {
        LIMITED_METHOD_CONTRACT;
        return dwFlags;
    }

#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
    enum ApartmentState { AS_Unknown };
#endif

    DWORD m_dwLastError;
};
// Crossgen stub of AVInRuntimeImplOkayHolder. In this build the holder
// tracks nothing: construction (with or without a Thread) and destruction
// are deliberate no-ops.
class AVInRuntimeImplOkayHolder
{
public:
    AVInRuntimeImplOkayHolder() { LIMITED_METHOD_CONTRACT; }
    AVInRuntimeImplOkayHolder(Thread * pThread) { LIMITED_METHOD_CONTRACT; }
    ~AVInRuntimeImplOkayHolder() { LIMITED_METHOD_CONTRACT; }
};
// Crossgen build: there are no special EE threads, so this always says no.
inline BOOL dbgOnly_IsSpecialEEThread()
{
    return FALSE;
}
#define FORBIDGC_LOADER_USE_ENABLED() false
#define ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE() ;
#define BEGIN_FORBID_TYPELOAD()
#define END_FORBID_TYPELOAD()
#define TRIGGERS_TYPELOAD()
#define TRIGGERSGC() ANNOTATION_GC_TRIGGERS
// Crossgen stub: nothing to trip for, so this is a deliberate no-op.
inline void CommonTripThread()
{
}
// Crossgen stub of DeadlockAwareLock: no real locking happens in this build,
// so entry is always permitted and every operation is a no-op.
class DeadlockAwareLock
{
public:
    DeadlockAwareLock(const char *description = NULL) { }
    ~DeadlockAwareLock() { }

    BOOL CanEnterLock() { return TRUE; }

    BOOL TryBeginEnterLock() { return TRUE; }
    void BeginEnterLock() { }

    void EndEnterLock() { }

    void LeaveLock() { }

public:
    typedef StateHolder<DoNothing,DoNothing> BlockingLockHolder;
};
// Do not include threads.inl
#define _THREADS_INL
typedef Thread::ForbidSuspendThreadHolder ForbidSuspendThreadHolder;
#else // CROSSGEN_COMPILE
#if (defined(_TARGET_ARM_) && defined(FEATURE_EMULATE_SINGLESTEP))
#include "armsinglestepper.h"
#endif
#if (defined(_TARGET_ARM64_) && defined(FEATURE_EMULATE_SINGLESTEP))
#include "arm64singlestepper.h"
#endif
#if !defined(PLATFORM_SUPPORTS_SAFE_THREADSUSPEND)
// DISABLE_THREADSUSPEND controls whether Thread::SuspendThread will be used at all.
// This API is dangerous on non-Windows platforms, as it can lead to deadlocks,
// due to low level OS resources that the PAL is not aware of, or due to the fact that
// PAL-unaware code in the process may hold onto some OS resources.
#define DISABLE_THREADSUSPEND
#endif
// NT thread priorities range from -15 to +15.
#define INVALID_THREAD_PRIORITY ((DWORD)0x80000000)
// For a fiber which switched out, we set its OSID to a special number
// Note: there's a copy of this macro in strike.cpp
#define SWITCHED_OUT_FIBER_OSID 0xbaadf00d;
#ifdef _DEBUG
// A thread doesn't receive its id until fully constructed.
#define UNINITIALIZED_THREADID 0xbaadf00d
#endif //_DEBUG
// Capture all the synchronization requests, for debugging purposes
#if defined(_DEBUG) && defined(TRACK_SYNC)
// Each thread has a stack that tracks all enter and leave requests
// Debug-only interface for recording synchronization activity: an
// implementation receives a callback for every enter and leave request
// (per the comment above, each thread keeps a stack of these).
struct Dbg_TrackSync
{
    virtual ~Dbg_TrackSync() = default;

    // Record that `caller` issued an enter request on pAwareLock.
    virtual void EnterSync    (UINT_PTR caller, void *pAwareLock) = 0;
    // Record that `caller` issued a leave request on pAwareLock.
    virtual void LeaveSync    (UINT_PTR caller, void *pAwareLock) = 0;
};
EXTERN_C void EnterSyncHelper (UINT_PTR caller, void *pAwareLock);
EXTERN_C void LeaveSyncHelper (UINT_PTR caller, void *pAwareLock);
#endif // TRACK_SYNC
//***************************************************************************
#ifdef FEATURE_HIJACK
// Used to capture information about the state of execution of a *SUSPENDED* thread.
struct ExecutionState;
#ifndef PLATFORM_UNIX
// This is the type of the start function of a redirected thread pulled from
// a HandledJITCase during runtime suspension
typedef void (__stdcall *PFN_REDIRECTTARGET)();
// Describes the weird argument sets during hijacking
struct HijackArgs;
#endif // !PLATFORM_UNIX
#endif // FEATURE_HIJACK
//***************************************************************************
#ifdef ENABLE_CONTRACTS_IMPL
// Contract-build variant of GetThread() for callers that can tolerate a
// NULL result. Wraps the call in the BEGIN/END_GETTHREAD_ALLOWED_IN_
// NO_THROW_REGION brackets so it may be used where a bare GetThread()
// would trip contract checking (see the contract macros for details).
inline Thread* GetThreadNULLOk()
{
    LIMITED_METHOD_CONTRACT;
    Thread * pThread;
    BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
    pThread = GetThread();
    END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
    return pThread;
}
#else
#define GetThreadNULLOk() GetThread()
#endif
// manifest constant for waiting in the exposed classlibs
const INT32 INFINITE_TIMEOUT = -1;
/***************************************************************************/
// Public enum shared between thread and threadpool
// These are two kinds of threadpool thread that the threadpool mgr needs
// to keep track of
// Discriminates the kinds of threadpool threads the threadpool manager
// tracks (shared between thread and threadpool, per the comment above).
enum ThreadpoolThreadType
{
    WorkerThread,
    CompletionPortThread,
    WaitThread,
    TimerMgrThread
};
//***************************************************************************
// Public functions
//
// Thread* GetThread() - returns current Thread
// Thread* SetupThread() - creates new Thread.
// Thread* SetupUnstartedThread() - creates new unstarted Thread which
// (obviously) isn't in a TLS.
// void DestroyThread() - the underlying logical thread is going
// away.
// void DetachThread() - the underlying logical thread is going
// away but we don't want to destroy it yet.
//
// Public functions for ASM code generators
//
// Thread* __stdcall CreateThreadBlockThrow() - creates new Thread on reverse p-invoke
//
// Public functions for one-time init/cleanup
//
// void InitThreadManager() - onetime init
// void TerminateThreadManager() - onetime cleanup
//
// Public functions for taking control of a thread at a safe point
//
// VOID OnHijackTripThread() - we've hijacked a JIT method
// VOID OnHijackFPTripThread() - we've hijacked a JIT method,
// and need to save the x87 FP stack.
//
//***************************************************************************
//***************************************************************************
// Public functions
//***************************************************************************
//---------------------------------------------------------------------------
//
//---------------------------------------------------------------------------
Thread* SetupThread(BOOL fInternal);
// Convenience wrapper: set up the current thread as an ordinary
// (non-internal) runtime thread. See SetupThread(BOOL fInternal).
inline Thread* SetupThread()
{
    WRAPPER_NO_CONTRACT;
    const BOOL fInternal = FALSE;
    return SetupThread(fInternal);
}
// A host can deny a thread entering runtime by returning a NULL IHostTask.
// But we do want threads used by threadpool.
// Convenience wrapper: set up the current thread as an internal runtime
// thread (e.g. a threadpool thread). See SetupThread(BOOL fInternal).
inline Thread* SetupInternalThread()
{
    WRAPPER_NO_CONTRACT;
    const BOOL fInternal = TRUE;
    return SetupThread(fInternal);
}
Thread* SetupThreadNoThrow(HRESULT *phresult = NULL);
// WARNING : only GC calls this with bRequiresTSL set to FALSE.
Thread* SetupUnstartedThread(BOOL bRequiresTSL=TRUE);
void DestroyThread(Thread *th);
DWORD GetRuntimeId();
EXTERN_C Thread* WINAPI CreateThreadBlockThrow();
//---------------------------------------------------------------------------
// One-time initialization. Called during Dll initialization.
//---------------------------------------------------------------------------
void InitThreadManager();
// When we want to take control of a thread at a safe point, the thread will
// eventually come back to us in one of the following trip functions:
#ifdef FEATURE_HIJACK
EXTERN_C void WINAPI OnHijackTripThread();
#ifdef _TARGET_X86_
EXTERN_C void WINAPI OnHijackFPTripThread(); // hijacked JIT code is returning an FP value
#endif // _TARGET_X86_
#endif // FEATURE_HIJACK
void CommonTripThread();
// When we resume a thread at a new location, to get an exception thrown, we have to
// pretend the exception originated elsewhere.
EXTERN_C void ThrowControlForThread(
#ifdef FEATURE_EH_FUNCLETS
FaultingExceptionFrame *pfef
#endif // FEATURE_EH_FUNCLETS
);
// RWLock state inside TLS
// Per-thread reader/writer-lock bookkeeping kept in TLS (see comment
// above); entries form a doubly-linked list, one node per held lock.
struct LockEntry
{
    LockEntry *pNext;    // next entry
    LockEntry *pPrev;    // prev entry
    LONG dwULockID;      // together with dwLLockID, identifies the owning lock
    LONG dwLLockID;      // owning lock
    WORD wReaderLevel;   // reader nesting level
};
#if defined(_DEBUG)
BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId );
#endif
#ifdef FEATURE_COMINTEROP
#define RCW_STACK_SIZE 64
// One fixed-size segment (RCW_STACK_SIZE slots) of an RCW stack. Segments
// are singly linked through m_pNext; RCWStackHeader owns and walks the chain.
class RCWStack
{
public:
    inline RCWStack()
    {
        LIMITED_METHOD_CONTRACT;
        // Zeroes m_pNext and every slot of m_pList in one shot.
        memset(this, 0, sizeof(RCWStack));
    }

    // Stores pRCW (which may be NULL) at the given slot of this segment.
    inline VOID SetEntry(unsigned int index, RCW* pRCW)
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            PRECONDITION(index < RCW_STACK_SIZE);
            PRECONDITION(CheckPointer(pRCW, NULL_OK));
        }
        CONTRACTL_END;

        m_pList[index] = pRCW;
    }

    // Returns the slot's current value (may be NULL).
    inline RCW* GetEntry(unsigned int index)
    {
        CONTRACT (RCW*)
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            PRECONDITION(index < RCW_STACK_SIZE);
        }
        CONTRACT_END;

        RETURN m_pList[index];
    }

    // Links pStack as the next segment; callable only while m_pNext is
    // still NULL (segments are never re-linked).
    inline VOID SetNextStack(RCWStack* pStack)
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            PRECONDITION(CheckPointer(pStack));
            PRECONDITION(m_pNext == NULL);
        }
        CONTRACTL_END;

        m_pNext = pStack;
    }

    // Returns the next segment in the chain, or NULL for the last one.
    inline RCWStack* GetNextStack()
    {
        CONTRACT (RCWStack*)
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
        }
        CONTRACT_END;

        RETURN m_pNext;
    }

private:
    RCWStack* m_pNext;                  // next segment, NULL at the tail
    RCW*      m_pList[RCW_STACK_SIZE];  // this segment's slots
};
// Growable stack of RCW pointers built from chained RCWStack segments of
// RCW_STACK_SIZE slots each. m_iIndex is the zero-based index of the first
// free slot counted across the whole chain; the head segment always exists,
// so operations on indices below RCW_STACK_SIZE take a fast path that
// touches only m_pHead.
class RCWStackHeader
{
public:
    RCWStackHeader()
    {
        CONTRACTL
        {
            THROWS;
            GC_NOTRIGGER;
            MODE_ANY;
        }
        CONTRACTL_END;

        m_iIndex = 0;
        m_iSize = RCW_STACK_SIZE;
        m_pHead = new RCWStack();
    }

    ~RCWStackHeader()
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
        }
        CONTRACTL_END;

        // Walk the chain and delete every segment, head included.
        RCWStack* pStack = m_pHead;
        RCWStack* pNextStack = NULL;
        while (pStack)
        {
            pNextStack = pStack->GetNextStack();
            delete pStack;
            pStack = pNextStack;
        }
    }

    // Pushes pRCW on top of the stack. Returns false only when a new segment
    // was required and its allocation failed (the stack is unchanged then).
    bool Push(RCW* pRCW)
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            PRECONDITION(CheckPointer(pRCW, NULL_OK));
        }
        CONTRACTL_END;

        if (!GrowListIfNeeded())
            return false;

        // Fast Path
        if (m_iIndex < RCW_STACK_SIZE)
        {
            m_pHead->SetEntry(m_iIndex, pRCW);
            m_iIndex++;
            return true;
        }

        // Slow Path: walk to the segment containing global slot m_iIndex.
        unsigned int count = m_iIndex;
        RCWStack* pStack = m_pHead;
        while (count >= RCW_STACK_SIZE)
        {
            pStack = pStack->GetNextStack();
            _ASSERTE(pStack);
            count -= RCW_STACK_SIZE;
        }

        pStack->SetEntry(count, pRCW);
        m_iIndex++;
        return true;
    }

    // Pops the top entry (which may legitimately be NULL) and clears its slot.
    RCW* Pop()
    {
        CONTRACT (RCW*)
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            PRECONDITION(m_iIndex > 0);
            POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
        }
        CONTRACT_END;

        RCW* pRCW = NULL;

        // After the decrement, m_iIndex is the global slot of the top entry.
        m_iIndex--;

        // Fast Path
        if (m_iIndex < RCW_STACK_SIZE)
        {
            pRCW = m_pHead->GetEntry(m_iIndex);
            m_pHead->SetEntry(m_iIndex, NULL);
            RETURN pRCW;
        }

        // Slow Path: walk to the segment containing global slot m_iIndex.
        unsigned int count = m_iIndex;
        RCWStack* pStack = m_pHead;
        while (count >= RCW_STACK_SIZE)
        {
            pStack = pStack->GetNextStack();
            _ASSERTE(pStack);
            count -= RCW_STACK_SIZE;
        }

        pRCW = pStack->GetEntry(count);
        pStack->SetEntry(count, NULL);

        RETURN pRCW;
    }

    // Linear scan of the live entries for pRCW.
    BOOL IsInStack(RCW* pRCW)
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            PRECONDITION(CheckPointer(pRCW));
        }
        CONTRACTL_END;

        if (m_iIndex == 0)
            return FALSE;

        // Fast Path: all live entries fit in the head segment.
        // (<= is correct here: m_iIndex is a count, not a slot index.)
        if (m_iIndex <= RCW_STACK_SIZE)
        {
            for (int i = 0; i < (int)m_iIndex; i++)
            {
                if (pRCW == m_pHead->GetEntry(i))
                    return TRUE;
            }
            return FALSE;
        }

        // Slow Path: scan each segment, stopping after m_iIndex live entries.
        RCWStack* pStack = m_pHead;
        int totalcount = 0;
        while (pStack != NULL)
        {
            for (int i = 0; (i < RCW_STACK_SIZE) && (totalcount < m_iIndex); i++, totalcount++)
            {
                if (pRCW == pStack->GetEntry(i))
                    return TRUE;
            }
            pStack = pStack->GetNextStack();
        }

        return FALSE;
    }

private:
    // Appends one empty segment when the chain is completely full
    // (m_iIndex == m_iSize). Returns false on allocation failure.
    bool GrowListIfNeeded()
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            INJECT_FAULT(COMPlusThrowOM());
            PRECONDITION(CheckPointer(m_pHead));
        }
        CONTRACTL_END;

        if (m_iIndex == m_iSize)
        {
            // Find the current tail segment and link a new one after it.
            RCWStack* pStack = m_pHead;
            RCWStack* pNextStack = NULL;
            while ( (pNextStack = pStack->GetNextStack()) != NULL)
                pStack = pNextStack;

            RCWStack* pNewStack = new (nothrow) RCWStack();
            if (NULL == pNewStack)
                return false;

            pStack->SetNextStack(pNewStack);
            m_iSize += RCW_STACK_SIZE;
        }

        return true;
    }

    // Zero-based index to the first free element in the list.
    int m_iIndex;

    // Total size of the list, including all stacks.
    int m_iSize;

    // Pointer to the first stack.
    RCWStack* m_pHead;
};
#endif // FEATURE_COMINTEROP
typedef DWORD (*AppropriateWaitFunc) (void *args, DWORD timeout, DWORD option);
// The Thread class represents a managed thread. This thread could be internal
// or external (i.e. it wandered in from outside the runtime). For internal
// threads, it could correspond to an exposed System.Thread object or it
// could correspond to an internal worker thread of the runtime.
//
// If there's a physical Win32 thread underneath this object (i.e. it isn't an
// unstarted System.Thread), then this instance can be found in the TLS
// of that physical thread.
// FEATURE_MULTIREG_RETURN is set for platforms where a struct return value
// [GcInfo v2 only] can be returned in multiple registers
// ex: Windows/Unix ARM/ARM64, Unix-AMD64.
//
//
// UNIX_AMD64_ABI is a specific kind of FEATURE_MULTIREG_RETURN
// [GcInfo v1 and v2] specified by SystemV ABI for AMD64
//
#ifdef FEATURE_HIJACK // Hijack function returning
EXTERN_C void STDCALL OnHijackWorker(HijackArgs * pArgs);
#endif // FEATURE_HIJACK
// This is the code we pass around for Thread.Interrupt, mainly for assertions
#define APC_Code 0xEECEECEE
#ifdef DACCESS_COMPILE
class BaseStackGuard;
#endif
// #ThreadClass
//
// A code:Thread contains all the per-thread information needed by the runtime. You can get at this
// structure through an OS TLS slot; see code:#RuntimeThreadLocals for more.
// Implementing IUnknown would prevent the field (e.g. m_Context) layout from being rearranged (which will need to be fixed in
// "asmconstants.h" for the respective architecture). As it is, ICLRTask derives from IUnknown and would have got IUnknown implemented
// here - so doing this explicitly and maintaining layout sanity should be just fine.
class Thread
{
friend struct ThreadQueue; // used to enqueue & dequeue threads onto SyncBlocks
friend class ThreadStore;
friend class ThreadSuspend;
friend class SyncBlock;
friend struct PendingSync;
friend class AppDomain;
friend class ThreadNative;
friend class DeadlockAwareLock;
#ifdef _DEBUG
friend class EEContract;
#endif
#ifdef DACCESS_COMPILE
friend class ClrDataAccess;
friend class ClrDataTask;
#endif
friend BOOL NTGetThreadContext(Thread *pThread, T_CONTEXT *pContext);
friend BOOL NTSetThreadContext(Thread *pThread, const T_CONTEXT *pContext);