-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy pathdebug.h
237 lines (186 loc) · 4.57 KB
/
debug.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
#if BONSAI_DEBUG_SYSTEM_API
// Default path of the hot-reloadable debug-system shared library;
// PLATFORM_RUNTIME_LIB_EXTENSION supplies the platform suffix (.so/.dll/...).
#define DEFAULT_DEBUG_LIB "./bin/lib_debug_system_loadable" PLATFORM_RUNTIME_LIB_EXTENSION
// Identity hash for an arena block: the block's own address is its key.
inline umm
HashArenaBlock(memory_arena *Arena)
{
  return (umm)Arena;
}
// Hash an arena by the address stored in its Start member
// (distinct from HashArenaBlock, which keys on the struct address itself).
inline umm
HashArena(memory_arena *Arena)
{
  umm Hash = (umm)Arena->Start;
  return Hash;
}
#if 0
// TODO(Jesse, id: 161, tags: back_burner, debug_recording): Reinstate this!
// Disabled input-recording support: snapshot main memory and capture per-frame
// hotkeys so a session can be recorded and deterministically played back.
enum debug_recording_mode
{
RecordingMode_Clear,
RecordingMode_Record,
RecordingMode_Playback,
RecordingMode_Count,
};
// 3600 frames of input: 60 seconds at 60Hz.
#define DEBUG_RECORD_INPUT_SIZE 3600
struct debug_recording_state
{
s32 FramesRecorded;
s32 FramesPlayedBack;
debug_recording_mode Mode;
memory_arena RecordedMainMemory;   // snapshot of main memory taken at record start
hotkeys Inputs[DEBUG_RECORD_INPUT_SIZE];
};
#endif
struct memory_record;
// Ordering/equality predicate over two memory_records, used to sort or
// filter the allocation meta table in the debug UI.
typedef b32 (*meta_comparator)(memory_record*, memory_record*);
// Per-function invocation counter accumulated by the profiler.
struct called_function
{
const char* Name;      // function name; not owned (expected to be a static string)
u32 CallCount;
};
// Record of a draw-call site for the debug overlay.
struct debug_draw_call
{
const char * Caller;   // call-site identifier; not owned
u32 N;                 // NOTE(review): looks like vertex/element count per call — confirm at fill site
u32 Calls;             // number of times this site issued the draw
};
// Min/max/average frame-time statistics, in the same units as the
// dt values folded into it (presumably seconds or ms — confirm at usage).
struct min_max_avg_dt
{
r32 Min;
r32 Max;
r32 Avg;
};
// Aggregate usage counters for a memory arena.
struct memory_arena_stats
{
u64 Allocations;     // number of OS-level block allocations
u64 Pushes;          // number of individual push operations
u64 TotalAllocated;  // bytes allocated in total
u64 Remaining;       // bytes still available in the current block(s)
};
// poof metaprogram: generates an equality helper for memory_arena_stats;
// the generated code is pulled in by the include directly below.
poof(are_equal(memory_arena_stats))
#include <generated/are_equal_memory_arena_stats.h>
struct debug_profile_scope;
struct debug_scope_tree;
// Whether a context-switch event put our thread on (scheduled) or
// off (descheduled) a processor.
enum debug_context_switch_type
{
ContextSwitch_Undefined,
ContextSwitch_On,
ContextSwitch_Off
};
struct context_switch_event;
// One recorded scheduler event: which core, at what cycle count,
// and whether the thread went on or off it.
struct debug_context_switch_event
{
debug_context_switch_type Type;
u32 ProcessorNumber;
u64 CycleCount;
/* context_switch_event *SystemEvent; */
};
#define MAX_CONTEXT_SWITCH_EVENTS ((u32)Kilobytes(1))
// Fixed-capacity append buffer of context-switch events.
struct debug_context_switch_event_buffer
{
u32 At;    // next write index; also the count of valid events
u32 End; // one-past-last
debug_context_switch_event *Events;
};
// TODO(Jesse): Metaprogram this now that we have more flexible parameters
// Singly-linked block of a context-switch event stream; Min/MaxCycles
// bound the cycle counts of the events stored in this block's Buffer.
struct debug_context_switch_event_buffer_stream_block
{
umm MinCycles;
umm MaxCycles;
debug_context_switch_event_buffer Buffer;
debug_context_switch_event_buffer_stream_block *Next;
};
// Growable stream of event blocks: a list from FirstBlock, a write cursor
// (CurrentBlock), and a free list for recycling exhausted blocks.
struct debug_context_switch_event_buffer_stream
{
debug_context_switch_event_buffer_stream_block *FirstBlock;
debug_context_switch_event_buffer_stream_block *CurrentBlock;
debug_context_switch_event_buffer_stream_block *FirstFreeBlock;
};
// Generic capacity check; defined elsewhere. Parameter name says "Verts"
// but it is used here for event buffers as well.
template <typename T> b32 BufferHasRoomFor(T *Buffer, u32 VertsToPush);
// Most recently written event in the buffer, or null when it is empty.
link_internal debug_context_switch_event*
GetLatest(debug_context_switch_event_buffer *Buf)
{
  debug_context_switch_event *Latest = 0;
  if (Buf->At > 0)
  {
    Latest = &Buf->Events[Buf->At - 1];
  }
  return Latest;
}
// Latest event of a stream block: delegates to its embedded buffer.
link_internal debug_context_switch_event*
GetLatest(debug_context_switch_event_buffer_stream_block *Block)
{
  return GetLatest(&Block->Buffer);
}
// Latest event of a stream: delegates to the current write block.
// NOTE(review): assumes CurrentBlock is non-null — confirm at call sites.
link_internal debug_context_switch_event*
GetLatest(debug_context_switch_event_buffer_stream *Stream)
{
  return GetLatest(Stream->CurrentBlock);
}
// Per-thread profiler state. Sized/padded to exactly one cache line
// (enforced by the CAssert below) so threads never false-share, and so the
// WriteIndex read below cannot straddle a line. Do NOT reorder members.
struct debug_thread_state
{
memory_arena *Memory;
memory_arena *MemoryFor_debug_profile_scope; // Specifically for allocationg debug_profile_scope structs
memory_record *MetaTable;
debug_scope_tree *ScopeTrees;
debug_profile_scope *FirstFreeScope;
mutex_op_array *MutexOps;
// Note(Jesse): This must not straddle a cache line;
// on x86, reads are defined to be atomic if they do not straddle a cache line
// multiple threads read from the main threads copy of this
//
// TODO(Jesse): What about other chips? Should we just use atomic read instructions?
volatile u32 WriteIndex;
u32 ThreadId;
debug_context_switch_event_buffer_stream *ContextSwitches;
// TODO(Jesse): Make a 32-bit define instead
#if EMCC
u8 Pad[36];   // pointers are 4 bytes under Emscripten, so more pad is needed
#else
/* u8 Pad[12]; */
#endif
};
CAssert(sizeof(debug_thread_state) == CACHE_LINE_SIZE);
// Aggregated timing for all invocations of one named scope
// (deduplicated view of debug_profile_scope records), kept as a linked list.
struct unique_debug_profile_scope
{
const char* Name;                  // scope name; not owned
u32 CallCount;
u64 TotalCycles;
u64 MinCycles = u64_MAX;           // start at max so the first sample always lowers it
u64 MaxCycles;
debug_profile_scope* Scope;        // a representative underlying scope record
unique_debug_profile_scope* NextUnique;
};
// Identifies an arena the user selected in the debug UI. Stored as raw
// addresses (see HashArena/HashArenaBlock above) rather than pointers,
// so a stale selection cannot be dereferenced.
struct selected_memory_arena
{
umm ArenaAddress;
umm ArenaBlockAddress;
};
#define MAX_SELECTED_ARENAS 128
// Fixed-size set of currently selected arenas in the memory debug view.
struct selected_arenas
{
u32 Count;
selected_memory_arena Arenas[MAX_SELECTED_ARENAS];
};
// Timing summary for a single frame.
struct frame_stats
{
u64 TotalCycles;
u64 StartingCycle;
r32 FrameMs;       // wall-clock frame duration in milliseconds
};
// Entry in the debug system's table of known arenas.
struct registered_memory_arena
{
memory_arena *Arena;
const char* Name;    // display name; not owned
s32 ThreadId;        // owning thread
b32 Expanded;        // UI state: tree node expanded?
b32 Tombstone;       // slot freed; may be reused by a later registration
};
// Lifecycle of the OS context-switch event-tracing backend.
enum event_tracing_status
{
EventTracingStatus_Unstarted,
EventTracingStatus_Starting,
EventTracingStatus_Running,
EventTracingStatus_Error,
};
// Shared with the tracing thread. NOTE(review): volatile is not a
// synchronization primitive — an atomic would be stricter; confirm the
// access pattern (single writer?) before relying on this.
global_variable volatile event_tracing_status Global_EventTracingStatus = {};
#endif