Fix setting breakpoints on AVX 256 instructions and other 32 byte immediate instructions (#54786)
hoyosjs committed Jun 28, 2021
1 parent 6cb345d commit 1c383a2
Showing 4 changed files with 62 additions and 36 deletions.
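The change, in brief: when a breakpoint patch is bypassed, the original instruction executes out of line, and any data reached through a RIP-relative operand is staged in the SharedPatchBypassBuffer's side buffer. AVX 256 instructions move a full 32-byte YMM operand, which did not fit the previous 16-byte buffer. Below is a minimal standalone sketch of the sizing constraint the new _ASSERTE enforces; kBypassBufferSize and CopyOperandToBypass are illustrative names, not the runtime's API.

// Sketch only: stage the bytes a RIP-relative operand refers to, so the
// relocated copy of the instruction can reference the buffer instead.
#include <cassert>
#include <cstdint>
#include <cstring>

constexpr size_t kBypassBufferSize = 0x20;   // mirrors the new cbBufferBypass (was 0x10)

void CopyOperandToBypass(uint8_t* bypassBuffer, const uint8_t* operandAddr, size_t operandSize)
{
    // A 256-bit AVX load/store touches 32 bytes; the old 16-byte buffer could not hold it.
    assert(operandSize <= kBypassBufferSize);
    memcpy(bypassBuffer, operandAddr, operandSize);   // copy only what the instruction uses
}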
15 changes: 10 additions & 5 deletions src/coreclr/debug/ee/controller.cpp
@@ -1393,7 +1393,7 @@ bool DebuggerController::ApplyPatch(DebuggerControllerPatch *patch)
_ASSERTE(!"VirtualProtect of code page failed");
return false;
}
#endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
#endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
}
// TODO: : determine if this is needed for AMD64
#if defined(TARGET_X86) //REVISIT_TODO what is this?!
@@ -1496,7 +1496,7 @@ bool DebuggerController::UnapplyPatch(DebuggerControllerPatch *patch)
_ASSERTE(!"VirtualProtect of code page failed");
return false;
}
#endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
#endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
}
else
{
@@ -4352,6 +4352,7 @@ DebuggerPatchSkip::DebuggerPatchSkip(Thread *thread,

m_pSharedPatchBypassBuffer = patch->GetOrCreateSharedPatchBypassBuffer();
BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass;
LOG((LF_CORDB, LL_INFO10000, "DPS::DPS: Patch skip for opcode 0x%.4x at address %p buffer allocated at 0x%.8x\n", patch->opcode, patch->address, m_pSharedPatchBypassBuffer));

// Copy the instruction block over to the patch skip
// WARNING: there used to be an issue here because CopyInstructionBlock copied the breakpoint from the
@@ -4412,8 +4413,9 @@ DebuggerPatchSkip::DebuggerPatchSkip(Thread *thread,
}
else
{
_ASSERTE(m_instrAttrib.m_cOperandSize <= SharedPatchBypassBuffer::cbBufferBypass);
// Copy the data into our buffer.
memcpy(bufferBypass, patch->address + m_instrAttrib.m_cbInstr + dwOldDisp, SharedPatchBypassBuffer::cbBufferBypass);
memcpy(bufferBypass, patch->address + m_instrAttrib.m_cbInstr + dwOldDisp, m_instrAttrib.m_cOperandSize);

if (m_instrAttrib.m_fIsWrite)
{
@@ -4901,11 +4903,14 @@ bool DebuggerPatchSkip::TriggerSingleStep(Thread *thread, const BYTE *ip)
break;

case 16:
memcpy(reinterpret_cast<void*>(targetFixup), bufferBypass, 16);
case 32:
memcpy(reinterpret_cast<void*>(targetFixup), bufferBypass, fixupSize);
break;

default:
_ASSERTE(!"bad operand size");
_ASSERTE(!"bad operand size. If you hit this and it was because we need to process instructions with larger \
relative immediates, make sure to update the SharedPatchBypassBuffer size, the DebuggerHeapExecutableMemoryAllocator, \
and structures depending on DBG_MAX_EXECUTABLE_ALLOC_SIZE.");
}
}
#endif
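After the relocated instruction single-steps, writes that went through the bypass buffer have to be copied back to the instruction's true RIP-relative target; the amended switch above now handles both 16-byte (XMM) and 32-byte (YMM) operands with one size-driven memcpy. A rough standalone sketch of that dispatch follows; WriteBackOperand is an illustrative helper, and the real switch presumably has earlier cases for smaller operands that this hunk does not show.

// Sketch only: restore the operand bytes from the bypass buffer to the
// address the original instruction would have written.
#include <cassert>
#include <cstdint>
#include <cstring>

void WriteBackOperand(uintptr_t targetFixup, const uint8_t* bufferBypass, size_t fixupSize)
{
    switch (fixupSize)
    {
    case 16:   // XMM-sized operand
    case 32:   // YMM-sized operand, the case this commit adds
        memcpy(reinterpret_cast<void*>(targetFixup), bufferBypass, fixupSize);
        break;
    default:
        assert(!"unexpected operand size");   // mirrors the _ASSERTE in the real code
    }
}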
4 changes: 3 additions & 1 deletion src/coreclr/debug/ee/controller.h
@@ -288,7 +288,9 @@ class SharedPatchBypassBuffer
// "PatchBypass" must be the first field of this class for alignment to be correct.
BYTE PatchBypass[MAX_INSTRUCTION_LENGTH];
#if defined(TARGET_AMD64)
const static int cbBufferBypass = 0x10;
// If you update this value, make sure that it fits in the data payload of a
// DebuggerHeapExecutableMemoryChunk. This will need to be bumped to 0x40 for AVX 512 support.
const static int cbBufferBypass = 0x20;
BYTE BypassBuffer[cbBufferBypass];

UINT_PTR RipTargetFixup;
41 changes: 24 additions & 17 deletions src/coreclr/debug/ee/debugger.cpp
@@ -365,10 +365,10 @@ void Debugger::DoNotCallDirectlyPrivateLock(void)
//
Thread * pThread;
bool fIsCooperative;

pThread = g_pEEInterface->GetThread();
fIsCooperative = (pThread != NULL) && (pThread->PreemptiveGCDisabled());

if (m_fShutdownMode && !fIsCooperative)
{
// The big fear is that some other random thread will take the debugger-lock and then block on something else,
@@ -16299,7 +16299,8 @@ void* DebuggerHeapExecutableMemoryAllocator::Allocate(DWORD numberOfBytes)
}
}

return ChangePageUsage(pageToAllocateOn, chunkToUse, ChangePageUsageAction::ALLOCATE);
ASSERT(chunkToUse >= 1 && (uint)chunkToUse < CHUNKS_PER_DEBUGGERHEAP);
return GetPointerToChunkWithUsageUpdate(pageToAllocateOn, chunkToUse, ChangePageUsageAction::ALLOCATE);
}

void DebuggerHeapExecutableMemoryAllocator::Free(void* addr)
@@ -16314,16 +16315,17 @@ void DebuggerHeapExecutableMemoryAllocator::Free(void* addr)
int chunkNum = static_cast<DebuggerHeapExecutableMemoryChunk*>(addr)->data.chunkNumber;

// Sanity check: assert that the address really represents the start of a chunk.
ASSERT(((uint64_t)addr - (uint64_t)pageToFreeIn) % 64 == 0);
ASSERT(((uint64_t)addr - (uint64_t)pageToFreeIn) % EXPECTED_CHUNKSIZE == 0);

ChangePageUsage(pageToFreeIn, chunkNum, ChangePageUsageAction::FREE);
GetPointerToChunkWithUsageUpdate(pageToFreeIn, chunkNum, ChangePageUsageAction::FREE);
}

DebuggerHeapExecutableMemoryPage* DebuggerHeapExecutableMemoryAllocator::AddNewPage()
{
void* newPageAddr = VirtualAlloc(NULL, sizeof(DebuggerHeapExecutableMemoryPage), MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);

DebuggerHeapExecutableMemoryPage *newPage = new (newPageAddr) DebuggerHeapExecutableMemoryPage;
CrstHolder execMemAllocCrstHolder(&m_execMemAllocMutex);
newPage->SetNextPage(m_pages);

// Add the new page to the linked list of pages
@@ -16333,8 +16335,9 @@ DebuggerHeapExecutableMemoryPage* DebuggerHeapExecutableMemoryAllocator::AddNewP

bool DebuggerHeapExecutableMemoryAllocator::CheckPageForAvailability(DebuggerHeapExecutableMemoryPage* page, /* _Out_ */ int* chunkToUse)
{
CrstHolder execMemAllocCrstHolder(&m_execMemAllocMutex);
uint64_t occupancy = page->GetPageOccupancy();
bool available = occupancy != UINT64_MAX;
bool available = occupancy != MAX_CHUNK_MASK;

if (!available)
{
@@ -16348,13 +16351,13 @@ bool DebuggerHeapExecutableMemoryAllocator::CheckPageForAvailability(DebuggerHea

if (chunkToUse)
{
// Start i at 62 because first chunk is reserved
for (int i = 62; i >= 0; i--)
// skip the first bit, as that's used by the booking chunk.
for (int i = CHUNKS_PER_DEBUGGERHEAP - 2; i >= 0; i--)
{
uint64_t mask = ((uint64_t)1 << i);
uint64_t mask = (1ull << i);
if ((mask & occupancy) == 0)
{
*chunkToUse = 64 - i - 1;
*chunkToUse = CHUNKS_PER_DEBUGGERHEAP - i - 1;
break;
}
}
@@ -16363,12 +16366,12 @@ bool DebuggerHeapExecutableMemoryAllocator::CheckPageForAvailability(DebuggerHea
return true;
}

void* DebuggerHeapExecutableMemoryAllocator::ChangePageUsage(DebuggerHeapExecutableMemoryPage* page, int chunkNumber, ChangePageUsageAction action)
void* DebuggerHeapExecutableMemoryAllocator::GetPointerToChunkWithUsageUpdate(DebuggerHeapExecutableMemoryPage* page, int chunkNumber, ChangePageUsageAction action)
{
ASSERT(action == ChangePageUsageAction::ALLOCATE || action == ChangePageUsageAction::FREE);
uint64_t mask = 1ull << (CHUNKS_PER_DEBUGGERHEAP - chunkNumber - 1);

uint64_t mask = (uint64_t)0x1 << (64 - chunkNumber - 1);

CrstHolder execMemAllocCrstHolder(&m_execMemAllocMutex);
uint64_t prevOccupancy = page->GetPageOccupancy();
uint64_t newOccupancy = (action == ChangePageUsageAction::ALLOCATE) ? (prevOccupancy | mask) : (prevOccupancy ^ mask);
page->SetPageOccupancy(newOccupancy);
@@ -16459,11 +16462,15 @@ HRESULT DebuggerHeap::Init(BOOL fExecutable)
#endif

#ifndef HOST_WINDOWS
m_execMemAllocator = new (nothrow) DebuggerHeapExecutableMemoryAllocator();
ASSERT(m_execMemAllocator != NULL);
if (m_execMemAllocator == NULL)
m_execMemAllocator = NULL;
if (m_fExecutable)
{
return E_OUTOFMEMORY;
m_execMemAllocator = new (nothrow) DebuggerHeapExecutableMemoryAllocator();
ASSERT(m_execMemAllocator != NULL);
if (m_execMemAllocator == NULL)
{
return E_OUTOFMEMORY;
}
}
#endif

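The allocator changes above swap hard-coded 64-chunk arithmetic for CHUNKS_PER_DEBUGGERHEAP, since chunks grew from 64 to 128 bytes and a 4 KiB page now holds 32 of them. Here is a standalone sketch of the occupancy-bit bookkeeping used by CheckPageForAvailability and GetPointerToChunkWithUsageUpdate; MaskForChunk and FindFreeChunk are illustrative helpers, not the runtime's API.

// Sketch only: chunk 0 (the bookkeeping chunk) owns the highest usable bit of
// the occupancy word; data chunks are numbered 1..kChunksPerPage-1.
#include <cstdint>

constexpr int kChunksPerPage = 4096 / 128;   // CHUNKS_PER_DEBUGGERHEAP == 32

constexpr uint64_t MaskForChunk(int chunkNumber)
{
    return 1ull << (kChunksPerPage - chunkNumber - 1);
}

// Scan for a free data chunk, skipping the bookkeeping bit; returns -1 when the page is full.
int FindFreeChunk(uint64_t occupancy)
{
    for (int i = kChunksPerPage - 2; i >= 0; i--)
    {
        if ((occupancy & (1ull << i)) == 0)
            return kChunksPerPage - i - 1;   // yields chunk numbers 1..31
    }
    return -1;
}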
38 changes: 25 additions & 13 deletions src/coreclr/debug/ee/debugger.h
@@ -1047,7 +1047,12 @@ class DebuggerMethodInfo
// different part of the address space (not on the heap).
// ------------------------------------------------------------------------ */

#define DBG_MAX_EXECUTABLE_ALLOC_SIZE 48
constexpr uint64_t DBG_MAX_EXECUTABLE_ALLOC_SIZE=112;
constexpr uint64_t EXPECTED_CHUNKSIZE=128;
constexpr uint64_t DEBUGGERHEAP_PAGESIZE=4096;
constexpr uint64_t CHUNKS_PER_DEBUGGERHEAP=(DEBUGGERHEAP_PAGESIZE / EXPECTED_CHUNKSIZE);
constexpr uint64_t MAX_CHUNK_MASK=((1ull << CHUNKS_PER_DEBUGGERHEAP) - 1);
constexpr uint64_t BOOKKEEPING_CHUNK_MASK (1ull << (CHUNKS_PER_DEBUGGERHEAP - 1));

// Forward declaration
struct DebuggerHeapExecutableMemoryPage;
@@ -1060,7 +1065,7 @@ struct DebuggerHeapExecutableMemoryPage;
// for the page, and the remaining ones are DataChunks and are handed out
// by the allocator when it allocates memory.
// ------------------------------------------------------------------------ */
union DECLSPEC_ALIGN(64) DebuggerHeapExecutableMemoryChunk {
union DECLSPEC_ALIGN(EXPECTED_CHUNKSIZE) DebuggerHeapExecutableMemoryChunk {

struct DataChunk
{
@@ -1078,13 +1083,14 @@ union DECLSPEC_ALIGN(64) DebuggerHeapExecutableMemoryChunk {
DebuggerHeapExecutableMemoryPage *nextPage;

uint64_t pageOccupancy;
static_assert(CHUNKS_PER_DEBUGGERHEAP <= sizeof(pageOccupancy) * 8,
"Our interfaces assume the chunks in a page can be masken on this field");

} bookkeeping;

char _alignpad[64];
char _alignpad[EXPECTED_CHUNKSIZE];
};

static_assert(sizeof(DebuggerHeapExecutableMemoryChunk) == 64, "DebuggerHeapExecutableMemoryChunk is expect to be 64 bytes.");
static_assert(sizeof(DebuggerHeapExecutableMemoryChunk) == EXPECTED_CHUNKSIZE, "DebuggerHeapExecutableMemoryChunk is expect to be EXPECTED_CHUNKSIZE bytes.");

// ------------------------------------------------------------------------ */
// DebuggerHeapExecutableMemoryPage
@@ -1095,7 +1101,7 @@ static_assert(sizeof(DebuggerHeapExecutableMemoryChunk) == 64, "DebuggerHeapExec
// about which of the other chunks are used/free as well as a pointer to
// the next page.
// ------------------------------------------------------------------------ */
struct DECLSPEC_ALIGN(4096) DebuggerHeapExecutableMemoryPage
struct DECLSPEC_ALIGN(DEBUGGERHEAP_PAGESIZE) DebuggerHeapExecutableMemoryPage
{
inline DebuggerHeapExecutableMemoryPage* GetNextPage()
{
@@ -1115,24 +1121,25 @@ struct DECLSPEC_ALIGN(4096) DebuggerHeapExecutableMemoryPage

inline void SetPageOccupancy(uint64_t newOccupancy)
{
// Can't unset first bit of occupancy!
ASSERT((newOccupancy & 0x8000000000000000) != 0);

// Can't unset the bookmark chunk!
ASSERT((newOccupancy & BOOKKEEPING_CHUNK_MASK) != 0);
ASSERT(newOccupancy <= MAX_CHUNK_MASK);
ExecutableWriterHolder<DebuggerHeapExecutableMemoryPage> debuggerHeapPageWriterHolder(this, sizeof(DebuggerHeapExecutableMemoryPage));
debuggerHeapPageWriterHolder.GetRW()->chunks[0].bookkeeping.pageOccupancy = newOccupancy;
}

inline void* GetPointerToChunk(int chunkNum) const
{
ASSERT(chunkNum >= 0 && (uint)chunkNum < CHUNKS_PER_DEBUGGERHEAP);
return (char*)this + chunkNum * sizeof(DebuggerHeapExecutableMemoryChunk);
}

DebuggerHeapExecutableMemoryPage()
{
ExecutableWriterHolder<DebuggerHeapExecutableMemoryPage> debuggerHeapPageWriterHolder(this, sizeof(DebuggerHeapExecutableMemoryPage));

SetPageOccupancy(0x8000000000000000); // only the first bit is set.
for (uint8_t i = 1; i < sizeof(chunks)/sizeof(chunks[0]); i++)
SetPageOccupancy(BOOKKEEPING_CHUNK_MASK); // only the first bit is set.
for (uint8_t i = 1; i < CHUNKS_PER_DEBUGGERHEAP; i++)
{
ASSERT(i != 0);
debuggerHeapPageWriterHolder.GetRW()->chunks[i].data.startOfPage = this;
Expand All @@ -1141,8 +1148,13 @@ struct DECLSPEC_ALIGN(4096) DebuggerHeapExecutableMemoryPage
}

private:
DebuggerHeapExecutableMemoryChunk chunks[64];
DebuggerHeapExecutableMemoryChunk chunks[CHUNKS_PER_DEBUGGERHEAP];
static_assert(sizeof(chunks) == DEBUGGERHEAP_PAGESIZE,
"Expected DebuggerHeapExecutableMemoryPage to have DEBUGGERHEAP_PAGESIZE bytes worth of chunks.");

};
static_assert(sizeof(DebuggerHeapExecutableMemoryPage) == DEBUGGERHEAP_PAGESIZE,
"DebuggerHeapExecutableMemoryPage exceeded the expected size.");

// ------------------------------------------------------------------------ */
// DebuggerHeapExecutableMemoryAllocator class
@@ -1170,7 +1182,7 @@ class DebuggerHeapExecutableMemoryAllocator

DebuggerHeapExecutableMemoryPage* AddNewPage();
bool CheckPageForAvailability(DebuggerHeapExecutableMemoryPage* page, /* _Out_ */ int* chunkToUse);
void* ChangePageUsage(DebuggerHeapExecutableMemoryPage* page, int chunkNumber, ChangePageUsageAction action);
void* GetPointerToChunkWithUsageUpdate(DebuggerHeapExecutableMemoryPage* page, int chunkNumber, ChangePageUsageAction action);

private:
// Linked list of pages that have been allocated
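For reference, the new sizing constants in debugger.h fit together as follows. This is a standalone sanity-check sketch with values mirrored from the diff; the DataChunk header size (a page pointer plus a chunk number) is an assumption based on the fields visible in this file.

// Sketch only: compile-time checks of the relationships the diff relies on.
#include <cstdint>

constexpr uint64_t DBG_MAX_EXECUTABLE_ALLOC_SIZE = 112;
constexpr uint64_t EXPECTED_CHUNKSIZE            = 128;
constexpr uint64_t DEBUGGERHEAP_PAGESIZE         = 4096;
constexpr uint64_t CHUNKS_PER_DEBUGGERHEAP       = DEBUGGERHEAP_PAGESIZE / EXPECTED_CHUNKSIZE;
constexpr uint64_t MAX_CHUNK_MASK                = (1ull << CHUNKS_PER_DEBUGGERHEAP) - 1;
constexpr uint64_t BOOKKEEPING_CHUNK_MASK        = 1ull << (CHUNKS_PER_DEBUGGERHEAP - 1);

// 32 chunks of 128 bytes tile a 4 KiB page exactly.
static_assert(CHUNKS_PER_DEBUGGERHEAP * EXPECTED_CHUNKSIZE == DEBUGGERHEAP_PAGESIZE, "chunks must tile the page");
// One occupancy bit per chunk fits in the uint64_t bookkeeping field.
static_assert(CHUNKS_PER_DEBUGGERHEAP <= 64, "occupancy must fit in 64 bits");
// The bookkeeping chunk's bit lies inside the valid mask.
static_assert((BOOKKEEPING_CHUNK_MASK & MAX_CHUNK_MASK) == BOOKKEEPING_CHUNK_MASK, "bookkeeping bit in range");
// The 112-byte payload plus the assumed header (pointer + chunk number, padded) fits in one chunk.
static_assert(DBG_MAX_EXECUTABLE_ALLOC_SIZE + sizeof(void*) + sizeof(uint8_t) <= EXPECTED_CHUNKSIZE, "payload must fit in a chunk");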
