Skip to content
This repository has been archived by the owner on Jan 23, 2023. It is now read-only.

Commit

Permalink
Add Large pages support in GC
Browse files Browse the repository at this point in the history
  • Loading branch information
mjsabby committed Mar 25, 2019
1 parent 245b0fd commit 68b3796
Show file tree
Hide file tree
Showing 7 changed files with 172 additions and 9 deletions.
8 changes: 8 additions & 0 deletions src/gc/env/gcenv.os.h
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,14 @@ class GCToOSInterface
// true if it has succeeded, false if it has failed
static bool VirtualCommit(void *address, size_t size, uint32_t node = NUMA_NODE_UNDEFINED);

// Reserve and commit a virtual memory range backed by large pages.
// Parameters:
//  size - size of the virtual memory range
//  node - NUMA node to allocate on, NUMA_NODE_UNDEFINED for no specific node
// Return:
//  starting virtual address of the committed range, or nullptr on failure
static void* VirtualReserveAndCommitLargePages(size_t size, uint32_t node = NUMA_NODE_UNDEFINED);

// Decommit virtual memory range.
// Parameters:
// address - starting virtual address
Expand Down
27 changes: 20 additions & 7 deletions src/gc/gc.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2411,6 +2411,7 @@ void qsort1(uint8_t** low, uint8_t** high, unsigned int depth);
#endif //USE_INTROSORT

void* virtual_alloc (size_t size);
void* virtual_alloc (size_t size, bool use_large_pages_p);
void virtual_free (void* add, size_t size);

/* per heap static initialization */
Expand Down Expand Up @@ -2826,6 +2827,7 @@ GCSpinLock gc_heap::gc_lock;

size_t gc_heap::eph_gen_starts_size = 0;
heap_segment* gc_heap::segment_standby_list;
size_t gc_heap::use_large_pages_p = 0;
size_t gc_heap::last_gc_index = 0;
#ifdef SEG_MAPPING_TABLE
size_t gc_heap::min_segment_size = 0;
Expand Down Expand Up @@ -4271,7 +4273,7 @@ typedef struct

initial_memory_details memory_details;

BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_heaps)
BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_heaps, bool use_large_pages_p)
{
BOOL reserve_success = FALSE;

Expand Down Expand Up @@ -4332,10 +4334,10 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
// try to allocate 2 blocks
uint8_t* b1 = 0;
uint8_t* b2 = 0;
b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size);
b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size, use_large_pages_p);
if (b1)
{
b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size);
b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size, use_large_pages_p);
if (b2)
{
memory_details.allocation_pattern = initial_memory_details::TWO_STAGE;
Expand Down Expand Up @@ -4368,7 +4370,7 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
memory_details.block_size_normal :
memory_details.block_size_large);
current_block->memory_base =
(uint8_t*)virtual_alloc (block_size);
(uint8_t*)virtual_alloc (block_size, use_large_pages_p);
if (current_block->memory_base == 0)
{
// Free the blocks that we've allocated so far
Expand Down Expand Up @@ -4476,6 +4478,11 @@ heap_segment* get_initial_segment (size_t size, int h_number)
}

// Reserves virtual memory for the GC heap.
// Convenience overload: forwards to the two-argument form with large
// pages disabled, preserving the behavior of pre-existing callers.
void* virtual_alloc (size_t size)
{
return virtual_alloc(size, false);
}

void* virtual_alloc (size_t size, bool use_large_pages_p)
{
size_t requested_size = size;

Expand All @@ -4496,7 +4503,8 @@ void* virtual_alloc (size_t size)
flags = VirtualReserveFlags::WriteWatch;
}
#endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
void* prgmem = GCToOSInterface::VirtualReserve (requested_size, card_size * card_word_width, flags);

void* prgmem = use_large_pages_p ? GCToOSInterface::VirtualReserveAndCommitLargePages(requested_size, card_size * card_word_width) : GCToOSInterface::VirtualReserve(requested_size, card_size * card_word_width, flags);
void *aligned_mem = prgmem;

// We don't want (prgmem + size) to be right at the end of the address space
Expand Down Expand Up @@ -9308,7 +9316,7 @@ heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, int h
heap_segment_mem (new_segment) = start;
heap_segment_used (new_segment) = start;
heap_segment_reserved (new_segment) = new_pages + size;
heap_segment_committed (new_segment) = new_pages + initial_commit;
heap_segment_committed(new_segment) = (use_large_pages_p ? heap_segment_reserved(new_segment) : (new_pages + initial_commit));
init_heap_segment (new_segment);
dprintf (2, ("Creating heap segment %Ix", (size_t)new_segment));
return new_segment;
Expand Down Expand Up @@ -9399,6 +9407,8 @@ void gc_heap::reset_heap_segment_pages (heap_segment* seg)
void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
size_t extra_space)
{
if (use_large_pages_p)
return;
uint8_t* page_start = align_on_page (heap_segment_allocated(seg));
size_t size = heap_segment_committed (seg) - page_start;
extra_space = align_on_page (extra_space);
Expand Down Expand Up @@ -10108,12 +10118,15 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
block_count = 1;
#endif //MULTIPLE_HEAPS

use_large_pages_p = false;

if (heap_hard_limit)
{
check_commit_cs.Initialize();
use_large_pages_p = GCConfig::GetGCLargePages();
}

if (!reserve_initial_memory(segment_size,heap_size,block_count))
if (!reserve_initial_memory(segment_size, heap_size, block_count, use_large_pages_p))
return E_OUTOFMEMORY;

#ifdef CARD_BUNDLE
Expand Down
1 change: 1 addition & 0 deletions src/gc/gcconfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ class GCConfigStringHolder
"Specifies the name of the GC config log file") \
BOOL_CONFIG(GCNumaAware, "GCNumaAware", true, "Enables numa allocations in the GC") \
BOOL_CONFIG(GCCpuGroup, "GCCpuGroup", false, "Enables CPU groups in the GC") \
BOOL_CONFIG(GCLargePages, "GCUseLargePages", false, "Enables using Large Pages in the GC") \
INT_CONFIG(HeapVerifyLevel, "HeapVerify", HEAPVERIFY_NONE, \
"When set verifies the integrity of the managed heap on entry and exit of each GC") \
INT_CONFIG(LOHCompactionMode, "GCLOHCompact", 0, "Specifies the LOH compaction mode") \
Expand Down
4 changes: 4 additions & 0 deletions src/gc/gcpriv.h
Original file line number Diff line number Diff line change
Expand Up @@ -3145,6 +3145,10 @@ class gc_heap
PER_HEAP_ISOLATED
size_t current_total_committed_gc_own;

// Nonzero when the GC heap should be allocated with large pages.
PER_HEAP_ISOLATED
size_t use_large_pages_p;

PER_HEAP_ISOLATED
size_t last_gc_index;

Expand Down
35 changes: 33 additions & 2 deletions src/gc/unix/gcenv.unix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -271,7 +271,7 @@ void GCToOSInterface::YieldThread(uint32_t switchCount)
// flags - flags to control special settings like write watching
// Return:
// Starting virtual address of the reserved range
void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
static VirtualReserveInner(size_t size, size_t alignment, uint32_t flags, uint32_t hugePagesFlag = 0)
{
assert(!(flags & VirtualReserveFlags::WriteWatch) && "WriteWatch not supported on Unix");
if (alignment == 0)
Expand All @@ -280,7 +280,7 @@ void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t fl
}

size_t alignedSize = size + (alignment - OS_PAGE_SIZE);
void * pRetVal = mmap(nullptr, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
void * pRetVal = mmap(nullptr, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE | hugePagesFlag, -1, 0);

if (pRetVal != NULL)
{
Expand All @@ -305,6 +305,18 @@ void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t fl
return pRetVal;
}

// Reserve virtual memory range.
// Parameters:
//  size      - size of the virtual memory range
//  alignment - requested memory alignment, 0 means no specific alignment requested
//  flags     - flags to control special settings like write watching
// Return:
//  Starting virtual address of the reserved range
void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
{
    // A plain reservation is the shared helper with no huge-pages mmap flag.
    void* reserved = VirtualReserveInner(size, alignment, flags);
    return reserved;
}

// Release virtual memory range previously reserved using VirtualReserve
// Parameters:
// address - starting virtual address
Expand All @@ -318,6 +330,25 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
return (ret == 0);
}

// Commit virtual memory range.
// Parameters:
// size - size of the virtual memory range
// alignment - requested memory alignment, 0 means no specific alignment requested
// flags - flags to control special settings like write watching
// node - NUMA node
// Return:
// Starting virtual address of the committed range
void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size, uint32_t node)
{
void* pRetVal = VirtualReserveInner(size, alignment, 0, MAP_HUGETLB);
if (VirtualCommit(pRetVal, size, node))
{
return pRetVal;
}

return nullptr;
}

// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
// address - starting virtual address
Expand Down
53 changes: 53 additions & 0 deletions src/gc/windows/gcenv.windows.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -594,6 +594,59 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
return !!::VirtualFree(address, 0, MEM_RELEASE);
}

// Commit virtual memory range.
// Parameters:
// size - size of the virtual memory range
// alignment - requested memory alignment, 0 means no specific alignment requested
// flags - flags to control special settings like write watching
// node - NUMA node
// Return:
// Starting virtual address of the committed range
void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size, uint32_t node)
{
void* pRetVal = nullptr;

TOKEN_PRIVILEGES tp;
LUID luid;
if (!LookupPrivilegeValueW(nullptr, SE_LOCK_MEMORY_NAME, &luid))
{
return nullptr;
}

tp.PrivilegeCount = 1;
tp.Privileges[0].Luid = luid;
tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

HANDLE token;
if (!OpenProcessToken(::GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token))
{
return nullptr;
}

if (!AdjustTokenPrivileges(token, FALSE, &tp, 0, nullptr, 0))
{
CloseHandle(token);
return nullptr;
}

CloseHandle(token);

auto largePageMinimum = GetLargePageMinimum();
size = (size + (largePageMinimum - 1)) & ~(largePageMinimum - 1);

if (node == NUMA_NODE_UNDEFINED)
{
pRetVal = ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
}
else
{
assert(g_fEnableGCNumaAware);
pRetVal = ::VirtualAllocExNuma(::GetCurrentProcess(), nullptr, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE, node);
}

return pRetVal;
}

// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
// address - starting virtual address
Expand Down
53 changes: 53 additions & 0 deletions src/vm/gcenv.os.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,59 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
return !!::ClrVirtualFree(address, 0, MEM_RELEASE);
}

// Commit virtual memory range.
// Parameters:
// size - size of the virtual memory range
// node - NUMA node
// Return:
// Starting virtual address of the committed range
void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size, uint32_t node)
{
LIMITED_METHOD_CONTRACT;

#if !defined(FEATURE_PAL)
TOKEN_PRIVILEGES tp;
LUID luid;
if (!LookupPrivilegeValueW(nullptr, SE_LOCK_MEMORY_NAME, &luid))
{
return nullptr;
}

tp.PrivilegeCount = 1;
tp.Privileges[0].Luid = luid;
tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

HANDLE token;
if (!OpenProcessToken(::GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token))
{
return nullptr;
}

if (!AdjustTokenPrivileges(token, FALSE, &tp, 0, nullptr, 0))
{
CloseHandle(token);
return nullptr;
}

CloseHandle(token);

auto largePageMinimum = GetLargePageMinimum();
size = (size + (largePageMinimum - 1)) & ~(largePageMinimum - 1);
#endif

void* pRetVal = nullptr;
if (node == NUMA_NODE_UNDEFINED)
{
pRetVal = ::ClrVirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
}
else
{
pRetVal = NumaNodeInfo::VirtualAllocExNuma(::GetCurrentProcess(), nullptr, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE, node);
}

return pRetVal;
}

// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
// address - starting virtual address
Expand Down

0 comments on commit 68b3796

Please sign in to comment.