Skip to content

Commit

Permalink
Fixed improper filtering assumptions for certain VMP versions, fixed …
Browse files Browse the repository at this point in the history
…always saving using the process image module name.
  • Loading branch information
0xnobody committed Sep 7, 2020
1 parent 4aafcda commit 49b37db
Show file tree
Hide file tree
Showing 7 changed files with 82 additions and 35 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
* `[-disable-reloc]`: An optional setting to instruct VMPDump to mark that relocs have been stripped in the output image, forcing the image to load at the dumped ImageBase. This is useful if runnable dumps are desired.

VMProtect initialization and unpacking must be complete in the target process before running VMPDump. This means it must be at or past the OEP (Original Entry Point).
The dumped and fixed image will appear in the module directory, under the name `<Module Name>.VMPDump.<Module Extension>`.
The dumped and fixed image will appear in the process image module directory, under the name `<Target Module Name>.VMPDump.<Target Module Extension>`.

## How It Works
VMProtect injects stubs for every import call or jmp. These stubs resolve the 'obfuscated' thunk in the `.vmpX` section, and add a fixed constant to 'deobfuscate' it. The calls or jumps themselves are then dispatched with a ret instruction.
Expand Down
29 changes: 26 additions & 3 deletions VMPDump/disassembler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,11 @@

namespace vmpdump
{
// Disassembles at the effective address, negotiating jumps according to the flags.
// Disassembles at the offset from the base, negotiating jumps according to the flags.
// NOTE: The offset is used for the disassembled instructions' addresses.
// If the number of instructions disassembled exceeds the provided max amount, an empty instruction stream is returned.
//
instruction_stream disassembler::disassemble( uint64_t base, uint64_t offset, disassembler_flags flags )
instruction_stream disassembler::disassemble( uint64_t base, uint64_t offset, disassembler_flags flags, uint64_t max_instructions )
{
// ea = base + offset
//
Expand All @@ -14,10 +16,31 @@ namespace vmpdump

size_t size = 0xFFFFFFFFFFFFFFFFull;

uint64_t i = 0;

// Helper lambda to exception-wrap the disassembly.
// This is useful as we may be dealing with invalid instructions which may cause an access violation.
//
auto disasm = [&]() -> bool
{
__try
{
return cs_disasm_iter( handle, ( const uint8_t** )&ea, &size, &offset, insn );
}
__except ( 1 ) {}
return false;
};

// While iterative disassembly is successful.
//
while ( cs_disasm_iter( handle, ( const uint8_t** )&ea, &size, &offset, insn ) )
while ( disasm() )
{
// Check max bounds.
//
if ( i >= max_instructions )
return instruction_stream {};
i++;

// Construct a self-containing instruction.
//
auto ins = std::make_shared<instruction>( insn );
Expand Down
3 changes: 2 additions & 1 deletion VMPDump/disassembler.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -88,8 +88,9 @@ namespace vmpdump

// Disassembles at the offset from the base, negotiating jumps according to the flags.
// NOTE: The offset is used for the disassembled instructions' addresses.
// If the number of instructions disassembled exceeds the provided max amount, an empty instruction stream is returned.
//
instruction_stream disassemble( uint64_t base, uint64_t offset, disassembler_flags flags = disassembler_take_unconditional_imm );
instruction_stream disassemble( uint64_t base, uint64_t offset, disassembler_flags flags = disassembler_take_unconditional_imm, uint64_t max_instructions = -1 );

// Disassembles at the offset from the base, simply disassembling every instruction in order.
//
Expand Down
5 changes: 5 additions & 0 deletions VMPDump/instruction_stream.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,11 @@ namespace vmpdump
// Lift the single instruction.
//
lifter.process( block, ins->ins.address, ins->ins.bytes );

// If block branches, end lifting.
//
if ( block->is_complete() )
break;
}

// Return the created basic block.
Expand Down
6 changes: 6 additions & 0 deletions VMPDump/instruction_stream.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,12 @@ namespace vmpdump
instruction_stream& operator= ( instruction_stream&& ) = default;
instruction_stream& operator= ( const instruction_stream& ) = default;

// Construct as empty: no instructions, zero begin/end bounds, and the
// cursor at index 0. Used as the sentinel return value when disassembly
// is aborted (e.g. the max-instruction limit is exceeded).
//
instruction_stream()
: instructions{}, begin( 0 ), end( 0 ), index( 0 )
{}

// Construct via copying existing instruction vector
//
instruction_stream( const std::vector<std::shared_ptr<instruction>>& instructions )
Expand Down
5 changes: 4 additions & 1 deletion VMPDump/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ namespace vmpdump
//
settings = parse_settings( arguments );
#else
settings = { 0x720, "", {}, true };
settings = { 0x1244, "", { 0x1D420 }, true };
#endif

if ( !settings )
Expand Down Expand Up @@ -397,6 +397,9 @@ namespace vmpdump
// Save module.
//
std::filesystem::path module_path = { instance->module_full_path };
module_path.remove_filename();
module_path /= instance->target_module_view->module_name;

module_path.replace_extension( "VMPDump" + module_path.extension().string() );
std::ofstream outfile( module_path.string(), std::ios::out | std::ios::binary );
outfile.write( ( const char* )raw_module.raw_bytes.data(), raw_module.raw_bytes.size() );
Expand Down
67 changes: 38 additions & 29 deletions VMPDump/vmpdump.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,11 @@ namespace vmpdump
//
basic_block* lifted_block = stream.lift();

// Ensure lifted block is valid.
//
if ( !lifted_block->is_complete() )
return {};

// Get the iterator just before the VMEXIT at the end.
// This is the baseline we'll be using to see how certain registers / stack variables changed during the stub.
//
Expand Down Expand Up @@ -203,46 +208,50 @@ namespace vmpdump
uint64_t call_target_offset = ins.operand( 0 ).imm;
uint8_t* call_target = local_module_bytes + call_target_offset;

// VMP import stubs always begin with a NOP (0x90).
// Ensure the current call matches this. Unfortunately we have to use the IsBadReadPtr API here as we
// cannot be sure that we are dealing with valid code.
// Ensure that the call destination is valid memory in the first place.
//
if ( !IsBadReadPtr( call_target, 1 ) && *call_target == 0x90 )
if ( !IsBadReadPtr( call_target, 1 ) )
{
// Disassemble at the call target.
// Max 25 instructions, in order to filter out invalid calls.
//
instruction_stream stream = disassembler::get().disassemble( ( uint64_t )local_module_bytes, call_target_offset );
instruction_stream stream = disassembler::get().disassemble( ( uint64_t )local_module_bytes, call_target_offset, disassembler_take_unconditional_imm, 25 );

// Analyze the disassembled stream as a VMP import stub.
// Perform more preliminary filtering, so we only pass the most valid calls to the costly VTIL analysis.
//
if ( std::optional<import_stub_analysis> stub_analysis = analyze_import_stub( stream ) )
if ( !stream.instructions.empty() && stream.instructions[ stream.instructions.size() - 1 ]->ins.id == X86_INS_RET )
{
// vtil::logger::log<vtil::logger::CON_GRN>( "** Resolved import stub @ 0x%p\r\n", ins.ins.address );

// Compute the ea of the function, in the target process.
//
uintptr_t target_ea = *( uintptr_t* )( local_module_bytes + stub_analysis->thunk_rva ) + stub_analysis->dest_offset;

// If it doesn't already exist within the map, insert the import.
//
const resolved_import* referenced_import = &resolved_imports.insert( { stub_analysis->thunk_rva, { stub_analysis->thunk_rva, target_ea } } ).first->second;

// Record the call to the import.
//
import_calls.push_back( { ins.ins.address, referenced_import, stub_analysis->stack_adjustment, stub_analysis->padding, stub_analysis->is_jmp, previous_instruction } );

// If the call is a jump, and has no backwards (push) padding, it must be padded after the stub.
// Because jumps don't return, this information won't be provided to us by the analysis, so we have
// to skip the next byte to prevent potentially invalid disassembly.
// Analyze the disassembled stream as a VMP import stub.
//
if ( stub_analysis->is_jmp && stub_analysis->stack_adjustment == 0 )
if ( std::optional<import_stub_analysis> stub_analysis = analyze_import_stub( stream ) )
{
offset++;
code_start++;
// vtil::logger::log<vtil::logger::CON_GRN>( "** Resolved import stub @ 0x%p\r\n", ins.ins.address );

// Compute the ea of the function, in the target process.
//
uintptr_t target_ea = *( uintptr_t* )( local_module_bytes + stub_analysis->thunk_rva ) + stub_analysis->dest_offset;

// If it doesn't already exist within the map, insert the import.
//
const resolved_import* referenced_import = &resolved_imports.insert( { stub_analysis->thunk_rva, { stub_analysis->thunk_rva, target_ea } } ).first->second;

// Record the call to the import.
//
import_calls.push_back( { ins.ins.address, referenced_import, stub_analysis->stack_adjustment, stub_analysis->padding, stub_analysis->is_jmp, previous_instruction } );

// If the call is a jump, and has no backwards (push) padding, it must be padded after the stub.
// Because jumps don't return, this information won't be provided to us by the analysis, so we have
// to skip the next byte to prevent potentially invalid disassembly.
//
if ( stub_analysis->is_jmp && stub_analysis->stack_adjustment == 0 )
{
offset++;
code_start++;
}
}
// else
// vtil::logger::log<vtil::logger::CON_PRP>( "** Potentially skipped import call @ RVA 0x%p\r\n", ins.ins.address );
}
else
vtil::logger::log<vtil::logger::CON_PRP>( "** Potentially skipped import call @ RVA 0x%p\r\n", ins.ins.address );
}
}

Expand Down

0 comments on commit 49b37db

Please sign in to comment.