12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736 |
- // -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
- /* Copyright (c) 2007, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Joi Sigurdsson
- * Author: Scott Francis
- *
- * Implementation of PreamblePatcher
- */
- #include "preamble_patcher.h"
- #include "mini_disassembler.h"
- // compatibility shims
- #include "base/logging.h"
// Definitions of assembly statements we need
#define ASM_JMP32REL 0xE9         // jmp rel32: relative jump, 32-bit signed displacement
#define ASM_INT3 0xCC             // int 3: debug breakpoint
#define ASM_JMP32ABS_0 0xFF       // jmp [mem]: first (opcode) byte of absolute indirect jump
#define ASM_JMP32ABS_1 0x25       // jmp [mem]: ModRM byte of absolute indirect jump
#define ASM_JMP8REL 0xEB          // jmp rel8: relative jump, 8-bit signed displacement
#define ASM_JCC32REL_0 0x0F       // Jcc rel32: first byte of near conditional jump
#define ASM_JCC32REL_1_MASK 0x80  // Jcc rel32: second byte is in 0x80..0x8F
#define ASM_NOP 0x90              // nop
// X64 opcodes
#define ASM_REXW 0x48             // REX.W prefix: promotes operand size to 64 bits
#define ASM_MOVRAX_IMM 0xB8       // mov rax, imm64 (when preceded by REX.W)
#define ASM_JMP 0xFF              // jmp r/m: opcode byte of register-indirect jump
#define ASM_JMP_RAX 0xE0          // jmp rax: ModRM byte selecting rax
- namespace sidestep {
// Head of the linked list of pages from which preamble stubs are handed out.
PreamblePatcher::PreamblePage* PreamblePatcher::preamble_pages_ = NULL;
// System allocation granularity and page size, cached by Initialize().
long PreamblePatcher::granularity_ = 0;
long PreamblePatcher::pagesize_ = 0;
bool PreamblePatcher::initialized_ = false;
// Magic value written into every preamble page so FreePreambleBlock() can
// sanity-check that a block really belongs to one of our pages.
static const unsigned int kPreamblePageMagic = 0x4347414D; // "MAGC"
// Handle a special case that we see with functions that point into an
// IAT table (including functions linked statically into the
// application): these function already starts with ASM_JMP32*. For
// instance, malloc() might be implemented as a JMP to __malloc().
// This function follows the initial JMPs for us, until we get to the
// place where the actual code is defined. If we get to STOP_BEFORE,
// we return the address before stop_before. The stop_before_trampoline
// flag is used in 64-bit mode. If true, we will return the address
// before a trampoline is detected. Trampolines are defined as:
//
//    nop
//    mov rax, <replacement_function>
//    jmp rax
//
// See PreamblePatcher::RawPatchWithStub for more information.
void* PreamblePatcher::ResolveTargetImpl(unsigned char* target,
                                         unsigned char* stop_before,
                                         bool stop_before_trampoline) {
  if (target == NULL)
    return NULL;
  while (1) {
    unsigned char* new_target;
    if (target[0] == ASM_JMP32REL) {
      // target[1-4] holds the place the jmp goes to, but it's
      // relative to the next instruction.
      int relative_offset;   // Windows guarantees int is 4 bytes
      SIDESTEP_ASSERT(sizeof(relative_offset) == 4);
      memcpy(reinterpret_cast<void*>(&relative_offset),
             reinterpret_cast<void*>(target + 1), 4);
      new_target = target + 5 + relative_offset;
    } else if (target[0] == ASM_JMP8REL) {
      // Visual Studio 7.1 implements new[] as an 8 bit jump to new
      signed char relative_offset;
      memcpy(reinterpret_cast<void*>(&relative_offset),
             reinterpret_cast<void*>(target + 1), 1);
      new_target = target + 2 + relative_offset;
    } else if (target[0] == ASM_JMP32ABS_0 &&
               target[1] == ASM_JMP32ABS_1) {
    jmp32rel:
      // Visual studio seems to sometimes do it this way instead of the
      // previous way.  Not sure what the rules are, but it was happening
      // with operator new in some binaries.
      void** new_target_v;
      if (kIs64BitBinary) {
        // In 64-bit mode JMPs are RIP-relative, not absolute
        int target_offset;
        memcpy(reinterpret_cast<void*>(&target_offset),
               reinterpret_cast<void*>(target + 2), 4);
        new_target_v = reinterpret_cast<void**>(target + target_offset + 6);
      } else {
        SIDESTEP_ASSERT(sizeof(new_target) == 4);
        memcpy(&new_target_v, reinterpret_cast<void*>(target + 2), 4);
      }
      // The JMP operand is the address of a pointer to the destination;
      // dereference it to get the destination itself.
      new_target = reinterpret_cast<unsigned char*>(*new_target_v);
    } else if (kIs64BitBinary && target[0] == ASM_REXW
               && target[1] == ASM_JMP32ABS_0
               && target[2] == ASM_JMP32ABS_1) {
      // in Visual Studio 2012 we're seeing jump like that:
      //   rex.W jmpq *0x11d019(%rip)
      //
      // according to docs I have, rex prefix is actually unneeded and
      // can be ignored. I.e. docs say for jumps like that operand
      // already defaults to 64-bit. But clearly it breaks abs. jump
      // detection above and we just skip rex
      target++;
      goto jmp32rel;
    } else {
      // Not a jump we recognize; the real code starts here.
      break;
    }
    // Stop right before the caller-specified address (used by Unpatch to
    // stop before the replacement function).
    if (new_target == stop_before)
      break;
    // Stop right before a 64-bit trampoline (nop; mov rax, imm; jmp rax).
    if (stop_before_trampoline && *new_target == ASM_NOP
        && new_target[1] == ASM_REXW && new_target[2] == ASM_MOVRAX_IMM)
      break;
    target = new_target;
  }
  return target;
}
- // Special case scoped_ptr to avoid dependency on scoped_ptr below.
- class DeleteUnsignedCharArray {
- public:
- DeleteUnsignedCharArray(unsigned char* array) : array_(array) {
- }
- ~DeleteUnsignedCharArray() {
- if (array_) {
- PreamblePatcher::FreePreambleBlock(array_);
- }
- }
- unsigned char* Release() {
- unsigned char* temp = array_;
- array_ = NULL;
- return temp;
- }
- private:
- unsigned char* array_;
- };
// Wraps RawPatchWithStub() with the required memory-protection changes:
// makes the first MAX_PREAMBLE_STUB_SIZE bytes of target_function
// writable, performs the patch, restores the original page protection,
// and flushes the instruction cache.  Returns SIDESTEP_SUCCESS, or the
// error from the step that failed.
SideStepError PreamblePatcher::RawPatchWithStubAndProtections(
    void* target_function, void *replacement_function,
    unsigned char* preamble_stub, unsigned long stub_size,
    unsigned long* bytes_needed) {
  // We need to be able to write to a process-local copy of the first
  // MAX_PREAMBLE_STUB_SIZE bytes of target_function
  DWORD old_target_function_protect = 0;
  BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
                                    MAX_PREAMBLE_STUB_SIZE,
                                    PAGE_EXECUTE_READWRITE,
                                    &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to make page containing target function "
                    "copy-on-write.");
    return SIDESTEP_ACCESS_DENIED;
  }

  SideStepError error_code = RawPatchWithStub(target_function,
                                              replacement_function,
                                              preamble_stub,
                                              stub_size,
                                              bytes_needed);

  // Restore the protection of the first MAX_PREAMBLE_STUB_SIZE bytes of
  // pTargetFunction to what they were before we started goofing around.
  // We do this regardless of whether the patch succeeded or not.
  succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
                               MAX_PREAMBLE_STUB_SIZE,
                               old_target_function_protect,
                               &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false &&
                    "Failed to restore protection to target function.");
    // We must not return an error here because the function has
    // likely actually been patched, and returning an error might
    // cause our client code not to unpatch it.  So we just keep
    // going.
  }

  if (SIDESTEP_SUCCESS != error_code) {  // Testing RawPatchWithStub, above
    SIDESTEP_ASSERT(false);
    return error_code;
  }

  // Flush the instruction cache to make sure the processor doesn't execute the
  // old version of the instructions (before our patch).
  //
  // FlushInstructionCache is actually a no-op at least on
  // single-processor XP machines.  I'm not sure why this is so, but
  // it is, yet I want to keep the call to the API here for
  // correctness in case there is a difference in some variants of
  // Windows/hardware.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target_function,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    // We must not return an error here because the function has actually
    // been patched, and returning an error would likely cause our client
    // code not to unpatch it.  So we just keep going.
  }

  return SIDESTEP_SUCCESS;
}
// Patches target_function to jump to replacement_function, after first
// routing around any initial JMP thunks (IAT entries, incremental-link
// stubs, etc.).  On success, *original_function_stub receives a pointer
// to a stub that executes the displaced preamble and then continues into
// the original function; the caller may invoke it to get the unpatched
// behavior, and must pass it back to Unpatch() to undo the patch.
SideStepError PreamblePatcher::RawPatch(void* target_function,
                                        void* replacement_function,
                                        void** original_function_stub) {
  if (!target_function || !replacement_function || !original_function_stub ||
      (*original_function_stub) || target_function == replacement_function) {
    SIDESTEP_ASSERT(false && "Preconditions not met");
    return SIDESTEP_INVALID_PARAMETER;
  }

  BOOL succeeded = FALSE;

  // First, deal with a special case that we see with functions that
  // point into an IAT table (including functions linked statically
  // into the application): these function already starts with
  // ASM_JMP32REL.  For instance, malloc() might be implemented as a
  // JMP to __malloc().  In that case, we replace the destination of
  // the JMP (__malloc), rather than the JMP itself (malloc).  This
  // way we get the correct behavior no matter how malloc gets called.
  void* new_target = ResolveTarget(target_function);
  if (new_target != target_function) {
    target_function = new_target;
  }

  // In 64-bit mode, preamble_stub must be within 2GB of target function
  // so that if target contains a jump, we can translate it.
  unsigned char* preamble_stub = AllocPreambleBlockNear(target_function);
  if (!preamble_stub) {
    SIDESTEP_ASSERT(false && "Unable to allocate preamble-stub.");
    return SIDESTEP_INSUFFICIENT_BUFFER;
  }

  // Frees the array at end of scope.
  DeleteUnsignedCharArray guard_preamble_stub(preamble_stub);

  SideStepError error_code = RawPatchWithStubAndProtections(
      target_function, replacement_function, preamble_stub,
      MAX_PREAMBLE_STUB_SIZE, NULL);
  if (SIDESTEP_SUCCESS != error_code) {
    SIDESTEP_ASSERT(false);
    return error_code;
  }

  // Flush the instruction cache to make sure the processor doesn't execute the
  // old version of the instructions (before our patch).
  //
  // FlushInstructionCache is actually a no-op at least on
  // single-processor XP machines.  I'm not sure why this is so, but
  // it is, yet I want to keep the call to the API here for
  // correctness in case there is a difference in some variants of
  // Windows/hardware.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target_function,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    // We must not return an error here because the function has actually
    // been patched, and returning an error would likely cause our client
    // code not to unpatch it.  So we just keep going.
  }

  SIDESTEP_LOG("PreamblePatcher::RawPatch successfully patched.");

  // detach the scoped pointer so the memory is not freed
  *original_function_stub =
      reinterpret_cast<void*>(guard_preamble_stub.Release());
  return SIDESTEP_SUCCESS;
}
- SideStepError PreamblePatcher::Unpatch(void* target_function,
- void* replacement_function,
- void* original_function_stub) {
- SIDESTEP_ASSERT(target_function && replacement_function &&
- original_function_stub);
- if (!target_function || !replacement_function ||
- !original_function_stub) {
- return SIDESTEP_INVALID_PARAMETER;
- }
- // Before unpatching, target_function should be a JMP to
- // replacement_function. If it's not, then either it's an error, or
- // we're falling into the case where the original instruction was a
- // JMP, and we patched the jumped_to address rather than the JMP
- // itself. (For instance, if malloc() is just a JMP to __malloc(),
- // we patched __malloc() and not malloc().)
- unsigned char* target = reinterpret_cast<unsigned char*>(target_function);
- target = reinterpret_cast<unsigned char*>(
- ResolveTargetImpl(
- target, reinterpret_cast<unsigned char*>(replacement_function),
- true));
- // We should end at the function we patched. When we patch, we insert
- // a ASM_JMP32REL instruction, so look for that as a sanity check.
- if (target[0] != ASM_JMP32REL) {
- SIDESTEP_ASSERT(false &&
- "target_function does not look like it was patched.");
- return SIDESTEP_INVALID_PARAMETER;
- }
- const unsigned int kRequiredTargetPatchBytes = 5;
- // We need to be able to write to a process-local copy of the first
- // kRequiredTargetPatchBytes bytes of target_function
- DWORD old_target_function_protect = 0;
- BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
- kRequiredTargetPatchBytes,
- PAGE_EXECUTE_READWRITE,
- &old_target_function_protect);
- if (!succeeded) {
- SIDESTEP_ASSERT(false && "Failed to make page containing target function "
- "copy-on-write.");
- return SIDESTEP_ACCESS_DENIED;
- }
- unsigned char* preamble_stub = reinterpret_cast<unsigned char*>(
- original_function_stub);
- // Disassemble the preamble of stub and copy the bytes back to target.
- // If we've done any conditional jumps in the preamble we need to convert
- // them back to the original REL8 jumps in the target.
- MiniDisassembler disassembler;
- unsigned int preamble_bytes = 0;
- unsigned int target_bytes = 0;
- while (target_bytes < kRequiredTargetPatchBytes) {
- unsigned int cur_bytes = 0;
- InstructionType instruction_type =
- disassembler.Disassemble(preamble_stub + preamble_bytes, cur_bytes);
- if (IT_JUMP == instruction_type) {
- unsigned int jump_bytes = 0;
- SideStepError jump_ret = SIDESTEP_JUMP_INSTRUCTION;
- if (IsNearConditionalJump(preamble_stub + preamble_bytes, cur_bytes) ||
- IsNearRelativeJump(preamble_stub + preamble_bytes, cur_bytes) ||
- IsNearAbsoluteCall(preamble_stub + preamble_bytes, cur_bytes) ||
- IsNearRelativeCall(preamble_stub + preamble_bytes, cur_bytes)) {
- jump_ret = PatchNearJumpOrCall(preamble_stub + preamble_bytes,
- cur_bytes, target + target_bytes,
- &jump_bytes, MAX_PREAMBLE_STUB_SIZE);
- }
- if (jump_ret == SIDESTEP_JUMP_INSTRUCTION) {
- SIDESTEP_ASSERT(false &&
- "Found unsupported jump instruction in stub!!");
- return SIDESTEP_UNSUPPORTED_INSTRUCTION;
- }
- target_bytes += jump_bytes;
- } else if (IT_GENERIC == instruction_type) {
- if (IsMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes)) {
- unsigned int mov_bytes = 0;
- if (PatchMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes,
- target + target_bytes, &mov_bytes,
- MAX_PREAMBLE_STUB_SIZE)
- != SIDESTEP_SUCCESS) {
- SIDESTEP_ASSERT(false &&
- "Found unsupported generic instruction in stub!!");
- return SIDESTEP_UNSUPPORTED_INSTRUCTION;
- }
- } else {
- memcpy(reinterpret_cast<void*>(target + target_bytes),
- reinterpret_cast<void*>(reinterpret_cast<unsigned char*>(
- original_function_stub) + preamble_bytes), cur_bytes);
- target_bytes += cur_bytes;
- }
- } else {
- SIDESTEP_ASSERT(false &&
- "Found unsupported instruction in stub!!");
- return SIDESTEP_UNSUPPORTED_INSTRUCTION;
- }
- preamble_bytes += cur_bytes;
- }
- FreePreambleBlock(reinterpret_cast<unsigned char*>(original_function_stub));
- // Restore the protection of the first kRequiredTargetPatchBytes bytes of
- // target to what they were before we started goofing around.
- succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
- kRequiredTargetPatchBytes,
- old_target_function_protect,
- &old_target_function_protect);
- // Flush the instruction cache to make sure the processor doesn't execute the
- // old version of the instructions (before our patch).
- //
- // See comment on FlushInstructionCache elsewhere in this file.
- succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
- target,
- MAX_PREAMBLE_STUB_SIZE);
- if (!succeeded) {
- SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
- return SIDESTEP_UNEXPECTED;
- }
- SIDESTEP_LOG("PreamblePatcher::Unpatch successfully unpatched.");
- return SIDESTEP_SUCCESS;
- }
- void PreamblePatcher::Initialize() {
- if (!initialized_) {
- SYSTEM_INFO si = { 0 };
- ::GetSystemInfo(&si);
- granularity_ = si.dwAllocationGranularity;
- pagesize_ = si.dwPageSize;
- initialized_ = true;
- }
- }
// Returns a MAX_PREAMBLE_STUB_SIZE block of executable memory that lies
// within a rel32 displacement (+/-2GB) of |target|.  Blocks are carved
// out of pages kept in the preamble_pages_ list; a fresh page is
// allocated with AllocPageNear() when no existing page is both near
// enough and has a free block.
unsigned char* PreamblePatcher::AllocPreambleBlockNear(void* target) {
  PreamblePage* preamble_page = preamble_pages_;
  while (preamble_page != NULL) {
    if (preamble_page->free_ != NULL) {
      // Check that the whole page (not just its start) is reachable from
      // target with a 32-bit displacement.
      __int64 val = reinterpret_cast<__int64>(preamble_page) -
          reinterpret_cast<__int64>(target);
      if ((val > 0 && val + pagesize_ <= INT_MAX) ||
          (val < 0 && val >= INT_MIN)) {
        break;
      }
    }
    preamble_page = preamble_page->next_;
  }

  // The free_ member of the page is used to store the next available block
  // of memory to use or NULL if there are no chunks available, in which case
  // we'll allocate a new page.
  if (preamble_page == NULL || preamble_page->free_ == NULL) {
    // Create a new preamble page and initialize the free list.  Each
    // MAX_PREAMBLE_STUB_SIZE chunk after the page header is linked into
    // an intrusive singly-linked list: a free chunk's first word points
    // at the next free chunk.
    preamble_page = reinterpret_cast<PreamblePage*>(AllocPageNear(target));
    SIDESTEP_ASSERT(preamble_page != NULL && "Could not allocate page!");
    void** pp = &preamble_page->free_;
    unsigned char* ptr = reinterpret_cast<unsigned char*>(preamble_page) +
        MAX_PREAMBLE_STUB_SIZE;
    unsigned char* limit = reinterpret_cast<unsigned char*>(preamble_page) +
        pagesize_;
    while (ptr < limit) {
      *pp = ptr;
      pp = reinterpret_cast<void**>(ptr);
      ptr += MAX_PREAMBLE_STUB_SIZE;
    }
    *pp = NULL;
    // Insert the new page into the list
    preamble_page->magic_ = kPreamblePageMagic;
    preamble_page->next_ = preamble_pages_;
    preamble_pages_ = preamble_page;
  }

  // Pop the first block off the page's free list and hand it out.
  unsigned char* ret = reinterpret_cast<unsigned char*>(preamble_page->free_);
  preamble_page->free_ = *(reinterpret_cast<void**>(preamble_page->free_));
  return ret;
}
- void PreamblePatcher::FreePreambleBlock(unsigned char* block) {
- SIDESTEP_ASSERT(block != NULL);
- SIDESTEP_ASSERT(granularity_ != 0);
- uintptr_t ptr = reinterpret_cast<uintptr_t>(block);
- ptr -= ptr & (granularity_ - 1);
- PreamblePage* preamble_page = reinterpret_cast<PreamblePage*>(ptr);
- SIDESTEP_ASSERT(preamble_page->magic_ == kPreamblePageMagic);
- *(reinterpret_cast<void**>(block)) = preamble_page->free_;
- preamble_page->free_ = block;
- }
- void* PreamblePatcher::AllocPageNear(void* target) {
- MEMORY_BASIC_INFORMATION mbi = { 0 };
- if (!::VirtualQuery(target, &mbi, sizeof(mbi))) {
- SIDESTEP_ASSERT(false && "VirtualQuery failed on target address");
- return 0;
- }
- if (initialized_ == false) {
- PreamblePatcher::Initialize();
- SIDESTEP_ASSERT(initialized_);
- }
- void* pv = NULL;
- unsigned char* allocation_base = reinterpret_cast<unsigned char*>(
- mbi.AllocationBase);
- __int64 i = 1;
- bool high_target = reinterpret_cast<__int64>(target) > UINT_MAX;
- while (pv == NULL) {
- __int64 val = reinterpret_cast<__int64>(allocation_base) -
- (i * granularity_);
- if (high_target &&
- reinterpret_cast<__int64>(target) - val > INT_MAX) {
- // We're further than 2GB from the target
- break;
- } else if (val <= NULL) {
- // Less than 0
- break;
- }
- pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base -
- (i++ * granularity_)),
- pagesize_, MEM_COMMIT | MEM_RESERVE,
- PAGE_EXECUTE_READWRITE);
- }
- // We couldn't allocate low, try to allocate high
- if (pv == NULL) {
- i = 1;
- // Round up to the next multiple of page granularity
- allocation_base = reinterpret_cast<unsigned char*>(
- (reinterpret_cast<__int64>(target) &
- (~(granularity_ - 1))) + granularity_);
- while (pv == NULL) {
- __int64 val = reinterpret_cast<__int64>(allocation_base) +
- (i * granularity_) - reinterpret_cast<__int64>(target);
- if (val > INT_MAX || val < 0) {
- // We're too far or we overflowed
- break;
- }
- pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base +
- (i++ * granularity_)),
- pagesize_, MEM_COMMIT | MEM_RESERVE,
- PAGE_EXECUTE_READWRITE);
- }
- }
- return pv;
- }
- bool PreamblePatcher::IsShortConditionalJump(
- unsigned char* target,
- unsigned int instruction_size) {
- return (*(target) & 0x70) == 0x70 && instruction_size == 2;
- }
- bool PreamblePatcher::IsShortJump(
- unsigned char* target,
- unsigned int instruction_size) {
- return target[0] == 0xeb && instruction_size == 2;
- }
- bool PreamblePatcher::IsNearConditionalJump(
- unsigned char* target,
- unsigned int instruction_size) {
- return *(target) == 0xf && (*(target + 1) & 0x80) == 0x80 &&
- instruction_size == 6;
- }
- bool PreamblePatcher::IsNearRelativeJump(
- unsigned char* target,
- unsigned int instruction_size) {
- return *(target) == 0xe9 && instruction_size == 5;
- }
// Returns true iff the instruction looks like a six-byte absolute
// indirect call (FF /2, e.g. call [rip+disp32] in 64-bit mode).
bool PreamblePatcher::IsNearAbsoluteCall(
    unsigned char* target,
    unsigned int instruction_size) {
  // NOTE(review): the ModRM test only checks bit 0x10 of the reg field,
  // which matches /2 (call near) but also /3, /6 and /7.  Presumably
  // acceptable because the disassembler has already classified the
  // instruction as a call/jump before this runs -- confirm before
  // tightening the mask.
  return *(target) == 0xff && (*(target + 1) & 0x10) == 0x10 &&
      instruction_size == 6;
}
- bool PreamblePatcher::IsNearRelativeCall(
- unsigned char* target,
- unsigned int instruction_size) {
- return *(target) == 0xe8 && instruction_size == 5;
- }
// Returns true iff the instruction is a seven-byte REX.W-prefixed MOV
// from a RIP-relative memory operand (48 8B /r with mod == 0 and
// r/m == 101b, i.e. mov r64, [rip+disp32]).
bool PreamblePatcher::IsMovWithDisplacement(
    unsigned char* target,
    unsigned int instruction_size) {
  // In this case, the ModRM byte's mod field will be 0 and r/m will be 101b (5)
  return instruction_size == 7 && *target == 0x48 && *(target + 1) == 0x8b &&
      (*(target + 2) >> 6) == 0 && (*(target + 2) & 0x7) == 5;
}
// Rewrites a short conditional jump (Jcc rel8) at |source| into an
// equivalent near conditional jump (0F 8x rel32) at |target|, fixing up
// the displacement so the jump still reaches the original destination.
//
// Always sets *target_bytes to the rewritten size (6); the bytes are
// only written when target_size is strictly larger, so callers can pass
// an undersized target_size to probe for the space required.  Returns
// SIDESTEP_JUMP_INSTRUCTION if the destination is out of rel32 range.
SideStepError PreamblePatcher::PatchShortConditionalJump(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  // note: rel8 offset is signed.  Thus we need to ask for signed char
  // to handle negative offsets right.
  unsigned char* original_jump_dest =
      (source + 2) + static_cast<signed char>(source[1]);
  unsigned char* stub_jump_from = target + 6;
  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up short jump because target"
                    " is too far away.");
    return SIDESTEP_JUMP_INSTRUCTION;
  }

  *target_bytes = 6;
  if (target_size > *target_bytes) {
    // Convert the short jump to a near jump.
    //
    //   0f 8x xx xx xx xx = Jcc rel32off
    //
    // The 7x short opcode maps to the 0F 8x near opcode with the same
    // condition nibble.  The two opcode bytes and the low 4 bytes of the
    // __int64 offset are written as-is (assumes little-endian, which
    // holds on x86/x64).
    unsigned short jmpcode = ((0x80 | (source[0] & 0xf)) << 8) | 0x0f;
    memcpy(reinterpret_cast<void*>(target),
           reinterpret_cast<void*>(&jmpcode), 2);
    memcpy(reinterpret_cast<void*>(target + 2),
           reinterpret_cast<void*>(&fixup_jump_offset), 4);
  }
  return SIDESTEP_SUCCESS;
}
// Rewrites a short unconditional jump (JMP rel8) at |source| into an
// equivalent near jump (E9 rel32) at |target|, fixing up the
// displacement so it still reaches the original destination.
//
// Always sets *target_bytes to the rewritten size (5); the bytes are
// only written when target_size is strictly larger, so callers can probe
// for the required size.  Returns SIDESTEP_JUMP_INSTRUCTION if the
// destination is out of rel32 range.
SideStepError PreamblePatcher::PatchShortJump(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  // note: rel8 offset is _signed_.  Thus we need signed char here.
  unsigned char* original_jump_dest =
      (source + 2) + static_cast<signed char>(source[1]);
  unsigned char* stub_jump_from = target + 5;
  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up short jump because target"
                    " is too far away.");
    return SIDESTEP_JUMP_INSTRUCTION;
  }

  *target_bytes = 5;
  if (target_size > *target_bytes) {
    // Convert the short jump to a near jump.
    //
    //   e9 xx xx xx xx = jmp rel32off
    //
    // Writes the low 4 bytes of the __int64 offset (assumes
    // little-endian, which holds on x86/x64).
    target[0] = 0xe9;
    memcpy(reinterpret_cast<void*>(target + 1),
           reinterpret_cast<void*>(&fixup_jump_offset), 4);
  }
  return SIDESTEP_SUCCESS;
}
// Copies a near jump or call (E8/E9 rel32, 0F 8x rel32, or FF /2
// [rip+disp32]) from |source| to |target|, fixing up the 32-bit
// displacement so it still reaches the original destination.  When the
// recomputed displacement fits in 8 bits, the instruction is shrunk to
// its two-byte short form instead.
//
// Always sets *target_bytes to the size of the emitted instruction; the
// bytes are only written when target_size is strictly larger, so callers
// can probe for the required size.  Returns SIDESTEP_JUMP_INSTRUCTION
// when the destination is out of rel32 range.
SideStepError PreamblePatcher::PatchNearJumpOrCall(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  SIDESTEP_ASSERT(instruction_size == 5 || instruction_size == 6);
  // 5-byte forms (E8/E9) have a 1-byte opcode; 6-byte forms (0F 8x,
  // FF /2) have two bytes before the displacement.
  unsigned int jmp_offset_in_instruction = instruction_size == 5 ? 1 : 2;
  unsigned char* original_jump_dest = reinterpret_cast<unsigned char *>(
      reinterpret_cast<__int64>(source + instruction_size) +
      *(reinterpret_cast<int*>(source + jmp_offset_in_instruction)));
  unsigned char* stub_jump_from = target + instruction_size;
  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up near jump because target"
                    " is too far away.");
    return SIDESTEP_JUMP_INSTRUCTION;
  }

  // NOTE(review): the bounds below are exclusive, so offsets of exactly
  // SCHAR_MAX/SCHAR_MIN conservatively keep the near form -- presumably
  // because shrinking the instruction shifts the offset base.  Also note
  // the short-form path emits EB (jmp rel8) for any non-Jcc source,
  // including CALL sources; verify callers never hit that combination.
  if ((fixup_jump_offset < SCHAR_MAX && fixup_jump_offset > SCHAR_MIN)) {
    *target_bytes = 2;
    if (target_size > *target_bytes) {
      // If the new offset is in range, use a short jump instead of a near jump.
      if (source[0] == ASM_JCC32REL_0 &&
          (source[1] & ASM_JCC32REL_1_MASK) == ASM_JCC32REL_1_MASK) {
        // Near Jcc (0F 8x) becomes short Jcc (7x rel8); same condition
        // nibble.  Two bytes written as-is (assumes little-endian).
        unsigned short jmpcode = (static_cast<unsigned char>(
            fixup_jump_offset) << 8) | (0x70 | (source[1] & 0xf));
        memcpy(reinterpret_cast<void*>(target),
               reinterpret_cast<void*>(&jmpcode),
               2);
      } else {
        target[0] = ASM_JMP8REL;
        target[1] = static_cast<unsigned char>(fixup_jump_offset);
      }
    }
  } else {
    *target_bytes = instruction_size;
    if (target_size > *target_bytes) {
      // Copy the original opcode byte(s), then the fixed-up rel32
      // displacement (low 4 bytes of the __int64; little-endian).
      memcpy(reinterpret_cast<void*>(target),
             reinterpret_cast<void*>(source),
             jmp_offset_in_instruction);
      memcpy(reinterpret_cast<void*>(target + jmp_offset_in_instruction),
             reinterpret_cast<void*>(&fixup_jump_offset),
             4);
    }
  }
  return SIDESTEP_SUCCESS;
}
// Copies a RIP-relative MOV (48 8B /r with disp32, as matched by
// IsMovWithDisplacement) from |source| to |target|, fixing up the disp32
// so the instruction still references the same absolute address.
//
// Always sets *target_bytes to the instruction size (7); the bytes are
// only written when target_size is strictly larger.  Returns
// SIDESTEP_UNEXPECTED when the referenced address is out of disp32 range
// from the stub.
SideStepError PreamblePatcher::PatchMovWithDisplacement(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  SIDESTEP_ASSERT(instruction_size == 7);
  const int mov_offset_in_instruction = 3; // 0x48 0x8b 0x0d <offset>
  // RIP-relative addressing: the referenced address is the end of the
  // instruction plus the signed 32-bit displacement.
  unsigned char* original_mov_dest = reinterpret_cast<unsigned char*>(
      reinterpret_cast<__int64>(source + instruction_size) +
      *(reinterpret_cast<int*>(source + mov_offset_in_instruction)));
  unsigned char* stub_mov_from = target + instruction_size;
  __int64 fixup_mov_offset = original_mov_dest - stub_mov_from;
  if (fixup_mov_offset > INT_MAX || fixup_mov_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up near MOV because target is too far away.");
    return SIDESTEP_UNEXPECTED;
  }
  *target_bytes = instruction_size;
  if (target_size > *target_bytes) {
    // Copy REX prefix, opcode and ModRM, then the fixed-up disp32 (low
    // 4 bytes of the __int64; little-endian).
    memcpy(reinterpret_cast<void*>(target),
           reinterpret_cast<void*>(source),
           mov_offset_in_instruction);
    memcpy(reinterpret_cast<void*>(target + mov_offset_in_instruction),
           reinterpret_cast<void*>(&fixup_mov_offset),
           4);
  }
  return SIDESTEP_SUCCESS;
}
- }; // namespace sidestep
|