// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2007, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Joi Sigurdsson
 * Author: Scott Francis
 *
 * Implementation of PreamblePatcher
 */

#include "preamble_patcher.h"

#include "mini_disassembler.h"

// compatibility shims
#include "base/logging.h"

// Definitions of assembly statements we need
#define ASM_JMP32REL 0xE9
#define ASM_INT3 0xCC
#define ASM_JMP32ABS_0 0xFF
#define ASM_JMP32ABS_1 0x25
#define ASM_JMP8REL 0xEB
#define ASM_JCC32REL_0 0x0F
#define ASM_JCC32REL_1_MASK 0x80
#define ASM_NOP 0x90
// X64 opcodes
#define ASM_REXW 0x48
#define ASM_MOVRAX_IMM 0xB8
#define ASM_JMP 0xFF
#define ASM_JMP_RAX 0xE0
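
// For reference, the x64 trampoline described in ResolveTargetImpl's
// comment below is assembled from the opcode bytes above; its encoding
// is (illustrative):
//
//   90               nop                              ; ASM_NOP
//   48 B8 <imm64>    mov rax, <replacement_function>  ; ASM_REXW ASM_MOVRAX_IMM
//   FF E0            jmp rax                          ; ASM_JMP  ASM_JMP_RAX
//
// ResolveTargetImpl uses the nop/rex.W/mov-imm prefix of this sequence
// to recognize an already-installed trampoline.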

namespace sidestep {

PreamblePatcher::PreamblePage* PreamblePatcher::preamble_pages_ = NULL;
long PreamblePatcher::granularity_ = 0;
long PreamblePatcher::pagesize_ = 0;
bool PreamblePatcher::initialized_ = false;

static const unsigned int kPreamblePageMagic = 0x4347414D;  // "MAGC"

// Handle a special case that we see with functions that point into an
// IAT table (including functions linked statically into the
// application): these functions already start with ASM_JMP32*. For
// instance, malloc() might be implemented as a JMP to __malloc().
// This function follows the initial JMPs for us, until we get to the
// place where the actual code is defined. If we get to stop_before,
// we return the address before stop_before. The stop_before_trampoline
// flag is used in 64-bit mode. If true, we will return the address
// before a trampoline is detected. Trampolines are defined as:
//
//    nop
//    mov rax, <replacement_function>
//    jmp rax
//
// See PreamblePatcher::RawPatchWithStub for more information.
void* PreamblePatcher::ResolveTargetImpl(unsigned char* target,
                                         unsigned char* stop_before,
                                         bool stop_before_trampoline) {
  if (target == NULL)
    return NULL;
  while (1) {
    unsigned char* new_target;
    if (target[0] == ASM_JMP32REL) {
      // target[1-4] holds the place the jmp goes to, but it's
      // relative to the next instruction.
      int relative_offset;  // Windows guarantees int is 4 bytes
      SIDESTEP_ASSERT(sizeof(relative_offset) == 4);
      memcpy(reinterpret_cast<void*>(&relative_offset),
             reinterpret_cast<void*>(target + 1), 4);
      new_target = target + 5 + relative_offset;
    } else if (target[0] == ASM_JMP8REL) {
      // Visual Studio 7.1 implements new[] as an 8 bit jump to new
      signed char relative_offset;
      memcpy(reinterpret_cast<void*>(&relative_offset),
             reinterpret_cast<void*>(target + 1), 1);
      new_target = target + 2 + relative_offset;
    } else if (target[0] == ASM_JMP32ABS_0 &&
               target[1] == ASM_JMP32ABS_1) {
    jmp32rel:
      // Visual Studio seems to sometimes do it this way instead of the
      // previous way. Not sure what the rules are, but it was happening
      // with operator new in some binaries.
      void** new_target_v;
      if (kIs64BitBinary) {
        // In 64-bit mode JMPs are RIP-relative, not absolute
        int target_offset;
        memcpy(reinterpret_cast<void*>(&target_offset),
               reinterpret_cast<void*>(target + 2), 4);
        new_target_v = reinterpret_cast<void**>(target + target_offset + 6);
      } else {
        SIDESTEP_ASSERT(sizeof(new_target) == 4);
        memcpy(&new_target_v, reinterpret_cast<void*>(target + 2), 4);
      }
      new_target = reinterpret_cast<unsigned char*>(*new_target_v);
    } else if (kIs64BitBinary && target[0] == ASM_REXW
               && target[1] == ASM_JMP32ABS_0
               && target[2] == ASM_JMP32ABS_1) {
      // With Visual Studio 2012 we're seeing jumps like this:
      //   rex.W jmpq *0x11d019(%rip)
      //
      // According to the docs, the REX prefix is unneeded and can be
      // ignored; the operand of such a jump already defaults to 64 bits.
      // But it does break the absolute-jump detection above, so we
      // simply skip the REX byte.
      target++;
      goto jmp32rel;
    } else {
      break;
    }
    if (new_target == stop_before)
      break;
    if (stop_before_trampoline && *new_target == ASM_NOP
        && new_target[1] == ASM_REXW && new_target[2] == ASM_MOVRAX_IMM)
      break;
    target = new_target;
  }
  return target;
}

// Special case scoped_ptr to avoid dependency on scoped_ptr below.
class DeleteUnsignedCharArray {
 public:
  DeleteUnsignedCharArray(unsigned char* array) : array_(array) {
  }

  ~DeleteUnsignedCharArray() {
    if (array_) {
      PreamblePatcher::FreePreambleBlock(array_);
    }
  }

  unsigned char* Release() {
    unsigned char* temp = array_;
    array_ = NULL;
    return temp;
  }

 private:
  unsigned char* array_;
};
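
// Performs the patch via RawPatchWithStub (implemented elsewhere) after
// temporarily making the first MAX_PREAMBLE_STUB_SIZE bytes of the target
// writable with VirtualProtect. The original page protection is restored
// regardless of whether the patch succeeded, and the instruction cache is
// flushed on success.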
SideStepError PreamblePatcher::RawPatchWithStubAndProtections(
    void* target_function, void* replacement_function,
    unsigned char* preamble_stub, unsigned long stub_size,
    unsigned long* bytes_needed) {
  // We need to be able to write to a process-local copy of the first
  // MAX_PREAMBLE_STUB_SIZE bytes of target_function
  DWORD old_target_function_protect = 0;
  BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
                                    MAX_PREAMBLE_STUB_SIZE,
                                    PAGE_EXECUTE_READWRITE,
                                    &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to make page containing target function "
                    "copy-on-write.");
    return SIDESTEP_ACCESS_DENIED;
  }

  SideStepError error_code = RawPatchWithStub(target_function,
                                              replacement_function,
                                              preamble_stub,
                                              stub_size,
                                              bytes_needed);

  // Restore the protection of the first MAX_PREAMBLE_STUB_SIZE bytes of
  // target_function to what they were before we started goofing around.
  // We do this regardless of whether the patch succeeded or not.
  succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
                               MAX_PREAMBLE_STUB_SIZE,
                               old_target_function_protect,
                               &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false &&
                    "Failed to restore protection to target function.");
    // We must not return an error here because the function has
    // likely actually been patched, and returning an error might
    // cause our client code not to unpatch it. So we just keep
    // going.
  }

  if (SIDESTEP_SUCCESS != error_code) {  // Testing RawPatchWithStub, above
    SIDESTEP_ASSERT(false);
    return error_code;
  }

  // Flush the instruction cache to make sure the processor doesn't execute
  // the old version of the instructions (before our patch).
  //
  // FlushInstructionCache is actually a no-op at least on
  // single-processor XP machines. I'm not sure why this is so, but
  // it is, yet I want to keep the call to the API here for
  // correctness in case there is a difference in some variants of
  // Windows/hardware.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target_function,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    // We must not return an error here because the function has actually
    // been patched, and returning an error would likely cause our client
    // code not to unpatch it. So we just keep going.
  }

  return SIDESTEP_SUCCESS;
}
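
// Installs the patch: resolves the real target (following any initial
// JMPs), allocates a preamble stub within 2GB of it, and delegates to
// RawPatchWithStubAndProtections. On success, *original_function_stub
// receives the stub callers can use to invoke the original code.
//
// Illustrative (hedged) usage sketch -- MyMalloc and malloc_stub are
// hypothetical names, not part of this file:
//
//   void* malloc_stub = NULL;
//   SideStepError err = PreamblePatcher::RawPatch(
//       reinterpret_cast<void*>(&malloc),
//       reinterpret_cast<void*>(&MyMalloc),
//       &malloc_stub);
//   // ... later, undo the patch:
//   // PreamblePatcher::Unpatch(reinterpret_cast<void*>(&malloc),
//   //                          reinterpret_cast<void*>(&MyMalloc),
//   //                          malloc_stub);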
SideStepError PreamblePatcher::RawPatch(void* target_function,
                                        void* replacement_function,
                                        void** original_function_stub) {
  if (!target_function || !replacement_function || !original_function_stub ||
      (*original_function_stub) || target_function == replacement_function) {
    SIDESTEP_ASSERT(false && "Preconditions not met");
    return SIDESTEP_INVALID_PARAMETER;
  }

  BOOL succeeded = FALSE;

  // First, deal with a special case that we see with functions that
  // point into an IAT table (including functions linked statically
  // into the application): these functions already start with
  // ASM_JMP32REL. For instance, malloc() might be implemented as a
  // JMP to __malloc(). In that case, we replace the destination of
  // the JMP (__malloc), rather than the JMP itself (malloc). This
  // way we get the correct behavior no matter how malloc gets called.
  void* new_target = ResolveTarget(target_function);
  if (new_target != target_function) {
    target_function = new_target;
  }

  // In 64-bit mode, preamble_stub must be within 2GB of target function
  // so that if target contains a jump, we can translate it.
  unsigned char* preamble_stub = AllocPreambleBlockNear(target_function);
  if (!preamble_stub) {
    SIDESTEP_ASSERT(false && "Unable to allocate preamble-stub.");
    return SIDESTEP_INSUFFICIENT_BUFFER;
  }

  // Frees the array at end of scope.
  DeleteUnsignedCharArray guard_preamble_stub(preamble_stub);

  SideStepError error_code = RawPatchWithStubAndProtections(
      target_function, replacement_function, preamble_stub,
      MAX_PREAMBLE_STUB_SIZE, NULL);
  if (SIDESTEP_SUCCESS != error_code) {
    SIDESTEP_ASSERT(false);
    return error_code;
  }

  // Flush the instruction cache to make sure the processor doesn't execute
  // the old version of the instructions (before our patch).
  //
  // FlushInstructionCache is actually a no-op at least on
  // single-processor XP machines. I'm not sure why this is so, but
  // it is, yet I want to keep the call to the API here for
  // correctness in case there is a difference in some variants of
  // Windows/hardware.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target_function,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    // We must not return an error here because the function has actually
    // been patched, and returning an error would likely cause our client
    // code not to unpatch it. So we just keep going.
  }
  SIDESTEP_LOG("PreamblePatcher::RawPatch successfully patched.");

  // Detach the scoped pointer so the memory is not freed.
  *original_function_stub =
      reinterpret_cast<void*>(guard_preamble_stub.Release());
  return SIDESTEP_SUCCESS;
}
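
// Reverses a patch installed by RawPatch: it resolves the patched target
// (stopping before the replacement trampoline), verifies the target now
// begins with a JMP32REL, then disassembles the saved preamble stub and
// copies its instructions back over the target, converting any jumps,
// calls, or RIP-relative MOVs that were rewritten when the stub was
// created. Finally, the stub block is returned to the preamble pool.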
SideStepError PreamblePatcher::Unpatch(void* target_function,
                                       void* replacement_function,
                                       void* original_function_stub) {
  SIDESTEP_ASSERT(target_function && replacement_function &&
                  original_function_stub);
  if (!target_function || !replacement_function ||
      !original_function_stub) {
    return SIDESTEP_INVALID_PARAMETER;
  }

  // Before unpatching, target_function should be a JMP to
  // replacement_function. If it's not, then either it's an error, or
  // we're falling into the case where the original instruction was a
  // JMP, and we patched the jumped_to address rather than the JMP
  // itself. (For instance, if malloc() is just a JMP to __malloc(),
  // we patched __malloc() and not malloc().)
  unsigned char* target = reinterpret_cast<unsigned char*>(target_function);
  target = reinterpret_cast<unsigned char*>(
      ResolveTargetImpl(
          target, reinterpret_cast<unsigned char*>(replacement_function),
          true));
  // We should end at the function we patched. When we patch, we insert
  // an ASM_JMP32REL instruction, so look for that as a sanity check.
  if (target[0] != ASM_JMP32REL) {
    SIDESTEP_ASSERT(false &&
                    "target_function does not look like it was patched.");
    return SIDESTEP_INVALID_PARAMETER;
  }

  const unsigned int kRequiredTargetPatchBytes = 5;

  // We need to be able to write to a process-local copy of the first
  // kRequiredTargetPatchBytes bytes of target_function
  DWORD old_target_function_protect = 0;
  BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
                                    kRequiredTargetPatchBytes,
                                    PAGE_EXECUTE_READWRITE,
                                    &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to make page containing target function "
                    "copy-on-write.");
    return SIDESTEP_ACCESS_DENIED;
  }

  unsigned char* preamble_stub = reinterpret_cast<unsigned char*>(
      original_function_stub);

  // Disassemble the preamble of the stub and copy the bytes back to target.
  // If we've done any conditional jumps in the preamble we need to convert
  // them back to the original REL8 jumps in the target.
  MiniDisassembler disassembler;
  unsigned int preamble_bytes = 0;
  unsigned int target_bytes = 0;
  while (target_bytes < kRequiredTargetPatchBytes) {
    unsigned int cur_bytes = 0;
    InstructionType instruction_type =
        disassembler.Disassemble(preamble_stub + preamble_bytes, cur_bytes);
    if (IT_JUMP == instruction_type) {
      unsigned int jump_bytes = 0;
      SideStepError jump_ret = SIDESTEP_JUMP_INSTRUCTION;
      if (IsNearConditionalJump(preamble_stub + preamble_bytes, cur_bytes) ||
          IsNearRelativeJump(preamble_stub + preamble_bytes, cur_bytes) ||
          IsNearAbsoluteCall(preamble_stub + preamble_bytes, cur_bytes) ||
          IsNearRelativeCall(preamble_stub + preamble_bytes, cur_bytes)) {
        jump_ret = PatchNearJumpOrCall(preamble_stub + preamble_bytes,
                                       cur_bytes, target + target_bytes,
                                       &jump_bytes, MAX_PREAMBLE_STUB_SIZE);
      }
      if (jump_ret == SIDESTEP_JUMP_INSTRUCTION) {
        SIDESTEP_ASSERT(false &&
                        "Found unsupported jump instruction in stub!!");
        return SIDESTEP_UNSUPPORTED_INSTRUCTION;
      }
      target_bytes += jump_bytes;
    } else if (IT_GENERIC == instruction_type) {
      if (IsMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes)) {
        unsigned int mov_bytes = 0;
        if (PatchMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes,
                                     target + target_bytes, &mov_bytes,
                                     MAX_PREAMBLE_STUB_SIZE)
            != SIDESTEP_SUCCESS) {
          SIDESTEP_ASSERT(false &&
                          "Found unsupported generic instruction in stub!!");
          return SIDESTEP_UNSUPPORTED_INSTRUCTION;
        }
        // Advance past the rewritten MOV in the target.
        target_bytes += mov_bytes;
      } else {
        memcpy(reinterpret_cast<void*>(target + target_bytes),
               reinterpret_cast<void*>(reinterpret_cast<unsigned char*>(
                   original_function_stub) + preamble_bytes), cur_bytes);
        target_bytes += cur_bytes;
      }
    } else {
      SIDESTEP_ASSERT(false &&
                      "Found unsupported instruction in stub!!");
      return SIDESTEP_UNSUPPORTED_INSTRUCTION;
    }
    preamble_bytes += cur_bytes;
  }

  FreePreambleBlock(reinterpret_cast<unsigned char*>(original_function_stub));

  // Restore the protection of the first kRequiredTargetPatchBytes bytes of
  // target to what they were before we started goofing around.
  succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
                               kRequiredTargetPatchBytes,
                               old_target_function_protect,
                               &old_target_function_protect);

  // Flush the instruction cache to make sure the processor doesn't execute
  // the old version of the instructions (before our patch).
  //
  // See comment on FlushInstructionCache elsewhere in this file.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    return SIDESTEP_UNEXPECTED;
  }
  SIDESTEP_LOG("PreamblePatcher::Unpatch successfully unpatched.");
  return SIDESTEP_SUCCESS;
}

void PreamblePatcher::Initialize() {
  if (!initialized_) {
    SYSTEM_INFO si = { 0 };
    ::GetSystemInfo(&si);
    granularity_ = si.dwAllocationGranularity;
    pagesize_ = si.dwPageSize;
    initialized_ = true;
  }
}
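
// Returns a MAX_PREAMBLE_STUB_SIZE block taken from a preamble page that
// lies within +/-2GB of |target|. If no existing page qualifies (or none
// has free blocks), a new page is allocated near the target with
// AllocPageNear and carved into a singly linked free list of
// MAX_PREAMBLE_STUB_SIZE chunks; the first chunk of the page is reserved
// for the PreamblePage bookkeeping header.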
unsigned char* PreamblePatcher::AllocPreambleBlockNear(void* target) {
  PreamblePage* preamble_page = preamble_pages_;
  while (preamble_page != NULL) {
    if (preamble_page->free_ != NULL) {
      __int64 val = reinterpret_cast<__int64>(preamble_page) -
                    reinterpret_cast<__int64>(target);
      if ((val > 0 && val + pagesize_ <= INT_MAX) ||
          (val < 0 && val >= INT_MIN)) {
        break;
      }
    }
    preamble_page = preamble_page->next_;
  }

  // The free_ member of the page is used to store the next available block
  // of memory to use or NULL if there are no chunks available, in which case
  // we'll allocate a new page.
  if (preamble_page == NULL || preamble_page->free_ == NULL) {
    // Create a new preamble page and initialize the free list
    preamble_page = reinterpret_cast<PreamblePage*>(AllocPageNear(target));
    SIDESTEP_ASSERT(preamble_page != NULL && "Could not allocate page!");
    void** pp = &preamble_page->free_;
    unsigned char* ptr = reinterpret_cast<unsigned char*>(preamble_page) +
                         MAX_PREAMBLE_STUB_SIZE;
    unsigned char* limit = reinterpret_cast<unsigned char*>(preamble_page) +
                           pagesize_;
    while (ptr < limit) {
      *pp = ptr;
      pp = reinterpret_cast<void**>(ptr);
      ptr += MAX_PREAMBLE_STUB_SIZE;
    }
    *pp = NULL;
    // Insert the new page into the list
    preamble_page->magic_ = kPreamblePageMagic;
    preamble_page->next_ = preamble_pages_;
    preamble_pages_ = preamble_page;
  }
  unsigned char* ret = reinterpret_cast<unsigned char*>(preamble_page->free_);
  preamble_page->free_ = *(reinterpret_cast<void**>(preamble_page->free_));
  return ret;
}
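
// Returns a block to its owning page's free list. The owning PreamblePage
// is found by rounding the block address down to the allocation
// granularity, which is where AllocPageNear placed the page header; the
// magic value guards against freeing a pointer we did not allocate.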
void PreamblePatcher::FreePreambleBlock(unsigned char* block) {
  SIDESTEP_ASSERT(block != NULL);
  SIDESTEP_ASSERT(granularity_ != 0);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(block);
  ptr -= ptr & (granularity_ - 1);
  PreamblePage* preamble_page = reinterpret_cast<PreamblePage*>(ptr);
  SIDESTEP_ASSERT(preamble_page->magic_ == kPreamblePageMagic);
  *(reinterpret_cast<void**>(block)) = preamble_page->free_;
  preamble_page->free_ = block;
}
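
// Allocates one executable page as close to |target| as possible so that
// rel32 displacements between the page and the target stay within 2GB.
// It first walks downward from the target's allocation base one
// granularity step at a time, then, failing that, walks upward from the
// target, giving up once the distance would exceed INT_MAX.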
void* PreamblePatcher::AllocPageNear(void* target) {
  MEMORY_BASIC_INFORMATION mbi = { 0 };
  if (!::VirtualQuery(target, &mbi, sizeof(mbi))) {
    SIDESTEP_ASSERT(false && "VirtualQuery failed on target address");
    return 0;
  }
  if (initialized_ == false) {
    PreamblePatcher::Initialize();
    SIDESTEP_ASSERT(initialized_);
  }
  void* pv = NULL;
  unsigned char* allocation_base = reinterpret_cast<unsigned char*>(
      mbi.AllocationBase);
  __int64 i = 1;
  bool high_target = reinterpret_cast<__int64>(target) > UINT_MAX;
  while (pv == NULL) {
    __int64 val = reinterpret_cast<__int64>(allocation_base) -
                  (i * granularity_);
    if (high_target &&
        reinterpret_cast<__int64>(target) - val > INT_MAX) {
      // We're further than 2GB from the target
      break;
    } else if (val <= 0) {
      // Address would be zero or negative
      break;
    }
    pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base -
                                                (i++ * granularity_)),
                        pagesize_, MEM_COMMIT | MEM_RESERVE,
                        PAGE_EXECUTE_READWRITE);
  }

  // We couldn't allocate low, try to allocate high
  if (pv == NULL) {
    i = 1;
    // Round up to the next multiple of page granularity
    allocation_base = reinterpret_cast<unsigned char*>(
        (reinterpret_cast<__int64>(target) &
         (~(granularity_ - 1))) + granularity_);
    while (pv == NULL) {
      __int64 val = reinterpret_cast<__int64>(allocation_base) +
                    (i * granularity_) - reinterpret_cast<__int64>(target);
      if (val > INT_MAX || val < 0) {
        // We're too far or we overflowed
        break;
      }
      pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base +
                                                  (i++ * granularity_)),
                          pagesize_, MEM_COMMIT | MEM_RESERVE,
                          PAGE_EXECUTE_READWRITE);
    }
  }
  return pv;
}
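
// The helpers below classify the x86/x64 instruction encodings that the
// patcher knows how to relocate. Byte patterns checked (illustrative):
//
//   7x rel8            short conditional jump (Jcc rel8)
//   EB rel8            short unconditional jump
//   0F 8x rel32        near conditional jump (Jcc rel32)
//   E9 rel32           near relative jump
//   FF /2 disp32       near absolute (indirect) call
//   E8 rel32           near relative call
//   48 8B /r disp32    REX.W MOV r64, [RIP+disp32]
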
bool PreamblePatcher::IsShortConditionalJump(
    unsigned char* target,
    unsigned int instruction_size) {
  return (*(target) & 0x70) == 0x70 && instruction_size == 2;
}

bool PreamblePatcher::IsShortJump(
    unsigned char* target,
    unsigned int instruction_size) {
  return target[0] == 0xeb && instruction_size == 2;
}

bool PreamblePatcher::IsNearConditionalJump(
    unsigned char* target,
    unsigned int instruction_size) {
  return *(target) == 0xf && (*(target + 1) & 0x80) == 0x80 &&
         instruction_size == 6;
}

bool PreamblePatcher::IsNearRelativeJump(
    unsigned char* target,
    unsigned int instruction_size) {
  return *(target) == 0xe9 && instruction_size == 5;
}

bool PreamblePatcher::IsNearAbsoluteCall(
    unsigned char* target,
    unsigned int instruction_size) {
  return *(target) == 0xff && (*(target + 1) & 0x10) == 0x10 &&
         instruction_size == 6;
}

bool PreamblePatcher::IsNearRelativeCall(
    unsigned char* target,
    unsigned int instruction_size) {
  return *(target) == 0xe8 && instruction_size == 5;
}

bool PreamblePatcher::IsMovWithDisplacement(
    unsigned char* target,
    unsigned int instruction_size) {
  // In this case, the ModRM byte's mod field will be 0 and r/m will be 101b (5)
  return instruction_size == 7 && *target == 0x48 && *(target + 1) == 0x8b &&
         (*(target + 2) >> 6) == 0 && (*(target + 2) & 0x7) == 5;
}
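
// Rewrites the 2-byte Jcc rel8 at |source| as a 6-byte 0F 8x rel32
// conditional jump at |target|, recomputing the displacement so the copy
// still reaches the original destination. *target_bytes is set to the
// size of the rewritten instruction; bytes are written only if
// target_size allows.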
SideStepError PreamblePatcher::PatchShortConditionalJump(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  // Note: the rel8 offset is signed, so we need a signed char here to
  // handle negative offsets correctly.
  unsigned char* original_jump_dest =
      (source + 2) + static_cast<signed char>(source[1]);
  unsigned char* stub_jump_from = target + 6;
  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up short jump because target"
                    " is too far away.");
    return SIDESTEP_JUMP_INSTRUCTION;
  }

  *target_bytes = 6;
  if (target_size > *target_bytes) {
    // Convert the short jump to a near jump.
    //
    // 0f 8x xx xx xx xx = Jcc rel32off
    unsigned short jmpcode = ((0x80 | (source[0] & 0xf)) << 8) | 0x0f;
    memcpy(reinterpret_cast<void*>(target),
           reinterpret_cast<void*>(&jmpcode), 2);
    memcpy(reinterpret_cast<void*>(target + 2),
           reinterpret_cast<void*>(&fixup_jump_offset), 4);
  }

  return SIDESTEP_SUCCESS;
}
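
// Same conversion as above for an unconditional short jump: the 2-byte
// EB rel8 at |source| becomes a 5-byte E9 rel32 at |target|.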
SideStepError PreamblePatcher::PatchShortJump(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  // note: rel8 offset is _signed_. Thus we need signed char here.
  unsigned char* original_jump_dest =
      (source + 2) + static_cast<signed char>(source[1]);
  unsigned char* stub_jump_from = target + 5;
  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up short jump because target"
                    " is too far away.");
    return SIDESTEP_JUMP_INSTRUCTION;
  }

  *target_bytes = 5;
  if (target_size > *target_bytes) {
    // Convert the short jump to a near jump.
    //
    // e9 xx xx xx xx = jmp rel32off
    target[0] = 0xe9;
    memcpy(reinterpret_cast<void*>(target + 1),
           reinterpret_cast<void*>(&fixup_jump_offset), 4);
  }

  return SIDESTEP_SUCCESS;
}
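
// Relocates a near jump or call (E9, E8, 0F 8x, or FF /2 with a 32-bit
// displacement) from |source| to |target|, recomputing the displacement;
// when the recomputed displacement fits in a signed byte, the short-jump
// form is emitted instead.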
SideStepError PreamblePatcher::PatchNearJumpOrCall(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  SIDESTEP_ASSERT(instruction_size == 5 || instruction_size == 6);
  unsigned int jmp_offset_in_instruction = instruction_size == 5 ? 1 : 2;
  unsigned char* original_jump_dest = reinterpret_cast<unsigned char*>(
      reinterpret_cast<__int64>(source + instruction_size) +
      *(reinterpret_cast<int*>(source + jmp_offset_in_instruction)));
  unsigned char* stub_jump_from = target + instruction_size;
  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up near jump because target"
                    " is too far away.");
    return SIDESTEP_JUMP_INSTRUCTION;
  }

  if ((fixup_jump_offset < SCHAR_MAX && fixup_jump_offset > SCHAR_MIN)) {
    *target_bytes = 2;
    if (target_size > *target_bytes) {
      // If the new offset is in range, use a short jump instead of a near
      // jump.
      if (source[0] == ASM_JCC32REL_0 &&
          (source[1] & ASM_JCC32REL_1_MASK) == ASM_JCC32REL_1_MASK) {
        unsigned short jmpcode = (static_cast<unsigned char>(
            fixup_jump_offset) << 8) | (0x70 | (source[1] & 0xf));
        memcpy(reinterpret_cast<void*>(target),
               reinterpret_cast<void*>(&jmpcode),
               2);
      } else {
        target[0] = ASM_JMP8REL;
        target[1] = static_cast<unsigned char>(fixup_jump_offset);
      }
    }
  } else {
    *target_bytes = instruction_size;
    if (target_size > *target_bytes) {
      memcpy(reinterpret_cast<void*>(target),
             reinterpret_cast<void*>(source),
             jmp_offset_in_instruction);
      memcpy(reinterpret_cast<void*>(target + jmp_offset_in_instruction),
             reinterpret_cast<void*>(&fixup_jump_offset),
             4);
    }
  }

  return SIDESTEP_SUCCESS;
}
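
// Relocates a 7-byte REX.W MOV r64, [RIP+disp32] (48 8B /r disp32) from
// |source| to |target|, recomputing the 32-bit displacement so the load
// still references the same absolute address.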
SideStepError PreamblePatcher::PatchMovWithDisplacement(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  SIDESTEP_ASSERT(instruction_size == 7);
  const int mov_offset_in_instruction = 3;  // 0x48 0x8b 0x0d <offset>
  unsigned char* original_mov_dest = reinterpret_cast<unsigned char*>(
      reinterpret_cast<__int64>(source + instruction_size) +
      *(reinterpret_cast<int*>(source + mov_offset_in_instruction)));
  unsigned char* stub_mov_from = target + instruction_size;
  __int64 fixup_mov_offset = original_mov_dest - stub_mov_from;
  if (fixup_mov_offset > INT_MAX || fixup_mov_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up near MOV because target is too far "
                    "away.");
    return SIDESTEP_UNEXPECTED;
  }

  *target_bytes = instruction_size;
  if (target_size > *target_bytes) {
    memcpy(reinterpret_cast<void*>(target),
           reinterpret_cast<void*>(source),
           mov_offset_in_instruction);
    memcpy(reinterpret_cast<void*>(target + mov_offset_in_instruction),
           reinterpret_cast<void*>(&fixup_mov_offset),
           4);
  }

  return SIDESTEP_SUCCESS;
}

}  // namespace sidestep