shim_mmap.c

/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
/*
 * shim_mmap.c
 *
 * Implementation of the system calls "mmap", "munmap" and "mprotect".
 */
#include <shim_internal.h>
#include <shim_table.h>
#include <shim_handle.h>
#include <shim_vma.h>
#include <shim_fs.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_error.h>

#include <sys/mman.h>
#include <errno.h>

DEFINE_PROFILE_OCCURENCE(mmap, memory);
void * shim_do_mmap (void * addr, size_t length, int prot, int flags, int fd,
                     off_t offset)
{
    struct shim_handle * hdl = NULL;
    long ret = -ENOMEM;
    bool reserved = false;

    assert(!(flags & (VMA_UNMAPPED|VMA_TAINTED)));

    int pal_alloc_type = ((flags & MAP_32BIT) ? PAL_ALLOC_32BIT : 0);

    /* If the caller did not request an address, reserve a free region in
       the VMA bookkeeping first. */
    if (!addr) {
        addr = get_unmapped_vma(ALIGN_UP(length), flags);
        if (addr)
            reserved = true;
    }

    /* The mapping must not overlap the current stack. */
    if (addr) {
        void * cur_stack = current_stack();
        assert(cur_stack < addr || cur_stack > addr + length);
    }

    void * mapped = ALIGN_DOWN((void *) addr);
    void * mapped_end = ALIGN_UP((void *) addr + length);

    addr = mapped;
    length = mapped_end - mapped;

    if (flags & MAP_ANONYMOUS) {
        /* Anonymous mappings are allocated directly from the PAL. */
        addr = (void *) DkVirtualMemoryAlloc(addr, length, pal_alloc_type,
                                             PAL_PROT(prot, 0));

        if (!addr) {
            ret = (PAL_NATIVE_ERRNO == PAL_ERROR_DENIED) ? -EPERM : -PAL_ERRNO;
            goto free_reserved;
        }

        ADD_PROFILE_OCCURENCE(mmap, length);
    } else {
        /* File-backed mappings are delegated to the filesystem's mmap
           callback. */
        if (fd < 0) {
            ret = -EINVAL;
            goto free_reserved;
        }

        hdl = get_fd_handle(fd, NULL, NULL);
        if (!hdl) {
            ret = -EBADF;
            goto free_reserved;
        }

        if (!hdl->fs || !hdl->fs->fs_ops || !hdl->fs->fs_ops->mmap) {
            put_handle(hdl);
            ret = -ENODEV;
            goto free_reserved;
        }

        if ((ret = hdl->fs->fs_ops->mmap(hdl, &addr, length, prot,
                                         flags, offset)) < 0) {
            put_handle(hdl);
            goto free_reserved;
        }
    }

    if (addr != mapped) {
        mapped = ALIGN_DOWN((void *) addr);
        mapped_end = ALIGN_UP((void *) addr + length);
    }

    /* Record the new mapping in the library OS's VMA bookkeeping. */
    ret = bkeep_mmap((void *) mapped, mapped_end - mapped, prot,
                     flags, hdl, offset, NULL);
    assert(!ret);

    if (hdl)
        put_handle(hdl);

    return addr;

free_reserved:
    if (reserved)
        bkeep_munmap((void *) mapped, mapped_end - mapped, &flags);

    return (void *) ret;
}
int shim_do_mprotect (void * addr, size_t len, int prot)
{
    uintptr_t mapped = ALIGN_DOWN((uintptr_t) addr);
    uintptr_t mapped_end = ALIGN_UP((uintptr_t) addr + len);
    int flags = 0;

    /* Update the VMA bookkeeping first; only then change the actual
       page protections through the PAL. */
    if (bkeep_mprotect((void *) mapped, mapped_end - mapped, prot, &flags) < 0)
        return -EACCES;

    if (!DkVirtualMemoryProtect((void *) mapped, mapped_end - mapped, prot))
        return -PAL_ERRNO;

    return 0;
}
int shim_do_munmap (void * addr, size_t len)
{
    struct shim_vma * tmp = NULL;

    /* Only unmap regions that are known to the VMA bookkeeping. */
    if (lookup_overlap_vma(addr, len, &tmp) < 0) {
        debug("can't find addr %p - %p in map, quit unmapping\n",
              addr, addr + len);

        /* Really not an error */
        return -EFAULT;
    }

    uintptr_t mapped = ALIGN_DOWN((uintptr_t) addr);
    uintptr_t mapped_end = ALIGN_UP((uintptr_t) addr + len);
    int flags = 0;

    if (bkeep_munmap((void *) mapped, mapped_end - mapped, &flags) < 0)
        return -EACCES;

    DkVirtualMemoryFree((void *) mapped, mapped_end - mapped);
    return 0;
}
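
/* Illustrative sketch, not part of the original file: a plain POSIX program
 * exercising the three system calls implemented above. When such a program
 * runs under the library OS, libc's mmap/mprotect/munmap wrappers are
 * serviced by shim_do_mmap, shim_do_mprotect and shim_do_munmap. Only
 * standard POSIX APIs are used here. */
#if 0 /* example only */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main (void)
{
    size_t len = 4096;

    /* Anonymous private mapping: takes the MAP_ANONYMOUS branch above. */
    char * buf = mmap(NULL, len, PROT_READ|PROT_WRITE,
                      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    strcpy(buf, "hello");

    /* Drop write permission: handled by shim_do_mprotect, which updates the
       VMA bookkeeping and then calls into the PAL. */
    if (mprotect(buf, len, PROT_READ) < 0)
        perror("mprotect");

    printf("%s\n", buf);

    /* Release the region: handled by shim_do_munmap. */
    if (munmap(buf, len) < 0)
        perror("munmap");

    return 0;
}
#endif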