
/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University

   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
/*
 * shim_mmap.c
 *
 * Implementation of the system calls "mmap", "munmap" and "mprotect".
 */
#include <shim_internal.h>
#include <shim_table.h>
#include <shim_handle.h>
#include <shim_vma.h>
#include <shim_fs.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_error.h>

#include <sys/mman.h>
#include <errno.h>

DEFINE_PROFILE_OCCURENCE(mmap, memory);
void * shim_do_mmap (void * addr, size_t length, int prot, int flags, int fd,
                     off_t offset)
{
    struct shim_handle * hdl = NULL;
    long ret = -ENOMEM;
    bool reserved = false;

    assert(!(flags & (VMA_UNMAPPED|VMA_TAINTED)));

    if (flags & MAP_32BIT)
        return (void *) -ENOSYS;

    int pal_alloc_type = 0;

    /* if the caller did not pick an address, reserve a free range in the
       VMA bookkeeping */
    if (!addr) {
        addr = get_unmapped_vma(ALIGN_UP(length), flags);
        if (addr)
            reserved = true;
    }

    /* the requested range must not overlap the current library-OS stack */
    if (addr) {
        void * cur_stack = current_stack();
        assert(cur_stack < addr || cur_stack > addr + length);
    }

    /* round the mapping out to allocation-alignment boundaries */
    void * mapped = ALIGN_DOWN((void *) addr);
    void * mapped_end = ALIGN_UP((void *) addr + length);

    addr = mapped;
    length = mapped_end - mapped;

    if (flags & MAP_ANONYMOUS) {
        /* anonymous mapping: allocate memory directly from the PAL */
        addr = (void *) DkVirtualMemoryAlloc(addr, length, pal_alloc_type,
                                             PAL_PROT(prot, 0));

        if (!addr) {
            ret = (PAL_NATIVE_ERRNO == PAL_ERROR_DENIED) ? -EPERM : -PAL_ERRNO;
            goto free_reserved;
        }

        ADD_PROFILE_OCCURENCE(mmap, length);
    } else {
        /* file-backed mapping: delegate to the filesystem's mmap callback
           of the handle behind the file descriptor */
        if (fd < 0) {
            ret = -EINVAL;
            goto free_reserved;
        }

        hdl = get_fd_handle(fd, NULL, NULL);
        if (!hdl) {
            ret = -EBADF;
            goto free_reserved;
        }

        if (!hdl->fs || !hdl->fs->fs_ops || !hdl->fs->fs_ops->mmap) {
            put_handle(hdl);
            ret = -ENODEV;
            goto free_reserved;
        }

        if ((ret = hdl->fs->fs_ops->mmap(hdl, &addr, length, prot,
                                         flags, offset)) < 0) {
            put_handle(hdl);
            goto free_reserved;
        }
    }

    /* the PAL or the filesystem may have chosen a different address */
    if (addr != mapped) {
        mapped = ALIGN_DOWN((void *) addr);
        mapped_end = ALIGN_UP((void *) addr + length);
    }

    /* record the mapping in the VMA bookkeeping */
    ret = bkeep_mmap((void *) mapped, mapped_end - mapped, prot,
                     flags, hdl, offset, NULL);
    assert(!ret);

    if (hdl)
        put_handle(hdl);

    return addr;

free_reserved:
    /* on failure, release the address range reserved above */
    if (reserved)
        bkeep_munmap((void *) mapped, mapped_end - mapped, &flags);

    return (void *) ret;
}
int shim_do_mprotect (void * addr, size_t len, int prot)
{
    /* round the range out to allocation-alignment boundaries */
    uintptr_t mapped = ALIGN_DOWN((uintptr_t) addr);
    uintptr_t mapped_end = ALIGN_UP((uintptr_t) addr + len);
    int flags = 0;

    /* update the VMA bookkeeping first; refuse if the range is not tracked */
    if (bkeep_mprotect((void *) mapped, mapped_end - mapped, prot, &flags) < 0)
        return -EACCES;

    /* then change the actual page protection through the PAL */
    if (!DkVirtualMemoryProtect((void *) mapped, mapped_end - mapped, prot))
        return -PAL_ERRNO;

    return 0;
}
int shim_do_munmap (void * addr, size_t len)
{
    struct shim_vma * tmp = NULL;

    /* nothing to unmap if no VMA overlaps the requested range */
    if (lookup_overlap_vma(addr, len, &tmp) < 0) {
        debug("can't find addr %p - %p in map, quit unmapping\n",
              addr, addr + len);

        /* Really not an error */
        return -EFAULT;
    }

    uintptr_t mapped = ALIGN_DOWN((uintptr_t) addr);
    uintptr_t mapped_end = ALIGN_UP((uintptr_t) addr + len);
    int flags = 0;

    /* remove the range from the VMA bookkeeping, then free the pages */
    if (bkeep_munmap((void *) mapped, mapped_end - mapped, &flags) < 0)
        return -EACCES;

    DkVirtualMemoryFree((void *) mapped, mapped_end - mapped);
    return 0;
}
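
/*
 * A minimal usage sketch (not part of the original file): how an application
 * running on top of the library OS would exercise the handlers above. The
 * guest's mmap/mprotect/munmap calls are dispatched through the shim's
 * syscall table to shim_do_mmap, shim_do_mprotect and shim_do_munmap. The
 * snippet below is plain POSIX user code, kept in an #if 0 block so it is
 * never compiled here; it assumes a 4096-byte page size for the length.
 */
#if 0
#include <sys/mman.h>
#include <string.h>

int example (void)
{
    size_t len = 4096;

    /* anonymous mapping: taken by the MAP_ANONYMOUS branch of shim_do_mmap */
    void * p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return -1;

    memset(p, 0, len);

    /* downgrade to read-only: handled by shim_do_mprotect */
    if (mprotect(p, len, PROT_READ) < 0)
        return -1;

    /* release the mapping: handled by shim_do_munmap */
    return munmap(p, len);
}
#endif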