shim_mmap.c

/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_mmap.c
 *
 * Implementation of the system calls "mmap", "munmap" and "mprotect".
 */

#include <shim_internal.h>
#include <shim_table.h>
#include <shim_handle.h>
#include <shim_vma.h>
#include <shim_fs.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_error.h>

#include <sys/mman.h>
#include <errno.h>

DEFINE_PROFILE_OCCURENCE(mmap, memory);
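
/*
 * shim_do_mmap: handle the mmap() system call inside the library OS.
 * As this file shows, the flow is: validate the arguments, pick (or
 * reserve) an address, back the region either with anonymous PAL memory
 * or with the file system's mmap callback, and finally record the
 * mapping in the shim's VMA bookkeeping.
 */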
void * shim_do_mmap (void * addr, size_t length, int prot, int flags, int fd,
                     off_t offset)
{
    struct shim_handle * hdl = NULL;
    long ret = -ENOMEM;
    bool reserved = false;
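
    /* Reject a length that makes addr + length wrap around the address
     * space. */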
    if (addr + length < addr) {
        return (void *) -EINVAL;
    }

    assert(!(flags & (VMA_UNMAPPED|VMA_TAINTED)));

    if (flags & MAP_32BIT)
        return (void *) -ENOSYS;

    int pal_alloc_type = 0;
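
    /*
     * Address selection: a MAP_FIXED request with a concrete address is
     * checked against the existing VMAs (an overlap is tolerated, with a
     * debug message); any other request has its address hint discarded
     * and is assigned a fresh region below.
     */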
    if ((flags & MAP_FIXED) && (addr != NULL)) {
        struct shim_vma * tmp = NULL;

        if (lookup_overlap_vma(addr, length, &tmp) == 0) {
            debug("mmap: allowing overlapping MAP_FIXED allocation at %p with length %lu\n",
                  addr, length);
        }
    } else {
        /* For calls without MAP_FIXED, don't even attempt to honor the
         * caller's requested address. Such requests are likely to be assuming
         * things about the address space that aren't valid in graphene. */
        addr = NULL;
    }
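
    /* No usable address yet: reserve an unmapped region from the VMA
     * bookkeeping.  `reserved` remembers that the reservation has to be
     * rolled back if the allocation below fails. */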
    if (!addr) {
        addr = get_unmapped_vma(ALIGN_UP(length), flags);
        if (addr) {
            reserved = true;

            // Approximate check only, to help root out bugs.
            void * cur_stack = current_stack();
            assert(cur_stack < addr || cur_stack > addr + length);
        }
    }
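
    /* Round the mapping out to aligned boundaries.  ALIGN_DOWN/ALIGN_UP
     * take a single argument here, so the alignment is presumably the
     * PAL's allocation alignment. */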
    void * mapped = ALIGN_DOWN((void *) addr);
    void * mapped_end = ALIGN_UP((void *) addr + length);

    addr = mapped;
    length = mapped_end - mapped;
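
    /* Anonymous mappings are backed directly by the PAL; file-backed
     * mappings are dispatched to the handle's file-system mmap callback,
     * which may relocate the mapping. */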
  69. if (flags & MAP_ANONYMOUS) {
  70. addr = (void *) DkVirtualMemoryAlloc(addr, length, pal_alloc_type,
  71. PAL_PROT(prot, 0));
  72. if (!addr) {
  73. ret = (PAL_NATIVE_ERRNO == PAL_ERROR_DENIED) ? -EPERM : -PAL_ERRNO;
  74. goto free_reserved;
  75. }
  76. ADD_PROFILE_OCCURENCE(mmap, length);
  77. } else {
  78. if (fd < 0) {
  79. ret = -EINVAL;
  80. goto free_reserved;
  81. }
  82. hdl = get_fd_handle(fd, NULL, NULL);
  83. if (!hdl) {
  84. ret = -EBADF;
  85. goto free_reserved;
  86. }
  87. if (!hdl->fs || !hdl->fs->fs_ops || !hdl->fs->fs_ops->mmap) {
  88. put_handle(hdl);
  89. ret = -ENODEV;
  90. goto free_reserved;
  91. }
  92. if ((ret = hdl->fs->fs_ops->mmap(hdl, &addr, length, PAL_PROT(prot, flags),
  93. flags, offset)) < 0) {
  94. put_handle(hdl);
  95. goto free_reserved;
  96. }
  97. }
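
    /* The file-system mmap callback may have placed the mapping at a
     * different address; recompute the aligned bounds if so. */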
    if (addr != mapped) {
        mapped = ALIGN_DOWN((void *) addr);
        mapped_end = ALIGN_UP((void *) addr + length);
    }
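
    /* Record the final mapping in the shim's VMA bookkeeping; this is
     * expected not to fail, hence the assert. */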
    ret = bkeep_mmap((void *) mapped, mapped_end - mapped, prot,
                     flags, hdl, offset, NULL);
    assert(!ret);

    if (hdl)
        put_handle(hdl);

    return addr;
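
    /* Error path: if an unmapped region was reserved above, release the
     * reservation from the VMA bookkeeping before returning the error. */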
free_reserved:
    if (reserved)
        bkeep_munmap((void *) mapped, mapped_end - mapped, &flags);
    return (void *) ret;
}
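
/*
 * shim_do_mprotect: handle the mprotect() system call.  The protection
 * change is first recorded in the VMA bookkeeping, then applied to the
 * underlying pages through the PAL.
 */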
int shim_do_mprotect (void * addr, size_t len, int prot)
{
    uintptr_t mapped = ALIGN_DOWN((uintptr_t) addr);
    uintptr_t mapped_end = ALIGN_UP((uintptr_t) addr + len);
    int flags = 0;

    if (bkeep_mprotect((void *) mapped, mapped_end - mapped, prot, &flags) < 0)
        return -EACCES;

    if (!DkVirtualMemoryProtect((void *) mapped, mapped_end - mapped, prot))
        return -PAL_ERRNO;

    return 0;
}
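
/*
 * shim_do_munmap: handle the munmap() system call.  The range must
 * overlap at least one known VMA; the bookkeeping is updated first, and
 * the pages are then released through the PAL.
 */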
int shim_do_munmap (void * addr, size_t len)
{
    struct shim_vma * tmp = NULL;

    if (lookup_overlap_vma(addr, len, &tmp) < 0) {
        debug("can't find addr %p - %p in map, quit unmapping\n",
              addr, addr + len);

        /* Really not an error */
        return -EFAULT;
    }

    uintptr_t mapped = ALIGN_DOWN((uintptr_t) addr);
    uintptr_t mapped_end = ALIGN_UP((uintptr_t) addr + len);
    int flags = 0;

    if (bkeep_munmap((void *) mapped, mapped_end - mapped, &flags) < 0)
        return -EACCES;

    DkVirtualMemoryFree((void *) mapped, mapped_end - mapped);
    return 0;
}