/* graphene-ipc.c */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/miscdevice.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bitmap.h>
#include <asm/mman.h>
#include <asm/tlb.h>

#include "graphene-ipc.h"
#include "ksyms.h"

MODULE_LICENSE("Dual BSD/GPL");

#define FILE_POISON LIST_POISON1

struct kmem_cache *gipc_queue_cachep;
struct kmem_cache *gipc_send_buffer_cachep;

#define GIPC_DEBUG 0

#if defined(GIPC_DEBUG) && GIPC_DEBUG == 1
# define DEBUG(...)		printk(KERN_INFO __VA_ARGS__)
# define GIPC_BUG_ON(cond)	BUG_ON(cond)
#else
# define DEBUG(...)
# define GIPC_BUG_ON(cond)
#endif

#if defined(CONFIG_GRAPHENE_BULK_IPC) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
#  define DO_MMAP_PGOFF(file, addr, len, prot, flags, pgoff)		\
	({								\
		unsigned long populate;					\
		unsigned long rv = do_mmap_pgoff((file), (addr), (len), \
						 (prot), (flags),	\
						 (pgoff), &populate);	\
		rv; })
# else
#  define DO_MMAP_PGOFF(file, addr, len, prot, flags, pgoff)		\
	do_mmap_pgoff((file), (addr), (len), (prot), (flags), (pgoff))
# endif /* kernel_version < 3.9.0 */
#else
# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
#  define MY_DO_MMAP
#  define DO_MMAP_PGOFF(file, addr, len, prot, flags, pgoff)		\
	({								\
		unsigned long populate;					\
		unsigned long rv;					\
		rv = KSYM(do_mmap)((file), (addr), (len),		\
				   (prot), (flags), 0, (pgoff),		\
				   &populate);				\
		rv; })
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
#  define MY_DO_MMAP_PGOFF
#  define DO_MMAP_PGOFF(file, addr, len, prot, flags, pgoff)		\
	({								\
		unsigned long populate;					\
		unsigned long rv;					\
		rv = KSYM(do_mmap_pgoff)((file), (addr), (len),		\
					 (prot), (flags), (pgoff),	\
					 &populate);			\
		rv; })
# else
#  define MY_DO_MMAP_PGOFF
#  define DO_MMAP_PGOFF(file, addr, len, prot, flags, pgoff)		\
	KSYM(do_mmap_pgoff)((file), (addr), (len), (prot), (flags), (pgoff))
# endif /* kernel version < 3.9 */
#endif /* !CONFIG_GRAPHENE_BULK_IPC && kernel version > 3.4.0 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
# ifdef CONFIG_GRAPHENE_BULK_IPC
#  define FLUSH_TLB_MM_RANGE flush_tlb_mm_range
# else
#  define MY_FLUSH_TLB_MM_RANGE
#  define FLUSH_TLB_MM_RANGE KSYM(flush_tlb_mm_range)
# endif
#else /* LINUX_VERSION_CODE < 3.7.0 */
# if defined(CONFIG_GRAPHENE_BULK_IPC) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)
#  define FLUSH_TLB_PAGE flush_tlb_page
# else
#  define MY_FLUSH_TLB_PAGE
#  define FLUSH_TLB_PAGE KSYM(flush_tlb_page)
# endif
#endif

#ifdef MY_DO_MMAP
IMPORT_KSYM(do_mmap);
#endif

#ifdef MY_DO_MMAP_PGOFF
IMPORT_KSYM(do_mmap_pgoff);
#endif

#ifdef MY_FLUSH_TLB_MM_RANGE
IMPORT_KSYM(flush_tlb_mm_range);
#endif

#ifdef MY_FLUSH_TLB_PAGE
IMPORT_KSYM(flush_tlb_page);
#endif

#ifndef gipc_get_session
u64 (*my_gipc_get_session) (struct task_struct *) = NULL;
#endif

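/*
 * A gipc_queue is one bulk-IPC channel: a fixed-size ring of PAGE_QUEUE
 * page slots shared between a sender and a receiver.  `next' and `last'
 * are the ring's head and tail (masked by PAGE_QUEUE - 1), and each slot
 * records the page plus its backing file and page offset, if any, so the
 * receiver can recreate an equivalent mapping.
 */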
struct gipc_queue {
	struct list_head list;
	s64 token;
	u64 owner;
	atomic_t count;

	struct mutex send_lock, recv_lock;
	wait_queue_head_t send, recv;
	volatile int next, last;

	struct {
		struct page *page;
		struct file *file;
		u64 pgoff;
	} pages[PAGE_QUEUE];
};

struct gipc_send_buffer {
	unsigned long page_bit_map[PAGE_BITS];
	struct page *pages[PAGE_QUEUE];
	struct vm_area_struct *vmas[PAGE_QUEUE];
	struct file *files[PAGE_QUEUE];
	unsigned long pgoffs[PAGE_QUEUE];
};

struct {
	spinlock_t lock;
	/*
	 * For now, just make them monotonically increasing. XXX: At
	 * some point, do something smarter for security.
	 */
	u64 max_token;
	struct list_head channels; /* gipc_queue structs */
} gdev;

#ifdef gipc_get_session
# define GIPC_OWNER gipc_get_session(current)
#else
# define GIPC_OWNER (my_gipc_get_session ? my_gipc_get_session(current) : 0)
#endif

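/*
 * Allocate a new channel, stash it in the creator's file->private_data,
 * and register it on the global gdev.channels list under a fresh token.
 */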
static inline struct gipc_queue *create_gipc_queue(struct file *creator)
{
	struct gipc_queue *gq = kmem_cache_alloc(gipc_queue_cachep, GFP_KERNEL);

	if (!gq)
		return gq;

	memset(gq, 0, sizeof(*gq));
	INIT_LIST_HEAD(&gq->list);
	mutex_init(&gq->send_lock);
	mutex_init(&gq->recv_lock);
	init_waitqueue_head(&gq->send);
	init_waitqueue_head(&gq->recv);
	gq->owner = GIPC_OWNER;
	creator->private_data = gq;
	atomic_set(&gq->count, 1);

	spin_lock(&gdev.lock);
	list_add(&gq->list, &gdev.channels);
	gq->token = gdev.max_token++;
	spin_unlock(&gdev.lock);

	return gq;
}

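/*
 * Drop one reference to a channel; the last reference also drains any
 * pages still queued (releasing their page and file references) and
 * removes the channel from gdev.channels.  `locked' says whether the
 * caller already holds gdev.lock.
 */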
static inline void release_gipc_queue(struct gipc_queue *gq, bool locked)
{
	int idx;

	if (!atomic_dec_and_test(&gq->count))
		return;

	if (!locked)
		spin_lock(&gdev.lock);

	while (gq->next != gq->last) {
		idx = gq->next;
		if (gq->pages[idx].page) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
			put_page(gq->pages[idx].page);
#else
			page_cache_release(gq->pages[idx].page);
#endif
			gq->pages[idx].page = NULL;
		}
		if (gq->pages[idx].file) {
			fput_atomic(gq->pages[idx].file);
			gq->pages[idx].file = NULL;
			gq->pages[idx].pgoff = 0;
		}
		gq->next++;
		gq->next &= (PAGE_QUEUE - 1);
	}

	list_del(&gq->list);

	if (!locked)
		spin_unlock(&gdev.lock);

	kmem_cache_free(gipc_queue_cachep, gq);
}

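/*
 * Local copy of the kernel's fast RSS accounting helper: when the target
 * mm belongs to the current task, update the per-task counter cache
 * instead of the shared atomic counter.
 */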
#if defined(SPLIT_RSS_COUNTING)
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#else
#define add_mm_counter_fast(mm, member, val) add_mm_counter(mm, member, val)
#endif

#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

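/*
 * Walk the page tables for `addr' and write-protect the PTE so the next
 * write by either side triggers copy-on-write.  Returns -EFAULT if no
 * page is currently mapped there.
 */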
inline int make_page_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto no_page;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto no_page;

	BUG_ON(pmd_trans_huge(*pmd));

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_present(*pte)) {
		spin_unlock(ptl);
		goto no_page;
	}

	ptep_set_wrprotect(mm, addr, pte);
	spin_unlock(ptl);
	DEBUG("make page COW at %lx\n", addr);
	return 0;

no_page:
	return -EFAULT;
}

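/*
 * For each page in [addr, addr + nr_pages), set the corresponding bit in
 * page_bit_map if a page is already materialized there (including
 * transparent huge pages), so the send path knows which ranges can be
 * handed to get_user_pages and which are holes.
 */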
static void fill_page_bit_map(struct mm_struct *mm,
			      unsigned long addr, unsigned long nr_pages,
			      unsigned long page_bit_map[PAGE_BITS])
{
	int i = 0;

	DEBUG("GIPC_SEND fill_page_bit_map %lx - %lx\n",
	      addr, addr + (nr_pages << PAGE_SHIFT));

	do {
		struct vm_area_struct *vma;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		bool has_page = false;

		vma = find_vma(mm, addr);
		if (!vma)
			goto next;

		BUG_ON(vma->vm_flags & VM_HUGETLB);

		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			goto next;

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			goto next;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			goto next;

		if (unlikely(pmd_trans_huge(*pmd))) {
			has_page = true;
			goto next;
		}

		if (pmd_bad(*pmd))
			goto next;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (pte_none(*pte))
			goto next_locked;
		/*
		if (unlikely(!pte_present(*pte)) && pte_file(*pte))
			goto next_locked;
		*/
		has_page = true;

next_locked:
		spin_unlock(ptl);
next:
		if (has_page) {
			DEBUG("found a page at %lx\n", addr);
			set_bit(i, page_bit_map);
		} else {
			clear_bit(i, page_bit_map);
		}
	} while (i++, addr += PAGE_SIZE, i < nr_pages);
}

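/*
 * Pin the already-present pages identified by page_bit_map with
 * get_user_pages, mark each source PTE copy-on-write, flush the TLB for
 * the affected range, and record NULL page pointers (with their covering
 * vma, if any) for the holes.  Returns the number of slots processed or
 * a negative errno.
 */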
static int get_pages(struct task_struct *task, unsigned long start,
		     unsigned long nr_pages,
		     unsigned long page_bit_map[PAGE_BITS],
		     struct page *pages[PAGE_QUEUE],
		     struct vm_area_struct *vmas[PAGE_QUEUE])
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long addr = start, nr;
	int i = 0, j, rv;

	while (i < nr_pages) {
		unsigned long flushed, vmflags;
		int last = i;

		if (test_bit(last, page_bit_map)) {
			i = find_next_zero_bit(page_bit_map, PAGE_QUEUE,
					       last + 1);
			if (i > nr_pages)
				i = nr_pages;
			nr = i - last;

			DEBUG("GIPC_SEND get_user_pages %ld pages at %lx\n",
			      nr, addr);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
			rv = get_user_pages(addr, nr,
					    FOLL_GET|FOLL_FORCE|FOLL_SPLIT,
					    pages + last, vmas + last);
#else
			rv = __get_user_pages(task, mm, addr, nr,
					      FOLL_GET|FOLL_FORCE|FOLL_SPLIT,
					      pages + last, vmas + last, NULL);
#endif
			if (rv <= 0) {
				printk(KERN_ERR "Graphene error: "
				       "get_user_pages at 0x%016lx-0x%016lx\n",
				       addr, addr + (nr << PAGE_SHIFT));
				return rv;
			}

			if (rv != nr) {
				printk(KERN_ERR "Graphene error: "
				       "get_user_pages at 0x%016lx\n",
				       addr + (rv << PAGE_SHIFT));
				return -EACCES;
			}

			flushed = addr;
			vmflags = 0;
			for (j = 0; j < nr; j++) {
				unsigned long target = addr + (j << PAGE_SHIFT);

				/* Mark source COW */
				rv = make_page_cow(mm, vmas[last + j], target);
				if (rv)
					return rv;

				if (PageAnon(pages[last + j])) {
					/* Fix up the counters */
					inc_mm_counter_fast(mm, MM_FILEPAGES);
					dec_mm_counter_fast(mm, MM_ANONPAGES);
					pages[last + j]->mapping = NULL;
				}

#ifdef FLUSH_TLB_MM_RANGE
				if (vmflags == vmas[last + j]->vm_flags)
					continue;
				if (flushed < target)
					FLUSH_TLB_MM_RANGE(mm, flushed, target,
							   vmflags);
				flushed = target;
				vmflags = vmas[last + j]->vm_flags;
#else
				FLUSH_TLB_PAGE(vmas[last + j], target);
#endif
			}

#ifdef FLUSH_TLB_MM_RANGE
			if (flushed < addr + (nr << PAGE_SHIFT))
				FLUSH_TLB_MM_RANGE(mm, flushed,
						   addr + (nr << PAGE_SHIFT),
						   vmflags);
#endif

			vma = vmas[i - 1];
			addr += nr << PAGE_SHIFT;
		} else {
			/* This is the case where a page (or pages) are not
			 * currently mapped.
			 * Handle the hole appropriately. */
			i = find_next_bit(page_bit_map, PAGE_QUEUE, last + 1);
			if (i > nr_pages)
				i = nr_pages;
			nr = i - last;

			DEBUG("GIPC_SEND skip %ld pages at %lx\n", nr, addr);

			for (j = 0; j < nr; j++) {
				if (!vma) {
					vma = find_vma(mm, addr);
				} else {
					/* DEP 6/17/13 - these addresses should
					 * be monotonically increasing. */
					for (; vma && addr >= vma->vm_end;
					     vma = vma->vm_next)
						;
					/* Leverage monotonic increasing vmas
					 * to more quickly detect holes in the
					 * address space. */
					if (vma && addr < vma->vm_start)
						vma = NULL;
				}

				pages[last + j] = NULL;
				vmas[last + j] = vma;
				addr += PAGE_SIZE;
			}
		}
	}

	return i;
}

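/*
 * Send one user-supplied range: read the address and length from
 * userspace, pin or skip each page under mmap_sem, then append
 * page/file/offset triples to the ring, sleeping when it is full and
 * waking any receiver after each batch.
 */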
static int do_gipc_send(struct task_struct *task, struct gipc_queue *gq,
			struct gipc_send_buffer *gbuf,
			unsigned long __user *uaddr, unsigned long __user *ulen,
			unsigned long *copied_pages)
{
	struct mm_struct *mm = task->mm;
	unsigned long addr, len, nr_pages;
	int rv, i;

	DEBUG("GIPC_SEND uaddr = %p, ulen = %p\n", uaddr, ulen);

	rv = copy_from_user(&addr, uaddr, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene SEND: bad buffer %p\n", uaddr);
		return -EFAULT;
	}

	rv = copy_from_user(&len, ulen, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene SEND: bad buffer %p\n", ulen);
		return -EFAULT;
	}

	if (addr > addr + len) {
		printk(KERN_ALERT "Graphene SEND: attempt to send %p - %p "
		       "by thread %d FAIL: bad argument\n",
		       (void *) addr, (void *) (addr + len), task->pid);
		return -EINVAL;
	}

	DEBUG("GIPC_SEND addr = %lx, len = %ld\n", addr, len);

	nr_pages = len >> PAGE_SHIFT;

	if (!access_ok(VERIFY_READ, addr, len)) {
		printk(KERN_ALERT "Graphene SEND:"
		       " attempt to send %p - %p (%ld pages) "
		       "by thread %d FAIL: bad permission\n",
		       (void *) addr, (void *) (addr + len), nr_pages,
		       task->pid);
		return -EFAULT;
	}

	DEBUG(" %p - %p (%ld pages) sent by thread %d\n",
	      (void *) addr, (void *) (addr + len), nr_pages, task->pid);

	while (nr_pages) {
		unsigned long nr =
			(nr_pages <= PAGE_QUEUE) ? nr_pages : PAGE_QUEUE;

		/* for each of these addresses - check if
		 * demand faulting will be triggered
		 * if vma is present, but there is no page
		 * present (pmd/pud not present or PTE_PRESENT
		 * is off) then get_user_pages will trigger
		 * the creation of those */
		down_write(&mm->mmap_sem);

		fill_page_bit_map(mm, addr, nr, gbuf->page_bit_map);

		rv = get_pages(task, addr, nr,
			       gbuf->page_bit_map,
			       gbuf->pages,
			       gbuf->vmas);
		if (rv < 0) {
			up_write(&mm->mmap_sem);
			break;
		}

		for (i = 0; i < nr; i++) {
			BUG_ON((!gbuf->vmas[i]) && (!!gbuf->pages[i]));

			if (gbuf->vmas[i] && gbuf->vmas[i]->vm_file) {
				gbuf->files[i] = get_file(gbuf->vmas[i]->vm_file);
				gbuf->pgoffs[i] =
					((addr - gbuf->vmas[i]->vm_start) >> PAGE_SHIFT)
					+ gbuf->vmas[i]->vm_pgoff;
			} else {
				gbuf->files[i] = NULL;
				gbuf->pgoffs[i] = 0;
			}
			addr += PAGE_SIZE;
		}

		up_write(&mm->mmap_sem);

		for (i = 0; i < nr; i++) {
			/* Put in the pending buffer */
			if (((gq->last + 1) & (PAGE_QUEUE - 1)) == gq->next) {
				/* The blocking condition for send
				 * and recv can't both be true! */
				wake_up_all(&gq->recv);
				wait_event_interruptible(gq->send,
					((gq->last + 1) & (PAGE_QUEUE - 1)) != gq->next);
				if (signal_pending(task)) {
					rv = -ERESTARTSYS;
					goto out;
				}
			}

			gq->pages[gq->last].page = gbuf->pages[i];
			gq->pages[gq->last].file = gbuf->files[i];
			gq->pages[gq->last].pgoff = gbuf->pgoffs[i];
			gq->last++;
			gq->last &= PAGE_QUEUE - 1;
			(*copied_pages)++;
		}

		wake_up_all(&gq->recv);
		nr_pages -= nr;
	}

out:
	return rv;
}

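/* Wait until the ring is non-empty and return the index of the next slot. */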
static inline
int recv_next(struct task_struct *task, struct gipc_queue *gq)
{
	if (gq->next == gq->last) {
		/* The blocking condition for send & recv can't both be true */
		wake_up_all(&gq->send);
		wait_event_interruptible(gq->recv, gq->next != gq->last);
		if (signal_pending(task))
			return -ERESTARTSYS;
	}

	return gq->next;
}

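/*
 * Receive one range: for each queued slot, mmap an appropriate region if
 * the current vma does not already cover it, insert the sent page (if
 * any) and mark it copy-on-write, then drop the queue's page and file
 * references.  The chosen start address is copied back to userspace.
 */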
static int do_gipc_recv(struct task_struct *task, struct gipc_queue *gq,
			unsigned long __user *uaddr, unsigned long __user *ulen,
			unsigned long __user *uprot,
			unsigned long *copied_pages)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long start, addr, len, nr_pages, prot, pgoff;
	struct page *page = NULL;
	struct file *file = NULL;
	int i = 0, rv;

	rv = copy_from_user(&addr, uaddr, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene RECV: bad buffer %p\n", uaddr);
		return -EFAULT;
	}

	rv = copy_from_user(&len, ulen, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene RECV: bad buffer %p\n", ulen);
		return -EFAULT;
	}

	rv = copy_from_user(&prot, uprot, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene RECV: bad buffer %p\n", uprot);
		return -EFAULT;
	}

	nr_pages = len >> PAGE_SHIFT;
	start = addr;
	down_write(&mm->mmap_sem);

	while (i < nr_pages) {
		int found = recv_next(task, gq);
		int need_map = 1;

		if (found < 0) {
			rv = found;
			goto finish;
		}

		page = gq->pages[found].page;
		file = gq->pages[found].file;
		pgoff = gq->pages[found].pgoff;
		gq->next++;
		gq->next &= PAGE_QUEUE - 1;
		wake_up_all(&gq->send);

		if (vma) {
			need_map = 0;
			if (vma->vm_file != file)
				need_map = 1;
			if (file && vma->vm_start +
			    ((pgoff - vma->vm_pgoff) << PAGE_SHIFT)
			    != addr)
				need_map = 1;
			if (prot != (vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)))
				need_map = 1;
		}

		if (need_map) {
			unsigned long flags = MAP_PRIVATE;

			if (addr)
				flags |= MAP_FIXED;
			if (file)
				flags |= MAP_FILE;
			else
				flags |= MAP_ANONYMOUS;

			addr = DO_MMAP_PGOFF(file, addr,
					     (nr_pages - i) << PAGE_SHIFT,
					     prot, flags, pgoff);

			if (IS_ERR_VALUE(addr)) {
				rv = PTR_ERR((void *) addr);
				printk(KERN_ERR
				       "Graphene error: failed to mmap (%d)\n",
				       -rv);
				goto finish;
			}

			if (file)
				DEBUG("map %08lx-%08lx file %p\n", addr,
				      addr + ((nr_pages - i) << PAGE_SHIFT),
				      file);
			else
				DEBUG("map %08lx-%08lx\n", addr,
				      addr + ((nr_pages - i) << PAGE_SHIFT));

			if (!start)
				start = addr;

			vma = find_vma(mm, addr);
			if (!vma) {
				printk(KERN_ERR
				       "Graphene error: can't find vma at %p\n",
				       (void *) addr);
				rv = -ENOENT;
				goto finish;
			}
		} else {
			BUG_ON(!vma);
		}

		if (page) {
			rv = vm_insert_page(vma, addr, page);
			if (rv) {
				printk(KERN_ERR "Graphene error: "
				       "fail to insert page %d\n", rv);
				goto finish;
			}
			rv = make_page_cow(mm, vma, addr);
			if (rv) {
				printk(KERN_ERR "Graphene error: "
				       "can't make vma copy-on-write at %p\n",
				       (void *) addr);
				goto finish;
			}
		}

finish:
		/* Drop the kernel's reference to this page */
		if (page)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
			put_page(page);
#else
			page_cache_release(page);
#endif
		if (file)
			fput_atomic(file);
		if (rv)
			break;

		i++;
		addr += PAGE_SIZE;
		(*copied_pages)++;
	}

	up_write(&mm->mmap_sem);

	if (i)
		DEBUG(" %p - %p (%d pages) received by thread %d\n",
		      (void *) start, (void *) start + (i << PAGE_SHIFT), i,
		      task->pid);

	if (start) {
		rv = copy_to_user(uaddr, &start, sizeof(unsigned long));
		if (rv) {
			printk(KERN_ERR "Graphene error: bad buffer %p\n",
			       uaddr);
			return -EFAULT;
		}
	}

	return rv;
}

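/*
 * ioctl entry point for the "gipc" misc device (typically /dev/gipc).
 * A rough userspace flow, as an illustrative sketch only (the struct
 * layouts and ioctl numbers are defined in graphene-ipc.h):
 *
 *	int sender = open("/dev/gipc", O_RDWR);
 *	long token = ioctl(sender, GIPC_CREATE, 0);
 *	ioctl(sender, GIPC_SEND, &gs);   // struct gipc_send: entries, addr[], len[]
 *
 *	int receiver = open("/dev/gipc", O_RDWR);
 *	ioctl(receiver, GIPC_JOIN, token);
 *	ioctl(receiver, GIPC_RECV, &gr); // struct gipc_recv: entries, addr[], len[], prot[]
 *
 * GIPC_SEND and GIPC_RECV return the number of pages copied (or a
 * negative errno); GIPC_JOIN refuses tokens owned by another session.
 */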
static long gipc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct task_struct *task = current;
	struct gipc_queue *gq = NULL;
	long rv = 0;

	switch (cmd) {
	case GIPC_SEND: {
		struct gipc_send gs;
		struct gipc_send_buffer *gbuf;
		int i;
		unsigned long nr_pages = 0;

		rv = copy_from_user(&gs, (void *) arg, sizeof(gs));
		if (rv) {
			printk(KERN_ALERT "Graphene SEND: bad buffer %p\n",
			       (void *) arg);
			return -EFAULT;
		}

		/* Find/allocate the gipc_pages struct for our recipient */
		gq = (struct gipc_queue *) file->private_data;
		if (!gq)
			return -EFAULT;

		gbuf = kmem_cache_alloc(gipc_send_buffer_cachep, GFP_KERNEL);
		if (!gbuf)
			return -ENOMEM;

		DEBUG("GIPC_SEND %ld entries to token %lld by thread %d\n",
		      gs.entries, gq->token, task->pid);

		mutex_lock(&gq->send_lock);
		for (i = 0; i < gs.entries; i++) {
			rv = do_gipc_send(task, gq, gbuf, gs.addr + i,
					  gs.len + i, &nr_pages);
			if (rv < 0)
				break;
		}
		mutex_unlock(&gq->send_lock);

		DEBUG("GIPC_SEND return to thread %d, %ld pages are sent\n",
		      task->pid, nr_pages);

		kmem_cache_free(gipc_send_buffer_cachep, gbuf);
		rv = nr_pages ? : rv;
		break;
	}

	case GIPC_RECV: {
		struct gipc_recv gr;
		int i;
		unsigned long nr_pages = 0;

		rv = copy_from_user(&gr, (void *) arg, sizeof(gr));
		if (rv) {
			printk(KERN_ERR "Graphene error: bad buffer %p\n",
			       (void *) arg);
			return -EFAULT;
		}

		gq = (struct gipc_queue *) file->private_data;
		if (!gq)
			return -EBADF;

		DEBUG("GIPC_RECV %ld entries to token %lld by thread %d\n",
		      gr.entries, gq->token, task->pid);

		mutex_lock(&gq->recv_lock);
		for (i = 0; i < gr.entries; i++) {
			rv = do_gipc_recv(task, gq, gr.addr + i, gr.len + i,
					  gr.prot + i, &nr_pages);
			if (rv < 0)
				break;
		}
		mutex_unlock(&gq->recv_lock);

		DEBUG("GIPC_RECV return to thread %d, %ld pages are received\n",
		      task->pid, nr_pages);

		rv = nr_pages ? : rv;
		break;
	}

	case GIPC_CREATE: {
		gq = create_gipc_queue(file);
		if (!gq) {
			rv = -ENOMEM;
			break;
		}

		DEBUG("GIPC_CREATE token %lld by thread %d\n", gq->token,
		      task->pid);

		rv = gq->token;
		break;
	}

	case GIPC_JOIN: {
		struct gipc_queue *q;
		u64 token = arg;
		u64 session = GIPC_OWNER;

		if (file->private_data != NULL)
			return -EBUSY;

		/* Search for this token */
		spin_lock(&gdev.lock);
		list_for_each_entry(q, &gdev.channels, list) {
			if (q->token == token) {
				gq = q;
				break;
			}
		}

		/* Fail if we didn't find it */
		if (!gq) {
			spin_unlock(&gdev.lock);
			return -ENOENT;
		}

		if (gq->owner != session) {
			spin_unlock(&gdev.lock);
			return -EPERM;
		}

		atomic_inc(&gq->count);
		file->private_data = gq;

		/* Hold the lock until we allocate so only one process
		 * gets the queue */
		spin_unlock(&gdev.lock);

		DEBUG("GIPC_JOIN token %lld by thread %d\n", token, task->pid);
		rv = 0;
		break;
	}

	default:
		printk(KERN_ALERT "Graphene unknown ioctl %u %lu\n", cmd, arg);
		rv = -ENOSYS;
		break;
	}

	return rv;
}

static int gipc_release(struct inode *inode, struct file *file)
{
	struct gipc_queue *gq = (struct gipc_queue *) file->private_data;

	if (!gq)
		return 0;

	file->private_data = NULL;
	release_gipc_queue(gq, false);
	return 0;
}

static int gipc_open(struct inode *inode, struct file *file)
{
	file->private_data = NULL;
	return 0;
}

static struct file_operations gipc_fops = {
	.owner		= THIS_MODULE,
	.release	= gipc_release,
	.open		= gipc_open,
	.unlocked_ioctl	= gipc_ioctl,
	.compat_ioctl	= gipc_ioctl,
	.llseek		= noop_llseek,
};

static struct miscdevice gipc_dev = {
	.minor		= GIPC_MINOR,
	.name		= "gipc",
	.fops		= &gipc_fops,
	.mode		= 0666,
};

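/*
 * Module init: resolve any private kernel symbols needed on this kernel
 * version, create the slab caches for queues and send buffers, and
 * register the "gipc" misc character device.
 */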
static int __init gipc_init(void)
{
	int rv = 0;

#ifdef MY_DO_MMAP
	LOOKUP_KSYM(do_mmap);
#endif
#ifdef MY_DO_MMAP_PGOFF
	LOOKUP_KSYM(do_mmap_pgoff);
#endif
#ifdef MY_FLUSH_TLB_MM_RANGE
	LOOKUP_KSYM(flush_tlb_mm_range);
#endif
#ifdef MY_FLUSH_TLB_PAGE
	LOOKUP_KSYM(flush_tlb_page);
#endif

#ifndef gipc_get_session
	my_gipc_get_session = (void *) kallsyms_lookup_name("gipc_get_session");
#endif

	/* Register the kmem cache */
	gipc_queue_cachep = kmem_cache_create("gipc queue",
					      sizeof(struct gipc_queue),
					      0,
					      SLAB_HWCACHE_ALIGN|
					      SLAB_DESTROY_BY_RCU,
					      NULL);
	if (!gipc_queue_cachep) {
		printk(KERN_ERR "Graphene error: "
		       "failed to create a gipc queues cache\n");
		return -ENOMEM;
	}

	gipc_send_buffer_cachep = kmem_cache_create("gipc send buffer",
						    sizeof(struct gipc_send_buffer),
						    0,
						    SLAB_HWCACHE_ALIGN|
						    SLAB_DESTROY_BY_RCU,
						    NULL);
	if (!gipc_send_buffer_cachep) {
		printk(KERN_ERR "Graphene error: "
		       "failed to create a gipc buffers cache\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&gdev.channels);
	spin_lock_init(&gdev.lock);
	gdev.max_token = 1;

	rv = misc_register(&gipc_dev);
	if (rv) {
		printk(KERN_ERR "Graphene error: "
		       "failed to add a char device (rv=%d)\n", rv);
		return rv;
	}

	printk(KERN_ALERT "Graphene IPC: Hello, world\n");
	return 0;
}

static void __exit gipc_exit(void)
{
	struct gipc_queue *gq, *n;

	spin_lock(&gdev.lock);
	list_for_each_entry_safe(gq, n, &gdev.channels, list)
		release_gipc_queue(gq, true);
	spin_unlock(&gdev.lock);

	misc_deregister(&gipc_dev);
	kmem_cache_destroy(gipc_queue_cachep);

	printk(KERN_ALERT "Graphene IPC: Goodbye, cruel world\n");
}

module_init(gipc_init);
module_exit(gipc_exit);