/* graphene-ipc.c */
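
/*
 * Graphene bulk IPC driver ("gipc"): a misc character device through which
 * cooperating Graphene processes in the same session hand whole page ranges
 * to each other.  The sender's pages are write-protected (made copy-on-write)
 * and queued in an in-kernel ring buffer; the receiver maps them into its own
 * address space, also copy-on-write.
 */
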
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/miscdevice.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bitmap.h>
#include <asm/mman.h>
#include <asm/tlb.h>

#ifdef CONFIG_GRAPHENE_BULK_IPC
# include "graphene.h"
#endif
#include "graphene-ipc.h"

MODULE_LICENSE("Dual BSD/GPL");

#define FILE_POISON LIST_POISON1

struct kmem_cache *gipc_queue_cachep;
struct kmem_cache *gipc_send_buffer_cachep;

#define GIPC_DEBUG 0

#if defined(GIPC_DEBUG) && GIPC_DEBUG == 1
# define DEBUG(...) printk(KERN_INFO __VA_ARGS__)
# define GIPC_BUG_ON(cond) BUG_ON(cond)
#else
# define DEBUG(...)
# define GIPC_BUG_ON(cond)
#endif

#if !defined(CONFIG_GRAPHENE_BULK_IPC)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
typedef unsigned long (*do_mmap_pgoff_t) (struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long *);
static do_mmap_pgoff_t my_do_mmap_pgoff = NULL;
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
typedef unsigned long (*do_mmap_pgoff_t) (struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long);
static do_mmap_pgoff_t my_do_mmap_pgoff = NULL;
# endif /* kernel version >= 3.4 */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
typedef unsigned long (*flush_tlb_page_t) (struct vm_area_struct *, unsigned long);
static flush_tlb_page_t my_flush_tlb_page = NULL;
# endif
#endif

#ifndef gipc_get_session
u64 (*my_gipc_get_session) (struct task_struct *) = NULL;
#endif
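
/*
 * A gipc_queue is one bulk-IPC channel: a fixed-size ring of PAGE_QUEUE
 * page/file/offset triples.  "next" is the slot the receiver will consume
 * next and "last" is the slot the sender will fill next; send_lock and
 * recv_lock serialize senders and receivers, and the send/recv wait queues
 * pair with the ring-full/ring-empty conditions.  "count" is a reference
 * count shared by the files attached via GIPC_CREATE/GIPC_JOIN.
 */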
struct gipc_queue {
	struct list_head list;
	s64 token;
	u64 owner;
	atomic_t count;

	struct mutex send_lock, recv_lock;
	wait_queue_head_t send, recv;
	volatile int next, last;

	struct {
		struct page *page;
		struct file *file;
		u64 pgoff;
	} pages[PAGE_QUEUE];
};

struct gipc_send_buffer {
	unsigned long page_bit_map[PAGE_BITS];
	struct page *pages[PAGE_QUEUE];
	struct vm_area_struct *vmas[PAGE_QUEUE];
	struct file *files[PAGE_QUEUE];
	unsigned long pgoffs[PAGE_QUEUE];
};
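
/* Global device state: the list of live channels and the next token to issue. */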
struct {
	spinlock_t lock;
	/*
	 * For now, just make them monotonically increasing.  XXX: At
	 * some point, do something smarter for security.
	 */
	u64 max_token;
	struct list_head channels; // gipc_queue structs
} gdev;

#ifdef gipc_get_session
#define GIPC_OWNER gipc_get_session(current)
#else
#define GIPC_OWNER (my_gipc_get_session ? my_gipc_get_session(current) : 0)
#endif
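
/*
 * Allocate a new channel, attach it to the creating file, and publish it on
 * gdev.channels under a fresh token.  release_gipc_queue() drops a reference
 * and, on the last put, releases any pages/files still sitting in the ring
 * before freeing the queue; "locked" tells it whether the caller already
 * holds gdev.lock.
 */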
static inline struct gipc_queue * create_gipc_queue(struct file *creator)
{
	struct gipc_queue *gq = kmem_cache_alloc(gipc_queue_cachep, GFP_KERNEL);

	if (!gq)
		return gq;

	memset(gq, 0, sizeof(*gq));
	INIT_LIST_HEAD(&gq->list);
	mutex_init(&gq->send_lock);
	mutex_init(&gq->recv_lock);
	init_waitqueue_head(&gq->send);
	init_waitqueue_head(&gq->recv);
	gq->owner = GIPC_OWNER;
	creator->private_data = gq;
	atomic_set(&gq->count, 1);

	spin_lock(&gdev.lock);
	list_add(&gq->list, &gdev.channels);
	gq->token = gdev.max_token++;
	spin_unlock(&gdev.lock);

	return gq;
}

static inline void release_gipc_queue(struct gipc_queue *gq, bool locked)
{
	int idx;

	if (!atomic_dec_and_test(&gq->count))
		return;

	if (!locked)
		spin_lock(&gdev.lock);

	while (gq->next != gq->last) {
		idx = gq->next;
		if (gq->pages[idx].page) {
			page_cache_release(gq->pages[idx].page);
			gq->pages[idx].page = NULL;
		}
		if (gq->pages[idx].file) {
			fput_atomic(gq->pages[idx].file);
			gq->pages[idx].file = NULL;
			gq->pages[idx].pgoff = 0;
		}
		gq->next++;
		gq->next &= (PAGE_QUEUE - 1);
	}

	list_del(&gq->list);

	if (!locked)
		spin_unlock(&gdev.lock);

	kmem_cache_free(gipc_queue_cachep, gq);
}

#if defined(SPLIT_RSS_COUNTING)
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#else
#define add_mm_counter_fast(mm, member, val) add_mm_counter(mm, member, val)
#endif

#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
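
/*
 * Walk the page tables for addr and clear the PTE's write bit, so the next
 * write by either side faults and gets a private copy.  Returns -EFAULT if
 * no page is currently mapped there.
 */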
inline int make_page_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto no_page;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto no_page;

	BUG_ON(pmd_trans_huge(*pmd));

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_present(*pte)) {
		spin_unlock(ptl);
		goto no_page;
	}

	ptep_set_wrprotect(mm, addr, pte);
	spin_unlock(ptl);

#if !defined(CONFIG_GRAPHENE_BULK_IPC) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
	my_flush_tlb_page(vma, addr);
#else
	flush_tlb_page(vma, addr);
#endif

	DEBUG("make page COW at %lx\n", addr);
	return 0;

no_page:
	return -EFAULT;
}
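
/*
 * For each of the nr_pages pages starting at addr, set the corresponding bit
 * if a page (or a transparent huge page) is already present, and clear it
 * otherwise.  get_pages() uses this map to decide where get_user_pages()
 * must be called and where the range is treated as a hole (nothing pinned;
 * only the backing vma, if any, is recorded).
 */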
static void fill_page_bit_map(struct mm_struct *mm,
			      unsigned long addr, unsigned long nr_pages,
			      unsigned long page_bit_map[PAGE_BITS])
{
	int i = 0;

	DEBUG("GIPC_SEND fill_page_bit_map %lx - %lx\n",
	      addr, addr + (nr_pages << PAGE_SHIFT));

	do {
		struct vm_area_struct *vma;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		bool has_page = false;

		vma = find_vma(mm, addr);
		if (!vma)
			goto next;

		BUG_ON(vma->vm_flags & VM_HUGETLB);

		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			goto next;

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			goto next;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			goto next;

		if (unlikely(pmd_trans_huge(*pmd))) {
			has_page = true;
			goto next;
		}

		if (pmd_bad(*pmd))
			goto next;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (pte_none(*pte))
			goto next_locked;

		if (unlikely(!pte_present(*pte)) && pte_file(*pte))
			goto next_locked;

		has_page = true;
next_locked:
		spin_unlock(ptl);
next:
		if (has_page) {
			DEBUG("found a page at %lx\n", addr);
			set_bit(i, page_bit_map);
		} else {
			clear_bit(i, page_bit_map);
		}
	} while (i++, addr += PAGE_SIZE, i < nr_pages);
}
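
/*
 * Pin the present pages in [start, start + nr_pages) with get_user_pages,
 * mark each of them copy-on-write in the sender, and record the vma covering
 * every page.  Ranges whose bit is clear in page_bit_map are not pinned;
 * pages[] gets NULL there and only the covering vma (if any) is recorded, so
 * the receiver can recreate the mapping from the backing file instead.
 */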
static int get_pages (struct task_struct *task, unsigned long start,
		      unsigned long nr_pages,
		      unsigned long page_bit_map[PAGE_BITS],
		      struct page *pages[PAGE_QUEUE],
		      struct vm_area_struct *vmas[PAGE_QUEUE])
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long addr = start, nr;
	int i = 0, j, rv;

	while (i < nr_pages) {
		int last = i;

		if (test_bit(last, page_bit_map)) {
			i = find_next_zero_bit(page_bit_map, PAGE_QUEUE,
					       last + 1);
			if (i > nr_pages)
				i = nr_pages;
			nr = i - last;
  254. DEBUG("GIPC_SEND get_user_pages %ld pages at %lx\n",
  255. addr, nr);
			rv = __get_user_pages(task, mm, addr, nr,
					      FOLL_GET|FOLL_FORCE|FOLL_SPLIT,
					      pages + last, vmas + last, NULL);
			if (rv <= 0) {
				printk(KERN_ERR "Graphene error: "
				       "get_user_pages at 0x%016lx-0x%016lx\n",
				       addr, addr + (nr << PAGE_SHIFT));
				return rv;
			}

			if (rv != nr) {
				printk(KERN_ERR "Graphene error: "
				       "get_user_pages at 0x%016lx\n",
				       addr + (rv << PAGE_SHIFT));
				return -EACCES;
			}

			for (j = 0; j < nr; j++) {
				/* Mark source COW */
				rv = make_page_cow(mm, vmas[last + j],
						   addr + (j << PAGE_SHIFT));
				if (rv)
					return rv;

				if (PageAnon(pages[last + j])) {
					/* Fix up the counters */
					inc_mm_counter_fast(mm, MM_FILEPAGES);
					dec_mm_counter_fast(mm, MM_ANONPAGES);
				}

				pages[last + j]->mapping = NULL;
			}

			vma = vmas[i - 1];
			addr += nr << PAGE_SHIFT;
		} else {
			/* This is the case where a page (or pages) are not
			 * currently mapped.
			 * Handle the hole appropriately. */
			i = find_next_bit(page_bit_map, PAGE_QUEUE, last + 1);
			if (i > nr_pages)
				i = nr_pages;
			nr = i - last;
  294. DEBUG("GIPC_SEND skip %ld pages at %lx\n", addr, nr);
			for (j = 0; j < nr; j++) {
				if (!vma) {
					vma = find_vma(mm, addr);
				} else {
					/* DEP 6/17/13 - these addresses should
					 * be monotonically increasing. */
					for (; vma && addr >= vma->vm_end;
					     vma = vma->vm_next);

					/* Leverage monotonic increasing vmas
					 * to more quickly detect holes in the
					 * address space. */
					if (vma && addr < vma->vm_start)
						vma = NULL;
				}

				pages[last + j] = NULL;
				vmas[last + j] = vma;
				addr += PAGE_SIZE;
			}
		}
	}

	return i;
}
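
/*
 * Send one (address, length) descriptor: read it from userspace, pin and
 * write-protect the pages in PAGE_QUEUE-sized batches, capture each page's
 * backing file and page offset, and push the triples into the ring,
 * blocking (interruptibly) whenever the ring is full.
 */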
static int do_gipc_send(struct task_struct *task, struct gipc_queue *gq,
			struct gipc_send_buffer *gbuf,
			unsigned long __user *uaddr, unsigned long __user *ulen,
			unsigned long *copied_pages)
{
	struct mm_struct *mm = task->mm;
	unsigned long addr, len, nr_pages;
	int rv, i;

	DEBUG("GIPC_SEND uaddr = %p, ulen = %p\n", uaddr, ulen);

	rv = copy_from_user(&addr, uaddr, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene SEND: bad buffer %p\n", uaddr);
		return -EFAULT;
	}

	rv = copy_from_user(&len, ulen, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene SEND: bad buffer %p\n", ulen);
		return -EFAULT;
	}

	if (addr > addr + len) {
		printk(KERN_ALERT "Graphene SEND: attempt to send %p - %p "
		       " by thread %d FAIL: bad argument\n",
		       (void *) addr, (void *) (addr + len), task->pid);
		return -EINVAL;
	}

	DEBUG("GIPC_SEND addr = %lx, len = %ld\n", addr, len);

	nr_pages = len >> PAGE_SHIFT;

	if (!access_ok(VERIFY_READ, addr, len)) {
		printk(KERN_ALERT "Graphene SEND:"
		       " attempt to send %p - %p (%ld pages) "
		       " by thread %d FAIL: bad permission\n",
		       (void *) addr, (void *) (addr + len), nr_pages,
		       task->pid);
		return -EFAULT;
	}

	DEBUG(" %p - %p (%ld pages) sent by thread %d\n",
	      (void *) addr, (void *) (addr + len), nr_pages, task->pid);

	while (nr_pages) {
		unsigned long nr =
			(nr_pages <= PAGE_QUEUE) ? nr_pages : PAGE_QUEUE;

		/* for each of these addresses - check if
		 * demand faulting will be triggered
		 * if vma is present, but there is no page
		 * present(pmd/pud not present or PTE_PRESENT
		 * is off) then get_user_pages will trigger
		 * the creation of those */
		down_write(&mm->mmap_sem);

		fill_page_bit_map(mm, addr, nr, gbuf->page_bit_map);

		rv = get_pages(task, addr, nr,
			       gbuf->page_bit_map,
			       gbuf->pages,
			       gbuf->vmas);
		if (rv < 0) {
			up_write(&mm->mmap_sem);
			break;
		}

		for (i = 0; i < nr; i++) {
			BUG_ON((!gbuf->vmas[i]) && (!!gbuf->pages[i]));

			if (gbuf->vmas[i] && gbuf->vmas[i]->vm_file) {
				gbuf->files[i] = get_file(gbuf->vmas[i]->vm_file);
				gbuf->pgoffs[i] =
					((addr - gbuf->vmas[i]->vm_start) >> PAGE_SHIFT)
					+ gbuf->vmas[i]->vm_pgoff;
			} else {
				gbuf->files[i] = NULL;
				gbuf->pgoffs[i] = 0;
			}

			addr += PAGE_SIZE;
		}

		up_write(&mm->mmap_sem);

		for (i = 0; i < nr ; i++) {
			/* Put in the pending buffer */
			if (((gq->last + 1) & (PAGE_QUEUE - 1)) == gq->next) {
				/* The blocking condition for send
				 * and recv can't both be true! */
				wake_up_all(&gq->recv);
				wait_event_interruptible(gq->send,
					((gq->last + 1) & (PAGE_QUEUE - 1)) != gq->next);
				if (signal_pending(task)) {
					rv = -ERESTARTSYS;
					goto out;
				}
			}

			gq->pages[gq->last].page = gbuf->pages[i];
			gq->pages[gq->last].file = gbuf->files[i];
			gq->pages[gq->last].pgoff = gbuf->pgoffs[i];
			gq->last++;
			gq->last &= PAGE_QUEUE - 1;
			(*copied_pages)++;
		}

		wake_up_all(&gq->recv);
		nr_pages -= nr;
	}

out:
	return rv;
}
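
/*
 * Return the index of the next filled slot, blocking (interruptibly) while
 * the ring is empty.  The sender is woken first, since the ring cannot be
 * full and empty at the same time.
 */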
static inline
int recv_next (struct task_struct *task, struct gipc_queue *gq)
{
	if (gq->next == gq->last) {
		/* The blocking condition for send & recv can't both be true */
		wake_up_all(&gq->send);
		wait_event_interruptible(gq->recv, gq->next != gq->last);
		if (signal_pending(task))
			return -ERESTARTSYS;
	}

	return gq->next;
}
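
/*
 * Thin wrapper that papers over the do_mmap_pgoff() signature differences
 * across kernel versions: it calls do_mmap_pgoff() directly where available
 * and otherwise goes through the function pointer resolved via
 * kallsyms_lookup_name() at module init.
 */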
static inline
unsigned long __do_mmap_pgoff(struct file *file, unsigned long addr,
			      unsigned long len, unsigned long prot,
			      unsigned long flags, unsigned long pgoff)
{
	/* Speculate that we will want the entire region under one vma.
	 * Correct if not. */
#if defined(CONFIG_GRAPHENE_BULK_IPC) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
#else
	unsigned long populate;
	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff, &populate);
#endif /* kernel_version >= 3.9.0 */
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
	addr = my_do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
#else
	unsigned long populate;
	addr = my_do_mmap_pgoff(file, addr, len, prot, flags, pgoff, &populate);
#endif /* kernel_version >= 3.9.0 */
#endif
	return addr;
}
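
/*
 * Receive one (address, length, protection) descriptor: pop triples off the
 * ring, mmap a region for them (honoring the requested address when it is
 * non-zero), insert each queued page copy-on-write, and finally write the
 * chosen start address back to userspace.
 */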
static int do_gipc_recv(struct task_struct *task, struct gipc_queue *gq,
			unsigned long __user *uaddr, unsigned long __user *ulen,
			unsigned long __user *uprot,
			unsigned long *copied_pages)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long start, addr, len, nr_pages, prot, pgoff;
	struct page *page = NULL;
	struct file *file = NULL;
	int i = 0, rv;

	rv = copy_from_user(&addr, uaddr, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene RECV: bad buffer %p\n", uaddr);
		return -EFAULT;
	}

	rv = copy_from_user(&len, ulen, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene RECV: bad buffer %p\n", ulen);
		return -EFAULT;
	}

	rv = copy_from_user(&prot, uprot, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene RECV: bad buffer %p\n", uprot);
		return -EFAULT;
	}

	nr_pages = len >> PAGE_SHIFT;
	start = addr;

	down_write(&mm->mmap_sem);

	while (i < nr_pages) {
		int found = recv_next(task, gq);

		if (found < 0) {
			rv = found;
			goto finish;
		}

		page = gq->pages[found].page;
		file = gq->pages[found].file;
		pgoff = gq->pages[found].pgoff;
		gq->next++;
		gq->next &= PAGE_QUEUE - 1;
		wake_up_all(&gq->send);

		if (!vma || vma->vm_file != file ||
		    vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT)
		    != addr) {
			unsigned long flags = MAP_PRIVATE;

			if (addr)
				flags |= MAP_FIXED;
			if (file)
				flags |= MAP_FILE;
			else
				flags |= MAP_ANONYMOUS;

			addr = __do_mmap_pgoff(file, addr,
					       (nr_pages - i) << PAGE_SHIFT,
					       prot, flags, pgoff);
			if (IS_ERR_VALUE(addr)) {
				rv = PTR_ERR((void *) addr);
				printk(KERN_ERR
				       "Graphene error: failed to mmap (%d)\n",
				       -rv);
				goto finish;
			}

			if (!start)
				start = addr;

			vma = find_vma(mm, addr);
			if (!vma) {
				printk(KERN_ERR
				       "Graphene error: can't find vma at %p\n",
				       (void *) addr);
				rv = -ENOENT;
				goto finish;
			}
		}

		if (page) {
			rv = vm_insert_page(vma, addr, page);
			if (rv) {
				printk(KERN_ERR "Graphene error: "
				       "fail to insert page %d\n", rv);
				goto finish;
			}

			rv = make_page_cow(mm, vma, addr);
			if (rv) {
				printk(KERN_ERR "Graphene error: "
				       "can't make vma copy-on-write at %p\n",
				       (void *) addr);
				goto finish;
			}
		}
finish:
		/* Drop the kernel's reference to this page */
		if (page)
			page_cache_release(page);
		if (file)
			fput_atomic(file);
		/* Clear the stale pointers so a later bail-out through
		 * "finish" cannot drop these references a second time. */
		page = NULL;
		file = NULL;

		if (rv)
			break;

		i++;
		addr += PAGE_SIZE;
		(*copied_pages)++;
	}
	up_write(&mm->mmap_sem);

	if (i)
		DEBUG(" %p - %p (%d pages) received by thread %d\n",
		      (void *) start, (void *) start + (i << PAGE_SHIFT), i,
		      task->pid);

	if (start) {
		rv = copy_to_user(uaddr, &start, sizeof(unsigned long));
		if (rv) {
			printk(KERN_ERR "Graphene error: bad buffer %p\n",
			       uaddr);
			return -EFAULT;
		}
	}

	return rv;
}
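
/*
 * ioctl interface.  A hypothetical userspace exchange might look like the
 * sketch below; it is illustrative only: the ioctl numbers and the exact
 * layouts of struct gipc_send/struct gipc_recv come from graphene-ipc.h,
 * and the fields shown (entries, addr, len, prot) are inferred from how
 * this file dereferences them.
 *
 *	int fd = open("/dev/gipc", O_RDWR);
 *	long token = ioctl(fd, GIPC_CREATE, 0);      // new channel, returns token
 *
 *	// peer process in the same Graphene session:
 *	int peer = open("/dev/gipc", O_RDWR);
 *	ioctl(peer, GIPC_JOIN, token);               // attach to that channel
 *
 *	// sender: donate one page-aligned buffer
 *	unsigned long saddr[1] = { (unsigned long) buf };
 *	unsigned long slen[1]  = { 4096 };
 *	struct gipc_send gs = { .entries = 1, .addr = saddr, .len = slen };
 *	ioctl(fd, GIPC_SEND, &gs);                   // returns pages queued
 *
 *	// receiver: 0 lets the kernel pick the address, written back on return
 *	unsigned long raddr[1] = { 0 }, rlen[1] = { 4096 };
 *	unsigned long rprot[1] = { PROT_READ | PROT_WRITE };
 *	struct gipc_recv gr = { .entries = 1, .addr = raddr, .len = rlen,
 *				.prot = rprot };
 *	ioctl(peer, GIPC_RECV, &gr);                 // raddr[0] holds the mapping
 */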
static long gipc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct task_struct *task = current;
	struct gipc_queue *gq = NULL;
	long rv = 0;

	switch (cmd) {
	case GIPC_SEND: {
		struct gipc_send gs;
		struct gipc_send_buffer *gbuf;
		int i;
		unsigned long nr_pages = 0;

		rv = copy_from_user(&gs, (void *) arg, sizeof(gs));
		if (rv) {
			printk(KERN_ALERT "Graphene SEND: bad buffer %p\n",
			       (void *) arg);
			return -EFAULT;
		}
		/* Find the gipc_queue attached to this file */
		gq = (struct gipc_queue *) file->private_data;
		if (!gq)
			return -EFAULT;

		gbuf = kmem_cache_alloc(gipc_send_buffer_cachep, GFP_KERNEL);
		if (!gbuf)
			return -ENOMEM;

		DEBUG("GIPC_SEND %ld entries to token %lld by thread %d\n",
		      gs.entries, gq->token, task->pid);

		mutex_lock(&gq->send_lock);
		for (i = 0; i < gs.entries; i++) {
			rv = do_gipc_send(task, gq, gbuf, gs.addr + i,
					  gs.len + i, &nr_pages);
			if (rv < 0)
				break;
		}
		mutex_unlock(&gq->send_lock);

		DEBUG("GIPC_SEND return to thread %d, %ld pages are sent\n",
		      task->pid, nr_pages);

		kmem_cache_free(gipc_send_buffer_cachep, gbuf);
		rv = nr_pages ? : rv;
		break;
	}

	case GIPC_RECV: {
		struct gipc_recv gr;
		int i;
		unsigned long nr_pages = 0;

		rv = copy_from_user(&gr, (void *) arg, sizeof(gr));
		if (rv) {
			printk(KERN_ERR "Graphene error: bad buffer %p\n",
			       (void *) arg);
			return -EFAULT;
		}

		gq = (struct gipc_queue *) file->private_data;
		if (!gq)
			return -EBADF;

		DEBUG("GIPC_RECV %ld entries to token %lld by thread %d\n",
		      gr.entries, gq->token, task->pid);

		mutex_lock(&gq->recv_lock);
		for (i = 0; i < gr.entries; i++) {
			rv = do_gipc_recv(task, gq, gr.addr + i, gr.len + i,
					  gr.prot + i, &nr_pages);
			if (rv < 0)
				break;
		}
		mutex_unlock(&gq->recv_lock);

		DEBUG("GIPC_RECV return to thread %d, %ld pages are received\n",
		      task->pid, nr_pages);

		rv = nr_pages ? : rv;
		break;
	}

	case GIPC_CREATE: {
		gq = create_gipc_queue(file);
		if (!gq) {
			rv = -ENOMEM;
			break;
		}

		DEBUG("GIPC_CREATE token %lld by thread %d\n", gq->token,
		      task->pid);

		rv = gq->token;
		break;
	}

	case GIPC_JOIN: {
		struct gipc_queue *q;
		u64 token = arg;
		u64 session = GIPC_OWNER;

		if (file->private_data != NULL)
			return -EBUSY;

		/* Search for this token */
		spin_lock(&gdev.lock);
		list_for_each_entry(q, &gdev.channels, list) {
			if (q->token == token) {
				gq = q;
				break;
			}
		}

		/* Fail if we didn't find it */
		if (!gq) {
			spin_unlock(&gdev.lock);
			return -ENOENT;
		}

		if (gq->owner != session) {
			spin_unlock(&gdev.lock);
			return -EPERM;
		}

		atomic_inc(&gq->count);
		file->private_data = gq;

		/* Hold the lock until we allocate so only one process
		 * gets the queue */
		spin_unlock(&gdev.lock);

		DEBUG("GIPC_JOIN token %lld by thread %d\n", token, task->pid);

		rv = 0;
		break;
	}

	default:
		printk(KERN_ALERT "Graphene unknown ioctl %u %lu\n", cmd, arg);
		rv = -ENOSYS;
		break;
	}

	return rv;
}

static int gipc_release(struct inode *inode, struct file *file)
{
	struct gipc_queue *gq = (struct gipc_queue *) file->private_data;

	if (!gq)
		return 0;

	file->private_data = NULL;
	release_gipc_queue(gq, false);
	return 0;
}

static int gipc_open(struct inode *inode, struct file *file)
{
	file->private_data = NULL;
	return 0;
}

static struct file_operations gipc_fops = {
	.owner		= THIS_MODULE,
	.release	= gipc_release,
	.open		= gipc_open,
	.unlocked_ioctl	= gipc_ioctl,
	.compat_ioctl	= gipc_ioctl,
	.llseek		= noop_llseek,
};

static struct miscdevice gipc_dev = {
	.minor		= GIPC_MINOR,
	.name		= "gipc",
	.fops		= &gipc_fops,
	.mode		= 0666,
};
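
/*
 * Module init: resolve any unexported kernel symbols we need via kallsyms,
 * create the slab caches for queues and send buffers, initialize the global
 * channel list, and register the gipc misc device.
 */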
static int __init gipc_init(void)
{
	int rv = 0;

#if !defined(CONFIG_GRAPHENE_BULK_IPC) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
	my_do_mmap_pgoff = (do_mmap_pgoff_t)
		kallsyms_lookup_name("do_mmap_pgoff");
	printk(KERN_ERR "resolved symbol do_mmap_pgoff %p\n", my_do_mmap_pgoff);
	if (!my_do_mmap_pgoff) {
		printk(KERN_ERR "Graphene error: "
		       "can't find kernel function do_mmap_pgoff\n");
		return -ENOENT;
	}
#endif

#if !defined(CONFIG_GRAPHENE_BULK_IPC) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
	my_flush_tlb_page = (flush_tlb_page_t)
		kallsyms_lookup_name("flush_tlb_page");
	printk(KERN_ERR "resolved symbol flush_tlb_page %p\n", my_flush_tlb_page);
	if (!my_flush_tlb_page) {
		printk(KERN_ERR "Graphene error: "
		       "can't find kernel function flush_tlb_page\n");
		return -ENOENT;
	}
#endif

#ifndef gipc_get_session
	my_gipc_get_session = (void *) kallsyms_lookup_name("gipc_get_session");
#endif

	/* Register the kmem cache */
	gipc_queue_cachep = kmem_cache_create("gipc queue",
					      sizeof(struct gipc_queue),
					      0,
					      SLAB_HWCACHE_ALIGN|
					      SLAB_DESTROY_BY_RCU,
					      NULL);
	if (!gipc_queue_cachep) {
		printk(KERN_ERR "Graphene error: "
		       "failed to create a gipc queues cache\n");
		return -ENOMEM;
	}

	gipc_send_buffer_cachep = kmem_cache_create("gipc send buffer",
						    sizeof(struct gipc_send_buffer),
						    0,
						    SLAB_HWCACHE_ALIGN|
						    SLAB_DESTROY_BY_RCU,
						    NULL);
	if (!gipc_send_buffer_cachep) {
		printk(KERN_ERR "Graphene error: "
		       "failed to create a gipc buffers cache\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&gdev.channels);
	spin_lock_init(&gdev.lock);
	gdev.max_token = 1;

	rv = misc_register(&gipc_dev);
	if (rv) {
		printk(KERN_ERR "Graphene error: "
		       "failed to add a char device (rv=%d)\n", rv);
		return rv;
	}

	printk(KERN_ALERT "Graphene IPC: Hello, world\n");
	return 0;
}
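
/*
 * Module exit: tear down any channels still on the global list, unregister
 * the device, and destroy the slab caches.
 */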
static void __exit gipc_exit(void)
{
	struct gipc_queue *gq, *n;

	spin_lock(&gdev.lock);
	list_for_each_entry_safe(gq, n, &gdev.channels, list)
		release_gipc_queue(gq, true);
	spin_unlock(&gdev.lock);

	misc_deregister(&gipc_dev);
	kmem_cache_destroy(gipc_queue_cachep);
	/* Also drop the send-buffer cache created in gipc_init() */
	kmem_cache_destroy(gipc_send_buffer_cachep);
  779. printk(KERN_ALERT "Graphene IPC: Goodbye, cruel world\n");
  780. }
  781. module_init(gipc_init);
  782. module_exit(gipc_exit);