12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990 |
- #include <linux/module.h>
- #include <linux/kallsyms.h>
- #include <linux/version.h>
- #include <linux/init.h>
- #include <linux/fs.h>
- #include <linux/mm_types.h>
- #include <linux/mm.h>
- #include <linux/mmu_notifier.h>
- #include <linux/slab.h>
- #include <linux/swap.h>
- #include <linux/swapops.h>
- #include <linux/miscdevice.h>
- #include <linux/sched.h>
- #include <linux/pagemap.h>
- #include <linux/bitmap.h>
- #include <asm/mman.h>
- #include <asm/tlb.h>
- #include "graphene-ipc.h"
- #include "ksyms.h"
MODULE_LICENSE("Dual BSD/GPL");

/* Poison value for dead file pointers (kept for debugging; not used below). */
#define FILE_POISON LIST_POISON1

/* Slab caches: one for per-channel queues, one for per-send scratch buffers. */
struct kmem_cache *gipc_queue_cachep;
struct kmem_cache *gipc_send_buffer_cachep;

/* Set to 1 to enable verbose printk tracing and extra BUG_ON checks. */
#define GIPC_DEBUG 0

#if defined(GIPC_DEBUG) && GIPC_DEBUG == 1
# define DEBUG(...) printk(KERN_INFO __VA_ARGS__)
# define GIPC_BUG_ON(cond) BUG_ON(cond)
#else
# define DEBUG(...)
# define GIPC_BUG_ON(cond)
#endif

/*
 * DO_MMAP_PGOFF(): version-compatibility wrapper around the kernel's mmap
 * entry point.  Two axes of variation:
 *  - whether the symbol is exported to modules (CONFIG_GRAPHENE_BULK_IPC or
 *    old kernels) or must be resolved via kallsyms (KSYM);
 *  - the function signature: >= 3.9 adds an &populate out-parameter, and
 *    >= 4.3 switches to do_mmap() which takes an extra vm_flags argument (0).
 */
#if defined(CONFIG_GRAPHENE_BULK_IPC) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
  /* Exported symbol, >= 3.9 signature (populate out-param, discarded). */
# define DO_MMAP_PGOFF(file, addr, len, prot, flags, pgoff)		\
	({								\
		unsigned long populate;					\
		unsigned long rv = do_mmap_pgoff((file), (addr), (len),	\
						 (prot), (flags),	\
						 (pgoff), &populate);	\
	rv; })
# else
  /* Exported symbol, pre-3.9 signature. */
# define DO_MMAP_PGOFF(file, addr, len, prot, flags, pgoff)		\
	do_mmap_pgoff((file), (addr), (len), (prot), (flags), (pgoff))
# endif /* kernel_version < 3.9.0 */
#else
# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
  /* Unexported: resolve do_mmap() via kallsyms (>= 4.3 signature). */
# define MY_DO_MMAP
# define DO_MMAP_PGOFF(file, addr, len, prot, flags, pgoff)		\
	({								\
		unsigned long populate;					\
		unsigned long rv;					\
		rv = KSYM(do_mmap)((file), (addr), (len),		\
				   (prot), (flags), 0, (pgoff),		\
				   &populate);				\
	rv; })
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
  /* Unexported: resolve do_mmap_pgoff() via kallsyms (3.9+ signature). */
# define MY_DO_MMAP_PGOFF
# define DO_MMAP_PGOFF(file, addr, len, prot, flags, pgoff)		\
	({								\
		unsigned long populate;					\
		unsigned long rv;					\
		rv = KSYM(do_mmap_pgoff)((file), (addr), (len),		\
					 (prot), (flags), (pgoff),	\
					 &populate);			\
	rv; })
# else
  /* Unexported: resolve do_mmap_pgoff() via kallsyms (pre-3.9 signature). */
# define MY_DO_MMAP_PGOFF
# define DO_MMAP_PGOFF(file, addr, len, prot, flags, pgoff)		\
	KSYM(do_mmap_pgoff)((file), (addr), (len), (prot), (flags), (pgoff))
# endif /* kernel version < 3.9 */
#endif /* !CONFIG_GRAPHENE_BULK_IPC && kernel version > 3.4.0 */

/*
 * TLB flush selection: >= 3.7 flushes a whole range at once
 * (flush_tlb_mm_range); older kernels flush page by page.
 * As above, KSYM indirection is used when the symbol is not exported.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
# ifdef CONFIG_GRAPHENE_BULK_IPC
# define FLUSH_TLB_MM_RANGE flush_tlb_mm_range
# else
# define MY_FLUSH_TLB_MM_RANGE
# define FLUSH_TLB_MM_RANGE KSYM(flush_tlb_mm_range)
# endif
#else /* LINUX_VERSION_CODE < 3.7.0 */
# if defined(CONFIG_GRAPHENE_BULK_IPC) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)
# define FLUSH_TLB_PAGE flush_tlb_page
# else
# define MY_FLUSH_TLB_PAGE
# define FLUSH_TLB_PAGE KSYM(flush_tlb_page)
# endif
#endif

/* Declare storage for each kallsyms-resolved symbol actually needed. */
#ifdef MY_DO_MMAP
IMPORT_KSYM(do_mmap);
#endif
#ifdef MY_DO_MMAP_PGOFF
IMPORT_KSYM(do_mmap_pgoff);
#endif
#ifdef MY_FLUSH_TLB_MM_RANGE
IMPORT_KSYM(flush_tlb_mm_range);
#endif
#ifdef MY_FLUSH_TLB_PAGE
IMPORT_KSYM(flush_tlb_page);
#endif

/*
 * Optional session hook: if the kernel does not provide gipc_get_session as
 * a macro, try to resolve a function of that name at init time (may stay
 * NULL, in which case the owner/session is 0 — see GIPC_OWNER below).
 */
#ifndef gipc_get_session
u64 (*my_gipc_get_session) (struct task_struct *) = NULL;
#endif
/*
 * One IPC channel: a fixed-size ring buffer of page references moved from a
 * sender's address space to a receiver's.  `next` is the consumer index,
 * `last` the producer index; the ring is full when (last+1) % PAGE_QUEUE ==
 * next and empty when next == last.  Indices are only advanced under the
 * respective send/recv mutex.
 */
struct gipc_queue {
	struct list_head list;		/* link on gdev.channels */
	s64 token;			/* unique channel id (monotonic) */
	u64 owner;			/* session of the creator; joiners must match */
	atomic_t count;			/* refcount: one per attached file */

	struct mutex send_lock, recv_lock;	/* serialize senders / receivers */
	wait_queue_head_t send, recv;		/* senders wait on `send`, receivers on `recv` */
	volatile int next, last;		/* ring consumer / producer indices */

	struct {
		struct page *page;	/* page reference, or NULL for a hole */
		struct file *file;	/* backing file (ref held), or NULL if anonymous */
		u64 pgoff;		/* page offset within `file` */
	} pages[PAGE_QUEUE];
};
/*
 * Per-send scratch space (slab-allocated; too large for the stack).
 * Holds one PAGE_QUEUE-sized batch of pages gathered from the sender's
 * address space before they are pushed into the ring.
 */
struct gipc_send_buffer {
	unsigned long page_bit_map[PAGE_BITS];	/* bit i set => page i is mapped */
	struct page *pages[PAGE_QUEUE];		/* gathered page refs (NULL = hole) */
	struct vm_area_struct *vmas[PAGE_QUEUE];/* vma covering each address */
	struct file *files[PAGE_QUEUE];		/* file backing each vma, if any */
	unsigned long pgoffs[PAGE_QUEUE];	/* file page offset for each page */
};
/* Global device state: registry of all live channels. */
struct {
	spinlock_t lock;		/* protects channels list and max_token */
	/*
	 * For now, just make them monotonically increasing.  XXX: At
	 * some point, do something smarter for security.
	 */
	u64 max_token;
	struct list_head channels;	/* gipc_queue structs */
} gdev;

/*
 * Session id of the current task.  When no session hook is available the
 * owner is 0, which effectively disables the ownership check in GIPC_JOIN.
 */
#ifdef gipc_get_session
#define GIPC_OWNER gipc_get_session(current)
#else
#define GIPC_OWNER (my_gipc_get_session ? my_gipc_get_session(current) : 0)
#endif
- static inline struct gipc_queue * create_gipc_queue(struct file *creator)
- {
- struct gipc_queue *gq = kmem_cache_alloc(gipc_queue_cachep, GFP_KERNEL);
- if (!gq)
- return gq;
- memset(gq, 0, sizeof(*gq));
- INIT_LIST_HEAD(&gq->list);
- mutex_init(&gq->send_lock);
- mutex_init(&gq->recv_lock);
- init_waitqueue_head(&gq->send);
- init_waitqueue_head(&gq->recv);
- gq->owner = GIPC_OWNER;
- creator->private_data = gq;
- atomic_set(&gq->count, 1);
- spin_lock(&gdev.lock);
- list_add(&gq->list, &gdev.channels);
- gq->token = gdev.max_token++;
- spin_unlock(&gdev.lock);
- return gq;
- }
/*
 * Drop one reference to @gq; on the last reference, drain any pages still
 * queued (releasing their page and file references), unlink the queue from
 * gdev.channels, and free it.
 *
 * @locked: true if the caller already holds gdev.lock (module exit path);
 *          false to take/release it here.
 */
static inline void release_gipc_queue(struct gipc_queue *gq, bool locked)
{
	int idx;

	if (!atomic_dec_and_test(&gq->count))
		return;

	if (!locked)
		spin_lock(&gdev.lock);

	/* Drain the ring: drop page and file refs for every pending slot. */
	while (gq->next != gq->last) {
		idx = gq->next;
		if (gq->pages[idx].page) {
			page_cache_release(gq->pages[idx].page);
			gq->pages[idx].page = NULL;
		}
		if (gq->pages[idx].file) {
			/* fput_atomic: safe under the spinlock */
			fput_atomic(gq->pages[idx].file);
			gq->pages[idx].file = NULL;
			gq->pages[idx].pgoff = 0;
		}
		gq->next++;
		gq->next &= (PAGE_QUEUE - 1);	/* PAGE_QUEUE is a power of two */
	}

	list_del(&gq->list);

	if (!locked)
		spin_unlock(&gdev.lock);

	kmem_cache_free(gipc_queue_cachep, gq);
}
/*
 * RSS counter update helpers, mirroring the kernel's internal fast path:
 * when the current task owns @mm, update its cached per-task counter
 * instead of the atomic mm-wide counter.
 */
#if defined(SPLIT_RSS_COUNTING)
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#else
#define add_mm_counter_fast(mm, member, val) add_mm_counter(mm, member, val)
#endif

#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
/*
 * Write-protect the PTE mapping @addr in @mm so the next write faults and
 * triggers copy-on-write.  The caller must hold mm->mmap_sem for write.
 *
 * Returns 0 on success, -EFAULT if no present PTE maps @addr.
 * Transparent huge pages are not expected here (BUG_ON) — callers use
 * FOLL_SPLIT when gathering pages.
 */
inline int make_page_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	/* Walk the page-table hierarchy; bail if any level is absent. */
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto no_page;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto no_page;

	BUG_ON(pmd_trans_huge(*pmd));

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_present(*pte)) {
		spin_unlock(ptl);
		goto no_page;
	}

	ptep_set_wrprotect(mm, addr, pte);
	spin_unlock(ptl);

	DEBUG("make page COW at %lx\n", addr);
	return 0;

no_page:
	return -EFAULT;
}
/*
 * For each of @nr_pages pages starting at @addr, set bit i in @page_bit_map
 * iff a page is already materialized there (present PTE, or a transparent
 * huge PMD).  Pages that would only appear via demand faulting get a 0 bit;
 * get_pages() later treats runs of 1-bits and 0-bits differently.
 * Caller holds mm->mmap_sem.
 */
static void fill_page_bit_map(struct mm_struct *mm,
			      unsigned long addr, unsigned long nr_pages,
			      unsigned long page_bit_map[PAGE_BITS])
{
	int i = 0;

	DEBUG("GIPC_SEND fill_page_bit_map %lx - %lx\n",
	      addr, addr + (nr_pages << PAGE_SHIFT));

	do {
		struct vm_area_struct *vma;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		bool has_page = false;

		vma = find_vma(mm, addr);
		if (!vma)
			goto next;

		BUG_ON(vma->vm_flags & VM_HUGETLB);

		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			goto next;

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			goto next;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			goto next;

		/* A THP mapping counts as "page present" without a PTE walk. */
		if (unlikely(pmd_trans_huge(*pmd))) {
			has_page = true;
			goto next;
		}

		if (pmd_bad(*pmd))
			goto next;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (pte_none(*pte))
			goto next_locked;
/*
		if (unlikely(!pte_present(*pte)) && pte_file(*pte))
			goto next_locked;
*/
		has_page = true;

next_locked:
		spin_unlock(ptl);
next:
		if (has_page) {
			DEBUG("found a page at %lx\n", addr);
			set_bit(i, page_bit_map);
		} else {
			clear_bit(i, page_bit_map);
		}
	/* comma operator: advance i and addr, then test the bound */
	} while (i++, addr += PAGE_SIZE, i < nr_pages);
}
- static int get_pages (struct task_struct *task, unsigned long start,
- unsigned long nr_pages,
- unsigned long page_bit_map[PAGE_BITS],
- struct page *pages[PAGE_QUEUE],
- struct vm_area_struct *vmas[PAGE_QUEUE])
- {
- struct mm_struct *mm = task->mm;
- struct vm_area_struct *vma = NULL;
- unsigned long addr = start, nr;
- int i = 0, j, rv;
- while (i < nr_pages) {
- unsigned long flushed, vmflags;
- int last = i;
- if (test_bit(last, page_bit_map)) {
- i = find_next_zero_bit(page_bit_map, PAGE_QUEUE,
- last + 1);
- if (i > nr_pages)
- i = nr_pages;
- nr = i - last;
- DEBUG("GIPC_SEND get_user_pages %ld pages at %lx\n",
- addr, nr);
- rv = __get_user_pages(task, mm, addr, nr,
- FOLL_GET|FOLL_FORCE|FOLL_SPLIT,
- pages + last, vmas + last, NULL);
- if (rv <= 0) {
- printk(KERN_ERR "Graphene error: "
- "get_user_pages at 0x%016lx-0x%016lx\n",
- addr, addr + (nr << PAGE_SHIFT));
- return rv;
- }
- if (rv != nr) {
- printk(KERN_ERR "Graphene error: "
- "get_user_pages at 0x%016lx\n",
- addr + (rv << PAGE_SHIFT));
- return -EACCES;
- }
- flushed = addr;
- vmflags = 0;
- for (j = 0; j < nr; j++) {
- unsigned long target = addr + (j << PAGE_SHIFT);
- /* Mark source COW */
- rv = make_page_cow(mm, vmas[last + j],
- target);
- if (rv)
- return rv;
- if (PageAnon(pages[last + j])) {
- /* Fix up the counters */
- inc_mm_counter_fast(mm, MM_FILEPAGES);
- dec_mm_counter_fast(mm, MM_ANONPAGES);
- pages[last + j]->mapping = NULL;
- }
- #ifdef FLUSH_TLB_MM_RANGE
- if (vmflags == vmas[last + j]->vm_flags)
- continue;
- if (flushed < target)
- FLUSH_TLB_MM_RANGE(mm, flushed, target,
- vmflags);
- flushed = target;
- vmflags = vmas[last + j]->vm_flags;
- #else
- FLUSH_TLB_PAGE(vmas[last + j], target);
- #endif
- }
- #ifdef FLUSH_TLB_MM_RANGE
- if (flushed < addr + (nr << PAGE_SHIFT))
- FLUSH_TLB_MM_RANGE(mm, flushed,
- addr + (nr << PAGE_SHIFT),
- vmflags);
- #endif
- vma = vmas[i - 1];
- addr += nr << PAGE_SHIFT;
- } else {
- /* This is the case where a page (or pages) are not
- * currently mapped.
- * Handle the hole appropriately. */
- i = find_next_bit(page_bit_map, PAGE_QUEUE, last + 1);
- if (i > nr_pages)
- i = nr_pages;
- nr = i - last;
- DEBUG("GIPC_SEND skip %ld pages at %lx\n", addr, nr);
- for (j = 0; j < nr; j++) {
- if (!vma) {
- vma = find_vma(mm, addr);
- } else {
- /* DEP 6/17/13 - these addresses should
- * be monotonically increasing. */
- for (; vma && addr >= vma->vm_end;
- vma = vma->vm_next);
- /* Leverage monotonic increasing vmas
- * to more quickly detect holes in the
- * address space. */
- if (vma && addr < vma->vm_start)
- vma = NULL;
- }
- pages[last + j] = NULL;
- vmas[last + j] = vma;
- addr += PAGE_SIZE;
- }
- }
- }
- return i;
- }
/*
 * Send one user-described region into channel @gq.  Reads (addr, len) from
 * the user pointers, gathers the pages in PAGE_QUEUE-sized batches under
 * mm->mmap_sem, records each page's backing file and offset, then pushes
 * the entries into the ring — blocking (interruptibly) when the ring is
 * full and waking receivers as entries arrive.
 *
 * Caller holds gq->send_lock.  *copied_pages is incremented per page
 * queued.  Returns the last get_pages() result (>= 0) on success or a
 * negative errno; note callers only treat rv < 0 as failure.
 */
static int do_gipc_send(struct task_struct *task, struct gipc_queue *gq,
			struct gipc_send_buffer *gbuf,
			unsigned long __user *uaddr, unsigned long __user *ulen,
			unsigned long *copied_pages)
{
	struct mm_struct *mm = task->mm;
	unsigned long addr, len, nr_pages;
	int rv, i;

	DEBUG("GIPC_SEND uaddr = %p, ulen = %p\n", uaddr, ulen);

	rv = copy_from_user(&addr, uaddr, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene SEND: bad buffer %p\n", uaddr);
		return -EFAULT;
	}

	rv = copy_from_user(&len, ulen, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene SEND: bad buffer %p\n", ulen);
		return -EFAULT;
	}

	/* Reject wrap-around of addr + len. */
	if (addr > addr + len) {
		printk(KERN_ALERT "Graphene SEND: attempt to send %p - %p "
		       " by thread %d FAIL: bad argument\n",
		       (void *) addr, (void *) (addr + len), task->pid);
		return -EINVAL;
	}

	DEBUG("GIPC_SEND addr = %lx, len = %ld\n", addr, len);

	nr_pages = len >> PAGE_SHIFT;

	if (!access_ok(VERIFY_READ, addr, len)) {
		printk(KERN_ALERT "Graphene SEND:"
		       " attempt to send %p - %p (%ld pages) "
		       " by thread %d FAIL: bad permission\n",
		       (void *) addr, (void *) (addr + len), nr_pages,
		       task->pid);
		return -EFAULT;
	}

	DEBUG("    %p - %p (%ld pages) sent by thread %d\n",
	      (void *) addr, (void *) (addr + len), nr_pages, task->pid);

	while (nr_pages) {
		unsigned long nr =
			(nr_pages <= PAGE_QUEUE) ? nr_pages : PAGE_QUEUE;

		/* for each of these addresses - check if
		 * demand faulting will be triggered
		 * if vma is present, but there is no page
		 * present(pmd/pud not present or PTE_PRESENT
		 * is off) then get_user_pages will trigger
		 * the creation of those */
		down_write(&mm->mmap_sem);
		fill_page_bit_map(mm, addr, nr, gbuf->page_bit_map);
		rv = get_pages(task, addr, nr,
			       gbuf->page_bit_map,
			       gbuf->pages,
			       gbuf->vmas);
		if (rv < 0) {
			up_write(&mm->mmap_sem);
			break;
		}

		/* Record backing file + pgoff for each entry (ref per file). */
		for (i = 0; i < nr; i++) {
			BUG_ON((!gbuf->vmas[i]) && (!!gbuf->pages[i]));
			if (gbuf->vmas[i] && gbuf->vmas[i]->vm_file) {
				gbuf->files[i] = get_file(gbuf->vmas[i]->vm_file);
				gbuf->pgoffs[i] =
					((addr - gbuf->vmas[i]->vm_start) >> PAGE_SHIFT)
					+ gbuf->vmas[i]->vm_pgoff;
			} else {
				gbuf->files[i] = NULL;
				gbuf->pgoffs[i] = 0;
			}
			addr += PAGE_SIZE;
		}

		up_write(&mm->mmap_sem);

		for (i = 0; i < nr ; i++) {
			/* Put in the pending buffer*/
			if (((gq->last + 1) & (PAGE_QUEUE - 1)) == gq->next) {
				/* The blocking condition for send
				 * and recv can't both be true! */
				wake_up_all(&gq->recv);
				wait_event_interruptible(gq->send,
					((gq->last + 1) & (PAGE_QUEUE - 1)) != gq->next);
				if (signal_pending(task)) {
					rv = -ERESTARTSYS;
					goto out;
				}
			}
			gq->pages[gq->last].page = gbuf->pages[i];
			gq->pages[gq->last].file = gbuf->files[i];
			gq->pages[gq->last].pgoff = gbuf->pgoffs[i];
			gq->last++;
			gq->last &= PAGE_QUEUE - 1;
			(*copied_pages)++;
		}

		wake_up_all(&gq->recv);
		nr_pages -= nr;
	}

out:
	return rv;
}
- static inline
- int recv_next (struct task_struct *task, struct gipc_queue *gq)
- {
- if (gq->next == gq->last) {
- /* The blocking condition for send & recv can't both be true */
- wake_up_all(&gq->send);
- wait_event_interruptible(gq->recv, gq->next != gq->last);
- if (signal_pending(task))
- return -ERESTARTSYS;
- }
-
- return gq->next;
- }
/*
 * Receive one region from channel @gq into @task's address space.  Reads
 * (addr, len, prot) from the user pointers.  For each incoming entry it
 * reuses the current vma when compatible, otherwise mmaps a fresh region
 * (file-backed or anonymous, MAP_FIXED when the caller supplied an
 * address), then inserts the page and marks it copy-on-write.
 *
 * On success, writes the actual start address back to @uaddr.  Caller
 * holds gq->recv_lock.  *copied_pages is incremented per page mapped.
 *
 * NOTE(review): the `finish:` label sits inside the loop body — the
 * per-iteration page/file references are dropped there on both the
 * success and error paths, and `rv` then decides whether to continue.
 */
static int do_gipc_recv(struct task_struct *task, struct gipc_queue *gq,
			unsigned long __user *uaddr, unsigned long __user *ulen,
			unsigned long __user *uprot,
			unsigned long *copied_pages)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long start, addr, len, nr_pages, prot, pgoff;
	struct page *page = NULL;
	struct file *file = NULL;
	int i = 0, rv;

	rv = copy_from_user(&addr, uaddr, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene RECV: bad buffer %p\n", uaddr);
		return -EFAULT;
	}

	rv = copy_from_user(&len, ulen, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene RECV: bad buffer %p\n", ulen);
		return -EFAULT;
	}

	rv = copy_from_user(&prot, uprot, sizeof(unsigned long));
	if (rv) {
		printk(KERN_ALERT "Graphene RECV: bad buffer %p\n", uprot);
		return -EFAULT;
	}

	nr_pages = len >> PAGE_SHIFT;
	start = addr;
	down_write(&mm->mmap_sem);

	while (i < nr_pages) {
		int found = recv_next(task, gq);
		int need_map = 1;

		if (found < 0) {
			rv = found;
			goto finish;
		}

		/* Pop the entry; we now own its page/file references. */
		page = gq->pages[found].page;
		file = gq->pages[found].file;
		pgoff = gq->pages[found].pgoff;
		gq->next++;
		gq->next &= PAGE_QUEUE - 1;

		wake_up_all(&gq->send);

		/* Can the previous vma absorb this page?  It must match the
		 * backing file, file offset continuity, and protection. */
		if (vma) {
			need_map = 0;
			if (vma->vm_file != file)
				need_map = 1;
			if (file && vma->vm_start +
			    ((pgoff - vma->vm_pgoff) << PAGE_SHIFT)
			    != addr)
				need_map = 1;
			if (prot != (vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)))
				need_map = 1;
		}

		if (need_map) {
			unsigned long flags = MAP_PRIVATE;

			if (addr)
				flags |= MAP_FIXED;
			if (file)
				flags |= MAP_FILE;
			else
				flags |= MAP_ANONYMOUS;

			/* Map the whole remaining span in one go; later
			 * iterations then fall into the reuse path above. */
			addr = DO_MMAP_PGOFF(file, addr,
					     (nr_pages - i) << PAGE_SHIFT,
					     prot, flags, pgoff);

			if (IS_ERR_VALUE(addr)) {
				rv = PTR_ERR((void *) addr);
				printk(KERN_ERR
				       "Graphene error: failed to mmap (%d)\n",
				       -rv);
				goto finish;
			}

			if (file)
				DEBUG("map %08lx-%08lx file %p\n", addr,
				      addr + ((nr_pages - i) << PAGE_SHIFT),
				      file);
			else
				DEBUG("map %08lx-%08lx\n", addr,
				      addr + ((nr_pages - i) << PAGE_SHIFT));

			if (!start)
				start = addr;

			vma = find_vma(mm, addr);
			if (!vma) {
				printk(KERN_ERR
				       "Graphene error: can't find vma at %p\n",
				       (void *) addr);
				rv = -ENOENT;
				goto finish;
			}
		} else {
			BUG_ON(!vma);
		}

		/* A NULL page is a hole in the sender's space: the mapping
		 * alone suffices and demand faulting fills it later. */
		if (page) {
			rv = vm_insert_page(vma, addr, page);
			if (rv) {
				printk(KERN_ERR "Graphene error: "
				       "fail to insert page %d\n", rv);
				goto finish;
			}
			rv = make_page_cow(mm, vma, addr);
			if (rv) {
				printk(KERN_ERR "Graphene error: "
				       "can't make vma copy-on-write at %p\n",
				       (void *) addr);
				goto finish;
			}
		}

finish:
		/* Drop the kernel's reference to this page */
		if (page)
			page_cache_release(page);
		if (file)
			fput_atomic(file);
		if (rv)
			break;
		i++;
		addr += PAGE_SIZE;
		(*copied_pages)++;
	}

	up_write(&mm->mmap_sem);

	if (i)
		DEBUG("    %p - %p (%d pages) received by thread %d\n",
		      (void *) start, (void *) start + (i << PAGE_SHIFT), i,
		      task->pid);

	if (start) {
		/* Report the actual mapping address back to user space. */
		rv = copy_to_user(uaddr, &start, sizeof(unsigned long));
		if (rv) {
			printk(KERN_ERR "Graphene error: bad buffer %p\n",
			       uaddr);
			return -EFAULT;
		}
	}

	return rv;
}
/*
 * ioctl dispatcher for /dev/gipc.
 *
 * GIPC_SEND:   push a batch of user regions into the attached channel.
 * GIPC_RECV:   map a batch of queued regions into the caller.
 * GIPC_CREATE: allocate a new channel bound to this fd; returns its token.
 * GIPC_JOIN:   attach this fd to an existing channel by token, subject to
 *              a session-ownership check.
 *
 * For SEND/RECV the positive return value is the number of pages moved
 * (rv = nr_pages ?: rv), falling back to the error code when zero pages
 * were transferred.
 */
static long gipc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct task_struct *task = current;
	struct gipc_queue *gq = NULL;
	long rv = 0;

	switch (cmd) {
	case GIPC_SEND: {
		struct gipc_send gs;
		struct gipc_send_buffer *gbuf;
		int i;
		unsigned long nr_pages = 0;

		rv = copy_from_user(&gs, (void *) arg, sizeof(gs));
		if (rv) {
			printk(KERN_ALERT "Graphene SEND: bad buffer %p\n",
			       (void *) arg);
			return -EFAULT;
		}

		/* Find/allocate the gipc_pages struct for our recipient */
		gq = (struct gipc_queue *) file->private_data;
		if (!gq)
			return -EFAULT;

		/* Scratch buffer is slab-allocated: too big for the stack. */
		gbuf = kmem_cache_alloc(gipc_send_buffer_cachep, GFP_KERNEL);
		if (!gbuf)
			return -ENOMEM;

		DEBUG("GIPC_SEND %ld entries to token %lld by thread %d\n",
		      gs.entries, gq->token, task->pid);

		mutex_lock(&gq->send_lock);

		for (i = 0; i < gs.entries; i++) {
			rv = do_gipc_send(task, gq, gbuf, gs.addr + i,
					  gs.len + i, &nr_pages);
			if (rv < 0)
				break;
		}

		mutex_unlock(&gq->send_lock);
		DEBUG("GIPC_SEND return to thread %d, %ld pages are sent\n",
		      task->pid, nr_pages);
		kmem_cache_free(gipc_send_buffer_cachep, gbuf);
		rv = nr_pages ? : rv;
		break;
	}

	case GIPC_RECV: {
		struct gipc_recv gr;
		int i;
		unsigned long nr_pages = 0;

		rv = copy_from_user(&gr, (void *) arg, sizeof(gr));
		if (rv) {
			printk(KERN_ERR "Graphene error: bad buffer %p\n",
			       (void *) arg);
			return -EFAULT;
		}

		gq = (struct gipc_queue *) file->private_data;
		if (!gq)
			return -EBADF;

		DEBUG("GIPC_RECV %ld entries to token %lld by thread %d\n",
		      gr.entries, gq->token, task->pid);

		mutex_lock(&gq->recv_lock);

		for (i = 0; i < gr.entries; i++) {
			rv = do_gipc_recv(task, gq, gr.addr + i, gr.len + i,
					  gr.prot + i, &nr_pages);
			if (rv < 0)
				break;
		}

		mutex_unlock(&gq->recv_lock);
		DEBUG("GIPC_RECV return to thread %d, %ld pages are received\n",
		      task->pid, nr_pages);
		rv = nr_pages ? : rv;
		break;
	}

	case GIPC_CREATE: {
		gq = create_gipc_queue(file);
		if (!gq) {
			rv = -ENOMEM;
			break;
		}
		DEBUG("GIPC_CREATE token %lld by thread %d\n", gq->token,
		      task->pid);
		rv = gq->token;
		break;
	}

	case GIPC_JOIN: {
		struct gipc_queue *q;
		u64 token = arg;
		u64 session = GIPC_OWNER;

		if (file->private_data != NULL)
			return -EBUSY;

		/* Search for this token */
		spin_lock(&gdev.lock);
		list_for_each_entry(q, &gdev.channels, list) {
			if (q->token == token) {
				gq = q;
				break;
			}
		}

		/* Fail if we didn't find it */
		if (!gq) {
			spin_unlock(&gdev.lock);
			return -ENOENT;
		}

		/* Only tasks in the creator's session may join. */
		if (gq->owner != session) {
			spin_unlock(&gdev.lock);
			return -EPERM;
		}

		atomic_inc(&gq->count);
		file->private_data = gq;

		/* Hold the lock until we allocate so only one process
		 * gets the queue */
		spin_unlock(&gdev.lock);

		DEBUG("GIPC_JOIN token %lld by thread %d\n", token, task->pid);
		rv = 0;
		break;
	}

	default:
		printk(KERN_ALERT "Graphene unknown ioctl %u %lu\n", cmd, arg);
		rv = -ENOSYS;
		break;
	}

	return rv;
}
- static int gipc_release(struct inode *inode, struct file *file)
- {
- struct gipc_queue *gq = (struct gipc_queue *) file->private_data;
- if (!gq)
- return 0;
- file->private_data = NULL;
- release_gipc_queue(gq, false);
- return 0;
- }
/* ->open handler: a fresh fd starts with no channel attached. */
static int gipc_open(struct inode *inode, struct file *file)
{
	file->private_data = NULL;
	return 0;
}
/* File operations for the gipc character device.  compat_ioctl reuses the
 * native handler: the ABI uses unsigned long, fine for same-width compat. */
static struct file_operations gipc_fops = {
	.owner		= THIS_MODULE,
	.release	= gipc_release,
	.open		= gipc_open,
	.unlocked_ioctl	= gipc_ioctl,
	.compat_ioctl	= gipc_ioctl,
	.llseek		= noop_llseek,
};

/* Misc device registration: /dev/gipc, world read/write (mode 0666). */
static struct miscdevice gipc_dev = {
	.minor		= GIPC_MINOR,
	.name		= "gipc",
	.fops		= &gipc_fops,
	.mode		= 0666,
};
- static int __init gipc_init(void)
- {
- int rv = 0;
- #ifdef MY_DO_MMAP
- LOOKUP_KSYM(do_mmap);
- #endif
- #ifdef MY_DO_MMAP_PGOFF
- LOOKUP_KSYM(do_mmap_pgoff);
- #endif
- #ifdef MY_FLUSH_TLB_MM_RANGE
- LOOKUP_KSYM(flush_tlb_mm_range);
- #endif
- #ifdef MY_FLUSH_TLB_PAGE
- LOOKUP_KSYM(flush_tlb_page);
- #endif
- #ifndef gipc_get_session
- my_gipc_get_session = (void *) kallsyms_lookup_name("gipc_get_session");
- #endif
- /* Register the kmem cache */
- gipc_queue_cachep = kmem_cache_create("gipc queue",
- sizeof(struct gipc_queue),
- 0,
- SLAB_HWCACHE_ALIGN|
- SLAB_DESTROY_BY_RCU,
- NULL);
- if (!gipc_queue_cachep) {
- printk(KERN_ERR "Graphene error: "
- "failed to create a gipc queues cache\n");
- return -ENOMEM;
- }
- gipc_send_buffer_cachep = kmem_cache_create("gipc send buffer",
- sizeof(struct gipc_send_buffer),
- 0,
- SLAB_HWCACHE_ALIGN|
- SLAB_DESTROY_BY_RCU,
- NULL);
- if (!gipc_send_buffer_cachep) {
- printk(KERN_ERR "Graphene error: "
- "failed to create a gipc buffers cache\n");
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&gdev.channels);
- spin_lock_init(&gdev.lock);
- gdev.max_token = 1;
- rv = misc_register(&gipc_dev);
- if (rv) {
- printk(KERN_ERR "Graphene error: "
- "failed to add a char device (rv=%d)\n", rv);
- return rv;
- }
- printk(KERN_ALERT "Graphene IPC: Hello, world\n");
- return 0;
- }
- static void __exit gipc_exit(void)
- {
- struct gipc_queue *gq, *n;
- spin_lock(&gdev.lock);
- list_for_each_entry_safe(gq, n, &gdev.channels, list)
- release_gipc_queue(gq, true);
- spin_unlock(&gdev.lock);
- misc_deregister(&gipc_dev);
- kmem_cache_destroy(gipc_queue_cachep);
- printk(KERN_ALERT "Graphene IPC: Goodbye, cruel world\n");
- }
- module_init(gipc_init);
- module_exit(gipc_exit);
|