  1. /*
  2. * linux/graphene/graphene.c
  3. *
  4. * Copyright (C) 2013-, Chia-Che Tsai, Bhushan Jain and Donald Porter
  5. *
  6. * Manage the graphene information and security policies.
  7. */
  8. #include <linux/version.h>
  9. #include <linux/atomic.h>
  10. #include <linux/slab.h>
  11. #include <linux/sched.h>
  12. #include <linux/fs.h>
  13. #include <linux/file.h>
  14. #include <linux/fdtable.h>
  15. #include <linux/dcache.h>
  16. #include <linux/namei.h>
  17. #include <linux/fs_struct.h>
  18. #include <linux/mount.h>
  19. #include <linux/rcupdate.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/module.h>
  22. #include <linux/un.h>
  23. #include <linux/net.h>
  24. #include <net/sock.h>
  25. #include <net/inet_sock.h>
  26. #include <net/tcp_states.h>
  27. #include <linux/pipe_fs_i.h>
  28. #include <../security/apparmor/include/audit.h>
  29. #include "graphene.h"
  30. #include "graphene-ipc.h"
/* Global list of registered unix-socket rules (struct graphene_unix).
 * Readers traverse it under rcu_read_lock(); writers serialize on
 * unix_list_lock. */
static struct list_head unix_list = LIST_HEAD_INIT(unix_list);
static DEFINE_SPINLOCK(unix_list_lock);
/* Monotonic source of gipc session identifiers. */
static atomic_t gipc_session;
  34. static int add_graphene_unix(struct graphene_unix *u)
  35. {
  36. struct graphene_unix *tmp;
  37. int err = 0;
  38. rcu_read_lock();
  39. list_for_each_entry_rcu(tmp, &unix_list, list) {
  40. if (u->root.mnt) {
  41. if (!tmp->root.mnt)
  42. continue;
  43. if (!path_equal(&tmp->root, &u->root))
  44. continue;
  45. }
  46. if (u->prefix.len) {
  47. int len;
  48. if (!tmp->prefix.len)
  49. continue;
  50. len = u->prefix.len < tmp->prefix.len ?
  51. u->prefix.len : tmp->prefix.len;
  52. if (!strncmp(u->prefix.name, tmp->prefix.name, len)) {
  53. err = -EACCES;
  54. break;
  55. }
  56. }
  57. }
  58. if (!err) {
  59. spin_lock(&unix_list_lock);
  60. list_add_tail_rcu(&u->list, &unix_list);
  61. spin_unlock(&unix_list_lock);
  62. }
  63. rcu_read_unlock();
  64. return err;;
  65. }
  66. int dup_graphene_struct(struct task_struct *tsk)
  67. {
  68. struct graphene_struct *gs, *new;
  69. struct graphene_info *gi;
  70. if (!(tsk->graphene))
  71. return 0;
  72. if (tsk->group_leader != tsk) {
  73. atomic_inc(&tsk->graphene->g_count);
  74. return 0;
  75. }
  76. gs = tsk->graphene;
  77. new = kmalloc(sizeof(struct graphene_struct), GFP_KERNEL);
  78. if (!new)
  79. return -ENOMEM;
  80. spin_lock(&gs->g_lock);
  81. gi = gs->g_info;
  82. atomic_inc(&gi->gi_count);
  83. new->g_info = gi;
  84. spin_unlock(&gs->g_lock);
  85. atomic_set(&new->g_count, 1);
  86. spin_lock_init(&new->g_lock);
  87. tsk->graphene = new;
  88. return 0;
  89. }
  90. static void drop_graphene_info(struct graphene_info *info)
  91. {
  92. struct graphene_path *p, *n;
  93. int i;
  94. list_for_each_entry_safe(p, n, &info->gi_paths, list) {
  95. path_put(&p->path);
  96. kfree(p);
  97. }
  98. list_for_each_entry_safe(p, n, &info->gi_rpaths, list) {
  99. path_put(&p->path);
  100. kfree(p);
  101. }
  102. if (info->gi_libexec.dentry)
  103. path_put(&info->gi_libexec);
  104. if (info->gi_unix) {
  105. struct graphene_unix *u = info->gi_unix;
  106. if (!atomic_dec_return(&u->count)) {
  107. spin_lock(&unix_list_lock);
  108. if (!list_empty(&u->list)) {
  109. list_del_rcu(&u->list);
  110. spin_unlock(&unix_list_lock);
  111. synchronize_rcu();
  112. }
  113. if (u->root.mnt)
  114. path_put(&u->root);
  115. if (u->prefix.len)
  116. kfree(u->prefix.name);
  117. kfree(u);
  118. }
  119. }
  120. for (i = 0 ; i < 3 ; i++)
  121. if (info->gi_console[i].mnt)
  122. path_put(&info->gi_console[i]);
  123. kfree(info);
  124. }
  125. static void put_graphene_info(struct graphene_info *info)
  126. {
  127. if (!atomic_dec_return(&info->gi_count))
  128. drop_graphene_info(info);
  129. }
  130. void put_graphene_struct(struct task_struct *tsk)
  131. {
  132. struct graphene_struct *gs = tsk->graphene;
  133. if (gs) {
  134. tsk->graphene = NULL;
  135. if (atomic_dec_return(&gs->g_count))
  136. return;
  137. put_graphene_info(gs->g_info);
  138. kfree(gs);
  139. }
  140. }
/*
 * Fetch the current graphene_info of @gs, or NULL when the task has no
 * graphene state.  The pointer is read under rcu_read_lock(); the
 * rcu_dereference_check() conditions list the cases in which a caller
 * may legitimately use the pointer without further protection: holding
 * gs->g_lock, being the sole owner (g_count == 1), or running in a
 * single-threaded thread group.
 *
 * NOTE(review): the pointer escapes the rcu_read_lock() section;
 * callers rely on one of the conditions above keeping the info alive
 * for the duration of their use — confirm at each call site.
 */
static inline
struct graphene_info *get_graphene_info(struct graphene_struct *gs)
{
	struct graphene_info *info;
	if (!gs)
		return NULL;
	rcu_read_lock();
	info = rcu_dereference_check(gs->g_info,
			lockdep_is_held(&gs->g_lock) ||
			atomic_read(&gs->g_count) == 1 ||
			rcu_my_thread_group_empty());
	rcu_read_unlock();
	return info;
}
/* struct file gained the f_inode shortcut in 3.9; older kernels reach
 * the inode through f_dentry. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
# define FILE_INODE(file) ((file)->f_inode)
#else
# define FILE_INODE(file) ((file)->f_dentry->d_inode)
#endif
  160. static loff_t graphene_lib_llseek(struct file *file, loff_t offset, int origin)
  161. {
  162. struct inode *inode = FILE_INODE(file);
  163. if (!inode)
  164. return -EINVAL;
  165. if (!inode->i_fop || !inode->i_fop->llseek)
  166. return -EINVAL;
  167. return inode->i_fop->llseek(file, offset, origin);
  168. }
  169. static ssize_t graphene_lib_read (struct file *file, char __user *buf,
  170. size_t len, loff_t *ppos)
  171. {
  172. struct inode *inode = FILE_INODE(file);
  173. if (!inode)
  174. return -EINVAL;
  175. if (!inode->i_fop || !inode->i_fop->read)
  176. return -EINVAL;
  177. return inode->i_fop->read(file, buf, len, ppos);
  178. }
  179. static ssize_t graphene_lib_aio_read (struct kiocb *iocb, const struct iovec *iov,
  180. unsigned long nr_segs, loff_t pos)
  181. {
  182. struct inode *inode = FILE_INODE(iocb->ki_filp);
  183. if (!inode)
  184. return -EINVAL;
  185. if (!inode->i_fop || !inode->i_fop->aio_read)
  186. return -EINVAL;
  187. return inode->i_fop->aio_read(iocb, iov, nr_segs, pos);
  188. }
  189. static int graphene_lib_mmap(struct file *file, struct vm_area_struct *vma)
  190. {
  191. struct inode *inode = FILE_INODE(file);
  192. if (!inode)
  193. return -EINVAL;
  194. if (!inode->i_fop || !inode->i_fop->mmap)
  195. return -EINVAL;
  196. return inode->i_fop->mmap(file, vma);
  197. }
  198. static int graphene_lib_release(struct inode *inode, struct file *file)
  199. {
  200. if (!inode)
  201. return -EINVAL;
  202. if (!inode->i_fop || !inode->i_fop->release)
  203. return -EINVAL;
  204. return inode->i_fop->release(inode, file);
  205. }
/*
 * Helpers for rendering a struct path as a string with d_path().
 *
 * Since 3.7 __getname() returns a struct filename, so the character
 * buffer lives just past the header and the usable size shrinks by
 * sizeof(struct filename); older kernels hand back a raw PATH_MAX
 * buffer.  DEFINE_PATH declares the buffer, fills it, and leaves the
 * result in `dp` (which may be an ERR_PTR); every use must be paired
 * with PUT_PATH_BUFFER to return the buffer to the names cache.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
# define DEFINE_PATH_BUFFER(fn, kpath, max) struct filename *fn; char *kpath; int max;
#else
# define DEFINE_PATH_BUFFER(fn, kpath, max) char * kpath; int max;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
# define GET_PATH_BUFFER(fn, kpath, max) \
	fn = __getname(); \
	kpath = (char *) fn + sizeof(*fn); \
	max = PATH_MAX - sizeof(*fn);
#else
# define GET_PATH_BUFFER(fn, kpath, max) \
	kpath = __getname(); \
	max = PATH_MAX;
#endif
#define DEFINE_PATH(dp, path, fn, kpath, max) \
	DEFINE_PATH_BUFFER(fn, kpath, max) \
	char *dp; \
	GET_PATH_BUFFER(fn, kpath, max) \
	dp = d_path(path, kpath, max);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
# define PUT_PATH_BUFFER(fn, kpath) final_putname(fn);
#else
# define PUT_PATH_BUFFER(fn, kpath) putname(kpath);
#endif
/*
 * get_unmapped_area hook for the sandboxed library file.  When the
 * sandbox configures a fixed load address (gi_libaddr), place the
 * mapping at that address plus the page offset; otherwise defer to
 * the inode's own get_unmapped_area, or the mm default.
 */
static unsigned long
graphene_lib_get_area(struct file *file, unsigned long addr, unsigned long len,
		      unsigned long pgoff, unsigned long flags)
{
	struct task_struct *current_tsk = current;
	struct graphene_info *gi = get_graphene_info(current_tsk->graphene);
	struct inode *inode = FILE_INODE(file);
	unsigned long (*get_area) (struct file *, unsigned long, unsigned long,
				   unsigned long, unsigned long);

	if (!inode)
		return -EINVAL;

	if (gi->gi_libaddr) {
		/* these file ops are installed only on the libexec file
		 * (see graphene_execve_open); anything else is a bug */
		if (!path_equal(&file->f_path, &gi->gi_libexec))
			BUG();

		if (!addr)
			addr = gi->gi_libaddr + pgoff * PAGE_SIZE;

#ifdef CONFIG_GRAPHENE_DEBUG
		{
			DEFINE_PATH(dp, &file->f_path, fn, kpath, max)
			if (!IS_ERR(dp))
				printk(KERN_INFO "Graphene: PID %d MAP FILE %s"
				       " OFF 0x%08lx AT 0x%016lx\n",
				       current->pid, dp,
				       pgoff * PAGE_SIZE, addr);
			PUT_PATH_BUFFER(fn, kpath)
		}
#endif
		return addr;
	}

	get_area = (inode->i_fop && inode->i_fop->get_unmapped_area) ?
		   inode->i_fop->get_unmapped_area :
		   current_tsk->mm->get_unmapped_area;
	return get_area(file, addr, len, pgoff, flags);
}
/* File operations installed on the sandboxed library file during
 * execve (see graphene_execve_open); each forwards to the underlying
 * inode's implementation, except get_unmapped_area which honors the
 * sandbox's fixed load address. */
static struct file_operations graphene_lib_operations = {
	.llseek			= graphene_lib_llseek,
	.read			= graphene_lib_read,
	.aio_read		= graphene_lib_aio_read,
	.mmap			= graphene_lib_mmap,
	.get_unmapped_area	= graphene_lib_get_area,
	.release		= graphene_lib_release,
};
#ifdef CONFIG_GRAPHENE_DEBUG
/* Debug helper: @fmt must take (pid, path-string), e.g.
 * "Graphene: ALLOW PID %d PATH %s\n".  Compiled out otherwise. */
static void print_path(const char * fmt, struct path *path)
{
	DEFINE_PATH(dp, path, fn, kpath, max)
	printk(fmt, current->pid, IS_ERR(dp) ? "(unknown)" : dp);
	PUT_PATH_BUFFER(fn, kpath)
}
#else
# define print_path(...) do {} while (0)
#endif
  284. int graphene_execve_open(struct file *file)
  285. {
  286. struct task_struct *current_tsk = current;
  287. struct graphene_info *gi = get_graphene_info(current_tsk->graphene);
  288. if (!current_tsk->in_execve)
  289. BUG();
  290. if (!path_equal(&file->f_path, &gi->gi_libexec)) {
  291. print_path(KERN_INFO "Graphene: DENY EXEC PID %d PATH %s\n",
  292. &file->f_path);
  293. return -EACCES;
  294. }
  295. if (!gi->gi_libaddr)
  296. goto accepted;
  297. file->f_op = &graphene_lib_operations;
  298. accepted:
  299. print_path(KERN_INFO "Graphene: ALLOW EXEC PID %d PATH %s\n",
  300. &file->f_path);
  301. return 0;
  302. }
  303. static int graphene_check_path(struct graphene_info *gi, int op, u32 mask,
  304. struct path *path, struct graphene_path *gp,
  305. int is_recursive)
  306. {
  307. if (!path_equal(path, &gp->path))
  308. return 0;
  309. if (mask & (MAY_READ|MAY_EXEC|MAY_ACCESS|
  310. AA_MAY_META_READ|AA_EXEC_MMAP|AA_MAY_LINK)) {
  311. if (!(gp->type & GRAPHENE_FS_READ))
  312. return -EACCES;
  313. }
  314. if (mask & (MAY_WRITE|MAY_APPEND|
  315. AA_MAY_CREATE|AA_MAY_DELETE|AA_MAY_META_WRITE|
  316. AA_MAY_CHMOD|AA_MAY_CHOWN)) {
  317. if (!(gp->type & GRAPHENE_FS_WRITE))
  318. return -EACCES;
  319. }
  320. return 1;
  321. }
  322. static int __common_perm(struct graphene_info *gi, int op, struct path *target,
  323. u32 mask)
  324. {
  325. struct graphene_path *p;
  326. struct path root, path = *target;
  327. struct qstr last;
  328. int rv = 0, i;
  329. BUG_ON(!path.dentry);
  330. path_get(&path);
  331. if (op == OP_OPEN) {
  332. int minor = iminor(path.dentry->d_inode);
  333. if (minor == GRAPHENE_MINOR)
  334. goto out;
  335. if (minor == GIPC_MINOR)
  336. goto out;
  337. }
  338. rcu_read_lock();
  339. for (i = 0 ; i < 3 ; i++) {
  340. if (!gi->gi_console[i].mnt)
  341. continue;
  342. if (path_equal(&gi->gi_console[i], &path))
  343. goto out;
  344. }
  345. list_for_each_entry_rcu(p, &gi->gi_paths, list) {
  346. rv = graphene_check_path(gi, op, mask, &path, p, 0);
  347. if (rv)
  348. goto out;
  349. }
  350. if (gi->gi_libexec.mnt && path_equal(&path, &gi->gi_libexec)) {
  351. rv = 0;
  352. goto out;
  353. }
  354. get_fs_root(current->fs, &root);
  355. last.len = 0;
  356. while (!path_equal(&path, &root)) {
  357. int is_recursive = 0;
  358. list_for_each_entry_rcu(p, &gi->gi_rpaths, list) {
  359. rv = graphene_check_path(gi, op, mask, &path, p,
  360. is_recursive);
  361. if (rv)
  362. goto out_root;
  363. }
  364. if (gi->gi_unix && gi->gi_unix->root.mnt) {
  365. struct graphene_unix *u = gi->gi_unix;
  366. if (path_equal(&path, &u->root)) {
  367. rv = 0;
  368. if (op == OP_MKNOD)
  369. goto out_root;
  370. if (op == OP_UNLINK) {
  371. if (!u->prefix.len)
  372. goto out_root;
  373. if (last.len) {
  374. int len = u->prefix.len;
  375. if (last.len < len)
  376. len = last.len;
  377. if (!strncmp(last.name,
  378. u->prefix.name,
  379. len))
  380. goto out_root;
  381. }
  382. }
  383. break;
  384. }
  385. }
  386. last = path.dentry->d_name;
  387. while(1) {
  388. struct dentry *old = path.dentry;
  389. if (path_equal(&path, &root))
  390. break;
  391. if (path.dentry != path.mnt->mnt_root) {
  392. path.dentry = dget_parent(path.dentry);
  393. dput(old);
  394. break;
  395. }
  396. if (!follow_up(&path))
  397. break;
  398. }
  399. is_recursive = 1;
  400. }
  401. rv = -EACCES;
  402. out_root:
  403. path_put(&root);
  404. out:
  405. rcu_read_unlock();
  406. path_put(&path);
  407. if (rv >= 0) {
  408. rv = 0;
  409. print_path(KERN_INFO "Graphene: ALLOW PID %d PATH %s\n", target);
  410. } else {
  411. print_path(KERN_INFO "Graphene: DENY PID %d PATH %s\n", target);
  412. }
  413. return rv;
  414. }
  415. int graphene_common_perm(int op, struct path *path, u32 mask)
  416. {
  417. struct graphene_info *gi = get_graphene_info(current->graphene);
  418. if (!gi)
  419. return 0;
  420. return __common_perm(gi, op, path, mask);
  421. }
/*
 * Check a unix-domain socket address (bind/connect) against the
 * sandbox's single unix rule (gi->gi_unix).
 *
 * If the rule pins a root directory, the parent of the requested
 * sun_path must resolve to exactly that directory, and the last
 * component is then matched against the rule's name prefix.  With no
 * pinned root, the whole sun_path string is matched against the
 * prefix.  An empty prefix accepts any name.
 *
 * Returns 0 on allow, -EPERM on deny, or the path-lookup error.
 *
 * NOTE(review): nd.last.name is read after path_put(&nd.path); it
 * appears to point into the caller-supplied sun_path buffer rather
 * than into the released path, so this looks safe — confirm against
 * kern_path_parent() semantics for this kernel version.
 */
static int __unix_perm(struct sockaddr *address, int addrlen)
{
	struct graphene_info *gi = get_graphene_info(current->graphene);
	const char *path, *sun_path;
	int path_len;

	if (!gi->gi_unix)
		return -EPERM;

	path = sun_path = ((struct sockaddr_un *) address)->sun_path;

	if (gi->gi_unix->root.mnt) {
		struct nameidata nd;
		int err;

		err = kern_path_parent(path, &nd);
		if (err)
			return err;

		if (!path_equal(&gi->gi_unix->root, &nd.path)) {
			path_put(&nd.path);
			goto denied;
		}
		path_put(&nd.path);
		/* match only the final component from here on */
		path = nd.last.name;
		path_len = nd.last.len;
	} else {
		path_len = strlen(path);
	}

	if (!gi->gi_unix->prefix.len)
		return 0;

	/* the prefix must be a proper prefix of the socket name */
	if (gi->gi_unix->prefix.len < path_len &&
	    !memcmp(path,
		    gi->gi_unix->prefix.name,
		    gi->gi_unix->prefix.len))
		return 0;

denied:
#ifdef CONFIG_GRAPHENE_DEBUG
	printk(KERN_INFO "Graphene: DENY PID %d SOCKET %s\n",
	       current->pid, sun_path);
#endif
	return -EPERM;
}
  460. static int net_cmp(int family, int addr_any, int port_any,
  461. struct graphene_net_addr *ga,
  462. struct sockaddr *addr, int addrlen)
  463. {
  464. switch(family) {
  465. case AF_INET: {
  466. struct sockaddr_in *a = (void *) addr;
  467. if (!addr_any) {
  468. if (a->sin_addr.s_addr != ga->addr.sin_addr.s_addr)
  469. return 1;
  470. }
  471. if (!port_any) {
  472. unsigned short port = ntohs(a->sin_port);
  473. if (!(port >= ga->port_begin && port <= ga->port_end))
  474. return 1;
  475. }
  476. break;
  477. }
  478. #ifdef CONFIG_IPV6
  479. case AF_INET6: {
  480. struct sockaddr_in6 *a6 = (void *) addr;
  481. if (!addr_any) {
  482. if (memcmp(&a6->sin6_addr, &ga->addr.sin6_addr,
  483. sizeof(struct in6_addr)))
  484. return 1;
  485. }
  486. if (!port_any) {
  487. unsigned short port = ntohs(a6->sin6_port);
  488. if (!(port >= ga->port_begin && port <= ga->port_end))
  489. return 1;
  490. }
  491. break;
  492. }
  493. #endif
  494. }
  495. return 0;
  496. }
  497. #ifdef CONFIG_GRAPHENE_DEBUG
  498. static void print_net(int allow, int family, int op,
  499. struct sockaddr *local_addr, int local_addrlen,
  500. struct sockaddr *peer_addr, int peer_addrlen)
  501. {
  502. const char *allow_str = allow ? "ALLOW" : "DENY";
  503. const char *op_str = "";
  504. int print_peer = (op == OP_CONNECT || op == OP_SENDMSG);
  505. switch(op) {
  506. case OP_BIND: op_str = "BIND"; break;
  507. case OP_LISTEN: op_str = "LISTEN"; break;
  508. case OP_CONNECT: op_str = "CONNECT"; break;
  509. case OP_SENDMSG: op_str = "SENDMSG"; break;
  510. case OP_RECVMSG: op_str = "RECVMSG"; break;
  511. }
  512. if (family == AF_INET) {
  513. struct sockaddr_in *la = (void *) local_addr;
  514. u8 *a1 = (u8 *) &la->sin_addr.s_addr;
  515. struct sockaddr_in *pa = (void *) peer_addr;
  516. u8 *a2 = (u8 *) &pa->sin_addr.s_addr;
  517. if (print_peer && peer_addr) {
  518. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  519. "%d.%d.%d.%d:%d:%d.%d.%d.%d:%d\n",
  520. allow_str, op_str, current->pid,
  521. a1[0], a1[1], a1[2], a1[3], ntohs(la->sin_port),
  522. a2[0], a2[1], a2[2], a2[3], ntohs(pa->sin_port));
  523. } else {
  524. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  525. "%d.%d.%d.%d:%d\n",
  526. allow_str, op_str, current->pid,
  527. a1[0], a1[1], a1[2], a1[3], ntohs(la->sin_port));
  528. }
  529. }
  530. #ifdef CONFIG_IPV6
  531. if (family == AF_INET6) {
  532. struct sockaddr_in6 *la = (void *) local_addr;
  533. u16 *a1 = (u16 *) &la->sin6_addr.s6_addr;
  534. struct sockaddr_in6 *pa = (void *) peer_addr;
  535. u16 *a2 = (u16 *) &pa->sin6_addr.s6_addr;
  536. if (print_peer) {
  537. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  538. "[%d:%d:%d:%d:%d:%d:%d:%d]:%d:"
  539. "[%d.%d.%d.%d:%d:%d:%d:%d]:%d\n",
  540. allow_str, op_str, current->pid,
  541. a1[0], a1[1], a1[2], a1[3],
  542. a1[4], a1[5], a1[6], a1[7], ntohs(la->sin6_port),
  543. a2[0], a2[1], a2[2], a2[3],
  544. a2[4], a2[5], a2[6], a2[7], ntohs(pa->sin6_port));
  545. } else {
  546. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  547. "[%d.%d.%d.%d:%d:%d:%d:%d]:%d\n",
  548. allow_str, op_str, current->pid,
  549. a1[0], a1[1], a1[2], a1[3],
  550. a1[4], a1[5], a1[6], a1[7], ntohs(la->sin6_port));
  551. }
  552. }
  553. #endif
  554. }
  555. #else
  556. # define print_net(...) do {} while (0)
  557. #endif
  558. /*
  559. * network rules:
  560. * bind:
  561. * input addr/port match local addr/port
  562. * listen:
  563. * local addr/port match local addr/port
  564. * allow ANY peer addr/port
  565. * connect:
  566. * local/remote addr/port match local/remote addr/port
  567. * sendmsg:
  568. * EITHER stream socket OR no inport addr/port OR
  569. * local/remote addr/port match local/remote addr/port
  570. * recvmsg:
  571. * EITHER stream socket OR connected OR
  572. * allow ANY peer addr/port
  573. */
  574. static
  575. int __common_net_perm(struct graphene_info *gi, int op, struct socket *sock,
  576. struct sockaddr *address, int addrlen)
  577. {
  578. struct sock *sk = sock->sk;
  579. struct inet_sock *inet = inet_sk(sk);
  580. struct graphene_net *gn;
  581. struct sockaddr_storage addrbuf;
  582. struct sockaddr * local_addr = NULL, * peer_addr = NULL;
  583. int local_addrlen, peer_addrlen;
  584. int local_needcmp = 0, peer_needcmp = 0;
  585. int local_needany = 0, peer_needany = 0;
  586. int err;
  587. if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_DGRAM)
  588. return -EPERM;
  589. #ifdef CONFIG_IPV6
  590. if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
  591. #else
  592. if (sk->sk_family != AF_INET)
  593. #endif
  594. return -EPERM;
  595. if (list_empty(&gi->gi_net))
  596. return -EPERM;
  597. if (op == OP_LISTEN)
  598. peer_needany = 1;
  599. if (op == OP_RECVMSG) {
  600. if (inet->inet_dport)
  601. return 0;
  602. peer_needany = 1;
  603. }
  604. if (op == OP_CONNECT || op == OP_SENDMSG) {
  605. BUG_ON(!address);
  606. peer_addr = address;
  607. peer_addrlen = addrlen;
  608. peer_needcmp = 1;
  609. }
  610. if (op == OP_BIND) {
  611. BUG_ON(!address);
  612. local_addr = address;
  613. local_addrlen = addrlen;
  614. local_needcmp = 1;
  615. if (sk->sk_type == SOCK_DGRAM)
  616. peer_needany = 1;
  617. } else {
  618. local_addr = (struct sockaddr *) &addrbuf;
  619. local_needcmp = 1;
  620. err = sock->ops->getname(sock, local_addr, &local_addrlen, 0);
  621. if (err)
  622. return err;
  623. }
  624. list_for_each_entry(gn, &gi->gi_net, list) {
  625. if (gn->family != sk->sk_family)
  626. continue;
  627. if (local_needany &&
  628. (gn->flags & (LOCAL_ADDR_ANY|LOCAL_PORT_ANY)) !=
  629. (LOCAL_ADDR_ANY|LOCAL_PORT_ANY))
  630. continue;
  631. if (peer_needany &&
  632. (gn->flags & (PEER_ADDR_ANY|PEER_PORT_ANY)) !=
  633. (PEER_ADDR_ANY|PEER_PORT_ANY))
  634. continue;
  635. if (local_needcmp) {
  636. if (net_cmp(sk->sk_family, gn->flags & LOCAL_ADDR_ANY,
  637. gn->flags & LOCAL_PORT_ANY,
  638. &gn->local, local_addr, local_addrlen))
  639. continue;
  640. }
  641. if (peer_needcmp) {
  642. if (net_cmp(sk->sk_family, gn->flags & PEER_ADDR_ANY,
  643. gn->flags & PEER_PORT_ANY,
  644. &gn->peer, peer_addr, peer_addrlen))
  645. continue;
  646. }
  647. print_net(1, sk->sk_family, op, local_addr, local_addrlen,
  648. peer_addr, peer_addrlen);
  649. return 0;
  650. }
  651. print_net(0, sk->sk_family, op, local_addr, local_addrlen,
  652. peer_addr, peer_addrlen);
  653. return -EPERM;
  654. }
  655. int graphene_socket_bind(struct socket *sock,
  656. struct sockaddr *address, int addrlen)
  657. {
  658. if (GRAPHENE_ENABLED()) {
  659. struct graphene_info *gi = get_graphene_info(current->graphene);
  660. if (!sock || !sock->sk)
  661. return 0;
  662. if (sock->sk->sk_family == PF_UNIX) {
  663. if (sock->sk->sk_type != SOCK_STREAM)
  664. return -EPERM;
  665. return __unix_perm(address, addrlen);
  666. }
  667. return __common_net_perm(gi, OP_BIND, sock, address, addrlen);
  668. }
  669. return 0;
  670. }
  671. int graphene_socket_listen(struct socket *sock, int backlog)
  672. {
  673. if (GRAPHENE_ENABLED()) {
  674. struct graphene_info *gi = get_graphene_info(current->graphene);
  675. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  676. return 0;
  677. return __common_net_perm(gi, OP_LISTEN, sock, NULL, 0);
  678. }
  679. return 0;
  680. }
  681. int graphene_socket_connect(struct socket *sock,
  682. struct sockaddr *address, int addrlen)
  683. {
  684. if (GRAPHENE_ENABLED()) {
  685. struct graphene_info *gi = get_graphene_info(current->graphene);
  686. if (!sock || !sock->sk)
  687. return 0;
  688. if (sock->sk->sk_family == PF_UNIX) {
  689. if (sock->sk->sk_type != SOCK_STREAM)
  690. return -EPERM;
  691. return __unix_perm(address, addrlen);
  692. }
  693. return __common_net_perm(gi, OP_CONNECT, sock, address,
  694. addrlen);
  695. }
  696. return 0;
  697. }
  698. int graphene_socket_sendmsg(struct socket *sock,
  699. struct msghdr *msg, int size)
  700. {
  701. if (GRAPHENE_ENABLED()) {
  702. struct graphene_info *gi = get_graphene_info(current->graphene);
  703. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  704. return 0;
  705. if (sock->sk->sk_type == SOCK_STREAM)
  706. return 0;
  707. if (!msg->msg_name)
  708. return 0;
  709. return __common_net_perm(gi, OP_SENDMSG, sock,
  710. msg->msg_name, msg->msg_namelen);
  711. }
  712. return 0;
  713. }
  714. int graphene_socket_recvmsg(struct socket *sock,
  715. struct msghdr *msg, int size, int flags)
  716. {
  717. if (GRAPHENE_ENABLED()) {
  718. struct graphene_info *gi = get_graphene_info(current->graphene);
  719. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  720. return 0;
  721. if (sock->sk->sk_type == SOCK_STREAM)
  722. return 0;
  723. return __common_net_perm(gi, OP_RECVMSG, sock, NULL, 0);
  724. }
  725. return 0;
  726. }
  727. int graphene_task_kill(struct task_struct *tsk, struct siginfo *info,
  728. int sig, u32 secid)
  729. {
  730. struct task_struct *current_tsk = current;
  731. if (!current_tsk->graphene)
  732. return 0;
  733. if (sig != SIGCONT)
  734. return -EPERM;
  735. return (tsk->tgid == current_tsk->tgid) ? 0 : -EPERM;
  736. }
/*
 * Record the calling process's stdin/stdout/stderr (fds 0-2) as the
 * sandbox's console paths, so later permission checks always allow
 * them.  At most three distinct paths are stored in gi->gi_console;
 * unused slots are marked empty with a NULL mnt.
 */
static void get_console(struct graphene_info *gi, struct files_struct *files)
{
	int i, j, n = 0;
	struct fdtable *fdt = files_fdtable(files);

	j = 0;
	/* NOTE(review): fdt is re-fetched under rcu_read_lock() but then
	 * used after rcu_read_unlock(); this relies on the table not
	 * being resized concurrently while we scan it — confirm. */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();

	for (;;) {
		unsigned long set;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		i = j * BITS_PER_LONG;
#else
		i = j * __NFDBITS;
#endif
		if (i >= fdt->max_fds)
			break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		set = fdt->open_fds[j++];
#else
		set = fdt->open_fds->fds_bits[j++];
#endif
		for ( ; set ; i++, set >>= 1) {
			struct file *file;
			int k;
			if (!(set & 1))
				continue;
			/* only fds 0, 1 and 2 are of interest */
			if (i > 2)
				goto out;
			/* temporarily swap the file out of the fd slot
			 * so it cannot be released under us */
			file = xchg(&fdt->fd[i], NULL);
			if (!file)
				continue;
			/* deduplicate: several fds may share one path */
			for (k = 0 ; k < n ; k++)
				if (path_equal(&file->f_path, &gi->gi_console[k]))
					break;
			if (k == n) {
				path_get(&file->f_path);
				gi->gi_console[n++] = file->f_path;
			}
#ifdef CONFIG_GRAPHENE_DEBUG
			{
				DEFINE_PATH(dp, &file->f_path, fn, kpath, max)
				if (!IS_ERR(dp))
					printk(KERN_INFO "Graphene: "
					       "PID %d CONSOLE %s\n",
					       current->pid, dp);
				PUT_PATH_BUFFER(fn, kpath)
			}
#endif
			/* restore the file into its fd slot */
			xchg(&fdt->fd[i], file);
		}
	}
out:
	/* mark the remaining console slots empty */
	for ( ; n < 3 ; n++)
		gi->gi_console[n].mnt = NULL;
}
  793. static int update_graphene(struct task_struct *current_tsk,
  794. struct graphene_info *gi);
#ifdef CONFIG_GRAPHENE_DEBUG
/*
 * Format a network rule as "<local>:<peer>", each endpoint rendered as
 * "addr:port[-port]" (with empty fields when the corresponding *_ANY
 * wildcard flag is set), then print it with @fmt, which must take
 * (pid, string).  Compiled out without CONFIG_GRAPHENE_DEBUG.
 */
static void print_net_rule(const char *fmt, struct graphene_net *n)
{
# ifdef CONFIG_IPV6
# define ADDR_STR_MAX 128
# else
# define ADDR_STR_MAX 48
# endif
	char str[ADDR_STR_MAX];
	int len = 0, i;

	/* pass 0 renders the local endpoint, pass 1 the peer */
	for (i = 0; i < 2; i++) {
		unsigned char addr_any = i ? PEER_ADDR_ANY : LOCAL_ADDR_ANY;
		unsigned char port_any = i ? PEER_PORT_ANY : LOCAL_PORT_ANY;
		struct graphene_net_addr *a = i ? &n->peer : &n->local;

		if (i)
			str[len++] = ':';

		switch(n->family) {
		case AF_INET:
			if (n->flags & addr_any) {
				str[len++] = ':';
			} else {
				u8 *ip = (u8 *) &a->addr.sin_addr.s_addr;
				len += snprintf(str + len,
						ADDR_STR_MAX - len,
						"%u.%u.%u.%u:",
						ip[0], ip[1], ip[2], ip[3]);
			}
			break;
#ifdef CONFIG_IPV6
		case AF_INET6:
			if (n->flags & addr_any) {
				str[len++] = '[';
				str[len++] = ']';
				str[len++] = ':';
			} else {
				u16 *ip = (u16 *) &a->addr.sin6_addr.s6_addr;
				len += snprintf(str + len,
						ADDR_STR_MAX - len,
						"[%u:%u:%u:%u:%u:%u:%u:%u]:",
						ip[0], ip[1], ip[2], ip[3],
						ip[4], ip[5], ip[6], ip[7]);
			}
			break;
#endif /* CONFIG_IPV6 */
		}

		/* omit the port part entirely when any port matches */
		if (!(n->flags & port_any)) {
			if (a->port_begin == a->port_end)
				len += snprintf(str + len, ADDR_STR_MAX - len,
						"%u", a->port_begin);
			else
				len += snprintf(str + len, ADDR_STR_MAX - len,
						"%u-%u",
						a->port_begin, a->port_end);
		}
	}

	BUG_ON(len >= ADDR_STR_MAX);
	str[len] = 0;
	printk(fmt, current->pid, str);
}
#else
# define print_net_rule(...) do {} while (0)
#endif
  857. static int set_net_rule(struct graphene_net_policy *np,
  858. struct graphene_info *gi)
  859. {
  860. struct graphene_net *n;
  861. int i;
  862. #ifdef CONFIG_IPV6
  863. if (np->family != AF_INET && np->family != AF_INET6)
  864. #else
  865. if (np->family != AF_INET)
  866. #endif
  867. return -EINVAL;
  868. n = kmalloc(sizeof(struct graphene_net), GFP_KERNEL);
  869. if (!n)
  870. return -ENOMEM;
  871. n->family = np->family;
  872. n->flags = 0;
  873. n->local = np->local;
  874. n->peer = np->peer;
  875. for (i = 0; i < 2; i++) {
  876. unsigned char addr_any = i ? PEER_ADDR_ANY : LOCAL_ADDR_ANY;
  877. unsigned char port_any = i ? PEER_PORT_ANY : LOCAL_PORT_ANY;
  878. struct graphene_net_addr *a = i ? &n->peer : &n->local;
  879. switch(n->family) {
  880. case AF_INET:
  881. if (!a->addr.sin_addr.s_addr)
  882. n->flags |= addr_any;
  883. break;
  884. #ifdef CONFIG_IPV6
  885. case AF_INET6:
  886. if (!memcmp(&a->addr.sin6_addr.s6_addr, &in6addr_any, 16))
  887. n->flags |= addr_any;
  888. break;
  889. #endif /* CONFIG_IPV6 */
  890. }
  891. if (a->port_begin == 0 && a->port_end == 65535)
  892. n->flags |= port_any;
  893. }
  894. INIT_LIST_HEAD(&n->list);
  895. list_add_tail(&n->list, &gi->gi_net);
  896. print_net_rule(KERN_INFO "Graphene: PID %d NET RULE %s\n", n);
  897. return 0;
  898. }
  899. u32 gipc_get_session(struct task_struct *tsk)
  900. {
  901. struct graphene_info *gi = get_graphene_info(tsk->graphene);
  902. return gi ? gi->gi_gipc_session : 0;
  903. }
  904. int set_graphene(struct task_struct *current_tsk,
  905. const struct graphene_policies __user *gpolicies)
  906. {
  907. int npolicies;
  908. const struct graphene_user_policy __user *policies = gpolicies->policies;
  909. struct graphene_info *gi;
  910. struct graphene_user_policy ptmp;
  911. struct graphene_path *p;
  912. struct graphene_unix *u;
  913. int i, rv = 0;
  914. DEFINE_PATH_BUFFER(fn, kpath, max)
  915. #ifdef CONFIG_GRAPHENE_DEBUG
  916. char *dp;
  917. #endif
  918. rv = copy_from_user(&npolicies, &gpolicies->npolicies, sizeof(int));
  919. if (rv < 0)
  920. return -EFAULT;
  921. if (npolicies && !policies)
  922. return -EINVAL;
  923. #ifndef CONFIG_GRAPHENE_ISOLATE
  924. if (current_tsk->graphene)
  925. return -EAGAIN;
  926. if (current_tsk != current_tsk->group_leader)
  927. return -EPERM;
  928. #endif
  929. gi = kmalloc(sizeof(struct graphene_info), GFP_KERNEL);
  930. if (!gi)
  931. return -ENOMEM;
  932. GET_PATH_BUFFER(fn, kpath, max)
  933. memset(gi, 0, sizeof(struct graphene_info));
  934. INIT_LIST_HEAD(&gi->gi_paths);
  935. INIT_LIST_HEAD(&gi->gi_rpaths);
  936. INIT_LIST_HEAD(&gi->gi_net);
  937. gi->gi_gipc_session = atomic_inc_return(&gipc_session);
  938. #ifdef CONFIG_GRAPHENE_DEBUG
  939. printk(KERN_INFO "Graphene: PID %d GIPC SESSION %u\n",
  940. current_tsk->pid, gi->gi_gipc_session);
  941. #endif
  942. for (i = 0 ; i < npolicies ; i++) {
  943. int type, flags;
  944. rv = copy_from_user(&ptmp, policies + i,
  945. sizeof(struct graphene_user_policy));
  946. if (rv) {
  947. rv = -EFAULT;
  948. goto err;
  949. }
  950. if (!ptmp.value) {
  951. rv = -EINVAL;
  952. goto err;
  953. }
  954. type = ptmp.type & ~(GRAPHENE_FS_READ | GRAPHENE_FS_WRITE);
  955. flags = ptmp.type & ~type;
  956. switch(type) {
  957. case GRAPHENE_LIB_NAME:
  958. rv = strncpy_from_user(kpath, ptmp.value, max);
  959. if (rv < 0)
  960. goto err;
  961. rv = kern_path(kpath, LOOKUP_FOLLOW, &gi->gi_libexec);
  962. if (rv)
  963. goto err;
  964. #ifdef CONFIG_GRAPHENE_DEBUG
  965. dp = d_path(&gi->gi_libexec, kpath, max);
  966. if (IS_ERR(dp)) {
  967. rv = -EINVAL;
  968. goto err;
  969. }
  970. printk(KERN_INFO "Graphene: PID %d LIB NAME %s\n",
  971. current_tsk->pid, dp);
  972. #endif
  973. break;
  974. case GRAPHENE_LIB_ADDR:
  975. gi->gi_libaddr = (u64) ptmp.value;
  976. #ifdef CONFIG_GRAPHENE_DEBUG
  977. printk(KERN_INFO "Graphene: PID %d LIB ADDR 0x%016llx\n",
  978. current_tsk->pid, gi->gi_libaddr);
  979. #endif
  980. break;
  981. case GRAPHENE_UNIX_ROOT:
  982. rv = strncpy_from_user(kpath, ptmp.value, max);
  983. if (rv < 0)
  984. goto err;
  985. u = gi->gi_unix;
  986. if (!u) {
  987. u = kmalloc(sizeof(struct graphene_unix),
  988. GFP_KERNEL);
  989. if (!u) {
  990. rv = -ENOMEM;
  991. goto err;
  992. }
  993. u->root.mnt = NULL;
  994. u->prefix.len = 0;
  995. atomic_set(&u->count, 1);
  996. INIT_LIST_HEAD(&u->list);
  997. gi->gi_unix = u;
  998. }
  999. if (u && u->root.mnt)
  1000. path_put(&u->root);
  1001. rv = kern_path(kpath, LOOKUP_FOLLOW, &u->root);
  1002. if (rv)
  1003. goto err;
  1004. #ifdef CONFIG_GRAPHENE_DEBUG
  1005. dp = d_path(&u->root, kpath, max);
  1006. if (IS_ERR(dp)) {
  1007. rv = -EINVAL;
  1008. goto err;
  1009. }
  1010. printk(KERN_INFO "Graphene: PID %d UNIX ROOT %s\n",
  1011. current_tsk->pid, dp);
  1012. #endif
  1013. break;
  1014. case GRAPHENE_UNIX_PREFIX: {
  1015. char * prefix;
  1016. rv = strncpy_from_user(kpath, ptmp.value, max);
  1017. if (rv < 0)
  1018. goto err;
  1019. u = gi->gi_unix;
  1020. if (!u) {
  1021. u = kmalloc(sizeof(struct graphene_unix),
  1022. GFP_KERNEL);
  1023. if (!u) {
  1024. rv = -ENOMEM;
  1025. goto err;
  1026. }
  1027. u->root.mnt = NULL;
  1028. u->prefix.len = 0;
  1029. atomic_set(&u->count, 1);
  1030. INIT_LIST_HEAD(&u->list);
  1031. gi->gi_unix = u;
  1032. }
  1033. if (u && u->prefix.len)
  1034. kfree(&u->prefix.name);
  1035. prefix = kmalloc(rv + 1, GFP_KERNEL);
  1036. if (!prefix) {
  1037. rv = -ENOMEM;
  1038. goto err;
  1039. }
  1040. memcpy(prefix, kpath, rv + 1);
  1041. u->prefix.len = rv;
  1042. u->prefix.name = prefix;
  1043. #ifdef CONFIG_GRAPHENE_DEBUG
  1044. printk(KERN_INFO "Graphene: PID %d UNIX PREFIX %s\n",
  1045. current_tsk->pid, kpath);
  1046. #endif
  1047. break;
  1048. }
  1049. case GRAPHENE_NET_RULE: {
  1050. struct graphene_net_policy np;
  1051. rv = copy_from_user(&np, ptmp.value,
  1052. sizeof(struct graphene_net_policy));
  1053. if (rv) {
  1054. rv = -EFAULT;
  1055. goto err;
  1056. }
  1057. rv = set_net_rule(&np, gi);
  1058. if (rv)
  1059. goto err;
  1060. break;
  1061. }
  1062. case GRAPHENE_FS_PATH:
  1063. case GRAPHENE_FS_RECURSIVE:
  1064. rv = strncpy_from_user(kpath, ptmp.value, max);
  1065. if (rv < 0)
  1066. goto err;
  1067. p = kmalloc(sizeof(struct graphene_path),
  1068. GFP_KERNEL);
  1069. if (!p) {
  1070. rv = -ENOMEM;
  1071. goto err;
  1072. }
  1073. rv = kern_path(kpath, LOOKUP_FOLLOW, &p->path);
  1074. if (rv) {
  1075. kfree(p);
  1076. goto err;
  1077. }
  1078. #ifdef CONFIG_GRAPHENE_DEBUG
  1079. dp = d_path(&p->path, kpath, max);
  1080. if (IS_ERR(dp)) {
  1081. rv = -EINVAL;
  1082. kfree(p);
  1083. goto err;
  1084. }
  1085. printk(KERN_INFO "Graphene: PID %d PATH %s%s\n",
  1086. current_tsk->pid, dp,
  1087. type == GRAPHENE_FS_PATH ? "" :
  1088. " (recursive)");
  1089. #endif
  1090. p->type = flags;
  1091. INIT_LIST_HEAD(&p->list);
  1092. list_add_tail(&p->list,
  1093. type == GRAPHENE_FS_PATH ?
  1094. &gi->gi_paths : &gi->gi_rpaths);
  1095. break;
  1096. }
  1097. }
  1098. if (!current_tsk->graphene) {
  1099. struct graphene_struct *gs;
  1100. if (gi->gi_unix) {
  1101. rv = add_graphene_unix(gi->gi_unix);
  1102. if (rv)
  1103. goto err;
  1104. }
  1105. get_console(gi, current_tsk->files);
  1106. gs = kmalloc(sizeof(struct graphene_struct), GFP_KERNEL);
  1107. if (!gs) {
  1108. rv = -ENOMEM;
  1109. goto err;
  1110. }
  1111. atomic_set(&gs->g_count, 1);
  1112. gs->g_info = gi;
  1113. spin_lock_init(&gs->g_lock);
  1114. current_tsk->graphene = gs;
  1115. printk(KERN_INFO "Graphene: PID %d registered\n",
  1116. current_tsk->pid);
  1117. }
  1118. #ifdef CONFIG_GRAPHENE_ISOLATE
  1119. else {
  1120. if ((rv = update_graphene(current_tsk, gi)) < 0) {
  1121. printk(KERN_INFO "Graphene: PID %d cannot be updated (%d)\n",
  1122. current_tsk->pid, rv);
  1123. goto err;
  1124. }
  1125. printk(KERN_INFO "Graphene: PID %d updated\n",
  1126. current_tsk->pid);
  1127. }
  1128. #endif
  1129. rv = 0;
  1130. goto out;
  1131. err:
  1132. drop_graphene_info(gi);
  1133. out:
  1134. PUT_PATH_BUFFER(fn, kpath)
  1135. return rv;
  1136. }
  1137. #ifdef CONFIG_GRAPHENE_ISOLATE
/*
 * Decide whether an open socket may survive a policy update.
 * Connected sockets are re-checked as OP_CONNECT against the peer
 * address, listening sockets as OP_LISTEN, and bound-but-idle sockets
 * as OP_BIND against the local address.  Unix-domain sockets are
 * denied outright (-EPERM) when @close_unix is set.
 * Returns 0 to keep the socket, a negative errno to close it.
 */
static int do_close_sock(struct graphene_info *gi, struct socket *sock,
			 int close_unix)
{
	struct sock *sk = sock->sk;
	struct sockaddr_storage address;
	struct sockaddr *addr = (void *) &address;
	struct inet_sock *inet;
	int len, err;

	if (!sk)
		return 0;

	if (sk->sk_family == PF_UNIX)
		return close_unix ? -EPERM : 0;

	inet = inet_sk(sk);

	/* a non-zero destination port means the socket is connected:
	 * fetch the peer address (getname peer=1) and check it */
	if (inet->inet_dport) {
		err = sock->ops->getname(sock, addr, &len, 1);
		if (err)
			return err;

		return __common_net_perm(gi, OP_CONNECT, sock, addr, len);
	}

	/* not bound to a local port either: nothing to check */
	if (!inet->inet_num)
		return 0;

	if (sk->sk_state == TCP_LISTEN) {
		err = __common_net_perm(gi, OP_LISTEN, sock, NULL, 0);
	} else {
		/* bound but not listening: check the local address
		 * (getname peer=0) */
		err = sock->ops->getname(sock, addr, &len, 0);
		if (err)
			return err;

		err = __common_net_perm(gi, OP_BIND, sock, addr, len);
	}

	return err;
}
/*
 * Walk every open fd of @files and close the ones no longer permitted
 * by @gi: sockets are re-validated via do_close_sock(), pipes are
 * always closed, and everything else is re-checked with OP_OPEN using
 * the permissions the file was opened with.  Each file is temporarily
 * xchg()'d out of its fd slot while being examined and put back only
 * if it passes.
 */
static int do_close_fds(struct graphene_info *gi, struct files_struct *files,
			int close_unix)
{
	int i, j;
	struct fdtable *fdt = files_fdtable(files);

	j = 0;
	/* NOTE(review): fdt is re-fetched under RCU but then used after
	 * rcu_read_unlock() — confirm the table cannot be resized while
	 * this loop runs */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();

	for (;;) {
		unsigned long set;
		/* i is the fd number corresponding to bit 0 of word j */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		i = j * BITS_PER_LONG;
#else
		i = j * __NFDBITS;
#endif
		if (i >= fdt->max_fds)
			break;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		set = fdt->open_fds[j++];
#else
		set = fdt->open_fds->fds_bits[j++];
#endif
		/* scan one word of the open-fd bitmap, one bit per fd */
		for ( ; set ; i++, set >>= 1) {
			struct socket *sock = NULL;
			struct file *file;
			int err;

			if (!(set & 1))
				continue;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
			/* pre-3.6: resolve the socket by fd before the
			 * slot is cleared below */
			sock = sockfd_lookup(i, &err);
#endif
			/* detach the file from the table while checking */
			file = xchg(&fdt->fd[i], NULL);
			if (!file)
				continue;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
			sock = sock_from_file(file, &err);
#endif
			if (sock) {
				err = do_close_sock(gi, sock, close_unix);
				if (!err)
					goto allow;

				goto deny;
			}

			/* pipes never survive a policy update */
			if (get_pipe_info(file))
				goto deny;

			err = __common_perm(gi, OP_OPEN, &file->f_path,
					    aa_map_file_to_perms(file));

			if (!err) {
allow:
				/* permitted: restore the file to its slot */
				xchg(&fdt->fd[i], file);
				continue;
			}

deny:
			filp_close(file, files);
			cond_resched();
		}
	}
	return 0;
}
  1229. static
  1230. int net_check (int family, int addr_any, int port_any,
  1231. int flags1, struct graphene_net_addr * addr1,
  1232. int flags2, struct graphene_net_addr * addr2)
  1233. {
  1234. if (flags2 & addr_any)
  1235. goto port;
  1236. if (flags1 & addr_any)
  1237. goto port;
  1238. switch (family) {
  1239. case AF_INET:
  1240. if (memcmp(&addr1->addr.sin_addr,
  1241. &addr2->addr.sin_addr,
  1242. sizeof(struct in_addr)))
  1243. return -EACCES;
  1244. break;
  1245. case AF_INET6:
  1246. if (memcmp(&addr1->addr.sin6_addr,
  1247. &addr2->addr.sin6_addr,
  1248. sizeof(struct in6_addr)))
  1249. return -EACCES;
  1250. break;
  1251. }
  1252. port:
  1253. if (flags2 & port_any)
  1254. return 0;
  1255. if (flags1 & port_any)
  1256. return 0;
  1257. if (addr1->port_begin < addr2->port_begin ||
  1258. addr1->port_end > addr2->port_end)
  1259. return -EACCES;
  1260. return 0;
  1261. }
/*
 * CONFIG_GRAPHENE_ISOLATE: replace the task's installed policy with @new,
 * which must be a strict narrowing of the current one.  Every path rule
 * and network rule in @new is checked against the old info @gi and the
 * update is refused (-EACCES) if anything would widen access.  On success
 * the new info is swapped in under gs->g_lock and do_close_fds() closes
 * any fds the narrowed policy no longer permits.
 */
static int update_graphene(struct task_struct *current_tsk,
			   struct graphene_info *new)
{
	struct graphene_struct *gs = current_tsk->graphene;
	struct graphene_info *gi = get_graphene_info(gs);
	struct graphene_path *p;
	struct graphene_net *n1, *n2;
	int i = 0, close_unix = 0;

	if (new->gi_unix) {
		/* inherit the old unix root when the new policy names none */
		if (!new->gi_unix->root.mnt &&
		    gi->gi_unix && gi->gi_unix->root.mnt) {
			/* NOTE(review): new->gi_unix->root.mnt is NULL here,
			 * so this path_equal() check looks dead/inverted —
			 * confirm the original intent */
			if (!path_equal(&new->gi_unix->root,
					&gi->gi_unix->root))
				return -EACCES;

			path_get(&gi->gi_unix->root);
			new->gi_unix->root = gi->gi_unix->root;
		}

		if (new->gi_unix->prefix.len) {
			int err = add_graphene_unix(new->gi_unix);
			if (err)
				return err;
		}

		/* any unix policy in the update forces re-checking (and
		 * closing) of unix-domain sockets in do_close_fds() */
		close_unix = 1;
	}

	/* carry the already-captured console paths over to the new info */
	for (i = 0 ; i < 3 ; i++)
		if (gi->gi_console[i].mnt) {
			path_get(&gi->gi_console[i]);
			new->gi_console[i] = gi->gi_console[i];
		} else {
			new->gi_console[i].mnt = NULL;
		}

	/* every new path rule must already be permitted by the old policy */
	list_for_each_entry(p, &new->gi_paths, list) {
		u32 mask = 0;
		if (p->type & GRAPHENE_FS_READ)
			mask |= MAY_READ;
		if (p->type & GRAPHENE_FS_WRITE)
			mask |= MAY_WRITE;
		print_path(KERN_INFO "Graphene: PID %d CHECK RULE %s\n",
			   &p->path);
		if (__common_perm(gi, OP_OPEN, &p->path, mask) < 0)
			return -EACCES;
	}

	/* every new net rule must fit inside some old rule of the same
	 * family (net_check() verifies address match / range containment) */
	list_for_each_entry(n1, &new->gi_net, list) {
		bool accepted = false;
		print_net_rule(KERN_INFO "Graphene: PID %d CHECK RULE %s\n",
			       n1);

		list_for_each_entry(n2, &gi->gi_net, list) {
			if (n1->family != n2->family)
				continue;

			if (net_check(n1->family,
				      LOCAL_ADDR_ANY, LOCAL_PORT_ANY,
				      n1->flags, &n1->local,
				      n2->flags, &n2->local) < 0)
				continue;

			if (net_check(n1->family,
				      PEER_ADDR_ANY, PEER_PORT_ANY,
				      n1->flags, &n1->peer,
				      n2->flags, &n2->peer) < 0)
				continue;

			accepted = true;
			print_net_rule(KERN_INFO "Graphene: PID %d ALLOW %s\n",
				       n1);
			break;
		}

		if (!accepted) {
			print_net_rule(KERN_INFO "Graphene: PID %d DENY %s\n",
				       n1);
			return -EACCES;
		}
	}

	/* publish the narrowed policy; readers take gs->g_lock */
	spin_lock(&gs->g_lock);
	put_graphene_info(gs->g_info);
	gs->g_info = new;
	spin_unlock(&gs->g_lock);

	/* finally, close any fds the new policy no longer allows */
	do_close_fds(new, current_tsk->files, close_unix);
	return 0;
}
  1339. #endif /* CONFIG_GRAPHENE_ISOLATE */
  1340. static long graphene_ioctl(struct file *file, unsigned int cmd,
  1341. unsigned long arg)
  1342. {
  1343. struct task_struct *current_tsk = current;
  1344. switch (cmd) {
  1345. case GRAPHENE_SET_TASK:
  1346. return set_graphene(current_tsk,
  1347. (const struct graphene_policies __user *) arg);
  1348. default:
  1349. return -ENOSYS;
  1350. }
  1351. }
/* File operations for /dev/graphene: ioctl-only, seeking is a no-op. */
static struct file_operations graphene_operations = {
	.unlocked_ioctl	= graphene_ioctl,
	.compat_ioctl	= graphene_ioctl,
	.llseek		= noop_llseek,
};
/* Misc character device /dev/graphene; mode 0666 so any process can
 * sandbox itself via the GRAPHENE_SET_TASK ioctl. */
static struct miscdevice graphene_dev = {
	.minor		= GRAPHENE_MINOR,
	.name		= "graphene",
	.fops		= &graphene_operations,
	.mode		= 0666,
};
  1363. static int __init graphene_init(void)
  1364. {
  1365. int rv;
  1366. rv = misc_register(&graphene_dev);
  1367. if (rv) {
  1368. printk(KERN_ERR "Graphene error: "
  1369. "failed to add a char device (rv=%d)\n", rv);
  1370. return rv;
  1371. }
  1372. return 0;
  1373. }
  1374. device_initcall(graphene_init);