graphene.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484
  1. /*
  2. * linux/graphene/graphene.c
  3. *
  4. * Copyright (C) 2013-, Chia-Che Tsai, Bhushan Jain and Donald Porter
  5. *
  6. * Manage the graphene information and security policies.
  7. */
  8. #include <linux/version.h>
  9. #include <linux/slab.h>
  10. #include <linux/sched.h>
  11. #include <linux/fs.h>
  12. #include <linux/file.h>
  13. #include <linux/fs_struct.h>
  14. #include <linux/fdtable.h>
  15. #include <linux/namei.h>
  16. #include <linux/dcache.h>
  17. #include <linux/mount.h>
  18. #include <linux/rcupdate.h>
  19. #include <linux/uaccess.h>
  20. #include <linux/un.h>
  21. #include <linux/net.h>
  22. #include <linux/atomic.h>
  23. #include <net/sock.h>
  24. #include <net/inet_sock.h>
  25. #include <net/tcp_states.h>
  26. #include <linux/pipe_fs_i.h>
  27. #include <../fs/internal.h>
  28. #include <../security/apparmor/include/audit.h>
  29. #include "graphene.h"
  30. #include "graphene-ipc.h"
  31. static atomic64_t unix_prefix_counter = ATOMIC64_INIT(1);
  32. static atomic64_t gipc_session = ATOMIC64_INIT(1);;
/*
 * Duplicate the graphene sandbox state for a newly forked/cloned task.
 * Threads (non-group-leaders) share the leader's graphene_struct via a
 * reference count; a new process gets its own graphene_struct that
 * shares the (refcounted) policy info with the parent.
 * Returns 0 on success or -ENOMEM.
 */
int dup_graphene_struct(struct task_struct *tsk)
{
	struct graphene_struct *gs, *new;
	struct graphene_info *gi;

	/* Task is not sandboxed: nothing to duplicate. */
	if (!(tsk->graphene))
		return 0;

	/* New thread in an existing group: just share the leader's struct. */
	if (tsk->group_leader != tsk) {
		atomic_inc(&tsk->graphene->g_count);
		return 0;
	}

	gs = tsk->graphene;
	new = kmalloc(sizeof(struct graphene_struct), GFP_KERNEL);
	/* NOTE(review): on -ENOMEM the task still points at the parent's gs
	 * without an extra reference — confirm callers treat this as fatal. */
	if (!new)
		return -ENOMEM;

	/* Take a reference on the shared policy info under the lock. */
	spin_lock(&gs->g_lock);
	gi = gs->g_info;
	atomic_inc(&gi->gi_count);
	new->g_info = gi;
	spin_unlock(&gs->g_lock);

	atomic_set(&new->g_count, 1);
	spin_lock_init(&new->g_lock);
	tsk->graphene = new;
	return 0;
}
/*
 * Free a graphene_info and everything it owns: the path references held
 * by the exact- and recursive-rule lists, the libexec path, up to three
 * console paths, and the multicast socket file.  Must only run once the
 * last reference is gone (see put_graphene_info()).
 */
static void drop_graphene_info(struct graphene_info *info)
{
	struct graphene_path *p, *n;
	int i;

	/* _safe iteration: entries are freed while walking. */
	list_for_each_entry_safe(p, n, &info->gi_paths, list) {
		path_put(&p->path);
		kfree(p);
	}
	list_for_each_entry_safe(p, n, &info->gi_rpaths, list) {
		path_put(&p->path);
		kfree(p);
	}
	if (info->gi_libexec.dentry)
		path_put(&info->gi_libexec);
	/* Console array is terminated by a NULL mnt (see get_console()). */
	for (i = 0 ; i < 3 && info->gi_console[i].mnt ; i++)
		path_put(&info->gi_console[i]);
	if (info->gi_mcast_sock)
		fput(info->gi_mcast_sock);
	kfree(info);
}
  77. static void put_graphene_info(struct graphene_info *info)
  78. {
  79. if (!atomic_dec_return(&info->gi_count))
  80. drop_graphene_info(info);
  81. }
  82. void put_graphene_struct(struct task_struct *tsk)
  83. {
  84. struct graphene_struct *gs = tsk->graphene;
  85. if (gs) {
  86. tsk->graphene = NULL;
  87. if (atomic_dec_return(&gs->g_count))
  88. return;
  89. put_graphene_info(gs->g_info);
  90. kfree(gs);
  91. }
  92. }
/*
 * Fetch the RCU-protected policy info from a task's graphene_struct.
 * Returns NULL when the task is not sandboxed.  The lockdep condition
 * accepts g_lock holders, sole owners (g_count == 1) and
 * single-threaded groups as legitimate lock-free readers.
 */
static inline
struct graphene_info *get_graphene_info(struct graphene_struct *gs)
{
	struct graphene_info *info;
	if (!gs)
		return NULL;
	rcu_read_lock();
	info = rcu_dereference_check(gs->g_info,
				     lockdep_is_held(&gs->g_lock) ||
				     atomic_read(&gs->g_count) == 1 ||
				     rcu_my_thread_group_empty());
	rcu_read_unlock();
	/* NOTE(review): the pointer escapes the RCU read section here;
	 * callers appear to rely on gi_count keeping it alive — confirm. */
	return info;
}
/*
 * Portability shim: struct file gained f_inode in 3.9; older kernels
 * reach the inode through the dentry.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
# define FILE_INODE(file) ((file)->f_inode)
#else
# define FILE_INODE(file) ((file)->f_dentry->d_inode)
#endif
  112. static loff_t graphene_lib_llseek(struct file *file, loff_t offset, int origin)
  113. {
  114. struct inode *inode = FILE_INODE(file);
  115. if (!inode)
  116. return -EINVAL;
  117. if (!inode->i_fop || !inode->i_fop->llseek)
  118. return -EINVAL;
  119. return inode->i_fop->llseek(file, offset, origin);
  120. }
  121. static ssize_t graphene_lib_read (struct file *file, char __user *buf,
  122. size_t len, loff_t *ppos)
  123. {
  124. struct inode *inode = FILE_INODE(file);
  125. if (!inode)
  126. return -EINVAL;
  127. if (!inode->i_fop || !inode->i_fop->read)
  128. return -EINVAL;
  129. return inode->i_fop->read(file, buf, len, ppos);
  130. }
  131. static ssize_t graphene_lib_aio_read (struct kiocb *iocb, const struct iovec *iov,
  132. unsigned long nr_segs, loff_t pos)
  133. {
  134. struct inode *inode = FILE_INODE(iocb->ki_filp);
  135. if (!inode)
  136. return -EINVAL;
  137. if (!inode->i_fop || !inode->i_fop->aio_read)
  138. return -EINVAL;
  139. return inode->i_fop->aio_read(iocb, iov, nr_segs, pos);
  140. }
  141. static int graphene_lib_mmap(struct file *file, struct vm_area_struct *vma)
  142. {
  143. struct inode *inode = FILE_INODE(file);
  144. if (!inode)
  145. return -EINVAL;
  146. if (!inode->i_fop || !inode->i_fop->mmap)
  147. return -EINVAL;
  148. return inode->i_fop->mmap(file, vma);
  149. }
  150. static int graphene_lib_release(struct inode *inode, struct file *file)
  151. {
  152. if (!inode)
  153. return -EINVAL;
  154. if (!inode->i_fop || !inode->i_fop->release)
  155. return -EINVAL;
  156. return inode->i_fop->release(inode, file);
  157. }
/* Helpers for rendering a struct path into a temporary name buffer. */
#define DEFINE_PATH_BUFFER(kpath, max) char * kpath; int max;
/* Allocate a PATH_MAX-sized buffer from the kernel names cache. */
#define GET_PATH_BUFFER(kpath, max) \
	kpath = __getname(); \
	max = PATH_MAX;
/* Declare + allocate + resolve: dp holds the d_path() result and may be
 * an ERR_PTR; callers must check with IS_ERR() before use. */
#define DEFINE_PATH(dp, path, kpath, max) \
	DEFINE_PATH_BUFFER(kpath, max) \
	char *dp; \
	GET_PATH_BUFFER(kpath, max) \
	dp = d_path(path, kpath, max);
/* Return the buffer obtained via GET_PATH_BUFFER. */
#define PUT_PATH_BUFFER(kpath) __putname(kpath);
/*
 * get_unmapped_area for the sandboxed loader: when the policy supplies a
 * fixed base address (gi_libaddr), mappings of the library are pinned at
 * base + file offset; otherwise the inode's (or mm's) default policy is
 * used.
 */
static unsigned long
graphene_lib_get_area(struct file *file, unsigned long addr, unsigned long len,
		      unsigned long pgoff, unsigned long flags)
{
	struct task_struct *current_tsk = current;
	struct graphene_info *gi = get_graphene_info(current_tsk->graphene);
	struct inode *inode = FILE_INODE(file);
	unsigned long (*get_area) (struct file *, unsigned long, unsigned long,
				   unsigned long, unsigned long);

	if (!inode)
		return -EINVAL;

	/* NOTE(review): gi is dereferenced without a NULL check; these file
	 * ops are only installed by graphene_execve_open() on sandboxed
	 * tasks, so gi should never be NULL here — confirm. */
	if (gi->gi_libaddr) {
		/* These ops are installed on gi_libexec only. */
		if (!path_equal(&file->f_path, &gi->gi_libexec))
			BUG();
		/* Pin the mapping at the configured base + file offset. */
		if (!addr)
			addr = gi->gi_libaddr + pgoff * PAGE_SIZE;
#ifdef CONFIG_GRAPHENE_DEBUG
		{
			DEFINE_PATH(dp, &file->f_path, kpath, max)
			if (!IS_ERR(dp))
				printk(KERN_INFO "Graphene: PID %d MAP FILE %s"
				       " OFF 0x%08lx AT 0x%016lx\n",
				       current->pid, dp,
				       pgoff * PAGE_SIZE, addr);
			PUT_PATH_BUFFER(kpath)
		}
#endif
		return addr;
	}

	/* No fixed address: defer to the inode's or the mm's default. */
	get_area = (inode->i_fop && inode->i_fop->get_unmapped_area) ?
		   inode->i_fop->get_unmapped_area :
		   current_tsk->mm->get_unmapped_area;
	return get_area(file, addr, len, pgoff, flags);
}
/* These are the file operations required for execve.  Installed on the
 * sandbox loader by graphene_execve_open(); every op forwards to the
 * backing inode's own file_operations, except get_unmapped_area, which
 * pins mappings at gi_libaddr. */
static struct file_operations graphene_lib_operations = {
	.llseek			= graphene_lib_llseek,
	.read			= graphene_lib_read,
	.aio_read		= graphene_lib_aio_read,
	.mmap			= graphene_lib_mmap,
	.get_unmapped_area	= graphene_lib_get_area,
	.release		= graphene_lib_release,
};
  211. #ifdef CONFIG_GRAPHENE_DEBUG
  212. static void print_path(const char * fmt, struct path *path)
  213. {
  214. DEFINE_PATH(dp, path, kpath, max)
  215. if (!IS_ERR(dp))
  216. printk(fmt, current->pid, IS_ERR(dp) ? "(unknown)" : dp);
  217. PUT_PATH_BUFFER(kpath)
  218. }
  219. #else
  220. # define print_path(...) do {} while (0)
  221. #endif
/*
 * Hook invoked when execve() opens an executable inside a graphene
 * sandbox: only the registered loader (gi_libexec) may be executed.
 * When a fixed load address is configured, the custom file operations
 * that pin the mapping are installed on the file.
 * Returns 0 to allow, -EPERM to deny.
 */
int graphene_execve_open(struct file *file)
{
	struct task_struct *current_tsk = current;
	struct graphene_info *gi = get_graphene_info(current_tsk->graphene);

	/* This hook must only run in the execve() path. */
	if (!current_tsk->in_execve)
		BUG();

	if (!path_equal(&file->f_path, &gi->gi_libexec)) {
		print_path(KERN_INFO "Graphene: DENY EXEC PID %d PATH %s\n",
			   &file->f_path);
		return -EPERM;
	}

	/* No fixed load address: nothing special to install. */
	if (!gi->gi_libaddr)
		goto accepted;

	file->f_op = &graphene_lib_operations;
accepted:
	print_path(KERN_INFO "Graphene: ALLOW EXEC PID %d PATH %s\n",
		   &file->f_path);
	return 0;
}
  241. static int graphene_check_path(struct graphene_info *gi, int op, u32 mask,
  242. struct path *path, struct graphene_path *gp,
  243. int is_recursive)
  244. {
  245. if (!path_equal(path, &gp->path))
  246. return 0;
  247. if (mask & (MAY_READ|MAY_EXEC|MAY_ACCESS|
  248. AA_MAY_META_READ|AA_EXEC_MMAP|AA_MAY_LINK)) {
  249. if (!(gp->type & GRAPHENE_FS_READ))
  250. return -EPERM;
  251. }
  252. if (mask & (MAY_WRITE|MAY_APPEND|
  253. AA_MAY_CREATE|AA_MAY_DELETE|AA_MAY_META_WRITE|
  254. AA_MAY_CHMOD|AA_MAY_CHOWN)) {
  255. if (!(gp->type & GRAPHENE_FS_WRITE))
  256. return -EPERM;
  257. }
  258. return 1;
  259. }
  260. static int __common_perm(struct graphene_info *gi, int op, struct path *target,
  261. u32 mask)
  262. {
  263. struct graphene_path *p;
  264. struct path root, path = *target;
  265. struct qstr last;
  266. int rv = 0, i;
  267. BUG_ON(!path.dentry);
  268. path_get(&path);
  269. for (i = 0; i < 3 && gi->gi_console[i].mnt; i++)
  270. if (path_equal(target, &gi->gi_console[i]))
  271. goto out;
  272. if (op == OP_OPEN) {
  273. int minor = iminor(path.dentry->d_inode);
  274. if (minor == GRAPHENE_MINOR)
  275. goto out;
  276. if (minor == GIPC_MINOR)
  277. goto out;
  278. }
  279. rcu_read_lock();
  280. list_for_each_entry_rcu(p, &gi->gi_paths, list) {
  281. rv = graphene_check_path(gi, op, mask, &path, p, 0);
  282. if (rv)
  283. goto out;
  284. }
  285. if (gi->gi_libexec.mnt && path_equal(&path, &gi->gi_libexec)) {
  286. rv = 0;
  287. goto out;
  288. }
  289. get_fs_root(current->fs, &root);
  290. last.len = 0;
  291. while (!path_equal(&path, &root)) {
  292. int is_recursive = 0;
  293. list_for_each_entry_rcu(p, &gi->gi_rpaths, list) {
  294. rv = graphene_check_path(gi, op, mask, &path, p,
  295. is_recursive);
  296. if (rv)
  297. goto out_root;
  298. }
  299. last = path.dentry->d_name;
  300. while(1) {
  301. struct dentry *old = path.dentry;
  302. if (path_equal(&path, &root))
  303. break;
  304. if (path.dentry != path.mnt->mnt_root) {
  305. path.dentry = dget_parent(path.dentry);
  306. dput(old);
  307. break;
  308. }
  309. if (!follow_up(&path))
  310. break;
  311. }
  312. is_recursive = 1;
  313. }
  314. rv = -EPERM;
  315. out_root:
  316. path_put(&root);
  317. out:
  318. rcu_read_unlock();
  319. path_put(&path);
  320. if (rv >= 0) {
  321. rv = 0;
  322. print_path(KERN_INFO "Graphene: ALLOW PID %d PATH %s\n",
  323. target);
  324. } else {
  325. print_path(KERN_INFO "Graphene: DENY PID %d PATH %s\n",
  326. target);
  327. }
  328. return rv;
  329. }
  330. int graphene_common_perm(int op, struct path *path, u32 mask)
  331. {
  332. struct graphene_info *gi = get_graphene_info(current->graphene);
  333. if (!gi)
  334. return 0;
  335. return __common_perm(gi, op, path, mask);
  336. }
/*
 * Check a UNIX-domain socket address against the sandbox's abstract-
 * namespace prefix (gi_unix: a leading NUL followed by the generated
 * prefix, terminated with '/').  Only an exact prefix match is allowed.
 */
static int __unix_perm(struct sockaddr *address, int addrlen)
{
	struct graphene_info *gi = get_graphene_info(current->graphene);
	const char * sun_path =
		((struct sockaddr_un *) address)->sun_path;

	/* No prefix configured for this sandbox: deny all UNIX sockets. */
	if (!gi->gi_unix[1])
		return -EPERM;

	/* NOTE(review): compares sizeof(gi->gi_unix) bytes regardless of
	 * addrlen — confirm callers guarantee the address buffer is at
	 * least that large. */
	if (!memcmp(sun_path, gi->gi_unix, sizeof(gi->gi_unix)))
		return 0;
#ifdef CONFIG_GRAPHENE_DEBUG
	printk(KERN_INFO "Graphene: DENY PID %d SOCKET %s\n",
	       current->pid, sun_path);
#endif
	return -EPERM;
}
/*
 * Compare a sockaddr against one policy address/port range.
 * Returns 0 on match, 1 on mismatch.  addr_any/port_any skip the
 * corresponding comparison.  Unrecognized families fall through the
 * switch and "match"; callers filter by family beforehand.
 */
static int net_cmp(int family, bool addr_any, bool port_any,
		   struct graphene_net_addr *ga,
		   struct sockaddr *addr, int addrlen)
{
	switch(family) {
	case AF_INET: {
		struct sockaddr_in *a = (void *) addr;

		if (!addr_any) {
			/* Both sides are in network byte order. */
			if (a->sin_addr.s_addr != ga->addr.sin_addr.s_addr)
				return 1;
		}
		if (!port_any) {
			/* Rule ports are host order; convert the input. */
			unsigned short port = ntohs(a->sin_port);

			if (!(port >= ga->port_begin && port <= ga->port_end))
				return 1;
		}
		break;
	}
#ifdef CONFIG_IPV6
	case AF_INET6: {
		struct sockaddr_in6 *a6 = (void *) addr;

		if (!addr_any) {
			if (memcmp(&a6->sin6_addr, &ga->addr.sin6_addr,
				   sizeof(struct in6_addr)))
				return 1;
		}
		if (!port_any) {
			unsigned short port = ntohs(a6->sin6_port);

			if (!(port >= ga->port_begin && port <= ga->port_end))
				return 1;
		}
		break;
	}
#endif
	}
	return 0;
}
  389. #ifdef CONFIG_GRAPHENE_DEBUG
  390. static void print_net(int allow, int family, int op, struct sockaddr *addr,
  391. int addrlen)
  392. {
  393. const char *allow_str = allow ? "ALLOW" : "DENY";
  394. const char *op_str = "UNKNOWN OP";
  395. switch(op) {
  396. case OP_BIND: op_str = "BIND"; break;
  397. case OP_LISTEN: op_str = "LISTEN"; break;
  398. case OP_CONNECT: op_str = "CONNECT"; break;
  399. case OP_SENDMSG: op_str = "SENDMSG"; break;
  400. case OP_RECVMSG: op_str = "RECVMSG"; break;
  401. }
  402. if (!addr) {
  403. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET\n",
  404. allow_str, op_str, current->pid);
  405. return;
  406. }
  407. switch(family) {
  408. case AF_INET: {
  409. struct sockaddr_in *a = (void *) addr;
  410. u8 *a1 = (u8 *) &a->sin_addr.s_addr;
  411. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  412. "%d.%d.%d.%d:%d\n",
  413. allow_str, op_str, current->pid,
  414. a1[0], a1[1], a1[2], a1[3], ntohs(a->sin_port));
  415. }
  416. break;
  417. #ifdef CONFIG_IPV6
  418. case AF_INET6: {
  419. struct sockaddr_in6 *a = (void *) addr;
  420. u16 *a1 = (u16 *) &a->sin6_addr.s6_addr;
  421. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  422. "[%d.%d.%d.%d:%d:%d:%d:%d]:%d\n",
  423. allow_str, op_str, current->pid,
  424. a1[0], a1[1], a1[2], a1[3],
  425. a1[4], a1[5], a1[6], a1[7], ntohs(a->sin6_port));
  426. }
  427. break;
  428. #endif
  429. }
  430. }
  431. #else
  432. # define print_net(...) do {} while (0)
  433. #endif
  434. /*
  435. * network rules:
  436. * bind:
  437. * input addr/port match bind addr/port
  438. * listen:
  439. * always allow
  440. * connect:
  441. * input addr/port match peer addr/port
  442. * sendmsg:
  443. * EITHER stream socket OR no input addr/port OR
  444. * input addr/port match peer addr/port
  445. * recvmsg:
  446. * EITHER stream socket OR connected
  447. */
/*
 * Check a non-UNIX socket operation against the network policy (see the
 * rule summary comment above).  bind matches gi_binds; connect/sendmsg
 * match gi_peers; listen/recvmsg are allowed without an address match.
 * Returns 0 to allow, -EPERM to deny.
 */
static
int __common_net_perm(struct graphene_info *gi, int op, struct socket *sock,
		      struct sockaddr *address, int addrlen)
{
	struct sock *sk = sock->sk;
	struct list_head *head;
	struct graphene_net *gn;

	/* Only TCP/UDP-style sockets are permitted at all. */
	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_DGRAM)
		return -EPERM;

#ifdef CONFIG_IPV6
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
#else
	if (sk->sk_family != AF_INET)
#endif
		return -EPERM;

	switch(op) {
	case OP_BIND:
		head = &gi->gi_binds;
		break;
	case OP_CONNECT:
	case OP_SENDMSG:
		head = &gi->gi_peers;
		break;
	default:
		/* listen/recvmsg: nothing further to match. */
		print_net(1, sk->sk_family, op, address, addrlen);
		return 0;
	}

	BUG_ON(!address);

	if (list_empty(head))
		goto no_rules;

	/* First rule matching family + address + port range wins. */
	list_for_each_entry(gn, head, list) {
		if (gn->family != sk->sk_family)
			continue;
		if (net_cmp(sk->sk_family,
			    gn->flags & ADDR_ANY, gn->flags & PORT_ANY,
			    &gn->addr, address, addrlen))
			continue;
		print_net(1, sk->sk_family, op, address, addrlen);
		return 0;
	}

no_rules:
	/* The configured multicast port is implicitly allowed for IPv4;
	 * gi_mcast_port is stored in network byte order (see setup). */
	if (gi->gi_mcast_port && sk->sk_family == AF_INET &&
	    ((struct sockaddr_in *) address)->sin_port == gi->gi_mcast_port) {
		print_net(1, AF_INET, op, address, addrlen);
		return 0;
	}

	print_net(0, sk->sk_family, op, address, addrlen);
	return -EPERM;
}
  497. int graphene_socket_bind(struct socket *sock,
  498. struct sockaddr *address, int addrlen)
  499. {
  500. if (GRAPHENE_ENABLED()) {
  501. struct graphene_info *gi = get_graphene_info(current->graphene);
  502. if (!sock || !sock->sk)
  503. return 0;
  504. if (sock->sk->sk_family == PF_UNIX) {
  505. if (sock->sk->sk_type != SOCK_STREAM)
  506. return -EPERM;
  507. return __unix_perm(address, addrlen);
  508. }
  509. return __common_net_perm(gi, OP_BIND, sock, address, addrlen);
  510. }
  511. return 0;
  512. }
  513. int graphene_socket_listen(struct socket *sock, int backlog)
  514. {
  515. if (GRAPHENE_ENABLED()) {
  516. struct graphene_info *gi = get_graphene_info(current->graphene);
  517. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  518. return 0;
  519. return __common_net_perm(gi, OP_LISTEN, sock, NULL, 0);
  520. }
  521. return 0;
  522. }
  523. int graphene_socket_connect(struct socket *sock,
  524. struct sockaddr *address, int addrlen)
  525. {
  526. if (GRAPHENE_ENABLED()) {
  527. struct graphene_info *gi = get_graphene_info(current->graphene);
  528. if (!sock || !sock->sk)
  529. return 0;
  530. if (sock->sk->sk_family == PF_UNIX) {
  531. if (sock->sk->sk_type != SOCK_STREAM)
  532. return -EPERM;
  533. return __unix_perm(address, addrlen);
  534. }
  535. return __common_net_perm(gi, OP_CONNECT, sock, address,
  536. addrlen);
  537. }
  538. return 0;
  539. }
  540. int graphene_socket_sendmsg(struct socket *sock,
  541. struct msghdr *msg, int size)
  542. {
  543. if (GRAPHENE_ENABLED()) {
  544. struct graphene_info *gi = get_graphene_info(current->graphene);
  545. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  546. return 0;
  547. if (sock->sk->sk_type == SOCK_STREAM)
  548. return 0;
  549. if (!msg->msg_name)
  550. return 0;
  551. return __common_net_perm(gi, OP_SENDMSG, sock,
  552. msg->msg_name, msg->msg_namelen);
  553. }
  554. return 0;
  555. }
  556. int graphene_socket_recvmsg(struct socket *sock,
  557. struct msghdr *msg, int size, int flags)
  558. {
  559. if (GRAPHENE_ENABLED()) {
  560. struct graphene_info *gi = get_graphene_info(current->graphene);
  561. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  562. return 0;
  563. if (sock->sk->sk_type == SOCK_STREAM)
  564. return 0;
  565. return __common_net_perm(gi, OP_RECVMSG, sock, NULL, 0);
  566. }
  567. return 0;
  568. }
  569. int graphene_task_kill(struct task_struct *tsk, struct siginfo *info,
  570. int sig, u32 secid)
  571. {
  572. struct task_struct *current_tsk = current;
  573. if (!current_tsk->graphene)
  574. return 0;
  575. if (sig != SIGCONT)
  576. return -EPERM;
  577. return (tsk->tgid == current_tsk->tgid) ? 0 : -EPERM;
  578. }
  579. static void get_console(struct graphene_info *gi, struct files_struct *files)
  580. {
  581. struct fdtable *fdt;
  582. unsigned long set;
  583. int fd = 0, n = 0;
  584. rcu_read_lock();
  585. fdt = files_fdtable(files);
  586. rcu_read_unlock();
  587. #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
  588. set = fdt->open_fds[0];
  589. #else
  590. set = fdt->open_fds->fds_bits[0];
  591. #endif
  592. for (; fd < 3 && fd < fdt->max_fds && set ; fd++, set >>= 1) {
  593. struct file *file;
  594. if (!(set & 1))
  595. continue;
  596. file = ACCESS_ONCE(fdt->fd[fd]);
  597. if (!file)
  598. continue;
  599. path_get(&file->f_path);
  600. gi->gi_console[n++] = file->f_path;
  601. #ifdef CONFIG_GRAPHENE_DEBUG
  602. {
  603. DEFINE_PATH(dp, &file->f_path, kpath, max)
  604. if (!IS_ERR(dp))
  605. printk(KERN_INFO "Graphene: "
  606. "PID %d CONSOLE %s\n",
  607. current->pid, dp);
  608. PUT_PATH_BUFFER(kpath)
  609. }
  610. #endif
  611. }
  612. for ( ; n < 3 ; n++)
  613. gi->gi_console[n].mnt = NULL;
  614. }
  615. static int update_graphene(struct task_struct *current_tsk,
  616. struct graphene_info *gi);
#ifdef CONFIG_GRAPHENE_DEBUG
/*
 * Format one network rule as "<addr>:<port[-port]>" (either part may be
 * the literal "ANY") and log it.  @fmt must contain "%d ... %s" slots
 * for the pid and the formatted rule string.
 */
static void print_net_rule(const char *fmt, struct graphene_net *n)
{
# ifdef CONFIG_IPV6
# define ADDR_STR_MAX 128
# else
# define ADDR_STR_MAX 48
# endif
	char str[ADDR_STR_MAX];
	int len = 0;

	if (n->flags & ADDR_ANY) {
		str[len++] = 'A';
		str[len++] = 'N';
		str[len++] = 'Y';
		str[len++] = ':';
	} else {
		switch(n->family) {
		case AF_INET: {
			u8 *ip = (u8 *) &n->addr.addr.sin_addr.s_addr;
			len += snprintf(str + len,
					ADDR_STR_MAX - len,
					"%u.%u.%u.%u:",
					ip[0], ip[1], ip[2], ip[3]);
		}
		break;
#ifdef CONFIG_IPV6
		case AF_INET6: {
			u16 *ip = (u16 *) &n->addr.addr.sin6_addr.s6_addr;
			len += snprintf(str + len,
					ADDR_STR_MAX - len,
					"[%u:%u:%u:%u:%u:%u:%u:%u]:",
					ip[0], ip[1], ip[2], ip[3],
					ip[4], ip[5], ip[6], ip[7]);
		}
		break;
#endif /* CONFIG_IPV6 */
		}
	}

	if (n->flags & PORT_ANY) {
		str[len++] = 'A';
		str[len++] = 'N';
		str[len++] = 'Y';
	} else {
		if (n->addr.port_begin == n->addr.port_end)
			len += snprintf(str + len, ADDR_STR_MAX - len,
					"%u", n->addr.port_begin);
		else
			len += snprintf(str + len, ADDR_STR_MAX - len,
					"%u-%u",
					n->addr.port_begin, n->addr.port_end);
	}

	/* The buffer is sized so a well-formed rule always fits. */
	BUG_ON(len >= ADDR_STR_MAX);
	str[len] = 0;
	printk(fmt, current->pid, str);
}
#else
# define print_net_rule(...) do {} while (0)
#endif
/*
 * Translate one user-supplied graphene_net_rule into a graphene_net
 * entry on gi_binds (@bind true) or gi_peers.  Derives the ADDR_ANY /
 * PORT_ANY wildcard flags from an all-zero address and the full
 * 0-65535 port range.  Returns 0 or a negative errno.
 */
static int set_net_rule(struct graphene_net_rule *nr, struct graphene_info *gi,
			bool bind)
{
	struct graphene_net *n;

#ifdef CONFIG_IPV6
	if (nr->family != AF_INET && nr->family != AF_INET6)
#else
	if (nr->family != AF_INET)
#endif
		return -EINVAL;

	n = kmalloc(sizeof(struct graphene_net), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->family = nr->family;
	n->flags = 0;
	n->addr = nr->addr;

	switch(n->family) {
	case AF_INET:
		/* INADDR_ANY acts as an address wildcard. */
		if (!n->addr.addr.sin_addr.s_addr)
			n->flags |= ADDR_ANY;
		break;
#ifdef CONFIG_IPV6
	case AF_INET6:
		if (!memcmp(&n->addr.addr.sin6_addr.s6_addr, &in6addr_any, 16))
			n->flags |= ADDR_ANY;
		break;
#endif /* CONFIG_IPV6 */
	}

	if (n->addr.port_begin == 0 && n->addr.port_end == 65535)
		n->flags |= PORT_ANY;

	INIT_LIST_HEAD(&n->list);
	if (bind) {
		list_add_tail(&n->list, &gi->gi_binds);
		print_net_rule(KERN_INFO "Graphene: PID %d NET BIND %s\n", n);
	} else {
		list_add_tail(&n->list, &gi->gi_peers);
		print_net_rule(KERN_INFO "Graphene: PID %d NET PEER %s\n", n);
	}
	return 0;
}
  715. u64 gipc_get_session(struct task_struct *tsk)
  716. {
  717. struct graphene_info *gi = get_graphene_info(tsk->graphene);
  718. return gi ? gi->gi_gipc_session : 0;
  719. }
  720. int set_graphene(struct task_struct *current_tsk,
  721. const struct graphene_policies __user *gpolicies)
  722. {
  723. int npolicies;
  724. const struct graphene_user_policy __user *policies = gpolicies->policies;
  725. struct graphene_info *gi;
  726. struct graphene_user_policy ptmp;
  727. struct graphene_path *p;
  728. int i, rv = 0;
  729. DEFINE_PATH_BUFFER(kpath, max)
  730. #ifdef CONFIG_GRAPHENE_DEBUG
  731. char *dp;
  732. #endif
  733. rv = copy_from_user(&npolicies, &gpolicies->npolicies, sizeof(int));
  734. if (rv)
  735. return -EFAULT;
  736. if (npolicies && !policies)
  737. return -EINVAL;
  738. #ifndef CONFIG_GRAPHENE_ISOLATE
  739. if (current_tsk->graphene)
  740. return -EAGAIN;
  741. if (current_tsk != current_tsk->group_leader)
  742. return -EPERM;
  743. #endif
  744. gi = kmalloc(sizeof(struct graphene_info), GFP_KERNEL);
  745. if (!gi)
  746. return -ENOMEM;
  747. GET_PATH_BUFFER(kpath, max)
  748. memset(gi, 0, sizeof(struct graphene_info));
  749. INIT_LIST_HEAD(&gi->gi_paths);
  750. INIT_LIST_HEAD(&gi->gi_rpaths);
  751. INIT_LIST_HEAD(&gi->gi_binds);
  752. INIT_LIST_HEAD(&gi->gi_peers);
  753. gi->gi_gipc_session = atomic64_inc_return(&gipc_session);
  754. #ifdef CONFIG_GRAPHENE_DEBUG
  755. printk(KERN_INFO "Graphene: PID %d GIPC SESSION %llu\n",
  756. current_tsk->pid, gi->gi_gipc_session);
  757. #endif
  758. for (i = 0 ; i < npolicies ; i++) {
  759. int type, flags;
  760. rv = copy_from_user(&ptmp, policies + i,
  761. sizeof(struct graphene_user_policy));
  762. if (rv) {
  763. rv = -EFAULT;
  764. goto err;
  765. }
  766. if (!ptmp.value) {
  767. rv = -EINVAL;
  768. goto err;
  769. }
  770. type = ptmp.type & GRAPHENE_POLICY_TYPES;
  771. flags = ptmp.type & ~type;
  772. switch(type) {
  773. case GRAPHENE_LIB_NAME:
  774. rv = strncpy_from_user(kpath, ptmp.value, max);
  775. if (rv < 0)
  776. goto err;
  777. rv = kern_path(kpath, LOOKUP_FOLLOW, &gi->gi_libexec);
  778. if (rv)
  779. goto err;
  780. #ifdef CONFIG_GRAPHENE_DEBUG
  781. dp = d_path(&gi->gi_libexec, kpath, max);
  782. if (IS_ERR(dp)) {
  783. rv = -EINVAL;
  784. goto err;
  785. }
  786. printk(KERN_INFO "Graphene: PID %d LIB NAME %s\n",
  787. current_tsk->pid, dp);
  788. #endif
  789. break;
  790. case GRAPHENE_LIB_ADDR:
  791. gi->gi_libaddr = (u64) ptmp.value;
  792. #ifdef CONFIG_GRAPHENE_DEBUG
  793. printk(KERN_INFO "Graphene: PID %d LIB ADDR 0x%016llx\n",
  794. current_tsk->pid, gi->gi_libaddr);
  795. #endif
  796. break;
  797. case GRAPHENE_UNIX_PREFIX: {
  798. unsigned long token =
  799. atomic64_inc_return(&unix_prefix_counter);
  800. gi->gi_unix[0] = '\0';
  801. snprintf(gi->gi_unix + 1, sizeof(gi->gi_unix) - 1,
  802. GRAPHENE_UNIX_PREFIX_FMT, token);
  803. gi->gi_unix[sizeof(gi->gi_unix) - 1] = '/';
  804. rv = copy_to_user((void *) ptmp.value, &token,
  805. sizeof(unsigned long));
  806. if (rv) {
  807. rv = -EFAULT;
  808. goto err;
  809. }
  810. #ifdef CONFIG_GRAPHENE_DEBUG
  811. printk(KERN_INFO "Graphene: PID %d UNIX PREFIX %s\n",
  812. current_tsk->pid, kpath);
  813. #endif
  814. break;
  815. }
  816. case GRAPHENE_MCAST_PORT: {
  817. struct socket *sock;
  818. struct sock *sk;
  819. struct inet_sock *inet;
  820. struct file *file;
  821. unsigned short port;
  822. rv = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
  823. if (rv)
  824. goto err;
  825. file = sock_alloc_file(sock, 0, NULL);
  826. if (unlikely(IS_ERR(file))) {
  827. sock_release(sock);
  828. rv = PTR_ERR(file);
  829. goto err;
  830. }
  831. sk = sock->sk;
  832. lock_sock(sk);
  833. inet = inet_sk(sk);
  834. sk->sk_reuse = SK_CAN_REUSE;
  835. if (sk->sk_prot->get_port(sk, 0)) {
  836. release_sock(sk);
  837. sock_release(sock);
  838. rv = -EAGAIN;
  839. goto err;
  840. }
  841. port = inet->inet_sport = htons(inet->inet_num);
  842. release_sock(sk);
  843. gi->gi_mcast_port = port;
  844. gi->gi_mcast_sock = file;
  845. port = ntohs(port);
  846. rv = copy_to_user((void *) ptmp.value, &port,
  847. sizeof(unsigned short));
  848. if (rv) {
  849. rv = -EFAULT;
  850. goto err;
  851. }
  852. #ifdef CONFIG_GRAPHENE_DEBUG
  853. printk(KERN_INFO "Graphene: PID %d MCAST PORT %d\n",
  854. current_tsk->pid, port);
  855. #endif
  856. break;
  857. }
  858. case GRAPHENE_NET_RULE: {
  859. struct graphene_net_rule nr;
  860. rv = copy_from_user(&nr, ptmp.value,
  861. sizeof(struct graphene_net_rule));
  862. if (rv) {
  863. rv = -EFAULT;
  864. goto err;
  865. }
  866. rv = set_net_rule(&nr, gi, flags & GRAPHENE_NET_BIND);
  867. if (rv < 0)
  868. goto err;
  869. break;
  870. }
  871. case GRAPHENE_FS_PATH:
  872. rv = strncpy_from_user(kpath, ptmp.value, max);
  873. if (rv < 0)
  874. goto err;
  875. p = kmalloc(sizeof(struct graphene_path),
  876. GFP_KERNEL);
  877. if (!p) {
  878. rv = -ENOMEM;
  879. goto err;
  880. }
  881. rv = kern_path(kpath, LOOKUP_FOLLOW, &p->path);
  882. if (rv) {
  883. kfree(p);
  884. goto err;
  885. }
  886. #ifdef CONFIG_GRAPHENE_DEBUG
  887. dp = d_path(&p->path, kpath, max);
  888. if (IS_ERR(dp)) {
  889. rv = -EINVAL;
  890. kfree(p);
  891. goto err;
  892. }
  893. printk(KERN_INFO "Graphene: PID %d PATH %s%s\n",
  894. current_tsk->pid, dp,
  895. type == GRAPHENE_FS_PATH ? "" :
  896. " (recursive)");
  897. #endif
  898. p->type = flags;
  899. INIT_LIST_HEAD(&p->list);
  900. list_add_tail(&p->list,
  901. (flags & GRAPHENE_FS_RECURSIVE) ?
  902. &gi->gi_rpaths : &gi->gi_paths);
  903. break;
  904. }
  905. }
  906. if (!current_tsk->graphene) {
  907. struct graphene_struct *gs;
  908. get_console(gi, current_tsk->files);
  909. gs = kmalloc(sizeof(struct graphene_struct), GFP_KERNEL);
  910. if (!gs) {
  911. rv = -ENOMEM;
  912. goto err;
  913. }
  914. atomic_set(&gs->g_count, 1);
  915. gs->g_info = gi;
  916. spin_lock_init(&gs->g_lock);
  917. current_tsk->graphene = gs;
  918. printk(KERN_INFO "Graphene: PID %d registered\n",
  919. current_tsk->pid);
  920. }
  921. #ifdef CONFIG_GRAPHENE_ISOLATE
  922. else {
  923. if ((rv = update_graphene(current_tsk, gi)) < 0) {
  924. printk(KERN_INFO "Graphene: PID %d cannot be updated (%d)\n",
  925. current_tsk->pid, rv);
  926. goto err;
  927. }
  928. printk(KERN_INFO "Graphene: PID %d updated\n",
  929. current_tsk->pid);
  930. }
  931. #endif
  932. rv = 0;
  933. goto out;
  934. err:
  935. drop_graphene_info(gi);
  936. out:
  937. PUT_PATH_BUFFER(kpath)
  938. return rv;
  939. }
  940. #ifdef CONFIG_GRAPHENE_ISOLATE
/*
 * Decide whether an already-open socket may survive a policy change.
 *
 * @gi:         the (new) policy to check against
 * @sock:       the socket attached to an open fd
 * @close_unix: non-zero if PF_UNIX sockets must be closed
 *
 * Returns 0 if the socket may stay open, -EPERM (or another negative
 * errno from getname/__common_net_perm) if it must be closed.
 */
static int do_close_sock(struct graphene_info *gi, struct socket *sock,
			 int close_unix)
{
	struct sock *sk = sock->sk;
	struct sockaddr_storage address;
	struct sockaddr *addr = (void *) &address;
	struct inet_sock *inet;
	int len, err;

	if (!sk)
		return 0;

	/* PF_UNIX sockets: denied wholesale when close_unix is set. */
	if (sk->sk_family == PF_UNIX)
		return close_unix ? -EPERM : 0;

	inet = inet_sk(sk);

	/* Connected socket: try to match the remote peer first.
	 * NOTE: pre-4.17 getname() API — @len is an out parameter. */
	if (inet->inet_dport) {
		err = sock->ops->getname(sock, addr, &len, 1);
		if (err)
			return err;
		/* give it a chance, check if it match one of the peers */
		err = __common_net_perm(gi, OP_CONNECT, sock, addr, len);
		if (!err)
			return 0;
	}

	/* Not bound locally either: nothing left to check. */
	if (!inet->inet_num)
		return 0;

	/* Bound socket: the local address must match a bind rule. */
	err = sock->ops->getname(sock, addr, &len, 0);
	if (err)
		return err;

	return __common_net_perm(gi, OP_BIND, sock, addr, len);
}
/*
 * Walk every open file descriptor in @files and close those no longer
 * permitted under policy @gi: pipes are always closed, sockets are
 * validated via do_close_sock(), and anything else is re-checked with
 * __common_perm() against its mapped access mode.  Always returns 0.
 */
static int do_close_fds(struct graphene_info *gi, struct files_struct *files,
			int close_unix)
{
	struct fdtable *fdt;
	int fd, i = 0;

	/*
	 * NOTE(review): the rcu read-side section ends immediately, yet
	 * @fdt is dereferenced (and even written via xchg) throughout the
	 * loop below — verify the caller guarantees the fd table cannot
	 * be resized or freed concurrently at this point.
	 */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();

	for (;;) {
		unsigned long set;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		fd = i * BITS_PER_LONG;
#else
		fd = i * __NFDBITS;
#endif
		if (fd >= fdt->max_fds)
			break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		set = fdt->open_fds[i++];
#else
		set = fdt->open_fds->fds_bits[i++];
#endif

		/* Scan one word of the open-fd bitmap, lowest bit first. */
		for ( ; set ; fd++, set >>= 1) {
			struct socket *sock = NULL;
			struct file *file;
			int err;

			if (!(set & 1))
				continue;

			/* Temporarily detach the file from its slot. */
			file = xchg(&fdt->fd[fd], NULL);
			if (!file)
				continue;

			/* Pipes never survive a policy update. */
			if (get_pipe_info(file))
				goto deny;

			sock = sock_from_file(file, &err);
			if (sock) {
				err = do_close_sock(gi, sock, close_unix);
				if (!err)
					goto allow;
				goto deny;
			}

			/* Regular file: re-validate against the path rules. */
			err = __common_perm(gi, OP_OPEN, &file->f_path,
					    aa_map_file_to_perms(file));
			if (err)
				goto deny;

allow:
			/* Allowed: put the file back into its slot. */
			xchg(&fdt->fd[fd], file);
			continue;
deny:
			/* Denied: close it (slot was already cleared). */
			filp_close(file, files);
			cond_resched();
		}
	}
	return 0;
}
  1024. static
  1025. int net_check (int family,
  1026. int flags1, struct graphene_net_addr * addr1,
  1027. int flags2, struct graphene_net_addr * addr2)
  1028. {
  1029. if (flags2 & ADDR_ANY)
  1030. goto port;
  1031. if (flags1 & ADDR_ANY)
  1032. goto port;
  1033. switch (family) {
  1034. case AF_INET:
  1035. if (memcmp(&addr1->addr.sin_addr,
  1036. &addr2->addr.sin_addr,
  1037. sizeof(struct in_addr)))
  1038. return -EPERM;
  1039. break;
  1040. case AF_INET6:
  1041. if (memcmp(&addr1->addr.sin6_addr,
  1042. &addr2->addr.sin6_addr,
  1043. sizeof(struct in6_addr)))
  1044. return -EPERM;
  1045. break;
  1046. }
  1047. port:
  1048. if (flags2 & PORT_ANY)
  1049. return 0;
  1050. if (flags1 & PORT_ANY)
  1051. return 0;
  1052. if (addr1->port_begin < addr2->port_begin ||
  1053. addr1->port_end > addr2->port_end)
  1054. return -EPERM;
  1055. return 0;
  1056. }
/*
 * Return 1 if @files already contains a connected socket of family
 * @n->family whose peer address matches rule @n, 0 otherwise.  This
 * lets a CONNECT rule be accepted when the matching connection is
 * already established.
 */
static int net_check_fds(struct graphene_net *n, struct files_struct *files)
{
	struct fdtable *fdt;
	int fd, i = 0;

	/* The whole fd-table walk runs under the rcu read lock. */
	rcu_read_lock();
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		fd = i * BITS_PER_LONG;
#else
		fd = i * __NFDBITS;
#endif
		if (fd >= fdt->max_fds)
			break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		set = fdt->open_fds[i++];
#else
		set = fdt->open_fds->fds_bits[i++];
#endif

		/* Scan one word of the open-fd bitmap, lowest bit first. */
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			struct socket *sock;
			struct sock *sk;
			struct inet_sock *inet;
			struct sockaddr_storage address;
			struct sockaddr *addr = (void *) &address;
			int len, err;

			if (!(set & 1))
				continue;

			file = rcu_dereference_raw(fdt->fd[fd]);
			if (!file)
				continue;

			/* Only sockets of the rule's family are relevant. */
			sock = sock_from_file(file, &err);
			if (!sock)
				continue;

			if (!(sk = sock->sk) || sk->sk_family != n->family)
				continue;

			/* Skip sockets that are not connected. */
			inet = inet_sk(sk);
			if (!inet->inet_dport)
				continue;

			/* NOTE: pre-4.17 getname() API — @len is an out
			 * parameter; '1' requests the peer address. */
			err = sock->ops->getname(sock, addr, &len, 1);
			if (err)
				continue;

			/* net_cmp() == 0 means the peer matches the rule. */
			if (!net_cmp(n->family, false, false,
				     &n->addr, addr, len)) {
				rcu_read_unlock();
				return 1;
			}
		}
	}
	rcu_read_unlock();
	return 0;
}
/*
 * Replace the calling task's policy with @new, after verifying that
 * @new does not grant anything the currently installed policy @gi does
 * not already grant:
 *   - console paths are inherited from @gi (with extra path refs);
 *   - every path rule in @new must pass __common_perm() under @gi;
 *   - every bind rule in @new must fit inside some bind rule of @gi;
 *   - every connect rule in @new must fit inside some peer rule of @gi,
 *     or (if fully specific) match an already-established connection;
 *   - unix prefix, mcast port and mcast socket are inherited when @new
 *     does not define its own.
 * On success the task's g_info is swapped to @new and fds the new
 * policy no longer permits are closed.  Returns 0 or -EPERM.
 */
static int update_graphene(struct task_struct *current_tsk,
			   struct graphene_info *new)
{
	struct graphene_struct *gs = current_tsk->graphene;
	struct graphene_info *gi = get_graphene_info(gs);
	struct graphene_path *p;
	struct graphene_net *n1, *n2;
	/* NOTE(review): close_unix is never set non-zero in this function,
	 * so do_close_fds() below always tolerates PF_UNIX sockets. */
	int i = 0, close_unix = 0;

	/* Carry the (up to 3) console paths over into the new policy. */
	for (i = 0 ; i < 3 && gi->gi_console[i].mnt ; i++) {
		path_get(&gi->gi_console[i]);
		new->gi_console[i] = gi->gi_console[i];
	}

	/* Each new path rule must already be allowed by the old policy. */
	list_for_each_entry(p, &new->gi_paths, list) {
		u32 mask = 0;
		if (p->type & GRAPHENE_FS_READ)
			mask |= MAY_READ;
		if (p->type & GRAPHENE_FS_WRITE)
			mask |= MAY_WRITE;
		print_path(KERN_INFO "Graphene: PID %d CHECK RULE %s\n",
			   &p->path);
		if (__common_perm(gi, OP_OPEN, &p->path, mask) < 0)
			return -EPERM;
	}

	/* Each new bind rule must fit inside an old bind rule. */
	list_for_each_entry(n1, &new->gi_binds, list) {
		bool accepted = false;
		print_net_rule(KERN_INFO
			       "Graphene: PID %d CHECK RULE BIND %s\n",
			       n1);

		list_for_each_entry(n2, &gi->gi_binds, list) {
			if (n1->family != n2->family)
				continue;

			if (net_check(n1->family,
				      n1->flags, &n1->addr,
				      n2->flags, &n2->addr) < 0)
				continue;

			accepted = true;
			print_net_rule(KERN_INFO
				       "Graphene: PID %d ALLOW BIND %s\n",
				       n1);
			break;
		}

		if (!accepted) {
			print_net_rule(KERN_INFO
				       "Graphene: PID %d DENY BIND %s\n",
				       n1);
			return -EPERM;
		}
	}

	/* Each new connect rule must fit inside an old peer rule, or
	 * correspond to a connection that is already established. */
	list_for_each_entry(n1, &new->gi_peers, list) {
		bool accepted = false;
		print_net_rule(KERN_INFO
			       "Graphene: PID %d CHECK RULE CONNECT %s\n",
			       n1);

		list_for_each_entry(n2, &gi->gi_peers, list) {
			if (n1->family != n2->family)
				continue;

			if (net_check(n1->family,
				      n1->flags, &n1->addr,
				      n2->flags, &n2->addr) < 0)
				continue;

			accepted = true;
			print_net_rule(KERN_INFO
				       "Graphene: PID %d ALLOW CONNECT %s\n",
				       n1);
			break;
		}

		/* Fully specific rule (no wildcards): also accept it if a
		 * matching connection already exists among the open fds. */
		if (!accepted && !(n1->flags & (ADDR_ANY|PORT_ANY)) &&
		    net_check_fds(n1, current_tsk->files))
			accepted = true;

		if (!accepted) {
			print_net_rule(KERN_INFO
				       "Graphene: PID %d DENY CONNECT %s\n",
				       n1);
			return -EPERM;
		}
	}

	/* Inherit unix prefix / mcast port / mcast socket when unset. */
	if (!new->gi_unix[1] && gi->gi_unix[1])
		memcpy(new->gi_unix, gi->gi_unix, sizeof(gi->gi_unix));

	if (!new->gi_mcast_port)
		new->gi_mcast_port = gi->gi_mcast_port;

	if (!new->gi_mcast_sock && gi->gi_mcast_sock) {
		/* Take an extra reference on the shared socket file. */
		atomic_long_inc(&gi->gi_mcast_sock->f_count);
		new->gi_mcast_sock = gi->gi_mcast_sock;
	}

	/* Swap the new policy in under the task's policy lock. */
	spin_lock(&gs->g_lock);
	put_graphene_info(gs->g_info);
	gs->g_info = new;
	spin_unlock(&gs->g_lock);

	/* Close any fds the new policy no longer permits. */
	do_close_fds(new, current_tsk->files, close_unix);
	return 0;
}
  1202. #endif /* CONFIG_GRAPHENE_ISOLATE */
  1203. static long graphene_ioctl(struct file *file, unsigned int cmd,
  1204. unsigned long arg)
  1205. {
  1206. struct task_struct *current_tsk = current;
  1207. switch (cmd) {
  1208. case GRAPHENE_SET_TASK:
  1209. return set_graphene(current_tsk,
  1210. (const struct graphene_policies __user *) arg);
  1211. default:
  1212. return -ENOSYS;
  1213. }
  1214. }
  1215. static struct file_operations graphene_operations = {
  1216. .unlocked_ioctl = graphene_ioctl,
  1217. .compat_ioctl = graphene_ioctl,
  1218. .llseek = noop_llseek,
  1219. };
/* Misc character-device descriptor for /dev/graphene. */
static struct miscdevice graphene_dev = {
	.minor = GRAPHENE_MINOR,	/* fixed minor under the misc major */
	.name = "graphene",		/* device node: /dev/graphene */
	.fops = &graphene_operations,
	.mode = 0666,			/* world r/w: any task may sandbox itself */
};
  1226. static int __init graphene_init(void)
  1227. {
  1228. int rv;
  1229. rv = misc_register(&graphene_dev);
  1230. if (rv) {
  1231. printk(KERN_ERR "Graphene error: "
  1232. "failed to add a char device (rv=%d)\n", rv);
  1233. return rv;
  1234. }
  1235. return 0;
  1236. }
  1237. device_initcall(graphene_init);