graphene.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529
  1. /*
  2. * linux/graphene/graphene.c
  3. *
  4. * Copyright (C) 2013-, Chia-Che Tsai, Bhushan Jain and Donald Porter
  5. *
  6. * Manage the graphene information and security policies.
  7. */
  8. #include <linux/version.h>
  9. #include <linux/slab.h>
  10. #include <linux/sched.h>
  11. #include <linux/fs.h>
  12. #include <linux/file.h>
  13. #include <linux/fs_struct.h>
  14. #include <linux/fdtable.h>
  15. #include <linux/namei.h>
  16. #include <linux/dcache.h>
  17. #include <linux/mount.h>
  18. #include <linux/rcupdate.h>
  19. #include <linux/uaccess.h>
  20. #include <linux/un.h>
  21. #include <linux/net.h>
  22. #include <linux/atomic.h>
  23. #include <net/sock.h>
  24. #include <net/inet_sock.h>
  25. #include <net/tcp_states.h>
  26. #include <linux/pipe_fs_i.h>
  27. #include <../fs/internal.h>
  28. #include <../security/apparmor/include/audit.h>
  29. #include "graphene.h"
  30. #include "graphene-ipc.h"
  31. static atomic64_t unix_prefix_counter = ATOMIC64_INIT(1);
  32. static atomic64_t gipc_session = ATOMIC64_INIT(1);;
  33. int dup_graphene_struct(struct task_struct *tsk)
  34. {
  35. struct graphene_struct *gs, *new;
  36. struct graphene_info *gi;
  37. if (!(tsk->graphene))
  38. return 0;
  39. if (tsk->group_leader != tsk) {
  40. atomic_inc(&tsk->graphene->g_count);
  41. return 0;
  42. }
  43. gs = tsk->graphene;
  44. new = kmalloc(sizeof(struct graphene_struct), GFP_KERNEL);
  45. if (!new)
  46. return -ENOMEM;
  47. spin_lock(&gs->g_lock);
  48. gi = gs->g_info;
  49. atomic_inc(&gi->gi_count);
  50. new->g_info = gi;
  51. spin_unlock(&gs->g_lock);
  52. atomic_set(&new->g_count, 1);
  53. spin_lock_init(&new->g_lock);
  54. tsk->graphene = new;
  55. return 0;
  56. }
  57. static void drop_graphene_info(struct graphene_info *info)
  58. {
  59. struct graphene_path *p, *n;
  60. int i;
  61. list_for_each_entry_safe(p, n, &info->gi_paths, list) {
  62. path_put(&p->path);
  63. kfree(p);
  64. }
  65. list_for_each_entry_safe(p, n, &info->gi_rpaths, list) {
  66. path_put(&p->path);
  67. kfree(p);
  68. }
  69. if (info->gi_libexec.dentry)
  70. path_put(&info->gi_libexec);
  71. for (i = 0 ; i < 3 && info->gi_console[i].mnt ; i++)
  72. path_put(&info->gi_console[i]);
  73. if (info->gi_mcast_sock)
  74. fput(info->gi_mcast_sock);
  75. kfree(info);
  76. }
  77. static void put_graphene_info(struct graphene_info *info)
  78. {
  79. if (!atomic_dec_return(&info->gi_count))
  80. drop_graphene_info(info);
  81. }
  82. void put_graphene_struct(struct task_struct *tsk)
  83. {
  84. struct graphene_struct *gs = tsk->graphene;
  85. if (gs) {
  86. tsk->graphene = NULL;
  87. if (atomic_dec_return(&gs->g_count))
  88. return;
  89. put_graphene_info(gs->g_info);
  90. kfree(gs);
  91. }
  92. }
/*
 * Fetch the current graphene_info of @gs (NULL if the task is not
 * sandboxed).
 *
 * The pointer is read under RCU; the lockdep expression accepts the
 * g_lock holder, a single-reference struct, or a single-threaded group
 * as legitimate lock-free readers.
 *
 * NOTE(review): the pointer is returned after rcu_read_unlock() without
 * taking a reference — callers rely on the task's own reference keeping
 * the info alive; confirm no concurrent policy update can free it.
 */
static inline
struct graphene_info *get_graphene_info(struct graphene_struct *gs)
{
	struct graphene_info *info;
	if (!gs)
		return NULL;
	rcu_read_lock();
	info = rcu_dereference_check(gs->g_info,
			lockdep_is_held(&gs->g_lock) ||
			atomic_read(&gs->g_count) == 1 ||
			rcu_my_thread_group_empty());
	rcu_read_unlock();
	return info;
}
  107. #if 0
  108. #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
  109. # define FILE_INODE(file) ((file)->f_inode)
  110. #else
  111. # define FILE_INODE(file) ((file)->f_dentry->d_inode)
  112. #endif
/* (dead code, compiled out under #if 0) Forward llseek to the inode's
 * own file operations. */
static loff_t graphene_lib_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = FILE_INODE(file);
	if (!inode)
		return -EINVAL;
	if (!inode->i_fop || !inode->i_fop->llseek)
		return -EINVAL;
	return inode->i_fop->llseek(file, offset, origin);
}
  122. static ssize_t graphene_lib_read (struct file *file, char __user *buf,
  123. size_t len, loff_t *ppos)
  124. {
  125. struct inode *inode = FILE_INODE(file);
  126. const struct file_operations *fops;
  127. if (!inode)
  128. return -EINVAL;
  129. fops = fops_get(inode->i_fop);
  130. if (unlikely(!fops))
  131. return -EINVAL;
  132. return inode->i_fop->read(file, buf, len, ppos);
  133. }
/* (dead code, compiled out under #if 0) Forward aio_read to the inode's
 * own file operations. */
static ssize_t graphene_lib_aio_read (struct kiocb *iocb, const struct iovec *iov,
				      unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = FILE_INODE(iocb->ki_filp);
	if (!inode)
		return -EINVAL;
	if (!inode->i_fop || !inode->i_fop->aio_read)
		return -EINVAL;
	return inode->i_fop->aio_read(iocb, iov, nr_segs, pos);
}
/* (dead code, compiled out under #if 0) Forward mmap to the inode's own
 * file operations. */
static int graphene_lib_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = FILE_INODE(file);
	if (!inode)
		return -EINVAL;
	if (!inode->i_fop || !inode->i_fop->mmap)
		return -EINVAL;
	return inode->i_fop->mmap(file, vma);
}
/* (dead code, compiled out under #if 0) Forward release to the inode's
 * own file operations. */
static int graphene_lib_release(struct inode *inode, struct file *file)
{
	if (!inode)
		return -EINVAL;
	if (!inode->i_fop || !inode->i_fop->release)
		return -EINVAL;
	return inode->i_fop->release(inode, file);
}
  161. #endif
/* Scratch-buffer helpers for d_path() strings.  The buffer is a
 * PATH_MAX-sized names-cache entry from __getname() and must be released
 * with PUT_PATH_BUFFER (__putname). */
#define DEFINE_PATH_BUFFER(kpath, max) char * kpath; int max;

#define GET_PATH_BUFFER(kpath, max)	\
	kpath = __getname();		\
	max = PATH_MAX;

/* Declare a buffer and resolve @path into it; dp is d_path()'s result
 * and may be an ERR_PTR — callers must IS_ERR-check it. */
#define DEFINE_PATH(dp, path, kpath, max)	\
	DEFINE_PATH_BUFFER(kpath, max)		\
	char *dp;				\
	GET_PATH_BUFFER(kpath, max)		\
	dp = d_path(path, kpath, max);

/* NOTE: expands with a trailing semicolon; call sites omit their own. */
#define PUT_PATH_BUFFER(kpath) __putname(kpath);
  172. #if 0
/*
 * (dead code, compiled out under #if 0) get_unmapped_area for the
 * sandbox loader file.
 *
 * When the policy pins the loader at gi_libaddr, mappings of the loader
 * are forced to gi_libaddr + file offset; any other file reaching this
 * path is a bug (the open hook only admits gi_libexec).  Otherwise defer
 * to the inode's fops or the mm default.
 */
static unsigned long
graphene_lib_get_area(struct file *file, unsigned long addr, unsigned long len,
		      unsigned long pgoff, unsigned long flags)
{
	struct task_struct *current_tsk = current;
	struct graphene_info *gi = get_graphene_info(current_tsk->graphene);
	struct inode *inode = FILE_INODE(file);
	unsigned long (*get_area) (struct file *, unsigned long, unsigned long,
				   unsigned long, unsigned long);

	if (!inode)
		return -EINVAL;

	if (gi->gi_libaddr) {
		/* Only the loader itself may be mapped through this path. */
		if (!path_equal(&file->f_path, &gi->gi_libexec))
			BUG();

		if (!addr)
			addr = gi->gi_libaddr + pgoff * PAGE_SIZE;
#ifdef CONFIG_GRAPHENE_DEBUG
		{
			DEFINE_PATH(dp, &file->f_path, kpath, max)
			if (!IS_ERR(dp))
				printk(KERN_INFO "Graphene: PID %d MAP FILE %s"
				       " OFF 0x%08lx AT 0x%016lx\n",
				       current->pid, dp,
				       pgoff * PAGE_SIZE, addr);
			PUT_PATH_BUFFER(kpath)
		}
#endif
		return addr;
	}

	get_area = (inode->i_fop && inode->i_fop->get_unmapped_area) ?
		   inode->i_fop->get_unmapped_area :
		   current_tsk->mm->get_unmapped_area;
	return get_area(file, addr, len, pgoff, flags);
}
/* (dead code, compiled out under #if 0) File operations required for
 * execve of the sandbox loader; each op forwards to the inode's own
 * fops, except get_unmapped_area which pins the mapping address. */
static struct file_operations graphene_lib_operations = {
	.llseek			= graphene_lib_llseek,
	.read			= graphene_lib_read,
	.aio_read		= graphene_lib_aio_read,
	.mmap			= graphene_lib_mmap,
	.get_unmapped_area	= graphene_lib_get_area,
	.release		= graphene_lib_release,
};
  216. #endif
  217. #ifdef CONFIG_GRAPHENE_DEBUG
  218. static void print_path(const char * fmt, struct path *path)
  219. {
  220. DEFINE_PATH(dp, path, kpath, max)
  221. if (!IS_ERR(dp))
  222. printk(fmt, current->pid, IS_ERR(dp) ? "(unknown)" : dp);
  223. PUT_PATH_BUFFER(kpath)
  224. }
  225. #else
  226. # define print_path(...) do {} while (0)
  227. #endif
/*
 * Hook: called when execve opens its target file inside a graphene
 * sandbox.  Only the configured loader binary (gi_libexec) may be
 * executed; anything else is denied with -EPERM.
 *
 * NOTE(review): gi is dereferenced without a NULL check — presumably
 * this hook only fires for tasks that already have graphene state;
 * confirm at the call site.
 */
int graphene_execve_open(struct file *file)
{
	struct task_struct *current_tsk = current;
	struct graphene_info *gi = get_graphene_info(current_tsk->graphene);

	/* Only meaningful while the task is mid-execve. */
	if (!current_tsk->in_execve)
		BUG();

	if (!path_equal(&file->f_path, &gi->gi_libexec)) {
		print_path(KERN_INFO "Graphene: DENY EXEC PID %d PATH %s\n",
			   &file->f_path);
		return -EPERM;
	}

	if (!gi->gi_libaddr)
		goto accepted;

	/* Fixed-address loader mapping via custom fops is currently
	 * disabled (see the #if 0 graphene_lib_operations above). */
	//file->f_op = &graphene_lib_operations;

accepted:
	print_path(KERN_INFO "Graphene: ALLOW EXEC PID %d PATH %s\n",
		   &file->f_path);
	return 0;
}
/*
 * get_unmapped_area hook used during execve of the sandbox loader.
 *
 * When the policy pins the loader at gi_libaddr, a NULL address hint is
 * replaced by gi_libaddr + file offset so the loader lands at the
 * configured address; otherwise the request falls through to the file's
 * own get_unmapped_area or the mm default.
 *
 * NOTE(review): gi is dereferenced without a NULL check — presumably the
 * hook only fires for sandboxed tasks; confirm at the call site.
 */
unsigned long
graphene_execve_get_area(struct file *file, unsigned long addr,
			 unsigned long len, unsigned long pgoff,
			 unsigned long flags)
{
	unsigned long (*get_area) (struct file *, unsigned long, unsigned long,
				   unsigned long, unsigned long);
	struct task_struct *current_tsk = current;
	struct graphene_info *gi = get_graphene_info(current_tsk->graphene);

	BUG_ON(!file);

	if (gi->gi_libaddr) {
		if (!addr)
			addr = gi->gi_libaddr + pgoff * PAGE_SIZE;
#ifdef CONFIG_GRAPHENE_DEBUG
		{
			DEFINE_PATH(dp, &file->f_path, kpath, max)
			if (!IS_ERR(dp))
				printk(KERN_INFO "Graphene: PID %d MAP FILE %s"
				       " OFF 0x%08lx AT 0x%016lx\n",
				       current->pid, dp,
				       pgoff * PAGE_SIZE, addr);
			PUT_PATH_BUFFER(kpath)
		}
#endif
		return addr;
	}

	get_area = current_tsk->mm->get_unmapped_area;
	if (file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;
	return get_area(file, addr, len, pgoff, flags);
}
  278. static int graphene_check_path(struct graphene_info *gi, int op, u32 mask,
  279. struct path *path, struct graphene_path *gp,
  280. int is_recursive)
  281. {
  282. if (!path_equal(path, &gp->path))
  283. return 0;
  284. if (mask & (MAY_READ|MAY_EXEC|MAY_ACCESS|
  285. AA_MAY_META_READ|AA_EXEC_MMAP|AA_MAY_LINK)) {
  286. if (!(gp->type & GRAPHENE_FS_READ))
  287. return -EPERM;
  288. }
  289. if (mask & (MAY_WRITE|MAY_APPEND|
  290. AA_MAY_CREATE|AA_MAY_DELETE|AA_MAY_META_WRITE|
  291. AA_MAY_CHMOD|AA_MAY_CHOWN)) {
  292. if (!(gp->type & GRAPHENE_FS_WRITE))
  293. return -EPERM;
  294. }
  295. return 1;
  296. }
  297. static int __common_perm(struct graphene_info *gi, int op, struct path *target,
  298. u32 mask)
  299. {
  300. struct graphene_path *p;
  301. struct path root, path = *target;
  302. struct qstr last;
  303. int rv = 0, i;
  304. BUG_ON(!path.dentry);
  305. path_get(&path);
  306. for (i = 0; i < 3 && gi->gi_console[i].mnt; i++)
  307. if (path_equal(target, &gi->gi_console[i]))
  308. goto out;
  309. if (op == OP_OPEN) {
  310. int minor = iminor(path.dentry->d_inode);
  311. if (minor == GRAPHENE_MINOR)
  312. goto out;
  313. if (minor == GIPC_MINOR)
  314. goto out;
  315. }
  316. rcu_read_lock();
  317. list_for_each_entry_rcu(p, &gi->gi_paths, list) {
  318. rv = graphene_check_path(gi, op, mask, &path, p, 0);
  319. if (rv)
  320. goto out;
  321. }
  322. if (gi->gi_libexec.mnt && path_equal(&path, &gi->gi_libexec)) {
  323. rv = 0;
  324. goto out;
  325. }
  326. get_fs_root(current->fs, &root);
  327. last.len = 0;
  328. while (!path_equal(&path, &root)) {
  329. int is_recursive = 0;
  330. list_for_each_entry_rcu(p, &gi->gi_rpaths, list) {
  331. rv = graphene_check_path(gi, op, mask, &path, p,
  332. is_recursive);
  333. if (rv)
  334. goto out_root;
  335. }
  336. last = path.dentry->d_name;
  337. while(1) {
  338. struct dentry *old = path.dentry;
  339. if (path_equal(&path, &root))
  340. break;
  341. if (path.dentry != path.mnt->mnt_root) {
  342. path.dentry = dget_parent(path.dentry);
  343. dput(old);
  344. break;
  345. }
  346. if (!follow_up(&path))
  347. break;
  348. }
  349. is_recursive = 1;
  350. }
  351. rv = -EPERM;
  352. out_root:
  353. path_put(&root);
  354. out:
  355. rcu_read_unlock();
  356. path_put(&path);
  357. if (rv >= 0) {
  358. rv = 0;
  359. print_path(KERN_INFO "Graphene: ALLOW PID %d PATH %s\n",
  360. target);
  361. } else {
  362. print_path(KERN_INFO "Graphene: DENY PID %d PATH %s\n",
  363. target);
  364. }
  365. return rv;
  366. }
  367. int graphene_common_perm(int op, struct path *path, u32 mask)
  368. {
  369. struct graphene_info *gi = get_graphene_info(current->graphene);
  370. if (!gi)
  371. return 0;
  372. return __common_perm(gi, op, path, mask);
  373. }
/*
 * Check a unix-socket address against the sandbox's reserved abstract
 * name prefix (gi_unix: leading NUL byte + generated prefix).
 *
 * Returns 0 if the address carries the prefix, -EPERM otherwise (or when
 * no prefix was configured — gi_unix[1] unset).
 *
 * NOTE(review): @addrlen is not validated before the fixed-size memcmp
 * over sizeof(gi->gi_unix) bytes of sun_path — a short sockaddr from
 * userspace could be over-read; confirm callers guarantee the length.
 */
static int __unix_perm(struct sockaddr *address, int addrlen)
{
	struct graphene_info *gi = get_graphene_info(current->graphene);
	const char * sun_path =
		((struct sockaddr_un *) address)->sun_path;

	if (!gi->gi_unix[1])
		return -EPERM;

	if (!memcmp(sun_path, gi->gi_unix, sizeof(gi->gi_unix)))
		return 0;

#ifdef CONFIG_GRAPHENE_DEBUG
	printk(KERN_INFO "Graphene: DENY PID %d SOCKET %s\n",
	       current->pid, sun_path);
#endif
	return -EPERM;
}
  389. static int net_cmp(int family, bool addr_any, bool port_any,
  390. struct graphene_net_addr *ga,
  391. struct sockaddr *addr, int addrlen)
  392. {
  393. switch(family) {
  394. case AF_INET: {
  395. struct sockaddr_in *a = (void *) addr;
  396. if (!addr_any) {
  397. if (a->sin_addr.s_addr != ga->addr.sin_addr.s_addr)
  398. return 1;
  399. }
  400. if (!port_any) {
  401. unsigned short port = ntohs(a->sin_port);
  402. if (!(port >= ga->port_begin && port <= ga->port_end))
  403. return 1;
  404. }
  405. break;
  406. }
  407. #ifdef CONFIG_IPV6
  408. case AF_INET6: {
  409. struct sockaddr_in6 *a6 = (void *) addr;
  410. if (!addr_any) {
  411. if (memcmp(&a6->sin6_addr, &ga->addr.sin6_addr,
  412. sizeof(struct in6_addr)))
  413. return 1;
  414. }
  415. if (!port_any) {
  416. unsigned short port = ntohs(a6->sin6_port);
  417. if (!(port >= ga->port_begin && port <= ga->port_end))
  418. return 1;
  419. }
  420. break;
  421. }
  422. #endif
  423. }
  424. return 0;
  425. }
  426. #ifdef CONFIG_GRAPHENE_DEBUG
  427. static void print_net(int allow, int family, int op, struct sockaddr *addr,
  428. int addrlen)
  429. {
  430. const char *allow_str = allow ? "ALLOW" : "DENY";
  431. const char *op_str = "UNKNOWN OP";
  432. switch(op) {
  433. case OP_BIND: op_str = "BIND"; break;
  434. case OP_LISTEN: op_str = "LISTEN"; break;
  435. case OP_CONNECT: op_str = "CONNECT"; break;
  436. case OP_SENDMSG: op_str = "SENDMSG"; break;
  437. case OP_RECVMSG: op_str = "RECVMSG"; break;
  438. }
  439. if (!addr) {
  440. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET\n",
  441. allow_str, op_str, current->pid);
  442. return;
  443. }
  444. switch(family) {
  445. case AF_INET: {
  446. struct sockaddr_in *a = (void *) addr;
  447. u8 *a1 = (u8 *) &a->sin_addr.s_addr;
  448. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  449. "%d.%d.%d.%d:%d\n",
  450. allow_str, op_str, current->pid,
  451. a1[0], a1[1], a1[2], a1[3], ntohs(a->sin_port));
  452. }
  453. break;
  454. #ifdef CONFIG_IPV6
  455. case AF_INET6: {
  456. struct sockaddr_in6 *a = (void *) addr;
  457. u16 *a1 = (u16 *) &a->sin6_addr.s6_addr;
  458. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  459. "[%d.%d.%d.%d:%d:%d:%d:%d]:%d\n",
  460. allow_str, op_str, current->pid,
  461. a1[0], a1[1], a1[2], a1[3],
  462. a1[4], a1[5], a1[6], a1[7], ntohs(a->sin6_port));
  463. }
  464. break;
  465. #endif
  466. }
  467. }
  468. #else
  469. # define print_net(...) do {} while (0)
  470. #endif
  471. /*
  472. * network rules:
  473. * bind:
  474. * input addr/port match bind addr/port
  475. * listen:
  476. * always allow
  477. * connect:
  478. * input addr/port match peer addr/port
  479. * sendmsg:
  480. * EITHER stream socket OR no input addr/port OR
  481. * input addr/port match peer addr/port
  482. * recvmsg:
  483. * EITHER stream socket OR connected
  484. */
/*
 * Check a socket operation against the sandbox's network rules.
 *
 * Policy (see the rules comment above):
 *   - only SOCK_STREAM/SOCK_DGRAM over AF_INET (and AF_INET6 when
 *     enabled) are ever allowed;
 *   - BIND is matched against gi_binds, CONNECT/SENDMSG against
 *     gi_peers; all other ops are allowed outright;
 *   - as a fallback, any AF_INET address on the configured multicast
 *     port is allowed.
 *
 * Returns 0 if allowed, -EPERM otherwise.
 *
 * NOTE(review): the rule lists are traversed with plain
 * list_for_each_entry (no lock, no RCU), relying on the lists being
 * immutable after setup — confirm policy updates cannot race with this.
 */
static
int __common_net_perm(struct graphene_info *gi, int op, struct socket *sock,
		      struct sockaddr *address, int addrlen)
{
	struct sock *sk = sock->sk;
	struct list_head *head;
	struct graphene_net *gn;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_DGRAM)
		return -EPERM;

#ifdef CONFIG_IPV6
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
#else
	if (sk->sk_family != AF_INET)
#endif
		return -EPERM;

	switch(op) {
	case OP_BIND:
		head = &gi->gi_binds;
		break;
	case OP_CONNECT:
	case OP_SENDMSG:
		head = &gi->gi_peers;
		break;
	default:
		/* LISTEN / RECVMSG need no address rule. */
		print_net(1, sk->sk_family, op, address, addrlen);
		return 0;
	}

	BUG_ON(!address);

	if (list_empty(head))
		goto no_rules;

	list_for_each_entry(gn, head, list) {
		if (gn->family != sk->sk_family)
			continue;
		if (net_cmp(sk->sk_family,
			    gn->flags & ADDR_ANY, gn->flags & PORT_ANY,
			    &gn->addr, address, addrlen))
			continue;
		print_net(1, sk->sk_family, op, address, addrlen);
		return 0;
	}

no_rules:
	/* Multicast fallback: the configured mcast port is always open. */
	if (gi->gi_mcast_port && sk->sk_family == AF_INET &&
	    ((struct sockaddr_in *) address)->sin_port == gi->gi_mcast_port) {
		print_net(1, AF_INET, op, address, addrlen);
		return 0;
	}

	print_net(0, sk->sk_family, op, address, addrlen);
	return -EPERM;
}
  534. int graphene_socket_bind(struct socket *sock,
  535. struct sockaddr *address, int addrlen)
  536. {
  537. if (GRAPHENE_ENABLED()) {
  538. struct graphene_info *gi = get_graphene_info(current->graphene);
  539. if (!sock || !sock->sk)
  540. return 0;
  541. if (sock->sk->sk_family == PF_UNIX) {
  542. if (sock->sk->sk_type != SOCK_STREAM)
  543. return -EPERM;
  544. return __unix_perm(address, addrlen);
  545. }
  546. return __common_net_perm(gi, OP_BIND, sock, address, addrlen);
  547. }
  548. return 0;
  549. }
  550. int graphene_socket_listen(struct socket *sock, int backlog)
  551. {
  552. if (GRAPHENE_ENABLED()) {
  553. struct graphene_info *gi = get_graphene_info(current->graphene);
  554. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  555. return 0;
  556. return __common_net_perm(gi, OP_LISTEN, sock, NULL, 0);
  557. }
  558. return 0;
  559. }
  560. int graphene_socket_connect(struct socket *sock,
  561. struct sockaddr *address, int addrlen)
  562. {
  563. if (GRAPHENE_ENABLED()) {
  564. struct graphene_info *gi = get_graphene_info(current->graphene);
  565. if (!sock || !sock->sk)
  566. return 0;
  567. if (sock->sk->sk_family == PF_UNIX) {
  568. if (sock->sk->sk_type != SOCK_STREAM)
  569. return -EPERM;
  570. return __unix_perm(address, addrlen);
  571. }
  572. return __common_net_perm(gi, OP_CONNECT, sock, address,
  573. addrlen);
  574. }
  575. return 0;
  576. }
  577. int graphene_socket_sendmsg(struct socket *sock,
  578. struct msghdr *msg, int size)
  579. {
  580. if (GRAPHENE_ENABLED()) {
  581. struct graphene_info *gi = get_graphene_info(current->graphene);
  582. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  583. return 0;
  584. if (sock->sk->sk_type == SOCK_STREAM)
  585. return 0;
  586. if (!msg->msg_name)
  587. return 0;
  588. return __common_net_perm(gi, OP_SENDMSG, sock,
  589. msg->msg_name, msg->msg_namelen);
  590. }
  591. return 0;
  592. }
  593. int graphene_socket_recvmsg(struct socket *sock,
  594. struct msghdr *msg, int size, int flags)
  595. {
  596. if (GRAPHENE_ENABLED()) {
  597. struct graphene_info *gi = get_graphene_info(current->graphene);
  598. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  599. return 0;
  600. if (sock->sk->sk_type == SOCK_STREAM)
  601. return 0;
  602. return __common_net_perm(gi, OP_RECVMSG, sock, NULL, 0);
  603. }
  604. return 0;
  605. }
  606. int graphene_task_kill(struct task_struct *tsk, struct siginfo *info,
  607. int sig, u32 secid)
  608. {
  609. struct task_struct *current_tsk = current;
  610. if (!current_tsk->graphene)
  611. return 0;
  612. if (sig != SIGCONT)
  613. return -EPERM;
  614. return (tsk->tgid == current_tsk->tgid) ? 0 : -EPERM;
  615. }
/*
 * Capture the caller's fds 0-2 as the sandbox's "console" paths, which
 * stay accessible regardless of filesystem rules.  Unused slots get a
 * NULL mnt as the end-of-array sentinel.
 *
 * NOTE(review): fdt is obtained under rcu_read_lock() but dereferenced
 * after rcu_read_unlock(); a concurrent fdtable resize could free it.
 * Presumably @files is owned single-threadedly here — confirm callers.
 */
static void get_console(struct graphene_info *gi, struct files_struct *files)
{
	struct fdtable *fdt;
	unsigned long set;
	int fd = 0, n = 0;

	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();

	/* First word of the open-fd bitmap covers fds 0-2. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
	set = fdt->open_fds[0];
#else
	set = fdt->open_fds->fds_bits[0];
#endif

	for (; fd < 3 && fd < fdt->max_fds && set ; fd++, set >>= 1) {
		struct file *file;
		if (!(set & 1))
			continue;
		file = ACCESS_ONCE(fdt->fd[fd]);
		if (!file)
			continue;
		/* Take a reference; dropped later in drop_graphene_info(). */
		path_get(&file->f_path);
		gi->gi_console[n++] = file->f_path;
#ifdef CONFIG_GRAPHENE_DEBUG
		{
			DEFINE_PATH(dp, &file->f_path, kpath, max)
			if (!IS_ERR(dp))
				printk(KERN_INFO "Graphene: "
				       "PID %d CONSOLE %s\n",
				       current->pid, dp);
			PUT_PATH_BUFFER(kpath)
		}
#endif
	}

	/* NULL mnt terminates the console array. */
	for ( ; n < 3 ; n++)
		gi->gi_console[n].mnt = NULL;
}
  652. static int update_graphene(struct task_struct *current_tsk,
  653. struct graphene_info *gi);
  654. #ifdef CONFIG_GRAPHENE_DEBUG
/*
 * Debug helper: format one network rule as "addr:port[-port]" (with
 * "ANY" wildcards) and printk it with current->pid.  @fmt must contain
 * "%d ... %s".
 */
static void print_net_rule(const char *fmt, struct graphene_net *n)
{
# ifdef CONFIG_IPV6
# define ADDR_STR_MAX 128
# else
# define ADDR_STR_MAX 48
# endif
	char str[ADDR_STR_MAX];
	int len = 0;

	if (n->flags & ADDR_ANY) {
		str[len++] = 'A';
		str[len++] = 'N';
		str[len++] = 'Y';
		str[len++] = ':';
	} else {
		switch(n->family) {
		case AF_INET: {
			u8 *ip = (u8 *) &n->addr.addr.sin_addr.s_addr;
			len += snprintf(str + len,
					ADDR_STR_MAX - len,
					"%u.%u.%u.%u:",
					ip[0], ip[1], ip[2], ip[3]);
		}
		break;
#ifdef CONFIG_IPV6
		case AF_INET6: {
			/* NOTE(review): groups printed as raw u16 reads of
			 * in6_addr (not ntohs()'d) — confirm byte order. */
			u16 *ip = (u16 *) &n->addr.addr.sin6_addr.s6_addr;
			len += snprintf(str + len,
					ADDR_STR_MAX - len,
					"[%u:%u:%u:%u:%u:%u:%u:%u]:",
					ip[0], ip[1], ip[2], ip[3],
					ip[4], ip[5], ip[6], ip[7]);
		}
		break;
#endif /* CONFIG_IPV6 */
		}
	}

	if (n->flags & PORT_ANY) {
		str[len++] = 'A';
		str[len++] = 'N';
		str[len++] = 'Y';
	} else {
		if (n->addr.port_begin == n->addr.port_end)
			len += snprintf(str + len, ADDR_STR_MAX - len,
					"%u", n->addr.port_begin);
		else
			len += snprintf(str + len, ADDR_STR_MAX - len,
					"%u-%u",
					n->addr.port_begin, n->addr.port_end);
	}

	/* ADDR_STR_MAX is sized so a full rule always fits. */
	BUG_ON(len >= ADDR_STR_MAX);
	str[len] = 0;
	printk(fmt, current->pid, str);
}
  709. #else
  710. # define print_net_rule(...) do {} while (0)
  711. #endif
  712. static int set_net_rule(struct graphene_net_rule *nr, struct graphene_info *gi,
  713. bool bind)
  714. {
  715. struct graphene_net *n;
  716. #ifdef CONFIG_IPV6
  717. if (nr->family != AF_INET && nr->family != AF_INET6)
  718. #else
  719. if (nr->family != AF_INET)
  720. #endif
  721. return -EINVAL;
  722. n = kmalloc(sizeof(struct graphene_net), GFP_KERNEL);
  723. if (!n)
  724. return -ENOMEM;
  725. n->family = nr->family;
  726. n->flags = 0;
  727. n->addr = nr->addr;
  728. switch(n->family) {
  729. case AF_INET:
  730. if (!n->addr.addr.sin_addr.s_addr)
  731. n->flags |= ADDR_ANY;
  732. break;
  733. #ifdef CONFIG_IPV6
  734. case AF_INET6:
  735. if (!memcmp(&n->addr.addr.sin6_addr.s6_addr, &in6addr_any, 16))
  736. n->flags |= ADDR_ANY;
  737. break;
  738. #endif /* CONFIG_IPV6 */
  739. }
  740. if (n->addr.port_begin == 0 && n->addr.port_end == 65535)
  741. n->flags |= PORT_ANY;
  742. INIT_LIST_HEAD(&n->list);
  743. if (bind) {
  744. list_add_tail(&n->list, &gi->gi_binds);
  745. print_net_rule(KERN_INFO "Graphene: PID %d NET BIND %s\n", n);
  746. } else {
  747. list_add_tail(&n->list, &gi->gi_peers);
  748. print_net_rule(KERN_INFO "Graphene: PID %d NET PEER %s\n", n);
  749. }
  750. return 0;
  751. }
  752. u64 gipc_get_session(struct task_struct *tsk)
  753. {
  754. struct graphene_info *gi = get_graphene_info(tsk->graphene);
  755. return gi ? gi->gi_gipc_session : 0;
  756. }
  757. int set_graphene(struct task_struct *current_tsk,
  758. const struct graphene_policies __user *gpolicies)
  759. {
  760. int npolicies;
  761. const struct graphene_user_policy __user *policies = gpolicies->policies;
  762. struct graphene_info *gi;
  763. struct graphene_user_policy ptmp;
  764. struct graphene_path *p;
  765. int i, rv = 0;
  766. DEFINE_PATH_BUFFER(kpath, max)
  767. #ifdef CONFIG_GRAPHENE_DEBUG
  768. char *dp;
  769. #endif
  770. rv = copy_from_user(&npolicies, &gpolicies->npolicies, sizeof(int));
  771. if (rv)
  772. return -EFAULT;
  773. if (npolicies && !policies)
  774. return -EINVAL;
  775. #ifndef CONFIG_GRAPHENE_ISOLATE
  776. if (current_tsk->graphene)
  777. return -EAGAIN;
  778. if (current_tsk != current_tsk->group_leader)
  779. return -EPERM;
  780. #endif
  781. gi = kmalloc(sizeof(struct graphene_info), GFP_KERNEL);
  782. if (!gi)
  783. return -ENOMEM;
  784. GET_PATH_BUFFER(kpath, max)
  785. memset(gi, 0, sizeof(struct graphene_info));
  786. INIT_LIST_HEAD(&gi->gi_paths);
  787. INIT_LIST_HEAD(&gi->gi_rpaths);
  788. INIT_LIST_HEAD(&gi->gi_binds);
  789. INIT_LIST_HEAD(&gi->gi_peers);
  790. gi->gi_gipc_session = atomic64_inc_return(&gipc_session);
  791. #ifdef CONFIG_GRAPHENE_DEBUG
  792. printk(KERN_INFO "Graphene: PID %d GIPC SESSION %llu\n",
  793. current_tsk->pid, gi->gi_gipc_session);
  794. #endif
  795. for (i = 0 ; i < npolicies ; i++) {
  796. int type, flags;
  797. rv = copy_from_user(&ptmp, policies + i,
  798. sizeof(struct graphene_user_policy));
  799. if (rv) {
  800. rv = -EFAULT;
  801. goto err;
  802. }
  803. if (!ptmp.value) {
  804. rv = -EINVAL;
  805. goto err;
  806. }
  807. type = ptmp.type & GRAPHENE_POLICY_TYPES;
  808. flags = ptmp.type & ~type;
  809. switch(type) {
  810. case GRAPHENE_LIB_NAME:
  811. rv = strncpy_from_user(kpath, ptmp.value, max);
  812. if (rv < 0)
  813. goto err;
  814. rv = kern_path(kpath, LOOKUP_FOLLOW, &gi->gi_libexec);
  815. if (rv)
  816. goto err;
  817. #ifdef CONFIG_GRAPHENE_DEBUG
  818. dp = d_path(&gi->gi_libexec, kpath, max);
  819. if (IS_ERR(dp)) {
  820. rv = -EINVAL;
  821. goto err;
  822. }
  823. printk(KERN_INFO "Graphene: PID %d LIB NAME %s\n",
  824. current_tsk->pid, dp);
  825. #endif
  826. break;
  827. case GRAPHENE_LIB_ADDR:
  828. gi->gi_libaddr = (u64) ptmp.value;
  829. #ifdef CONFIG_GRAPHENE_DEBUG
  830. printk(KERN_INFO "Graphene: PID %d LIB ADDR 0x%016llx\n",
  831. current_tsk->pid, gi->gi_libaddr);
  832. #endif
  833. break;
  834. case GRAPHENE_UNIX_PREFIX: {
  835. unsigned long token =
  836. atomic64_inc_return(&unix_prefix_counter);
  837. gi->gi_unix[0] = '\0';
  838. snprintf(gi->gi_unix + 1, sizeof(gi->gi_unix) - 1,
  839. GRAPHENE_UNIX_PREFIX_FMT, token);
  840. gi->gi_unix[sizeof(gi->gi_unix) - 1] = '/';
  841. rv = copy_to_user((void *) ptmp.value, &token,
  842. sizeof(unsigned long));
  843. if (rv) {
  844. rv = -EFAULT;
  845. goto err;
  846. }
  847. #ifdef CONFIG_GRAPHENE_DEBUG
  848. printk(KERN_INFO "Graphene: PID %d UNIX PREFIX %s\n",
  849. current_tsk->pid, kpath);
  850. #endif
  851. break;
  852. }
  853. case GRAPHENE_MCAST_PORT: {
  854. struct socket *sock;
  855. struct sock *sk;
  856. struct inet_sock *inet;
  857. struct file *file;
  858. unsigned short port;
  859. rv = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
  860. if (rv)
  861. goto err;
  862. file = sock_alloc_file(sock, 0, NULL);
  863. if (unlikely(IS_ERR(file))) {
  864. sock_release(sock);
  865. rv = PTR_ERR(file);
  866. goto err;
  867. }
  868. sk = sock->sk;
  869. lock_sock(sk);
  870. inet = inet_sk(sk);
  871. sk->sk_reuse = SK_CAN_REUSE;
  872. if (sk->sk_prot->get_port(sk, 0)) {
  873. release_sock(sk);
  874. sock_release(sock);
  875. rv = -EAGAIN;
  876. goto err;
  877. }
  878. port = inet->inet_sport = htons(inet->inet_num);
  879. release_sock(sk);
  880. gi->gi_mcast_port = port;
  881. gi->gi_mcast_sock = file;
  882. port = ntohs(port);
  883. rv = copy_to_user((void *) ptmp.value, &port,
  884. sizeof(unsigned short));
  885. if (rv) {
  886. rv = -EFAULT;
  887. goto err;
  888. }
  889. #ifdef CONFIG_GRAPHENE_DEBUG
  890. printk(KERN_INFO "Graphene: PID %d MCAST PORT %d\n",
  891. current_tsk->pid, port);
  892. #endif
  893. break;
  894. }
  895. case GRAPHENE_NET_RULE: {
  896. struct graphene_net_rule nr;
  897. rv = copy_from_user(&nr, ptmp.value,
  898. sizeof(struct graphene_net_rule));
  899. if (rv) {
  900. rv = -EFAULT;
  901. goto err;
  902. }
  903. rv = set_net_rule(&nr, gi, flags & GRAPHENE_NET_BIND);
  904. if (rv < 0)
  905. goto err;
  906. break;
  907. }
  908. case GRAPHENE_FS_PATH:
  909. rv = strncpy_from_user(kpath, ptmp.value, max);
  910. if (rv < 0)
  911. goto err;
  912. p = kmalloc(sizeof(struct graphene_path),
  913. GFP_KERNEL);
  914. if (!p) {
  915. rv = -ENOMEM;
  916. goto err;
  917. }
  918. rv = kern_path(kpath, LOOKUP_FOLLOW, &p->path);
  919. if (rv) {
  920. kfree(p);
  921. goto err;
  922. }
  923. #ifdef CONFIG_GRAPHENE_DEBUG
  924. dp = d_path(&p->path, kpath, max);
  925. if (IS_ERR(dp)) {
  926. rv = -EINVAL;
  927. kfree(p);
  928. goto err;
  929. }
  930. printk(KERN_INFO "Graphene: PID %d PATH %s%s\n",
  931. current_tsk->pid, dp,
  932. type == GRAPHENE_FS_PATH ? "" :
  933. " (recursive)");
  934. #endif
  935. p->type = flags;
  936. INIT_LIST_HEAD(&p->list);
  937. list_add_tail(&p->list,
  938. (flags & GRAPHENE_FS_RECURSIVE) ?
  939. &gi->gi_rpaths : &gi->gi_paths);
  940. break;
  941. }
  942. }
  943. if (!current_tsk->graphene) {
  944. struct graphene_struct *gs;
  945. get_console(gi, current_tsk->files);
  946. gs = kmalloc(sizeof(struct graphene_struct), GFP_KERNEL);
  947. if (!gs) {
  948. rv = -ENOMEM;
  949. goto err;
  950. }
  951. atomic_set(&gs->g_count, 1);
  952. gs->g_info = gi;
  953. spin_lock_init(&gs->g_lock);
  954. current_tsk->graphene = gs;
  955. printk(KERN_INFO "Graphene: PID %d registered\n",
  956. current_tsk->pid);
  957. }
  958. #ifdef CONFIG_GRAPHENE_ISOLATE
  959. else {
  960. if ((rv = update_graphene(current_tsk, gi)) < 0) {
  961. printk(KERN_INFO "Graphene: PID %d cannot be updated (%d)\n",
  962. current_tsk->pid, rv);
  963. goto err;
  964. }
  965. printk(KERN_INFO "Graphene: PID %d updated\n",
  966. current_tsk->pid);
  967. }
  968. #endif
  969. rv = 0;
  970. goto out;
  971. err:
  972. drop_graphene_info(gi);
  973. out:
  974. PUT_PATH_BUFFER(kpath)
  975. return rv;
  976. }
  977. #ifdef CONFIG_GRAPHENE_ISOLATE
  978. static int do_close_sock(struct graphene_info *gi, struct socket *sock,
  979. int close_unix)
  980. {
  981. struct sock *sk = sock->sk;
  982. struct sockaddr_storage address;
  983. struct sockaddr *addr = (void *) &address;
  984. struct inet_sock *inet;
  985. int len, err;
  986. if (!sk)
  987. return 0;
  988. if (sk->sk_family == PF_UNIX)
  989. return close_unix ? -EPERM : 0;
  990. inet = inet_sk(sk);
  991. if (inet->inet_dport) {
  992. err = sock->ops->getname(sock, addr, &len, 1);
  993. if (err)
  994. return err;
  995. /* give it a chance, check if it match one of the peers */
  996. err = __common_net_perm(gi, OP_CONNECT, sock, addr, len);
  997. if (!err)
  998. return 0;
  999. }
  1000. if (!inet->inet_num)
  1001. return 0;
  1002. err = sock->ops->getname(sock, addr, &len, 0);
  1003. if (err)
  1004. return err;
  1005. return __common_net_perm(gi, OP_BIND, sock, addr, len);
  1006. }
/*
 * Walk every open file descriptor in @files and close those that are not
 * permitted by policy @gi: pipes are always closed, sockets are checked
 * via do_close_sock(), and regular files are re-checked against the
 * OP_OPEN permission.  @close_unix is forwarded to do_close_sock().
 * Always returns 0.
 */
static int do_close_fds(struct graphene_info *gi, struct files_struct *files,
			int close_unix)
{
	struct fdtable *fdt;
	int fd, i = 0;
	/* NOTE(review): the fdtable pointer is fetched under RCU but used
	 * after rcu_read_unlock(); this assumes the table is not resized
	 * concurrently while we scan it — confirm. */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
	/* Scan the open-fd bitmap one word at a time. */
	for (;;) {
		unsigned long set;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		fd = i * BITS_PER_LONG;		/* first fd covered by word i */
#else
		fd = i * __NFDBITS;
#endif
		if (fd >= fdt->max_fds)
			break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		set = fdt->open_fds[i++];	/* bitmap word of open fds */
#else
		set = fdt->open_fds->fds_bits[i++];
#endif
		for ( ; set ; fd++, set >>= 1) {
			struct socket *sock = NULL;
			struct file *file;
			int err;
			if (!(set & 1))
				continue;
			/* Atomically detach the file from the table so the
			 * permission decision cannot race with another user
			 * of the same slot. */
			file = xchg(&fdt->fd[fd], NULL);
			if (!file)
				continue;
			/* Pipes never survive a policy update. */
			if (get_pipe_info(file))
				goto deny;
			sock = sock_from_file(file, &err);
			if (sock) {
				err = do_close_sock(gi, sock, close_unix);
				if (!err)
					goto allow;
				goto deny;
			}
			/* Regular file: re-validate against the new policy
			 * with the access mode it was opened with. */
			err = __common_perm(gi, OP_OPEN, &file->f_path,
					    aa_map_file_to_perms(file));
			if (err)
				goto deny;
allow:
			/* Permitted: put the file back into the table. */
			xchg(&fdt->fd[fd], file);
			continue;
deny:
			/* Denied: the file stays detached and is closed. */
			filp_close(file, files);
			cond_resched();
		}
	}
	return 0;
}
  1061. static
  1062. int net_check (int family,
  1063. int flags1, struct graphene_net_addr * addr1,
  1064. int flags2, struct graphene_net_addr * addr2)
  1065. {
  1066. if (flags2 & ADDR_ANY)
  1067. goto port;
  1068. if (flags1 & ADDR_ANY)
  1069. goto port;
  1070. switch (family) {
  1071. case AF_INET:
  1072. if (memcmp(&addr1->addr.sin_addr,
  1073. &addr2->addr.sin_addr,
  1074. sizeof(struct in_addr)))
  1075. return -EPERM;
  1076. break;
  1077. case AF_INET6:
  1078. if (memcmp(&addr1->addr.sin6_addr,
  1079. &addr2->addr.sin6_addr,
  1080. sizeof(struct in6_addr)))
  1081. return -EPERM;
  1082. break;
  1083. }
  1084. port:
  1085. if (flags2 & PORT_ANY)
  1086. return 0;
  1087. if (flags1 & PORT_ANY)
  1088. return 0;
  1089. if (addr1->port_begin < addr2->port_begin ||
  1090. addr1->port_end > addr2->port_end)
  1091. return -EPERM;
  1092. return 0;
  1093. }
/*
 * Scan the open file descriptors in @files for a connected socket of
 * @n->family whose peer address matches rule @n.  Returns 1 on the
 * first match, 0 when none is found.  Used to grandfather an existing
 * connection into a new policy.
 */
static int net_check_fds(struct graphene_net *n, struct files_struct *files)
{
	struct fdtable *fdt;
	int fd, i = 0;
	/* The whole scan runs inside one RCU read-side critical section;
	 * the early return below drops the lock before returning. */
	rcu_read_lock();
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		fd = i * BITS_PER_LONG;		/* first fd covered by word i */
#else
		fd = i * __NFDBITS;
#endif
		if (fd >= fdt->max_fds)
			break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		set = fdt->open_fds[i++];	/* bitmap word of open fds */
#else
		set = fdt->open_fds->fds_bits[i++];
#endif
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			struct socket *sock;
			struct sock *sk;
			struct inet_sock *inet;
			struct sockaddr_storage address;
			struct sockaddr *addr = (void *) &address;
			int len, err;
			if (!(set & 1))
				continue;
			file = rcu_dereference_raw(fdt->fd[fd]);
			if (!file)
				continue;
			sock = sock_from_file(file, &err);
			if (!sock)
				continue;
			/* Only sockets of the rule's family are relevant. */
			if (!(sk = sock->sk) || sk->sk_family != n->family)
				continue;
			inet = inet_sk(sk);
			/* Skip sockets that are not connected. */
			if (!inet->inet_dport)
				continue;
			/* NOTE(review): ->getname() is called while holding
			 * the RCU read lock; confirm it cannot sleep here. */
			err = sock->ops->getname(sock, addr, &len, 1);
			if (err)
				continue;
			/* net_cmp() returning 0 means the peer matches. */
			if (!net_cmp(n->family, false, false,
				     &n->addr, addr, len)) {
				rcu_read_unlock();
				return 1;
			}
		}
	}
	rcu_read_unlock();
	return 0;
}
/*
 * Replace the calling task's current policy with @new, after verifying
 * that @new is a strict subset of the existing policy: every FS rule,
 * BIND rule and CONNECT rule in @new must be permitted by the old
 * policy (a CONNECT rule is also accepted if a matching live connection
 * already exists).  Console paths, the unix prefix, and the mcast
 * port/socket are inherited from the old policy when @new lacks them.
 * On success the policy is swapped under gs->g_lock and descriptors no
 * longer allowed are closed.  Returns 0 on success, -EPERM on any rule
 * the old policy does not cover.
 */
static int update_graphene(struct task_struct *current_tsk,
			   struct graphene_info *new)
{
	struct graphene_struct *gs = current_tsk->graphene;
	struct graphene_info *gi = get_graphene_info(gs);
	struct graphene_path *p;
	struct graphene_net *n1, *n2;
	/* NOTE(review): close_unix is never set non-zero in this function,
	 * so do_close_fds() always keeps PF_UNIX sockets — confirm intended. */
	int i = 0, close_unix = 0;
	/* Carry the (up to 3) console paths over into the new policy,
	 * taking a reference on each. */
	for (i = 0 ; i < 3 && gi->gi_console[i].mnt ; i++) {
		path_get(&gi->gi_console[i]);
		new->gi_console[i] = gi->gi_console[i];
	}
	/* Each FS rule of the new policy must already be allowed. */
	list_for_each_entry(p, &new->gi_paths, list) {
		u32 mask = 0;
		if (p->type & GRAPHENE_FS_READ)
			mask |= MAY_READ;
		if (p->type & GRAPHENE_FS_WRITE)
			mask |= MAY_WRITE;
		print_path(KERN_INFO "Graphene: PID %d CHECK RULE %s\n",
			   &p->path);
		if (__common_perm(gi, OP_OPEN, &p->path, mask) < 0)
			return -EPERM;
	}
	/* Each BIND rule must be covered by some old BIND rule. */
	list_for_each_entry(n1, &new->gi_binds, list) {
		bool accepted = false;
		print_net_rule(KERN_INFO
			       "Graphene: PID %d CHECK RULE BIND %s\n",
			       n1);
		list_for_each_entry(n2, &gi->gi_binds, list) {
			if (n1->family != n2->family)
				continue;
			if (net_check(n1->family,
				      n1->flags, &n1->addr,
				      n2->flags, &n2->addr) < 0)
				continue;
			accepted = true;
			print_net_rule(KERN_INFO
				       "Graphene: PID %d ALLOW BIND %s\n",
				       n1);
			break;
		}
		if (!accepted) {
			print_net_rule(KERN_INFO
				       "Graphene: PID %d DENY BIND %s\n",
				       n1);
			return -EPERM;
		}
	}
	/* Each CONNECT rule must be covered by an old CONNECT rule, or —
	 * for fully-specified rules — match an already-open connection. */
	list_for_each_entry(n1, &new->gi_peers, list) {
		bool accepted = false;
		print_net_rule(KERN_INFO
			       "Graphene: PID %d CHECK RULE CONNECT %s\n",
			       n1);
		list_for_each_entry(n2, &gi->gi_peers, list) {
			if (n1->family != n2->family)
				continue;
			if (net_check(n1->family,
				      n1->flags, &n1->addr,
				      n2->flags, &n2->addr) < 0)
				continue;
			accepted = true;
			print_net_rule(KERN_INFO
				       "Graphene: PID %d ALLOW CONNECT %s\n",
				       n1);
			break;
		}
		/* Grandfather clause: an exact rule matching a live
		 * connection is accepted even without an old rule. */
		if (!accepted && !(n1->flags & (ADDR_ANY|PORT_ANY)) &&
		    net_check_fds(n1, current_tsk->files))
			accepted = true;
		if (!accepted) {
			print_net_rule(KERN_INFO
				       "Graphene: PID %d DENY CONNECT %s\n",
				       n1);
			return -EPERM;
		}
	}
	/* Inherit the unix prefix when the new policy has none
	 * (gi_unix[0] is always '\0'; index 1 is the first real byte). */
	if (!new->gi_unix[1] && gi->gi_unix[1])
		memcpy(new->gi_unix, gi->gi_unix, sizeof(gi->gi_unix));
	/* Inherit mcast port and socket (taking a file reference). */
	if (!new->gi_mcast_port)
		new->gi_mcast_port = gi->gi_mcast_port;
	if (!new->gi_mcast_sock && gi->gi_mcast_sock) {
		atomic_long_inc(&gi->gi_mcast_sock->f_count);
		new->gi_mcast_sock = gi->gi_mcast_sock;
	}
	/* Swap in the new policy under the lock, then close any fds the
	 * new policy no longer permits. */
	spin_lock(&gs->g_lock);
	put_graphene_info(gs->g_info);
	gs->g_info = new;
	spin_unlock(&gs->g_lock);
	do_close_fds(new, current_tsk->files, close_unix);
	return 0;
}
  1239. #endif /* CONFIG_GRAPHENE_ISOLATE */
  1240. static long graphene_ioctl(struct file *file, unsigned int cmd,
  1241. unsigned long arg)
  1242. {
  1243. struct task_struct *current_tsk = current;
  1244. switch (cmd) {
  1245. case GRAPHENE_SET_TASK:
  1246. return set_graphene(current_tsk,
  1247. (const struct graphene_policies __user *) arg);
  1248. default:
  1249. return -ENOSYS;
  1250. }
  1251. }
  1252. static struct file_operations graphene_operations = {
  1253. .unlocked_ioctl = graphene_ioctl,
  1254. .compat_ioctl = graphene_ioctl,
  1255. .llseek = noop_llseek,
  1256. };
  1257. static struct miscdevice graphene_dev = {
  1258. .minor = GRAPHENE_MINOR,
  1259. .name = "graphene",
  1260. .fops = &graphene_operations,
  1261. .mode = 0666,
  1262. };
  1263. static int __init graphene_init(void)
  1264. {
  1265. int rv;
  1266. rv = misc_register(&graphene_dev);
  1267. if (rv) {
  1268. printk(KERN_ERR "Graphene error: "
  1269. "failed to add a char device (rv=%d)\n", rv);
  1270. return rv;
  1271. }
  1272. return 0;
  1273. }
  1274. device_initcall(graphene_init);