graphene.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653
  1. /*
  2. * linux/graphene/graphene.c
  3. *
  4. * Copyright (C) 2013-, Chia-Che Tsai, Bhushan Jain and Donald Porter
  5. *
  6. * Manage the graphene information and security policies.
  7. */
  8. #include <linux/version.h>
  9. #include <linux/atomic.h>
  10. #include <linux/slab.h>
  11. #include <linux/sched.h>
  12. #include <linux/fs.h>
  13. #include <linux/file.h>
  14. #include <linux/fdtable.h>
  15. #include <linux/dcache.h>
  16. #include <linux/namei.h>
  17. #include <linux/fs_struct.h>
  18. #include <linux/mount.h>
  19. #include <linux/rcupdate.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/module.h>
  22. #include <linux/un.h>
  23. #include <linux/net.h>
  24. #include <net/sock.h>
  25. #include <net/inet_sock.h>
  26. #include <net/tcp_states.h>
  27. #include <linux/pipe_fs_i.h>
  28. #include <../fs/internal.h>
  29. #include <../security/apparmor/include/audit.h>
  30. #include "graphene.h"
  31. #include "graphene-ipc.h"
/* Global registry of unix-domain binding rules: writers take
 * unix_list_lock, readers walk the list under RCU. */
static struct list_head unix_list = LIST_HEAD_INIT(unix_list);
static DEFINE_SPINLOCK(unix_list_lock);
/* Session-id source for graphene IPC; presumably bumped once per GIPC
 * session — the consuming code is not visible in this chunk. */
static atomic_t gipc_session;
  35. static int add_graphene_unix(struct graphene_unix *u)
  36. {
  37. struct graphene_unix *tmp;
  38. int err = 0;
  39. rcu_read_lock();
  40. list_for_each_entry_rcu(tmp, &unix_list, list) {
  41. if (u->root.mnt) {
  42. if (!tmp->root.mnt)
  43. continue;
  44. if (!path_equal(&tmp->root, &u->root))
  45. continue;
  46. }
  47. if (u->prefix.len) {
  48. int len;
  49. if (!tmp->prefix.len)
  50. continue;
  51. len = u->prefix.len < tmp->prefix.len ?
  52. u->prefix.len : tmp->prefix.len;
  53. if (!strncmp(u->prefix.name, tmp->prefix.name, len)) {
  54. err = -EACCES;
  55. break;
  56. }
  57. }
  58. }
  59. if (!err) {
  60. spin_lock(&unix_list_lock);
  61. list_add_tail_rcu(&u->list, &unix_list);
  62. spin_unlock(&unix_list_lock);
  63. }
  64. rcu_read_unlock();
  65. return err;;
  66. }
  67. int dup_graphene_struct(struct task_struct *tsk)
  68. {
  69. struct graphene_struct *gs, *new;
  70. struct graphene_info *gi;
  71. if (!(tsk->graphene))
  72. return 0;
  73. if (tsk->group_leader != tsk) {
  74. atomic_inc(&tsk->graphene->g_count);
  75. return 0;
  76. }
  77. gs = tsk->graphene;
  78. new = kmalloc(sizeof(struct graphene_struct), GFP_KERNEL);
  79. if (!new)
  80. return -ENOMEM;
  81. spin_lock(&gs->g_lock);
  82. gi = gs->g_info;
  83. atomic_inc(&gi->gi_count);
  84. new->g_info = gi;
  85. spin_unlock(&gs->g_lock);
  86. atomic_set(&new->g_count, 1);
  87. spin_lock_init(&new->g_lock);
  88. tsk->graphene = new;
  89. return 0;
  90. }
  91. static void drop_graphene_info(struct graphene_info *info)
  92. {
  93. struct graphene_path *p, *n;
  94. int i;
  95. list_for_each_entry_safe(p, n, &info->gi_paths, list) {
  96. path_put(&p->path);
  97. kfree(p);
  98. }
  99. list_for_each_entry_safe(p, n, &info->gi_rpaths, list) {
  100. path_put(&p->path);
  101. kfree(p);
  102. }
  103. if (info->gi_libexec.dentry)
  104. path_put(&info->gi_libexec);
  105. if (info->gi_unix) {
  106. struct graphene_unix *u = info->gi_unix;
  107. if (!atomic_dec_return(&u->count)) {
  108. spin_lock(&unix_list_lock);
  109. if (!list_empty(&u->list)) {
  110. list_del_rcu(&u->list);
  111. spin_unlock(&unix_list_lock);
  112. synchronize_rcu();
  113. }
  114. if (u->root.mnt)
  115. path_put(&u->root);
  116. if (u->prefix.len)
  117. kfree(u->prefix.name);
  118. kfree(u);
  119. }
  120. }
  121. for (i = 0 ; i < 3 ; i++)
  122. if (info->gi_console[i].mnt)
  123. path_put(&info->gi_console[i]);
  124. kfree(info);
  125. }
  126. static void put_graphene_info(struct graphene_info *info)
  127. {
  128. if (!atomic_dec_return(&info->gi_count))
  129. drop_graphene_info(info);
  130. }
  131. void put_graphene_struct(struct task_struct *tsk)
  132. {
  133. struct graphene_struct *gs = tsk->graphene;
  134. if (gs) {
  135. tsk->graphene = NULL;
  136. if (atomic_dec_return(&gs->g_count))
  137. return;
  138. put_graphene_info(gs->g_info);
  139. kfree(gs);
  140. }
  141. }
/*
 * Fetch the current graphene_info for a task's graphene_struct, or
 * NULL if the task is not sandboxed.  The returned pointer is NOT
 * reference-counted here; callers rely on the info staying alive for
 * the duration of the calling context.
 */
static inline
struct graphene_info *get_graphene_info(struct graphene_struct *gs)
{
	struct graphene_info *info;
	if (!gs)
		return NULL;
	rcu_read_lock();
	/* g_info may be replaced by a policy update; reading it is also
	 * legal when we hold g_lock, are the sole reference holder, or
	 * are a single-threaded group (the lockdep conditions below). */
	info = rcu_dereference_check(gs->g_info,
			lockdep_is_held(&gs->g_lock) ||
			atomic_read(&gs->g_count) == 1 ||
			rcu_my_thread_group_empty());
	rcu_read_unlock();
	return info;
}
/* struct file lost f_dentry-based inode access in 3.9; pick the right
 * accessor for the kernel being built against. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
# define FILE_INODE(file) ((file)->f_inode)
#else
# define FILE_INODE(file) ((file)->f_dentry->d_inode)
#endif
  161. static loff_t graphene_lib_llseek(struct file *file, loff_t offset, int origin)
  162. {
  163. struct inode *inode = FILE_INODE(file);
  164. if (!inode)
  165. return -EINVAL;
  166. if (!inode->i_fop || !inode->i_fop->llseek)
  167. return -EINVAL;
  168. return inode->i_fop->llseek(file, offset, origin);
  169. }
  170. static ssize_t graphene_lib_read (struct file *file, char __user *buf,
  171. size_t len, loff_t *ppos)
  172. {
  173. struct inode *inode = FILE_INODE(file);
  174. if (!inode)
  175. return -EINVAL;
  176. if (!inode->i_fop || !inode->i_fop->read)
  177. return -EINVAL;
  178. return inode->i_fop->read(file, buf, len, ppos);
  179. }
  180. static ssize_t graphene_lib_aio_read (struct kiocb *iocb, const struct iovec *iov,
  181. unsigned long nr_segs, loff_t pos)
  182. {
  183. struct inode *inode = FILE_INODE(iocb->ki_filp);
  184. if (!inode)
  185. return -EINVAL;
  186. if (!inode->i_fop || !inode->i_fop->aio_read)
  187. return -EINVAL;
  188. return inode->i_fop->aio_read(iocb, iov, nr_segs, pos);
  189. }
  190. static int graphene_lib_mmap(struct file *file, struct vm_area_struct *vma)
  191. {
  192. struct inode *inode = FILE_INODE(file);
  193. if (!inode)
  194. return -EINVAL;
  195. if (!inode->i_fop || !inode->i_fop->mmap)
  196. return -EINVAL;
  197. return inode->i_fop->mmap(file, vma);
  198. }
  199. static int graphene_lib_release(struct inode *inode, struct file *file)
  200. {
  201. if (!inode)
  202. return -EINVAL;
  203. if (!inode->i_fop || !inode->i_fop->release)
  204. return -EINVAL;
  205. return inode->i_fop->release(inode, file);
  206. }
/*
 * Helpers for rendering a struct path into a temporary name buffer.
 * Since 3.7, __getname() returns a struct filename whose storage we
 * reuse for the string; before that it returned a bare PATH_MAX char
 * buffer.  DEFINE_PATH() declares the buffer, fills it via d_path(),
 * and leaves the result in `dp`; PUT_PATH_BUFFER() releases it.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
# define DEFINE_PATH_BUFFER(fn, kpath, max) struct filename *fn; char *kpath; int max;
#else
# define DEFINE_PATH_BUFFER(fn, kpath, max) char * kpath; int max;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
/* the char buffer lives directly after the struct filename header */
# define GET_PATH_BUFFER(fn, kpath, max) \
	fn = __getname(); \
	kpath = (char *) fn + sizeof(*fn); \
	max = PATH_MAX - sizeof(*fn);
#else
# define GET_PATH_BUFFER(fn, kpath, max) \
	kpath = __getname(); \
	max = PATH_MAX;
#endif
/* dp points into kpath on success, or holds an ERR_PTR on failure */
#define DEFINE_PATH(dp, path, fn, kpath, max) \
	DEFINE_PATH_BUFFER(fn, kpath, max) \
	char *dp; \
	GET_PATH_BUFFER(fn, kpath, max) \
	dp = d_path(path, kpath, max);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
# define PUT_PATH_BUFFER(fn, kpath) final_putname(fn);
#else
# define PUT_PATH_BUFFER(fn, kpath) putname(kpath);
#endif
/*
 * get_unmapped_area handler for the sandboxed executable: when the
 * policy pins a load address (gi_libaddr), force mappings of the
 * executable to land at that base plus the file offset; otherwise
 * defer to the inode's handler or the mm default.
 */
static unsigned long
graphene_lib_get_area(struct file *file, unsigned long addr, unsigned long len,
		      unsigned long pgoff, unsigned long flags)
{
	struct task_struct *current_tsk = current;
	struct graphene_info *gi = get_graphene_info(current_tsk->graphene);
	struct inode *inode = FILE_INODE(file);
	unsigned long (*get_area) (struct file *, unsigned long, unsigned long,
				   unsigned long, unsigned long);

	if (!inode)
		return -EINVAL;

	/* NOTE(review): gi is dereferenced without a NULL check; these
	 * file_operations are presumably only installed for sandboxed
	 * tasks (see graphene_execve_open) — confirm. */
	if (gi->gi_libaddr) {
		/* only the registered executable may use these f_ops */
		if (!path_equal(&file->f_path, &gi->gi_libexec))
			BUG();
		/* place the mapping at the preassigned load address */
		if (!addr)
			addr = gi->gi_libaddr + pgoff * PAGE_SIZE;
#ifdef CONFIG_GRAPHENE_DEBUG
		{
			DEFINE_PATH(dp, &file->f_path, fn, kpath, max)
			if (!IS_ERR(dp))
				printk(KERN_INFO "Graphene: PID %d MAP FILE %s"
				       " OFF 0x%08lx AT 0x%016lx\n",
				       current->pid, dp,
				       pgoff * PAGE_SIZE, addr);
			PUT_PATH_BUFFER(fn, kpath)
		}
#endif
		return addr;
	}

	get_area = (inode->i_fop && inode->i_fop->get_unmapped_area) ?
		   inode->i_fop->get_unmapped_area :
		   current_tsk->mm->get_unmapped_area;
	return get_area(file, addr, len, pgoff, flags);
}
/* File operations installed on the executable during execve (see
 * graphene_execve_open); each entry forwards to the underlying
 * inode's handler, except get_unmapped_area which pins the mapping
 * address when the policy requires it. */
static struct file_operations graphene_lib_operations = {
	.llseek			= graphene_lib_llseek,
	.read			= graphene_lib_read,
	.aio_read		= graphene_lib_aio_read,
	.mmap			= graphene_lib_mmap,
	.get_unmapped_area	= graphene_lib_get_area,
	.release		= graphene_lib_release,
};
#ifdef CONFIG_GRAPHENE_DEBUG
/* Debug helper: log `fmt` with the current PID and the rendered path.
 * `fmt` must contain a %d followed by a %s conversion. */
static void print_path(const char * fmt, struct path *path)
{
	DEFINE_PATH(dp, path, fn, kpath, max)
	printk(fmt, current->pid, IS_ERR(dp) ? "(unknown)" : dp);
	PUT_PATH_BUFFER(fn, kpath)
}
#else
# define print_path(...) do {} while (0)
#endif
  285. int graphene_execve_open(struct file *file)
  286. {
  287. struct task_struct *current_tsk = current;
  288. struct graphene_info *gi = get_graphene_info(current_tsk->graphene);
  289. if (!current_tsk->in_execve)
  290. BUG();
  291. if (!path_equal(&file->f_path, &gi->gi_libexec)) {
  292. print_path(KERN_INFO "Graphene: DENY EXEC PID %d PATH %s\n",
  293. &file->f_path);
  294. return -EACCES;
  295. }
  296. if (!gi->gi_libaddr)
  297. goto accepted;
  298. file->f_op = &graphene_lib_operations;
  299. accepted:
  300. print_path(KERN_INFO "Graphene: ALLOW EXEC PID %d PATH %s\n",
  301. &file->f_path);
  302. return 0;
  303. }
  304. static int graphene_check_path(struct graphene_info *gi, int op, u32 mask,
  305. struct path *path, struct graphene_path *gp,
  306. int is_recursive)
  307. {
  308. if (!path_equal(path, &gp->path))
  309. return 0;
  310. if (mask & (MAY_READ|MAY_EXEC|MAY_ACCESS|
  311. AA_MAY_META_READ|AA_EXEC_MMAP|AA_MAY_LINK)) {
  312. if (!(gp->type & GRAPHENE_FS_READ))
  313. return -EACCES;
  314. }
  315. if (mask & (MAY_WRITE|MAY_APPEND|
  316. AA_MAY_CREATE|AA_MAY_DELETE|AA_MAY_META_WRITE|
  317. AA_MAY_CHMOD|AA_MAY_CHOWN)) {
  318. if (!(gp->type & GRAPHENE_FS_WRITE))
  319. return -EACCES;
  320. }
  321. return 1;
  322. }
  323. static int __common_perm(struct graphene_info *gi, int op, struct path *target,
  324. u32 mask)
  325. {
  326. struct graphene_path *p;
  327. struct path root, path = *target;
  328. struct qstr last;
  329. int rv = 0, i;
  330. BUG_ON(!path.dentry);
  331. path_get(&path);
  332. if (op == OP_OPEN) {
  333. int minor = iminor(path.dentry->d_inode);
  334. if (minor == GRAPHENE_MINOR)
  335. goto out;
  336. if (minor == GIPC_MINOR)
  337. goto out;
  338. }
  339. rcu_read_lock();
  340. for (i = 0 ; i < 3 ; i++) {
  341. if (!gi->gi_console[i].mnt)
  342. continue;
  343. if (path_equal(&gi->gi_console[i], &path))
  344. goto out;
  345. }
  346. list_for_each_entry_rcu(p, &gi->gi_paths, list) {
  347. rv = graphene_check_path(gi, op, mask, &path, p, 0);
  348. if (rv)
  349. goto out;
  350. }
  351. if (gi->gi_libexec.mnt && path_equal(&path, &gi->gi_libexec)) {
  352. rv = 0;
  353. goto out;
  354. }
  355. get_fs_root(current->fs, &root);
  356. last.len = 0;
  357. while (!path_equal(&path, &root)) {
  358. int is_recursive = 0;
  359. list_for_each_entry_rcu(p, &gi->gi_rpaths, list) {
  360. rv = graphene_check_path(gi, op, mask, &path, p,
  361. is_recursive);
  362. if (rv)
  363. goto out_root;
  364. }
  365. if (gi->gi_unix && gi->gi_unix->root.mnt) {
  366. struct graphene_unix *u = gi->gi_unix;
  367. if (path_equal(&path, &u->root)) {
  368. rv = 0;
  369. if (op == OP_MKNOD)
  370. goto out_root;
  371. if (op == OP_UNLINK) {
  372. if (!u->prefix.len)
  373. goto out_root;
  374. if (last.len) {
  375. int len = u->prefix.len;
  376. if (last.len < len)
  377. len = last.len;
  378. if (!strncmp(last.name,
  379. u->prefix.name,
  380. len))
  381. goto out_root;
  382. }
  383. }
  384. break;
  385. }
  386. }
  387. last = path.dentry->d_name;
  388. while(1) {
  389. struct dentry *old = path.dentry;
  390. if (path_equal(&path, &root))
  391. break;
  392. if (path.dentry != path.mnt->mnt_root) {
  393. path.dentry = dget_parent(path.dentry);
  394. dput(old);
  395. break;
  396. }
  397. if (!follow_up(&path))
  398. break;
  399. }
  400. is_recursive = 1;
  401. }
  402. rv = -EACCES;
  403. out_root:
  404. path_put(&root);
  405. out:
  406. rcu_read_unlock();
  407. path_put(&path);
  408. if (rv >= 0) {
  409. rv = 0;
  410. print_path(KERN_INFO "Graphene: ALLOW PID %d PATH %s\n", target);
  411. } else {
  412. print_path(KERN_INFO "Graphene: DENY PID %d PATH %s\n", target);
  413. }
  414. return rv;
  415. }
  416. int graphene_common_perm(int op, struct path *path, u32 mask)
  417. {
  418. struct graphene_info *gi = get_graphene_info(current->graphene);
  419. if (!gi)
  420. return 0;
  421. return __common_perm(gi, op, path, mask);
  422. }
  423. static int __unix_perm(struct sockaddr *address, int addrlen)
  424. {
  425. struct graphene_info *gi = get_graphene_info(current->graphene);
  426. const char *path, *sun_path;
  427. struct nameidata nd;
  428. struct path *p = NULL;
  429. int err = 0;
  430. if (!gi->gi_unix)
  431. return -EPERM;
  432. path = sun_path = ((struct sockaddr_un *) address)->sun_path;
  433. if (gi->gi_unix->root.mnt) {
  434. #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
  435. struct path parent;
  436. err = kern_path(path, LOOKUP_FOLLOW, &nd.path);
  437. if (!err)
  438. return err;
  439. p = &nd.path;
  440. err = vfs_path_lookup(nd.path.dentry, nd.path.mnt, "..", 0,
  441. &parent);
  442. if (!err)
  443. goto denied;
  444. if (!path_equal(&gi->gi_unix->root, &parent))
  445. goto denied;
  446. path_put(&parent);
  447. path = nd.path.dentry->d_name.name;
  448. #else
  449. err = kern_path_parent(path, &nd);
  450. if (!err)
  451. return err;
  452. path_put(&nd.path);
  453. path = nd.last.name;
  454. if (!path_equal(&gi->gi_unix->root, &nd.path))
  455. goto denied;
  456. #endif
  457. }
  458. if (gi->gi_unix->prefix.len &&
  459. memcmp(path, gi->gi_unix->prefix.name,
  460. gi->gi_unix->prefix.len))
  461. err = -EPERM;
  462. if (p)
  463. path_put(p);
  464. if (!err)
  465. return 0;
  466. denied:
  467. #ifdef CONFIG_GRAPHENE_DEBUG
  468. printk(KERN_INFO "Graphene: DENY PID %d SOCKET %s\n",
  469. current->pid, sun_path);
  470. #endif
  471. if (p)
  472. path_put(p);
  473. return -EPERM;
  474. }
  475. static int net_cmp(int family, int addr_any, int port_any,
  476. struct graphene_net_addr *ga,
  477. struct sockaddr *addr, int addrlen)
  478. {
  479. switch(family) {
  480. case AF_INET: {
  481. struct sockaddr_in *a = (void *) addr;
  482. if (!addr_any) {
  483. if (a->sin_addr.s_addr != ga->addr.sin_addr.s_addr)
  484. return -EPERM;
  485. }
  486. if (!port_any) {
  487. unsigned short port = ntohs(a->sin_port);
  488. if (!(port >= ga->port_begin && port <= ga->port_end))
  489. return -EPERM;
  490. }
  491. break;
  492. }
  493. #ifdef CONFIG_IPV6
  494. case AF_INET6: {
  495. struct sockaddr_in6 *a6 = (void *) addr;
  496. if (!addr_any) {
  497. if (memcmp(&a6->sin6_addr, &ga->addr.sin6_addr,
  498. sizeof(struct in6_addr)))
  499. return -EPERM;
  500. }
  501. if (!port_any) {
  502. unsigned short port = ntohs(a6->sin6_port);
  503. if (!(port >= ga->port_begin && port <= ga->port_end))
  504. return -EPERM;
  505. }
  506. break;
  507. }
  508. #endif
  509. }
  510. return 0;
  511. }
  512. #ifdef CONFIG_GRAPHENE_DEBUG
  513. static void print_net(int allow, int family, int op,
  514. struct sockaddr *local_addr, int local_addrlen,
  515. struct sockaddr *peer_addr, int peer_addrlen)
  516. {
  517. const char *allow_str = allow ? "ALLOW" : "DENY";
  518. const char *op_str = "";
  519. int print_peer = (op == OP_CONNECT || op == OP_SENDMSG);
  520. switch(op) {
  521. case OP_BIND: op_str = "BIND"; break;
  522. case OP_LISTEN: op_str = "LISTEN"; break;
  523. case OP_CONNECT: op_str = "CONNECT"; break;
  524. case OP_SENDMSG: op_str = "SENDMSG"; break;
  525. case OP_RECVMSG: op_str = "RECVMSG"; break;
  526. }
  527. if (family == AF_INET) {
  528. struct sockaddr_in *la = (void *) local_addr;
  529. u8 *a1 = (u8 *) &la->sin_addr.s_addr;
  530. struct sockaddr_in *pa = (void *) peer_addr;
  531. u8 *a2 = (u8 *) &pa->sin_addr.s_addr;
  532. if (print_peer && peer_addr) {
  533. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  534. "%d.%d.%d.%d:%d:%d.%d.%d.%d:%d\n",
  535. allow_str, op_str, current->pid,
  536. a1[0], a1[1], a1[2], a1[3], ntohs(la->sin_port),
  537. a2[0], a2[1], a2[2], a2[3], ntohs(pa->sin_port));
  538. } else {
  539. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  540. "%d.%d.%d.%d:%d\n",
  541. allow_str, op_str, current->pid,
  542. a1[0], a1[1], a1[2], a1[3], ntohs(la->sin_port));
  543. }
  544. }
  545. #ifdef CONFIG_IPV6
  546. if (family == AF_INET6) {
  547. struct sockaddr_in6 *la = (void *) local_addr;
  548. u16 *a1 = (u16 *) &la->sin6_addr.s6_addr;
  549. struct sockaddr_in6 *pa = (void *) peer_addr;
  550. u16 *a2 = (u16 *) &pa->sin6_addr.s6_addr;
  551. if (print_peer) {
  552. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  553. "[%d:%d:%d:%d:%d:%d:%d:%d]:%d:"
  554. "[%d.%d.%d.%d:%d:%d:%d:%d]:%d\n",
  555. allow_str, op_str, current->pid,
  556. a1[0], a1[1], a1[2], a1[3],
  557. a1[4], a1[5], a1[6], a1[7], ntohs(la->sin6_port),
  558. a2[0], a2[1], a2[2], a2[3],
  559. a2[4], a2[5], a2[6], a2[7], ntohs(pa->sin6_port));
  560. } else {
  561. printk(KERN_INFO "Graphene: %s %s PID %d SOCKET "
  562. "[%d.%d.%d.%d:%d:%d:%d:%d]:%d\n",
  563. allow_str, op_str, current->pid,
  564. a1[0], a1[1], a1[2], a1[3],
  565. a1[4], a1[5], a1[6], a1[7], ntohs(la->sin6_port));
  566. }
  567. }
  568. #endif
  569. }
  570. #else
  571. # define print_net(...) do {} while (0)
  572. #endif
  573. /*
  574. * network rules:
  575. * bind:
  576. * input addr/port match local addr/port
  577. * listen:
  578. * local addr/port match local addr/port
  579. * allow ANY peer addr/port
  580. * connect:
  581. * local/remote addr/port match local/remote addr/port
  582. * sendmsg:
 * EITHER stream socket OR no input addr/port OR
  584. * local/remote addr/port match local/remote addr/port
  585. * recvmsg:
  586. * EITHER stream socket OR connected OR
  587. * allow ANY peer addr/port
  588. */
  589. static
  590. int __common_net_perm(struct graphene_info *gi, int op, struct socket *sock,
  591. struct sockaddr *address, int addrlen)
  592. {
  593. struct sock *sk = sock->sk;
  594. struct inet_sock *inet = inet_sk(sk);
  595. struct graphene_net *gn;
  596. struct sockaddr_storage addrbuf;
  597. struct sockaddr * local_addr = NULL, * peer_addr = NULL;
  598. int local_addrlen, peer_addrlen;
  599. int local_needcmp = 0, peer_needcmp = 0;
  600. int local_needany = 0, peer_needany = 0;
  601. int err;
  602. if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_DGRAM)
  603. return -EPERM;
  604. #ifdef CONFIG_IPV6
  605. if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
  606. #else
  607. if (sk->sk_family != AF_INET)
  608. #endif
  609. return -EPERM;
  610. if (list_empty(&gi->gi_net))
  611. return -EPERM;
  612. if (op == OP_LISTEN)
  613. peer_needany = 1;
  614. if (op == OP_RECVMSG) {
  615. if (inet->inet_dport)
  616. return 0;
  617. peer_needany = 1;
  618. }
  619. if (op == OP_CONNECT || op == OP_SENDMSG) {
  620. BUG_ON(!address);
  621. peer_addr = address;
  622. peer_addrlen = addrlen;
  623. peer_needcmp = 1;
  624. }
  625. if (op == OP_BIND) {
  626. BUG_ON(!address);
  627. local_addr = address;
  628. local_addrlen = addrlen;
  629. local_needcmp = 1;
  630. if (sk->sk_type == SOCK_DGRAM)
  631. peer_needany = 1;
  632. } else {
  633. local_addr = (struct sockaddr *) &addrbuf;
  634. local_needcmp = 1;
  635. err = sock->ops->getname(sock, local_addr, &local_addrlen, 0);
  636. if (err < 0)
  637. return err;
  638. }
  639. list_for_each_entry(gn, &gi->gi_net, list) {
  640. if (gn->family != sk->sk_family)
  641. continue;
  642. if (local_needany &&
  643. (gn->flags & (LOCAL_ADDR_ANY|LOCAL_PORT_ANY)) !=
  644. (LOCAL_ADDR_ANY|LOCAL_PORT_ANY))
  645. continue;
  646. if (peer_needany &&
  647. (gn->flags & (PEER_ADDR_ANY|PEER_PORT_ANY)) !=
  648. (PEER_ADDR_ANY|PEER_PORT_ANY))
  649. continue;
  650. if (local_needcmp) {
  651. err = net_cmp(sk->sk_family, gn->flags & LOCAL_ADDR_ANY,
  652. gn->flags & LOCAL_PORT_ANY,
  653. &gn->local, local_addr, local_addrlen);
  654. if (err < 0)
  655. continue;
  656. }
  657. if (peer_needcmp) {
  658. err = net_cmp(sk->sk_family, gn->flags & PEER_ADDR_ANY,
  659. gn->flags & PEER_PORT_ANY,
  660. &gn->peer, peer_addr, peer_addrlen);
  661. if (err < 0)
  662. continue;
  663. }
  664. print_net(1, sk->sk_family, op, local_addr, local_addrlen,
  665. peer_addr, peer_addrlen);
  666. return 0;
  667. }
  668. print_net(0, sk->sk_family, op, local_addr, local_addrlen,
  669. peer_addr, peer_addrlen);
  670. return -EPERM;
  671. }
  672. int graphene_socket_bind(struct socket *sock,
  673. struct sockaddr *address, int addrlen)
  674. {
  675. if (GRAPHENE_ENABLED()) {
  676. struct graphene_info *gi = get_graphene_info(current->graphene);
  677. if (!sock || !sock->sk)
  678. return 0;
  679. if (sock->sk->sk_family == PF_UNIX) {
  680. if (sock->sk->sk_type != SOCK_STREAM)
  681. return -EPERM;
  682. return __unix_perm(address, addrlen);
  683. }
  684. return __common_net_perm(gi, OP_BIND, sock, address, addrlen);
  685. }
  686. return 0;
  687. }
  688. int graphene_socket_listen(struct socket *sock, int backlog)
  689. {
  690. if (GRAPHENE_ENABLED()) {
  691. struct graphene_info *gi = get_graphene_info(current->graphene);
  692. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  693. return 0;
  694. return __common_net_perm(gi, OP_LISTEN, sock, NULL, 0);
  695. }
  696. return 0;
  697. }
  698. int graphene_socket_connect(struct socket *sock,
  699. struct sockaddr *address, int addrlen)
  700. {
  701. if (GRAPHENE_ENABLED()) {
  702. struct graphene_info *gi = get_graphene_info(current->graphene);
  703. if (!sock || !sock->sk)
  704. return 0;
  705. if (sock->sk->sk_family == PF_UNIX) {
  706. if (sock->sk->sk_type != SOCK_STREAM)
  707. return -EPERM;
  708. return __unix_perm(address, addrlen);
  709. }
  710. return __common_net_perm(gi, OP_CONNECT, sock, address,
  711. addrlen);
  712. }
  713. return 0;
  714. }
  715. int graphene_socket_sendmsg(struct socket *sock,
  716. struct msghdr *msg, int size)
  717. {
  718. if (GRAPHENE_ENABLED()) {
  719. struct graphene_info *gi = get_graphene_info(current->graphene);
  720. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  721. return 0;
  722. if (sock->sk->sk_type == SOCK_STREAM)
  723. return 0;
  724. if (!msg->msg_name)
  725. return 0;
  726. return __common_net_perm(gi, OP_SENDMSG, sock,
  727. msg->msg_name, msg->msg_namelen);
  728. }
  729. return 0;
  730. }
  731. int graphene_socket_recvmsg(struct socket *sock,
  732. struct msghdr *msg, int size, int flags)
  733. {
  734. if (GRAPHENE_ENABLED()) {
  735. struct graphene_info *gi = get_graphene_info(current->graphene);
  736. if (!sock || !sock->sk || sock->sk->sk_family == PF_UNIX)
  737. return 0;
  738. if (sock->sk->sk_type == SOCK_STREAM)
  739. return 0;
  740. return __common_net_perm(gi, OP_RECVMSG, sock, NULL, 0);
  741. }
  742. return 0;
  743. }
  744. int graphene_task_kill(struct task_struct *tsk, struct siginfo *info,
  745. int sig, u32 secid)
  746. {
  747. struct task_struct *current_tsk = current;
  748. if (!current_tsk->graphene)
  749. return 0;
  750. if (sig != SIGCONT)
  751. return -EPERM;
  752. return (tsk->tgid == current_tsk->tgid) ? 0 : -EPERM;
  753. }
/*
 * Capture the paths behind fds 0-2 of @files as the sandbox's console
 * paths (deduplicated), so the permission checker can always allow
 * them.  Unused gi_console slots are marked with a NULL mnt sentinel.
 */
static void get_console(struct graphene_info *gi, struct files_struct *files)
{
	int i, j, n = 0;
	struct fdtable *fdt = files_fdtable(files);
	j = 0;
	/* NOTE(review): fdt is re-read under rcu_read_lock() but then
	 * used after the unlock; presumably the caller guarantees the
	 * table cannot be resized concurrently — confirm. */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
	for (;;) {
		unsigned long set;
		/* fd number of the first bit in this open-fds word */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		i = j * BITS_PER_LONG;
#else
		i = j * __NFDBITS;
#endif
		if (i >= fdt->max_fds)
			break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		set = fdt->open_fds[j++];
#else
		set = fdt->open_fds->fds_bits[j++];
#endif
		for ( ; set ; i++, set >>= 1) {
			struct file *file;
			int k;
			if (!(set & 1))
				continue;
			/* only fds 0, 1 and 2 count as console fds */
			if (i > 2)
				goto out;
			/* pull the file out of the table to hold it
			 * stable while we copy its path, restore below */
			file = xchg(&fdt->fd[i], NULL);
			if (!file)
				continue;
			/* skip paths already recorded for a lower fd */
			for (k = 0 ; k < n ; k++)
				if (path_equal(&file->f_path, &gi->gi_console[k]))
					break;
			if (k == n) {
				path_get(&file->f_path);
				gi->gi_console[n++] = file->f_path;
			}
#ifdef CONFIG_GRAPHENE_DEBUG
			{
				DEFINE_PATH(dp, &file->f_path, fn, kpath, max)
				if (!IS_ERR(dp))
					printk(KERN_INFO "Graphene: "
					       "PID %d CONSOLE %s\n",
					       current->pid, dp);
				PUT_PATH_BUFFER(fn, kpath)
			}
#endif
			xchg(&fdt->fd[i], file);
		}
	}
out:
	/* NULL mnt marks the remaining slots as unused */
	for ( ; n < 3 ; n++)
		gi->gi_console[n].mnt = NULL;
}
  810. static int update_graphene(struct task_struct *current_tsk,
  811. struct graphene_info *gi);
#ifdef CONFIG_GRAPHENE_DEBUG
/*
 * Debug helper: render a network rule as
 * "<local addr>:<ports>:<peer addr>:<ports>" (wildcarded parts left
 * empty) and log it with the current PID.  `fmt` must contain a %d
 * followed by a %s conversion.
 */
static void print_net_rule(const char *fmt, struct graphene_net *n)
{
# ifdef CONFIG_IPV6
#  define ADDR_STR_MAX	128
# else
#  define ADDR_STR_MAX	48
# endif
	char str[ADDR_STR_MAX];
	int len = 0, i;
	/* pass 0 renders the local side, pass 1 the peer side */
	for (i = 0; i < 2; i++) {
		unsigned char addr_any = i ? PEER_ADDR_ANY : LOCAL_ADDR_ANY;
		unsigned char port_any = i ? PEER_PORT_ANY : LOCAL_PORT_ANY;
		struct graphene_net_addr *a = i ? &n->peer : &n->local;
		if (i)
			str[len++] = ':';
		switch(n->family) {
		case AF_INET:
			if (n->flags & addr_any) {
				/* wildcard address prints as bare ':' */
				str[len++] = ':';
			} else {
				u8 *ip = (u8 *) &a->addr.sin_addr.s_addr;
				len += snprintf(str + len,
						ADDR_STR_MAX - len,
						"%u.%u.%u.%u:",
						ip[0], ip[1], ip[2], ip[3]);
			}
			break;
#ifdef CONFIG_IPV6
		case AF_INET6:
			if (n->flags & addr_any) {
				/* wildcard v6 address prints as "[]:" */
				str[len++] = '[';
				str[len++] = ']';
				str[len++] = ':';
			} else {
				u16 *ip = (u16 *) &a->addr.sin6_addr.s6_addr;
				len += snprintf(str + len,
						ADDR_STR_MAX - len,
						"[%u:%u:%u:%u:%u:%u:%u:%u]:",
						ip[0], ip[1], ip[2], ip[3],
						ip[4], ip[5], ip[6], ip[7]);
			}
			break;
#endif /* CONFIG_IPV6 */
		}
		/* port range: single value or "begin-end" */
		if (!(n->flags & port_any)) {
			if (a->port_begin == a->port_end)
				len += snprintf(str + len, ADDR_STR_MAX - len,
						"%u", a->port_begin);
			else
				len += snprintf(str + len, ADDR_STR_MAX - len,
						"%u-%u",
						a->port_begin, a->port_end);
		}
	}
	BUG_ON(len >= ADDR_STR_MAX);
	str[len] = 0;
	printk(fmt, current->pid, str);
}
#else
# define print_net_rule(...) do {} while (0)
#endif
  874. static int set_net_rule(struct graphene_net_policy *np,
  875. struct graphene_info *gi)
  876. {
  877. struct graphene_net *n;
  878. int i;
  879. #ifdef CONFIG_IPV6
  880. if (np->family != AF_INET && np->family != AF_INET6)
  881. #else
  882. if (np->family != AF_INET)
  883. #endif
  884. return -EINVAL;
  885. n = kmalloc(sizeof(struct graphene_net), GFP_KERNEL);
  886. if (!n)
  887. return -ENOMEM;
  888. n->family = np->family;
  889. n->flags = 0;
  890. n->local = np->local;
  891. n->peer = np->peer;
  892. for (i = 0; i < 2; i++) {
  893. unsigned char addr_any = i ? PEER_ADDR_ANY : LOCAL_ADDR_ANY;
  894. unsigned char port_any = i ? PEER_PORT_ANY : LOCAL_PORT_ANY;
  895. struct graphene_net_addr *a = i ? &n->peer : &n->local;
  896. switch(n->family) {
  897. case AF_INET:
  898. if (!a->addr.sin_addr.s_addr)
  899. n->flags |= addr_any;
  900. break;
  901. #ifdef CONFIG_IPV6
  902. case AF_INET6:
  903. if (!memcmp(&a->addr.sin6_addr.s6_addr, &in6addr_any, 16))
  904. n->flags |= addr_any;
  905. break;
  906. #endif /* CONFIG_IPV6 */
  907. }
  908. if (a->port_begin == 0 && a->port_end == 65535)
  909. n->flags |= port_any;
  910. }
  911. INIT_LIST_HEAD(&n->list);
  912. list_add_tail(&n->list, &gi->gi_net);
  913. print_net_rule(KERN_INFO "Graphene: PID %d NET RULE %s\n", n);
  914. return 0;
  915. }
  916. u32 gipc_get_session(struct task_struct *tsk)
  917. {
  918. struct graphene_info *gi = get_graphene_info(tsk->graphene);
  919. return gi ? gi->gi_gipc_session : 0;
  920. }
  921. int set_graphene(struct task_struct *current_tsk,
  922. const struct graphene_policies __user *gpolicies)
  923. {
  924. int npolicies;
  925. const struct graphene_user_policy __user *policies = gpolicies->policies;
  926. struct graphene_info *gi;
  927. struct graphene_user_policy ptmp;
  928. struct graphene_path *p;
  929. struct graphene_unix *u;
  930. int i, rv = 0;
  931. DEFINE_PATH_BUFFER(fn, kpath, max)
  932. #ifdef CONFIG_GRAPHENE_DEBUG
  933. char *dp;
  934. #endif
  935. rv = copy_from_user(&npolicies, &gpolicies->npolicies, sizeof(int));
  936. if (rv < 0)
  937. return -EFAULT;
  938. if (npolicies && !policies)
  939. return -EINVAL;
  940. #ifndef CONFIG_GRAPHENE_ISOLATE
  941. if (current_tsk->graphene)
  942. return -EAGAIN;
  943. if (current_tsk != current_tsk->group_leader)
  944. return -EPERM;
  945. #endif
  946. gi = kmalloc(sizeof(struct graphene_info), GFP_KERNEL);
  947. if (!gi)
  948. return -ENOMEM;
  949. GET_PATH_BUFFER(fn, kpath, max)
  950. memset(gi, 0, sizeof(struct graphene_info));
  951. INIT_LIST_HEAD(&gi->gi_paths);
  952. INIT_LIST_HEAD(&gi->gi_rpaths);
  953. INIT_LIST_HEAD(&gi->gi_net);
  954. gi->gi_gipc_session = atomic_inc_return(&gipc_session);
  955. #ifdef CONFIG_GRAPHENE_DEBUG
  956. printk(KERN_INFO "Graphene: PID %d GIPC SESSION %u\n",
  957. current_tsk->pid, gi->gi_gipc_session);
  958. #endif
  959. for (i = 0 ; i < npolicies ; i++) {
  960. int type, flags;
  961. rv = copy_from_user(&ptmp, policies + i,
  962. sizeof(struct graphene_user_policy));
  963. if (rv < 0)
  964. goto err;
  965. if (!ptmp.value) {
  966. rv = -EINVAL;
  967. goto err;
  968. }
  969. type = ptmp.type & ~(GRAPHENE_FS_READ | GRAPHENE_FS_WRITE);
  970. flags = ptmp.type & ~type;
  971. switch(type) {
  972. case GRAPHENE_LIB_NAME:
  973. rv = strncpy_from_user(kpath, ptmp.value, max);
  974. if (rv < 0)
  975. goto err;
  976. rv = kern_path(kpath, LOOKUP_FOLLOW, &gi->gi_libexec);
  977. if (rv)
  978. goto err;
  979. #ifdef CONFIG_GRAPHENE_DEBUG
  980. dp = d_path(&gi->gi_libexec, kpath, max);
  981. if (IS_ERR(dp)) {
  982. rv = -EINVAL;
  983. goto err;
  984. }
  985. printk(KERN_INFO "Graphene: PID %d LIB NAME %s\n",
  986. current_tsk->pid, dp);
  987. #endif
  988. break;
  989. case GRAPHENE_LIB_ADDR:
  990. gi->gi_libaddr = (u64) ptmp.value;
  991. #ifdef CONFIG_GRAPHENE_DEBUG
  992. printk(KERN_INFO "Graphene: PID %d LIB ADDR 0x%016llx\n",
  993. current_tsk->pid, gi->gi_libaddr);
  994. #endif
  995. break;
  996. case GRAPHENE_UNIX_ROOT:
  997. rv = strncpy_from_user(kpath, ptmp.value, max);
  998. if (rv < 0)
  999. goto err;
  1000. u = gi->gi_unix;
  1001. if (!u) {
  1002. u = kmalloc(sizeof(struct graphene_unix),
  1003. GFP_KERNEL);
  1004. if (!u) {
  1005. rv = -ENOMEM;
  1006. goto err;
  1007. }
  1008. u->root.mnt = NULL;
  1009. u->prefix.len = 0;
  1010. atomic_set(&u->count, 1);
  1011. INIT_LIST_HEAD(&u->list);
  1012. gi->gi_unix = u;
  1013. }
  1014. if (u && u->root.mnt)
  1015. path_put(&u->root);
  1016. rv = kern_path(kpath, LOOKUP_FOLLOW, &u->root);
  1017. if (rv)
  1018. goto err;
  1019. #ifdef CONFIG_GRAPHENE_DEBUG
  1020. dp = d_path(&u->root, kpath, max);
  1021. if (IS_ERR(dp)) {
  1022. rv = -EINVAL;
  1023. goto err;
  1024. }
  1025. printk(KERN_INFO "Graphene: PID %d UNIX ROOT %s\n",
  1026. current_tsk->pid, dp);
  1027. #endif
  1028. break;
  1029. case GRAPHENE_UNIX_PREFIX: {
  1030. char * prefix;
  1031. rv = strncpy_from_user(kpath, ptmp.value, max);
  1032. if (rv < 0)
  1033. goto err;
  1034. u = gi->gi_unix;
  1035. if (!u) {
  1036. u = kmalloc(sizeof(struct graphene_unix),
  1037. GFP_KERNEL);
  1038. if (!u) {
  1039. rv = -ENOMEM;
  1040. goto err;
  1041. }
  1042. u->root.mnt = NULL;
  1043. u->prefix.len = 0;
  1044. atomic_set(&u->count, 1);
  1045. INIT_LIST_HEAD(&u->list);
  1046. gi->gi_unix = u;
  1047. }
  1048. if (u && u->prefix.len)
  1049. kfree(&u->prefix.name);
  1050. prefix = kmalloc(rv + 1, GFP_KERNEL);
  1051. if (!prefix) {
  1052. rv = -ENOMEM;
  1053. goto err;
  1054. }
  1055. memcpy(prefix, kpath, rv + 1);
  1056. u->prefix.len = rv;
  1057. u->prefix.name = prefix;
  1058. #ifdef CONFIG_GRAPHENE_DEBUG
  1059. printk(KERN_INFO "Graphene: PID %d UNIX PREFIX %s\n",
  1060. current_tsk->pid, kpath);
  1061. #endif
  1062. break;
  1063. }
  1064. case GRAPHENE_NET_RULE: {
  1065. struct graphene_net_policy np;
  1066. rv = copy_from_user(&np, ptmp.value,
  1067. sizeof(struct graphene_net_policy));
  1068. if (rv < 0)
  1069. goto err;
  1070. rv = set_net_rule(&np, gi);
  1071. if (rv < 0)
  1072. goto err;
  1073. break;
  1074. }
  1075. case GRAPHENE_FS_PATH:
  1076. case GRAPHENE_FS_RECURSIVE:
  1077. rv = strncpy_from_user(kpath, ptmp.value, max);
  1078. if (rv < 0)
  1079. goto err;
  1080. p = kmalloc(sizeof(struct graphene_path),
  1081. GFP_KERNEL);
  1082. if (!p) {
  1083. rv = -ENOMEM;
  1084. goto err;
  1085. }
  1086. rv = kern_path(kpath, LOOKUP_FOLLOW, &p->path);
  1087. if (rv) {
  1088. kfree(p);
  1089. goto err;
  1090. }
  1091. #ifdef CONFIG_GRAPHENE_DEBUG
  1092. dp = d_path(&p->path, kpath, max);
  1093. if (IS_ERR(dp)) {
  1094. rv = -EINVAL;
  1095. kfree(p);
  1096. goto err;
  1097. }
  1098. printk(KERN_INFO "Graphene: PID %d PATH %s%s\n",
  1099. current_tsk->pid, dp,
  1100. type == GRAPHENE_FS_PATH ? "" :
  1101. " (recursive)");
  1102. #endif
  1103. p->type = flags;
  1104. INIT_LIST_HEAD(&p->list);
  1105. list_add_tail(&p->list,
  1106. type == GRAPHENE_FS_PATH ?
  1107. &gi->gi_paths : &gi->gi_rpaths);
  1108. break;
  1109. }
  1110. }
  1111. if (!current_tsk->graphene) {
  1112. struct graphene_struct *gs;
  1113. if (gi->gi_unix) {
  1114. rv = add_graphene_unix(gi->gi_unix);
  1115. if (rv < 0)
  1116. goto err;
  1117. }
  1118. get_console(gi, current_tsk->files);
  1119. gs = kmalloc(sizeof(struct graphene_struct), GFP_KERNEL);
  1120. if (!gs) {
  1121. rv = -ENOMEM;
  1122. goto err;
  1123. }
  1124. atomic_set(&gs->g_count, 1);
  1125. gs->g_info = gi;
  1126. spin_lock_init(&gs->g_lock);
  1127. current_tsk->graphene = gs;
  1128. printk(KERN_INFO "Graphene: PID %d registered\n",
  1129. current_tsk->pid);
  1130. }
  1131. #ifdef CONFIG_GRAPHENE_ISOLATE
  1132. else {
  1133. if ((rv = update_graphene(current_tsk, gi)) < 0) {
  1134. printk(KERN_INFO "Graphene: PID %d cannot be updated (%d)\n",
  1135. current_tsk->pid, rv);
  1136. goto err;
  1137. }
  1138. printk(KERN_INFO "Graphene: PID %d updated\n",
  1139. current_tsk->pid);
  1140. }
  1141. #endif
  1142. rv = 0;
  1143. goto out;
  1144. err:
  1145. drop_graphene_info(gi);
  1146. out:
  1147. PUT_PATH_BUFFER(fn, kpath)
  1148. return rv;
  1149. }
  1150. #ifdef CONFIG_GRAPHENE_ISOLATE
  1151. static int do_close_sock(struct graphene_info *gi, struct socket *sock,
  1152. int close_unix)
  1153. {
  1154. struct sock *sk = sock->sk;
  1155. struct sockaddr_storage address;
  1156. struct sockaddr *addr = (void *) &address;
  1157. struct inet_sock *inet;
  1158. int len, err;
  1159. if (!sk)
  1160. return 0;
  1161. if (sk->sk_family == PF_UNIX)
  1162. return close_unix ? -EPERM : 0;
  1163. inet = inet_sk(sk);
  1164. if (inet->inet_dport) {
  1165. err = sock->ops->getname(sock, addr, &len, 1);
  1166. if (err < 0)
  1167. return err;
  1168. err = __common_net_perm(gi, OP_CONNECT, sock, addr, len);
  1169. if (err < 0)
  1170. return err;
  1171. return 0;
  1172. }
  1173. if (!inet->inet_num)
  1174. return 0;
  1175. if (sk->sk_state == TCP_LISTEN) {
  1176. err = __common_net_perm(gi, OP_LISTEN, sock, NULL, 0);
  1177. } else {
  1178. err = sock->ops->getname(sock, addr, &len, 0);
  1179. if (err < 0)
  1180. return err;
  1181. err = __common_net_perm(gi, OP_BIND, sock, addr, len);
  1182. }
  1183. return err;
  1184. }
/*
 * Walk every open fd of @files and re-validate it against the new policy
 * @gi: sockets via do_close_sock(), anything else as an OP_OPEN check on
 * its path.  Descriptors that fail the check — and all pipes, which
 * cannot be re-validated — are closed.  Each fd is temporarily xchg()'d
 * out of the table while being examined and restored only if allowed.
 * Always returns 0.
 */
static int do_close_fds(struct graphene_info *gi, struct files_struct *files,
			int close_unix)
{
	int i, j;
	struct fdtable *fdt = files_fdtable(files);
	j = 0;
	/* NOTE(review): fdt is re-fetched under rcu_read_lock() but then
	 * used after rcu_read_unlock(); confirm the table cannot be
	 * resized concurrently at this point. */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
	for (;;) {
		unsigned long set;
		/* i = first fd number covered by bitmap word j. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		i = j * BITS_PER_LONG;
#else
		i = j * __NFDBITS;
#endif
		if (i >= fdt->max_fds)
			break;
		/* Grab the next word of the open-fd bitmap. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
		set = fdt->open_fds[j++];
#else
		set = fdt->open_fds->fds_bits[j++];
#endif
		for ( ; set ; i++, set >>= 1) {
			struct socket *sock = NULL;
			struct file *file;
			int err;
			if (!(set & 1))
				continue;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
			/* NOTE(review): sockfd_lookup() takes a reference
			 * on the underlying file that is never dropped
			 * here — possible refcount leak on pre-3.6. */
			sock = sockfd_lookup(i, &err);
#endif
			/* Pull the fd out of the table while we decide. */
			file = xchg(&fdt->fd[i], NULL);
			if (!file)
				continue;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
			sock = sock_from_file(file, &err);
#endif
			if (sock) {
				err = do_close_sock(gi, sock, close_unix);
				if (!err)
					goto allow;
				goto deny;
			}
			/* Pipes cannot be re-validated; always close. */
			if (get_pipe_info(file))
				goto deny;
			err = __common_perm(gi, OP_OPEN, &file->f_path,
					    aa_map_file_to_perms(file));
			if (!err) {
allow:
				/* Allowed: put the fd back in the table. */
				xchg(&fdt->fd[i], file);
				continue;
			}
deny:
			filp_close(file, files);
			cond_resched();
		}
	}
	return 0;
}
  1245. static
  1246. int net_check (int family, int addr_any, int port_any,
  1247. int flags1, struct graphene_net_addr * addr1,
  1248. int flags2, struct graphene_net_addr * addr2)
  1249. {
  1250. if (flags2 & addr_any)
  1251. goto port;
  1252. if (flags1 & addr_any)
  1253. goto port;
  1254. switch (family) {
  1255. case AF_INET:
  1256. if (memcmp(&addr1->addr.sin_addr,
  1257. &addr2->addr.sin_addr,
  1258. sizeof(struct in_addr)))
  1259. return -EACCES;
  1260. break;
  1261. case AF_INET6:
  1262. if (memcmp(&addr1->addr.sin6_addr,
  1263. &addr2->addr.sin6_addr,
  1264. sizeof(struct in6_addr)))
  1265. return -EACCES;
  1266. break;
  1267. }
  1268. port:
  1269. if (flags2 & port_any)
  1270. return 0;
  1271. if (flags1 & port_any)
  1272. return 0;
  1273. if (addr1->port_begin < addr2->port_begin ||
  1274. addr1->port_end > addr2->port_end)
  1275. return -EACCES;
  1276. return 0;
  1277. }
/*
 * CONFIG_GRAPHENE_ISOLATE: verify that the freshly parsed policy @new is
 * a subset of the task's currently installed policy, swap it in under
 * the task's graphene lock, and finally close any open fds the new
 * policy no longer permits.  Returns 0 on success, -EACCES when any new
 * rule is not covered by the old policy, or the error from
 * add_graphene_unix().
 */
static int update_graphene(struct task_struct *current_tsk,
			   struct graphene_info *new)
{
	struct graphene_struct *gs = current_tsk->graphene;
	struct graphene_info *gi = get_graphene_info(gs);
	struct graphene_path *p;
	struct graphene_net *n1, *n2;
	int i = 0, close_unix = 0;

	if (new->gi_unix) {
		/* NOTE(review): this branch runs when the NEW root has no
		 * mnt and the OLD one does, yet it still path_equal()s the
		 * two — looks inverted; confirm whether the intent was to
		 * inherit the old root unconditionally in that case. */
		if (!new->gi_unix->root.mnt &&
		    gi->gi_unix && gi->gi_unix->root.mnt) {
			if (!path_equal(&new->gi_unix->root,
					&gi->gi_unix->root))
				return -EACCES;
			path_get(&gi->gi_unix->root);
			new->gi_unix->root = gi->gi_unix->root;
		}
		if (new->gi_unix->prefix.len) {
			int err = add_graphene_unix(new->gi_unix);
			if (err < 0)
				return err;
		}
		close_unix = 1;
	}

	/* Carry the console paths (fds 0-2) over from the old policy. */
	for (i = 0 ; i < 3 ; i++)
		if (gi->gi_console[i].mnt) {
			path_get(&gi->gi_console[i]);
			new->gi_console[i] = gi->gi_console[i];
		} else {
			new->gi_console[i].mnt = NULL;
		}

	/* Every fs rule in the new policy must already be permitted by
	 * the old one. */
	list_for_each_entry(p, &new->gi_paths, list) {
		u32 mask = 0;
		if (p->type & GRAPHENE_FS_READ)
			mask |= MAY_READ;
		if (p->type & GRAPHENE_FS_WRITE)
			mask |= MAY_WRITE;
		print_path(KERN_INFO "Graphene: PID %d CHECK RULE %s\n",
			   &p->path);
		if (__common_perm(gi, OP_OPEN, &p->path, mask) < 0)
			return -EACCES;
	}

	/* Every new net rule must be covered (same family, address match
	 * or wildcard, port range contained) by some existing rule. */
	list_for_each_entry(n1, &new->gi_net, list) {
		bool accepted = false;
		print_net_rule(KERN_INFO "Graphene: PID %d CHECK RULE %s\n",
			       n1);
		list_for_each_entry(n2, &gi->gi_net, list) {
			if (n1->family != n2->family)
				continue;
			if (net_check(n1->family,
				      LOCAL_ADDR_ANY, LOCAL_PORT_ANY,
				      n1->flags, &n1->local,
				      n2->flags, &n2->local) < 0)
				continue;
			if (net_check(n1->family,
				      PEER_ADDR_ANY, PEER_PORT_ANY,
				      n1->flags, &n1->peer,
				      n2->flags, &n2->peer) < 0)
				continue;
			accepted = true;
			print_net_rule(KERN_INFO "Graphene: PID %d ALLOW %s\n",
				       n1);
			break;
		}
		if (!accepted) {
			print_net_rule(KERN_INFO "Graphene: PID %d DENY %s\n",
				       n1);
			return -EACCES;
		}
	}

	/* Install the new policy and drop the old reference. */
	spin_lock(&gs->g_lock);
	put_graphene_info(gs->g_info);
	gs->g_info = new;
	spin_unlock(&gs->g_lock);

	/* Finally, revoke any fds the new policy does not allow. */
	do_close_fds(new, current_tsk->files, close_unix);
	return 0;
}
  1355. #endif /* CONFIG_GRAPHENE_ISOLATE */
  1356. static long graphene_ioctl(struct file *file, unsigned int cmd,
  1357. unsigned long arg)
  1358. {
  1359. struct task_struct *current_tsk = current;
  1360. switch (cmd) {
  1361. case GRAPHENE_SET_TASK:
  1362. return set_graphene(current_tsk,
  1363. (const struct graphene_policies __user *) arg);
  1364. default:
  1365. return -ENOSYS;
  1366. }
  1367. }
/* File operations for /dev/graphene: only ioctl does real work. */
static struct file_operations graphene_operations = {
	.unlocked_ioctl	= graphene_ioctl,
	.compat_ioctl	= graphene_ioctl,
	.llseek		= noop_llseek,
};
/* Misc char device /dev/graphene, world-accessible (mode 0666) so any
 * process can opt into sandboxing itself. */
static struct miscdevice graphene_dev = {
	.minor	= GRAPHENE_MINOR,
	.name	= "graphene",
	.fops	= &graphene_operations,
	.mode	= 0666,
};
  1379. static int __init graphene_init(void)
  1380. {
  1381. int rv;
  1382. rv = misc_register(&graphene_dev);
  1383. if (rv) {
  1384. printk(KERN_ERR "Graphene error: "
  1385. "failed to add a char device (rv=%d)\n", rv);
  1386. return rv;
  1387. }
  1388. return 0;
  1389. }
  1390. device_initcall(graphene_init);