linux-3.14.patch

diff --git a/Kconfig b/Kconfig
index c13f48d..e18713e 100644
--- a/Kconfig
+++ b/Kconfig
@@ -9,3 +9,4 @@ config SRCARCH
 	option env="SRCARCH"

 source "arch/$SRCARCH/Kconfig"
+source "graphene/Kconfig"
diff --git a/Makefile b/Makefile
index e5ac8a6..116ac82 100644
--- a/Makefile
+++ b/Makefile
@@ -779,7 +779,7 @@ export mod_sign_cmd
 ifeq ($(KBUILD_EXTMOD),)
-core-y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/ graphene/

 vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
diff --git a/arch/Kconfig b/arch/Kconfig
index 80bbb8c..ca9f9e7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -336,6 +336,10 @@ config SECCOMP_FILTER
 	  See Documentation/prctl/seccomp_filter.txt for details.

+# Used by archs to indicate that they support SECCOMP_FILTER_JIT
+config HAVE_SECCOMP_FILTER_JIT
+	bool
+
 config HAVE_CC_STACKPROTECTOR
 	bool
 	help
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0af5250..0c4dfea 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -94,6 +94,7 @@ config X86
 	select GENERIC_CLOCKEVENTS_MIN_ADJUST
 	select IRQ_FORCED_THREADING
 	select HAVE_BPF_JIT if X86_64
+	select HAVE_SECCOMP_FILTER_JIT if X86_64
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select CLKEVT_I8253
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -1601,6 +1602,16 @@ config SECCOMP
 	  If unsure, say Y. Only embedded should say N here.

+if SECCOMP
+config SECCOMP_FILTER_JIT
+	bool "Enable seccomp filter Just In Time compiler"
+	depends on HAVE_SECCOMP_FILTER_JIT
+	depends on MODULES
+	---help---
+	  Like the Berkeley Packet Filter JIT, this option allows the
+	  kernel to generate native code when a seccomp filter is loaded
+	  in memory.
+endif
+
 source kernel/Kconfig.hz

 config KEXEC
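
With plain CONFIG_SECCOMP_FILTER, every syscall walks the attached filters through the sk_run_filter() interpreter; with the new option the filter is compiled to native x86-64 code once, at attach time. For context, this is the kind of program the JIT below compiles: a minimal seccomp-bpf filter installed from userspace (illustrative sketch only, not part of the patch; the chroot(2) example is arbitrary):

    #include <stddef.h>
    #include <errno.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>

    static int install_filter(void)
    {
        struct sock_filter insns[] = {
            /* A = seccomp_data.nr; the checker remaps this load to
             * BPF_S_ANC_SECCOMP_LD_W, the opcode the JIT handles below. */
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                     offsetof(struct seccomp_data, nr)),
            /* deny chroot(2), allow everything else */
            BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_chroot, 0, 1),
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
            .len = sizeof(insns) / sizeof(insns[0]),
            .filter = insns,
        };

        /* required unless the caller has CAP_SYS_ADMIN */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
            return -1;
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
    }

A robust filter would also check seccomp_data.arch before trusting the syscall number; the arch load is exactly what the AUDIT_ARCH_X86_64 / AUDIT_ARCH_I386 case in the JIT below serves.
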
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4ed75dd..5768520 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -13,6 +13,7 @@
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
 #include <linux/random.h>
+#include <asm/syscall.h>

 /*
  * Conventions :
@@ -108,9 +109,13 @@ do { \
 		goto cond_branch

-#define SEEN_DATAREF 1 /* might call external helpers */
-#define SEEN_XREG    2 /* ebx is used */
-#define SEEN_MEM     4 /* use mem[] for temporary storage */
+#define SEEN_DATAREF (1 << 0) /* might call external skb helpers */
+#define SEEN_XREG    (1 << 1) /* ebx is used */
+#define SEEN_MEM     (1 << 2) /* use mem[] for temporary storage */
+#define SEEN_SKBREF  (1 << 3) /* use pointer to skb */
+#define SEEN_SECCOMP (1 << 4) /* seccomp filters */
+
+#define NEED_PERILOGUE(_seen) ((_seen) & (SEEN_XREG | SEEN_MEM | SEEN_DATAREF))

 static inline void bpf_flush_icache(void *start, void *end)
 {
@@ -122,6 +127,25 @@ static inline void bpf_flush_icache(void *start, void *end)
 	set_fs(old_fs);
 }

+/* helper to find the offset in struct seccomp_data */
+#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
+
+/* helper to find the negative offset from the end of struct pt_regs */
+#define roffsetof(_type, _member) ((int)(offsetof(_type, _member) - sizeof(_type)))
+#define PT_REGS(_name) roffsetof(struct pt_regs, _name)
+
+#define EMIT_REGS_LOAD(offset)				\
+do {							\
+	if (is_imm8(offset)) {				\
+		/* mov off8(%r8),%eax */		\
+		EMIT4(0x41, 0x8b, 0x40, offset);	\
+	} else {					\
+		/* mov off32(%r8),%eax */		\
+		EMIT3(0x41, 0x8b, 0x80);		\
+		EMIT(offset, 4);			\
+	}						\
+} while (0)
+
 #define CHOOSE_LOAD_FUNC(K, func) \
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

@@ -178,7 +202,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
 	return header;
 }

-void bpf_jit_compile(struct sk_filter *fp)
+static void *__bpf_jit_compile(struct sock_filter *filter, unsigned int flen, u8 seen_all)
 {
 	u8 temp[64];
 	u8 *prog;
@@ -192,15 +216,14 @@ void bpf_jit_compile(struct sk_filter *fp)
 	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
 	unsigned int cleanup_addr; /* epilogue code offset */
 	unsigned int *addrs;
-	const struct sock_filter *filter = fp->insns;
-	int flen = fp->len;
+	void *bpf_func = NULL;

 	if (!bpf_jit_enable)
-		return;
+		return bpf_func;

 	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
 	if (addrs == NULL)
-		return;
+		return NULL;

 	/* Before first pass, make a rough estimation of addrs[]
 	 * each bpf instruction is translated to less than 64 bytes
@@ -212,12 +235,12 @@ void bpf_jit_compile(struct sk_filter *fp)
 	cleanup_addr = proglen; /* epilogue address */

 	for (pass = 0; pass < 10; pass++) {
-		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
+		u8 seen_or_pass0 = (pass == 0) ? seen_all : seen;
 		/* no prologue/epilogue for trivial filters (RET something) */
 		proglen = 0;
 		prog = temp;

-		if (seen_or_pass0) {
+		if (NEED_PERILOGUE(seen_or_pass0)) {
 			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
 			EMIT4(0x48, 0x83, 0xec, 96);	/* subq $96,%rsp */
 			/* note : must save %rbx in case bpf_error is hit */
@@ -260,6 +283,47 @@ void bpf_jit_compile(struct sk_filter *fp)
 			}
 		}

+#ifdef CONFIG_SECCOMP_FILTER_JIT
+		/* For seccomp filters, load:
+		 *  r9  = current
+		 *  r8  = current->thread.sp0
+		 *  edi = task_thread_info(current)->status & TS_COMPAT
+		 *
+		 * r8 points to the end of struct pt_regs, task_pt_regs(current) + 1
+		 */
+		if (seen_or_pass0 & SEEN_SECCOMP) {
+			/* seccomp filters: skb must be NULL */
+			if (seen_or_pass0 & (SEEN_SKBREF | SEEN_DATAREF)) {
+				pr_err_once("seccomp filters shouldn't use skb");
+				goto out;
+			}
+			/* r9 = current */
+			EMIT1(0x65); EMIT4(0x4c, 0x8b, 0x0c, 0x25); /* mov %gs:imm32,%r9 */
+			EMIT((u32)(unsigned long)&current_task, 4);
+
+			/* r8 = current->thread.sp0 */
+			EMIT3(0x4d, 0x8b, 0x81); /* mov off32(%r9),%r8 */
+			EMIT(offsetof(struct task_struct, thread.sp0), 4);
+
+			/* edi = task_thread_info(current)->status & TS_COMPAT */
+#ifdef CONFIG_IA32_EMULATION
+			/* task_thread_info(current): current->stack */
+			BUILD_BUG_ON(!is_imm8(offsetof(struct task_struct, stack)));
+			/* mov off8(%r9),%rdi */
+			EMIT4(0x49, 0x8b, 0x79, offsetof(struct task_struct, stack));
+			/* task_thread_info(current)->status */
+			BUILD_BUG_ON(!is_imm8(offsetof(struct thread_info, status)));
+			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, status) != 4);
+			/* mov off8(%rdi),%edi */
+			EMIT3(0x8b, 0x7f, offsetof(struct thread_info, status));
+			/* task_thread_info(current)->status & TS_COMPAT */
+			BUILD_BUG_ON(!is_imm8(TS_COMPAT));
+			/* and imm8,%edi */
+			EMIT3(0x83, 0xe7, TS_COMPAT);
+#endif /* CONFIG_IA32_EMULATION */
+		}
+#endif /* CONFIG_SECCOMP_FILTER_JIT */
+
 		switch (filter[0].code) {
 		case BPF_S_RET_K:
 		case BPF_S_LD_W_LEN:
@@ -272,6 +336,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 		case BPF_S_ANC_VLAN_TAG_PRESENT:
 		case BPF_S_ANC_QUEUE:
 		case BPF_S_ANC_PKTTYPE:
+		case BPF_S_ANC_SECCOMP_LD_W:
 		case BPF_S_LD_W_ABS:
 		case BPF_S_LD_H_ABS:
 		case BPF_S_LD_B_ABS:
@@ -449,7 +514,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				}
 				/* fallinto */
 			case BPF_S_RET_A:
-				if (seen_or_pass0) {
+				if (NEED_PERILOGUE(seen_or_pass0)) {
 					if (i != flen - 1) {
 						EMIT_JMP(cleanup_addr - addrs[i]);
 						break;
@@ -499,6 +564,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				break;
 			case BPF_S_LD_W_LEN: /* A = skb->len; */
 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+				seen |= SEEN_SKBREF;
 				if (is_imm8(offsetof(struct sk_buff, len)))
 					/* mov off8(%rdi),%eax */
 					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
@@ -508,7 +574,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				}
 				break;
 			case BPF_S_LDX_W_LEN: /* X = skb->len; */
-				seen |= SEEN_XREG;
+				seen |= SEEN_XREG | SEEN_SKBREF;
 				if (is_imm8(offsetof(struct sk_buff, len)))
 					/* mov off8(%rdi),%ebx */
 					EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
@@ -519,6 +585,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				break;
 			case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
+				seen |= SEEN_SKBREF;
 				if (is_imm8(offsetof(struct sk_buff, protocol))) {
 					/* movzwl off8(%rdi),%eax */
 					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
@@ -529,6 +596,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
 				break;
 			case BPF_S_ANC_IFINDEX:
+				seen |= SEEN_SKBREF;
 				if (is_imm8(offsetof(struct sk_buff, dev))) {
 					/* movq off8(%rdi),%rax */
 					EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
@@ -544,6 +612,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				break;
 			case BPF_S_ANC_MARK:
 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+				seen |= SEEN_SKBREF;
 				if (is_imm8(offsetof(struct sk_buff, mark))) {
 					/* mov off8(%rdi),%eax */
 					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
@@ -554,6 +623,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				break;
 			case BPF_S_ANC_RXHASH:
 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
+				seen |= SEEN_SKBREF;
 				if (is_imm8(offsetof(struct sk_buff, rxhash))) {
 					/* mov off8(%rdi),%eax */
 					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
@@ -564,6 +634,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				break;
 			case BPF_S_ANC_QUEUE:
 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
+				seen |= SEEN_SKBREF;
 				if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
 					/* movzwl off8(%rdi),%eax */
 					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
@@ -583,6 +654,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 			case BPF_S_ANC_VLAN_TAG:
 			case BPF_S_ANC_VLAN_TAG_PRESENT:
 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+				seen |= SEEN_SKBREF;
 				if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
 					/* movzwl off8(%rdi),%eax */
 					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
@@ -604,6 +676,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				if (off < 0)
 					goto out;
+				seen |= SEEN_SKBREF;
 				if (is_imm8(off)) {
 					/* movzbl off8(%rdi),%eax */
 					EMIT4(0x0f, 0xb6, 0x47, off);
@@ -617,7 +690,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 				}
 			case BPF_S_LD_W_ABS:
 				func = CHOOSE_LOAD_FUNC(K, sk_load_word);
-common_load:			seen |= SEEN_DATAREF;
+common_load:			seen |= SEEN_SKBREF | SEEN_DATAREF;
 				t_offset = func - (image + addrs[i]);
 				EMIT1_off32(0xbe, K); /* mov imm32,%esi */
 				EMIT1_off32(0xe8, t_offset); /* call */
@@ -630,14 +703,14 @@ common_load: seen |= SEEN_DATAREF;
 				goto common_load;
 			case BPF_S_LDX_B_MSH:
 				func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
-				seen |= SEEN_DATAREF | SEEN_XREG;
+				seen |= SEEN_XREG | SEEN_SKBREF | SEEN_DATAREF;
 				t_offset = func - (image + addrs[i]);
 				EMIT1_off32(0xbe, K); /* mov imm32,%esi */
 				EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
 				break;
 			case BPF_S_LD_W_IND:
 				func = sk_load_word;
-common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
+common_load_ind:		seen |= SEEN_XREG | SEEN_SKBREF | SEEN_DATAREF;
 				t_offset = func - (image + addrs[i]);
 				if (K) {
 					if (is_imm8(K)) {
@@ -725,6 +798,72 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 				}
 				EMIT_COND_JMP(f_op, f_offset);
 				break;
+#ifdef CONFIG_SECCOMP_FILTER_JIT
+			case BPF_S_ANC_SECCOMP_LD_W:
+				seen |= SEEN_SECCOMP;
+				if (K == BPF_DATA(nr)) {
+					/* A = task_pt_regs(current)->orig_ax */
+					EMIT_REGS_LOAD(PT_REGS(orig_ax));
+					break;
+				}
+				if (K == BPF_DATA(arch)) {
+					/* A = AUDIT_ARCH_X86_64 */
+					EMIT1_off32(0xb8, AUDIT_ARCH_X86_64); /* mov imm32,%eax */
+#ifdef CONFIG_IA32_EMULATION
+					/* A = compat ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64 */
+					EMIT1_off32(0xb9, AUDIT_ARCH_I386); /* mov imm32,%ecx */
+					EMIT2(0x85, 0xff); /* test %edi,%edi */
+					EMIT3(0x0f, 0x45, 0xc1); /* cmovne %ecx,%eax */
+#endif /* CONFIG_IA32_EMULATION */
+					break;
+				}
+				if (K >= BPF_DATA(args[0]) && K < BPF_DATA(args[6])) {
+					int arg = (K - BPF_DATA(args[0])) / sizeof(u64);
+					int off = K % sizeof(u64);
+
+					switch (arg) {
+					case 0: off += PT_REGS(di); break;
+					case 1: off += PT_REGS(si); break;
+					case 2: off += PT_REGS(dx); break;
+					case 3: off += PT_REGS(r10); break;
+					case 4: off += PT_REGS(r8); break;
+					case 5: off += PT_REGS(r9); break;
+					}
+					EMIT_REGS_LOAD(off);
+#ifdef CONFIG_IA32_EMULATION
+					off = K % sizeof(u64);
+					switch (arg) {
+					case 0: off += PT_REGS(bx); break;
+					case 1: off += PT_REGS(cx); break;
+					case 2: off += PT_REGS(dx); break;
+					case 3: off += PT_REGS(si); break;
+					case 4: off += PT_REGS(di); break;
+					case 5: off += PT_REGS(bp); break;
+					}
+					if (is_imm8(off)) {
+						/* mov off8(%r8),%ecx */
+						EMIT4(0x41, 0x8b, 0x48, off);
+					} else {
+						/* mov off32(%r8),%ecx */
+						EMIT3(0x41, 0x8b, 0x88);
+						EMIT(off, 4);
+					}
+					EMIT2(0x85, 0xff); /* test %edi,%edi */
+					EMIT3(0x0f, 0x45, 0xc1); /* cmovne %ecx,%eax */
+#endif /* CONFIG_IA32_EMULATION */
+					break;
+				}
+				if (K == BPF_DATA(instruction_pointer)) {
+					/* A = task_pt_regs(current)->ip */
+					EMIT_REGS_LOAD(PT_REGS(ip));
+					break;
+				}
+				if (K == BPF_DATA(instruction_pointer) + sizeof(u32)) {
+					EMIT_REGS_LOAD(PT_REGS(ip) + 4);
+					break;
+				}
+				goto out;
+#endif /* CONFIG_SECCOMP_FILTER_JIT */
 			default:
 				/* hmm, too complex filter, give up with jit compiler */
 				goto out;
@@ -732,10 +871,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 			ilen = prog - temp;
 			if (image) {
 				if (unlikely(proglen + ilen > oldproglen)) {
-					pr_err("bpb_jit_compile fatal error\n");
-					kfree(addrs);
+					pr_err("bpf_jit_compile fatal error\n");
 					module_free(NULL, header);
-					return;
+					goto out;
 				}
 				memcpy(image + proglen, temp, ilen);
 			}
@@ -747,7 +885,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 		 * use it to give the cleanup instruction(s) addr
 		 */
 		cleanup_addr = proglen - 1; /* ret */
-		if (seen_or_pass0)
+		if (NEED_PERILOGUE(seen_or_pass0))
 			cleanup_addr -= 1; /* leaveq */

 		if (seen_or_pass0 & SEEN_XREG)
 			cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
@@ -771,11 +909,11 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
 		set_memory_ro((unsigned long)header, header->pages);
-		fp->bpf_func = (void *)image;
+		bpf_func = (void *)image;
 	}
 out:
 	kfree(addrs);
-	return;
+	return bpf_func;
 }

 static void bpf_jit_free_deferred(struct work_struct *work)
@@ -798,3 +936,38 @@ void bpf_jit_free(struct sk_filter *fp)
 		kfree(fp);
 	}
 }
+
+void bpf_jit_compile(struct sk_filter *fp)
+{
+	u8 seen_all = SEEN_XREG | SEEN_MEM | SEEN_SKBREF | SEEN_DATAREF;
+	void *bpf_func = __bpf_jit_compile(fp->insns, fp->len, seen_all);
+
+	if (bpf_func)
+		fp->bpf_func = bpf_func;
+}
+
+#ifdef CONFIG_SECCOMP_FILTER_JIT
+void seccomp_jit_compile(struct seccomp_filter *fp)
+{
+	struct sock_filter *filter = seccomp_filter_get_insns(fp);
+	unsigned int flen = seccomp_filter_get_len(fp);
+	u8 seen_all = SEEN_XREG | SEEN_MEM | SEEN_SECCOMP;
+	void *bpf_func = __bpf_jit_compile(filter, flen, seen_all);
+
+	if (bpf_func)
+		seccomp_filter_set_bpf_func(fp, bpf_func);
+}
+
+void seccomp_jit_free(struct seccomp_filter *fp)
+{
+	void *bpf_func = seccomp_filter_get_bpf_func(fp);
+
+	if (bpf_func) {
+		unsigned long addr = (unsigned long)bpf_func & PAGE_MASK;
+		struct bpf_binary_header *header = (void *)addr;
+
+		set_memory_rw(addr, header->pages);
+		module_free(NULL, header);
+	}
+}
+#endif /* CONFIG_SECCOMP_FILTER_JIT */
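
The trickiest part of the hunk above is the addressing scheme: the prologue loads %r8 with current->thread.sp0, which on x86-64 is the address one past the saved user-register frame, i.e. task_pt_regs(current) + 1, so every saved register is reachable at a small negative offset (hence roffsetof/PT_REGS). In C terms, the emitted loads compute roughly the following (sketch only, for illustration; the JIT emits this as two or three instructions with no function call):

    /* What EMIT_REGS_LOAD(PT_REGS(...)) amounts to, written in C.
     * K is the userspace offset into struct seccomp_data. */
    static u32 seccomp_load_sketch(int K)
    {
        struct pt_regs *regs = task_pt_regs(current);

        if (K == offsetof(struct seccomp_data, nr))
            return regs->orig_ax;   /* syscall number */
        if (K == offsetof(struct seccomp_data, args[0]))
            return regs->di;        /* first argument, 64-bit ABI */
        if (K == offsetof(struct seccomp_data, instruction_pointer))
            return regs->ip;
        return 0;                   /* remaining offsets elided here */
    }

Under CONFIG_IA32_EMULATION the JIT additionally loads the ia32 register for the same argument slot (bx/cx/dx/si/di/bp) into %ecx and uses cmovne on the TS_COMPAT flag held in %edi, so one compiled filter serves both ABIs without a branch.
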
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 3737f72..f7a4aba 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -3,6 +3,7 @@
 #include <linux/major.h>
 #include <linux/list.h>
 #include <linux/types.h>
+#include <../graphene/graphene.h>

 /*
  * These allocations are managed by device@lanana.org. If you use an
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a781dec..3381137 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1161,6 +1161,11 @@ enum perf_event_task_context {
 	perf_nr_task_contexts,
 };

+#ifdef CONFIG_GRAPHENE
+# include <../graphene/graphene.h>
+struct graphene_struct;
+#endif
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -1581,6 +1586,11 @@ struct task_struct {
 	unsigned int sequential_io;
 	unsigned int sequential_io_avg;
 #endif
+
+#ifdef CONFIG_GRAPHENE
+	/* for graphene tasks */
+	struct graphene_struct *graphene; /* structure to store graphene info */
+#endif
 };

 /* Future-safe accessor for struct task_struct's cpus_allowed. */
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 6f19cfd..ed258f4 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -77,6 +77,14 @@ static inline int seccomp_mode(struct seccomp *s)
 extern void put_seccomp_filter(struct task_struct *tsk);
 extern void get_seccomp_filter(struct task_struct *tsk);
 extern u32 seccomp_bpf_load(int off);
+#ifdef CONFIG_SECCOMP_FILTER_JIT
+struct sock_filter *seccomp_filter_get_insns(struct seccomp_filter *);
+unsigned int seccomp_filter_get_len(struct seccomp_filter *);
+void *seccomp_filter_get_bpf_func(struct seccomp_filter *);
+void seccomp_filter_set_bpf_func(struct seccomp_filter *, void *);
+void seccomp_jit_compile(struct seccomp_filter *fp);
+void seccomp_jit_free(struct seccomp_filter *fp);
+#endif
 #else /* CONFIG_SECCOMP_FILTER */
 static inline void put_seccomp_filter(struct task_struct *tsk)
 {
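
These accessors exist because struct seccomp_filter is private to kernel/seccomp.c, so the arch JIT can only work through an opaque handle. Note the declarations are themselves only visible under CONFIG_SECCOMP_FILTER_JIT, which is why every call site in this patch is wrapped in #ifdef. A common alternative pattern, shown here only as a sketch and not used by this patch, is to provide inline no-op stubs so the call sites stay unconditional:

    #ifdef CONFIG_SECCOMP_FILTER_JIT
    void seccomp_jit_compile(struct seccomp_filter *fp);
    void seccomp_jit_free(struct seccomp_filter *fp);
    #else
    /* no-op stubs: callers compile away when the JIT is disabled */
    static inline void seccomp_jit_compile(struct seccomp_filter *fp) { }
    static inline void seccomp_jit_free(struct seccomp_filter *fp) { }
    #endif
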
diff --git a/kernel/fork.c b/kernel/fork.c
index a17621c..41d5958 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -11,6 +11,7 @@
  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
  */

+#include <linux/version.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/unistd.h>
@@ -84,6 +85,10 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/task.h>

+#ifdef CONFIG_GRAPHENE
+# include <../graphene/graphene.h>
+#endif
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -242,6 +247,10 @@ void __put_task_struct(struct task_struct *tsk)
 	delayacct_tsk_free(tsk);
 	put_signal_struct(tsk->signal);

+#ifdef CONFIG_GRAPHENE
+	put_graphene_struct(tsk);
+#endif
+
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
 }
@@ -322,6 +331,16 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	tsk->stack_canary = get_random_int();
 #endif

+#ifdef CONFIG_GRAPHENE
+	err = dup_graphene_struct(tsk);
+	if (err)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+		goto free_ti;
+#else
+		goto out;
+#endif
+#endif
+
 	/*
 	 * One for us, one for whoever does the "release_task()" (usually
 	 * parent)
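
Together with the sched.h hunk above, this fixes the lifecycle of task->graphene: duplicated in dup_task_struct() at fork, released in __put_task_struct() at teardown. The LINUX_VERSION_CODE switch exists because dup_task_struct()'s error label was renamed across kernel releases (free_ti in newer trees, out in older ones). The patch does not include graphene/graphene.h itself, so the exact prototypes are the graphene tree's business; a plausible reading, inferred from the call sites only (hypothetical):

    /* Inferred from the kernel/fork.c call sites; the real definitions
     * live in the graphene/ tree, which this patch does not show. */
    int dup_graphene_struct(struct task_struct *tsk);  /* fork: copy or share
                                                          parent state, 0 on success */
    void put_graphene_struct(struct task_struct *tsk); /* exit: drop the task's
                                                          reference */
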
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index b7a1004..5def696 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -55,9 +55,34 @@ struct seccomp_filter {
 	atomic_t usage;
 	struct seccomp_filter *prev;
 	unsigned short len; /* Instruction count */
+#ifdef CONFIG_SECCOMP_FILTER_JIT
+	void *bpf_func;
+#endif
 	struct sock_filter insns[];
 };

+#ifdef CONFIG_SECCOMP_FILTER_JIT
+struct sock_filter *seccomp_filter_get_insns(struct seccomp_filter *fp)
+{
+	return fp->insns;
+}
+
+unsigned int seccomp_filter_get_len(struct seccomp_filter *fp)
+{
+	return fp->len;
+}
+
+void *seccomp_filter_get_bpf_func(struct seccomp_filter *fp)
+{
+	return fp->bpf_func;
+}
+
+void seccomp_filter_set_bpf_func(struct seccomp_filter *fp, void *bpf_func)
+{
+	fp->bpf_func = bpf_func;
+}
+#endif
+
 /* Limit any path through the tree to 256KB worth of instructions. */
 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

@@ -213,7 +238,16 @@ static u32 seccomp_run_filters(int syscall)
 	 * value always takes priority (ignoring the DATA).
 	 */
 	for (f = current->seccomp.filter; f; f = f->prev) {
-		u32 cur_ret = sk_run_filter(NULL, f->insns);
+		u32 cur_ret;
+#ifdef CONFIG_SECCOMP_FILTER_JIT
+		void *bpf_func = seccomp_filter_get_bpf_func(f);
+		if (bpf_func)
+			cur_ret = (*(unsigned int (*)(const struct sk_buff *,
+						      const struct sock_filter *))
+				   bpf_func) (NULL, f->insns);
+		else
+#endif
+			cur_ret = sk_run_filter(NULL, f->insns);
 		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
 			ret = cur_ret;
 	}
@@ -275,6 +309,10 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 	if (ret)
 		goto fail;

+#ifdef CONFIG_SECCOMP_FILTER_JIT
+	seccomp_jit_compile(filter);
+#endif
+
 	/*
 	 * If there is an existing filter, make it the prev and don't drop its
 	 * task reference.
@@ -332,6 +370,9 @@ void put_seccomp_filter(struct task_struct *tsk)
 	while (orig && atomic_dec_and_test(&orig->usage)) {
 		struct seccomp_filter *freeme = orig;
 		orig = orig->prev;
+#ifdef CONFIG_SECCOMP_FILTER_JIT
+		seccomp_jit_free(freeme);
+#endif
 		kfree(freeme);
 	}
 }
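
The function-pointer cast in seccomp_run_filters() is dense; with a typedef it reads as follows (equivalent sketch, not part of the patch). The JIT output shares sk_run_filter()'s calling convention, with the skb argument pinned to NULL because the generated prologue materializes everything from current instead:

    typedef unsigned int (*seccomp_bpf_func_t)(const struct sk_buff *skb,
                                               const struct sock_filter *insns);

    static u32 run_one_filter(struct seccomp_filter *f)
    {
        seccomp_bpf_func_t jitted =
            (seccomp_bpf_func_t)seccomp_filter_get_bpf_func(f);

        if (jitted)
            return jitted(NULL, f->insns);      /* native code */
        return sk_run_filter(NULL, f->insns);   /* interpreter fallback */
    }

Note also that seccomp_jit_compile() has no error path here: if JIT compilation fails or the JIT is disabled at runtime, bpf_func stays NULL and the interpreter handles the filter, so attach never fails because of the JIT.
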
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 4257b7e..b21c19d 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -36,6 +36,10 @@
 #include "include/policy.h"
 #include "include/procattr.h"

+#ifdef CONFIG_GRAPHENE
+# include <../graphene/graphene.h>
+#endif
+
 /* Flag indicating whether initialization completed */
 int apparmor_initialized __initdata;

@@ -165,6 +169,12 @@ static int common_perm(int op, struct path *path, u32 mask,
 	struct aa_profile *profile;
 	int error = 0;

+#ifdef CONFIG_GRAPHENE
+	if (GRAPHENE_ENABLED() &&
+	    (error = graphene_common_perm(op, path, mask)))
+		return error;
+#endif
+
 	profile = __aa_current_profile();
 	if (!unconfined(profile))
 		error = aa_path_perm(op, profile, path, 0, mask, cond);
@@ -377,6 +387,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
 {
 	struct aa_file_cxt *fcxt = file->f_security;
 	struct aa_profile *profile;
+	u32 mask;
 	int error = 0;

 	if (!mediated_filesystem(file_inode(file)))
@@ -388,10 +399,21 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
 	 * actually execute the image.
 	 */
 	if (current->in_execve) {
+#ifdef CONFIG_GRAPHENE
+		if (GRAPHENE_ENABLED() && (error = graphene_execve_open(file)))
+			return error;
+#endif
 		fcxt->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
 		return 0;
 	}

+#ifdef CONFIG_GRAPHENE
+	mask = aa_map_file_to_perms(file);
+	if (GRAPHENE_ENABLED() &&
+	    (error = graphene_common_perm(OP_OPEN, &file->f_path, mask)))
+		return error;
+#endif
+
 	profile = aa_cred_profile(cred);
 	if (!unconfined(profile)) {
 		struct inode *inode = file_inode(file);
@@ -647,6 +669,14 @@ static struct security_operations apparmor_ops = {
 	.getprocattr = apparmor_getprocattr,
 	.setprocattr = apparmor_setprocattr,

+#ifdef CONFIG_GRAPHENE
+	.socket_bind = graphene_socket_bind,
+	.socket_listen = graphene_socket_listen,
+	.socket_connect = graphene_socket_connect,
+	.socket_sendmsg = graphene_socket_sendmsg,
+	.socket_recvmsg = graphene_socket_recvmsg,
+#endif
+
 	.cred_alloc_blank = apparmor_cred_alloc_blank,
 	.cred_free = apparmor_cred_free,
 	.cred_prepare = apparmor_cred_prepare,
@@ -658,6 +688,10 @@ static struct security_operations apparmor_ops = {
 	.bprm_secureexec = apparmor_bprm_secureexec,
 	.task_setrlimit = apparmor_task_setrlimit,
+
+#ifdef CONFIG_GRAPHENE
+	.task_kill = graphene_task_kill,
+#endif
 };

 /*
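
The graphene_socket_* and graphene_task_kill hooks are wired straight into apparmor_ops, so their implementations in the graphene/ tree (not shown by this patch) must match the struct security_operations slots exactly. As a sketch, the prototypes would have to take the following shape, with signatures as I recall them from include/linux/security.h in 3.14:

    int graphene_socket_bind(struct socket *sock,
                             struct sockaddr *address, int addrlen);
    int graphene_socket_listen(struct socket *sock, int backlog);
    int graphene_socket_connect(struct socket *sock,
                                struct sockaddr *address, int addrlen);
    int graphene_socket_sendmsg(struct socket *sock,
                                struct msghdr *msg, int size);
    int graphene_socket_recvmsg(struct socket *sock,
                                struct msghdr *msg, int size, int flags);
    int graphene_task_kill(struct task_struct *p, struct siginfo *info,
                           int sig, u32 secid);

Each hook returns 0 to allow the operation and a negative errno to deny it, the same contract the GRAPHENE_ENABLED()-guarded calls in common_perm() and apparmor_file_open() above rely on.
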