/*
 * Example wrapper around BPF macros.
 *
 * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
 * Author: Will Drewry <wad@chromium.org>
 *
 * The code may be used by anyone for any purpose,
 * and can serve as a starting point for developing
 * applications using prctl(PR_SET_SECCOMP, 2, ...).
 *
 * No guarantees are provided with respect to the correctness
 * or functionality of this code.
 */
  14. #ifndef __BPF_HELPER_H__
  15. #define __BPF_HELPER_H__
  16. #include <asm/bitsperlong.h> /* for __BITS_PER_LONG */
  17. #include <endian.h>
  18. #include <linux/filter.h>
  19. #include <linux/seccomp.h> /* for seccomp_data */
  20. #include <linux/types.h>
  21. #include <linux/unistd.h>
  22. #include <stddef.h>
  23. #define SECCOMP_RET_ISOLATE 0x00010000U /* redirect to appropriate handler.*/
  24. #define BPF_LABELS_MAX 256
  25. struct bpf_labels {
  26. int count;
  27. struct __bpf_label {
  28. const char *label;
  29. __u32 location;
  30. } labels[BPF_LABELS_MAX];
  31. };
  32. int bpf_resolve_jumps(struct bpf_labels *labels,
  33. struct sock_filter *filter, int count);
  34. __u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label);
  35. void seccomp_bpf_print(struct sock_filter *filter, int count);
  36. #define JUMP_JT 0xff
  37. #define JUMP_JF 0xff
  38. #define LABEL_JT 0xfe
  39. #define LABEL_JF 0xfe
  40. #define ALLOW \
  41. BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
  42. #define ISOLATE \
  43. BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ISOLATE)
  44. #define TRAP \
  45. BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_TRAP)
  46. #define RET_ERRNO \
  47. BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ERRNO)
  48. #define TRACE \
  49. BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_TRACE)
  50. #define DENY \
  51. BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL)
  52. #define JUMP(labels, label) \
  53. BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
  54. JUMP_JT, JUMP_JF)
  55. #define LABEL(labels, label) \
  56. BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
  57. LABEL_JT, LABEL_JF)
  58. #define SYSCALL(nr, jt) \
  59. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (nr), 0, 1), \
  60. jt
  61. /* Lame, but just an example */
  62. #define FIND_LABEL(labels, label) seccomp_bpf_label((labels), #label)
  63. #define EXPAND(...) __VA_ARGS__
  64. #define LO_FLAG(flag) (flag & ((1 << sizeof(__u32)) - 1))
  65. #define HI_FLAG(flag) ((flag >> sizeof(__u32)) & ((1 << sizeof(__u32)) - 1))
  66. /* Ensure that we load the logically correct offset. */
  67. #if __BYTE_ORDER == __LITTLE_ENDIAN
  68. #define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
  69. #define LO_IP offsetof(struct seccomp_data, instruction_pointer)
  70. #elif __BYTE_ORDER == __BIG_ENDIAN
  71. #define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
  72. #define LO_IP offsetof(struct seccomp_data, instruction_pointer) + sizeof(__u32)
  73. #else
  74. #error "Unknown endianness"
  75. #endif
  76. /* Map all width-sensitive operations */
  77. #if __BITS_PER_LONG == 32
  78. #define JEQ(x, jt) JEQ32(x, EXPAND(jt))
  79. #define JNE(x, jt) JNE32(x, EXPAND(jt))
  80. #define JGT(x, jt) JGT32(x, EXPAND(jt))
  81. #define JLT(x, jt) JLT32(x, EXPAND(jt))
  82. #define JGE(x, jt) JGE32(x, EXPAND(jt))
  83. #define JLE(x, jt) JLE32(x, EXPAND(jt))
  84. #define JA(x, jt) JA32(x, EXPAND(jt))
  85. #define ARG(i) ARG_32(i)
  86. #define ARG_FLAG(i, flag) ARG_FLAG_32(i, flag)
  87. #elif __BITS_PER_LONG == 64
  88. /* Ensure that we load the logically correct offset. */
  89. #if __BYTE_ORDER == __LITTLE_ENDIAN
  90. #define ENDIAN(_lo, _hi) _lo, _hi
  91. #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
  92. #define HI_IP offsetof(struct seccomp_data, instruction_pointer) + sizeof(__u32)
  93. #elif __BYTE_ORDER == __BIG_ENDIAN
  94. #define ENDIAN(_lo, _hi) _hi, _lo
  95. #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
  96. #define HI_IP offsetof(struct seccomp_data, instruction_pointer)
  97. #endif
  98. union arg64 {
  99. struct {
  100. __u32 ENDIAN(lo32, hi32);
  101. };
  102. __u64 u64;
  103. };
  104. #define JEQ(x, jt) \
  105. JEQ64(((union arg64){.u64 = (x)}).lo32, \
  106. ((union arg64){.u64 = (x)}).hi32, \
  107. EXPAND(jt))
  108. #define JGT(x, jt) \
  109. JGT64(((union arg64){.u64 = (x)}).lo32, \
  110. ((union arg64){.u64 = (x)}).hi32, \
  111. EXPAND(jt))
  112. #define JGE(x, jt) \
  113. JGE64(((union arg64){.u64 = (x)}).lo32, \
  114. ((union arg64){.u64 = (x)}).hi32, \
  115. EXPAND(jt))
  116. #define JNE(x, jt) \
  117. JNE64(((union arg64){.u64 = (x)}).lo32, \
  118. ((union arg64){.u64 = (x)}).hi32, \
  119. EXPAND(jt))
  120. #define JLT(x, jt) \
  121. JLT64(((union arg64){.u64 = (x)}).lo32, \
  122. ((union arg64){.u64 = (x)}).hi32, \
  123. EXPAND(jt))
  124. #define JLE(x, jt) \
  125. JLE64(((union arg64){.u64 = (x)}).lo32, \
  126. ((union arg64){.u64 = (x)}).hi32, \
  127. EXPAND(jt))
  128. #define JA(x, jt) \
  129. JA64(((union arg64){.u64 = (x)}).lo32, \
  130. ((union arg64){.u64 = (x)}).hi32, \
  131. EXPAND(jt))
  132. #define ARG(i) ARG_64(i)
  133. #define ARG_FLAG(i, flag) ARG_FLAG_64(i, flag)
  134. #define IP IP_64
  135. #else
  136. #error __BITS_PER_LONG value unusable.
  137. #endif
  138. /* Loads the arg into A */
  139. #define ARG_32(idx) \
  140. BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
  141. /* Loads the arg&flag into A */
  142. #define ARG_FLAG_32(idx, flag) \
  143. BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
  144. BPF_STMT(BPF_ALU+BPF_AND+BPF_K, LO_FLAG(flag))
  145. /* Loads hi into A and lo in X */
  146. #define IP_64 \
  147. BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_IP), \
  148. BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
  149. BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_IP), \
  150. BPF_STMT(BPF_ST, 1) /* hi -> M[1] */
  151. /* Loads hi into A and lo in X */
  152. #define ARG_64(idx) \
  153. BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
  154. BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
  155. BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_ARG(idx)), \
  156. BPF_STMT(BPF_ST, 1) /* hi -> M[1] */
  157. /* Loads hi into A and lo in X */
  158. #define ARG_FLAG_64(idx, flag) \
  159. BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
  160. BPF_STMT(BPF_ALU+BPF_AND+BPF_K, LO_FLAG(flag)), \
  161. BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
  162. BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_ARG(idx)), \
  163. BPF_STMT(BPF_ALU+BPF_AND+BPF_K, HI_FLAG(flag)), \
  164. BPF_STMT(BPF_ST, 1) /* hi -> M[1] */
  165. #define JEQ32(value, jt) \
  166. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 0, 1), \
  167. jt
  168. #define JNE32(value, jt) \
  169. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
  170. jt
  171. /* Checks the lo, then swaps to check the hi. A=lo,X=hi */
  172. #define JEQ64(lo, hi, jt) \
  173. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
  174. BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
  175. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
  176. BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
  177. jt, \
  178. BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
  179. #define JNE64(lo, hi, jt) \
  180. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \
  181. BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
  182. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
  183. BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
  184. jt, \
  185. BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
  186. #define JA32(value, jt) \
  187. BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
  188. jt
  189. #define JA64(lo, hi, jt) \
  190. BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
  191. BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
  192. BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
  193. BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
  194. jt, \
  195. BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
  196. #define JGE32(value, jt) \
  197. BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
  198. jt
  199. #define JLT32(value, jt) \
  200. BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
  201. jt
  202. /* Shortcut checking if hi > arg.hi. */
  203. #define JGE64(lo, hi, jt) \
  204. BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
  205. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
  206. BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
  207. BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
  208. BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
  209. jt, \
  210. BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
  211. #define JLT64(lo, hi, jt) \
  212. BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
  213. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
  214. BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
  215. BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
  216. BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
  217. jt, \
  218. BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
  219. #define JGT32(value, jt) \
  220. BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
  221. jt
  222. #define JLE32(value, jt) \
  223. BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
  224. jt
  225. /* Check hi > args.hi first, then do the GE checking */
  226. #define JGT64(lo, hi, jt) \
  227. BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
  228. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
  229. BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
  230. BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
  231. BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
  232. jt, \
  233. BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
  234. #define JLE64(lo, hi, jt) \
  235. BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \
  236. BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
  237. BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
  238. BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
  239. BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
  240. jt, \
  241. BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
  242. #define LOAD_SYSCALL_NR \
  243. BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
  244. offsetof(struct seccomp_data, nr))
  245. #endif /* __BPF_HELPER_H__ */