prob_distr.c

  1. /* Copyright (c) 2018, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. /**
  4. * \file prob_distr.c
  5. *
  6. * \brief
  7. * Implements various probability distributions.
  8. * Almost all code is courtesy of Riastradh.
  9. *
  10. * \details
  11. * Here are some details that might help you understand this file:
  12. *
  13. * - Throughout this file, `eps' means the largest relative error of a
  14. * correctly rounded floating-point operation, which in binary64
  15. * floating-point arithmetic is 2^-53. Here the relative error of a
  16. * true value x from a computed value y is |x - y|/|x|. This
  17. * definition of epsilon is conventional for numerical analysts when
  18. * writing error analyses. (If your libm doesn't provide correctly
  19. * rounded exp and log, their relative error is usually below 2*2^-53
  20. * and probably closer to 1.1*2^-53 instead.)
  21. *
  22. * The C constant DBL_EPSILON is actually twice this, and should
  23. * perhaps rather be named ulp(1) -- that is, it is the distance from
  24. * 1 to the next greater floating-point number, which is usually of
  25. * more interest to programmers and hardware engineers.
  26. *
  27. * Since this file is concerned mainly with error bounds rather than
  28. * with low-level bit-hacking of floating-point numbers, we adopt the
  29. * numerical analysts' definition in the comments, though we do use
  30. * DBL_EPSILON in a handful of places where it is convenient to use
  31. * some function of eps = DBL_EPSILON/2 in a case analysis.
  32. *
  33. * - In various functions (e.g. sample_log_logistic()) we jump through hoops so
  34. * that we can use reals closer to 0 than closer to 1, since we achieve much
  35. * greater accuracy for floating point numbers near 0. In particular, we can
36. * represent differences as small as 10^-300 for numbers near 0, but no
37. * smaller than about 10^-16 for numbers near 1.
  38. **/
  39. #define PROB_DISTR_PRIVATE
  40. #include "orconfig.h"
  41. #include "lib/math/prob_distr.h"
  42. #include "lib/crypt_ops/crypto_rand.h"
  43. #include "lib/cc/ctassert.h"
  44. #include <float.h>
  45. #include <math.h>
  46. #include <stddef.h>
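/*
 * Editor's illustration (not part of the original file): the relation
 * between the numerical analysts' `eps' used in the comments above and
 * the C constant DBL_EPSILON, assuming IEEE 754 binary64 arithmetic
 * with no extended-precision evaluation, and assuming <assert.h>.
 */
#if 0
static void
eps_vs_dbl_epsilon_example(void)
{
  const double eps = DBL_EPSILON/2;      /* 2^-53: bound on relative rounding error */
  assert(DBL_EPSILON == ldexp(1, -52));  /* ulp(1): gap from 1 to the next double */
  assert(eps == ldexp(1, -53));
  assert(1 + eps == 1);                  /* eps is lost when added to 1... */
  assert(1 + DBL_EPSILON > 1);           /* ...but ulp(1) is not */
}
#endif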
  47. /** Validators for downcasting macros below */
  48. #define validate_container_of(PTR, TYPE, FIELD) \
  49. (0 * sizeof((PTR) - &((TYPE *)(((char *)(PTR)) - \
  50. offsetof(TYPE, FIELD)))->FIELD))
  51. #define validate_const_container_of(PTR, TYPE, FIELD) \
  52. (0 * sizeof((PTR) - &((const TYPE *)(((const char *)(PTR)) - \
  53. offsetof(TYPE, FIELD)))->FIELD))
  54. /** Downcasting macro */
  55. #define container_of(PTR, TYPE, FIELD) \
  56. ((TYPE *)(((char *)(PTR)) - offsetof(TYPE, FIELD)) \
  57. + validate_container_of(PTR, TYPE, FIELD))
  58. /** Constified downcasting macro */
  59. #define const_container_of(PTR, TYPE, FIELD) \
  60. ((const TYPE *)(((const char *)(PTR)) - offsetof(TYPE, FIELD)) \
  61. + validate_const_container_of(PTR, TYPE, FIELD))
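/*
 * Editor's illustration (not part of the original file): how the
 * downcasting macros above are used.  Given a pointer to an embedded
 * `base' member, container_of()/const_container_of() recover the
 * enclosing structure; the validate_* term evaluates to 0 but forces a
 * type error if PTR does not point to FIELD's type.  struct uniform
 * and its fields are the ones used later in this file.
 */
#if 0
static double
uniform_width_example(const struct dist *dist)
{
  const struct uniform *U = const_container_of(dist, struct uniform, base);
  return U->b - U->a;
}
#endif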
  62. /**
  63. * Count number of one bits in 32-bit word.
  64. */
  65. static unsigned
  66. bitcount32(uint32_t x)
  67. {
  68. /* Count two-bit groups. */
  69. x -= (x >> 1) & UINT32_C(0x55555555);
  70. /* Count four-bit groups. */
  71. x = ((x >> 2) & UINT32_C(0x33333333)) + (x & UINT32_C(0x33333333));
  72. /* Count eight-bit groups. */
  73. x = (x + (x >> 4)) & UINT32_C(0x0f0f0f0f);
  74. /* Sum all eight-bit groups, and extract the sum. */
  75. return (x * UINT32_C(0x01010101)) >> 24;
  76. }
  77. /**
  78. * Count leading zeros in 32-bit word.
  79. */
  80. static unsigned
  81. clz32(uint32_t x)
  82. {
  83. /* Round up to a power of two. */
  84. x |= x >> 1;
  85. x |= x >> 2;
  86. x |= x >> 4;
  87. x |= x >> 8;
  88. x |= x >> 16;
  89. /* Subtract count of one bits from 32. */
  90. return (32 - bitcount32(x));
  91. }
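/*
 * Editor's illustration (not part of the original file): a few values
 * of the two bit tricks above, assuming <assert.h>.  bitcount32() sums
 * two-, four-, then eight-bit groups; clz32() smears the leading one
 * bit rightward and subtracts the popcount from 32.
 */
#if 0
static void
bit_tricks_example(void)
{
  assert(bitcount32(0) == 0);
  assert(bitcount32(UINT32_C(0x80000001)) == 2);
  assert(bitcount32(UINT32_C(0xffffffff)) == 32);
  assert(clz32(0) == 32);
  assert(clz32(1) == 31);
  assert(clz32(UINT32_C(0x80000000)) == 0);
}
#endif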
  92. /*
  93. * Some lemmas that will be used throughout this file to prove various error
  94. * bounds:
  95. *
  96. * Lemma 1. If |d| <= 1/2, then 1/(1 + d) <= 2.
  97. *
  98. * Proof. If 0 <= d <= 1/2, then 1 + d >= 1, so that 1/(1 + d) <= 1.
  99. * If -1/2 <= d <= 0, then 1 + d >= 1/2, so that 1/(1 + d) <= 2. QED.
  100. *
  101. * Lemma 2. If b = a*(1 + d)/(1 + d') for |d'| < 1/2 and nonzero a, b,
  102. * then b = a*(1 + e) for |e| <= 2|d' - d|.
  103. *
  104. * Proof. |a - b|/|a|
  105. * = |a - a*(1 + d)/(1 + d')|/|a|
  106. * = |1 - (1 + d)/(1 + d')|
  107. * = |(1 + d' - 1 - d)/(1 + d')|
  108. * = |(d' - d)/(1 + d')|
  109. * <= 2|d' - d|, by Lemma 1,
  110. *
  111. * QED.
  112. *
  113. * Lemma 3. For |d|, |d'| < 1/4,
  114. *
  115. * |log((1 + d)/(1 + d'))| <= 4|d - d'|.
  116. *
  117. * Proof. Write
  118. *
  119. * log((1 + d)/(1 + d'))
  120. * = log(1 + (1 + d)/(1 + d') - 1)
121. * = log(1 + (1 + d - 1 - d')/(1 + d'))
  122. * = log(1 + (d - d')/(1 + d')).
  123. *
  124. * By Lemma 1, |(d - d')/(1 + d')| < 2|d' - d| < 1, so the Taylor
  125. * series of log(1 + x) converges absolutely for (d - d')/(1 + d'),
  126. * and thus we have
  127. *
  128. * |log(1 + (d - d')/(1 + d'))|
  129. * = |\sum_{n=1}^\infty ((d - d')/(1 + d'))^n/n|
  130. * <= \sum_{n=1}^\infty |(d - d')/(1 + d')|^n/n
  131. * <= \sum_{n=1}^\infty |2(d' - d)|^n/n
  132. * <= \sum_{n=1}^\infty |2(d' - d)|^n
133. * = |2(d' - d)|/(1 - |2(d' - d)|)
  134. * <= 4|d' - d|,
  135. *
  136. * QED.
  137. *
  138. * Lemma 4. If 1/e <= 1 + x <= e, then
  139. *
  140. * log(1 + (1 + d) x) = (1 + d') log(1 + x)
  141. *
  142. * for |d'| < 8|d|.
  143. *
  144. * Proof. Write
  145. *
  146. * log(1 + (1 + d) x)
  147. * = log(1 + x + x*d)
  148. * = log((1 + x) (1 + x + x*d)/(1 + x))
  149. * = log(1 + x) + log((1 + x + x*d)/(1 + x))
  150. * = log(1 + x) (1 + log((1 + x + x*d)/(1 + x))/log(1 + x)).
  151. *
  152. * The relative error is bounded by
  153. *
  154. * |log((1 + x + x*d)/(1 + x))/log(1 + x)|
  155. * <= 4|x + x*d - x|/|log(1 + x)|, by Lemma 3,
  156. * = 4|x*d|/|log(1 + x)|
  157. * < 8|d|,
  158. *
  159. * since in this range 0 < 1 - 1/e < x/log(1 + x) <= e - 1 < 2. QED.
  160. */
  161. /**
  162. * Compute the logistic function: f(x) = 1/(1 + e^{-x}) = e^x/(1 + e^x).
  163. * Maps a log-odds-space probability in [-\infty, +\infty] into a direct-space
  164. * probability in [0,1]. Inverse of logit.
  165. *
  166. * Ill-conditioned for large x; the identity logistic(-x) = 1 -
  167. * logistic(x) and the function logistichalf(x) = logistic(x) - 1/2 may
  168. * help to rearrange a computation.
  169. *
  170. * This implementation gives relative error bounded by 7 eps.
  171. */
  172. STATIC double
  173. logistic(double x)
  174. {
  175. if (x <= log(DBL_EPSILON/2)) {
  176. /*
  177. * If x <= log(DBL_EPSILON/2) = log(eps), then e^x <= eps. In this case
  178. * we will approximate the logistic() function with e^x because the
  179. * relative error is less than eps. Here is a calculation of the
  180. * relative error between the logistic() function and e^x and a proof
  181. * that it's less than eps:
  182. *
  183. * |e^x - e^x/(1 + e^x)|/|e^x/(1 + e^x)|
  184. * <= |1 - 1/(1 + e^x)|*|1 + e^x|
  185. * = |e^x/(1 + e^x)|*|1 + e^x|
  186. * = |e^x|
  187. * <= eps.
  188. */
  189. return exp(x); /* return e^x */
  190. } else if (x <= -log(DBL_EPSILON/2)) {
  191. /*
  192. * e^{-x} > 0, so 1 + e^{-x} > 1, and 0 < 1/(1 +
  193. * e^{-x}) < 1; further, since e^{-x} < 1 + e^{-x}, we
194. * also have 0 < e^{-x}/(1 + e^{-x}) < 1. Thus, if exp has
  195. * relative error d0, + has relative error d1, and /
  196. * has relative error d2, then we get
  197. *
  198. * (1 + d2)/[(1 + (1 + d0) e^{-x})(1 + d1)]
199. * = (1 + d2)/[1 + e^{-x} + d0 e^{-x}
200. * + d1 + d1 e^{-x} + d0 d1 e^{-x}]
201. * = (1 + d2)/[(1 + e^{-x})
202. * * (1 + d0 e^{-x}/(1 + e^{-x})
203. * + d1/(1 + e^{-x})
204. * + d0 d1 e^{-x}/(1 + e^{-x}))].
205. * = (1 + d2)/[(1 + e^{-x})(1 + d')]
206. * = [1/(1 + e^{-x})] (1 + d2)/(1 + d')
  207. *
  208. * where
  209. *
  210. * d' = d0 e^{-x}/(1 + e^{-x})
  211. * + d1/(1 + e^{-x})
  212. * + d0 d1 e^{-x}/(1 + e^{-x}).
  213. *
  214. * By Lemma 2 this relative error is bounded by
  215. *
216. * 2|d2 - d'|
217. * = 2|d2 - d0 e^{-x}/(1 + e^{-x})
218. * - d1/(1 + e^{-x})
219. * - d0 d1 e^{-x}/(1 + e^{-x})|
220. * <= 2|d2| + 2|d0 e^{-x}/(1 + e^{-x})|
221. * + 2|d1/(1 + e^{-x})|
222. * + 2|d0 d1 e^{-x}/(1 + e^{-x})|
223. * <= 2|d2| + 2|d0| + 2|d1| + 2|d0 d1|
224. * <= 2 eps + 2 eps + 2 eps + 2 eps^2
  225. * <= 6 eps + 2 eps^2.
  226. */
  227. return 1/(1 + exp(-x));
  228. } else {
  229. /*
  230. * e^{-x} <= eps, so the relative error of 1 from 1/(1
  231. * + e^{-x}) is
  232. *
  233. * |1/(1 + e^{-x}) - 1|/|1/(1 + e^{-x})|
  234. * = |e^{-x}/(1 + e^{-x})|/|1/(1 + e^{-x})|
  235. * = |e^{-x}|
  236. * <= eps.
  237. *
  238. * This computation avoids an intermediate overflow
  239. * exception, although the effect on the result is
  240. * harmless.
  241. *
  242. * XXX Should maybe raise inexact here.
  243. */
  244. return 1;
  245. }
  246. }
  247. /**
  248. * Compute the logit function: log p/(1 - p). Defined on [0,1]. Maps
  249. * a direct-space probability in [0,1] to a log-odds-space probability
  250. * in [-\infty, +\infty]. Inverse of logistic.
  251. *
  252. * Ill-conditioned near 1/2 and 1; the identity logit(1 - p) =
  253. * -logit(p) and the function logithalf(p0) = logit(1/2 + p0) may help
  254. * to rearrange a computation for p in [1/(1 + e), 1 - 1/(1 + e)].
  255. *
  256. * This implementation gives relative error bounded by 10 eps.
  257. */
  258. STATIC double
  259. logit(double p)
  260. {
  261. /* logistic(-1) <= p <= logistic(+1) */
  262. if (1/(1 + exp(1)) <= p && p <= 1/(1 + exp(-1))) {
  263. /*
  264. * For inputs near 1/2, we want to compute log1p(near
  265. * 0) rather than log(near 1), so write this as:
  266. *
  267. * log(p/(1 - p)) = -log((1 - p)/p)
  268. * = -log(1 + (1 - p)/p - 1)
  269. * = -log(1 + (1 - p - p)/p)
  270. * = -log(1 + (1 - 2p)/p).
  271. *
  272. * Since p = 2p/2 <= 1 <= 2*2p = 4p, the floating-point
  273. * evaluation of 1 - 2p is exact; the only error arises
  274. * from division and log1p. First, note that if
  275. * logistic(-1) <= p <= logistic(+1), (1 - 2p)/p lies
  276. * in the bounds of Lemma 4.
  277. *
  278. * If division has relative error d0 and log1p has
  279. * relative error d1, the outcome is
  280. *
  281. * -(1 + d1) log(1 + (1 - 2p) (1 + d0)/p)
  282. * = -(1 + d1) (1 + d') log(1 + (1 - 2p)/p)
  283. * = -(1 + d1 + d' + d1 d') log(1 + (1 - 2p)/p).
  284. *
  285. * where |d'| < 8|d0| by Lemma 4. The relative error
  286. * is then bounded by
  287. *
  288. * |d1 + d' + d1 d'|
  289. * <= |d1| + 8|d0| + 8|d1 d0|
  290. * <= 9 eps + 8 eps^2.
  291. */
  292. return -log1p((1 - 2*p)/p);
  293. } else {
  294. /*
  295. * For inputs near 0, although 1 - p may be rounded to
  296. * 1, it doesn't matter much because the magnitude of
  297. * the result is so much larger. For inputs near 1, we
  298. * can compute 1 - p exactly, although the precision on
  299. * the input is limited so we won't ever get more than
  300. * about 700 for the output.
  301. *
  302. * If - has relative error d0, / has relative error d1,
  303. * and log has relative error d2, then
  304. *
  305. * (1 + d2) log((1 + d0) p/[(1 - p)(1 + d1)])
  306. * = (1 + d2) [log(p/(1 - p)) + log((1 + d0)/(1 + d1))]
  307. * = log(p/(1 - p)) + d2 log(p/(1 - p))
  308. * + (1 + d2) log((1 + d0)/(1 + d1))
  309. * = log(p/(1 - p))*[1 + d2 +
  310. * + (1 + d2) log((1 + d0)/(1 + d1))/log(p/(1 - p))]
  311. *
  312. * Since 0 <= p < logistic(-1) or logistic(+1) < p <=
  313. * 1, we have |log(p/(1 - p))| > 1. Hence this error
  314. * is bounded by
  315. *
  316. * |d2 + (1 + d2) log((1 + d0)/(1 + d1))/log(p/(1 - p))|
  317. * <= |d2| + |(1 + d2) log((1 + d0)/(1 + d1))
  318. * / log(p/(1 - p))|
  319. * <= |d2| + |(1 + d2) log((1 + d0)/(1 + d1))|
  320. * <= |d2| + 4|(1 + d2) (d0 - d1)|, by Lemma 3,
321. * <= |d2| + 4|d0 - d1 + d2 d0 - d2 d1|
322. * <= |d2| + 4|d0| + 4|d1| + 4|d2 d0| + 4|d2 d1|
  323. * <= 9 eps + 8 eps^2.
  324. */
  325. return log(p/(1 - p));
  326. }
  327. }
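/*
 * Editor's illustration (not part of the original file): logit() and
 * logistic() above are inverses, so a round trip should agree to
 * within the documented bounds (10 eps and 7 eps, slightly amplified
 * by the conditioning of logistic at the intermediate point).  A loose
 * sanity check, assuming <assert.h>:
 */
#if 0
static void
logit_logistic_roundtrip_example(void)
{
  const double p = 0.3;
  const double x = logit(p);               /* log(0.3/0.7) ~= -0.8473 */
  const double q = logistic(x);
  assert(fabs(q - p)/p <= 32*DBL_EPSILON); /* generous slack: 64 eps */
}
#endif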
  328. /**
  329. * Compute the logit function, translated in input by 1/2: logithalf(p)
  330. * = logit(1/2 + p). Defined on [-1/2, 1/2]. Inverse of logistichalf.
  331. *
  332. * Ill-conditioned near +/-1/2. If |p0| > 1/2 - 1/(1 + e), it may be
  333. * better to compute 1/2 + p0 or -1/2 - p0 and to use logit instead.
  334. * This implementation gives relative error bounded by 34 eps.
  335. */
  336. STATIC double
  337. logithalf(double p0)
  338. {
  339. if (fabs(p0) <= 0.5 - 1/(1 + exp(1))) {
  340. /*
  341. * logit(1/2 + p0)
  342. * = log((1/2 + p0)/(1 - (1/2 + p0)))
  343. * = log((1/2 + p0)/(1/2 - p0))
  344. * = log(1 + (1/2 + p0)/(1/2 - p0) - 1)
  345. * = log(1 + (1/2 + p0 - (1/2 - p0))/(1/2 - p0))
  346. * = log(1 + (1/2 + p0 - 1/2 + p0)/(1/2 - p0))
  347. * = log(1 + 2 p0/(1/2 - p0))
  348. *
  349. * If the error of subtraction is d0, the error of
  350. * division is d1, and the error of log1p is d2, then
  351. * what we compute is
  352. *
  353. * (1 + d2) log(1 + (1 + d1) 2 p0/[(1 + d0) (1/2 - p0)])
  354. * = (1 + d2) log(1 + (1 + d') 2 p0/(1/2 - p0))
  355. * = (1 + d2) (1 + d'') log(1 + 2 p0/(1/2 - p0))
  356. * = (1 + d2 + d'' + d2 d'') log(1 + 2 p0/(1/2 - p0)),
  357. *
  358. * where |d'| < 2|d0 - d1| <= 4 eps by Lemma 2, and
  359. * |d''| < 8|d'| < 32 eps by Lemma 4 since
  360. *
  361. * 1/e <= 1 + 2*p0/(1/2 - p0) <= e
  362. *
  363. * when |p0| <= 1/2 - 1/(1 + e). Hence the relative
  364. * error is bounded by
  365. *
  366. * |d2 + d'' + d2 d''|
  367. * <= |d2| + |d''| + |d2 d''|
368. * <= |d2| + 32 eps + 32 eps |d2|
  369. * <= 33 eps + 32 eps^2.
  370. */
  371. return log1p(2*p0/(0.5 - p0));
  372. } else {
  373. /*
  374. * We have a choice of computing logit(1/2 + p0) or
  375. * -logit(1 - (1/2 + p0)) = -logit(1/2 - p0). It
  376. * doesn't matter which way we do this: either way,
  377. * since 1/2 p0 <= 1/2 <= 2 p0, the sum and difference
  378. * are computed exactly. So let's do the one that
  379. * skips the final negation.
  380. *
  381. * The result is
  382. *
  383. * (1 + d1) log((1 + d0) (1/2 + p0)/[(1 + d2) (1/2 - p0)])
  384. * = (1 + d1) (1 + log((1 + d0)/(1 + d2))
  385. * / log((1/2 + p0)/(1/2 - p0)))
  386. * * log((1/2 + p0)/(1/2 - p0))
  387. * = (1 + d') log((1/2 + p0)/(1/2 - p0))
  388. * = (1 + d') logit(1/2 + p0)
  389. *
  390. * where
  391. *
  392. * d' = d1 + log((1 + d0)/(1 + d2))/logit(1/2 + p0)
  393. * + d1 log((1 + d0)/(1 + d2))/logit(1/2 + p0).
  394. *
395. * For |p0| > 1/2 - 1/(1 + e), |logit(1/2 + p0)| > 1.
  396. * Provided |d0|, |d2| < 1/4, by Lemma 3 we have
  397. *
  398. * |log((1 + d0)/(1 + d2))| <= 4|d0 - d2|.
  399. *
  400. * Hence the relative error is bounded by
  401. *
  402. * |d'| <= |d1| + 4|d0 - d2| + 4|d1| |d0 - d2|
  403. * <= |d1| + 4|d0| + 4|d2| + 4|d1 d0| + 4|d1 d2|
  404. * <= 9 eps + 8 eps^2.
  405. */
  406. return log((0.5 + p0)/(0.5 - p0));
  407. }
  408. }
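/*
 * Editor's illustration (not part of the original file): logithalf(p0)
 * computes logit(1/2 + p0) without forming 1/2 + p0, which is what
 * makes it usable for p0 very near 0.  For a modest p0 the two should
 * agree closely; a loose check, assuming <assert.h>:
 */
#if 0
static void
logithalf_example(void)
{
  const double p0 = 0.1;
  const double a = logithalf(p0);
  const double b = logit(0.5 + p0);        /* both ~= log(0.6/0.4) */
  assert(fabs(a - b) <= 16*DBL_EPSILON*fabs(b));
}
#endif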
  409. /*
  410. * The following random_uniform_01 is tailored for IEEE 754 binary64
  411. * floating-point or smaller. It can be adapted to larger
  412. * floating-point formats like i387 80-bit or IEEE 754 binary128, but
  413. * it may require sampling more bits.
  414. */
  415. CTASSERT(FLT_RADIX == 2);
  416. CTASSERT(-DBL_MIN_EXP <= 1021);
  417. CTASSERT(DBL_MANT_DIG <= 53);
  418. /**
  419. * Draw a floating-point number in [0, 1] with uniform distribution.
  420. *
  421. * Note that the probability of returning 0 is less than 2^-1074, so
  422. * callers need not check for it. However, callers that cannot handle
  423. * rounding to 1 must deal with that, because it occurs with
  424. * probability 2^-54, which is small but nonnegligible.
  425. */
  426. STATIC double
  427. random_uniform_01(void)
  428. {
  429. uint32_t z, x, hi, lo;
  430. double s;
  431. /*
  432. * Draw an exponent, geometrically distributed, but give up if
  433. * we get a run of more than 1088 zeros, which really means the
  434. * system is broken.
  435. */
  436. z = 0;
  437. while ((x = crypto_rand_u32()) == 0) {
  438. if (z >= 1088)
  439. /* Your bit sampler is broken. Go home. */
  440. return 0;
  441. z += 32;
  442. }
  443. z += clz32(x);
  444. /*
  445. * Pick 32-bit halves of an odd normalized significand.
  446. * Picking it odd breaks ties in the subsequent rounding, which
  447. * occur only with measure zero in the uniform distribution on
  448. * [0, 1].
  449. */
  450. hi = crypto_rand_u32() | UINT32_C(0x80000000);
  451. lo = crypto_rand_u32() | UINT32_C(0x00000001);
  452. /* Round to nearest scaled significand in [2^63, 2^64]. */
  453. s = hi*(double)4294967296 + lo;
  454. /* Rescale into [1/2, 1] and apply exponent in one swell foop. */
  455. return s * ldexp(1, -(64 + z));
  456. }
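/*
 * Editor's illustration (not part of the original file): a worked
 * instance of the construction above with made-up draws.  The result
 * is s * 2^{-(64 + z)}, where z counts the leading zero bits of the
 * first nonzero 32-bit word and s is a 64-bit significand in
 * [2^63, 2^64] whose top and bottom bits are forced to 1.
 */
#if 0
static double
random_uniform_01_worked_example(void)
{
  /* Pretend the three crypto_rand_u32() draws were
   * 0x00000001, 0xdeadbeef, 0x12345678. */
  uint32_t z = clz32(UINT32_C(0x00000001));                  /* 31 */
  uint32_t hi = UINT32_C(0xdeadbeef) | UINT32_C(0x80000000);
  uint32_t lo = UINT32_C(0x12345678) | UINT32_C(0x00000001);
  double s = hi*(double)4294967296 + lo;        /* ~1.6e19, in [2^63, 2^64] */
  return s * ldexp(1, -(64 + (int)z));          /* s * 2^-95 ~= 4.05e-10 */
}
#endif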
  457. /*******************************************************************/
  458. /* Functions for specific probability distributions start here: */
  459. /*
  460. * Logistic(mu, sigma) distribution, supported on (-\infty,+\infty)
  461. *
  462. * This is the uniform distribution on [0,1] mapped into log-odds
  463. * space, scaled by sigma and translated by mu.
  464. *
465. * pdf(x) = e^{-(x - mu)/sigma}/[sigma (1 + e^{-(x - mu)/sigma})^2]
  466. * cdf(x) = 1/(1 + e^{-(x - mu)/sigma}) = logistic((x - mu)/sigma)
467. * sf(x) = 1 - cdf(x) = 1 - logistic((x - mu)/sigma) = logistic(-(x - mu)/sigma)
  468. * icdf(p) = mu + sigma log p/(1 - p) = mu + sigma logit(p)
  469. * isf(p) = mu + sigma log (1 - p)/p = mu - sigma logit(p)
  470. */
  471. /**
  472. * Compute the CDF of the Logistic(mu, sigma) distribution: the
  473. * logistic function. Well-conditioned for negative inputs and small
  474. * positive inputs; ill-conditioned for large positive inputs.
  475. */
  476. STATIC double
  477. cdf_logistic(double x, double mu, double sigma)
  478. {
  479. return logistic((x - mu)/sigma);
  480. }
  481. /**
  482. * Compute the SF of the Logistic(mu, sigma) distribution: the logistic
  483. * function reflected over the y axis. Well-conditioned for positive
  484. * inputs and small negative inputs; ill-conditioned for large negative
  485. * inputs.
  486. */
  487. STATIC double
  488. sf_logistic(double x, double mu, double sigma)
  489. {
  490. return logistic(-(x - mu)/sigma);
  491. }
  492. /**
  493. * Compute the inverse of the CDF of the Logistic(mu, sigma)
  494. * distribution: the logit function. Well-conditioned near 0;
  495. * ill-conditioned near 1/2 and 1.
  496. */
  497. STATIC double
  498. icdf_logistic(double p, double mu, double sigma)
  499. {
  500. return mu + sigma*logit(p);
  501. }
  502. /**
  503. * Compute the inverse of the SF of the Logistic(mu, sigma)
  504. * distribution: the -logit function. Well-conditioned near 0;
  505. * ill-conditioned near 1/2 and 1.
  506. */
  507. STATIC double
  508. isf_logistic(double p, double mu, double sigma)
  509. {
  510. return mu - sigma*logit(p);
  511. }
  512. /*
  513. * LogLogistic(alpha, beta) distribution, supported on (0, +\infty).
  514. *
  515. * This is the uniform distribution on [0,1] mapped into odds space,
  516. * scaled by positive alpha and shaped by positive beta.
  517. *
  518. * Equivalent to computing exp of a Logistic(log alpha, 1/beta) sample.
  519. * (Name arises because the pdf has LogLogistic(x; alpha, beta) =
  520. * Logistic(log x; log alpha, 1/beta) and mathematicians got their
  521. * covariance contravariant.)
  522. *
  523. * pdf(x) = (beta/alpha) (x/alpha)^{beta - 1}/(1 + (x/alpha)^beta)^2
  524. * = (1/e^mu sigma) (x/e^mu)^{1/sigma - 1} /
  525. * (1 + (x/e^mu)^{1/sigma})^2
  526. * cdf(x) = 1/(1 + (x/alpha)^-beta) = 1/(1 + (x/e^mu)^{-1/sigma})
  527. * = 1/(1 + (e^{log x}/e^mu)^{-1/sigma})
  528. * = 1/(1 + (e^{log x - mu})^{-1/sigma})
  529. * = 1/(1 + e^{-(log x - mu)/sigma})
  530. * = logistic((log x - mu)/sigma)
  531. * = logistic((log x - log alpha)/(1/beta))
  532. * sf(x) = 1 - 1/(1 + (x/alpha)^-beta)
  533. * = (x/alpha)^-beta/(1 + (x/alpha)^-beta)
  534. * = 1/((x/alpha)^beta + 1)
  535. * = 1/(1 + (x/alpha)^beta)
  536. * icdf(p) = alpha (p/(1 - p))^{1/beta}
  537. * = alpha e^{logit(p)/beta}
  538. * = e^{mu + sigma logit(p)}
  539. * isf(p) = alpha ((1 - p)/p)^{1/beta}
  540. * = alpha e^{-logit(p)/beta}
  541. * = e^{mu - sigma logit(p)}
  542. */
  543. /**
  544. * Compute the CDF of the LogLogistic(alpha, beta) distribution.
  545. * Well-conditioned for all x and alpha, and the condition number
  546. *
  547. * -beta/[1 + (x/alpha)^{-beta}]
  548. *
  549. * grows linearly with beta.
  550. *
  551. * Loosely, the relative error of this implementation is bounded by
  552. *
  553. * 4 eps + 2 eps^2 + O(beta eps),
  554. *
  555. * so don't bother trying this for beta anywhere near as large as
  556. * 1/eps, around which point it levels off at 1.
  557. */
  558. STATIC double
  559. cdf_log_logistic(double x, double alpha, double beta)
  560. {
  561. /*
  562. * Let d0 be the error of x/alpha; d1, of pow; d2, of +; and
  563. * d3, of the final quotient. The exponentiation gives
  564. *
  565. * ((1 + d0) x/alpha)^{-beta}
  566. * = (x/alpha)^{-beta} (1 + d0)^{-beta}
  567. * = (x/alpha)^{-beta} (1 + (1 + d0)^{-beta} - 1)
  568. * = (x/alpha)^{-beta} (1 + d')
  569. *
  570. * where d' = (1 + d0)^{-beta} - 1. If y = (x/alpha)^{-beta},
  571. * the denominator is
  572. *
  573. * (1 + d2) (1 + (1 + d1) (1 + d') y)
  574. * = (1 + d2) (1 + y + (d1 + d' + d1 d') y)
  575. * = 1 + y + (1 + d2) (d1 + d' + d1 d') y
  576. * = (1 + y) (1 + (1 + d2) (d1 + d' + d1 d') y/(1 + y))
  577. * = (1 + y) (1 + d''),
  578. *
  579. * where d'' = (1 + d2) (d1 + d' + d1 d') y/(1 + y). The
  580. * final result is
  581. *
  582. * (1 + d3) / [(1 + d2) (1 + d'') (1 + y)]
  583. * = (1 + d''') / (1 + y)
  584. *
  585. * for |d'''| <= 2|d3 - d''| by Lemma 2 as long as |d''| < 1/2
  586. * (which may not be the case for very large beta). This
  587. * relative error is therefore bounded by
  588. *
  589. * |d'''|
  590. * <= 2|d3 - d''|
  591. * <= 2|d3| + 2|(1 + d2) (d1 + d' + d1 d') y/(1 + y)|
  592. * <= 2|d3| + 2|(1 + d2) (d1 + d' + d1 d')|
  593. * = 2|d3| + 2|d1 + d' + d1 d' + d2 d1 + d2 d' + d2 d1 d'|
  594. * <= 2|d3| + 2|d1| + 2|d'| + 2|d1 d'| + 2|d2 d1| + 2|d2 d'|
  595. * + 2|d2 d1 d'|
  596. * <= 4 eps + 2 eps^2 + (2 + 2 eps + 2 eps^2) |d'|.
  597. *
  598. * Roughly, |d'| = |(1 + d0)^{-beta} - 1| grows like beta eps,
  599. * until it levels off at 1.
  600. */
  601. return 1/(1 + pow(x/alpha, -beta));
  602. }
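/*
 * Editor's illustration (not part of the original file): the header
 * comment above notes that LogLogistic(alpha, beta) is Logistic(log
 * alpha, 1/beta) in log space, so the two CDF implementations should
 * agree to within a few ulps for mild parameters.  A loose check,
 * assuming <assert.h>:
 */
#if 0
static void
log_logistic_vs_logistic_example(void)
{
  const double alpha = 2.0, beta = 3.0, x = 1.5;
  const double a = cdf_log_logistic(x, alpha, beta);    /* 1/(1 + (x/alpha)^-beta) */
  const double b = cdf_logistic(log(x), log(alpha), 1/beta);
  assert(fabs(a - b) <= 64*DBL_EPSILON*fabs(b));
}
#endif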
  603. /**
  604. * Compute the SF of the LogLogistic(alpha, beta) distribution.
  605. * Well-conditioned for all x and alpha, and the condition number
  606. *
  607. * beta/[1 + (x/alpha)^beta]
  608. *
  609. * grows linearly with beta.
  610. *
  611. * Loosely, the relative error of this implementation is bounded by
  612. *
  613. * 4 eps + 2 eps^2 + O(beta eps)
  614. *
  615. * so don't bother trying this for beta anywhere near as large as
  616. * 1/eps, beyond which point it grows unbounded.
  617. */
  618. STATIC double
  619. sf_log_logistic(double x, double alpha, double beta)
  620. {
  621. /*
  622. * The error analysis here is essentially the same as in
  623. * cdf_log_logistic, except that rather than levelling off at
  624. * 1, |(1 + d0)^beta - 1| grows unbounded.
  625. */
  626. return 1/(1 + pow(x/alpha, beta));
  627. }
  628. /**
  629. * Compute the inverse of the CDF of the LogLogistic(alpha, beta)
  630. * distribution. Ill-conditioned for p near 1 and beta near 0 with
  631. * condition number 1/[beta (1 - p)].
  632. */
  633. STATIC double
  634. icdf_log_logistic(double p, double alpha, double beta)
  635. {
  636. return alpha*pow(p/(1 - p), 1/beta);
  637. }
  638. /**
  639. * Compute the inverse of the SF of the LogLogistic(alpha, beta)
640. * distribution. Ill-conditioned for p near 1 and for beta near 0, with
  641. * condition number -1/[beta (1 - p)].
  642. */
  643. STATIC double
  644. isf_log_logistic(double p, double alpha, double beta)
  645. {
  646. return alpha*pow((1 - p)/p, 1/beta);
  647. }
  648. /*
  649. * Weibull(lambda, k) distribution, supported on (0, +\infty).
  650. *
  651. * pdf(x) = (k/lambda) (x/lambda)^{k - 1} e^{-(x/lambda)^k}
  652. * cdf(x) = 1 - e^{-(x/lambda)^k}
  653. * icdf(p) = lambda * (-log (1 - p))^{1/k}
  654. * sf(x) = e^{-(x/lambda)^k}
  655. * isf(p) = lambda * (-log p)^{1/k}
  656. */
  657. /**
  658. * Compute the CDF of the Weibull(lambda, k) distribution.
  659. * Well-conditioned for small x and k, and for large lambda --
  660. * condition number
  661. *
  662. * -k (x/lambda)^k exp(-(x/lambda)^k)/[exp(-(x/lambda)^k) - 1]
  663. *
  664. * grows linearly with k, x^k, and lambda^{-k}.
  665. */
  666. STATIC double
  667. cdf_weibull(double x, double lambda, double k)
  668. {
  669. return -expm1(-pow(x/lambda, k));
  670. }
  671. /**
  672. * Compute the SF of the Weibull(lambda, k) distribution.
  673. * Well-conditioned for small x and k, and for large lambda --
  674. * condition number
  675. *
  676. * -k (x/lambda)^k
  677. *
  678. * grows linearly with k, x^k, and lambda^{-k}.
  679. */
  680. STATIC double
  681. sf_weibull(double x, double lambda, double k)
  682. {
  683. return exp(-pow(x/lambda, k));
  684. }
  685. /**
  686. * Compute the inverse of the CDF of the Weibull(lambda, k)
  687. * distribution. Ill-conditioned for p near 1, and for k near 0;
  688. * condition number is
  689. *
  690. * (p/(1 - p))/(k log(1 - p)).
  691. */
  692. STATIC double
  693. icdf_weibull(double p, double lambda, double k)
  694. {
  695. return lambda*pow(-log1p(-p), 1/k);
  696. }
  697. /**
  698. * Compute the inverse of the SF of the Weibull(lambda, k)
  699. * distribution. Ill-conditioned for p near 0, and for k near 0;
  700. * condition number is
  701. *
  702. * 1/(k log(p)).
  703. */
  704. STATIC double
  705. isf_weibull(double p, double lambda, double k)
  706. {
  707. return lambda*pow(-log(p), 1/k);
  708. }
  709. /*
  710. * GeneralizedPareto(mu, sigma, xi), supported on (mu, +\infty) for
  711. * nonnegative xi, or (mu, mu - sigma/xi) for negative xi.
  712. *
  713. * Samples:
  714. * = mu - sigma log U, if xi = 0;
  715. * = mu + sigma (U^{-xi} - 1)/xi = mu + sigma*expm1(-xi log U)/xi, if xi =/= 0,
  716. * where U is uniform on (0,1].
  717. * = mu + sigma (e^{xi X} - 1)/xi,
  718. * where X has standard exponential distribution.
  719. *
  720. * pdf(x) = sigma^{-1} (1 + xi (x - mu)/sigma)^{-(1 + 1/xi)}
  721. * cdf(x) = 1 - (1 + xi (x - mu)/sigma)^{-1/xi}
  722. * = 1 - e^{-log(1 + xi (x - mu)/sigma)/xi}
  723. * --> 1 - e^{-(x - mu)/sigma} as xi --> 0
  724. * sf(x) = (1 + xi (x - mu)/sigma)^{-1/xi}
  725. * --> e^{-(x - mu)/sigma} as xi --> 0
  726. * icdf(p) = mu + sigma*(p^{-xi} - 1)/xi
  727. * = mu + sigma*expm1(-xi log p)/xi
  728. * --> mu + sigma*log p as xi --> 0
  729. * isf(p) = mu + sigma*((1 - p)^{xi} - 1)/xi
  730. * = mu + sigma*expm1(-xi log1p(-p))/xi
  731. * --> mu + sigma*log1p(-p) as xi --> 0
  732. */
  733. /**
  734. * Compute the CDF of the GeneralizedPareto(mu, sigma, xi)
  735. * distribution. Well-conditioned everywhere. For standard
  736. * distribution (mu=0, sigma=1), condition number
  737. *
  738. * (x/(1 + x xi)) / ((1 + x xi)^{1/xi} - 1)
  739. *
  740. * is bounded by 1, attained only at x = 0.
  741. */
  742. STATIC double
  743. cdf_genpareto(double x, double mu, double sigma, double xi)
  744. {
  745. double x_0 = (x - mu)/sigma;
  746. /*
  747. * log(1 + xi x_0)/xi
  748. * = (-1/xi) \sum_{n=1}^\infty (-xi x_0)^n/n
  749. * = (-1/xi) (-xi x_0 + \sum_{n=2}^\infty (-xi x_0)^n/n)
  750. * = x_0 - (1/xi) \sum_{n=2}^\infty (-xi x_0)^n/n
  751. * = x_0 - x_0 \sum_{n=2}^\infty (-xi x_0)^{n-1}/n
  752. * = x_0 (1 - d),
  753. *
  754. * where d = \sum_{n=2}^\infty (-xi x_0)^{n-1}/n. If |xi| <
  755. * eps/4|x_0|, then
  756. *
  757. * |d| <= \sum_{n=2}^\infty (eps/4)^{n-1}/n
  758. * <= \sum_{n=2}^\infty (eps/4)^{n-1}
  759. * = \sum_{n=1}^\infty (eps/4)^n
  760. * = (eps/4) \sum_{n=0}^\infty (eps/4)^n
  761. * = (eps/4)/(1 - eps/4)
  762. * < eps/2
  763. *
  764. * for any 0 < eps < 2. Thus, the relative error of x_0 from
  765. * log(1 + xi x_0)/xi is bounded by eps.
  766. */
  767. if (fabs(xi) < 1e-17/x_0)
  768. return -expm1(-x_0);
  769. else
  770. return -expm1(-log1p(xi*x_0)/xi);
  771. }
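/*
 * Editor's illustration (not part of the original file): the xi -> 0
 * limit handled above is the exponential CDF.  Below the cutoff the
 * code returns -expm1(-x_0) directly; just above it, the log1p form
 * should still agree with the exponential CDF to within a few ulps.
 * A loose check, assuming <assert.h>:
 */
#if 0
static void
genpareto_xi_limit_example(void)
{
  const double x = 2.0;
  assert(fabs(cdf_genpareto(x, 0, 1, 1e-30) - (-expm1(-x))) <= 8*DBL_EPSILON);
  assert(fabs(cdf_genpareto(x, 0, 1, 1e-15) - (-expm1(-x))) <= 8*DBL_EPSILON);
}
#endif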
  772. /**
  773. * Compute the SF of the GeneralizedPareto(mu, sigma, xi) distribution.
  774. * For standard distribution (mu=0, sigma=1), ill-conditioned for xi
  775. * near 0; condition number
  776. *
  777. * -x (1 + x xi)^{(-1 - xi)/xi}/(1 + x xi)^{-1/xi}
  778. * = -x (1 + x xi)^{-1/xi - 1}/(1 + x xi)^{-1/xi}
  779. * = -(x/(1 + x xi)) (1 + x xi)^{-1/xi}/(1 + x xi)^{-1/xi}
  780. * = -x/(1 + x xi)
  781. *
  782. * is bounded by 1/xi.
  783. */
  784. STATIC double
  785. sf_genpareto(double x, double mu, double sigma, double xi)
  786. {
  787. double x_0 = (x - mu)/sigma;
  788. if (fabs(xi) < 1e-17/x_0)
  789. return exp(-x_0);
  790. else
  791. return exp(-log1p(xi*x_0)/xi);
  792. }
  793. /**
  794. * Compute the inverse of the CDF of the GeneralizedPareto(mu, sigma,
  795. * xi) distribution. Ill-conditioned for p near 1; condition number is
  796. *
  797. * xi (p/(1 - p))/(1 - (1 - p)^xi)
  798. */
  799. STATIC double
  800. icdf_genpareto(double p, double mu, double sigma, double xi)
  801. {
  802. /*
  803. * To compute f(xi) = (U^{-xi} - 1)/xi = (e^{-xi log U} - 1)/xi
  804. * for xi near zero (note f(xi) --> -log U as xi --> 0), write
  805. * the absolutely convergent Taylor expansion
  806. *
807. * f(xi) = (1/xi)*(-xi log U + \sum_{n=2}^\infty (-xi log U)^n/n!)
  808. * = -log U + (1/xi)*\sum_{n=2}^\infty (-xi log U)^n/n!
  809. * = -log U + \sum_{n=2}^\infty xi^{n-1} (-log U)^n/n!
  810. * = -log U - log U \sum_{n=2}^\infty (-xi log U)^{n-1}/n!
  811. * = -log U (1 + \sum_{n=2}^\infty (-xi log U)^{n-1}/n!).
  812. *
  813. * Let d = \sum_{n=2}^\infty (-xi log U)^{n-1}/n!. What do we
  814. * lose if we discard it and use -log U as an approximation to
  815. * f(xi)? If |xi| < eps/-4log U, then
  816. *
  817. * |d| <= \sum_{n=2}^\infty |xi log U|^{n-1}/n!
  818. * <= \sum_{n=2}^\infty (eps/4)^{n-1}/n!
  819. * <= \sum_{n=1}^\infty (eps/4)^n
  820. * = (eps/4) \sum_{n=0}^\infty (eps/4)^n
  821. * = (eps/4)/(1 - eps/4)
  822. * < eps/2,
  823. *
  824. * for any 0 < eps < 2. Hence, as long as |xi| < eps/-2log U,
  825. * f(xi) = -log U (1 + d) for |d| <= eps/2. |d| is the
  826. * relative error of f(xi) from -log U; from this bound, the
  827. * relative error of -log U from f(xi) is at most (eps/2)/(1 -
  828. * eps/2) = eps/2 + (eps/2)^2 + (eps/2)^3 + ... < eps for 0 <
  829. * eps < 1. Since -log U < 1000 for all U in (0, 1] in
  830. * binary64 floating-point, we can safely cut xi off at 1e-20 <
  831. * eps/4000 and attain <1ulp error from series truncation.
  832. */
  833. if (fabs(xi) <= 1e-20)
  834. return mu - sigma*log1p(-p);
  835. else
  836. return mu + sigma*expm1(-xi*log1p(-p))/xi;
  837. }
  838. /**
  839. * Compute the inverse of the SF of the GeneralizedPareto(mu, sigma,
840. * xi) distribution. Ill-conditioned for p near 1; condition number is
  841. *
  842. * -xi/(1 - p^{-xi})
  843. */
  844. STATIC double
  845. isf_genpareto(double p, double mu, double sigma, double xi)
  846. {
  847. if (fabs(xi) <= 1e-20)
  848. return mu - sigma*log(p);
  849. else
  850. return mu + sigma*expm1(-xi*log(p))/xi;
  851. }
  852. /*******************************************************************/
  853. /**
  854. * Deterministic samplers, parametrized by uniform integer and (0,1]
  855. * samples. No guarantees are made about _which_ mapping from the
  856. * integer and (0,1] samples these use; all that is guaranteed is the
  857. * distribution of the outputs conditioned on a uniform distribution on
  858. * the inputs. The automatic tests in test_prob_distr.c double-check
  859. * the particular mappings we use.
  860. *
  861. * Beware: Unlike random_uniform_01(), these are not guaranteed to be
  862. * supported on all possible outputs. See Ilya Mironov, `On the
  863. * Significance of the Least Significant Bits for Differential
  864. * Privacy', for an example of what can go wrong if you try to use
  865. * these to conceal information from an adversary but you expose the
  866. * specific full-precision floating-point values.
  867. *
  868. * Note: None of these samplers use rejection sampling; they are all
  869. * essentially inverse-CDF transforms with tweaks. If you were to add,
  870. * say, a Gamma sampler with the Marsaglia-Tsang method, you would have
  871. * to parametrize it by a potentially infinite stream of uniform (and
  872. * perhaps normal) samples rather than a fixed number, which doesn't
  873. * make for quite as nice automatic testing as for these.
  874. */
  875. /**
  876. * Deterministically sample from the interval [a, b], indexed by a
  877. * uniform random floating-point number p0 in (0, 1].
  878. *
  879. * Note that even if p0 is nonzero, the result may be equal to a, if
  880. * ulp(a)/2 is nonnegligible, e.g. if a = 1. For maximum resolution,
  881. * arrange |a| <= |b|.
  882. */
  883. STATIC double
  884. sample_uniform_interval(double p0, double a, double b)
  885. {
  886. /*
  887. * XXX Prove that the distribution is, in fact, uniform on
  888. * [a,b], particularly around p0 = 1, or at least has very
  889. * small deviation from uniform, quantified appropriately
  890. * (e.g., like in Monahan 1984, or by KL divergence). It
  891. * almost certainly does but it would be nice to quantify the
  892. * error.
  893. */
  894. if ((a <= 0 && 0 <= b) || (b <= 0 && 0 <= a)) {
  895. /*
  896. * When ab < 0, (1 - t) a + t b is monotonic, since for
  897. * a <= b it is a sum of nondecreasing functions of t,
  898. * and for b <= a, of nonincreasing functions of t.
  899. * Further, clearly at 0 and 1 it attains a and b,
  900. * respectively. Hence it is bounded within [a, b].
  901. */
  902. return (1 - p0)*a + p0*b;
  903. } else {
  904. /*
  905. * a + (b - a) t is monotonic -- it is obviously a
  906. * nondecreasing function of t for a <= b. Further, it
  907. * attains a at 0, and while it may overshoot b at 1,
  908. * we have a
  909. *
  910. * Theorem. If 0 <= t < 1, then the floating-point
  911. * evaluation of a + (b - a) t is bounded in [a, b].
  912. *
  913. * Lemma 1. If 0 <= t < 1 is a floating-point number,
  914. * then for any normal floating-point number x except
  915. * the smallest in magnitude, |round(x*t)| < |x|.
  916. *
  917. * Proof. WLOG, assume x >= 0. Since the rounding
  918. * function and t |---> x*t are nondecreasing, their
  919. * composition t |---> round(x*t) is also
  920. * nondecreasing, so it suffices to consider the
  921. * largest floating-point number below 1, in particular
  922. * t = 1 - ulp(1)/2.
  923. *
  924. * Case I: If x is a power of two, then the next
  925. * floating-point number below x is x - ulp(x)/2 = x -
  926. * x*ulp(1)/2 = x*(1 - ulp(1)/2) = x*t, so, since x*t
  927. * is a floating-point number, multiplication is exact,
  928. * and thus round(x*t) = x*t < x.
  929. *
  930. * Case II: If x is not a power of two, then the
  931. * greatest lower bound of real numbers rounded to x is
  932. * x - ulp(x)/2 = x - ulp(T(x))/2 = x - T(x)*ulp(1)/2,
  933. * where T(X) is the largest power of two below x.
  934. * Anything below this bound is rounded to a
  935. * floating-point number smaller than x, and x*t = x*(1
  936. * - ulp(1)/2) = x - x*ulp(1)/2 < x - T(x)*ulp(1)/2
  937. * since T(x) < x, so round(x*t) < x*t < x. QED.
  938. *
  939. * Lemma 2. If x and y are subnormal, then round(x +
  940. * y) = x + y.
  941. *
  942. * Proof. It is a matter of adding the significands,
  943. * since if we treat subnormals as having an implicit
  944. * zero bit before the `binary' point, their exponents
  945. * are all the same. There is at most one carry/borrow
946. * bit, which can always be accommodated either in a
  947. * subnormal, or, at largest, in the implicit one bit
  948. * of a normal.
  949. *
  950. * Lemma 3. Let x and y be floating-point numbers. If
  951. * round(x - y) is subnormal or zero, then it is equal
  952. * to x - y.
  953. *
  954. * Proof. Case I (equal): round(x - y) = 0 iff x = y;
  955. * hence if round(x - y) = 0, then round(x - y) = 0 = x
  956. * - y.
  957. *
  958. * Case II (subnormal/subnormal): If x and y are both
  959. * subnormal, this follows directly from Lemma 2.
  960. *
  961. * Case IIIa (normal/subnormal): If x is normal and y
  962. * is subnormal, then x and y must share sign, or else
  963. * x - y would be larger than x and thus rounded to
  964. * normal. If s is the smallest normal positive
  965. * floating-point number, |x| < 2s since by
  966. * construction 2s - |y| is normal for all subnormal y.
  967. * This means that x and y must have the same exponent,
  968. * so the difference is the difference of significands,
  969. * which is exact.
  970. *
  971. * Case IIIb (subnormal/normal): Same as case IIIa for
  972. * -(y - x).
  973. *
  974. * Case IV (normal/normal): If x and y are both normal,
  975. * then they must share sign, or else x - y would be
  976. * larger than x and thus rounded to normal. Note that
  977. * |y| < 2|x|, for if |y| >= 2|x|, then |x| - |y| <=
  978. * -|x| but -|x| is normal like x. Also, |x|/2 < |y|:
  979. * if |x|/2 is subnormal, it must hold because y is
  980. * normal; if |x|/2 is normal, then |x|/2 >= s, so
  981. * since |x| - |y| < s,
  982. *
  983. * |x|/2 = |x| - |x|/2 <= |x| - s <= |y|;
  984. *
  985. * that is, |x|/2 < |y| < 2|x|, so by the Sterbenz
  986. * lemma, round(x - y) = x - y. QED.
  987. *
  988. * Proof of theorem. WLOG, assume 0 <= a <= b. Since
989. * round(a + round(round(b - a)*t)) is nondecreasing in
  990. * t and attains a at 0, the lower end of the bound is
  991. * trivial; we must show the upper end of the bound
  992. * strictly. It suffices to show this for the largest
  993. * floating-point number below 1, namely 1 - ulp(1)/2.
  994. *
  995. * Case I: round(b - a) is normal. Then it is at most
  996. * the smallest floating-point number above b - a. By
  997. * Lemma 1, round(round(b - a)*t) < round(b - a).
  998. * Since the inequality is strict, and since
  999. * round(round(b - a)*t) is a floating-point number
  1000. * below round(b - a), and since there are no
  1001. * floating-point numbers between b - a and round(b -
  1002. * a), we must have round(round(b - a)*t) < b - a.
  1003. * Then since y |---> round(a + y) is nondecreasing, we
  1004. * must have
  1005. *
  1006. * round(a + round(round(b - a)*t))
  1007. * <= round(a + (b - a))
  1008. * = round(b) = b.
  1009. *
  1010. * Case II: round(b - a) is subnormal. In this case,
  1011. * Lemma 1 falls apart -- we are not guaranteed the
  1012. * strict inequality. However, by Lemma 3, the
  1013. * difference is exact: round(b - a) = b - a. Thus,
  1014. *
  1015. * round(a + round(round(b - a)*t))
  1016. * <= round(a + round((b - a)*t))
  1017. * <= round(a + (b - a))
  1018. * = round(b)
  1019. * = b,
  1020. *
  1021. * QED.
  1022. */
  1023. /* p0 is restricted to [0,1], but we use >= to silence -Wfloat-equal. */
  1024. if (p0 >= 1)
  1025. return b;
  1026. return a + (b - a)*p0;
  1027. }
  1028. }
  1029. /**
  1030. * Deterministically sample from the standard logistic distribution,
  1031. * indexed by a uniform random 32-bit integer s and uniform random
  1032. * floating-point numbers t and p0 in (0, 1].
  1033. */
  1034. STATIC double
  1035. sample_logistic(uint32_t s, double t, double p0)
  1036. {
  1037. double sign = (s & 1) ? -1 : +1;
  1038. double r;
  1039. /*
  1040. * We carve up the interval (0, 1) into subregions to compute
  1041. * the inverse CDF precisely:
  1042. *
  1043. * A = (0, 1/(1 + e)] ---> (-\infty, -1]
  1044. * B = [1/(1 + e), 1/2] ---> [-1, 0]
  1045. * C = [1/2, 1 - 1/(1 + e)] ---> [0, 1]
  1046. * D = [1 - 1/(1 + e), 1) ---> [1, +\infty)
  1047. *
  1048. * Cases D and C are mirror images of cases A and B,
  1049. * respectively, so we choose between them by the sign chosen
  1050. * by a fair coin toss. We choose between cases A and B by a
  1051. * coin toss weighted by
  1052. *
  1053. * 2/(1 + e) = 1 - [1/2 - 1/(1 + e)]/(1/2):
  1054. *
  1055. * if it comes up heads, scale p0 into a uniform (0, 1/(1 + e)]
  1056. * sample p; if it comes up tails, scale p0 into a uniform (0,
  1057. * 1/2 - 1/(1 + e)] sample and compute the inverse CDF of p =
  1058. * 1/2 - p0.
  1059. */
  1060. if (t <= 2/(1 + exp(1))) {
1061. /* p uniform in (0, 1/(1 + e)], represented directly by p0. */
  1062. p0 /= 1 + exp(1);
  1063. r = logit(p0);
  1064. } else {
  1065. /*
  1066. * p uniform in [1/(1 + e), 1/2), actually represented
  1067. * by p0 = 1/2 - p uniform in (0, 1/2 - 1/(1 + e)], so
1068. * that p = 1/2 - p0.
  1069. */
  1070. p0 *= 0.5 - 1/(1 + exp(1));
  1071. r = logithalf(p0);
  1072. }
  1073. /*
  1074. * We have chosen from the negative half of the standard
  1075. * logistic distribution, which is symmetric with the positive
  1076. * half. Now use the sign to choose uniformly between them.
  1077. */
  1078. return sign*r;
  1079. }
  1080. /**
  1081. * Deterministically sample from the logistic distribution scaled by
  1082. * sigma and translated by mu.
  1083. */
  1084. static double
  1085. sample_logistic_locscale(uint32_t s, double t, double p0, double mu,
  1086. double sigma)
  1087. {
  1088. return mu + sigma*sample_logistic(s, t, p0);
  1089. }
  1090. /**
  1091. * Deterministically sample from the standard log-logistic
  1092. * distribution, indexed by a uniform random 32-bit integer s and a
  1093. * uniform random floating-point number p0 in (0, 1].
  1094. */
  1095. STATIC double
  1096. sample_log_logistic(uint32_t s, double p0)
  1097. {
  1098. /*
  1099. * Carve up the interval (0, 1) into (0, 1/2] and [1/2, 1); the
  1100. * condition numbers of the icdf and the isf coincide at 1/2.
  1101. */
  1102. p0 *= 0.5;
  1103. if ((s & 1) == 0) {
  1104. /* p = p0 in (0, 1/2] */
  1105. return p0/(1 - p0);
  1106. } else {
  1107. /* p = 1 - p0 in [1/2, 1) */
  1108. return (1 - p0)/p0;
  1109. }
  1110. }
  1111. /**
  1112. * Deterministically sample from the log-logistic distribution with
  1113. * scale alpha and shape beta.
  1114. */
  1115. static double
  1116. sample_log_logistic_scaleshape(uint32_t s, double p0, double alpha,
  1117. double beta)
  1118. {
  1119. double x = sample_log_logistic(s, p0);
  1120. return alpha*pow(x, 1/beta);
  1121. }
  1122. /**
  1123. * Deterministically sample from the standard exponential distribution,
  1124. * indexed by a uniform random 32-bit integer s and a uniform random
  1125. * floating-point number p0 in (0, 1].
  1126. */
  1127. static double
  1128. sample_exponential(uint32_t s, double p0)
  1129. {
  1130. /*
  1131. * We would like to evaluate log(p) for p near 0, and log1p(-p)
  1132. * for p near 1. Simply carve the interval into (0, 1/2] and
  1133. * [1/2, 1) by a fair coin toss.
  1134. */
  1135. p0 *= 0.5;
  1136. if ((s & 1) == 0)
  1137. /* p = p0 in (0, 1/2] */
  1138. return -log(p0);
  1139. else
  1140. /* p = 1 - p0 in [1/2, 1) */
  1141. return -log1p(-p0);
  1142. }
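/*
 * Editor's illustration (not part of the original file): with p0 = 1,
 * either branch above lands on the median of the standard exponential
 * distribution, -log(1/2) = log 2, from opposite ends of the carved-up
 * interval.  A loose check, assuming <assert.h>:
 */
#if 0
static void
exponential_median_example(void)
{
  assert(fabs(sample_exponential(0, 1.0) - log(2)) <= 4*DBL_EPSILON);
  assert(fabs(sample_exponential(1, 1.0) - log(2)) <= 4*DBL_EPSILON);
}
#endif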
  1143. /**
  1144. * Deterministically sample from a Weibull distribution with scale
  1145. * lambda and shape k -- just an exponential with a shape parameter in
  1146. * addition to a scale parameter. (Yes, lambda really is the scale,
  1147. * _not_ the rate.)
  1148. */
  1149. STATIC double
  1150. sample_weibull(uint32_t s, double p0, double lambda, double k)
  1151. {
  1152. return lambda*pow(sample_exponential(s, p0), 1/k);
  1153. }
  1154. /**
  1155. * Deterministically sample from the generalized Pareto distribution
  1156. * with shape xi, indexed by a uniform random 32-bit integer s and a
  1157. * uniform random floating-point number p0 in (0, 1].
  1158. */
  1159. STATIC double
  1160. sample_genpareto(uint32_t s, double p0, double xi)
  1161. {
  1162. double x = sample_exponential(s, p0);
  1163. /*
  1164. * Write f(xi) = (e^{xi x} - 1)/xi for xi near zero as the
  1165. * absolutely convergent Taylor series
  1166. *
1167. * f(xi) = (1/xi) (xi x + \sum_{n=2}^\infty (xi x)^n/n!)
1168. * = x + (1/xi) \sum_{n=2}^\infty (xi x)^n/n!
  1169. * = x + \sum_{n=2}^\infty xi^{n-1} x^n/n!
  1170. * = x + x \sum_{n=2}^\infty (xi x)^{n-1}/n!
  1171. * = x (1 + \sum_{n=2}^\infty (xi x)^{n-1}/n!).
  1172. *
  1173. * d = \sum_{n=2}^\infty (xi x)^{n-1}/n! is the relative error
1174. * of f(xi) from x. If |xi| < eps/4x, then
  1175. *
  1176. * |d| <= \sum_{n=2}^\infty |xi x|^{n-1}/n!
  1177. * <= \sum_{n=2}^\infty (eps/4)^{n-1}/n!
1178. * <= \sum_{n=1}^\infty (eps/4)^n
  1179. * = (eps/4) \sum_{n=0}^\infty (eps/4)^n
  1180. * = (eps/4)/(1 - eps/4)
  1181. * < eps/2,
  1182. *
  1183. * for any 0 < eps < 2. Hence, as long as |xi| < eps/2x, f(xi)
  1184. * = x (1 + d) for |d| <= eps/2, so x = f(xi) (1 + d') for |d'|
  1185. * <= eps. What bound should we use for x?
  1186. *
  1187. * - If x is exponentially distributed, x > 200 with
  1188. * probability below e^{-200} << 2^{-256}, i.e. never.
  1189. *
  1190. * - If x is computed by -log(U) for U in (0, 1], x is
  1191. * guaranteed to be below 1000 in IEEE 754 binary64
  1192. * floating-point.
  1193. *
  1194. * We can safely cut xi off at 1e-20 < eps/4000 and attain an
  1195. * error bounded by 0.5 ulp for this expression.
  1196. */
  1197. return (fabs(xi) < 1e-20 ? x : expm1(xi*x)/xi);
  1198. }
  1199. /**
  1200. * Deterministically sample from a generalized Pareto distribution with
  1201. * shape xi, scaled by sigma and translated by mu.
  1202. */
  1203. static double
  1204. sample_genpareto_locscale(uint32_t s, double p0, double mu, double sigma,
  1205. double xi)
  1206. {
  1207. return mu + sigma*sample_genpareto(s, p0, xi);
  1208. }
  1209. /**
  1210. * Deterministically sample from the geometric distribution with
  1211. * per-trial success probability p.
  1212. *
  1213. * XXX Quantify the error (KL divergence?) of this
  1214. * ceiling-of-exponential sampler from a true geometric distribution,
  1215. * which we could get by rejection sampling. Relevant papers:
  1216. *
  1217. * John F. Monahan, `Accuracy in Random Number Generation',
  1218. * Mathematics of Computation 45(172), October 1984, pp. 559--568.
1219. * https://pdfs.semanticscholar.org/aca6/74b96da1df77b2224e8cfc5dd6d61a471632.pdf
  1220. *
  1221. * Karl Bringmann and Tobias Friedrich, `Exact and Efficient
  1222. * Generation of Geometric Random Variates and Random Graphs', in
1223. * Proceedings of the 40th International Colloquium on Automata,
  1224. * Languages, and Programming -- ICALP 2013, Springer LNCS 7965,
  1225. * pp.267--278.
  1226. * https://doi.org/10.1007/978-3-642-39206-1_23
  1227. * https://people.mpi-inf.mpg.de/~kbringma/paper/2013ICALP-1.pdf
  1228. */
  1229. static double
  1230. sample_geometric(uint32_t s, double p0, double p)
  1231. {
  1232. double x = sample_exponential(s, p0);
  1233. /* This is actually a check against 1, but we do >= so that the compiler
  1234. does not raise a -Wfloat-equal */
  1235. if (p >= 1)
  1236. return 1;
  1237. return ceil(-x/log1p(-p));
  1238. }
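/*
 * Editor's illustration (not part of the original file): usage of the
 * sampler above.  With p = 1/2 it returns ceil(x/log 2) for x standard
 * exponential, which takes the value k with probability 2^-k -- a
 * Geometric(1/2) number of trials up to and including the first
 * success, with mean 2.
 */
#if 0
static double
geometric_half_example(void)
{
  uint32_t s = crypto_rand_u32();
  double p0 = random_uniform_01();
  return sample_geometric(s, p0, 0.5);
}
#endif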
  1239. /*******************************************************************/
  1240. /** Public API for probability distributions:
  1241. *
1242. * For each probability distribution we define the public functions
  1243. * (sample/cdf/sf/icdf/isf) as part of its dist_ops structure.
  1244. */
  1245. const char *
  1246. dist_name(const struct dist *dist)
  1247. {
  1248. return dist->ops->name;
  1249. }
  1250. double
  1251. dist_sample(const struct dist *dist)
  1252. {
  1253. return dist->ops->sample(dist);
  1254. }
  1255. double
  1256. dist_cdf(const struct dist *dist, double x)
  1257. {
  1258. return dist->ops->cdf(dist, x);
  1259. }
  1260. double
  1261. dist_sf(const struct dist *dist, double x)
  1262. {
  1263. return dist->ops->sf(dist, x);
  1264. }
  1265. double
  1266. dist_icdf(const struct dist *dist, double p)
  1267. {
  1268. return dist->ops->icdf(dist, p);
  1269. }
  1270. double
  1271. dist_isf(const struct dist *dist, double p)
  1272. {
  1273. return dist->ops->isf(dist, p);
  1274. }
  1275. /** Functions for uniform distribution */
  1276. static double
  1277. uniform_sample(const struct dist *dist)
  1278. {
  1279. const struct uniform *U = const_container_of(dist, struct uniform,
  1280. base);
  1281. double p0 = random_uniform_01();
  1282. return sample_uniform_interval(p0, U->a, U->b);
  1283. }
  1284. static double
  1285. uniform_cdf(const struct dist *dist, double x)
  1286. {
  1287. const struct uniform *U = const_container_of(dist, struct uniform,
  1288. base);
  1289. if (x < U->a)
  1290. return 0;
  1291. else if (x < U->b)
  1292. return (x - U->a)/(U->b - U->a);
  1293. else
  1294. return 1;
  1295. }
  1296. static double
  1297. uniform_sf(const struct dist *dist, double x)
  1298. {
  1299. const struct uniform *U = const_container_of(dist, struct uniform,
  1300. base);
  1301. if (x > U->b)
  1302. return 0;
  1303. else if (x > U->a)
  1304. return (U->b - x)/(U->b - U->a);
  1305. else
  1306. return 1;
  1307. }
  1308. static double
  1309. uniform_icdf(const struct dist *dist, double p)
  1310. {
  1311. const struct uniform *U = const_container_of(dist, struct uniform,
  1312. base);
  1313. double w = U->b - U->a;
  1314. return (p < 0.5 ? (U->a + w*p) : (U->b - w*(1 - p)));
  1315. }
  1316. static double
  1317. uniform_isf(const struct dist *dist, double p)
  1318. {
  1319. const struct uniform *U = const_container_of(dist, struct uniform,
  1320. base);
  1321. double w = U->b - U->a;
  1322. return (p < 0.5 ? (U->b - w*p) : (U->a + w*(1 - p)));
  1323. }
  1324. const struct dist_ops uniform_ops = {
  1325. .name = "uniform",
  1326. .sample = uniform_sample,
  1327. .cdf = uniform_cdf,
  1328. .sf = uniform_sf,
  1329. .icdf = uniform_icdf,
  1330. .isf = uniform_isf,
  1331. };
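/*
 * Editor's illustration (not part of the original file): using the
 * public API through the ops table.  Field names follow the usage in
 * this file -- struct uniform embeds a struct dist `base' whose `ops'
 * points at uniform_ops; prob_distr.h may provide its own initializer
 * macro instead of the designated initializer shown here.
 */
#if 0
static void
uniform_api_example(void)
{
  struct uniform my_uniform = {
    .base = { .ops = &uniform_ops },
    .a = 0.0,
    .b = 10.0,
  };
  double x = dist_sample(&my_uniform.base);  /* uniform draw in [0, 10] */
  double p = dist_cdf(&my_uniform.base, x);  /* x/10 */
  (void)p;
}
#endif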

/** Functions for logistic distribution: */

static double
logistic_sample(const struct dist *dist)
{
  const struct logistic *L = const_container_of(dist, struct logistic, base);
  uint32_t s = crypto_rand_u32();
  double t = random_uniform_01();
  double p0 = random_uniform_01();
  return sample_logistic_locscale(s, t, p0, L->mu, L->sigma);
}

static double
logistic_cdf(const struct dist *dist, double x)
{
  const struct logistic *L = const_container_of(dist, struct logistic, base);
  return cdf_logistic(x, L->mu, L->sigma);
}

static double
logistic_sf(const struct dist *dist, double x)
{
  const struct logistic *L = const_container_of(dist, struct logistic, base);
  return sf_logistic(x, L->mu, L->sigma);
}

static double
logistic_icdf(const struct dist *dist, double p)
{
  const struct logistic *L = const_container_of(dist, struct logistic, base);
  return icdf_logistic(p, L->mu, L->sigma);
}

static double
logistic_isf(const struct dist *dist, double p)
{
  const struct logistic *L = const_container_of(dist, struct logistic, base);
  return isf_logistic(p, L->mu, L->sigma);
}

const struct dist_ops logistic_ops = {
  .name = "logistic",
  .sample = logistic_sample,
  .cdf = logistic_cdf,
  .sf = logistic_sf,
  .icdf = logistic_icdf,
  .isf = logistic_isf,
};

/** Functions for log-logistic distribution: */

static double
log_logistic_sample(const struct dist *dist)
{
  const struct log_logistic *LL = const_container_of(dist,
    struct log_logistic, base);
  uint32_t s = crypto_rand_u32();
  double p0 = random_uniform_01();
  return sample_log_logistic_scaleshape(s, p0, LL->alpha, LL->beta);
}

static double
log_logistic_cdf(const struct dist *dist, double x)
{
  const struct log_logistic *LL = const_container_of(dist,
    struct log_logistic, base);
  return cdf_log_logistic(x, LL->alpha, LL->beta);
}

static double
log_logistic_sf(const struct dist *dist, double x)
{
  const struct log_logistic *LL = const_container_of(dist,
    struct log_logistic, base);
  return sf_log_logistic(x, LL->alpha, LL->beta);
}

static double
log_logistic_icdf(const struct dist *dist, double p)
{
  const struct log_logistic *LL = const_container_of(dist,
    struct log_logistic, base);
  return icdf_log_logistic(p, LL->alpha, LL->beta);
}

static double
log_logistic_isf(const struct dist *dist, double p)
{
  const struct log_logistic *LL = const_container_of(dist,
    struct log_logistic, base);
  return isf_log_logistic(p, LL->alpha, LL->beta);
}

const struct dist_ops log_logistic_ops = {
  .name = "log logistic",
  .sample = log_logistic_sample,
  .cdf = log_logistic_cdf,
  .sf = log_logistic_sf,
  .icdf = log_logistic_icdf,
  .isf = log_logistic_isf,
};

/** Functions for Weibull distribution */

static double
weibull_sample(const struct dist *dist)
{
  const struct weibull *W = const_container_of(dist, struct weibull, base);
  uint32_t s = crypto_rand_u32();
  double p0 = random_uniform_01();
  return sample_weibull(s, p0, W->lambda, W->k);
}

static double
weibull_cdf(const struct dist *dist, double x)
{
  const struct weibull *W = const_container_of(dist, struct weibull, base);
  return cdf_weibull(x, W->lambda, W->k);
}

static double
weibull_sf(const struct dist *dist, double x)
{
  const struct weibull *W = const_container_of(dist, struct weibull, base);
  return sf_weibull(x, W->lambda, W->k);
}

static double
weibull_icdf(const struct dist *dist, double p)
{
  const struct weibull *W = const_container_of(dist, struct weibull, base);
  return icdf_weibull(p, W->lambda, W->k);
}

static double
weibull_isf(const struct dist *dist, double p)
{
  const struct weibull *W = const_container_of(dist, struct weibull, base);
  return isf_weibull(p, W->lambda, W->k);
}

const struct dist_ops weibull_ops = {
  .name = "Weibull",
  .sample = weibull_sample,
  .cdf = weibull_cdf,
  .sf = weibull_sf,
  .icdf = weibull_icdf,
  .isf = weibull_isf,
};

/** Functions for generalized Pareto distributions */

static double
genpareto_sample(const struct dist *dist)
{
  const struct genpareto *GP = const_container_of(dist, struct genpareto,
    base);
  uint32_t s = crypto_rand_u32();
  double p0 = random_uniform_01();
  return sample_genpareto_locscale(s, p0, GP->mu, GP->sigma, GP->xi);
}

static double
genpareto_cdf(const struct dist *dist, double x)
{
  const struct genpareto *GP = const_container_of(dist, struct genpareto,
    base);
  return cdf_genpareto(x, GP->mu, GP->sigma, GP->xi);
}

static double
genpareto_sf(const struct dist *dist, double x)
{
  const struct genpareto *GP = const_container_of(dist, struct genpareto,
    base);
  return sf_genpareto(x, GP->mu, GP->sigma, GP->xi);
}

static double
genpareto_icdf(const struct dist *dist, double p)
{
  const struct genpareto *GP = const_container_of(dist, struct genpareto,
    base);
  return icdf_genpareto(p, GP->mu, GP->sigma, GP->xi);
}

static double
genpareto_isf(const struct dist *dist, double p)
{
  const struct genpareto *GP = const_container_of(dist, struct genpareto,
    base);
  return isf_genpareto(p, GP->mu, GP->sigma, GP->xi);
}

const struct dist_ops genpareto_ops = {
  .name = "generalized Pareto",
  .sample = genpareto_sample,
  .cdf = genpareto_cdf,
  .sf = genpareto_sf,
  .icdf = genpareto_icdf,
  .isf = genpareto_isf,
};

/** Functions for geometric distribution on number of trials before success */

static double
geometric_sample(const struct dist *dist)
{
  const struct geometric *G = const_container_of(dist, struct geometric, base);
  uint32_t s = crypto_rand_u32();
  double p0 = random_uniform_01();
  return sample_geometric(s, p0, G->p);
}

static double
geometric_cdf(const struct dist *dist, double x)
{
  const struct geometric *G = const_container_of(dist, struct geometric, base);
  if (x < 1)
    return 0;
  /* 1 - (1 - p)^floor(x) = 1 - e^{floor(x) log(1 - p)} */
  return -expm1(floor(x)*log1p(-G->p));
}

static double
geometric_sf(const struct dist *dist, double x)
{
  const struct geometric *G = const_container_of(dist, struct geometric, base);
  if (x < 1)
    return 1; /* support starts at 1, so P[X > x] = 1 for x < 1 */
  /* (1 - p)^floor(x) = e^{floor(x) log(1 - p)} */
  return exp(floor(x)*log1p(-G->p));
}
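
/*
 * Note (added commentary): the icdf/isf below return the real-valued
 * solution x of 1 - (1 - p)^x = P (resp. (1 - p)^x = P).  A caller that
 * wants the integer quantile (the smallest k with CDF(k) >= P) can take
 * the ceiling of the result.
 */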

static double
geometric_icdf(const struct dist *dist, double p)
{
  const struct geometric *G = const_container_of(dist, struct geometric, base);
  return log1p(-p)/log1p(-G->p);
}

static double
geometric_isf(const struct dist *dist, double p)
{
  const struct geometric *G = const_container_of(dist, struct geometric, base);
  return log(p)/log1p(-G->p);
}

const struct dist_ops geometric_ops = {
  .name = "geometric (1-based)",
  .sample = geometric_sample,
  .cdf = geometric_cdf,
  .sf = geometric_sf,
  .icdf = geometric_icdf,
  .isf = geometric_isf,
};
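
/*
 * Worked example (illustrative, not part of this file): with p = 1/4,
 * P[X <= 3] = 1 - (3/4)^3 = 0.578125, and the icdf of 0.578125 recovers 3
 * (up to floating-point rounding).  The initializer assumes `struct
 * geometric` exposes the members `base` and `p` used by the geometric_*
 * functions above; the block is compile-guarded so it does not affect the
 * build.
 */
#if 0
static void
example_geometric_check(void)
{
  struct geometric G = {
    .base = { .ops = &geometric_ops },
    .p = 0.25,
  };
  double c = dist_cdf(&G.base, 3);  /* 1 - 0.75^3 = 0.578125 */
  double k = dist_icdf(&G.base, c); /* 3, up to rounding */
  (void)k;
}
#endif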