estimator.py
  1. # -*- coding: utf-8 -*-
  2. """
  3. Complexity estimates for solving LWE.
  4. .. moduleauthor:: Martin R. Albrecht <martinralbrecht@googlemail.com>
  5. """
  6. from functools import partial
  7. from collections import OrderedDict
  8. from scipy.optimize import newton
  9. from sage.modules.all import vector
  10. from sage.calculus.var import var
  11. from sage.functions.log import exp, log
  12. from sage.functions.other import ceil, sqrt, floor, binomial, erf
  13. from sage.interfaces.magma import magma
  14. from sage.matrix.all import Matrix
  15. from sage.misc.all import cached_function
  16. from sage.misc.all import set_verbose, get_verbose, prod
  17. from sage.arith.srange import srange
  18. from sage.numerical.optimize import find_root
  19. from sage.rings.all import QQ, RR, ZZ, RealField, PowerSeriesRing, RDF
  20. from sage.symbolic.all import pi, e
  21. from sage.rings.infinity import PlusInfinity
  22. from sage.crypto.lwe import LWE, Regev, LindnerPeikert
  23. # config
  24. oo = PlusInfinity()
  25. tau_default = 0.3 # τ used in uSVP
  26. tau_prob_default = 0.1 # probability of success for given τ
27. cfft = 1 # FFT constant C_FFT for convolutions mod q
  28. enable_LP_estimates = True # enable LP estimates
  29. enable_fplll_estimates = False # enable fplll estimates
  30. # Utility Classes #
  31. class OutOfBoundsError(ValueError):
  32. """
  33. Used to indicate a wrong value, for example delta_0 < 1.
  34. """
  35. pass
  36. class InsufficientSamplesError(ValueError):
  37. """
  38. Used to indicate the number of samples given is too small, especially
  39. useful for #samples <= 0.
  40. """
  41. pass
  42. # Utility Functions #
  43. def binary_search_minimum(f, start, stop, param, extract=lambda x: x, *arg, **kwds):
  44. """
  45. Return minimum of `f` if `f` is convex.
  46. :param start: start of range to search
  47. :param stop: stop of range to search (exclusive)
  48. :param param: the parameter to modify when calling `f`
  49. :param extract: comparison is performed on `extract(f(param=?, *args, **kwds))`
  50. """
  51. return binary_search(f, start, stop, param, better=lambda x, best: extract(x)<=extract(best), *arg, **kwds)
  52. def binary_search(f, start, stop, param, better=lambda x, best: x<=best, *arg, **kwds):
  53. """
  54. Searches for the `best` value in the interval [start,stop]
  55. depending on the given predicate `better`.
  56. :param start: start of range to search
  57. :param stop: stop of range to search (exclusive)
  58. :param param: the parameter to modify when calling `f`
  59. :param better: comparison is performed by evaluating `better(current, best)`
  60. """
  61. kwds[param] = stop
  62. D = {}
  63. D[stop] = f(*arg, **kwds)
  64. best = D[stop]
  65. b = ceil((start+stop)/2)
  66. direction = 0
  67. while True:
  68. if b == start:
  69. best = D[start]
  70. break
  71. if b not in D:
  72. kwds[param] = b
  73. D[b] = f(*arg, **kwds)
  74. if not better(D[b], best):
  75. if direction == 0:
  76. start = b
  77. b = ceil((stop+b)/2)
  78. else:
  79. stop = b
  80. b = floor((start+b)/2)
  81. else:
  82. best = D[b]
  83. if b-1 not in D:
  84. kwds[param] = b-1
  85. D[b-1] = f(*arg, **kwds)
  86. if better(D[b-1], best):
  87. stop = b
  88. b = floor((b+start)/2)
  89. direction = 0
  90. else:
  91. if b+1 not in D:
  92. kwds[param] = b+1
  93. D[b+1] = f(*arg, **kwds)
  94. if not better(D[b+1], best):
  95. break
  96. else:
  97. start = b
  98. b = ceil((stop+b)/2)
  99. direction = 1
  100. return best
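# A minimal usage sketch for the search above (assuming a convex objective in the
# named parameter): `f` is always called with `param` passed as a keyword, and the
# *value* of `f` at the optimum is returned, not the argument. For instance,
#
# sage: binary_search(lambda j: (j - 7)**2, 0, 20, "j")
#
# should return 0, the minimum of the objective on [0, 20].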
  101. def cost_str(d, keyword_width=None, newline=None, round_bound=2048):
  102. """
  103. Return string of key,value pairs as a string "key0: value0, key1: value1"
  104. :param d: report dictionary
105. :param keyword_width: keys are printed with this width
  106. EXAMPLE:
  107. By default dicts are unordered, hence the order of the output of this function is undefined::
  108. sage: s = {"delta":5, "bar":2}
  109. sage: print cost_str(s)
  110. bar: 2, delta: 5
  111. Use `OrderedDict` if you require ordered output::
  112. sage: s = OrderedDict([(u"delta", 5), ("bar",2)])
  113. sage: print cost_str(s)
  114. delta: 5, bar: 2
  115. """
  116. if d is None:
  117. return
  118. s = []
  119. for k in d:
  120. v = d[k]
  121. if keyword_width:
  122. fmt = u"%%%ds" % keyword_width
  123. k = fmt % k
  124. if ZZ(1)/round_bound < v < round_bound or v == 0 or ZZ(-1)/round_bound > v > -round_bound:
  125. try:
  126. s.append(u"%s: %9d" % (k, ZZ(v)))
  127. except TypeError:
  128. if v < 2.0 and v >= 0.0:
  129. s.append(u"%s: %9.7f" % (k, v))
  130. else:
  131. s.append(u"%s: %9.4f" % (k, v))
  132. else:
  133. t = u"≈%s2^%.1f" % ("-" if v < 0 else "", log(abs(v), 2).n())
  134. s.append(u"%s: %9s" % (k, t))
  135. if not newline:
  136. return u", ".join(s)
  137. else:
  138. return u"\n".join(s)
  139. def cost_reorder(d, ordering):
  140. """
  141. Return a new ordered dict from the key:value pairs in `d` but reordered such that the keys in
  142. ordering come first.
  143. :param d: input dictionary
  144. :param ordering: keys which should come first (in order)
  145. EXAMPLE::
  146. sage: d = OrderedDict([("a",1),("b",2),("c",3)]); d
  147. OrderedDict([('a', 1), ('b', 2), ('c', 3)])
  148. sage: cost_reorder(d, ["b","c","a"])
  149. OrderedDict([('b', 2), ('c', 3), ('a', 1)])
  150. """
  151. keys = list(d)
  152. for key in ordering:
  153. keys.pop(keys.index(key))
  154. keys = list(ordering) + keys
  155. r = OrderedDict()
  156. for key in keys:
  157. r[key] = d[key]
  158. return r
  159. def cost_filter(d, keys):
  160. """
  161. Return new ordered dict from the key:value pairs in `d` restricted to the keys in `keys`.
  162. :param d: input dictionary
  163. :param keys: keys which should be copied (ordered)
  164. """
  165. r = OrderedDict()
  166. for key in keys:
  167. r[key] = d[key]
  168. return r
  169. def cost_repeat(d, times, repeat=None):
  170. """
  171. Return a report with all costs multiplied by `times`.
  172. :param d: a cost estimate
  173. :param times: the number of times it should be run
  174. :param repeat: toggle which fields ought to be repeated and which shouldn't
  175. :returns: a new cost estimate
  176. We maintain a local dictionary which decides if an entry is multiplied by `times` or not.
177. For example, δ would not be multiplied but "#bop" would be. This check is strict so that
178. unknown entries raise an error. This forces an explicit decision on whether an entry should be
179. multiplied by `times` when the algorithm the report describes is run `times` times.
  180. EXAMPLE::
  181. sage: n, alpha, q = unpack_lwe(Regev(128))
  182. sage: print cost_str(cost_repeat(sis(n, alpha, q), 2^10))
  183. bkz2: ≈2^85.4, oracle: ≈2^36.5, δ_0: 1.0089681, ...
  184. sage: print cost_str(cost_repeat(sis(n, alpha, q), 1))
  185. bkz2: ≈2^75.4, oracle: ≈2^26.5, δ_0: 1.0089681, ...
  186. """
  187. do_repeat = {
  188. u"bop": True,
  189. u"rop": True,
  190. u"oracle": True,
  191. u"bkz2": True,
  192. u"lp": True,
  193. u"ds": True,
  194. u"fplll": True,
  195. u"sieve": True,
  196. u"enum": True,
  197. u"enumop": True,
  198. u"log(eps)": False,
  199. u"quantum_sieve": True,
  200. u"mem": False,
  201. u"delta_0": False,
  202. u"beta": False,
  203. u"k": False,
  204. u"ε": False,
  205. u"D_reg": False,
  206. u"t": False,
  207. u"Pr[⊥]": False, # we are leaving probabilities alone
  208. u"m": False,
  209. u"dim": False,
  210. u"|v|": False,
  211. u"amplify": False,
  212. u"repeat": False, # we deal with it below
  213. u"c": False,
  214. }
  215. if repeat is not None:
  216. for key in repeat:
  217. do_repeat[key] = repeat[key]
  218. ret = OrderedDict()
  219. for key in d:
  220. try:
  221. if do_repeat[key]:
  222. ret[key] = times * d[key]
  223. else:
  224. ret[key] = d[key]
  225. except KeyError:
  226. raise NotImplementedError(u"You found a bug, this function does not know about '%s' but should."%key)
  227. ret[u"repeat"] = times * ret.get("repeat", 1)
  228. return ret
  229. def cost_combine(left, right, base=None):
  230. """Combine ``left`` and ``right``.
  231. :param left: cost dictionary
  232. :param right: cost dictionary
  233. :param base: add entries to ``base``
  234. """
  235. if base is None:
  236. cost = OrderedDict()
  237. else:
  238. cost = base
  239. for key in left:
  240. cost[key] = left[key]
  241. for key in right:
  242. cost[key] = right[key]
  243. return cost
  244. def stddevf(sigma):
  245. """
  246. σ → std deviation
  247. :param sigma: Gaussian width parameter σ
  248. EXAMPLE::
  249. sage: n = 64.0
  250. sage: stddevf(n)
  251. 25.532...
  252. """
  253. return sigma/sqrt(2*pi).n()
  254. def sigmaf(stddev):
  255. """
  256. std deviation → σ
  257. :param sigma: standard deviation
  258. EXAMPLE::
  259. sage: n = 64.0
  260. sage: sigmaf(stddevf(n))
  261. 64.000...
  262. """
  263. RR = stddev.parent()
  264. return RR(sqrt(2*pi))*stddev
  265. def alphaf(sigma, q, sigma_is_stddev=False):
  266. """
  267. σ, q → α
  268. :param sigma: Gaussian width parameter (or standard deviation if `sigma_is_stddev` is `True`)
  269. :param q: modulus
  270. :param sigma_is_stddev: if `True` then `sigma` is interpreted as the standard deviation
  271. :returns: α = σ/q or σ·sqrt(2π)/q depending on `sigma_is_stddev`
  272. :rtype: real number
  273. """
  274. if sigma_is_stddev is False:
  275. return RR(sigma/q)
  276. else:
  277. return RR(sigmaf(sigma)/q)
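# For example, a Gaussian with width parameter σ = 8 modulo q = 257 gives
# α = 8/257 ≈ 0.031, while interpreting 8 as a standard deviation instead gives
# α = 8·sqrt(2π)/257 ≈ 0.078:
#
# sage: alphaf(8, 257)
# sage: alphaf(8, 257, sigma_is_stddev=True)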
  278. def amplify(target_success_probability, success_probability, majority=False):
  279. """
  280. Return the number of trials needed to amplify current `success_probability` to
  281. `target_success_probability`
  282. :param target_success_probability: 0 < real value < 1
  283. :param success_probability: 0 < real value < 1
284. :param majority: if `True` amplify a decisional problem, not a computational one
  285. if `False` then we assume that we can check solutions, so one success suffices
  286. :returns: number of required trials to amplify
  287. """
  288. prec = max(53,
  289. 2*ceil(abs(log(success_probability, 2))),
  290. 2*ceil(abs(log(1-success_probability, 2))),
  291. 2*ceil(abs(log(target_success_probability, 2))),
  292. 2*ceil(abs(log(1-target_success_probability, 2))))
  293. RR = RealField(prec)
  294. if target_success_probability < success_probability:
  295. return RR(1)
  296. success_probability = RR(success_probability)
  297. target_success_probability = RR(target_success_probability)
  298. if majority:
  299. eps = success_probability/2
  300. repeat = ceil(2*log(2 - 2*target_success_probability)/log(1 - 4*eps**2))
  301. else:
  302. # target_success_probability = 1 - (1-success_probability)^trials
  303. repeat = ceil(log(1-target_success_probability)/log(1 -success_probability))
  304. return repeat
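# A quick worked example of the non-majority branch above (illustration only):
# boosting a per-trial success probability of 0.01 to a target of 0.99 needs
# ceil(log(1 - 0.99)/log(1 - 0.01)) = ceil(458.2...) = 459 trials, i.e.
#
# sage: amplify(0.99, 0.01)
# 459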
  305. def rinse_and_repeat(f, n, alpha, q, success_probability=0.99,
  306. optimisation_target=u"bkz2",
  307. decision=True,
  308. samples=None,
  309. *args, **kwds):
  310. """Find best trade-off between success probability and running time.
  311. :param f: a function returning a cost estimate
  312. :param n: dimension > 0
  313. :param alpha: fraction of the noise α < 1.0
  314. :param q: modulus > 0
  315. :param success_probability: target success probability
316. :param optimisation_target: which value ought to be minimised
  317. :param decision: ``True`` if ``f`` solves Decision-LWE, ``False`` for Search-LWE.
  318. :param samples: the number of available samples
  319. """
  320. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  321. best = None
  322. step_size = 32
  323. i = floor(-log(success_probability, 2))
  324. has_solution = False
  325. while True:
  326. prob = min(2**-i, success_probability)
  327. try:
  328. current = f(n, alpha, q,
  329. optimisation_target=optimisation_target,
  330. success_probability=prob,
  331. samples=samples,
  332. *args, **kwds)
  333. repeat = amplify(success_probability, prob, majority=decision)
  334. do_repeat = None if samples is None else {"oracle": False}
  335. current = cost_repeat(current, repeat, do_repeat)
  336. has_solution = True
  337. except (OutOfBoundsError, InsufficientSamplesError) as err:
  338. key = list(best)[0] if best is not None else optimisation_target
  339. current = OrderedDict()
  340. current[key] = oo
  341. if get_verbose() >= 2:
  342. print err
  343. current["log(eps)"] = -i
  344. if get_verbose() >= 2:
  345. print cost_str(current)
  346. key = list(current)[0]
  347. if best is None:
  348. best = current
  349. i += step_size
  350. continue
  351. if key not in best or current[key] < best[key]:
  352. best = current
  353. i += step_size
  354. else:
  355. # we go back
  356. i = -best["log(eps)"] - step_size
  357. i += step_size/2
  358. if i <= 0:
  359. i = step_size/2
  360. # and half the step size
  361. step_size = step_size/2
  362. if step_size == 0:
  363. break
  364. if not has_solution:
  365. raise RuntimeError("No solution found for chosen parameters.")
  366. return best
  367. @cached_function
  368. def distinguish_required_m(sigma, q, success_probability, other_sigma=None):
  369. RR = sigma.parent()
  370. if other_sigma is not None:
  371. sigma = RR(sqrt(sigma**2 + other_sigma**2))
  372. adv = RR(exp(-RR(pi)*(RR(sigma/q)**2)))
  373. return RR(success_probability)/RR(adv**2)
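# Rough intuition for the estimate above: the distinguishing advantage of one sample
# is adv = exp(-π·(σ/q)²), and about success_probability/adv² samples are required.
# E.g. for σ = q and success probability 0.99 this gives roughly
# 0.99/exp(-π)² ≈ 0.99/0.0432² ≈ 530 samples.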
  374. def uniform_variance_from_bounds(a, b, h=None):
  375. """
  376. Variance for uniform distribution from bounds.
  377. :param a:
  378. :param b:
  379. :param h: number of non-zero components.
  380. :returns:
  381. :rtype:
  382. """
  383. assert a < 0 and b > 0 and abs(a) == abs(b)
  384. if h is None:
  385. n = b - a + 1
  386. return (n**2 - 1)/ZZ(12)
  387. else:
  388. # sage: var("i,a,b")
  389. # sage: p = 1/(b-a)
  390. # sage: sum(i^2*p, i, a, b)
  391. return (2*a**3 - 2*b**3 - 3*a**2 - 3*b**2 + a - b)/(6*ZZ(a - b))
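# For example, for a secret drawn uniformly from {-1, 0, 1} (no sparsity):
#
# sage: uniform_variance_from_bounds(-1, 1)
# 2/3
#
# i.e. (n² - 1)/12 with n = 3 possible values.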
  392. def unpack_lwe(lwe):
  393. """
  394. Return n, α, q given an LWE instance object.
  395. :param lwe: LWE object
  396. :returns: n, α, q
  397. :rtype: tuple
  398. """
  399. n = lwe.n
  400. q = lwe.K.order()
  401. try:
  402. alpha = alphaf(sigmaf(lwe.D.sigma), q)
  403. except AttributeError:
  404. # older versions of Sage use stddev, not sigma
  405. alpha = alphaf(sigmaf(lwe.D.stddev), q)
  406. return n, alpha, q
  407. def unpack_lwe_dict(lwe):
  408. """
  409. Return dictionary consisting of n, α, q and samples given an LWE instance object.
  410. :param lwe: LWE object
  411. :returns: "n": n, "alpha": α, "q": q, "samples": samples
  412. :rtype: dictionary
  413. """
  414. n, alpha, q = unpack_lwe(lwe)
  415. samples = lwe.m
  416. return {"n": n, "alpha": alpha, "q": q, "samples": samples}
  417. def preprocess_params(n, alpha, q, success_probability=None, prec=None, samples=None):
  418. """
  419. Check if parameters n, α, q are sound and return correct types.
  420. Also, if given, the soundness of the success probability and the
  421. number of samples is ensured.
  422. """
  423. if n < 1:
  424. raise ValueError("LWE dimension must be greater than 0.")
  425. if alpha <= 0:
  426. raise ValueError("Fraction of noise must be > 0.")
  427. if q < 1:
  428. raise ValueError("LWE modulus must be greater than 0.")
  429. if samples is not None and samples < 1:
  430. raise ValueError("Given number of samples must be greater than 0.")
  431. if prec is None:
  432. prec = 128
  433. RR = RealField(prec)
  434. n, alpha, q = ZZ(n), RR(alpha), ZZ(q),
435. if samples is not None: samples = ZZ(samples)
  436. if success_probability is not None:
  437. if success_probability >= 1 or success_probability <= 0:
  438. raise ValueError("success_probability must be between 0 and 1.")
  439. return n, alpha, q, RR(success_probability)
  440. else:
  441. return n, alpha, q
  442. ################################
  443. # Section 2 #
  444. ################################
  445. def switch_modulus(n, alpha, q, s_variance, h=None):
  446. """
  447. Return modulus switched parameters.
  448. :param n: the number of variables in the LWE instance
  449. :param alpha: noise size
  450. :param q: modulus
451. :param s_variance: the variance of the secret
452. :param h: number of non-zero components.
453. If ``h`` is given, then ``s_variance`` refers to the variance of the non-zero components.
  454. EXAMPLE::
  455. sage: switch_modulus(128, 0.01, 65537, uniform_variance_from_bounds(0,1))
  456. (128, 0.0141421356237310, 410.000000000000)
  457. sage: switch_modulus(128, 0.001, 65537, uniform_variance_from_bounds(0,1))
  458. (128, 0.00141421356237310, 4094.00000000000)
  459. sage: switch_modulus(128, 0.001, 65537, uniform_variance_from_bounds(-5,5))
  460. (128, 0.00141421356237310, 25889.0000000000)
  461. """
  462. if h is not None:
  463. length = h
  464. else:
  465. length = n
  466. p = RR(ceil(sqrt(2*pi*s_variance*length/ZZ(12)) / alpha))
467. if p < 32: # somewhat arbitrary cut-off
  468. # we can't pretend everything is uniform any more, p is too small
  469. p = RR(ceil(sqrt(2*pi*s_variance*length*2/ZZ(12)) / alpha))
  470. beta = RR(sqrt(2)*alpha)
  471. return n, beta, p
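# Sanity check of the first docstring example above: with Var(s) = 1/4 (uniform on
# {0, 1}), n = 128 and α = 0.01, the new modulus is p = ceil(sqrt(2π·(1/4)·128/12)/0.01)
# = ceil(409.3...) = 410 and the new noise rate is β = sqrt(2)·0.01 ≈ 0.0141, matching
# the reported output (128, 0.0141..., 410).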
  472. # Lattice Reduction
  473. def bkz_svp_repeat(n, k):
  474. """Return number of SVP calls in BKZ-k
  475. :param n: dimension
  476. :param k: block size
  477. .. note :: loosely based on experiments in [PhD:Chen13]
  478. """
  479. return 8*n
  480. def _delta_0f(k):
  481. """
  482. Compute `δ_0` from block size `k` without enforcing `k` in ZZ.
483. The values of δ_0 for k <= 40 were computed as follows:
  484. ```
  485. # -*- coding: utf-8 -*-
  486. from fpylll import BKZ, IntegerMatrix
  487. from multiprocessing import Pool
  488. from sage.all import mean, sqrt, exp, log, cputime
  489. d, trials = 320, 32
  490. def f((A, beta)):
  491. par = BKZ.Param(block_size=beta, strategies=BKZ.DEFAULT_STRATEGY, flags=BKZ.AUTO_ABORT)
  492. q = A[-1, -1]
  493. d = A.nrows
  494. t = cputime()
  495. A = BKZ.reduction(A, par, float_type="dd")
  496. t = cputime(t)
  497. return t, exp(log(A[0].norm()/sqrt(q).n())/d)
  498. if __name__ == '__main__':
  499. for beta in (5, 10, 15, 20, 25, 28, 30, 35, 40):
  500. delta_0 = []
  501. t = []
  502. i = 0
  503. while i < trials:
  504. threads = int(open("delta_0.nthreads").read()) # make sure this file exists
  505. pool = Pool(threads)
  506. A = [(IntegerMatrix.random(d, "qary", k=d//2, bits=50), beta) for j in range(threads)]
  507. for (t_, delta_0_) in pool.imap_unordered(f, A):
  508. t.append(t_)
  509. delta_0.append(delta_0_)
  510. i += threads
  511. print u"β: %2d, δ_0: %.5f, time: %5.1fs, (%2d,%2d)"%(beta, mean(delta_0), mean(t), i, threads)
  512. print
  513. ```
  514. """
  515. small = (( 2, 1.02190), # noqa
  516. ( 5, 1.01862), # noqa
  517. (10, 1.01616),
  518. (15, 1.01485),
  519. (20, 1.01420),
  520. (25, 1.01342),
  521. (28, 1.01331),
  522. (40, 1.01295))
  523. if k <= 2:
  524. return RR(1.0219)
  525. elif k < 40:
  526. for i in range(1, len(small)):
  527. if small[i][0] > k:
  528. return RR(small[i-1][1])
  529. elif k == 40:
  530. return RR(small[-1][1])
  531. else:
  532. return RR(k/(2*pi*e) * (pi*k)**(1/k))**(1/(2*(k-1)))
  533. def delta_0f(k):
  534. """
  535. Compute `δ_0` from block size `k`.
  536. """
  537. k = ZZ(round(k))
  538. return _delta_0f(k)
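# For block sizes beyond the experimental range the asymptotic formula above applies,
# e.g. for k = 100:
#
# sage: delta_0f(100)  # ≈ (100/(2πe)·(100π)^(1/100))^(1/198) ≈ 1.0093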
  539. def k_chen_secant(delta):
  540. """
  541. Estimate required blocksize `k` for a given root-hermite factor δ based on [PhD:Chen13]_
  542. :param delta: root-hermite factor
  543. EXAMPLE::
  544. sage: 50 == k_chen(1.0121)
  545. True
  546. sage: 100 == k_chen(1.0093)
  547. True
  548. sage: k_chen(1.0024) # Chen reports 800
  549. 808
  550. .. [PhD:Chen13] Yuanmi Chen. Réduction de réseau et sécurité concrète du chiffrement
  551. complètement homomorphe. PhD thesis, Paris 7, 2013.
  552. """
  553. # newton() will produce a "warning", if two subsequent function values are
  554. # indistinguishable (i.e. equal in terms of machine precision). In this case
  555. # newton() will return the value k in the middle between the two values
  556. # k1,k2 for which the function values were indistinguishable.
  557. # Since f approaches zero for k->+Infinity, this may be the case for very
  558. # large inputs, like k=1e16.
  559. # For now, these warnings just get printed and the value k is used anyways.
  560. # This seems reasonable, since for such large inputs the exact value of k
  561. # doesn't make such a big difference.
  562. try:
  563. k = newton(lambda k: RR(_delta_0f(k) - delta), 100, fprime=None, args=(), tol=1.48e-08, maxiter=500)
  564. k = ceil(k)
  565. if k < 40:
  566. # newton may output k < 40. The old k_chen method wouldn't do this. For
  567. # consistency, call the old k_chen method, i.e. consider this try as "failed".
  568. raise RuntimeError("k < 40")
  569. return k
  570. except (RuntimeError, TypeError):
  571. # if something fails, use old k_chen method
  572. if get_verbose() >= 2:
  573. print "secant method failed, using k_chen_old(delta) instead!"
  574. k = k_chen_old(delta)
  575. return k
  576. def k_chen_find_root(delta):
  577. # handle k < 40 separately
  578. k = ZZ(40)
  579. if delta_0f(k) < delta:
  580. return k
  581. try:
  582. k = find_root(lambda k: RR(_delta_0f(k) - delta), 40, 2**16, maxiter=500)
  583. k = ceil(k)
  584. except RuntimeError:
  585. # finding root failed; reasons:
  586. # 1. maxiter not sufficient
  587. # 2. no root in given interval
  588. k = k_chen_old(delta)
  589. return k
  590. def k_chen(delta):
591. # TODO: decide on one strategy (secant, find_root, old) and its handling of errors.
  592. k = k_chen_find_root(delta)
  593. return k
  594. def k_chen_old(delta):
  595. """
  596. Estimate required blocksize `k` for a given root-hermite factor δ based on [PhD:Chen13]_
  597. :param delta: root-hermite factor
  598. EXAMPLE::
  599. sage: 50 == k_chen(1.0121)
  600. True
  601. sage: 100 == k_chen(1.0093)
  602. True
  603. sage: k_chen(1.0024) # Chen reports 800
  604. 808
  605. .. [PhD:Chen13] Yuanmi Chen. Réduction de réseau et sécurité concrète du chiffrement
  606. complètement homomorphe. PhD thesis, Paris 7, 2013.
  607. """
  608. k = ZZ(40)
  609. while delta_0f(2*k) > delta:
  610. k *= 2
  611. while delta_0f(k+10) > delta:
  612. k += 10
  613. while True:
  614. if delta_0f(k) < delta:
  615. break
  616. k += 1
  617. return k
  618. def bkz_runtime_delta_LP(delta, n):
  619. """
  620. Runtime estimation assuming the Lindner-Peikert model.
  621. """
  622. return RR(1.8/log(delta, 2) - 110 + log(2.3*10**9, 2))
  623. def bkz_runtime_k_sieve_bdgl16_small(k, n):
  624. u"""
  625. Runtime estimation given `k` and assuming sieving is used to realise the SVP oracle.
  626. For small `k` we use estimates based on experiments in [BDGL16]
  627. :param k: block size
  628. :param n: lattice dimension
  629. .. [BDGL16] Becker, A., Ducas, L., Gama, N., & Laarhoven, T. (2016). New directions in
  630. nearest neighbor searching with applications to lattice sieving. In SODA 2016, (pp. 10–24).
  631. """
  632. return RR(0.387*k + 16.4 + log(bkz_svp_repeat(n, k), 2))
  633. def bkz_runtime_k_sieve_bdgl16_asymptotic(k, n):
  634. u"""
  635. Runtime estimation given `k` and assuming sieving is used to realise the SVP oracle.
  636. :param k: block size
  637. :param n: lattice dimension
  638. .. [BDGL16] Becker, A., Ducas, L., Gama, N., & Laarhoven, T. (2016). New directions in
  639. nearest neighbor searching with applications to lattice sieving. In SODA 2016, (pp. 10–24).
  640. """
  641. # we simply pick the same additive constant 16.4 as for the experimental result in [BDGL16]
  642. return RR(0.292*k + 16.4 + log(bkz_svp_repeat(n, k), 2))
  643. def bkz_runtime_k_quantum_sieve(k, n):
  644. """
  645. Runtime estimation for quantum sieving.
  646. .. [LaaMosPol14] Thijs Laarhoven, Michele Mosca, & Joop van de Pol. Finding shortest lattice
  647. vectors faster using quantum search. Cryptology ePrint Archive, Report 2014/907, 2014.
  648. https://eprint.iacr.org/2014/907.
  649. """
  650. return RR((0.265*k + 16.4 + log(bkz_svp_repeat(n, k), 2)))
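# For example, at block size k = 200 in dimension n = 400 (so log2 of the 8n SVP calls
# is ≈ 11.6), the classical sieve model above gives about 0.292·200 + 16.4 + 11.6 ≈ 86.4
# bits, while the quantum sieve model gives about 0.265·200 + 16.4 + 11.6 ≈ 81.0 bits.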
  651. bkz_runtime_k_sieve_asymptotic = bkz_runtime_k_sieve_bdgl16_asymptotic
  652. bkz_runtime_k_sieve_small = bkz_runtime_k_sieve_bdgl16_small
  653. def bkz_runtime_k_sieve(k, n):
  654. u"""
  655. Runtime estimation given `k` and assuming sieving is used to realise the SVP oracle.
  656. :param k: block size
  657. :param n: lattice dimension
  658. """
  659. if k <= 90:
  660. return bkz_runtime_k_sieve_small(k, n)
  661. else:
  662. return bkz_runtime_k_sieve_asymptotic(k, n)
  663. def bkz_runtime_k_bkz2(k, n):
  664. """
  665. Runtime estimation given `k` and assuming [CheNgu12]_ estimates are correct.
  666. The constants in this function were derived as follows based on Table 4 in [CheNgu12]_::
  667. sage: dim = [100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250]
  668. sage: nodes = [39.0, 44.0, 49.0, 54.0, 60.0, 66.0, 72.0, 78.0, 84.0, 96.0, 99.0, 105.0, 111.0, 120.0, 127.0, 134.0] # noqa
  669. sage: times = [c + log(200,2).n() for c in nodes]
  670. sage: T = zip(dim, nodes)
  671. sage: var("a,b,c,k")
  672. (a, b, c, k)
  673. sage: f = a*k*log(k, 2.0) + b*k + c
  674. sage: f = f.function(k)
  675. sage: f.subs(find_fit(T, f, solution_dict=True))
  676. k |--> 0.270188776350190*k*log(k) - 1.0192050451318417*k + 16.10253135200765
  677. .. [CheNgu12] Yuanmi Chen and Phong Q. Nguyen. BKZ 2.0: Better lattice security estimates (Full Version).
  678. 2012. http://www.di.ens.fr/~ychen/research/Full_BKZ.pdf
  679. """
  680. repeat = log(bkz_svp_repeat(n, k), 2)
  681. return RR(0.270188776350190*k*log(k) - 1.0192050451318417*k + 16.10253135200765 + repeat)
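# Sanity check against Table 4 of [CheNgu12]: at k = 250 the fit gives
# 0.2702·250·log(250) - 1.0192·250 + 16.1 ≈ 134.3, close to the reported 134
# (the log2(8n) term for the number of SVP calls is then added on top).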
  682. def bkz_runtime_delta_bkz2(delta, n):
  683. """
  684. Runtime estimation extrapolated from BKZ 2.0 timings.
  685. """
  686. k = k_chen(delta)
  687. return bkz_runtime_k_bkz2(k, n)
  688. def bkz_runtime_k_fplll(k, n):
  689. """
  690. Runtime estimation extrapolated from fpLLL 4.0.4 experiments
  691. """
  692. repeat = log(bkz_svp_repeat(n, k), 2)
  693. return RR(0.013487467331762426*k**2 - 0.28245244492771304*k + 21.017892848466957 + repeat)
  694. def bkz_runtime_delta(delta, n, log_repeat=0):
  695. """
  696. Runtime estimates for BKZ (2.0) given δ and n
  697. """
  698. if enable_LP_estimates:
  699. t_lp = bkz_runtime_delta_LP(delta, n) + log_repeat
  700. RR = delta.parent()
  701. k = k_chen(delta)
  702. t_sieve = RR(bkz_runtime_k_sieve(k, n) + log_repeat)
  703. t_bkz2 = RR(bkz_runtime_k_bkz2(k, n) + log_repeat)
  704. t_fplll = RR(bkz_runtime_k_fplll(k, n) + log_repeat)
  705. t_quantum_sieve = RR(bkz_runtime_k_quantum_sieve(k, n) + log_repeat)
  706. r = OrderedDict()
  707. r[u"delta_0"] = delta
  708. r[u"bkz2"] = RR(2)**t_bkz2
  709. r[u"beta"] = k
  710. if enable_LP_estimates:
  711. r[u"lp"] = RR(2)**t_lp
  712. if enable_fplll_estimates:
  713. r[u"fplll"] = RR(2)**t_fplll
  714. r[u"sieve"] = RR(2)**t_sieve
  715. r[u"quantum_sieve"] = RR(2)**t_quantum_sieve
  716. return r
  717. def lattice_reduction_opt_m(n, q, delta):
  718. """
  719. Return the (heuristically) optimal lattice dimension `m`
  720. :param n: dimension
  721. :param q: modulus
  722. :param delta: root Hermite factor `δ_0`
  723. """
  724. return ZZ(round(sqrt(n*log(q, 2)/log(delta, 2))))
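# For example, for n = 256, q = 2^15 and δ_0 = 1.01 this gives
# m = round(sqrt(256·15/log(1.01, 2))) = round(sqrt(256·15/0.01435)) ≈ 517.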
  725. def sieve_or_enum(func):
  726. """
  727. Take minimum of sieving or enumeration for lattice-based attacks.
  728. :param func: a lattice-reduction based estimator
  729. """
  730. def wrapper(*args, **kwds):
  731. from copy import copy
  732. kwds = copy(kwds)
  733. if "optimisation_target" in kwds:
  734. del kwds["optimisation_target"]
  735. a = func(*args, optimisation_target="bkz2", **kwds)
  736. b = func(*args, optimisation_target="sieve", **kwds)
  737. if a["bkz2"] <= b["sieve"]:
  738. return a
  739. else:
  740. return b
  741. return wrapper
  742. def mitm(n, alpha, q, success_probability=0.99, secret_bounds=None, h=None, samples=None):
  743. """
  744. Return meet-in-the-middle estimates.
  745. :param n: dimension
  746. :param alpha: noise parameter
  747. :param q: modulus
  748. :param success_probability: desired success probability
  749. :param secret_bounds: tuple with lower and upper bound on the secret
  750. :param samples: the number of available samples
  751. :returns: a cost estimate
  752. :rtype: OrderedDict
  753. """
  754. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  755. ret = OrderedDict()
  756. RR = alpha.parent()
  757. m = None
  758. if samples is not None:
  759. if not samples > n:
  760. raise InsufficientSamplesError("Number of samples: %d" % samples)
  761. m = samples - n
  762. t = ceil(2*sqrt(log(n)))
  763. if secret_bounds is None:
  764. # assert((2*t*alpha)**m * (alpha*q)**(n/2) <= 2*n)
  765. m_required = ceil((log(2*n) - log(alpha*q)*(n/2))/log(2*t*alpha))
  766. if m is not None and m < m_required:
  767. raise InsufficientSamplesError("Requirement not fulfilled. Number of samples: %d - %d < %d" % (
  768. samples, n, m_required))
  769. m = m_required
  770. if m*(2*alpha) > 1- 1/(2*n):
  771. raise ValueError("Cannot find m to satisfy constraints (noise too big).")
  772. ret["rop"] = RR((2*alpha*q+1)**(n/2) * 2*n * m)
  773. ret["mem"] = RR((2*alpha*q+1)**(n/2) * m)
  774. else:
  775. a, b = secret_bounds
  776. # assert((2*t*alpha)**m * (b-a+1)**(n/2) <= 2*n)
  777. m_required = ceil(log(2*n/((b-a+1)**(n/2)))/log(2*t*alpha))
  778. if m is not None and m < m_required:
  779. raise InsufficientSamplesError("Requirement not fulfilled. Number of samples: %d - %d < %d" % (
  780. samples, n, m_required))
  781. m = m_required
  782. if (m*(2*alpha) > 1- 1/(2*n)):
  783. raise ValueError("Cannot find m to satisfy constraints (noise too big).")
  784. ret["rop"] = RR((b-a+1)**(n/2) * 2*n * m)
  785. ret["mem"] = RR((b-a+1)**(n/2) * m)
  786. ret["oracle"] = n + m
  787. ret["bop"] = RR(log(q, 2) * ret["rop"])
  788. return cost_reorder(ret, ["bop", "oracle", "mem"])
  789. # BKW
  790. def bkw(n, alpha, q, success_probability=0.99, optimisation_target="bop", prec=None, search=False, samples=None):
  791. """
  792. Estimate the cost of running BKW to solve LWE
  793. :param n: dimension > 0
  794. :param alpha: fraction of the noise α < 1.0
  795. :param q: modulus > 0
  796. :param success_probability: probability of success < 1.0
  797. :param optimisation_target: field to use to decide if parameters are better
  798. :param prec: precision used for floating point computations
  799. :param search: if `True` solve Search-LWE, otherwise solve Decision-LWE
  800. :param samples: the number of available samples
  801. :returns: a cost estimate
  802. :rtype: OrderedDict
  803. """
  804. if search:
  805. return bkw_search(n, alpha, q, success_probability, optimisation_target, prec, samples)
  806. else:
  807. return bkw_decision(n, alpha, q, success_probability, optimisation_target, prec, samples)
  808. def bkw_decision(n, alpha, q, success_probability=0.99, optimisation_target="bop", prec=None, samples=None):
  809. """
  810. Estimate the cost of running BKW to solve Decision-LWE following [DCC:ACFFP15]_.
  811. :param n: dimension > 0
  812. :param alpha: fraction of the noise α < 1.0
  813. :param q: modulus > 0
  814. :param success_probability: probability of success < 1.0
  815. :param optimisation_target: field to use to decide if parameters are better
  816. :param prec: precision used for floating point computations
  817. :param samples: the number of available samples
  818. :returns: a cost estimate
  819. :rtype: OrderedDict
  820. .. [DCC:ACFFP15] Albrecht, M. R., Cid, C., Jean-Charles Faugère, Fitzpatrick, R., &
  821. Perret, L. (2015). On the complexity of the BKW algorithm on LWE.
  822. Designs, Codes & Cryptography, Volume 74, Issue 2, pp 325-354
  823. """
  824. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  825. sigma = alpha*q
  826. has_samples = samples is not None
  827. has_enough_samples = True
  828. RR = alpha.parent()
  829. def _run(t):
  830. a = RR(t*log(n, 2)) # target number of adds: a = t*log_2(n)
  831. b = RR(n/a) # window width
  832. sigma_final = RR(n**t).sqrt() * sigma # after n^t adds we get this σ
  833. m = distinguish_required_m(sigma_final, q, success_probability)
  834. tmp = a*(a-1)/2 * (n+1) - b*a*(a-1)/4 - b/6 * RR((a-1)**3 + 3/2*(a-1)**2 + (a-1)/2)
  835. stage1a = RR(q**b-1)/2 * tmp
  836. stage1b = m * (a/2 * (n + 2))
  837. stage1 = stage1a + stage1b
  838. nrops = RR(stage1)
  839. nbops = RR(log(q, 2) * nrops)
  840. ncalls = RR(a * ceil(RR(q**b)/RR(2)) + m)
  841. nmem = ceil(RR(q**b)/2) * a * (n + 1 - b * (a-1)/2)
  842. current = OrderedDict([(u"t", t),
  843. (u"bop", nbops),
  844. (u"oracle", ncalls),
  845. (u"m", m),
  846. (u"mem", nmem),
  847. (u"rop", nrops),
  848. (u"a", a),
  849. (u"b", b),
  850. ])
  851. if optimisation_target != u"oracle":
  852. current = cost_reorder(current, (optimisation_target, u"oracle", u"t"))
  853. else:
  854. current = cost_reorder(current, (optimisation_target, u"t"))
  855. return current
  856. best_runtime = None
  857. best_samples = None
  858. best = None
  859. may_terminate = False
  860. t = RR(2*(log(q, 2) - log(sigma, 2))/log(n, 2))
  861. while True:
  862. current = _run(t)
  863. if get_verbose() >= 2:
  864. print cost_str(current)
  865. # Usually, both the fewest samples required and the best runtime are
  866. # provided when choosing 't' such that the two steps are balanced. But,
  867. # better safe than sorry, both cases are searched for independently.
  868. # So, 'best_samples' and 'best_runtime' are only used to ensure termination,
  869. # such that 't' is increased until both the best runtime and the fewest
870. # samples have been seen. The result to be returned is held in 'best'.
  871. if has_samples:
  872. has_enough_samples = current["oracle"] < samples
  873. if not best_samples:
  874. best_samples = current
  875. else:
  876. if best_samples["oracle"] > current["oracle"]:
  877. best_samples = current
  878. if has_enough_samples and (
  879. best is None or best[optimisation_target] > current[optimisation_target]):
  880. best = current
  881. else:
  882. if may_terminate:
  883. break
  884. may_terminate = True
  885. if not best_runtime:
  886. best_runtime = current
  887. else:
  888. if best_runtime[optimisation_target] > current[optimisation_target]:
  889. best_runtime = current
  890. if has_enough_samples and (best is None or best[optimisation_target] > current[optimisation_target]):
  891. best = current
  892. else:
  893. if not has_samples or may_terminate:
  894. break
  895. may_terminate = True
  896. t += 0.05
  897. if best is None:
  898. raise InsufficientSamplesError("Too few samples (%d given) to achieve a success probability of %f." % (
  899. samples, success_probability))
  900. return best
  901. def bkw_search(n, alpha, q, success_probability=0.99, optimisation_target="bop", prec=None, samples=None):
  902. """
903. Estimate the cost of running BKW to solve Search-LWE following [EC:DucTraVau15]_.
  904. :param n: dimension > 0
  905. :param alpha: fraction of the noise α < 1.0
  906. :param q: modulus > 0
  907. :param success_probability: probability of success < 1.0
  908. :param optimisation_target: field to use to decide if parameters are better
  909. :param prec: precision used for floating point computations
  910. :param samples: the number of available samples
  911. :returns: a cost estimate
  912. :rtype: OrderedDict
913. .. [EC:DucTraVau15] Duc, A., Tramèr, F., & Vaudenay, S. (2015). Better algorithms for
914. LWE and LWR. In EUROCRYPT 2015.
  915. """
  916. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  917. sigma = stddevf(alpha*q)
  918. eps = success_probability
  919. has_samples = samples is not None
  920. has_enough_samples = True
  921. RR = alpha.parent()
  922. # "To simplify our result, we considered operations over C to have the same
  923. # complexity as operations over Z_q . We also took C_FFT = 1 which is the
  924. # best one can hope to obtain for a FFT."
  925. c_cost = 1
  926. c_mem = 1
  927. def _run(t):
  928. a = RR(t*log(n, 2)) # target number of adds: a = t*log_2(n)
  929. b = RR(n/a) # window width
  930. epp = (1- eps)/a
  931. m = lambda j, eps: 8 * b * log(q/eps) * (1 - (2 * pi**2 * sigma**2)/(q**2))**(-2**(a-j)) # noqa
  932. c1 = (q**b-1)/2 * ((a-1)*(a-2)/2 * (n+1) - b/6 * (a*(a-1) * (a-2)))
  933. c2 = sum([m(j, epp) * (a-1-j)/2 * (n+2) for j in range(a)])
  934. c3 = (2*sum([m(j, epp) for j in range(a)]) + cfft * n * q**b * log(q, 2)) * c_cost
  935. c4 = (a-1)*(a-2) * b * (q**b - 1)/2
  936. nrops = RR(c1 + c2 + c3 + c4)
  937. nbops = RR(log(q, 2) * nrops)
  938. ncalls = (a-1) * (q**b - 1)/2 + m(0, eps)
  939. nmem = ((q**b - 1)/2 * (a-1) * (n + 1 - b*(a-2)/2)) + m(0, eps) + c_mem * q**b
  940. current = OrderedDict([(u"t", t),
  941. (u"bop", nbops),
  942. (u"oracle", ncalls),
  943. (u"m", m(0, eps)),
  944. (u"mem", nmem),
  945. (u"rop", nrops),
  946. (u"a", a),
  947. (u"b", b),
  948. ])
  949. if optimisation_target != u"oracle":
  950. current = cost_reorder(current, (optimisation_target, u"oracle", u"t"))
  951. else:
  952. current = cost_reorder(current, (optimisation_target, u"t"))
  953. return current
  954. best_runtime = None
  955. best_samples = None
  956. best = None
  957. may_terminate = False
  958. t = RR(2*(log(q, 2) - log(sigma, 2))/log(n, 2))
  959. while True:
  960. current = _run(t)
  961. if get_verbose() >= 2:
  962. print cost_str(current)
  963. # Similar to BKW-Decision, both the fewest samples required and the
  964. # best runtime are provided when choosing 't' such that the two steps
  965. # are balanced. To be safe, every 't' is tested until the fewest number
  966. # of samples required is found.
  967. if has_samples:
  968. has_enough_samples = current["oracle"] < samples
  969. if not best_samples:
  970. best_samples = current
  971. else:
  972. if best_samples["oracle"] > current["oracle"]:
  973. best_samples = current
  974. if has_enough_samples and (
  975. best is None or best[optimisation_target] > current[optimisation_target]):
  976. best = current
  977. else:
  978. if may_terminate:
  979. break
  980. may_terminate = True
  981. if not best_runtime:
  982. best_runtime = current
  983. else:
  984. if best_runtime[optimisation_target] > current[optimisation_target]:
  985. best_runtime = current
  986. if has_enough_samples and (best is None or best[optimisation_target] > current[optimisation_target]):
  987. best = current
  988. else:
  989. if not has_samples or may_terminate:
  990. break
  991. may_terminate = True
  992. t += 0.05
  993. if best is None:
  994. raise InsufficientSamplesError("Too few samples (%d given) to achieve a success probability of %f." % (
  995. samples, success_probability))
  996. return best
  997. def _bkw_coded(n, alpha, q, t2, b, success_probability=0.99, ntest=None, secret_bounds=None, h=None):
  998. """
  999. Estimate complexity of Coded-BKW as described in [C:GuoJohSta15]
  1000. :param n: dimension > 0
  1001. :param alpha: fraction of the noise α < 1.0
  1002. :param q: modulus > 0
  1003. :param t2: number of coded BKW steps (≥ 0)
  1004. :param b: table size (≥ 1)
  1005. :param success_probability: probability of success < 1.0, IGNORED
  1006. :param ntest: optional parameter ntest
  1007. :returns: a cost estimate
  1008. :rtype: OrderedDict
  1009. .. note::
  1010. You probably want to call bkw_coded instead.
  1011. """
  1012. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  1013. sigma = stddevf(alpha*q) # [C:GuoJohSta15] use σ = standard deviation
  1014. RR = alpha.parent()
  1015. cost = OrderedDict()
  1016. # Our cost is mainly determined by q**b, on the other hand there are
  1017. # expressions in q**(l+1) below, hence, we set l = b - 1. This allows to
  1018. # achieve the performance reported in [C:GuoJohSta15].
  1019. b = ZZ(b)
  1020. cost["b"] = b
  1021. l = b - 1
  1022. cost["l"] = l
  1023. gamma = RR(1.2) # TODO make this dependent on success_probability
  1024. if secret_bounds:
  1025. d = secret_bounds[1] - secret_bounds[0] + 1
  1026. else:
  1027. d = 3*sigma # TODO make this dependent on success_probability
  1028. cost["d"] = d
  1029. cost[u"γ"] = gamma
  1030. def N(i, sigma_set):
  1031. """
  1032. Return $N_i$ for the $i$-th $[N_i, b]$ linear code.
  1033. :param i: index
  1034. :param sigma_set: target noise level
  1035. """
  1036. return floor(b/(1-log(12*sigma_set**2/ZZ(2)**i, q)/2))
  1037. def find_ntest(n, l, t1, t2, b):
  1038. """
  1039. If the parameter `ntest` is not provided, we use this function to estimate it.
  1040. :param n: dimension > 0
  1041. :param l: table size for hypothesis testing
  1042. :param t1: number of normal BKW steps
  1043. :param t2: number of coded BKW steps
  1044. :param b: table size for BKW steps
  1045. """
  1046. # there is no hypothesis testing because we have enough normal BKW
1047. # tables to cover all of n
  1048. if t1*b >= n:
  1049. return 0
1050. # solve for ntest by aiming for ntop == 0
  1051. ntest = var("nest")
  1052. sigma_set = sqrt(q**(2*(1-l/ntest))/12)
  1053. ncod = sum([N(i, sigma_set) for i in range(1, t2+1)])
  1054. ntop = n - ncod - ntest - t1*b
  1055. try:
  1056. ntest = round(find_root(0 == ntop, 0, n))
  1057. except RuntimeError:
  1058. # annoyingly we get a RuntimeError when find_root can't find a
  1059. # solution, we translate to something more meaningful
  1060. raise ValueError("Cannot find parameters for n=%d, l=%d, t1=%d, t2=%d, b=%d"%(n, l, t1, t2, b))
  1061. return ZZ(ntest)
  1062. # we compute t1 from N_i by observing that any N_i ≤ b gives no advantage
  1063. # over vanilla BKW, but the estimates for coded BKW always assume
  1064. # quantisation noise, which is too pessimistic for N_i ≤ b.
  1065. t1 = 0
  1066. if ntest is None:
  1067. ntest_ = find_ntest(n, l, t1, t2, b)
  1068. else:
  1069. ntest_ = ntest
  1070. sigma_set = sqrt(q**(2*(1-l/ntest_))/12)
  1071. Ni = [N(i, sigma_set) for i in range(1, t2+1)]
  1072. t1 = len([e for e in Ni if e <= b])
  1073. # there is no point in having more tables than needed to cover n
  1074. if b*t1 > n:
  1075. t1 = n//b
  1076. t2 -= t1
  1077. cost["t1"] = t1
  1078. cost["t2"] = t2
  1079. # compute ntest with the t1 just computed
  1080. if ntest is None:
  1081. ntest = find_ntest(n, l, t1, t2, b)
  1082. # if there's no ntest then there's no `σ_{set}` and hence no ncod
  1083. if ntest:
  1084. sigma_set = sqrt(q**(2*(1-l/ntest))/12)
  1085. cost[u"σ_set"] = RR(sigma_set)
  1086. ncod = sum([N(i, sigma_set) for i in range(1, t2+1)])
  1087. else:
  1088. ncod = 0
  1089. ntot = ncod + ntest
  1090. ntop = max(n - ncod - ntest - t1*b, 0)
  1091. cost["ncod"] = ncod # coding step
  1092. cost["ntop"] = ntop # guessing step, typically zero
  1093. cost["ntest"] = ntest # hypothesis testing
  1094. # Theorem 1: quantization noise + addition noise
  1095. if secret_bounds:
  1096. s_var = uniform_variance_from_bounds(*secret_bounds, h=h)
  1097. coding_variance = s_var * sigma_set**2 * ntot
  1098. else:
  1099. coding_variance = gamma**2 * sigma**2 * sigma_set**2 * ntot
  1100. sigma_final = RR(sqrt(2**(t1+t2) * sigma**2 + coding_variance))
  1101. cost[u"σ_final"] = RR(sigma_final)
  1102. # we re-use our own estimator
  1103. M = distinguish_required_m(sigmaf(sigma_final), q, success_probability)
  1104. cost["m"] = M
  1105. m = (t1+t2)*(q**b-1)/2 + M
  1106. cost["oracle"] = RR(m)
  1107. # Equation (7)
  1108. n_ = n - t1*b
  1109. C0 = (m-n_) * (n+1) * ceil(n_/(b-1))
  1110. assert(C0 >= 0)
  1111. cost["C0(gauss)"] = RR(C0)
  1112. # Equation (8)
  1113. C1 = sum([(n+1-i*b)*(m - i*(q**b - 1)/2) for i in range(1, t1+1)])
  1114. assert(C1 >= 0)
  1115. cost["C1(bkw)"] = RR(C1)
  1116. # Equation (9)
1117. C2_ = sum([4*(M + i*(q**b - 1)/2)*N(i, sigma_set) for i in range(1, t2+1)])
1118. C2 = RR(C2_)
1119. for i in range(1, t2+1):
  1120. C2 += RR(ntop + ntest + sum([N(j, sigma_set) for j in range(1, i+1)]))*(M + (i-1)*(q**b - 1)/2)
  1121. assert(C2 >= 0)
  1122. cost["C2(coded)"] = RR(C2)
  1123. # Equation (10)
  1124. C3 = M*ntop*(2*d + 1)**ntop
  1125. assert(C3 >= 0)
  1126. cost["C3(guess)"] = RR(C3)
  1127. # Equation (11)
  1128. C4_ = 4*M*ntest
  1129. C4 = C4_ + (2*d+1)**ntop * (cfft * q**(l+1) * (l+1) * log(q, 2) + q**(l+1))
  1130. assert(C4 >= 0)
  1131. cost["C4(test)"] = RR(C4)
1132. C = (C0 + C1 + C2 + C3 + C4)/(erf(d/sqrt(2*sigma))**ntop) # TODO don't ignore success probability
  1133. cost["rop"] = RR(C)
  1134. cost["bop"] = RR(C)*log(RR(q), RR(2))
  1135. cost["mem"] = (t1+t2)*q**b
  1136. cost = cost_reorder(cost, ["bop", "oracle", "m", "mem", "rop", "b", "t1", "t2"])
  1137. return cost
  1138. def bkw_coded(n, alpha, q, success_probability=0.99, secret_bounds=None, h=None,
  1139. cost_include=("bop", "oracle", "m", "mem", "rop", "b", "t1", "t2"), samples=None):
  1140. """
  1141. Estimate complexity of Coded-BKW as described in [C:GuoJohSta15]
  1142. by optimising parameters.
  1143. :param n: dimension > 0
  1144. :param alpha: fraction of the noise α < 1.0
  1145. :param q: modulus > 0
  1146. :param success_probability: probability of success < 1.0, IGNORED
  1147. :param samples: the number of available samples
  1148. :returns: a cost estimate
  1149. :rtype: OrderedDict
  1150. EXAMPLE::
  1151. sage: n, alpha, q = unpack_lwe(Regev(64))
  1152. sage: print cost_str(bkw_coded(n, alpha, q))
  1153. bop: ≈2^53.1, oracle: ≈2^39.2, m: ≈2^30.2, mem: ≈2^40.2, rop: ≈2^49.5, ...
  1154. """
  1155. bstart = ceil(log(q, 2))
  1156. def _run(b=2):
  1157. # the noise is 2**(t1+t2) * something so there is no need to go beyond, say, q**2
  1158. return binary_search(_bkw_coded, 2, min(n//b, ceil(2*log(q, 2))), "t2",
  1159. lambda x, best: x["rop"]<=best["rop"] and (
  1160. samples is None or best["oracle"]>samples or x["oracle"]<=samples),
  1161. n, alpha, q, b=b, t2=0,
  1162. secret_bounds=secret_bounds, h=h,
  1163. success_probability=success_probability)
  1164. best = binary_search(_run, 2, 3*bstart, "b", lambda x, best: x["rop"]<=best["rop"] and (
  1165. samples is None or best["oracle"]>samples or x["oracle"]<=samples), b=2)
  1166. # binary search cannot "fail". It just outputs some X with X["oracle"]>samples.
  1167. if samples is not None and best["oracle"] > samples:
  1168. raise InsufficientSamplesError("Too few samples given (%d)." % samples)
  1169. return best
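# Illustrative sketch: running the Coded-BKW optimiser under a sample budget.
# The 2^40 budget is an arbitrary assumption; ``Regev`` and ``unpack_lwe`` are
# the same helpers used in the docstring example above.
def _bkw_coded_sample_budget_sketch(budget=2**40):
    n, alpha, q = unpack_lwe(Regev(64))
    cost = bkw_coded(n, alpha, q, samples=budget)
    # the optimiser either respects the budget or raises InsufficientSamplesError
    return cost["oracle"] <= budget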
  1170. # Dual Strategy
  1171. def _sis(n, alpha, q, success_probability=0.99, optimisation_target=u"bkz2", secret_bounds=None, h=None, samples=None):
1172. """Estimate cost of solving LWE via the SIS (dual lattice) strategy.
  1173. :param n: dimension > 0
  1174. :param alpha: fraction of the noise α < 1.0
  1175. :param q: modulus > 0
  1176. :param success_probability: probability of success < 1.0
  1177. :param secret_bounds: ignored
  1178. :param h: ignored
  1179. :param samples: the number of available samples
  1180. :returns: a cost estimate
  1181. :rtype: OrderedDict
  1182. """
  1183. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  1184. f = lambda eps: RR(sqrt(log(1/eps)/pi)) # noqa
  1185. RR = alpha.parent()
  1186. # we are solving Decision-LWE
  1187. log_delta_0 = log(f(success_probability)/alpha, 2)**2 / (4*n*log(q, 2))
  1188. delta_0 = RR(2**log_delta_0)
  1189. m_optimal = lattice_reduction_opt_m(n, q, delta_0)
  1190. if samples is None or samples > m_optimal:
  1191. m = m_optimal
  1192. else:
  1193. if not samples > 0:
  1194. raise InsufficientSamplesError("Number of samples: %d" % samples)
  1195. m = samples
  1196. log_delta_0 = log(f(success_probability)/alpha, 2)/m - RR(log(q, 2)*n)/(m**2)
  1197. delta_0 = RR(2**log_delta_0)
  1198. # check for valid delta
  1199. if delta_0 < 1:
  1200. raise OutOfBoundsError(u"Detected delta_0 = %f < 1. Too few samples?!" % delta_0)
  1201. ret = bkz_runtime_delta(delta_0, m)
  1202. ret[u"oracle"] = m
  1203. ret[u"|v|"] = RR(delta_0**m * q**(n/m))
  1204. ret[u"dim"] = m
  1205. if optimisation_target != u"oracle":
  1206. ret = cost_reorder(ret, [optimisation_target, u"oracle"])
  1207. else:
  1208. ret = cost_reorder(ret, [optimisation_target])
  1209. return ret
  1210. sis = partial(rinse_and_repeat, _sis)
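# Illustrative sketch of the algebra behind the choice of delta_0 in _sis()
# above: with log2(delta_0) = log2(f/alpha)^2/(4*n*log2(q)) and the optimal
# sub-lattice dimension m = sqrt(n*log2(q)/log2(delta_0)), the short vector
# delta_0^m * q^(n/m) has length exactly f/alpha.  The parameter values below
# are arbitrary assumptions, chosen so that f/alpha > 1.
def _sis_delta_identity_sketch(n=256, q=2**15, alpha=0.0005, eps=0.99):
    f = sqrt(log(1/eps)/pi)                        # same f as in _sis()
    log_delta_0 = log(f/alpha, 2)**2 / (4*n*log(q, 2))
    m = sqrt(n*log(q, 2)/log_delta_0)              # real-valued optimum
    log_v = m*log_delta_0 + n*log(q, 2)/m          # log2 of delta_0^m * q^(n/m)
    return abs(RR(log_v) - RR(log(f/alpha, 2))) < 1e-6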
  1211. # Decoding
  1212. @cached_function
  1213. def gsa_basis(n, q, delta, m):
  1214. """
  1215. Create the basis lengths.
  1216. :param n: determinant is q^n
  1217. :param q: determinant is q^n
  1218. :param delta: root-Hermite factor
  1219. :param m: lattice dimension
  1220. .. note:: based on the GSA in [RSA:LinPei11]_
  1221. .. [RSA:LinPei11] Richard Lindner and Chris Peikert. Better key sizes (and attacks) for LWE-based encryption.
  1222. In Aggelos Kiayias, editor, CT-RSA 2011, volume 6558 of LNCS, pages 319–339. Springer,
  1223. February 2011.
  1224. """
  1225. log_delta = RDF(log(delta))
  1226. log_q = RDF(log(q))
  1227. qnm = log_q*(n/m)
  1228. qnm_p_log_delta_m = qnm + log_delta*m
  1229. tmm1 = RDF(2*m/(m-1))
  1230. b = [(qnm_p_log_delta_m - log_delta*(tmm1 * i)) for i in xrange(m)]
  1231. b = [log_q - b[-1-i] for i in xrange(m)]
  1232. b = map(lambda x: x.exp(), b)
  1233. return b
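# Illustrative sanity sketch for gsa_basis() above: under the GSA as
# implemented there, the first Gram-Schmidt length is delta^m * q^((m-n)/m) and
# the product of all m lengths is the lattice determinant q^(m-n).  The
# parameter values below are arbitrary assumptions.
def _gsa_basis_sanity_sketch(n=128, q=4093, delta=1.01, m=300):
    B = gsa_basis(n, q, delta, m)
    log_q, log_delta = RDF(q).log(), RDF(delta).log()
    first_ok = abs(B[0].log() - (m*log_delta + (1 - n/m)*log_q)) < 1e-6
    det_ok = abs(sum(x.log() for x in B) - (m - n)*log_q) < 1e-6
    return first_ok and det_ok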
  1234. def enum_cost(n, alpha, q, eps, delta_0, m=None, B=None, step=1, enums_per_clock=-15.1):
  1235. """
  1236. Estimates the runtime for performing enumeration.
  1237. :param n: dimension > 0
  1238. :param alpha: fraction of the noise α < 1.0
  1239. :param q: modulus > 0
1240. :param eps: target success probability
1241. :param delta_0: root-Hermite factor of the reduced basis
1242. :param m: lattice dimension (optional, the optimal dimension is used if ``None``)
1243. :param B: Gram-Schmidt lengths of the basis (optional, derived from the GSA if ``None``)
  1244. :param step: changes the increments for the values of d[i]
  1245. :param enums_per_clock: the log of the number of enumerations computed per clock cycle
  1246. :returns: a cost estimate
  1247. :rtype: OrderedDict
  1248. """
  1249. RR = alpha.parent()
  1250. step = RDF(step)
  1251. if B is None:
  1252. if m is None:
  1253. m = lattice_reduction_opt_m(n, q, delta_0)
  1254. B = gsa_basis(n, q, delta_0, m)
  1255. d = [RDF(1)]*m
  1256. bd = [d[i] * B[i] for i in xrange(m)]
  1257. scaling_factor = RDF(sqrt(pi) / (2*alpha*q))
  1258. probs_bd = [RDF((bd[i] * scaling_factor)).erf() for i in xrange(m)]
  1259. success_probability = prod(probs_bd)
  1260. if RR(success_probability).is_NaN():
  1261. # try in higher precision
  1262. step = RR(step)
  1263. d = [RR(1)]*m
  1264. bd = [d[i] * B[i] for i in xrange(m)]
  1265. scaling_factor = RR(sqrt(pi) / (2*alpha*q))
  1266. probs_bd = [RR((bd[i] * scaling_factor)).erf() for i in xrange(m)]
  1267. success_probability = prod(probs_bd)
  1268. if success_probability.is_NaN():
  1269. return OrderedDict([(u"delta_0", delta_0),
  1270. ("enum", oo),
  1271. ("enumop", oo)])
1272. # If m is too small, the entries of probs_bd are tiny (magnitude 1e-10 or
1273. # less), so success_probability = prod(probs_bd) underflows to 0 and the
1274. # loop below would never terminate. Preventing this would require
1275. # recomputing success_probability from scratch at the end of each
1276. # iteration instead of updating it incrementally. Even then, for step=1
1277. # this can take a very long time, starting from, say,
1278. # success_probability == 1e-300.
1279. # Since this is a (rare) special case, an error is raised instead.
  1280. if not success_probability > 0:
  1281. raise InsufficientSamplesError("success_probability == 0! Too few samples?!")
  1282. bd = map(list, zip(bd, range(len(bd))))
  1283. bd = sorted(bd)
  1284. import bisect
  1285. last_success_probability = success_probability
  1286. while success_probability < RDF(eps):
  1287. v, i = bd.pop(0)
  1288. d[i] += step
  1289. v += B[i]*step
  1290. last_success_probability = success_probability
  1291. success_probability /= probs_bd[i]
  1292. probs_bd[i] = (v * scaling_factor).erf()
  1293. success_probability *= probs_bd[i]
  1294. bisect.insort_left(bd, [v, i])
  1295. if success_probability == 0 or last_success_probability >= success_probability:
  1296. return OrderedDict([(u"delta_0", delta_0),
  1297. ("enum", oo),
  1298. ("enumop", oo)])
  1299. r = OrderedDict([(u"delta_0", delta_0),
  1300. ("enum", RR(prod(d))),
  1301. ("enumop", RR(prod(d)) / RR(2)**enums_per_clock)])
  1302. return r
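# Illustrative usage sketch for enum_cost() with an arbitrary toy parameter
# set; the modulus, noise rate and delta_0 below are assumptions picked only so
# that the success probability does not underflow.  Note that "enumop" is
# "enum" scaled by 2^15.1 clock cycles per enumeration (the default
# enums_per_clock).
def _enum_cost_usage_sketch():
    n, q = 128, 4093
    alpha = RR(8/q)        # alpha must live in a real field, see above
    return enum_cost(n, alpha, q, eps=RR(0.99), delta_0=RR(1.009))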
  1303. def _decode(n, alpha, q, success_probability=0.99,
  1304. enums_per_clock=-15.1, optimisation_target="bkz2",
  1305. secret_bounds=None, h=None, samples=None):
  1306. """
  1307. Estimates the optimal parameters for decoding attack
  1308. :param n: dimension > 0
  1309. :param alpha: fraction of the noise α < 1.0
  1310. :param q: modulus > 0
  1311. :param success_probability: probability of success < 1.0
  1312. :param enums_per_clock: the log of the number of enumerations computed per clock cycle
  1313. :param optimisation_target: lattice reduction estimate to use
  1314. :param secret_bounds: ignored
  1315. :param h: ignored
  1316. :param samples: the number of available samples
  1317. :returns: a cost estimate
  1318. :rtype: OrderedDict
  1319. """
  1320. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  1321. if samples is not None and not samples > 1:
  1322. raise InsufficientSamplesError("Number of samples: %d" % samples)
  1323. RR = alpha.parent()
1324. # The number of samples isn't considered here, because only a "good" starting delta_0
1325. # is needed and no "good" value for the number of samples is known,
1326. # i.e. the given number of samples may not produce "good" results.
  1327. delta_0m1 = _sis(n, alpha, q, success_probability=success_probability)[u"delta_0"] - 1
  1328. step = RR(1.05)
  1329. direction = -1
  1330. def combine(enum, bkz):
  1331. current = OrderedDict()
  1332. current["rop"] = enum["enumop"] + bkz[optimisation_target]
  1333. for key in bkz:
  1334. current[key] = bkz[key]
  1335. for key in enum:
  1336. current[key] = enum[key]
  1337. current[u"oracle"] = m
  1338. current = cost_reorder(current, ["rop", "oracle", optimisation_target])
  1339. return current
  1340. depth = 6
  1341. current = None
  1342. while True:
  1343. delta_0 = 1 + delta_0m1
  1344. if delta_0 >= 1.0219 and current is not None: # LLL is enough
  1345. break
  1346. m_optimal = lattice_reduction_opt_m(n, q, delta_0)
  1347. if samples is None or samples > m_optimal:
  1348. m = m_optimal
  1349. else:
  1350. m = samples
  1351. bkz = bkz_runtime_delta(delta_0, m)
  1352. bkz["dim"] = m
  1353. enum = enum_cost(n, alpha, q, success_probability, delta_0, m, enums_per_clock=enums_per_clock)
  1354. current = combine(enum, bkz)
1355. # if lattice reduction is cheaper than enumeration, make it more expensive
  1356. if current[optimisation_target] < current["enumop"]:
  1357. prev_direction = direction
  1358. direction = -1
  1359. if direction != prev_direction:
  1360. step = 1 + RR(step-1)/2
  1361. delta_0m1 /= step
  1362. elif current[optimisation_target] > current["enumop"]:
  1363. prev_direction = direction
  1364. direction = 1
  1365. delta_0m1 *= step
  1366. else:
  1367. break
  1368. if direction != prev_direction:
  1369. step = 1 + RR(step-1)/2
  1370. depth -= 1
  1371. if depth == 0:
  1372. break
  1373. return current
  1374. decode = partial(rinse_and_repeat, _decode, decision=False)
  1375. # uSVP
  1376. def kannan(n, alpha, q, tau=tau_default, tau_prob=tau_prob_default, success_probability=0.99,
  1377. optimisation_target="bkz2", samples=None):
  1378. """
  1379. Estimate optimal parameters for using Kannan-embedding to solve CVP.
  1380. :param n: dimension > 0
  1381. :param alpha: fraction of the noise α < 1.0
  1382. :param q: modulus > 0
  1383. :param success_probability: probability of success < 1.0
  1384. :param samples: the number of available samples
  1385. :returns: a cost estimate
  1386. :rtype: OrderedDict
  1387. """
  1388. # TODO: Include the attack described in
  1389. # [RSA:BaiGal14] Shi Bai and Steven D. Galbraith. An improved compression technique
  1390. # for signatures based on learning with errors. In Josh Benaloh,
  1391. # editor, Topics in Cryptology – CT-RSA 2014, volume 8366 of Lecture
  1392. # Notes in Computer Science, pages 28–47, San Francisco, CA, USA,
  1393. # February 25–28, 2014. Springer, Heidelberg, Germany.
1394. # The computational cost is estimated in the same way as for kannan, with dimension samples = n+m.
  1395. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  1396. RR = alpha.parent()
  1397. log_delta_0 = log(tau*alpha*sqrt(e), 2)**2/(4*n*log(q, 2))
  1398. delta_0 = RR(2**log_delta_0)
  1399. m_optimal = lattice_reduction_opt_m(n, q, delta_0)
  1400. if samples is None or samples > m_optimal:
  1401. m = m_optimal
  1402. else:
  1403. if not samples > 0:
  1404. raise InsufficientSamplesError("Number of samples: %d" % samples)
  1405. m = samples
  1406. delta_0 = RR((q**(1-n/m)*sqrt(1/(e)) / (tau*alpha*q))**(1.0/m))
  1407. # check for valid delta
  1408. if delta_0 < 1:
  1409. raise OutOfBoundsError(u"Detected delta_0 = %f < 1. Too few samples?!" % delta_0)
  1410. l2 = q**(1-n/m) * sqrt(m/(2*pi*e))
  1411. if l2 > q:
  1412. raise NotImplementedError(u"Case where λ_2 = q not implemented.")
  1413. repeat = amplify(success_probability, tau_prob)
  1414. r = bkz_runtime_delta(delta_0, m, log(repeat, 2.0))
  1415. r[u"oracle"] = repeat*m if samples is None else m
  1416. r[u"m"] = m
  1417. r = cost_reorder(r, [optimisation_target, "oracle"])
  1418. if get_verbose() >= 2:
  1419. print cost_str(r)
  1420. return r
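# Illustrative sketch of the algebra behind delta_0 in kannan() above: with
# log2(delta_0) = log2(tau*alpha*sqrt(e))^2/(4*n*log2(q)) and the (real-valued)
# optimal dimension m = sqrt(n*log2(q)/log2(delta_0)), the estimate
# delta_0^m * q^(n/m) equals 1/(tau*alpha*sqrt(e)), i.e. the gap the embedding
# needs.  The parameter values below are arbitrary assumptions.
def _kannan_delta_identity_sketch(n=256, q=2**15, alpha=0.0005, tau=0.3):
    log_delta_0 = log(tau*alpha*sqrt(e), 2)**2/(4*n*log(q, 2))
    m = sqrt(n*log(q, 2)/log_delta_0)
    log_v = m*log_delta_0 + n*log(q, 2)/m
    return abs(RR(log_v) + RR(log(tau*alpha*sqrt(e), 2))) < 1e-6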
  1421. # Gröbner bases
  1422. def gb_complexity(m, n, d, omega=2, call_magma=True, d2=None):
  1423. """Estimate the complexity of computing a Gröbner basis.
  1424. Estimation is done for `m` polynomials of degree `d` in `n`
  1425. variables under the assumption that the system is semi-regular.
1426. If `d2` is not ``None`` then `n` polynomials of degree `d2` are added to
1427. the system. This is to encode restrictions on the solution. For
1428. example, if the solution is either `0` or `1`, then `(x_i)⋅(x_i-1)`
1429. would evaluate to zero on it for every `x_i`.
  1430. :param m: number of polynomials (integer > 0)
  1431. :param n: number of variables (integer > 0)
  1432. :param d: degree of all input polynomials
  1433. :param omega: linear algebra exponent, i.e. matrix-multiplication costs `O(n^ω)` operations.
  1434. :param call_magma: use Magma to perform computation (highly recommended)
  1435. :param d2: secondary degree (integer > 0 or ``None``)
  1436. :returns: A dictionary containing the expected degree of regularity "Dreg"
  1437. (assuming the system is semi-regular), the number of base ring operations "rop",
  1438. and the memory requirements in base ring elements "mem".
  1439. :rtype: OrderedDict
  1440. """
  1441. if m > n**d:
  1442. m = n**d
  1443. if call_magma:
  1444. R = magma.PowerSeriesRing(QQ, 2*n)
  1445. z = R.gen(1)
  1446. coeff = lambda f, d: f.Coefficient(d) # noqa
  1447. else:
  1448. R = PowerSeriesRing(QQ, "z", 2*n)
  1449. z = R.gen()
  1450. coeff = lambda f, d: f[d] # noqa
  1451. if d2 is None:
  1452. s = (1-z**d)**m / (1-z)**n
  1453. else:
  1454. s = (1-z**d)**m * (1-z**d2)**n / (1-z)**n
  1455. retval = OrderedDict([("Dreg", None), ("rop", None)])
  1456. for dreg in xrange(2*n):
  1457. if coeff(s, dreg) < 0:
  1458. break
  1459. else:
  1460. return retval
  1461. retval["Dreg"] = dreg
  1462. retval["rop"] = RR(binomial(n + dreg, dreg)**omega)
  1463. retval["mem"] = RR(binomial(n + dreg, dreg)**2)
  1464. return retval
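# Illustrative usage sketch for gb_complexity() with arbitrary toy values,
# using the Sage power-series code path rather than Magma.  For these values
# the first non-positive coefficient of (1-z^d)^m/(1-z)^n appears at degree 3,
# so "Dreg" should come out as 3.
def _gb_complexity_usage_sketch():
    return gb_complexity(m=50, n=10, d=2, omega=2, call_magma=False)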
  1465. def arora_gb(n, alpha, q, success_probability=0.99, omega=2, call_magma=True, guess=0, d2=None, samples=None):
  1466. if samples is not None:
  1467. from warnings import warn
  1468. warn("Given number of samples is ignored for arora_gb()!")
  1469. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability,
  1470. prec=2*log(n, 2)*n)
  1471. RR = alpha.parent()
  1472. stddev = RR(stddevf(RR(alpha)*q))
  1473. if stddev >= 1.1*sqrt(n):
  1474. return None
  1475. if d2 is True:
  1476. d2 = 2*ceil(3*stddev)+1
  1477. ps_single = lambda C: RR(1 - (2/(C*RR(sqrt(2*pi))) * exp(-C**2/2))) # noqa
  1478. m = floor(exp(RR(0.75)*n))
  1479. d = n
  1480. t = ZZ(floor((d-1)/2))
  1481. C = t/stddev
  1482. pred = gb_complexity(m, n-guess, d, omega, call_magma, d2=d2)
  1483. pred["t"] = t
  1484. pred["oracle"] = m
  1485. pred[u"Pr[⊥]"] = RR(m*(1-ps_single(C)))
  1486. pred["bop"] = log(q, 2) * pred["rop"]
  1487. pred = cost_reorder(pred, ["t", "bop", "oracle", "Dreg"])
  1488. if get_verbose() >= 2:
  1489. print "PREDICTION:"
  1490. print cost_str(pred)
  1491. print
  1492. print "ESTIMATION:"
  1493. t = ceil(t/3)
  1494. best = None
  1495. stuck = 0
  1496. for t in srange(t, n):
  1497. d = 2*t + 1
  1498. C = RR(t/stddev)
  1499. if C < 1: # if C is too small, we ignore it
  1500. continue
  1501. # Pr[success]^m = Pr[overall success]
  1502. m = log(success_probability, 2) / log(ps_single(C), 2)
  1503. if m < n:
  1504. continue
  1505. m = floor(m)
  1506. current = gb_complexity(m, n-guess, d, omega, call_magma, d2=d2)
  1507. if current["Dreg"] is None:
  1508. continue
  1509. current["t"] = t
  1510. current[u"Pr[⊥]"] = RR(1-success_probability)
  1511. current["rop"] *= RR((3*stddev)**guess)
  1512. current["bop"] = log(q, 2) * current["rop"]
  1513. current["oracle"] = m
  1514. current = cost_reorder(current, ["bop", "oracle", "t", "Dreg"])
  1515. if get_verbose() >= 2:
  1516. print cost_str(current)
  1517. if best is None:
  1518. best = current
  1519. else:
  1520. if best["rop"] > current["rop"]:
  1521. best = current
  1522. stuck = 0
  1523. else:
  1524. stuck += 1
  1525. if stuck >= 5:
  1526. break
  1527. return best
  1528. # Exhaustive Search for Small Secrets
  1529. def small_secret_guess(f, n, alpha, q, secret_bounds, h=None, samples=None, **kwds):
  1530. size = secret_bounds[1]-secret_bounds[0] + 1
  1531. best = None
  1532. step_size = 16
  1533. fail_attempts, max_fail_attempts = 0, 5
  1534. while step_size >= n:
  1535. step_size /= 2
  1536. i = 0
  1537. while True:
  1538. if i<0:
  1539. break
  1540. try:
  1541. try:
  1542. # some implementations make use of the secret_bounds parameter
  1543. current = f(n-i, alpha, q, secret_bounds=secret_bounds, samples=samples, **kwds)
  1544. except TypeError:
  1545. current = f(n-i, alpha, q, samples=samples, **kwds)
  1546. except (OutOfBoundsError, RuntimeError, InsufficientSamplesError) as err:
  1547. if get_verbose() >= 2:
  1548. print type(err).__name__, ":", err
  1549. i += abs(step_size)
  1550. fail_attempts += 1
  1551. if fail_attempts > max_fail_attempts:
  1552. break
  1553. continue
  1554. if h is None or i<h:
  1555. repeat = size**i
  1556. else:
  1557. # TODO: this is too pessimistic
  1558. repeat = (size)**h * binomial(i, h)
  1559. do_repeat = None if samples is None else {"oracle": False}
  1560. current = cost_repeat(current, repeat, do_repeat)
  1561. key = list(current)[0]
  1562. if best is None:
  1563. best = current
  1564. i += step_size
  1565. else:
  1566. if best[key] > current[key]:
  1567. best = current
  1568. i += step_size
  1569. else:
  1570. step_size = -1*step_size//2
  1571. i += step_size
  1572. if step_size == 0:
  1573. break
  1574. if best is None:
  1575. raise RuntimeError("No solution could be found.")
  1576. return best
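# Illustrative helper restating the guessing overhead used in
# small_secret_guess() above: guessing i components of a secret drawn from a
# set of the given size costs size^i trials in general; if at most h of them
# are non-zero, the (pessimistic, see the TODO above) bound
# size^h * binomial(i, h) applies.  The default values are arbitrary.
def _small_secret_guess_repeat_sketch(i=80, h=64, size=2):
    if h is None or i < h:
        return size**i
    return size**h * binomial(i, h)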
  1577. # Modulus Switching
  1578. def decode_small_secret_mod_switch_and_guess(n, alpha, q, secret_bounds, h=None, samples=None, **kwds):
  1579. """Solve LWE by solving BDD for small secret instances.
  1580. :param n: dimension > 0
  1581. :param alpha: fraction of the noise α < 1.0
  1582. :param q: modulus > 0
1583. :param secret_bounds: lower and upper bound on the secret
  1584. :param h: number of non-zero components in the secret
  1585. :param samples: the number of available samples
  1586. """
  1587. s_var = uniform_variance_from_bounds(*secret_bounds, h=h)
  1588. n, alpha, q = switch_modulus(n, alpha, q, s_var, h=h)
  1589. return small_secret_guess(decode, n, alpha, q, secret_bounds, h=h, samples=samples, **kwds)
  1590. def kannan_small_secret_mod_switch_and_guess(n, alpha, q, secret_bounds, h=None, samples=None, **kwds):
  1591. """Solve LWE by Kannan embedding for small secret instances.
  1592. :param n: dimension > 0
  1593. :param alpha: fraction of the noise α < 1.0
  1594. :param q: modulus > 0
1595. :param secret_bounds: lower and upper bound on the secret
  1596. :param h: number of non-zero components in the secret.
  1597. :param samples: the number of available samples
  1598. """
  1599. s_var = uniform_variance_from_bounds(*secret_bounds, h=h)
  1600. n, alpha, q = switch_modulus(n, alpha, q, s_var, h=h)
  1601. return small_secret_guess(kannan, n, alpha, q, secret_bounds, h=h, samples=samples, **kwds)
  1602. # Bai's and Galbraith's uSVP Attack
  1603. def _bai_gal_small_secret(n, alpha, q, secret_bounds, tau=tau_default, tau_prob=tau_prob_default,
  1604. success_probability=0.99,
  1605. optimisation_target="bkz2",
  1606. h=None, samples=None):
  1607. """
  1608. :param n: dimension > 0
  1609. :param alpha: fraction of the noise α < 1.0
  1610. :param q: modulus > 0
  1611. :param tau: 0 < τ ≤ 1.0
  1612. :param success_probability: probability of success < 1.0
  1613. :param optimisation_target: field to use to decide if parameters are better
  1614. :param h: number of non-zero components in the secret
  1615. """
  1616. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  1617. RR = alpha.parent()
  1618. stddev = stddevf(alpha*q)
  1619. a, b = secret_bounds
  1620. if h is None:
  1621. xi = RR(2)/(b-a)
  1622. else:
  1623. assert -a == b # TODO: don't be so lazy
  1624. # we compute the stddev of |s| and xi to scale each component to σ on average
  1625. variance = sum([ZZ(h)/n * i**2 for i in range(a, b+1) if i])
  1626. s_stddev = variance.sqrt()
  1627. xi = ZZ(1)/s_stddev
  1628. num = (log(q/stddev) - log(tau*sqrt(4*pi*e)))**2 * log(q/stddev)
  1629. den = n*(2*log(q/stddev)-log(xi))**2
  1630. log_delta_0 = RR(num/den)
  1631. delta_0 = RR(e**log_delta_0)
  1632. m_prime_optimal = ceil(sqrt(n*(log(q)-log(stddev))/log_delta_0))
  1633. if samples is None or samples > m_prime_optimal - n:
  1634. m_prime = m_prime_optimal
  1635. else:
  1636. m = samples
  1637. m_prime = m+n
1638. num = m_prime*(log(q/stddev) - log(2*tau*sqrt(pi*e))) + n*log(xi) - n*log(q/stddev)
  1639. den = m_prime**2
  1640. log_delta_0 = RR(num/den)
  1641. delta_0 = RR(e**log_delta_0)
  1642. m = m_prime - n
  1643. l2 = RR((q**m * (xi*stddev)**n)**(1/m_prime) * sqrt(m_prime/(2*pi*e)))
  1644. if l2 > q:
  1645. raise NotImplementedError("Case λ_2 = q not implemented.")
  1646. repeat = amplify(success_probability, tau_prob)
  1647. r = bkz_runtime_delta(delta_0, m_prime, log(repeat, 2))
  1648. r[u"oracle"] = repeat*m if samples is None else m
  1649. r[u"m"] = m
  1650. if optimisation_target != u"oracle":
  1651. r = cost_reorder(r, [optimisation_target, u"oracle"])
  1652. else:
  1653. r = cost_reorder(r, [optimisation_target])
  1654. if get_verbose() >= 2:
  1655. print cost_str(r)
  1656. return r
  1657. def bai_gal_small_secret(n, alpha, q, secret_bounds, tau=tau_default, tau_prob=tau_prob_default,
  1658. success_probability=0.99,
  1659. optimisation_target="bkz2",
  1660. h=None, samples=None):
  1661. """
  1662. Bai's and Galbraith's uSVP attack + small secret guessing [ACISP:BaiGal14]_
  1663. :param n: dimension > 0
  1664. :param alpha: fraction of the noise α < 1.0
  1665. :param q: modulus > 0
  1666. :param tau: 0 < τ ≤ 1.0
  1667. :param success_probability: probability of success < 1.0
  1668. :param optimisation_target: field to use to decide if parameters are better
  1669. :param h: number of non-zero components in the secret
  1670. :param samples: the number of available samples
  1671. .. [ACISP:BaiGal14] Bai, S., & Galbraith, S. D. (2014). Lattice decoding attacks on binary
  1672. LWE. In W. Susilo, & Y. Mu, ACISP 14 (pp. 322–337).
  1673. """
  1674. return small_secret_guess(_bai_gal_small_secret, n, alpha, q, secret_bounds,
  1675. tau=tau, tau_prob=tau_prob,
  1676. success_probability=0.99,
  1677. optimisation_target=optimisation_target,
  1678. h=h, samples=samples)
  1679. # Small, Sparse Secret SIS
  1680. def success_probability_drop(n, h, k, fail=0):
  1681. """
  1682. Probability ``k`` randomly sampled components have at most
  1683. ``fail`` non-zero components amongst them.
  1684. :param n: dimension of LWE samples
  1685. :param h: number of non-zero components
  1686. :param k: number of components to ignore
  1687. :param fail: we tolerate ``fail`` number of non-zero components
  1688. amongst the ``k`` ignored components
  1689. """
  1690. N = n # population size
  1691. K = n-h # number of success states in the population
  1692. n = k # number of draws
  1693. k = n - fail # number of observed successes
  1694. return (binomial(K, k)*binomial(N-K, n-k)) / binomial(N, n)
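# Illustrative sanity sketch for success_probability_drop(): with n=4
# components of which h=1 is non-zero, a single ignored component (k=1) hits a
# zero entry with probability 3/4.
def _success_probability_drop_sketch():
    return success_probability_drop(4, 1, 1) == 3/4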
  1695. def drop_and_solve(f, n, alpha, q, secret_bounds=None, h=None,
  1696. success_probability=0.99,
  1697. optimisation_target=u"bkz2", postprocess=False, **kwds):
  1698. """
  1699. Solve instances of dimension ``n-k`` with increasing ``k`` using
  1700. ``f`` and pick parameters such that cost is reduced.
  1701. :param n: dimension
  1702. :param alpha: noise parameter
  1703. :param q: modulus q
  1704. :param secret_bounds: lower and upper bound on the secret
  1705. :param h: number of non-zero components of the secret
  1706. :param success_probability: target success probability
1707. :param optimisation_target: which value ought to be minimised
  1708. """
  1709. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  1710. RR = alpha.parent()
  1711. best = None
  1712. # too small a step size leads to an early abort, too large a step
  1713. # size means stepping over target
  1714. step_size = int(n/32)
  1715. k = 0
  1716. while True:
  1717. current = f(n-k, alpha, q,
  1718. success_probability=max(1-1/RR(2)**80, success_probability),
  1719. optimisation_target=optimisation_target,
  1720. h=h, secret_bounds=secret_bounds, **kwds)
  1721. cost_lat = current[optimisation_target]
  1722. cost_post = 0
  1723. probability = success_probability_drop(n, h, k)
  1724. if postprocess:
  1725. repeat = current["repeat"]
  1726. dim = current["dim"]
  1727. for i in range(1, k):
  1728. # compute inner products with rest of A
  1729. cost_post_i = 2 * repeat * dim * k
1730. # there are binomial(k, i) positions and max(s_i)-min(s_i) options per position
  1731. # for each position we need to add/subtract the right elements
  1732. cost_post_i += repeat * binomial(k, i) * (secret_bounds[1]-secret_bounds[0])**i * i
  1733. if cost_post + cost_post_i >= cost_lat:
  1734. postprocess = i
  1735. break
  1736. cost_post += cost_post_i
  1737. probability += success_probability_drop(n, h, k, i)
  1738. current["rop"] = cost_lat + cost_post
  1739. current = cost_repeat(current, 1/probability)
  1740. current["k"] = k
  1741. current["postprocess"] = postprocess
  1742. current = cost_reorder(current, ["rop"])
  1743. key = list(current)[0]
  1744. if best is None:
  1745. best = current
  1746. k += step_size
  1747. continue
  1748. if current[key] < best[key]:
  1749. best = current
  1750. k += step_size
  1751. else:
  1752. # we go back
  1753. k = best["k"] - step_size
  1754. k += step_size/2
  1755. if k <= 0:
  1756. k = step_size/2
  1757. # and half the step size
  1758. step_size = step_size/2
  1759. if step_size == 0:
  1760. break
  1761. return best
  1762. sis_drop_and_solve = partial(drop_and_solve, sis)
  1763. decode_drop_and_solve = partial(drop_and_solve, decode)
  1764. bai_gal_drop_and_solve = partial(drop_and_solve, bai_gal_small_secret)
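# Illustrative sketch of the repetition factor paid by drop_and_solve() when no
# postprocessing is allowed: ignoring k of n columns succeeds only if all of
# them hit zero components, so the attack is repeated 1/Pr[all zero] times on
# average.  The values n=256, h=64, k=32 are arbitrary assumptions.
def _drop_repetition_sketch(n=256, h=64, k=32):
    return (1/success_probability_drop(n, h, k)).n()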
  1765. def sis_small_secret_mod_switch(n, alpha, q, secret_bounds, h=None,
  1766. success_probability=0.99,
  1767. optimisation_target=u"bkz2",
  1768. c=None,
  1769. use_lll=False,
  1770. samples=None):
  1771. """
1772. Estimate cost of solving LWE by finding small `(y,x/c)` such that
  1773. `y A = c x`.
  1774. :param n: dimension
  1775. :param alpha: noise parameter
  1776. :param q: modulus q
  1777. :param secret_bounds: lower and upper bound on the secret
  1778. :param h: number of non-zero components of the secret
  1779. :param success_probability: target success probability
1780. :param optimisation_target: which value ought to be minimised
  1781. :param c: explicit constant `c`
  1782. :param use_lll: use LLL calls to produce more small vectors
  1783. """
  1784. if samples is not None:
  1785. from warnings import warn
  1786. warn("Given number of samples is ignored for sis_small_secret_mod_switch()!")
  1787. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability)
  1788. RR = alpha.parent()
  1789. assert(secret_bounds[0] == -1 and secret_bounds[1] == 1)
  1790. if h is None:
  1791. B = ZZ(secret_bounds[1] - secret_bounds[0] + 1)
  1792. h = ceil((B-1)/B * n)
  1793. # stddev of the error
  1794. e = stddevf(alpha*q).n()
  1795. delta_0 = sis(n, alpha, q, optimisation_target=optimisation_target)["delta_0"]
  1796. best = None
  1797. if c is None:
  1798. c = RR(e*sqrt(2*n - n)/sqrt(h))
  1799. if use_lll:
  1800. scale = 2
  1801. else:
  1802. scale = 1
  1803. while True:
  1804. m = lattice_reduction_opt_m(n, c*q, delta_0)
  1805. # the vector found will have norm
  1806. v = scale * delta_0**m * (q/c)**(n/m)
  1807. # each component has stddev v_
  1808. v_ = v/RR(sqrt(m))
  1809. # we split our vector in two parts.
  1810. # 1. v_r is multiplied with the error e (dimension m-n)
  1811. # 2. v_l is the rounding noise (dimension n)
  1812. # scale last q components down again.
  1813. v_r = e*sqrt(m-n)*v_
  1814. v_l = c*sqrt(h)*v_
  1815. repeat = max(distinguish_required_m(v_r, q, success_probability, other_sigma=v_l), RR(1))
  1816. ret = bkz_runtime_delta(delta_0, m, log(repeat, 2))
  1817. if use_lll:
  1818. if "lp" in ret:
  1819. keys = ("bkz2", "sieve", "lp")
  1820. else:
  1821. keys = ("bkz2", "sieve")
  1822. ret_lll = bkz_runtime_delta(delta_0, m)
  1823. for key in keys:
  1824. # CN11: LLL: n^3 log^2 B
  1825. ret_lll[key] += (n**3 * log(q, 2)**2) * repeat
  1826. ret = ret_lll
  1827. ret[u"oracle"] = m * repeat
  1828. ret[u"repeat"] = repeat
  1829. ret[u"dim"] = m
  1830. ret[u"c"] = c
  1831. ret = cost_reorder(ret, [optimisation_target, u"oracle"])
  1832. if get_verbose() >= 2:
  1833. print cost_str(ret)
  1834. if best is None:
  1835. best = ret
  1836. if ret[optimisation_target] > best[optimisation_target]:
  1837. break
  1838. best = ret
  1839. delta_0 = delta_0 + RR(0.00005)
  1840. return best
  1841. def applebaum_transform(n, alpha, q, m, secret_bounds, h=None):
  1842. """
  1843. Swap part of the error vector with the secret as suggested in [EPRINT:GenHalSma12]_
  1844. .. [EPRINT:GenHalSma12] Gentry, C., Halevi, S., & Smart, N. P. (2012). Homomorphic
  1845. evaluation of the AES circuit.
  1846. :param n: dimension
  1847. :param alpha: noise parameter
  1848. :param q: modulus q
  1849. :param m: number of samples in lattice.
  1850. :param secret_bounds: lower and upper bound on the secret
  1851. :param h: number of non-zero components of the secret
  1852. :param success_probability: target success probability
  1853. """
  1854. # we update alpha to reflect that we're replacing part of the error by the secret
  1855. s_var = uniform_variance_from_bounds(*secret_bounds, h=h)
  1856. e_var = stddevf(alpha*q)**2
  1857. stddev_ = ((n*s_var + (m-n)*e_var)/m).sqrt()
  1858. alpha_ = alphaf(stddev_, q, sigma_is_stddev=True)
  1859. alpha = alpha_
  1860. return n, alpha, q
  1861. def applebaum(f, n, alpha, q, m, secret_bounds, h=None,
  1862. success_probability=0.99,
  1863. optimisation_target=u"bkz2", **kwds):
  1864. """Run ``f`` after transforming LWE instance using ``applebaum_transform``.
  1865. :param f: LWE solving cost function
  1866. :param n: dimension
  1867. :param alpha: noise parameter
  1868. :param q: modulus q
  1869. :param m: number of samples in lattice.
  1870. :param secret_bounds: lower and upper bound on the secret
  1871. :param h: number of non-zero components of the secret
  1872. :param success_probability: target success probability
  1873. :param optimisation_target: use this field to optimise
  1874. """
  1875. n, alpha, q = applebaum_transform(n, alpha, q, m, secret_bounds, h)
  1876. return f(n, alpha, q, success_probability=success_probability, optimisation_target=optimisation_target)
  1877. sis_applebaum = partial(applebaum, sis)
  1878. decode_applebaum = partial(applebaum, decode)
  1879. # BKW for Small Secrets
  1880. def bkw_small_secret_variances(q, a, b, kappa, o, RR=None):
  1881. """
  1882. Helper function for small secret BKW variant.
  1883. :param q:
  1884. :param a:
  1885. :param b:
  1886. :param kappa:
  1887. :param o:
  1888. :param RR:
  1889. :returns:
  1890. :rtype:
  1891. """
  1892. if RR is None:
  1893. RR = RealField()
  1894. q = RR(q)
  1895. a = RR(a).round()
  1896. b = RR(b)
  1897. n = a*b
  1898. kappa = RR(kappa)
  1899. T = RR(2)**(b*kappa)
  1900. n = RR(o)/RR(T*(a+1)) + RR(1)
  1901. U_Var = lambda x: (x**2 - 1)/12 # noqa
  1902. red_var = 2*U_Var(q/(2**kappa))
  1903. if o:
  1904. c_ = map(RR, [0.0000000000000000,
  1905. 0.4057993538687922, 0.6924478992819291, 0.7898852691349439,
  1906. 0.8441959360364506, 0.8549679124679972, 0.8954469872316165,
  1907. 0.9157093365103325, 0.9567635780119543, 0.9434245442818547,
  1908. 0.9987153221343770])
  1909. M = Matrix(RR, a, a) # rows are tables, columns are entries those tables
  1910. for l in range(M.ncols()):
  1911. for c in range(l, M.ncols()):
  1912. M[l, c] = U_Var(q)
  1913. for l in range(1, a):
  1914. for i in range(l):
  1915. M[l, i] = red_var + sum(M[i+1:l].column(i))
  1916. bl = b*l
  1917. if round(bl) < len(c_):
  1918. c_tau = c_[round(bl)]
  1919. else:
  1920. c_tau = RR(1)/RR(5)*RR(sqrt(bl)) + RR(1)/RR(3)
  1921. f = (c_tau*n**(~bl) + 1 - c_tau)**2
  1922. for i in range(l):
  1923. M[l, i] = M[l, i]/f
  1924. v = vector(RR, a)
  1925. for i in range(a):
  1926. v[i] = red_var + sum(M[i+1:].column(i))
  1927. else:
  1928. v = vector(RR, a)
  1929. for i in range(a)[::-1]:
  1930. v[i] = 2**(a-i-1) * red_var
  1931. if get_verbose() >= 3:
  1932. print log(red_var, 2).str(), [RealField(14)(log(x, 2)).str() for x in v]
  1933. return v
  1934. def bkw_small_secret(n, alpha, q, success_probability=0.99, secret_bounds=(0, 1), t=None, o=0, samples=None): # noqa
  1935. """
  1936. :param n: number of variables in the LWE instance
1937. :param alpha: fraction of the noise α < 1.0
1938. :param q: modulus > 0
  1939. :param secret_bounds: minimum and maximum value of secret
  1940. :param samples: the number of available samples
  1941. """
  1942. def sigma2f(kappa):
  1943. v = bkw_small_secret_variances(q, a, b, kappa, o, RR=RR)
  1944. return sigmaf(sum([b * e * secret_variance for e in v], RR(0)).sqrt())
  1945. def Tf(kappa):
  1946. return min(q**b, ZZ(2)**(b*kappa))/2
  1947. def ops_tf(kappa):
  1948. T = Tf(kappa)
  1949. return T * (a*(a-1)/2 * (n+1) - b*a*(a-1)/4 - b/6 * ((a-1)**3 + 3/2*(a-1)**2 + 1/RR(2)*(a-1)))
  1950. def bkwssf(kappa):
  1951. ret = OrderedDict()
  1952. ret[u"κ"] = kappa
  1953. m = distinguish_required_m(sigma_final, q, success_probability, sigma2f(kappa))
  1954. ret["m"] = m
  1955. ropsm = (m + o) * (a/2 * (n + 2))
  1956. ropst = ops_tf(kappa)
  1957. ret["rop"] = ropst + ropsm
  1958. ret["bop"] = log(q, 2) * ret["rop"]
  1959. T = Tf(kappa)
  1960. ret["mem"] = T * a * (n + 1 - b * (a-1)/2)
  1961. ret["oracle"] = T * a + ret["m"] + o
  1962. return ret
  1963. n, alpha, q, success_probability = preprocess_params(n, alpha, q, success_probability, prec=4*n)
  1964. RR = alpha.parent()
  1965. sigma = alpha*q
  1966. has_samples = samples is not None
  1967. if o is None:
  1968. best = bkw_small_secret(n, alpha, q, success_probability, secret_bounds, t=t, o=0)
  1969. o = best["oracle"]/2
  1970. while True:
  1971. try:
  1972. current = bkw_small_secret(n, alpha, q, success_probability, secret_bounds, t=t, o=o)
  1973. except InsufficientSamplesError:
  1974. break
  1975. if best is None or (current["bop"] < best["bop"] and (not has_samples or current["oracle"] <= samples)):
  1976. best = current
  1977. if current["bop"] > best["bop"]:
  1978. break
  1979. if get_verbose() >= 2:
  1980. print cost_str(current)
  1981. o = o/2
  1982. if has_samples and best["oracle"] > samples:
  1983. raise InsufficientSamplesError("No solution could be found with given samples (%d)" % samples)
  1984. return best
  1985. if t is None:
  1986. t = RR(2*(log(q, 2) - log(sigma, 2))/log(n, 2))
  1987. best = None
  1988. while True:
  1989. try:
  1990. current = bkw_small_secret(n, alpha, q, success_probability, secret_bounds, t=t, o=o)
  1991. except InsufficientSamplesError:
  1992. break
  1993. if best is None or (current["bop"] < best["bop"] and (not has_samples or current["oracle"] <= samples)):
  1994. best = current
  1995. if current["bop"] > best["bop"]:
  1996. break
  1997. if get_verbose() >= 2:
  1998. print cost_str(current)
  1999. t += 0.01
  2000. if has_samples and best["oracle"] > samples:
  2001. raise InsufficientSamplesError("No solution could be found with given samples (%d)" % samples)
  2002. return best
  2003. secret_variance = uniform_variance_from_bounds(*secret_bounds)
  2004. secret_variance = RR(secret_variance)
  2005. a = RR(t*log(n, 2)) # the target number of additions: a = t*log_2(n)
  2006. b = n/a # window width b = n/a
  2007. sigma_final = RR(n**t).sqrt() * sigma # after n^t additions we get this stddev
  2008. transformation_noise = sqrt(n * 1/RR(12) * secret_variance)
  2009. kappa = ceil(log(round(q*transformation_noise/stddevf(sigma)), 2.0)) + 1
  2010. if kappa > ceil(log(q, 2)):
  2011. kappa = ceil(log(q, 2))
  2012. best = None
  2013. while kappa > 0:
  2014. current = bkwssf(kappa)
  2015. if best is None or (current["bop"] < best["bop"] and (not has_samples or current["oracle"] <= samples)):
  2016. best = current
  2017. if current["bop"] > best["bop"]:
  2018. break
  2019. kappa -= 1
  2020. if has_samples and best["oracle"] > samples:
  2021. raise InsufficientSamplesError("No solution could be found with given samples (%d)" % samples)
  2022. best["o"] = o
  2023. best["t"] = t
  2024. best["a"] = a
  2025. best["b"] = b
  2026. best = cost_reorder(best, ["bop", "oracle", "t", "m", "mem"])
  2027. return best
  2028. # Arora-GB for Small Secrets
  2029. def arora_gb_small_secret(n, alpha, q, secret_bounds, h=None, samples=None, **kwds):
2030. """Estimate cost of the Arora-GB attack after switching the modulus for a small secret.
2031. :param n: dimension > 0
2032. :param alpha: fraction of the noise α < 1.0
2033. :param q: modulus > 0
2034. :param secret_bounds: lower and upper bound on the secret
2035. :param h: number of non-zero components in the secret
2036. :returns: a cost estimate
2037. :rtype: OrderedDict
  2038. """
  2039. a, b = secret_bounds
  2040. s_var = uniform_variance_from_bounds(*secret_bounds, h=h)
  2041. n, alpha, q = switch_modulus(n, alpha, q, s_var, h=h)
  2042. return arora_gb(n, alpha, q, d2=b-a+1, **kwds)
  2043. # Toplevel function
  2044. def estimate_lwe(n, alpha, q, samples=None, skip=None, small=False, secret_bounds=None, h=None):
  2045. """
  2046. Estimate the complexity of solving LWE with the given parameters.
  2047. :param n:
  2048. :param alpha:
  2049. :param q:
  2050. :param skip:
  2051. :param small:
  2052. :param secret_bounds:
  2053. :returns:
  2054. :rtype:
  2055. EXAMPLE::
  2056. sage: n, alpha, q = unpack_lwe(Regev(64))
  2057. sage: set_verbose(1)
  2058. sage: d = estimate_lwe(n,alpha,q, skip="arora-gb")
  2059. mitm bop: ≈2^133.2, oracle: 52, mem: ≈2^129.6, rop: ≈2^129.6
  2060. bkw bop: ≈2^53.1, oracle: ≈2^39.2, m: ≈2^30.2, mem: ≈2^40.2, ...
  2061. sis bkz2: ≈2^34.5, oracle: ≈2^11.7, δ_0: 1.0130466, k: 40, ...
  2062. dec bop: ≈2^33.9, oracle: 1288, δ_0: 1.0157998, bkz2: ≈2^32.8, ...
  2063. kannan bkz2: ≈2^35.3, oracle: ≈2^12.9, δ_0: 1.0171085, k: 40, ...
  2064. """
  2065. if not small:
  2066. algorithms = OrderedDict([("mitm", mitm),
  2067. ("bkw", bkw_coded),
  2068. ("sis", sis),
  2069. ("dec", decode),
  2070. ("kannan", kannan),
  2071. ("arora-gb", arora_gb)])
  2072. else:
  2073. algorithms = OrderedDict([("mitm", mitm),
  2074. ("bkw", bkw_coded),
  2075. ("sis", partial(drop_and_solve, sis_small_secret_mod_switch)),
  2076. ("dec", decode_small_secret_mod_switch_and_guess),
  2077. ("kannan", kannan_small_secret_mod_switch_and_guess),
  2078. ("baigal", bai_gal_small_secret),
  2079. ("arora-gb", arora_gb_small_secret)])
  2080. if skip is None:
  2081. skip = []
  2082. try:
  2083. skip = [s.strip().lower() for s in skip.split(",")]
  2084. except AttributeError:
  2085. pass
  2086. skip = [s.strip().lower() for s in skip]
  2087. alg_width = max(len(key) for key in set(algorithms).difference(skip))
  2088. cost_kwds = {"keyword_width": 5}
  2089. results = OrderedDict()
  2090. for alg in algorithms:
  2091. if alg not in skip:
  2092. algf = algorithms[alg]
  2093. if alg in ("dec", "sis", "kannan", "baigal"):
  2094. algf = sieve_or_enum(algf)
  2095. try:
  2096. if small:
  2097. tmp = algf(n, alpha, q, secret_bounds=secret_bounds, h=h, samples=samples)
  2098. else:
  2099. tmp = algf(n, alpha, q, samples=samples)
  2100. if tmp:
  2101. results[alg] = tmp
  2102. if get_verbose() >= 1:
  2103. print ("%%%ds" % alg_width) % alg,
  2104. print cost_str(results[alg], **cost_kwds)
  2105. except Exception as e:
  2106. print u"Algorithm '%s' failed with message '%s'"%(alg, e)
  2107. return results
  2108. # Plots
  2109. def plot_costs(LWE, N, skip=None, filename=None, small=False, secret_bounds=None):
  2110. plots = {}
  2111. for n in N:
  2112. lwe = LWE(n)
  2113. r = estimate_lwe(skip=skip, small=small, secret_bounds=secret_bounds, **unpack_lwe_dict(lwe))
  2114. if get_verbose() >= 1:
  2115. print
  2116. for key in r:
  2117. value = r[key].values()[0]
  2118. plots[key] = plots.get(key, tuple()) + ((n, log(value, 2)),)
  2119. colors = ("#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD")
  2120. import matplotlib.pyplot as plt
  2121. plt.clf()
  2122. plt.figure(1)
  2123. for i, plot in enumerate(plots):
  2124. x, y = [x_ for x_, y_ in plots[plot]], [y_ for x_, y_ in plots[plot]]
  2125. plt.plot(x, y, label=plot, color=colors[i], linewidth=1.5)
  2126. plt.legend(loc=2)
  2127. plt.xlabel("n")
  2128. plt.ylabel("$\log_2$(bop)")
  2129. if small:
  2130. plt.title(u"%s (%d-%d), $s ← %s^n$"%(LWE.__name__, N[0], N[-1], secret_bounds))
  2131. else:
  2132. plt.title(u"%s (%d-%d)"%(LWE.__name__, N[0], N[-1]))
  2133. if filename is None:
  2134. if small:
  2135. small_str = "-(%d,%d)"%(secret_bounds[0], secret_bounds[1])
  2136. else:
  2137. small_str = ""
  2138. filename="%s%s-%d-%d.pdf"%(LWE.__name__, small_str, N[0], N[-1])
  2139. plt.savefig(filename, dpi=128)
  2140. def plot_fhe_costs(L, N, skip=None, filename=None, small=False, secret_bounds=None):
  2141. plots = {}
  2142. for n in N:
  2143. params = fhe_params(L, n)
  2144. r = estimate_lwe(*params, skip=skip, small=small, secret_bounds=secret_bounds)
  2145. if get_verbose() >= 1:
  2146. print
  2147. for key in r:
  2148. value = r[key].values()[0]
  2149. plots[key] = plots.get(key, tuple()) + ((n, log(value, 2)),)
  2150. colors = ("#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD")
  2151. import matplotlib.pyplot as plt
  2152. plt.clf()
  2153. plt.figure(1)
  2154. for i, plot in enumerate(plots):
  2155. x, y = [x_ for x_, y_ in plots[plot]], [y_ for x_, y_ in plots[plot]]
  2156. plt.plot(x, y, label=plot, color=colors[i], linewidth=1.5)
  2157. plt.legend(loc=2)
  2158. plt.xlabel("n")
  2159. plt.ylabel("$\log_2$(bop)")
  2160. if small:
  2161. plt.title(u"FHE (%d-%d), $L=%d$, $s ← %s^n$"%(N[0], N[-1], L, secret_bounds))
  2162. else:
  2163. plt.title(u"FHE (%d-%d), $L=%d$"%(N[0], N[-1], L))
  2164. if filename is None:
  2165. if small:
  2166. small_str = "-(%d,%d)"%(secret_bounds[0], secret_bounds[1])
  2167. else:
  2168. small_str = ""
  2169. filename="FHE-%d%s-%d-%d.pdf"%(L, small_str, N[0], N[-1])
  2170. plt.savefig(filename, dpi=128)
  2171. # LaTeX tables
  2172. dfs = "%4.0f"
  2173. latex_config = {
  2174. "mitm": OrderedDict([("bop", dfs), ("mem", dfs), ("oracle", dfs)]),
  2175. "bkw": OrderedDict([("bop", dfs), ("mem", dfs), ("oracle", dfs)]),
  2176. "arora-gb": OrderedDict([("bop", dfs), ("mem", dfs), ("oracle", dfs)]),
  2177. "sis": OrderedDict([("bkz2", dfs), ("sieve", dfs), ("oracle", dfs), ("repeat", dfs)]),
  2178. "kannan": OrderedDict([("bkz2", dfs), ("sieve", dfs), ("oracle", dfs), ("repeat", dfs)]),
  2179. "baigal": OrderedDict([("bkz2", dfs), ("sieve", dfs), ("oracle", dfs), ("repeat", dfs)]),
  2180. "dec": OrderedDict([("rop", dfs), ("enum", dfs), ("oracle", dfs), ("repeat", dfs)]),
  2181. }
  2182. def latex_cost_header(cur):
  2183. header = []
  2184. header.append(r"\begin{scriptsize}")
  2185. pretty_algorithm_names = {
  2186. "mitm": "MitM",
  2187. "bkw": "Coded-BKW",
  2188. "arora-gb": "Arora-GB",
  2189. "sis": "SIS",
  2190. "kannan": "Kannan",
  2191. "baigal": "Bai-Gal",
  2192. "dec": "Dec"
  2193. }
  2194. pretty_column_names = {
  2195. "oracle": "$\\Ldis$",
  2196. "repeat": "g",
  2197. }
  2198. line = [r"\begin{tabular}{r"]
  2199. for alg in cur:
  2200. line.append("@{\hskip 8pt}")
  2201. line.append("r" * len([key for key in latex_config[alg].keys() if key in cur[alg]]))
  2202. line.append("}")
  2203. header.append("".join(line))
  2204. header.append(r"\toprule")
  2205. line = [" "]
  2206. for alg in cur:
  2207. count = len([key for key in latex_config[alg].keys() if key in cur[alg]])
  2208. line.append(r"\multicolumn{%d}{c}{%s}"%(count, pretty_algorithm_names[alg]))
  2209. line = " & ".join(line) + "\\\\"
  2210. header.append(line)
  2211. header.append(r"\midrule")
  2212. line = [" $n$"]
  2213. for alg in cur:
  2214. line.extend([pretty_column_names.get(key, key) for key in latex_config[alg].keys() if key in cur[alg]])
  2215. line = " & ".join(line) + "\\\\"
  2216. header.append(line)
  2217. header.append(r"\midrule")
  2218. return header
  2219. def latex_cost_row(cur):
  2220. line = []
  2221. for alg in cur:
  2222. cost = cur[alg]
  2223. for col, format in latex_config[alg].iteritems():
  2224. if (col == "repeat" and col in cost) or col != "repeat":
  2225. line.append(format % log(cost[col], 2))
  2226. return line
  2227. def latex_cost_footer(name):
  2228. footer = []
  2229. footer.append(r"\bottomrule")
  2230. footer.append(r"\end{tabular}")
  2231. footer.append(r"\end{scriptsize}")
  2232. footer.append(r"\caption{%s}" % name)
  2233. return footer
  2234. def latex_costs(LWE, N, skip=None, small=False, secret_bounds=None):
  2235. ret = []
  2236. for i, n in enumerate(N):
  2237. line = ["%4d"%n]
  2238. lwe = LWE(n)
  2239. cur = estimate_lwe(skip=skip, small=small, secret_bounds=secret_bounds, **unpack_lwe_dict(lwe))
  2240. line.extend(latex_cost_row(cur))
  2241. line = " & ".join(line) + "\\\\"
  2242. ret.append(line)
  2243. if get_verbose() >= 1:
  2244. print
  2245. header = latex_cost_header(cur)
  2246. if small:
  2247. name = "%s with $\s[(i)] \sample \{%d,%d\}$"%(LWE.__name__, secret_bounds[0], secret_bounds[1])
  2248. else:
  2249. name = LWE.__name__
  2250. footer = latex_cost_footer(name)
  2251. ret = header + ret + footer
  2252. return "\n".join(ret)
  2253. def fhe_params(L, n):
2254. # "Homomorphic Evaluation of the AES Circuit" talks about σ² as a variance,
2255. # so σ is a standard deviation, not a width parameter
  2256. stddev = RR(3.2)
  2257. xi = ZZ(8)
  2258. q = ZZ(2)**(16.5*L + 5.4) * xi**(2*L-3) * n**L
  2259. alpha = sigmaf(stddev)/q
  2260. return n, alpha, q
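# Illustrative sketch for fhe_params() with arbitrary (L, n): with xi = 8 the
# bit-size of q is 16.5*L + 5.4 + 3*(2*L - 3) + L*log2(n), e.g. about 61.4 bits
# for L=2, n=1024.
def _fhe_params_bits_sketch(L=2, n=1024):
    q = fhe_params(L, n)[2]
    return log(q, 2).n()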
  2261. def latex_fhe_costs(N, l, secret_bounds, skip=None):
  2262. ret = []
  2263. for n in N:
  2264. line = ["%6d"%n]
  2265. params = fhe_params(l, n)
  2266. cur = estimate_lwe(*params, skip=skip, small=True, secret_bounds=secret_bounds)
  2267. line.extend(latex_cost_row(cur))
  2268. line = " & ".join(line) + "\\\\"
  2269. ret.append(line)
  2270. if get_verbose() >= 1:
  2271. print
  2272. header = latex_cost_header(cur)
  2273. name = "FHE with $L=%d$ with $\s[(i)] \sample \{%d,%d\}$"%(l, secret_bounds[0], secret_bounds[1])
  2274. footer = latex_cost_footer(name)
  2275. ret = header + ret + footer
  2276. return "\n".join(ret)
  2277. def make_all_tables():
  2278. N = (64, 128, 256, 512, 1024)
  2279. print latex_costs(Regev, N, skip=["arora-gb"])
  2280. print
  2281. print latex_costs(Regev, N, small=True, secret_bounds=(0, 1), skip=["arora-gb"])
  2282. print
  2283. print latex_costs(LindnerPeikert, N)
  2284. print
  2285. print latex_costs(LindnerPeikert, N, small=True, secret_bounds=(0, 1), skip=["arora-gb"])
  2286. print latex_fhe_costs([2**i for i in range(6, 12)], l=2, skip="Arora-GB", secret_bounds=(0, 1))
  2287. print
  2288. print latex_fhe_costs([2**i for i in range(6, 15)], l=10, skip="Arora-GB", secret_bounds=(0, 1))
  2289. print
  2290. def make_all_plots():
  2291. v = get_verbose()
  2292. set_verbose(1)
  2293. N = range(64, 400, 16)
  2294. plot_costs(Regev, N, skip=["arora-gb", "mitm"])
  2295. plot_costs(LindnerPeikert, N, skip=["arora-gb", "mitm"])
  2296. plot_costs(Regev, N, small=True, secret_bounds=(0, 1), skip=["arora-gb"])
  2297. plot_costs(LindnerPeikert, N, small=True, secret_bounds=(0, 1), skip=["arora-gb"])
  2298. set_verbose(v)
  2299. class SimpleLWE(LWE):
  2300. """
  2301. LWE parameters with `σ=\sqrt{n/2π}` and `q = n^2`.
  2302. """
  2303. def __init__(self, n):
  2304. """
  2305. LWE parameters with `σ=\sqrt{n/2π}` and `q = n^2`.
  2306. :param n: security parameter n (= dimension)
  2307. """
  2308. from sage.stats.distributions.discrete_gaussian_integer import DiscreteGaussianDistributionIntegerSampler
  2309. from sage.rings.arith import next_prime
  2310. q = ZZ(next_prime(n**2))
  2311. s = sqrt(n)
  2312. D = DiscreteGaussianDistributionIntegerSampler(s/sqrt(2*pi.n()), q)
  2313. LWE.__init__(self, n=n, q=q, D=D, secret_dist=(0, 1))