ptwist168_32.c

  1. #include "ptwist.h"
  2. /* 32-bit version of ptwist168.c by Ian Goldberg. Based on: */
  3. /* crypto/ec/ecp_nistp224.c */
  4. /*
  5. * Written by Emilia Kasper (Google) for the OpenSSL project.
  6. */
  7. /* ====================================================================
  8. * Copyright (c) 2000-2010 The OpenSSL Project. All rights reserved.
  9. *
  10. * Redistribution and use in source and binary forms, with or without
  11. * modification, are permitted provided that the following conditions
  12. * are met:
  13. *
  14. * 1. Redistributions of source code must retain the above copyright
  15. * notice, this list of conditions and the following disclaimer.
  16. *
  17. * 2. Redistributions in binary form must reproduce the above copyright
  18. * notice, this list of conditions and the following disclaimer in
  19. * the documentation and/or other materials provided with the
  20. * distribution.
  21. *
  22. * 3. All advertising materials mentioning features or use of this
  23. * software must display the following acknowledgment:
  24. * "This product includes software developed by the OpenSSL Project
  25. * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
  26. *
  27. * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
  28. * endorse or promote products derived from this software without
  29. * prior written permission. For written permission, please contact
  30. * licensing@OpenSSL.org.
  31. *
  32. * 5. Products derived from this software may not be called "OpenSSL"
  33. * nor may "OpenSSL" appear in their names without prior written
  34. * permission of the OpenSSL Project.
  35. *
  36. * 6. Redistributions of any form whatsoever must retain the following
  37. * acknowledgment:
  38. * "This product includes software developed by the OpenSSL Project
  39. * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
  40. *
  41. * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
  42. * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  43. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  44. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
  45. * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  46. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  47. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  48. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  49. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  50. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  51. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  52. * OF THE POSSIBILITY OF SUCH DAMAGE.
  53. * ====================================================================
  54. *
  55. * This product includes cryptographic software written by Eric Young
  56. * (eay@cryptsoft.com). This product includes software written by Tim
  57. * Hudson (tjh@cryptsoft.com).
  58. *
  59. */
  60. /*
  61. * A 64-bit implementation of the NIST P-224 elliptic curve point multiplication
  62. *
  63. * Inspired by Daniel J. Bernstein's public domain nistp224 implementation
  64. * and Adam Langley's public domain 64-bit C implementation of curve25519
  65. */
  66. #include <stdint.h>
  67. #include <string.h>
  68. typedef uint8_t u8;
  69. /******************************************************************************/
  70. /* INTERNAL REPRESENTATION OF FIELD ELEMENTS
  71. *
  72. * Field elements are represented as sum_{i=0}^{6} 2^{24*i}*a_i
  73. * where each slice a_i is a 32-bit word, i.e., a field element is an fslice
  74. * array a with 7 elements, where a[i] = a_i.
  75. * Outputs from multiplications are represented as unreduced polynomials
  76. * sum_{i=0}^{12} 2^{24*i}*b_i
  77. * where each b_i is a 64-bit word. We ensure that inputs to each field
  78. * multiplication satisfy a_i < 2^30, so outputs satisfy b_i < 7*2^30*2^30,
  79. * and fit into a 64-bit word without overflow. The coefficients are then
  80. * again partially reduced to a_i < 2^25. We only reduce to the unique
  81. * minimal representation at the end of the computation.
  82. *
  83. */
  84. typedef uint32_t fslice;
  85. typedef fslice coord[7];
  86. typedef coord point[3];
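/* As a concrete illustration of this representation (example only; nothing
 * below uses it): the prime p = 2^168 - 2^8 - 1 splits into seven 24-bit
 * slices, least-significant slice first. */
#if 0
static const coord p_as_coord_example = {
	0xfffeff, 0xffffff, 0xffffff, 0xffffff, 0xffffff, 0xffffff, 0xffffff
};
#endif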
  87. #include <stdio.h>
  88. #include <stdlib.h>
  89. static void dump_coord(const char *label, const coord c)
  90. {
  91. if (label) fprintf(stderr, "%s: ", label);
  92. fprintf(stderr, "%08x %08x %08x %08x %08x %08x %08x\n",
  93. c[6], c[5], c[4], c[3], c[2], c[1], c[0]);
  94. }
  95. static void dump_point(const char *label, point p)
  96. {
  97. if (label) fprintf(stderr, "%s:\n", label);
  98. dump_coord(" x", p[0]);
  99. dump_coord(" y", p[1]);
  100. dump_coord(" z", p[2]);
  101. }
  102. /* Field element represented as a byte array.
  103. * 21*8 = 168 bits is also the group order size for the elliptic curve. */
  104. typedef u8 felem_bytearray[21];
  105. static const felem_bytearray ptwist168_curve_params[5] = {
  106. {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, /* p */
  107. 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
  108. 0xFF},
  109. {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, /* a */
  110. 0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFF,0xFE,
  111. 0xFC},
  112. {0x4E,0x35,0x5E,0x95,0xCA,0xFE,0xDD,0x48,0x6E,0xBC, /* b */
  113. 0x69,0xBA,0xD3,0x16,0x46,0xD3,0x20,0xE0,0x1D,0xC7,
  114. 0xD6},
  115. {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* x */
  116. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  117. 0x02},
  118. {0xEA,0x67,0x47,0xB7,0x5A,0xF8,0xC7,0xF9,0x3C,0x1F, /* y */
  119. 0x5E,0x6D,0x32,0x0F,0x88,0xB9,0xBE,0x15,0x66,0xD2,
  120. 0xF2}
  121. };
  122. /* Helper functions to convert field elements to/from internal representation */
  123. static void bin21_to_felem(fslice out[7], const u8 in[21])
  124. {
  125. out[0] = *((const uint32_t *)(in)) & 0x00ffffff;
  126. out[1] = (*((const uint32_t *)(in+3))) & 0x00ffffff;
  127. out[2] = (*((const uint32_t *)(in+6))) & 0x00ffffff;
  128. out[3] = (*((const uint32_t *)(in+9))) & 0x00ffffff;
  129. out[4] = (*((const uint32_t *)(in+12))) & 0x00ffffff;
  130. out[5] = (*((const uint32_t *)(in+15))) & 0x00ffffff;
  131. out[6] = (uint32_t)in[18] | ((uint32_t)in[19]<<8) | ((uint32_t)in[20]<<16); /* avoid a 4-byte load that would read past in[20] */
  132. }
  133. static void felem_to_bin21(u8 out[21], const fslice in[7])
  134. {
  135. unsigned i;
  136. for (i = 0; i < 3; ++i)
  137. {
  138. out[i] = in[0]>>(8*i);
  139. out[i+3] = in[1]>>(8*i);
  140. out[i+6] = in[2]>>(8*i);
  141. out[i+9] = in[3]>>(8*i);
  142. out[i+12] = in[4]>>(8*i);
  143. out[i+15] = in[5]>>(8*i);
  144. out[i+18] = in[6]>>(8*i);
  145. }
  146. }
  147. #if 0
  148. /* To preserve endianness when using BN_bn2bin and BN_bin2bn */
  149. static void flip_endian(u8 *out, const u8 *in, unsigned len)
  150. {
  151. unsigned i;
  152. for (i = 0; i < len; ++i)
  153. out[i] = in[len-1-i];
  154. }
  155. #endif
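/* Usage sketch (disabled, like flip_endian itself; names are illustrative):
 * the curve parameters above are stored big-endian, while bin21_to_felem
 * expects the little-endian byte order produced by flip_endian, so loading
 * e.g. the prime p into a field element would look roughly like this. */
#if 0
static void load_param_example(fslice out[7], const felem_bytearray param_be)
{
	u8 tmp_le[21];
	flip_endian(tmp_le, param_be, 21);	/* big-endian -> little-endian */
	bin21_to_felem(out, tmp_le);		/* pack into seven 24-bit slices */
}
#endif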
  156. /******************************************************************************/
  157. /* FIELD OPERATIONS
  158. *
  159. * Field operations, using the internal representation of field elements.
  160. * NB! These operations are specific to our point multiplication and cannot be
  161. * expected to be correct in general - e.g., multiplication with a large scalar
  162. * will cause an overflow.
  163. *
  164. */
  165. /* Sum two field elements: out += in */
  166. static void felem_sum64(fslice out[7], const fslice in[7])
  167. {
  168. out[0] += in[0];
  169. out[1] += in[1];
  170. out[2] += in[2];
  171. out[3] += in[3];
  172. out[4] += in[4];
  173. out[5] += in[5];
  174. out[6] += in[6];
  175. }
  176. /* Subtract field elements: out -= in */
  177. /* Assumes in[i] < 2^25 */
  178. static void felem_diff64(fslice out[7], const fslice in[7])
  179. {
  180. /* a = 3*2^24 - 3 */
  181. /* b = 3*2^24 - 3*257 */
  182. static const uint32_t a = (((uint32_t) 3) << 24) - ((uint32_t) 3);
  183. static const uint32_t b = (((uint32_t) 3) << 24) - ((uint32_t) 771);
  184. /* Add 0 mod 2^168-2^8-1 to ensure out > in at each element */
  185. out[0] += b;
  186. out[1] += a;
  187. out[2] += a;
  188. out[3] += a;
  189. out[4] += a;
  190. out[5] += a;
  191. out[6] += a;
  192. out[0] -= in[0];
  193. out[1] -= in[1];
  194. out[2] -= in[2];
  195. out[3] -= in[3];
  196. out[4] -= in[4];
  197. out[5] -= in[5];
  198. out[6] -= in[6];
  199. }
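/* Worked check of the constants above: with a = 3*2^24 - 3 and b = 3*2^24 - 771,
 *   a*(2^144 + 2^120 + ... + 2^24) + b
 *     = 3*(2^168 + 2^144 + ... + 2^48) - 3*(2^144 + ... + 2^24) + 3*2^24 - 771
 *     = 3*2^168 - 771 = 3*(2^168 - 257) = 3*p,
 * so the additions change nothing mod p while guaranteeing out[i] >= in[i]
 * before the subtractions. */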
  200. /* Subtract in unreduced 64-bit mode: out64 -= in64 */
  201. /* Assumes in[i] < 2^55 */
  202. static void felem_diff128(uint64_t out[13], const uint64_t in[13])
  203. {
  204. /* a = 3*2^54
  205. b = 3*2^54 - 49536
  206. c = 3*2^54 - 49344
  207. d = 3*2^54 - 12730752
  208. a*2^{288..168} + b*2^{144..48} + c*2^24 + d = 0 mod p
  209. */
  210. static const uint64_t a = (((uint64_t)3) << 54);
  211. static const uint64_t b = (((uint64_t)3) << 54) - ((uint64_t) 49536);
  212. static const uint64_t c = (((uint64_t)3) << 54) - ((uint64_t) 49344);
  213. static const uint64_t d = (((uint64_t)3) << 54) - ((uint64_t) 12730752);
  214. /* Add 0 mod 2^168-2^8-1 to ensure out > in */
  215. out[0] += d;
  216. out[1] += c;
  217. out[2] += b;
  218. out[3] += b;
  219. out[4] += b;
  220. out[5] += b;
  221. out[6] += b;
  222. out[7] += a;
  223. out[8] += a;
  224. out[9] += a;
  225. out[10] += a;
  226. out[11] += a;
  227. out[12] += a;
  228. out[0] -= in[0];
  229. out[1] -= in[1];
  230. out[2] -= in[2];
  231. out[3] -= in[3];
  232. out[4] -= in[4];
  233. out[5] -= in[5];
  234. out[6] -= in[6];
  235. out[7] -= in[7];
  236. out[8] -= in[8];
  237. out[9] -= in[9];
  238. out[10] -= in[10];
  239. out[11] -= in[11];
  240. out[12] -= in[12];
  241. }
  242. /* Subtract in mixed mode: out64 -= in32 */
  243. /* in[i] < 2^31 */
  244. static void felem_diff_128_64(uint64_t out[13], const fslice in[7])
  245. {
  246. /* a = 3*2^30 - 192
  247. b = 3*2^30 - 49344
  248. a*2^{144..24} + b = 0 mod p
  249. */
  250. static const uint64_t a = (((uint64_t) 3) << 30) - ((uint64_t) 192);
  251. static const uint64_t b = (((uint64_t) 3) << 30) - ((uint64_t) 49344);
  252. /* Add 0 mod 2^168-2^8-1 to ensure out > in */
  253. out[0] += b;
  254. out[1] += a;
  255. out[2] += a;
  256. out[3] += a;
  257. out[4] += a;
  258. out[5] += a;
  259. out[6] += a;
  260. out[0] -= in[0];
  261. out[1] -= in[1];
  262. out[2] -= in[2];
  263. out[3] -= in[3];
  264. out[4] -= in[4];
  265. out[5] -= in[5];
  266. out[6] -= in[6];
  267. }
  268. /* Multiply a field element by a scalar: out64 = out64 * scalar
  269. * The scalars we actually use are small, so results fit without overflow */
  270. static void felem_scalar64(fslice out[7], const fslice scalar)
  271. {
  272. out[0] *= scalar;
  273. out[1] *= scalar;
  274. out[2] *= scalar;
  275. out[3] *= scalar;
  276. out[4] *= scalar;
  277. out[5] *= scalar;
  278. out[6] *= scalar;
  279. }
  280. /* Multiply an unreduced field element by a scalar: out64 = out64 * scalar
  281. * The scalars we actually use are small, so results fit without overflow */
  282. static void felem_scalar128(uint64_t out[13], const uint64_t scalar)
  283. {
  284. out[0] *= scalar;
  285. out[1] *= scalar;
  286. out[2] *= scalar;
  287. out[3] *= scalar;
  288. out[4] *= scalar;
  289. out[5] *= scalar;
  290. out[6] *= scalar;
  291. out[7] *= scalar;
  292. out[8] *= scalar;
  293. out[9] *= scalar;
  294. out[10] *= scalar;
  295. out[11] *= scalar;
  296. out[12] *= scalar;
  297. }
  298. /* Square a field element: out = in^2 */
  299. static void felem_square(uint64_t out[13], const fslice in[7])
  300. {
  301. out[0] = ((uint64_t) in[0]) * in[0];
  302. out[1] = ((uint64_t) in[0]) * in[1] * 2;
  303. out[2] = ((uint64_t) in[0]) * in[2] * 2 +
  304. ((uint64_t) in[1]) * in[1];
  305. out[3] = ((uint64_t) in[1]) * in[2] * 2 +
  306. ((uint64_t) in[3]) * in[0] * 2;
  307. out[4] = ((uint64_t) in[2]) * in[2] +
  308. ((uint64_t) in[3]) * in[1] * 2 +
  309. ((uint64_t) in[4]) * in[0] * 2;
  310. out[5] = ((uint64_t) in[3]) * in[2] * 2 +
  311. ((uint64_t) in[4]) * in[1] * 2 +
  312. ((uint64_t) in[5]) * in[0] * 2;
  313. out[6] = ((uint64_t) in[3]) * in[3] +
  314. ((uint64_t) in[4]) * in[2] * 2 +
  315. ((uint64_t) in[5]) * in[1] * 2 +
  316. ((uint64_t) in[6]) * in[0] * 2;
  317. out[7] = ((uint64_t) in[4]) * in[3] * 2 +
  318. ((uint64_t) in[5]) * in[2] * 2 +
  319. ((uint64_t) in[6]) * in[1] * 2;
  320. out[8] = ((uint64_t) in[4]) * in[4] +
  321. ((uint64_t) in[5]) * in[3] * 2 +
  322. ((uint64_t) in[6]) * in[2] * 2;
  323. out[9] = ((uint64_t) in[5]) * in[4] * 2 +
  324. ((uint64_t) in[6]) * in[3] * 2;
  325. out[10] = ((uint64_t) in[5]) * in[5] +
  326. ((uint64_t) in[6]) * in[4] * 2;
  327. out[11] = ((uint64_t) in[6]) * in[5] * 2;
  328. out[12] = ((uint64_t) in[6]) * in[6];
  329. }
  330. /* Multiply two field elements: out = in1 * in2 */
  331. static void felem_mul(uint64_t out[13], const fslice in1[7], const fslice in2[7])
  332. {
  333. out[0] = ((uint64_t) in1[0]) * in2[0];
  334. out[1] = ((uint64_t) in1[0]) * in2[1] +
  335. ((uint64_t) in1[1]) * in2[0];
  336. out[2] = ((uint64_t) in1[0]) * in2[2] +
  337. ((uint64_t) in1[1]) * in2[1] +
  338. ((uint64_t) in1[2]) * in2[0];
  339. out[3] = ((uint64_t) in1[0]) * in2[3] +
  340. ((uint64_t) in1[1]) * in2[2] +
  341. ((uint64_t) in1[2]) * in2[1] +
  342. ((uint64_t) in1[3]) * in2[0];
  343. out[4] = ((uint64_t) in1[0]) * in2[4] +
  344. ((uint64_t) in1[1]) * in2[3] +
  345. ((uint64_t) in1[2]) * in2[2] +
  346. ((uint64_t) in1[3]) * in2[1] +
  347. ((uint64_t) in1[4]) * in2[0];
  348. out[5] = ((uint64_t) in1[0]) * in2[5] +
  349. ((uint64_t) in1[1]) * in2[4] +
  350. ((uint64_t) in1[2]) * in2[3] +
  351. ((uint64_t) in1[3]) * in2[2] +
  352. ((uint64_t) in1[4]) * in2[1] +
  353. ((uint64_t) in1[5]) * in2[0];
  354. out[6] = ((uint64_t) in1[0]) * in2[6] +
  355. ((uint64_t) in1[1]) * in2[5] +
  356. ((uint64_t) in1[2]) * in2[4] +
  357. ((uint64_t) in1[3]) * in2[3] +
  358. ((uint64_t) in1[4]) * in2[2] +
  359. ((uint64_t) in1[5]) * in2[1] +
  360. ((uint64_t) in1[6]) * in2[0];
  361. out[7] = ((uint64_t) in1[1]) * in2[6] +
  362. ((uint64_t) in1[2]) * in2[5] +
  363. ((uint64_t) in1[3]) * in2[4] +
  364. ((uint64_t) in1[4]) * in2[3] +
  365. ((uint64_t) in1[5]) * in2[2] +
  366. ((uint64_t) in1[6]) * in2[1];
  367. out[8] = ((uint64_t) in1[2]) * in2[6] +
  368. ((uint64_t) in1[3]) * in2[5] +
  369. ((uint64_t) in1[4]) * in2[4] +
  370. ((uint64_t) in1[5]) * in2[3] +
  371. ((uint64_t) in1[6]) * in2[2];
  372. out[9] = ((uint64_t) in1[3]) * in2[6] +
  373. ((uint64_t) in1[4]) * in2[5] +
  374. ((uint64_t) in1[5]) * in2[4] +
  375. ((uint64_t) in1[6]) * in2[3];
  376. out[10] = ((uint64_t) in1[4]) * in2[6] +
  377. ((uint64_t) in1[5]) * in2[5] +
  378. ((uint64_t) in1[6]) * in2[4];
  379. out[11] = ((uint64_t) in1[5]) * in2[6] +
  380. ((uint64_t) in1[6]) * in2[5];
  381. out[12] = ((uint64_t) in1[6]) * in2[6];
  382. }
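/* Reference sketch (disabled; illustrative only): felem_square and felem_mul
 * above are unrolled forms of the plain schoolbook convolution
 * out[k] = sum over i+j==k of in1[i]*in2[j].  An equivalent, slower loop: */
#if 0
static void felem_mul_ref(uint64_t out[13], const fslice in1[7], const fslice in2[7])
{
	unsigned i, j;
	memset(out, 0, 13 * sizeof(uint64_t));
	for (i = 0; i < 7; ++i)
		for (j = 0; j < 7; ++j)
			out[i+j] += ((uint64_t) in1[i]) * in2[j];
}
#endif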
  383. #define M257(x) (((x)<<8)+(x))
  384. /* XXX: here */
  385. /* Reduce 128-bit coefficients to 64-bit coefficients. Requires in[i] < 2^126,
  386. * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^57 */
  387. static void felem_reduce(fslice out[7], const uint64_t in[13])
  388. {
  389. static const uint64_t two24m1 = (((uint64_t) 1)<<24) -
  390. ((uint64_t)1);
  391. uint64_t output[7];
  392. output[0] = in[0]; /* < 2^126 */
  393. output[1] = in[1]; /* < 2^126 */
  394. output[2] = in[2]; /* < 2^126 */
  395. /* Eliminate in[3], in[4] */
  396. output[2] += M257(in[4] >> 56); /* < 2^126 + 2^79 */
  397. output[1] += M257(in[4] & two56m1); /* < 2^126 + 2^65 */
  398. output[1] += M257(in[3] >> 56); /* < 2^126 + 2^65 + 2^79 */
  399. output[0] += M257(in[3] & two56m1); /* < 2^126 + 2^65 */
  400. /* Eliminate the top part of output[2] */
  401. output[0] += M257(output[2] >> 56); /* < 2^126 + 2^65 + 2^79 */
  402. output[2] &= two56m1; /* < 2^56 */
  403. /* Carry 0 -> 1 -> 2 */
  404. output[1] += output[0] >> 56; /* < 2^126 + 2^71 */
  405. output[0] &= two56m1; /* < 2^56 */
  406. output[2] += output[1] >> 56; /* < 2^71 */
  407. output[1] &= two56m1; /* < 2^56 */
  408. /* Eliminate the top part of output[2] */
  409. output[0] += M257(output[2] >> 56); /* < 2^57 */
  410. output[2] &= two56m1; /* < 2^56 */
  411. /* Carry 0 -> 1 -> 2 */
  412. output[1] += output[0] >> 56; /* <= 2^56 */
  413. out[0] = output[0] & two56m1; /* < 2^56 */
  414. out[2] = output[2] + (output[1] >> 56); /* <= 2^56 */
  415. out[1] = output[1] & two56m1; /* < 2^56 */
  416. }
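/* Reference sketch (disabled; illustrative only, not the routine above): a
 * 24-bit-slice reduction for p = 2^168 - 2^8 - 1, built on the same
 * 2^168 = 257 (mod p) identity as M257.  It assumes every in[i] < 2^63
 * (true for a sum of at most seven products of 30-bit slices) and, per the
 * note at the top of the file, reduces only partially, to out[i] < 2^25. */
#if 0
static void felem_reduce_sketch(fslice out[7], const uint64_t in[13])
{
	static const uint64_t two24m1 = (((uint64_t) 1)<<24) - ((uint64_t) 1);
	uint64_t t[13];
	unsigned i;
	memcpy(t, in, sizeof(t));
	/* Fold slices 7..12 down: slice i is worth 2^{24*i} = 257 * 2^{24*(i-7)}
	 * mod p.  Each slice is split at bit 24 first so that no addition can
	 * overflow 64 bits. */
	for (i = 12; i >= 7; --i)
		{
		t[i-7] += M257(t[i] & two24m1);	/* adds < 2^33 */
		t[i-6] += M257(t[i] >> 24);	/* adds < 2^48 */
		}
	/* Carry 0 -> 6 */
	for (i = 0; i < 6; ++i)
		{
		t[i+1] += t[i] >> 24;
		t[i] &= two24m1;
		}
	/* Fold the bits of t[6] above 2^24 back into t[0] */
	t[0] += M257(t[6] >> 24);
	t[6] &= two24m1;
	/* Final carry 0 -> 6; every slice ends up below 2^25 */
	for (i = 0; i < 6; ++i)
		{
		t[i+1] += t[i] >> 24;
		t[i] &= two24m1;
		}
	for (i = 0; i < 7; ++i)
		out[i] = (fslice) t[i];
}
#endif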
  417. /* Reduce to unique minimal representation */
  418. static void felem_contract(fslice out[3], const fslice in[3])
  419. {
  420. static const uint64_t two56m1 = (((uint64_t) 1)<<56) -
  421. ((uint64_t)1);
  422. static const uint64_t two56m257 = (((uint64_t) 1)<<56) -
  423. ((uint64_t)257);
  424. uint64_t a;
  425. /* in[0] < 2^56, in[1] < 2^56, in[2] <= 2^56 */
  426. /* so in < 2*p for sure */
  427. /* Eliminate the top part of in[2] */
  428. out[0] = in[0] + M257(in[2] >> 56); /* < 2^57 */
  429. out[2] = in[2] & two56m1; /* < 2^56, but if out[0] >= 2^56
  430. then out[2] now = 0 */
  431. /* Carry 0 -> 1 -> 2 */
  432. out[1] = in[1] + (out[0] >> 56); /* < 2^56 + 2, but if
  433. out[1] >= 2^56 then
  434. out[2] = 0 */
  435. out[0] &= two56m1; /* < 2^56 */
  436. out[2] += out[1] >> 56; /* < 2^56 due to the above */
  437. out[1] &= two56m1; /* < 2^56 */
  438. /* Now out < 2^168, but it could still be > p */
  439. a = ((out[2] == two56m1) & (out[1] == two56m1) & (out[0] >= two56m257));
  440. out[2] -= two56m1*a;
  441. out[1] -= two56m1*a;
  442. out[0] -= two56m257*a;
  443. }
  444. /* Negate a field element: out = -in */
  445. /* Assumes in[i] < 2^57 */
  446. static void felem_neg(fslice out[3], const fslice in[3])
  447. {
  448. /* a = 3*2^56 - 3 */
  449. /* b = 3*2^56 - 3*257 */
  450. static const uint64_t a = (((uint64_t) 3) << 56) - ((uint64_t) 3);
  451. static const uint64_t b = (((uint64_t) 3) << 56) - ((uint64_t) 771);
  452. static const uint64_t two56m1 = (((uint64_t) 1) << 56) - ((uint64_t) 1);
  453. fslice tmp[3];
  454. /* Add 0 mod 2^168-2^8-1 to ensure out > in at each element */
  455. /* a*2^112 + a*2^56 + b = 3*p */
  456. tmp[0] = b - in[0];
  457. tmp[1] = a - in[1];
  458. tmp[2] = a - in[2];
  459. /* Carry 0 -> 1 -> 2 */
  460. tmp[1] += tmp[0] >> 56;
  461. tmp[0] &= two56m1; /* < 2^56 */
  462. tmp[2] += tmp[1] >> 56; /* < 2^71 */
  463. tmp[1] &= two56m1; /* < 2^56 */
  464. felem_contract(out, tmp);
  465. }
  466. /* Zero-check: returns 1 if input is 0, and 0 otherwise.
  467. * We know that field elements are reduced to in < 2^169,
  468. * so we only need to check three cases: 0, 2^168 - 2^8 - 1,
  469. * and 2^169 - 2^9 - 2 */
  470. static fslice felem_is_zero(const fslice in[3])
  471. {
  472. fslice zero, two168m8m1, two169m9m2;
  473. static const uint64_t two56m1 = (((uint64_t) 1)<<56) -
  474. ((uint64_t)1);
  475. static const uint64_t two56m257 = (((uint64_t) 1)<<56) -
  476. ((uint64_t)257);
  477. static const uint64_t two57m1 = (((uint64_t) 1)<<57) -
  478. ((uint64_t)1);
  479. static const uint64_t two56m514 = (((uint64_t) 1)<<56) -
  480. ((uint64_t)514);
  481. zero = (in[0] == 0) & (in[1] == 0) & (in[2] == 0);
  482. two168m8m1 = (in[2] == two56m1) & (in[1] == two56m1) &
  483. (in[0] == two56m257);
  484. two169m9m2 = (in[2] == two57m1) & (in[1] == two56m1) &
  485. (in[0] == two56m514);
  486. return (zero | two168m8m1 | two169m9m2);
  487. }
  488. /* Invert a field element by computing in^(p-2) = in^(2^168 - 2^8 - 3) (Fermat) */
  489. static void felem_inv(fslice out[3], const fslice in[3])
  490. {
  491. fslice ftmp[3], ftmp2[3], ftmp3[3], ftmp4[3];
  492. uint128_t tmp[5];
  493. unsigned i;
  494. felem_square(tmp, in); felem_reduce(ftmp, tmp); /* 2 */
  495. felem_mul(tmp, in, ftmp); felem_reduce(ftmp, tmp); /* 2^2 - 1 */
  496. /* = ftmp */
  497. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^3 - 2 */
  498. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^4 - 2^2 */
  499. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp2, tmp); /* 2^4 - 1 */
  500. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^5 - 2 */
  501. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^6 - 2^2 */
  502. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^6 - 1 */
  503. /* = ftmp */
  504. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^7 - 2 */
  505. for (i = 0; i < 5; ++i) /* 2^12 - 2^6 */
  506. {
  507. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  508. }
  509. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp3, tmp); /* 2^12 - 1 */
  510. /* = ftmp3 */
  511. felem_square(tmp, ftmp3); felem_reduce(ftmp2, tmp); /* 2^13 - 2 */
  512. for (i = 0; i < 11; ++i) /* 2^24 - 2^12 */
  513. {
  514. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  515. }
  516. felem_mul(tmp, ftmp2, ftmp3); felem_reduce(ftmp3, tmp); /* 2^24 - 1 */
  517. /* = ftmp3 */
  518. felem_square(tmp, ftmp3); felem_reduce(ftmp2, tmp); /* 2^25 - 2 */
  519. for (i = 0; i < 23; ++i) /* 2^48 - 2^24 */
  520. {
  521. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  522. }
  523. felem_mul(tmp, ftmp2, ftmp3); felem_reduce(ftmp4, tmp); /* 2^48 - 1 */
  524. /* = ftmp4 */
  525. felem_square(tmp, ftmp4); felem_reduce(ftmp2, tmp); /* 2^49 - 2 */
  526. for (i = 0; i < 23; ++i) /* 2^72 - 2^24 */
  527. {
  528. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  529. }
  530. felem_mul(tmp, ftmp2, ftmp3); felem_reduce(ftmp4, tmp); /* 2^72 - 1 */
  531. /* = ftmp4 */
  532. felem_square(tmp, ftmp4); felem_reduce(ftmp2, tmp); /* 2^73 - 2 */
  533. for (i = 0; i < 5; ++i) /* 2^78 - 2^6 */
  534. {
  535. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  536. }
  537. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^78 - 1 */
  538. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^79 - 2 */
  539. felem_mul(tmp, in, ftmp2); felem_reduce(ftmp4, tmp); /* 2^79 - 1 */
  540. /* = ftmp4 */
  541. felem_square(tmp, ftmp4); felem_reduce(ftmp2, tmp); /* 2^80 - 2 */
  542. for (i = 0; i < 78; ++i) /* 2^158 - 2^79 */
  543. {
  544. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  545. }
  546. felem_mul(tmp, ftmp4, ftmp2); felem_reduce(ftmp2, tmp); /* 2^158 - 1 */
  547. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^159 - 2 */
  548. felem_mul(tmp, in, ftmp2); felem_reduce(ftmp2, tmp); /* 2^159 - 1 */
  549. for (i = 0; i < 7; ++i) /* 2^166 - 2^7 */
  550. {
  551. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  552. }
  553. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^166 - 2^6 - 1 */
  554. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^167 - 2^7 - 2 */
  555. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^168 - 2^8 - 4 */
  556. felem_mul(tmp, in, ftmp2); felem_reduce(out, tmp); /* 2^168 - 2^8 - 3 */
  557. /* = out */
  558. }
  559. /* Take the square root of a field element (when one exists): compute in^((p+1)/4) = in^(2^166 - 2^6), valid since p = 3 (mod 4) */
  560. static void felem_sqrt(fslice out[3], const fslice in[3])
  561. {
  562. fslice ftmp[3], ftmp2[3];
  563. uint128_t tmp[5];
  564. unsigned i;
  565. felem_square(tmp, in); felem_reduce(ftmp, tmp); /* 2 */
  566. felem_mul(tmp, in, ftmp); felem_reduce(ftmp, tmp); /* 2^2 - 1 */
  567. /* = ftmp */
  568. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^3 - 2 */
  569. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^4 - 2^2 */
  570. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp2, tmp); /* 2^4 - 1 */
  571. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^5 - 2 */
  572. felem_mul(tmp, ftmp2, in); felem_reduce(ftmp, tmp); /* 2^5 - 1 */
  573. /* = ftmp */
  574. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^6 - 2 */
  575. for (i = 0; i < 4; ++i) /* 2^10 - 2^5 */
  576. {
  577. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  578. }
  579. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp, tmp); /* 2^10 - 1 */
  580. /* = ftmp */
  581. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^11 - 2 */
  582. for (i = 0; i < 9; ++i) /* 2^20 - 2^10 */
  583. {
  584. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  585. }
  586. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^20 - 1 */
  587. /* = ftmp */
  588. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^21 - 2 */
  589. for (i = 0; i < 19; ++i) /* 2^40 - 2^20 */
  590. {
  591. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  592. }
  593. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^40 - 1 */
  594. /* = ftmp */
  595. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^41 - 2 */
  596. for (i = 0; i < 39; ++i) /* 2^80 - 2^40 */
  597. {
  598. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  599. }
  600. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^80 - 1 */
  601. /* = ftmp */
  602. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^81 - 2 */
  603. for (i = 0; i < 79; ++i) /* 2^160 - 2^80 */
  604. {
  605. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  606. }
  607. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^160 - 1 */
  608. for (i = 0; i < 5; ++i) /* 2^165 - 2^5 */
  609. {
  610. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  611. }
  612. felem_square(tmp, ftmp2); felem_reduce(out, tmp); /* 2^166 - 2^6 */
  613. /* = out */
  614. }
  615. /* Copy in constant time:
  616. * if icopy == 1, copy in to out,
  617. * if icopy == 0, copy out to itself. */
  618. static void
  619. copy_conditional(fslice *out, const fslice *in, unsigned len, fslice icopy)
  620. {
  621. unsigned i;
  622. /* icopy is a (32-bit) 0 or 1, so copy is either all-zero or all-one */
  623. const fslice copy = -icopy;
  624. for (i = 0; i < len; ++i)
  625. {
  626. const fslice tmp = copy & (in[i] ^ out[i]);
  627. out[i] ^= tmp;
  628. }
  629. }
  630. /* Copy in constant time:
  631. * if isel == 1, copy in2 to out,
  632. * if isel == 0, copy in1 to out. */
  633. static void select_conditional(fslice *out, const fslice *in1, const fslice *in2,
  634. unsigned len, fslice isel)
  635. {
  636. unsigned i;
  637. /* isel is a (32-bit) 0 or 1, so sel is either all-zero or all-one */
  638. const fslice sel = -isel;
  639. for (i = 0; i < len; ++i)
  640. {
  641. const fslice tmp = sel & (in1[i] ^ in2[i]);
  642. out[i] = in1[i] ^ tmp;
  643. }
  644. }
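/* Note on the mask trick used by both helpers: since
 * x ^ (0 & (x ^ y)) == x and x ^ (~0 & (x ^ y)) == y, the selection is done
 * purely with masks, with no secret-dependent branches or memory accesses. */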
  645. /******************************************************************************/
  646. /* ELLIPTIC CURVE POINT OPERATIONS
  647. *
  648. * Points are represented in Jacobian projective coordinates:
  649. * (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3),
  650. * or to the point at infinity if Z == 0.
  651. *
  652. */
  653. /* Double an elliptic curve point:
  654. * (X', Y', Z') = 2 * (X, Y, Z), where
  655. * X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2
  656. * Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^4
  657. * Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z
  658. * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
  659. * while x_out == y_in is not (maybe this works, but it's not tested). */
  660. static void
  661. point_double(fslice x_out[3], fslice y_out[3], fslice z_out[3],
  662. const fslice x_in[3], const fslice y_in[3], const fslice z_in[3])
  663. {
  664. uint128_t tmp[5], tmp2[5];
  665. fslice delta[3];
  666. fslice gamma[3];
  667. fslice beta[3];
  668. fslice alpha[3];
  669. fslice ftmp[3], ftmp2[3];
  670. memcpy(ftmp, x_in, 3 * sizeof(fslice));
  671. memcpy(ftmp2, x_in, 3 * sizeof(fslice));
  672. /* delta = z^2 */
  673. felem_square(tmp, z_in);
  674. felem_reduce(delta, tmp);
  675. /* gamma = y^2 */
  676. felem_square(tmp, y_in);
  677. felem_reduce(gamma, tmp);
  678. /* beta = x*gamma */
  679. felem_mul(tmp, x_in, gamma);
  680. felem_reduce(beta, tmp);
  681. /* alpha = 3*(x-delta)*(x+delta) */
  682. felem_diff64(ftmp, delta);
  683. /* ftmp[i] < 2^57 + 2^58 + 2 < 2^59 */
  684. felem_sum64(ftmp2, delta);
  685. /* ftmp2[i] < 2^57 + 2^57 = 2^58 */
  686. felem_scalar64(ftmp2, 3);
  687. /* ftmp2[i] < 3 * 2^58 < 2^60 */
  688. felem_mul(tmp, ftmp, ftmp2);
  689. /* tmp[i] < 2^60 * 2^59 * 4 = 2^121 */
  690. felem_reduce(alpha, tmp);
  691. /* x' = alpha^2 - 8*beta */
  692. felem_square(tmp, alpha);
  693. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  694. memcpy(ftmp, beta, 3 * sizeof(fslice));
  695. felem_scalar64(ftmp, 8);
  696. /* ftmp[i] < 8 * 2^57 = 2^60 */
  697. felem_diff_128_64(tmp, ftmp);
  698. /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
  699. felem_reduce(x_out, tmp);
  700. /* z' = (y + z)^2 - gamma - delta */
  701. felem_sum64(delta, gamma);
  702. /* delta[i] < 2^57 + 2^57 = 2^58 */
  703. memcpy(ftmp, y_in, 3 * sizeof(fslice));
  704. felem_sum64(ftmp, z_in);
  705. /* ftmp[i] < 2^57 + 2^57 = 2^58 */
  706. felem_square(tmp, ftmp);
  707. /* tmp[i] < 4 * 2^58 * 2^58 = 2^118 */
  708. felem_diff_128_64(tmp, delta);
  709. /* tmp[i] < 2^118 + 2^64 + 8 < 2^119 */
  710. felem_reduce(z_out, tmp);
  711. /* y' = alpha*(4*beta - x') - 8*gamma^2 */
  712. felem_scalar64(beta, 4);
  713. /* beta[i] < 4 * 2^57 = 2^59 */
  714. felem_diff64(beta, x_out);
  715. /* beta[i] < 2^59 + 2^58 + 2 < 2^60 */
  716. felem_mul(tmp, alpha, beta);
  717. /* tmp[i] < 4 * 2^57 * 2^60 = 2^119 */
  718. felem_square(tmp2, gamma);
  719. /* tmp2[i] < 4 * 2^57 * 2^57 = 2^116 */
  720. felem_scalar128(tmp2, 8);
  721. /* tmp2[i] < 8 * 2^116 = 2^119 */
  722. felem_diff128(tmp, tmp2);
  723. /* tmp[i] < 2^119 + 2^120 < 2^121 */
  724. felem_reduce(y_out, tmp);
  725. }
  726. /* Add two elliptic curve points:
  727. * (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where
  728. * X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 -
  729. * 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2
  730. * Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 - X_3) -
  731. * Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3
  732. * Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2) */
  733. /* This function is not entirely constant-time:
  734. * it includes a branch for checking whether the two input points are equal,
  735. * (while not equal to the point at infinity).
  736. * This case never happens during single point multiplication,
  737. * so there is no timing leak for ECDH or ECDSA signing. */
  738. static void point_add(fslice x3[3], fslice y3[3], fslice z3[3],
  739. const fslice x1[3], const fslice y1[3], const fslice z1[3],
  740. const fslice x2[3], const fslice y2[3], const fslice z2[3])
  741. {
  742. fslice ftmp[3], ftmp2[3], ftmp3[3], ftmp4[3], ftmp5[3];
  743. fslice xout[3], yout[3], zout[3];
  744. uint128_t tmp[5], tmp2[5];
  745. fslice z1_is_zero, z2_is_zero, x_equal, y_equal;
  746. /* ftmp = z1^2 */
  747. felem_square(tmp, z1);
  748. felem_reduce(ftmp, tmp);
  749. /* ftmp2 = z2^2 */
  750. felem_square(tmp, z2);
  751. felem_reduce(ftmp2, tmp);
  752. /* ftmp3 = z1^3 */
  753. felem_mul(tmp, ftmp, z1);
  754. felem_reduce(ftmp3, tmp);
  755. /* ftmp4 = z2^3 */
  756. felem_mul(tmp, ftmp2, z2);
  757. felem_reduce(ftmp4, tmp);
  758. /* ftmp3 = z1^3*y2 */
  759. felem_mul(tmp, ftmp3, y2);
  760. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  761. /* ftmp4 = z2^3*y1 */
  762. felem_mul(tmp2, ftmp4, y1);
  763. felem_reduce(ftmp4, tmp2);
  764. /* ftmp3 = z1^3*y2 - z2^3*y1 */
  765. felem_diff_128_64(tmp, ftmp4);
  766. /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
  767. felem_reduce(ftmp3, tmp);
  768. /* ftmp = z1^2*x2 */
  769. felem_mul(tmp, ftmp, x2);
  770. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  771. /* ftmp2 = z2^2*x1 */
  772. felem_mul(tmp2, ftmp2, x1);
  773. felem_reduce(ftmp2, tmp2);
  774. /* ftmp = z1^2*x2 - z2^2*x1 */
  775. felem_diff128(tmp, tmp2);
  776. /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
  777. felem_reduce(ftmp, tmp);
  778. /* the formulae are incorrect if the points are equal
  779. * so we check for this and do doubling if this happens */
  780. x_equal = felem_is_zero(ftmp);
  781. y_equal = felem_is_zero(ftmp3);
  782. z1_is_zero = felem_is_zero(z1);
  783. z2_is_zero = felem_is_zero(z2);
  784. /* In affine coordinates, (X_1, Y_1) == (X_2, Y_2) */
  785. if (x_equal && y_equal && !z1_is_zero && !z2_is_zero)
  786. {
  787. point_double(x3, y3, z3, x1, y1, z1);
  788. return;
  789. }
  790. /* ftmp5 = z1*z2 */
  791. felem_mul(tmp, z1, z2);
  792. felem_reduce(ftmp5, tmp);
  793. /* zout = (z1^2*x2 - z2^2*x1)*(z1*z2) */
  794. felem_mul(tmp, ftmp, ftmp5);
  795. felem_reduce(zout, tmp);
  796. /* ftmp = (z1^2*x2 - z2^2*x1)^2 */
  797. memcpy(ftmp5, ftmp, 3 * sizeof(fslice));
  798. felem_square(tmp, ftmp);
  799. felem_reduce(ftmp, tmp);
  800. /* ftmp5 = (z1^2*x2 - z2^2*x1)^3 */
  801. felem_mul(tmp, ftmp, ftmp5);
  802. felem_reduce(ftmp5, tmp);
  803. /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
  804. felem_mul(tmp, ftmp2, ftmp);
  805. felem_reduce(ftmp2, tmp);
  806. /* ftmp4 = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
  807. felem_mul(tmp, ftmp4, ftmp5);
  808. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  809. /* tmp2 = (z1^3*y2 - z2^3*y1)^2 */
  810. felem_square(tmp2, ftmp3);
  811. /* tmp2[i] < 4 * 2^57 * 2^57 < 2^116 */
  812. /* tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 */
  813. felem_diff_128_64(tmp2, ftmp5);
  814. /* tmp2[i] < 2^116 + 2^64 + 8 < 2^117 */
  815. /* ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
  816. memcpy(ftmp5, ftmp2, 3 * sizeof(fslice));
  817. felem_scalar64(ftmp5, 2);
  818. /* ftmp5[i] < 2 * 2^57 = 2^58 */
  819. /* xout = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
  820. 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
  821. felem_diff_128_64(tmp2, ftmp5);
  822. /* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */
  823. felem_reduce(xout, tmp2);
  824. /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - xout */
  825. felem_diff64(ftmp2, xout);
  826. /* ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 */
  827. /* tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - xout) */
  828. felem_mul(tmp2, ftmp3, ftmp2);
  829. /* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */
  830. /* yout = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - xout) -
  831. z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
  832. felem_diff128(tmp2, tmp);
  833. /* tmp2[i] < 2^118 + 2^120 < 2^121 */
  834. felem_reduce(yout, tmp2);
  835. /* the result (xout, yout, zout) is incorrect if one of the
  836. * inputs is the point at infinity, so we need to check for this
  837. * separately */
  838. /* if point 1 is at infinity, copy point 2 to output, and vice versa */
  839. copy_conditional(xout, x2, 3, z1_is_zero);
  840. select_conditional(x3, xout, x1, 3, z2_is_zero);
  841. copy_conditional(yout, y2, 3, z1_is_zero);
  842. select_conditional(y3, yout, y1, 3, z2_is_zero);
  843. copy_conditional(zout, z2, 3, z1_is_zero);
  844. select_conditional(z3, zout, z1, 3, z2_is_zero);
  845. }
  846. static void affine(point P)
  847. {
  848. coord z1, z2, xin, yin;
  849. uint128_t tmp[7];
  850. if (felem_is_zero(P[2])) return;
  851. felem_inv(z2, P[2]);
  852. felem_square(tmp, z2); felem_reduce(z1, tmp);
  853. felem_mul(tmp, P[0], z1); felem_reduce(xin, tmp);
  854. felem_contract(P[0], xin);
  855. felem_mul(tmp, z1, z2); felem_reduce(z1, tmp);
  856. felem_mul(tmp, P[1], z1); felem_reduce(yin, tmp);
  857. felem_contract(P[1], yin);
  858. memset(P[2], 0, sizeof(coord));
  859. P[2][0] = 1;
  860. }
  861. static void affine_x(coord out, point P)
  862. {
  863. coord z1, z2, xin;
  864. uint128_t tmp[7];
  865. if (felem_is_zero(P[2])) return;
  866. felem_inv(z2, P[2]);
  867. felem_square(tmp, z2); felem_reduce(z1, tmp);
  868. felem_mul(tmp, P[0], z1); felem_reduce(xin, tmp);
  869. felem_contract(out, xin);
  870. }
  871. /* Multiply the given point by s */
  872. static void point_mul(point out, point in, const felem_bytearray s)
  873. {
  874. int i;
  875. point tmp;
  876. point table[16];
  877. memset(table[0], 0, sizeof(point));
  878. memmove(table[1], in, sizeof(point));
  879. for(i=2; i<16; i+=2) {
  880. point_double(table[i][0], table[i][1], table[i][2],
  881. table[i/2][0], table[i/2][1], table[i/2][2]);
  882. point_add(table[i+1][0], table[i+1][1], table[i+1][2],
  883. table[i][0], table[i][1], table[i][2],
  884. in[0], in[1], in[2]);
  885. }
  886. /*
  887. for(i=0;i<16;++i) {
  888. fprintf(stderr, "table[%d]:\n", i);
  889. affine(table[i]);
  890. dump_point(NULL, table[i]);
  891. }
  892. */
  893. memset(tmp, 0, sizeof(point));
  894. for(i=0;i<21;i++) {
  895. u8 oh = s[20-i] >> 4;
  896. u8 ol = s[20-i] & 0x0f;
  897. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  898. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  899. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  900. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  901. point_add(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2],
  902. table[oh][0], table[oh][1], table[oh][2]);
  903. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  904. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  905. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  906. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  907. point_add(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2],
  908. table[ol][0], table[ol][1], table[ol][2]);
  909. }
  910. memmove(out, tmp, sizeof(point));
  911. }
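/* Usage sketch (disabled; names are illustrative): multiply the curve's base
 * point by a scalar s, given least-significant byte first as point_mul above
 * consumes it (s[20] is processed first, i.e. holds the most significant
 * byte), then recover the affine x coordinate as 21 little-endian bytes.
 * Assumes the flip_endian helper above is compiled in. */
#if 0
static void point_mul_example(u8 x_out[21], const felem_bytearray s)
{
	point P, Q;
	u8 le[21];
	coord x_affine;
	/* load the generator; the stored parameters are big-endian */
	flip_endian(le, ptwist168_curve_params[3], 21);
	bin21_to_felem(P[0], le);
	flip_endian(le, ptwist168_curve_params[4], 21);
	bin21_to_felem(P[1], le);
	memset(P[2], 0, sizeof(coord));
	P[2][0] = 1;		/* Z = 1: an affine point */
	point_mul(Q, P, s);
	affine_x(x_affine, Q);
	felem_to_bin21(x_out, x_affine);
}
#endif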
  912. #if 0
  913. /* Select a point from an array of 16 precomputed point multiples,
  914. * in constant time: for bits = {b_0, b_1, b_2, b_3}, return the point
  915. * pre_comp[8*b_3 + 4*b_2 + 2*b_1 + b_0] */
  916. static void select_point(const fslice bits[4], const fslice pre_comp[16][3][4],
  917. fslice out[12])
  918. {
  919. fslice tmp[5][12];
  920. select_conditional(tmp[0], pre_comp[7][0], pre_comp[15][0], 12, bits[3]);
  921. select_conditional(tmp[1], pre_comp[3][0], pre_comp[11][0], 12, bits[3]);
  922. select_conditional(tmp[2], tmp[1], tmp[0], 12, bits[2]);
  923. select_conditional(tmp[0], pre_comp[5][0], pre_comp[13][0], 12, bits[3]);
  924. select_conditional(tmp[1], pre_comp[1][0], pre_comp[9][0], 12, bits[3]);
  925. select_conditional(tmp[3], tmp[1], tmp[0], 12, bits[2]);
  926. select_conditional(tmp[4], tmp[3], tmp[2], 12, bits[1]);
  927. select_conditional(tmp[0], pre_comp[6][0], pre_comp[14][0], 12, bits[3]);
  928. select_conditional(tmp[1], pre_comp[2][0], pre_comp[10][0], 12, bits[3]);
  929. select_conditional(tmp[2], tmp[1], tmp[0], 12, bits[2]);
  930. select_conditional(tmp[0], pre_comp[4][0], pre_comp[12][0], 12, bits[3]);
  931. select_conditional(tmp[1], pre_comp[0][0], pre_comp[8][0], 12, bits[3]);
  932. select_conditional(tmp[3], tmp[1], tmp[0], 12, bits[2]);
  933. select_conditional(tmp[1], tmp[3], tmp[2], 12, bits[1]);
  934. select_conditional(out, tmp[1], tmp[4], 12, bits[0]);
  935. }
  936. /* Interleaved point multiplication using precomputed point multiples:
  937. * The small point multiples 0*P, 1*P, ..., 15*P are in pre_comp[],
  938. * the scalars in scalars[]. If g_scalar is non-NULL, we also add this multiple
  939. * of the generator, using certain (large) precomputed multiples in g_pre_comp.
  940. * Output point (X, Y, Z) is stored in x_out, y_out, z_out */
  941. static void batch_mul(fslice x_out[4], fslice y_out[4], fslice z_out[4],
  942. const felem_bytearray scalars[], const unsigned num_points, const u8 *g_scalar,
  943. const fslice pre_comp[][16][3][4], const fslice g_pre_comp[16][3][4])
  944. {
  945. unsigned i, j, num;
  946. unsigned gen_mul = (g_scalar != NULL);
  947. fslice nq[12], nqt[12], tmp[12];
  948. fslice bits[4];
  949. u8 byte;
  950. /* set nq to the point at infinity */
  951. memset(nq, 0, 12 * sizeof(fslice));
  952. /* Loop over all scalars msb-to-lsb, 4 bits at a time: for each nibble,
  953. * double 4 times, then add the precomputed point multiples.
  954. * If we are also adding multiples of the generator, then interleave
  955. * these additions with the last 56 doublings. */
  956. for (i = (num_points ? 28 : 7); i > 0; --i)
  957. {
  958. for (j = 0; j < 8; ++j)
  959. {
  960. /* double once */
  961. point_double(nq, nq+4, nq+8, nq, nq+4, nq+8);
  962. /* add multiples of the generator */
  963. if ((gen_mul) && (i <= 7))
  964. {
  965. bits[3] = (g_scalar[i+20] >> (7-j)) & 1;
  966. bits[2] = (g_scalar[i+13] >> (7-j)) & 1;
  967. bits[1] = (g_scalar[i+6] >> (7-j)) & 1;
  968. bits[0] = (g_scalar[i-1] >> (7-j)) & 1;
  969. /* select the point to add, in constant time */
  970. select_point(bits, g_pre_comp, tmp);
  971. memcpy(nqt, nq, 12 * sizeof(fslice));
  972. point_add(nq, nq+4, nq+8, nqt, nqt+4, nqt+8,
  973. tmp, tmp+4, tmp+8);
  974. }
  975. /* do an addition after every 4 doublings */
  976. if (j % 4 == 3)
  977. {
  978. /* loop over all scalars */
  979. for (num = 0; num < num_points; ++num)
  980. {
  981. byte = scalars[num][i-1];
  982. bits[3] = (byte >> (10-j)) & 1;
  983. bits[2] = (byte >> (9-j)) & 1;
  984. bits[1] = (byte >> (8-j)) & 1;
  985. bits[0] = (byte >> (7-j)) & 1;
  986. /* select the point to add */
  987. select_point(bits,
  988. pre_comp[num], tmp);
  989. memcpy(nqt, nq, 12 * sizeof(fslice));
  990. point_add(nq, nq+4, nq+8, nqt, nqt+4,
  991. nqt+8, tmp, tmp+4, tmp+8);
  992. }
  993. }
  994. }
  995. }
  996. memcpy(x_out, nq, 4 * sizeof(fslice));
  997. memcpy(y_out, nq+4, 4 * sizeof(fslice));
  998. memcpy(z_out, nq+8, 4 * sizeof(fslice));
  999. }
  1000. /******************************************************************************/
  1001. /* FUNCTIONS TO MANAGE PRECOMPUTATION
  1002. */
  1003. static NISTP224_PRE_COMP *nistp224_pre_comp_new()
  1004. {
  1005. NISTP224_PRE_COMP *ret = NULL;
  1006. ret = (NISTP224_PRE_COMP *)OPENSSL_malloc(sizeof(NISTP224_PRE_COMP));
  1007. if (!ret)
  1008. {
  1009. ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
  1010. return ret;
  1011. }
  1012. memset(ret->g_pre_comp, 0, sizeof(ret->g_pre_comp));
  1013. ret->references = 1;
  1014. return ret;
  1015. }
  1016. static void *nistp224_pre_comp_dup(void *src_)
  1017. {
  1018. NISTP224_PRE_COMP *src = src_;
  1019. /* no need to actually copy, these objects never change! */
  1020. CRYPTO_add(&src->references, 1, CRYPTO_LOCK_EC_PRE_COMP);
  1021. return src_;
  1022. }
  1023. static void nistp224_pre_comp_free(void *pre_)
  1024. {
  1025. int i;
  1026. NISTP224_PRE_COMP *pre = pre_;
  1027. if (!pre)
  1028. return;
  1029. i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
  1030. if (i > 0)
  1031. return;
  1032. OPENSSL_free(pre);
  1033. }
  1034. static void nistp224_pre_comp_clear_free(void *pre_)
  1035. {
  1036. int i;
  1037. NISTP224_PRE_COMP *pre = pre_;
  1038. if (!pre)
  1039. return;
  1040. i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
  1041. if (i > 0)
  1042. return;
  1043. OPENSSL_cleanse(pre, sizeof *pre);
  1044. OPENSSL_free(pre);
  1045. }
  1046. /******************************************************************************/
  1047. /* OPENSSL EC_METHOD FUNCTIONS
  1048. */
  1049. int ec_GFp_nistp224_group_init(EC_GROUP *group)
  1050. {
  1051. int ret;
  1052. ret = ec_GFp_simple_group_init(group);
  1053. group->a_is_minus3 = 1;
  1054. return ret;
  1055. }
  1056. int ec_GFp_nistp224_group_set_curve(EC_GROUP *group, const BIGNUM *p,
  1057. const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
  1058. {
  1059. int ret = 0;
  1060. BN_CTX *new_ctx = NULL;
  1061. BIGNUM *curve_p, *curve_a, *curve_b;
  1062. if (ctx == NULL)
  1063. if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
  1064. BN_CTX_start(ctx);
  1065. if (((curve_p = BN_CTX_get(ctx)) == NULL) ||
  1066. ((curve_a = BN_CTX_get(ctx)) == NULL) ||
  1067. ((curve_b = BN_CTX_get(ctx)) == NULL)) goto err;
  1068. BN_bin2bn(nistp224_curve_params[0], sizeof(felem_bytearray), curve_p);
  1069. BN_bin2bn(nistp224_curve_params[1], sizeof(felem_bytearray), curve_a);
  1070. BN_bin2bn(nistp224_curve_params[2], sizeof(felem_bytearray), curve_b);
  1071. if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) ||
  1072. (BN_cmp(curve_b, b)))
  1073. {
  1074. ECerr(EC_F_EC_GFP_NISTP224_GROUP_SET_CURVE,
  1075. EC_R_WRONG_CURVE_PARAMETERS);
  1076. goto err;
  1077. }
  1078. group->field_mod_func = BN_nist_mod_224;
  1079. ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
  1080. err:
  1081. BN_CTX_end(ctx);
  1082. if (new_ctx != NULL)
  1083. BN_CTX_free(new_ctx);
  1084. return ret;
  1085. }
  1086. /* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
  1087. * (X', Y') = (X/Z^2, Y/Z^3) */
  1088. int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group,
  1089. const EC_POINT *point, BIGNUM *x, BIGNUM *y, BN_CTX *ctx)
  1090. {
  1091. fslice z1[4], z2[4], x_in[4], y_in[4], x_out[4], y_out[4];
  1092. uint128_t tmp[7];
  1093. if (EC_POINT_is_at_infinity(group, point))
  1094. {
  1095. ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
  1096. EC_R_POINT_AT_INFINITY);
  1097. return 0;
  1098. }
  1099. if ((!BN_to_felem(x_in, &point->X)) || (!BN_to_felem(y_in, &point->Y)) ||
  1100. (!BN_to_felem(z1, &point->Z))) return 0;
  1101. felem_inv(z2, z1);
  1102. felem_square(tmp, z2); felem_reduce(z1, tmp);
  1103. felem_mul(tmp, x_in, z1); felem_reduce(x_in, tmp);
  1104. felem_contract(x_out, x_in);
  1105. if (x != NULL)
  1106. {
  1107. if (!felem_to_BN(x, x_out)) {
  1108. ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
  1109. ERR_R_BN_LIB);
  1110. return 0;
  1111. }
  1112. }
  1113. felem_mul(tmp, z1, z2); felem_reduce(z1, tmp);
  1114. felem_mul(tmp, y_in, z1); felem_reduce(y_in, tmp);
  1115. felem_contract(y_out, y_in);
  1116. if (y != NULL)
  1117. {
  1118. if (!felem_to_BN(y, y_out)) {
  1119. ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
  1120. ERR_R_BN_LIB);
  1121. return 0;
  1122. }
  1123. }
  1124. return 1;
  1125. }
  1126. /* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL values
  1127. * Result is stored in r (r can equal one of the inputs). */
  1128. int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r,
  1129. const BIGNUM *scalar, size_t num, const EC_POINT *points[],
  1130. const BIGNUM *scalars[], BN_CTX *ctx)
  1131. {
  1132. int ret = 0;
  1133. int i, j;
  1134. BN_CTX *new_ctx = NULL;
  1135. BIGNUM *x, *y, *z, *tmp_scalar;
  1136. felem_bytearray g_secret;
  1137. felem_bytearray *secrets = NULL;
  1138. fslice (*pre_comp)[16][3][4] = NULL;
  1139. felem_bytearray tmp;
  1140. unsigned num_bytes;
  1141. int have_pre_comp = 0;
  1142. size_t num_points = num;
  1143. fslice x_in[4], y_in[4], z_in[4], x_out[4], y_out[4], z_out[4];
  1144. NISTP224_PRE_COMP *pre = NULL;
  1145. fslice (*g_pre_comp)[3][4] = NULL;
  1146. EC_POINT *generator = NULL;
  1147. const EC_POINT *p = NULL;
  1148. const BIGNUM *p_scalar = NULL;
  1149. if (ctx == NULL)
  1150. if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
  1151. BN_CTX_start(ctx);
  1152. if (((x = BN_CTX_get(ctx)) == NULL) ||
  1153. ((y = BN_CTX_get(ctx)) == NULL) ||
  1154. ((z = BN_CTX_get(ctx)) == NULL) ||
  1155. ((tmp_scalar = BN_CTX_get(ctx)) == NULL))
  1156. goto err;
  1157. if (scalar != NULL)
  1158. {
  1159. pre = EC_EX_DATA_get_data(group->extra_data,
  1160. nistp224_pre_comp_dup, nistp224_pre_comp_free,
  1161. nistp224_pre_comp_clear_free);
  1162. if (pre)
  1163. /* we have precomputation, try to use it */
  1164. g_pre_comp = pre->g_pre_comp;
  1165. else
  1166. /* try to use the standard precomputation */
  1167. g_pre_comp = (fslice (*)[3][4]) gmul;
  1168. generator = EC_POINT_new(group);
  1169. if (generator == NULL)
  1170. goto err;
  1171. /* get the generator from precomputation */
  1172. if (!felem_to_BN(x, g_pre_comp[1][0]) ||
  1173. !felem_to_BN(y, g_pre_comp[1][1]) ||
  1174. !felem_to_BN(z, g_pre_comp[1][2]))
  1175. {
  1176. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
  1177. goto err;
  1178. }
  1179. if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
  1180. generator, x, y, z, ctx))
  1181. goto err;
  1182. if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
  1183. /* precomputation matches generator */
  1184. have_pre_comp = 1;
  1185. else
  1186. /* we don't have valid precomputation:
  1187. * treat the generator as a random point */
  1188. num_points = num_points + 1;
  1189. }
  1190. secrets = OPENSSL_malloc(num_points * sizeof(felem_bytearray));
  1191. pre_comp = OPENSSL_malloc(num_points * 16 * 3 * 4 * sizeof(fslice));
  1192. if ((num_points) && ((secrets == NULL) || (pre_comp == NULL)))
  1193. {
  1194. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_MALLOC_FAILURE);
  1195. goto err;
  1196. }
  1197. /* we treat NULL scalars as 0, and NULL points as points at infinity,
  1198. * i.e., they contribute nothing to the linear combination */
  1199. memset(secrets, 0, num_points * sizeof(felem_bytearray));
  1200. memset(pre_comp, 0, num_points * 16 * 3 * 4 * sizeof(fslice));
  1201. for (i = 0; i < num_points; ++i)
  1202. {
  1203. if (i == num)
  1204. /* the generator */
  1205. {
  1206. p = EC_GROUP_get0_generator(group);
  1207. p_scalar = scalar;
  1208. }
  1209. else
  1210. /* the i^th point */
  1211. {
  1212. p = points[i];
  1213. p_scalar = scalars[i];
  1214. }
  1215. if ((p_scalar != NULL) && (p != NULL))
  1216. {
  1217. num_bytes = BN_num_bytes(p_scalar);
  1218. /* reduce scalar to 0 <= scalar < 2^224 */
  1219. if ((num_bytes > sizeof(felem_bytearray)) || (BN_is_negative(p_scalar)))
  1220. {
  1221. /* this is an unusual input, and we don't guarantee
  1222. * constant-timeness */
  1223. if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx))
  1224. {
  1225. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
  1226. goto err;
  1227. }
  1228. num_bytes = BN_bn2bin(tmp_scalar, tmp);
  1229. }
  1230. else
  1231. BN_bn2bin(p_scalar, tmp);
  1232. flip_endian(secrets[i], tmp, num_bytes);
  1233. /* precompute multiples */
  1234. if ((!BN_to_felem(x_out, &p->X)) ||
  1235. (!BN_to_felem(y_out, &p->Y)) ||
  1236. (!BN_to_felem(z_out, &p->Z))) goto err;
  1237. memcpy(pre_comp[i][1][0], x_out, 4 * sizeof(fslice));
  1238. memcpy(pre_comp[i][1][1], y_out, 4 * sizeof(fslice));
  1239. memcpy(pre_comp[i][1][2], z_out, 4 * sizeof(fslice));
            for (j = 1; j < 8; ++j)
            {
                point_double(pre_comp[i][2*j][0],
                    pre_comp[i][2*j][1],
                    pre_comp[i][2*j][2],
                    pre_comp[i][j][0],
                    pre_comp[i][j][1],
                    pre_comp[i][j][2]);
                point_add(pre_comp[i][2*j+1][0],
                    pre_comp[i][2*j+1][1],
                    pre_comp[i][2*j+1][2],
                    pre_comp[i][1][0],
                    pre_comp[i][1][1],
                    pre_comp[i][1][2],
                    pre_comp[i][2*j][0],
                    pre_comp[i][2*j][1],
                    pre_comp[i][2*j][2]);
            }
        }
    }
    /* the scalar for the generator */
    if ((scalar != NULL) && (have_pre_comp))
    {
        memset(g_secret, 0, sizeof g_secret);
        num_bytes = BN_num_bytes(scalar);
        /* reduce scalar to 0 <= scalar < 2^224 */
        if ((num_bytes > sizeof(felem_bytearray)) || (BN_is_negative(scalar)))
        {
            /* this is an unusual input, and we don't guarantee
             * constant-timeness */
            if (!BN_nnmod(tmp_scalar, scalar, &group->order, ctx))
            {
                ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
                goto err;
            }
            num_bytes = BN_bn2bin(tmp_scalar, tmp);
        }
        else
            BN_bn2bin(scalar, tmp);
        flip_endian(g_secret, tmp, num_bytes);
        /* do the multiplication with generator precomputation */
        batch_mul(x_out, y_out, z_out,
            (const felem_bytearray (*)) secrets, num_points,
            g_secret, (const fslice (*)[16][3][4]) pre_comp,
            (const fslice (*)[3][4]) g_pre_comp);
    }
    else
        /* do the multiplication without generator precomputation */
        batch_mul(x_out, y_out, z_out,
            (const felem_bytearray (*)) secrets, num_points,
            NULL, (const fslice (*)[16][3][4]) pre_comp, NULL);
    /* reduce the output to its unique minimal representation */
    felem_contract(x_in, x_out);
    felem_contract(y_in, y_out);
    felem_contract(z_in, z_out);
    if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
        (!felem_to_BN(z, z_in)))
    {
        ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
        goto err;
    }
    ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);
err:
    BN_CTX_end(ctx);
    if (generator != NULL)
        EC_POINT_free(generator);
    if (new_ctx != NULL)
        BN_CTX_free(new_ctx);
    if (secrets != NULL)
        OPENSSL_free(secrets);
    if (pre_comp != NULL)
        OPENSSL_free(pre_comp);
    return ret;
}
int ec_GFp_nistp224_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
{
    int ret = 0;
    NISTP224_PRE_COMP *pre = NULL;
    int i, j;
    BN_CTX *new_ctx = NULL;
    BIGNUM *x, *y;
    EC_POINT *generator = NULL;
    /* throw away old precomputation */
    EC_EX_DATA_free_data(&group->extra_data, nistp224_pre_comp_dup,
        nistp224_pre_comp_free, nistp224_pre_comp_clear_free);
    if (ctx == NULL)
        if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
    BN_CTX_start(ctx);
    if (((x = BN_CTX_get(ctx)) == NULL) ||
        ((y = BN_CTX_get(ctx)) == NULL))
        goto err;
    /* get the generator */
    if (group->generator == NULL) goto err;
    generator = EC_POINT_new(group);
    if (generator == NULL)
        goto err;
    BN_bin2bn(nistp224_curve_params[3], sizeof (felem_bytearray), x);
    BN_bin2bn(nistp224_curve_params[4], sizeof (felem_bytearray), y);
    if (!EC_POINT_set_affine_coordinates_GFp(group, generator, x, y, ctx))
        goto err;
    if ((pre = nistp224_pre_comp_new()) == NULL)
        goto err;
    /* if the generator is the standard one, use built-in precomputation */
    if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
    {
        memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
        ret = 1;
        goto err;
    }
    if ((!BN_to_felem(pre->g_pre_comp[1][0], &group->generator->X)) ||
        (!BN_to_felem(pre->g_pre_comp[1][1], &group->generator->Y)) ||
        (!BN_to_felem(pre->g_pre_comp[1][2], &group->generator->Z)))
        goto err;
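    /* Table layout: index i selects which multiples are summed; bit 0 of i
     * stands for G, bit 1 for 2^56*G, bit 2 for 2^112*G and bit 3 for 2^168*G,
     * so g_pre_comp[i] holds the corresponding subset sum. */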
    /* compute 2^56*G, 2^112*G, 2^168*G */
    for (i = 1; i < 5; ++i)
    {
        point_double(pre->g_pre_comp[2*i][0], pre->g_pre_comp[2*i][1],
            pre->g_pre_comp[2*i][2], pre->g_pre_comp[i][0],
            pre->g_pre_comp[i][1], pre->g_pre_comp[i][2]);
        for (j = 0; j < 55; ++j)
        {
            point_double(pre->g_pre_comp[2*i][0],
                pre->g_pre_comp[2*i][1],
                pre->g_pre_comp[2*i][2],
                pre->g_pre_comp[2*i][0],
                pre->g_pre_comp[2*i][1],
                pre->g_pre_comp[2*i][2]);
        }
    }
    /* g_pre_comp[0] is the point at infinity */
    memset(pre->g_pre_comp[0], 0, sizeof(pre->g_pre_comp[0]));
    /* the remaining multiples */
    /* 2^56*G + 2^112*G */
    point_add(pre->g_pre_comp[6][0], pre->g_pre_comp[6][1],
        pre->g_pre_comp[6][2], pre->g_pre_comp[4][0],
        pre->g_pre_comp[4][1], pre->g_pre_comp[4][2],
        pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
        pre->g_pre_comp[2][2]);
    /* 2^56*G + 2^168*G */
    point_add(pre->g_pre_comp[10][0], pre->g_pre_comp[10][1],
        pre->g_pre_comp[10][2], pre->g_pre_comp[8][0],
        pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
        pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
        pre->g_pre_comp[2][2]);
    /* 2^112*G + 2^168*G */
    point_add(pre->g_pre_comp[12][0], pre->g_pre_comp[12][1],
        pre->g_pre_comp[12][2], pre->g_pre_comp[8][0],
        pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
        pre->g_pre_comp[4][0], pre->g_pre_comp[4][1],
        pre->g_pre_comp[4][2]);
    /* 2^56*G + 2^112*G + 2^168*G */
    point_add(pre->g_pre_comp[14][0], pre->g_pre_comp[14][1],
        pre->g_pre_comp[14][2], pre->g_pre_comp[12][0],
        pre->g_pre_comp[12][1], pre->g_pre_comp[12][2],
        pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
        pre->g_pre_comp[2][2]);
    for (i = 1; i < 8; ++i)
    {
        /* odd multiples: add G */
        point_add(pre->g_pre_comp[2*i+1][0], pre->g_pre_comp[2*i+1][1],
            pre->g_pre_comp[2*i+1][2], pre->g_pre_comp[2*i][0],
            pre->g_pre_comp[2*i][1], pre->g_pre_comp[2*i][2],
            pre->g_pre_comp[1][0], pre->g_pre_comp[1][1],
            pre->g_pre_comp[1][2]);
    }
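    /* hand the table over to the group; once set_data succeeds the group owns
     * pre, so it must not be freed below (hence pre = NULL) */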
    if (!EC_EX_DATA_set_data(&group->extra_data, pre, nistp224_pre_comp_dup,
        nistp224_pre_comp_free, nistp224_pre_comp_clear_free))
        goto err;
    ret = 1;
    pre = NULL;
err:
    BN_CTX_end(ctx);
    if (generator != NULL)
        EC_POINT_free(generator);
    if (new_ctx != NULL)
        BN_CTX_free(new_ctx);
    if (pre)
        nistp224_pre_comp_free(pre);
    return ret;
}
int ec_GFp_nistp224_have_precompute_mult(const EC_GROUP *group)
{
    if (EC_EX_DATA_get_data(group->extra_data, nistp224_pre_comp_dup,
        nistp224_pre_comp_free, nistp224_pre_comp_clear_free)
        != NULL)
        return 1;
    else
        return 0;
}
#endif
#ifdef TESTING
#include <sys/time.h>
static u8 ctoh(char c)
{
    if (c >= '0' && c <= '9') return c-'0';
    if (c >= 'a' && c <= 'f') return c-'a'+10;
    if (c >= 'A' && c <= 'F') return c-'A'+10;
    return 0;
}
static void arg_to_bytearray(felem_bytearray ba, const char *arg)
{
    /* Convert the arg, which is a string like "1a2637c8" to a byte
     * array like 0xc8 0x37 0x26 0x1a. */
    int size = sizeof(felem_bytearray);
    int arglen = strlen(arg);
    int argsize = (arglen+1)/2;
    const char *argp = arg + arglen;
    u8 *bap = ba;
    memset(ba, 0, size);
    if (size < argsize) {
        fprintf(stderr, "Arg too long: %s\n", arg);
        exit(1);
    }
    while (argp > arg+1) {
        argp -= 2;
        *bap = (ctoh(argp[0])<<4)|(ctoh(argp[1]));
        ++bap;
    }
    if (arglen & 1) {
        /* Handle the stray top nybble */
        argp -= 1;
        *bap = ctoh(argp[0]);
    }
}
static void arg_to_coord(coord c, const char *arg)
{
    felem_bytearray ba;
    arg_to_bytearray(ba, arg);
    /* Now convert it to a coord */
    bin21_to_felem(c, ba);
}
int main(int argc, char **argv)
{
    point infinity, P, Q, P2, PQ;
    felem_bytearray s;
    int i;
    struct timeval st, et;
    unsigned long el;
    int niter = 1000;
    memset(infinity, 0, sizeof(infinity));
    memset(P, 0, sizeof(P));
    memset(Q, 0, sizeof(Q));
    if (argc != 6) {
        fprintf(stderr, "Usage: %s Px Py Qx Qy s\n", argv[0]);
        exit(1);
    }
    arg_to_coord(P[0], argv[1]);
    arg_to_coord(P[1], argv[2]);
    P[2][0] = 1;
    dump_point("P", P);
    arg_to_coord(Q[0], argv[3]);
    arg_to_coord(Q[1], argv[4]);
    Q[2][0] = 1;
    dump_point("Q", Q);
    arg_to_bytearray(s, argv[5]);
    point_double(P2[0], P2[1], P2[2], P[0], P[1], P[2]);
    affine(P2);
    point_add(PQ[0], PQ[1], PQ[2], P[0], P[1], P[2], Q[0], Q[1], Q[2]);
    affine(PQ);
    dump_point("P2", P2);
    dump_point("PQ", PQ);
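    /* Timing loop: repeatedly multiply by s, reusing the result as the next
     * input point, then report the average cost per point_mul in microseconds. */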
    gettimeofday(&st, NULL);
    for (i=0;i<niter;++i) {
        point_mul(P, P, s);
        affine(P);
    }
    gettimeofday(&et, NULL);
    el = (et.tv_sec-st.tv_sec)*1000000 + (et.tv_usec-st.tv_usec);
    fprintf(stderr, "%lu / %d = %lu us\n", el, niter, el/niter);
    dump_point("Ps", P);
    return 0;
}
#endif
/* Figure out whether there's a point with x-coordinate x on the main
 * curve. If not, then there's one on the twist curve. (There are
 * actually two, which are negatives of each other; that doesn't
 * matter.) Multiply that point by seckey and set out to the
 * x-coordinate of the result. */
void ptwist_pointmul(byte out[PTWIST_BYTES], const byte x[PTWIST_BYTES],
    const byte seckey[PTWIST_BYTES])
{
    /* Compute z = x^3 + a*x + b */
    point P, Q;
    coord z, r2, Qx;
    uint128_t tmp[5];
    int ontwist;
    static const coord three = { 3, 0, 0 };
    static const coord b =
        { 0x46d320e01dc7d6, 0x486ebc69bad316, 0x4e355e95cafedd };
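    /* The curve has a = -3, so x^3 + a*x + b is evaluated below as
     * x^3 - 3*x + b; "three" and "b" are the constants used for that. */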
    /* Convert the byte array to a coord */
    bin21_to_felem(P[0], x);
    /* Compute z = x^3 - 3*x + b */
    felem_square(tmp, P[0]); felem_reduce(z, tmp);
    felem_diff64(z, three);
    felem_mul(tmp, z, P[0]); felem_reduce(z, tmp);
    felem_sum64(z, b);
    /*
    dump_coord("z", z);
    */
    /* Compute r = P[1] = z ^ ((p+1)/4). This will be a square root of
     * z, if one exists. */
    felem_sqrt(P[1], z);
    /*
    dump_coord("r", P[1]);
    */
    /* Is P[1] a square root of z? */
    felem_square(tmp, P[1]); felem_diff_128_64(tmp, z); felem_reduce(r2, tmp);
    if (felem_is_zero(r2)) {
        /* P(x,r) is on the curve */
        ontwist = 0;
    } else {
        /* (-x, r) is on the twist */
        ontwist = 1;
        felem_neg(P[0], P[0]);
    }
    /*
    fprintf(stderr, "ontwist = %d\n", ontwist);
    */
    memset(P[2], 0, sizeof(coord));
    P[2][0] = 1;
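    /* Z = 1: P is an affine point written in projective coordinates */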
    /* All set. Now do the point multiplication. */
    /*
    dump_point("P", P);
    for(i=0;i<21;++i) {
        fprintf(stderr, "%02x", seckey[20-i]);
    }
    fprintf(stderr, "\n");
    */
    point_mul(Q, P, seckey);
    affine_x(Qx, Q);
    /*
    dump_point("Q", Q);
    */
    /* Get the x-coordinate of the result, and negate it if we're on the
     * twist. */
    if (ontwist) {
        felem_neg(Qx, Qx);
    }
    /* Convert back to bytes */
    felem_to_bin21(out, Qx);
    /*
    fprintf(stderr, "out: ");
    for(i=0;i<21;++i) {
        fprintf(stderr, "%02x", out[i]);
    }
    fprintf(stderr, "\n");
    */
}