ptwist168.c

  1. #include "ptwist.h"
  2. /* ptwist168.c by Ian Goldberg. Based on: */
  3. /* crypto/ec/ecp_nistp224.c */
  4. /*
  5. * Written by Emilia Kasper (Google) for the OpenSSL project.
  6. */
  7. /* ====================================================================
  8. * Copyright (c) 2000-2010 The OpenSSL Project. All rights reserved.
  9. *
  10. * Redistribution and use in source and binary forms, with or without
  11. * modification, are permitted provided that the following conditions
  12. * are met:
  13. *
  14. * 1. Redistributions of source code must retain the above copyright
  15. * notice, this list of conditions and the following disclaimer.
  16. *
  17. * 2. Redistributions in binary form must reproduce the above copyright
  18. * notice, this list of conditions and the following disclaimer in
  19. * the documentation and/or other materials provided with the
  20. * distribution.
  21. *
  22. * 3. All advertising materials mentioning features or use of this
  23. * software must display the following acknowledgment:
  24. * "This product includes software developed by the OpenSSL Project
  25. * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
  26. *
  27. * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
  28. * endorse or promote products derived from this software without
  29. * prior written permission. For written permission, please contact
  30. * licensing@OpenSSL.org.
  31. *
  32. * 5. Products derived from this software may not be called "OpenSSL"
  33. * nor may "OpenSSL" appear in their names without prior written
  34. * permission of the OpenSSL Project.
  35. *
  36. * 6. Redistributions of any form whatsoever must retain the following
  37. * acknowledgment:
  38. * "This product includes software developed by the OpenSSL Project
  39. * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
  40. *
  41. * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
  42. * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  43. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  44. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
  45. * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  46. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  47. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  48. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  49. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  50. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  51. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  52. * OF THE POSSIBILITY OF SUCH DAMAGE.
  53. * ====================================================================
  54. *
  55. * This product includes cryptographic software written by Eric Young
  56. * (eay@cryptsoft.com). This product includes software written by Tim
  57. * Hudson (tjh@cryptsoft.com).
  58. *
  59. */
  60. /*
  61. * A 64-bit implementation of the NIST P-224 elliptic curve point multiplication
  62. *
  63. * Inspired by Daniel J. Bernstein's public domain nistp224 implementation
  64. * and Adam Langley's public domain 64-bit C implementation of curve25519
  65. */
  66. #include <stdint.h>
  67. #include <string.h>
  68. #if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
  69. /* even with gcc, the typedef won't work for 32-bit platforms */
  70. typedef __uint128_t uint128_t; /* nonstandard; implemented by gcc on 64-bit platforms */
  71. #else
  72. #error "Need GCC 3.1 or later to define type uint128_t"
  73. #endif
  74. typedef uint8_t u8;
  75. /******************************************************************************/
  76. /* INTERNAL REPRESENTATION OF FIELD ELEMENTS
  77. *
  78. * Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2
  79. * where each slice a_i is a 64-bit word, i.e., a field element is an fslice
  80. * array a with 3 elements, where a[i] = a_i.
  81. * Outputs from multiplications are represented as unreduced polynomials
  82. * b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4
  83. * where each b_i is a 128-bit word. We ensure that inputs to each field
  84. * multiplication satisfy a_i < 2^60, so outputs satisfy b_i < 4*2^60*2^60,
  85. * and fit into a 128-bit word without overflow. The coefficients are then
  86. * again partially reduced to a_i < 2^57. We only reduce to the unique minimal
  87. * representation at the end of the computation.
  88. *
  89. */
  90. typedef uint64_t fslice;
  91. typedef fslice coord[3];
  92. typedef coord point[3];
  93. #include <stdio.h>
  94. #include <stdlib.h>
  95. /*
  96. static void dump_coord(const char *label, const coord c)
  97. {
  98. if (label) fprintf(stderr, "%s: ", label);
  99. printf("%016lx %016lx %016lx\n", c[2], c[1], c[0]);
  100. }
  101. static void dump_point(const char *label, point p)
  102. {
  103. if (label) fprintf(stderr, "%s:\n", label);
  104. dump_coord(" x", p[0]);
  105. dump_coord(" y", p[1]);
  106. dump_coord(" z", p[2]);
  107. }
  108. */
  109. /* Field element represented as a byte array.
  110. * 21*8 = 168 bits is also the group order size for the elliptic curve. */
  111. typedef u8 felem_bytearray[21];
  112. static const felem_bytearray ptwist168_curve_params[5] = {
  113. {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, /* p */
  114. 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
  115. 0xFF},
  116. {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, /* a */
  117. 0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFF,0xFE,
  118. 0xFC},
  119. {0x4E,0x35,0x5E,0x95,0xCA,0xFE,0xDD,0x48,0x6E,0xBC, /* b */
  120. 0x69,0xBA,0xD3,0x16,0x46,0xD3,0x20,0xE0,0x1D,0xC7,
  121. 0xD6},
  122. {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* x */
  123. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  124. 0x02},
  125. {0xEA,0x67,0x47,0xB7,0x5A,0xF8,0xC7,0xF9,0x3C,0x1F, /* y */
  126. 0x5E,0x6D,0x32,0x0F,0x88,0xB9,0xBE,0x15,0x66,0xD2,
  127. 0xF2}
  128. };
  129. /* Helper functions to convert field elements to/from internal representation */
  130. static void bin21_to_felem(fslice out[3], const u8 in[21])
  131. {
  132. out[0] = *((const uint64_t *)(in)) & 0x00ffffffffffffff;
  133. out[1] = (*((const uint64_t *)(in+7))) & 0x00ffffffffffffff;
  134. out[2] = (*((const uint64_t *)(in+14))) & 0x00ffffffffffffff;
  135. }
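/* Editorial note (not part of the original code): the casts above perform
 * potentially unaligned 8-byte loads, and the load at in+14 reads one byte
 * past the end of the 21-byte array (the stray byte is masked off, so the
 * value is unaffected).  A bounds-safe variant using memcpy, assuming the
 * same little-endian layout, might look like this sketch: */
#if 0
static void bin21_to_felem_safe(fslice out[3], const u8 in[21])
{
	uint64_t w;
	u8 buf[8] = {0};
	memcpy(&w, in, 8);      out[0] = w & 0x00ffffffffffffff; /* bytes 0..6   */
	memcpy(&w, in + 7, 8);  out[1] = w & 0x00ffffffffffffff; /* bytes 7..13  */
	memcpy(buf, in + 14, 7);                                 /* bytes 14..20 */
	memcpy(&w, buf, 8);
	out[2] = w & 0x00ffffffffffffff;
}
#endif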
  136. static void felem_to_bin21(u8 out[21], const fslice in[3])
  137. {
  138. unsigned i;
  139. for (i = 0; i < 7; ++i)
  140. {
  141. out[i] = in[0]>>(8*i);
  142. out[i+7] = in[1]>>(8*i);
  143. out[i+14] = in[2]>>(8*i);
  144. }
  145. }
  146. #if 0
  147. /* To preserve endianness when using BN_bn2bin and BN_bin2bn */
  148. static void flip_endian(u8 *out, const u8 *in, unsigned len)
  149. {
  150. unsigned i;
  151. for (i = 0; i < len; ++i)
  152. out[i] = in[len-1-i];
  153. }
  154. #endif
  155. /******************************************************************************/
  156. /* FIELD OPERATIONS
  157. *
  158. * Field operations, using the internal representation of field elements.
  159. * NB! These operations are specific to our point multiplication and cannot be
  160. * expected to be correct in general - e.g., multiplication with a large scalar
  161. * will cause an overflow.
  162. *
  163. */
  164. /* Sum two field elements: out += in */
  165. static void felem_sum64(fslice out[3], const fslice in[3])
  166. {
  167. out[0] += in[0];
  168. out[1] += in[1];
  169. out[2] += in[2];
  170. }
  171. /* Subtract field elements: out -= in */
  172. /* Assumes in[i] < 2^57 */
  173. static void felem_diff64(fslice out[3], const fslice in[3])
  174. {
  175. /* a = 3*2^56 - 3 */
  176. /* b = 3*2^56 - 3*257 */
  177. static const uint64_t a = (((uint64_t) 3) << 56) - ((uint64_t) 3);
  178. static const uint64_t b = (((uint64_t) 3) << 56) - ((uint64_t) 771);
  179. /* Add 0 mod 2^168-2^8-1 to ensure out > in at each element */
  180. /* a*2^112 + a*2^56 + b = 3*p */
  181. out[0] += b;
  182. out[1] += a;
  183. out[2] += a;
  184. out[0] -= in[0];
  185. out[1] -= in[1];
  186. out[2] -= in[2];
  187. }
  188. /* Subtract in unreduced 128-bit mode: out128 -= in128 */
  189. /* Assumes in[i] < 2^119 */
  190. static void felem_diff128(uint128_t out[5], const uint128_t in[5])
  191. {
  192. /* a = 3*2^118 - 192
  193. b = 3*2^118 - 49536
  194. c = 3*2^118
  195. d = 3*2^118 - 12681408
  196. a*2^224 + a*2^168 + b*2^112 + c*2^56 + d
  197. = (3*2^174 + 3*2^118 + 49344)*p
  198. */
  199. static const uint128_t a = (((uint128_t)3) << 118) - ((uint128_t) 192);
  200. static const uint128_t b = (((uint128_t)3) << 118) - ((uint128_t) 49536);
  201. static const uint128_t c = (((uint128_t)3) << 118);
  202. static const uint128_t d = (((uint128_t)3) << 118) - ((uint128_t) 12681408);
  203. /* Add 0 mod 2^168-2^8-1 to ensure out > in */
  204. out[0] += d;
  205. out[1] += c;
  206. out[2] += b;
  207. out[3] += a;
  208. out[4] += a;
  209. out[0] -= in[0];
  210. out[1] -= in[1];
  211. out[2] -= in[2];
  212. out[3] -= in[3];
  213. out[4] -= in[4];
  214. }
  215. /* Subtract in mixed mode: out128 -= in64 */
  216. /* in[i] < 2^63 */
  217. static void felem_diff_128_64(uint128_t out[5], const fslice in[3])
  218. {
  219. /* a = 3*2^62 - 192
  220. b = 3*2^62 - 49344
  221. a*2^112 + a*2^56 + b = 192*p
  222. */
  223. static const uint128_t a = (((uint128_t) 3) << 62) - ((uint128_t) 192);
  224. static const uint128_t b = (((uint128_t) 3) << 62) - ((uint128_t) 49344);
  225. /* Add 0 mod 2^168-2^8-1 to ensure out > in */
  226. out[0] += b;
  227. out[1] += a;
  228. out[2] += a;
  229. out[0] -= in[0];
  230. out[1] -= in[1];
  231. out[2] -= in[2];
  232. }
  233. /* Multiply a field element by a scalar: out64 = out64 * scalar
  234. * The scalars we actually use are small, so results fit without overflow */
  235. static void felem_scalar64(fslice out[3], const fslice scalar)
  236. {
  237. out[0] *= scalar;
  238. out[1] *= scalar;
  239. out[2] *= scalar;
  240. }
  241. /* Multiply an unreduced field element by a scalar: out128 = out128 * scalar
  242. * The scalars we actually use are small, so results fit without overflow */
  243. static void felem_scalar128(uint128_t out[5], const uint128_t scalar)
  244. {
  245. out[0] *= scalar;
  246. out[1] *= scalar;
  247. out[2] *= scalar;
  248. out[3] *= scalar;
  249. out[4] *= scalar;
  250. }
  251. /* Square a field element: out = in^2 */
  252. static void felem_square(uint128_t out[5], const fslice in[3])
  253. {
  254. out[0] = ((uint128_t) in[0]) * in[0];
  255. out[1] = ((uint128_t) in[0]) * in[1] * 2;
  256. out[2] = ((uint128_t) in[0]) * in[2] * 2 + ((uint128_t) in[1]) * in[1];
  257. out[3] = ((uint128_t) in[1]) * in[2] * 2;
  258. out[4] = ((uint128_t) in[2]) * in[2];
  259. }
  260. /* Multiply two field elements: out = in1 * in2 */
  261. static void felem_mul(uint128_t out[5], const fslice in1[3], const fslice in2[3])
  262. {
  263. out[0] = ((uint128_t) in1[0]) * in2[0];
  264. out[1] = ((uint128_t) in1[0]) * in2[1] + ((uint128_t) in1[1]) * in2[0];
  265. out[2] = ((uint128_t) in1[0]) * in2[2] + ((uint128_t) in1[1]) * in2[1] +
  266. ((uint128_t) in1[2]) * in2[0];
  267. out[3] = ((uint128_t) in1[1]) * in2[2] +
  268. ((uint128_t) in1[2]) * in2[1];
  269. out[4] = ((uint128_t) in1[2]) * in2[2];
  270. }
  271. #define M257(x) (((x)<<8)+(x))
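/* Editorial note: p = 2^168 - 2^8 - 1, so 2^168 = 2^8 + 1 = 257 (mod p).
 * Any contribution of weight 2^168 or above can therefore be folded back
 * into the low limbs by multiplying it by 257, which is what M257 does. */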
  272. /* Reduce 128-bit coefficients to 64-bit coefficients. Requires in[i] < 2^126,
  273. * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^57 */
  274. static void felem_reduce(fslice out[3], const uint128_t in[5])
  275. {
  276. static const uint128_t two56m1 = (((uint128_t) 1)<<56) -
  277. ((uint128_t)1);
  278. uint128_t output[3];
  279. output[0] = in[0]; /* < 2^126 */
  280. output[1] = in[1]; /* < 2^126 */
  281. output[2] = in[2]; /* < 2^126 */
  282. /* Eliminate in[3], in[4] */
  283. output[2] += M257(in[4] >> 56); /* < 2^126 + 2^79 */
  284. output[1] += M257(in[4] & two56m1); /* < 2^126 + 2^65 */
  285. output[1] += M257(in[3] >> 56); /* < 2^126 + 2^65 + 2^79 */
  286. output[0] += M257(in[3] & two56m1); /* < 2^126 + 2^65 */
  287. /* Eliminate the top part of output[2] */
  288. output[0] += M257(output[2] >> 56); /* < 2^126 + 2^65 + 2^79 */
  289. output[2] &= two56m1; /* < 2^56 */
  290. /* Carry 0 -> 1 -> 2 */
  291. output[1] += output[0] >> 56; /* < 2^126 + 2^71 */
  292. output[0] &= two56m1; /* < 2^56 */
  293. output[2] += output[1] >> 56; /* < 2^71 */
  294. output[1] &= two56m1; /* < 2^56 */
  295. /* Eliminate the top part of output[2] */
  296. output[0] += M257(output[2] >> 56); /* < 2^57 */
  297. output[2] &= two56m1; /* < 2^56 */
  298. /* Carry 0 -> 1 -> 2 */
  299. output[1] += output[0] >> 56; /* <= 2^56 */
  300. out[0] = output[0] & two56m1; /* < 2^56 */
  301. out[2] = output[2] + (output[1] >> 56); /* <= 2^56 */
  302. out[1] = output[1] & two56m1; /* < 2^56 */
  303. }
  304. /* Reduce to unique minimal representation */
  305. static void felem_contract(fslice out[3], const fslice in[3])
  306. {
  307. static const uint64_t two56m1 = (((uint64_t) 1)<<56) -
  308. ((uint64_t)1);
  309. static const uint64_t two56m257 = (((uint64_t) 1)<<56) -
  310. ((uint64_t)257);
  311. uint64_t a;
  312. /* in[0] < 2^56, in[1] < 2^56, in[2] <= 2^56 */
  313. /* so in < 2*p for sure */
  314. /* Eliminate the top part of in[2] */
  315. out[0] = in[0] + M257(in[2] >> 56); /* < 2^57 */
  316. out[2] = in[2] & two56m1; /* < 2^56, but if out[0] >= 2^56
  317. then out[2] now = 0 */
  318. /* Carry 0 -> 1 -> 2 */
  319. out[1] = in[1] + (out[0] >> 56); /* < 2^56 + 2, but if
  320. out[1] >= 2^56 then
  321. out[2] = 0 */
  322. out[0] &= two56m1; /* < 2^56 */
  323. out[2] += out[1] >> 56; /* < 2^56 due to the above */
  324. out[1] &= two56m1; /* < 2^56 */
  325. /* Now out < 2^168, but it could still be > p */
  326. a = ((out[2] == two56m1) & (out[1] == two56m1) & (out[0] >= two56m257));
  327. out[2] -= two56m1*a;
  328. out[1] -= two56m1*a;
  329. out[0] -= two56m257*a;
  330. }
  331. /* Negate a field element: out = -in */
  332. /* Assumes in[i] < 2^57 */
  333. static void felem_neg(fslice out[3], const fslice in[3])
  334. {
  335. /* a = 3*2^56 - 3 */
  336. /* b = 3*2^56 - 3*257 */
  337. static const uint64_t a = (((uint64_t) 3) << 56) - ((uint64_t) 3);
  338. static const uint64_t b = (((uint64_t) 3) << 56) - ((uint64_t) 771);
  339. static const uint64_t two56m1 = (((uint64_t) 1) << 56) - ((uint64_t) 1);
  340. fslice tmp[3];
  341. /* Add 0 mod 2^168-2^8-1 to ensure out > in at each element */
  342. /* a*2^112 + a*2^56 + b = 3*p */
  343. tmp[0] = b - in[0];
  344. tmp[1] = a - in[1];
  345. tmp[2] = a - in[2];
  346. /* Carry 0 -> 1 -> 2 */
  347. tmp[1] += tmp[0] >> 56;
  348. tmp[0] &= two56m1; /* < 2^56 */
  349. tmp[2] += tmp[1] >> 56; /* < 2^71 */
  350. tmp[1] &= two56m1; /* < 2^56 */
  351. felem_contract(out, tmp);
  352. }
  353. /* Zero-check: returns 1 if input is 0, and 0 otherwise.
  354. * We know that field elements are reduced to in < 2^169,
  355. * so we only need to check three cases: 0, 2^168 - 2^8 - 1,
  356. * and 2^169 - 2^9 - 2 */
  357. static fslice felem_is_zero(const fslice in[3])
  358. {
  359. fslice zero, two168m8m1, two169m9m2;
  360. static const uint64_t two56m1 = (((uint64_t) 1)<<56) -
  361. ((uint64_t)1);
  362. static const uint64_t two56m257 = (((uint64_t) 1)<<56) -
  363. ((uint64_t)257);
  364. static const uint64_t two57m1 = (((uint64_t) 1)<<57) -
  365. ((uint64_t)1);
  366. static const uint64_t two56m514 = (((uint64_t) 1)<<56) -
  367. ((uint64_t)514);
  368. zero = (in[0] == 0) & (in[1] == 0) & (in[2] == 0);
  369. two168m8m1 = (in[2] == two56m1) & (in[1] == two56m1) &
  370. (in[0] == two56m257);
  371. two169m9m2 = (in[2] == two57m1) & (in[1] == two56m1) &
  372. (in[0] == two56m514);
  373. return (zero | two168m8m1 | two169m9m2);
  374. }
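/* Editorial note: the two nonzero patterns tested above are exactly
 * p = 2^168 - 2^8 - 1 and 2*p = 2^169 - 2^9 - 2, the only other encodings of
 * zero that a value below 2^169 can take under this representation. */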
  375. /* Invert a field element */
  376. static void felem_inv(fslice out[3], const fslice in[3])
  377. {
  378. fslice ftmp[3], ftmp2[3], ftmp3[3], ftmp4[3];
  379. uint128_t tmp[5];
  380. unsigned i;
  381. felem_square(tmp, in); felem_reduce(ftmp, tmp); /* 2 */
  382. felem_mul(tmp, in, ftmp); felem_reduce(ftmp, tmp); /* 2^2 - 1 */
  383. /* = ftmp */
  384. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^3 - 2 */
  385. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^4 - 2^2 */
  386. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp2, tmp); /* 2^4 - 1 */
  387. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^5 - 2 */
  388. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^6 - 2^2 */
  389. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^6 - 1 */
  390. /* = ftmp */
  391. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^7 - 2 */
  392. for (i = 0; i < 5; ++i) /* 2^12 - 2^6 */
  393. {
  394. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  395. }
  396. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp3, tmp); /* 2^12 - 1 */
  397. /* = ftmp3 */
  398. felem_square(tmp, ftmp3); felem_reduce(ftmp2, tmp); /* 2^13 - 2 */
  399. for (i = 0; i < 11; ++i) /* 2^24 - 2^12 */
  400. {
  401. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  402. }
  403. felem_mul(tmp, ftmp2, ftmp3); felem_reduce(ftmp3, tmp); /* 2^24 - 1 */
  404. /* = ftmp3 */
  405. felem_square(tmp, ftmp3); felem_reduce(ftmp2, tmp); /* 2^25 - 2 */
  406. for (i = 0; i < 23; ++i) /* 2^48 - 2^24 */
  407. {
  408. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  409. }
  410. felem_mul(tmp, ftmp2, ftmp3); felem_reduce(ftmp4, tmp); /* 2^48 - 1 */
  411. /* = ftmp4 */
  412. felem_square(tmp, ftmp4); felem_reduce(ftmp2, tmp); /* 2^49 - 2 */
  413. for (i = 0; i < 23; ++i) /* 2^72 - 2^24 */
  414. {
  415. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  416. }
  417. felem_mul(tmp, ftmp2, ftmp3); felem_reduce(ftmp4, tmp); /* 2^72 - 1 */
  418. /* = ftmp4 */
  419. felem_square(tmp, ftmp4); felem_reduce(ftmp2, tmp); /* 2^73 - 2 */
  420. for (i = 0; i < 5; ++i) /* 2^78 - 2^6 */
  421. {
  422. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  423. }
  424. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^78 - 1 */
  425. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^79 - 2 */
  426. felem_mul(tmp, in, ftmp2); felem_reduce(ftmp4, tmp); /* 2^79 - 1 */
  427. /* = ftmp4 */
  428. felem_square(tmp, ftmp4); felem_reduce(ftmp2, tmp); /* 2^80 - 2 */
  429. for (i = 0; i < 78; ++i) /* 2^158 - 2^79 */
  430. {
  431. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  432. }
  433. felem_mul(tmp, ftmp4, ftmp2); felem_reduce(ftmp2, tmp); /* 2^158 - 1 */
  434. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^159 - 2 */
  435. felem_mul(tmp, in, ftmp2); felem_reduce(ftmp2, tmp); /* 2^159 - 1 */
  436. for (i = 0; i < 7; ++i) /* 2^166 - 2^7 */
  437. {
  438. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  439. }
  440. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^166 - 2^6 - 1 */
  441. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^167 - 2^7 - 2 */
  442. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^168 - 2^8 - 4 */
  443. felem_mul(tmp, in, ftmp2); felem_reduce(out, tmp); /* 2^168 - 2^8 - 3 */
  444. /* = out */
  445. }
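/* Editorial note: by Fermat's little theorem the chain above computes
 * in^(p-2) = in^(2^168 - 2^8 - 3), i.e. the inverse of a nonzero element.
 * A minimal sanity check, assuming the input is already partially reduced as
 * the multiplication routines require, could look like this sketch: */
#if 0
static int felem_inv_check(const fslice in[3])
{
	fslice inv[3], prod[3];
	uint128_t tmp[5];
	felem_inv(inv, in);
	felem_mul(tmp, in, inv);        /* in * in^-1 */
	felem_reduce(prod, tmp);
	felem_contract(prod, prod);     /* unique minimal representation */
	return (prod[0] == 1) && (prod[1] == 0) && (prod[2] == 0);
}
#endif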
  446. /* Take the square root of a field element */
  447. static void felem_sqrt(fslice out[3], const fslice in[3])
  448. {
  449. fslice ftmp[3], ftmp2[3];
  450. uint128_t tmp[5];
  451. unsigned i;
  452. felem_square(tmp, in); felem_reduce(ftmp, tmp); /* 2 */
  453. felem_mul(tmp, in, ftmp); felem_reduce(ftmp, tmp); /* 2^2 - 1 */
  454. /* = ftmp */
  455. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^3 - 2 */
  456. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^4 - 2^2 */
  457. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp2, tmp); /* 2^4 - 1 */
  458. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^5 - 2 */
  459. felem_mul(tmp, ftmp2, in); felem_reduce(ftmp, tmp); /* 2^5 - 1 */
  460. /* = ftmp */
  461. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^6 - 2 */
  462. for (i = 0; i < 4; ++i) /* 2^10 - 2^5 */
  463. {
  464. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  465. }
  466. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp, tmp); /* 2^10 - 1 */
  467. /* = ftmp */
  468. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^11 - 2 */
  469. for (i = 0; i < 9; ++i) /* 2^20 - 2^10 */
  470. {
  471. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  472. }
  473. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^20 - 1 */
  474. /* = ftmp */
  475. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^21 - 2 */
  476. for (i = 0; i < 19; ++i) /* 2^40 - 2^20 */
  477. {
  478. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  479. }
  480. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^40 - 1 */
  481. /* = ftmp */
  482. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^41 - 2 */
  483. for (i = 0; i < 39; ++i) /* 2^80 - 2^40 */
  484. {
  485. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  486. }
  487. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^80 - 1 */
  488. /* = ftmp */
  489. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^81 - 2 */
  490. for (i = 0; i < 79; ++i) /* 2^160 - 2^80 */
  491. {
  492. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  493. }
  494. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^160 - 1 */
  495. for (i = 0; i < 5; ++i) /* 2^165 - 2^5 */
  496. {
  497. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  498. }
  499. felem_square(tmp, ftmp2); felem_reduce(out, tmp); /* 2^166 - 2^6 */
  500. /* = out */
  501. }
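/* Editorial note: p = 3 (mod 4), so a square root of a quadratic residue x is
 * x^((p+1)/4), and (p+1)/4 = (2^168 - 2^8)/4 = 2^166 - 2^6, the exponent
 * reached in the final step above.  If the input is not a square, the output
 * squares to -in instead, so callers that care should verify out^2 == in. */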
  502. /* Copy in constant time:
  503. * if icopy == 1, copy in to out,
  504. * if icopy == 0, copy out to itself. */
  505. static void
  506. copy_conditional(fslice *out, const fslice *in, unsigned len, fslice icopy)
  507. {
  508. unsigned i;
  509. /* icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one */
  510. const fslice copy = -icopy;
  511. for (i = 0; i < len; ++i)
  512. {
  513. const fslice tmp = copy & (in[i] ^ out[i]);
  514. out[i] ^= tmp;
  515. }
  516. }
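/* Editorial note: -icopy turns the 0/1 flag into an all-zero or all-one
 * 64-bit mask, so tmp is either 0 or in[i]^out[i]; out[i] ^= tmp therefore
 * leaves out[i] alone or overwrites it with in[i], with no data-dependent
 * branches.  select_conditional below uses the same masking trick. */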
  517. /* Copy in constant time:
  518. * if isel == 1, copy in2 to out,
  519. * if isel == 0, copy in1 to out. */
  520. static void select_conditional(fslice *out, const fslice *in1, const fslice *in2,
  521. unsigned len, fslice isel)
  522. {
  523. unsigned i;
  524. /* isel is a (64-bit) 0 or 1, so sel is either all-zero or all-one */
  525. const fslice sel = -isel;
  526. for (i = 0; i < len; ++i)
  527. {
  528. const fslice tmp = sel & (in1[i] ^ in2[i]);
  529. out[i] = in1[i] ^ tmp;
  530. }
  531. }
  532. /******************************************************************************/
  533. /* ELLIPTIC CURVE POINT OPERATIONS
  534. *
  535. * Points are represented in Jacobian projective coordinates:
  536. * (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3),
  537. * or to the point at infinity if Z == 0.
  538. *
  539. */
  540. /* Double an elliptic curve point:
  541. * (X', Y', Z') = 2 * (X, Y, Z), where
  542. * X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2
  543. * Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^2
  544. * Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z
  545. * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
  546. * while x_out == y_in is not (maybe this works, but it's not tested). */
  547. static void
  548. point_double(fslice x_out[3], fslice y_out[3], fslice z_out[3],
  549. const fslice x_in[3], const fslice y_in[3], const fslice z_in[3])
  550. {
  551. uint128_t tmp[5], tmp2[5];
  552. fslice delta[3];
  553. fslice gamma[3];
  554. fslice beta[3];
  555. fslice alpha[3];
  556. fslice ftmp[3], ftmp2[3];
  557. memcpy(ftmp, x_in, 3 * sizeof(fslice));
  558. memcpy(ftmp2, x_in, 3 * sizeof(fslice));
  559. /* delta = z^2 */
  560. felem_square(tmp, z_in);
  561. felem_reduce(delta, tmp);
  562. /* gamma = y^2 */
  563. felem_square(tmp, y_in);
  564. felem_reduce(gamma, tmp);
  565. /* beta = x*gamma */
  566. felem_mul(tmp, x_in, gamma);
  567. felem_reduce(beta, tmp);
  568. /* alpha = 3*(x-delta)*(x+delta) */
  569. felem_diff64(ftmp, delta);
  570. /* ftmp[i] < 2^57 + 2^58 + 2 < 2^59 */
  571. felem_sum64(ftmp2, delta);
  572. /* ftmp2[i] < 2^57 + 2^57 = 2^58 */
  573. felem_scalar64(ftmp2, 3);
  574. /* ftmp2[i] < 3 * 2^58 < 2^60 */
  575. felem_mul(tmp, ftmp, ftmp2);
  576. /* tmp[i] < 2^60 * 2^59 * 4 = 2^121 */
  577. felem_reduce(alpha, tmp);
  578. /* x' = alpha^2 - 8*beta */
  579. felem_square(tmp, alpha);
  580. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  581. memcpy(ftmp, beta, 3 * sizeof(fslice));
  582. felem_scalar64(ftmp, 8);
  583. /* ftmp[i] < 8 * 2^57 = 2^60 */
  584. felem_diff_128_64(tmp, ftmp);
  585. /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
  586. felem_reduce(x_out, tmp);
  587. /* z' = (y + z)^2 - gamma - delta */
  588. felem_sum64(delta, gamma);
  589. /* delta[i] < 2^57 + 2^57 = 2^58 */
  590. memcpy(ftmp, y_in, 3 * sizeof(fslice));
  591. felem_sum64(ftmp, z_in);
  592. /* ftmp[i] < 2^57 + 2^57 = 2^58 */
  593. felem_square(tmp, ftmp);
  594. /* tmp[i] < 4 * 2^58 * 2^58 = 2^118 */
  595. felem_diff_128_64(tmp, delta);
  596. /* tmp[i] < 2^118 + 2^64 + 8 < 2^119 */
  597. felem_reduce(z_out, tmp);
  598. /* y' = alpha*(4*beta - x') - 8*gamma^2 */
  599. felem_scalar64(beta, 4);
  600. /* beta[i] < 4 * 2^57 = 2^59 */
  601. felem_diff64(beta, x_out);
  602. /* beta[i] < 2^59 + 2^58 + 2 < 2^60 */
  603. felem_mul(tmp, alpha, beta);
  604. /* tmp[i] < 4 * 2^57 * 2^60 = 2^119 */
  605. felem_square(tmp2, gamma);
  606. /* tmp2[i] < 4 * 2^57 * 2^57 = 2^116 */
  607. felem_scalar128(tmp2, 8);
  608. /* tmp2[i] < 8 * 2^116 = 2^119 */
  609. felem_diff128(tmp, tmp2);
  610. /* tmp[i] < 2^119 + 2^120 < 2^121 */
  611. felem_reduce(y_out, tmp);
  612. }
  613. /* Add two elliptic curve points:
  614. * (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where
  615. * X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 -
  616. * 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2
  617. * Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 - X_3) -
  618. * Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3
  619. * Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2) */
  620. /* This function is not entirely constant-time:
  621. * it includes a branch for checking whether the two input points are equal
  622. * (and neither is the point at infinity).
  623. * This case never happens during single point multiplication,
  624. * so there is no timing leak for ECDH or ECDSA signing. */
  625. static void point_add(fslice x3[3], fslice y3[3], fslice z3[3],
  626. const fslice x1[3], const fslice y1[3], const fslice z1[3],
  627. const fslice x2[3], const fslice y2[3], const fslice z2[3])
  628. {
  629. fslice ftmp[3], ftmp2[3], ftmp3[3], ftmp4[3], ftmp5[3];
  630. fslice xout[3], yout[3], zout[3];
  631. uint128_t tmp[5], tmp2[5];
  632. fslice z1_is_zero, z2_is_zero, x_equal, y_equal;
  633. /* ftmp = z1^2 */
  634. felem_square(tmp, z1);
  635. felem_reduce(ftmp, tmp);
  636. /* ftmp2 = z2^2 */
  637. felem_square(tmp, z2);
  638. felem_reduce(ftmp2, tmp);
  639. /* ftmp3 = z1^3 */
  640. felem_mul(tmp, ftmp, z1);
  641. felem_reduce(ftmp3, tmp);
  642. /* ftmp4 = z2^3 */
  643. felem_mul(tmp, ftmp2, z2);
  644. felem_reduce(ftmp4, tmp);
  645. /* ftmp3 = z1^3*y2 */
  646. felem_mul(tmp, ftmp3, y2);
  647. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  648. /* ftmp4 = z2^3*y1 */
  649. felem_mul(tmp2, ftmp4, y1);
  650. felem_reduce(ftmp4, tmp2);
  651. /* ftmp3 = z1^3*y2 - z2^3*y1 */
  652. felem_diff_128_64(tmp, ftmp4);
  653. /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
  654. felem_reduce(ftmp3, tmp);
  655. /* ftmp = z1^2*x2 */
  656. felem_mul(tmp, ftmp, x2);
  657. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  658. /* ftmp2 = z2^2*x1 */
  659. felem_mul(tmp2, ftmp2, x1);
  660. felem_reduce(ftmp2, tmp2);
  661. /* ftmp = z1^2*x2 - z2^2*x1 */
  662. felem_diff128(tmp, tmp2);
  663. /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
  664. felem_reduce(ftmp, tmp);
  665. /* the formulae are incorrect if the points are equal
  666. * so we check for this and do doubling if this happens */
  667. x_equal = felem_is_zero(ftmp);
  668. y_equal = felem_is_zero(ftmp3);
  669. z1_is_zero = felem_is_zero(z1);
  670. z2_is_zero = felem_is_zero(z2);
  671. /* In affine coordinates, (X_1, Y_1) == (X_2, Y_2) */
  672. if (x_equal && y_equal && !z1_is_zero && !z2_is_zero)
  673. {
  674. point_double(x3, y3, z3, x1, y1, z1);
  675. return;
  676. }
  677. /* ftmp5 = z1*z2 */
  678. felem_mul(tmp, z1, z2);
  679. felem_reduce(ftmp5, tmp);
  680. /* zout = (z1^2*x2 - z2^2*x1)*(z1*z2) */
  681. felem_mul(tmp, ftmp, ftmp5);
  682. felem_reduce(zout, tmp);
  683. /* ftmp = (z1^2*x2 - z2^2*x1)^2 */
  684. memcpy(ftmp5, ftmp, 3 * sizeof(fslice));
  685. felem_square(tmp, ftmp);
  686. felem_reduce(ftmp, tmp);
  687. /* ftmp5 = (z1^2*x2 - z2^2*x1)^3 */
  688. felem_mul(tmp, ftmp, ftmp5);
  689. felem_reduce(ftmp5, tmp);
  690. /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
  691. felem_mul(tmp, ftmp2, ftmp);
  692. felem_reduce(ftmp2, tmp);
  693. /* ftmp4 = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
  694. felem_mul(tmp, ftmp4, ftmp5);
  695. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  696. /* tmp2 = (z1^3*y2 - z2^3*y1)^2 */
  697. felem_square(tmp2, ftmp3);
  698. /* tmp2[i] < 4 * 2^57 * 2^57 < 2^116 */
  699. /* tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 */
  700. felem_diff_128_64(tmp2, ftmp5);
  701. /* tmp2[i] < 2^116 + 2^64 + 8 < 2^117 */
  702. /* ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
  703. memcpy(ftmp5, ftmp2, 3 * sizeof(fslice));
  704. felem_scalar64(ftmp5, 2);
  705. /* ftmp5[i] < 2 * 2^57 = 2^58 */
  706. /* xout = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
  707. 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
  708. felem_diff_128_64(tmp2, ftmp5);
  709. /* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */
  710. felem_reduce(xout, tmp2);
  711. /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - xout */
  712. felem_diff64(ftmp2, xout);
  713. /* ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 */
  714. /* tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - xout) */
  715. felem_mul(tmp2, ftmp3, ftmp2);
  716. /* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */
  717. /* yout = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - xout) -
  718. z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
  719. felem_diff128(tmp2, tmp);
  720. /* tmp2[i] < 2^118 + 2^120 < 2^121 */
  721. felem_reduce(yout, tmp2);
  722. /* the result (xout, yout, zout) is incorrect if one of the
  723. * inputs is the point at infinity, so we need to check for this
  724. * separately */
  725. /* if point 1 is at infinity, copy point 2 to output, and vice versa */
  726. copy_conditional(xout, x2, 3, z1_is_zero);
  727. select_conditional(x3, xout, x1, 3, z2_is_zero);
  728. copy_conditional(yout, y2, 3, z1_is_zero);
  729. select_conditional(y3, yout, y1, 3, z2_is_zero);
  730. copy_conditional(zout, z2, 3, z1_is_zero);
  731. select_conditional(z3, zout, z1, 3, z2_is_zero);
  732. }
  733. /*
  734. static void affine(point P)
  735. {
  736. coord z1, z2, xin, yin;
  737. uint128_t tmp[7];
  738. if (felem_is_zero(P[2])) return;
  739. felem_inv(z2, P[2]);
  740. felem_square(tmp, z2); felem_reduce(z1, tmp);
  741. felem_mul(tmp, P[0], z1); felem_reduce(xin, tmp);
  742. felem_contract(P[0], xin);
  743. felem_mul(tmp, z1, z2); felem_reduce(z1, tmp);
  744. felem_mul(tmp, P[1], z1); felem_reduce(yin, tmp);
  745. felem_contract(P[1], yin);
  746. memset(P[2], 0, sizeof(coord));
  747. P[2][0] = 1;
  748. }
  749. */
  750. static void affine_x(coord out, point P)
  751. {
  752. coord z1, z2, xin;
  753. uint128_t tmp[7];
  754. if (felem_is_zero(P[2])) return;
  755. felem_inv(z2, P[2]);
  756. felem_square(tmp, z2); felem_reduce(z1, tmp);
  757. felem_mul(tmp, P[0], z1); felem_reduce(xin, tmp);
  758. felem_contract(out, xin);
  759. }
  760. /* Multiply the given point by s */
  761. static void point_mul(point out, point in, const felem_bytearray s)
  762. {
  763. int i;
  764. point tmp;
  765. point table[16];
  766. memset(table[0], 0, sizeof(point));
  767. memmove(table[1], in, sizeof(point));
  768. for(i=2; i<16; i+=2) {
  769. point_double(table[i][0], table[i][1], table[i][2],
  770. table[i/2][0], table[i/2][1], table[i/2][2]);
  771. point_add(table[i+1][0], table[i+1][1], table[i+1][2],
  772. table[i][0], table[i][1], table[i][2],
  773. in[0], in[1], in[2]);
  774. }
  775. /*
  776. for(i=0;i<16;++i) {
  777. fprintf(stderr, "table[%d]:\n", i);
  778. affine(table[i]);
  779. dump_point(NULL, table[i]);
  780. }
  781. */
  782. memset(tmp, 0, sizeof(point));
  783. for(i=0;i<21;i++) {
  784. u8 oh = s[20-i] >> 4;
  785. u8 ol = s[20-i] & 0x0f;
  786. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  787. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  788. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  789. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  790. point_add(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2],
  791. table[oh][0], table[oh][1], table[oh][2]);
  792. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  793. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  794. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  795. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  796. point_add(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2],
  797. table[ol][0], table[ol][1], table[ol][2]);
  798. }
  799. memmove(out, tmp, sizeof(point));
  800. }
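/* Editorial note: a minimal usage sketch, assuming the caller already holds an
 * affine point (x, y) in the internal limb format.  The scalar is a 21-byte
 * array consumed most-significant byte first from s[20] downwards, i.e. stored
 * little-endian (as arg_to_bytearray in the TESTING section produces).  The
 * function name below is illustrative only. */
#if 0
static void example_scalar_mult(coord x_out, const coord x, const coord y,
	const felem_bytearray s)
{
	point P, Q;
	memcpy(P[0], x, sizeof(coord));
	memcpy(P[1], y, sizeof(coord));
	memset(P[2], 0, sizeof(coord));
	P[2][0] = 1;                /* Z = 1: the affine point in Jacobian form */
	point_mul(Q, P, s);         /* Q = s * P */
	affine_x(x_out, Q);         /* recover the affine x-coordinate of Q */
}
#endif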
  801. #if 0
  802. /* Select a point from an array of 16 precomputed point multiples,
  803. * in constant time: for bits = {b_0, b_1, b_2, b_3}, return the point
  804. * pre_comp[8*b_3 + 4*b_2 + 2*b_1 + b_0] */
  805. static void select_point(const fslice bits[4], const fslice pre_comp[16][3][4],
  806. fslice out[12])
  807. {
  808. fslice tmp[5][12];
  809. select_conditional(tmp[0], pre_comp[7][0], pre_comp[15][0], 12, bits[3]);
  810. select_conditional(tmp[1], pre_comp[3][0], pre_comp[11][0], 12, bits[3]);
  811. select_conditional(tmp[2], tmp[1], tmp[0], 12, bits[2]);
  812. select_conditional(tmp[0], pre_comp[5][0], pre_comp[13][0], 12, bits[3]);
  813. select_conditional(tmp[1], pre_comp[1][0], pre_comp[9][0], 12, bits[3]);
  814. select_conditional(tmp[3], tmp[1], tmp[0], 12, bits[2]);
  815. select_conditional(tmp[4], tmp[3], tmp[2], 12, bits[1]);
  816. select_conditional(tmp[0], pre_comp[6][0], pre_comp[14][0], 12, bits[3]);
  817. select_conditional(tmp[1], pre_comp[2][0], pre_comp[10][0], 12, bits[3]);
  818. select_conditional(tmp[2], tmp[1], tmp[0], 12, bits[2]);
  819. select_conditional(tmp[0], pre_comp[4][0], pre_comp[12][0], 12, bits[3]);
  820. select_conditional(tmp[1], pre_comp[0][0], pre_comp[8][0], 12, bits[3]);
  821. select_conditional(tmp[3], tmp[1], tmp[0], 12, bits[2]);
  822. select_conditional(tmp[1], tmp[3], tmp[2], 12, bits[1]);
  823. select_conditional(out, tmp[1], tmp[4], 12, bits[0]);
  824. }
  825. /* Interleaved point multiplication using precomputed point multiples:
  826. * The small point multiples 0*P, 1*P, ..., 15*P are in pre_comp[],
  827. * the scalars in scalars[]. If g_scalar is non-NULL, we also add this multiple
  828. * of the generator, using certain (large) precomputed multiples in g_pre_comp.
  829. * Output point (X, Y, Z) is stored in x_out, y_out, z_out */
  830. static void batch_mul(fslice x_out[4], fslice y_out[4], fslice z_out[4],
  831. const felem_bytearray scalars[], const unsigned num_points, const u8 *g_scalar,
  832. const fslice pre_comp[][16][3][4], const fslice g_pre_comp[16][3][4])
  833. {
  834. unsigned i, j, num;
  835. unsigned gen_mul = (g_scalar != NULL);
  836. fslice nq[12], nqt[12], tmp[12];
  837. fslice bits[4];
  838. u8 byte;
  839. /* set nq to the point at infinity */
  840. memset(nq, 0, 12 * sizeof(fslice));
  841. /* Loop over all scalars msb-to-lsb, 4 bits at a time: for each nibble,
  842. * double 4 times, then add the precomputed point multiples.
  843. * If we are also adding multiples of the generator, then interleave
  844. * these additions with the last 56 doublings. */
  845. for (i = (num_points ? 28 : 7); i > 0; --i)
  846. {
  847. for (j = 0; j < 8; ++j)
  848. {
  849. /* double once */
  850. point_double(nq, nq+4, nq+8, nq, nq+4, nq+8);
  851. /* add multiples of the generator */
  852. if ((gen_mul) && (i <= 7))
  853. {
  854. bits[3] = (g_scalar[i+20] >> (7-j)) & 1;
  855. bits[2] = (g_scalar[i+13] >> (7-j)) & 1;
  856. bits[1] = (g_scalar[i+6] >> (7-j)) & 1;
  857. bits[0] = (g_scalar[i-1] >> (7-j)) & 1;
  858. /* select the point to add, in constant time */
  859. select_point(bits, g_pre_comp, tmp);
  860. memcpy(nqt, nq, 12 * sizeof(fslice));
  861. point_add(nq, nq+4, nq+8, nqt, nqt+4, nqt+8,
  862. tmp, tmp+4, tmp+8);
  863. }
  864. /* do an addition after every 4 doublings */
  865. if (j % 4 == 3)
  866. {
  867. /* loop over all scalars */
  868. for (num = 0; num < num_points; ++num)
  869. {
  870. byte = scalars[num][i-1];
  871. bits[3] = (byte >> (10-j)) & 1;
  872. bits[2] = (byte >> (9-j)) & 1;
  873. bits[1] = (byte >> (8-j)) & 1;
  874. bits[0] = (byte >> (7-j)) & 1;
  875. /* select the point to add */
  876. select_point(bits,
  877. pre_comp[num], tmp);
  878. memcpy(nqt, nq, 12 * sizeof(fslice));
  879. point_add(nq, nq+4, nq+8, nqt, nqt+4,
  880. nqt+8, tmp, tmp+4, tmp+8);
  881. }
  882. }
  883. }
  884. }
  885. memcpy(x_out, nq, 4 * sizeof(fslice));
  886. memcpy(y_out, nq+4, 4 * sizeof(fslice));
  887. memcpy(z_out, nq+8, 4 * sizeof(fslice));
  888. }
  889. /******************************************************************************/
  890. /* FUNCTIONS TO MANAGE PRECOMPUTATION
  891. */
  892. static NISTP224_PRE_COMP *nistp224_pre_comp_new()
  893. {
  894. NISTP224_PRE_COMP *ret = NULL;
  895. ret = (NISTP224_PRE_COMP *)OPENSSL_malloc(sizeof(NISTP224_PRE_COMP));
  896. if (!ret)
  897. {
  898. ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
  899. return ret;
  900. }
  901. memset(ret->g_pre_comp, 0, sizeof(ret->g_pre_comp));
  902. ret->references = 1;
  903. return ret;
  904. }
  905. static void *nistp224_pre_comp_dup(void *src_)
  906. {
  907. NISTP224_PRE_COMP *src = src_;
  908. /* no need to actually copy, these objects never change! */
  909. CRYPTO_add(&src->references, 1, CRYPTO_LOCK_EC_PRE_COMP);
  910. return src_;
  911. }
  912. static void nistp224_pre_comp_free(void *pre_)
  913. {
  914. int i;
  915. NISTP224_PRE_COMP *pre = pre_;
  916. if (!pre)
  917. return;
  918. i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
  919. if (i > 0)
  920. return;
  921. OPENSSL_free(pre);
  922. }
  923. static void nistp224_pre_comp_clear_free(void *pre_)
  924. {
  925. int i;
  926. NISTP224_PRE_COMP *pre = pre_;
  927. if (!pre)
  928. return;
  929. i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
  930. if (i > 0)
  931. return;
  932. OPENSSL_cleanse(pre, sizeof *pre);
  933. OPENSSL_free(pre);
  934. }
  935. /******************************************************************************/
  936. /* OPENSSL EC_METHOD FUNCTIONS
  937. */
  938. int ec_GFp_nistp224_group_init(EC_GROUP *group)
  939. {
  940. int ret;
  941. ret = ec_GFp_simple_group_init(group);
  942. group->a_is_minus3 = 1;
  943. return ret;
  944. }
  945. int ec_GFp_nistp224_group_set_curve(EC_GROUP *group, const BIGNUM *p,
  946. const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
  947. {
  948. int ret = 0;
  949. BN_CTX *new_ctx = NULL;
  950. BIGNUM *curve_p, *curve_a, *curve_b;
  951. if (ctx == NULL)
  952. if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
  953. BN_CTX_start(ctx);
  954. if (((curve_p = BN_CTX_get(ctx)) == NULL) ||
  955. ((curve_a = BN_CTX_get(ctx)) == NULL) ||
  956. ((curve_b = BN_CTX_get(ctx)) == NULL)) goto err;
  957. BN_bin2bn(nistp224_curve_params[0], sizeof(felem_bytearray), curve_p);
  958. BN_bin2bn(nistp224_curve_params[1], sizeof(felem_bytearray), curve_a);
  959. BN_bin2bn(nistp224_curve_params[2], sizeof(felem_bytearray), curve_b);
  960. if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) ||
  961. (BN_cmp(curve_b, b)))
  962. {
  963. ECerr(EC_F_EC_GFP_NISTP224_GROUP_SET_CURVE,
  964. EC_R_WRONG_CURVE_PARAMETERS);
  965. goto err;
  966. }
  967. group->field_mod_func = BN_nist_mod_224;
  968. ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
  969. err:
  970. BN_CTX_end(ctx);
  971. if (new_ctx != NULL)
  972. BN_CTX_free(new_ctx);
  973. return ret;
  974. }
  975. /* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
  976. * (X', Y') = (X/Z^2, Y/Z^3) */
  977. int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group,
  978. const EC_POINT *point, BIGNUM *x, BIGNUM *y, BN_CTX *ctx)
  979. {
  980. fslice z1[4], z2[4], x_in[4], y_in[4], x_out[4], y_out[4];
  981. uint128_t tmp[7];
  982. if (EC_POINT_is_at_infinity(group, point))
  983. {
  984. ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
  985. EC_R_POINT_AT_INFINITY);
  986. return 0;
  987. }
  988. if ((!BN_to_felem(x_in, &point->X)) || (!BN_to_felem(y_in, &point->Y)) ||
  989. (!BN_to_felem(z1, &point->Z))) return 0;
  990. felem_inv(z2, z1);
  991. felem_square(tmp, z2); felem_reduce(z1, tmp);
  992. felem_mul(tmp, x_in, z1); felem_reduce(x_in, tmp);
  993. felem_contract(x_out, x_in);
  994. if (x != NULL)
  995. {
  996. if (!felem_to_BN(x, x_out)) {
  997. ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
  998. ERR_R_BN_LIB);
  999. return 0;
  1000. }
  1001. }
  1002. felem_mul(tmp, z1, z2); felem_reduce(z1, tmp);
  1003. felem_mul(tmp, y_in, z1); felem_reduce(y_in, tmp);
  1004. felem_contract(y_out, y_in);
  1005. if (y != NULL)
  1006. {
  1007. if (!felem_to_BN(y, y_out)) {
  1008. ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
  1009. ERR_R_BN_LIB);
  1010. return 0;
  1011. }
  1012. }
  1013. return 1;
  1014. }
  1015. /* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL values
  1016. * Result is stored in r (r can equal one of the inputs). */
  1017. int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r,
  1018. const BIGNUM *scalar, size_t num, const EC_POINT *points[],
  1019. const BIGNUM *scalars[], BN_CTX *ctx)
  1020. {
  1021. int ret = 0;
  1022. int i, j;
  1023. BN_CTX *new_ctx = NULL;
  1024. BIGNUM *x, *y, *z, *tmp_scalar;
  1025. felem_bytearray g_secret;
  1026. felem_bytearray *secrets = NULL;
  1027. fslice (*pre_comp)[16][3][4] = NULL;
  1028. felem_bytearray tmp;
  1029. unsigned num_bytes;
  1030. int have_pre_comp = 0;
  1031. size_t num_points = num;
  1032. fslice x_in[4], y_in[4], z_in[4], x_out[4], y_out[4], z_out[4];
  1033. NISTP224_PRE_COMP *pre = NULL;
  1034. fslice (*g_pre_comp)[3][4] = NULL;
  1035. EC_POINT *generator = NULL;
  1036. const EC_POINT *p = NULL;
  1037. const BIGNUM *p_scalar = NULL;
  1038. if (ctx == NULL)
  1039. if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
  1040. BN_CTX_start(ctx);
  1041. if (((x = BN_CTX_get(ctx)) == NULL) ||
  1042. ((y = BN_CTX_get(ctx)) == NULL) ||
  1043. ((z = BN_CTX_get(ctx)) == NULL) ||
  1044. ((tmp_scalar = BN_CTX_get(ctx)) == NULL))
  1045. goto err;
  1046. if (scalar != NULL)
  1047. {
  1048. pre = EC_EX_DATA_get_data(group->extra_data,
  1049. nistp224_pre_comp_dup, nistp224_pre_comp_free,
  1050. nistp224_pre_comp_clear_free);
  1051. if (pre)
  1052. /* we have precomputation, try to use it */
  1053. g_pre_comp = pre->g_pre_comp;
  1054. else
  1055. /* try to use the standard precomputation */
  1056. g_pre_comp = (fslice (*)[3][4]) gmul;
  1057. generator = EC_POINT_new(group);
  1058. if (generator == NULL)
  1059. goto err;
  1060. /* get the generator from precomputation */
  1061. if (!felem_to_BN(x, g_pre_comp[1][0]) ||
  1062. !felem_to_BN(y, g_pre_comp[1][1]) ||
  1063. !felem_to_BN(z, g_pre_comp[1][2]))
  1064. {
  1065. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
  1066. goto err;
  1067. }
  1068. if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
  1069. generator, x, y, z, ctx))
  1070. goto err;
  1071. if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
  1072. /* precomputation matches generator */
  1073. have_pre_comp = 1;
  1074. else
  1075. /* we don't have valid precomputation:
  1076. * treat the generator as a random point */
  1077. num_points = num_points + 1;
  1078. }
  1079. secrets = OPENSSL_malloc(num_points * sizeof(felem_bytearray));
  1080. pre_comp = OPENSSL_malloc(num_points * 16 * 3 * 4 * sizeof(fslice));
  1081. if ((num_points) && ((secrets == NULL) || (pre_comp == NULL)))
  1082. {
  1083. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_MALLOC_FAILURE);
  1084. goto err;
  1085. }
  1086. /* we treat NULL scalars as 0, and NULL points as points at infinity,
  1087. * i.e., they contribute nothing to the linear combination */
  1088. memset(secrets, 0, num_points * sizeof(felem_bytearray));
  1089. memset(pre_comp, 0, num_points * 16 * 3 * 4 * sizeof(fslice));
  1090. for (i = 0; i < num_points; ++i)
  1091. {
  1092. if (i == num)
  1093. /* the generator */
  1094. {
  1095. p = EC_GROUP_get0_generator(group);
  1096. p_scalar = scalar;
  1097. }
  1098. else
  1099. /* the i^th point */
  1100. {
  1101. p = points[i];
  1102. p_scalar = scalars[i];
  1103. }
  1104. if ((p_scalar != NULL) && (p != NULL))
  1105. {
  1106. num_bytes = BN_num_bytes(p_scalar);
  1107. /* reduce scalar to 0 <= scalar < 2^224 */
  1108. if ((num_bytes > sizeof(felem_bytearray)) || (BN_is_negative(p_scalar)))
  1109. {
  1110. /* this is an unusual input, and we don't guarantee
  1111. * constant-timeness */
  1112. if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx))
  1113. {
  1114. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
  1115. goto err;
  1116. }
  1117. num_bytes = BN_bn2bin(tmp_scalar, tmp);
  1118. }
  1119. else
  1120. BN_bn2bin(p_scalar, tmp);
  1121. flip_endian(secrets[i], tmp, num_bytes);
  1122. /* precompute multiples */
  1123. if ((!BN_to_felem(x_out, &p->X)) ||
  1124. (!BN_to_felem(y_out, &p->Y)) ||
  1125. (!BN_to_felem(z_out, &p->Z))) goto err;
  1126. memcpy(pre_comp[i][1][0], x_out, 4 * sizeof(fslice));
  1127. memcpy(pre_comp[i][1][1], y_out, 4 * sizeof(fslice));
  1128. memcpy(pre_comp[i][1][2], z_out, 4 * sizeof(fslice));
  1129. for (j = 1; j < 8; ++j)
  1130. {
  1131. point_double(pre_comp[i][2*j][0],
  1132. pre_comp[i][2*j][1],
  1133. pre_comp[i][2*j][2],
  1134. pre_comp[i][j][0],
  1135. pre_comp[i][j][1],
  1136. pre_comp[i][j][2]);
  1137. point_add(pre_comp[i][2*j+1][0],
  1138. pre_comp[i][2*j+1][1],
  1139. pre_comp[i][2*j+1][2],
  1140. pre_comp[i][1][0],
  1141. pre_comp[i][1][1],
  1142. pre_comp[i][1][2],
  1143. pre_comp[i][2*j][0],
  1144. pre_comp[i][2*j][1],
  1145. pre_comp[i][2*j][2]);
  1146. }
  1147. }
  1148. }
  1149. /* the scalar for the generator */
  1150. if ((scalar != NULL) && (have_pre_comp))
  1151. {
  1152. memset(g_secret, 0, sizeof g_secret);
  1153. num_bytes = BN_num_bytes(scalar);
  1154. /* reduce scalar to 0 <= scalar < 2^224 */
  1155. if ((num_bytes > sizeof(felem_bytearray)) || (BN_is_negative(scalar)))
  1156. {
  1157. /* this is an unusual input, and we don't guarantee
  1158. * constant-timeness */
  1159. if (!BN_nnmod(tmp_scalar, scalar, &group->order, ctx))
  1160. {
  1161. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
  1162. goto err;
  1163. }
  1164. num_bytes = BN_bn2bin(tmp_scalar, tmp);
  1165. }
  1166. else
  1167. BN_bn2bin(scalar, tmp);
  1168. flip_endian(g_secret, tmp, num_bytes);
  1169. /* do the multiplication with generator precomputation*/
  1170. batch_mul(x_out, y_out, z_out,
  1171. (const felem_bytearray (*)) secrets, num_points,
  1172. g_secret, (const fslice (*)[16][3][4]) pre_comp,
  1173. (const fslice (*)[3][4]) g_pre_comp);
  1174. }
  1175. else
  1176. /* do the multiplication without generator precomputation */
  1177. batch_mul(x_out, y_out, z_out,
  1178. (const felem_bytearray (*)) secrets, num_points,
  1179. NULL, (const fslice (*)[16][3][4]) pre_comp, NULL);
  1180. /* reduce the output to its unique minimal representation */
  1181. felem_contract(x_in, x_out);
  1182. felem_contract(y_in, y_out);
  1183. felem_contract(z_in, z_out);
  1184. if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
  1185. (!felem_to_BN(z, z_in)))
  1186. {
  1187. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
  1188. goto err;
  1189. }
  1190. ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);
  1191. err:
  1192. BN_CTX_end(ctx);
  1193. if (generator != NULL)
  1194. EC_POINT_free(generator);
  1195. if (new_ctx != NULL)
  1196. BN_CTX_free(new_ctx);
  1197. if (secrets != NULL)
  1198. OPENSSL_free(secrets);
  1199. if (pre_comp != NULL)
  1200. OPENSSL_free(pre_comp);
  1201. return ret;
  1202. }
int ec_GFp_nistp224_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
    {
    int ret = 0;
    NISTP224_PRE_COMP *pre = NULL;
    int i, j;
    BN_CTX *new_ctx = NULL;
    BIGNUM *x, *y;
    EC_POINT *generator = NULL;
    /* throw away old precomputation */
    EC_EX_DATA_free_data(&group->extra_data, nistp224_pre_comp_dup,
        nistp224_pre_comp_free, nistp224_pre_comp_clear_free);
    if (ctx == NULL)
        if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
    BN_CTX_start(ctx);
    if (((x = BN_CTX_get(ctx)) == NULL) ||
        ((y = BN_CTX_get(ctx)) == NULL))
        goto err;
    /* get the generator */
    if (group->generator == NULL) goto err;
    generator = EC_POINT_new(group);
    if (generator == NULL)
        goto err;
    BN_bin2bn(nistp224_curve_params[3], sizeof (felem_bytearray), x);
    BN_bin2bn(nistp224_curve_params[4], sizeof (felem_bytearray), y);
    if (!EC_POINT_set_affine_coordinates_GFp(group, generator, x, y, ctx))
        goto err;
    if ((pre = nistp224_pre_comp_new()) == NULL)
        goto err;
    /* if the generator is the standard one, use built-in precomputation */
    if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
        {
        memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
        ret = 1;
        goto err;
        }
    if ((!BN_to_felem(pre->g_pre_comp[1][0], &group->generator->X)) ||
        (!BN_to_felem(pre->g_pre_comp[1][1], &group->generator->Y)) ||
        (!BN_to_felem(pre->g_pre_comp[1][2], &group->generator->Z)))
        goto err;
    /* compute 2^56*G, 2^112*G, 2^168*G */
    for (i = 1; i < 5; ++i)
        {
        point_double(pre->g_pre_comp[2*i][0], pre->g_pre_comp[2*i][1],
            pre->g_pre_comp[2*i][2], pre->g_pre_comp[i][0],
            pre->g_pre_comp[i][1], pre->g_pre_comp[i][2]);
        for (j = 0; j < 55; ++j)
            {
            point_double(pre->g_pre_comp[2*i][0],
                pre->g_pre_comp[2*i][1],
                pre->g_pre_comp[2*i][2],
                pre->g_pre_comp[2*i][0],
                pre->g_pre_comp[2*i][1],
                pre->g_pre_comp[2*i][2]);
            }
        }
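    /* Each pass through the loop above performs 56 doublings in total,
     * so it sets g_pre_comp[2*i] = 2^56 * g_pre_comp[i]. */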
    /* g_pre_comp[0] is the point at infinity */
    memset(pre->g_pre_comp[0], 0, sizeof(pre->g_pre_comp[0]));
    /* the remaining multiples */
    /* 2^56*G + 2^112*G */
    point_add(pre->g_pre_comp[6][0], pre->g_pre_comp[6][1],
        pre->g_pre_comp[6][2], pre->g_pre_comp[4][0],
        pre->g_pre_comp[4][1], pre->g_pre_comp[4][2],
        pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
        pre->g_pre_comp[2][2]);
    /* 2^56*G + 2^168*G */
    point_add(pre->g_pre_comp[10][0], pre->g_pre_comp[10][1],
        pre->g_pre_comp[10][2], pre->g_pre_comp[8][0],
        pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
        pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
        pre->g_pre_comp[2][2]);
    /* 2^112*G + 2^168*G */
    point_add(pre->g_pre_comp[12][0], pre->g_pre_comp[12][1],
        pre->g_pre_comp[12][2], pre->g_pre_comp[8][0],
        pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
        pre->g_pre_comp[4][0], pre->g_pre_comp[4][1],
        pre->g_pre_comp[4][2]);
    /* 2^56*G + 2^112*G + 2^168*G */
    point_add(pre->g_pre_comp[14][0], pre->g_pre_comp[14][1],
        pre->g_pre_comp[14][2], pre->g_pre_comp[12][0],
        pre->g_pre_comp[12][1], pre->g_pre_comp[12][2],
        pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
        pre->g_pre_comp[2][2]);
    for (i = 1; i < 8; ++i)
        {
        /* odd multiples: add G */
        point_add(pre->g_pre_comp[2*i+1][0], pre->g_pre_comp[2*i+1][1],
            pre->g_pre_comp[2*i+1][2], pre->g_pre_comp[2*i][0],
            pre->g_pre_comp[2*i][1], pre->g_pre_comp[2*i][2],
            pre->g_pre_comp[1][0], pre->g_pre_comp[1][1],
            pre->g_pre_comp[1][2]);
        }
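    /* The table is now complete: for 0 <= b < 16, g_pre_comp[b] holds the
     * sum of those of G, 2^56*G, 2^112*G and 2^168*G whose bits are set
     * in b. */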
    if (!EC_EX_DATA_set_data(&group->extra_data, pre, nistp224_pre_comp_dup,
            nistp224_pre_comp_free, nistp224_pre_comp_clear_free))
        goto err;
    ret = 1;
    pre = NULL;
err:
    BN_CTX_end(ctx);
    if (generator != NULL)
        EC_POINT_free(generator);
    if (new_ctx != NULL)
        BN_CTX_free(new_ctx);
    if (pre)
        nistp224_pre_comp_free(pre);
    return ret;
    }
int ec_GFp_nistp224_have_precompute_mult(const EC_GROUP *group)
    {
    if (EC_EX_DATA_get_data(group->extra_data, nistp224_pre_comp_dup,
            nistp224_pre_comp_free, nistp224_pre_comp_clear_free)
        != NULL)
        return 1;
    else
        return 0;
    }
#endif
#ifdef TESTING
#include <sys/time.h>
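/* Convert a single hex digit to its 4-bit value; non-hex characters map
 * to 0. */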
static u8 ctoh(char c)
{
    if (c >= '0' && c <= '9') return c-'0';
    if (c >= 'a' && c <= 'f') return c-'a'+10;
    if (c >= 'A' && c <= 'F') return c-'A'+10;
    return 0;
}
static void arg_to_bytearray(felem_bytearray ba, const char *arg)
{
    /* Convert the arg, which is a string like "1a2637c8", to a byte
     * array like 0xc8 0x37 0x26 0x1a. */
    int size = sizeof(felem_bytearray);
    int arglen = strlen(arg);
    int argsize = (arglen+1)/2;
    const char *argp = arg + arglen;
    u8 *bap = ba;
    memset(ba, 0, size);
    if (size < argsize) {
        fprintf(stderr, "Arg too long: %s\n", arg);
        exit(1);
    }
    while (argp > arg+1) {
        argp -= 2;
        *bap = (ctoh(argp[0])<<4)|(ctoh(argp[1]));
        ++bap;
    }
    if (arglen & 1) {
        /* Handle the stray top nybble */
        argp -= 1;
        *bap = ctoh(argp[0]);
    }
}
static void arg_to_coord(coord c, const char *arg)
{
    felem_bytearray ba;
    arg_to_bytearray(ba, arg);
    /* Now convert it to a coord */
    bin21_to_felem(c, ba);
}
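/* Standalone test driver.  Reads the affine coordinates of two points P
 * and Q and a scalar s as hex strings from the command line, prints P, Q,
 * 2*P and P+Q, then times niter scalar multiplications of P by s. */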
int main(int argc, char **argv)
{
    point infinity, P, Q, P2, PQ;
    felem_bytearray s;
    int i;
    struct timeval st, et;
    unsigned long el;
    int niter = 1000;
    memset(infinity, 0, sizeof(infinity));
    memset(P, 0, sizeof(P));
    memset(Q, 0, sizeof(Q));
    if (argc != 6) {
        fprintf(stderr, "Usage: %s Px Py Qx Qy s\n", argv[0]);
        exit(1);
    }
    arg_to_coord(P[0], argv[1]);
    arg_to_coord(P[1], argv[2]);
    P[2][0] = 1;
    dump_point("P", P);
    arg_to_coord(Q[0], argv[3]);
    arg_to_coord(Q[1], argv[4]);
    Q[2][0] = 1;
    dump_point("Q", Q);
    arg_to_bytearray(s, argv[5]);
    point_double(P2[0], P2[1], P2[2], P[0], P[1], P[2]);
    affine(P2);
    point_add(PQ[0], PQ[1], PQ[2], P[0], P[1], P[2], Q[0], Q[1], Q[2]);
    affine(PQ);
    dump_point("P2", P2);
    dump_point("PQ", PQ);
    gettimeofday(&st, NULL);
    for (i=0;i<niter;++i) {
        point_mul(P, P, s);
        affine(P);
    }
    gettimeofday(&et, NULL);
    el = (et.tv_sec-st.tv_sec)*1000000 + (et.tv_usec-st.tv_usec);
    fprintf(stderr, "%lu / %d = %lu us\n", el, niter, el/niter);
    dump_point("Ps", P);
    return 0;
}
#endif
/* Figure out whether there's a point with x-coordinate x on the main
 * curve. If not, then there's one on the twist curve. (There are
 * actually two, which are negatives of each other; that doesn't
 * matter.) Multiply that point by seckey and set out to the
 * x-coordinate of the result. */
void ptwist_pointmul(byte out[PTWIST_BYTES], const byte x[PTWIST_BYTES],
    const byte seckey[PTWIST_BYTES])
{
    /* Compute z = x^3 + a*x + b */
    point P, Q;
    coord z, r2, Qx;
    uint128_t tmp[5];
    int ontwist;
    static const coord three = { 3, 0, 0 };
    static const coord b =
        { 0x46d320e01dc7d6, 0x486ebc69bad316, 0x4e355e95cafedd };
    /* Convert the byte array to a coord */
    bin21_to_felem(P[0], x);
    /* Compute z = x^3 - 3*x + b */
    felem_square(tmp, P[0]); felem_reduce(z, tmp);
    felem_diff64(z, three);
    felem_mul(tmp, z, P[0]); felem_reduce(z, tmp);
    felem_sum64(z, b);
    /*
    dump_coord("z", z);
    */
    /* Compute r = P[1] = z ^ ((p+1)/4). This will be a square root of
     * z, if one exists. */
    felem_sqrt(P[1], z);
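    /* The exponent (p+1)/4 is an integer because p = 3 (mod 4); squaring
     * the result recovers z exactly when z is a quadratic residue, which
     * is what the check below tests. */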
    /*
    dump_coord("r", P[1]);
    */
    /* Is P[1] a square root of z? */
    felem_square(tmp, P[1]); felem_diff_128_64(tmp, z); felem_reduce(r2, tmp);
    if (felem_is_zero(r2)) {
        /* P(x,r) is on the curve */
        ontwist = 0;
    } else {
        /* (-x, r) is on the twist */
        ontwist = 1;
        felem_neg(P[0], P[0]);
    }
    /*
    fprintf(stderr, "ontwist = %d\n", ontwist);
    */
    memset(P[2], 0, sizeof(coord));
    P[2][0] = 1;
    /* All set. Now do the point multiplication. */
    /*
    dump_point("P", P);
    for(i=0;i<21;++i) {
        fprintf(stderr, "%02x", seckey[20-i]);
    }
    fprintf(stderr, "\n");
    */
    point_mul(Q, P, seckey);
    affine_x(Qx, Q);
    /*
    dump_point("Q", Q);
    */
    /* Get the x-coordinate of the result, and negate it if we're on the
     * twist. */
    if (ontwist) {
        felem_neg(Qx, Qx);
    }
    /* Convert back to bytes */
    felem_to_bin21(out, Qx);
    /*
    fprintf(stderr, "out: ");
    for(i=0;i<21;++i) {
        fprintf(stderr, "%02x", out[i]);
    }
    fprintf(stderr, "\n");
    */
}
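/* A minimal usage sketch (illustrative only; the buffer names and how they
 * are filled are hypothetical, not part of this file):
 *
 *     byte x[PTWIST_BYTES], key[PTWIST_BYTES], tag[PTWIST_BYTES];
 *     ... fill x with a candidate x-coordinate, key with the secret ...
 *     ptwist_pointmul(tag, x, key);
 *     ... tag now holds the x-coordinate of the multiplied point, as
 *     described in the comment above ptwist_pointmul ...
 */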