ptwist168.c

  1. #include "ptwist.h"
  2. /* ptwist168.c by Ian Goldberg. Based on: */
  3. /* crypto/ec/ecp_nistp224.c */
  4. /*
  5. * Written by Emilia Kasper (Google) for the OpenSSL project.
  6. */
  7. /* ====================================================================
  8. * Copyright (c) 2000-2010 The OpenSSL Project. All rights reserved.
  9. *
  10. * Redistribution and use in source and binary forms, with or without
  11. * modification, are permitted provided that the following conditions
  12. * are met:
  13. *
  14. * 1. Redistributions of source code must retain the above copyright
  15. * notice, this list of conditions and the following disclaimer.
  16. *
  17. * 2. Redistributions in binary form must reproduce the above copyright
  18. * notice, this list of conditions and the following disclaimer in
  19. * the documentation and/or other materials provided with the
  20. * distribution.
  21. *
  22. * 3. All advertising materials mentioning features or use of this
  23. * software must display the following acknowledgment:
  24. * "This product includes software developed by the OpenSSL Project
  25. * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
  26. *
  27. * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
  28. * endorse or promote products derived from this software without
  29. * prior written permission. For written permission, please contact
  30. * licensing@OpenSSL.org.
  31. *
  32. * 5. Products derived from this software may not be called "OpenSSL"
  33. * nor may "OpenSSL" appear in their names without prior written
  34. * permission of the OpenSSL Project.
  35. *
  36. * 6. Redistributions of any form whatsoever must retain the following
  37. * acknowledgment:
  38. * "This product includes software developed by the OpenSSL Project
  39. * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
  40. *
  41. * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
  42. * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  43. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  44. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
  45. * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  46. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  47. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  48. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  49. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  50. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  51. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  52. * OF THE POSSIBILITY OF SUCH DAMAGE.
  53. * ====================================================================
  54. *
  55. * This product includes cryptographic software written by Eric Young
  56. * (eay@cryptsoft.com). This product includes software written by Tim
  57. * Hudson (tjh@cryptsoft.com).
  58. *
  59. */
  60. /*
  61. * A 64-bit implementation of the NIST P-224 elliptic curve point multiplication
  62. *
  63. * Inspired by Daniel J. Bernstein's public domain nistp224 implementation
  64. * and Adam Langley's public domain 64-bit C implementation of curve25519
  65. */
  66. #include <stdint.h>
  67. #include <string.h>
  68. #if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
  69. /* even with gcc, the typedef won't work for 32-bit platforms */
  70. typedef __uint128_t uint128_t; /* nonstandard; implemented by gcc on 64-bit platforms */
  71. #else
  72. #error "Need GCC 3.1 or later to define type uint128_t"
  73. #endif
  74. typedef uint8_t u8;
  75. /******************************************************************************/
  76. /* INTERNAL REPRESENTATION OF FIELD ELEMENTS
  77. *
  78. * Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2
  79. * where each slice a_i is a 64-bit word, i.e., a field element is an fslice
  80. * array a with 3 elements, where a[i] = a_i.
  81. * Outputs from multiplications are represented as unreduced polynomials
  82. * b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4
  83. * where each b_i is a 128-bit word. We ensure that inputs to each field
  84. * multiplication satisfy a_i < 2^60, so outputs satisfy b_i < 4*2^60*2^60,
  85. * and fit into a 128-bit word without overflow. The coefficients are then
  86. * again partially reduced to a_i < 2^57. We only reduce to the unique minimal
  87. * representation at the end of the computation.
  88. *
  89. */
  90. typedef uint64_t fslice;
  91. typedef fslice coord[3];
  92. typedef coord point[3];
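/* Illustrative sketch (added for exposition, compiled out): the modulus
 * p = 2^168 - 2^8 - 1 written in the three 56-bit limbs of this
 * representation; the limb values match the two56m257/two56m1 constants
 * used by felem_contract() below. */
#if 0
static const coord field_modulus_p = {
	(((uint64_t) 1) << 56) - 257,	/* p mod 2^56 */
	(((uint64_t) 1) << 56) - 1,	/* bits 56..111 */
	(((uint64_t) 1) << 56) - 1	/* bits 112..167 */
};
#endif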
  93. #include <stdio.h>
  94. #include <stdlib.h>
  95. static void dump_coord(const char *label, const coord c)
  96. {
  97. if (label) fprintf(stderr, "%s: ", label);
  98. fprintf(stderr, "%016lx %016lx %016lx\n", c[2], c[1], c[0]);
  99. }
  100. static void dump_point(const char *label, point p)
  101. {
  102. if (label) fprintf(stderr, "%s:\n", label);
  103. dump_coord(" x", p[0]);
  104. dump_coord(" y", p[1]);
  105. dump_coord(" z", p[2]);
  106. }
  107. /* Field element represented as a byte array.
  108. * 21*8 = 168 bits is also the group order size for the elliptic curve. */
  109. typedef u8 felem_bytearray[21];
  110. static const felem_bytearray ptwist168_curve_params[5] = {
  111. {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, /* p */
  112. 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
  113. 0xFF},
  114. {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, /* a */
  115. 0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFF,0xFE,
  116. 0xFC},
  117. {0x4E,0x35,0x5E,0x95,0xCA,0xFE,0xDD,0x48,0x6E,0xBC, /* b */
  118. 0x69,0xBA,0xD3,0x16,0x46,0xD3,0x20,0xE0,0x1D,0xC7,
  119. 0xD6},
  120. {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* x */
  121. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  122. 0x02},
  123. {0xEA,0x67,0x47,0xB7,0x5A,0xF8,0xC7,0xF9,0x3C,0x1F, /* y */
  124. 0x5E,0x6D,0x32,0x0F,0x88,0xB9,0xBE,0x15,0x66,0xD2,
  125. 0xF2}
  126. };
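/* The parameters above are big-endian byte strings (most significant byte
 * first); in particular the first entry encodes p = 2^168 - 2^8 - 1, the
 * modulus assumed by all of the field arithmetic below. */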
  127. /* Helper functions to convert field elements to/from internal representation */
  128. static void bin21_to_felem(fslice out[3], const u8 in[21])
  129. {
  130. out[0] = *((const uint64_t *)(in)) & 0x00ffffffffffffff;
  131. out[1] = (*((const uint64_t *)(in+7))) & 0x00ffffffffffffff;
  132. out[2] = (*((const uint64_t *)(in+14))) & 0x00ffffffffffffff;
  133. }
  134. static void felem_to_bin21(u8 out[21], const fslice in[3])
  135. {
  136. unsigned i;
  137. for (i = 0; i < 7; ++i)
  138. {
  139. out[i] = in[0]>>(8*i);
  140. out[i+7] = in[1]>>(8*i);
  141. out[i+14] = in[2]>>(8*i);
  142. }
  143. }
  144. #if 0
  145. /* To preserve endianness when using BN_bn2bin and BN_bin2bn */
  146. static void flip_endian(u8 *out, const u8 *in, unsigned len)
  147. {
  148. unsigned i;
  149. for (i = 0; i < len; ++i)
  150. out[i] = in[len-1-i];
  151. }
  152. #endif
  153. /******************************************************************************/
  154. /* FIELD OPERATIONS
  155. *
  156. * Field operations, using the internal representation of field elements.
  157. * NB! These operations are specific to our point multiplication and cannot be
  158. * expected to be correct in general - e.g., multiplication with a large scalar
  159. * will cause an overflow.
  160. *
  161. */
  162. /* Sum two field elements: out += in */
  163. static void felem_sum64(fslice out[3], const fslice in[3])
  164. {
  165. out[0] += in[0];
  166. out[1] += in[1];
  167. out[2] += in[2];
  168. }
  169. /* Subtract field elements: out -= in */
  170. /* Assumes in[i] < 2^57 */
  171. static void felem_diff64(fslice out[3], const fslice in[3])
  172. {
  173. /* a = 3*2^56 - 3 */
  174. /* b = 3*2^56 - 3*257 */
  175. static const uint64_t a = (((uint64_t) 3) << 56) - ((uint64_t) 3);
  176. static const uint64_t b = (((uint64_t) 3) << 56) - ((uint64_t) 771);
  177. /* Add 0 mod 2^168-2^8-1 to ensure out > in at each element */
  178. /* a*2^112 + a*2^56 + b = 3*p */
  179. out[0] += b;
  180. out[1] += a;
  181. out[2] += a;
  182. out[0] -= in[0];
  183. out[1] -= in[1];
  184. out[2] -= in[2];
  185. }
  186. /* Subtract in unreduced 128-bit mode: out128 -= in128 */
  187. /* Assumes in[i] < 2^119 */
  188. static void felem_diff128(uint128_t out[5], const uint128_t in[5])
  189. {
  190. /* a = 3*2^118 - 192
  191. b = 3*2^118 - 49536
  192. c = 3*2^118
  193. d = 3*2^118 - 12681408
  194. a*2^224 + a*2^168 + b*2^112 + c*2^56 + d
  195. = (3*2^174 + 3*2^118 + 49344)*p
  196. */
  197. static const uint128_t a = (((uint128_t)3) << 118) - ((uint128_t) 192);
  198. static const uint128_t b = (((uint128_t)3) << 118) - ((uint128_t) 49536);
  199. static const uint128_t c = (((uint128_t)3) << 118);
  200. static const uint128_t d = (((uint128_t)3) << 118) - ((uint128_t) 12681408);
  201. /* Add 0 mod 2^168-2^8-1 to ensure out > in */
  202. out[0] += d;
  203. out[1] += c;
  204. out[2] += b;
  205. out[3] += a;
  206. out[4] += a;
  207. out[0] -= in[0];
  208. out[1] -= in[1];
  209. out[2] -= in[2];
  210. out[3] -= in[3];
  211. out[4] -= in[4];
  212. }
  213. /* Subtract in mixed mode: out128 -= in64 */
  214. /* in[i] < 2^63 */
  215. static void felem_diff_128_64(uint128_t out[5], const fslice in[3])
  216. {
  217. /* a = 3*2^62 - 192
  218. b = 3*2^62 - 49344
  219. a*2^112 + a*2^56 + b = 192*p
  220. */
  221. static const uint128_t a = (((uint128_t) 3) << 62) - ((uint128_t) 192);
  222. static const uint128_t b = (((uint128_t) 3) << 62) - ((uint128_t) 49344);
  223. /* Add 0 mod 2^168-2^8-1 to ensure out > in */
  224. out[0] += b;
  225. out[1] += a;
  226. out[2] += a;
  227. out[0] -= in[0];
  228. out[1] -= in[1];
  229. out[2] -= in[2];
  230. }
  231. /* Multiply a field element by a scalar: out64 = out64 * scalar
  232. * The scalars we actually use are small, so results fit without overflow */
  233. static void felem_scalar64(fslice out[3], const fslice scalar)
  234. {
  235. out[0] *= scalar;
  236. out[1] *= scalar;
  237. out[2] *= scalar;
  238. }
  239. /* Multiply an unreduced field element by a scalar: out128 = out128 * scalar
  240. * The scalars we actually use are small, so results fit without overflow */
  241. static void felem_scalar128(uint128_t out[5], const uint128_t scalar)
  242. {
  243. out[0] *= scalar;
  244. out[1] *= scalar;
  245. out[2] *= scalar;
  246. out[3] *= scalar;
  247. out[4] *= scalar;
  248. }
  249. /* Square a field element: out = in^2 */
  250. static void felem_square(uint128_t out[5], const fslice in[3])
  251. {
  252. out[0] = ((uint128_t) in[0]) * in[0];
  253. out[1] = ((uint128_t) in[0]) * in[1] * 2;
  254. out[2] = ((uint128_t) in[0]) * in[2] * 2 + ((uint128_t) in[1]) * in[1];
  255. out[3] = ((uint128_t) in[1]) * in[2] * 2;
  256. out[4] = ((uint128_t) in[2]) * in[2];
  257. }
  258. /* Multiply two field elements: out = in1 * in2 */
  259. static void felem_mul(uint128_t out[5], const fslice in1[3], const fslice in2[3])
  260. {
  261. out[0] = ((uint128_t) in1[0]) * in2[0];
  262. out[1] = ((uint128_t) in1[0]) * in2[1] + ((uint128_t) in1[1]) * in2[0];
  263. out[2] = ((uint128_t) in1[0]) * in2[2] + ((uint128_t) in1[1]) * in2[1] +
  264. ((uint128_t) in1[2]) * in2[0];
  265. out[3] = ((uint128_t) in1[1]) * in2[2] +
  266. ((uint128_t) in1[2]) * in2[1];
  267. out[4] = ((uint128_t) in1[2]) * in2[2];
  268. }
  269. #define M257(x) (((x)<<8)+(x))
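/* M257(x) computes 257*x. Since p = 2^168 - 2^8 - 1, we have
 * 2^168 = 2^8 + 1 = 257 (mod p), so a coefficient of weight 2^168 (or a
 * multiple of it) folds into the low limbs after multiplication by 257;
 * felem_reduce below uses this to eliminate in[3], in[4] and limb overflow. */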
  270. /* Reduce 128-bit coefficients to 64-bit coefficients. Requires in[i] < 2^126,
  271. * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^57 */
  272. static void felem_reduce(fslice out[3], const uint128_t in[5])
  273. {
  274. static const uint128_t two56m1 = (((uint128_t) 1)<<56) -
  275. ((uint128_t)1);
  276. uint128_t output[3];
  277. output[0] = in[0]; /* < 2^126 */
  278. output[1] = in[1]; /* < 2^126 */
  279. output[2] = in[2]; /* < 2^126 */
  280. /* Eliminate in[3], in[4] */
  281. output[2] += M257(in[4] >> 56); /* < 2^126 + 2^79 */
  282. output[1] += M257(in[4] & two56m1); /* < 2^126 + 2^65 */
  283. output[1] += M257(in[3] >> 56); /* < 2^126 + 2^65 + 2^79 */
  284. output[0] += M257(in[3] & two56m1); /* < 2^126 + 2^65 */
  285. /* Eliminate the top part of output[2] */
  286. output[0] += M257(output[2] >> 56); /* < 2^126 + 2^65 + 2^79 */
  287. output[2] &= two56m1; /* < 2^56 */
  288. /* Carry 0 -> 1 -> 2 */
  289. output[1] += output[0] >> 56; /* < 2^126 + 2^71 */
  290. output[0] &= two56m1; /* < 2^56 */
  291. output[2] += output[1] >> 56; /* < 2^71 */
  292. output[1] &= two56m1; /* < 2^56 */
  293. /* Eliminate the top part of output[2] */
  294. output[0] += M257(output[2] >> 56); /* < 2^57 */
  295. output[2] &= two56m1; /* < 2^56 */
  296. /* Carry 0 -> 1 -> 2 */
  297. output[1] += output[0] >> 56; /* <= 2^56 */
  298. out[0] = output[0] & two56m1; /* < 2^56 */
  299. out[2] = output[2] + (output[1] >> 56); /* <= 2^56 */
  300. out[1] = output[1] & two56m1; /* < 2^56 */
  301. }
  302. /* Reduce to unique minimal representation */
  303. static void felem_contract(fslice out[3], const fslice in[3])
  304. {
  305. static const uint64_t two56m1 = (((uint64_t) 1)<<56) -
  306. ((uint64_t)1);
  307. static const uint64_t two56m257 = (((uint64_t) 1)<<56) -
  308. ((uint64_t)257);
  309. uint64_t a;
  310. /* in[0] < 2^56, in[1] < 2^56, in[2] <= 2^56 */
  311. /* so in < 2*p for sure */
  312. /* Eliminate the top part of in[2] */
  313. out[0] = in[0] + M257(in[2] >> 56); /* < 2^57 */
  314. out[2] = in[2] & two56m1; /* < 2^56, but if out[0] >= 2^56
  315. then out[2] now = 0 */
  316. /* Carry 0 -> 1 -> 2 */
  317. out[1] = in[1] + (out[0] >> 56); /* < 2^56 + 2, but if
  318. out[1] >= 2^56 then
  319. out[2] = 0 */
  320. out[0] &= two56m1; /* < 2^56 */
  321. out[2] += out[1] >> 56; /* < 2^56 due to the above */
  322. out[1] &= two56m1; /* < 2^56 */
  323. /* Now out < 2^168, but it could still be > p */
  324. a = ((out[2] == two56m1) & (out[1] == two56m1) & (out[0] >= two56m257));
  325. out[2] -= two56m1*a;
  326. out[1] -= two56m1*a;
  327. out[0] -= two56m257*a;
  328. }
  329. /* Negate a field element: out = -in */
  330. /* Assumes in[i] < 2^57 */
  331. static void felem_neg(fslice out[3], const fslice in[3])
  332. {
  333. /* a = 3*2^56 - 3 */
  334. /* b = 3*2^56 - 3*257 */
  335. static const uint64_t a = (((uint64_t) 3) << 56) - ((uint64_t) 3);
  336. static const uint64_t b = (((uint64_t) 3) << 56) - ((uint64_t) 771);
  337. static const uint64_t two56m1 = (((uint64_t) 1) << 56) - ((uint64_t) 1);
  338. fslice tmp[3];
  339. /* Add 0 mod 2^168-2^8-1 to ensure out > in at each element */
  340. /* a*2^112 + a*2^56 + b = 3*p */
  341. tmp[0] = b - in[0];
  342. tmp[1] = a - in[1];
  343. tmp[2] = a - in[2];
  344. /* Carry 0 -> 1 -> 2 */
  345. tmp[1] += tmp[0] >> 56;
  346. tmp[0] &= two56m1; /* < 2^56 */
  347. tmp[2] += tmp[1] >> 56; /* < 2^71 */
  348. tmp[1] &= two56m1; /* < 2^56 */
  349. felem_contract(out, tmp);
  350. }
  351. /* Zero-check: returns 1 if input is 0, and 0 otherwise.
  352. * We know that field elements are reduced to in < 2^169,
  353. * so we only need to check three cases: 0, 2^168 - 2^8 - 1,
  354. * and 2^169 - 2^9 - 2 (that is, 0, p, and 2*p) */
  355. static fslice felem_is_zero(const fslice in[3])
  356. {
  357. fslice zero, two168m8m1, two169m9m2;
  358. static const uint64_t two56m1 = (((uint64_t) 1)<<56) -
  359. ((uint64_t)1);
  360. static const uint64_t two56m257 = (((uint64_t) 1)<<56) -
  361. ((uint64_t)257);
  362. static const uint64_t two57m1 = (((uint64_t) 1)<<57) -
  363. ((uint64_t)1);
  364. static const uint64_t two56m514 = (((uint64_t) 1)<<56) -
  365. ((uint64_t)514);
  366. zero = (in[0] == 0) & (in[1] == 0) & (in[2] == 0);
  367. two168m8m1 = (in[2] == two56m1) & (in[1] == two56m1) &
  368. (in[0] == two56m257);
  369. two169m9m2 = (in[2] == two57m1) & (in[1] == two56m1) &
  370. (in[0] == two56m514);
  371. return (zero | two168m8m1 | two169m9m2);
  372. }
  373. /* Invert a field element: out = in^(p-2) = in^(2^168 - 2^8 - 3), by Fermat's little theorem */
  374. static void felem_inv(fslice out[3], const fslice in[3])
  375. {
  376. fslice ftmp[3], ftmp2[3], ftmp3[3], ftmp4[3];
  377. uint128_t tmp[5];
  378. unsigned i;
  379. felem_square(tmp, in); felem_reduce(ftmp, tmp); /* 2 */
  380. felem_mul(tmp, in, ftmp); felem_reduce(ftmp, tmp); /* 2^2 - 1 */
  381. /* = ftmp */
  382. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^3 - 2 */
  383. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^4 - 2^2 */
  384. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp2, tmp); /* 2^4 - 1 */
  385. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^5 - 2 */
  386. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^6 - 2^2 */
  387. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^6 - 1 */
  388. /* = ftmp */
  389. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^7 - 2 */
  390. for (i = 0; i < 5; ++i) /* 2^12 - 2^6 */
  391. {
  392. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  393. }
  394. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp3, tmp); /* 2^12 - 1 */
  395. /* = ftmp3 */
  396. felem_square(tmp, ftmp3); felem_reduce(ftmp2, tmp); /* 2^13 - 2 */
  397. for (i = 0; i < 11; ++i) /* 2^24 - 2^12 */
  398. {
  399. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  400. }
  401. felem_mul(tmp, ftmp2, ftmp3); felem_reduce(ftmp3, tmp); /* 2^24 - 1 */
  402. /* = ftmp3 */
  403. felem_square(tmp, ftmp3); felem_reduce(ftmp2, tmp); /* 2^25 - 2 */
  404. for (i = 0; i < 23; ++i) /* 2^48 - 2^24 */
  405. {
  406. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  407. }
  408. felem_mul(tmp, ftmp2, ftmp3); felem_reduce(ftmp4, tmp); /* 2^48 - 1 */
  409. /* = ftmp4 */
  410. felem_square(tmp, ftmp4); felem_reduce(ftmp2, tmp); /* 2^49 - 2 */
  411. for (i = 0; i < 23; ++i) /* 2^72 - 2^24 */
  412. {
  413. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  414. }
  415. felem_mul(tmp, ftmp2, ftmp3); felem_reduce(ftmp4, tmp); /* 2^72 - 1 */
  416. /* = ftmp4 */
  417. felem_square(tmp, ftmp4); felem_reduce(ftmp2, tmp); /* 2^73 - 2 */
  418. for (i = 0; i < 5; ++i) /* 2^78 - 2^6 */
  419. {
  420. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  421. }
  422. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^78 - 1 */
  423. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^79 - 2 */
  424. felem_mul(tmp, in, ftmp2); felem_reduce(ftmp4, tmp); /* 2^79 - 1 */
  425. /* = ftmp4 */
  426. felem_square(tmp, ftmp4); felem_reduce(ftmp2, tmp); /* 2^80 - 2 */
  427. for (i = 0; i < 78; ++i) /* 2^158 - 2^79 */
  428. {
  429. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  430. }
  431. felem_mul(tmp, ftmp4, ftmp2); felem_reduce(ftmp2, tmp); /* 2^158 - 1 */
  432. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^159 - 2 */
  433. felem_mul(tmp, in, ftmp2); felem_reduce(ftmp2, tmp); /* 2^159 - 1 */
  434. for (i = 0; i < 7; ++i) /* 2^166 - 2^7 */
  435. {
  436. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  437. }
  438. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^166 - 2^6 - 1 */
  439. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^167 - 2^7 - 2 */
  440. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^168 - 2^8 - 4 */
  441. felem_mul(tmp, in, ftmp2); felem_reduce(out, tmp); /* 2^168 - 2^8 - 3 */
  442. /* = out */
  443. }
  444. /* Take the square root of a field element: out = in^((p+1)/4) = in^(2^166 - 2^6), valid since p = 3 (mod 4) */
  445. static void felem_sqrt(fslice out[3], const fslice in[3])
  446. {
  447. fslice ftmp[3], ftmp2[3];
  448. uint128_t tmp[5];
  449. unsigned i;
  450. felem_square(tmp, in); felem_reduce(ftmp, tmp); /* 2 */
  451. felem_mul(tmp, in, ftmp); felem_reduce(ftmp, tmp); /* 2^2 - 1 */
  452. /* = ftmp */
  453. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^3 - 2 */
  454. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^4 - 2^2 */
  455. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp2, tmp); /* 2^4 - 1 */
  456. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^5 - 2 */
  457. felem_mul(tmp, ftmp2, in); felem_reduce(ftmp, tmp); /* 2^5 - 1 */
  458. /* = ftmp */
  459. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^6 - 2 */
  460. for (i = 0; i < 4; ++i) /* 2^10 - 2^5 */
  461. {
  462. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  463. }
  464. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp, tmp); /* 2^10 - 1 */
  465. /* = ftmp */
  466. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^11 - 2 */
  467. for (i = 0; i < 9; ++i) /* 2^20 - 2^10 */
  468. {
  469. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  470. }
  471. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^20 - 1 */
  472. /* = ftmp */
  473. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^21 - 2 */
  474. for (i = 0; i < 19; ++i) /* 2^40 - 2^20 */
  475. {
  476. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  477. }
  478. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^40 - 1 */
  479. /* = ftmp */
  480. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^41 - 2 */
  481. for (i = 0; i < 39; ++i) /* 2^80 - 2^40 */
  482. {
  483. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  484. }
  485. felem_mul(tmp, ftmp2, ftmp); felem_reduce(ftmp, tmp); /* 2^80 - 1 */
  486. /* = ftmp */
  487. felem_square(tmp, ftmp); felem_reduce(ftmp2, tmp); /* 2^81 - 2 */
  488. for (i = 0; i < 79; ++i) /* 2^160 - 2^80 */
  489. {
  490. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  491. }
  492. felem_mul(tmp, ftmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^160 - 1 */
  493. for (i = 0; i < 5; ++i) /* 2^165 - 2^5 */
  494. {
  495. felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
  496. }
  497. felem_square(tmp, ftmp2); felem_reduce(out, tmp); /* 2^166 - 2^6 */
  498. /* = out */
  499. }
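/* Sketch (hypothetical helper, compiled out): check a candidate square root
 * by squaring it and comparing minimal representations. Assumes both inputs
 * satisfy the bounds expected by felem_contract, e.g. outputs of
 * felem_reduce. */
#if 0
static fslice felem_sqrt_check(const fslice in[3], const fslice root[3])
{
	uint128_t tmp[5];
	fslice sq[3], a[3], b[3];
	felem_square(tmp, root); felem_reduce(sq, tmp);	/* sq = root^2 */
	felem_contract(a, sq);
	felem_contract(b, in);
	return (a[0] == b[0]) & (a[1] == b[1]) & (a[2] == b[2]);
}
#endif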
  500. /* Copy in constant time:
  501. * if icopy == 1, copy in to out,
  502. * if icopy == 0, copy out to itself. */
  503. static void
  504. copy_conditional(fslice *out, const fslice *in, unsigned len, fslice icopy)
  505. {
  506. unsigned i;
  507. /* icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one */
  508. const fslice copy = -icopy;
  509. for (i = 0; i < len; ++i)
  510. {
  511. const fslice tmp = copy & (in[i] ^ out[i]);
  512. out[i] ^= tmp;
  513. }
  514. }
  515. /* Copy in constant time:
  516. * if isel == 1, copy in2 to out,
  517. * if isel == 0, copy in1 to out. */
  518. static void select_conditional(fslice *out, const fslice *in1, const fslice *in2,
  519. unsigned len, fslice isel)
  520. {
  521. unsigned i;
  522. /* isel is a (64-bit) 0 or 1, so sel is either all-zero or all-one */
  523. const fslice sel = -isel;
  524. for (i = 0; i < len; ++i)
  525. {
  526. const fslice tmp = sel & (in1[i] ^ in2[i]);
  527. out[i] = in1[i] ^ tmp;
  528. }
  529. }
  530. /******************************************************************************/
  531. /* ELLIPTIC CURVE POINT OPERATIONS
  532. *
  533. * Points are represented in Jacobian projective coordinates:
  534. * (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3),
  535. * or to the point at infinity if Z == 0.
  536. *
  537. */
  538. /* Double an elliptic curve point:
  539. * (X', Y', Z') = 2 * (X, Y, Z), where
  540. * X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2
  541. * Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^4
  542. * Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z
  543. * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
  544. * while x_out == y_in is not (maybe this works, but it's not tested). */
  545. static void
  546. point_double(fslice x_out[3], fslice y_out[3], fslice z_out[3],
  547. const fslice x_in[3], const fslice y_in[3], const fslice z_in[3])
  548. {
  549. uint128_t tmp[5], tmp2[5];
  550. fslice delta[3];
  551. fslice gamma[3];
  552. fslice beta[3];
  553. fslice alpha[3];
  554. fslice ftmp[3], ftmp2[3];
  555. memcpy(ftmp, x_in, 3 * sizeof(fslice));
  556. memcpy(ftmp2, x_in, 3 * sizeof(fslice));
  557. /* delta = z^2 */
  558. felem_square(tmp, z_in);
  559. felem_reduce(delta, tmp);
  560. /* gamma = y^2 */
  561. felem_square(tmp, y_in);
  562. felem_reduce(gamma, tmp);
  563. /* beta = x*gamma */
  564. felem_mul(tmp, x_in, gamma);
  565. felem_reduce(beta, tmp);
  566. /* alpha = 3*(x-delta)*(x+delta) */
  567. felem_diff64(ftmp, delta);
  568. /* ftmp[i] < 2^57 + 2^58 + 2 < 2^59 */
  569. felem_sum64(ftmp2, delta);
  570. /* ftmp2[i] < 2^57 + 2^57 = 2^58 */
  571. felem_scalar64(ftmp2, 3);
  572. /* ftmp2[i] < 3 * 2^58 < 2^60 */
  573. felem_mul(tmp, ftmp, ftmp2);
  574. /* tmp[i] < 2^60 * 2^59 * 4 = 2^121 */
  575. felem_reduce(alpha, tmp);
  576. /* x' = alpha^2 - 8*beta */
  577. felem_square(tmp, alpha);
  578. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  579. memcpy(ftmp, beta, 3 * sizeof(fslice));
  580. felem_scalar64(ftmp, 8);
  581. /* ftmp[i] < 8 * 2^57 = 2^60 */
  582. felem_diff_128_64(tmp, ftmp);
  583. /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
  584. felem_reduce(x_out, tmp);
  585. /* z' = (y + z)^2 - gamma - delta */
  586. felem_sum64(delta, gamma);
  587. /* delta[i] < 2^57 + 2^57 = 2^58 */
  588. memcpy(ftmp, y_in, 3 * sizeof(fslice));
  589. felem_sum64(ftmp, z_in);
  590. /* ftmp[i] < 2^57 + 2^57 = 2^58 */
  591. felem_square(tmp, ftmp);
  592. /* tmp[i] < 4 * 2^58 * 2^58 = 2^118 */
  593. felem_diff_128_64(tmp, delta);
  594. /* tmp[i] < 2^118 + 2^64 + 8 < 2^119 */
  595. felem_reduce(z_out, tmp);
  596. /* y' = alpha*(4*beta - x') - 8*gamma^2 */
  597. felem_scalar64(beta, 4);
  598. /* beta[i] < 4 * 2^57 = 2^59 */
  599. felem_diff64(beta, x_out);
  600. /* beta[i] < 2^59 + 2^58 + 2 < 2^60 */
  601. felem_mul(tmp, alpha, beta);
  602. /* tmp[i] < 4 * 2^57 * 2^60 = 2^119 */
  603. felem_square(tmp2, gamma);
  604. /* tmp2[i] < 4 * 2^57 * 2^57 = 2^116 */
  605. felem_scalar128(tmp2, 8);
  606. /* tmp2[i] < 8 * 2^116 = 2^119 */
  607. felem_diff128(tmp, tmp2);
  608. /* tmp[i] < 2^119 + 2^120 < 2^121 */
  609. felem_reduce(y_out, tmp);
  610. }
  611. /* Add two elliptic curve points:
  612. * (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where
  613. * X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 -
  614. * 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2
  615. * Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 - X_3) -
  616. * Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3
  617. * Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2) */
  618. /* This function is not entirely constant-time:
  619. * it includes a branch for checking whether the two input points are equal
  620. * (and neither is the point at infinity).
  621. * This case never happens during single point multiplication,
  622. * so there is no timing leak for ECDH or ECDSA signing. */
  623. static void point_add(fslice x3[3], fslice y3[3], fslice z3[3],
  624. const fslice x1[3], const fslice y1[3], const fslice z1[3],
  625. const fslice x2[3], const fslice y2[3], const fslice z2[3])
  626. {
  627. fslice ftmp[3], ftmp2[3], ftmp3[3], ftmp4[3], ftmp5[3];
  628. fslice xout[3], yout[3], zout[3];
  629. uint128_t tmp[5], tmp2[5];
  630. fslice z1_is_zero, z2_is_zero, x_equal, y_equal;
  631. /* ftmp = z1^2 */
  632. felem_square(tmp, z1);
  633. felem_reduce(ftmp, tmp);
  634. /* ftmp2 = z2^2 */
  635. felem_square(tmp, z2);
  636. felem_reduce(ftmp2, tmp);
  637. /* ftmp3 = z1^3 */
  638. felem_mul(tmp, ftmp, z1);
  639. felem_reduce(ftmp3, tmp);
  640. /* ftmp4 = z2^3 */
  641. felem_mul(tmp, ftmp2, z2);
  642. felem_reduce(ftmp4, tmp);
  643. /* ftmp3 = z1^3*y2 */
  644. felem_mul(tmp, ftmp3, y2);
  645. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  646. /* ftmp4 = z2^3*y1 */
  647. felem_mul(tmp2, ftmp4, y1);
  648. felem_reduce(ftmp4, tmp2);
  649. /* ftmp3 = z1^3*y2 - z2^3*y1 */
  650. felem_diff_128_64(tmp, ftmp4);
  651. /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
  652. felem_reduce(ftmp3, tmp);
  653. /* ftmp = z1^2*x2 */
  654. felem_mul(tmp, ftmp, x2);
  655. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  656. /* ftmp2 = z2^2*x1 */
  657. felem_mul(tmp2, ftmp2, x1);
  658. felem_reduce(ftmp2, tmp2);
  659. /* ftmp = z1^2*x2 - z2^2*x1 */
  660. felem_diff128(tmp, tmp2);
  661. /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
  662. felem_reduce(ftmp, tmp);
  663. /* the formulae are incorrect if the points are equal
  664. * so we check for this and do doubling if this happens */
  665. x_equal = felem_is_zero(ftmp);
  666. y_equal = felem_is_zero(ftmp3);
  667. z1_is_zero = felem_is_zero(z1);
  668. z2_is_zero = felem_is_zero(z2);
  669. /* In affine coordinates, (X_1, Y_1) == (X_2, Y_2) */
  670. if (x_equal && y_equal && !z1_is_zero && !z2_is_zero)
  671. {
  672. point_double(x3, y3, z3, x1, y1, z1);
  673. return;
  674. }
  675. /* ftmp5 = z1*z2 */
  676. felem_mul(tmp, z1, z2);
  677. felem_reduce(ftmp5, tmp);
  678. /* zout = (z1^2*x2 - z2^2*x1)*(z1*z2) */
  679. felem_mul(tmp, ftmp, ftmp5);
  680. felem_reduce(zout, tmp);
  681. /* ftmp = (z1^2*x2 - z2^2*x1)^2 */
  682. memcpy(ftmp5, ftmp, 3 * sizeof(fslice));
  683. felem_square(tmp, ftmp);
  684. felem_reduce(ftmp, tmp);
  685. /* ftmp5 = (z1^2*x2 - z2^2*x1)^3 */
  686. felem_mul(tmp, ftmp, ftmp5);
  687. felem_reduce(ftmp5, tmp);
  688. /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
  689. felem_mul(tmp, ftmp2, ftmp);
  690. felem_reduce(ftmp2, tmp);
  691. /* ftmp4 = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
  692. felem_mul(tmp, ftmp4, ftmp5);
  693. /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
  694. /* tmp2 = (z1^3*y2 - z2^3*y1)^2 */
  695. felem_square(tmp2, ftmp3);
  696. /* tmp2[i] < 4 * 2^57 * 2^57 < 2^116 */
  697. /* tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 */
  698. felem_diff_128_64(tmp2, ftmp5);
  699. /* tmp2[i] < 2^116 + 2^64 + 8 < 2^117 */
  700. /* ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
  701. memcpy(ftmp5, ftmp2, 3 * sizeof(fslice));
  702. felem_scalar64(ftmp5, 2);
  703. /* ftmp5[i] < 2 * 2^57 = 2^58 */
  704. /* xout = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
  705. 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
  706. felem_diff_128_64(tmp2, ftmp5);
  707. /* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */
  708. felem_reduce(xout, tmp2);
  709. /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - xout */
  710. felem_diff64(ftmp2, xout);
  711. /* ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 */
  712. /* tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - xout) */
  713. felem_mul(tmp2, ftmp3, ftmp2);
  714. /* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */
  715. /* yout = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - xout) -
  716. z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
  717. felem_diff128(tmp2, tmp);
  718. /* tmp2[i] < 2^118 + 2^120 < 2^121 */
  719. felem_reduce(yout, tmp2);
  720. /* the result (xout, yout, zout) is incorrect if one of the
  721. * inputs is the point at infinity, so we need to check for this
  722. * separately */
  723. /* if point 1 is at infinity, copy point 2 to output, and vice versa */
  724. copy_conditional(xout, x2, 3, z1_is_zero);
  725. select_conditional(x3, xout, x1, 3, z2_is_zero);
  726. copy_conditional(yout, y2, 3, z1_is_zero);
  727. select_conditional(y3, yout, y1, 3, z2_is_zero);
  728. copy_conditional(zout, z2, 3, z1_is_zero);
  729. select_conditional(z3, zout, z1, 3, z2_is_zero);
  730. }
  731. static void affine(point P)
  732. {
  733. coord z1, z2, xin, yin;
  734. uint128_t tmp[7];
  735. if (felem_is_zero(P[2])) return;
  736. felem_inv(z2, P[2]);
  737. felem_square(tmp, z2); felem_reduce(z1, tmp);
  738. felem_mul(tmp, P[0], z1); felem_reduce(xin, tmp);
  739. felem_contract(P[0], xin);
  740. felem_mul(tmp, z1, z2); felem_reduce(z1, tmp);
  741. felem_mul(tmp, P[1], z1); felem_reduce(yin, tmp);
  742. felem_contract(P[1], yin);
  743. memset(P[2], 0, sizeof(coord));
  744. P[2][0] = 1;
  745. }
  746. static void affine_x(coord out, point P)
  747. {
  748. coord z1, z2, xin;
  749. uint128_t tmp[7];
  750. if (felem_is_zero(P[2])) return;
  751. felem_inv(z2, P[2]);
  752. felem_square(tmp, z2); felem_reduce(z1, tmp);
  753. felem_mul(tmp, P[0], z1); felem_reduce(xin, tmp);
  754. felem_contract(out, xin);
  755. }
  756. /* Multiply the given point by s, using a fixed 4-bit window: precompute 0*P..15*P, then for each nibble of s (most significant first) do four doublings followed by one table addition */
  757. static void point_mul(point out, point in, const felem_bytearray s)
  758. {
  759. int i;
  760. point tmp;
  761. point table[16];
  762. memset(table[0], 0, sizeof(point));
  763. memmove(table[1], in, sizeof(point));
  764. for(i=2; i<16; i+=2) {
  765. point_double(table[i][0], table[i][1], table[i][2],
  766. table[i/2][0], table[i/2][1], table[i/2][2]);
  767. point_add(table[i+1][0], table[i+1][1], table[i+1][2],
  768. table[i][0], table[i][1], table[i][2],
  769. in[0], in[1], in[2]);
  770. }
  771. /*
  772. for(i=0;i<16;++i) {
  773. fprintf(stderr, "table[%d]:\n", i);
  774. affine(table[i]);
  775. dump_point(NULL, table[i]);
  776. }
  777. */
  778. memset(tmp, 0, sizeof(point));
  779. for(i=0;i<21;i++) {
  780. u8 oh = s[20-i] >> 4;
  781. u8 ol = s[20-i] & 0x0f;
  782. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  783. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  784. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  785. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  786. point_add(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2],
  787. table[oh][0], table[oh][1], table[oh][2]);
  788. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  789. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  790. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  791. point_double(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2]);
  792. point_add(tmp[0], tmp[1], tmp[2], tmp[0], tmp[1], tmp[2],
  793. table[ol][0], table[ol][1], table[ol][2]);
  794. }
  795. memmove(out, tmp, sizeof(point));
  796. }
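/* Usage sketch (hypothetical, compiled out): compute R = s*G for the curve
 * generator G whose affine coordinates are stored big-endian in
 * ptwist168_curve_params[3] and [4]. bin21_to_felem() reads little-endian
 * words, so the bytes are reversed into a buffer padded to 24 bytes to keep
 * its 8-byte reads in bounds. The scalar s is a 21-byte little-endian array,
 * as expected by point_mul(). */
#if 0
static void example_point_mul(coord out_x, const felem_bytearray s)
{
	point G, R;
	u8 le[24];
	unsigned i;
	memset(le, 0, sizeof(le));
	for (i = 0; i < 21; ++i) le[i] = ptwist168_curve_params[3][20 - i];
	bin21_to_felem(G[0], le);		/* G.x */
	for (i = 0; i < 21; ++i) le[i] = ptwist168_curve_params[4][20 - i];
	bin21_to_felem(G[1], le);		/* G.y */
	memset(G[2], 0, sizeof(coord));
	G[2][0] = 1;				/* G.z = 1: affine input point */
	point_mul(R, G, s);
	affine(R);				/* back to minimal affine form */
	memcpy(out_x, R[0], sizeof(coord));	/* x coordinate of s*G */
}
#endif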
  797. #if 0
  798. /* Select a point from an array of 16 precomputed point multiples,
  799. * in constant time: for bits = {b_0, b_1, b_2, b_3}, return the point
  800. * pre_comp[8*b_3 + 4*b_2 + 2*b_1 + b_0] */
  801. static void select_point(const fslice bits[4], const fslice pre_comp[16][3][4],
  802. fslice out[12])
  803. {
  804. fslice tmp[5][12];
  805. select_conditional(tmp[0], pre_comp[7][0], pre_comp[15][0], 12, bits[3]);
  806. select_conditional(tmp[1], pre_comp[3][0], pre_comp[11][0], 12, bits[3]);
  807. select_conditional(tmp[2], tmp[1], tmp[0], 12, bits[2]);
  808. select_conditional(tmp[0], pre_comp[5][0], pre_comp[13][0], 12, bits[3]);
  809. select_conditional(tmp[1], pre_comp[1][0], pre_comp[9][0], 12, bits[3]);
  810. select_conditional(tmp[3], tmp[1], tmp[0], 12, bits[2]);
  811. select_conditional(tmp[4], tmp[3], tmp[2], 12, bits[1]);
  812. select_conditional(tmp[0], pre_comp[6][0], pre_comp[14][0], 12, bits[3]);
  813. select_conditional(tmp[1], pre_comp[2][0], pre_comp[10][0], 12, bits[3]);
  814. select_conditional(tmp[2], tmp[1], tmp[0], 12, bits[2]);
  815. select_conditional(tmp[0], pre_comp[4][0], pre_comp[12][0], 12, bits[3]);
  816. select_conditional(tmp[1], pre_comp[0][0], pre_comp[8][0], 12, bits[3]);
  817. select_conditional(tmp[3], tmp[1], tmp[0], 12, bits[2]);
  818. select_conditional(tmp[1], tmp[3], tmp[2], 12, bits[1]);
  819. select_conditional(out, tmp[1], tmp[4], 12, bits[0]);
  820. }
  821. /* Interleaved point multiplication using precomputed point multiples:
  822. * The small point multiples 0*P, 1*P, ..., 15*P are in pre_comp[],
  823. * the scalars in scalars[]. If g_scalar is non-NULL, we also add this multiple
  824. * of the generator, using certain (large) precomputed multiples in g_pre_comp.
  825. * Output point (X, Y, Z) is stored in x_out, y_out, z_out */
  826. static void batch_mul(fslice x_out[4], fslice y_out[4], fslice z_out[4],
  827. const felem_bytearray scalars[], const unsigned num_points, const u8 *g_scalar,
  828. const fslice pre_comp[][16][3][4], const fslice g_pre_comp[16][3][4])
  829. {
  830. unsigned i, j, num;
  831. unsigned gen_mul = (g_scalar != NULL);
  832. fslice nq[12], nqt[12], tmp[12];
  833. fslice bits[4];
  834. u8 byte;
  835. /* set nq to the point at infinity */
  836. memset(nq, 0, 12 * sizeof(fslice));
  837. /* Loop over all scalars msb-to-lsb, 4 bits at a time: for each nibble,
  838. * double 4 times, then add the precomputed point multiples.
  839. * If we are also adding multiples of the generator, then interleave
  840. * these additions with the last 56 doublings. */
  841. for (i = (num_points ? 28 : 7); i > 0; --i)
  842. {
  843. for (j = 0; j < 8; ++j)
  844. {
  845. /* double once */
  846. point_double(nq, nq+4, nq+8, nq, nq+4, nq+8);
  847. /* add multiples of the generator */
  848. if ((gen_mul) && (i <= 7))
  849. {
  850. bits[3] = (g_scalar[i+20] >> (7-j)) & 1;
  851. bits[2] = (g_scalar[i+13] >> (7-j)) & 1;
  852. bits[1] = (g_scalar[i+6] >> (7-j)) & 1;
  853. bits[0] = (g_scalar[i-1] >> (7-j)) & 1;
  854. /* select the point to add, in constant time */
  855. select_point(bits, g_pre_comp, tmp);
  856. memcpy(nqt, nq, 12 * sizeof(fslice));
  857. point_add(nq, nq+4, nq+8, nqt, nqt+4, nqt+8,
  858. tmp, tmp+4, tmp+8);
  859. }
  860. /* do an addition after every 4 doublings */
  861. if (j % 4 == 3)
  862. {
  863. /* loop over all scalars */
  864. for (num = 0; num < num_points; ++num)
  865. {
  866. byte = scalars[num][i-1];
  867. bits[3] = (byte >> (10-j)) & 1;
  868. bits[2] = (byte >> (9-j)) & 1;
  869. bits[1] = (byte >> (8-j)) & 1;
  870. bits[0] = (byte >> (7-j)) & 1;
  871. /* select the point to add */
  872. select_point(bits,
  873. pre_comp[num], tmp);
  874. memcpy(nqt, nq, 12 * sizeof(fslice));
  875. point_add(nq, nq+4, nq+8, nqt, nqt+4,
  876. nqt+8, tmp, tmp+4, tmp+8);
  877. }
  878. }
  879. }
  880. }
  881. memcpy(x_out, nq, 4 * sizeof(fslice));
  882. memcpy(y_out, nq+4, 4 * sizeof(fslice));
  883. memcpy(z_out, nq+8, 4 * sizeof(fslice));
  884. }
  885. /******************************************************************************/
  886. /* FUNCTIONS TO MANAGE PRECOMPUTATION
  887. */
  888. static NISTP224_PRE_COMP *nistp224_pre_comp_new()
  889. {
  890. NISTP224_PRE_COMP *ret = NULL;
  891. ret = (NISTP224_PRE_COMP *)OPENSSL_malloc(sizeof(NISTP224_PRE_COMP));
  892. if (!ret)
  893. {
  894. ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
  895. return ret;
  896. }
  897. memset(ret->g_pre_comp, 0, sizeof(ret->g_pre_comp));
  898. ret->references = 1;
  899. return ret;
  900. }
  901. static void *nistp224_pre_comp_dup(void *src_)
  902. {
  903. NISTP224_PRE_COMP *src = src_;
  904. /* no need to actually copy, these objects never change! */
  905. CRYPTO_add(&src->references, 1, CRYPTO_LOCK_EC_PRE_COMP);
  906. return src_;
  907. }
  908. static void nistp224_pre_comp_free(void *pre_)
  909. {
  910. int i;
  911. NISTP224_PRE_COMP *pre = pre_;
  912. if (!pre)
  913. return;
  914. i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
  915. if (i > 0)
  916. return;
  917. OPENSSL_free(pre);
  918. }
  919. static void nistp224_pre_comp_clear_free(void *pre_)
  920. {
  921. int i;
  922. NISTP224_PRE_COMP *pre = pre_;
  923. if (!pre)
  924. return;
  925. i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
  926. if (i > 0)
  927. return;
  928. OPENSSL_cleanse(pre, sizeof *pre);
  929. OPENSSL_free(pre);
  930. }
  931. /******************************************************************************/
  932. /* OPENSSL EC_METHOD FUNCTIONS
  933. */
  934. int ec_GFp_nistp224_group_init(EC_GROUP *group)
  935. {
  936. int ret;
  937. ret = ec_GFp_simple_group_init(group);
  938. group->a_is_minus3 = 1;
  939. return ret;
  940. }
  941. int ec_GFp_nistp224_group_set_curve(EC_GROUP *group, const BIGNUM *p,
  942. const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
  943. {
  944. int ret = 0;
  945. BN_CTX *new_ctx = NULL;
  946. BIGNUM *curve_p, *curve_a, *curve_b;
  947. if (ctx == NULL)
  948. if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
  949. BN_CTX_start(ctx);
  950. if (((curve_p = BN_CTX_get(ctx)) == NULL) ||
  951. ((curve_a = BN_CTX_get(ctx)) == NULL) ||
  952. ((curve_b = BN_CTX_get(ctx)) == NULL)) goto err;
  953. BN_bin2bn(nistp224_curve_params[0], sizeof(felem_bytearray), curve_p);
  954. BN_bin2bn(nistp224_curve_params[1], sizeof(felem_bytearray), curve_a);
  955. BN_bin2bn(nistp224_curve_params[2], sizeof(felem_bytearray), curve_b);
  956. if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) ||
  957. (BN_cmp(curve_b, b)))
  958. {
  959. ECerr(EC_F_EC_GFP_NISTP224_GROUP_SET_CURVE,
  960. EC_R_WRONG_CURVE_PARAMETERS);
  961. goto err;
  962. }
  963. group->field_mod_func = BN_nist_mod_224;
  964. ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
  965. err:
  966. BN_CTX_end(ctx);
  967. if (new_ctx != NULL)
  968. BN_CTX_free(new_ctx);
  969. return ret;
  970. }
  971. /* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
  972. * (X', Y') = (X/Z^2, Y/Z^3) */
  973. int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group,
  974. const EC_POINT *point, BIGNUM *x, BIGNUM *y, BN_CTX *ctx)
  975. {
  976. fslice z1[4], z2[4], x_in[4], y_in[4], x_out[4], y_out[4];
  977. uint128_t tmp[7];
  978. if (EC_POINT_is_at_infinity(group, point))
  979. {
  980. ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
  981. EC_R_POINT_AT_INFINITY);
  982. return 0;
  983. }
  984. if ((!BN_to_felem(x_in, &point->X)) || (!BN_to_felem(y_in, &point->Y)) ||
  985. (!BN_to_felem(z1, &point->Z))) return 0;
  986. felem_inv(z2, z1);
  987. felem_square(tmp, z2); felem_reduce(z1, tmp);
  988. felem_mul(tmp, x_in, z1); felem_reduce(x_in, tmp);
  989. felem_contract(x_out, x_in);
  990. if (x != NULL)
  991. {
  992. if (!felem_to_BN(x, x_out)) {
  993. ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
  994. ERR_R_BN_LIB);
  995. return 0;
  996. }
  997. }
  998. felem_mul(tmp, z1, z2); felem_reduce(z1, tmp);
  999. felem_mul(tmp, y_in, z1); felem_reduce(y_in, tmp);
  1000. felem_contract(y_out, y_in);
  1001. if (y != NULL)
  1002. {
  1003. if (!felem_to_BN(y, y_out)) {
  1004. ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
  1005. ERR_R_BN_LIB);
  1006. return 0;
  1007. }
  1008. }
  1009. return 1;
  1010. }
  1011. /* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL values
  1012. * Result is stored in r (r can equal one of the inputs). */
  1013. int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r,
  1014. const BIGNUM *scalar, size_t num, const EC_POINT *points[],
  1015. const BIGNUM *scalars[], BN_CTX *ctx)
  1016. {
  1017. int ret = 0;
  1018. int i, j;
  1019. BN_CTX *new_ctx = NULL;
  1020. BIGNUM *x, *y, *z, *tmp_scalar;
  1021. felem_bytearray g_secret;
  1022. felem_bytearray *secrets = NULL;
  1023. fslice (*pre_comp)[16][3][4] = NULL;
  1024. felem_bytearray tmp;
  1025. unsigned num_bytes;
  1026. int have_pre_comp = 0;
  1027. size_t num_points = num;
  1028. fslice x_in[4], y_in[4], z_in[4], x_out[4], y_out[4], z_out[4];
  1029. NISTP224_PRE_COMP *pre = NULL;
  1030. fslice (*g_pre_comp)[3][4] = NULL;
  1031. EC_POINT *generator = NULL;
  1032. const EC_POINT *p = NULL;
  1033. const BIGNUM *p_scalar = NULL;
  1034. if (ctx == NULL)
  1035. if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
  1036. BN_CTX_start(ctx);
  1037. if (((x = BN_CTX_get(ctx)) == NULL) ||
  1038. ((y = BN_CTX_get(ctx)) == NULL) ||
  1039. ((z = BN_CTX_get(ctx)) == NULL) ||
  1040. ((tmp_scalar = BN_CTX_get(ctx)) == NULL))
  1041. goto err;
  1042. if (scalar != NULL)
  1043. {
  1044. pre = EC_EX_DATA_get_data(group->extra_data,
  1045. nistp224_pre_comp_dup, nistp224_pre_comp_free,
  1046. nistp224_pre_comp_clear_free);
  1047. if (pre)
  1048. /* we have precomputation, try to use it */
  1049. g_pre_comp = pre->g_pre_comp;
  1050. else
  1051. /* try to use the standard precomputation */
  1052. g_pre_comp = (fslice (*)[3][4]) gmul;
  1053. generator = EC_POINT_new(group);
  1054. if (generator == NULL)
  1055. goto err;
  1056. /* get the generator from precomputation */
  1057. if (!felem_to_BN(x, g_pre_comp[1][0]) ||
  1058. !felem_to_BN(y, g_pre_comp[1][1]) ||
  1059. !felem_to_BN(z, g_pre_comp[1][2]))
  1060. {
  1061. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
  1062. goto err;
  1063. }
  1064. if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
  1065. generator, x, y, z, ctx))
  1066. goto err;
  1067. if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
  1068. /* precomputation matches generator */
  1069. have_pre_comp = 1;
  1070. else
  1071. /* we don't have valid precomputation:
  1072. * treat the generator as a random point */
  1073. num_points = num_points + 1;
  1074. }
  1075. secrets = OPENSSL_malloc(num_points * sizeof(felem_bytearray));
  1076. pre_comp = OPENSSL_malloc(num_points * 16 * 3 * 4 * sizeof(fslice));
  1077. if ((num_points) && ((secrets == NULL) || (pre_comp == NULL)))
  1078. {
  1079. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_MALLOC_FAILURE);
  1080. goto err;
  1081. }
  1082. /* we treat NULL scalars as 0, and NULL points as points at infinity,
  1083. * i.e., they contribute nothing to the linear combination */
  1084. memset(secrets, 0, num_points * sizeof(felem_bytearray));
  1085. memset(pre_comp, 0, num_points * 16 * 3 * 4 * sizeof(fslice));
  1086. for (i = 0; i < num_points; ++i)
  1087. {
  1088. if (i == num)
  1089. /* the generator */
  1090. {
  1091. p = EC_GROUP_get0_generator(group);
  1092. p_scalar = scalar;
  1093. }
  1094. else
  1095. /* the i^th point */
  1096. {
  1097. p = points[i];
  1098. p_scalar = scalars[i];
  1099. }
  1100. if ((p_scalar != NULL) && (p != NULL))
  1101. {
  1102. num_bytes = BN_num_bytes(p_scalar);
  1103. /* reduce scalar to 0 <= scalar < 2^224 */
  1104. if ((num_bytes > sizeof(felem_bytearray)) || (BN_is_negative(p_scalar)))
  1105. {
  1106. /* this is an unusual input, and we don't guarantee
  1107. * constant-timeness */
  1108. if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx))
  1109. {
  1110. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
  1111. goto err;
  1112. }
  1113. num_bytes = BN_bn2bin(tmp_scalar, tmp);
  1114. }
  1115. else
  1116. BN_bn2bin(p_scalar, tmp);
  1117. flip_endian(secrets[i], tmp, num_bytes);
  1118. /* precompute multiples */
  1119. if ((!BN_to_felem(x_out, &p->X)) ||
  1120. (!BN_to_felem(y_out, &p->Y)) ||
  1121. (!BN_to_felem(z_out, &p->Z))) goto err;
  1122. memcpy(pre_comp[i][1][0], x_out, 4 * sizeof(fslice));
  1123. memcpy(pre_comp[i][1][1], y_out, 4 * sizeof(fslice));
  1124. memcpy(pre_comp[i][1][2], z_out, 4 * sizeof(fslice));
  1125. for (j = 1; j < 8; ++j)
  1126. {
  1127. point_double(pre_comp[i][2*j][0],
  1128. pre_comp[i][2*j][1],
  1129. pre_comp[i][2*j][2],
  1130. pre_comp[i][j][0],
  1131. pre_comp[i][j][1],
  1132. pre_comp[i][j][2]);
  1133. point_add(pre_comp[i][2*j+1][0],
  1134. pre_comp[i][2*j+1][1],
  1135. pre_comp[i][2*j+1][2],
  1136. pre_comp[i][1][0],
  1137. pre_comp[i][1][1],
  1138. pre_comp[i][1][2],
  1139. pre_comp[i][2*j][0],
  1140. pre_comp[i][2*j][1],
  1141. pre_comp[i][2*j][2]);
  1142. }
  1143. }
  1144. }
  1145. /* the scalar for the generator */
  1146. if ((scalar != NULL) && (have_pre_comp))
  1147. {
  1148. memset(g_secret, 0, sizeof g_secret);
  1149. num_bytes = BN_num_bytes(scalar);
  1150. /* reduce scalar to 0 <= scalar < 2^224 */
  1151. if ((num_bytes > sizeof(felem_bytearray)) || (BN_is_negative(scalar)))
  1152. {
  1153. /* this is an unusual input, and we don't guarantee
  1154. * constant-timeness */
  1155. if (!BN_nnmod(tmp_scalar, scalar, &group->order, ctx))
  1156. {
  1157. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
  1158. goto err;
  1159. }
  1160. num_bytes = BN_bn2bin(tmp_scalar, tmp);
  1161. }
  1162. else
  1163. BN_bn2bin(scalar, tmp);
  1164. flip_endian(g_secret, tmp, num_bytes);
  1165. /* do the multiplication with generator precomputation*/
  1166. batch_mul(x_out, y_out, z_out,
  1167. (const felem_bytearray (*)) secrets, num_points,
  1168. g_secret, (const fslice (*)[16][3][4]) pre_comp,
  1169. (const fslice (*)[3][4]) g_pre_comp);
  1170. }
  1171. else
  1172. /* do the multiplication without generator precomputation */
  1173. batch_mul(x_out, y_out, z_out,
  1174. (const felem_bytearray (*)) secrets, num_points,
  1175. NULL, (const fslice (*)[16][3][4]) pre_comp, NULL);
  1176. /* reduce the output to its unique minimal representation */
  1177. felem_contract(x_in, x_out);
  1178. felem_contract(y_in, y_out);
  1179. felem_contract(z_in, z_out);
  1180. if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
  1181. (!felem_to_BN(z, z_in)))
  1182. {
  1183. ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
  1184. goto err;
  1185. }
  1186. ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);
  1187. err:
  1188. BN_CTX_end(ctx);
  1189. if (generator != NULL)
  1190. EC_POINT_free(generator);
  1191. if (new_ctx != NULL)
  1192. BN_CTX_free(new_ctx);
  1193. if (secrets != NULL)
  1194. OPENSSL_free(secrets);
  1195. if (pre_comp != NULL)
  1196. OPENSSL_free(pre_comp);
  1197. return ret;
  1198. }
  1199. int ec_GFp_nistp224_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
  1200. {
  1201. int ret = 0;
  1202. NISTP224_PRE_COMP *pre = NULL;
  1203. int i, j;
  1204. BN_CTX *new_ctx = NULL;
  1205. BIGNUM *x, *y;
  1206. EC_POINT *generator = NULL;
  1207. /* throw away old precomputation */
  1208. EC_EX_DATA_free_data(&group->extra_data, nistp224_pre_comp_dup,
  1209. nistp224_pre_comp_free, nistp224_pre_comp_clear_free);
  1210. if (ctx == NULL)
  1211. if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
  1212. BN_CTX_start(ctx);
  1213. if (((x = BN_CTX_get(ctx)) == NULL) ||
  1214. ((y = BN_CTX_get(ctx)) == NULL))
  1215. goto err;
  1216. /* get the generator */
  1217. if (group->generator == NULL) goto err;
  1218. generator = EC_POINT_new(group);
  1219. if (generator == NULL)
  1220. goto err;
  1221. BN_bin2bn(nistp224_curve_params[3], sizeof (felem_bytearray), x);
  1222. BN_bin2bn(nistp224_curve_params[4], sizeof (felem_bytearray), y);
  1223. if (!EC_POINT_set_affine_coordinates_GFp(group, generator, x, y, ctx))
  1224. goto err;
  1225. if ((pre = nistp224_pre_comp_new()) == NULL)
  1226. goto err;
  1227. /* if the generator is the standard one, use built-in precomputation */
  1228. if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
  1229. {
  1230. memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
  1231. ret = 1;
  1232. goto err;
  1233. }
  1234. if ((!BN_to_felem(pre->g_pre_comp[1][0], &group->generator->X)) ||
  1235. (!BN_to_felem(pre->g_pre_comp[1][1], &group->generator->Y)) ||
  1236. (!BN_to_felem(pre->g_pre_comp[1][2], &group->generator->Z)))
  1237. goto err;
  1238. /* compute 2^56*G, 2^112*G, 2^168*G */
    for (i = 1; i < 5; ++i)
        {
        point_double(pre->g_pre_comp[2*i][0], pre->g_pre_comp[2*i][1],
            pre->g_pre_comp[2*i][2], pre->g_pre_comp[i][0],
            pre->g_pre_comp[i][1], pre->g_pre_comp[i][2]);
        for (j = 0; j < 55; ++j)
            {
            point_double(pre->g_pre_comp[2*i][0],
                pre->g_pre_comp[2*i][1],
                pre->g_pre_comp[2*i][2],
                pre->g_pre_comp[2*i][0],
                pre->g_pre_comp[2*i][1],
                pre->g_pre_comp[2*i][2]);
            }
        }
    /* g_pre_comp[0] is the point at infinity */
    memset(pre->g_pre_comp[0], 0, sizeof(pre->g_pre_comp[0]));
    /* the remaining multiples */
    /* 2^56*G + 2^112*G */
    point_add(pre->g_pre_comp[6][0], pre->g_pre_comp[6][1],
        pre->g_pre_comp[6][2], pre->g_pre_comp[4][0],
        pre->g_pre_comp[4][1], pre->g_pre_comp[4][2],
        pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
        pre->g_pre_comp[2][2]);
    /* 2^56*G + 2^168*G */
    point_add(pre->g_pre_comp[10][0], pre->g_pre_comp[10][1],
        pre->g_pre_comp[10][2], pre->g_pre_comp[8][0],
        pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
        pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
        pre->g_pre_comp[2][2]);
    /* 2^112*G + 2^168*G */
    point_add(pre->g_pre_comp[12][0], pre->g_pre_comp[12][1],
        pre->g_pre_comp[12][2], pre->g_pre_comp[8][0],
        pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
        pre->g_pre_comp[4][0], pre->g_pre_comp[4][1],
        pre->g_pre_comp[4][2]);
    /* 2^56*G + 2^112*G + 2^168*G */
    point_add(pre->g_pre_comp[14][0], pre->g_pre_comp[14][1],
        pre->g_pre_comp[14][2], pre->g_pre_comp[12][0],
        pre->g_pre_comp[12][1], pre->g_pre_comp[12][2],
        pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
        pre->g_pre_comp[2][2]);
    for (i = 1; i < 8; ++i)
        {
        /* odd multiples: add G */
        point_add(pre->g_pre_comp[2*i+1][0], pre->g_pre_comp[2*i+1][1],
            pre->g_pre_comp[2*i+1][2], pre->g_pre_comp[2*i][0],
            pre->g_pre_comp[2*i][1], pre->g_pre_comp[2*i][2],
            pre->g_pre_comp[1][0], pre->g_pre_comp[1][1],
            pre->g_pre_comp[1][2]);
        }
    if (!EC_EX_DATA_set_data(&group->extra_data, pre, nistp224_pre_comp_dup,
        nistp224_pre_comp_free, nistp224_pre_comp_clear_free))
        goto err;
    ret = 1;
    pre = NULL;

err:
    BN_CTX_end(ctx);
    if (generator != NULL)
        EC_POINT_free(generator);
    if (new_ctx != NULL)
        BN_CTX_free(new_ctx);
    if (pre)
        nistp224_pre_comp_free(pre);
    return ret;
    }
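
/* Report whether generator precomputation has already been attached to
 * this group's extra_data. */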
int ec_GFp_nistp224_have_precompute_mult(const EC_GROUP *group)
    {
    if (EC_EX_DATA_get_data(group->extra_data, nistp224_pre_comp_dup,
        nistp224_pre_comp_free, nistp224_pre_comp_clear_free)
        != NULL)
        return 1;
    else
        return 0;
    }
#endif

#ifdef TESTING
#include <sys/time.h>
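
/* Convert a single hex digit to its value; any non-hex character maps to 0. */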
static u8 ctoh(char c)
{
    if (c >= '0' && c <= '9') return c-'0';
    if (c >= 'a' && c <= 'f') return c-'a'+10;
    if (c >= 'A' && c <= 'F') return c-'A'+10;
    return 0;
}

static void arg_to_bytearray(felem_bytearray ba, const char *arg)
{
    /* Convert the arg, which is a string like "1a2637c8" to a byte
     * array like 0xc8 0x37 0x26 0x1a. */
    int size = sizeof(felem_bytearray);
    int arglen = strlen(arg);
    int argsize = (arglen+1)/2;
    const char *argp = arg + arglen;
    u8 *bap = ba;

    memset(ba, 0, size);
    if (size < argsize) {
        fprintf(stderr, "Arg too long: %s\n", arg);
        exit(1);
    }
    while (argp > arg+1) {
        argp -= 2;
        *bap = (ctoh(argp[0])<<4)|(ctoh(argp[1]));
        ++bap;
    }
    if (arglen & 1) {
        /* Handle the stray top nybble */
        argp -= 1;
        *bap = ctoh(argp[0]);
    }
}

static void arg_to_coord(coord c, const char *arg)
{
    felem_bytearray ba;

    arg_to_bytearray(ba, arg);
    /* Now convert it to a coord */
    bin21_to_felem(c, ba);
}
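
/* Stand-alone test driver (compiled only with -DTESTING). It reads the
 * affine coordinates of two points P and Q and a scalar s from the command
 * line as hex strings (most significant digit first), prints 2*P and P+Q,
 * then times niter successive point_mul calls and prints the final point. */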
int main(int argc, char **argv)
{
    point infinity, P, Q, P2, PQ;
    felem_bytearray s;
    int i;
    struct timeval st, et;
    unsigned long el;
    int niter = 1000;

    memset(infinity, 0, sizeof(infinity));
    memset(P, 0, sizeof(P));
    memset(Q, 0, sizeof(Q));
    if (argc != 6) {
        fprintf(stderr, "Usage: %s Px Py Qx Qy s\n", argv[0]);
        exit(1);
    }
    arg_to_coord(P[0], argv[1]);
    arg_to_coord(P[1], argv[2]);
    P[2][0] = 1;
    dump_point("P", P);
    arg_to_coord(Q[0], argv[3]);
    arg_to_coord(Q[1], argv[4]);
    Q[2][0] = 1;
    dump_point("Q", Q);
    arg_to_bytearray(s, argv[5]);
    point_double(P2[0], P2[1], P2[2], P[0], P[1], P[2]);
    affine(P2);
    point_add(PQ[0], PQ[1], PQ[2], P[0], P[1], P[2], Q[0], Q[1], Q[2]);
    affine(PQ);
    dump_point("P2", P2);
    dump_point("PQ", PQ);
    gettimeofday(&st, NULL);
    for (i=0;i<niter;++i) {
        point_mul(P, P, s);
        affine(P);
    }
    gettimeofday(&et, NULL);
    el = (et.tv_sec-st.tv_sec)*1000000 + (et.tv_usec-st.tv_usec);
    fprintf(stderr, "%lu / %d = %lu us\n", el, niter, el/niter);
    dump_point("Ps", P);
    return 0;
}
#endif
/* Figure out whether there's a point with x-coordinate x on the main
 * curve. If not, then there's one on the twist curve. (There are
 * actually two, which are negatives of each other; that doesn't
 * matter.) Multiply that point by seckey and set out to the
 * x-coordinate of the result. */
void ptwist_pointmul(byte out[PTWIST_BYTES], const byte x[PTWIST_BYTES],
    const byte seckey[PTWIST_BYTES])
{
    /* Compute z = x^3 + a*x + b */
    point P, Q;
    coord z, r2, Qx;
    uint128_t tmp[5];
    int ontwist;
    static const coord three = { 3, 0, 0 };
    static const coord b =
        { 0x46d320e01dc7d6, 0x486ebc69bad316, 0x4e355e95cafedd };
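
    /* The main curve is y^2 = x^3 - 3*x + b over the 168-bit prime field;
     * `three` and `b` are field elements stored, like every coord here,
     * as three 56-bit limbs, least significant limb first. */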
    /* Convert the byte array to a coord */
    bin21_to_felem(P[0], x);

    /* Compute z = x^3 - 3*x + b */
    felem_square(tmp, P[0]); felem_reduce(z, tmp);
    felem_diff64(z, three);
    felem_mul(tmp, z, P[0]); felem_reduce(z, tmp);
    felem_sum64(z, b);
    /*
    dump_coord("z", z);
    */
    /* Compute r = P[1] = z ^ ((p+1)/4). This will be a square root of
     * z, if one exists. */
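    /* (p+1)/4 is an integer because p = 3 (mod 4); for such primes
     * z^((p+1)/4) is a square root of z exactly when z is a quadratic
     * residue, and satisfies r^2 = -z when it is not. */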
    felem_sqrt(P[1], z);
    /*
    dump_coord("r", P[1]);
    */
    /* Is P[1] a square root of z? */
    felem_square(tmp, P[1]); felem_diff_128_64(tmp, z); felem_reduce(r2, tmp);
    if (felem_is_zero(r2)) {
        /* P = (x, r) is on the curve */
        ontwist = 0;
    } else {
        /* (-x, r) is on the twist */
        ontwist = 1;
        felem_neg(P[0], P[0]);
    }
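    /* In the non-residue case r^2 = -z, so (-x, r) satisfies
     * y^2 = X^3 - 3*X - b, i.e. it lies on the quadratic twist (the twist
     * by -1, which is a non-residue since p = 3 mod 4). The Jacobian
     * point formulas used by point_mul depend only on a = -3, not on b,
     * so the same multiplication code works on either curve. */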
    /*
    fprintf(stderr, "ontwist = %d\n", ontwist);
    */
    memset(P[2], 0, sizeof(coord));
    P[2][0] = 1;

    /* All set. Now do the point multiplication. */
    /*
    dump_point("P", P);
    for(i=0;i<21;++i) {
        fprintf(stderr, "%02x", seckey[20-i]);
    }
    fprintf(stderr, "\n");
    */
    point_mul(Q, P, seckey);
    affine_x(Qx, Q);
    /*
    dump_point("Q", Q);
    */
    /* Get the x-coordinate of the result, and negate it if we're on the
     * twist. */
    if (ontwist) {
        felem_neg(Qx, Qx);
    }
    /* Convert back to bytes */
    felem_to_bin21(out, Qx);
    /*
    fprintf(stderr, "out: ");
    for(i=0;i<21;++i) {
        fprintf(stderr, "%02x", out[i]);
    }
    fprintf(stderr, "\n");
    */
}