// Unlike fe_frombytes, this variant does NOT ignore the top bit of the input.
internal static void fe_frombytes2(out FieldElement h, byte[] data, int offset)
{
    Int64 h0 = load_4(data, offset);
    Int64 h1 = load_3(data, offset + 4) << 6;
    Int64 h2 = load_3(data, offset + 7) << 5;
    Int64 h3 = load_3(data, offset + 10) << 3;
    Int64 h4 = load_3(data, offset + 13) << 2;
    Int64 h5 = load_4(data, offset + 16);
    Int64 h6 = load_3(data, offset + 20) << 7;
    Int64 h7 = load_3(data, offset + 23) << 5;
    Int64 h8 = load_3(data, offset + 26) << 4;
    Int64 h9 = load_3(data, offset + 29) << 2;

    Int64 carry0, carry1, carry2, carry3, carry4, carry5, carry6, carry7, carry8, carry9;

    carry9 = (h9 + (Int64)(1 << 24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
    carry1 = (h1 + (Int64)(1 << 24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
    carry3 = (h3 + (Int64)(1 << 24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
    carry5 = (h5 + (Int64)(1 << 24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
    carry7 = (h7 + (Int64)(1 << 24)) >> 25; h8 += carry7; h7 -= carry7 << 25;

    carry0 = (h0 + (Int64)(1 << 25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
    carry2 = (h2 + (Int64)(1 << 25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
    carry4 = (h4 + (Int64)(1 << 25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
    carry6 = (h6 + (Int64)(1 << 25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
    carry8 = (h8 + (Int64)(1 << 25)) >> 26; h9 += carry8; h8 -= carry8 << 26;

    h.x0 = (int)h0; h.x1 = (int)h1; h.x2 = (int)h2; h.x3 = (int)h3; h.x4 = (int)h4;
    h.x5 = (int)h5; h.x6 = (int)h6; h.x7 = (int)h7; h.x8 = (int)h8; h.x9 = (int)h9;
}
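/*
 * Illustrative sketch, not part of the original API: recombine the ten limbs
 * into the integer they represent, modulo p = 2^255 - 19. Limb i has weight
 * 2^ceil(25.5*i), i.e. bit offsets 0,26,51,77,102,128,153,179,204,230, which
 * is exactly how fe_frombytes2 above slices its input. The name FeToBigInteger
 * is hypothetical; it is meant only for tests and debugging, not for
 * constant-time production use.
 */
internal static System.Numerics.BigInteger FeToBigInteger(ref FieldElement h)
{
    int[] limbs = { h.x0, h.x1, h.x2, h.x3, h.x4, h.x5, h.x6, h.x7, h.x8, h.x9 };
    int[] offsets = { 0, 26, 51, 77, 102, 128, 153, 179, 204, 230 };
    System.Numerics.BigInteger p = (System.Numerics.BigInteger.One << 255) - 19;
    System.Numerics.BigInteger acc = 0;
    for (int i = 0; i < 10; i++)
    {
        acc += (System.Numerics.BigInteger)limbs[i] << offsets[i]; // limbs may be negative
    }
    acc %= p;
    if (acc < 0) acc += p; // normalize into [0, p)
    return acc;
}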
/*
 * h = f - g
 * Can overlap h with f or g.
 *
 * Preconditions:
 *   |f| bounded by 1.1*2^25, 1.1*2^24, 1.1*2^25, 1.1*2^24, etc.
 *   |g| bounded by 1.1*2^25, 1.1*2^24, 1.1*2^25, 1.1*2^24, etc.
 *
 * Postconditions:
 *   |h| bounded by 1.1*2^26, 1.1*2^25, 1.1*2^26, 1.1*2^25, etc.
 */
internal static void fe_sub(out FieldElement h, ref FieldElement f, ref FieldElement g)
{
    Int32 f0 = f.x0; Int32 f1 = f.x1; Int32 f2 = f.x2; Int32 f3 = f.x3; Int32 f4 = f.x4;
    Int32 f5 = f.x5; Int32 f6 = f.x6; Int32 f7 = f.x7; Int32 f8 = f.x8; Int32 f9 = f.x9;
    Int32 g0 = g.x0; Int32 g1 = g.x1; Int32 g2 = g.x2; Int32 g3 = g.x3; Int32 g4 = g.x4;
    Int32 g5 = g.x5; Int32 g6 = g.x6; Int32 g7 = g.x7; Int32 g8 = g.x8; Int32 g9 = g.x9;
    Int32 h0 = f0 - g0; Int32 h1 = f1 - g1; Int32 h2 = f2 - g2; Int32 h3 = f3 - g3; Int32 h4 = f4 - g4;
    Int32 h5 = f5 - g5; Int32 h6 = f6 - g6; Int32 h7 = f7 - g7; Int32 h8 = f8 - g8; Int32 h9 = f9 - g9;
    h.x0 = h0; h.x1 = h1; h.x2 = h2; h.x3 = h3; h.x4 = h4;
    h.x5 = h5; h.x6 = h6; h.x7 = h7; h.x8 = h8; h.x9 = h9;
}
/*
 * h = f + g
 * Can overlap h with f or g.
 *
 * Preconditions:
 *   |f| bounded by 1.1*2^25, 1.1*2^24, 1.1*2^25, 1.1*2^24, etc.
 *   |g| bounded by 1.1*2^25, 1.1*2^24, 1.1*2^25, 1.1*2^24, etc.
 *
 * Postconditions:
 *   |h| bounded by 1.1*2^26, 1.1*2^25, 1.1*2^26, 1.1*2^25, etc.
 */
internal static void fe_add(out FieldElement h, ref FieldElement f, ref FieldElement g)
{
    Int32 f0 = f.x0; Int32 f1 = f.x1; Int32 f2 = f.x2; Int32 f3 = f.x3; Int32 f4 = f.x4;
    Int32 f5 = f.x5; Int32 f6 = f.x6; Int32 f7 = f.x7; Int32 f8 = f.x8; Int32 f9 = f.x9;
    Int32 g0 = g.x0; Int32 g1 = g.x1; Int32 g2 = g.x2; Int32 g3 = g.x3; Int32 g4 = g.x4;
    Int32 g5 = g.x5; Int32 g6 = g.x6; Int32 g7 = g.x7; Int32 g8 = g.x8; Int32 g9 = g.x9;
    Int32 h0 = f0 + g0; Int32 h1 = f1 + g1; Int32 h2 = f2 + g2; Int32 h3 = f3 + g3; Int32 h4 = f4 + g4;
    Int32 h5 = f5 + g5; Int32 h6 = f6 + g6; Int32 h7 = f7 + g7; Int32 h8 = f8 + g8; Int32 h9 = f9 + g9;
    h.x0 = h0; h.x1 = h1; h.x2 = h2; h.x3 = h3; h.x4 = h4;
    h.x5 = h5; h.x6 = h6; h.x7 = h7; h.x8 = h8; h.x9 = h9;
}
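/*
 * fe_add and fe_sub are carry-free: the limbs are signed, and the stated
 * postcondition bound (~1.1*2^26 per limb) stays far below the int32 range,
 * so carry propagation can be deferred to the next fe_mul/fe_sq.
 * Consistency-check sketch, reusing the hypothetical FeToBigInteger above;
 * the name CheckAddSub is illustrative only:
 */
internal static void CheckAddSub(ref FieldElement f, ref FieldElement g)
{
    System.Numerics.BigInteger p = (System.Numerics.BigInteger.One << 255) - 19;
    FieldElement sum, diff;
    fe_add(out sum, ref f, ref g);
    fe_sub(out diff, ref f, ref g);
    System.Numerics.BigInteger fi = FeToBigInteger(ref f);
    System.Numerics.BigInteger gi = FeToBigInteger(ref g);
    System.Diagnostics.Debug.Assert(FeToBigInteger(ref sum) == (fi + gi) % p);
    System.Diagnostics.Debug.Assert(FeToBigInteger(ref diff) == ((fi - gi) % p + p) % p);
}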
/*
 * return 0 if f == 0
 * return 1 if f != 0
 *
 * Preconditions:
 *   |f| bounded by 1.1*2^26, 1.1*2^25, 1.1*2^26, 1.1*2^25, etc.
 */
// ToDo: Discuss this with upstream.
// The original header comment claimed "return 1 if f == 0, return 0 if f != 0",
// while the original C code returned 0 if f == 0 and -1 if f != 0.
// This implementation actually returns 0 if f == 0 and 1 if f != 0,
// and the header above has been corrected to match.
internal static int fe_isnonzero(ref FieldElement f)
{
    FieldElement fr;
    fe_reduce(out fr, ref f);
    int differentBits = 0;
    differentBits |= fr.x0; differentBits |= fr.x1; differentBits |= fr.x2; differentBits |= fr.x3; differentBits |= fr.x4;
    differentBits |= fr.x5; differentBits |= fr.x6; differentBits |= fr.x7; differentBits |= fr.x8; differentBits |= fr.x9;
    return (int)((unchecked((uint)differentBits - 1) >> 31) ^ 1);
}
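/*
 * Worked illustration of the branch-free test above (hypothetical helper):
 * after fe_reduce all limbs are non-negative, so their OR is a non-negative
 * int x, and (uint)(x - 1) >> 31 is 1 exactly when x == 0. XOR with 1 then
 * yields the "is nonzero" bit without any secret-dependent branch.
 */
internal static int IsNonzeroBit(int x) // precondition: x >= 0
{
    return (int)((unchecked((uint)x - 1) >> 31) ^ 1); // 0 -> 0, positive -> 1
}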
/*
 * h = f * g
 * Can overlap h with f or g.
 *
 * Preconditions:
 *   |f| bounded by 1.65*2^26, 1.65*2^25, 1.65*2^26, 1.65*2^25, etc.
 *   |g| bounded by 1.65*2^26, 1.65*2^25, 1.65*2^26, 1.65*2^25, etc.
 *
 * Postconditions:
 *   |h| bounded by 1.01*2^25, 1.01*2^24, 1.01*2^25, 1.01*2^24, etc.
 */
/*
 * Notes on implementation strategy:
 *
 * Using schoolbook multiplication.
 * Karatsuba would save a little in some cost models.
 *
 * Most multiplications by 2 and 19 are 32-bit precomputations;
 * cheaper than 64-bit postcomputations.
 *
 * There is one remaining multiplication by 19 in the carry chain;
 * one *19 precomputation can be merged into this,
 * but the resulting data flow is considerably less clean.
 *
 * There are 12 carries below.
 * 10 of them are 2-way parallelizable and vectorizable.
 * Can get away with 11 carries, but then data flow is much deeper.
 *
 * With tighter constraints on inputs can squeeze carries into int32.
 */
internal static void fe_mul(out FieldElement h, ref FieldElement f, ref FieldElement g)
{
    Int32 f0 = f.x0; Int32 f1 = f.x1; Int32 f2 = f.x2; Int32 f3 = f.x3; Int32 f4 = f.x4;
    Int32 f5 = f.x5; Int32 f6 = f.x6; Int32 f7 = f.x7; Int32 f8 = f.x8; Int32 f9 = f.x9;
    Int32 g0 = g.x0; Int32 g1 = g.x1; Int32 g2 = g.x2; Int32 g3 = g.x3; Int32 g4 = g.x4;
    Int32 g5 = g.x5; Int32 g6 = g.x6; Int32 g7 = g.x7; Int32 g8 = g.x8; Int32 g9 = g.x9;

    Int32 g1_19 = 19 * g1; /* 1.959375*2^29 */
    Int32 g2_19 = 19 * g2; /* 1.959375*2^30; still ok */
    Int32 g3_19 = 19 * g3;
    Int32 g4_19 = 19 * g4;
    Int32 g5_19 = 19 * g5;
    Int32 g6_19 = 19 * g6;
    Int32 g7_19 = 19 * g7;
    Int32 g8_19 = 19 * g8;
    Int32 g9_19 = 19 * g9;
    Int32 f1_2 = 2 * f1;
    Int32 f3_2 = 2 * f3;
    Int32 f5_2 = 2 * f5;
    Int32 f7_2 = 2 * f7;
    Int32 f9_2 = 2 * f9;

    Int64 f0g0 = f0 * (Int64)g0; Int64 f0g1 = f0 * (Int64)g1; Int64 f0g2 = f0 * (Int64)g2; Int64 f0g3 = f0 * (Int64)g3; Int64 f0g4 = f0 * (Int64)g4;
    Int64 f0g5 = f0 * (Int64)g5; Int64 f0g6 = f0 * (Int64)g6; Int64 f0g7 = f0 * (Int64)g7; Int64 f0g8 = f0 * (Int64)g8; Int64 f0g9 = f0 * (Int64)g9;
    Int64 f1g0 = f1 * (Int64)g0; Int64 f1g1_2 = f1_2 * (Int64)g1; Int64 f1g2 = f1 * (Int64)g2; Int64 f1g3_2 = f1_2 * (Int64)g3; Int64 f1g4 = f1 * (Int64)g4;
    Int64 f1g5_2 = f1_2 * (Int64)g5; Int64 f1g6 = f1 * (Int64)g6; Int64 f1g7_2 = f1_2 * (Int64)g7; Int64 f1g8 = f1 * (Int64)g8; Int64 f1g9_38 = f1_2 * (Int64)g9_19;
    Int64 f2g0 = f2 * (Int64)g0; Int64 f2g1 = f2 * (Int64)g1; Int64 f2g2 = f2 * (Int64)g2; Int64 f2g3 = f2 * (Int64)g3; Int64 f2g4 = f2 * (Int64)g4;
    Int64 f2g5 = f2 * (Int64)g5; Int64 f2g6 = f2 * (Int64)g6; Int64 f2g7 = f2 * (Int64)g7; Int64 f2g8_19 = f2 * (Int64)g8_19; Int64 f2g9_19 = f2 * (Int64)g9_19;
    Int64 f3g0 = f3 * (Int64)g0; Int64 f3g1_2 = f3_2 * (Int64)g1; Int64 f3g2 = f3 * (Int64)g2; Int64 f3g3_2 = f3_2 * (Int64)g3; Int64 f3g4 = f3 * (Int64)g4;
    Int64 f3g5_2 = f3_2 * (Int64)g5; Int64 f3g6 = f3 * (Int64)g6; Int64 f3g7_38 = f3_2 * (Int64)g7_19; Int64 f3g8_19 = f3 * (Int64)g8_19; Int64 f3g9_38 = f3_2 * (Int64)g9_19;
    Int64 f4g0 = f4 * (Int64)g0; Int64 f4g1 = f4 * (Int64)g1; Int64 f4g2 = f4 * (Int64)g2; Int64 f4g3 = f4 * (Int64)g3; Int64 f4g4 = f4 * (Int64)g4;
    Int64 f4g5 = f4 * (Int64)g5; Int64 f4g6_19 = f4 * (Int64)g6_19; Int64 f4g7_19 = f4 * (Int64)g7_19; Int64 f4g8_19 = f4 * (Int64)g8_19; Int64 f4g9_19 = f4 * (Int64)g9_19;
    Int64 f5g0 = f5 * (Int64)g0; Int64 f5g1_2 = f5_2 * (Int64)g1; Int64 f5g2 = f5 * (Int64)g2; Int64 f5g3_2 = f5_2 * (Int64)g3; Int64 f5g4 = f5 * (Int64)g4;
    Int64 f5g5_38 = f5_2 * (Int64)g5_19; Int64 f5g6_19 = f5 * (Int64)g6_19; Int64 f5g7_38 = f5_2 * (Int64)g7_19; Int64 f5g8_19 = f5 * (Int64)g8_19; Int64 f5g9_38 = f5_2 * (Int64)g9_19;
    Int64 f6g0 = f6 * (Int64)g0; Int64 f6g1 = f6 * (Int64)g1; Int64 f6g2 = f6 * (Int64)g2; Int64 f6g3 = f6 * (Int64)g3; Int64 f6g4_19 = f6 * (Int64)g4_19;
    Int64 f6g5_19 = f6 * (Int64)g5_19; Int64 f6g6_19 = f6 * (Int64)g6_19; Int64 f6g7_19 = f6 * (Int64)g7_19; Int64 f6g8_19 = f6 * (Int64)g8_19; Int64 f6g9_19 = f6 * (Int64)g9_19;
    Int64 f7g0 = f7 * (Int64)g0; Int64 f7g1_2 = f7_2 * (Int64)g1; Int64 f7g2 = f7 * (Int64)g2; Int64 f7g3_38 = f7_2 * (Int64)g3_19; Int64 f7g4_19 = f7 * (Int64)g4_19;
    Int64 f7g5_38 = f7_2 * (Int64)g5_19; Int64 f7g6_19 = f7 * (Int64)g6_19; Int64 f7g7_38 = f7_2 * (Int64)g7_19; Int64 f7g8_19 = f7 * (Int64)g8_19; Int64 f7g9_38 = f7_2 * (Int64)g9_19;
    Int64 f8g0 = f8 * (Int64)g0; Int64 f8g1 = f8 * (Int64)g1; Int64 f8g2_19 = f8 * (Int64)g2_19; Int64 f8g3_19 = f8 * (Int64)g3_19; Int64 f8g4_19 = f8 * (Int64)g4_19;
    Int64 f8g5_19 = f8 * (Int64)g5_19; Int64 f8g6_19 = f8 * (Int64)g6_19; Int64 f8g7_19 = f8 * (Int64)g7_19; Int64 f8g8_19 = f8 * (Int64)g8_19; Int64 f8g9_19 = f8 * (Int64)g9_19;
    Int64 f9g0 = f9 * (Int64)g0; Int64 f9g1_38 = f9_2 * (Int64)g1_19; Int64 f9g2_19 = f9 * (Int64)g2_19; Int64 f9g3_38 = f9_2 * (Int64)g3_19; Int64 f9g4_19 = f9 * (Int64)g4_19;
    Int64 f9g5_38 = f9_2 * (Int64)g5_19; Int64 f9g6_19 = f9 * (Int64)g6_19; Int64 f9g7_38 = f9_2 * (Int64)g7_19; Int64 f9g8_19 = f9 * (Int64)g8_19; Int64 f9g9_38 = f9_2 * (Int64)g9_19;

    Int64 h0 = f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38;
    Int64 h1 = f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19;
    Int64 h2 = f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38;
    Int64 h3 = f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19;
    Int64 h4 = f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38;
    Int64 h5 = f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19;
    Int64 h6 = f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38;
    Int64 h7 = f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19;
    Int64 h8 = f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38;
    Int64 h9 = f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0;

    Int64 carry0, carry1, carry2, carry3, carry4, carry5, carry6, carry7, carry8, carry9;

    /* |h0| <= (1.65*1.65*2^52*(1+19+19+19+19) + 1.65*1.65*2^50*(38+38+38+38+38)),
       i.e. |h0| <= 1.4*2^60; narrower ranges for h2, h4, h6, h8 */
    /* |h1| <= (1.65*1.65*2^51*(1+1+19+19+19+19+19+19+19+19)),
       i.e. |h1| <= 1.7*2^59; narrower ranges for h3, h5, h7, h9 */

    carry0 = (h0 + (Int64)(1 << 25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
    carry4 = (h4 + (Int64)(1 << 25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
    /* |h0| <= 2^25; |h4| <= 2^25; |h1| <= 1.71*2^59; |h5| <= 1.71*2^59 */

    carry1 = (h1 + (Int64)(1 << 24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
    carry5 = (h5 + (Int64)(1 << 24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
    /* |h1| <= 2^24; |h5| <= 2^24; from now on they fit into int32 */
    /* |h2| <= 1.41*2^60; |h6| <= 1.41*2^60 */

    carry2 = (h2 + (Int64)(1 << 25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
    carry6 = (h6 + (Int64)(1 << 25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
    /* |h2| <= 2^25; |h6| <= 2^25; from now on they fit into int32 unchanged */
    /* |h3| <= 1.71*2^59; |h7| <= 1.71*2^59 */

    carry3 = (h3 + (Int64)(1 << 24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
    carry7 = (h7 + (Int64)(1 << 24)) >> 25; h8 += carry7; h7 -= carry7 << 25;
    /* |h3| <= 2^24; |h7| <= 2^24; from now on they fit into int32 unchanged */
    /* |h4| <= 1.72*2^34; |h8| <= 1.41*2^60 */

    carry4 = (h4 + (Int64)(1 << 25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
    carry8 = (h8 + (Int64)(1 << 25)) >> 26; h9 += carry8; h8 -= carry8 << 26;
    /* |h4| <= 2^25; |h8| <= 2^25; from now on they fit into int32 unchanged */
    /* |h5| <= 1.01*2^24; |h9| <= 1.71*2^59 */

    carry9 = (h9 + (Int64)(1 << 24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
    /* |h9| <= 2^24; from now on fits into int32 unchanged */
    /* |h0| <= 1.1*2^39 */

    carry0 = (h0 + (Int64)(1 << 25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
    /* |h0| <= 2^25; from now on fits into int32 unchanged */
    /* |h1| <= 1.01*2^24 */

    h.x0 = (Int32)h0; h.x1 = (Int32)h1; h.x2 = (Int32)h2; h.x3 = (Int32)h3; h.x4 = (Int32)h4;
    h.x5 = (Int32)h5; h.x6 = (Int32)h6; h.x7 = (Int32)h7; h.x8 = (Int32)h8; h.x9 = (Int32)h9;
}
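/*
 * Why the *19 and *38 precomputations (editor's note): limb i has weight
 * 2^ceil(25.5*i), so a cross term f_i*g_j with i+j >= 10 carries a factor of
 * 2^255 or 2^256 relative to its target limb, and 2^255 = 19 (mod 2^255 - 19);
 * the term therefore folds back into limb i+j-10 multiplied by 19 (or by 38
 * when both i and j are odd). Consistency-check sketch, reusing the
 * hypothetical FeToBigInteger above; the name CheckMul is illustrative only:
 */
internal static void CheckMul(ref FieldElement f, ref FieldElement g)
{
    System.Numerics.BigInteger p = (System.Numerics.BigInteger.One << 255) - 19;
    FieldElement h;
    fe_mul(out h, ref f, ref g);
    System.Numerics.BigInteger expected = FeToBigInteger(ref f) * FeToBigInteger(ref g) % p;
    System.Diagnostics.Debug.Assert(FeToBigInteger(ref h) == expected);
}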
public static void fe_1(out FieldElement h)
{
    h = default(FieldElement);
    h.x0 = 1;
}
/*
 * h = f * f
 * Can overlap h with f.
 *
 * Preconditions:
 *   |f| bounded by 1.65*2^26, 1.65*2^25, 1.65*2^26, 1.65*2^25, etc.
 *
 * Postconditions:
 *   |h| bounded by 1.01*2^25, 1.01*2^24, 1.01*2^25, 1.01*2^24, etc.
 */
/* See fe_mul.c for discussion of implementation strategy. */
internal static void fe_sq(out FieldElement h, ref FieldElement f)
{
    Int32 f0 = f.x0; Int32 f1 = f.x1; Int32 f2 = f.x2; Int32 f3 = f.x3; Int32 f4 = f.x4;
    Int32 f5 = f.x5; Int32 f6 = f.x6; Int32 f7 = f.x7; Int32 f8 = f.x8; Int32 f9 = f.x9;

    Int32 f0_2 = 2 * f0; Int32 f1_2 = 2 * f1; Int32 f2_2 = 2 * f2; Int32 f3_2 = 2 * f3;
    Int32 f4_2 = 2 * f4; Int32 f5_2 = 2 * f5; Int32 f6_2 = 2 * f6; Int32 f7_2 = 2 * f7;
    Int32 f5_38 = 38 * f5; /* 1.959375*2^30 */
    Int32 f6_19 = 19 * f6; /* 1.959375*2^30 */
    Int32 f7_38 = 38 * f7; /* 1.959375*2^30 */
    Int32 f8_19 = 19 * f8; /* 1.959375*2^30 */
    Int32 f9_38 = 38 * f9; /* 1.959375*2^30 */

    Int64 f0f0 = f0 * (Int64)f0; Int64 f0f1_2 = f0_2 * (Int64)f1; Int64 f0f2_2 = f0_2 * (Int64)f2;
    Int64 f0f3_2 = f0_2 * (Int64)f3; Int64 f0f4_2 = f0_2 * (Int64)f4; Int64 f0f5_2 = f0_2 * (Int64)f5;
    Int64 f0f6_2 = f0_2 * (Int64)f6; Int64 f0f7_2 = f0_2 * (Int64)f7; Int64 f0f8_2 = f0_2 * (Int64)f8;
    Int64 f0f9_2 = f0_2 * (Int64)f9;
    Int64 f1f1_2 = f1_2 * (Int64)f1; Int64 f1f2_2 = f1_2 * (Int64)f2; Int64 f1f3_4 = f1_2 * (Int64)f3_2;
    Int64 f1f4_2 = f1_2 * (Int64)f4; Int64 f1f5_4 = f1_2 * (Int64)f5_2; Int64 f1f6_2 = f1_2 * (Int64)f6;
    Int64 f1f7_4 = f1_2 * (Int64)f7_2; Int64 f1f8_2 = f1_2 * (Int64)f8; Int64 f1f9_76 = f1_2 * (Int64)f9_38;
    Int64 f2f2 = f2 * (Int64)f2; Int64 f2f3_2 = f2_2 * (Int64)f3; Int64 f2f4_2 = f2_2 * (Int64)f4;
    Int64 f2f5_2 = f2_2 * (Int64)f5; Int64 f2f6_2 = f2_2 * (Int64)f6; Int64 f2f7_2 = f2_2 * (Int64)f7;
    Int64 f2f8_38 = f2_2 * (Int64)f8_19; Int64 f2f9_38 = f2 * (Int64)f9_38;
    Int64 f3f3_2 = f3_2 * (Int64)f3; Int64 f3f4_2 = f3_2 * (Int64)f4; Int64 f3f5_4 = f3_2 * (Int64)f5_2;
    Int64 f3f6_2 = f3_2 * (Int64)f6; Int64 f3f7_76 = f3_2 * (Int64)f7_38; Int64 f3f8_38 = f3_2 * (Int64)f8_19;
    Int64 f3f9_76 = f3_2 * (Int64)f9_38;
    Int64 f4f4 = f4 * (Int64)f4; Int64 f4f5_2 = f4_2 * (Int64)f5; Int64 f4f6_38 = f4_2 * (Int64)f6_19;
    Int64 f4f7_38 = f4 * (Int64)f7_38; Int64 f4f8_38 = f4_2 * (Int64)f8_19; Int64 f4f9_38 = f4 * (Int64)f9_38;
    Int64 f5f5_38 = f5 * (Int64)f5_38; Int64 f5f6_38 = f5_2 * (Int64)f6_19; Int64 f5f7_76 = f5_2 * (Int64)f7_38;
    Int64 f5f8_38 = f5_2 * (Int64)f8_19; Int64 f5f9_76 = f5_2 * (Int64)f9_38;
    Int64 f6f6_19 = f6 * (Int64)f6_19; Int64 f6f7_38 = f6 * (Int64)f7_38; Int64 f6f8_38 = f6_2 * (Int64)f8_19;
    Int64 f6f9_38 = f6 * (Int64)f9_38;
    Int64 f7f7_38 = f7 * (Int64)f7_38; Int64 f7f8_38 = f7_2 * (Int64)f8_19; Int64 f7f9_76 = f7_2 * (Int64)f9_38;
    Int64 f8f8_19 = f8 * (Int64)f8_19; Int64 f8f9_38 = f8 * (Int64)f9_38;
    Int64 f9f9_38 = f9 * (Int64)f9_38;

    Int64 h0 = f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38;
    Int64 h1 = f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38;
    Int64 h2 = f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19;
    Int64 h3 = f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38;
    Int64 h4 = f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38;
    Int64 h5 = f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38;
    Int64 h6 = f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19;
    Int64 h7 = f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38;
    Int64 h8 = f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38;
    Int64 h9 = f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2;

    Int64 carry0, carry1, carry2, carry3, carry4, carry5, carry6, carry7, carry8, carry9;

    carry0 = (h0 + (Int64)(1 << 25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
    carry4 = (h4 + (Int64)(1 << 25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
    carry1 = (h1 + (Int64)(1 << 24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
    carry5 = (h5 + (Int64)(1 << 24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
    carry2 = (h2 + (Int64)(1 << 25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
    carry6 = (h6 + (Int64)(1 << 25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
    carry3 = (h3 + (Int64)(1 << 24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
    carry7 = (h7 + (Int64)(1 << 24)) >> 25; h8 += carry7; h7 -= carry7 << 25;
    carry4 = (h4 + (Int64)(1 << 25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
    carry8 = (h8 + (Int64)(1 << 25)) >> 26; h9 += carry8; h8 -= carry8 << 26;
    carry9 = (h9 + (Int64)(1 << 24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
    carry0 = (h0 + (Int64)(1 << 25)) >> 26; h1 += carry0; h0 -= carry0 << 26;

    h.x0 = (Int32)h0; h.x1 = (Int32)h1; h.x2 = (Int32)h2; h.x3 = (Int32)h3; h.x4 = (Int32)h4;
    h.x5 = (Int32)h5; h.x6 = (Int32)h6; h.x7 = (Int32)h7; h.x8 = (Int32)h8; h.x9 = (Int32)h9;
}
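/*
 * fe_sq(h, f) equals fe_mul(h, f, f) but needs roughly half the 32x32->64
 * multiplications, since each cross term f_i*f_j (i != j) occurs twice and is
 * computed once. Addition chains such as fe_pow22523 below consist almost
 * entirely of such squarings; a hypothetical helper in that style (fe_sq
 * supports overlap, so passing h for both arguments is safe):
 */
internal static void fe_sq_repeated(out FieldElement h, ref FieldElement f, int count)
{
    fe_sq(out h, ref f);          // h = f^2
    for (int i = 1; i < count; ++i)
    {
        fe_sq(out h, ref h);      // h = f^(2^count) after the loop
    }
}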
/*
 * Replace (f,g) with (g,f) if b == 1;
 * replace (f,g) with (f,g) if b == 0.
 *
 * Preconditions: b in {0,1}.
 */
public static void fe_cswap(ref FieldElement f, ref FieldElement g, uint b)
{
    Int32 f0 = f.x0; Int32 f1 = f.x1; Int32 f2 = f.x2; Int32 f3 = f.x3; Int32 f4 = f.x4;
    Int32 f5 = f.x5; Int32 f6 = f.x6; Int32 f7 = f.x7; Int32 f8 = f.x8; Int32 f9 = f.x9;
    Int32 g0 = g.x0; Int32 g1 = g.x1; Int32 g2 = g.x2; Int32 g3 = g.x3; Int32 g4 = g.x4;
    Int32 g5 = g.x5; Int32 g6 = g.x6; Int32 g7 = g.x7; Int32 g8 = g.x8; Int32 g9 = g.x9;
    Int32 x0 = f0 ^ g0; Int32 x1 = f1 ^ g1; Int32 x2 = f2 ^ g2; Int32 x3 = f3 ^ g3; Int32 x4 = f4 ^ g4;
    Int32 x5 = f5 ^ g5; Int32 x6 = f6 ^ g6; Int32 x7 = f7 ^ g7; Int32 x8 = f8 ^ g8; Int32 x9 = f9 ^ g9;
    int negb = unchecked((int)-b);
    x0 &= negb; x1 &= negb; x2 &= negb; x3 &= negb; x4 &= negb;
    x5 &= negb; x6 &= negb; x7 &= negb; x8 &= negb; x9 &= negb;
    f.x0 = f0 ^ x0; f.x1 = f1 ^ x1; f.x2 = f2 ^ x2; f.x3 = f3 ^ x3; f.x4 = f4 ^ x4;
    f.x5 = f5 ^ x5; f.x6 = f6 ^ x6; f.x7 = f7 ^ x7; f.x8 = f8 ^ x8; f.x9 = f9 ^ x9;
    g.x0 = g0 ^ x0; g.x1 = g1 ^ x1; g.x2 = g2 ^ x2; g.x3 = g3 ^ x3; g.x4 = g4 ^ x4;
    g.x5 = g5 ^ x5; g.x6 = g6 ^ x6; g.x7 = g7 ^ x7; g.x8 = g8 ^ x8; g.x9 = g9 ^ x9;
}
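/*
 * The swap above is branch-free: -b is 0 when b == 0 and all ones when b == 1,
 * so masking the XOR difference either cancels or applies the swap with no
 * secret-dependent branch or memory access. Minimal integer-level
 * illustration (hypothetical helper, same trick on a single pair of ints):
 */
internal static void CswapInt(ref int f, ref int g, uint b)
{
    int mask = unchecked((int)-b); // 0 if b == 0, -1 (all ones) if b == 1
    int x = (f ^ g) & mask;        // 0, or the full difference f^g
    f ^= x;                        // f' = (b == 1) ? g : f
    g ^= x;                        // g' = (b == 1) ? f : g
}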
internal static void fe_reduce(out FieldElement hr, ref FieldElement h)
{
    Int32 h0 = h.x0; Int32 h1 = h.x1; Int32 h2 = h.x2; Int32 h3 = h.x3; Int32 h4 = h.x4;
    Int32 h5 = h.x5; Int32 h6 = h.x6; Int32 h7 = h.x7; Int32 h8 = h.x8; Int32 h9 = h.x9;

    Int32 q;
    Int32 carry0, carry1, carry2, carry3, carry4, carry5, carry6, carry7, carry8, carry9;

    q = (19 * h9 + (((Int32)1) << 24)) >> 25;
    q = (h0 + q) >> 26;
    q = (h1 + q) >> 25;
    q = (h2 + q) >> 26;
    q = (h3 + q) >> 25;
    q = (h4 + q) >> 26;
    q = (h5 + q) >> 25;
    q = (h6 + q) >> 26;
    q = (h7 + q) >> 25;
    q = (h8 + q) >> 26;
    q = (h9 + q) >> 25;

    /* Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. */
    h0 += 19 * q;
    /* Goal: Output h-2^255 q, which is between 0 and 2^255-20. */

    carry0 = h0 >> 26; h1 += carry0; h0 -= carry0 << 26;
    carry1 = h1 >> 25; h2 += carry1; h1 -= carry1 << 25;
    carry2 = h2 >> 26; h3 += carry2; h2 -= carry2 << 26;
    carry3 = h3 >> 25; h4 += carry3; h3 -= carry3 << 25;
    carry4 = h4 >> 26; h5 += carry4; h4 -= carry4 << 26;
    carry5 = h5 >> 25; h6 += carry5; h5 -= carry5 << 25;
    carry6 = h6 >> 26; h7 += carry6; h6 -= carry6 << 26;
    carry7 = h7 >> 25; h8 += carry7; h7 -= carry7 << 25;
    carry8 = h8 >> 26; h9 += carry8; h8 -= carry8 << 26;
    carry9 = h9 >> 25; h9 -= carry9 << 25;
    /* h10 = carry9 */

    hr.x0 = h0; hr.x1 = h1; hr.x2 = h2; hr.x3 = h3; hr.x4 = h4;
    hr.x5 = h5; hr.x6 = h6; hr.x7 = h7; hr.x8 = h8; hr.x9 = h9;
}
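/*
 * fe_reduce computes q = floor(h / (2^255 - 19)) for inputs in the expected
 * range, then returns h - q*p fully carried: every limb non-negative and
 * within its 26/25-bit slot, i.e. the unique canonical representative in
 * [0, 2^255 - 20]. Sanity-check sketch, reusing the hypothetical
 * FeToBigInteger above; the name CheckReduce is illustrative only:
 */
internal static void CheckReduce(ref FieldElement h)
{
    System.Numerics.BigInteger p = (System.Numerics.BigInteger.One << 255) - 19;
    FieldElement r;
    fe_reduce(out r, ref h);
    int[] limbs = { r.x0, r.x1, r.x2, r.x3, r.x4, r.x5, r.x6, r.x7, r.x8, r.x9 };
    int[] offsets = { 0, 26, 51, 77, 102, 128, 153, 179, 204, 230 };
    System.Numerics.BigInteger v = 0;
    for (int i = 0; i < 10; i++)
    {
        System.Diagnostics.Debug.Assert(limbs[i] >= 0); // canonical limbs are non-negative
        v += (System.Numerics.BigInteger)limbs[i] << offsets[i];
    }
    System.Diagnostics.Debug.Assert(v < p);                      // strictly below p
    System.Diagnostics.Debug.Assert(v == FeToBigInteger(ref h)); // same value mod p
}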
public static void fe_0(out FieldElement h)
{
    h = default(FieldElement);
}
internal static void fe_pow22523(out FieldElement result, ref FieldElement z)
{
    FieldElement t0, t1, t2;
    int i;

    /* qhasm temporaries: z1, z2, z8, z9, z11, z22, z_5_0, z_10_5, z_10_0,
       z_20_10, z_20_0, z_40_20, z_40_0, z_50_10, z_50_0, z_100_50, z_100_0,
       z_200_100, z_200_0, z_250_50, z_250_0, z_252_2, z_252_3 */

    /* z2 = z1^2^1 */
    fe_sq(out t0, ref z);

    /* z8 = z2^2^2 */
    fe_sq(out t1, ref t0);
    for (i = 1; i < 2; ++i) { fe_sq(out t1, ref t1); }

    /* z9 = z1*z8 */
    fe_mul(out t1, ref z, ref t1);

    /* z11 = z2*z9 */
    fe_mul(out t0, ref t0, ref t1);

    /* z22 = z11^2^1 */
    fe_sq(out t0, ref t0);

    /* z_5_0 = z9*z22 */
    fe_mul(out t0, ref t1, ref t0);

    /* z_10_5 = z_5_0^2^5 */
    fe_sq(out t1, ref t0);
    for (i = 1; i < 5; ++i) { fe_sq(out t1, ref t1); }

    /* z_10_0 = z_10_5*z_5_0 */
    fe_mul(out t0, ref t1, ref t0);

    /* z_20_10 = z_10_0^2^10 */
    fe_sq(out t1, ref t0);
    for (i = 1; i < 10; ++i) { fe_sq(out t1, ref t1); }

    /* z_20_0 = z_20_10*z_10_0 */
    fe_mul(out t1, ref t1, ref t0);

    /* z_40_20 = z_20_0^2^20 */
    fe_sq(out t2, ref t1);
    for (i = 1; i < 20; ++i) { fe_sq(out t2, ref t2); }

    /* z_40_0 = z_40_20*z_20_0 */
    fe_mul(out t1, ref t2, ref t1);

    /* z_50_10 = z_40_0^2^10 */
    fe_sq(out t1, ref t1);
    for (i = 1; i < 10; ++i) { fe_sq(out t1, ref t1); }

    /* z_50_0 = z_50_10*z_10_0 */
    fe_mul(out t0, ref t1, ref t0);

    /* z_100_50 = z_50_0^2^50 */
    fe_sq(out t1, ref t0);
    for (i = 1; i < 50; ++i) { fe_sq(out t1, ref t1); }

    /* z_100_0 = z_100_50*z_50_0 */
    fe_mul(out t1, ref t1, ref t0);

    /* z_200_100 = z_100_0^2^100 */
    fe_sq(out t2, ref t1);
    for (i = 1; i < 100; ++i) { fe_sq(out t2, ref t2); }

    /* z_200_0 = z_200_100*z_100_0 */
    fe_mul(out t1, ref t2, ref t1);

    /* z_250_50 = z_200_0^2^50 */
    fe_sq(out t1, ref t1);
    for (i = 1; i < 50; ++i) { fe_sq(out t1, ref t1); }

    /* z_250_0 = z_250_50*z_50_0 */
    fe_mul(out t0, ref t1, ref t0);

    /* z_252_2 = z_250_0^2^2 */
    fe_sq(out t0, ref t0);
    for (i = 1; i < 2; ++i) { fe_sq(out t0, ref t0); }

    /* z_252_3 = z_252_2*z1 */
    fe_mul(out result, ref t0, ref z);
}
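/*
 * Context note (editor's sketch): fe_pow22523 computes z^(2^252 - 3), i.e.
 * z^((p-5)/8) for p = 2^255 - 19, via the addition chain of squarings and
 * multiplications above. Because p = 5 (mod 8), this power is the core of
 * square-root extraction: a root candidate for u/v is
 * beta = u * v^3 * (u*v^7)^((p-5)/8), as used in Ed25519 point decompression.
 * The helper below illustrates that standard computation; its name is
 * hypothetical and it is not part of the original API.
 */
internal static void fe_sqrt_ratio_candidate(out FieldElement beta, ref FieldElement u, ref FieldElement v)
{
    FieldElement v3, v7, t;
    fe_sq(out v3, ref v);
    fe_mul(out v3, ref v3, ref v);   /* v3 = v^3 */
    fe_sq(out v7, ref v3);
    fe_mul(out v7, ref v7, ref v);   /* v7 = v^7 */
    fe_mul(out t, ref u, ref v7);    /* t = u*v^7 */
    fe_pow22523(out t, ref t);       /* t = (u*v^7)^((p-5)/8) */
    fe_mul(out t, ref t, ref v3);    /* t = v^3 * (u*v^7)^((p-5)/8) */
    fe_mul(out beta, ref t, ref u);  /* beta = u * v^3 * (u*v^7)^((p-5)/8) */
}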
public GroupElementPreComp(FieldElement yplusx, FieldElement yminusx, FieldElement xy2d)
{
    this.yplusx = yplusx;
    this.yminusx = yminusx;
    this.xy2d = xy2d;
}
internal static void scalarmult(out FieldElement q, byte[] n, int noffset, ref FieldElement p)
{
    byte[] e = new byte[32]; // ToDo: remove allocation
    UInt32 i;
    FieldElement x1, x2, z2, x3, z3, tmp0, tmp1;
    int pos;
    UInt32 swap;
    UInt32 b;

    for (i = 0; i < 32; ++i)
    {
        e[i] = n[noffset + i];
    }
    ScalarOperations.sc_clamp(e, 0);

    x1 = p;
    FieldOperations.fe_1(out x2);
    FieldOperations.fe_0(out z2);
    x3 = x1;
    FieldOperations.fe_1(out z3);

    swap = 0;
    for (pos = 254; pos >= 0; --pos)
    {
        b = (uint)(e[pos / 8] >> (pos & 7));
        b &= 1;
        swap ^= b;
        FieldOperations.fe_cswap(ref x2, ref x3, swap);
        FieldOperations.fe_cswap(ref z2, ref z3, swap);
        swap = b;

        /* qhasm ladder step; temporaries X2,Z2,X3,Z3,X4,Z4,X5,Z5,A,B,C,D,E,
           AA,BB,DA,CB,t0..t4 are mapped onto x2,z2,x3,z3,tmp0,tmp1 */
        /* D = X3-Z3 */
        FieldOperations.fe_sub(out tmp0, ref x3, ref z3);
        /* B = X2-Z2 */
        FieldOperations.fe_sub(out tmp1, ref x2, ref z2);
        /* A = X2+Z2 */
        FieldOperations.fe_add(out x2, ref x2, ref z2);
        /* C = X3+Z3 */
        FieldOperations.fe_add(out z2, ref x3, ref z3);
        /* DA = D*A */
        FieldOperations.fe_mul(out z3, ref tmp0, ref x2);
        /* CB = C*B */
        FieldOperations.fe_mul(out z2, ref z2, ref tmp1);
        /* BB = B^2 */
        FieldOperations.fe_sq(out tmp0, ref tmp1);
        /* AA = A^2 */
        FieldOperations.fe_sq(out tmp1, ref x2);
        /* t0 = DA+CB */
        FieldOperations.fe_add(out x3, ref z3, ref z2);
        /* t1 = DA-CB */
        FieldOperations.fe_sub(out z2, ref z3, ref z2);
        /* X4 = AA*BB */
        FieldOperations.fe_mul(out x2, ref tmp1, ref tmp0);
        /* E = AA-BB */
        FieldOperations.fe_sub(out tmp1, ref tmp1, ref tmp0);
        /* t2 = t1^2 */
        FieldOperations.fe_sq(out z2, ref z2);
        /* t3 = a24*E */
        FieldOperations.fe_mul121666(out z3, ref tmp1);
        /* X5 = t0^2 */
        FieldOperations.fe_sq(out x3, ref x3);
        /* t4 = BB+t3 */
        FieldOperations.fe_add(out tmp0, ref tmp0, ref z3);
        /* Z5 = X1*t2 */
        FieldOperations.fe_mul(out z3, ref x1, ref z2);
        /* Z4 = E*t4 */
        FieldOperations.fe_mul(out z2, ref tmp1, ref tmp0);
    }

    FieldOperations.fe_cswap(ref x2, ref x3, swap);
    FieldOperations.fe_cswap(ref z2, ref z3, swap);

    FieldOperations.fe_invert(out z2, ref z2);
    FieldOperations.fe_mul(out x2, ref x2, ref z2);
    q = x2;
    CryptoBytes.Wipe(e);
}
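/*
 * Usage sketch (hypothetical, not part of the original API): scalarmult above
 * is the X25519 Montgomery ladder over the u-coordinate only, and it clamps
 * the scalar internally via sc_clamp. With u = 9, the standard Curve25519
 * base point, it turns a 32-byte secret into a public-key field element.
 */
internal static void scalarmult_base_sketch(out FieldElement q, byte[] secret, int secretOffset)
{
    FieldElement basePoint;
    FieldOperations.fe_0(out basePoint);
    basePoint.x0 = 9; // u-coordinate of the Curve25519 base point
    scalarmult(out q, secret, secretOffset, ref basePoint);
}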