/* Scalar multiplication: r = (x * y) reduced by barrett_reduce.
 * Operands are 32 radix-2^8 limbs in v[]; the 64-limb schoolbook product
 * is accumulated in t, carry-propagated back to 8-bit limbs, then reduced.
 * Fixed iteration counts, no data-dependent branches. */
public static unsafe void sc25519_mul(sc25519 *r, sc25519 *x, sc25519 *y)
{
    UInt32 *t = stackalloc UInt32[64];
    // stackalloc memory is not zeroed here, so clear the accumulator first.
    for (int i = 0; i < 64; i++) { t[i] = 0; }
    // Schoolbook multiply: each limb product of two 8-bit values fits in 32 bits.
    for (int i = 0; i < 32; i++) {
        for (int j = 0; j < 32; j++) {
            t[i + j] += x->v[i] * y->v[j];
        }
    }
    /* Reduce coefficients */
    // Propagate carries so every limb is back in [0, 255] before Barrett reduction.
    for (int i = 0; i < 63; i++) {
        uint carry = t[i] >> 8;
        t[i + 1] += carry;
        t[i] &= 0xff;
    }
    barrett_reduce(r, t);
}
/* XXX: What we actually want for crypto_group is probably just something like
 * void sc25519_frombytes(sc25519 *r, const unsigned char *x, size_t xlen) */
// Serializes scalar x into the 32-byte buffer r, one limb per output byte.
// Assumes each limb has already been reduced to the range [0, 255], since
// only the low byte of each limb survives the cast.
public static unsafe void sc25519_to32bytes(Byte *r, sc25519 *x) //unsigned char r[32]
{
    int idx = 0;
    while (idx < 32) {
        r[idx] = (Byte)x->v[idx];
        idx++;
    }
}
// Loads a 64-byte value into scalar r, reducing it via barrett_reduce.
// The input bytes are widened into a 64-limb UInt32 temporary first,
// since barrett_reduce operates on 32-bit limbs.
public static unsafe void sc25519_from64bytes(sc25519 *r, Byte *x) //const unsigned char x[64]
{
    UInt32 *widened = stackalloc UInt32[64];
    for (int k = 0; k < 64; k++) {
        widened[k] = x[k];
    }
    barrett_reduce(r, widened);
}
// Scalar addition: r = x + y, carry-normalized to 8-bit limbs, followed by
// a conditional subtraction of the modulus in reduce_add_sub.
// Fixed iteration counts; no data-dependent branches in this function.
public static unsafe void sc25519_add(sc25519 *r, sc25519 *x, sc25519 *y)
{
    int k;
    // Limbwise sum; individual limbs may temporarily exceed 8 bits.
    for (k = 0; k < 32; k++) {
        r->v[k] = x->v[k] + y->v[k];
    }
    // Ripple carries upward, restoring each limb to [0, 255].
    for (k = 0; k < 31; k++) {
        uint c = r->v[k] >> 8;
        r->v[k] &= 0xff;
        r->v[k + 1] += c;
    }
    reduce_add_sub(r);
}
/* XXX: Better algorithm for known-base-point scalar multiplication */
// Computes r = s * B, where B is the base point whose packed coordinates
// live in ge25519_base_{x,y,z,t}. The point is unpacked field-by-field
// into a local ge25519 and handed to the generic ladder; no base-point
// specific speedup is applied (see the XXX note above).
public unsafe static void ge25519_scalarmult_base(ge25519 *r, sc25519 *s)
{
    ge25519 basePoint;
    fixed (Byte *px = ge25519_base_x) {
        fe25519.fe25519_unpack(&basePoint.x, px);
    }
    fixed (Byte *py = ge25519_base_y) {
        fe25519.fe25519_unpack(&basePoint.y, py);
    }
    fixed (Byte *pz = ge25519_base_z) {
        fe25519.fe25519_unpack(&basePoint.z, pz);
    }
    fixed (Byte *pt = ge25519_base_t) {
        fe25519.fe25519_unpack(&basePoint.t, pt);
    }
    ge25519_scalarmult(r, &basePoint, s);
}
/* Reduce coefficients of r before calling reduce_add_sub */
// Conditional subtraction of the modulus m from r.
// t receives r - m computed limbwise with borrows; after the first loop,
// b is the final borrow: b == 1 means r < m (keep r), b == 0 means
// r >= m (take r - m). The second loop selects between r and t by
// arithmetic masking (r*b + t*(1-b)) instead of branching on the result.
// NOTE(review): the ternary on the limb comparison may still compile to a
// branch; the reference C versions use a branch-free comparison -- confirm
// if strict constant time is required.
static unsafe void reduce_add_sub(sc25519 *r)
{
    int i, b = 0, pb = 0, nb;
    Byte *t = stackalloc Byte[32];
    for (i = 0; i < 32; i++) {
        // b: borrow out of this limb; pb: borrow carried in from the previous limb.
        b = (r->v[i] < pb + m[i]) ? 1 : 0;
        // + b * 256 re-wraps a negative limb difference into [0, 255].
        t[i] = (Byte)(r->v[i] - pb - m[i] + b * 256);
        pb = b;
    }
    nb = 1 - b;
    for (i = 0; i < 32; i++) {
        // Branch-free select between the original value and the subtracted one.
        r->v[i] = (uint)(r->v[i] * b + t[i] * nb);
    }
}
/* Reduce coefficients of x before calling barrett_reduce */
static unsafe void barrett_reduce(sc25519 *r, UInt32 *x) // const crypto_uint32 x[64]
/* See HAC, Alg. 14.42 */
{
    // Barrett reduction of a 64-limb (radix 2^8) value x modulo m.
    // mu is the precomputed Barrett constant declared elsewhere in this file
    // (presumably floor(b^(2k)/m) for k = 32 limbs -- confirm against mu[]).
    UInt32 *q2 = stackalloc UInt32[66]; // { 0 };
    for (int z = 0; z < 66; z++) { q2[z] = 0; }
    // q3 aliases the top 33 limbs of q2: dividing q2 by b^33 is just a
    // pointer offset, no copy needed.
    UInt32 *q3 = q2 + 33;
    UInt32 *r1 = stackalloc UInt32[33];
    UInt32 *r2 = stackalloc UInt32[33]; // { 0 };
    for (int z = 0; z < 33; z++) { r2[z] = 0; }
    UInt32 carry;
    int b, pb = 0;
    // q2 = mu * floor(x / b^31); partial products landing below limb 31 are
    // skipped because they cannot affect the q3 window.
    for (int i = 0; i < 33; i++) {
        for (int j = 0; j < 33; j++) {
            if (i + j >= 31) { q2[i + j] += mu[i] * x[j + 31]; }
        }
    }
    // Normalize the two limbs just below the q3 window so q3's limbs are exact.
    carry = q2[31] >> 8;
    q2[32] += carry;
    carry = q2[32] >> 8;
    q2[33] += carry;
    // r1 = x mod b^33 (the low 33 limbs of x).
    for (int i = 0; i < 33; i++) { r1[i] = x[i]; }
    // r2 = (q3 * m) mod b^33: products at limb >= 33 are discarded.
    for (int i = 0; i < 32; i++) {
        for (int j = 0; j < 33; j++) {
            if (i + j < 33) { r2[i + j] += m[i] * q3[j]; }
        }
    }
    // Carry-propagate r2 back to 8-bit limbs.
    for (int i = 0; i < 32; i++) {
        carry = r2[i] >> 8;
        r2[i + 1] += carry;
        r2[i] &= 0xff;
    }
    // r = r1 - r2, limbwise with borrow propagation
    // (b: borrow out, pb: borrow in, + b * 256 re-wraps negative limbs).
    for (int i = 0; i < 32; i++) {
        b = (r1[i] < pb + r2[i]) ? 1 : 0;
        r->v[i] = (uint)(r1[i] - pb - r2[i] + b * 256);
        pb = b;
    }
    /* XXX: Can it really happen that r<0?, See HAC, Alg 14.42, Step 3
     * If so: Handle it here! */
    // HAC step 4 allows at most two further subtractions of m; each
    // reduce_add_sub performs one conditional subtraction.
    reduce_add_sub(r);
    reduce_add_sub(r);
}
// Generic scalar multiplication r = s * p using a fixed-window method.
// A table of the first 2^WINDOWSIZE multiples of p is precomputed, then the
// scalar is consumed WINDOWSIZE bits at a time, most significant byte first.
// Each table lookup scans every entry and selects with cmov_p3, so the
// memory access pattern does not depend on the secret scalar.
public static unsafe void ge25519_scalarmult(ge25519 *r, ge25519 *p, sc25519 *s)
{
    int i, j, k;
    ge25519 g;
    // Initialize the accumulator g with the packed neutral element.
    fixed(Byte *ge25519_neutral_xp = ge25519_neutral_x) fe25519.fe25519_unpack(&g.x, ge25519_neutral_xp);
    fixed(Byte *ge25519_neutral_yp = ge25519_neutral_y) fe25519.fe25519_unpack(&g.y, ge25519_neutral_yp);
    fixed(Byte *ge25519_neutral_zp = ge25519_neutral_z) fe25519.fe25519_unpack(&g.z, ge25519_neutral_zp);
    fixed(Byte *ge25519_neutral_tp = ge25519_neutral_t) fe25519.fe25519_unpack(&g.t, ge25519_neutral_tp);
    ge25519[] pre = new ge25519[(1 << WINDOWSIZE)];
    ge25519 t;
    ge25519_p1p1 tp1p1;
    Byte w;
    Byte * sb = stackalloc Byte[32];
    sc25519.sc25519_to32bytes(sb, s);
    // Precomputation
    // pre[k] = k * p for k in [0, 2^WINDOWSIZE): pre[2i] = 2 * pre[i],
    // pre[2i+1] = pre[2i] + p.
    pre[0] = g;
    pre[1] = *p;
    for (i = 2; i < (1 << WINDOWSIZE); i += 2) {
        fixed(ge25519 *prep = pre) {
            dbl_p1p1(&tp1p1, (ge25519_p2 *)(prep + i / 2));
            p1p1_to_p3(prep + i, &tp1p1);
            add_p1p1(&tp1p1, prep + i, prep + 1);
            p1p1_to_p3(prep + i + 1, &tp1p1);
        }
    }
    // Fixed-window scalar multiplication
    // Walk the 32 scalar bytes from most significant down; within each byte,
    // take WINDOWSIZE-bit windows from the high bits down.
    for (i = 32; i > 0; i--) {
        for (j = 8 - WINDOWSIZE; j >= 0; j -= WINDOWSIZE) {
            // Shift the accumulator left by WINDOWSIZE doublings; the last
            // doubling converts to p3 so the table add below is valid.
            for (k = 0; k < WINDOWSIZE - 1; k++) {
                dbl_p1p1(&tp1p1, (ge25519_p2 *)&g);
                p1p1_to_p2((ge25519_p2 *)&g, &tp1p1);
            }
            dbl_p1p1(&tp1p1, (ge25519_p2 *)&g);
            p1p1_to_p3(&g, &tp1p1);
            // Cache-timing resistant loading of precomputed value:
            w = (Byte)((sb[i - 1] >> j) & WINDOWMASK);
            t = pre[0];
            // Touch every table entry; cmov_p3 copies only when k == w.
            for (k = 1; k < (1 << WINDOWSIZE); k++)
                fixed(ge25519 *prekp = &pre[k])
                    cmov_p3(&t, prekp, (k == w) ? (Byte)1 : (Byte)0);
            add_p1p1(&tp1p1, &g, &t);
            if (j != 0) {
                p1p1_to_p2((ge25519_p2 *)&g, &tp1p1);
            } else {
                p1p1_to_p3(&g, &tp1p1); /* convert to p3 representation at the end */
            }
        }
    }
    r->x = g.x;
    r->y = g.y;
    r->z = g.z;
    r->t = g.t;
}
// Scalar squaring: r = x * x, delegated to sc25519_mul.
public static unsafe void sc25519_square(sc25519 *r, sc25519 *x)
{
    sc25519_mul(r, x, x);
}