/// <summary>
/// Exercises the three overloads of Matrix.Inc visible here:
/// (row, col, value) single-cell increment, Inc(Matrix) element-wise add,
/// and Inc(scalar) add-to-all; plus the no-value Inc(row, col) on Matrix&lt;int&gt;
/// (presumably increments the cell by one — confirmed by the 3 -> 4 assertion).
/// </summary>
public void TestInc()
{
	// Inc(i, j, value): 5 + 2.5 == 7.5
	var matrix = new Matrix<double>(5, 5);
	double[] row = { 1, 2, 3, 4, 5 };
	for (int i = 0; i < 5; i++)
		matrix.SetRow(i, row);
	matrix.Inc(3, 4, 2.5);
	Assert.AreEqual(7.5, matrix[3, 4]);

	// Inc(Matrix): adding a matrix to an identical one doubles every entry
	var matrix1 = new Matrix<double>(5, 5);
	for (int i = 0; i < 5; i++)
		matrix1.SetRow(i, row);
	var matrix2 = new Matrix<double>(5, 5);
	for (int i = 0; i < 5; i++)
		matrix2.SetRow(i, row);
	double[] testrow = { 2, 4, 6, 8, 10 };
	matrix1.Inc(matrix2);
	Assert.AreEqual(testrow, matrix1.GetRow(2));

	// Inc(scalar): every entry grows by 1 (spot-checked on row 1)
	var matrix3 = new Matrix<double>(5, 5);
	for (int i = 0; i < 5; i++)
		matrix3.SetRow(i, row);
	matrix3.Inc(1.0);
	for (int j = 0; j < 5; j++)
		Assert.AreEqual(row[j] + 1, matrix3[1, j]);

	// Inc(i, j) on an int matrix: increments the single cell by one.
	// NOTE: argument order fixed — NUnit's Assert.AreEqual takes (expected, actual);
	// the original had them swapped, which yields misleading failure messages.
	var matrix4 = new Matrix<int>(5, 5);
	int[] int_row = { 1, 2, 3, 4, 5 };
	for (int i = 0; i < 5; i++)
		matrix4.SetRow(i, int_row);
	Assert.AreEqual(3, matrix4[1, 2]);
	matrix4.Inc(1, 2);
	Assert.AreEqual(4, matrix4[1, 2]);
}
/// <summary>
/// Performs one full batch gradient-descent step over all ratings:
/// (I) accumulates gradients for user/item biases and latent factors —
/// prediction error, L2 regularization, and (optionally) social-network
/// regularization per eq. (13) of the underlying paper — then
/// (II) applies a single descent step scaled by the learn rates.
/// </summary>
private void IterateBatch()
{
	SetupLoss();
	// Reverse adjacency: for each user u, the users v that list u as a connection.
	// Needed for the second social-regularization term below.
	SparseBooleanMatrix user_reverse_connections = (SparseBooleanMatrix) user_connections.Transpose();

	// I. compute gradients
	// Gradient accumulators mirror the shapes of the model parameters.
	var user_factors_gradient = new Matrix<float>(user_factors.dim1, user_factors.dim2);
	var item_factors_gradient = new Matrix<float>(item_factors.dim1, item_factors.dim2);
	var user_bias_gradient = new float[user_factors.dim1];
	var item_bias_gradient = new float[item_factors.dim1];

	// I.1 prediction error
	for (int index = 0; index < ratings.Count; index++)
	{
		int user_id = ratings.Users[index];
		int item_id = ratings.Items[index];

		// prediction: biases + dot product of the two factor rows,
		// squashed through a sigmoid and rescaled to the rating range
		float score = global_bias + user_bias[user_id] + item_bias[item_id];
		score += DataType.MatrixExtensions.RowScalarProduct(user_factors, user_id, item_factors, item_id);
		double sig_score = 1 / (1 + Math.Exp(-score));
		float prediction = (float) (MinRating + sig_score * rating_range_size);
		float error = prediction - ratings[index];

		// Shared scalar part of the gradient; depends on the configured loss (see SetupLoss).
		float gradient_common = compute_gradient_common(sig_score, error);

		user_bias_gradient[user_id] += gradient_common;
		item_bias_gradient[item_id] += gradient_common;

		for (int f = 0; f < NumFactors; f++)
		{
			// Factor values are read before either gradient is updated, so the
			// two Inc calls use consistent (pre-update) values.
			float u_f = user_factors[user_id, f];
			float i_f = item_factors[item_id, f];
			user_factors_gradient.Inc(user_id, f, gradient_common * i_f);
			item_factors_gradient.Inc(item_id, f, gradient_common * u_f);
		}
	}

	// I.2 L2 regularization
	// biases (regularization strength scaled by BiasReg)
	for (int u = 0; u < user_bias_gradient.Length; u++)
		user_bias_gradient[u] += user_bias[u] * RegU * BiasReg;
	for (int i = 0; i < item_bias_gradient.Length; i++)
		item_bias_gradient[i] += item_bias[i] * RegI * BiasReg;
	// latent factors
	for (int u = 0; u < user_factors_gradient.dim1; u++)
		for (int f = 0; f < user_factors_gradient.dim2; f++)
			user_factors_gradient.Inc(u, f, user_factors[u, f] * RegU);
	for (int i = 0; i < item_factors_gradient.dim1; i++)
		for (int f = 0; f < item_factors_gradient.dim2; f++)
			item_factors_gradient.Inc(i, f, item_factors[i, f] * RegI);

	// I.3 social network regularization -- see eq. (13) in the paper
	if (SocialRegularization != 0)
		for (int u = 0; u < user_factors_gradient.dim1; u++)
		{
			// First term: pull u toward the mean of the users u trusts.
			var sum_connections = new float[NumFactors];
			float bias_sum_connections = 0;
			int num_connections = user_connections[u].Count;
			foreach (int v in user_connections[u])
			{
				bias_sum_connections += user_bias[v];
				for (int f = 0; f < sum_connections.Length; f++)
					sum_connections[f] += user_factors[v, f];
			}
			if (num_connections != 0) // guard against division by zero for isolated users
			{
				user_bias_gradient[u] += social_regularization * (user_bias[u] - bias_sum_connections / num_connections);
				for (int f = 0; f < user_factors_gradient.dim2; f++)
					user_factors_gradient.Inc(u, f, social_regularization * (user_factors[u, f] - sum_connections[f] / num_connections));
			}

			// Second term: u also influences each user v who trusts u;
			// propagate v's deviation from its trusted-neighbor mean back onto u.
			foreach (int v in user_reverse_connections[u])
			{
				// (float) 1 / Count forces float division; Count is nonzero here,
				// since v appears in the reverse connections only because it has at
				// least one outgoing connection (namely u).
				float trust_v = (float) 1 / user_connections[v].Count;
				float neg_trust_times_reg = -social_regularization * trust_v;
				float bias_diff = 0;
				var factor_diffs = new float[NumFactors];
				// Accumulate the NEGATED sums over v's connections; the sign flips
				// back when combined with neg_trust_times_reg below.
				foreach (int w in user_connections[v])
				{
					bias_diff -= user_bias[w];
					for (int f = 0; f < factor_diffs.Length; f++)
						factor_diffs[f] -= user_factors[w, f];
				}
				bias_diff *= trust_v; // normalize
				bias_diff += user_bias[v];
				user_bias_gradient[u] += neg_trust_times_reg * bias_diff;
				for (int f = 0; f < factor_diffs.Length; f++)
				{
					factor_diffs[f] *= trust_v; // normalize
					factor_diffs[f] += user_factors[v, f];
					user_factors_gradient.Inc(u, f, neg_trust_times_reg * factor_diffs[f]);
				}
			}
		}

	// II. apply gradient descent step
	// Biases are stepped with an extra BiasLearnRate factor; factor matrices are
	// stepped in-place by scaling the gradient by -LearnRate and adding it.
	for (int user_id = 0; user_id < user_factors_gradient.dim1; user_id++)
		user_bias[user_id] -= user_bias_gradient[user_id] * LearnRate * BiasLearnRate;
	for (int item_id = 0; item_id < item_factors_gradient.dim1; item_id++)
		item_bias[item_id] -= item_bias_gradient[item_id] * LearnRate * BiasLearnRate;
	user_factors_gradient.Multiply(-LearnRate);
	user_factors.Inc(user_factors_gradient);
	item_factors_gradient.Multiply(-LearnRate);
	item_factors.Inc(item_factors_gradient);
}