/// <summary>Regularized logistic regression trained by stochastic gradient descent</summary>
/// <remarks>
/// Implementation for dense feature vectors. The predictor variables are stored
/// transposed relative to their usual layout, because of the way they are used
/// in the KDD Cup 2011 ensembles.
/// </remarks>
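
    // Illustration of the transposed layout described above (a hypothetical
    // helper, not part of the original code): scores[k][n] holds predictor k's
    // score for candidate example n, i.e. one inner list per predictor rather
    // than one list per example.
    static IList<IList<double>> ExampleScores()
    {
        return new List<IList<double>> {
            new List<double> { 0.9, 0.1, 0.4 },  // predictor 0, examples 0..2
            new List<double> { 0.7, 0.2, 0.5 }   // predictor 1, examples 0..2
        };
    }
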
    static IList<double> MergeScores(IList<IList<double>> scores, Dictionary<int, IList<int>> candidates, Dictionary<int, IList<int>> hits)
    {
        double[] weights;

        // log_reg, learn_rate, num_it, and regularization are configuration
        // fields defined elsewhere in the enclosing program
        if (log_reg)
        {
            var lr = new LogisticRegression();
            lr.LearnRate = learn_rate;
            lr.NumIter = num_it;
            lr.Regularization = regularization;

            // one matrix row per predictor (transposed layout, see the remarks above)
            lr.PredictorVariables = new Matrix<double>(scores);

            // Build the binary target vector: 1 if the candidate item was a hit
            // for the user, 0 otherwise. This assumes the candidates are
            // enumerated in the same order in which the score columns were built.
            var targets = new byte[scores[0].Count];
            int pos = 0;
            foreach (int u in candidates.Keys)
                foreach (int i in candidates[u])
                    targets[pos++] = hits[u].Contains(i) ? (byte) 1 : (byte) 0;
            lr.TargetVariables = targets;

            lr.Train();

            // learned combination weight for each predictor
            weights = lr.parameters.ToArray();
        }
        else
        {
            // logistic regression disabled: fall back to uniform weights
            weights = new double[scores.Count];
            for (int i = 0; i < weights.Length; i++)
                weights[i] = 1;
        }

        return MergeScores(scores, weights);
    }
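
    // A minimal sketch of the two-argument overload called above; its actual
    // implementation is not part of this excerpt, so this is an assumption:
    // the merged score of example n is the weighted sum of the per-predictor
    // scores, using the transposed layout described at the top of the file.
    static IList<double> MergeScores(IList<IList<double>> scores, IList<double> weights)
    {
        var merged = new double[scores[0].Count];
        for (int k = 0; k < scores.Count; k++)       // over predictors
            for (int n = 0; n < merged.Length; n++)  // over examples
                merged[n] += weights[k] * scores[k][n];
        return merged;
    }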
    // TODO integrate logistic regression and LIBSVM, maybe later LIBLINEAR
    public static void Main(string[] args)
    {
        // placeholder entry point; for now it only constructs the model
        var lr = new LogisticRegression();
    }
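
    // Sketch of the stochastic gradient descent step the header describes (an
    // assumption; the real LogisticRegression.Train() is not shown in this
    // excerpt). For each example x with label y, L2-regularized logistic
    // regression nudges every weight w[j] along the per-example gradient:
    //     w[j] += learn_rate * ((y - sigmoid(w . x)) * x[j] - regularization * w[j])
    static void SgdEpochSketch(double[][] x, byte[] y, double[] w, double learnRate, double regularization)
    {
        for (int n = 0; n < x.Length; n++)
        {
            double dot = 0;
            for (int j = 0; j < w.Length; j++)
                dot += w[j] * x[n][j];
            double p = 1.0 / (1.0 + Math.Exp(-dot));  // sigmoid(w . x)
            double error = y[n] - p;                  // residual for this example
            for (int j = 0; j < w.Length; j++)
                w[j] += learnRate * (error * x[n][j] - regularization * w[j]);
        }
    }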