        //The M step. Numerical behavior is much better if A and R are put on a common scale at the
        //start of the process (a sketch of the rescaling idea follows this method).
        private void MaximizeGivenZExpectationsScaled()
        {
            //Recreate the solver so no quasi-Newton curvature (Hessian) information is carried over from a previous M step.
            QN = new QuasiNewton();
            QN.MaxIterations = MaxIterations;
            QN.Tolerance     = TerminationTolerance;
            currentWinner    = new CurrentBestSolution()
            {
                curBestFunction = double.MaxValue // start at the largest value so the first evaluation becomes the incumbent best
            };

            // results = QN.MinimizeDetail(new DiffFunc(GetDerivatives), new double[] {InitialPopSize,GrowthRate});
            results = QN.MinimizeDetail(new DiffFunc(GetDerivativesScaled), startParams);

            pParameters = results.solution;
            curVal      = results.funcValue;
            if (results.quality != Microsoft.SolverFoundation.Solvers.CompactQuasiNewtonSolutionQuality.LocalOptima)
            {
                //The solver sometimes reaches a local optimum but reports a calculation error.
                //In that case accept the best iterate tracked during the run, provided its gradient is already near zero.
                if (results.quality == Microsoft.SolverFoundation.Solvers.CompactQuasiNewtonSolutionQuality.UserCalculationError &&
                    currentWinner.GradientL2NormAtBest < 0.002)
                {
                    pParameters = currentWinner.curBestParameters;
                    curVal      = currentWinner.curBestFunction;
                }
                else
                {
                    throw new InvalidOperationException("M step failed to converge during solving.");
                }
            }
        }
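
        //A minimal sketch of the rescaling idea only; it is not the project's GetDerivativesScaled.
        //The delegate, scaleA, and scaleR below are hypothetical: the assumption is that the
        //objective returns the function value and writes its gradient (with respect to the natural
        //parameters A and R) into the supplied array, and that the scale factors are chosen so both
        //rescaled parameters are of order one at the start of the EM run.
        private static double EvaluateRescaledSketch(
            Func<double[], double[], double> objective,   // (naturalParams, naturalGradientOut) -> value
            double[] scaledParams,
            double scaleA,
            double scaleR,
            double[] scaledGradientOut)
        {
            //Map the rescaled parameters back to the natural parameterization before evaluating.
            double[] naturalParams = { scaledParams[0] * scaleA, scaledParams[1] * scaleR };
            double[] naturalGradient = new double[2];
            double value = objective(naturalParams, naturalGradient);

            //Chain rule: d f / d (A / scaleA) = scaleA * d f / d A, and likewise for R.
            scaledGradientOut[0] = naturalGradient[0] * scaleA;
            scaledGradientOut[1] = naturalGradient[1] * scaleR;
            return value;
        }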
        //The M step, unscaled variant of the routine above
        private void MaximizeGivenZExpectations()
        {
            //An LBFGS solver was also tried but did not work well; the attempt is kept below for reference.
            //LBFGS solver = new LBFGS(2);
            //FunctionEval ff = new FunctionEval(GetDerivativesLBFGS);
            //Vector x0 = Vector.FromArray(new double[] { InitialPopSize, GrowthRate });
            //solver.debug = true;
            //solver.linesearchDebug = true;
            //solver.Run(x0,1, ff);
            //pParameters = x0.ToArray();
            //var b = solver.convergenceCriteria;

            //Recreate the solver so no quasi-Newton curvature (Hessian) information is carried over from a previous M step.
            QN = new QuasiNewton();
            QN.MaxIterations = MaxIterations;
            QN.Tolerance     = TerminationTolerance;
            currentWinner    = new CurrentBestSolution()
            {
                curBestFunction = double.MaxValue // start at the largest value so the first evaluation becomes the incumbent best
            };

            // results = QN.MinimizeDetail(new DiffFunc(GetDerivatives), new double[] {InitialPopSize,GrowthRate});
            results = QN.MinimizeDetail(new DiffFunc(GetDerivatives), startParams);

            pParameters = results.solution;
            curVal      = results.funcValue;
            if (results.quality != Microsoft.SolverFoundation.Solvers.CompactQuasiNewtonSolutionQuality.LocalOptima)
            {
                //The solver sometimes reaches a local optimum but reports a calculation error.
                //In that case accept the best iterate tracked during the run, provided its gradient is already near zero
                //(a sketch of that best-iterate tracking follows this method).
                if (results.quality == Microsoft.SolverFoundation.Solvers.CompactQuasiNewtonSolutionQuality.UserCalculationError &&
                    currentWinner.GradientL2NormAtBest < 0.002)
                {
                    pParameters = currentWinner.curBestParameters;
                    curVal      = currentWinner.curBestFunction;
                }
                else
                {
                    throw new InvalidOperationException("M step failed to converge during solving.");
                }
            }
        }
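
        //A minimal sketch (an assumption, not code from this project) of the best-iterate tracking
        //that the UserCalculationError fallback above relies on. The members referenced there
        //(curBestFunction, curBestParameters, GradientL2NormAtBest) suggest the objective callback
        //updates currentWinner on every evaluation, roughly along these lines:
        private static void TrackBestIterateSketch(
            CurrentBestSolution best, double[] parameters, double functionValue, double[] gradient)
        {
            //Keep only strict improvements over the best value seen so far.
            if (functionValue >= best.curBestFunction)
                return;

            best.curBestFunction   = functionValue;
            best.curBestParameters = (double[])parameters.Clone();

            //L2 norm of the gradient at the new best point; the fallback accepts the point
            //when this norm is already close to zero.
            double sumSquares = 0.0;
            for (int i = 0; i < gradient.Length; i++)
            {
                sumSquares += gradient[i] * gradient[i];
            }
            best.GradientL2NormAtBest = Math.Sqrt(sumSquares);
        }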