Example #1
public IHttpActionResult PutSpecial(int id, Special special)
        {
            if (!ModelState.IsValid)
            {
                return BadRequest(ModelState);
            }

            if (id != special.id)
            {
                return BadRequest();
            }

            db.Entry(special).State = EntityState.Modified;

            try
            {
                db.SaveChanges();
            }
            catch (DbUpdateConcurrencyException)
            {
                if (!SpecialExists(id))
                {
                    return NotFound();
                }
                else
                {
                    throw;
                }
            }

            return StatusCode(HttpStatusCode.NoContent);
        }
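A hedged client-side sketch (not part of the source): assuming the default "DefaultApi" route maps this controller to api/Specials/{id}, and that the Microsoft.AspNet.WebApi.Client package supplies PutAsJsonAsync, a caller would exercise the status codes above like this:

    // inside an async method
    using (var client = new HttpClient())
    {
        client.BaseAddress = new Uri("http://localhost:9000/");
        HttpResponseMessage response = await client.PutAsJsonAsync("api/Specials/5", special);
        // 204 NoContent on success, 400 BadRequest on id mismatch or invalid
        // model, 404 NotFound if the row was deleted by a concurrent request
    }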
Example #2
    public Monster( string name, int level, bool isBoss, GameObject characterObject)
        : base(name, level, level * 12)
    {
        Speed = 0;

        BasicAttack   = new Basic();
        StrongAttack  = new Strong();
        SpecialAttack = new Special();

        BasicDefend   = new Basic();
        StrongDefend  = new Strong();
        SpecialDefend = new Special();

        CharacterObject = characterObject;

        Monster _thisMonster = this;
        IsBoss = isBoss;

        if (isBoss)
        {
            Health.SetNewMaxValue(Health.MaxValue * 3);
            AIBehavior = new BossAIBehavior(ref _thisMonster);
        }
        else
        {
            AIBehavior = new MinionAIBehavior(ref _thisMonster);
        }
    }
Example #3
        public Special GetSpecial(int SpecialId)
        {
            Special special = new Special();
            DbCommand command = DbProviderHelper.CreateCommand("SELECTSpecial", CommandType.StoredProcedure);
            command.Parameters.Add(DbProviderHelper.CreateParameter("@SpecialId", DbType.Int32, SpecialId));
            // NOTE: the reader is closed manually below; a using block would also dispose it if a read throws
            DbDataReader dataReader = DbProviderHelper.ExecuteReader(command);
            while (dataReader.Read())
            {
                special.SpecialId = Convert.ToInt32(dataReader["SpecialId"]);
                special.SpecialGuid = (Guid) dataReader["SpecialGuid"];
                special.Title = Convert.ToString(dataReader["Title"]);

                if(dataReader["ContentHtml"] != DBNull.Value)
                    special.ContentHtml = Convert.ToString(dataReader["ContentHtml"]);
                special.TemplatePath = Convert.ToString(dataReader["TemplatePath"]);
                special.ReleasePath = Convert.ToString(dataReader["ReleasePath"]);

                if(dataReader["ImagePath"] != DBNull.Value)
                    special.ImagePath = Convert.ToString(dataReader["ImagePath"]);

                if(dataReader["ImageWidth"] != DBNull.Value)
                    special.ImageWidth = Convert.ToInt32(dataReader["ImageWidth"]);

                if(dataReader["ImageHeight"] != DBNull.Value)
                    special.ImageHeight = Convert.ToInt32(dataReader["ImageHeight"]);
                special.Hits = Convert.ToInt32(dataReader["Hits"]);
                special.Comments = Convert.ToInt32(dataReader["Comments"]);
            }
            dataReader.Close();
            return special;
        }
Example #4
        public ActionResult Details(int id)
        {
            myHandler = new BusinessLogicHandler();
            mySpecial = myHandler.GetSpecialsList().Single(sp => sp.SpecialID == id);

            return View(mySpecial);
        }
Example #5
        // parseList() `parses' special forms, constructs an appropriate
        // object of a subclass of Special, and stores a pointer to that
        // object in variable form.  It would be possible to fully parse
        // special forms at this point.  Since this causes complications
        // when using (incorrect) programs as data, it is easiest to let
        // parseList only look at the car for selecting the appropriate
        // object from the Special hierarchy and to leave the rest of
        // parsing up to the interpreter.
        private void parseList()
        {
            if (! car.isSymbol())
                form = new Regular();
            else
            {
                string name = car.getName();

                if (name.Equals("quote"))
                    form = new Quote();
                else if (name.Equals("lambda"))
                    form = new Lambda();
                else if (name.Equals("begin"))
                    form = new Begin();
                else if (name.Equals("if"))
                    form = new If();
                else if (name.Equals("let"))
                    form = new Let();
                else if (name.Equals("cond"))
                    form = new Cond();
                else if (name.Equals("define"))
                    form = new Define();
                else if (name.Equals("set!"))
                    form = new Set();
                else
                    form = new Regular();
            }
        }
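As an aside, the comparison chain above can be collapsed into a C# 8 switch expression; a minimal sketch, assuming (as the comment states) that Quote, Lambda, and the rest all derive from Special:

    form = name switch
    {
        "quote"  => new Quote(),
        "lambda" => new Lambda(),
        "begin"  => new Begin(),
        "if"     => new If(),
        "let"    => new Let(),
        "cond"   => new Cond(),
        "define" => new Define(),
        "set!"   => new Set(),
        _        => (Special)new Regular()   // cast gives the arms a common type
    };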
Example #6
 public SuperSpecial(Special baseSpecial)
 {
     this.baseSpecial = baseSpecial;
     up = new Vector3(1, 0, 0);
     down = new Vector3(-1, 0, 0);
     left = new Vector3(0, -1, 0);
     right = new Vector3(0, 1, 0);
 }
Example #7
	void OnTriggerEnter2D( Collider2D other ){
		if ( other.gameObject.tag == "Player" ){
			Messenger<string>.Invoke("ShowLine","You've picked up a battery for light!");
			Messenger.Invoke("light up");

			Special battery_item = new Special("Battery", "Uber Chargeeeeeee");
			// NOTE: transform.FindChild is obsolete in newer Unity versions; transform.Find is the modern equivalent
			other.transform.FindChild("Inventory").GetComponent<Inventory>().addItems(battery_item);

			Destroy(gameObject);
		}
	}
Example #8
        public IHttpActionResult PostSpecial(Special special)
        {
            if (!ModelState.IsValid)
            {
                return BadRequest(ModelState);
            }

            db.Specials.Add(special);
            db.SaveChanges();

            return CreatedAtRoute("DefaultApi", new { id = special.id }, special);
        }
Example #9
    public void InitialiseSpecials()
    {
        // get number of powers
        int numberOfSpecials = specialFiles.Length;
        chosenNumbers = new List<int>();

        // choose three random powers and initialise them all
        specialOne = createSpecialPower(0);
        specialTwo = createSpecialPower(1);
        specialThree = createSpecialPower(2);

        // initialise level up
        specialLevelUp = createSpecial(LevelUpPrefab);
    }
Example #10
        public ActionResult Create(FormCollection collection)
        {
            try
            {
                myHandler = new BusinessLogicHandler();
                mySpecial = new Special();
                // bind the posted form values; without this an empty Special would be saved
                TryUpdateModel(mySpecial);
                if (ModelState.IsValid)
                {
                    myHandler.AddSpecial(mySpecial);
                }
                return RedirectToAction("Index");

            }
            catch
            {
                return View();
            }
        }
Example #11
		public static char? SpecialChar(Special s)
		{
			switch (s)
			{
				case Special.RegularEpisode:
					return null;
				case Special.Special:
					return 'S';
				case Special.Credit:
					return 'C';
				case Special.Trailer:
					return 'T';
				case Special.Parody:
					return 'P';
				case Special.Other:
					return 'O';
				default:
					return null;
			}
		}
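A brief usage sketch (the "T1" labelling convention is an assumption, not from the source): the returned character serves as an episode-number prefix, with null meaning no prefix at all:

    char? prefix = SpecialChar(Special.Trailer);                // 'T'
    string label = prefix.HasValue ? prefix.Value + "1" : "1";  // "T1"; a regular episode stays "1"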
Example #12
 public ActionResult Edit(int id, FormCollection collection)
 {
     try
     {
         myHandler = new BusinessLogicHandler();
         mySpecial = new Special();
         TryUpdateModel(mySpecial);
         if (ModelState.IsValid)
         {
             myHandler.UpdateSpecial(mySpecial);
         }
         return RedirectToAction("Index");
     }
     catch
     {
         return View();
     }
 }
Example #13
 /// <summary>
 ///   The Probit mean (activation) function.
 /// </summary>
 ///
 /// <param name="x">A transformed value.</param>
 ///
 /// <returns>The reverse transformed value.</returns>
 ///
 public double Inverse(double x)
 {
     return(Special.Erfc(-x / Constants.Sqrt2) * 0.5);
 }
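Two sanity checks follow directly from the identities erfc(0) = 1 and erfc(-x) = 2 - erfc(x), using the same Special.Erfc and Constants.Sqrt2 as above:

    double atZero = Special.Erfc(0.0) * 0.5;                      // 0.5: the standard normal CDF at 0
    double paired = Special.Erfc(-1.0 / Constants.Sqrt2) * 0.5
                  + Special.Erfc( 1.0 / Constants.Sqrt2) * 0.5;  // 1.0: Phi(1) + Phi(-1)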
Example #14
        public void RoundTripSpecials()
        {
            UndoMgr undomgr = new UndoMgr(5);
            EventDB eventDB = new EventDB(undomgr);

            Special sp1, sp2, sp3, sp4, sp5, sp6, sp7, sp8, sp9, sp10, sp11, sp12, sp13;

            undomgr.BeginCommand(88, "Command1");

            sp1 = new Special(SpecialKind.FirstAid, new PointF[1] { new PointF(4.5F, 1.2F) });
            sp2 = new Special(SpecialKind.OptCrossing, new PointF[1] { new PointF(-4.2F, 1.7F) });
            sp2.allCourses = false;
            sp2.courses = new CourseDesignator[] { Designator(1), Designator(2), Designator(3), CourseDesignator.AllControls };
            sp2.orientation = 45F;
            sp3 = new Special(SpecialKind.Boundary, new PointF[2] { new PointF(8, 7), new PointF(1, 2) });
            sp4 = new Special(SpecialKind.OOB, new PointF[4] { new PointF(3, 7), new PointF(11, 2), new PointF(0, -1), new PointF(-12, -3) });
            sp5 = new Special(SpecialKind.Text, new PointF[2] { new PointF(3, 7), new PointF(11, 4) });
            sp5.text = "Hello";
            sp5.fontName = "Tahoma";
            sp5.fontBold = true;
            sp5.fontItalic = false;
            sp5.allCourses = false;
            sp5.color = new SpecialColor(0.2F, 0.5F, 0.3F, 0F);
            sp5.courses = new CourseDesignator[2] { Designator(2), new CourseDesignator(CourseId(3), 1) };
            sp6 = new Special(SpecialKind.Descriptions, new PointF[2] { new PointF(5, 6), new PointF(11, 6) });
            sp6.numColumns = 2;
            sp7 = new Special(SpecialKind.Text, new PointF[2] { new PointF(8, 7), new PointF(18, 5) });
            sp7.fontName = "Courier New";
            sp7.fontBold = false;
            sp7.fontItalic = true;
            sp7.text = "$(CourseName)";
            sp7.color = SpecialColor.Purple;
            sp8 = new Special(SpecialKind.WhiteOut, new PointF[4] { new PointF(13, 17), new PointF(21, 12), new PointF(10, -1), new PointF(-2, 7) });
            sp9 = new Special(SpecialKind.Image, new PointF[2] { new PointF(18, 17), new PointF(28, 15) });
            sp9.imageBitmap = (Bitmap)Image.FromFile(TestUtil.GetTestFile("eventDB\\testimage.jpg"));
            sp9.text = "testimage.jpg";
            sp10 = new Special(SpecialKind.Line, new PointF[3] { new PointF(8, 7), new PointF(1, 2), new PointF(5, 12) });
            sp10.color = SpecialColor.Black;
            sp10.lineKind = LineKind.Single;
            sp10.lineWidth = 0.1F;
            sp11 = new Special(SpecialKind.Line, new PointF[3] { new PointF(8, 7), new PointF(1, 2), new PointF(5, 12) });
            sp11.color = new SpecialColor(1F, 0.66F, 0.45F, 0.83F);
            sp11.lineKind = LineKind.Double;
            sp11.lineWidth = 0.1F;
            sp11.gapSize = 0.15F;
            sp12 = new Special(SpecialKind.Line, new PointF[2] { new PointF(8, 7), new PointF(1, 2) });
            sp12.color = SpecialColor.Purple;
            sp12.lineKind = LineKind.Dashed;
            sp12.lineWidth = 0.1F;
            sp12.gapSize = 0.15F;
            sp12.dashSize = 0.44F;
            sp13 = new Special(SpecialKind.Rectangle, new PointF[2] { new PointF(8, 7), new PointF(1, 2) });
            sp13.color = SpecialColor.Purple;
            sp13.lineKind = LineKind.Single;
            sp13.lineWidth = 0.1F;
            sp13.cornerRadius = 0.23F;

            eventDB.AddSpecial(sp1);
            eventDB.AddSpecial(sp2);
            eventDB.AddSpecial(sp3);
            eventDB.AddSpecial(sp4);
            eventDB.AddSpecial(sp5);
            eventDB.AddSpecial(sp6);
            eventDB.AddSpecial(sp7);
            eventDB.AddSpecial(sp8);
            eventDB.AddSpecial(sp9);
            eventDB.AddSpecial(sp10);
            eventDB.AddSpecial(sp11);
            eventDB.AddSpecial(sp12);
            eventDB.AddSpecial(sp13);

            undomgr.EndCommand(88);

            eventDB.Save(TestUtil.GetTestFile("eventDB\\testoutput_temp.xml"));

            undomgr.Clear();
            eventDB = new EventDB(undomgr);

            eventDB.Load(TestUtil.GetTestFile("eventdb\\testoutput_temp.xml"));

            // The loaded image won't compare equal. Check the image is the same, then force equal.
            Special loadedSp9 = eventDB.GetSpecial(SpecialId(9));
            Assert.IsNotNull(loadedSp9.imageBitmap);
            Assert.AreEqual(loadedSp9.imageBitmap.Width, sp9.imageBitmap.Width);
            Assert.AreEqual(loadedSp9.imageBitmap.Height, sp9.imageBitmap.Height);
            Assert.AreEqual(loadedSp9.imageBitmap.RawFormat, sp9.imageBitmap.RawFormat);
            loadedSp9.imageBitmap = sp9.imageBitmap;

            TestUtil.TestEnumerableAnyOrder(eventDB.AllSpecialPairs,
                new KeyValuePair<Id<Special>, Special>[] {
                    new KeyValuePair<Id<Special>,Special>(SpecialId(1), sp1),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(2), sp2),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(3), sp3),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(4), sp4),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(5), sp5),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(6), sp6),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(7), sp7),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(8), sp8),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(9), sp9),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(10), sp10),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(11), sp11),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(12), sp12),
                    new KeyValuePair<Id<Special>,Special>(SpecialId(13), sp13),
                }
            );
        }
Example #15
        /// <summary>
        ///   Predicts the next observation occurring after a given observation sequence.
        /// </summary>
        ///
        private double[][] predict(double[][] observations, int next,
                                   out double logLikelihood, out double[][] lnFuture)
        {
            int states = States;
            int T      = next;

            double[,] lnA = Transitions;

            double[][] prediction  = new double[next][];
            double[][] expectation = new double[states][];

            // Compute expectations for each state
            for (int i = 0; i < states; i++)
            {
                expectation[i] = getMode(B[i]);
            }


            // Compute forward probabilities for the given observation sequence.
            double[,] lnFw0 = ForwardBackwardAlgorithm.LogForward(this, observations, out logLikelihood);

            // Create a matrix to store the future probabilities for the prediction
            // sequence and copy the latest forward probabilities on its first row.
            double[][] lnFwd = new double[T + 1][];
            for (int i = 0; i < lnFwd.Length; i++)
            {
                lnFwd[i] = new double[States];
            }


            // 1. Initialization
            for (int i = 0; i < States; i++)
            {
                lnFwd[0][i] = lnFw0[observations.Length - 1, i];
            }


            // 2. Induction
            for (int t = 0; t < T; t++)
            {
                double[] weights = lnFwd[t + 1];

                for (int i = 0; i < weights.Length; i++)
                {
                    double sum = Double.NegativeInfinity;
                    for (int j = 0; j < states; j++)
                    {
                        sum = Special.LogSum(sum, lnFwd[t][j] + lnA[j, i]);
                    }

                    weights[i] = sum + B[i].LogProbabilityFunction(expectation[i]);
                }

                double sumWeight = Double.NegativeInfinity;
                for (int i = 0; i < weights.Length; i++)
                {
                    sumWeight = Special.LogSum(sumWeight, weights[i]);
                }
                for (int i = 0; i < weights.Length; i++)
                {
                    weights[i] -= sumWeight;
                }

                // Select most probable value
                double maxWeight = weights[0];
                prediction[t] = expectation[0];
                for (int i = 1; i < states; i++)
                {
                    if (weights[i] > maxWeight)
                    {
                        maxWeight     = weights[i];
                        prediction[t] = expectation[i];
                    }
                }

                // Recompute log-likelihood
                logLikelihood = maxWeight;
            }

            // Returns the future-forward probabilities
            lnFuture = lnFwd;

            return(prediction);
        }
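For reference, Special.LogSum in the induction step above computes log(e^a + e^b) without underflow; how it does so internally is an assumption here, but the standard log-sum-exp identity it implements is exact:

    static double LogSumSketch(double lna, double lnb)
    {
        // log(e^a + e^b) = max(a, b) + log(1 + e^-|a-b|); -infinity is the identity element
        if (double.IsNegativeInfinity(lna)) return lnb;
        if (double.IsNegativeInfinity(lnb)) return lna;
        return Math.Max(lna, lnb) + Math.Log(1.0 + Math.Exp(-Math.Abs(lna - lnb)));
    }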
Example #16
        //
        // This routine uses the Goldfarb/Idnani algorithm to solve the
        // following minimization problem:
        //
        //       minimize 1/2 * x^T D x + d^T x
        //       where   A1 x  = b1
        //               A2 x >= b2
        //
        // the matrix D is assumed to be positive definite.  Especially,
        // w.l.o.g. D is assumed to be symmetric. This is slightly different
        // from the original implementation by Berwin A. Turlach.
        //
        // Input parameter:
        // dmat   nxn matrix, the matrix D from above (dp)
        //        *** WILL BE DESTROYED ON EXIT ***
        //        The user has two possibilities:
        //        a) Give D (ierr=0), in this case we use routines from LINPACK
        //           to decompose D.
        //        b) To get the algorithm started we need R^-1, where D=R^TR.
        //           So if it is cheaper to calculate R^-1 in another way (D may
        //           be a band matrix) then with the general routine, the user
        //           may pass R^{-1}.  Indicated by ierr not equal to zero.
        // dvec   nx1 vector, the vector d from above (dp)
        //        *** WILL BE DESTROYED ON EXIT ***
        //        contains on exit the solution to the initial, i.e.,
        //        unconstrained problem
        // fddmat scalar, the leading dimension of the matrix dmat
        // n      the dimension of dmat and dvec (int)
        // amat   nxq matrix, the matrix A from above (dp) [ A=(A1 A2) ]
        //        *** ENTRIES CORRESPONDING TO EQUALITY CONSTRAINTS MAY HAVE
        //            CHANGED SIGNS ON EXIT ***
        // bvec   qx1 vector, the vector of constants b in the constraints (dp)
        //        [ b = (b1^T b2^T)^T ]
        //        *** ENTRIES CORRESPONDING TO EQUALITY CONSTRAINTS MAY HAVE
        //            CHANGED SIGNS ON EXIT ***
        // fdamat the first dimension of amat as declared in the calling program.
        //        fdamat >= n !!
        // q      int, the number of constraints.
        // meq    int, the number of equality constraints, 0 <= meq <= q.
        // ierr   int, code for the status of the matrix D:
        //           ierr =  0, we have to decompose D
        //           ierr != 0, D is already decomposed into D=R^TR and we were
        //                      given R^{-1}.
        //
        // Output parameter:
        // sol   nx1 the final solution (x in the notation above)
        // crval scalar, the value of the criterion at the minimum
        // iact  qx1 vector, the constraints which are active in the final
        //       fit (int)
        // nact  scalar, the number of constraints active in the final fit (int)
        // iter  2x1 vector, first component gives the number of "main"
        //       iterations, the second one says how many constraints were
        //        deleted after they became active
        //  ierr  int, error code on exit, if
        //          ierr = 0, no problems
        //          ierr = 1, the minimization problem has no solution
        //          ierr = 2, problems with decomposing D, in this case sol
        //                     contains garbage!!
        //
        //  Working space:
        //  work  vector with length at least 2n+r*(r+5)/2 + 2q +1
        //        where r=min(n,q)
        //
        private void qpgen2(double[,] dmat, double[] dvec, int[] iact, out int nact, ref int ierr)
        {
            int n   = NumberOfVariables;
            int q   = NumberOfConstraints;
            int meq = NumberOfEqualities;

            double[,] amat = constraintMatrix;
            double[] bvec = constraintValues;
            double[] sol  = Solution;

            int    l1;
            double gc, gs, tt, sum;

            double f = 0;

            nact = 0;


            // Store the initial dvec to calculate below the
            //  unconstrained minima of the critical value.

            Array.Clear(iwzv, 0, iwzv.Length);
            Array.Clear(iwrv, 0, iwrv.Length);
            Array.Clear(iwuv, 0, iwuv.Length);
            Array.Clear(iwrm, 0, iwrm.Length);
            Array.Clear(iwsv, 0, iwsv.Length);
            Array.Clear(iwnbv, 0, iwnbv.Length);

            for (int i = 0; i < dvec.Length; i++)
            {
                work[i] = dvec[i];
            }

            for (int i = 0; i < iact.Length; i++)
            {
                iact[i] = -1;
            }


            // Get the initial solution
            if (ierr == 0)
            {
                // L'L = chol(D)
                bool success = dpofa(dmat);

                if (!success)
                {
                    ierr = 2;
                    return;
                }

                // L*x = d
                dposl(dmat, dvec);

                // D = inv(L)
                dpori(dmat);
            }
            else
            {
                // Matrix D is already factorized, so we have to multiply d first with
                // R^-T and then with R^-1.  R^-1 is stored in the upper half of the
                // array dmat.

                for (int j = 0; j < sol.Length; j++)
                {
                    sol[j] = 0.0;

                    for (int i = 0; i < j; i++)
                    {
                        sol[j] += dmat[j, i] * dvec[i];
                    }
                }

                for (int j = 0; j < dvec.Length; j++)
                {
                    dvec[j] = 0.0;

                    for (int i = j; i < sol.Length; i++)
                    {
                        dvec[j] += dmat[i, j] * sol[i];
                    }
                }
            }

            // Set upper triangular of dmat to zero, store dvec in sol and
            //   calculate value of the criterion at unconstrained minima

            f = 0.0;


            // calculate some constants, i.e., from which index on
            // the different quantities are stored in the work matrix

            for (int j = 0; j < sol.Length; j++)
            {
                sol[j]  = dvec[j];
                f      += work[j] * sol[j];
                work[j] = 0.0;

                for (int i = j + 1; i < n; i++)
                {
                    dmat[j, i] = 0.0;
                }
            }

            f    = -f / 2.0;
            ierr = 0;


            // calculate the norm of each column of the A matrix

            for (int i = 0; i < iwnbv.Length; i++)
            {
                sum = 0.0;

                for (int j = 0; j < n; j++)
                {
                    sum += amat[i, j] * amat[i, j];
                }

                iwnbv[i] = Math.Sqrt(sum);
            }

            nact       = 0;
            Iterations = 0;
            Deletions  = 0;


L50:        // start a new iteration

            Iterations++;

            // calculate all constraints and check which are still violated
            // for the equality constraints we have to check whether the normal
            // vector has to be negated (as well as bvec in that case)

            int l = 0;

            for (int i = 0; i < bvec.Length; i++)
            {
                sum = -bvec[i];

                for (int j = 0; j < sol.Length; j++)
                {
                    sum += amat[i, j] * sol[j];
                }

                if (i >= meq)
                {
                    // this is an inequality constraint
                    iwsv[l] = sum;
                }
                else
                {
                    // this is an equality constraint
                    iwsv[l] = -Math.Abs(sum);
                    if (sum > 0.0)
                    {
                        for (int j = 0; j < n; j++)
                        {
                            amat[i, j] = -amat[i, j];
                        }
                        bvec[i] = -bvec[i];
                    }
                }

                l++;
            }

            // as safeguard against rounding errors set
            // already active constraints explicitly to zero

            for (int i = 0; i < nact; i++)
            {
                iwsv[iact[i]] = 0.0;
            }

            // We weight each violation by the number of non-zero elements in the
            // corresponding row of A. then we choose the violated constraint which
            // has maximal absolute value, i.e., the minimum. By obvious commenting
            // and uncommenting we can choose the strategy to take always the first
            // constraint which is violated. ;-)

            int    nvl  = -1;
            double temp = 0.0;

            for (int i = 0; i < iwnbv.Length; i++)
            {
                if (iwsv[i] < temp * iwnbv[i])
                {
                    nvl  = i;
                    temp = iwsv[i] / iwnbv[i];
                }

                // if (work(iwsv+i) .LT. 0.d0) then
                //     nvl = i
                //     goto 72
                // endif
            }

            if (nvl == -1)
            {
                return;
            }


L55:

            // calculate d=J^Tn^+ where n^+ is the normal vector of the violated
            // constraint. J is stored in dmat in this implementation!!
            // if we drop a constraint, we have to jump back here.

            for (int i = 0; i < work.Length; i++)
            {
                sum = 0.0;

                for (int j = 0; j < n; j++)
                {
                    sum += dmat[i, j] * amat[nvl, j];
                }

                work[i] = sum;
            }

            // Now calculate z = J_2 d_2 ...

            for (int i = 0; i < iwzv.Length; i++)
            {
                iwzv[i] = 0.0;
            }

            for (int j = nact; j < work.Length; j++)
            {
                for (int i = 0; i < iwzv.Length; i++)
                {
                    iwzv[i] += dmat[j, i] * work[j];
                }
            }

            // ... and r = R^{-1} d_1, check also if r has positive elements
            // (among the entries corresponding to inequalities constraints).

            l1 = 0;
            int    it1   = 0;
            double t1    = 0;
            bool   t1inf = true;

            for (int i = nact - 1; i >= 0; i--)
            {
                sum = work[i];
                l   = ((i + 1) * (i + 4)) / 2 - 1;
                l1  = l - i - 1;

                for (int j = i + 1; j < nact; j++)
                {
                    sum -= iwrm[l] * iwrv[j];
                    l   += j + 1;
                }

                sum /= iwrm[l1];

                iwrv[i] = sum;

                if (iact[i] + 1 < meq)
                {
                    continue;
                }

                if (sum <= 0.0)
                {
                    continue;
                }

                if (Double.IsNaN(sum))
                {
                    continue;
                }

                t1inf = false;
                it1   = i;
            }

            // if r has positive elements, find the partial step length t1, which is
            // the maximum step in dual space without violating dual feasibility.
            // it1 stores in which component t1, the min of u/r, occurs.

            if (!t1inf)
            {
                t1 = iwuv[it1] / iwrv[it1];

                for (int i = 0; i < nact; i++)
                {
                    if (iact[i] < meq)
                    {
                        continue;
                    }

                    if (iwrv[i] < 0.0)
                    {
                        continue;
                    }

                    temp = iwuv[i] / iwrv[i];

                    if (temp < t1)
                    {
                        t1  = temp;
                        it1 = i;
                    }
                }
            }


            // test if the z vector is equal to zero

            sum = 0.0;
            for (int i = 0; i < iwzv.Length; i++)
            {
                sum += iwzv[i] * iwzv[i];
            }

            if (Math.Abs(sum) < Constants.DoubleEpsilon)
            {
                // No step in primal space such that the new constraint becomes
                // feasible. Take step in dual space and drop a constraint.

                if (t1inf)
                {
                    // No step in dual space possible
                    // either, problem is not solvable
                    ierr = 1;
                    return;
                }
                else
                {
                    // we take a partial step in dual space and drop constraint it1,
                    // that is, we drop the it1-th active constraint.
                    // then we continue at step 2(a) (marked by label 55)

                    for (int i = 0; i < nact; i++)
                    {
                        iwuv[i] -= t1 * iwrv[i];
                    }

                    iwuv[nact] += t1;
                    goto L700;
                }
            }
            else
            {
                // compute full step length t2, minimum step in primal space such that
                // the constraint becomes feasible.
                // keep sum (which is z^Tn^+) to update crval below!

                sum = 0.0;
                for (int i = 0; i < iwzv.Length; i++)
                {
                    sum += iwzv[i] * amat[nvl, i];
                }

                tt = -iwsv[nvl] / sum;
                bool t2min = true;

                if (!t1inf)
                {
                    if (t1 < tt)
                    {
                        tt    = t1;
                        t2min = false;
                    }
                }

                // take step in primal and dual space
                for (int i = 0; i < sol.Length; i++)
                {
                    sol[i] += tt * iwzv[i];
                }

                f += tt * sum * (tt / 2.0 + iwuv[nact]);

                for (int i = 0; i < nact; i++)
                {
                    iwuv[i] -= tt * iwrv[i];
                }

                iwuv[nact] += tt;

                // if it was a full step, then we check whether further constraints are
                // violated otherwise we can drop the current constraint and iterate once
                // more

                if (t2min)
                {
                    // we took a full step. Thus add constraint nvl to the list of active
                    // constraints and update J and R

                    iact[nact++] = nvl;


                    // to update R we have to put the first nact-1 components of the d vector
                    // into column (nact) of R

                    l = ((nact - 1) * (nact)) / 2;
                    for (int i = 0; i < nact - 1; i++, l++)
                    {
                        iwrm[l] = work[i];
                    }

                    // if now nact=n, then we just have to add the last element to the new
                    // row of R.

                    // Otherwise we use Givens transformations to turn the vector d(nact:n)
                    // into a multiple of the first unit vector. That multiple goes into the
                    // last element of the new row of R and J is accordingly updated by the
                    // Givens transformations.

                    if (nact == n)
                    {
                        iwrm[l] = work[n - 1];
                    }
                    else
                    {
                        for (int i = n - 1; i >= nact; i--)
                        {
                            // We have to find the Givens rotation which will reduce the element
                            // (l1) of d to zero. If it is already zero we don't have to do anything,
                            // except decreasing l1

                            if (work[i] == 0.0)
                            {
                                continue;
                            }

                            gc   = Math.Max(Math.Abs(work[i - 1]), Math.Abs(work[i]));
                            gs   = Math.Min(Math.Abs(work[i - 1]), Math.Abs(work[i]));
                            temp = Special.Sign(gc * Math.Sqrt(1.0 + gs * gs / (gc * gc)), work[i - 1]);
                            gc   = work[i - 1] / temp;
                            gs   = work[i] / temp;

                            // The Givens rotation is done with the matrix (gc gs, gs -gc). If
                            // gc is one, then element (i) of d is zero compared with element
                            // (l1-1). Hence we don't have to do anything. If gc is zero, then
                            // we just have to switch column (i) and column (i-1) of J. Since
                            // we only switch columns in J, we have to be careful how we update
                            // d depending on the sign of gs. Otherwise we have to apply the
                            // Givens rotation to these columns. The i-1 element of d has to be
                            // updated to temp.

                            if (gc == 1.0)
                            {
                                continue;
                            }

                            if (gc == 0.0)
                            {
                                work[i - 1] = gs * temp;

                                for (int j = 0; j < n; j++)
                                {
                                    temp           = dmat[i - 1, j];
                                    dmat[i - 1, j] = dmat[i, j];
                                    dmat[i, j]     = temp;
                                }
                            }
                            else
                            {
                                work[i - 1] = temp;
                                double nu = gs / (gc + 1.0);

                                for (int j = 0; j < n; j++)
                                {
                                    temp           = gc * dmat[i - 1, j] + gs * dmat[i, j];
                                    dmat[i, j]     = nu * (dmat[i - 1, j] + temp) - dmat[i, j];
                                    dmat[i - 1, j] = temp;
                                }
                            }
                        }

                        // l is still pointing to element (nact,nact) of
                        // the matrix R. So store d(nact) in R(nact,nact)
                        iwrm[l] = work[nact - 1];
                    }
                }
                else
                {
                    // We took a partial step in dual space. Thus drop constraint it1,
                    // that is, we drop the it1-th active constraint. Then we continue
                    // at step 2(a) (marked by label 55) but since the fit changed, we
                    // have to recalculate now "how much" the fit violates the chosen
                    // constraint now.

                    sum = -bvec[nvl];

                    for (int j = 0; j < sol.Length; j++)
                    {
                        sum += sol[j] * amat[nvl, j];
                    }

                    if (nvl >= meq)
                    {
                        iwsv[nvl] = sum;
                    }
                    else
                    {
                        iwsv[nvl] = -Math.Abs(sum);

                        if (sum > 0.0)
                        {
                            for (int j = 0; j < n; j++)
                            {
                                amat[nvl, j] = -amat[nvl, j];
                            }

                            bvec[nvl] = -bvec[nvl];
                        }
                    }

                    goto L700;
                }
            }

            goto L50;


L700:       // Drop constraint it1

            // if it1 = nact it is only necessary
            // to update the vector u and nact

            if (it1 == nact - 1)
            {
                goto L799;
            }


L797:       // After updating one row of R (column of J) we will also come back here

            // We have to find the Givens rotation which will reduce the element
            // (it1+1,it1+1) of R to zero. If it is already zero we don't have to
            // do anything except updating u, iact, and shifting column (it1+1)
            // of R to column (it1). Then l  will point to element (1,it1+1) of R
            // and l1 will point to element (it1+1,it1+1) of R.

            l  = ((it1 + 1) * (it1 + 2)) / 2;
            l1 = l + it1 + 1;

            if (iwrm[l1 - 1] == 0.0)
            {
                goto L798;
            }

            gc   = Math.Max(Math.Abs(iwrm[l1 - 1]), Math.Abs(iwrm[l1]));
            gs   = Math.Min(Math.Abs(iwrm[l1 - 1]), Math.Abs(iwrm[l1]));
            temp = Special.Sign(gc * Math.Sqrt(1.0 + gs * gs / (gc * gc)), iwrm[l1 - 1]);
            gc   = iwrm[l1 - 1] / temp;
            gs   = iwrm[l1] / temp;


            // The Givens rotation is done with the matrix (gc gs, gs -gc). If gc is
            // one, then element (it1+1,it1+1) of R is zero compared with element
            // (it1,it1+1). Hence we don't have to do anything. if gc is zero, then
            // we just have to switch row (it1) and row (it1+1) of R and column (it1)
            // and column (it1+1) of J. Since we switch rows in R and columns in J,
            // we can ignore the sign of gs. Otherwise we have to apply the Givens
            // rotation to these rows/columns.

            if (gc == 1.0)
            {
                goto L798;
            }

            if (gc == 0.0)
            {
                for (int i = it1 + 2; i <= nact; i++)
                {
                    temp         = iwrm[l1 - 1];
                    iwrm[l1 - 1] = iwrm[l1];
                    iwrm[l1]     = temp;
                    l1          += i;
                }

                for (int i = 0; i < n; i++)
                {
                    temp             = dmat[it1, i];
                    dmat[it1, i]     = dmat[it1 + 1, i];
                    dmat[it1 + 1, i] = temp;
                }
            }
            else
            {
                double nu = gs / (gc + 1.0);

                for (int i = it1 + 2; i <= nact; i++)
                {
                    temp         = gc * iwrm[l1 - 1] + gs * iwrm[l1];
                    iwrm[l1]     = nu * (iwrm[l1 - 2] + temp) - iwrm[l1];
                    iwrm[l1 - 1] = temp;
                    l1          += i;
                }

                for (int i = 0; i < n; i++)
                {
                    temp             = gc * dmat[it1, i] + gs * dmat[it1 + 1, i];
                    dmat[it1 + 1, i] = nu * (dmat[it1, i] + temp) - dmat[it1 + 1, i];
                    dmat[it1, i]     = temp;
                }
            }

L798:

            // shift column (it1+1) of R to column (it1) (that is, the first it1
            // elements). The position of element (1,it1+1) of R was calculated
            // above and stored in l.

            l1 = l - it1;
            for (int i = 0; i <= it1; i++, l++, l1++)
            {
                iwrm[l1 - 1] = iwrm[l];
            }

            // update vector u and iact as necessary
            // Continue with updating the matrices J and R

            iwuv[it1] = iwuv[it1 + 1];
            iact[it1] = iact[it1 + 1];
            it1++;

            if (it1 < nact - 1)
            {
                goto L797;
            }

L799:

            iwuv[nact - 1] = iwuv[nact];
            iwuv[nact]     = 0.0;
            iact[nact - 1] = -1;

            nact--;
            Deletions++;

            goto L55;
        }
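A tiny worked instance of the problem statement at the top of this routine: with D = I, d = (-1, -1) and the single equality constraint x1 + x2 = 1, the unconstrained minimum is (1, 1), the constrained solution is (0.5, 0.5), and the criterion value is -0.75. A hedged sketch of setting this up through the public Accord.NET wrapper (property names are best-effort assumptions; this private routine is not called directly):

    // minimize 1/2 x'Dx + d'x  subject to  x1 + x2 = 1
    var f = new QuadraticObjectiveFunction("0.5x² + 0.5y² - x - y");
    var constraints = new List<LinearConstraint>
    {
        new LinearConstraint(numberOfVariables: 2)
        {
            VariablesAtIndices = new[] { 0, 1 },
            CombinedAs = new double[] { 1, 1 },
            ShouldBe = ConstraintType.EqualTo,
            Value = 1.0
        }
    };
    var solver = new GoldfarbIdnani(f, constraints);
    bool success = solver.Minimize();
    double[] x = solver.Solution;   // expected (0.5, 0.5)
    double min = solver.Value;      // expected -0.75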
Example #17
        /// <summary>
        ///   Learns a model that can map the given inputs to the desired outputs.
        /// </summary>
        ///
        /// <param name="x">The model inputs.</param>
        /// <param name="weights">The weight of importance for each input sample.</param>
        ///
        /// <returns>A model that has learned how to produce suitable outputs
        ///   given the input data <paramref name="x"/>.</returns>
        ///
        public TModel Learn(TObservation[][] x, double[] weights = null)
        {
            // Initial argument checks
            CheckArgs(x, weights);

            // Baum-Welch algorithm.

            // The Baum–Welch algorithm is a particular case of a generalized expectation-maximization
            // (GEM) algorithm. It can compute maximum likelihood estimates and posterior mode estimates
            // for the parameters (transition and emission probabilities) of an HMM, when given only
            // emissions as training data.

            // The algorithm has two steps:
            //  - Calculating the forward probability and the backward probability for each HMM state;
            //  - On the basis of this, determining the frequency of the transition-emission pair values
            //    and dividing it by the probability of the entire string. This amounts to calculating
            //    the expected count of the particular transition-emission pair. Each time a particular
            //    transition is found, the value of the quotient of the transition divided by the probability
            //    of the entire string goes up, and this value can then be made the new value of the transition.


            samples            = x.Concatenate();
            vectorObservations = x;
            sampleWeights      = new double[samples.Length];

            if (Model == null)
            {
                throw new InvalidOperationException("The model must have been created first.");
            }
            //Model = CreateModel(observations);

            // Grab model information
            int states = Model.NumberOfStates;
            var logA   = Model.LogTransitions;
            var logP   = Model.LogInitial;


            // Initialize the algorithm
            int    N    = x.Length;
            double logN = Math.Log(N);

            LogKsi     = new double[N][][, ];
            LogGamma   = new double[N][, ];
            LogWeights = new double[N];
            if (weights != null)
            {
                weights.Log(result: LogWeights);
            }


            for (int i = 0; i < x.Length; i++)
            {
                int T = x[i].Length;

                LogKsi[i]   = new double[T][, ];
                LogGamma[i] = new double[T, states];

                for (int t = 0; t < LogKsi[i].Length; t++)
                {
                    LogKsi[i][t] = new double[states, states];
                }
            }


            int TMax = x.Apply(x_i => x_i.Length).Max();

            double[,] lnFwd = new double[TMax, states];
            double[,] lnBwd = new double[TMax, states];

            // Initialize the model log-likelihoods
            double newLogLikelihood = Double.NegativeInfinity;

            convergence.NewValue = Double.NegativeInfinity;


            while (true) // Until convergence or max iterations is reached
            {
                // For each sequence in the observations input
                for (int i = 0; i < x.Length; i++)
                {
                    int T = x[i].Length;
                    double[,] logGamma = LogGamma[i];
                    double w = LogWeights[i];


                    // 1st step - Calculating the forward probability and the
                    //            backward probability for each HMM state.
                    ComputeForwardBackward(i, lnFwd, lnBwd);


                    // 2nd step - Determining the frequency of the transition-emission pair values
                    //            and dividing it by the probability of the entire string.

                    // Calculate gamma values for next computations
                    // TODO: Use parallel-for
                    for (int t = 0; t < T; t++)
                    {
                        double lnsum = Double.NegativeInfinity;
                        for (int k = 0; k < states; k++)
                        {
                            logGamma[t, k] = lnFwd[t, k] + lnBwd[t, k] + w;
                            lnsum          = Special.LogSum(lnsum, logGamma[t, k]);
                        }

                        Accord.Diagnostics.Debug.Assert(!Double.IsNaN(lnsum));

                        // Normalize if different from zero
                        if (lnsum != Double.NegativeInfinity)
                        {
                            for (int k = 0; k < states; k++)
                            {
                                logGamma[t, k] = logGamma[t, k] - lnsum;
                            }
                        }
                    }

                    // Calculate ksi values for next computations
                    ComputeKsi(i, lnFwd, lnBwd);

                    // Compute log-likelihood for the given sequence
                    for (int j = 0; j < states; j++)
                    {
                        newLogLikelihood = Special.LogSum(newLogLikelihood, lnFwd[T - 1, j]);
                    }
                }


                // Average the likelihood for all sequences
                newLogLikelihood    /= x.Length;
                convergence.NewValue = newLogLikelihood;
                LogLikelihood        = newLogLikelihood;

                // Check for convergence
                if (convergence.HasConverged || Token.IsCancellationRequested)
                {
                    break;
                }

                // 3. Continue with parameter re-estimation
                newLogLikelihood = Double.NegativeInfinity;

                // 3.1 Re-estimation of initial state probabilities
                // TODO: Use parallel-for
                for (int i = 0; i < logP.Length; i++)
                {
                    double lnsum = Double.NegativeInfinity;
                    for (int k = 0; k < LogGamma.Length; k++)
                    {
                        lnsum = Special.LogSum(lnsum, LogGamma[k][0, i]);
                    }
                    logP[i] = lnsum - logN;
                }

                // 3.2 Re-estimation of transition probabilities
                // TODO: Use parallel-for
                for (int i = 0; i < states; i++)
                {
                    for (int j = 0; j < states; j++)
                    {
                        double lnnum = Double.NegativeInfinity;
                        double lnden = Double.NegativeInfinity;

                        for (int k = 0; k < LogGamma.Length; k++)
                        {
                            int T = x[k].Length;

                            for (int t = 0; t < T - 1; t++)
                            {
                                lnnum = Special.LogSum(lnnum, LogKsi[k][t][i, j]);
                                lnden = Special.LogSum(lnden, LogGamma[k][t, i]);
                            }
                        }

                        logA[i][j] = (lnnum == lnden) ? 0 : lnnum - lnden;

                        Accord.Diagnostics.Debug.Assert(!Double.IsNaN(logA[i][j]));
                    }
                }

                // 3.3 Re-estimation of emission probabilities
                UpdateEmissions(); // discrete and continuous
            }

            // Returns the model average log-likelihood
            return(Model);
        }
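A hedged usage sketch for the discrete counterpart of this learner (class, topology, and property names follow Accord.NET conventions but are assumptions here; LogLikelihood is the property set by the loop above):

    // three short symbol sequences over the alphabet {0, 1, 2}
    int[][] sequences =
    {
        new[] { 0, 1, 2, 1 },
        new[] { 0, 1, 1, 2 },
        new[] { 0, 2, 2, 1 },
    };

    var teacher = new BaumWelchLearning()
    {
        Topology        = new Ergodic(states: 2),
        NumberOfSymbols = 3,
        Tolerance       = 1e-4,   // convergence threshold checked each pass
        Iterations      = 100     // hard cap on the while loop above
    };

    HiddenMarkovModel hmm = teacher.Learn(sequences);
    double ll = teacher.LogLikelihood;   // average log-likelihood at convergence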
Example #18
        /// <summary>
        ///   Gets the inverse of the cumulative distribution function (icdf) for
        ///   this distribution evaluated at probability <c>p</c>. This function
        ///   is also known as the Quantile function.
        /// </summary>
        ///
        /// <param name="p">A probability value between 0 and 1.</param>
        ///
        /// <returns>
        ///   A sample which could originate the given probability
        ///   value when applied to the <see cref="DistributionFunction(int)"/>.
        /// </returns>
        ///
        public override int InverseDistributionFunction(double p)
        {
            double ratio = Special.Log1m(p) / Special.Log1m(this.p);

            return((int)Math.Floor(ratio));
        }
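A quick numeric check of the formula above, with nothing beyond Math.Log (values rounded):

    double pSuccess = 0.3;   // the distribution parameter this.p
    double target   = 0.5;   // the requested probability
    double ratio    = Math.Log(1 - target) / Math.Log(1 - pSuccess);   // ~1.943
    int quantile    = (int)Math.Floor(ratio);                          // 1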
Example #19
        protected string GetEditLink(object dataItem)
        {
            Special special = (Special)dataItem;

            return("EditSpecial.aspx?SpecialId=" + special.Id + "&CategoryId=" + _CategoryId + "&ProductId=" + special.ProductId);
        }
Example #20
 private string InitSiteMapCategory(int categoryId)
 {
     return(Special.GetSpecialCategoryInfoById(categoryId).SpecialCategoryName);
 }
Example #21
        /// <summary>
        ///     Gets the image name for the start object of a note.
        /// </summary>
        /// <param name="type"></param>
        /// <param name="special"></param>
        /// <param name="direction"></param>
        /// <returns></returns>
        public static string GetStartObjectImageNameByType(ObjectType type, Special special, Direction direction)
        {
            string path = GetObjectImagePathByType(type, special, false) + GetImageNameByDirection(direction);

            return(path);
        }
Example #22
        /// <summary>
        ///   Computes the Upper Tail of the P[Dn &gt;= x] distribution.
        /// </summary>
        ///
        /// <remarks>
        ///   This function approximates the upper tail of the P[Dn &gt;= x]
        ///   distribution using the one-sided Kolmogorov-Smirnov statistic.
        /// </remarks>
        ///
        public static double OneSideUpperTail(double n, double x)
        {
            if (n > 200000)
            {
                // Use an asymptotic formula for n too high
                double t = (6 * n * x + 1.0);
                double z = t * t / (18 * n);
                double v = (1.0 - (2 * z * z - 4 * z - 1.0) / (18 * n)) * Math.Exp(-z);

                if (v <= 0.0)
                {
                    return(0.0);
                }
                if (v >= 1.0)
                {
                    return(1.0);
                }
                else
                {
                    return(v);
                }
            }
            else
            {
                // Use Smirnov's stable formula for computing Pn+, the upper tail of
                // the one-sided Kolmogorov's statistic Dn+. This upper tail of the
                // one-sided statistic can then be used to approximate the upper tail
                // Pn of the Kolmogorov statistic Dn with Pn ~ 2*Pn+.

                int jmax = (int)(n * (1.0 - x));
                if ((1.0 - x - (double)jmax / n) <= 0.0)
                {
                    jmax--;
                }

                // Initialize
                int jdiv   = (n > 3000) ? 2 : 3;
                int jstart = jmax / jdiv + 1;

                double logBinomial = Special.LogBinomial(n, jstart);
                double LOGJMAX     = logBinomial;
                double EPSILON     = 1.0E-12;


                // Start computing the series
                double sum = 0;

                for (int j = jstart; j <= jmax; j++)
                {
                    double q    = (double)j / n + x;
                    double term = logBinomial + (j - 1) * Math.Log(q) + (n - j) * Special.Log1p(-q);
                    double t    = Math.Exp(term);

                    sum         += t;
                    logBinomial += Math.Log((double)(n - j) / (j + 1));

                    if (t <= sum * EPSILON)
                    {
                        break;
                    }
                }

                jstart      = jmax / jdiv;
                logBinomial = LOGJMAX + Math.Log((double)(jstart + 1) / (n - jstart));

                for (int j = jstart; j > 0; j--)
                {
                    double q    = (double)j / n + x;
                    double term = logBinomial + (j - 1) * Math.Log(q) + (n - j) * Special.Log1p(-q);
                    double t    = Math.Exp(term);

                    sum         += t;
                    logBinomial += Math.Log((double)j / (n - j + 1));

                    if (t <= sum * EPSILON)
                    {
                        break;
                    }
                }


                return(sum * x + Math.Exp(n * Special.Log1p(-x)));
            }
        }
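As the remark in the Smirnov branch notes, the two-sided upper tail Pn is commonly approximated from the one-sided value; a brief sketch (the clamp to 1 is a safeguard, not from the source):

    double pnPlus   = OneSideUpperTail(100, 0.12);   // one-sided P[Dn+ >= x]
    double twoSided = Math.Min(1.0, 2.0 * pnPlus);   // Pn ~ 2 * Pn+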
Example #23
        public static IDictionary <string, List <IModifier> > ReadTriggerLex(string filename)
        {
            var lex = new Dictionary <string, List <IModifier> >();

            using (var reader = new StreamReader(filename))
                using (var csv = new CsvReader(reader))
                {
                    csv.Configuration.HasHeaderRecord = true;
                    //Triggerword	ModifierType	Lookahead	ModValue	Extra
                    csv.Read();
                    csv.Configuration.Delimiter = ",";
                    while (csv.Read())
                    {
                        var mods      = new List <IModifier>();
                        var tword     = Program.InternConcurrentSafe(csv.GetField(0).ToLower());
                        var lookahead = int.Parse(csv.GetField(2));
                        var modValue  = float.Parse(csv.GetField(3).Replace(".", ","));

                        IModifier modifier = null; // this could be waaaay prettier... Just wanted to try a switch case
                        switch (csv.GetField(1))
                        {
                        case "mult":
                            modifier = new Mult(modValue, lookahead);
                            break;

                        case "v":
                            var tokens = new List <Token>();
                            foreach (string s in csv.GetField(4).Split("|")) // couldn't a vending just take strings instead?... no reason to have a token
                            {
                                tokens.Add(new Token(s, 0));
                            }
                            modifier = new Vending(modValue, lookahead, tokens);
                            break;

                        case "neg":
                            modifier = new Mult(-1.0f, lookahead);
                            break;

                        case "repeating":
                            modifier = new Repeating(modValue, lookahead, tword, int.Parse(csv.GetField(4)));
                            break;

                        case "special":
                            modifier = new Special(modValue, lookahead);
                            break;

                        default:
                            FileWriter.WriteErrorLog("[TriggerLexicon] Couldn't match triggerword '" + tword + "' -> is it unique in the lex?");
                            break;
                        }

                        if (!lex.ContainsKey(tword))
                        {
                            mods.Add(modifier);
                            if (!lex.TryAdd(tword, mods))
                            {
                                FileWriter.WriteErrorLog("[Lexicon] Couldn't add '" + tword + "' with mod " + modifier + " -> is it unique in the lex?");
                            }
                        }
                        else
                        {
                            lex.TryGetValue(tword, out mods);
                            mods.Add(modifier);
                            lex.Remove(tword);
                            lex.Add(tword, mods);
                        }
                    }
                    reader.Close();
                }
            return(lex);
        }
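Based on the header comment and the field indices used above, the trigger lexicon is a comma-delimited file shaped roughly as below (hypothetical rows). Note that ModValue decimals are written with '.' and converted to ',' before float.Parse, so a comma-decimal locale is assumed; the Extra column is a '|'-separated token list for "v" rows and a repeat count for "repeating" rows:

    Triggerword,ModifierType,Lookahead,ModValue,Extra
    very,mult,1,1.5,
    not,neg,2,0,
    free,v,1,1.0,win|prize
    spam,repeating,3,0.5,4
    ace,special,1,2.0,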
Example #24
        void Edit(int selected)
        {
            selectedItem = specialsDatabase.Get(selected);
            EditorStyles.textField.wordWrap = true;

            GUILayout.BeginHorizontal(GUILayout.ExpandWidth(true));
            GUILayout.BeginVertical(GUILayout.ExpandHeight(true));
            // Fields to show/change the name and 3 character designation of the selected special
            selectedItem.Name = EditorGUILayout.TextField("Name:", selectedItem.Name, GUILayout.Width(330));
            selectedItem.CharDesig = EditorGUILayout.TextField("3 Char Desig:", selectedItem.CharDesig);
            selectedItem.EnhancedAttribute = (Special.enhance)EditorGUILayout.EnumPopup("Enhanced Attribute",selectedItem.EnhancedAttribute);

            GUILayout.EndVertical();
            GUILayout.FlexibleSpace();

            // Shows and lets you change the sprite
            // Finds the sprite's texture if there is one
            if (specialsDatabase.Get(selected).Icon)
            {
                selectedTexture = specialsDatabase.Get(selected).Icon.texture;
            }
            else { selectedTexture = null; }

            if (GUILayout.Button(selectedTexture, GUILayout.Width(SPRITE_BUTTON_SIZE), GUILayout.Height(SPRITE_BUTTON_SIZE)))
            {
                int controllerID = EditorGUIUtility.GetControlID(FocusType.Passive);
                EditorGUIUtility.ShowObjectPicker<Sprite>(null, true, null, controllerID);
            }

            string commandName = Event.current.commandName;
            if (commandName == "ObjectSelectorUpdated")
            {
                selectedItem.Icon = (Sprite)EditorGUIUtility.GetObjectPickerObject();
                Repaint();
            }

            GUILayout.EndHorizontal();

            GUILayout.BeginHorizontal();
            selectedItem.Description = EditorGUILayout.TextField("Description:", selectedItem.Description, GUILayout.ExpandWidth(true), GUILayout.Width(330), GUILayout.Height(200));
            GUILayout.EndHorizontal();

            GUILayout.BeginHorizontal();

            if (GUILayout.Button("Save"))
            {
                if (selectedItem == null || selectedItem.Name == "") { return; }

                specialsDatabase.Replace(selected, selectedItem);
                current = State.List;

            }

            if (GUILayout.Button("Cancel"))
            {
                current = State.List;

            }

            if (GUILayout.Button("Delete"))
            {
                if (EditorUtility.DisplayDialog("Delete Special",
                    "Are you sure that you want to delete " + selectedItem.Name + "  special ability forever???",
                    "Delete",
                    "Cancel"))
                {
                    specialsDatabase.Remove(selected);
                    current = State.List;
                }
            }
            GUILayout.EndHorizontal();
        }
Ejemplo n.º 25
        /// <summary>
        ///   von-Mises cumulative distribution function.
        /// </summary>
        ///
        /// <remarks>
        ///   This method implements the Von-Mises CDF calculation code
        ///   as given by Geoffrey Hill on his original FORTRAN code and
        ///   shared under the GNU LGPL license.
        ///
        /// <para>
        ///   References:
        ///   <list type="bullet">
        ///     <item><description>Geoffrey Hill, ACM TOMS Algorithm 518,
        ///     Incomplete Bessel Function I0: The von Mises Distribution,
        ///     ACM Transactions on Mathematical Software, Volume 3, Number 3,
        ///     September 1977, pages 279-284.</description></item>
        ///   </list></para>
        /// </remarks>
        ///
        /// <param name="x">The point where to calculate the CDF.</param>
        /// <param name="mu">The location parameter μ (mu).</param>
        /// <param name="kappa">The concentration parameter κ (kappa).</param>
        ///
        /// <returns>The value of the von-Mises CDF at point <paramref name="x"/>.</returns>
        ///
        public static double DistributionFunction(double x, double mu, double kappa)
        {
            double a1  = 12.0;
            double a2  = 0.8;
            double a3  = 8.0;
            double a4  = 1.0;
            double c1  = 56.0;
            double ck  = 10.5;
            double cdf = 0;

            if (x - mu <= -Math.PI)
            {
                return(0);
            }

            if (Math.PI <= x - mu)
            {
                return(1.0);
            }

            double z = kappa;

            double u = (x - mu + Math.PI) % (2.0 * Math.PI);

            if (u < 0.0)
            {
                u = u + 2.0 * Math.PI;
            }

            double y = u - Math.PI;

            if (z <= ck)
            {
                double v = 0.0;

                if (0.0 < z)
                {
                    double ip = Math.Floor(z * a2 - a3 / (z + a4) + a1);
                    double p  = ip;
                    double s  = Math.Sin(y);
                    double c  = Math.Cos(y);
                    y = p * y;
                    double sn = Math.Sin(y);
                    double cn = Math.Cos(y);
                    double r  = 0.0;
                    z = 2.0 / z;

                    for (int n = 2; n <= ip; n++)
                    {
                        p  = p - 1.0;
                        y  = sn;
                        sn = sn * c - cn * s;
                        cn = cn * c + y * s;
                        r  = 1.0 / (p * z + r);
                        v  = (sn / p + v) * r;
                    }
                }

                cdf = (u * 0.5 + v) / Math.PI;
            }
            else
            {
                double c = 24.0 * z;
                double v = c - c1;
                double r = Math.Sqrt((54.0 / (347.0 / v + 26.0 - c) - 6.0 + c) / 12.0);
                z = Math.Sin(0.5 * y) * r;
                double s = 2.0 * z * z;
                v = v - s + 3.0;
                y = (c - s - s - 16.0) / 3.0;
                y = ((s + 1.75) * s + 83.5) / v - y;
                double arg  = z * (1.0 - s / (y * y));
                double erfx = Special.Erf(arg);
                cdf = 0.5 * erfx + 0.5;
            }

            cdf = Math.Max(cdf, 0.0);
            cdf = Math.Min(cdf, 1.0);

            return(cdf);
        }
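
A quick sanity check of the routine above (the call site is hypothetical): by symmetry the von-Mises CDF equals 0.5 at x = mu, and the guard clauses clamp it to 0 and 1 at the interval boundaries:

double mid = DistributionFunction(x: 0.0, mu: 0.0, kappa: 2.0);  // 0.5 by symmetry
double lo  = DistributionFunction(-Math.PI, 0.0, 2.0);           // 0.0 (left boundary)
double hi  = DistributionFunction(+Math.PI, 0.0, 2.0);           // 1.0 (right boundary)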
Ejemplo n.º 26
 /// <summary>
 /// Initializes a product data entry for use in XML.
 /// </summary>
 /// <param name="currencyType">Type of currency the item is sold for</param>
 /// <param name="description">NFS: World Beta feature, still gonna keep it for MAYBE future-use</param>
 /// <param name="rentalDurationInMinutes">0 if not a rental, rental duration in minutes if else</param>
 /// <param name="hash">Item hash value that is recognized by NFS: World</param>
 /// <param name="iconString">Item icon that is displayed somewhere around its title</param>
 /// <param name="levelLimit">0 if not level limited, minimum level value if else</param>
 /// <param name="tooltipDescription">NFS: World Beta feature, still gonna keep it for MAYBE future-use</param>
 /// <param name="price">How much the item is sold for</param>
 /// <param name="priorityNumber">Priority in the shopping list in-game, commonly used for new items or discounts</param>
 /// <param name="id">Server product id</param>
 /// <param name="title">Item title that is displayed in-game</param>
 /// <param name="itemType">Item type that NFS: World can recognize</param>
 /// <param name="extraDetail">If there is one, a special condition for the item that is displayed in-game</param>
 /// <returns>An XElement wrapped around in ProductTrans tags.</returns>
 public static XElement getProductTransactionEntry(Currency currencyType, String description, Int32 rentalDurationInMinutes, Int64 hash, String iconString, Int16 levelLimit, String tooltipDescription, Int32 price, Int16 priorityNumber, String id, String title, GameItemType itemType, Special extraDetail = Special.None)
 {
     XElement ProductNode =
         new XElement("ProductTrans",
             new XElement("Currency", currencyType.GetString()),
             new XElement("Description", description),
             new XElement("DurationMinute", rentalDurationInMinutes.ToString()),
             new XElement("Hash", hash.ToString()),
             new XElement("Icon", iconString),
             new XElement("Level", levelLimit.ToString()),
             new XElement("LongDescription", tooltipDescription),
             new XElement("Price", price.ToString()),
             new XElement("Priority", priorityNumber.ToString()),
             new XElement("ProductId", id),
             new XElement("ProductTitle", title),
             new XElement("ProductType", itemType.GetString()),
             new XElement("SecondaryIcon", extraDetail.GetString()),
             new XElement("UseCount", "1")
         );
     return ProductNode;
 }
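
A hypothetical call for illustration; the enum members used below (Currency.Boost, GameItemType.Car) are assumptions about the surrounding project, not values confirmed by the original source:

// Builds a non-rental, non-level-limited product entry priced at 500.
XElement entry = getProductTransactionEntry(
    currencyType: Currency.Boost,          // assumed enum member
    description: "",
    rentalDurationInMinutes: 0,            // 0 = not a rental
    hash: 1234567890L,
    iconString: "icon_sample",
    levelLimit: 0,                         // 0 = no level restriction
    tooltipDescription: "",
    price: 500,
    priorityNumber: 1,
    id: "PROD-0001",
    title: "Sample Item",
    itemType: GameItemType.Car);           // assumed enum member
// extraDetail defaults to Special.None, i.e. no secondary icon.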
Ejemplo n.º 27
        /// <summary>
        ///   Predicts the next observations occurring after a given observation sequence.
        /// </summary>
        ///
        /// <param name="observations">A sequence of observations. Predictions will be made regarding
        ///   the next observations that should be coming after the last observation in this sequence.</param>
        /// <param name="next">The number of observations to be predicted. Default is 1.</param>
        /// <param name="logLikelihoods">The log-likelihood of the different symbols for each predicted
        ///   next observations. In order to convert those values to probabilities, exponentiate the
        ///   values in the vectors (using the Exp function) and divide each value by its vector's sum.</param>
        /// <param name="logLikelihood">The log-likelihood of the given sequence, plus the predicted
        ///   next observations. Exponentiate this value (use the System.Math.Exp function) to obtain
        ///   a <c>likelihood</c> value.</param>
        ///
        public int[] Predict(int[] observations, int next, out double logLikelihood, out double[][] logLikelihoods)
        {
            int states = States;
            int T      = next;

            double[][] lnA = LogTransitions;

            int[] prediction = new int[next];
            logLikelihoods = new double[next][];

            try
            {
                // Compute forward probabilities for the given observation sequence.
                double[,] lnFw0 = ForwardBackwardAlgorithm.LogForward(this, observations, out logLikelihood);

                // Create a matrix to store the future probabilities for the prediction
                // sequence and copy the latest forward probabilities on its first row.
                double[,] lnFwd = new double[T + 1, states];


                // 1. Initialization
                for (int i = 0; i < states; i++)
                {
                    lnFwd[0, i] = lnFw0[observations.Length - 1, i];
                }

                // 2. Induction
                for (int t = 0; t < T; t++)
                {
                    double[] weights = new double[symbols];
                    for (int s = 0; s < symbols; s++)
                    {
                        weights[s] = Double.NegativeInfinity;

                        for (int i = 0; i < states; i++)
                        {
                            double sum = Double.NegativeInfinity;
                            for (int j = 0; j < states; j++)
                            {
                                sum = Special.LogSum(sum, lnFwd[t, j] + lnA[j][i]);
                            }
                            lnFwd[t + 1, i] = sum + logB[i][s];

                            weights[s] = Special.LogSum(weights[s], lnFwd[t + 1, i]);
                        }
                    }

                    double sumWeight = Double.NegativeInfinity;
                    for (int i = 0; i < weights.Length; i++)
                    {
                        sumWeight = Special.LogSum(sumWeight, weights[i]);
                    }
                    for (int i = 0; i < weights.Length; i++)
                    {
                        weights[i] -= sumWeight;
                    }


                    // Select most probable symbol
                    double maxWeight = weights[0];
                    prediction[t] = 0;
                    for (int i = 1; i < weights.Length; i++)
                    {
                        if (weights[i] > maxWeight)
                        {
                            maxWeight     = weights[i];
                            prediction[t] = i;
                        }
                    }

                    // Recompute log-likelihood
                    logLikelihoods[t] = weights;
                    logLikelihood     = maxWeight;
                }


                return(prediction);
            }
            catch (IndexOutOfRangeException ex)
            {
                checkObservations(ex, observations);
                throw;
            }
        }
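
Most of the numerics above run in the log domain through Special.LogSum. For readers without the library at hand, a helper with the same contract might look like the sketch below; this is the standard log-sum-exp trick, not necessarily the library's exact implementation:

// Computes ln(exp(lna) + exp(lnb)) without overflowing either exponential.
static double LogSum(double lna, double lnb)
{
    if (double.IsNegativeInfinity(lna)) return lnb;
    if (double.IsNegativeInfinity(lnb)) return lna;
    return (lna > lnb)
        ? lna + Math.Log(1.0 + Math.Exp(lnb - lna))
        : lnb + Math.Log(1.0 + Math.Exp(lna - lnb));
}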
Ejemplo n.º 28
 /// <summary>
 /// Gets the cumulative distribution function (cdf) for
 /// the this distribution evaluated at point <c>x</c>.
 /// </summary>
 /// <param name="x">A single point in the distribution range.</param>
 /// <returns></returns>
 /// <remarks>
 /// The Cumulative Distribution Function (CDF) describes the cumulative
 /// probability that a given value or any value smaller than it will occur.
 /// </remarks>
 public override double DistributionFunction(double x)
 {
     return(lambda * Special.NormalInverse(x) + mean);
 }
Ejemplo n.º 29
 /// <summary>
 /// Publishes the special (topic) page.
 /// </summary>
 /// <param name="special"></param>
 /// <param name="specialRelease"></param>
 public void ReleasingSpecial(Special special, Release specialRelease)
 {
     // Special page: {RootPath}/{SpecialId}.htm; the {SpecialId} is known in advance.
 }
        /// <summary>
        ///   Pomeranz algorithm.
        /// </summary>
        ///
        public static double Pomeranz(int n, double x)
        {
            // The Pomeranz algorithm to compute the KS distribution
            double EPS  = 1.0e-15;
            int    ENO  = 350;
            double RENO = Math.Pow(2, ENO); // for renormalization of V
            int    renormalizations;
            double t = n * x;
            double w, sum, minsum;
            int    k, s;
            int    r1, r2; // Indices i and i-1 for V[i][]
            int    jlow, jup, klow, kup, kup0;

            double[] A        = new double[2 * n + 3];
            double[] floors   = new double[2 * n + 3];
            double[] ceilings = new double[2 * n + 3];

            double[][] V = new double[2][];
            for (int j = 0; j < V.Length; j++)
            {
                V[j] = new double[n + 2];
            }

            double[][] H = new double[4][];     // = pow(w, j) / Factorial(j)
            for (int j = 0; j < H.Length; j++)
            {
                H[j] = new double[n + 2];
            }

            double z = computeLimits(t, floors, ceilings);

            computeA(n, A, z);
            computeH(n, A, H);

            V[1][1]          = RENO;
            renormalizations = 1;

            r1 = 0;
            r2 = 1;
            for (int i = 2; i <= 2 * n + 2; i++)
            {
                jlow = (int)(2 + floors[i]);
                if (jlow < 1)
                {
                    jlow = 1;
                }
                jup = (int)(ceilings[i]);
                if (jup > n + 1)
                {
                    jup = n + 1;
                }

                klow = (int)(2 + floors[i - 1]);
                if (klow < 1)
                {
                    klow = 1;
                }
                kup0 = (int)(ceilings[i - 1]);

                // Find to which case it corresponds
                w = (A[i] - A[i - 1]) / n;
                s = -1;
                for (int j = 0; j < 4; j++)
                {
                    if (Math.Abs(w - H[j][1]) <= EPS)
                    {
                        s = j;
                        break;
                    }
                }

                minsum = RENO;
                r1     = (r1 + 1) & 1;      // i - 1
                r2     = (r2 + 1) & 1;      // i

                for (int j = jlow; j <= jup; j++)
                {
                    kup = kup0;
                    if (kup > j)
                    {
                        kup = j;
                    }
                    sum = 0;
                    for (k = kup; k >= klow; k--)
                    {
                        sum += V[r1][k] * H[s][j - k];
                    }
                    V[r2][j] = sum;
                    if (sum < minsum)
                    {
                        minsum = sum;
                    }
                }

                if (minsum < 1.0e-280)
                {
                    // V is too small: renormalize to avoid underflow of probabilities
                    for (int j = jlow; j <= jup; j++)
                    {
                        V[r2][j] *= RENO;
                    }
                    renormalizations++;              // keep track of log of RENO
                }
            }

            sum = V[r2][n + 1];
            w   = Special.LogFactorial(n) - renormalizations * ENO * Constants.Log2 + Math.Log(sum);
            if (w >= 0)
            {
                return(1);
            }
            return(Math.Exp(w));
        }
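
Pomeranz(n, x) returns P(D_n <= x) for the two-sided Kolmogorov-Smirnov statistic D_n on n samples. As a hypothetical spot check (the constant below is for illustration, not a tabulated value), the median of D_10 lies near 0.26, so:

double p = Pomeranz(10, 0.26);  // roughly 0.5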
Ejemplo n.º 31
        /// <summary>
        ///   Computes the gradient using the 
        ///   input/outputs stored in this object.
        /// </summary>
        /// 
        /// <returns>The value of the gradient vector for the given parameters.</returns>
        /// 
        public double[] Gradient()
        {
            // Localize thread locals
            double[][] logLikelihoods = this.logLikelihoods.Value;
            T[][] inputs = this.inputs.Value;
            int[] outputs = this.outputs.Value;
            double[] lnZx = this.lnZx.Value;
            double[] lnZxy = this.lnZxy.Value;
            double[] gradient = this.gradient.Value;

            double error = 0;

            // The previous call to Objective could have computed
            // the log-likelihoods for all input values. However, if
            // this hasn't been the case, compute them now:

            if (logLikelihoods == null)
                model.LogLikelihood(inputs, outputs, out logLikelihoods);

            // Compute the partition function using the previously
            // computed likelihoods. Also compute the total error

            // For each x, compute lnZ(x) and lnZ(x,y)
            for (int i = 0; i < inputs.Length; i++)
            {
                double[] lli = logLikelihoods[i];

                // Compute the marginal likelihood
                double sum = Double.NegativeInfinity;
                for (int j = 0; j < lli.Length; j++)
                    sum = Special.LogSum(sum, lli[j]);

                lnZx[i] = sum;
                lnZxy[i] = lli[outputs[i]];

                // compute and return the negative
                // log-likelihood as error function
                error -= lnZxy[i] - lnZx[i];

                Accord.Diagnostics.Debug.Assert(!Double.IsNaN(error));
            }

            // Now start computing the gradient w.r.t to the
            // feature functions. Each feature function belongs
            // to a factor potential function, so:

#if SERIAL  // For each clique potential (factor potential function)
            for (int c = 0; c < function.Factors.Length; c++)
#else
            Parallel.For(0, function.Factors.Length, c =>
#endif
            {
                FactorPotential<T> factor = function.Factors[c];

                int factorIndex = factor.Index;

                // Compute all forward and backward matrices to be
                //  used in the feature functions marginal computations.

                double[][,] lnFwds = new double[inputs.Length][,];
                double[][,] lnBwds = new double[inputs.Length][,];
                for (int i = 0; i < inputs.Length; i++)
                {
                    lnFwds[i] = ForwardBackwardAlgorithm.LogForward(factor, inputs[i], factorIndex);
                    lnBwds[i] = ForwardBackwardAlgorithm.LogBackward(factor, inputs[i], factorIndex);
                }

                double[] marginals = new double[function.Outputs];

                // For each feature in the factor potential function
                int end = factor.FactorParameters.Offset + factor.FactorParameters.Count;
                for (int k = factor.FactorParameters.Offset; k < end; k++)
                {
                    IFeature<T> feature = function.Features[k];
                    double parameter = function.Weights[k];

                    if (Double.IsInfinity(parameter))
                    {
                        gradient[k] = 0; continue;
                    }


                    // Compute the two marginal sums for the gradient calculation
                    // as given in eq. 1.52 of Sutton, McCallum; "An introduction to
                    // Conditional Random Fields for Relational Learning". The sums
                    // will be computed in the log domain for numerical stability.

                    double lnsum1 = Double.NegativeInfinity;
                    double lnsum2 = Double.NegativeInfinity;

                    // For each training sample (sequences)
                    for (int i = 0; i < inputs.Length; i++)
                    {
                        T[] x = inputs[i]; // training input
                        int y = outputs[i];  // training output

                        // Compute marginals for all possible outputs
                        for (int j = 0; j < marginals.Length; j++)
                            marginals[j] = Double.NegativeInfinity;

                        // However, making the assumption that each factor is responsible for only 
                        // one output label, we can compute the marginal only for the current factor
                        marginals[factorIndex] = feature.LogMarginal(lnFwds[i], lnBwds[i], x, factorIndex);

                        // The first term contains a marginal probability p(w|x,y), which is
                        // exactly a marginal distribution of the clamped CRF (eq. 1.46).
                        lnsum1 = Special.LogSum(lnsum1, (marginals[y] == lnZxy[i]) ? 0 : marginals[y] - lnZxy[i]);

                        // The second term contains a different marginal p(w,y|x) which is the
                        // same marginal probability required in as fully-observed CRF.
                        for (int j = 0; j < marginals.Length; j++)
                            lnsum2 = Special.LogSum(lnsum2, marginals[j] - lnZx[i]);

                        Accord.Diagnostics.Debug.Assert(!marginals.HasNaN());
                        Accord.Diagnostics.Debug.Assert(!Double.IsNaN(lnsum1));
                        Accord.Diagnostics.Debug.Assert(!Double.IsNaN(lnsum2));
                    }

                    // Compute the current derivative
                    double sum1 = Math.Exp(lnsum1);
                    double sum2 = Math.Exp(lnsum2);
                    double derivative = sum1 - sum2;

                    if (sum1 == sum2) derivative = 0;

                    Accord.Diagnostics.Debug.Assert(!Double.IsNaN(derivative));

                    // Include regularization derivative if required
                    if (sigma != 0) derivative -= parameter / sigma;

                    gradient[k] = -derivative;
                }
            }
#if !SERIAL
);
#endif

            // Reset log-likelihoods so they are recomputed in the next run,
            // either by the Objective function or by the Gradient calculation.

            this.logLikelihoods.Value = null;
            this.error.Value = error;

            Accord.Diagnostics.Debug.Assert(!Double.IsNaN(error));

            return gradient; // return the gradient.
        }
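
For reference, the quantity assembled above is the gradient of the regularized conditional log-likelihood, eq. 1.52 of Sutton and McCallum. In their notation,

$$\frac{\partial \ell}{\partial \theta_k} = \sum_{i} \mathbb{E}_{p(w \mid x^{(i)}, y^{(i)})}\left[f_k\right] - \sum_{i} \mathbb{E}_{p(w, y \mid x^{(i)})}\left[f_k\right] - \frac{\theta_k}{\sigma^2}$$

where the two expectations correspond to sum1 and sum2; the code folds the Gaussian prior scale into its sigma field (the term appears there as parameter / sigma) and returns the negated gradient, since the optimizer minimizes the negative log-likelihood.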
Ejemplo n.º 32
 protected void EgvSpecial_RowDataBound(object sender, GridViewRowEventArgs e)
 {
     if (e.Row.RowType == DataControlRowType.DataRow)
     {
         SpecialTree dataItem = (SpecialTree)e.Row.DataItem;
         Label       label    = (Label)e.Row.FindControl("LabName");
         CheckBox    box      = (CheckBox)e.Row.FindControl("ChkSpecialInput");
         CheckBox    box2     = (CheckBox)e.Row.FindControl("ChkSpecialManage");
         HiddenField field    = (HiddenField)e.Row.FindControl("HdnSpecialId");
         if (dataItem != null)
         {
             label.Text = Special.TreeLine(dataItem.TreeLineType) + dataItem.Name;
             if (dataItem.IsSpecialCategory)
             {
                 field.Value  = "0";
                 box.Visible  = false;
                 box2.Visible = false;
             }
             else
             {
                 field.Value = dataItem.Id.ToString();
             }
             if (!dataItem.IsSpecialCategory)
             {
                 if (dataItem.Id == -1)
                 {
                     this.m_inputSpecialId  = box.ClientID;
                     this.m_manageSpecialId = box2.ClientID;
                     box.Attributes.Add("onclick", "ChkSpecialAll(this.form,'" + box.ID + "'," + this.m_inputSpecialId + ")");
                     box2.Attributes.Add("onclick", "ChkSpecialAll(this.form,'" + box2.ID + "'," + this.m_manageSpecialId + ")");
                 }
                 else
                 {
                     box.Attributes.Add("onclick", "ChkWipeOffSpecialAll(" + this.m_inputSpecialId + ")");
                     box2.Attributes.Add("onclick", "ChkWipeOffSpecialAll(" + this.m_manageSpecialId + ")");
                 }
                 IList <RoleSpecialPermissionsInfo> specialPermissionsByRoleId = RolePermissions.GetSpecialPermissionsByRoleId(this.m_RoleId, OperateCode.SpecialContentInput);
                 IList <RoleSpecialPermissionsInfo> list2 = RolePermissions.GetSpecialPermissionsByRoleId(this.m_RoleId, OperateCode.SepcialContentManage);
                 foreach (RoleSpecialPermissionsInfo info in specialPermissionsByRoleId)
                 {
                     if (info.SpecialId == DataConverter.CLng(field.Value))
                     {
                         if (dataItem.Id == -1)
                         {
                             this.m_inputSpecialAll = true;
                             box.Checked            = true;
                         }
                         else if (!this.m_inputSpecialAll)
                         {
                             box.Checked = true;
                         }
                     }
                 }
                 foreach (RoleSpecialPermissionsInfo info2 in list2)
                 {
                     if (info2.SpecialId == DataConverter.CLng(field.Value))
                     {
                         if (dataItem.Id == -1)
                         {
                             this.m_manageSpecialAll = true;
                             box2.Checked            = true;
                         }
                         else if (!this.m_manageSpecialAll)
                         {
                             box2.Checked = true;
                         }
                     }
                 }
             }
         }
     }
 }
Ejemplo n.º 33
 public void AddSpecial(Special aSpecial)
 {
     context.Specials.Add(aSpecial);
     context.SaveChanges();
 }
        private double run(Array[] observations)
        {
            // Baum-Welch algorithm.

            // The Baum–Welch algorithm is a particular case of a generalized expectation-maximization
            // (GEM) algorithm. It can compute maximum likelihood estimates and posterior mode estimates
            // for the parameters (transition and emission probabilities) of an HMM, when given only
            // emissions as training data.

            // The algorithm has two steps:
            //  - Calculating the forward probability and the backward probability for each HMM state;
            //  - On the basis of this, determining the frequency of the transition-emission pair values
            //    and dividing it by the probability of the entire string. This amounts to calculating
            //    the expected count of the particular transition-emission pair. Each time a particular
            //    transition is found, the value of the quotient of the transition divided by the probability
            //    of the entire string goes up, and this value can then be made the new value of the transition.


            // Grab model information
            int states = model.States;
            var logA   = model.Transitions;
            var logP   = model.Probabilities;


            // Initialize the algorithm
            int    N    = observations.Length;
            double logN = Math.Log(N);

            LogKsi   = new double[N][][, ];
            LogGamma = new double[N][, ];


            for (int i = 0; i < observations.Length; i++)
            {
                int T = observations[i].Length;

                LogKsi[i]   = new double[T][, ];
                LogGamma[i] = new double[T, states];

                for (int t = 0; t < LogKsi[i].Length; t++)
                {
                    LogKsi[i][t] = new double[states, states];
                }
            }


            bool stop = false;

            int TMax = observations.Max(x => x.Length);

            double[,] lnFwd = new double[TMax, states];
            double[,] lnBwd = new double[TMax, states];

            // Initialize the model log-likelihoods
            double newLogLikelihood = Double.NegativeInfinity;

            convergence.NewValue = Double.NegativeInfinity;

            int itersLeft = 30;

            do // Until convergence or max iterations is reached
            {
                itersLeft--;
                // For each sequence in the observations input
                for (int i = 0; i < observations.Length; i++)
                {
                    int T = observations[i].Length;
                    double[,] logGamma = LogGamma[i];
                    double w = LogWeights[i];


                    // 1st step - Calculating the forward probability and the
                    //            backward probability for each HMM state.
                    ComputeForwardBackward(i, lnFwd, lnBwd);


                    // 2nd step - Determining the frequency of the transition-emission pair values
                    //            and dividing it by the probability of the entire string.

                    // Calculate gamma values for next computations
                    for (int t = 0; t < T; t++)
                    {
                        double lnsum = Double.NegativeInfinity;
                        for (int k = 0; k < states; k++)
                        {
                            logGamma[t, k] = lnFwd[t, k] + lnBwd[t, k] + w;
                            lnsum          = Special.LogSum(lnsum, logGamma[t, k]);
                        }

                        // System.Diagnostics.Debug.Assert(!Double.IsNaN(lnsum));

                        // Normalize if different from zero
                        if (lnsum != Double.NegativeInfinity)
                        {
                            for (int k = 0; k < states; k++)
                            {
                                logGamma[t, k] = logGamma[t, k] - lnsum;
                            }
                        }
                    }

                    // Calculate ksi values for next computations
                    ComputeKsi(i, lnFwd, lnBwd);

                    // Compute log-likelihood for the given sequence
                    for (int j = 0; j < states; j++)
                    {
                        newLogLikelihood = Special.LogSum(newLogLikelihood, lnFwd[T - 1, j]);
                    }
                }


                // Average the likelihood for all sequences
                newLogLikelihood    /= observations.Length;
                convergence.NewValue = newLogLikelihood;

                // Check for convergence
                if (!convergence.HasConverged)
                {
                    // We haven't converged yet

                    // 3. Continue with parameter re-estimation
                    newLogLikelihood = Double.NegativeInfinity;

                    // 3.1 Re-estimation of initial state probabilities
                    for (int i = 0; i < logP.Length; i++)
                    {
                        double lnsum = Double.NegativeInfinity;
                        for (int k = 0; k < LogGamma.Length; k++)
                        {
                            lnsum = Special.LogSum(lnsum, LogGamma[k][0, i]);
                        }
                        logP[i] = lnsum - logN;
                    }

                    // 3.2 Re-estimation of transition probabilities
                    for (int i = 0; i < states; i++)
                    {
                        for (int j = 0; j < states; j++)
                        {
                            double lnnum = Double.NegativeInfinity;
                            double lnden = Double.NegativeInfinity;

                            for (int k = 0; k < LogGamma.Length; k++)
                            {
                                int T = observations[k].Length;

                                for (int t = 0; t < T - 1; t++)
                                {
                                    lnnum = Special.LogSum(lnnum, LogKsi[k][t][i, j]);
                                    lnden = Special.LogSum(lnden, LogGamma[k][t, i]);
                                }
                            }

                            logA[i, j] = (lnnum == lnden) ? 0 : lnnum - lnden;

                            System.Diagnostics.Debug.Assert(!Double.IsNaN(logA[i, j]));
                        }
                    }

                    // 3.3 Re-estimation of emission probabilities
                    UpdateEmissions(); // discrete and continuous
                }
                else
                {
                    stop = true; // The model has converged.
                }
            } while (!stop && itersLeft > 0);


            // Returns the model average log-likelihood
            return(newLogLikelihood);
        }
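
In conventional notation, the posteriors and updates computed above are the standard Baum-Welch quantities (with alpha and beta the forward and backward variables, and everything evaluated through LogSum for numerical stability):

$$\gamma_t(i) \propto \alpha_t(i)\,\beta_t(i), \qquad \hat{a}_{ij} = \frac{\sum_{t=1}^{T-1} \xi_t(i,j)}{\sum_{t=1}^{T-1} \gamma_t(i)}, \qquad \hat{\pi}_i = \frac{1}{N} \sum_{k=1}^{N} \gamma_1^{(k)}(i)$$

which is exactly what steps 3.1 and 3.2 compute in the log domain, with the per-sequence weights w folded into gamma.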
Ejemplo n.º 35
 /// <summary>
 /// Returns the value of a window function.
 /// </summary>
 /// <param name="x">Argument</param>
 /// <param name="frameSize">Window size</param>
 /// <returns>float precision floating point number</returns>
 public override float Function(float x, int frameSize)
 {
     // Lanczos function:
     return(Special.Sinc(2 * x / (frameSize - 1) - 1, Maths.Pi));
 }
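
Assuming Special.Sinc(t, Maths.Pi) computes sin(pi t) / (pi t), the window implemented above is the standard Lanczos window over a frame of N = frameSize samples:

$$w(x) = \mathrm{sinc}\left(\frac{2x}{N-1} - 1\right), \qquad \mathrm{sinc}(t) = \frac{\sin(\pi t)}{\pi t}$$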
        /// <summary>
        ///   Computes the gradient using the 
        ///   input/outputs stored in this object.
        /// </summary>
        /// 
        /// <param name="parameters">The parameter vector lambda to use in the model.</param>
        /// <returns>The value of the gradient vector for the given parameters.</returns>
        /// 
        protected double[] Gradient(double[] parameters)
        {
            model.Function.Weights = parameters;

            error = 0;

            // The previous call to Objective should have computed
            // the log-likelihoods for all input values. However, if
            // this hasn't been the case, compute them now:

            if (logLikelihoods == null)
                model.LogLikelihood(inputs, outputs, out logLikelihoods);

            // Compute the partition function using the previously
            // computed likelihoods. Also compute the total error

            // For each x, compute lnZ(x) and lnZ(x,y)
            for (int i = 0; i < inputs.Length; i++)
            {
                double[] lli = logLikelihoods[i];

                // Compute the marginal likelihood
                double sum = Double.NegativeInfinity;
                for (int j = 0; j < lli.Length; j++)
                    sum = Special.LogSum(sum, lli[j]);

                lnZx[i] = sum;
                lnZxy[i] = lli[outputs[i]];

                // compute and return the negative
                // log-likelihood as error function
                error -= lnZxy[i] - lnZx[i];
            }

            // Now start computing the gradient w.r.t to the
            // feature functions. Each feature function belongs
            // to a factor potential function, so:

            // For each clique potential (factor potential function)
#if DEBUG
            for (int c = 0; c < function.Factors.Length; c++)
#else
            Parallel.For(0, function.Factors.Length, c =>
#endif
            {
                FactorPotential<T> factor = function.Factors[c];

                // Compute all forward and backward matrices to be
                //  used in the feature functions marginal computations.

                var lnFwds = new double[inputs.Length, function.Outputs][,];
                var lnBwds = new double[inputs.Length, function.Outputs][,];
                for (int i = 0; i < inputs.Length; i++)
                {
                    for (int j = 0; j < function.Outputs; j++)
                    {
                        lnFwds[i, j] = ForwardBackwardAlgorithm.LogForward(factor, inputs[i], j);
                        lnBwds[i, j] = ForwardBackwardAlgorithm.LogBackward(factor, inputs[i], j);
                    }
                }

                double[] marginals = new double[function.Outputs];

                // For each feature in the factor potential function
                int end = factor.ParameterIndex + factor.FeatureCount;
                for (int k = factor.ParameterIndex; k < end; k++)
                {
                    IFeature<T> feature = function.Features[k];
                    double weight = function.Weights[k];

                    if (Double.IsInfinity(weight))
                    {
                        g[k] = 0; continue;
                    }


                    // Compute the two marginal sums for the gradient calculation
                    // as given in eq. 1.52 of Sutton, McCallum; "An introduction to
                    // Conditional Random Fields for Relational Learning". The sums
                    // will be computed in the log domain for numerical stability.

                    double lnsum1 = Double.NegativeInfinity;
                    double lnsum2 = Double.NegativeInfinity;

                    // For each training sample (sequences)
                    for (int i = 0; i < inputs.Length; i++)
                    {
                        T[] x = inputs[i]; // training input
                        int y = outputs[i];  // training output

                        // Compute marginals for all possible outputs
                        for (int j = 0; j < marginals.Length; j++)
                            marginals[j] = feature.LogMarginal(lnFwds[i, j], lnBwds[i, j], x, j);


                        // The first term contains a marginal probability p(w|x,y), which is
                        // exactly a marginal distribution of the clamped CRF (eq. 1.46).
                        lnsum1 = Special.LogSum(lnsum1, marginals[y] - lnZxy[i]);

                        // The second term contains a different marginal p(w,y|x) which is the
                        // same marginal probability required in as fully-observed CRF.
                        for (int j = 0; j < marginals.Length; j++)
                            lnsum2 = Special.LogSum(lnsum2, marginals[j] - lnZx[i]);

#if DEBUG
                        if (Double.IsNaN(lnsum1) || Double.IsNaN(lnsum2))
                            throw new Exception();
#endif
                    }

                    // Compute the current derivative
                    double derivative = -(Math.Exp(lnsum1) - Math.Exp(lnsum2));

#if DEBUG
                    if (Double.IsNaN(derivative))
                        throw new Exception();
#endif

                    // Include regularization derivative if required
                    if (beta != 0) derivative += weight * beta;

                    g[k] = derivative;
                }
            }
#if !DEBUG
);
#endif

            // Reset log-likelihoods so they are recomputed in the next run,
            // either by the Objective function or by the Gradient calculation.

            logLikelihoods = null;

            return g; // return the gradient.
        }
        /// <summary>
        ///   Runs the calibration algorithm.
        /// </summary>
        ///
        /// <param name="computeError">
        ///   True to compute error after the training
        ///   process completes, false otherwise. Default is true.
        /// </param>
        ///
        /// <returns>
        ///   The log-likelihood of the calibrated model.
        /// </returns>
        ///
        public double Run(bool computeError)
        {
            // This method is a direct implementation of the algorithm
            // as published by Hsuan-Tien Lin, Chih-Jen Lin and Ruby C.
            // Weng, 2007. See references in documentation for more details.
            //

            // Compute the Support Vector Machine outputs
            for (int i = 0; i < distances.Length; i++)
            {
                machine.Compute(inputs[i], out distances[i]);
            }

            // Define the target probabilities we aim to produce
            double high = (positives + 1.0) / (positives + 2.0);
            double low  = 1.0 / (negatives + 2.0);

            for (int i = 0; i < outputs.Length; i++)
            {
                targets[i] = (outputs[i] == 1) ? high : low;
            }

            // Initialize
            double A             = 0.0;
            double B             = Math.Log((negatives + 1.0) / (positives + 1.0));
            double logLikelihood = 0;
            int    iterations    = 0;

            // Compute the log-likelihood function
            for (int i = 0; i < distances.Length; i++)
            {
                double y = distances[i] * A + B;

                if (y >= 0)
                {
                    logLikelihood += targets[i] * y + Special.Log1p(Math.Exp(-y));
                }
                else
                {
                    logLikelihood += (targets[i] - 1) * y + Special.Log1p(Math.Exp(y));
                }
            }

            // Start main algorithm loop.
            while (iterations < maxIterations)
            {
                iterations++;

                // Update the Gradient and Hessian
                //  (Using that H' = H + sigma I)

                double h11 = sigma;
                double h22 = sigma;
                double h21 = 0;

                double g1 = 0;
                double g2 = 0;

                for (int i = 0; i < distances.Length; i++)
                {
                    double p, q;
                    double y = distances[i] * A + B;

                    if (y >= 0)
                    {
                        p = Math.Exp(-y) / (1.0 + Math.Exp(-y));
                        q = 1.0 / (1.0 + Math.Exp(-y));
                    }
                    else
                    {
                        p = 1.0 / (1.0 + Math.Exp(y));
                        q = Math.Exp(y) / (1.0 + Math.Exp(y));
                    }

                    double d1 = targets[i] - p;
                    double d2 = p * q;

                    // Update Hessian
                    h11 += distances[i] * distances[i] * d2;
                    h22 += d2;
                    h21 += distances[i] * d2;

                    // Update Gradient
                    g1 += distances[i] * d1;
                    g2 += d1;
                }

                // Check if the gradient is near zero as stopping criteria
                if (Math.Abs(g1) < tolerance && Math.Abs(g2) < tolerance)
                {
                    break;
                }

                // Compute modified Newton directions
                double det = h11 * h22 - h21 * h21;
                double dA  = -(h22 * g1 - h21 * g2) / det;
                double dB  = -(-h21 * g1 + h11 * g2) / det;
                double gd  = g1 * dA + g2 * dB;

                double stepSize = 1;

                // Perform a line search
                while (stepSize >= minStepSize)
                {
                    double newA             = A + stepSize * dA;
                    double newB             = B + stepSize * dB;
                    double newLogLikelihood = 0.0;

                    // Compute the new log-likelihood function
                    for (int i = 0; i < distances.Length; i++)
                    {
                        double y = distances[i] * newA + newB;

                        if (y >= 0)
                        {
                            newLogLikelihood += (targets[i]) * y + Special.Log1p(Math.Exp(-y));
                        }
                        else
                        {
                            newLogLikelihood += (targets[i] - 1) * y + Special.Log1p(Math.Exp(y));
                        }
                    }

                    // Check if a sufficient decrease has been obtained
                    if (newLogLikelihood < logLikelihood + 1e-4 * stepSize * gd)
                    {
                        // Yes, it has. Update parameters with the new values
                        A = newA; B = newB; logLikelihood = newLogLikelihood;
                        break;
                    }
                    else
                    {
                        // Decrease the step size until it can achieve
                        // a sufficient decrease or until it fails.
                        stepSize /= 2.0;
                    }

                    if (stepSize < minStepSize)
                    {
                        // No decrease could be obtained.
                        break; // throw new LineSearchFailedException("No sufficient decrease was obtained.");
                    }
                }
            }


            // The iterative algorithm has converged
            machine.Link = new LogitLinkFunction(beta: -A, constant: -B);


            // Compute log-likelihood if required
            return((computeError) ? LogLikelihood(inputs, outputs) : 0.0);
        }
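
The model fitted above is Platt's sigmoid on the SVM outputs f, with the smoothed targets recommended by Platt and retained by Lin, Lin and Weng:

$$P(y = 1 \mid f) = \frac{1}{1 + \exp(Af + B)}, \qquad t_{+} = \frac{N_{+} + 1}{N_{+} + 2}, \quad t_{-} = \frac{1}{N_{-} + 2}$$

where N+ and N- count the positive and negative training examples; the final line installs (A, B) into the machine as a LogitLinkFunction with negated signs.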
Ejemplo n.º 38
 public async Task <string> GetSpecialZipFileUrlAsync(Site site, Special special)
 {
     return(await ParseSiteUrlAsync(site, $"@/{special.Url}/{special.Title}.zip", true));
 }
Ejemplo n.º 39
 public bool IsInherited(int m) => !Base.Contains(m) || Special.Contains(m) ||
 Egg.Contains(m) || LevelUp.Contains(m) ||
 TMHM.Contains(m) || Tutor.Contains(m);
Ejemplo n.º 40
 public ActionResult Del(int idList)
 {
     Special.Delete(idList);
     //AddAdminOperateLog("删除", "删除友情链接,友情链接ID为:" + CommonHelper.IntArrayToString(idList));
     return(PromptView("专题删除成功"));
 }
Ejemplo n.º 41
 private static bool SpecialOfferCanBeApplied(Special special, ImmutableDictionary <string, int> quantitiesLookup)
 {
     return(special.Quantities.ToList().TrueForAll(p =>
                                                   quantitiesLookup.ContainsKey(p.Name) && quantitiesLookup[p.Name] >= p.Number));
 }
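
A hypothetical usage sketch, assuming Special.Quantities exposes (Name, Number) pairs as the predicate implies; the offer object and the using directives are assumptions:

// using System.Collections.Generic; using System.Collections.Immutable;
// A "3 apples" special applies only when the basket holds at least 3 apples.
var basket = new Dictionary<string, int> { ["apple"] = 4 }.ToImmutableDictionary();
bool ok = SpecialOfferCanBeApplied(threeApplesSpecial, basket);  // true, since 4 >= 3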
 /// <summary>
 ///   Gets the log-probability mass function (pmf) for
 ///   this distribution evaluated at point <c>x</c>.
 /// </summary>
 ///
 /// <param name="k">A single point in the distribution range.</param>
 ///
 /// <returns>
 ///   The logarithm of the probability of <c>x</c>
 ///   occurring in the current distribution.
 /// </returns>
 ///
 /// <remarks>
 ///   The Probability Mass Function (PMF) describes the
 ///   probability that a given value <c>k</c> will occur.
 /// </remarks>
 ///
 public override double LogProbabilityMassFunction(int k)
 {
     return(Special.LogBinomial(m, k) + Special.LogBinomial(N - m, n - k)
            - Special.LogBinomial(N, n));
 }
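
The expression above is the logarithm of the hypergeometric probability mass function, with N the population size, m the number of successes in the population, and n the sample size:

$$\ln P(X = k) = \ln\binom{m}{k} + \ln\binom{N-m}{n-k} - \ln\binom{N}{n}$$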
Ejemplo n.º 43
        NelderMeadStatus sbplx_minimize()
        {
            var ret = NelderMeadStatus.Success;

            double[] x = Solution;
            Value = Function(x);

            this.stop.Evaluations++;
            if (NelderMead.nlopt_stop_forced(stop))
            {
                return(NelderMeadStatus.ForcedStop);
            }
            if (Value < this.minf_max)
            {
                return(NelderMeadStatus.MinimumAllowedValueReached);
            }
            if (NelderMead.nlopt_stop_evals(stop))
            {
                return(NelderMeadStatus.MaximumEvaluationsReached);
            }
            if (NelderMead.nlopt_stop_time(stop))
            {
                return(NelderMeadStatus.MaximumTimeReached);
            }


            Array.Copy(xstep0, xstep, xstep.Length);


            while (true)
            {
                double normi = 0;
                double normdx = 0;
                int    ns, nsubs = 0;
                int    nevals = this.stop.Evaluations;
                double fdiff, fdiff_max = 0;

                Array.Copy(x, xprev, x.Length);

                double fprev = Value;

                // sort indices into the progress vector dx
                // by decreasing order of magnitude abs(dx)
                //
                for (int i = 0; i < p.Length; i++)
                {
                    p[i] = i;
                }

                for (int j = 0; j < absdx.Length; j++)
                {
                    absdx[j] = Math.Abs(dx[j]);
                }

                Array.Sort(p, absdx);


                // find the subspaces, and perform nelder-mead on each one
                for (int i = 0; i < absdx.Length; i++)
                {
                    normdx += absdx[i]; // L1 norm
                }
                int last = 0;
                for (int i = 0; i + nsmin < n; i += ns)
                {
                    last = i;

                    // find subspace starting at index i
                    double ns_goodness = -Double.MaxValue;
                    double norm        = normi;
                    int    nk          = i + nsmax > n ? n : i + nsmax; // max k for this subspace

                    for (int k = i; k < i + nsmin - 1; k++)
                    {
                        norm += absdx[p[k]];
                    }

                    ns = nsmin;
                    for (int k = i + nsmin - 1; k < nk; k++)
                    {
                        double goodness;
                        norm += absdx[p[k]];

                        // remaining subspaces must be big enough to partition
                        if (n - (k + 1) < nsmin)
                        {
                            continue;
                        }

                        // maximize figure of merit defined by Rowan thesis:
                        // look for sudden drops in average |dx|

                        if (k + 1 < n)
                        {
                            goodness = norm / (k + 1) - (normdx - norm) / (n - (k + 1));
                        }
                        else
                        {
                            goodness = normdx / n;
                        }

                        if (goodness > ns_goodness)
                        {
                            ns_goodness = goodness;
                            ns          = (k + 1) - i;
                        }
                    }

                    for (int k = i; k < i + ns; ++k)
                    {
                        normi += absdx[p[k]];
                    }

                    // do nelder-mead on subspace of dimension ns starting w/i
                    sindex = i;
                    for (int k = i; k < i + ns; ++k)
                    {
                        nelderMead.Solution[k - i]    = x[p[k]];
                        nelderMead.StepSize[k - i]    = xstep[p[k]];
                        nelderMead.LowerBounds[k - i] = lb[p[k]];
                        nelderMead.UpperBounds[k - i] = ub[p[k]];
                    }

                    nsubs++;
                    nevals = this.stop.Evaluations;

                    nelderMead.NumberOfVariables = ns;
                    nelderMead.DiameterTolerance = psi;
                    ret = nelderMead.Minimize(Value);

                    fdiff = nelderMead.Difference;
                    Value = nelderMead.Value;

                    if (fdiff > fdiff_max)
                    {
                        fdiff_max = fdiff;
                    }

                    Trace.WriteLine(String.Format("{0} NM iterations for ({1},{2}) subspace",
                                                  this.stop.Evaluations - nevals, sindex, ns));

                    for (int k = i; k < i + ns; k++)
                    {
                        x[p[k]] = nelderMead.Solution[k - i];
                    }

                    if (ret == NelderMeadStatus.Failure)
                    {
                        return(NelderMeadStatus.SolutionToleranceReached);
                    }

                    if (ret != NelderMeadStatus.SolutionToleranceReached)
                    {
                        return(ret);
                    }
                }

                // nelder-mead on last subspace
                ns     = n - last;
                sindex = last;
                for (int i = last; i < n; i++)
                {
                    nelderMead.Solution[i - sindex]    = x[p[i]];
                    nelderMead.StepSize[i - sindex]    = xstep[p[i]];
                    nelderMead.LowerBounds[i - sindex] = lb[p[i]];
                    nelderMead.UpperBounds[i - sindex] = ub[p[i]];
                }

                nsubs++;
                nevals = this.stop.Evaluations;

                nelderMead.NumberOfVariables = ns;
                nelderMead.DiameterTolerance = psi;
                ret = nelderMead.Minimize(Value);

                fdiff = nelderMead.Difference;
                Value = nelderMead.Value;

                if (fdiff > fdiff_max)
                {
                    fdiff_max = fdiff;
                }

                Trace.WriteLine(String.Format("sbplx: {0} NM iterations for ({1},{2}) subspace",
                                              this.stop.Evaluations - nevals, sindex, ns));


                for (int i = sindex; i < p.Length; i++)
                {
                    x[p[i]] = nelderMead.Solution[i - sindex];
                }

                if (ret == NelderMeadStatus.Failure)
                {
                    return(NelderMeadStatus.SolutionToleranceReached);
                }

                if (ret != NelderMeadStatus.SolutionToleranceReached)
                {
                    return(ret);
                }

                // termination tests:
                if (NelderMead.nlopt_stop_ftol(stop, Value, Value + fdiff_max))
                {
                    return(NelderMeadStatus.FunctionToleranceReached);
                }

                if (NelderMead.nlopt_stop_xtol(stop, x, xprev, n))
                {
                    int j;

                    // as explained in Rowan's thesis, it is important
                    // to check |xstep| as well as |x-xprev|, since if
                    // the step size is too large (in early iterations),
                    // the inner Nelder-Mead may not make much progress
                    //
                    for (j = 0; j < xstep.Length; j++)
                    {
                        if (Math.Abs(xstep[j]) * psi > stop.AbsoluteParameterTolerance[j] &&
                            Math.Abs(xstep[j]) * psi > stop.RelativeParameterTolerance * Math.Abs(x[j]))
                        {
                            break;
                        }
                    }

                    if (j == n)
                    {
                        return(NelderMeadStatus.SolutionToleranceReached);
                    }
                }

                // compute change in optimal point
                for (int i = 0; i < x.Length; i++)
                {
                    dx[i] = x[i] - xprev[i];
                }

                // setting step sizes
                {
                    double scale;
                    if (nsubs == 1)
                    {
                        scale = psi;
                    }
                    else
                    {
                        double stepnorm = 0, dxnorm = 0;
                        for (int i = 0; i < dx.Length; i++)
                        {
                            stepnorm += Math.Abs(xstep[i]);
                            dxnorm   += Math.Abs(dx[i]);
                        }

                        scale = dxnorm / stepnorm;

                        if (scale < omega)
                        {
                            scale = omega;
                        }

                        if (scale > 1 / omega)
                        {
                            scale = 1 / omega;
                        }
                    }


                    Trace.WriteLine("sbplx: stepsize scale factor = " + scale);


                    for (int i = 0; i < xstep.Length; i++)
                    {
                        xstep[i] = (dx[i] == 0) ?
                                   -(xstep[i] * scale) : Special.Sign(xstep[i] * scale, dx[i]);
                    }
                }
            }
        }
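
The fragment above is the inner subspace loop of a Subplex-style (subspace-searching simplex) optimizer: each pass runs Nelder-Mead on one subspace, applies the usual function/parameter tolerance tests, and rescales the step sizes by the ratio |dx|/|xstep| clamped to [omega, 1/omega]. As a point of reference only, here is a minimal call-site sketch; the class and member names are assumptions modeled on Accord.NET's Accord.Math.Optimization.Subplex, not taken from the fragment:

// Hedged sketch: minimize the 2-D Rosenbrock function.
// Assumes a Subplex(int, Func<double[], double>) constructor and
// Minimize/Solution/Value members as in Accord.NET.
Func<double[], double> rosenbrock = v =>
    Math.Pow(1 - v[0], 2) + 100 * Math.Pow(v[1] - v[0] * v[0], 2);

var subplex = new Subplex(2, rosenbrock);
bool converged = subplex.Minimize(); // drives the subspace loop shown above
double[] best  = subplex.Solution;   // should approach (1, 1)
double minimum = subplex.Value;      // should approach 0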
Example #44
        public void Delete(int SpecialId)
        {
            Special special = _specials.FirstOrDefault(s => s.SpecialId == SpecialId);

            // FirstOrDefault returns null when no special matches; guard so the
            // intent is explicit rather than relying on Remove(null) being a no-op.
            if (special != null)
            {
                _specials.Remove(special);
            }
        }
Example #45
	// Initialize the common writer fields.
	private void Initialize()
			{
				namespaces = true;
				needPrefixDecl = false;
				indentation = 2;
				indentLevel = 0;
				indentChar = ' ';
				quoteChar = '"';
				indentChars = new char[indentation];
				scope = null;
				formatting = System.Xml.Formatting.None;
				special = Special.None;
				specialWriter = new SpecialWriter(writer);
				nsPrefix = null;
				xmlLang = null;
				writeState = System.Xml.WriteState.Start;
				nameTable = new NameTable();
				namespaceManager = new XmlNamespaceManager(nameTable);
				xmlSpace = XmlSpace.None;
				autoShiftToContent = false;
				for(int i = 0; i < indentation; i++)
				{
					indentChars[i] = indentChar;
				}
			}
Example #46
	// Handle the start of a special attribute.
	private void SpecialAttributeStart
				(String prefix, String localName, Special type)
			{
				// Set the type of the special attribute.
				special = type;

				// Write the attribute prefix, if needed.
				if(((Object)prefix) == null)
				{
					// Write the name.
					writer.Write("xmlns");

					// Set the local name.
					localName = String.Empty;
				}
				else
				{
					// Write the prefixed attribute name.
					writer.Write(prefix);
					writer.Write(':');
					writer.Write(localName);
				}

				// Set the namespace prefix, if needed.
				if(special == Special.Namespace)
				{
					// Set the namespace prefix.
					nsPrefix = localName;
				}

				// Output the start of the attribute value.
				writer.Write('=');
				writer.Write(quoteChar);

				// Set the writer.
				writer = specialWriter;

				// We are now in the attribute state.
				writeState = System.Xml.WriteState.Attribute;
			}
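
For orientation, a hedged sketch of when this path fires, assuming the enclosing class is an XmlTextWriter-style implementation driven through the standard System.Xml surface (exact serialized output may vary by implementation):

// Writing xml:lang routes through SpecialAttributeStart with Special.Lang;
// declaring a prefix routes through it with Special.Namespace.
var sw = new System.IO.StringWriter();
var w = new System.Xml.XmlTextWriter(sw);
w.WriteStartElement("root");
w.WriteAttributeString("xml", "lang", null, "en");   // -> xml:lang="en"
w.WriteAttributeString("xmlns", "a", null, "urn:a"); // -> xmlns:a="urn:a"
w.WriteEndElement();
w.Flush();
// Expected output along the lines of:
// <root xml:lang="en" xmlns:a="urn:a" />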
Example #47
 void addDataToSpecial(int id, Special special)
 {
     /* Currently this is only meant to fetch the recharge cost, and even
        that implementation is commented out below.
        TODO: fetch any other info in a similar way, but first we have to
              agree on what other info we actually want to provide.
     */
     //if (special)
     //{
     //    if (Script *script = globalEventScript)
     //    {
     //        script.prepare("get_special_recharge_cost");
     //        script.push(id);
     //        int scriptReturn = script.execute();
     //        special.neededMana = scriptReturn;
     //    }
     //}
 }
Example #48
 /// <summary>
 /// Initializes a product data entry for use in XML.
 /// </summary>
 /// <param name="CurrencyType">Type of currency the item is sold for</param>
 /// <param name="Description">NFS: World Beta feature, still gonna keep it for MAYBE future-use</param>
 /// <param name="RentalDurationInMinutes">0 if not a rental, rental duration in minutes if else</param>
 /// <param name="Hash">Item hash value that is recognized by NFS: World</param>
 /// <param name="IconString">Item icon that is displayed somewhere around it's title</param>
 /// <param name="LevelLimit">0 if not level limited, minimum level value if else</param>
 /// <param name="TooltipDescription">NFS: World Beta feature, still gonna keep it for MAYBE future-use</param>
 /// <param name="Price">How much the item is sold for</param>
 /// <param name="PriorityNumber">Priority in the shopping list in-game, commonly used for new items or discounts</param>
 /// <param name="SType">Item type that the server can recognize, not the game</param>
 /// <param name="Id">Item index for the server</param>
 /// <param name="Title">Item title that is displayed in-game</param>
 /// <param name="GType">Item type that NFS: World can recognize, not the server</param>
 /// <param name="ExtraDetail">If there is one, a special condition for the item that is displayed in-game</param>
 /// <returns>An XElement wrapped around in ProductTrans tags.</returns>
 public static XElement GetProductTransactionEntry(Currency CurrencyType, String Description, Int32 RentalDurationInMinutes, Int64 Hash, String IconString, Int16 LevelLimit, String TooltipDescription, Int32 Price, Int16 PriorityNumber, ServerItemType SType, Int32 Id, String Title, GameItemType GType, Special ExtraDetail)
 {
     XElement ProductNode = 
         new XElement("ProductTrans",
             new XElement("BundleItems",
                 new XAttribute(ServerAttributes.nilNS + "nil", "true")
             ),
             new XElement("CategoryId",
                 new XAttribute(ServerAttributes.nilNS + "nil", "true")
             ),
             new XElement("Currency", CurrencyType.GetString()),
             new XElement("Description", Description),
             new XElement("DurationMinute", RentalDurationInMinutes.ToString()),
             new XElement("Hash", Hash.ToString()),
             new XElement("Icon", IconString),
             new XElement("Level", LevelLimit.ToString()),
             new XElement("LongDescription", TooltipDescription),
             new XElement("Price", Price.ToString()),                   
             new XElement("Priority", PriorityNumber.ToString()),
             new XElement("ProductId", String.Format("ItemEntry{0}-{1}", SType.GetString(), Id.ToString())),
             new XElement("ProductTitle", Title),
             new XElement("ProductType", GType.GetString()),
             new XElement("SecondaryIcon", ExtraDetail.GetString()),
             new XElement("UseCount", "1")
         );
     return ProductNode;
 }
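
A hedged call-site sketch follows. The parameter names are taken from the signature above, but every enum member used is a hypothetical placeholder, since the snippet does not show the Currency, ServerItemType, GameItemType, or Special definitions:

// Hypothetical call; enum members below are placeholders.
XElement entry = GetProductTransactionEntry(
    CurrencyType: Currency.Boost,              // placeholder member
    Description: "A basic paint job",
    RentalDurationInMinutes: 0,                // not a rental
    Hash: 0x12345678L,
    IconString: "icon_paint",
    LevelLimit: 0,                             // no level requirement
    TooltipDescription: "Applies a new paint color.",
    Price: 500,
    PriorityNumber: 1,
    SType: ServerItemType.Product,             // placeholder member
    Id: 42,
    Title: "Basic Paint",
    GType: GameItemType.Customization,         // placeholder member
    ExtraDetail: Special.None);                // placeholder member
Console.WriteLine(entry); // serialized <ProductTrans> element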
Example #49
	// Handle the end of a special attribute.
	private void SpecialAttributeEnd()
			{
				switch(special)
				{
					case Special.Lang:
					{
						// Set the xml:lang value.
						xmlLang = specialWriter.AttributeValue;
					}
					break;

					case Special.Space:
					{
						// Get the attribute value.
						String value = specialWriter.AttributeValue;

						// Get the length of the attribute value.
						int len = value.Length;

						// Set the xml:space value.
						if(len == 7 && value == "default")
						{
							xmlSpace = System.Xml.XmlSpace.Default;
						}
						else if(len == 8 && value == "preserve")
						{
							xmlSpace = System.Xml.XmlSpace.Preserve;
						}
						else
						{
							// Reset the attribute value builder.
							specialWriter.ResetBuilder();

							// Reset the writer.
							writer = specialWriter.Writer;

							// Reset the special.
							special = Special.None;

							// Signal that the xml:space value is invalid.
							throw new ArgumentException(/* TODO */);
						}
					}
					break;

					case Special.Namespace:
					{
						// Get the attribute value.
						String value = specialWriter.AttributeValue;

						// Add the mapping to the namespace manager.
						namespaceManager.AddNamespace(nsPrefix, value);

						// Check for match to required prefix declaration.
						if(needPrefixDecl && nsPrefix == scope.prefix &&
						   value == scope.xmlns)
						{
							// Flag that required prefix declaration is present.
							needPrefixDecl = false;
						}

						// Reset the namespace prefix.
						nsPrefix = null;
					}
					break;
				}

				// Reset the attribute value builder.
				specialWriter.ResetBuilder();

				// Reset the writer.
				writer = specialWriter.Writer;

				// Reset the special.
				special = Special.None;
			}
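Example #50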
        /// <summary>
        ///   Computes the Complementary Cumulative Distribution Function (1-CDF)
        ///   for the Kolmogorov-Smirnov statistic's distribution.
        /// </summary>
        ///
        /// <param name="n">The sample size.</param>
        /// <param name="x">The Kolmogorov-Smirnov statistic.</param>
        /// <returns>Returns the complementary cumulative probability of the statistic
        /// <paramref name="x"/> under a sample size <paramref name="n"/>.</returns>
        ///
        public static double ComplementaryDistributionFunction(double n, double x)
        {
            double nxx = n * x * x; // nx²

            // First of all, check if the given values do not represent
            // a special case. There are some combination of values for
            // which the distribution has a known, exact solution.

            // Ruben-Gambino's Complement
            if (x >= 1.0 || nxx >= 370.0)
            {
                return(0.0);
            }

            if (x <= 0.5 / n || nxx <= 0.0274)
            {
                return(1.0);
            }

            if (n == 1)
            {
                return(2.0 - 2.0 * x);
            }

            if (x <= 1.0 / n)
            {
                return((n <= 20) ? 1.0 - Special.Factorial(n) * Math.Pow(2.0 * x - 1.0 / n, n)
                    : 1.0 - Math.Exp(Special.LogFactorial(n) + n * Math.Log(2.0 * x - 1.0 / n)));
            }

            if (x >= 1.0 - 1.0 / n)
            {
                return(2.0 * Math.Pow(1.0 - x, n));
            }

            // This is not a special case. Continue processing to
            //  select the most adequate method for the given inputs

            if (n <= 140)
            {
                // This is the first region (i) of the complementary
                // CDF as detailed in Simard's paper. It is further
                // divided into two sub-regions.
                if (nxx >= 4.0)
                {
                    // For x close to one, Simard's advocates the use of the one-
                    // sided Kolmogorov-Smirnov statistic as given by Miller (1956).
                    return(2 * OneSideUpperTail(n, x));
                }
                else
                {
                    // For higher values of x, the direct cumulative
                    // distribution will be giving enough precision.
                    return(1.0 - CumulativeFunction(n, x));
                }
            }
            else
            {
                // This is the second region (ii) of the complementary
                // CDF discussed in Simard's paper. It is again divided
                // into two sub-regions depending on the value of nx².
                if (nxx >= 2.2)
                {
                    // In this region, the Miller approximation returns
                    // at least 6 digits of precision (Simard, 2010).
                    return(2 * OneSideUpperTail(n, x));
                }
                else
                {
                    // In this region, the direct cumulative
                    // distribution will give enough precision.
                    return(1.0 - CumulativeFunction(n, x));
                }
            }
        }
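
A hedged usage sketch; the KolmogorovSmirnovDistribution class name is an assumption based on the Accord.NET layout this code resembles:

// One-sample KS test sketch: D is the observed statistic for sample size n.
double n = 100;
double D = 0.15;
double pValue = KolmogorovSmirnovDistribution.ComplementaryDistributionFunction(n, D);
// pValue is approximately P[Dn >= 0.15]; small values reject the null hypothesis.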
Example #51
 public FireWave(int timeAvaliable, Rectangle startpos, char direction, global_vars.sender sender, ref global_vars variables, ref Game1 game)
     : base(timeAvaliable, startpos, direction, sender, ref variables, ref game)
 {
     thisFirewave = this;
 }
Example #52
 public SpecialAttribute(Special name) { }
 public override void SetBase(Special baseSpecial)
 {
     this.baseSpecial = baseSpecial;
 }
        /// <summary>
        ///   Computes the Cumulative Distribution Function (CDF)
        ///   for the Kolmogorov-Smirnov statistic's distribution.
        /// </summary>
        ///
        /// <param name="n">The sample size.</param>
        /// <param name="x">The Kolmogorov-Smirnov statistic.</param>
        /// <returns>Returns the cumulative probability of the statistic
        /// <paramref name="x"/> under a sample size <paramref name="n"/>.</returns>
        ///
        /// <remarks>
        /// <para>
        ///   This function computes the cumulative probability P[Dn &lt;= x] of
        ///   the Kolmogorov-Smirnov distribution using multiple methods as
        ///   suggested by Richard Simard (2010).</para>
        ///
        /// <para>
        ///   Simard partitioned the problem of evaluating the CDF using multiple
        ///   approximation and asymptotic methods in order to achieve a best compromise
        ///   between speed and precision. This function follows the same partitioning as
        ///   Simard, which is described in the table below.</para>
        ///
        ///   <list type="table">
        ///    <listheader><term>For n &lt;= 140 and:</term></listheader>
        ///      <item><term>x &lt;= 1/n or x &gt;= 1-1/n</term><description>Uses the Ruben-Gambino formula.</description></item>
        ///      <item><term>1/n &lt; nx² &lt; 0.754693</term><description>Uses the Durbin matrix algorithm.</description></item>
        ///      <item><term>0.754693 &lt;= nx² &lt; 4</term><description>Uses the Pomeranz algorithm.</description></item>
        ///      <item><term>4 &lt;= nx² &lt; 18</term><description>Uses the complementary distribution function.</description></item>
        ///      <item><term>nx² >= 18</term><description>Returns the constant 1.</description></item></list>
        ///
        ///   <list type="table">
        ///    <listheader><term>For 140 &lt; n &lt;= 10^5</term></listheader>
        ///      <item><term>nx² &gt;= 18</term><description>Returns the constant 1.</description></item>
        ///      <item><term>nx^(3/2) &lt; 1.4</term><description>Durbin matrix algorithm.</description></item>
        ///      <item><term>nx^(3/2) &gt; 1.4</term><description>Pelz-Good asymptotic series.</description></item></list>
        ///
        ///   <list type="table">
        ///    <listheader><term>For n &gt; 10^5</term></listheader>
        ///      <item><term>nx² &gt;= 18</term><description>Returns the constant 1.</description></item>
        ///      <item><term>nx² &lt; 18</term><description>Pelz-Good asymptotic series.</description></item></list>
        ///
        /// </remarks>
        ///
        public static double CumulativeFunction(double n, double x)
        {
            double nxx = n * x * x; // nx²

            int nn = (int)Math.Ceiling(n);

            // First of all, check if the given values do not represent
            // a special case. There are some combination of values for
            // which the distribution has a known, exact solution.

            // Ruben-Gambino
            if (x >= 1.0 || nxx >= 18.0)
            {
                return(1.0);
            }

            if (x <= 0.5 / n)
            {
                return(0.0);
            }

            if (n == 1)
            {
                return(2.0 * x - 1.0);
            }

            if (x <= 1.0 / n)
            {
                return((n <= 20) ? Special.Factorial(n) * Math.Pow(2.0 * x - 1.0 / n, n)
                    : Math.Exp(Special.LogFactorial(n) + n * Math.Log(2.0 * x - 1.0 / n)));
            }

            if (x >= 1.0 - 1.0 / n)
            {
                return(1.0 - 2.0 * Math.Pow(1.0 - x, n));
            }

            // This is not a special case. Continue processing to
            //  select the most adequate method for the given inputs

            if (n <= 140)
            {
                // This is the first case (i) as referred in Simard's
                // paper. Use exact algorithms depending on nx² with
                // at least 13 to 15 decimal digits of precision.

                // Durbin
                if (nxx < 0.754693)
                {
                    return(Durbin(nn, x));
                }

                // Pomeranz
                if (nxx < 4.0)
                {
                    return(Pomeranz(nn, x));
                }

                // Complementary CDF
                return(1.0 - ComplementaryDistributionFunction(n, x));
            }
            else
            {
                if (n <= 100000)
                {
                    // This is the second case (ii) referred in Simard's
                    // paper. Use either the Durbin approximation or the
                    // Pelz-Good asymptotic series depending on nx^(3/2).

                    // Obs:
                    //
                    //   x^(3/2) = x^(1 + 1/2) = x*x^(1/2) = x*sqrt(x)
                    //
                    //          (n*x)      * sqrt(x) <= 1.40
                    //   sqrt((n*x)*(n*x)) * sqrt(x) <= 1.40
                    //   sqrt((n*x)*(n*x)  *      x) <= 1.40
                    //        (n*x)*(n*x)  *      x  <= 1.96
                    //
                    //    n*n*x*x*x <= 1.96
                    //

                    if (n * nxx * x <= 1.96)
                    {
                        return(Durbin(nn, x));
                    }
                    else
                    {
                        return(PelzGood(n, x));
                    }
                }
                else
                {
                    // This is the third case (iii) as referred in Simard's
                    // paper. Use only the Pelz-Good asymptotic series.
                    return(PelzGood(n, x));
                }
            }
        }
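
As a quick hedged sanity check, the closed-form branches above can be exercised directly (same class-name assumption as in the earlier sketch):

// n == 1 branch: CDF = 2x - 1
double c1 = KolmogorovSmirnovDistribution.CumulativeFunction(1, 0.75);  // 0.5

// x >= 1 - 1/n branch: CDF = 1 - 2(1 - x)^n
double c2 = KolmogorovSmirnovDistribution.CumulativeFunction(10, 0.99); // 1 - 2e-20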
Example #55
	// Write the start of an attribute with a full name.
	public override void WriteStartAttribute(String prefix, String localName,
										     String ns)
			{
				// Get the length of the prefix.
				int prefixLen = (((Object)prefix) == null ? 0 : prefix.Length);

				// Get the length of the namespace.
				int nsLen = (((Object)ns) == null ? 0 : ns.Length);

				// Validate the parameters.
				if(!namespaces && (prefixLen != 0 || nsLen != 0))
				{
					throw new ArgumentException
						(S._("Xml_NamespacesNotSupported"));
				}

				// Check the state and output delimiters.
				if(writeState == System.Xml.WriteState.Attribute)
				{
					// Terminate the attribute.
					TerminateAttribute();

					// Write a space before the start of the attribute.
					writer.Write(' ');
				}
				else if(writeState == System.Xml.WriteState.Element)
				{
					writer.Write(' ');
				}
				else if(writeState == System.Xml.WriteState.Closed)
				{
					throw new InvalidOperationException
						(S._("Xml_InvalidWriteState"));
				}

				// Set the special.
				special = Special.None;

				// Output the name of the attribute, with appropriate prefixes.
				if(prefixLen != 0)
				{
					if(prefixLen == 5 && prefix == "xmlns")
					{
						// Ensure the namespace is correct.
						if(nsLen != 0 &&
						   (nsLen != 29 ||
						    ns != XmlDocument.xmlns))
						{
							throw new ArgumentException(/* TODO */);
						}

						// Determine if the name starts with (X|x)(M|m)(L|l).
						bool startsWithXml = localName.ToLower
							(CultureInfo.InvariantCulture).StartsWith("xml");

						// Ensure that the name is valid.
						if(startsWithXml)
						{
							throw new ArgumentException(/* TODO */);
						}

						// Handle the start of the special attribute.
						SpecialAttributeStart
							(prefix, localName, Special.Namespace);

						// We're done here.
						return;
					}
					else if(prefixLen == 3 && prefix == "xml")
					{
						// Ensure the namespace is correct.
						if(nsLen != 0 &&
						   (nsLen != 36 ||
						    ns != XmlDocument.xmlnsXml))
						{
							throw new ArgumentException(/* TODO */);
						}

						// Get the length of the local name.
						int nameLen =
							(((Object)localName) == null ? 0 : localName.Length);

						// Set the special based on the local name.
						if(nameLen == 4 && localName == "lang")
						{
							// Handle the start of the special attribute.
							SpecialAttributeStart
								(prefix, localName, Special.Lang);

							// We're done here.
							return;
						}
						else if(nameLen == 5 && localName == "space")
						{
							// Handle the start of the special attribute.
							SpecialAttributeStart
								(prefix, localName, Special.Space);

							// We're done here.
							return;
						}
					}
					else if(nsLen != 0)
					{
						// Get the current mapping for the namespace.
						String currMapping = LookupPrefix(ns);

						// Ensure the correct mapping is in scope.
						if(currMapping != prefix)
						{
							// Add the mapping to the namespace manager.
							namespaceManager.AddNamespace(prefix, ns);

							// Write the namespace declaration.
							writer.Write("xmlns:");
							writer.Write(prefix);
							writer.Write('=');
							writer.Write(quoteChar);
							WriteQuotedString(ns);
							writer.Write(quoteChar);
							writer.Write(' ');
						}
					}
					else
					{
						// Lookup the namespace for the given prefix.
						ns = namespaceManager.LookupNamespace(prefix);

						// Ensure we have a namespace for the given prefix.
						if(((Object)ns) == null || ns.Length == 0)
						{
							throw new ArgumentException(/* TODO */);
						}
					}

					// Write the prefix.
					writer.Write(prefix);
					writer.Write(':');
				}
				else if(nsLen != 0)
				{
					if(((Object)localName) != null &&
					   (localName.Length == 5 && localName == "xmlns"))
					{
						// Ensure the namespace is correct.
						if(nsLen != 29 || ns != XmlDocument.xmlns)
						{
							throw new ArgumentException(/* TODO */);
						}

						// Handle the start of the special attribute.
						SpecialAttributeStart(null, null, Special.Namespace);

						// We're done here.
						return;
					}
					else if(nsLen == 29 &&
					        ns == XmlDocument.xmlns)
					{
						throw new ArgumentException(/* TODO */);
					}
					else if(nsLen == 36 &&
					        ns == XmlDocument.xmlnsXml)
					{
						throw new ArgumentException(/* TODO */);
					}
					else
					{
						// We were only given a namespace, so find the prefix.
						prefix = LookupPrefix(ns);

						// Ensure we have a prefix.
						if(((Object)prefix) == null || prefix.Length == 0)
						{
							// Generate a prefix.
							prefix = GeneratePrefix(ns);

							// Write the namespace declaration for the prefix.
							writer.Write("xmlns:");
							writer.Write(prefix);
							writer.Write('=');
							writer.Write(quoteChar);
							WriteQuotedString(ns);
							writer.Write(quoteChar);
							writer.Write(' ');
						}

						// Write the prefix.
						writer.Write(prefix);
						writer.Write(':');
					}
				}

				// Write the local name.
				writer.Write(localName);

				// Output the start of the attribute value.
				writer.Write('=');
				writer.Write(quoteChar);

				// We are now in the attribute state.
				writeState = System.Xml.WriteState.Attribute;
			}
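
Finally, a hedged illustration of the namespace-only path at the end of this method, again assuming an XmlTextWriter-style public surface; the generated prefix name is implementation-dependent:

// Supplying only a namespace forces a prefix lookup; when none is in
// scope, one is generated and its declaration is written inline.
var sw = new System.IO.StringWriter();
var w = new System.Xml.XmlTextWriter(sw);
w.WriteStartElement("root");
w.WriteAttributeString("attr", "urn:example", "value");
w.WriteEndElement();
w.Flush();
// Typical output (generated prefix varies):
// <root d1p1:attr="value" xmlns:d1p1="urn:example" />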