/// <summary>
        ///   Constructs a new Multi-class Kernel Support Vector Machine.
        /// </summary>
        ///
        /// <param name="kernel">The chosen kernel for the machine. Default is to
        ///   use the <see cref="Linear"/> kernel.</param>
        /// <param name="inputs">The number of inputs for the machine. If sequences have
        ///   varying length, pass zero to this parameter and pass a suitable sequence
        ///   kernel to this constructor, such as <see cref="DynamicTimeWarping"/>.</param>
        /// <param name="classes">The number of classes in the classification problem.</param>
        ///
        /// <remarks>
        ///   If the number of inputs is zero, the machine accepts an
        ///   indefinite number of inputs. This is often the case for
        ///   kernel vector machines using a sequence kernel.
        /// </remarks>
        ///
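        /// <example>
        ///   A minimal construction sketch (the input dimension, kernel, and number of
        ///   classes below are illustrative values only):
        ///   <code>
        ///   // One-vs-one multi-class machine for 3 classes of 4-dimensional inputs,
        ///   // using a linear kernel (arguments: inputs, kernel, classes)
        ///   var machine = new MulticlassSupportVectorMachine(4, new Linear(), 3);
        ///   </code>
        /// </example>
        ///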
        public MulticlassSupportVectorMachine(int inputs, IKernel kernel, int classes)
        {
            if (classes <= 1)
            {
                throw new ArgumentException("The machine must have at least two classes.", "classes");
            }

            // Create the kernel machines
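            // The machines follow the one-vs-one decomposition and are stored in a
            // lower-triangular jagged array: machines[i][j] discriminates between
            // class (i + 1) and class j, for a total of classes * (classes - 1) / 2
            // binary machines.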
            machines = new KernelSupportVectorMachine[classes - 1][];
            for (int i = 0; i < machines.Length; i++)
            {
                machines[i] = new KernelSupportVectorMachine[i + 1];

                for (int j = 0; j <= i; j++)
                {
                    machines[i][j] = new KernelSupportVectorMachine(kernel, inputs);
                }
            }

            this.initialize();
        }

        /// <summary>
        ///   Computes the output of the machine separating <paramref name="classA"/> from
        ///   <paramref name="classB"/>, sharing support vector kernel evaluations across
        ///   machines (parallel version).
        /// </summary>
        ///
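        /// <remarks>
        ///   Each support vector of this machine is mapped through <c>cache.Vectors</c> to a
        ///   global index: a non-negative index means the vector is shared with other machines
        ///   and its kernel evaluation is memoized in <c>cache.Products</c> (guarded by the
        ///   corresponding <see cref="SpinLock"/> when running in parallel); a negative index
        ///   means the vector is private to this machine and is evaluated directly.
        /// </remarks>
        ///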
        private int computeParallel(int classA, int classB, double[] input, out double output, Cache cache)
        {
            // Get the machine for this problem
            KernelSupportVectorMachine machine = machines[classA - 1][classB];

            // Get the vectors shared among all machines
            int[] vectors = cache.Vectors[classA - 1][classB];

            double[] values = cache.Products;
#if !NET35
            SpinLock[] locks = cache.SyncObjects;
#endif
            double sum = machine.Threshold;


            if (machine.IsCompact)
            {
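                // Compact (linear) machines store their weight vector directly in the
                // input space, so the output reduces to the dot product of the weights
                // with the input plus the threshold.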
                if (machine.Weights == null)
                {
                    throw new InvalidOperationException(
                        "A compact machine must have its linear weight vector computed.");
                }

                // For linear machines, computation is simpler
                for (int i = 0; i < machine.Weights.Length; i++)
                {
                    sum += machine.Weights[i] * input[i];
                }
            }
            else
            {
#if NET35
                #region Backward compatibility
                for (int i = 0; i < vectors.Length; i++)
                {
                    double value;

                    // Check if it is a shared vector
                    int j = vectors[i];

                    if (j >= 0)
                    {
                        // This is a shared vector. Check
                        // if it has already been computed

                        if (!Double.IsNaN(values[j]))
                        {
                            // Yes, it has. Retrieve the value from the cache
                            value = values[j];
                        }
                        else
                        {
                            // No, it has not. Compute and store the computed value in the cache
                            value = values[j] = machine.Kernel.Function(machine.SupportVectors[i], input);
                            Interlocked.Increment(ref cache.Evaluations);
                        }
                    }
                    else
                    {
                        // This vector is not shared by any other machine. No need to cache
                        value = machine.Kernel.Function(machine.SupportVectors[i], input);
                        Interlocked.Increment(ref cache.Evaluations);
                    }

                    sum += machine.Weights[i] * value;
                }
                #endregion
#else
                // For each support vector in the machine
                Parallel.For<double>(0, vectors.Length,

                                      // Init
                                      () => 0.0,

                                      // Map
                                      (i, state, partialSum) =>
                {
                    double value;

                    // Check if it is a shared vector
                    int j = vectors[i];

                    if (j >= 0)
                    {
                        // This is a shared vector. Check
                        // if it has already been computed

                        bool taken = false;
                        try
                        {
                            locks[j].Enter(ref taken);

                            if (!Double.IsNaN(values[j]))
                            {
                                // Yes, it has. Retrieve the value from the cache
                                value = values[j];
                            }
                            else
                            {
                                // No, it has not. Compute and store the computed value in the cache
                                value = values[j] = machine.Kernel.Function(machine.SupportVectors[i], input);
                                Interlocked.Increment(ref cache.Evaluations);
                            }
                        }
                        finally
                        {
                            // Always release the lock, even if the kernel function throws
                            if (taken)
                                locks[j].Exit();
                        }
                    }
                    else
                    {
                        // This vector is not shared by any other machine. No need to cache
                        value = machine.Kernel.Function(machine.SupportVectors[i], input);
                        Interlocked.Increment(ref cache.Evaluations);
                    }

                    return partialSum + machine.Weights[i] * value;
                },

                                      // Reduce
                                      (partialSum) => { lock (locks) sum += partialSum; }
                                      );
#endif
            }

            // Produce probabilities if required
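            // The link function maps the raw decision value into a probability
            // estimate, so the decision threshold becomes 0.5 instead of 0.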
            if (machine.IsProbabilistic)
            {
                output = machine.Link.Inverse(sum);
                return output >= 0.5 ? +1 : -1;
            }
            else
            {
                output = sum;
                return output >= 0 ? +1 : -1;
            }
        }

        /// <summary>
        ///   Computes the output of the machine separating <paramref name="classA"/> from
        ///   <paramref name="classB"/>, sharing support vector kernel evaluations across
        ///   machines (sequential version).
        /// </summary>
        ///
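        /// <remarks>
        ///   This is the single-threaded counterpart of <see cref="computeParallel"/>: it
        ///   performs the same shared-vector caching, but without the per-entry locking
        ///   used by the parallel version.
        /// </remarks>
        ///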
        private int computeSequential(int classA, int classB, double[] input, out double output, Cache cache)
        {
            // Get the machine for this problem
            KernelSupportVectorMachine machine = machines[classA - 1][classB];

            // Get the vectors shared among all machines
            int[]    vectors = cache.Vectors[classA - 1][classB];
            double[] values  = cache.Products;

            double sum = machine.Threshold;


            if (machine.IsCompact)
            {
                // For linear machines, computation is simpler
                for (int i = 0; i < machine.Weights.Length; i++)
                {
                    sum += machine.Weights[i] * input[i];
                }
            }
            else
            {
                // For each support vector in the machine
                for (int i = 0; i < vectors.Length; i++)
                {
                    double value;

                    // Check if it is a shared vector
                    int j = vectors[i];

                    if (j >= 0)
                    {
                        // This is a shared vector. Check
                        // if it has already been computed

                        if (!Double.IsNaN(values[j]))
                        {
                            // Yes, it has. Retrieve the value from the cache
                            value = values[j];
                        }
                        else
                        {
                            // No, it has not. Compute and store the computed value in the cache
                            value = values[j] = machine.Kernel.Function(machine.SupportVectors[i], input);
                            Interlocked.Increment(ref cache.Evaluations);
                        }
                    }
                    else
                    {
                        // This vector is not shared by any other machine. No need to cache
                        value = machine.Kernel.Function(machine.SupportVectors[i], input);
                        Interlocked.Increment(ref cache.Evaluations);
                    }

                    sum += machine.Weights[i] * value;
                }
            }


            // Produce probabilities if required
            if (machine.IsProbabilistic)
            {
                output = machine.Link.Inverse(sum);
                return output >= 0.5 ? +1 : -1;
            }
            else
            {
                output = sum;
                return output >= 0 ? +1 : -1;
            }
        }