/// <summary>
///   Runs the one-against-one learning algorithm.
/// </summary>
///
/// <param name="computeError">
///   True to compute error after the training
///   process completes, false otherwise. Default is true.
/// </param>
///
/// <returns>
///   The sum of squares error rate for
///   the resulting support vector machine.
/// </returns>
///
public double Run(bool computeError)
{
    int machineCount = msvm.Machines.Length;
    int completed = 0;

    // Train every binary machine in parallel. Each machine learns to
    // tell one class (label i) apart from the rest (reported as -i).
    Parallel.For(0, machineCount, i =>
    {
        // Announce that the binary sub-problem for class i has begun.
        var report = new SubproblemEventArgs(i, -i);
        OnSubproblemStarted(report);

        // Fetch the machine dedicated to this class together with the
        // column of target outputs that encodes its two-class labels.
        KernelSupportVectorMachine machine = msvm.Machines[i];
        int[] labels = outputs.GetColumn(i);

        // Build the configured teacher and solve the two-class problem
        // (without computing the per-machine training error).
        var teacher = configure(machine, inputs, labels, i, -i);
        teacher.Run(false);

        // Publish thread-safe progress and announce completion.
        report.Progress = Interlocked.Increment(ref completed);
        report.Maximum = machineCount;
        OnSubproblemFinished(report);
    });

    // Compute the overall error only when the caller asked for it.
    if (computeError)
        return ComputeError(inputs, outputs);

    return 0.0;
}
/// <summary>
///   Raises the <see cref="E:SubproblemStarted"/> event.
/// </summary>
///
/// <param name="args">The <see cref="Accord.MachineLearning.VectorMachines.Learning.SubproblemEventArgs"/> instance containing the event data.</param>
///
protected void OnSubproblemStarted(SubproblemEventArgs args)
{
    // Copy the delegate reference to a local before testing and invoking
    // it. This method is called from Parallel.For worker threads, so the
    // original "check field, then invoke field" pattern raced with
    // subscribers detaching in between, which could throw a
    // NullReferenceException. Delegates are immutable, so invoking the
    // local copy is safe even if the event is unsubscribed concurrently.
    var handler = SubproblemStarted;

    if (handler != null)
        handler(this, args);
}
/// <summary>
///   Runs the one-against-one learning algorithm.
/// </summary>
///
/// <param name="computeError">
///   True to compute error after the training
///   process completes, false otherwise. Default is true.
/// </param>
/// <param name="token">
///   A <see cref="CancellationToken"/> which can be used
///   to request the cancellation of the learning algorithm
///   when it is being run in another thread.
/// </param>
///
/// <returns>
///   The sum of squares error rate for
///   the resulting support vector machine.
/// </returns>
///
public double Run(bool computeError, CancellationToken token)
{
    // The teacher factory is mandatory: without it there is no way to
    // create a learning algorithm for each binary sub-machine.
    if (configure == null)
    {
        var excp = new InvalidOperationException("Please specify the algorithm configuration function "
            + "by setting the Algorithm property for this class. Examples are available in the "
            + "documentation for Multiclass Support Vector Learning class (given in the help link).");
        excp.HelpLink = "http://accord-framework.net/svn/docs/html/T_Accord_MachineLearning_VectorMachines_MulticlassSupportVectorMachine.htm";
        throw excp;
    }

    int classes = msvm.Classes;

    // One-against-one trains one binary machine per unordered pair of
    // classes: n * (n - 1) / 2 sub-problems in total.
    int total = (classes * (classes - 1)) / 2;
    int progress = 0;

    msvm.Reset();

    // For each class i
    Parallel.For(0, msvm.Classes, i =>
    {
        // For each class j (only j < i, covering each pair once)
        Parallel.For(0, i, j =>
        {
            // Cancellation is best-effort: sub-problems that have not
            // started yet are skipped; already-running teachers may
            // observe the token via ISupportCancellation below.
            if (token.IsCancellationRequested)
                return;

            // We will start the binary sub-problem
            var args = new SubproblemEventArgs(i, j);
            OnSubproblemStarted(args);

            // Retrieve the associated machine
            KernelSupportVectorMachine machine = msvm[i, j];

            // Retrieve the samples belonging to either of the two classes
            int[] idx = outputs.Find(x => x == i || x == j);

            double[][] subInputs = inputs.Submatrix(idx);
            int[] subOutputs = outputs.Submatrix(idx);

            // Transform it into a two-class problem: class i maps to -1,
            // class j maps to +1. ApplyInPlace stores the lambda's return
            // value, so the former assignment to the parameter (x = ...)
            // was redundant and has been removed.
            subOutputs.ApplyInPlace(x => (x == i) ? -1 : +1);

            // Train the machine on the two-class problem, forwarding the
            // cancellation token when the teacher supports cancellation.
            var subproblem = configure(machine, subInputs, subOutputs, i, j);

            var canCancel = (subproblem as ISupportCancellation);

            if (canCancel != null)
                canCancel.Run(false, token);
            else subproblem.Run(false);

            // Update and report progress
            args.Progress = Interlocked.Increment(ref progress);
            args.Maximum = total;

            OnSubproblemFinished(args);
        });
    });

    // Compute error if required.
    return (computeError) ? ComputeError(inputs, outputs) : 0.0;
}
/// <summary>
///   Runs the one-against-one learning algorithm.
/// </summary>
///
/// <param name="computeError">
///   True to compute error after the training
///   process completes, false otherwise. Default is true.
/// </param>
/// <param name="token">
///   A <see cref="CancellationToken"/> which can be used
///   to request the cancellation of the learning algorithm
///   when it is being run in another thread.
/// </param>
///
/// <returns>
///   The sum of squares error rate for
///   the resulting support vector machine.
/// </returns>
///
public double Run(bool computeError, CancellationToken token)
{
    // The teacher factory is mandatory: without it there is no way to
    // create a learning algorithm for each binary sub-machine.
    if (configure == null)
    {
        var excp = new InvalidOperationException("Please specify the algorithm configuration function "
            + "by setting the Algorithm property for this class. Examples are available in the "
            + "documentation for Multiclass Support Vector Learning class (given in the help link).");
        excp.HelpLink = "http://accord-framework.net/svn/docs/html/T_Accord_MachineLearning_VectorMachines_MulticlassSupportVectorMachine.htm";
        throw excp;
    }

    int classes = msvm.Classes;

    // One-against-one trains one binary machine per unordered pair of
    // classes: n * (n - 1) / 2 sub-problems in total.
    int total = (classes * (classes - 1)) / 2;
    int progress = 0;

    msvm.Reset();

    // For each class i. The fully-qualified name selects Accord's own
    // Parallel shim rather than System.Threading.Tasks.Parallel.
    global::Accord.Threading.Tasks.Parallel.For(0, msvm.Classes, i =>
    {
        // For each class j (only j < i, covering each pair once)
        global::Accord.Threading.Tasks.Parallel.For(0, i, j =>
        {
            // Cancellation is best-effort: sub-problems that have not
            // started yet are skipped; already-running teachers may
            // observe the token via ISupportCancellation below.
            if (token.IsCancellationRequested)
            {
                return;
            }

            // We will start the binary sub-problem
            var args = new SubproblemEventArgs(i, j);
            OnSubproblemStarted(args);

            // Retrieve the associated machine
            KernelSupportVectorMachine machine = msvm[i, j];

            // Retrieve the samples belonging to either of the two classes
            int[] idx = outputs.Find(x => x == i || x == j);

            double[][] subInputs = inputs.Submatrix(idx);
            int[] subOutputs = outputs.Submatrix(idx);

            // Transform it into a two-class problem: class i maps to -1,
            // class j maps to +1. ApplyInPlace stores the lambda's return
            // value, so the former assignment to the parameter (x = ...)
            // was redundant and has been removed.
            subOutputs.ApplyInPlace(x => (x == i) ? -1 : +1);

            // Train the machine on the two-class problem, forwarding the
            // cancellation token when the teacher supports cancellation.
            var subproblem = configure(machine, subInputs, subOutputs, i, j);

            var canCancel = (subproblem as ISupportCancellation);

            if (canCancel != null)
            {
                canCancel.Run(false, token);
            }
            else
            {
                subproblem.Run(false);
            }

            // Update and report progress
            args.Progress = Interlocked.Increment(ref progress);
            args.Maximum = total;

            OnSubproblemFinished(args);
        });
    });

    // Compute error if required.
    return (computeError) ? ComputeError(inputs, outputs) : 0.0;
}
/// <summary>
///   Runs the one-against-one learning algorithm.
/// </summary>
///
/// <param name="computeError">
///   True to compute error after the training
///   process completes, false otherwise. Default is true.
/// </param>
/// <param name="token">
///   A <see cref="CancellationToken"/> which can be used
///   to request the cancellation of the learning algorithm
///   when it is being run in another thread.
/// </param>
///
/// <returns>
///   The sum of squares error rate for
///   the resulting support vector machine.
/// </returns>
///
public double Run(bool computeError, CancellationToken token)
{
    // The teacher factory is mandatory: without it there is no way to
    // create a learning algorithm for each binary sub-machine.
    if (configure == null)
    {
        var excp = new InvalidOperationException("Please specify the algorithm configuration function "
            + "by setting the Algorithm property for this class. Examples are available in the "
            + "documentation for Multiclass Support Vector Learning class (given in the help link).");
        excp.HelpLink = "http://accord-framework.net/svn/docs/html/T_Accord_MachineLearning_VectorMachines_MulticlassSupportVectorMachine.htm";
        throw excp;
    }

    int classes = msvm.Classes;

    // One-against-one trains one binary machine per unordered pair of
    // classes: n * (n - 1) / 2 sub-problems in total.
    int total = (classes * (classes - 1)) / 2;
    int progress = 0;

    // Flatten the (i, j) class pairs into a single array so one
    // Parallel.For can load-balance across all sub-problems instead
    // of using nested parallel loops.
    var pairs = new Tuple<int, int>[total];
    for (int i = 0, k = 0; i < classes; i++)
        for (int j = 0; j < i; j++, k++)
            pairs[k] = Tuple.Create(i, j);

    msvm.Reset();

    // Save exceptions but process all machines
    var exceptions = new ConcurrentBag<Exception>();

    // For each pair of classes (i, j)
    Parallel.For(0, total, k =>
    {
        // Cancellation is best-effort: sub-problems that have not
        // started yet are skipped; already-running teachers may
        // observe the token via ISupportCancellation below.
        if (token.IsCancellationRequested)
            return;

        int i = pairs[k].Item1;
        int j = pairs[k].Item2;

        // We will start the binary sub-problem
        var args = new SubproblemEventArgs(i, j);
        OnSubproblemStarted(args);

        // Retrieve the associated machine
        KernelSupportVectorMachine machine = msvm[i, j];

        // Retrieve the samples belonging to either of the two classes
        int[] idx = outputs.Find(x => x == i || x == j);

        double[][] subInputs = inputs.Submatrix(idx);
        int[] subOutputs = outputs.Submatrix(idx);

        // Transform it into a two-class problem: class i maps to -1,
        // class j maps to +1. ApplyInPlace stores the lambda's return
        // value, so the former assignment to the parameter (x = ...)
        // was redundant and has been removed.
        subOutputs.ApplyInPlace(x => (x == i) ? -1 : +1);

        // Train the machine on the two-class problem, forwarding the
        // cancellation token when the teacher supports cancellation.
        var subproblem = configure(machine, subInputs, subOutputs, i, j);

        var canCancel = (subproblem as ISupportCancellation);

        try
        {
            if (canCancel != null)
                canCancel.Run(false, token);
            else subproblem.Run(false);
        }
        catch (Exception ex)
        {
            // Record the failure and keep training the remaining
            // machines; all failures are re-thrown together below.
            exceptions.Add(ex);
        }

        // Update and report progress
        args.Progress = Interlocked.Increment(ref progress);
        args.Maximum = total;

        OnSubproblemFinished(args);
    });

    if (exceptions.Count > 0)
    {
        // Message fix: AggregateException exposes the collected errors
        // through its InnerExceptions (plural) property.
        throw new AggregateException("One or more exceptions were thrown when teaching "
            + "the machines. Please check the InnerExceptions property of this AggregateException "
            + "to discover what exactly caused this error.", exceptions);
    }

    // Compute error if required.
    return (computeError) ? ComputeError(inputs, outputs) : 0.0;
}
/// <summary>
///   Runs the one-against-one learning algorithm.
/// </summary>
///
/// <param name="computeError">
///   True to compute error after the training
///   process completes, false otherwise. Default is true.
/// </param>
///
/// <returns>
///   The sum of squares error rate for
///   the resulting support vector machine.
/// </returns>
///
public double Run(bool computeError)
{
    int classes = msvm.Classes;

    // One binary machine per unordered pair of classes: n*(n-1)/2.
    int total = (classes * (classes - 1)) / 2;
    int progress = 0;

    msvm.Reset();

    // NOTE: the loop headers below are spliced by the preprocessor.
    // In DEBUG builds the pairs are visited by two plain sequential
    // for-loops (easier to step through in a debugger); in release
    // builds the same body runs inside two nested Parallel.For lambdas.
    // The matching closers (with the extra ");" for the lambda calls)
    // appear after the shared body, also under #if !DEBUG.
#if DEBUG
    for (int i = 0; i < msvm.Classes; i++)
    {
        for (int j = 0; j < i; j++)
        {
#else
    // For each class i
    Parallel.For(0, msvm.Classes, i =>
    {
        // For each class j
        Parallel.For(0, i, j =>
        {
#endif
            // We will start the binary sub-problem
            var args = new SubproblemEventArgs(i, j);
            OnSubproblemStarted(args);

            // Retrieve the associated machine
            KernelSupportVectorMachine machine = msvm[i, j];

            // Retrieve the associated classes: the indices of all
            // samples labeled either i or j.
            int[] idx = outputs.Find(x => x == i || x == j);

            double[][] subInputs = inputs.Submatrix(idx);
            int[] subOutputs = outputs.Submatrix(idx);

            // Transform in a two-class problem
            // (label i becomes -1, label j becomes +1)
            subOutputs.ApplyInPlace(x => x = (x == i) ? -1 : +1);

            // Train the machine on the two-class problem.
            configure(machine, subInputs, subOutputs, i, j).Run(false);

            // Update and report progress
            // (Interlocked keeps the counter correct in release builds,
            // where this body runs on multiple threads.)
            args.Progress = Interlocked.Increment(ref progress);
            args.Maximum = total;

            OnSubproblemFinished(args);

        // Close the inner loop: plain "}" in DEBUG; the lambda call
        // additionally needs ");" in release builds.
        }
#if !DEBUG
        );
#endif
    }
#if !DEBUG
    );
#endif

    // Compute error if required.
    return (computeError) ? ComputeError(inputs, outputs) : 0.0;
}