Specifies a training profile for an OpenCL training session. Includes the following information. device: The device to use. local ratio: The local workgroup is an OpenCL concept where the global work group is broken into several local work groups. The bigger the local work group, the faster things will run. However, your OpenCL device will impose a maximum local work group size. This ratio allows you to use a smaller local work group; for example, 0.5 would be half of the max size of the local work group. You will almost always want to leave this value at the max 1.0. It is rare that you might need to decrease it because of the GPU being overtaxed. global ratio: The global work group must be a multiple of the local work group. The default value is 1, which means the local and global workgroups are the same size. Do not set this value lower than 1.0. Values higher than 1.0 can result in higher performance. Should be set to an integer value. For example, 2 would specify a global workgroup twice the size of the local. Higher values will increase resource load on the GPU and may crash. segmentation ratio: The main purpose of this ratio is to allow you to scale back on how long the kernels take to execute. For maximum performance, leave this value at the default 1.0 value. However, if your GPU is crashing, setting it to a lower value can help. If you are running Encog on the same GPU as your display uses, you may run into timeout issues if your kernel takes too long to execute. Setting this ratio lower can help.
Example #1
0
        /// <inheritdoc/>
        public override void CreateTrainer(OpenCLTrainingProfile profile, bool singleThreaded)
        {
            // Build a backpropagation trainer for this job's network and data.
            Propagation.Propagation train = new Backpropagation(Network, Training,
                   profile, LearningRate, Momentum);

            // Force a single worker thread when the caller requests it.
            if (singleThreaded)
                train.NumThreads = 1;

            // Carry the job's strategies (e.g. reset/stop rules) over to the trainer.
            foreach (IStrategy strategy in Strategies)
            {
                train.AddStrategy(strategy);
            }

            Train = train;
        }
Example #2
0
        /// <inheritdoc/>
        public override void CreateTrainer(OpenCLTrainingProfile profile, bool singleThreaded)
        {
            // Build an RPROP trainer for this job's network and data.
            Propagation.Propagation trainer = new ResilientPropagation(Network,
                    Training, profile, InitialUpdate, MaxStep);

            // Force one worker thread when requested; otherwise pass 0
            // (presumably "determine automatically" — confirm against Propagation docs).
            trainer.NumThreads = singleThreaded ? 1 : 0;

            // Carry the job's strategies over to the trainer.
            foreach (IStrategy strategy in Strategies)
            {
                trainer.AddStrategy(strategy);
            }

            Train = trainer;
        }
        /// <summary>
        /// Train a flat network multithreaded.
        /// </summary>
        /// <param name="network">The network to train.</param>
        /// <param name="training">The training data to use; must be indexable.</param>
        /// <param name="profile">The OpenCL training profile.</param>
        public TrainFlatNetworkOpenCL(FlatNetwork network,
                                      IEngineDataSet training, OpenCLTrainingProfile profile)
        {
            // Reject network configurations the OpenCL kernels cannot handle.
            new ValidateForOpenCL().Validate(network);

            // The OpenCL trainer needs random access to training pairs.
            IEngineIndexableSet indexableTraining = training as IEngineIndexableSet;
            if (indexableTraining == null)
            {
                throw new EncogEngineError(
                          "Training data must be Indexable for this training type.");
            }

            // OpenCL must have been initialized before this trainer is built.
            if (EncogEngine.Instance.CL == null)
            {
                throw new EncogEngineError(
                          "You must enable OpenCL before using this training type.");
            }

            this.profile  = profile;
            this.network  = network;
            this.training = indexableTraining;
        }
        /// <summary>
        /// Construct a Manhattan propagation training object.
        /// </summary>
        /// <param name="network">The network to train.</param>
        /// <param name="training">The training data to use.</param>
        /// <param name="profile">The OpenCL profile to use, null for CPU.</param>
        /// <param name="learnRate">The learning rate.</param>
        public ManhattanPropagation(BasicNetwork network,
                 INeuralDataSet training, OpenCLTrainingProfile profile, double learnRate)
            : base(network, training)
        {
            if (profile == null)
            {
                // No OpenCL profile supplied: fall back to the CPU flat trainer.
                FlatTraining = new TrainFlatNetworkManhattan(
                        network.Structure.Flat, Training, learnRate);
            }
#if !SILVERLIGHT
            else
            {
                // OpenCL path: configure the GPU trainer for Manhattan updates.
                TrainFlatNetworkOpenCL openCLTrainer = new TrainFlatNetworkOpenCL(
                        network.Structure.Flat, Training, profile);
                openCLTrainer.LearnManhattan(learnRate);
                FlatTraining = openCLTrainer;
            }
#endif
        }
        /// <summary>
        /// Construct a resilient training object, allowing the training parameters
        /// to be specified explicitly. The defaults are acceptable for most uses of
        /// the resilient algorithm, so the shorter constructor that supplies the
        /// default values is usually preferred.
        /// </summary>
        /// <param name="network">The network to train.</param>
        /// <param name="training">The training set to use.</param>
        /// <param name="profile">Optional EncogCL profile to execute on, null for CPU.</param>
        /// <param name="initialUpdate">The initial update values; all deltas start
        /// at this amount.</param>
        /// <param name="maxStep">The maximum that a delta can reach.</param>
        public ResilientPropagation(BasicNetwork network,
                 INeuralDataSet training, OpenCLTrainingProfile profile,
                 double initialUpdate, double maxStep)
            : base(network, training)
        {
            if (profile == null)
            {
                // CPU path. NOTE(review): initialUpdate/maxStep are not forwarded
                // here — the flat trainer apparently uses its own defaults; confirm
                // this is intended.
                this.FlatTraining = new TrainFlatNetworkResilient(
                        network.Structure.Flat, this.Training);
            }
#if !SILVERLIGHT
            else
            {
                // OpenCL path: configure the GPU trainer for RPROP updates.
                TrainFlatNetworkOpenCL openCLTrainer = new TrainFlatNetworkOpenCL(
                        network.Structure.Flat, this.Training, profile);
                openCLTrainer.LearnRPROP(initialUpdate, maxStep);
                this.FlatTraining = openCLTrainer;
            }
#endif
        }
        /// <summary>
        /// Construct an RPROP trainer that may run on an OpenCL device, using the
        /// defaults for all training parameters. This is usually the constructor to
        /// use, as resilient propagation is designed so that the default parameters
        /// are acceptable for nearly all problems.
        /// </summary>
        /// <param name="network">The network to train.</param>
        /// <param name="training">The training data to use.</param>
        /// <param name="profile">The profile to use.</param>
        public ResilientPropagation(BasicNetwork network,
                 INeuralDataSet training, OpenCLTrainingProfile profile)
            : this(network, training, profile, RPROPConst.DEFAULT_INITIAL_UPDATE,
                RPROPConst.DEFAULT_MAX_STEP)
        {
        }
        /// <summary>
        /// Setup the kernel: size the error/gradient arrays from the profile's
        /// global workgroup and create the OpenCL input/output buffers.
        /// </summary>
        ///
        /// <param name="profile">Profile supplying the kernel workgroup sizes.</param>
        public void Init(OpenCLTrainingProfile profile)
        {
            // One error slot per global work item; one gradient slot per
            // (work item, weight) pair.
            int errorSize = profile.KernelGlobalWorkgroup;
            int gradientSize = profile.KernelGlobalWorkgroup
                    * this.flat.Weights.Length;

            this.errors = new float[errorSize];

            // Scalar network parameters passed to the kernel as a small array.
            this.paramArray[0] = this.flat.InputCount;
            this.paramArray[1] = this.flat.OutputCount;
            this.paramArray[2] = this.flat.LayerCounts.Length;

            // create the buffers
            this.inputBuffer = CreateArrayReadOnly(this.inputArray);
            this.idealBuffer = CreateArrayReadOnly(this.idealArray);
            this.errorBuffer = CreateFloatArrayWriteOnly(errorSize);
            this.gradientOutBuffer = CreateFloatArrayWriteOnly(gradientSize);
            this.gradientInBuffer = CreateArrayReadOnly(this.gradients);
            this.paramBuffer = CreateArrayReadOnly(this.paramArray);
            this.layerIndexBuffer = CreateArrayReadOnly(this.flat.LayerIndex);
            this.layerCountBuffer = CreateArrayReadOnly(this.flat.LayerCounts);
            this.layerFeedCountBuffer = CreateArrayReadOnly(this.flat.LayerFeedCounts);
            this.weightInArrayBuffer = CreateArrayReadOnly(this.weightInArray);
            this.weightOutArrayBuffer = CreateFloatArrayWriteOnly(this.weightInArray.Length);
            this.weightIndexBuffer = CreateArrayReadOnly(this.flat.WeightIndex);
            // NOTE(review): this buffer is filled from LayerCounts, the same data
            // as layerCountBuffer above — looks like a copy-paste slip; an
            // activation-type array (e.g. flat.ActivationType) seems intended.
            // Confirm against the kernel source before changing.
            this.activationTypeBuffer = CreateArrayReadOnly(this.flat.LayerCounts);
            this.tempDataInBuffer = CreateArrayReadOnly(this.tempDataArray);
            this.tempDataOutBuffer = CreateFloatArrayWriteOnly(this.tempDataArray.Length);
        }
        /// <summary>
        /// Compile the OpenCL kernel for the given network, then size the kernel
        /// parameters and create the buffers.
        /// </summary>
        ///
        /// <param name="options">The compiler options.</param>
        /// <param name="profile">The OpenCL training profile to use.</param>
        /// <param name="network">The network to compile for.</param>
        public void Compile(IDictionary<String, String> options,
                OpenCLTrainingProfile profile, FlatNetwork network)
        {
            // The kernel is specialized for one activation function: the first
            // layer's activation is baked in via #define macros prepended to the
            // kernel source loaded from resources.
            IActivationFunction activation = network.ActivationFunctions[0];

            StringBuilder kernelSource = new StringBuilder();
            kernelSource.Append("#define ACTIVATION(x,slope)")
                    .Append(activation.GetOpenCLExpression(false))
                    .Append("\r\n")
                    .Append("#define DERIVATIVE(x,slope)")
                    .Append(activation.GetOpenCLExpression(true))
                    .Append("\r\n")
                    .Append(ResourceLoader.LoadString(SourceName));
            CLSource = kernelSource.ToString();

            Compile(options);
            profile.CalculateKernelParams(this, training);
            // setup
            Init(profile);
        }
Example #9
0
 /// <summary>
 /// Create a trainer to use. Implementations construct the trainer and
 /// expose it through the Train property.
 /// </summary>
 /// <param name="profile">The OpenCL device to use, or null for the CPU.</param>
 /// <param name="singleThreaded">True, if single threaded.</param>
 public abstract void CreateTrainer(OpenCLTrainingProfile profile, bool singleThreaded);
        /// <inheritdoc/>
        public void Run()
        {
            Stopwatch watch = new Stopwatch();
            try
            {
                watch.Start();

                // Build an OpenCL profile from the job's ratios when this performer
                // is backed by an OpenCL device; otherwise train on the CPU.
                OpenCLTrainingProfile profile = null;
#if !SILVERLIGHT
                if (this is ConcurrentTrainingPerformerOpenCL)
                {
                    EncogCLDevice device = ((ConcurrentTrainingPerformerOpenCL)this).Device;
                    profile = new OpenCLTrainingProfile(device,
                            this.currentJob.LocalRatio,
                            this.currentJob.GlobalRatio,
                            this.currentJob.SegmentationRatio);
                }
#endif

                this.currentJob.CreateTrainer(profile, Manager.SingleThreaded);
                ITrain train = this.currentJob.Train;
                int iteration = 1;

                // Run batches of iterations until the job says to stop.
                while (this.currentJob.ShouldContinue())
                {
                    train.Iteration(this.currentJob.IterationsPer);
                    iteration++;
                }
                watch.Stop();
            }
            catch (Exception ex)
            {
                // Record the failure on the job so the manager can report it;
                // the performer must not crash the worker thread.
                this.currentJob.Error = ex;
            }
            finally
            {
                // NOTE(review): locking on 'this' is discouraged (a private gate
                // object is safer), but other code may synchronize on this
                // instance, so the existing behavior is preserved.
                lock (this)
                {
                    this.ready = true;
                }
                this.Manager.JobDone(watch.ElapsedMilliseconds, this);
            }
        }
Example #11
0
        /// <summary>
        /// Construct a backpropagation training object.
        /// </summary>
        /// <param name="network">The network that is to be trained.</param>
        /// <param name="training">The training set to use.</param>
        /// <param name="profile">The OpenCL profile to use, or null for CPU training.</param>
        /// <param name="learnRate">The rate at which the weight matrix will be adjusted based on
        /// learning.</param>
        /// <param name="momentum">The influence that previous iteration's training deltas will
        /// have on the current iteration.</param>
        public Backpropagation(BasicNetwork network,
                 INeuralDataSet training, OpenCLTrainingProfile profile, double learnRate,
                 double momentum)
            : base(network, training)
        {
            if (profile == null)
            {
                // CPU path: plain flat-network backpropagation with momentum.
                this.FlatTraining = new TrainFlatNetworkBackPropagation(
                        network.Structure.Flat, this.Training, learnRate, momentum);
            }
#if !SILVERLIGHT
            else
            {
                // OpenCL path: configure the GPU trainer for standard backprop.
                TrainFlatNetworkOpenCL openCLTrainer = new TrainFlatNetworkOpenCL(
                        network.Structure.Flat, this.Training, profile);
                openCLTrainer.LearnBPROP(learnRate, momentum);
                this.FlatTraining = openCLTrainer;
            }
#endif
        }
        /// <summary>
        /// Train a flat network multithreaded.
        /// </summary>
        ///
        /// <param name="network">The network to train.</param>
        /// <param name="training">The training data to use; must be indexable.</param>
        /// <param name="profile">The OpenCL training profile.</param>
        public TrainFlatNetworkOpenCL(FlatNetwork network,
                IEngineDataSet training, OpenCLTrainingProfile profile)
        {
            // Make sure the network layout is supported by the OpenCL kernels.
            new ValidateForOpenCL().Validate(network);

            // The OpenCL trainer needs random access to the training pairs.
            IEngineIndexableSet indexable = training as IEngineIndexableSet;
            if (indexable == null)
            {
                throw new EncogEngineError(
                        "Training data must be Indexable for this training type.");
            }

            // OpenCL must be initialized before creating this trainer.
            if (EncogEngine.Instance.CL == null)
            {
                throw new EncogEngineError(
                        "You must enable OpenCL before using this training type.");
            }

            this.profile = profile;
            this.network = network;
            this.training = indexable;
        }
Example #13
0
        /// <summary>
        /// Evaluate training, optionally using OpenCL: build a feed-forward
        /// network of the requested topology, generate random training data,
        /// and measure training throughput.
        /// </summary>
        /// <param name="device">The OpenCL device, null for CPU.</param>
        /// <param name="input">Input neurons.</param>
        /// <param name="hidden1">Hidden 1 neurons.</param>
        /// <param name="hidden2">Hidden 2 neurons.</param>
        /// <param name="output">Output neurons.</param>
        /// <returns>The result of the evaluation.</returns>
        public static int EvaluateTrain(EncogCLDevice device, int input, int hidden1, int hidden2,
                int output)
        {
            // Build a network and a random training set sized to the topology.
            BasicNetwork network = EncogUtility.SimpleFeedForward(input,
                    hidden1, hidden2, output, true);
            INeuralDataSet training = RandomTrainingFactory.Generate(1000,
                    10000, input, output, -1, 1);

            // Only create an OpenCL profile when a device was actually supplied.
            OpenCLTrainingProfile profile = null;
#if !SILVERLIGHT
            if (device != null)
            {
                profile = new OpenCLTrainingProfile(device);
            }
#endif

            return EvaluateTrain(profile, network, training);
        }
Example #14
0
        /// <summary>
        /// Measure training speed: run RPROP iterations for a fixed time window
        /// (10 * MILIS milliseconds) and report how many iterations completed.
        /// </summary>
        /// <param name="profile">The OpenCL profile to train with, null for CPU.</param>
        /// <param name="network">The network to train.</param>
        /// <param name="training">The training data to use.</param>
        /// <returns>The number of iterations completed within the time window.</returns>
        public static int EvaluateTrain(OpenCLTrainingProfile profile,
                BasicNetwork network, INeuralDataSet training)
        {
            // Pick the CPU or OpenCL constructor depending on the profile.
            ITrain train = (profile == null)
                    ? new ResilientPropagation(network, training)
                    : new ResilientPropagation(
                            network,
                            training,
                            profile,
                            RPROPConst.DEFAULT_INITIAL_UPDATE,
                            RPROPConst.DEFAULT_MAX_STEP);

            // Count how many iterations fit into the timed window.
            int iterations = 0;
            Stopwatch watch = Stopwatch.StartNew();
            while (watch.ElapsedMilliseconds < (10 * MILIS))
            {
                iterations++;
                train.Iteration();
            }

            return iterations;
        }