Example 1
        public void Train(IntPtr d_source,
                          IntPtr d_target,
                          float learningRate,
                          int threadID,
                          out float[] prediction,
                          out float[] loss)
        {
            if (!ForTraining)
            {
                throw new Exception("Network was loaded in prediction mode, but asked to train.");
            }

            // Serialize access to this GPU so concurrent threads don't interleave work on the same device.
            lock (TFHelper.DeviceSync[DeviceID])
            {
                // Stage the source and target batches from device memory into the pinned host buffers backing the input tensors.
                GPU.CopyDeviceToHostPinned(d_source, TensorSource[threadID].Data, BatchSize * (int)BoxDimensions.Elements());
                GPU.CopyDeviceToHostPinned(d_target, TensorTarget[threadID].Data, BatchSize * (int)BoxDimensions.Elements());

                // Feed the scalar learning rate through its single-element tensor.
                Marshal.Copy(new[] { learningRate }, 0, TensorLearningRate[threadID].Data, 1);

                // Execute one training step on this thread's session runner.
                var Output = RunnerTraining[threadID].Run();

                // Copy the batch prediction and the scalar loss out of the output tensors.
                Marshal.Copy(Output[0].Data, ResultPredicted[threadID], 0, BatchSize * (int)BoxDimensions.Elements());
                Marshal.Copy(Output[1].Data, ResultLoss[threadID], 0, 1);

                // The out parameters alias per-thread result buffers; they are overwritten by the next call on this threadID.
                prediction = ResultPredicted[threadID];
                loss       = ResultLoss[threadID];

                // Release the native output tensors.
                foreach (var tensor in Output)
                {
                    tensor.Dispose();
                }
            }
        }
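A minimal caller sketch for Example 1. Only Train's signature comes from the example above; the MyNetwork type, the device pointers, and the chosen values are hypothetical assumptions:

        // Hypothetical caller; "MyNetwork" and the device pointers are assumed to exist elsewhere.
        void TrainOneStep(MyNetwork network, IntPtr d_sourceBatch, IntPtr d_targetBatch)
        {
            network.Train(d_sourceBatch,           // device pointer to the input batch
                          d_targetBatch,           // device pointer to the target batch
                          1e-4f,                   // learning rate for this step
                          0,                       // per-thread runner slot (threadID)
                          out float[] prediction,  // batch output; aliases an internal buffer
                          out float[] loss);       // single-element loss; also an internal buffer

            // Clone the results if they must survive the next call on the same threadID.
            float lossValue = loss[0];
            float[] predictionCopy = (float[])prediction.Clone();
        }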
Example 2
        public void Encode(IntPtr d_source,
                           IntPtr d_weight,
                           int threadID,
                           //out float[] prediction,
                           out float[] bottleneck)
        {
            //if (!ForTraining)
            //    throw new Exception("Network was loaded in prediction mode, but asked to train.");

            // Stage the FFT-domain source (two floats per element) and its per-element weights into the pinned input buffers.
            GPU.CopyDeviceToHostPinned(d_source, TensorSource[threadID].Data, BatchSize * (int)BoxDimensions.ElementsFFT() * 2);
            GPU.CopyDeviceToHostPinned(d_weight, TensorWeightSource[threadID].Data, BatchSize * (int)BoxDimensions.ElementsFFT());

            // Encoding is deterministic: feed a dropout rate of 0.
            Marshal.Copy(new[] { 0f }, 0, TensorDropoutRate[threadID].Data, 1);

            // Execute the encoder graph on this thread's session runner.
            var Output = RunnerEncode[threadID].Run();

            //Marshal.Copy(Output[0].Data, ResultPredicted[threadID], 0, BatchSize * (int)BoxDimensions.ElementsFFT() * 2);
            // Copy the bottleneck activations for the whole batch.
            Marshal.Copy(Output[0].Data, ResultBottleneck[threadID], 0, BatchSize * BottleneckWidth);

            //prediction = ResultPredicted[threadID];
            // The out parameter aliases the per-thread result buffer.
            bottleneck = ResultBottleneck[threadID];

            // Release the native output tensors.
            foreach (var tensor in Output)
            {
                tensor.Dispose();
            }
        }
Example 3
        public float Train(IntPtr d_data, IntPtr d_labels, IntPtr d_weights, float learningRate, int threadID, out long[] argmax, out float[] probability)
        {
            if (!ForTraining)
            {
                throw new Exception("Network was loaded in prediction mode, but asked to train.");
            }

            lock (TFHelper.DeviceSync[DeviceID])
            {
                // Stage the image tiles, the per-class labels (NClasses values per element), and the per-element weights.
                GPU.CopyDeviceToHostPinned(d_data, TensorMicTile[threadID].Data, BatchSize * (int)BoxDimensionsTrain.Elements());
                GPU.CopyDeviceToHostPinned(d_labels, TensorTrainingLabels[threadID].Data, BatchSize * (int)BoxDimensionsTrain.Elements() * NClasses);
                GPU.CopyDeviceToHostPinned(d_weights, TensorTrainingWeights[threadID].Data, BatchSize * (int)BoxDimensionsTrain.Elements());

                // Feed the scalar learning rate through its single-element tensor.
                Marshal.Copy(new[] { learningRate }, 0, TensorLearningRate[threadID].Data, 1);

                // Execute one training step on this thread's session runner.
                var Output = RunnerTraining[threadID].Run();

                // Output[0] is not read back; copy the per-element argmax, the per-class probabilities, and the per-item losses.
                Marshal.Copy(Output[1].Data, ResultArgMax[threadID], 0, BatchSize * (int)BoxDimensionsTrain.Elements());
                Marshal.Copy(Output[2].Data, ResultSoftMax[threadID], 0, BatchSize * (int)BoxDimensionsTrain.Elements() * NClasses);
                Marshal.Copy(Output[3].Data, ResultLoss[threadID], 0, BatchSize);

                // The out parameters alias per-thread result buffers.
                argmax      = ResultArgMax[threadID];
                probability = ResultSoftMax[threadID];

                // Release the native output tensors.
                foreach (var tensor in Output)
                {
                    tensor.Dispose();
                }

                // Report the batch-mean loss.
                return MathHelper.Mean(ResultLoss[threadID]);
            }
        }
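Unlike Example 1, this overload returns the scalar training loss directly as the mean of the per-item losses, while argmax and probability still come back through out parameters.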
Example 4
        public void Train(IntPtr d_source,
                          IntPtr d_target,
                          IntPtr d_weightSource,
                          IntPtr d_weightTarget,
                          float dropoutRate,
                          float learningRate,
                          float orthogonalityRate,
                          int threadID,
                          out float[] prediction,
                          out float[] bottleneck,
                          out float[] loss,
                          out float[] lossKL)
        {
            if (!ForTraining)
            {
                throw new Exception("Network was loaded in prediction mode, but asked to train.");
            }

            // Stage the FFT-domain source and target batches (two floats per element) and their per-element weights.
            GPU.CopyDeviceToHostPinned(d_source, TensorSource[threadID].Data, BatchSize * (int)BoxDimensions.ElementsFFT() * 2);
            GPU.CopyDeviceToHostPinned(d_target, TensorTarget[threadID].Data, BatchSize * (int)BoxDimensions.ElementsFFT() * 2);
            GPU.CopyDeviceToHostPinned(d_weightSource, TensorWeightSource[threadID].Data, BatchSize * (int)BoxDimensions.ElementsFFT());
            GPU.CopyDeviceToHostPinned(d_weightTarget, TensorWeightTarget[threadID].Data, BatchSize * (int)BoxDimensions.ElementsFFT());

            // Feed the three scalar hyperparameters through their single-element tensors.
            Marshal.Copy(new[] { dropoutRate }, 0, TensorDropoutRate[threadID].Data, 1);
            Marshal.Copy(new[] { learningRate }, 0, TensorLearningRate[threadID].Data, 1);
            Marshal.Copy(new[] { orthogonalityRate }, 0, TensorOrthogonalityRate[threadID].Data, 1);

            // Execute one training step on this thread's session runner.
            var Output = RunnerTraining[threadID].Run();

            // Copy the reconstruction, the two scalar losses, and the bottleneck activations out of the output tensors.
            Marshal.Copy(Output[0].Data, ResultPredicted[threadID], 0, BatchSize * (int)BoxDimensions.ElementsFFT() * 2);
            Marshal.Copy(Output[1].Data, ResultLoss[threadID], 0, 1);
            Marshal.Copy(Output[2].Data, ResultLossKL[threadID], 0, 1);
            Marshal.Copy(Output[3].Data, ResultBottleneck[threadID], 0, BatchSize * BottleneckWidth);

            // The out parameters alias per-thread result buffers.
            prediction = ResultPredicted[threadID];
            bottleneck = ResultBottleneck[threadID];
            loss       = ResultLoss[threadID];
            lossKL     = ResultLossKL[threadID];

            // Release the native output tensors.
            foreach (var tensor in Output)
            {
                tensor.Dispose();
            }
        }
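This variant feeds three scalar hyperparameters (dropout, learning rate, orthogonality weight) in a single run and reads back four outputs: the reconstruction, two scalar losses, and the bottleneck. The * 2 in the copy sizes reflects two floats per FFT-domain element, presumably interleaved real and imaginary parts.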
Example 5
        public void Predict(IntPtr d_data, int threadID, out long[] argmax, out float[] probability)
        {
            //if (ForTraining)
            //    throw new Exception("Network was loaded in training mode, but asked to predict.");

            // Stage the input tiles into the pinned prediction buffer.
            GPU.CopyDeviceToHostPinned(d_data, TensorMicTilePredict[threadID].Data, BatchSize * (int)BoxDimensionsPredict.Elements());

            // Execute the prediction graph on this thread's session runner.
            var Output = RunnerPrediction[threadID].Run();

            // Copy the per-element argmax and the probabilities for the 3 output classes.
            Marshal.Copy(Output[0].Data, ResultArgMax[threadID], 0, BatchSize * (int)BoxDimensionsPredict.Elements());
            Marshal.Copy(Output[1].Data, ResultSoftMax[threadID], 0, BatchSize * (int)BoxDimensionsPredict.Elements() * 3);

            // The out parameters alias per-thread result buffers.
            argmax      = ResultArgMax[threadID];
            probability = ResultSoftMax[threadID];

            // Release the native output tensors.
            foreach (var tensor in Output)
            {
                tensor.Dispose();
            }
        }
Example 6
        public void Predict(IntPtr d_data, int threadID, out float[] prediction)
        {
            // Serialize access to this GPU so concurrent threads don't interleave work on the same device.
            lock (TFHelper.DeviceSync[DeviceID])
            {
                //if (ForTraining)
                //    throw new Exception("Network was loaded in training mode, but asked to predict.");

                // Stage the input batch into the pinned source buffer.
                GPU.CopyDeviceToHostPinned(d_data, TensorSource[threadID].Data, BatchSize * (int)BoxDimensions.Elements());

                // Execute the prediction graph on this thread's session runner.
                var Output = RunnerPrediction[threadID].Run();

                // Copy the batch prediction out of the output tensor.
                Marshal.Copy(Output[0].Data, ResultPredicted[threadID], 0, BatchSize * (int)BoxDimensions.Elements());

                // The out parameter aliases the per-thread result buffer.
                prediction = ResultPredicted[threadID];

                // Release the native output tensors.
                foreach (var tensor in Output)
                {
                    tensor.Dispose();
                }
            }
        }
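The prediction overloads mirror the training calls minus the scalar hyperparameter feeds. Note that Example 6 serializes on TFHelper.DeviceSync[DeviceID] while Example 5 does not, so callers of the unlocked variant presumably have to guarantee single-threaded access to each device themselves.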
Example 7
        public void Train(IntPtr d_data, float[] labels, float learningRate, int threadID, out long[] argmax, out float[] probability)
        {
            if (!ForTraining)
            {
                throw new Exception("Network was loaded in prediction mode, but asked to train.");
            }

            // Stage the image tiles from device memory; the labels (two values per item) come from host memory.
            GPU.CopyDeviceToHostPinned(d_data, TensorMicTile[threadID].Data, BatchSize * (int)BoxDimensions.Elements());
            Marshal.Copy(labels, 0, TensorTrainingLabels[threadID].Data, BatchSize * 2);

            // Feed the scalar learning rate through its single-element tensor.
            Marshal.Copy(new[] { learningRate }, 0, TensorLearningRate[threadID].Data, 1);

            // Execute one training step on this thread's session runner.
            var Output = RunnerTraining[threadID].Run();

            // Copy the per-item argmax and the two-class probabilities.
            Marshal.Copy(Output[0].Data, ResultArgMax[threadID], 0, BatchSize);
            Marshal.Copy(Output[1].Data, ResultSoftMax[threadID], 0, BatchSize * 2);

            // The out parameters alias per-thread result buffers.
            argmax      = ResultArgMax[threadID];
            probability = ResultSoftMax[threadID];

            // Release the native output tensors.
            foreach (var tensor in Output)
            {
                tensor.Dispose();
            }
        }
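All seven examples share one lifecycle: stage device buffers into pinned host tensors (GPU.CopyDeviceToHostPinned), marshal scalar hyperparameters into single-element tensors (Marshal.Copy), run the per-thread session runner, copy the outputs into per-thread result arrays, and dispose the native output tensors. Because the out parameters alias those per-thread arrays, results must be copied before the next call that reuses the same threadID.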