        public void CheckExhaustive(Layer layer, TensorCollection bottom, TensorCollection top, int checkBottom = -1)
        {
            layer.Setup(bottom, top);
            Assert.True(top.Count > 0, "Exhaustive mode requires at least one top blob.");

            for (int i = 0; i < top.Count; i++)
            {
                for (int j = 0; j < top[i].Count; j++)
                {
                    CheckSingle(layer, bottom, top, checkBottom, i, j);
                }
            }
        }
        public void Check(Layer layer, TensorCollection bottom, TensorCollection top, int checkBottom = -1)
        {
            layer.Setup(bottom, top);
            CheckSingle(layer, bottom, top, checkBottom, -1, -1);
        }
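
Taken together, Check probes the combined scalar objective over all bottoms at once, while CheckExhaustive repeats CheckSingle for every (top blob, top element) pair. A minimal usage sketch, assuming these methods live on a gradient-checker class constructed with the step and threshold fields used below; the GradientChecker and TanhLayer names and the Tensor constructor arguments are assumptions for illustration:

// Illustrative harness; GradientChecker, TanhLayer and the Tensor
// constructor shapes are assumptions, not part of the code above.
var checker = new GradientChecker(step: 1e-2, threshold: 1e-3);

var bottom = new TensorCollection { new Tensor(2, 3, 4, 5) };
var top = new TensorCollection { new Tensor() };

// Verify d(top)/d(bottom) for every top blob and every top element.
checker.CheckExhaustive(new TanhLayer(), bottom, top);

// Or run the single combined objective over all bottoms at once.
checker.Check(new TanhLayer(), bottom, top);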
        public void CheckSingle(Layer layer, TensorCollection bottom, TensorCollection top, int checkBottom, int topId, int topDataId, bool elementwise = false)
        {
            //TODO If layers ever gain the ability to access stored blobs, we will need to recheck this.
            if (elementwise)
            {
                Assert.True(topId >= 0);
                Assert.True(topDataId >= 0);

                int topCount = top[topId].Count;
                for (int blobId = 0; blobId < bottom.Count; blobId++)
                    Assert.Equal(topCount, bottom[blobId].Count);
            }

            // First, figure out what blobs we need to check against.
            var blobsToCheck = new TensorCollection();
            var propagateDown = new List<bool>().Repeated(bottom.Count, checkBottom < 0);
            if (checkBottom < 0)
            {
                // We are not checking the bottom.
                for (int i = 0; i < bottom.Count; i++)
                    blobsToCheck.Add(bottom[i]);
            }
            else
            {
                // We are checking the bottom, therefore we must ensure that the blob checked exists.
                Assert.True(checkBottom < bottom.Count);
                blobsToCheck.Add(bottom[checkBottom]);
                propagateDown[checkBottom] = true;
            }

            //TODO Add a general random generator that layers should use, to ensure we always apply it when layers are non-deterministic.

            // Compute the gradient analytically using Backward
            // Get any loss from the layer
            double computedObjective = layer.Forward(bottom, top);

            // Get additional loss from the objective
            computedObjective += GetObjectiveAndGradient(top, topId, topDataId);
            layer.Backward(top, propagateDown, bottom);

            // Store computed gradients for all checked blobs
            var computedGradientsBlob = new Tensor[blobsToCheck.Count];
            for (int blobId = 0; blobId < blobsToCheck.Count; blobId++)
            {
                var currentBlob = blobsToCheck[blobId];
                computedGradientsBlob[blobId] = new Tensor(currentBlob);

                using (var currentBlobCpu = currentBlob.OnCpu())
                using (var computedGradientsBlobCpu = computedGradientsBlob[blobId].OnCpu())
                {
                    var currentDiff = currentBlobCpu.Diff;
                    var computedGradients = computedGradientsBlobCpu.Data;
                    currentDiff.CopyTo(computedGradients);
                }
            }

            // Compute derivative of top w.r.t. each bottom and parameter input using
            // finite differencing.

            for (int blobId = 0; blobId < blobsToCheck.Count; blobId++)
            {
                var currentBlob = blobsToCheck[blobId];

                using (var currentBlobCpu = currentBlob.OnCpu())
                using (var computedGradientsBlobCpu = computedGradientsBlob[blobId].OnCpu())
                {
                    var computedGradients = computedGradientsBlobCpu.Data;
                    for (int featId = 0; featId < currentBlob.Count; featId++)
                    {
                        // For an element-wise layer, we only need to do finite differencing to
                        // compute the derivative of topData[top_id][top_data_id] w.r.t.
                        // bottomData[blob_id][i] only for i == top_data_id.  For any other
                        // i != top_data_id, we know the derivative is 0 by definition, and simply
                        // check that that's true.
                        double estimatedGradient = 0;
                        if (!elementwise || featId == topDataId)
                        {
                            //TODO Add a general random generator that layers should use, to ensure we always apply it when layers are non-deterministic.

                            // Do finite differencing.
                            // Compute loss with step-size added to input.
                            currentBlobCpu.Data[featId] += step;
                            double positiveObjective = layer.Forward(bottom, top);
                            positiveObjective += GetObjectiveAndGradient(top, topId, topDataId);

                            // Compute loss with step-size subtracted from input.
                            currentBlobCpu.Data[featId] -= step * 2;

                            //TODO Add a general random generator that layers should use, to ensure we always apply it when layers are non-deterministic.

                            double negativeObjective = layer.Forward(bottom, top);
                            negativeObjective += GetObjectiveAndGradient(top, topId, topDataId);

                            // Recover original input value.
                            currentBlobCpu.Data[featId] += step;
                            estimatedGradient = (positiveObjective - negativeObjective) / step / 2.0d;
                        }

                        double computedGradient = computedGradients[featId];
                        double feature = currentBlobCpu.Data[featId];
                        if (kink - kinkRange > Math.Abs(feature) || Math.Abs(feature) > kink + kinkRange)
                        {
                        // We check relative accuracy, but for values that are too
                        // small we floor the scale factor at 1.

                            double scale = Math.Max(Math.Max(Math.Abs(computedGradient), Math.Abs(estimatedGradient)), 1.0d);
                            Assert.InRange(computedGradient - estimatedGradient, -threshold * scale, threshold * scale);
                        }
                    }
                }
            }
        }
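
The estimate computed above is the standard symmetric central difference: each input element is perturbed by +step and -step, the objective is re-evaluated both times, and estimatedGradient = (positiveObjective - negativeObjective) / (2 * step), which is accurate to O(step^2). The same idea on a plain scalar function, as a self-contained sketch (all names here are illustrative):

using System;

static class CentralDifferenceDemo
{
    // Central-difference estimate of df/dx at x, mirroring the
    // perturb-forward / perturb-backward / restore steps in CheckSingle.
    static double EstimateGradient(Func<double, double> f, double x, double h)
    {
        double positiveObjective = f(x + h); // loss with step added
        double negativeObjective = f(x - h); // loss with step subtracted
        return (positiveObjective - negativeObjective) / (2.0 * h);
    }

    static void Main()
    {
        Func<double, double> f = v => v * v * v; // f(x) = x^3, so f'(2) = 12
        double estimated = EstimateGradient(f, 2.0, 1e-4);
        double analytic = 12.0;

        // Same relative-accuracy test as the checker, with the scale floored at 1.
        double scale = Math.Max(Math.Max(Math.Abs(analytic), Math.Abs(estimated)), 1.0);
        Console.WriteLine(Math.Abs(analytic - estimated) <= 1e-6 * scale);
    }
}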
Example #7
        protected virtual void CheckBlobCount(TensorCollection bottom, TensorCollection top)
        {
            // Bottom layer
            if (ExactNumBottomBlobs >= 0)
            {
                if (ExactNumBottomBlobs != bottom.Count)
                    throw new ArgumentException(string.Format("{0} Layer takes {1} bottom blob(s) as input.", this.GetType().Name, this.ExactNumBottomBlobs));
            }

            if (MinBottomBlobs >= 0)
            {
                if (bottom.Count < MinBottomBlobs)
                    throw new ArgumentOutOfRangeException("bottom", string.Format("{0} Layer takes at least {1} bottom blob(s) as input.", this.GetType().Name, this.MinBottomBlobs));
            }

            if (MaxBottomBlobs >= 0)
            {
                if (bottom.Count > MaxBottomBlobs)
                    throw new ArgumentOutOfRangeException("bottom", string.Format("{0} Layer takes at most {1} bottom blob(s) as input.", this.GetType().Name, this.MaxBottomBlobs));
            }

            // Top layer
            if (ExactNumTopBlobs >= 0)
            {
                if (ExactNumTopBlobs != top.Count)
                    throw new ArgumentException(string.Format("{0} Layer produces {1} top blob(s) as output.", this.GetType().Name, this.ExactNumTopBlobs));
            }

            if (MinTopBlobs >= 0)
            {
                if (top.Count < MinTopBlobs)
                    throw new ArgumentOutOfRangeException("top", string.Format("{0} Layer produces at least {1} top blob(s) as output.", this.GetType().Name, this.MinTopBlobs));
            }

            if (MaxTopBlobs >= 0)
            {
                if (top.Count > MaxTopBlobs)
                    throw new ArgumentOutOfRangeException("top", string.Format("{0} Layer produces at most {1} top blob(s) as output.", this.GetType().Name, this.MaxTopBlobs));
            }
        }
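
A constraint value of -1 disables the corresponding check, since every branch first tests for >= 0. A hedged sketch of how a concrete layer might declare its counts so that Setup rejects bad wiring; the EltwiseSumLayer name is hypothetical, and this assumes the count properties are declared virtual on Layer:

// Illustrative only: a sum layer accepting two or more bottoms and
// producing exactly one top. Constraints left at -1 stay disabled.
public class EltwiseSumLayer : Layer
{
    public override int MinBottomBlobs { get { return 2; } }
    public override int MaxBottomBlobs { get { return -1; } } // unbounded
    public override int ExactNumTopBlobs { get { return 1; } }
}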
Example #8
        public virtual void Setup(TensorCollection bottom, TensorCollection top)
        {
            Contract.Requires(bottom != null);
            Contract.Requires(top != null);

            Guard.That(() => bottom).IsNotNull();
            Guard.That(() => top).IsNotNull();

            CheckBlobCount(bottom, top);
        }
Example #9
        public void Setup(Tensor bottom, Tensor top)
        {
            Contract.Requires(bottom != null);
            Contract.Requires(top != null);

            var bottomList = new TensorCollection { bottom };
            var topList = new TensorCollection { top };
            this.Setup(bottomList, topList);
        }
Example #10
        public double Forward(TensorCollection bottom, TensorCollection top)
        {
            Contract.Requires(bottom != null && top != null);
            Contract.Requires(bottom.Count > 0 && top.Count > 0);
            Contract.Requires(Contract.ForAll(bottom, x => x != null));
            Contract.Requires(Contract.ForAll(top, x => x != null));

            // TODO Fail if not initialized.
            Guard.That(() => bottom).IsNotNull();
            Guard.That(() => top).IsNotNull();

            #if EXHAUSTIVE_DEBUG

            Guard.That(() => bottom).IsTrue(x => !x.Contains(null), "Cannot contain null.");
            Guard.That(() => top).IsTrue(x => !x.Contains(null), "Cannot contain null.");

            #endif
            switch (Context.Instance.Mode)
            {
                case ExecutionModeType.Automatic:
                    {
                        if (forwardGpuSupported)
                        {
                            using (var bottomGpu = bottom.OnGpu())
                            using (var topGpu = top.OnGpu())
                            {
                                try
                                {
                                    return ForwardGpu(bottomGpu, topGpu);
                                }
                                catch (NotSupportedException)
                                {
                                    forwardGpuSupported = false;
                                }
                            }
                        }

                        using (var bottomCpu = bottom.OnCpu())
                        using (var topCpu = top.OnCpu())
                        {
                            return ForwardCpu(bottomCpu, topCpu);
                        }
                    }
                case ExecutionModeType.Gpu:
                    {
                        using (var bottomGpu = bottom.OnGpu())
                        using (var topGpu = top.OnGpu())
                        {
                            return ForwardGpu(bottomGpu, topGpu);
                        }
                    }
                case ExecutionModeType.Cpu:
                    {
                        using (var bottomCpu = bottom.OnCpu())
                        using (var topCpu = top.OnCpu())
                        {
                            return ForwardCpu(bottomCpu, topCpu);
                        }
                    }
                default: throw new NotSupportedException(string.Format("Mode of operation '{0}' is not supported.", Context.Instance.Mode));
            }
        }
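
In Automatic mode the GPU path is tried optimistically, and a NotSupportedException from ForwardGpu clears forwardGpuSupported so every later call dispatches straight to the CPU without retrying. The same try-once-then-remember pattern in isolation, as a minimal sketch with stand-in delegates rather than the library's API:

using System;

class FallbackDispatcher
{
    bool gpuSupported = true; // optimistic until a path proves unsupported

    public double Forward(Func<double> gpuPath, Func<double> cpuPath)
    {
        if (gpuSupported)
        {
            try
            {
                return gpuPath();
            }
            catch (NotSupportedException)
            {
                // Remember the failure so subsequent calls skip the GPU path.
                gpuSupported = false;
            }
        }
        return cpuPath();
    }
}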
Example #11
        public double Forward(Tensor bottom, Tensor top)
        {
            Contract.Requires(bottom != null && top != null);

            Guard.That(() => bottom).IsNotNull();
            Guard.That(() => top).IsNotNull();

            var bottomList = new TensorCollection { bottom };
            var topList = new TensorCollection { top };

            return this.Forward(bottomList, topList);
        }
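
These single-tensor overloads make one-in/one-out layers convenient to drive. A short usage sketch; the ReluLayer name is an assumption:

// Illustrative: ReluLayer is hypothetical. Setup and Forward wrap the
// tensors in single-element TensorCollections, as shown above.
Layer layer = new ReluLayer();
var bottom = new Tensor();
var top = new Tensor();

layer.Setup(bottom, top);
double loss = layer.Forward(bottom, top);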