public override NDArray ReduceAMin(NDArray arr, int? axis_, bool keepdims = false, NPTypeCode? typeCode = null)
{
    //In order to iterate an axis:
    //consider an arange shaped (1, 2, 3, 4) when we want to reduce axis 1 (the 2nd dimension, whose length is 2).
    //The size of the array is [1, 2, n, m]; multiplying all shapes after the 2nd gives the size of one slice,
    //and the number of values reduced per output element is the length of the given axis (shape[axis]).
    var shape = arr.Shape;
    if (shape.IsEmpty)
        return arr;

    if (shape.IsScalar || shape.size == 1 && shape.dimensions.Length == 1)
    {
        var r = typeCode.HasValue ? Cast(arr, typeCode.Value, true) : arr.Clone();
        if (keepdims)
            r.Storage.ExpandDimension(0);
        else if (!r.Shape.IsScalar && r.Shape.size == 1 && r.ndim == 1)
            r.Storage.Reshape(Shape.Scalar);
        return r;
    }

    if (axis_ == null)
    {
        var r = NDArray.Scalar(amin_elementwise(arr, typeCode));
        if (keepdims)
            r.Storage.ExpandDimension(0);
        else if (!r.Shape.IsScalar && r.Shape.size == 1 && r.ndim == 1)
            r.Storage.Reshape(Shape.Scalar);
        return r;
    }

    var axis = axis_.Value;
    while (axis < 0)
        axis = arr.ndim + axis; //handle negative axis

    if (axis >= arr.ndim)
        throw new ArgumentOutOfRangeException(nameof(axis));

    if (shape[axis] == 1)
    {
        //the given axis has length 1 and can be squeezed out.
        if (keepdims)
            return new NDArray(arr.Storage.Alias());
        return np.squeeze_fast(arr, axis);
    }

    //handle keepdims
    Shape axisedShape = Shape.GetAxis(shape, axis);

    //prepare ret
    var ret = new NDArray(typeCode ?? arr.GetTypeCode, axisedShape, false);
    var iterAxis = new NDCoordinatesAxisIncrementor(ref shape, axis);
    var iterRet = new NDCoordinatesIncrementor(ref axisedShape);
    var iterIndex = iterRet.Index;
    var slices = iterAxis.Slices;

#if _REGEN1
    #region Compute
    switch (arr.GetTypeCode)
    {
        %foreach supported_numericals,supported_numericals_lowercase%
        case NPTypeCode.#1:
        {
            switch (ret.GetTypeCode)
            {
                %foreach supported_numericals,supported_numericals_lowercase,supported_numericals_defaultvals%
                case NPTypeCode.#101:
                {
                    do
                    {
                        var iter = arr[slices].AsIterator<#2>();
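//Usage sketch for the reduction above; requires the usual "using NumSharp;". Assumption: the public
//np.amin wrapper forwards to this ReduceAMin override, so axis handling (including negative axes and
//the keepdims flag) behaves as implemented here; treat the exact wrapper signature as illustrative.
public static void AMinUsageSketch()
{
    var a = np.arange(24).reshape(2, 3, 4);   //values 0..23 laid out as shape (2, 3, 4)

    var all = np.amin(a);                     //no axis: every element is reduced to a single scalar (0)
    var axis1 = np.amin(a, 1);                //reduce axis 1: result shape is (2, 4)
    var last = np.amin(a, -1);                //negative axis counts from the end, matching the
                                              //"while (axis < 0)" normalization above: result shape is (2, 3)
}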
public override NDArray Cast(NDArray nd, NPTypeCode dtype, bool copy)
{
    if (dtype == NPTypeCode.Empty)
        throw new ArgumentNullException(nameof(dtype));

    //in case it is an empty array
    if (nd.Shape.IsEmpty)
    {
        if (copy)
            return new NDArray(dtype);

        nd.Storage = new UnmanagedStorage(dtype);
        return nd;
    }

    //in case it is a scalar
    if (nd.Shape.IsScalar)
    {
        var ret = NDArray.Scalar(nd.GetAtIndex(0), dtype);
        if (copy)
            return ret;

        nd.Storage = ret.Storage;
        return nd;
    }

    //in case it is shaped (1,)
    if (nd.Shape.size == 1 && nd.Shape.NDim == 1)
    {
        var ret = new NDArray(ArraySlice.Scalar(nd.GetAtIndex(0), dtype), Shape.Vector(1));
        if (copy)
            return ret;

        nd.Storage = ret.Storage;
        return nd;
    }

    //regular clone
    if (nd.GetTypeCode == dtype)
    {
        //casting not needed
        return copy ? clone() : nd;
    }
    else
    {
        //casting needed
        if (copy)
        {
            if (nd.Shape.IsSliced)
                nd = clone();

            return new NDArray(new UnmanagedStorage(ArraySlice.FromMemoryBlock(nd.Array.CastTo(dtype), false), nd.Shape));
        }
        else
        {
            var storage = nd.Shape.IsSliced ? nd.Storage.Clone() : nd.Storage;
            nd.Storage = new UnmanagedStorage(ArraySlice.FromMemoryBlock(storage.InternalArray.CastTo(dtype), false), storage.Shape);
            return nd;
        }
    }

    NDArray clone() => nd.Clone();
}
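//Behavior sketch for Cast above; requires the usual NumSharp usings. Assumptions: the abstract base
//that declares Cast is taken to be NumSharp's TensorEngine, and "engine" is any instance of the
//backend shown here; both names are illustrative, not a statement about the library's exact types.
public static void CastUsageSketch(TensorEngine engine)
{
    var a = np.arange(6);                                            //Int32 data, shape (6,)

    //copy == true: the source array is left untouched and a converted copy is returned
    var asDouble = engine.Cast(a, NPTypeCode.Double, copy: true);

    //copy == false: the same NDArray instance comes back with its storage replaced in place,
    //so a now holds Single data and ReferenceEquals(a, inPlace) is true
    var inPlace = engine.Cast(a, NPTypeCode.Single, copy: false);

    //same dtype with copy == false is a no-op: the input is returned unchanged
    var noop = engine.Cast(inPlace, NPTypeCode.Single, copy: false);
}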
//This function feeds an image into the neural network and applies backpropagation
public void ProcessNeuralNetwork(TestCase testCase, bool storeIncrementalSteps)
{
    //Start
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=0
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=0
    NDArray image784x1 = testCase.AsNDArray();

    //Hidden neuron calculations
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=18
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=18
    var hiddenPreSigmoid20x1 = hiddenBiases20x1 + np.matmul(inputToHiddenWeights20x784, image784x1);
    if (storeIncrementalSteps)
    {
        hiddenNeuronsBeforeSigmoid20x1 = hiddenPreSigmoid20x1.Clone();
    }

    //normalize with sigmoid
    currentHiddenNeurons20X1 = np.divide(1, np.add(1, np.exp(-hiddenPreSigmoid20x1)));

    //Final output neuron calculation
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=78
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=78
    //map from the hidden neurons to the outputs and add the bias
    var outputPreSigmoid10x1 = outputBiases10x1 + np.matmul(hiddenToOutputWeights10x20, currentHiddenNeurons20X1);
    if (storeIncrementalSteps)
    {
        outputNeuronsBeforeSigmoid10x1 = outputPreSigmoid10x1.Clone();
    }

    //normalize with sigmoid
    currentOutputNeurons10x1 = 1 / (1 + np.exp(-outputPreSigmoid10x1));

    expectedOutput10x1 = testCase.AsLabelNDArray();

    //np.argmax gives the index of the maximum value; this simply checks which output neuron was switched on
    detected = np.argmax(currentOutputNeurons10x1);

    //Start backpropagation by figuring out by how much the output was "wrong"
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=108
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=108
    expectedOutputDelta10X1 = currentOutputNeurons10x1 - expectedOutput10x1;

    //Calculate the hidden-to-output weights' "wrongness" using the output's "wrongness"
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=138
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=138
    //Multiply the difference from the expected output by the hidden neurons to build a matrix that adjusts the hidden-to-output weights
    hiddenToOutputWeightAdjustment10X20 = np.matmul(expectedOutputDelta10X1, np.transpose(currentHiddenNeurons20X1));
    if (storeIncrementalSteps)
    {
        hiddenToOutputWeightsPreAdjust10x20 = hiddenToOutputWeights10x20.Clone();
    }

    //Adjust the hidden-to-output weights by the calculated "wrongness"
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=198
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=198
    hiddenToOutputWeights10x20 += -learnRate * hiddenToOutputWeightAdjustment10X20;

    if (storeIncrementalSteps)
    {
        outputBiasPreAdjust10x1 = outputBiases10x1.Clone();
    }

    //Adjust the output biases
    outputBiases10x1 += -learnRate * expectedOutputDelta10X1;

    //Now move on to building the matrix that adjusts the input-to-hidden weights.
    //The derivative of sigmoid is sigmoid * (1 - sigmoid). Remember above where the sigmoid was applied to currentHiddenNeurons20X1?
    //That means the derivative can be computed directly from the stored activations; the result is a 20x1 matrix.
    if (storeIncrementalSteps)
    {
        oneMinusHiddenNeurons20x1 = 1 - currentHiddenNeurons20X1;
    }

    //Calculate the sigmoid differential of the hidden neurons to use later
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=204
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=204
    hiddenNeuronsSigmoidDifferential20X1 = currentHiddenNeurons20X1 * (1 - currentHiddenNeurons20X1);

    //Multiply the "wrongness" of the output by the adjusted hidden-to-output weights to share that "wrongness" back to the hidden layer
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=264
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=264
    //Combine expectedOutputDelta (10x1) with the hiddenToOutputWeights (10x20), resulting in a 20x1 matrix
    hiddenToOutputWeightsXExpectedOutputDelta20x1 = np.matmul(np.transpose(hiddenToOutputWeights10x20), expectedOutputDelta10X1);

    //Multiply that by the hidden neuron sigmoid differential to get the "wrongness" of each hidden neuron
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=324
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=324
    //Fold in the differential of the sigmoid: (20x1) * (20x1) = 20x1
    expectedHiddenDelta20X1 = hiddenToOutputWeightsXExpectedOutputDelta20x1 * hiddenNeuronsSigmoidDifferential20X1;

    //Multiply the "wrongness" of each hidden neuron by the original image to calculate the "wrongness" of the input-to-hidden matrix
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=384
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=384
    //Multiplying by the transposed image makes a 20x784 matrix
    inputToHiddenWeightAdjustment20x784 = np.matmul(expectedHiddenDelta20X1, np.transpose(image784x1));
    if (storeIncrementalSteps)
    {
        inputToHiddenWeightsPreAdjust20x784 = inputToHiddenWeights20x784.Clone();
    }

    //Apply the "wrongness" of the input-to-hidden matrix to the input-to-hidden weights
    //10 fps: https://www.youtube.com/watch?v=zpCFjNjuBaY&t=453
    //60 fps: https://www.youtube.com/watch?v=IQdxHrfdMwk&t=453
    //Finally, adjust the actual weights using the calculated adjustment matrix
    inputToHiddenWeights20x784 += -learnRate * inputToHiddenWeightAdjustment20x784;

    if (storeIncrementalSteps)
    {
        hiddenNeuronBiasPreAdjust20x1 = hiddenBiases20x1.Clone();
    }

    //And adjust the biases using a portion of how far off the hidden neurons were
    hiddenBiases20x1 += -learnRate * expectedHiddenDelta20X1;
}
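//Training-loop sketch for ProcessNeuralNetwork above. Assumptions: "NeuralNetwork" is the class that
//holds the method, "testCases" is an already-loaded collection of TestCase instances, and the epoch
//count is arbitrary; these names are illustrative, only ProcessNeuralNetwork itself comes from the code above.
public static void TrainSketch(NeuralNetwork network, IReadOnlyList<TestCase> testCases)
{
    const int epochs = 3;                                    //illustrative; tune as needed
    for (int epoch = 0; epoch < epochs; epoch++)
    {
        foreach (var testCase in testCases)
        {
            //each call runs one forward pass and one backpropagation/weight update;
            //storeIncrementalSteps = false skips the diagnostic Clone() snapshots, so plain training is faster
            network.ProcessNeuralNetwork(testCase, storeIncrementalSteps: false);
        }
    }
    //after each call, network.detected (set from np.argmax of the output layer) can be compared
    //against the test case's label to track accuracy across the epoch
}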