/// <summary>
/// Expands the compact convolution weights into an equivalent dense
/// (fully-connected) weight matrix, returned in row-major order.
/// Each output row corresponds to one (map, corner) placement of the kernel.
/// </summary>
/// <param name="weights">Flat kernel weights, one block of kernelSize values per map.</param>
/// <returns>Dense matrix of size (maps * Corners.Length) x prod(InputShape), row-major.</returns>
public double[] GetDenseWeights(double[] weights)
{
    // Lazily run preparation so Corners/Offsets are populated.
    if (!prepared)
    {
        Prepare();
    }
    int rowCount = maps * Corners.Length;
    int columnCount = InputShape.Aggregate(1, (acc, val) => acc * val);
    int kernelSize = KernelShape.Aggregate(1, (acc, val) => acc * val);
    var dense = Matrix<double>.Build.Dense(rowCount, columnCount);
    for (int map = 0; map < maps; map++)
    {
        for (int cornerIndex = 0; cornerIndex < Corners.Length; cornerIndex++)
        {
            var corner = Corners[cornerIndex];
            foreach (var offset in Offsets)
            {
                var inputLocation = Location(corner, offset, InputShape);
                if (inputLocation < 0)
                {
                    // Negative location: this offset falls outside the input
                    // (padding region) — no weight to place.
                    continue;
                }
                var kernelLocation = Location(null, offset, KernelShape);
                dense[map * Corners.Length + cornerIndex, inputLocation] = weights[kernelLocation + map * kernelSize];
            }
        }
    }
    return dense.ToRowMajorArray();
}
/// <summary>
/// Evaluates the smoothing-kernel function selected by <paramref name="shape"/>
/// at the given distance and bandwidth.
/// </summary>
/// <param name="distance">Distance from the kernel center.</param>
/// <param name="bandwidth">Kernel bandwidth (smoothing parameter).</param>
/// <param name="shape">Which kernel function to apply.</param>
/// <returns>The kernel value, or 0 for an unrecognized shape.</returns>
public static double calculateKernelValue(double distance, double bandwidth, KernelShape shape)
{
    switch (shape)
    {
        case KernelShape.Gaussian:
            return gaussianKernel(distance, bandwidth);
        case KernelShape.Epanechnikov:
            return epanechnikovKernel(distance, bandwidth);
        case KernelShape.Quartic:
            return quarticKernel(distance, bandwidth);
        case KernelShape.Triweight:
            return triweightKernel(distance, bandwidth);
        case KernelShape.Uniform:
            return uniformKernel(distance, bandwidth);
        default:
            // Unknown shapes contribute no weight.
            return 0;
    }
}
/// <summary>
/// One-time layer preparation: forwards to the convolution engine, computes
/// the flattened kernel size, prepares the weight windows and precomputes a
/// plain bias vector per output map. Subsequent calls are no-ops.
/// </summary>
public override void Prepare()
{
    if (layerPrepared)
    {
        return;
    }
    convolutionEngine.Prepare();
    kernelSize = KernelShape.Aggregate(1, (acc, val) => acc * val);
    if (Bias == null)
    {
        // No separate bias array: the bias lives in one extra trailing
        // weight slot per map.
        kernelSize++;
    }
    if (Weights == null)
    {
        // Weights not loaded yet — leave layerPrepared false so a later
        // call can finish preparation.
        return;
    }
    PrepareWeightsWindows();
    // NOTE(review): the original stored GetOutputScale() into an unused local
    // (BiasScale) here; the discarded call has been removed as dead code.
    int maps = (MapCount == null) ? 1 : MapCount.Aggregate(1, (acc, val) => acc * val);
    if (HotIndices == null)
    {
        // Default to an all-ones vector (Dense(n) is all zeros, then +1).
        HotIndices = Vector<double>.Build.Dense(Corners.Length) + 1;
    }
    biasVectors = new IVector[maps];
    ParallelProcessInEnv(maps, (env, taskIndex, mapIndex) =>
    {
        // The bias value comes either from the dedicated Bias array or,
        // when absent, from the trailing slot of each map's weight block.
        var bias = (Bias != null) ? Bias[mapIndex] : Weights[(mapIndex + 1) * kernelSize - 1];
        biasVectors[mapIndex] = Factory.GetPlainVector(HotIndices * bias, EVectorFormat.dense, Source.GetOutputScale() * WeightsScale);
    });
    layerPrepared = true;
}
/// <summary>
/// Enumerates every offset inside the kernel, odometer-style, from
/// (0,...,0) up to KernelShape - 1 with the first dimension incrementing
/// fastest.
/// </summary>
/// <returns>A fresh int[] per yielded offset.</returns>
IEnumerable<int[]> OffsetGenerator()
{
    var offset = new int[KernelShape.Length];
    bool hasNext;
    do
    {
        // Yield a copy: the working array is mutated in place, so handing
        // out the live reference would corrupt any buffered enumeration —
        // e.g. OffsetGenerator().ToArray() would previously have produced
        // N references to the same (final, all-zero) array.
        yield return (int[])offset.Clone();
        hasNext = false;
        for (int i = 0; i < KernelShape.Length; i++)
        {
            offset[i]++;
            if (offset[i] < KernelShape[i])
            {
                hasNext = true;
                break;
            }
            // This dimension overflowed: reset and carry into the next one.
            offset[i] = 0;
        }
    } while (hasNext);
}
/// <summary>
/// One-time layer preparation: forwards to the convolution engine, computes
/// the flattened kernel size and prepares the weight windows. Subsequent
/// calls are no-ops.
/// </summary>
public override void Prepare()
{
    if (layerPrepared)
    {
        return;
    }
    convolutionEngine.Prepare();
    // Flattened number of weights in one kernel; when no separate bias
    // array exists, one extra trailing slot holds the bias.
    int size = 1;
    foreach (int dim in KernelShape)
    {
        size *= dim;
    }
    kernelSize = (Bias == null) ? size + 1 : size;
    if (Weights == null)
    {
        // Weights not loaded yet — leave layerPrepared false so a later
        // call can finish preparation.
        return;
    }
    PrepareWeightsWindows();
    biasVectors = null;
    layerPrepared = true;
}
/// <summary>
/// Enumerates the input-space "corner" coordinate of every kernel placement,
/// honoring per-dimension padding and stride. Dimensions step with the LAST
/// index incrementing fastest (row-major order), unlike OffsetGenerator.
/// </summary>
/// <returns>A fresh int[] per yielded corner.</returns>
IEnumerable<int[]> CornerGenerator()
{
    // Per-dimension first and last corner positions. NOTE(review): with
    // Padding[i] set, min shifts by +v/2 and max by -(v+1)/2 relative to the
    // unpadded case — presumably 'same'-style centering; confirm against
    // Location()'s coordinate convention before changing.
    int[] min = KernelShape.Select((v, i) => -Lowerpadding[i] - ((Padding[i]) ? -(v / 2) : 0)).ToArray();
    int[] max = KernelShape.Select((v, i) => InputShape[i] + Upperpadding[i] - ((Padding[i]) ? ((v + 1) / 2) : v)).ToArray();
    var corner = (int[])min.Clone();
    bool hasNext;
    do
    {
        // Yield a copy: the working array is mutated in place, so handing
        // out the live reference would corrupt any buffered enumeration —
        // e.g. CornerGenerator().ToArray() would previously have produced
        // N references to the same (final-state) array.
        yield return (int[])corner.Clone();
        hasNext = false;
        for (int i = KernelShape.Length - 1; i >= 0; i--)
        {
            corner[i] += Stride[i];
            if (corner[i] <= max[i])
            {
                hasNext = true;
                break;
            }
            // This dimension ran past its last placement: reset and carry.
            corner[i] = min[i];
        }
    } while (hasNext);
}