/// <summary>Element-wise accumulates the weights of <paramref name="V"/> into this volume's weights.</summary>
/// <param name="V">Source volume; assumed to have the same weight length — TODO confirm callers guarantee this.</param>
public void AddFrom(Vol V)
{
    var src = V.W;
    var dst = this.W;
    for (var i = 0; i < dst.Length; i++)
    {
        dst[i] += src[i];
    }
}
/// <summary>
/// Forward pass for the dropout layer.
/// During training, each activation is independently zeroed with probability <c>drop_prob</c>
/// and the drop decision is recorded in <c>dropped</c> for use by backprop.
/// During prediction, activations are scaled down instead of dropped.
/// </summary>
/// <param name="V">Input volume.</param>
/// <param name="is_training">True during training (dropout active), false for inference.</param>
/// <returns>The output volume (a clone of the input with dropout applied).</returns>
public Vol Forward(Vol V, bool is_training)
{
    // BUG FIX: the original unconditionally set is_training = false at the top of this
    // method, which made the dropout branch dead code — dropout was never applied
    // during training.
    this.input = V;
    var V2 = V.Clone();
    var N = V.W.Length;
    if (is_training)
    {
        // Randomly drop activations and remember which ones were dropped.
        for (var i = 0; i < N; i++)
        {
            if ((float)r.NextDouble() < this.drop_prob)
            {
                V2.W[i] = 0; // drop!
                this.dropped[i] = true;
            }
            else
            {
                this.dropped[i] = false;
            }
        }
    }
    else
    {
        // Scale the activations during prediction.
        // NOTE(review): the hard-coded 0.5 factor is only consistent with drop_prob == 0.5;
        // the general form would scale by (1 - drop_prob) — confirm the intended drop_prob.
        for (var i = 0; i < N; i++)
        {
            V2.W[i] *= 0.5f;
        }
    }
    this.Output = V2;
    return this.Output;
}
/// <summary>
/// Converts a flat pixel array into a Vol, normalizing pixel values from [0, 255] to [-0.5, 0.5].
/// Allocates the volume with depth 4 (presumably RGBA channel data) — TODO confirm against callers.
/// </summary>
/// <param name="img">Raw pixel values in [0, 255]; expected length is width * hight * 4 — verify.</param>
/// <param name="width">Image width in pixels.</param>
/// <param name="hight">Image height in pixels.</param>
/// <param name="convert_grayscale">When true, keeps only channel 0 as a depth-1 volume.</param>
/// <returns>The populated volume.</returns>
public static Vol Image_To_Vol(double[] img, int width, int hight, bool convert_grayscale)
{
    var normalized = new double[img.Length];
    for (var i = 0; i < img.Length; i++)
    {
        normalized[i] = img[i] / 255.0 - 0.5; // normalize image pixels to [-0.5, 0.5]
    }

    var result = new Vol(width, hight, 4, 0.0);
    result.W = normalized;

    if (convert_grayscale)
    {
        // Collapse to a single channel by copying channel 0 only.
        var gray = new Vol(width, hight, 1, 0.0);
        for (var x = 0; x < width; x++)
        {
            for (var y = 0; y < hight; y++)
            {
                gray.Set(x, y, 0, result.Get(x, y, 0));
            }
        }
        result = gray;
    }

    return result;
}
/// <summary>Accumulates <c>a * V.W[k]</c> into each weight — used for scaled weight/gradient updates.</summary>
/// <param name="V">Source volume; assumed to have the same weight length — TODO confirm callers guarantee this.</param>
/// <param name="a">Scale factor applied to each source weight.</param>
public void AddFromScaled(Vol V, double a)
{
    var n = this.W.Length;
    for (var i = 0; i < n; i++)
    {
        this.W[i] += a * V.W[i];
    }
}
/// <summary>
/// Backprop for local response normalization along one column x of the input:
/// distributes the output gradient at (x, y, i) across the depth window that
/// contributed to its normalizer S (cached in S_cache_ by the forward pass).
/// </summary>
/// <param name="V">Input volume; gradients are accumulated into it via Add_Grad.</param>
/// <param name="n2">Half-window size; the window spans [i - n2, i + n2] clamped to the depth range.</param>
/// <param name="x">Column index to process.</param>
private void DLRNforWidth(Vol V, double n2, int x)
{
    for (var y = 0; y < V.SY; y++)
    {
        for (var i = 0; i < V.Depth; i++)
        {
            var chain_grad = this.Output.Get_Grad(x, y, i);
            var S = this.S_cache_.Get(x, y, i); // raw (pre-pow) normalizer saved by forward
            var SB = Math.Pow(S, this.beta);
            var SB2 = SB * SB;
            // normalize in a window of size n
            for (var j = Math.Max(0, i - n2); j <= Math.Min(i + n2, V.Depth - 1); j++)
            {
                var aj = V.Get(x, y, (int)j);
                // Chain rule through S = k + (alpha/n) * sum(a^2): derivative of ai / S^beta w.r.t. aj.
                var g = -aj *this.beta *Math.Pow(S, this.beta - 1) * this.alpha / this.n * 2 * aj;
                if (j == i)
                {
                    g += SB; // extra direct term when differentiating w.r.t. ai itself
                }
                g /= SB2;
                g *= chain_grad;
                V.Add_Grad(x, y, (int)j, g);
            }
        }
    }
}
/// <summary>
/// Identity forward pass: stores the input and passes it through unchanged.
/// </summary>
/// <param name="V">Input volume, also returned as the output.</param>
/// <param name="is_training">Unused; present to satisfy the layer interface.</param>
/// <returns>The input volume, unchanged.</returns>
public Vol Forward(Vol V, bool is_training)
{
    // FIX: removed a dead `new Vol(1, 1, this.OutputDepth, 0.0f)` allocation that the
    // original created and immediately discarded without ever using.
    this.input = V;
    this.Output = V;
    return this.Output;
}
/// <summary>Runs the LRN forward computation for every input column in parallel (one task per x).</summary>
/// <param name="V">Input volume.</param>
/// <param name="A">Output volume receiving normalized activations.</param>
/// <param name="n2">Half-window size forwarded to LRNforWidth.</param>
private void LRN(Vol V, Vol A, float n2)
{
    Enumerable.Range(0, V.Width)
              .AsParallel()
              .ForAll(x => LRNforWidth(V, A, n2, x));
}
/// <summary>Convolves filter <paramref name="d"/> over every output row in parallel (one task per row).</summary>
/// <param name="V">Input volume.</param>
/// <param name="tempOutput">Output volume being filled.</param>
/// <param name="inputWidth">Cached input width.</param>
/// <param name="inputHeight">Cached input height.</param>
/// <param name="xy_stride">Stride in both x and y.</param>
/// <param name="d">Filter / output-depth index.</param>
private void ConvFilter(Vol V, Vol tempOutput, int inputWidth, int inputHeight, int xy_stride, int d)
{
    Enumerable.Range(0, this.OutputHeight)
              .AsParallel()
              .ForAll(ay => ConvOverRows(V, tempOutput, inputWidth, inputHeight, xy_stride, d, ay));
}
/// <summary>Backprops local response normalization for every input column in parallel (one task per x).</summary>
/// <param name="V">Input volume receiving accumulated gradients.</param>
/// <param name="n2">Half-window size forwarded to DLRNforWidth.</param>
private void DLRN(Vol V, double n2)
{
    Enumerable.Range(0, V.SX)
              .AsParallel()
              .ForAll(x => DLRNforWidth(V, n2, x));
}
/// <summary>
/// Crops a corp x corp window from V at offset (dx, dy) and optionally mirrors it horizontally.
/// Source pixels falling outside V are skipped, leaving zeros in the crop.
/// </summary>
/// <param name="V">Source volume.</param>
/// <param name="corp">Side length of the (square) crop.</param>
/// <param name="dx">Horizontal crop offset into V.</param>
/// <param name="dy">Vertical crop offset into V.</param>
/// <param name="fliplr">When true, the result is flipped left-right.</param>
/// <returns>The cropped (and possibly flipped) volume; V itself when no crop is needed and fliplr is false.</returns>
public static Vol Augment(Vol V, int corp, int dx, int dy, bool fliplr)
{
    var cropNeeded = corp != V.SX || dx != 0 || dy != 0;
    Vol result;
    if (!cropNeeded)
    {
        result = V;
    }
    else
    {
        result = new Vol(corp, corp, V.Depth, 0.0);
        for (var x = 0; x < corp; x++)
        {
            for (var y = 0; y < corp; y++)
            {
                var sx = x + dx;
                var sy = y + dy;
                if (sx < 0 || sx >= V.SX || sy < 0 || sy >= V.SY)
                {
                    continue; // source pixel outside V stays zero
                }
                for (var d = 0; d < V.Depth; d++)
                {
                    result.Set(x, y, d, V.Get(sx, sy, d));
                }
            }
        }
    }

    if (fliplr)
    {
        // Mirror horizontally into a fresh volume.
        var mirrored = result.CloneAndZero();
        for (var x = 0; x < result.SX; x++)
        {
            for (var y = 0; y < result.SY; y++)
            {
                for (var d = 0; d < result.Depth; d++)
                {
                    mirrored.Set(x, y, d, result.Get(result.SX - x - 1, y, d));
                }
            }
        }
        result = mirrored;
    }

    return result;
}
/// <summary>Deep-copies this volume's weights into a new Vol of the same dimensions (gradients are not copied).</summary>
/// <returns>A fresh volume whose W array holds a copy of this volume's weights.</returns>
public Vol Clone()
{
    var copy = new Vol(this.SX, this.SY, this.Depth, 0.0);
    Array.Copy(this.W, copy.W, this.W.Length);
    return copy;
}
/// <summary>Deep-copies this volume's weights into a new Vol of the same dimensions (gradients are not copied).</summary>
/// <returns>A fresh volume whose W array holds a copy of this volume's weights.</returns>
public Vol Clone()
{
    var copy = new Vol(this.Width, this.Height, this.Depth, 0.0f);
    Array.Copy(this.W, copy.W, this.W.Length);
    return copy;
}
/// <summary>
/// Pushes V through every hidden layer in order, then through the loss layer.
/// </summary>
/// <param name="V">Network input volume.</param>
/// <param name="is_training">Propagated to each layer (e.g. enables dropout).</param>
/// <returns>The activation produced by the loss layer.</returns>
public Vol Forward(Vol V, bool is_training)
{
    var activation = V;
    foreach (var layer in this.Layers)
    {
        activation = layer.Forward(activation, is_training);
    }
    return this.LossLayer.Forward(activation, is_training);
}
/// <summary>
/// Pooling forward pass: allocates the output volume and delegates the actual
/// window scan to Conv.
/// </summary>
/// <param name="V">Input volume.</param>
/// <param name="is_training">Forwarded to Conv (unused there).</param>
/// <returns>The pooled output volume.</returns>
public Vol Forward(Vol V, bool is_training)
{
    this.input = V;
    var pooled = new Vol(this.OutputWidth, this.OutputHeight, this.OutputDepth, 0.0f);
    Conv(V, is_training, pooled);
    this.Output = pooled;
    return this.Output;
}
/// <summary>
/// Random-crop augmentation: picks a random (dx, dy) offset and crops a corp x corp
/// window from V, optionally mirroring the crop horizontally. Source pixels outside V
/// are skipped, leaving zeros in the crop.
/// </summary>
/// <param name="V">Source volume.</param>
/// <param name="corp">Side length of the (square) crop.</param>
/// <param name="fliplr">
/// When true, the crop is flipped left-right. Defaults to false, preserving the original
/// behavior. FIX: the original hard-coded a local `fliplr = false`, leaving its flip
/// branch unreachable; exposing it as an optional parameter makes the branch usable
/// while remaining backward compatible with existing two-argument callers.
/// </param>
/// <returns>The cropped (and possibly flipped) volume; V itself when no work is needed.</returns>
public static Vol Augment(Vol V, int corp, bool fliplr = false)
{
    var dx = (int)Util.Randi(0, V.Width - corp);
    var dy = (int)Util.Randi(0, V.Height - corp);

    Vol W;
    if (corp != V.Width || dx != 0 || dy != 0)
    {
        W = new Vol(corp, corp, V.Depth, 0.0f);
        for (var x = 0; x < corp; x++)
        {
            for (var y = 0; y < corp; y++)
            {
                // Source pixels outside V are skipped and stay zero in the crop.
                if (x + dx < 0 || x + dx >= V.Width || y + dy < 0 || y + dy >= V.Height)
                {
                    continue;
                }
                for (var d = 0; d < V.Depth; d++)
                {
                    W.Set(x, y, d, V.Get(x + dx, y + dy, d));
                }
            }
        }
    }
    else
    {
        W = V;
    }

    if (fliplr)
    {
        // Mirror horizontally into a fresh volume.
        var W2 = W.CloneAndZero();
        for (var x = 0; x < W.Width; x++)
        {
            for (var y = 0; y < W.Height; y++)
            {
                for (var d = 0; d < W.Depth; d++)
                {
                    W2.Set(x, y, d, W.Get(W.Width - x - 1, y, d));
                }
            }
        }
        W = W2;
    }

    return W;
}
/// <summary>
/// Convolution forward pass: slides each filter over the (optionally padded) input
/// with the configured stride and writes the filter response plus bias to the output.
/// </summary>
/// <param name="V">Input volume of size (SX, SY, Depth).</param>
/// <param name="is_training">Unused here; present to satisfy the layer interface.</param>
/// <returns>The output volume of size (OutputWidth, OutputHeight, OutputDepth).</returns>
public Vol Forward(Vol V, bool is_training)
{
    // CLEANUP: removed the pointless `| 0` operations — a leftover from the JavaScript
    // original where `| 0` truncated to int32; on C# ints they are no-ops.
    this.Input = V;
    var tempOutput = new Vol(this.OutputWidth, this.OutputHeight, this.OutputDepth, 0.0);
    var inputWidth = V.SX;
    var inputHeight = V.SY;
    var xy_stride = this.Stride;
    for (int d = 0; d < this.OutputDepth; d++) // for each output depth aka filter
    {
        var f = this.Filters[d];
        var y = -this.Pad;
        for (int ay = 0; ay < this.OutputHeight; y += xy_stride, ay++) // for each output row
        {
            var x = -this.Pad;
            for (var ax = 0; ax < this.OutputWidth; x += xy_stride, ax++) // for each output column
            {
                // convolve centered at this particular location
                var a = 0.0;
                for (var fy = 0; fy < f.SY; fy++) // for each element in the filter height
                {
                    var oy = y + fy; // y coordinate in the original input
                    for (var fx = 0; fx < f.SX; fx++) // for each element in the filter width
                    {
                        var ox = x + fx; // x coordinate in the original input
                        if (oy >= 0 && oy < inputHeight && ox >= 0 && ox < inputWidth)
                        {
                            for (var fd = 0; fd < f.Depth; fd++) // filter depth == input depth
                            {
                                // Direct index math instead of Get/Set to avoid call
                                // overhead: filter (fx, fy, fd) * input (ox, oy, fd).
                                a += f.W[((f.SX * fy) + fx) * f.Depth + fd]
                                   * V.W[((inputWidth * oy) + ox) * V.Depth + fd];
                            }
                        }
                    }
                }
                a += this.Biases.W[d]; // one shared bias per filter
                tempOutput.Set(ax, ay, d, a);
            }
        }
    }
    this.Output = tempOutput;
    return this.Output;
}
/// <summary>
/// LRN forward pass: computes per-element normalizers into S_cache_ (needed by
/// backprop) and the normalized activations into the output volume.
/// </summary>
/// <param name="V">Input volume.</param>
/// <param name="is_training">Unused; present to satisfy the layer interface.</param>
/// <returns>The normalized output volume.</returns>
public Vol Forward(Vol V, bool is_training)
{
    this.in_Act = V;
    var normalized = V.CloneAndZero();
    this.S_cache_ = V.CloneAndZero();
    var halfWindow = Math.Floor(this.n / 2); // half of the depth normalization window
    LRN(V, normalized, halfWindow);
    this.Output = normalized;
    return this.Output;
}
/// <summary>
/// Max-pooling forward pass: for each output cell, takes the maximum over the pooling
/// window and records the winning input coordinates in Switchx/Switchy for backprop.
/// </summary>
/// <param name="V">Input volume.</param>
/// <param name="is_training">Unused; present to satisfy the layer interface.</param>
/// <returns>The pooled output volume.</returns>
public Vol Forward(Vol V, bool is_training)
{
    this.in_Act = V;
    var pooled = new Vol(this.OutputWidth, this.OutputHeight, this.OutputDepth, 0.0);
    var switchIndex = 0; // running index into the switch arrays
    for (var d = 0; d < this.OutputDepth; d++)
    {
        var x = -this.Pad;
        for (var ax = 0; ax < this.OutputWidth; x += this.Stride, ax++)
        {
            var y = -this.Pad;
            for (var ay = 0; ay < this.OutputHeight; y += this.Stride, ay++)
            {
                // Scan the pooling window centered at this location.
                double best = -99999; // hopefully smaller than any activation
                var bestX = -1;
                var bestY = -1;
                for (var fx = 0; fx < this.SX; fx++)
                {
                    for (var fy = 0; fy < this.SY; fy++)
                    {
                        var ox = x + fx;
                        var oy = y + fy;
                        if (oy >= 0 && oy < V.SY && ox >= 0 && ox < V.SX)
                        {
                            var candidate = V.Get(ox, oy, d);
                            if (candidate > best)
                            {
                                // Remember where the max came from; speeds up backprop
                                // and enables visualizations.
                                best = candidate;
                                bestX = ox;
                                bestY = oy;
                            }
                        }
                    }
                }
                this.Switchx[switchIndex] = bestX;
                this.Switchy[switchIndex] = bestY;
                switchIndex++;
                pooled.Set(ax, ay, d, best);
            }
        }
    }
    this.Output = pooled;
    return this.Output;
}
/// <summary>Element-wise sigmoid activation: out[i] = 1 / (1 + e^(-in[i])).</summary>
/// <param name="V">Input volume.</param>
/// <param name="is_training">Unused; present to satisfy the layer interface.</param>
/// <returns>A new volume holding the sigmoid of each input weight.</returns>
public Vol Forward(Vol V, bool is_training)
{
    this.in_Act = V;
    var result = V.CloneAndZero();
    var count = V.W.Length;
    for (var i = 0; i < count; i++)
    {
        result.W[i] = 1.0 / (1.0 + Math.Exp(-V.W[i]));
    }
    this.Output = result;
    return this.Output;
}
/// <summary>
/// Pushes V through every layer in order, raising the ForwardLayer event after each
/// layer past the first (matching the original behavior: no event for Layers[0]),
/// then through the loss layer.
/// </summary>
/// <param name="V">Network input volume.</param>
/// <param name="is_training">Propagated to each layer (e.g. enables dropout).</param>
/// <returns>The activation produced by the loss layer.</returns>
public Vol Forward(Vol V, bool is_training)
{
    var act = this.Layers[0].Forward(V, is_training);
    for (int i = 1; i < this.Layers.Count; i++)
    {
        act = this.Layers[i].Forward(act, is_training);
        // IDIOM: null-conditional invoke replaces the explicit null check, and
        // EventArgs.Empty avoids allocating a fresh empty EventArgs on every layer.
        this.ForwardLayer?.Invoke(this.Layers[i], EventArgs.Empty);
    }
    act = this.LossLayer.Forward(act, is_training);
    return act;
}
/// <summary>Element-wise hyperbolic-tangent activation (result narrowed to float).</summary>
/// <param name="V">Input volume.</param>
/// <param name="is_training">Unused; present to satisfy the layer interface.</param>
/// <returns>A new volume holding tanh of each input weight.</returns>
public Vol Forward(Vol V, bool is_training)
{
    this.input = V;
    var result = V.CloneAndZero();
    var count = V.W.Length;
    for (var i = 0; i < count; i++)
    {
        result.W[i] = (float)Math.Tanh(V.W[i]);
    }
    this.Output = result;
    return this.Output;
}
/// <summary>
/// Max-pooling core: fills A with the window maxima of V and records the winning
/// input coordinates in Switchx/Switchy for backprop.
/// </summary>
/// <param name="V">Input volume.</param>
/// <param name="is_training">Unused.</param>
/// <param name="A">Output volume receiving the pooled maxima.</param>
private void Conv(Vol V, bool is_training, Vol A)
{
    var switchIndex = 0; // running index into the switch arrays
    for (var d = 0; d < this.OutputDepth; d++)
    {
        for (var ax = 0; ax < this.OutputWidth; ax++)
        {
            var x = -this.Pad + this.Stride * ax;
            for (var ay = 0; ay < this.OutputHeight; ay++)
            {
                var y = -this.Pad + this.Stride * ay;
                // Scan the pooling window centered at this location.
                float best = -99999; // hopefully smaller than any activation
                var bestX = -1;
                var bestY = -1;
                for (var fx = 0; fx < this.KernelWidth; fx++)
                {
                    for (var fy = 0; fy < this.KernelHeight; fy++)
                    {
                        var ox = x + fx;
                        var oy = y + fy;
                        if (oy >= 0 && oy < V.Height && ox >= 0 && ox < V.Width)
                        {
                            var candidate = V.Get(ox, oy, d);
                            if (candidate > best)
                            {
                                // Remember where the max came from; speeds up backprop
                                // and enables visualizations.
                                best = candidate;
                                bestX = ox;
                                bestY = oy;
                            }
                        }
                    }
                }
                this.Switchx[switchIndex] = bestX;
                this.Switchy[switchIndex] = bestY;
                switchIndex++;
                A.Set(ax, ay, d, best);
            }
        }
    }
}
/// <summary>
/// Convolution forward pass (parallel version): allocates the output volume and
/// convolves one filter at a time over the input, with the rows of each filter
/// processed in parallel (see ConvFilter / ConvOverRows).
/// </summary>
/// <param name="V">Input volume of size (Width, Height, Depth).</param>
/// <param name="is_training">Unused here; present to satisfy the layer interface.</param>
/// <returns>The output volume of size (OutputWidth, OutputHeight, OutputDepth).</returns>
public Vol Forward(Vol V, bool is_training)
{
    // CLEANUP: removed the pointless `| 0` operations — a leftover from the JavaScript
    // original where `| 0` truncated to int32; on C# ints they are no-ops.
    this.Input = V;
    var tempOutput = new Vol(this.OutputWidth, this.OutputHeight, this.OutputDepth, 0.0f);
    var inputWidth = V.Width;
    var inputHeight = V.Height;
    var xy_stride = this.Stride;
    for (int d = 0; d < this.OutputDepth; d++)
    {
        ConvFilter(V, tempOutput, inputWidth, inputHeight, xy_stride, d);
    }
    this.Output = tempOutput;
    return this.Output;
}
/// <summary>Element-wise ReLU activation: negative weights are clamped to zero.</summary>
/// <param name="V">Input volume.</param>
/// <param name="is_training">Unused; present to satisfy the layer interface.</param>
/// <returns>A clone of the input with negatives thresholded at zero.</returns>
public Vol Forward(Vol V, bool is_training)
{
    this.in_Act = V;
    var result = V.Clone();
    var w = result.W;
    for (var i = 0; i < w.Length; i++)
    {
        if (w[i] < 0)
        {
            w[i] = 0; // threshold at 0
        }
    }
    this.Output = result;
    return this.Output;
}
/// <summary>
/// Fully-connected forward pass: each output neuron is the dot product of the input
/// weights with that neuron's filter, plus its bias.
/// </summary>
/// <param name="V">Input volume; its first num_inputs weights are consumed.</param>
/// <param name="is_training">Unused; present to satisfy the layer interface.</param>
/// <returns>A 1x1xOutputDepth volume of neuron activations.</returns>
public Vol Forward(Vol V, bool is_training)
{
    this.input = V;
    var activations = new Vol(1, 1, this.OutputDepth, 0.0f);
    for (int neuron = 0; neuron < this.OutputDepth; neuron++)
    {
        var sum = 0.0f;
        var weights = this.Filters[neuron].W;
        for (var d = 0; d < this.num_inputs; d++)
        {
            sum += V.W[d] * weights[d];
        }
        sum += this.Biases.W[neuron];
        activations.W[neuron] = sum;
    }
    this.Output = activations;
    return this.Output;
}
/// <summary>
/// Fully-connected forward pass: each output neuron is the dot product of the input
/// weights with that neuron's filter, plus its bias.
/// </summary>
/// <param name="V">Input volume; its first num_inputs weights are consumed.</param>
/// <param name="is_training">Unused; present to satisfy the layer interface.</param>
/// <returns>A 1x1xOutputDepth volume of neuron activations.</returns>
public Vol Forward(Vol V, bool is_training)
{
    this.in_Act = V;
    var activations = new Vol(1, 1, this.OutputDepth, 0.0);
    for (var neuron = 0; neuron < this.OutputDepth; neuron++)
    {
        var sum = 0.0;
        var weights = this.Filters[neuron].W;
        for (var d = 0; d < this.num_inputs; d++)
        {
            // Use the weight arrays directly for efficiency.
            sum += V.W[d] * weights[d];
        }
        sum += this.Biases.W[neuron];
        activations.W[neuron] = sum;
    }
    this.Output = activations;
    return this.Output;
}
/// <summary>
/// Softmax forward pass: exponentiates the inputs (shifted by their maximum for
/// numerical stability) and normalizes them into a probability distribution.
/// The intermediate exponentials are kept in this.es for backprop.
/// </summary>
/// <param name="V">Input volume holding the raw class scores.</param>
/// <param name="is_training">Unused; present to satisfy the layer interface.</param>
/// <returns>A 1x1xOut_Depth volume of class probabilities summing to one.</returns>
public Vol Forward(Vol V, bool is_training)
{
    this.in_Act = V;
    var probs = new Vol(1, 1, this.Out_Depth, 0.0);
    var scores = V.W;

    // Find the maximum activation so the exponentials cannot overflow.
    var maxScore = scores[0];
    for (var i = 1; i < this.Out_Depth; i++)
    {
        if (scores[i] > maxScore)
        {
            maxScore = scores[i];
        }
    }

    // Exponentiate (shifted by the max) and accumulate the normalizer.
    var exponentials = new double[this.Out_Depth];
    var total = 0.0;
    for (var i = 0; i < this.Out_Depth; i++)
    {
        var e = Math.Exp(scores[i] - maxScore);
        total += e;
        exponentials[i] = e;
    }

    // Normalize so the outputs sum to one.
    for (var i = 0; i < this.Out_Depth; i++)
    {
        exponentials[i] /= total;
        probs.W[i] = exponentials[i];
    }

    this.es = exponentials; // save these for backprop
    this.Output = probs;
    return this.Output;
}
/// <summary>
/// Greedy policy: evaluates the value network on state s and returns the action with
/// the highest predicted value together with that value.
/// </summary>
/// <param name="s">State vector; length is presumably net_inputs — TODO confirm callers.</param>
/// <returns>A Policy holding the argmax action index and its estimated value.</returns>
public Policy policy(double[] s)
{
    // Wrap the raw state in a 1x1xN volume and run it through the value network.
    var stateVol = new Vol(1, 1, this.net_inputs);
    stateVol.W = s;
    var actionValues = this.value_net.Forward(stateVol, false);

    // Argmax over the predicted action values.
    var bestAction = 0;
    var bestValue = actionValues.W[0];
    for (var k = 1; k < this.num_actions; k++)
    {
        if (actionValues.W[k] > bestValue)
        {
            bestAction = k;
            bestValue = actionValues.W[k];
        }
    }

    return new Policy() { action = bestAction, value = bestValue };
}
/// <summary>
/// LRN forward for one column x: for each (y, depth i), sums squared activations over
/// the depth window [i - n2, i + n2], caches the raw normalizer in S_cache_ (needed by
/// backprop), and writes the normalized activation into A.
/// </summary>
/// <param name="V">Input volume.</param>
/// <param name="A">Output volume receiving normalized activations.</param>
/// <param name="n2">Half-window size; the window is clamped to the depth range.</param>
/// <param name="x">Column index to process.</param>
private void LRNforWidth(Vol V, Vol A, double n2, int x)
{
    for (var y = 0; y < V.SY; y++)
    {
        for (var i = 0; i < V.Depth; i++)
        {
            var activation = V.Get(x, y, i);
            // Sum of squares over the depth window of size n.
            var normalizer = 0.0;
            for (var j = Math.Max(0, i - n2); j <= Math.Min(i + n2, V.Depth - 1); j++)
            {
                var neighbor = V.Get(x, y, (int)j);
                normalizer += neighbor * neighbor;
            }
            normalizer *= this.alpha / this.n;
            normalizer += this.k;
            this.S_cache_.Set(x, y, i, normalizer); // will be useful for backprop
            normalizer = Math.Pow(normalizer, this.beta);
            A.Set(x, y, i, activation / normalizer);
        }
    }
}
/// <summary>
/// Convolves filter d over a single output row ay: for each output column, accumulates
/// filter-weight x input-pixel products over the filter window (skipping out-of-bounds
/// padded positions), adds the filter's shared bias, and writes the result into tempOutput.
/// </summary>
/// <param name="V">Input volume.</param>
/// <param name="tempOutput">Output volume being filled.</param>
/// <param name="inputWidth">Cached V.Width.</param>
/// <param name="inputHeight">Cached V.Height.</param>
/// <param name="xy_stride">Stride in both x and y.</param>
/// <param name="d">Filter / output-depth index.</param>
/// <param name="ay">Output row to compute.</param>
private void ConvOverRows(Vol V, Vol tempOutput, int inputWidth, int inputHeight, int xy_stride, int d, int ay)
{
    // CLEANUP: removed the pointless `| 0` — a no-op JavaScript leftover on C# ints.
    var y = -this.Pad + (xy_stride * ay);
    var f = this.Filters[d];
    for (var ax = 0; ax < this.OutputWidth; ax++) // for each output column
    {
        var x = -this.Pad + (xy_stride * ax);
        // convolve centered at this particular location
        var a = 0.0;
        for (var fy = 0; fy < f.Height; fy++) // for each element in the filter height
        {
            var oy = y + fy; // y coordinate in the original input
            for (var fx = 0; fx < f.Width; fx++) // for each element in the filter width
            {
                var ox = x + fx; // x coordinate in the original input
                if (oy >= 0 && oy < inputHeight && ox >= 0 && ox < inputWidth)
                {
                    for (var fd = 0; fd < f.Depth; fd++) // filter depth == input depth
                    {
                        // Direct index math instead of Get/Set to avoid call overhead:
                        // filter (fx, fy, fd) * input (ox, oy, fd).
                        a += f.W[((f.Width * fy) + fx) * f.Depth + fd]
                           * V.W[((inputWidth * oy) + ox) * V.Depth + fd];
                    }
                }
            }
        }
        a += this.Biases.W[d]; // one shared bias per filter
        tempOutput.Set(ax, ay, d, (float)a);
    }
}