Example #1
        protected override Tensor call(Tensor inputs, Tensor training = null)
        {
            Tensor outputs = null;
            var    rank    = inputs.rank;

            if (rank > 2)
            {
                throw new NotImplementedException("call rank > 2");
            }
            else
            {
                outputs = gen_math_ops.mat_mul(inputs, kernel);
            }

            if (use_bias)
            {
                outputs = tf.nn.bias_add(outputs, bias);
            }
            if (activation != null)
            {
                return(activation.Activate(outputs));
            }

            return(outputs);
        }
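For the rank-2 path, this method computes activation(inputs · kernel + bias). A plain-array sketch of the same math (the names below are illustrative, not the TensorFlow.NET API):

        using System;

        static class DenseSketch
        {
            // outputs[b, u] = activation(sum_k inputs[b, k] * kernel[k, u] + bias[u])
            public static double[,] Dense(double[,] inputs, double[,] kernel,
                                          double[] bias, Func<double, double> activation)
            {
                int batch    = inputs.GetLength(0);
                int units    = kernel.GetLength(1);
                int features = kernel.GetLength(0);
                var outputs  = new double[batch, units];

                for (int b = 0; b < batch; b++)
                {
                    for (int u = 0; u < units; u++)
                    {
                        double acc = bias[u];                       // bias_add
                        for (int k = 0; k < features; k++)
                        {
                            acc += inputs[b, k] * kernel[k, u];     // mat_mul
                        }
                        outputs[b, u] = activation(acc);            // Activate
                    }
                }
                return outputs;
            }
        }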
        public double[] Activate(double[] threshold)
        {
            double[] activation = new double[threshold.Length];
            switch (type)
            {
            case Type.Softmax:
                // Softmax: exponentiate each input, then normalize by the
                // running sum so the outputs form a probability distribution.
                double[] exp = new double[activation.Length];
                double   sum = 0;
                for (int i = 0; i < activation.Length; i++)
                {
                    sum += exp[i] = Math.Exp(threshold[i]);
                }
                for (int i = 0; i < activation.Length; i++)
                {
                    activation[i] = exp[i] / sum;
                }
                break;

            default:
                // All other types apply their activation function element-wise.
                for (int i = 0; i < activation.Length; i++)
                {
                    activation[i] = function.Activate(threshold[i]);
                }
                break;
            }
            return(activation);
        }
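The same softmax as a standalone sketch (names are illustrative). Shifting by the maximum before exponentiating avoids overflow on large inputs, a guard the loop above omits:

        using System;
        using System.Linq;

        static class SoftmaxSketch
        {
            public static double[] Softmax(double[] logits)
            {
                double   max = logits.Max();   // shift for numerical stability
                double[] exp = logits.Select(v => Math.Exp(v - max)).ToArray();
                double   sum = exp.Sum();
                return exp.Select(v => v / sum).ToArray();
            }

            static void Main()
            {
                // Outputs sum to 1: {1, 2, 3} -> {0.09, 0.24, 0.67} (rounded).
                Console.WriteLine(string.Join(", ", Softmax(new[] { 1.0, 2.0, 3.0 })));
            }
        }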
Example #3
        protected override Tensor[] call(Tensor inputs, bool training = false, Tensor state = null)
        {
            Tensor outputs = null;
            var    rank    = inputs.rank;

            if (rank > 2)
            {
                throw new NotImplementedException("call rank > 2");
            }
            else
            {
                outputs = gen_math_ops.mat_mul(inputs, kernel.Handle);
            }

            if (use_bias)
            {
                outputs = tf.nn.bias_add(outputs, bias);
            }
            if (activation != null)
            {
                outputs = activation.Activate(outputs);
            }

            // The activated output doubles as the next hidden state (basic RNN cell).
            return(new[] { outputs, outputs });
        }
Example #4
        /// <summary>
        /// Long short-term memory cell (LSTM).
        /// </summary>
        /// <param name="inputs"></param>
        /// <param name="training"></param>
        /// <param name="state"></param>
        /// <returns></returns>
        protected override Tensor[] call(Tensor inputs, Tensor training = null, Tensor state = null)
        {
            var one = constant_op.constant(1, dtype: dtypes.int32);
            // Parameters of gates are concatenated into one multiply for efficiency.
            Tensor c = null;
            Tensor h = null;

            if (_state_is_tuple)
            {
                (c, h) = ((Tensor)_state.c, (Tensor)_state.h);
            }
            else
            {
                // array_ops.split(value: state, num_or_size_splits: 2, axis: one);
                throw new NotImplementedException("BasicLstmCell call");
            }
            var gate_inputs = math_ops.matmul(array_ops.concat(new[] { inputs, h }, 1), _kernel as RefVariable);

            gate_inputs = nn_ops.bias_add(gate_inputs, _bias as RefVariable);

            // i = input_gate, j = new_input, f = forget_gate, o = output_gate
            var tensors = array_ops.split(value: gate_inputs, num_or_size_splits: 4, axis: one);

            var (i, j, f, o) = (tensors[0], tensors[1], tensors[2], tensors[3]);

            var forget_bias_tensor = constant_op.constant(_forget_bias, dtype: f.dtype);
            // Note that using `add` and `multiply` instead of `+` and `*` gives a
            // performance improvement. So using those at the cost of readability.
            var new_c = gen_math_ops.add(
                math_ops.multiply(c, math_ops.sigmoid(gen_math_ops.add(f, forget_bias_tensor))),
                math_ops.multiply(math_ops.sigmoid(i), _activation.Activate(j)));

            var new_h = math_ops.multiply(_activation.Activate(new_c), math_ops.sigmoid(o));

            if (_state_is_tuple)
            {
                return new[] { new_c, new_h };
            }
            else
            {
                return new[] { array_ops.concat(new[] { new_c, new_h }, 1) };
            }
        }
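In scalar form, the update above is new_c = c·σ(f + forget_bias) + σ(i)·act(j) and new_h = act(new_c)·σ(o), where σ is the logistic sigmoid. A minimal scalar sketch, assuming _activation is tanh (the usual default):

        using System;

        static class LstmSketch
        {
            static double Sigmoid(double x) => 1.0 / (1.0 + Math.Exp(-x));

            // One cell update per element, mirroring the tensor math above;
            // tanh stands in for _activation (an assumption).
            public static (double newC, double newH) Step(
                double c, double i, double j, double f, double o, double forgetBias)
            {
                double newC = c * Sigmoid(f + forgetBias) + Sigmoid(i) * Math.Tanh(j);
                double newH = Math.Tanh(newC) * Sigmoid(o);
                return (newC, newH);
            }
        }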
Example #5
        public Tensor __call__(Tensor x)
        {
            var dot = tf.matmul(x, W);

            if (this.activation != null)
            {
                dot = activation.Activate(dot);
            }
            Console.WriteLine("Calling Layer \"" + name + "(" + np.array(dot.TensorShape.dims).ToString() + ")\" ...");
            return(dot);
        }
			public virtual float Activate(float[] inputs, IActivation act) 
			{
				float preactivation = 0f;
				for (int i = 0; i < inputs.Length; i++)
				{
					preactivation += inputs[i] * weights[i];
				}
				preactivation += bias;
				output = act.Activate(preactivation);
				return output;
			}
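A hedged usage sketch for the neuron above; the Sigmoid class, the field initialization, and the weights are assumptions, since the original only shows the Activate method:

        using System;

        var neuron = new Neuron { weights = new[] { 0.5f, -0.25f }, bias = 0.1f };
        // preactivation = 1.0*0.5 + 2.0*(-0.25) + 0.1 = 0.1, so y = sigmoid(0.1) ≈ 0.525
        float y = neuron.Activate(new[] { 1.0f, 2.0f }, new Sigmoid());
        Console.WriteLine(y);

        interface IActivation { float Activate(float x); }

        class Sigmoid : IActivation
        {
            public float Activate(float x) => 1f / (1f + (float)Math.Exp(-x));
        }

        class Neuron
        {
            public float[] weights;
            public float   bias;
            public float   output;

            public float Activate(float[] inputs, IActivation act)
            {
                float preactivation = 0f;
                for (int i = 0; i < inputs.Length; i++)
                {
                    preactivation += inputs[i] * weights[i];
                }
                preactivation += bias;
                output = act.Activate(preactivation);
                return output;
            }
        }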
Example #7
        public Matrix<double> Forward(Matrix<double> A_prev)
        {
            // Build the bias matrix lazily on the first forward pass.
            if (B == null)
            {
                B = BuildBiasMatrix(initial_b);
            }

            // Affine transform, then the layer's activation: A = g(W·A_prev + B).
            Z = W * A_prev + B;
            A = _activation.Activate(Z);

            return(A);
        }
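A hedged usage sketch of the same forward step, assuming Matrix&lt;double&gt; comes from MathNet.Numerics (the shapes, values, and the sigmoid stand-in for _activation are illustrative):

        using System;
        using MathNet.Numerics.LinearAlgebra;

        class ForwardSketch
        {
            static void Main()
            {
                // Illustrative shapes: 2 units, 3 inputs, a single column-vector sample.
                var W      = Matrix<double>.Build.Dense(2, 3, (i, j) => 0.1 * (i + j + 1));
                var B      = Matrix<double>.Build.Dense(2, 1, 0.5);
                var A_prev = Matrix<double>.Build.Dense(3, 1, 1.0);

                // Same computation as Forward above: Z = W * A_prev + B, then A = g(Z).
                var Z = W * A_prev + B;
                var A = Z.Map(z => 1.0 / (1.0 + Math.Exp(-z))); // sigmoid stand-in
                Console.WriteLine(A);
            }
        }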
Example #8
        public Matrix Activate(Matrix x)
        {
            if (!x.IsVector())
            {
                throw new ArgumentException("Activate expects a column vector.", nameof(x));
            }
            Matrix ret = new Matrix(x.GetRowCount(), x.GetColumnCount());

            // Apply the activation function element-wise down the column.
            for (int i = 0; i < ret.GetRowCount(); i++)
            {
                ret[i, 0] = function.Activate(x[i, 0]);
            }
            return(ret);
        }
        // Example of how to create a neuron layer from scratch; prefer tf.layers.dense in practice.
        public static Tensor NeuronLayer(Tensor X, int nNeurons, string name, IActivation activation = null)
        {
            using (tf.name_scope(name))
            {
                // Weights drawn from a truncated normal with stddev 2/sqrt(fan-in)
                // to keep early activations in a reasonable range.
                int         nInputs = X.shape[1];
                NDArray     stddev  = 2 / np.sqrt(nInputs);
                Tensor      init    = tf.truncated_normal(new[] { nInputs, nNeurons }, stddev: stddev);
                RefVariable W       = tf.Variable(init, name: "kernel");
                RefVariable b       = tf.Variable(tf.zeros(new[] { nNeurons }), name: "bias");
                Tensor      Z       = tf.matmul(X, W) + b;

                if (activation != null)
                {
                    return(activation.Activate(Z));
                }

                return(Z);
            }
        }
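A hedged usage sketch for NeuronLayer, assuming TensorFlow.NET's graph-mode placeholder API; the input shape and layer sizes are illustrative, and activation is left null (linear):

        // Sketch only: stacks two layers with the helper above.
        var X      = tf.placeholder(tf.float32, shape: new TensorShape(-1, 28 * 28), name: "X");
        var hidden = NeuronLayer(X, 300, "hidden1");
        var logits = NeuronLayer(hidden, 10, "outputs");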
Example #10
        protected override Tensor call(Tensor inputs, Tensor training = null)
        {
            var outputs = _convolution_op.__call__(inputs, kernel);

            if (use_bias)
            {
                if (data_format == "channels_first")
                {
                    throw new NotImplementedException("call channels_first");
                }
                else
                {
                    outputs = nn_ops.bias_add(outputs, bias, data_format: "NHWC");
                }
            }

            if (activation != null)
            {
                return(activation.Activate(outputs));
            }

            return(outputs);
        }
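The chain above is convolution, then bias_add, then activation. A 1-D, single-channel sketch of that chain in plain arrays (names are illustrative; the real op is a multi-channel 2-D convolution):

        using System;

        static class ConvSketch
        {
            // 1-D "valid" cross-correlation, then bias and activation,
            // mirroring the conv -> bias_add -> Activate chain above.
            public static double[] Conv1D(double[] input, double[] kernel,
                                          double bias, Func<double, double> activation)
            {
                int outLen = input.Length - kernel.Length + 1;
                var output = new double[outLen];

                for (int t = 0; t < outLen; t++)
                {
                    double acc = bias;
                    for (int k = 0; k < kernel.Length; k++)
                    {
                        acc += input[t + k] * kernel[k];
                    }
                    output[t] = activation(acc);
                }
                return output;
            }
        }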
Example #11
 internal double Activate(double x)
 {
     return(function.Activate(x));
 }