/// <summary>
/// Builds a dueling Q-network: a shared dense trunk whose hidden output is split
/// in half into an advantage stream and a value stream, then recombined as
/// Q(s,a) = V(s) + (A(s,a) - mean(A)).
/// </summary>
/// <param name="stateSize">Dimensionality of the observation/state input.</param>
/// <param name="actionSize">Number of discrete actions (size of the Q output).</param>
/// <param name="numLayers">Number of hidden dense layers in the shared trunk.</param>
/// <param name="hiddenSize">Width of each hidden layer; split 50/50 into the two streams.</param>
/// <param name="device">CNTK device the network parameters live on.</param>
/// <param name="initialWeightScale">Scale used to initialize dense-layer weights.</param>
public QNetworkSimple(int stateSize, int actionSize, int numLayers, int hiddenSize, DeviceDescriptor device, float initialWeightScale = 0.01f)
{
    Device = device;
    StateSize = stateSize;
    ActionSize = actionSize;

    // Shared trunk: ReLU dense layers feeding a bias-free hidden output layer.
    var stateInput = new InputLayerDense(stateSize);
    var trunkOutput = new OutputLayerDense(hiddenSize, null, OutputLayerDense.LossFunction.None);
    trunkOutput.HasBias = false;
    trunkOutput.InitialWeightScale = initialWeightScale;
    var trunk = new SequentialNetworkDense(
        stateInput,
        LayerDefineHelper.DenseLayers(numLayers, hiddenSize, false, NormalizationMethod.None, 0, initialWeightScale, new ReluDef()),
        trunkOutput,
        device);

    // Dueling architecture: slice the hidden vector into two halves along axis 0.
    // First half -> advantage stream, second half -> value stream.
    var hidden = trunkOutput.GetOutputVariable();
    var advantageHalf = CNTKLib.Slice(hidden, AxisVector.Repeat(new Axis(0), 1), IntVector.Repeat(0, 1), IntVector.Repeat(hiddenSize / 2, 1));
    var valueHalf = CNTKLib.Slice(hidden, AxisVector.Repeat(new Axis(0), 1), IntVector.Repeat(hiddenSize / 2, 1), IntVector.Repeat(hiddenSize, 1));
    var advantage = Layers.Dense(advantageHalf, actionSize, device, false, "QNetworkAdvantage", initialWeightScale);
    var value = Layers.Dense(valueHalf, 1, device, false, "QNetworkValue", initialWeightScale);

    InputState = stateInput.InputVariable;
    // Recombine: subtract the mean advantage so V and A are identifiable.
    OutputQs = value.Output + CNTKLib.Minus(advantage, CNTKLib.ReduceMean(advantage, Axis.AllStaticAxes())).Output;
}
/// <summary>
/// Scratch harness comparing a raw CNTK axis-0 slice of a 6x3 parameter against
/// the SiaNet backend's SliceRows on the same data, then waits for a key press.
/// </summary>
static void Main(string[] args)
{
    // Raw CNTK path: build a 6x3 float parameter and slice rows [1, 3) on axis 0.
    int[] shape = { 6, 3 };
    float[] data = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    var arrayView = new NDArrayView(shape, data, DeviceDescriptor.CPUDevice);
    var variable = new Variable(shape, VariableKind.Parameter, CNTK.DataType.Float, arrayView, false, new AxisVector(), false, "", "");
    var slicedData = CNTKLib.Slice(variable, AxisVector.Repeat(new Axis(0), 1), IntVector.Repeat(1, 1), IntVector.Repeat(3, 1));
    var resultArray = GetArray(slicedData);

    // SiaNet backend path: same values through the Tensor API.
    Global.UseEngine(SiaNet.Backend.CNTKLib.SiaNetBackend.Instance, DeviceType.CPU);
    var K = Global.CurrentBackend;
    var a = K.CreateVariable(new float[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, new long[] { 6, 3 });
    a.Print();
    var sliced = K.SliceRows(a, 1, 2);

    Console.ReadLine();
}
// Builds a dense head whose single output vector packs a prediction (first
// HiddenSize-1 units) and a variance estimate (last unit, squared so it stays
// non-negative), plus a Gaussian-NLL-shaped loss against targetInput.
protected override Function BuildNetwork(Variable input, DeviceDescriptor device, string name)
{
    var c1 = UnityCNTK.Layers.Dense(input, HiddenSize, device, true, name + ".Dense", InitialWeightScale);
    // First HiddenSize-1 units of the dense output: the result/mean prediction.
    resultOutput = CNTKLib.Slice(c1, AxisVector.Repeat(new Axis(0), 1), IntVector.Repeat(0, 1), IntVector.Repeat(HiddenSize - 1, 1));
    // Last unit, squared: the variance estimate (always >= 0).
    varianceOutput = CNTKLib.Square(CNTKLib.Slice(c1, AxisVector.Repeat(new Axis(0), 1), IntVector.Repeat(HiddenSize - 1, 1), IntVector.Repeat(HiddenSize, 1)));
    // Training target placeholder with the same shape/dtype as the prediction.
    targetInput = CNTKLib.InputVariable(resultOutput.Shape, resultOutput.DataType, name + ".TargetInput");
    // NOTE(review): this takes the SQRT of the squared error before dividing by
    // 2*variance, and the log-variance term is multiplied by 1 (a no-op); a
    // standard Gaussian NLL would use the squared error itself and a 0.5
    // coefficient on log(variance). Confirm both choices are intentional before
    // "fixing" — changing them alters training behavior.
    var squareErrorRoot = CNTKLib.Sqrt(CNTKLib.SquaredError(resultOutput, targetInput));
    var l = CNTKLib.ElementDivide(squareErrorRoot, CNTKLib.ElementTimes(Constant.Scalar(DataType.Float, 2), varianceOutput));
    lossOutput = l.Output + CNTKLib.ElementTimes(CNTKLib.Log(varianceOutput), Constant.Scalar(DataType.Float, 1));
    //lossOutput = squareError;//test
    // Register this layer's trainable parameter names for lookup elsewhere.
    ParameterNames.Add(ParamTypeToName(DenseParamType.Weight));
    ParameterNames.Add(ParamTypeToName(DenseParamType.Bias));
    return(lossOutput);
}
/// <summary>
/// Slices <paramref name="x"/> along axis 0, keeping the rows from
/// <paramref name="start"/> to <paramref name="end"/>.
/// </summary>
/// <param name="x">Input tensor to slice.</param>
/// <param name="start">First row offset of the slice.</param>
/// <param name="end">End row offset of the slice.</param>
/// <returns>The sliced tensor, wrapped back into the backend's Tensor type.</returns>
public Tensor SliceRows(Tensor x, long start, long end)
{
    var axes = AxisVector.Repeat(new Axis(0), 1);
    var beginOffsets = IntVector.Repeat((int)start, 1);
    var endOffsets = IntVector.Repeat((int)end, 1);
    return Out(C.Slice(In(x), axes, beginOffsets, endOffsets));
}