public static async Task Aggregator(
    FPGA.InputSignal<bool> RXD,
    FPGA.OutputSignal<bool> TXD)
{
    Sequential handler = () =>
    {
        byte data = 0;
        UART.Read(115200, RXD, out data);

        byte result = 0;
        TestMethod(data, out result);

        UART.Write(115200, result, TXD);
    };

    const bool trigger = true;
    FPGA.Config.OnSignal(trigger, handler);
}
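For bench-testing a request/response loop like this one, a small host-side harness can drive the UART from .NET. The sketch below is hypothetical: it assumes the board enumerates as COM3 and that TestMethod produces one response byte per request byte.

using System;
using System.IO.Ports;

static class UartHarness
{
    static void Main()
    {
        // Hypothetical port name; adjust for your board's USB-UART bridge.
        using var port = new SerialPort("COM3", 115200, Parity.None, 8, StopBits.One);
        port.ReadTimeout = 2000;
        port.Open();

        byte[] request = { 0x2A };
        port.Write(request, 0, 1);       // send one byte to the Aggregator loop

        int response = port.ReadByte();  // read the byte produced by TestMethod
        Console.WriteLine($"Sent 0x{request[0]:X2}, received 0x{response:X2}");
    }
}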
static void PredictMnist(string modelPath, string xtestPath, string ytestPath = null)
{
    var xtest = TensorUtils.Deserialize(File.OpenRead(xtestPath));
    xtest = xtest.Cast(DType.Float32);
    xtest = Ops.Div(null, xtest, 255f); // scale pixel values to [0, 1]
    // xtest = xtest.Narrow(0, 0, 101);

    var model = Sequential.Load(modelPath);
    var result = model.Predict(xtest, batchSize: 32);

    if (ytestPath == null)
    {
        return;
    }

    var ytest = TensorUtils.Deserialize(File.OpenRead(ytestPath));
    ytest = ytest.Cast(DType.Float32);
    // ytest = ytest.Narrow(0, 0, 101);
    ytest = Ops.Argmax(null, ytest, 1).Squeeze();

    var t = result.Narrow(0, 0, 11);
    // Console.WriteLine(t.Format());
    result = Ops.Argmax(null, result, 1).Squeeze();
    t = result.Narrow(0, 0, 11);
    // Console.WriteLine(t.Format());

    double sum = 0.0;
    for (var i = 0; i < ytest.Sizes[0]; ++i)
    {
        sum += (int)ytest.GetElementAsFloat(i) == (int)result.GetElementAsFloat(i) ? 1.0 : 0.0;
    }

    Console.WriteLine($"Accuracy: {sum / ytest.Sizes[0] * 100}%");
}
public static async Task Aggregator(
    FPGA.InputSignal<bool> RXD,
    FPGA.OutputSignal<bool> TXD)
{
    Sequential handler = () =>
    {
        while (true)
        {
            // Wait for any start byte before running one sweep.
            byte start = 0;
            UART.Read(115200, RXD, out start);

            for (uint counter = 0; counter < 100; counter++)
            {
                ulong fib = 0;
                SequentialMath.Calculators.Fibonacci(counter, out fib);
                if (fib > uint.MaxValue)
                {
                    break;
                }

                bool isPrime = false;
                SequentialMath.Calculators.IsPrime((uint)fib, out isPrime);
                if (isPrime)
                {
                    // Emit each prime Fibonacci number as 4 bytes, least significant first.
                    for (byte i = 0; i < 4; i++)
                    {
                        byte data = (byte)fib;
                        UART.Write(115200, data, TXD);
                        fib = fib >> 8;
                    }
                }
            }
        }
    };

    const bool trigger = true;
    FPGA.Config.OnSignal(trigger, handler);
}
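A matching host-side reader can decode the 4-byte little-endian values this loop emits. This is a hypothetical harness, again assuming COM3; note that BitConverter matches the LSB-first wire order only on little-endian hosts (x86/x64).

using System;
using System.IO.Ports;

static class FibPrimeReader
{
    static void Main()
    {
        // Hypothetical port name; the FPGA loop starts after receiving any byte.
        using var port = new SerialPort("COM3", 115200);
        port.ReadTimeout = 5000;
        port.Open();

        port.Write(new byte[] { 0x01 }, 0, 1); // kick off one pass of the loop

        var buffer = new byte[4];
        try
        {
            while (true)
            {
                // Each prime Fibonacci number arrives as 4 bytes, least significant first.
                for (int i = 0; i < 4; i++)
                {
                    buffer[i] = (byte)port.ReadByte();
                }
                Console.WriteLine(BitConverter.ToUInt32(buffer, 0));
            }
        }
        catch (TimeoutException)
        {
            // No more data: the FPGA loop is waiting for the next start byte.
        }
    }
}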
public void ConvolutionalMnist()
{
    // Load Data
    var lines = File.ReadAllLines("..\\..\\..\\..\\..\\datasets\\mnist_train.csv").ToList();
    var mnistLabels = new List<Matrix>();
    var mnistData = new List<Matrix>();

    for (var j = 1; j < lines.Count; j++)
    {
        var t = lines[j];
        var data = t.Split(',').ToList();
        mnistLabels.Add(new Matrix(1, 1).Fill(int.Parse(data[0])));

        var mnist = new Matrix(784, 1);
        for (var i = 1; i < data.Count; i++)
        {
            mnist[i - 1, 0] = double.Parse(data[i]);
        }
        mnistData.Add(mnist);
    }

    // Create Network
    var net = new Sequential(new QuadraticCost(), new NesterovMomentum(0.03));
    net.CreateLayer(new Convolutional(784, new Average(), new Tanh()));
    net.CreateLayer(new Convolutional(100, new Average(), new Tanh()));
    net.CreateLayer(new Dense(100, new Tanh()));
    net.CreateLayer(new Output(10, new Softmax()));
    net.InitNetwork();

    // Train Network
    for (var i = 0; i < 800; i++)
    {
        net.Train(mnistData[i % mnistData.Count], mnistLabels[i % mnistData.Count]);
    }

    // Write Acc Result
    // Trace.WriteLine(" Metrics Accuracy: " + acc);
    // Assert.IsTrue(acc > 80.0, "Network did not learn MNIST");
}
public static void Predict(string text)
{
    var model = Sequential.LoadModel("model.h5");
    string result = "";

    float[,] tokens = new float[1, max_words];
    string[] words = TextUtil.TextToWordSequence(text).Take(max_words).ToArray();
    for (int i = 0; i < words.Length; i++)
    {
        // Unknown words map to index 0.
        tokens[0, i] = indexesByFrequency.ContainsKey(words[i]) ? (float)indexesByFrequency[words[i]] : 0f;
    }

    NDarray x = np.array(tokens);
    var y = model.Predict(x);
    var binary = Math.Round(y[0].asscalar<float>());
    result = binary == 0 ? "OK, not toxic" : "TOXIC";
    Console.WriteLine($"Result for \"{text}\": {result}, score: {y[0].asscalar<float>()}");
}
public void sequential_guide_training_1()
{
    // For a single-input model with 2 classes (binary classification):
    var model = new Sequential();
    model.Add(new Dense(32, activation: "relu", input_dim: 100));
    model.Add(new Dense(1, activation: "sigmoid"));
    model.Compile(optimizer: "rmsprop", loss: "binary_crossentropy", metrics: new[] { "accuracy" });

    // Generate dummy data with binary labels (0 or 1)
    double[,] data = Accord.Math.Matrix.Random(1000, 100);
    int[] labels = Accord.Math.Vector.Random(1000, min: 0, max: 2);

    // Train the model, iterating on the data in batches of 32 samples
    model.fit(data, labels, epochs: 10, batch_size: 32);
}
public static async Task Aggregator(
    FPGA.InputSignal<bool> RXD,
    FPGA.OutputSignal<bool> TXD)
{
    Sequential mainHandler = () =>
    {
        byte seed = 0;
        UART.Read(115200, RXD, out seed);

        int sum = 0;
        Handler(seed, out sum);

        UART.Write(115200, (byte)sum, TXD);
    };

    const bool trigger = true;
    FPGA.Config.OnSignal(trigger, mainHandler);
}
/// <inheritdoc />
public override int GetHashCode()
{
    unchecked // Overflow is fine, just wrap
    {
        var hashCode = 41;
        if (Sequential != null)
        {
            hashCode = hashCode * 59 + Sequential.GetHashCode();
        }
        if (SequentialMinus != null)
        {
            hashCode = hashCode * 59 + SequentialMinus.GetHashCode();
        }
        if (Diverging != null)
        {
            hashCode = hashCode * 59 + Diverging.GetHashCode();
        }
        return hashCode;
    }
}
static void SummarizePerformance(int epoch, Sequential generator, Sequential discriminator, NDarray dataset, int latentDim, int sampleCount = 50)
{
    var real = GenerateRealSamples(dataset, sampleCount);
    var realAcc = discriminator.Evaluate(real.Item1, real.Item2, verbose: 0);

    var fake = GenerateFakeGeneratorSamples(generator, latentDim, sampleCount);
    var fakeAcc = discriminator.Evaluate(fake.Item1, fake.Item2, verbose: 0);
    Console.WriteLine("Accuracy real: \t " + realAcc.Last() * 100 + "% \t fake:" + fakeAcc.Last() * 100 + "%");

    // Rescale generated samples from [-1, 1] to [0, 1] before saving as images.
    var fakes = fake.Item1;
    fakes = (fakes + 1) / 2;
    for (int i = 0; i < sampleCount; i++)
    {
        SaveArrayAsImage(fakes[i], "output/gantest_" + epoch + "_" + i + ".png");
    }

    generator.Save("output/generator" + epoch + ".h5");
}
public void sequential_guide_training_2()
{
    // For a single-input model with 10 classes (categorical classification):
    var model = new Sequential();
    model.Add(new Dense(32, activation: "relu", input_dim: 100));
    model.Add(new Dense(10, activation: "softmax"));
    model.Compile(optimizer: "rmsprop", loss: "categorical_crossentropy", metrics: new[] { "accuracy" });

    // Generate dummy data
    double[][] data = Accord.Math.Jagged.Random(1000, 100);
    int[] labels = Accord.Math.Vector.Random(1000, min: 0, max: 10);

    // Convert labels to categorical one-hot encoding
    double[][] one_hot_labels = Accord.Math.Jagged.OneHot(labels, columns: 10);

    // Train the model, iterating on the data in batches of 32 samples
    model.fit(data, one_hot_labels, epochs: 10, batch_size: 32);
}
public static void Init()
{
    try
    {
        using (Py.GIL())
        {
            if (model == null)
            {
                string path = Directory.GetCurrentDirectory();
                model = Sequential.ModelFromJson(File.ReadAllText(path + MODEL_PATH));
                model.LoadWeight(path + WEIGHTS_PATH);
            }
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.Message);
        throw;
    }
}
public static void DeserializeFromUART<T>(
    ref T obj,
    FPGA.InputSignal<bool> RXD,
    FPGA.Signal<bool> deserialized) where T : new()
{
    byte data = 0;
    FPGA.Config.JSONDeserializer(obj, data, deserialized);

    const bool trigger = true;
    Sequential uartHandler = () =>
    {
        // Keep feeding received bytes into the JSON deserializer.
        while (true)
        {
            UART.Read(115200, RXD, out data);
        }
    };
    FPGA.Config.OnSignal(trigger, uartHandler);
}
// Evaluate the model on the test set. Prints the total number of test samples that are classified correctly
private static void EvaluateModel(Sequential model, DataSet testingSet, int numInputs)
{
    float totalCorrect = 0;
    for (int batchStart = 0; batchStart <= testingSet.inputs.Shape[0] - BatchSize; batchStart += BatchSize)
    {
        using (var mbInputs = testingSet.inputs.Narrow(0, batchStart, BatchSize))
        using (var mbTargetValues = testingSet.targetValues.Narrow(0, batchStart, BatchSize))
        {
            var modelOutput = model.Forward(mbInputs, ModelMode.Evaluate);
            totalCorrect += (modelOutput.TVar().Argmax(1) == mbTargetValues)
                .SumAll()
                .ToScalar()
                .Evaluate();
        }
    }
    Console.WriteLine("Test set total correct: " + totalCorrect + " / " + testingSet.inputs.Shape[0]);
}
public void dense_example_1()
{
    #region doc_dense_example_1
    // as first layer in a sequential model:
    var model = new Sequential();
    model.Add(new Dense(32, input_shape: new int?[] { 16 }));
    // now the model will take as input arrays of shape (*, 16)
    // and output arrays of shape (*, 32)

    // after the first layer, you don't need to specify
    // the size of the input anymore:
    model.Add(new Dense(32));
    #endregion

    Assert.AreEqual(2, model.layers.Count);
    Assert.AreEqual("dense_1", model.layers[0].name);
    Assert.AreEqual(new int?[] { null, 16 }, model.layers[0].input_shape[0]);
    Assert.AreEqual("dense_2", model.layers[1].name);
    Assert.AreEqual(new int?[] { null, 32 }, model.layers[1].input_shape[0]);
}
public static void Predict(string text)
{
    var model = Sequential.LoadModel("model.h5");
    string result = "";

    var indexes = IMDB.GetWordIndex();
    // Map each word to its index; fall back to 0 for out-of-vocabulary words
    // (direct indexing would throw KeyNotFoundException on unknown words).
    string[] words = TextUtil.TextToWordSequence(text);
    float[] tokens = words.Select(w => indexes.TryGetValue(w, out var index) ? (float)index : 0f).ToArray();

    NDarray x = np.array(tokens);
    x = x.reshape(1, x.shape[0]);
    x = SequenceUtil.PadSequences(x, maxlen: 500);

    var y = model.Predict(x);
    var binary = Math.Round(y[0].asscalar<float>());
    result = binary == 0 ? "Negative" : "Positive";
    Console.WriteLine("Sentiment for \"{0}\": {1}", text, result);
}
public DQNConv(int[] inputSize, int numberOfActions, float learningRate, float discountFactor, int batchSize, BaseExperienceReplay memory)
    : base(null, numberOfActions, learningRate, discountFactor, batchSize, memory)
{
    Tensor.SetOpMode(Tensor.OpMode.GPU);

    InputSize = inputSize;
    Shape inputShape = new Shape(inputSize[0], inputSize[1], TemporalDataSize);

    Net = new NeuralNetwork("DQNConv");
    var model = new Sequential();
    model.AddLayer(new Convolution(inputShape, 8, 32, 2, Activation.ELU));
    model.AddLayer(new Convolution(model.LastLayer, 4, 64, 2, Activation.ELU));
    model.AddLayer(new Convolution(model.LastLayer, 4, 128, 2, Activation.ELU));
    model.AddLayer(new Flatten(model.LastLayer));
    model.AddLayer(new Dense(model.LastLayer, 512, Activation.ELU));
    model.AddLayer(new Dense(model.LastLayer, numberOfActions, Activation.Softmax));

    Net.Model = model;
    Net.Optimize(new Adam(learningRate), new CustomHuberLoss(ImportanceSamplingWeights));
}
// Constructs a network composed of two fully-connected sigmoid layers
public static void BuildMLP(IAllocator allocator, SeedSource seedSource, int batchSize, bool useCudnn, out Sequential model, out ICriterion criterion, out bool outputIsClassIndices)
{
    int inputSize = MnistParser.ImageSize * MnistParser.ImageSize;
    int hiddenSize = 100;
    int outputSize = MnistParser.LabelCount;
    var elementType = DType.Float32;

    model = new Sequential();
    model.Add(new ViewLayer(batchSize, inputSize));
    model.Add(new LinearLayer(allocator, seedSource, elementType, inputSize, hiddenSize, batchSize));
    model.Add(new SigmoidLayer(allocator, elementType, batchSize, hiddenSize));
    model.Add(new LinearLayer(allocator, seedSource, elementType, hiddenSize, outputSize, batchSize));
    model.Add(new SigmoidLayer(allocator, elementType, batchSize, outputSize));

    criterion = new MSECriterion(allocator, batchSize, outputSize);
    outputIsClassIndices = false; // output is class (pseudo-)probabilities, not class indices
}
public static void Run()
{
    // Load train data
    NDarray x = np.array(new float[,] { { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 } });
    NDarray y = np.array(new float[] { 0, 1, 1, 0 });

    // Build sequential model
    var model = new Sequential();
    model.Add(new Dense(4, activation: "relu", input_shape: new Shape(2)));
    model.Add(new Dense(1));

    // Custom callback implemented in AccHistory.py
    var accHistory = Callback.Custom("AccHistory", "AccHistory.py");

    // Compile and train
    model.Compile(optimizer: new SGD(), loss: "binary_crossentropy", metrics: new string[] { "accuracy" });
    var history = model.Fit(x, y, batch_size: 2, epochs: 10, verbose: 1, callbacks: new Callback[] { accHistory });
}
// Constructs a network with two fully-connected layers; one sigmoid, one softmax
public static void BuildMLPSoftmax(IAllocator allocator, SeedSource seedSource, int batchSize, bool useCudnn, out Sequential model, out ICriterion criterion, out bool outputIsClassIndices)
{
    int inputSize = MnistParser.ImageSize * MnistParser.ImageSize;
    int hiddenSize = 100;
    int outputSize = MnistParser.LabelCount;
    var elementType = DType.Float32;

    model = new Sequential();
    model.Add(new ViewLayer(batchSize, inputSize));
    model.Add(new LinearLayer(allocator, seedSource, elementType, inputSize, hiddenSize, batchSize));
    model.Add(new SigmoidLayer(allocator, elementType, batchSize, hiddenSize));
    model.Add(new LinearLayer(allocator, seedSource, elementType, hiddenSize, outputSize, batchSize));
    model.Add(LayerBuilder.BuildLogSoftMax(allocator, elementType, batchSize, outputSize, useCudnn));

    criterion = new ClassNLLCriterion(allocator, batchSize, outputSize);
    outputIsClassIndices = true; // output of criterion is class indices
}
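BuildMLP and BuildMLPSoftmax differ only in the output/criterion pairing: sigmoid outputs scored by MSE versus log-softmax outputs scored by negative log-likelihood. The latter pair computes the standard cross-entropy on logits x with true class y (a standard identity, not specific to this codebase):

\mathrm{CE}(x, y) \;=\; \mathrm{NLL}(\log\mathrm{softmax}(x),\, y) \;=\; -x_y + \log \sum_j e^{x_j}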
public static async Task Aggregator(
    FPGA.InputSignal<bool> RXD,
    FPGA.OutputSignal<bool> TXD)
{
    Sequential handler = () =>
    {
        byte data = 0;
        ulong tmp = 0;
        ulong op1 = 0, op2 = 0;

        // Receive two 64-bit operands, 8 bytes each, least significant byte first:
        // each incoming byte lands in the top bits while the accumulator shifts down.
        for (byte i = 0; i < 2; i++)
        {
            for (byte j = 0; j < 8; j++)
            {
                UART.Read(115200, RXD, out data);
                tmp = ((ulong)data << 56) | (tmp >> 8);
            }
            if (i == 0)
            {
                op1 = tmp;
            }
            else
            {
                op2 = tmp;
            }
        }

        tmp = op1 + op2;

        // Send the 64-bit sum back, least significant byte first.
        for (byte j = 0; j < 8; j++)
        {
            UART.Write(115200, (byte)tmp, TXD);
            tmp = tmp >> 8;
        }
    };

    const bool trigger = true;
    FPGA.Config.OnSignal(trigger, handler);
}
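A host-side test for this adder just has to honor the same LSB-first byte order. A hypothetical sketch, assuming COM3 and a little-endian host (where BitConverter.GetBytes emits bytes least significant first):

using System;
using System.IO.Ports;

static class AdderTest
{
    static void Main()
    {
        // Hypothetical port name for the board's USB-UART bridge.
        using var port = new SerialPort("COM3", 115200);
        port.ReadTimeout = 2000;
        port.Open();

        ulong a = 0x1122334455667788UL, b = 0x0000000100000001UL;

        // The FPGA loop expects each operand as 8 bytes, least significant first.
        port.Write(BitConverter.GetBytes(a), 0, 8);
        port.Write(BitConverter.GetBytes(b), 0, 8);

        var sumBytes = new byte[8];
        for (int i = 0; i < 8; i++)
        {
            sumBytes[i] = (byte)port.ReadByte();
        }

        ulong sum = BitConverter.ToUInt64(sumBytes, 0);
        Console.WriteLine($"0x{a:X16} + 0x{b:X16} = 0x{sum:X16}"); // expect a + b
    }
}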
#pragma warning restore MSML_PrivateFieldName // Private field name not in: _camelCase format

public FeedForwardLayer(
    int embeddingDim = 768,
    int ffnEmbeddingDim = 3072,
    double dropoutRate = 0.1,
    double activationDropoutRate = 0.1,
    string activationFn = "relu",
    bool dynamicDropout = false)
    : base(nameof(FeedForwardLayer))
{
    // Initialize parameters
    if (dynamicDropout)
    {
        dropoutRate = CalculateDropout(dropoutRate, embeddingDim,
            SearchSpace.HiddenSizeChoices[SearchSpace.HiddenSizeChoices.Length - 1]);
        activationDropoutRate = CalculateDropout(activationDropoutRate, embeddingDim,
            SearchSpace.HiddenSizeChoices[SearchSpace.HiddenSizeChoices.Length - 1]);
    }

    // Position-wise feed-forward network: Linear -> activation -> dropout -> Linear -> dropout
    var fullConnected1 = torch.nn.Linear(embeddingDim, ffnEmbeddingDim);
    var activation = new ActivationFunction(activationFn);
    var activationDropoutLayer = torch.nn.Dropout(activationDropoutRate);
    var fullConnected2 = torch.nn.Linear(ffnEmbeddingDim, embeddingDim);
    var dropoutLayer = torch.nn.Dropout(dropoutRate);

    ModelUtils.InitNormal(fullConnected1.weight, mean: 0.0, std: 0.02);
    ModelUtils.InitZeros(fullConnected1.bias);
    ModelUtils.InitNormal(fullConnected2.weight, mean: 0.0, std: 0.02);
    ModelUtils.InitZeros(fullConnected2.bias);

    FullConnects = torch.nn.Sequential(
        ("fc1", fullConnected1),
        ("activation", activation),
        ("dropout1", activationDropoutLayer),
        ("fc2", fullConnected2),
        ("dropout2", dropoutLayer)
    );

    // Layer norm associated with the position-wise feed-forward NN
    FinalLayerNorm = torch.nn.LayerNorm(new long[] { embeddingDim });
    RegisterComponents();
}
public static VCExpr RegExpr(RE r, VCExpr N, VCContext ctxt)
{
    Contract.Requires(r != null);
    Contract.Requires(N != null);
    Contract.Requires(ctxt != null);
    Contract.Ensures(Contract.Result<VCExpr>() != null);

    if (r is AtomicRE)
    {
        AtomicRE ar = (AtomicRE)r;
        return Block(ar.b, N, ctxt);
    }
    else if (r is Sequential)
    {
        // Sequencing: the continuation of the first part is the predicate of the second.
        Sequential s = (Sequential)r;
        return RegExpr(s.first, RegExpr(s.second, N, ctxt), ctxt);
    }
    else if (r is Choice)
    {
        // Choice: conjoin the predicates of all branches; the empty choice yields N.
        Choice ch = (Choice)r;
        VCExpr res;
        if (ch.rs == null || ch.rs.Count == 0)
        {
            res = N;
        }
        else
        {
            VCExpr currentWLP = RegExpr(cce.NonNull(ch.rs[0]), N, ctxt);
            for (int i = 1, n = ch.rs.Count; i < n; i++)
            {
                currentWLP = ctxt.Ctxt.ExprGen.And(currentWLP, RegExpr(cce.NonNull(ch.rs[i]), N, ctxt));
            }
            res = currentWLP;
        }
        return res;
    }
    else
    {
        Contract.Assert(false);
        throw new cce.UnreachableException(); // unexpected RE subtype
    }
}
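RegExpr is the standard weakest-(liberal-)precondition transformer lifted to the regular-expression program structure: atoms defer to Block, sequencing nests the postcondition, and choice conjoins the branches. In the usual notation:

\mathrm{wlp}(r_1 \,;\, r_2,\ N) \;=\; \mathrm{wlp}(r_1,\ \mathrm{wlp}(r_2,\ N)) \qquad
\mathrm{wlp}(r_1 \,[]\, \cdots \,[]\, r_k,\ N) \;=\; \bigwedge_{i=1}^{k} \mathrm{wlp}(r_i,\ N)

with the empty choice yielding N itself, exactly as in the Choice branch above.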
public static async Task Aggregator(
    // blinker
    FPGA.OutputSignal<bool> LED1,
    // UART
    FPGA.InputSignal<bool> RXD,
    FPGA.OutputSignal<bool> TXD)
{
    IsAlive.Blink(LED1);

    SnakeDBG dbg = new SnakeDBG();
    Sequential handler = () =>
    {
        dbg.C1++;
        JSON.SerializeToUART(ref dbg, TXD);
    };
    FPGA.Config.OnStartup(handler);
}
public ConvSeparable(int inChannels, int outChannels, int kernelSize, int padding, double dropout)
    : base(nameof(ConvSeparable))
{
    // Depthwise convolution (groups == inChannels). Weight shape: [InChannels, 1, KernelSize]
    var conv1 = torch.nn.Conv1d(inChannels, inChannels, kernelSize, padding: padding, groups: inChannels);

    // Pointwise 1x1 convolution. Weight shape: [OutChannels, InChannels, 1], Bias shape: [OutChannels]
    var conv2 = torch.nn.Conv1d(inChannels, outChannels, 1, padding: 0L, groups: 1, bias: true);

    var std = Math.Sqrt((4 * (1.0 - dropout)) / (kernelSize * inChannels));
    ModelUtils.InitNormal(conv1.weight, mean: 0, std: std);
    ModelUtils.InitNormal(conv2.weight, mean: 0, std: std);
    ModelUtils.InitConstant(conv2.bias, 0);

    Conv = torch.nn.Sequential(
        ("conv1", conv1),
        ("conv2", conv2)
    );
    RegisterComponents();
}
static void FifthNN()
{
    var r = new Random();

    // XNOR variant (commented out):
    //var data = new Tensor((Matrix) new double[, ] { { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 } }, true);
    //var target = new Tensor((Matrix) new double[, ] { { 1 }, { 0 }, { 0 }, { 1 } }, true);

    // XOR: works!
    var data = new Tensor((Matrix) new double[, ] { { 1, 0 }, { 0, 0 }, { 0, 1 }, { 1, 1 } }, true);
    var target = new Tensor((Matrix) new double[, ] { { 1 }, { 0 }, { 1 }, { 0 } }, true);

    // 2 -> 5 -> 5 -> 1 fully-connected network with sigmoid activations
    var seq = new Sequential();
    seq.Layers.Add(new Linear(2, 5, r));
    seq.Layers.Add(new SigmoidLayer());
    seq.Layers.Add(new Linear(5, 5, r));
    seq.Layers.Add(new SigmoidLayer());
    seq.Layers.Add(new Linear(5, 1, r));
    seq.Layers.Add(new SigmoidLayer());

    var sgd = new StochasticGradientDescent(seq.Parameters, 1f);
    var mse = new MeanSquaredError();

    for (var i = 0; i < 400; i++)
    {
        var pred = seq.Forward(data);
        var loss = mse.Forward(pred, target);
        loss.Backward(new Tensor(Matrix.Ones(loss.Data.X, loss.Data.Y)));
        sgd.Step();
        Console.WriteLine($"Epoch: {i} Loss: {loss}");
    }
}
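To sanity-check the fit, one could append a final forward pass after the training loop and compare against the XOR truth table. This sketch assumes the Tensor's Matrix payload exposes a [row, col] indexer like the Matrix used in the MNIST snippet above; treat that indexer as an assumption about this particular library.

// Hypothetical check; assumes Tensor.Data supports [row, col] element access.
var finalPred = seq.Forward(data);
for (var row = 0; row < 4; row++)
{
    Console.WriteLine(
        $"{data.Data[row, 0]} XOR {data.Data[row, 1]} -> " +
        $"predicted {finalPred.Data[row, 0]:F3}, expected {target.Data[row, 0]}");
}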
public static async Task Aggregator(
    FPGA.InputSignal<bool> RXD,
    FPGA.OutputSignal<bool> TXD)
{
    Controllers.eShiftCommand cmd = 0;

    // Receive a command byte from UART.
    Sequential readHandler = () =>
    {
        byte data = 0;
        UART.Read(115200, RXD, out data);
        cmd = (eShiftCommand)data;
    };
    const bool trigger = true;
    FPGA.Config.OnSignal(trigger, readHandler);

    // Send a response byte whenever one is staged in "write".
    byte write = 0;
    FPGA.Signal<bool> dataWritten = false;
    Sequential writeHandler = () =>
    {
        UART.Write(115200, write, TXD);
        dataWritten = true;
    };
    FPGA.Config.OnRegisterWritten(write, writeHandler);

    // Compute the value for each received command and wait for it to be sent.
    Sequential cmdHandler = () =>
    {
        byte result = 0;
        ValueForCommand(cmd, out result);
        write = result;
        FPGA.Runtime.WaitForAllConditions(dataWritten);
    };
    FPGA.Config.OnRegisterWritten(cmd, cmdHandler);
}
static void Serialize()
{
    var playerFactory = new EntityFactory<Player>();
    playerFactory.AddBehavior(Attackable.DefaultPreset);
    playerFactory.AddBehavior(Attacking.Preset);
    playerFactory.AddBehavior(Displaceable.DefaultPreset);
    playerFactory.AddBehavior(Moving.Preset);
    playerFactory.AddBehavior(Pushable.Preset);
    playerFactory.AddBehavior(Statused.Preset);
    playerFactory.AddBehavior(Sequential.Preset(new Sequential.Config(new Step[0])));
    System.Console.WriteLine("Set up playerFactory");

    var player = playerFactory.Instantiate();
    World world = new World(1, 1);
    player.Init(new IntVector2(1, 1), world);

    var slot = new SizedSlot<CircularItemContainer, Hopper.Core.Items.IItem>("stuff", 5);
    var item = new TinkerItem(
        new ItemMetadata("Test_Item_1"),
        new Tinker<TinkerData>(new ChainDef<ContextBase>[] { }),
        slot);
    var item2 = new TinkerItem(
        new ItemMetadata("Test_Item_2"),
        new Tinker<TinkerData>(new ChainDef<ContextBase>[] { }),
        slot);

    var packed = Registry.Default.Items.PackModMap();
    ((Inventory)player.Inventory).AddContainer(slot, new CircularItemContainer(5));
    player.Inventory.Equip(item);
    player.Inventory.Equip(item2);

    MemoryTraceWriter traceWriter = new MemoryTraceWriter();
    JsonSerializerSettings settings = new JsonSerializerSettings();
    settings.Converters.Add(new IHaveIdConverter<Hopper.Core.Items.IItem>());
    settings.Converters.Add(new InventoryConverter());
    // settings.Converters.Add(new BehaviorConverter());
    settings.Converters.Add(new BehaviorControlConverter());
    settings.Converters.Add(new EntityConverter());
    settings.TraceWriter = traceWriter;

    string result = JsonConvert.SerializeObject(player, settings);
    System.Console.WriteLine(result);

    Entity entity = JsonConvert.DeserializeObject<Player>(result, settings);
    System.Console.WriteLine(entity);
    // System.Console.WriteLine(traceWriter.ToString());
}
static void Main(string[] args)
{
    Global.UseGpu();

    // Quick sanity check of tensor ops on the selected device.
    Tensor x = Tensor.FromArray(Global.Device, new float[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
    x = x.Reshape(3, 3);
    var result = TOps.Diag(x);
    result.Print();

    string datasetFolder = @"C:\dataset\MNIST";
    bool useDenseModel = false;

    var ((trainX, trainY), (valX, valY)) = MNISTParser.LoadDataSet(datasetFolder, trainCount: 60000, testCount: 10000, flatten: useDenseModel);
    Console.WriteLine("Train and Test data loaded");
    DataFrameIter trainIter = new DataFrameIter(trainX, trainY);
    DataFrameIter valIter = new DataFrameIter(valX, valY);

    Sequential model = useDenseModel ? BuildFCModel() : BuildConvModel();
    model.Compile(OptimizerType.Adam, LossType.CategorialCrossEntropy, MetricType.Accuracy);
    Console.WriteLine("Model compiled.. initiating training");

    model.EpochEnd += Model_EpochEnd;
    model.Train(trainIter, 10, 32, valIter);
    Console.ReadLine();
}
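The subscription above references a Model_EpochEnd handler that the snippet does not show. A minimal sketch follows; the event-args type name and its Epoch/Loss/Metric members are assumptions about this library's event payload, not confirmed API.

// Hypothetical handler; the EpochEndEventArgs member names are assumptions.
private static void Model_EpochEnd(object sender, EpochEndEventArgs e)
{
    Console.WriteLine($"Epoch {e.Epoch}: loss = {e.Loss}, metric = {e.Metric}");
}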
public static async Task Aggregator(
    FPGA.InputSignal<bool> RXD,
    FPGA.OutputSignal<bool> TXD)
{
    Sequential handler = () =>
    {
        ulong tmp = 0;
        ulong op1 = 0, op2 = 0;

        // Receive two 64-bit operands, 8 bytes each, least significant byte first.
        for (byte i = 0; i < 2; i++)
        {
            for (byte j = 0; j < 8; j++)
            {
                byte data = 0;
                UART.Read(115200, RXD, out data);
                tmp = ((ulong)data << 56) | (tmp >> 8);
            }
            if (i == 0)
            {
                op1 = tmp;
            }
            else
            {
                op2 = tmp;
            }
        }

        FunctionalTest.Pipeline_Pipelined64BitAdder.PipelinedAdder(op1, op2, out tmp);

        // Send the 64-bit sum back, least significant byte first.
        for (byte j = 0; j < 8; j++)
        {
            UART.Write(115200, (byte)tmp, TXD);
            tmp = tmp >> 8;
        }
    };

    const bool trigger = true;
    FPGA.Config.OnSignal(trigger, handler);
}
public static void Run()
{
    // Load train data
    NDarray dataset = np.loadtxt(fname: "C:/Project/LSTMCoreApp/pima-indians-diabetes.data.csv", delimiter: ",");
    var X = dataset[":, 0:8"];
    var Y = dataset[":, 8"];

    // Build sequential model
    var model = new Sequential();
    model.Add(new Dense(12, input_dim: 8, kernel_initializer: "uniform", activation: "relu"));
    model.Add(new Dense(8, kernel_initializer: "uniform", activation: "relu"));
    model.Add(new Dense(1, activation: "sigmoid"));

    // Compile and train
    model.Compile(optimizer: "adam", loss: "binary_crossentropy", metrics: new string[] { "accuracy" });
    model.Fit(X, Y, batch_size: 10, epochs: 150, verbose: 1);

    // Evaluate model
    var scores = model.Evaluate(X, Y, verbose: 1);
    Console.WriteLine("Accuracy: {0}", scores[1] * 100);

    // Save model and weights
    string json = model.ToJson();
    File.WriteAllText("model.json", json);
    model.SaveWeight("model.h5");
    Console.WriteLine("Saved model to disk");

    // Load model and weights, then evaluate the reloaded model
    var loaded_model = Sequential.ModelFromJson(File.ReadAllText("model.json"));
    loaded_model.LoadWeight("model.h5");
    Console.WriteLine("Loaded model from disk");

    loaded_model.Compile(optimizer: "rmsprop", loss: "binary_crossentropy", metrics: new string[] { "accuracy" });
    scores = loaded_model.Evaluate(X, Y, verbose: 1);
    Console.WriteLine("Accuracy: {0}", scores[1] * 100);
}
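Once reloaded, the model can also serve single predictions. A small sketch, assuming the same NDarray string slicing used above and the Predict method seen in the other Keras.NET snippets in this collection:

// Score the first row of the dataset with the reloaded model.
var sample = dataset["0:1, 0:8"];   // shape (1, 8): the eight input features
var probability = loaded_model.Predict(sample);
Console.WriteLine("P(diabetes) for row 0: {0}", probability[0].asscalar<float>());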