public void Load()
{
    BinaryFormatter bf = new BinaryFormatter();
    ModelContextData tosave;

    // Read the serialized model state back from disk; the using block
    // disposes the stream even if deserialization throws.
    using (FileStream fs = new FileStream("Model.bin", FileMode.Open, FileAccess.Read))
    {
        tosave = (ModelContextData)bf.Deserialize(fs);
    }

    // Restore hyper-parameters and trained weights onto this instance.
    this.bd = tosave.bd;
    this.clipval = tosave.clipval;
    this.decoder = tosave.decoder;
    this.Depth = tosave.Depth;
    this.encoder = tosave.encoder;
    this.hidden_size = tosave.hidden_sizes;
    this.learning_rate = tosave.learning_rate;
    this.word_size = tosave.letter_size;
    this.max_word = 100;
    this.regc = tosave.regc;
    this.ReversEncoder = tosave.ReversEncoder;
    this.UseDropout = tosave.UseDropout;
    this.Whd = tosave.Whd;
    this.Embedding = tosave.Wil;
}
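For reference, the container that Load() deserializes has to expose the fields read above. Below is a minimal sketch of what such a [Serializable] ModelContextData class might look like, inferred purely from how Load() uses it; the exact field types (float vs. double, and so on) are assumptions, not the article's definitive declaration.

// Sketch only: fields inferred from what Load() reads; numeric types are assumptions.
[Serializable]
public class ModelContextData
{
    public WeightMatrix Whd;          // hidden-to-vocabulary projection
    public WeightMatrix bd;           // output bias
    public WeightMatrix Wil;          // word embedding table (restored into Embedding)
    public Encoder encoder;
    public Encoder ReversEncoder;
    public ContextDecoder decoder;
    public int Depth;
    public int hidden_sizes;
    public int letter_size;
    public float learning_rate;
    public float clipval;
    public float regc;
    public bool UseDropout;
}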
public ContextSeq2Seq(int inputSize, int hiddenSize, int depth, List<List<string>> input, List<List<string>> output, bool useDropout)
{
    this.InputSequences = input;
    this.OutputSequences = output;
    this.Depth = depth;              // number of stacked hidden layers
    word_size = inputSize;           // size of the word embeddings
    this.hidden_size = hiddenSize;
    this.UseDropout = useDropout;

    solver = new Optimizer();

    // Build the vocabulary from the training corpus.
    OneHotEncoding(input, output);

    // Output projection (hidden state -> vocabulary logits) and its bias;
    // the two extra columns leave room for the special start/end tokens.
    this.Whd = new WeightMatrix(hidden_size, vocab.Count + 2, true);
    this.bd = new WeightMatrix(1, vocab.Count + 2, 0);

    // Word embedding table, one row per vocabulary entry plus the two special tokens.
    Embedding = new WeightMatrix(vocab.Count + 2, word_size, true);

    // Bidirectional encoding: one encoder reads the sequence forwards, the other backwards.
    encoder = new Encoder(hidden_size, word_size, depth);
    ReversEncoder = new Encoder(hidden_size, word_size, depth);
    decoder = new ContextDecoder(hidden_size, word_size, depth);
}
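To tie the two pieces together, the constructor and Load() might be used along the following lines. This is only a usage sketch: the sample sentences and the hyper-parameter values (32-dimensional embeddings, 128 hidden units, 2 layers) are illustrative choices, not values prescribed by the model.

// Illustrative only: a tiny tokenized parallel corpus.
List<List<string>> input = new List<List<string>>
{
    new List<string> { "how", "are", "you" },
    new List<string> { "good", "morning" }
};
List<List<string>> output = new List<List<string>>
{
    new List<string> { "i", "am", "fine" },
    new List<string> { "good", "morning" }
};

// 32-dimensional embeddings, 128 hidden units, 2 stacked layers, no dropout.
ContextSeq2Seq model = new ContextSeq2Seq(32, 128, 2, input, output, false);

// Alternatively, restore a previously saved model from Model.bin.
model.Load();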