/// <summary>
/// Private constructor (singleton pattern): initializes the instance with safe defaults
/// until <c>Instantiate</c> is called with the actual running mode.
/// </summary>
private InAppPurchases()
{
    // Default to test mode so the real shop is never touched accidentally.
    _runningMode = RunningMode.Testing;

    // No license information has been retrieved yet.
    _licenseInformation = null;

    // The "no ads" purchase starts out as not activated.
    _noAdsOptionIsActivated = false;
}
/// <summary>
/// Sets the running mode on the native side.
/// NOTE(review): an <c>extern</c> method requires a <c>[DllImport]</c> attribute to compile;
/// presumably it is declared immediately above this line, outside the visible chunk — confirm.
/// </summary>
/// <param name="runMode">The running mode to apply.</param>
public static extern void PNSetRunningMode(RunningMode runMode);
/// <summary>
/// Parses the command line arguments and fills in the static program settings
/// (mode, source folder, language, mod file path).
/// </summary>
/// <param name="args">Raw command line arguments.</param>
/// <returns>True when the arguments are valid and the program can proceed; otherwise false.</returns>
private static bool ParseCliArguments(string[] args)
{
    var version = typeof(Program).Assembly.GetName().Version;
    var cli = new CommandLineApplication(false)
    {
        Name = typeof(Program).Namespace,
        FullName = typeof(Program).Namespace,
        ShortVersionGetter = () => version.ToString(2),
        LongVersionGetter = () => version.ToString(3)
    };

    cli.HelpOption("--help | -h");
    cli.VersionOption("-v | --version", version.ToString(2), version.ToString(3));

    var modeOption = cli.Option("-m | --mode", Strings.ProgramModeDesc, CommandOptionType.SingleValue);
    var folderOption = cli.Option("-f | --folder", Strings.SourceFolderDesc, CommandOptionType.SingleValue);
    var languageOption = cli.Option("-l | --language", Strings.LanguageDesc, CommandOptionType.SingleValue);
    var pathArgument = cli.Argument(Strings.PathArgumentName, Strings.PathDesc);

    cli.OnExecute(() =>
    {
        if (modeOption.HasValue())
        {
            // Anything other than "patch" (case-insensitive) falls back to dump mode.
            _mode = string.Equals(modeOption.Value(), "patch", StringComparison.OrdinalIgnoreCase)
                ? RunningMode.Patch
                : RunningMode.Dump;
        }

        if (folderOption.HasValue())
        {
            SourcePath = folderOption.Value();
        }

        if (languageOption.HasValue() && !Enum.TryParse(languageOption.Value(), out _language))
        {
            Logger.Error(Strings.InvalidGameCulture);
        }

        _modFilePath = pathArgument.Value;
        return 0;
    });

    cli.Execute(args);

    // A mod file path is always required.
    if (string.IsNullOrWhiteSpace(_modFilePath))
    {
        Logger.Error(Strings.NoFileSpecified);
        return false;
    }

    // Patch mode additionally requires a source folder.
    if (_mode == RunningMode.Patch && string.IsNullOrWhiteSpace(SourcePath))
    {
        Logger.Error(Strings.NoSourceFolderSpecified);
        return false;
    }

    return true;
}
/// <summary>
/// Process entire corpus set by given RNN instances, in parallel.
/// Each worker dequeues a free RNN instance, processes one sequence, and returns
/// the instance to the queue; extra instances are cloned on demand.
/// </summary>
/// <param name="rnns">Pool of RNN instances shared by the parallel workers.</param>
/// <param name="corpusSet">The corpus to process; it is shuffled in place first.</param>
/// <param name="runningMode">Training mode additionally triggers mini-batch weight updates.</param>
public void Process(List<RNN<T>> rnns, DataSet<T> corpusSet, RunningMode runningMode)
{
    parallelOptions = new ParallelOptions();
    parallelOptions.MaxDegreeOfParallelism = Environment.ProcessorCount;

    // Reset the shared progress/error counters for this pass.
    processedSequence = 0;
    processedWordCnt = 0;
    tknErrCnt = 0;
    sentErrCnt = 0;
    corpusSet.Shuffle();

    //Add RNN instance into job queue
    ConcurrentQueue<RNN<T>> qRNNs = new ConcurrentQueue<RNN<T>>();
    foreach (var rnn in rnns)
    {
        qRNNs.Enqueue(rnn);
    }

    Parallel.For(0, corpusSet.SequenceList.Count, parallelOptions, i =>
    {
        //Get a free RNN instance for running
        RNN<T> rnn;
        if (qRNNs.TryDequeue(out rnn) == false)
        {
            //The queue is empty, so we clone a new one
            rnn = rnns[0].Clone();
            Logger.WriteLine("Cloned a new RNN instance for training.");
        }

        var pSequence = corpusSet.SequenceList[i];

        //Calcuate how many tokens we are going to process in this sequence
        int tokenCnt = 0;
        if (pSequence is Sequence)
        {
            tokenCnt = (pSequence as Sequence).States.Length;
        }
        else
        {
            SequencePair sp = pSequence as SequencePair;
            // Skip pairs whose source sentence is too long; return the RNN first.
            if (sp.srcSentence.TokensList.Count > rnn.MaxSeqLength)
            {
                qRNNs.Enqueue(rnn);
                return;
            }
            tokenCnt = sp.tgtSequence.States.Length;
        }

        //This sequence is too long, so we ignore it
        if (tokenCnt > rnn.MaxSeqLength)
        {
            qRNNs.Enqueue(rnn);
            return;
        }

        //Run neural network
        int[] predicted;
        if (IsCRFTraining)
        {
            predicted = rnn.ProcessSequenceCRF(pSequence as Sequence, runningMode);
        }
        else
        {
            Matrix<float> m;
            predicted = rnn.ProcessSequence(pSequence, runningMode, false, out m);
        }

        //Update counters
        Interlocked.Add(ref processedWordCnt, tokenCnt);
        Interlocked.Increment(ref processedSequence);
        Interlocked.Increment(ref processMiniBatch);

        int newTknErrCnt;
        if (pSequence is Sequence)
        {
            newTknErrCnt = GetErrorTokenNum(pSequence as Sequence, predicted);
        }
        else
        {
            newTknErrCnt = GetErrorTokenNum((pSequence as SequencePair).tgtSequence, predicted);
        }

        Interlocked.Add(ref tknErrCnt, newTknErrCnt);
        if (newTknErrCnt > 0)
        {
            Interlocked.Increment(ref sentErrCnt);
        }

        //Update weights
        //We only allow one thread to update weights, and other threads keep running to train or predict given sequences
        //Note: we don't add any lock when updating weights and deltas for weights in order to improve performance singificantly,
        //so that means race condition will happen and it's okay for us.
        if (runningMode == RunningMode.Training && processMiniBatch > 0 && processMiniBatch % ModelSettings.MiniBatchSize == 0 && updatingWeights == 0)
        {
            // NOTE(review): increment-then-check is not a strict mutex; two threads can
            // briefly both pass the guard. Per the comment above, that race is accepted.
            Interlocked.Increment(ref updatingWeights);
            if (updatingWeights == 1)
            {
                rnn.UpdateWeights();
                Interlocked.Exchange(ref processMiniBatch, 0);
            }
            Interlocked.Decrement(ref updatingWeights);
        }

        //Show progress information
        if (processedSequence % 1000 == 0)
        {
            Logger.WriteLine("Progress = {0} ", processedSequence / 1000 + "K/" + corpusSet.SequenceList.Count / 1000.0 + "K");
            Logger.WriteLine(" Error token ratio = {0}%", (double)tknErrCnt / (double)processedWordCnt * 100.0);
            Logger.WriteLine(" Error sentence ratio = {0}%", (double)sentErrCnt / (double)processedSequence * 100.0);
        }

        //Save intermediate model file
        if (ModelSettings.SaveStep > 0 && processedSequence % ModelSettings.SaveStep == 0)
        {
            //After processed every m_SaveStep sentences, save current model into a temporary file
            Logger.WriteLine("Saving temporary model into file...");
            try
            {
                rnn.SaveModel("model.tmp");
            }
            catch (Exception err)
            {
                Logger.WriteLine($"Fail to save temporary model into file. Error: {err.Message.ToString()}");
            }
        }

        // Return the RNN instance to the pool for the next worker.
        qRNNs.Enqueue(rnn);
    });
}
/// <summary>
/// Runs the output layer over every state of the sequence, using the topmost
/// hidden layer's outputs as dense input, and optionally records the raw scores.
/// </summary>
/// <param name="pSequence">The sequence being processed.</param>
/// <param name="rawOutputLayer">Raw output scores per state, or null when not requested.</param>
/// <param name="runningMode">Running mode (unused here — TODO confirm it is needed for the signature only).</param>
/// <param name="outputRawScore">True to allocate and fill the raw score matrix.</param>
private void ComputeTopLayer(Sequence pSequence, out Matrix<float> rawOutputLayer, RunningMode runningMode, bool outputRawScore)
{
    var stateCount = pSequence.States.Length;
    var topHiddenOutputs = layersOutput[forwardHiddenLayers.Count - 1];

    // Only allocate the raw score matrix when the caller asked for it.
    Matrix<float> rawScores = outputRawScore
        ? new Matrix<float>(stateCount, OutputLayer.LayerSize)
        : null;

    OutputLayer.LabelShortList = pSequence.States.Select(s => s.Label).ToList();

    for (var idx = 0; idx < stateCount; idx++)
    {
        var state = pSequence.States[idx];
        OutputLayer.ForwardPass(state.SparseFeature, topHiddenOutputs[idx]);
        OutputLayer.CopyNeuronTo(OutputCells[idx]);

        if (rawScores != null)
        {
            OutputLayer.Cells.CopyTo(rawScores[idx], 0);
        }
    }

    rawOutputLayer = rawScores;
}
/// <summary>
/// Runs one forward layer and one backward layer over the sequence at the given
/// depth, feeding both with the previous layer's outputs, then merges the results.
/// </summary>
/// <param name="pSequence">The sequence being processed.</param>
/// <param name="forwardLayer">The left-to-right layer at this depth.</param>
/// <param name="backwardLayer">The right-to-left layer at this depth.</param>
/// <param name="runningMode">Running mode (unused here — TODO confirm it is needed for the signature only).</param>
/// <param name="layerIdx">Depth of this layer pair; outputs of layer layerIdx-1 are the inputs.</param>
private void ComputeMiddleLayers(Sequence pSequence, ILayer forwardLayer, ILayer backwardLayer, RunningMode runningMode, int layerIdx)
{
    var stateCount = pSequence.States.Length;
    float[][] previousOutputs = layersOutput[layerIdx - 1];

    // Forward direction: left to right.
    forwardLayer.Reset();
    for (var idx = 0; idx < stateCount; idx++)
    {
        var state = pSequence.States[idx];
        forwardLayer.ForwardPass(state.SparseFeature, previousOutputs[idx]);
        forwardLayer.CopyNeuronTo(forwardCellList[layerIdx][idx]);
    }

    // Backward direction: right to left.
    backwardLayer.Reset();
    for (var idx = stateCount - 1; idx >= 0; idx--)
    {
        var state = pSequence.States[idx];
        backwardLayer.ForwardPass(state.SparseFeature, previousOutputs[idx]);
        backwardLayer.CopyNeuronTo(backwardCellList[layerIdx][idx]);
    }

    // Combine both directions into this layer's merged output.
    MergeForwardBackwardLayers(stateCount, forwardLayer.LayerSize, layerIdx);
}
/// <summary>
/// Receives the current mode from the client:
/// competition, or training (training requires a target area number).
/// </summary>
/// <param name="model">The running mode sent by the client.</param>
public void SetRunningModel(RunningMode model)
{
    // Logs (in Chinese, left untouched as runtime output): the received working
    // mode and its target mark number parameter.
    Console.WriteLine("收到工作模式为:{0},参数为:{1}的模式设置", model.Model, model.TargetMarkNumber);
}
/// <summary>
/// Processes a single sequence with the network.
/// </summary>
/// <param name="sequence">The sequence to process.</param>
/// <param name="runningMode">Whether the network is training or only predicting.</param>
/// <param name="outputRawScore">True to also produce the raw output-layer scores.</param>
/// <param name="m">Raw output-layer scores per state; null when <paramref name="outputRawScore"/> is false (per the implementations in this file).</param>
/// <returns>The predicted label index for each state of the sequence.</returns>
public abstract int[] ProcessSequence(ISequence sequence, RunningMode runningMode, bool outputRawScore, out Matrix<float> m);
/// <summary>
/// Returns true when the given running mode is one of the synchronous modes
/// (blocking sync or non-blocking sync).
/// </summary>
/// <param name="runningMode">The running mode to classify.</param>
public static bool IsSynchronous(this RunningMode runningMode)
{
    switch (runningMode)
    {
        case RunningMode.Sync:
        case RunningMode.NonBlockingSync:
            return true;
        default:
            return false;
    }
}
/// <summary>
/// Processes a sequence-to-sequence pair: encodes the source sentence into an
/// averaged hidden representation, then runs the target sequence through the
/// hidden layers and output layer state by state. In training mode, errors are
/// back-propagated and weights updated after each state.
/// </summary>
/// <param name="sequence">The sequence pair (source sentence + target sequence) to process.</param>
/// <param name="runningMode">Training mode triggers error propagation and weight updates.</param>
/// <param name="outputRawScore">True to also record raw output-layer scores per state.</param>
/// <param name="m">Raw output scores, or null when <paramref name="outputRawScore"/> is false.</param>
/// <returns>The predicted label index for each target state.</returns>
private int[] TrainSequencePair(ISequence sequence, RunningMode runningMode, bool outputRawScore, out Matrix<float> m)
{
    SequencePair pSequence = sequence as SequencePair;
    var tgtSequence = pSequence.tgtSequence;

    //Reset all layers
    foreach (var layer in HiddenLayerList)
    {
        layer.Reset();
    }

    Sequence srcSequence;

    //Extract features from source sentences
    srcSequence = pSequence.autoEncoder.Config.BuildSequence(pSequence.srcSentence);
    ExtractSourceSentenceFeature(pSequence.autoEncoder, srcSequence, tgtSequence.SparseFeatureSize);

    var numStates = pSequence.tgtSequence.States.Length;
    var numLayers = HiddenLayerList.Count;
    var predicted = new int[numStates];

    m = outputRawScore ? new Matrix<float>(numStates, OutputLayer.LayerSize) : null;

    //Set target sentence labels into short list in output layer
    OutputLayer.LabelShortList.Clear();
    foreach (var state in tgtSequence.States)
    {
        OutputLayer.LabelShortList.Add(state.Label);
    }

    // Each layer's dense feature buffer starts with the averaged source encoding.
    CreateDenseFeatureList();
    for (int i = 0; i < numLayers; i++)
    {
        srcHiddenAvgOutput.CopyTo(denseFeaturesList[i], 0);
    }
    srcHiddenAvgOutput.CopyTo(denseFeaturesList[numLayers], 0);

    var sparseVector = new SparseVector();
    for (var curState = 0; curState < numStates; curState++)
    {
        //Build runtime features
        var state = tgtSequence.States[curState];
        SetRuntimeFeatures(state, curState, numStates, predicted);

        //Build sparse features for all layers
        sparseVector.Clean();
        sparseVector.SetLength(tgtSequence.SparseFeatureSize + srcSequence.SparseFeatureSize);
        sparseVector.AddKeyValuePairData(state.SparseFeature);
        sparseVector.AddKeyValuePairData(srcSparseFeatures);

        //Compute first layer: state dense features go after the source encoding.
        state.DenseFeature.CopyTo().CopyTo(denseFeaturesList[0], srcHiddenAvgOutput.Length);
        HiddenLayerList[0].ForwardPass(sparseVector, denseFeaturesList[0]);

        //Compute middle layers
        for (var i = 1; i < numLayers; i++)
        {
            //We use previous layer's output as dense feature for current layer
            HiddenLayerList[i - 1].Cells.CopyTo(denseFeaturesList[i], srcHiddenAvgOutput.Length);
            HiddenLayerList[i].ForwardPass(sparseVector, denseFeaturesList[i]);
        }

        //Compute output layer
        HiddenLayerList[numLayers - 1].Cells.CopyTo(denseFeaturesList[numLayers], srcHiddenAvgOutput.Length);
        OutputLayer.ForwardPass(sparseVector, denseFeaturesList[numLayers]);

        if (m != null)
        {
            OutputLayer.Cells.CopyTo(m[curState], 0);
        }

        predicted[curState] = OutputLayer.GetBestOutputIndex();

        if (runningMode == RunningMode.Training)
        {
            // error propogation
            OutputLayer.ComputeLayerErr(CRFSeqOutput, state, curState);

            //propogate errors to each layer from output layer to input layer
            HiddenLayerList[numLayers - 1].ComputeLayerErr(OutputLayer);
            for (var i = numLayers - 2; i >= 0; i--)
            {
                HiddenLayerList[i].ComputeLayerErr(HiddenLayerList[i + 1]);
            }

            //Update net weights
            OutputLayer.BackwardPass();
            for (var i = 0; i < numLayers; i++)
            {
                HiddenLayerList[i].BackwardPass();
            }
        }
    }

    return (predicted);
}
/// <summary>
/// Processes a sequence pair by delegating to <c>TrainSequencePair</c>.
/// </summary>
/// <returns>The predicted label index for each target state.</returns>
public override int[] ProcessSequence(ISequence sequence, RunningMode runningMode, bool outputRawScore, out Matrix<float> m)
{
    // Thin wrapper: all of the seq2seq work happens in TrainSequencePair.
    return TrainSequencePair(sequence, runningMode, outputRawScore, out m);
}
/// <summary>
/// Processes a sequence state by state through the hidden layers and the output
/// layer. In training mode, errors are propagated back and weights are updated
/// (in parallel across layers) after every state.
/// </summary>
/// <param name="pSequence">The sequence to process.</param>
/// <param name="runningMode">Training updates weights; Test skips the log-probability accumulation.</param>
/// <param name="outputRawScore">True to also record the raw output-layer scores per state.</param>
/// <param name="m">Raw output scores, or null when <paramref name="outputRawScore"/> is false.</param>
/// <returns>The predicted label index for each state.</returns>
public override int[] ProcessSequence(Sequence pSequence, RunningMode runningMode, bool outputRawScore, out Matrix<double> m)
{
    int numStates = pSequence.States.Length;
    int numLayers = HiddenLayerList.Count;

    // Fix: replaced "if (outputRawScore == true) ... else ..." with a direct conditional.
    m = outputRawScore ? new Matrix<double>(numStates, OutputLayer.LayerSize) : null;

    int[] predicted = new int[numStates];

    // Fix: replaced redundant "= true; if ... else ..." with a direct comparison.
    bool isTraining = runningMode == RunningMode.Training;

    //reset all layers
    foreach (SimpleLayer layer in HiddenLayerList)
    {
        layer.netReset(isTraining);
    }

    for (int curState = 0; curState < numStates; curState++)
    {
        //Compute first layer
        State state = pSequence.States[curState];
        SetInputLayer(state, curState, numStates, predicted);
        HiddenLayerList[0].computeLayer(state.SparseData, state.DenseData.CopyTo(), isTraining);

        //Compute each layer
        for (int i = 1; i < numLayers; i++)
        {
            //We use previous layer's output as dense feature for current layer
            HiddenLayerList[i].computeLayer(state.SparseData, HiddenLayerList[i - 1].cellOutput, isTraining);
        }

        //Compute output layer
        OutputLayer.CurrentLabelId = state.Label;
        OutputLayer.computeLayer(state.SparseData, HiddenLayerList[numLayers - 1].cellOutput, isTraining);

        if (m != null)
        {
            OutputLayer.cellOutput.CopyTo(m[curState], 0);
        }

        OutputLayer.Softmax(isTraining);
        predicted[curState] = OutputLayer.GetBestOutputIndex(isTraining);

        if (runningMode != RunningMode.Test)
        {
            // Small epsilon guards against Log10(0).
            logp += Math.Log10(OutputLayer.cellOutput[state.Label] + 0.0001);
        }

        if (runningMode == RunningMode.Training)
        {
            // error propogation
            OutputLayer.ComputeLayerErr(CRFSeqOutput, state, curState);

            //propogate errors to each layer from output layer to input layer
            HiddenLayerList[numLayers - 1].ComputeLayerErr(OutputLayer);
            for (int i = numLayers - 2; i >= 0; i--)
            {
                HiddenLayerList[i].ComputeLayerErr(HiddenLayerList[i + 1]);
            }

            //Update net weights
            Parallel.Invoke(() =>
            {
                OutputLayer.LearnFeatureWeights(numStates, curState);
            }, () =>
            {
                Parallel.For(0, numLayers, parallelOption, i =>
                {
                    HiddenLayerList[i].LearnFeatureWeights(numStates, curState);
                });
            });
        }
    }

    return (predicted);
}
/// <summary>
/// Processes a sequence with a CRF layer on top of the network: runs the network
/// in Test mode to get raw scores, applies forward-backward and Viterbi decoding,
/// and — when training — updates the CRF bigram transitions and the net weights.
/// </summary>
/// <param name="pSequence">The sequence to process.</param>
/// <param name="runningMode">Training triggers the full error-propagation pass below.</param>
/// <returns>The Viterbi-decoded label index for each state.</returns>
public override int[] ProcessSequenceCRF(Sequence pSequence, RunningMode runningMode)
{
    int numStates = pSequence.States.Length;
    int numLayers = HiddenLayerList.Count;

    //Get network output without CRF
    Matrix<double> nnOutput;
    ProcessSequence(pSequence, RunningMode.Test, true, out nnOutput);

    //Compute CRF result
    ForwardBackward(numStates, nnOutput);

    if (runningMode != RunningMode.Test)
    {
        //Get the best result
        for (int i = 0; i < numStates; i++)
        {
            // Small epsilon guards against Log10(0).
            logp += Math.Log10(CRFSeqOutput[i][pSequence.States[i].Label] + 0.0001);
        }
    }

    //Compute best path in CRF result
    int[] predicted = Viterbi(nnOutput, numStates);

    if (runningMode == RunningMode.Training)
    {
        //Update tag bigram transition for CRF model
        UpdateBigramTransition(pSequence);

        //Reset all layer states
        foreach (SimpleLayer layer in HiddenLayerList)
        {
            layer.netReset(true);
        }

        // Re-run the forward pass in training mode so layer state is fresh for
        // error propagation, then back-propagate the CRF errors.
        for (int curState = 0; curState < numStates; curState++)
        {
            // error propogation
            State state = pSequence.States[curState];
            SetInputLayer(state, curState, numStates, null);
            HiddenLayerList[0].computeLayer(state.SparseData, state.DenseData.CopyTo());

            for (int i = 1; i < numLayers; i++)
            {
                HiddenLayerList[i].computeLayer(state.SparseData, HiddenLayerList[i - 1].cellOutput);
            }

            OutputLayer.ComputeLayerErr(CRFSeqOutput, state, curState);
            HiddenLayerList[numLayers - 1].ComputeLayerErr(OutputLayer);
            for (int i = numLayers - 2; i >= 0; i--)
            {
                HiddenLayerList[i].ComputeLayerErr(HiddenLayerList[i + 1]);
            }

            //Update net weights
            Parallel.Invoke(() =>
            {
                OutputLayer.LearnFeatureWeights(numStates, curState);
            }, () =>
            {
                Parallel.For(0, numLayers, parallelOption, i =>
                {
                    HiddenLayerList[i].LearnFeatureWeights(numStates, curState);
                });
            });
        }
    }

    return (predicted);
}
// --- Methods of the class ---

/// <summary>
/// Instantiates the InAppPurchasing. This must be the first method that is called of this class.
/// </summary>
/// <param name="runningMode">Real to run against the real Windows shop; Testing to use the shop simulator.</param>
public void Instantiate(RunningMode runningMode)
{
    // By calling this function, the constructor is called first.
    if (_instantiated)
    {
        // We are already instantiated. This should not happen.
        Logging.I.LogMessage("InAppPurchase.Instatiate: Leaving early.\n", Logging.LogLevel.Error);
        return;
    }

    // Get the license information, i.e. the object that stores information about already purchased in-app features:
    switch (runningMode)
    {
        case RunningMode.Real:
            //DEBUG
            Logging.I.LogMessage("InAppPurchases: Instanciating LicenseInformation object in Real mode.\n");
            // Get the "real" license info:
            _licenseInformation = CurrentApp.LicenseInformation;
            break;
        case RunningMode.Testing:
            Logging.I.LogMessage("InAppPurchases: Instanciating LicenseInformation object in Test mode.\n");
            // Get the license info for testing purposes:
            _licenseInformation = CurrentAppSimulator.LicenseInformation;
            /* Removed because of issues with asynchronicity:
            // Load test data:
            StorageFolder proxyDataFolder = await Package.Current.InstalledLocation.GetFolderAsync("Assets");
            StorageFile proxyFile = await proxyDataFolder.GetFileAsync("InAppPurchasing_Test.xml");
            await CurrentAppSimulator.ReloadSimulatorAsync(proxyFile);
            */
            break;
        default:
            throw new PSTException("InAppPurchases.Instantiate(): Unknown RunningMode: " + runningMode + ".");
    }

    // Set the private member:
    _runningMode = runningMode;

    // Set the flag that we are instanciated from now on:
    _instantiated = true;

    // Get the latest in-app purchasing information:
    Update();
}
/// <summary>
/// Scans loaded assemblies for TSL communication schemas attached (via
/// <c>CommunicationSchemaAttribute</c>) to server or proxy communication instance
/// types, excluding the default schema.
/// </summary>
/// <param name="schemaRunningMode">Must be Server or Proxy; selects the base type to scan for.</param>
/// <returns>One instantiated schema per matching communication instance type.</returns>
internal static IEnumerable<ICommunicationSchema> ScanForTSLCommunicationSchema(RunningMode schemaRunningMode)
{
    Debug.Assert(schemaRunningMode == RunningMode.Server || schemaRunningMode == RunningMode.Proxy);

    // Fix: removed unused local `schema_interface_type`.
    var comm_instance_base_type = schemaRunningMode == RunningMode.Server ? typeof(TrinityServer) : typeof(TrinityProxy);
    var default_comm_schema = typeof(DefaultCommunicationSchema);

    return AssemblyUtility.GetAllClassInstances<ICommunicationSchema, CommunicationSchemaAttribute>(
        _ => comm_instance_base_type.IsAssignableFrom(_) && _ != default_comm_schema,
        _ => _.CommunicationSchemaType
              .GetConstructor(new Type[] { })
              .Invoke(new object[] { }) as ICommunicationSchema);
}
/// <summary>
/// Processes the given sequence with the network.
/// </summary>
/// <param name="pSequence">The sequence to process.</param>
/// <param name="runningMode">Whether the network is training or only predicting.</param>
/// <returns>The raw output-layer score matrix for the sequence.</returns>
public abstract Matrix <double> ProcessSequence(Sequence pSequence, RunningMode runningMode);
/// <summary>
/// Verifies that the local communication schema matches the remote storage's
/// schema. For asymmetric checks (from != to), scans local candidate schemas for
/// one whose name matches the remote; default-capability instances on either side
/// skip verification. A name mismatch logs an error; a signature mismatch is fatal.
/// </summary>
/// <param name="storage">The remote storage to check against; null is silently ignored.</param>
/// <param name="from">The running mode of this side.</param>
/// <param name="to">The running mode of the remote side.</param>
private unsafe void CheckProtocolSignatures_impl(RemoteStorage storage, RunningMode from, RunningMode to)
{
    if (storage == null)
    {
        return;
    }

    string my_schema_name;
    string my_schema_signature;
    string remote_schema_name;
    string remote_schema_signature;
    ICommunicationSchema my_schema;

    storage.GetCommunicationSchema(out remote_schema_name, out remote_schema_signature);

    if (from != to)// Asymmetrical checking, need to scan for matching local comm schema first.
    {
        // Fix: materialize once — the scan returns a deferred query that was
        // previously enumerated twice (Count() then FirstOrDefault()); also
        // replaces the Count() == 0 anti-pattern.
        var local_candidate_schemas = Global.ScanForTSLCommunicationSchema(to).ToList();

        /* If local or remote is default, we skip the verification. */
        if (local_candidate_schemas.Count == 0)
        {
            Log.WriteLine(LogLevel.Info, "{0}-{1}: Local instance has default communication capabilities.", from, to);
            return;
        }

        if (remote_schema_name == DefaultCommunicationSchema.GetName() || remote_schema_signature == "{[][][]}")
        {
            Log.WriteLine(LogLevel.Info, "{0}-{1}: Remote cluster has default communication capabilities.", from, to);
            return;
        }

        /* Both local and remote are not default instances. */
        my_schema = local_candidate_schemas.FirstOrDefault(_ => _.Name == remote_schema_name);
        if (my_schema == null)
        {
            Log.WriteLine(LogLevel.Fatal, "No candidate local communication schema signature matches the remote one.\r\n\tName: {0}\r\n\tSignature: {1}", remote_schema_name, remote_schema_signature);
            Global.Exit(-1);
        }
    }
    else
    {
        my_schema = Global.CommunicationSchema;
    }

    my_schema_name = my_schema.Name;
    my_schema_signature = CommunicationSchemaSerializer.SerializeProtocols(my_schema);

    if (my_schema_name != remote_schema_name)
    {
        Log.WriteLine(LogLevel.Error, "Local communication schema name not matching the remote one.\r\n\tLocal: {0}\r\n\tRemote: {1}", my_schema_name, remote_schema_name);
    }

    if (my_schema_signature != remote_schema_signature)
    {
        Log.WriteLine(LogLevel.Fatal, "Local communication schema signature not matching the remote one.\r\n\tLocal: {0}\r\n\tRemote: {1}", my_schema_signature, remote_schema_signature);
        Global.Exit(-1);
    }
}
/// <summary>
/// Runs the network over the given sequence in the specified running mode.
/// </summary>
/// <param name="pSequence">The sequence to run through the network.</param>
/// <param name="runningMode">Whether this pass trains the network or only predicts.</param>
/// <returns>The raw output-layer score matrix for the sequence.</returns>
public abstract Matrix<double> ProcessSequence(Sequence pSequence, RunningMode runningMode);
/// <summary>
/// Predicts the output for a sequence with the bi-directional network, and — when
/// training — computes the output-layer error signal and updates both RNNs.
/// </summary>
/// <param name="pSequence">The sequence to predict.</param>
/// <param name="runningMode">Train triggers weight learning; Test skips log-probability accumulation.</param>
/// <returns>The raw output-layer score matrix.</returns>
public override Matrix<double> PredictSentence(Sequence pSequence, RunningMode runningMode)
{
    //Reset the network
    int numStates = pSequence.States.Length;

    //Predict output
    Matrix<neuron> mergedHiddenLayer = null;
    Matrix<double> rawOutputLayer = null;
    neuron[][] seqOutput = InnerDecode(pSequence, out mergedHiddenLayer, out rawOutputLayer);

    if (runningMode != RunningMode.Test)
    {
        //Merge forward and backward
        for (int curState = 0; curState < numStates; curState++)
        {
            logp += Math.Log10(seqOutput[curState][pSequence.States[curState].Label].cellOutput);
        }
    }

    if (runningMode == RunningMode.Train)
    {
        //Update hidden-output layer weights
        for (int curState = 0; curState < numStates; curState++)
        {
            int label = pSequence.States[curState].Label;

            //For standard RNN: error is (target - output), with target 1 at the
            //gold label and 0 elsewhere.
            for (int c = 0; c < L2; c++)
            {
                seqOutput[curState][c].er = -seqOutput[curState][c].cellOutput;
            }
            seqOutput[curState][label].er = 1 - seqOutput[curState][label].cellOutput;
        }

        LearnTwoRNN(pSequence, mergedHiddenLayer, seqOutput);
    }

    return rawOutputLayer;
}
/// <summary>
/// Processes the given sequence with a CRF layer on top of the network.
/// </summary>
/// <param name="pSequence">The sequence to process.</param>
/// <param name="runningMode">Whether this pass trains the network or only predicts.</param>
/// <returns>The decoded label index for each state of the sequence.</returns>
public abstract int[] ProcessSequenceCRF(Sequence pSequence, RunningMode runningMode);
/// <summary>
/// Creates a verifier log bound to the given running mode and parent GUI form.
/// </summary>
/// <param name="mode">The running mode this log belongs to.</param>
/// <param name="parentForm">The main form that owns this log.</param>
public VerifierLog(RunningMode mode, MainForm parentForm)
{
    Mode = mode;
    ParentForm = parentForm;
}
/// <summary>
/// Process a given sequence by bi-directional recurrent neural network.
/// Runs the forward pass from bottom layer to top layer; in training mode,
/// additionally computes per-layer errors and updates the network weights.
/// </summary>
/// <param name="pSequence">The sequence to process.</param>
/// <param name="runningMode">Train enables training-time layer state and weight updates.</param>
/// <returns>The raw output-layer score matrix.</returns>
public override Matrix<double> ProcessSequence(Sequence pSequence, RunningMode runningMode)
{
    List<SimpleLayer[]> layerList;
    Matrix<double> rawOutputLayer;

    //Forward process from bottom layer to top layer
    SimpleLayer[] seqOutput = ComputeLayers(pSequence, runningMode == RunningMode.Train, out layerList, out rawOutputLayer);

    if (runningMode != RunningMode.Test)
    {
        int numStates = pSequence.States.Length;
        for (int curState = 0; curState < numStates; curState++)
        {
            // Small epsilon guards against Log10(0).
            logp += Math.Log10(seqOutput[curState].cellOutput[pSequence.States[curState].Label] + 0.0001);
        }
    }

    if (runningMode == RunningMode.Train)
    {
        //In training mode, we calculate each layer's error and update their net weights
        List<double[][]> fErrLayers;
        List<double[][]> bErrLayers;
        ComputeDeepErr(pSequence, seqOutput, out fErrLayers, out bErrLayers);
        DeepLearningNet(pSequence, seqOutput, fErrLayers, bErrLayers, layerList);
    }

    return rawOutputLayer;
}
/// <summary>
/// Computes the bottom layer's output for the whole sequence: a forward pass
/// left-to-right and a backward pass right-to-left over the raw state features,
/// followed by a merge of the two directions.
/// </summary>
/// <param name="sequence">The sequence whose states feed the bottom layer.</param>
/// <param name="forwardLayer">The left-to-right bottom layer.</param>
/// <param name="backwardLayer">The right-to-left bottom layer.</param>
/// <param name="runningMode">Running mode (unused here — TODO confirm it is needed for the signature only).</param>
private void ComputeBottomLayer(Sequence sequence, ILayer forwardLayer, ILayer backwardLayer, RunningMode runningMode)
{
    var stateCount = sequence.States.Length;

    // Forward direction: left to right over the raw dense/sparse features.
    forwardLayer.Reset();
    for (var idx = 0; idx < stateCount; idx++)
    {
        var state = sequence.States[idx];
        forwardLayer.ForwardPass(state.SparseFeature, state.DenseFeature.CopyTo());
        forwardLayer.CopyNeuronTo(forwardCellList[0][idx]);
    }

    // Backward direction: right to left.
    backwardLayer.Reset();
    for (var idx = stateCount - 1; idx >= 0; idx--)
    {
        var state = sequence.States[idx];
        backwardLayer.ForwardPass(state.SparseFeature, state.DenseFeature.CopyTo());
        backwardLayer.CopyNeuronTo(backwardCellList[0][idx]);
    }

    // Combine both directions into the merged output of layer 0.
    MergeForwardBackwardLayers(stateCount, forwardLayer.LayerSize, 0);
}
/// <summary>
/// Process a given sequence by bi-directional recurrent neural network and CRF:
/// runs the network forward, applies forward-backward and Viterbi decoding, and —
/// when training — updates CRF bigram transitions and the network weights.
/// </summary>
/// <param name="pSequence">The sequence to process.</param>
/// <param name="runningMode">Train enables the CRF transition and weight updates.</param>
/// <returns>The Viterbi-decoded label index for each state.</returns>
public override int[] ProcessSequenceCRF(Sequence pSequence, RunningMode runningMode)
{
    //Reset the network
    int numStates = pSequence.States.Length;

    List<SimpleLayer[]> layerList;
    Matrix<double> rawOutputLayer;

    SimpleLayer[] seqOutput = ComputeLayers(pSequence, runningMode == RunningMode.Train, out layerList, out rawOutputLayer);

    ForwardBackward(numStates, rawOutputLayer);

    if (runningMode != RunningMode.Test)
    {
        //Merge forward and backward
        for (int curState = 0; curState < numStates; curState++)
        {
            // Small epsilon guards against Log10(0).
            logp += Math.Log10(CRFSeqOutput[curState][pSequence.States[curState].Label] + 0.0001);
        }
    }

    int[] predict = Viterbi(rawOutputLayer, numStates);

    if (runningMode == RunningMode.Train)
    {
        UpdateBigramTransition(pSequence);

        List<double[][]> fErrLayers;
        List<double[][]> bErrLayers;
        ComputeDeepErr(pSequence, seqOutput, out fErrLayers, out bErrLayers, true);
        DeepLearningNet(pSequence, seqOutput, fErrLayers, bErrLayers, layerList);
    }

    return predict;
}
/// <summary>
/// Builds a sequence from the raw sentence with the given featurizer, then
/// delegates to the Sequence overload.
/// </summary>
/// <returns>The predicted label index for each state.</returns>
public override int[] ProcessSequence(ISentence sentence, Config featurizer, RunningMode runningMode, bool outputRawScore, out Matrix<float> m)
{
    // Featurize first, then process.
    var builtSequence = featurizer.BuildSequence(sentence as Sentence);
    return ProcessSequence(builtSequence, runningMode, outputRawScore, out m);
}
/// <summary>
/// Initializes the client side of this module against the global cloud storage.
/// It is guaranteed that CloudStorage can be accessed (server started) before a
/// client calls this method.
/// </summary>
/// <param name="remoteRunningMode">The running mode of the remote side.</param>
public unsafe void ClientInitialize(RunningMode remoteRunningMode)
{
    // Delegate to the overload, defaulting to the global cloud storage.
    ClientInitialize(remoteRunningMode, Global.CloudStorage);
}
/// <summary>
/// Creates event arguments carrying the running mode associated with the event.
/// </summary>
/// <param name="mode">The running mode exposed via the Mode property.</param>
public RunningModeEventArgs(RunningMode mode) { Mode = mode; }
/// <summary>
/// Runs the output layer over every state of the sequence, using per-state output
/// layer copies (seqFinalOutput) that share weights with OutputLayer. The copies
/// are created lazily and reused across calls; they are only reallocated when the
/// current sequence is longer than any seen before.
/// </summary>
/// <param name="pSequence">The sequence being processed.</param>
/// <param name="rawOutputLayer">Raw output scores per state, or null when not requested.</param>
/// <param name="runningMode">Running mode (unused here — TODO confirm it is needed for the signature only).</param>
/// <param name="outputRawScore">True to allocate and fill the raw score matrix.</param>
/// <returns>The per-state output layer instances holding each state's output cells.</returns>
private SimpleLayer[] ComputeTopLayer(Sequence pSequence, out Matrix<float> rawOutputLayer, RunningMode runningMode, bool outputRawScore)
{
    var numStates = pSequence.States.Length;
    var lastLayerOutputs = layersOutput[forwardHiddenLayers.Count - 1];

    //Calculate output layer
    Matrix<float> tmpOutputResult = null;
    if (outputRawScore)
    {
        tmpOutputResult = new Matrix<float>(numStates, OutputLayer.LayerSize);
    }

    var labelSet = pSequence.States.Select(state => state.Label).ToList();

    //Initialize output layer or reallocate it
    if (seqFinalOutput == null || seqFinalOutput.Length < numStates)
    {
        seqFinalOutput = new SimpleLayer[numStates];
        for (var i = 0; i < numStates; i++)
        {
            // Instantiate the same concrete layer type as OutputLayer and share
            // its weights (shallow copy), so each state has its own cell buffer.
            seqFinalOutput.SetValue(Activator.CreateInstance(OutputLayer.GetType(), OutputLayer.LayerConfig), i);
            OutputLayer.ShallowCopyWeightTo(seqFinalOutput[i]);
        }
    }

    for (var curState = 0; curState < numStates; curState++)
    {
        var state = pSequence.States[curState];
        var outputCells = seqFinalOutput[curState];
        outputCells.LabelShortList = labelSet;
        outputCells.ForwardPass(state.SparseFeature, lastLayerOutputs[curState]);

        if (outputRawScore)
        {
            outputCells.Cells.CopyTo(tmpOutputResult[curState], 0);
        }
    }

    rawOutputLayer = tmpOutputResult;
    return (seqFinalOutput);
}
/// <summary>
/// Receives the current mode from the client:
/// competition, or training (training requires a target area number).
/// </summary>
/// <param name="model">The running mode sent by the client.</param>
public void SetRunningModel(RunningMode model)
{
    // Not implemented in this variant of the service.
    throw new NotImplementedException();
}
// Disposes and clears every designer outlet. This is designer-generated
// boilerplate (Xamarin.iOS pattern); each outlet is disposed and nulled so the
// view controller does not retain native views after the view is unloaded.
void ReleaseDesignerOutlets()
{
    if (ConnectionTimeout != null) { ConnectionTimeout.Dispose(); ConnectionTimeout = null; }
    if (DBInitialisedOn != null) { DBInitialisedOn.Dispose(); DBInitialisedOn = null; }
    if (UpdateAppUri != null) { UpdateAppUri.Dispose(); UpdateAppUri = null; }
    if (EventUTCOffset != null) { EventUTCOffset.Dispose(); EventUTCOffset = null; }
    if (NotificationsBtn != null) { NotificationsBtn.Dispose(); NotificationsBtn = null; }
    if (PromptsBtn != null) { PromptsBtn.Dispose(); PromptsBtn = null; }
    if (LogsBtn != null) { LogsBtn.Dispose(); LogsBtn = null; }
    if (PromptView != null) { PromptView.Dispose(); PromptView = null; }
    if (NotificationView != null) { NotificationView.Dispose(); NotificationView = null; }
    if (CancelBtn != null) { CancelBtn.Dispose(); CancelBtn = null; }
    if (DatabaseSize != null) { DatabaseSize.Dispose(); DatabaseSize = null; }
    if (RunningMode != null) { RunningMode.Dispose(); RunningMode = null; }
    if (CurrentVersion != null) { CurrentVersion.Dispose(); CurrentVersion = null; }
    if (GoLiveDate != null) { GoLiveDate.Dispose(); GoLiveDate = null; }
    if (ResponderRegion != null) { ResponderRegion.Dispose(); ResponderRegion = null; }
    if (EventPasscode != null) { EventPasscode.Dispose(); EventPasscode = null; }
    if (RefreshViewBtn != null) { RefreshViewBtn.Dispose(); RefreshViewBtn = null; }
    if (EventName != null) { EventName.Dispose(); EventName = null; }
    if (SyncDataBtn != null) { SyncDataBtn.Dispose(); SyncDataBtn = null; }
    if (ResponderId != null) { ResponderId.Dispose(); ResponderId = null; }
    if (SyncDataInterval != null) { SyncDataInterval.Dispose(); SyncDataInterval = null; }
    if (SyncMode != null) { SyncMode.Dispose(); SyncMode = null; }
    if (WANServiceUri != null) { WANServiceUri.Dispose(); WANServiceUri = null; }
    if (SaveChangesBtn != null) { SaveChangesBtn.Dispose(); SaveChangesBtn = null; }
    if (LANServiceUri != null) { LANServiceUri.Dispose(); LANServiceUri = null; }
    if (LastSuccessfulDataPush != null) { LastSuccessfulDataPush.Dispose(); LastSuccessfulDataPush = null; }
    if (LogView != null) { LogView.Dispose(); LogView = null; }
    if (LastSuccessfulServiceUpdate != null) { LastSuccessfulServiceUpdate.Dispose(); LastSuccessfulServiceUpdate = null; }
}
/// <summary>
/// Stores the running mode to be used by subsequent processing.
/// </summary>
/// <param name="mode">The running mode to apply.</param>
public void SetRunningMode(RunningMode mode) { runningMode = mode; }
/// <summary>
/// Processes a sequence-to-sequence pair (source sentence and target sequence).
/// </summary>
/// <param name="pSequence">The sequence pair to process.</param>
/// <param name="runningMode">Whether this pass trains the network or only predicts.</param>
/// <returns>The predicted label index for each target state.</returns>
public abstract int[] ProcessSeq2Seq(SequencePair pSequence, RunningMode runningMode);
/// <summary>
/// Scans all loaded types for TSL communication schemas declared (via
/// <c>CommunicationSchemaAttribute</c>) on server or proxy communication instance
/// types, and instantiates one schema object per declaration.
/// </summary>
/// <param name="schemaRunningMode">Must be Server or Proxy; selects the base type to scan for.</param>
/// <returns>One instantiated schema per annotated communication instance type.</returns>
internal static IEnumerable<ICommunicationSchema> ScanForTSLCommunicationSchema(RunningMode schemaRunningMode)
{
    Debug.Assert(schemaRunningMode == RunningMode.Server || schemaRunningMode == RunningMode.Proxy);

    // Fix: removed unused locals `schema_interface_type` and `default_schema_type`.
    var comm_instance_base_type = schemaRunningMode == RunningMode.Server ? typeof(TrinityServer) : typeof(TrinityProxy);

    // Collect the schema attribute (if any) from each matching instance type.
    var comm_instance_schema_attrs =
        from type in AssemblyUtility.GetAllTypes()
        where comm_instance_base_type.IsAssignableFrom(type)
        select type.GetCustomAttributes(typeof(CommunicationSchemaAttribute), inherit: true).FirstOrDefault() as CommunicationSchemaAttribute;

    // Instantiate each declared schema type via its parameterless constructor.
    var schema_instances =
        from schema_attr in comm_instance_schema_attrs
        where schema_attr != null
        select schema_attr.CommunicationSchemaType.GetConstructor(new Type[] { }).Invoke(new object[] { }) as ICommunicationSchema;

    return schema_instances;
}
/// <summary>
/// Processes the entire training set sequentially with a single RNN instance,
/// skipping over-length sequences, accumulating token/sentence error counters,
/// and periodically logging progress and saving an intermediate model.
/// </summary>
/// <param name="rnn">The RNN instance used for all sequences.</param>
/// <param name="trainingSet">The corpus to process; it is shuffled in place first.</param>
/// <param name="runningMode">Passed through to the per-sequence processing calls.</param>
/// <param name="totalSequenceNum">Total sequence count, used only for progress reporting.</param>
public void Process(RNN<T> rnn, DataSet<T> trainingSet, RunningMode runningMode, int totalSequenceNum)
{
    //Shuffle training corpus
    trainingSet.Shuffle();

    for (var i = 0; i < trainingSet.SequenceList.Count; i++)
    {
        var pSequence = trainingSet.SequenceList[i];

        // Token count for this sequence; over-length sequences are skipped.
        int wordCnt = 0;
        if (pSequence is Sequence)
        {
            wordCnt = (pSequence as Sequence).States.Length;
        }
        else
        {
            SequencePair sp = pSequence as SequencePair;
            if (sp.srcSentence.TokensList.Count > rnn.MaxSeqLength)
            {
                continue;
            }
            wordCnt = sp.tgtSequence.States.Length;
        }

        if (wordCnt > rnn.MaxSeqLength)
        {
            continue;
        }

        // NOTE(review): Interlocked is used although this loop is sequential —
        // presumably the counters are shared with a parallel code path; confirm.
        Interlocked.Add(ref processedWordCnt, wordCnt);

        int[] predicted;
        if (IsCRFTraining)
        {
            predicted = rnn.ProcessSequenceCRF(pSequence as Sequence, runningMode);
        }
        else
        {
            Matrix<float> m;
            predicted = rnn.ProcessSequence(pSequence, runningMode, false, out m);
        }

        int newTknErrCnt;
        if (pSequence is Sequence)
        {
            newTknErrCnt = GetErrorTokenNum(pSequence as Sequence, predicted);
        }
        else
        {
            newTknErrCnt = GetErrorTokenNum((pSequence as SequencePair).tgtSequence, predicted);
        }

        Interlocked.Add(ref tknErrCnt, newTknErrCnt);
        if (newTknErrCnt > 0)
        {
            Interlocked.Increment(ref sentErrCnt);
        }

        Interlocked.Increment(ref processedSequence);

        if (processedSequence % 1000 == 0)
        {
            Logger.WriteLine("Progress = {0} ", processedSequence / 1000 + "K/" + totalSequenceNum / 1000.0 + "K");
            Logger.WriteLine(" Error token ratio = {0}%", (double)tknErrCnt / (double)processedWordCnt * 100.0);
            Logger.WriteLine(" Error sentence ratio = {0}%", (double)sentErrCnt / (double)processedSequence * 100.0);
        }

        if (ModelSettings.SaveStep > 0 && processedSequence % ModelSettings.SaveStep == 0)
        {
            //After processed every m_SaveStep sentences, save current model into a temporary file
            Logger.WriteLine("Saving temporary model into file...");
            rnn.SaveModel("model.tmp");
        }
    }
}
/// <summary>
/// Processes the given sequence with the network.
/// </summary>
/// <param name="pSequence">The sequence to process.</param>
/// <param name="runningMode">Whether the network is training or only predicting.</param>
/// <param name="outputRawScore">True to also produce the raw output-layer scores.</param>
/// <param name="m">Raw output-layer scores per state; null when <paramref name="outputRawScore"/> is false (per the implementations in this file).</param>
/// <returns>The predicted label index for each state of the sequence.</returns>
public abstract int[] ProcessSequence(Sequence pSequence, RunningMode runningMode, bool outputRawScore, out Matrix <double> m);
/// <summary>
/// Builds a sequence from the raw sentence with the given featurizer, then
/// processes it with the network.
/// </summary>
/// <param name="sentence">The raw sentence to featurize and process.</param>
/// <param name="featurizer">The configuration used to build the sequence from the sentence.</param>
/// <param name="runningMode">Whether the network is training or only predicting.</param>
/// <param name="outputRawScore">True to also produce the raw output-layer scores.</param>
/// <param name="m">Raw output-layer scores per state; null when <paramref name="outputRawScore"/> is false (per the implementations in this file).</param>
/// <returns>The predicted label index for each state.</returns>
public abstract int[] ProcessSequence(ISentence sentence, Config featurizer, RunningMode runningMode, bool outputRawScore, out Matrix <float> m);
/// <summary>
/// Predicts a sequence with the bi-directional network plus a CRF layer:
/// decodes raw network output, applies forward-backward and Viterbi, and — when
/// training — updates CRF bigram transitions and both RNNs' weights.
/// </summary>
/// <param name="pSequence">The sequence to predict.</param>
/// <param name="runningMode">Train triggers transition and weight updates.</param>
/// <returns>The Viterbi-decoded label index for each state.</returns>
public override int[] PredictSentenceCRF(Sequence pSequence, RunningMode runningMode)
{
    //Reset the network
    int numStates = pSequence.States.Length;

    //Predict output
    Matrix<neuron> mergedHiddenLayer = null;
    Matrix<double> rawOutputLayer = null;
    neuron[][] seqOutput = InnerDecode(pSequence, out mergedHiddenLayer, out rawOutputLayer);

    ForwardBackward(numStates, rawOutputLayer);

    if (runningMode != RunningMode.Test)
    {
        //Get the best result
        for (int i = 0; i < numStates; i++)
        {
            logp += Math.Log10(CRFSeqOutput[i][pSequence.States[i].Label]);
        }
    }

    int[] predict = Viterbi(rawOutputLayer, numStates);

    if (runningMode == RunningMode.Train)
    {
        UpdateBigramTransition(pSequence);

        //Update hidden-output layer weights
        for (int curState = 0; curState < numStates; curState++)
        {
            int label = pSequence.States[curState].Label;

            //For standard RNN: error is (target - CRF marginal), with target 1 at
            //the gold label and 0 elsewhere.
            for (int c = 0; c < L2; c++)
            {
                seqOutput[curState][c].er = -CRFSeqOutput[curState][c];
            }
            seqOutput[curState][label].er = 1 - CRFSeqOutput[curState][label];
        }

        LearnTwoRNN(pSequence, mergedHiddenLayer, seqOutput);
    }

    return predict;
}