Example #1
0
        /// <summary>
        /// Initializes the training model: hooks up property-change handling, resets the
        /// trained flag and creates empty containers for the training-package selection,
        /// the selected data sets and the progress log.
        /// </summary>
        public TraModel()
        {
            // Subscribe first so any notifying property writes below are observed.
            // NOTE(review): assumes 'trained' raises PropertyChanged — confirm with the property definition.
            this.PropertyChanged += TraModel_PropertyChanged;

            trained = false;

            _selectionTrainingPackage = new TrainingPackage();
            _selectedTrainingSets = new ObservableCollection<DataSet>();
            _progressLog = new ProgressLog();

            // React to additions/removals on the freshly created selection collection.
            _selectedTrainingSets.CollectionChanged += _selectedTrainingSets_CollectionChanged;
        }
        /// <summary>
        /// Demonstrates a full train/classify cycle: builds the worked-example training
        /// package, trains an LDA pattern recognizer on it and classifies a single
        /// two-feature vector (curvature, diameter).
        /// </summary>
        public override void Run()
        {
            // Sample vector taken from the worked LDA example: { curvature, diameter }.
            double[] itemToClassify = new double[] { 2.81, 5.46 };

            _trainingPackage = MakeTrainingPackage();

            // Now we initialize and train the LDA pattern recognizer.
            // Input dimension is 2 because we have 2 features per feature vector; output
            // dimension is 27 because the final application has 27 possible movements
            // including rest (movement 0). Both dimensions are derived by the recognizer
            // from the assigned training package.
            _patternRecognizer = GenericFactory<PatternRecognizer>.Instance.CreateProduct("LDA");

            _patternRecognizer.trainingPackage = _trainingPackage;

            _patternRecognizer.activationFunctionIdx = 0;
            _patternRecognizer.normalizerIdx = 0;

            _patternRecognizer.RunTraining();

            // Classify returns the activation vector directly; the original's dead
            // pre-allocation of a 27-element result buffer was removed.
            double[] result = (double[])_patternRecognizer.Classify(itemToClassify);
        }
Example #3
0
        /// <summary>
        /// Starting from data recorded or loaded at the acquisition stage, this method produces a training set for the
        /// training stage. Runs the pretreatment and feature-extraction pipeline synchronously,
        /// then assembles the resulting windows into the training package.
        /// </summary>
        public void ProcessRecording()
        {
            running = true;

            // Carry the acquisition parameters over to the pretreated recording.
            _pretreatedRecording.parameters = _acqRecording.parameters;

            // A sorted copy of the selected feature names drives the extraction stage.
            _treatmentConfig.features = selectedFeatures.ToList<string>();
            _treatmentConfig.features.Sort();

            _featureExtractor.selectedFeatures = _treatmentConfig.features;

            _trainingPackage = new TrainingPackage();

            Pretreat();

            _pipeline.Init();
            _pipeline.Start();

            // Drain the WindowMaker's collection; enumeration only completes once that
            // stage marks the collection as finished, so this blocks until it is done.
            foreach (object generic in _windowMaker.lockingCollection.GetConsumingEnumerable()) ;
            //A dirty way of waiting for the WindowMaker stage to finish!
            _pipeline.Stop();

            _treatedWindows = _featureExtractor.outputData;

            _trainingPackage.recordingConfig = _acqRecording.parameters;
            //The following line is intended to make the DataProviders work as intended with online data.
            _trainingPackage.recordingConfig.scheduleWarmupItems = 0;
            _trainingPackage.treatmentConfig = _treatmentConfig;

            BuildTrainingPackage();

            treated = true;

            running = false;
            //And hopefully we have now a training package :)
        }
Example #4
0
 /// <summary>
 /// Initializes a new TrainingPackage, effectively dereferencing the existing one,
 /// and marks the treatment stage as not done so it can be run again.
 /// </summary>
 public void ClearTreatment()
 {
     _trainingPackage = new TrainingPackage();
     // Reset after the package swap; NOTE(review): 'treated' may notify observers — keep this order.
     treated = false;
 }
Example #5
0
 /// <summary>
 /// Builds an LDA recognizer bound to the given training package and runs the
 /// LDA-specific initialization after the base constructor completes.
 /// </summary>
 /// <param name="trainingPackage">Training data handed to the base PatternRecognizer.</param>
 public LDAPatternRecognizer(TrainingPackage trainingPackage)
     : base(trainingPackage) => Init();
Example #6
0
        /// <summary>
        /// Used to remove elements from the feature vectors belonging to channels that are not active.
        /// Frames stored in each DataWindow are also discarded.
        /// </summary>
        /// <param name="inputDataSet">Source DataSet whose windows are filtered.</param>
        /// <param name="trainingPackage">Package whose recordingConfig supplies the channel mask and
        /// active-channel count. Fix: the parameter was previously misspelled "trainigPackage" and
        /// therefore never used — the body silently bound to a member with the intended name.</param>
        /// <returns>A DataSet object containing no frames and only features for the active channels.</returns>
        private DataSet CreateLightDataSet(DataSet inputDataSet, TrainingPackage trainingPackage)
        {
            DataSet outputDataSet = new DataSet(inputDataSet.movementCode, inputDataSet.movementComposition);

            foreach (DataWindow window in inputDataSet.set)
            {
                DataWindow lightWindow = new DataWindow();

                foreach (string featureName in window.features.Keys)
                {
                    object placeholder;
                    window.features.TryGetValue(featureName, out placeholder);
                    double[] channelVector = (double[])placeholder;

                    // Compact the per-channel values down to the active channels only.
                    double[] cleanVector = new double[trainingPackage.recordingConfig.activeChannels];
                    int pos = 0;

                    for (int i = 0; i < channelVector.Length; i++)
                    {
                        if (trainingPackage.recordingConfig.channelMask[i])
                        {
                            cleanVector[pos] = channelVector[i];
                            pos++;
                        }
                    }

                    lightWindow.features.Add(featureName, cleanVector);
                }

                outputDataSet.set.Add(lightWindow);
            }

            return outputDataSet;
        }
Example #7
0
        /// <summary>
        /// Performs the training of the selected PatternRecognizer using the current
        /// selection training package, logging progress, registering the trained
        /// recognizer and resetting the selection for the next training run.
        /// </summary>
        public void RunTraining()
        {
            trainingFailed = false;

            if (_selectionTrainingPackage.trainingSets.Count > 0 && _patternRecognizer != null)
            {
                // "ID" is a static property on the concrete recognizer type, read via reflection
                // (hence the null target passed to GetValue).
                progressLog.logItems.Add(new ProgressLogItem(ProgressLogItem.Info, String.Format("Training {0} pattern recognizer", _patternRecognizer.GetType().GetProperty("ID").GetValue(null, null))));

                //We shouldn't need this anymore. inputDim and outputDim are calculated at the PatternRecognizer class
                //when a new trainingPackage is assigned to the PatternRecognizer.
                /*
                _patternRecognizer.inputDim = _selectionTrainingPackage.trainingSets.ElementAt(0).set.ElementAt(0).features.Count * (int)_selectionTrainingPackage.recordingConfig.nChannels;

                if (_patternRecognizer.multipleActivationEnabled) _patternRecognizer.outputDim = totalSingleMovements;
                else _patternRecognizer.outputDim = totalMovements; //This must be taken from the ViewModel
                */

                _patternRecognizer.trainingPackage = _selectionTrainingPackage;

                _patternRecognizer.RunTraining();

                progressLog.logItems.Add(new ProgressLogItem(ProgressLogItem.Info, String.Format("Training complete.")));

                InstanceManager<PatternRecognizer>.Instance.Register(_patternRecognizer);

                progressLog.logItems.Add(new ProgressLogItem(ProgressLogItem.Info, String.Format("List of trained pattern recognizers updated.")));

                trained = true;

                //We do this because from now on the used training package belongs to the trained PatternRecognizer only!
                //So we initialize a new _selectionTrainingPackage for the next training
                _selectionTrainingPackage = new TrainingPackage();
                UpdateSelectionTrainingPackage();

                // Training sets are no longer needed in this PatternRecognizer, so we clear them.
                // NOTE(review): this empties the data of the package just assigned to the trained
                // recognizer (its configs survive) — confirm nothing downstream re-reads the sets.
                _patternRecognizer.trainingPackage.trainingSets.Clear();

            }
            else
            {
                // Nothing selected or no recognizer chosen — report and flag the failure.
                progressLog.logItems.Add(new ProgressLogItem(ProgressLogItem.Info, String.Format("Training: nothing to do!")));
                trainingFailed = true;
            }
        }
Example #8
0
        /// <summary>
        /// Creates an untrained recognizer: runs common initialization, then resets the
        /// dimensions to zero and leaves no training package attached.
        /// </summary>
        public PatternRecognizer()
        {
            Init();

            // Explicit resets after Init() keep the constructed state deterministic.
            _inputDim = 0;
            _outputDim = 0;
            _trainingPackage = null;
        }
Example #9
0
 /// <summary>
 /// Builds a recognizer initialized via Init() and bound to the supplied training package.
 /// </summary>
 /// <param name="newTrainingPackage">Reference to the TrainingPackage instance that the PatternRecognizer will use for training and verification.</param>
 public PatternRecognizer(TrainingPackage newTrainingPackage)
 {
     Init();
     // Assigned after Init(); NOTE(review): Init() may set defaults this overrides — keep the order.
     _trainingPackage = newTrainingPackage;
 }
Example #10
0
 /// <summary>
 /// Performs a copy of the configuration values only, not copying any data:
 /// the recording and treatment configuration references are shared, and the
 /// movement codes are duplicated element by element.
 /// </summary>
 /// <param name="source">Package whose configuration is mirrored into this one.</param>
 public void Copy(TrainingPackage source)
 {
     recordingConfig = source.recordingConfig;
     treatmentConfig = source.treatmentConfig;

     movementCodes.Clear();
     foreach (int code in source.movementCodes)
     {
         movementCodes.Add(code);
     }
 }
        /// <summary>
        ///Prepare the training package with a training set based on the example solved in
        ///http://people.revoledu.com/kardi/tutorial/LDA/Numerical%20Example.html
        ///One set of vectors will be assigned to movement code 1, the other to movement code 2.
        /// </summary>
        /// <returns>A TrainingPackage holding the two worked-example training sets.</returns>
        private TrainingPackage MakeTrainingPackage()
        {
            // (curvature, diameter) pairs for each class, straight from the worked example.
            // Replaces ~40 lines of copy-pasted window construction with data tables.
            double[,] class1Samples = { { 2.95, 6.63 }, { 2.53, 7.79 }, { 3.57, 5.65 }, { 3.16, 5.47 } };
            double[,] class2Samples = { { 2.58, 4.46 }, { 2.16, 6.22 }, { 3.27, 3.52 } };

            TrainingPackage trainingPackage = new TrainingPackage();
            trainingPackage.trainingSets.Add(new DataSet(1, MakeWindowList(class1Samples)));
            trainingPackage.trainingSets.Add(new DataSet(2, MakeWindowList(class2Samples)));

            return trainingPackage;
        }

        /// <summary>
        /// Builds one DataWindow per row of <paramref name="samples"/>, each carrying a
        /// "curvature" feature (column 0) and a "diameter" feature (column 1).
        /// </summary>
        /// <param name="samples">Rows of (curvature, diameter) values.</param>
        /// <returns>The list of populated windows, in row order.</returns>
        private static List<DataWindow> MakeWindowList(double[,] samples)
        {
            List<DataWindow> windows = new List<DataWindow>();

            for (int row = 0; row < samples.GetLength(0); row++)
            {
                DataWindow window = new DataWindow();
                window.features.Add("curvature", samples[row, 0]);
                window.features.Add("diameter", samples[row, 1]);
                windows.Add(window);
            }

            return windows;
        }