Example #1
        public void EvalAudiogramUpdate(int frequencyPlotted, int dbPlotted, Audiogram.SymbolType symbol)
        {
            int ear, masked, transducer;
            int frequency;
            byte symb = (byte)symbol;
            Audiometer.channelSettings maskChannel;
            Audiometer.channelSettings testChannel;

            if (programMode != ProgramMode.Eval)
                return;

            frequency = FrequencyToIndex(frequencyPlotted);
            if (frequency < 0 || frequency > 5)
                return;

            ear = symb & Audiogram.RightEar;
            transducer = (symb & Audiogram.AirTrans) >> 1;
            masked = (symb & Audiogram.Masked) >> 2;
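            // The shifts above assume the symbol's low bits encode the point's attributes:
            // bit 0 (Audiogram.RightEar) selects the ear, bit 1 (Audiogram.AirTrans) the
            // transducer and bit 2 (Audiogram.Masked) the masking state, so that ear,
            // transducer and masked can be used directly as 0/1 array indices.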

            evalOutput.StudentAudiogram[frequency, transducer, masked, ear] = dbPlotted;

            evalOutput.StudentAudiometer[frequency, transducer, masked, ear].ameterSettings = admtr.ameterSettings;

            if (admtr.Channel1.interrupt)
            {
                maskChannel = admtr.Channel1;
                testChannel = admtr.Channel2;
            }
            else if (admtr.Channel2.interrupt)
            {
                maskChannel = admtr.Channel2;
                testChannel = admtr.Channel1;
            }
            else
            {
                // When one channel is interrupting (i.e. masking), the other channel is clearly the
                // presentation channel. Here neither channel is interrupting, and this function only knows
                // the point the user plotted, not how the user arrived at that response. With two channels
                // to choose from, rank both channels against the plotted point and pick the channel that
                // most likely produced it.

                Ear earType = ear == 0 ? Ear.left : Ear.right;
                Audiometer.TransducerType transType = transducer == 0 ? Audiometer.TransducerType.Bone : Audiometer.TransducerType.Insert;

                // These scores are incremented upon finding a similarity between the plotted point and the channel settings
                int ch1Score = 0, ch2Score = 0;

                if (admtr.Channel1.route == earType) ch1Score++;
                if (admtr.Channel2.route == earType) ch2Score++;
                if (admtr.Channel1.volume == dbPlotted) ch1Score++;
                if (admtr.Channel2.volume == dbPlotted) ch2Score++;
                if (admtr.Channel1.stim == Audiometer.StimulusType.Tone) ch1Score++;
                if (admtr.Channel2.stim == Audiometer.StimulusType.Tone) ch2Score++;

                if (transType == Audiometer.TransducerType.Insert)
                {
                    if (admtr.Channel1.trans == Audiometer.TransducerType.Insert ||
                        admtr.Channel1.trans == Audiometer.TransducerType.Phone)
                        ch1Score++;
                    if (admtr.Channel2.trans == Audiometer.TransducerType.Insert ||
                        admtr.Channel2.trans == Audiometer.TransducerType.Phone)
                        ch2Score++;     // score each channel independently, as with the checks above
                }
                else
                {
                    if (transType == admtr.Channel1.trans) ch1Score++;
                    if (transType == admtr.Channel2.trans) ch2Score++;
                }

                // Scoring is complete; pick the winning channel, defaulting to channel 1 on a tie
                if (ch1Score >= ch2Score)
                {
                    testChannel = admtr.Channel1;
                    maskChannel = admtr.Channel2;
                }
                else
                {
                    testChannel = admtr.Channel2;
                    maskChannel = admtr.Channel1;
                }
            }

            evalOutput.StudentAudiometer[frequency, transducer, masked, ear].testChannel = testChannel;
            evalOutput.StudentAudiometer[frequency, transducer, masked, ear].maskChannel = maskChannel;
        }
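
FrequencyToIndex is referenced above but not shown in this example. Given the 0-5 index check and the inverse mapping used in Example #4 (freq = 250 * 2^freqindex), a minimal sketch of the helper might look like the following; the project's actual implementation may differ.

        // Hypothetical sketch of the missing helper: maps one of the six standard audiometric
        // test frequencies (in Hz) to the 0-5 array index used by the evaluation code,
        // returning -1 for anything else.
        private int FrequencyToIndex(int frequencyHz)
        {
            switch (frequencyHz)
            {
                case 250:  return 0;
                case 500:  return 1;
                case 1000: return 2;
                case 2000: return 3;
                case 4000: return 4;
                case 8000: return 5;
                default:   return -1;
            }
        }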
Example #2
        /// <summary>
        /// Called when the whole program first loads
        /// </summary>
        private void VirtualLabToplevel_Load(object sender, System.EventArgs e)
        {
            // Display a welcome screen allowing the user to choose the program mode
            WelcomeScreen ws = new WelcomeScreen();
            DialogResult dResult = ws.ShowDialog();
            switch (dResult)
            {

                case DialogResult.Yes:  // practice mode
                    programMode = ProgramMode.Practice;
                    this.Text += " Practice Mode";
                    break;
                case DialogResult.No:   // evaluation mode
                    programMode = ProgramMode.Eval;
                    resultsShow.Visible = false;
                    pplotShowHide.Enabled = false;
                    this.Text += " Evaluation Mode";
                    break;
            }

            if (dResult != DialogResult.Cancel)
            {

                // get student parameters
                si.stName = ws.stName;
                si.stID = ws.stID;
                si.stCourse = ws.stCourse;
                si.stEmail = ws.stEmail;
                si.stProf = ws.stProf;

                // show the student's name in the title (if available)
                if (si.stName != string.Empty)
                    Text += "                                             Clinician: " + si.stName;

                // construct and show children
                admtr = new Audiometer(
                    new Audiometer.presentationDelegate(presFunc),
                    new Audiometer.stateChangedDelegate(stChang));
                admtr.MdiParent = this;
                admtr.Show();
                admtr.Location = new Point(0, 0);
                admtr.Size = new Size(661, 460);

                // Patient Window
                //ptwind = new PatientWindow(pt); //Changes made 04/12/2011
                ptwind = new PatientWindow();
                ptwind.MdiParent = this;
                ptwind.Show();
                ptwind.Location = new Point(0, admtr.Height);
                ptwind.Size = new Size(400, 450);

                adgrm = new Audiogram();
                adgrm.vltl = this;
                adgrm.patient = pt; // delete me
                adgrm.MdiParent = this;
                adgrm.Show();
                adgrm.Location = new Point(admtr.Width,0);
                adgrm.SpAudInput.MdiParent = this;
                adgrm.SpAudInput.Location = new Point(750, 496);

                pplot = new Plateau_Plot.Plateau_Plot();
                pplot.MdiParent = this;
                pplot.Show();
                pplot.Location = new Point(adgrm.Width, adgrm.Height);
                pplot.Visible = false;

                // SRT
                srtWind = new SRT.SRT(
                    new SRT.SRT.ResponseDelegate(DoResponse),
                    new SRT.SRT.DoneDelegate(EnableSpeechMenu));
                srtWind.MdiParent = this;
                srtWind.Location = new Point(750, 496);
                srtWind.Hide();

                // WI
                wiWind = new WI.WI(
                    new WI.WI.DoneDelegate(EnableSpeechMenu),
                    new WI.WI.ResponseDelegate(DoResponse));
                wiWind.MdiParent = this;
                wiWind.Location = new Point(750, 496);
                wiWind.Hide();

                // show form
                this.Visible = true;
                WindowState = System.Windows.Forms.FormWindowState.Maximized;
                prevState = admtr.ameterSettings;
            } // end if ( dResult != Cancel )
        }
Example #3
        /// <summary>
        /// Parses the student's tested thresholds from the audiogram and compares them
        /// to the patient's actual thresholds
        /// </summary>
        /// <param name="adgrm"> the audiogram used in the test</param>
        /// <param name="pt"> the patient tested</param>
        public void storePureToneResults(Audiogram adgrm, Patient.Patient pt)
        {
            // this will go through each point plotted on the audiogram and
            // compare it to the corresponding threshold

            // values returned from audiogram
            string[] adgrm_freq, adgrm_dB, adgrm_cursors;
            string adgrmPts;
            string[] adgrmPtsSplit;

            // Array lists containing the AC and BC information
            ArrayList l_ac_results = new ArrayList();
            ArrayList l_bc_results = new ArrayList();
            ArrayList r_ac_results = new ArrayList();
            ArrayList r_bc_results = new ArrayList();

            // first get the points string and parse it
            adgrmPts = adgrm.GetPlottedPoints(false);
            adgrmPtsSplit = adgrmPts.Split("\n".ToCharArray());

            adgrm_freq = adgrmPtsSplit[0].Split(",".ToCharArray());
            adgrm_dB = adgrmPtsSplit[1].Split(",".ToCharArray());
            adgrm_cursors = adgrmPtsSplit[2].Split(",".ToCharArray());

            // check the empty condition
            if (adgrm_freq[0] == string.Empty) return;

            // sort the pts into AC/BC bins
            int iter = 0;
            foreach (string str in adgrm_cursors)
            {
                // parse out the pt info
                int pt_freq = Convert.ToInt32(adgrm_freq[iter]);
                int pt_dB = Convert.ToInt32(adgrm_dB[iter]);
                int temp_dB, index;

                TestResults tr_temp = new TestResults(pt_freq, pt_dB, string.Empty);

                // grab the corresponding freq and thresh
                switch (str)
                {
                    case "AC_Left":
                    case UNICODE_SQUARE:
                        // left ear AC thresh
                        index = l_ac_results.BinarySearch(tr_temp);
                        if (index < 0)
                        {
                            l_ac_results.Insert(~index, tr_temp);   // insert in sorted order so later BinarySearch calls stay valid
                        }
                        else
                        {
                            // give it the max dB
                            temp_dB = ((TestResults)l_ac_results[index]).tr_dB;
                            ((TestResults)l_ac_results[index]).tr_dB =
                                (temp_dB > tr_temp.tr_dB) ? temp_dB : tr_temp.tr_dB;
                        }
                        break;
                    case "AC_Right":
                    case UNICODE_TRIANGLE:
                        // right ear AC thresh
                        index = r_ac_results.BinarySearch(tr_temp);
                        if (index < 0)
                        {
                            r_ac_results.Insert(~index, tr_temp);
                        }
                        else
                        {
                            // give it the max dB
                            temp_dB = ((TestResults)r_ac_results[index]).tr_dB;
                            ((TestResults)r_ac_results[index]).tr_dB =
                                (temp_dB > tr_temp.tr_dB) ? temp_dB : tr_temp.tr_dB;
                        }
                        break;
                    case "BC_Left":
                    case "]":
                        // left ear BC thresh
                        index = l_bc_results.BinarySearch(tr_temp);
                        if (index < 0)
                        {
                            l_bc_results.Insert(~index, tr_temp);
                        }
                        else
                        {
                            // give it the max dB
                            temp_dB = ((TestResults)l_bc_results[index]).tr_dB;
                            ((TestResults)l_bc_results[index]).tr_dB =
                                (temp_dB > tr_temp.tr_dB) ? temp_dB : tr_temp.tr_dB;
                        }
                        break;
                    case "BC_Right":
                    case "[":
                        // right ear BC thresh
                        index = r_bc_results.BinarySearch(tr_temp);
                        if (index < 0)
                        {
                            r_bc_results.Insert(~index, tr_temp);
                        }
                        else
                        {
                            // give it the max dB
                            temp_dB = ((TestResults)r_bc_results[index]).tr_dB;
                            ((TestResults)r_bc_results[index]).tr_dB =
                                (temp_dB > tr_temp.tr_dB) ? temp_dB : tr_temp.tr_dB;
                        }
                        break;
                    default:
                        Console.WriteLine("Unrecognized cursor symbol: " + str);
                        break;
                }

                iter++;
            }

            // sort and fill comments
            if (l_ac_results.Count > 0)
            {
                l_ac_results.Sort();
                fillComments(ref l_ac_results, pt, "lac");
            }
            if (l_bc_results.Count > 0)
            {
                l_bc_results.Sort();
                fillComments(ref l_bc_results, pt, "lbc");
            }
            if (r_ac_results.Count > 0)
            {
                r_ac_results.Sort();
                fillComments(ref r_ac_results, pt, "rac");
            }
            if (r_bc_results.Count > 0)
            {
                r_bc_results.Sort();
                fillComments(ref r_bc_results, pt, "rbc");
            }

            // add info to the results string
            pureToneResults = "Pure tone results for " + pt.GetPath().PathType + " pathology.\n";
            // first the Left AC
            pureToneResults += "Left Ear Air Conduction Results:\n";
            for (int i = 0; i < l_ac_results.Count; i++)
            {
                TestResults temp_tr = (TestResults)l_ac_results[i];
                pureToneResults += "At " + temp_tr.tr_freq + ", " + temp_tr.tr_desc + "\n";
            }
            // then the Right AC
            pureToneResults += "Right Ear Air Conduction Results:\n";
            for (int i = 0; i < r_ac_results.Count; i++)
            {
                TestResults temp_tr = (TestResults)r_ac_results[i];
                pureToneResults += "At " + temp_tr.tr_freq + ", " + temp_tr.tr_desc + "\n";
            }
            // then the Left BC
            pureToneResults += "Left Ear Bone Conduction Results:\n";
            for (int i = 0; i < l_bc_results.Count; i++)
            {
                TestResults temp_tr = (TestResults)l_bc_results[i];
                pureToneResults += "At " + temp_tr.tr_freq + ", " + temp_tr.tr_desc + "\n";
            }
            // lastly the Right BC
            pureToneResults += "Right Ear Bone Conduction Results:\n";
            for (int i = 0; i < r_bc_results.Count; i++)
            {
                TestResults temp_tr = (TestResults)r_bc_results[i];
                pureToneResults += "At " + temp_tr.tr_freq + ", " + temp_tr.tr_desc + "\n";
            }
        }
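
The four switch branches above repeat the same merge-or-update logic. As a sketch only (not the project's code), that logic could live in one helper so each case reduces to a single call such as MergeResult(l_ac_results, tr_temp):

        // Sketch: merge a plotted point into a results list, keeping the maximum dB value when
        // the same frequency was already recorded. Inserting at ~index keeps the list sorted,
        // so the BinarySearch lookups remain valid between insertions.
        private static void MergeResult(ArrayList results, TestResults point)
        {
            int index = results.BinarySearch(point);
            if (index < 0)
            {
                results.Insert(~index, point);
            }
            else
            {
                TestResults existing = (TestResults)results[index];
                if (point.tr_dB > existing.tr_dB)
                    existing.tr_dB = point.tr_dB;
            }
        }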
Example #4
        public void CompileResults(Audiogram audiogram)
        {
            StudentMaskedMaximums[0, 0, 0] = Convert.ToInt32(audiogram.txtLMaskBC250.Text);
            StudentMaskedMaximums[0, 0, 1] = Convert.ToInt32(audiogram.txtRMaskBC250.Text);
            StudentMaskedMaximums[0, 1, 0] = Convert.ToInt32(audiogram.txtLMaskAC250.Text);
            StudentMaskedMaximums[0, 1, 1] = Convert.ToInt32(audiogram.txtRMaskAC250.Text);
            StudentMaskedMaximums[1, 0, 0] = Convert.ToInt32(audiogram.txtLMaskBC500.Text);
            StudentMaskedMaximums[1, 0, 1] = Convert.ToInt32(audiogram.txtRMaskBC500.Text);
            StudentMaskedMaximums[1, 1, 0] = Convert.ToInt32(audiogram.txtLMaskAC500.Text);
            StudentMaskedMaximums[1, 1, 1] = Convert.ToInt32(audiogram.txtRMaskAC500.Text);
            StudentMaskedMaximums[2, 0, 0] = Convert.ToInt32(audiogram.txtLMaskBC1000.Text);
            StudentMaskedMaximums[2, 0, 1] = Convert.ToInt32(audiogram.txtRMaskBC1000.Text);
            StudentMaskedMaximums[2, 1, 0] = Convert.ToInt32(audiogram.txtLMaskAC1000.Text);
            StudentMaskedMaximums[2, 1, 1] = Convert.ToInt32(audiogram.txtRMaskAC1000.Text);
            StudentMaskedMaximums[3, 0, 0] = Convert.ToInt32(audiogram.txtLMaskBC2000.Text);
            StudentMaskedMaximums[3, 0, 1] = Convert.ToInt32(audiogram.txtRMaskBC2000.Text);
            StudentMaskedMaximums[3, 1, 0] = Convert.ToInt32(audiogram.txtLMaskAC2000.Text);
            StudentMaskedMaximums[3, 1, 1] = Convert.ToInt32(audiogram.txtRMaskAC2000.Text);
            StudentMaskedMaximums[4, 0, 0] = Convert.ToInt32(audiogram.txtLMaskBC4000.Text);
            StudentMaskedMaximums[4, 0, 1] = Convert.ToInt32(audiogram.txtRMaskBC4000.Text);
            StudentMaskedMaximums[4, 1, 0] = Convert.ToInt32(audiogram.txtLMaskAC4000.Text);
            StudentMaskedMaximums[4, 1, 1] = Convert.ToInt32(audiogram.txtRMaskAC4000.Text);

            // No masking at 8000 Hz, but the code is here if needed
            //StudentMaskedMaximums[5, 0, 0] = Convert.ToInt32(audiogram.txtLMaskBC8000.Text);
            //StudentMaskedMaximums[5, 0, 1] = Convert.ToInt32(audiogram.txtRMaskBC8000.Text);
            //StudentMaskedMaximums[5, 1, 0] = Convert.ToInt32(audiogram.txtLMaskAC8000.Text);
            //StudentMaskedMaximums[5, 1, 1] = Convert.ToInt32(audiogram.txtRMaskAC8000.Text);

            EvalHtml eval = new EvalHtml();

            int freq, freqindex, cond, mask, ear;
            int colorCode;

            const int ColorCode_Red = 2;
            const int ColorCode_Yellow = 1;
            const int ColorCode_Green = 0;
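            // Grading used below: green means the plotted threshold matches the pathology value
            // within tolerance, yellow means it falls outside roughly a +/-5 dB window, and red
            // flags a procedural error (wrong symbol, ear, interrupt usage, masking, or frequency).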

            string sFreq, sConduction, sMasked, sEar;
            string outputColor;
            int pathologyValue;
            string studentValue;
            string tagID;

            for (freqindex = 0; freqindex <= 5; freqindex++)
            {
                freq = 250 * (int)Math.Pow(2, freqindex);
                sFreq = freq.ToString();
                for (cond = 0; cond <= 1; cond++)
                {
                    sConduction = cond == 0 ? "bc" : "ac";
                    for (mask = 0; mask <= 1; mask++)
                    {
                        sMasked = mask == 0 ? "u" : "m";
                        for (ear = 0; ear <= 1; ear++)
                        {
                            sEar = ear == 0 ? "l" : "r";

                            if (cond == 0 && mask == 0)
                                pathologyValue = pathology.BC_Thresh_Val(freq, (Ear) ear);
                            else if (cond == 1 && mask == 0)
                                pathologyValue = pathology.AC_Thresh_Val(freq, (Ear) ear);
                            else if (cond == 0 && mask == 1)
                                pathologyValue = pathology.BC_Mask_Val(freq, (Ear) ear);
                            else
                                pathologyValue = pathology.AC_Mask_Val(freq, (Ear) ear);

                            tagID = string.Format("{0}t{1}{2}{3}", sEar, sConduction, sMasked, sFreq);
                            eval.ChangeTagAttributes(tagID, pathologyValue.ToString(), "color:white");
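
                            // Tag IDs follow the pattern {ear}(t|s){conduction}{masking}{freq}; the "t" cell
                            // written above holds the pathology (target) value and the matching "s" cell,
                            // written at the bottom of the loop, holds the student's value (e.g. "ltacu250"
                            // and "lsacu250" for the left ear, unmasked air conduction at 250 Hz).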

                            colorCode = ColorCode_Green;

                            /*  Yellow Conditions  */
                            if (StudentAudiogram[freqindex, cond, mask, ear] < pathologyValue - 5 || StudentAudiogram[freqindex, cond, mask, ear] >= pathologyValue + 5)
                                colorCode = ColorCode_Yellow;

                            /*  Red Conditions  */
                            // Test to make sure the student didn't plot the wrong transducer symbol
                            switch (StudentAudiometer[freqindex, cond, mask, ear].testChannel.trans)
                            {
                                case Audiometer.TransducerType.Bone :
                                    if (cond != 0)
                                        colorCode = ColorCode_Red;
                                    break;
                                case Audiometer.TransducerType.Insert :
                                    if (cond != 1)
                                        colorCode = ColorCode_Red;
                                    break;
                                case Audiometer.TransducerType.Phone :
                                    if (cond != 1)
                                        colorCode = ColorCode_Red;
                                    break;
                            }

                            // Test to make sure the student didn't plot the wrong ear symbol
                            switch (StudentAudiometer[freqindex, cond, mask, ear].testChannel.route)
                            {
                                case Ear.left :
                                    if (ear != 0)
                                        colorCode = ColorCode_Red;
                                    break;
                                case Ear.right :
                                    if (ear != 1)
                                        colorCode = ColorCode_Red;
                                    break;
                            }

                            // Test to make sure the student didn't present a tone from a channel that has the interrupt set
                            if (StudentAudiometer[freqindex, cond, mask, ear].testChannel.interrupt)
                                colorCode = ColorCode_Red;

                            // Test to make sure the student didn't mask a tone that wasn't supposed to be masked
                            if (mask == 0 && StudentAudiometer[freqindex, cond, mask, ear].maskChannel.interrupt)
                                colorCode = ColorCode_Red;

                            // Test to make sure the student didn't forget to mask a tone that should have been masked
                            if (mask == 1 && !StudentAudiometer[freqindex, cond, mask, ear].maskChannel.interrupt)
                                colorCode = ColorCode_Red;

                            // Test to make sure that while masking, the student had the correct masking channel settings
                            if (mask == 1)
                            {
                                // Test to make sure the student used NBNoise to mask a tone
                                if (StudentAudiometer[freqindex, cond, mask, ear].maskChannel.stim != Audiometer.StimulusType.NB)
                                    colorCode = ColorCode_Red;

                                // Test to make sure the interrupt was sent to the opposite ear
                                if (StudentAudiometer[freqindex, cond, mask, ear].maskChannel.route != (Ear) Math.Abs(ear - 1))
                                    colorCode = ColorCode_Red;

                                // Test to make sure the proper transducer was used to mask
                                if (StudentAudiometer[freqindex, cond, mask, ear].maskChannel.trans != Audiometer.TransducerType.Phone)
                                    colorCode = ColorCode_Red;
                            }

                            // Test to make sure the student placed the point at the proper frequency
                            if (StudentAudiometer[freqindex, cond, mask, ear].ameterSettings.frequency != freq)
                                colorCode = ColorCode_Red;

                            /*  Decode Color Codes  */
                            switch (colorCode)
                            {
                                case ColorCode_Green :
                                    studentValue = StudentAudiogram[freqindex, cond, mask, ear].ToString();
                                    outputColor = "color:green";
                                    break;
                                case ColorCode_Red :
                                    studentValue = "[ " + StudentAudiogram[freqindex, cond, mask, ear].ToString() + " ]";
                                    outputColor = "color:red";
                                    break;
                                case ColorCode_Yellow :
                                    studentValue = "( " + StudentAudiogram[freqindex, cond, mask, ear].ToString() + " )";
                                    outputColor = "color:yellow";
                                    break;
                                default :
                                    studentValue = StudentAudiogram[freqindex, cond, mask, ear].ToString();
                                    outputColor = "color:white";
                                    break;
                            }

                            tagID = string.Format("{0}s{1}{2}{3}", sEar, sConduction, sMasked, sFreq);

                            eval.ChangeTagAttributes(tagID, studentValue, outputColor);
                        }   // for ear
                    }   // for mask
                }   // for cond
            }   // for freqindex
            //eval.ChangeTdTag("rsac250", "50", "color_green");

            eval.SaveEval();
        }
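
The block of explicit Convert.ToInt32 calls at the top of CompileResults could also be written table-driven by exploiting the txt{L|R}Mask{BC|AC}{freq} naming pattern of the text boxes. The sketch below is only an illustration and assumes the text boxes are reachable through the audiogram form's Controls collection under their designer names:

        // Sketch: fill StudentMaskedMaximums[freqindex, cond, ear] by building each control
        // name from the txt{L|R}Mask{BC|AC}{freq} pattern and looking the TextBox up on the
        // audiogram form. The explicit assignments above are what the project actually uses.
        private void LoadMaskedMaximums(Audiogram audiogram)
        {
            int[] frequencies = { 250, 500, 1000, 2000, 4000 };     // no masking column at 8000 Hz

            for (int freqindex = 0; freqindex < frequencies.Length; freqindex++)
            {
                for (int cond = 0; cond <= 1; cond++)               // 0 = bone conduction, 1 = air conduction
                {
                    for (int ear = 0; ear <= 1; ear++)              // 0 = left, 1 = right
                    {
                        string name = string.Format("txt{0}Mask{1}{2}",
                            ear == 0 ? "L" : "R",
                            cond == 0 ? "BC" : "AC",
                            frequencies[freqindex]);

                        Control[] found = audiogram.Controls.Find(name, true);
                        if (found.Length > 0)
                            StudentMaskedMaximums[freqindex, cond, ear] = Convert.ToInt32(((TextBox)found[0]).Text);
                    }
                }
            }
        }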