// コード例 #1 (Code example #1)
// Score: 0
// File: Virtual_Lab.cs — Project: jlaswell/Virtual_Lab
        /// <summary>
        /// Controls playing the patient's response to pure-tone testing by delegating
        /// to Patient.DetermineResponsePureTone with the current audiometer settings.
        /// </summary>
        /// <param name="TestChannel">settings of the audiometer channel presenting the test tone</param>
        /// <param name="MaskedChannel">settings of the audiometer channel used for masking</param>
        /// <returns>the patient response (a ResponseType value as an int)</returns>
        private int pureToneTest(Audiometer.channelSettings TestChannel, Audiometer.channelSettings MaskedChannel)
        {
            int	patResp	= -1;
            patResp = pt.DetermineResponsePureTone(TestChannel, MaskedChannel, admtr.ameterSettings);

            // Need the "wrong stimuli" message box code from below
            // NOTE(review): the block below is the pre-refactor implementation, kept as a
            // reference for the missing "wrong stimuli" alert; remove once that is ported.

            /*
            testEar	= (testCh == (int)Ear.left)	? admtr.Channel1.route : admtr.Channel2.route;
            int	ntEar =	(testEar + 1)%2;
            int	testType;

            dB = new int[2];
            //string resString = "";

            if (testCh == (int)Ear.left)
            {
                // determine whether test is AC	or BC
                testType = (admtr.Channel1.trans == Audiometer.TransducerType.Bone) ?
                    (int)TestType.BC : (int)TestType.AC;

                // determine what the IA values	are	for	the	testEar

                dB[testEar]	= admtr.ameterSettings.ch1_tone;
                // checks that the ch2 interrupt is selected and the stimulus is NB for masking
                // If interrupt is not selected then set db[ntEar] to a value low enough not to effect the
                // threshold value in the patient response function
                if (admtr.ameterSettings.ch2_interrupt_select == false)
                {
                    dB[ntEar] = -50; // that's pretty low
                }
                dB[ntEar] =	(admtr.ameterSettings.ch2_interrupt_select &&
                                admtr.Channel2.stim == Audiometer.StimulusType.NB)	?
                    admtr.ameterSettings.ch2_tone :	0;

                if (admtr.ameterSettings.ch2_interrupt_select &&
                    admtr.Channel2.stim != Audiometer.StimulusType.NB)
                {
                    // this means that masking isn't set up right -- alert in practice mode
                    MessageBox.Show("Masking is not set up correctly (wrong stimuli)");
                    return (int)ResponseType.NoResponse;
                }

                // determine pure tone response
                patResp	= pt.DetermineResponsePureTone(testEar,	admtr.ameterSettings.frequency,
                    testType, IA, dB, ref resString);
            }
            else if	(testCh	== (int)Ear.right)
            {
                // determine whether test is AC	or BC
                testType = (admtr.Channel2.trans == Audiometer.TransducerType.Bone) ?
                    (int)TestType.BC : (int)TestType.AC;

                // determine what the IA values	are	for	the	testEar

                dB[testEar]	= admtr.ameterSettings.ch2_tone;
                // checks that the ch1 interrupt is selected and the stimulus is NB for masking
                dB[ntEar] =	(admtr.ameterSettings.ch1_interrupt_select  &&
                    admtr.Channel1.stim == Audiometer.StimulusType.NB)	?
                    admtr.ameterSettings.ch1_tone :	0;

                if (admtr.ameterSettings.ch1_interrupt_select &&
                    admtr.Channel1.stim != Audiometer.StimulusType.NB)
                {
                    // this means that masking isn't set up right -- alert in practice mode
                    MessageBox.Show("Masking is not set up correctly (wrong stimuli)");
                    return (int)ResponseType.NoResponse;
                }

                // Determine patient's response
                patResp	= pt.DetermineResponsePureTone(testEar,	admtr.ameterSettings.frequency,
                    testType, IA, dB, ref resString);
            }
            else
            {
                Console.WriteLine("Invalid test	parameters");
                return (int)ResponseType.NoResponse;
            }

            testEar_IA = IA[testEar];
            */
            return patResp;
        }
// コード例 #2 (Code example #2)
// Score: 0
// File: Patient.cs — Project: jlaswell/Virtual_Lab
        /// <summary>
        /// Determines the patient's response to a pure-tone presentation, accounting for
        /// masking of the non-test ear (interaural attenuation, occlusion effect, and the
        /// masking plateau, including over-/under-masking and the masking-dilemma case).
        /// </summary>
        /// <param name="TestChannel">audiometer channel presenting the test tone (route, transducer, volume)</param>
        /// <param name="MaskedChannel">audiometer channel used for masking (route, transducer, volume, interrupt)</param>
        /// <param name="ameterSettings">current audiometer settings; only the test frequency is read here</param>
        /// <returns>an enumerated ResponseType, cast to int, describing the response (includes ear, and relative surety)</returns>
        public int DetermineResponsePureTone(Audiometer.channelSettings TestChannel, 
            Audiometer.channelSettings MaskedChannel, Audiometer.audiometerSettings ameterSettings)
        {
            int freq = ameterSettings.frequency; // simplify ameterSettings.frequency
            Ear testEar = TestChannel.route;
            Ear ntEar = testEar == Ear.left ? Ear.right : Ear.left;

            int NonTestEarThresh;

            int UnmaskedThreshold, MaskedThreshold;

            int OE;
            int[] IA = new int[2];

            // NOTE(review): a new Random per call can produce correlated sequences when
            // called in quick succession; consider a single shared instance.
            Random rand = new Random(); // Randomize patient response when not masking

            int thresh;
            if (TestChannel.trans == Audiometer.TransducerType.Bone)
            {
                // Bone conduction: thresholds come from the BC pathology tables.
                MaskedThreshold = path.BC_Mask_Val(freq, testEar);
                UnmaskedThreshold = path.BC_Thresh_Val(freq, testEar);
                NonTestEarThresh = path.BC_Thresh_Val(freq, ntEar);

                // Determine OE (occlusion effect) — applies to low frequencies only
                switch (ameterSettings.frequency)
                {
                    case 250:
                        OE = 20;
                        break;
                    case 500:
                        OE = 15;
                        break;
                    case 1000:
                        OE = 10;
                        break;
                    default:
                        OE = 0;
                        break;
                }
            }
            else
            {
                // Air conduction: thresholds come from the AC tables; no occlusion effect.
                MaskedThreshold = path.AC_Mask_Val(freq, testEar);
                UnmaskedThreshold = path.AC_Thresh_Val(freq, testEar);
                NonTestEarThresh = path.AC_Thresh_Val(freq, ntEar);

                OE = 0;
            }

            #region Interaural Attenuation
            // IA per ear depends on the transducer: supra-aural phones 40 dB,
            // insert phones 60 dB, bone (or anything else) 0 dB.
            switch (TestChannel.trans)
            {
                case Audiometer.TransducerType.Phone:
                    IA[(int)testEar] = 40;
                    break;
                case Audiometer.TransducerType.Insert:
                    IA[(int)testEar] = 60;
                    break;
                default:
                    IA[(int)testEar] = 0;
                    break;
            }
            // determine what the IA values are for the non-test ear
            switch (MaskedChannel.trans)
            {
                case Audiometer.TransducerType.Phone:
                    IA[(int)ntEar] = 40;
                    break;
                case Audiometer.TransducerType.Insert:
                    IA[(int)ntEar] = 60;
                    break;
                default:
                    IA[(int)ntEar] = 0;
                    break;
            }
            #endregion

            int minPlateauPoint;
            int maxPlateauPoint = 0;

            // Determine the minimum and maximum masking levels (Min and Max equations)
            // Maximum: BCnte + IA - 5  for bone and air
            // Minimum: BCnte + 15 + OE for bone
            //          ACnte + 15      for air (NOTE: OE set to 0 if using air)
            int maxMaskingLevel = IA[(int)ntEar] + path.BC_Thresh_Val(freq, ntEar) - 5;
            int minMaskingLevel = NonTestEarThresh + 15 + OE;

            // Determine the center of the minimum and maximum masking levels
            // The plateau should occur here
            float averageMasking = (float)((maxMaskingLevel + minMaskingLevel) / 2.0);

            // Use truncation to determine if the patient will respond 3 times or 4 times
            // at the plateau
            int truncatedAverage = (int)averageMasking;
            if ((float)truncatedAverage != averageMasking)
            {
                // If the average is not an integer, then use 4 points for the plateau
                // by rounding down to the lowest 5 db point and offsetting the maximum
                // plateau point by 5 db
                truncatedAverage -= 2;
                maxPlateauPoint = 5;
            }

            // Determine where the beginning and end of the plateau is
            minPlateauPoint = truncatedAverage - 5;
            maxPlateauPoint += truncatedAverage + 5;

            // If the minimum masking level is greater than, equal to, or within 15 dB of the
            // maximimum masking level, then this is a masking dilemma
            // 15 dB range is due to the fact that a plateau must be at least 3 points wide
            // to determine true hearing level
            if (MaskedChannel.interrupt && maxMaskingLevel - minMaskingLevel < 15)
                // Since a masking dilemma has been encountered, the patient should not plateau
                maxPlateauPoint = minPlateauPoint;

            if (MaskedChannel.interrupt)
            {
                // Masking

                // Assume the patient will respond at the masked level
                thresh = MaskedThreshold;

                // Determine if the masked noise is too loud or soft for the masked level
                if (MaskedChannel.volume > maxPlateauPoint)
                {
                    // If the masked noise is too loud, then the patient response will be the masked level
                    // plus the difference between the maximum plateau point and the masked test level
                    thresh = MaskedThreshold + MaskedChannel.volume - maxPlateauPoint;
                }

                // check for under masking
                else if (MaskedChannel.volume < minPlateauPoint)
                {
                    // If the masked noise is too soft, then the patient response will be the masked level
                    // plus the difference between the minimum plateau point and the masked test level
                    thresh = MaskedThreshold + MaskedChannel.volume - minPlateauPoint;

                    // If the new threshold is less than the actual bone level, then just assign the threshold to the bone level
                    if (thresh < UnmaskedThreshold)
                        thresh = UnmaskedThreshold;
                }
            }
            else
            {
                // Not masking, patient responds at the unmasked level
                thresh = UnmaskedThreshold;
            }

            // determine response:
            //  - exactly at threshold  -> "unsure" response (only ~80% of the time when not masking)
            //  - 5 dB or more above    -> "sure" response
            //  - otherwise             -> no response
            if (TestChannel.volume == thresh)
            {
                // if masking is not a factor - respond occasionally
                if (MaskedChannel.interrupt == true || rand.Next(10) <= 7)
                    return (testEar == (int)Ear.left) ? (int)ResponseType.LeftUnsure : (int)ResponseType.RightUnsure;
            }
            else if (TestChannel.volume >= thresh + 5)
            {
                return (testEar == (int)Ear.left) ? (int)ResponseType.LeftSure : (int)ResponseType.RightSure;
            }

            return (int)ResponseType.NoResponse;
        }
// コード例 #3 (Code example #3)
// Score: 0
// File: Virtual_Lab.cs — Project: jlaswell/Virtual_Lab
        /// <summary>
        /// Called when the whole program first loads.  Shows the welcome screen, records
        /// the student's information, then constructs and positions the child MDI windows
        /// (audiometer, patient window, audiogram, plateau plot, SRT and WI test forms).
        /// </summary>
        /// <param name="sender">standard event source (unused)</param>
        /// <param name="e">standard event arguments (unused)</param>
        private void VirtualLabToplevel_Load(object	sender,	System.EventArgs e)
        {
            // Display a "WELCOME SCREEN" allowing the user to choose the program type
            WelcomeScreen ws = new WelcomeScreen();
            DialogResult dResult = ws.ShowDialog();
            switch (dResult)
            {

                case DialogResult.Yes:  // practice mode
                    programMode = ProgramMode.Practice;
                    this.Text += " Practice Mode";
                    break;
                case DialogResult.No:	// evaluation mode
                    programMode = ProgramMode.Eval;
                    resultsShow.Visible = false;
                    // BUGFIX: this assignment previously appeared twice; duplicate removed.
                    pplotShowHide.Enabled = false;
                    this.Text += " Evaluation Mode";
                    break;
            }

            if (dResult != DialogResult.Cancel)
            {

                // get student parameters
                si.stName = ws.stName;
                si.stID = ws.stID;
                si.stCourse = ws.stCourse;
                si.stEmail = ws.stEmail;
                si.stProf = ws.stProf;

                // show the student's name in title (if available)
                if (si.stName != string.Empty)
                    Text += "                                             Clinician: " + si.stName;

                // construct and show children
                admtr = new Audiometer(
                    new Audiometer.presentationDelegate(presFunc),
                    new Audiometer.stateChangedDelegate(stChang));
                admtr.MdiParent = this;
                admtr.Show();
                admtr.Location = new Point(0, 0);
                admtr.Size = new Size(661, 460);

                // Patient Window
                //ptwind = new PatientWindow(pt); //Changes made 04/12/2011
                ptwind = new PatientWindow();
                ptwind.MdiParent = this;
                ptwind.Show();
                ptwind.Location = new Point(0, admtr.Height);
                ptwind.Size = new Size(400, 450);

                // Audiogram, placed to the right of the audiometer
                adgrm = new Audiogram();
                adgrm.vltl = this;
                adgrm.patient = pt; // delete me
                adgrm.MdiParent = this;
                adgrm.Show();
                adgrm.Location = new Point(admtr.Width,0);
                adgrm.SpAudInput.MdiParent = this;
                adgrm.SpAudInput.Location = new Point(750, 496);

                // Plateau plot, hidden until requested
                pplot = new Plateau_Plot.Plateau_Plot();
                pplot.MdiParent = this;
                pplot.Show();
                pplot.Location = new Point(adgrm.Width, adgrm.Height);
                pplot.Visible = false;

                // SRT
                srtWind = new SRT.SRT(
                    new SRT.SRT.ResponseDelegate(DoResponse),
                    new SRT.SRT.DoneDelegate(EnableSpeechMenu));
                srtWind.MdiParent = this;
                srtWind.Location = new Point(750, 496);
                srtWind.Hide();

                // WI
                wiWind = new WI.WI(
                    new WI.WI.DoneDelegate(EnableSpeechMenu),
                    new WI.WI.ResponseDelegate(DoResponse));
                wiWind.MdiParent = this;
                wiWind.Location = new Point(750, 496);
                wiWind.Hide();

                // show form
                this.Visible = true;
                WindowState = System.Windows.Forms.FormWindowState.Maximized;
                prevState = admtr.ameterSettings;
            } // end if ( dResult != Cancel )
        }
// コード例 #4 (Code example #4)
// Score: 0
// File: EvalOutput.cs — Project: jlaswell/Virtual_Lab
        /// <summary>
        /// Records the lowest masking volume the student has presented so far for the
        /// given frequency, transducer type, and masked ear.  Does nothing while the
        /// masking channel's interrupt is off.
        /// </summary>
        /// <param name="frequencyIndex">index of the test frequency into the minimums table</param>
        /// <param name="TestChannel">channel presenting the test tone (used for bone/air selection)</param>
        /// <param name="MaskChannel">channel presenting the masking noise (used for ear and volume)</param>
        public void CheckMinimum(int frequencyIndex, Audiometer.channelSettings TestChannel, Audiometer.channelSettings MaskChannel)
        {
            // Minimums are only tracked while masking is actually being presented.
            if (!MaskChannel.interrupt)
                return;

            // Table indices: ear 0 = left, 1 = right; transducer 0 = bone, 1 = air.
            int earIndex = (MaskChannel.route == Ear.left) ? 0 : 1;
            int transducerIndex = (TestChannel.trans == Audiometer.TransducerType.Bone) ? 0 : 1;

            int recorded = StudentMaskedMinimums[frequencyIndex, transducerIndex, earIndex];
            if (MaskChannel.volume < recorded)
                StudentMaskedMinimums[frequencyIndex, transducerIndex, earIndex] = MaskChannel.volume;
        }