/// ------------------------------------------------------------------------------------
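        /// <summary>
        /// Creates a one-channel wave stream provider for the specified oral annotation
        /// file, notifying the user of any error other than a missing file.
        /// </summary>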
        private WaveStreamProvider GetWaveStreamForOralAnnotationSegment(string filename,
                                                                         AudioRecordingType annotationType)
        {
            var provider = WaveStreamProvider.Create(_output1ChannelAudioFormat, filename);

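            // A missing file is not reported as a problem, since a segment may simply
            // not have an annotation recording of this type yet.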
            if (provider.Error != null && !(provider.Error is FileNotFoundException))
            {
                var msg = LocalizationManager.GetString(
                    "SessionsView.Transcription.GeneratedOralAnnotationView.ProcessingAnnotationFileErrorMsg",
                    "There was an error processing a {0} annotation file.",
                    "The parameter is the annotation type (i.e. careful, translation).");

                var type = annotationType.ToString().ToLower();
                // Error reporting can touch the UI, so marshal the call to the UI thread
                // when necessary. Report the error from the provider created above for
                // this annotation file.
                if (_synchInvoke.InvokeRequired)
                {
                    _synchInvoke.Invoke((Action)(() => ErrorReport.NotifyUserOfProblem(provider.Error, msg, type)), null);
                }
                else
                {
                    ErrorReport.NotifyUserOfProblem(provider.Error, msg, type);
                }
            }

            return provider;
        }
        /// ------------------------------------------------------------------------------------
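        /// <summary>
        /// Collects the time ranges of the source recording segments to include and sets
        /// up the source stream provider and the audio formats for the generated output.
        /// </summary>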
        private OralAnnotationFileGenerator(TimeTier sourceTier, Func<int, bool> ignoreSegment,
                                            ISynchronizeInvoke synchInvoke)
        {
            _srcRecordingTier = sourceTier;
            _synchInvoke      = synchInvoke;

            bool fullySegmented = sourceTier.IsFullySegmented;

            _srcRecordingSegments = new List<TimeRange>();
            for (int i = 0; i < sourceTier.Segments.Count; i++)
            {
                // Per JohnH's request via e-mail (8-12-2012), exclude ignored segments
                if (!ignoreSegment(i))
                {
                    _srcRecordingSegments.Add(sourceTier.Segments[i].TimeRange);
                }
            }
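            // If the source recording is not fully segmented, treat the audio following
            // the last segment as one final segment so it is not dropped from the output.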
            if (!fullySegmented)
            {
                _srcRecordingSegments.Add(new TimeRange(sourceTier.EndOfLastSegment, sourceTier.TotalTime));
            }

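            // Open the source recording, requesting the default wave format with a
            // single channel.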
            _srcRecStreamProvider = WaveStreamProvider.Create(
                AudioUtils.GetDefaultWaveFormat(1), _srcRecordingTier.MediaFileName);

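            // Derive the output formats from the source stream's format. (Note: this
            // assumes the source file opened successfully and Stream is non-null.)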
            var sourceFormat = _srcRecStreamProvider.Stream.WaveFormat;

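            // The generated annotation file carries the source channel(s) plus two
            // additional channels, one per annotation type (careful and translation).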
            _outputAudioFormat = new WaveFormat(sourceFormat.SampleRate,
                                                sourceFormat.BitsPerSample, sourceFormat.Channels + 2);

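            // Single-channel format used when opening the individual annotation segment
            // files (see GetWaveStreamForOralAnnotationSegment).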
            _output1ChannelAudioFormat = new WaveFormat(sourceFormat.SampleRate,
                                                        sourceFormat.BitsPerSample, 1);
        }