/// ------------------------------------------------------------------------------------
        private WaveStreamProvider GetWaveStreamForOralAnnotationSegment(string filename,
                                                                         AudioRecordingType annotationType)
        {
            var provider = WaveStreamProvider.Create(_output1ChannelAudioFormat, filename);

            // A missing annotation file is expected and not worth reporting; any other error is.
            if (provider.Error != null && !(provider.Error is FileNotFoundException))
            {
                var msg = LocalizationManager.GetString(
                    "SessionsView.Transcription.GeneratedOralAnnotationView.ProcessingAnnotationFileErrorMsg",
                    "There was an error processing a {0} annotation file.",
                    "The parameter is the annotation type (i.e. careful, translation).");

                var type = annotationType.ToString().ToLower();

                // Report the error of the provider just created, marshalling the call onto
                // the UI thread when this runs on a worker thread.
                if (_synchInvoke.InvokeRequired)
                {
                    _synchInvoke.Invoke((Action)(() => ErrorReport.NotifyUserOfProblem(provider.Error, msg, type)), null);
                }
                else
                {
                    ErrorReport.NotifyUserOfProblem(provider.Error, msg, type);
                }
            }

            return provider;
        }
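The InvokeRequired/Invoke dance above is the standard ISynchronizeInvoke pattern for making UI-bound calls (here, ErrorReport) from a worker thread. Below is a minimal, self-contained sketch of the same pattern, assuming only System.ComponentModel.ISynchronizeInvoke; the RunOnUiThread helper is hypothetical and not part of this code:

        // Requires: using System; using System.ComponentModel;
        private static void RunOnUiThread(ISynchronizeInvoke synchInvoke, Action action)
        {
            // Marshal the call onto the synchronizer's thread when needed; otherwise run inline.
            if (synchInvoke != null && synchInvoke.InvokeRequired)
                synchInvoke.Invoke(action, null);
            else
                action();
        }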
        /// ------------------------------------------------------------------------------------
        private OralAnnotationFileGenerator(TimeTier sourceTier, Func<int, bool> ignoreSegment,
                                            ISynchronizeInvoke synchInvoke)
        {
            _srcRecordingTier = sourceTier;
            _synchInvoke      = synchInvoke;

            bool fullySegmented = sourceTier.IsFullySegmented;

            _srcRecordingSegments = new List<TimeRange>();
            for (int i = 0; i < sourceTier.Segments.Count; i++)
            {
                // Per JohnH's request via e-mail (8-12-2012), exclude ignored segments
                if (!ignoreSegment(i))
                {
                    _srcRecordingSegments.Add(sourceTier.Segments[i].TimeRange);
                }
            }

            // If the tier is not fully segmented, treat the remainder of the recording
            // (from the end of the last segment to the end of the file) as one more segment.
            if (!fullySegmented)
            {
                _srcRecordingSegments.Add(new TimeRange(sourceTier.EndOfLastSegment, sourceTier.TotalTime));
            }

            _srcRecStreamProvider = WaveStreamProvider.Create(
                AudioUtils.GetDefaultWaveFormat(1), _srcRecordingTier.MediaFileName);

            var sourceFormat = _srcRecStreamProvider.Stream.WaveFormat;

            // The generated file gets the source channel(s) plus two extra channels,
            // one per annotation type (careful speech and oral translation).
            _outputAudioFormat = new WaveFormat(sourceFormat.SampleRate,
                                                sourceFormat.BitsPerSample, sourceFormat.Channels + 2);

            _output1ChannelAudioFormat = new WaveFormat(sourceFormat.SampleRate,
                                                        sourceFormat.BitsPerSample, 1);
        }
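The constructor only prepares the formats; the `sourceFormat.Channels + 2` line implies that the generated annotation file is multi-channel, with the source audio and the two annotation types occupying separate channels. Here is a rough NAudio sketch of interleaving three mono 16-bit buffers into one 3-channel file, purely to illustrate that layout; the method and its parameters are hypothetical, not SayMore code:

        // Requires: using System; using NAudio.Wave;
        // Illustrative only: interleave three equal-length mono 16-bit buffers
        // (source, careful speech, translation) into a single 3-channel WAV.
        private static void WriteThreeChannelSketch(string path, int sampleRate,
                                                    short[] source, short[] careful, short[] translation)
        {
            var format = new WaveFormat(sampleRate, 16, 3);
            using (var writer = new WaveFileWriter(path, format))
            {
                var frame = new byte[3 * sizeof(short)];
                for (int i = 0; i < source.Length; i++)
                {
                    // One frame = one sample per channel, in channel order.
                    BitConverter.GetBytes(source[i]).CopyTo(frame, 0);
                    BitConverter.GetBytes(careful[i]).CopyTo(frame, 2);
                    BitConverter.GetBytes(translation[i]).CopyTo(frame, 4);
                    writer.Write(frame, 0, frame.Length);
                }
            }
        }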
Example #3
        public static int LoadSfxMusic(byte[] data)
        {
            const DWORD Mono           = 0x0002u;   // BASS_SAMPLE_MONO
            var         streamProvider = new WaveStreamProvider(data);

            // Keep a reference to the callback delegate: BASS only holds a native function
            // pointer, so the delegate must stay rooted or the GC may collect it.
            NativeBass.StreamProcedure streamProc = streamProvider.StreamProcedure;
            int music = NativeBass.BASS_StreamCreate(8000u, 1u, Mono, streamProc, IntPtr.Zero);

            streamProviders.Add(music, streamProc);
            createdChannels.Add(music, Music.Type.Sfx);
            return music;
        }
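In this example WaveStreamProvider wraps the raw byte array and exposes a BASS STREAMPROC callback, which BASS_StreamCreate invokes whenever it needs more sample data. The delegate's exact signature is not shown in the snippet; the sketch below follows the shape documented for BASS's STREAMPROC and its BASS_STREAMPROC_END flag, while the class itself is a hypothetical stand-in, not the actual WaveStreamProvider:

        // Requires: using System; using System.Runtime.InteropServices;
        // Hypothetical sketch of a byte[]-backed STREAMPROC.
        public sealed class ByteArrayStreamProviderSketch
        {
            private const uint BASS_STREAMPROC_END = 0x80000000;  // tells BASS the stream is finished
            private readonly byte[] _data;
            private int _position;

            public ByteArrayStreamProviderSketch(byte[] data) { _data = data; }

            // Matches the native STREAMPROC shape: copy up to 'length' bytes into 'buffer'.
            public uint StreamProcedure(int handle, IntPtr buffer, int length, IntPtr user)
            {
                int remaining = _data.Length - _position;
                int count     = Math.Min(length, remaining);
                Marshal.Copy(_data, _position, buffer, count);
                _position += count;

                uint result = (uint)count;
                if (_position >= _data.Length)
                    result |= BASS_STREAMPROC_END;   // no more data: end the stream
                return result;
            }
        }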
        /// ------------------------------------------------------------------------------------
        public void Dispose()
        {
            if (_audioFileWriter != null)
            {
                _audioFileWriter.Close();
            }

            if (_srcRecStreamProvider != null)
            {
                _srcRecStreamProvider.Dispose();
            }

            _audioFileWriter      = null;
            _srcRecStreamProvider = null;
        }
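Closing the owned resources and then nulling the fields makes Dispose safe to call more than once. A minimal standalone sketch of the same pattern, with illustrative field types standing in for the audio writer and the stream provider:

        // Requires: using System;
        // Illustrative only: close/dispose owned resources, then null the fields so a
        // second Dispose() call is a harmless no-op.
        public sealed class OwnedResourcesSketch : IDisposable
        {
            private System.IO.StreamWriter _audioFileWriter;   // stands in for the WAV writer
            private System.IO.Stream       _srcRecStream;      // stands in for the stream provider

            public void Dispose()
            {
                if (_audioFileWriter != null)
                    _audioFileWriter.Close();

                if (_srcRecStream != null)
                    _srcRecStream.Dispose();

                _audioFileWriter = null;
                _srcRecStream    = null;
            }
        }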