Beispiel #1
0
        /// <summary>
        /// Scans the entire stream and returns the smallest sample value seen,
        /// which helps choose a silence threshold for DetectSilenceLevel().
        /// The reader's position is restored before returning.
        /// </summary>
        /// <returns>Min Amplitude</returns>
        public float GetMinAmplitude()
        {
            // Remember the current cursor so it can be restored afterwards.
            long savedPosition = AudioReader.Position;

            // Read roughly one second of samples per iteration.
            float[] sampleBuffer = new float[AudioReader.WaveFormat.SampleRate];

            float minimum = 0;

            for (int samplesRead = AudioReader.Read(sampleBuffer, 0, sampleBuffer.Length);
                 samplesRead > 0;
                 samplesRead = AudioReader.Read(sampleBuffer, 0, sampleBuffer.Length))
            {
                for (int i = 0; i < samplesRead; i++)
                {
                    minimum = Math.Min(sampleBuffer[i], minimum);
                }
            }

            AudioReader.Position = savedPosition;
            return minimum;
        }
Beispiel #2
0
        /// <summary>
        /// Creates a cue backed by an OpenAL buffer/source pair and uploads the
        /// decoded audio data from <paramref name="data"/> into the buffer.
        /// </summary>
        /// <param name="data">Stream containing the encoded audio.</param>
        /// <param name="ac">Audio context used for error checking.</param>
        internal OpenTKAudioCue(Stream data, AudioContext ac)
        {
            this.ac = ac;

            buffer = AL.GenBuffer();
            ac.CheckErrors();

            source = AL.GenSource();
            ac.CheckErrors();

            AL.Source(source, ALSourcef.Gain, (float)this.Volume);
            ac.CheckErrors();

            using (AudioReader ar = new AudioReader(data))
            {
                SoundData d = ar.ReadToEnd();
                // BUG FIX: the decoded samples must be uploaded into the buffer
                // handle, not the source handle; the buffer is attached to the
                // source right below.
                AL.BufferData(buffer, d);
                ac.CheckErrors();
            }

            AL.Source(source, ALSourcei.Buffer, buffer);
            ac.CheckErrors();

            this.VolumeChanged  += new VolumeChangedEventHandler(OpenTKAudioCue_VolumeChanged);
            this.BalanceChanged += new BalanceChangedEventHandler(OpenTKAudioCue_BalanceChanged);
            this.FadeChanged    += new FadeChangedEventHandler(OpenTKAudioCue_FadeChanged);
        }
Beispiel #3
0
        /// <summary>
        /// Closes and releases every frame reader, the media capture and both
        /// network endpoints owned by this instance.
        /// </summary>
        /// <returns>An action that completes once all resources are released.</returns>
        public IAsyncAction CloseAsync()
        {
            return(Task.Run(async() =>
            {
                // BUG FIX: "await Reader?.CloseAsync()" throws NullReferenceException
                // when the reader is null, because the null-conditional produces a
                // null Task and awaiting null is invalid. Each awaited reader is
                // therefore guarded with an explicit null check.
                if (BodyIndexReader != null)
                {
                    await BodyIndexReader.CloseAsync();
                    BodyIndexReader.Dispose();
                    BodyIndexReader = null;
                }

                if (BodyReader != null)
                {
                    await BodyReader.CloseAsync();
                    BodyReader.Dispose();
                    BodyReader = null;
                }

                if (ColorReader != null)
                {
                    await ColorReader.CloseAsync();
                    ColorReader.Dispose();
                    ColorReader = null;
                }

                if (DepthReader != null)
                {
                    await DepthReader.CloseAsync();
                    DepthReader.Dispose();
                    DepthReader = null;
                }

                // AudioReader.Close() is synchronous, so the null-conditional is safe here.
                AudioReader?.Close();
                AudioReader?.Dispose();
                AudioReader = null;

                _mediaCapture?.Dispose();
                _mediaCapture = null;

                _networkClient?.CloseConnection();
                _networkClient = null;

                _networkServer?.CloseConnection();
                _networkServer = null;
            }).AsAsyncAction());
        }
Beispiel #4
0
        /// <summary>
        /// Lazily creates the audio frame reader: locates the "xbox nui sensor"
        /// microphone array, builds an AudioGraph around it and wraps the graph's
        /// frame output node. Subsequent calls reuse the existing reader.
        /// </summary>
        /// <returns>
        /// The opened reader, or null when the mic array was not found or the graph
        /// (or its input node) could not be created.
        /// </returns>
        public IAsyncOperation <AudioFrameReader> OpenAudioFrameReaderAsync()
        {
            return(Task.Run(async() =>
            {
                if (AudioReader == null)
                {
                    // Enumerate all audio-capture devices and pick the Kinect mic array by name.
                    var microphones = await DeviceInformation.FindAllAsync(DeviceInformation.GetAqsFilterFromDeviceClass(DeviceClass.AudioCapture));
                    var kinectMicArray = microphones.FirstOrDefault(mic => mic.Name.ToLowerInvariant().Contains("xbox nui sensor"));

                    if (kinectMicArray != null)
                    {
                        //TODO: review parameters
                        // 16 kHz, 4 channels, 32-bit float PCM; lowest-latency quanta; raw (unprocessed) capture.
                        var settings = new AudioGraphSettings(AudioRenderCategory.Speech);
                        settings.EncodingProperties = AudioEncodingProperties.CreatePcm(16000, 4, 32);
                        settings.EncodingProperties.Subtype = MediaEncodingSubtypes.Float;
                        settings.QuantumSizeSelectionMode = QuantumSizeSelectionMode.LowestLatency;
                        settings.DesiredRenderDeviceAudioProcessing = Windows.Media.AudioProcessing.Raw;

                        var audioGraphResult = await AudioGraph.CreateAsync(settings);
                        if (audioGraphResult.Status == AudioGraphCreationStatus.Success)
                        {
                            var inputNodeResult = await audioGraphResult.Graph.CreateDeviceInputNodeAsync(MediaCategory.Speech, audioGraphResult.Graph.EncodingProperties, kinectMicArray);

                            if (inputNodeResult.Status == AudioDeviceNodeCreationStatus.Success)
                            {
                                // The frame output node is where audio frames are pulled from.
                                var output = audioGraphResult.Graph.CreateFrameOutputNode(audioGraphResult.Graph.EncodingProperties);
                                AudioReader = new AudioFrameReader(audioGraphResult.Graph, output);
                            }
                        }
                    }
                }
                // Null-safe: stays null (and returns null) when creation failed above.
                AudioReader?.Open();
                return AudioReader;
            }).AsAsyncOperation());
        }
Beispiel #5
0
        /// <summary>
        /// Re-encodes an audio file frame-by-frame: reads every frame from
        /// <paramref name="input"/> and writes it to <paramref name="output"/>.
        /// </summary>
        /// <param name="input">Path of the source audio file.</param>
        /// <param name="output">Path of the file to write.</param>
        static void ReadWriteAudio(string input, string output)
        {
            // FIX: dispose the reader; the previous version leaked it.
            using (var audio = new AudioReader(input))
            {
                audio.LoadMetadataAsync().Wait();
                audio.Load();

                using (var writer = new AudioWriter(output, audio.Metadata.Channels, audio.Metadata.SampleRate))
                {
                    writer.OpenWrite(true);

                    var frame = new AudioFrame(1);
                    while (true)
                    {
                        // read next sample; null marks end of stream
                        var f = audio.NextFrame(frame);
                        if (f == null)
                        {
                            break;
                        }

                        writer.WriteFrame(frame);
                    }
                }
            }
        }
Beispiel #6
0
        /// <summary>
        /// Loads an audio file into a new OpenAL buffer, placing it in X-RAM when
        /// that extension is available.
        /// </summary>
        /// <param name="fileName">Path of the sound file to load.</param>
        /// <exception cref="OpenALException">Buffer generation or data upload failed.</exception>
        internal SoundEffect(string fileName)
        {
            AudioEngine.EnsureInit();

            buffer = AL.GenBuffer();

            var error = AL.GetError();

            if (error != ALError.NoError)
            {
                throw new OpenALException(error, "borked generation. ALError: " + error.ToString());
            }

            XRamExtension XRam = new XRamExtension();

            if (XRam.IsInitialized)
            {
                XRam.SetBufferMode(1, ref buffer, XRamExtension.XRamStorage.Hardware);
            }

            // FIX: dispose the reader once its data has been uploaded to OpenAL;
            // the previous version leaked it.
            using (AudioReader sound = new AudioReader(fileName))
            {
                var sounddata = sound.ReadToEnd();
                AL.BufferData(buffer, sounddata.SoundFormat.SampleFormatAsOpenALFormat, sounddata.Data, sounddata.Data.Length, sounddata.SoundFormat.SampleRate);
            }

            error = AL.GetError();
            if (error != ALError.NoError)
            {
                throw new OpenALException(error, "unable to read " + fileName);
            }

            _name = Path.GetFileNameWithoutExtension(fileName);
        }
Beispiel #7
0
 /// <summary>
 /// Handles the audio-upload postback: saves the uploaded file under ~/Audio,
 /// shows its media info and advances the wizard, or reports a validation error.
 /// </summary>
 protected void uploadAudio_click(object sender, EventArgs e)
 {
     if (Page.IsValid && uploadedfile.HasFile)
     {
         // FIX: removed the unused "extension" local; the audio-type check is
         // done by AudioReader.isAudioFile and the extension was never read.
         if (AudioReader.isAudioFile(uploadedfile.FileName))
         {
             string filename = uploadedfile.FileName;
             uploadedfile.SaveAs(Server.MapPath("~\\Audio\\") + filename);
             string filepath = Server.MapPath("~\\Audio\\");
             ar = new AudioReader(filename, filepath);
             FileInfoLabel.Text = ar.audioInfo();
             //ar.testloop();
             step1.Visible        = false;
             step2.Visible        = true;
             namelabel.Text       = filename;
             pathlabel.Text       = filepath;
             mediainfo.Visible    = true;
             ErrorMessage.Visible = false;
         }
         else
         {
             ErrorMessage.Text    = "Please upload an Audio File (.wav, .midi, .mp3, etc...)";
             ErrorMessage.Visible = true;
         }
     }
     else
     {
         ErrorMessage.Text    = "No file found!";
         ErrorMessage.Visible = true;
     }
 }
Beispiel #8
0
        /// <summary>
        /// Builds a single inverse filter from the average spectrum of all channels
        /// and convolves every channel of the impulse with it.
        /// </summary>
        void ProcessCommon(AudioReader reader, ref float[] impulse)
        {
            int fftSize  = QMath.Base2Ceil((int)reader.Length) << 1;
            int channels = reader.ChannelCount;

            FFTCache cache = new FFTCache(fftSize);
            Complex[] summedSpectrum = new Complex[fftSize];

            // Accumulate the spectrum of each channel.
            for (int ch = 0; ch < channels; ++ch)
            {
                float[] mono = new float[fftSize];
                WaveformUtils.ExtractChannel(impulse, mono, ch, channels);

                Complex[] spectrum = Measurements.FFT(mono, cache);
                for (int band = 0; band < spectrum.Length; ++band)
                {
                    summedSpectrum[band] += spectrum[band];
                }
            }

            // Average the sum across channels, then invert each band.
            float normalization = 1f / channels;
            for (int band = 0; band < summedSpectrum.Length; ++band)
            {
                summedSpectrum[band] = (summedSpectrum[band] * normalization).Invert();
            }

            // Double the impulse buffer, then run a fresh filter instance over each
            // channel (one instance per channel, matching the original behavior).
            Array.Resize(ref impulse, impulse.Length << 1);
            for (int ch = 0; ch < channels; ++ch)
            {
                GetFilter(summedSpectrum, 1, reader.SampleRate).Process(impulse, ch, channels);
            }
        }
Beispiel #9
0
        /// <summary>
        /// Builds an inverse filter from each channel's own spectrum and convolves
        /// that channel of the impulse with its dedicated filter.
        /// </summary>
        void ProcessPerChannel(AudioReader reader, ref float[] impulse)
        {
            int fftSize  = QMath.Base2Ceil((int)reader.Length) << 1;
            int channels = reader.ChannelCount;

            FFTCache cache = new FFTCache(fftSize);
            Convolver[] channelFilters = new Convolver[channels];

            for (int ch = 0; ch < channels; ++ch)
            {
                float[] mono = new float[fftSize];
                WaveformUtils.ExtractChannel(impulse, mono, ch, channels);

                // Invert every band of this channel's spectrum.
                Complex[] spectrum = Measurements.FFT(mono, cache);
                for (int band = 0; band < spectrum.Length; ++band)
                {
                    spectrum[band] = spectrum[band].Invert();
                }
                channelFilters[ch] = GetFilter(spectrum, WaveformUtils.GetRMS(mono), reader.SampleRate);
            }

            // Double the impulse buffer, then filter each channel in place.
            Array.Resize(ref impulse, impulse.Length << 1);
            for (int ch = 0; ch < channels; ++ch)
            {
                channelFilters[ch].Process(impulse, ch, channels);
            }
        }
Beispiel #10
0
		/// <summary>
		/// Decodes the given stream and uploads its samples into a new OpenAL buffer.
		/// </summary>
		public AL_SoundBuffer(Stream inStream)
		{
			using (AudioReader decoder = new AudioReader(inStream))
			{
				buffer = AL.GenBuffer();
				AL.BufferData(buffer, decoder.ReadToEnd());
			}
		}
 /// <summary>
 /// Initializes the pipeline, reads the full sample stream once, then detaches
 /// the sample handler.
 /// </summary>
 public void Start()
 {
     Init();
     SampleCount = AudioReader.GetSampleCount();
     // FIX: the previous version allocated an unused "samples" array AND a second
     // throw-away buffer for the Read call; one buffer is enough.
     float[] samples = new float[SampleCount];
     SampleProvider.Read(samples, 0, SampleCount);
     SampleProvider.Sample -= OnSample;
 }
Beispiel #12
0
        /// <summary>
        /// Handles the watermark-verification upload: saves both uploaded files,
        /// shows their media info, verifies the watermark against the selected
        /// user, and reports per-file validation errors.
        /// </summary>
        protected void uploadAudio_click(object sender, EventArgs e)
        {
            if (Page.IsValid)
            {
                if (AudioReader.isAudioFile(uploadedfile.FileName) && AudioReader.isAudioFile(uploadedfile2.FileName) && uploadedfile.HasFile && uploadedfile2.HasFile)
                {
                    string filepath = Server.MapPath("~\\AudioWatermark\\");
                    string filename;

                    filename = uploadedfile.FileName;
                    uploadedfile.SaveAs(Server.MapPath("~\\AudioWatermark\\") + filename);
                    ar1 = new AudioReader(filename, filepath);
                    originalinfolabel.Text = ar1.audioInfo();
                    // FIX: removed a stray empty statement (";") and the unused
                    // "file1path" local.
                    string file1name = filename;

                    filename = uploadedfile2.FileName;
                    uploadedfile2.SaveAs(Server.MapPath("~\\AudioWatermark\\") + filename);
                    ar2 = new AudioReader(filename, filepath);
                    watermarkinfolabel.Text = ar2.audioInfo();
                    string file2path = filepath;
                    string file2name = filename;
                    watermarkdatalabel.Text = AudioReader.verifyWatermark(ar1.readAudio(0, 40000, 2), ar2.readAudio(0, 40000, 2), Int32.Parse(UserDropdown.SelectedValue), file1name, file2path + file2name);

                    step1.Visible = false;
                    step2.Visible = true;
                }

                // Per-file error reporting (runs even when the verification above succeeded not at all).
                if (!uploadedfile.HasFile)
                {
                    ErrorMessage1.Text    = "No file found!";
                    ErrorMessage1.Visible = true;
                }
                else if (!AudioReader.isAudioFile(uploadedfile.FileName))
                {
                    ErrorMessage1.Text    = "Please upload an Audio File (.wav, .midi, .mp3, etc...)";
                    ErrorMessage1.Visible = true;
                }

                if (!uploadedfile2.HasFile)
                {
                    ErrorMessage2.Text    = "No file found!";
                    ErrorMessage2.Visible = true;
                }
                else if (!AudioReader.isAudioFile(uploadedfile2.FileName))
                {
                    ErrorMessage2.Text    = "Please upload an Audio File (.wav, .midi, .mp3, etc...)";
                    ErrorMessage2.Visible = true;
                }
            }
        }
Beispiel #13
0
 /// <summary>
 /// Shows the file picker preloaded with <paramref name="fileName"/> and opens
 /// the selected audio file, or returns null if the dialog was cancelled.
 /// </summary>
 AudioReader Import(string fileName)
 {
     browser.FileName = fileName;
     return browser.ShowDialog().Value
         ? AudioReader.Open(browser.FileName)
         : null;
 }
        /*
         * Not yet able to calculate the FFT with a non-power-of-2 FFT size.
         * TODO: investigate whether this limitation matters and how to lift it.
         * -------> WIP <---------------
         * public ShortTimeFourierTransform(string filePath, double frequencyGap, double timeResolution = 0.010)
         * {
         *  FilePath = filePath;
         *  TimeResolution = timeResolution;
         *  Reset();
         *  FFTLength = (int)(AudioReader.WaveFormat.SampleRate / frequencyGap);
         *  m = (int)Math.Log(FFTLength, 2.0);
         * } */
        /// <summary>
        /// Resets state, runs the sample provider over the whole input while timing
        /// it, then releases the reader and raises Ended with the elapsed seconds.
        /// </summary>
        public void Start()
        {
            Reset();
            // FIX: the previous version allocated an unused "samples" array AND a
            // second throw-away buffer for the Read call; one buffer is enough.
            float[]   samples   = new float[SampleCount];
            Stopwatch stopwatch = Stopwatch.StartNew();

            SampleProvider.Read(samples, 0, SampleCount);
            SampleProvider.Sample -= OnSample;
            stopwatch.Stop();
            AudioReader.Dispose();
            Ended?.Invoke(stopwatch.Elapsed.TotalSeconds);
        }
Beispiel #15
0
    /// <summary>
    /// Singleton bootstrap: destroys duplicate instances, registers this one,
    /// caches the AudioSource component and allocates the spectrum buffer.
    /// </summary>
    private void Awake()
    {
        if (Instance != null)
        {
            // Another instance already exists; this one is redundant.
            Destroy(gameObject);
            return;
        }

        Instance     = this;
        _audioSource = GetComponent <AudioSource>();
        SpectrumData = new float[512];
    }
Beispiel #16
0
		/// <summary>
		/// Decodes the stream into a new OpenAL buffer, binds that buffer to a new
		/// source and enables looping.
		/// </summary>
		public AL_Music(Stream inStream)
		{
			using (AudioReader decoder = new AudioReader(inStream))
			{
				buffer = AL.GenBuffer();
				source = AL.GenSource();

				AL.BufferData(buffer, decoder.ReadToEnd());
				AL.Source(source, ALSourcei.Buffer, buffer);
			}

			OnSetLoop(true);
		}
Beispiel #17
0
        /// <summary>
        /// Re-opens the previously uploaded audio file and reads it with a fixed
        /// 40000-byte header length for the current user, then reveals the info panels.
        /// </summary>
        protected void Confirm_Click(object sender, EventArgs e)
        {
            ar = new AudioReader(namelabel.Text, pathlabel.Text);
            //FrameInfoLabel.Text = FrameDropDown.SelectedValue;
            //int headerlength = Int32.Parse(FrameDropDown.SelectedValue);
            const int headerLength = 40000;
            FrameInfoLabel.Text = "40000";

            int userId = Int32.Parse(useridlabel.Text);
            ar.readAudio(userId, headerLength, 1);

            userinfo.Visible  = true;
            mediainfo.Visible = true;
            frameinfo.Visible = true;
        }
Beispiel #18
0
        /// <summary>
        /// Verifies that Vorbis metadata (codec, bit rate, sample format/rate,
        /// channels, stream count, duration) is read correctly from the OGG asset.
        /// </summary>
        public async Task LoadMetadataOgg()
        {
            // FIX: dispose the reader so the test does not leak it.
            using var audio = new AudioReader(Res.GetPath(Res.Audio_Ogg));

            await audio.LoadMetadata();

            Assert.True(audio.Metadata.Codec == "vorbis");
            Assert.True(audio.Metadata.BitRate == 48000);
            Assert.True(audio.Metadata.SampleFormat == "fltp");
            Assert.True(audio.Metadata.SampleRate == 11025);
            Assert.True(audio.Metadata.Channels == 2);
            Assert.True(audio.Metadata.Streams.Length == 1);
            Assert.True(Math.Abs(audio.Metadata.Duration - 1.515102) < 0.01);
        }
Beispiel #19
0
        /// <summary>
        /// Verifies that MP3 metadata (codec, bit rate, sample format/rate,
        /// channels, stream count, duration) is read correctly from the MP3 asset.
        /// </summary>
        public async Task LoadMetadataMp3()
        {
            // FIX: dispose the reader so the test does not leak it.
            using var audio = new AudioReader(Res.GetPath(Res.Audio_Mp3));

            await audio.LoadMetadata();

            Assert.True(audio.Metadata.Codec == "mp3");
            Assert.True(audio.Metadata.BitRate == 128000);
            Assert.True(audio.Metadata.SampleFormat == "fltp");
            Assert.True(audio.Metadata.SampleRate == 44100);
            Assert.True(audio.Metadata.Channels == 2);
            Assert.True(audio.Metadata.Streams.Length == 1);
            Assert.True(Math.Abs(audio.Metadata.Duration - 1.549187) < 0.01);
        }
Beispiel #20
0
        /// <summary>
        /// Loads one HRTF measurement: stores its position (azimuth, elevation,
        /// distance) and reads the samples of every channel from the clip at
        /// <paramref name="path"/> into <see cref="Data"/>.
        /// </summary>
        public HRTFSetEntry(double hAngle, double wAngle, double distance, string path)
        {
            Azimuth   = hAngle;
            Elevation = wAngle;
            Distance  = distance;

            Clip clip = AudioReader.ReadClip(path);
            int channelCount = clip.Channels;

            // One sample array per channel, all of equal length.
            Data = new float[channelCount][];
            for (int channel = 0; channel < channelCount; ++channel)
            {
                Data[channel] = new float[clip.Samples];
            }
            clip.GetData(Data, 0);
        }
Beispiel #21
0
        /// <summary>
        /// Converts the MP3 test asset to AAC in an FLV container via a file stream
        /// and checks the output's format, channel count, stream count and duration.
        /// The output file is deleted even when an assertion fails.
        /// </summary>
        public async Task ConversionStreamTest()
        {
            var path  = Res.GetPath(Res.Audio_Mp3);
            var opath = "out-test-v-2.aac";

            try
            {
                using var input = new AudioReader(path);
                await input.LoadMetadataAsync();

                var encoder = new AACEncoder
                {
                    Format = "flv"
                };

                // Writer (and its stream) must be disposed before re-reading opath.
                using (var filestream = File.Create(opath))
                {
                    using (var writer = new AudioWriter(filestream,
                                                        input.Metadata.Channels,
                                                        input.Metadata.SampleRate, 16,
                                                        encoder.Create()))
                    {
                        writer.OpenWrite();
                        input.Load();
                        await input.CopyToAsync(writer);
                    }
                }

                using var converted = new AudioReader(opath);
                await converted.LoadMetadataAsync();

                Assert.True(converted.Metadata.Format.FormatName == "flv");
                Assert.True(converted.Metadata.Channels == 2);
                Assert.True(converted.Metadata.Streams.Length == 1);
                Assert.True(Math.Abs(converted.Metadata.Duration - 1.515102) < 0.2);
            }
            finally
            {
                if (File.Exists(opath))
                {
                    File.Delete(opath);
                }
            }
        }
        /// <summary>
        /// Runs an external ffmpeg conversion while tracking its progress, asserts
        /// the final progress value, then validates the converted file's metadata.
        /// The output file is deleted even when an assertion fails.
        /// </summary>
        public async Task FFmpegWrapperProgressTest()
        {
            var path  = Res.GetPath(Res.Audio_Ogg);
            var opath = "out-test.mp3";

            double lastval = -1;

            try
            {
                double dur;
                // FIX: use using-blocks so both readers are disposed even when an
                // assertion throws; the original leaked them on failure paths.
                // (Scoped block keeps the original early-dispose behavior.)
                using (var audio = new AudioReader(path))
                {
                    await audio.LoadMetadataAsync();
                    dur = audio.Metadata.Duration;
                }

                Assert.True(Math.Abs(dur - 1.515102) < 0.01);

                var p        = FFmpegWrapper.ExecuteCommand("ffmpeg", $"-i \"{path}\" \"{opath}\"");
                var progress = FFmpegWrapper.RegisterProgressTracker(p, dur);
                progress.ProgressChanged += (s, prg) => lastval = prg;
                p.WaitForExit();

                // Give the progress callback a moment to deliver the final value.
                await Task.Delay(300);

                Assert.True(lastval > 50 && lastval <= 100);

                using (var converted = new AudioReader(opath))
                {
                    await converted.LoadMetadataAsync();

                    Assert.True(converted.Metadata.Channels == 2);
                    Assert.True(converted.Metadata.Streams.Length == 1);
                    Assert.True(Math.Abs(converted.Metadata.Duration - 1.515102) < 0.2);
                }
            }
            finally
            {
                if (File.Exists(opath))
                {
                    File.Delete(opath);
                }
            }
        }
Beispiel #23
0
        /// ------------------------------------------------------------------------------------
        /// <summary>
        /// Loads the transcription document for the given audio file. When the
        /// companion document is missing, reads the audio file (converting legacy
        /// SA data to Unicode along the way) and retries the load.
        /// </summary>
        /// ------------------------------------------------------------------------------------
        private bool Initialize(string audioFilePath)
        {
            m_doc = SaAudioDocument.Load(audioFilePath, false, true);
            if (m_doc != null)
            {
                ResetSegmentEnumerators();
                return true;
            }

            try
            {
                using (AudioReader audioReader = new AudioReader())
                {
                    AudioReader.InitResult result = audioReader.Initialize(audioFilePath);
                    if (result == AudioReader.InitResult.FileNotFound ||
                        result == AudioReader.InitResult.InvalidFormat)
                    {
                        return false;
                    }

                    // Try reading data from older SA audio files, converting
                    // it to Unicode along the way.
                    if (!audioReader.Read(true))
                    {
                        return false;
                    }

                    // Now try reading the companion transcription file again.
                    m_doc = SaAudioDocument.Load(audioFilePath, false, false);
                    ResetSegmentEnumerators();
                }
            }
            catch
            {
                // Best effort: any failure while reading means no document.
                return false;
            }

            return true;
        }
Beispiel #24
0
        void ProcessImpulse(object sender, RoutedEventArgs e)
        {
            if (browser.ShowDialog().Value)
            {
                AudioReader reader  = AudioReader.Open(browser.FileName);
                float[]     impulse = reader.Read();
                float       gain    = 1;
                if (keepGain.IsChecked.Value)
                {
                    gain = WaveformUtils.GetPeak(impulse);
                }

                if (commonEQ.IsChecked.Value)
                {
                    ProcessCommon(reader, ref impulse);
                }
                else
                {
                    ProcessPerChannel(reader, ref impulse);
                }

                if (keepGain.IsChecked.Value)
                {
                    WaveformUtils.Gain(impulse, gain / WaveformUtils.GetPeak(impulse));
                }

                BitDepth bits = reader.Bits;
                if (forceFloat.IsChecked.Value)
                {
                    bits = BitDepth.Float32;
                }

                int targetLen = QMath.Base2Ceil((int)reader.Length);
                if (separateExport.IsChecked.Value)
                {
                    ReferenceChannel[] channels = ChannelPrototype.GetStandardMatrix(reader.ChannelCount);
                    for (int ch = 0; ch < reader.ChannelCount; ++ch)
                    {
                        string exportName  = Path.GetFileName(browser.FileName);
                        int    idx         = exportName.LastIndexOf('.');
                        string channelName = ChannelPrototype.Mapping[(int)channels[ch]].Name;
                        exporter.FileName = $"{exportName[..idx]} - {channelName}{exportName[idx..]}";
        /// ------------------------------------------------------------------------------------
        /// <summary>
        /// Converts the specified millisecond value to a byte offset used (and only understood
        /// by SA) for altered speed playback in SA. Returns 0 when the file is missing.
        /// </summary>
        /// ------------------------------------------------------------------------------------
        public static long MillisecondValueToBytes(long millisecondVal, string filename)
        {
            using (var reader = new AudioReader())
            {
                if (reader.Initialize(filename) == AudioReader.InitResult.FileNotFound)
                {
                    return 0;
                }

                // Non-wave audio files are assumed to play at 44100 bytes per second.
                long bytesPerSecond = reader.IsValidWaveFile()
                    ? reader.BytesPerSecond
                    : 44100;

                return millisecondVal * bytesPerSecond / 1000;
            }
        }
        /// <summary>
        /// Converts the OGG test asset to MP3 and checks the output's format,
        /// channel count, stream count and duration. The output file is deleted
        /// even when an assertion fails.
        /// </summary>
        public async Task ConversionTest()
        {
            var path  = Res.GetPath(Res.Audio_Ogg);
            var opath = "out-test-2.mp3";

            try
            {
                using var input = new AudioReader(path);
                await input.LoadMetadataAsync();

                // The writer must be disposed before the output is re-read below.
                using (var writer = new AudioWriter(opath,
                                                    input.Metadata.Channels,
                                                    input.Metadata.SampleRate, 16,
                                                    new MP3Encoder().Create()))
                {
                    writer.OpenWrite();
                    input.Load();
                    await input.CopyToAsync(writer);
                }

                using var converted = new AudioReader(opath);
                await converted.LoadMetadataAsync();

                Assert.True(converted.Metadata.Format.FormatName == "mp3");
                Assert.True(converted.Metadata.Channels == 2);
                Assert.True(converted.Metadata.Streams.Length == 1);
                Assert.True(Math.Abs(converted.Metadata.Duration - 1.515102) < 0.2);
            }
            finally
            {
                if (File.Exists(opath))
                {
                    File.Delete(opath);
                }
            }
        }
Beispiel #27
0
        /// <summary>
        /// Scans <paramref name="path"/> for impulse files whose names match
        /// <paramref name="pattern"/> (group "param1" = angle, "param2" = distance)
        /// and loads each as multichannel samples, keyed by angle, then distance.
        /// </summary>
        static Dictionary <int, Dictionary <int, float[][]> > ImportImpulses(string path, Regex pattern)
        {
            Settings.Default.LastFolder = path;
            // FIX: renamed "folders" to "files" - Directory.GetFiles returns files.
            string[] files = Directory.GetFiles(path);
            Dictionary <int, Dictionary <int, float[][]> > data = new Dictionary <int, Dictionary <int, float[][]> >();

            for (int file = 0; file < files.Length; ++file)
            {
                string fileName = Path.GetFileName(files[file]);
                Match  match    = pattern.Match(fileName);
                if (match.Success &&
                    int.TryParse(match.Groups["param1"].Value, out int angle) &&
                    int.TryParse(match.Groups["param2"].Value, out int distance))
                {
                    // FIX: single TryGetValue lookup instead of ContainsKey + Add
                    // (avoids hashing the key twice).
                    if (!data.TryGetValue(angle, out Dictionary <int, float[][]> byDistance))
                    {
                        byDistance = new Dictionary <int, float[][]>();
                        data.Add(angle, byDistance);
                    }
                    byDistance[distance] = AudioReader.Open(files[file]).ReadMultichannel();
                }
            }
            return data;
        }
Beispiel #28
0
        /// <summary>
        /// Plays the given audio file frame-by-frame through an AudioPlayer.
        /// Playback ends early if the player pipe breaks (IOException).
        /// NOTE(review): the "output" parameter is unused here; kept for signature
        /// compatibility with callers - confirm whether it can be removed.
        /// </summary>
        static void ReadPlayAudio(string input, string output)
        {
            // FIX: dispose the reader; the previous version leaked it.
            using (var audio = new AudioReader(input))
            {
                audio.LoadMetadataAsync().Wait();
                audio.Load();

                using (var player = new AudioPlayer())
                {
                    player.OpenWrite(audio.Metadata.SampleRate, audio.Metadata.Channels, showWindow: false);

                    // For simple playing, can just use "CopyTo"
                    // audio.CopyTo(player);

                    var frame = new AudioFrame(audio.Metadata.Channels);
                    while (true)
                    {
                        // read next frame; null marks end of stream
                        var f = audio.NextFrame(frame);
                        if (f == null)
                        {
                            break;
                        }

                        try
                        {
                            player.WriteFrame(frame);
                        }
                        // FIX: removed the no-op "catch { throw; }" clause; only the
                        // IOException handler (stop playback) had any effect.
                        catch (IOException)
                        {
                            break;
                        }
                    }
                }
            }
        }
 /// <summary>
 /// Lets the user pick a second audio file (.wav/.avi/.mp3), opens the matching
 /// reader over it, shows its format details and enables conversion.
 /// </summary>
 private void btnFile2_Click(object sender, EventArgs e)
 {
     if (ofdFile.ShowDialog(this) == DialogResult.OK)
     {
         tbFile2.Text = ofdFile.FileName;
         // FIX: Path.GetExtension is safer than a fixed-length Substring, which
         // throws ArgumentOutOfRangeException for very short file names.
         string ext = Path.GetExtension(ofdFile.FileName).ToLower();
         FileStream fs = new FileStream(ofdFile.FileName, FileMode.Open);
         switch (ext)
         {
             case ".wav" :
                 ar = new WaveReader(fs);
                 break;
             case ".avi" :
                 ar = new AviReader(fs);
                 if (!((AviReader)ar).HasAudio)
                 {
                     MessageBox.Show("Avi stream has not audio track");
                     return;
                 }
                 break;
             case ".mp3" :
                 ar = new Mp3Reader(fs);
                 break;
             default:
                 // FIX: close the stream when no reader takes ownership of it;
                 // the previous version leaked the FileStream on this path.
                 fs.Dispose();
                 return;
         }
         oldFormat = ar.ReadFormat();
         FormatDetails fd = AudioCompressionManager.GetFormatDetails(oldFormat);
         lblFileFormat.Text = string.Format("{0} {1}", AudioCompressionManager.GetFormatTagDetails(fd.FormatTag).FormatTagName, fd.FormatName);
         GetFormatsConverted(oldFormat);
         gbConvert.Enabled = true;
         btnMakeMp3.Enabled = false;
     }
 }
Beispiel #30
0
        /// <summary>
        /// Background sound-mixing loop. Waits on <c>swh</c> for play requests, turns each
        /// requested sound into an OpenAL buffer/source pair, and — when new requests arrive
        /// while sounds are still playing — pauses the playing sources and restarts them
        /// together with the new ones so everything plays simultaneously without any sound
        /// restarting from the beginning. Runs forever; intended for a dedicated thread.
        /// Shared state (<c>swh</c>, <c>locker</c>, <c>somethingToPlay</c>, <c>request</c>,
        /// <c>nrequests</c>, <c>sound</c>) is declared elsewhere in the enclosing class.
        /// </summary>
        public void soundEngine()
        {
            int buff;
            int state;
            int finishedBuffer;

            // start off at a capacity of 2, will double when necessary
            int [] currentSounds  = new int[2];
            int[]  soundBluePrint = new int[2]; // to build a list of sounds which will be copied
            // into currentSounds -- this is so I don't have to modify currentSounds while it
            // is being used, which could cause problems since it is a reference object
            int nSounds;  // current number of sounds

            while (true)
            {
                swh.WaitOne();
                lock (locker)
                {
                    if (!somethingToPlay)  // a Set may have been called while requests were processed
                                           // below
                    {
                        continue;
                    }
                }
                int k;
                lock (locker)
                {
                    somethingToPlay = false;
                    nSounds         = 0;
                    if (currentSounds.Length < request.Length)
                    {
                        Array.Resize(ref currentSounds, request.Length);
                    }
                    for (k = 0; k < nrequests; k++)
                    {
                        buff = AL.GenBuffer();
                        currentSounds[nSounds] = AL.GenSource();
                        // Dispose the reader as soon as its data is copied into the AL buffer
                        // (the original leaked the reader; AudioReader is IDisposable).
                        using (AudioReader ar = new AudioReader(sound[request[k]]))
                        {
                            AL.BufferData(buff, ar.ReadToEnd());
                        }
                        AL.Source(currentSounds[nSounds], ALSourcei.Buffer, buff);
                        nSounds++;
                    }
                    // request array is used up, so clear it
                    if (request.Length > 2)
                    {
                        Array.Resize(ref request, 2);
                    }
                    nrequests = 0;
                }
                AL.SourcePlay(nSounds, currentSounds);
                do
                {
                    Thread.Sleep(100); // below ~100 ms the sound distorts
                    int nCurrentSounds = nSounds;
                    nSounds = 0;
                    // check if something new to play
                    // if new sounds are played, we cannot mess with the currentSounds array, which is being used --
                    // we have to build up a soundBluePrint array
                    lock (locker)
                    {
                        if (somethingToPlay)
                        {
                            somethingToPlay = false;
                            if (nCurrentSounds + request.Length > soundBluePrint.Length)
                            {
                                Array.Resize(ref soundBluePrint, nCurrentSounds + request.Length);
                            }
                            for (k = 0; k < nrequests; k++)
                            {
                                buff = AL.GenBuffer();
                                soundBluePrint[nSounds] = AL.GenSource();
                                using (AudioReader ar = new AudioReader(sound[request[k]]))
                                {
                                    AL.BufferData(buff, ar.ReadToEnd());
                                }
                                AL.Source(soundBluePrint[nSounds], ALSourcei.Buffer, buff);
                                nSounds++;
                            }
                            if (request.Length > 2)
                            {
                                Array.Resize(ref request, 2);
                            }
                            nrequests = 0;
                        }
                    }

                    // BUGFIX: the resize above only runs when somethingToPlay was set, so with
                    // no new requests but more than soundBluePrint.Length sounds still playing,
                    // the staging writes below would throw IndexOutOfRangeException. Make sure
                    // the blueprint can hold new sounds plus every still-playing source.
                    if (soundBluePrint.Length < nSounds + nCurrentSounds)
                    {
                        Array.Resize(ref soundBluePrint, nSounds + nCurrentSounds);
                    }

                    // Query the sources to find out if any stop playing
                    int temp = nCurrentSounds;
                    for (int j = 0; j < temp; j++)
                    {
                        AL.GetSource(currentSounds[j], ALGetSourcei.SourceState, out state);
                        if ((ALSourceState)state == ALSourceState.Playing)
                        {
                            // I want to pause them here, so I can include them with the new
                            // sounds later -- if played again without being paused, they
                            // will start from the beginning.  I want them to be played
                            // simulatenously with the new sounds, but to pick up where
                            // they left off
                            AL.SourcePause(currentSounds[j]);
                            soundBluePrint[nSounds] = currentSounds[j];
                            nSounds++;
                        }
                        else
                        {
                            // BUGFIX: the original deleted only the source and leaked one AL
                            // buffer per finished sound. Look up the attached buffer, delete
                            // the source first (detaching the buffer), then delete the buffer.
                            AL.GetSource(currentSounds[j], ALGetSourcei.Buffer, out finishedBuffer);
                            AL.DeleteSource(currentSounds[j]);
                            AL.DeleteBuffer(finishedBuffer);
                        }
                    }
                    if (nSounds > 0)
                    {
                        if (currentSounds.Length < nSounds)
                        {
                            Array.Resize(ref currentSounds, nSounds);
                        }
                        for (k = 0; k < nSounds; k++)
                        {
                            currentSounds[k] = soundBluePrint[k];
                        }
                        AL.SourcePlay(nSounds, currentSounds);
                    }
                } while (nSounds > 0);
            }
        }
        /// <summary>
        /// Pipes raw video and audio data stream-to-stream into an H.264 + AAC writer,
        /// then re-opens the produced file and checks that it contains two streams whose
        /// framerate, duration, dimensions and sample rate match the sources.
        /// The output file is deleted afterwards even if an assertion fails.
        /// </summary>
        public async Task ConversionStreamTest()
        {
            var videoPath  = Res.GetPath(Res.Video_Mp4);
            var audioPath  = Res.GetPath(Res.Audio_Mp3);
            var outputPath = "out-test-av-2.mp4";

            try
            {
                using var videoSource = new VideoReader(videoPath);
                await videoSource.LoadMetadataAsync();
                videoSource.Load();

                using var audioSource = new AudioReader(audioPath);
                await audioSource.LoadMetadataAsync();
                audioSource.Load();

                // Stream metadata used to configure the writer.
                var videoInfo = videoSource.Metadata.GetFirstVideoStream();
                var audioInfo = audioSource.Metadata.GetFirstAudioStream();

                var encoder = new H264Encoder
                {
                    Format = "flv"
                };

                using (var output = File.Create(outputPath))
                using (var writer = new AudioVideoWriter(output,
                                                         videoInfo.Width.Value,
                                                         videoInfo.Height.Value,
                                                         videoInfo.AvgFrameRateNumber,
                                                         audioInfo.Channels.Value,
                                                         audioInfo.SampleRateNumber, 16,
                                                         encoder.Create(),
                                                         new AACEncoder().Create()))
                {
                    // Open for writing (this starts the FFmpeg process)
                    writer.OpenWrite();

                    // Copy raw data directly from stream to stream, both copies in flight.
                    var audioCopy = audioSource.DataStream.CopyToAsync(writer.InputDataStreamAudio);
                    var videoCopy = videoSource.DataStream.CopyToAsync(writer.InputDataStreamVideo);

                    await videoCopy;
                    await audioCopy;
                }

                using var produced = new VideoReader(outputPath);
                await produced.LoadMetadataAsync();

                Assert.True(produced.Metadata.Streams.Length == 2);

                var producedVideo = produced.Metadata.GetFirstVideoStream();
                var producedAudio = produced.Metadata.GetFirstAudioStream();

                Assert.True(Math.Abs(producedVideo.AvgFrameRateNumber - videoSource.Metadata.AvgFramerate) < 0.1);
                Assert.True(Math.Abs(produced.Metadata.Duration - videoSource.Metadata.Duration) < 0.2);
                Assert.True(producedVideo.Width.Value == videoSource.Metadata.Width);
                Assert.True(producedVideo.Height.Value == videoSource.Metadata.Height);
                Assert.True(producedAudio.SampleRateNumber == audioSource.Metadata.SampleRate);
            }
            finally
            {
                if (File.Exists(outputPath))
                {
                    File.Delete(outputPath);
                }
            }
        }