/// <summary>
/// Transcode an audio file into another container/codec with the given
/// channel layout and sample rate.
/// </summary>
/// <param name="input">input audio file path</param>
/// <param name="output">output audio file path</param>
/// <param name="outChannels">output channel count</param>
/// <param name="outSampleRate">output sample rate in Hz</param>
public AudioTranscode(string input, string output, int outChannels = 2, int outSampleRate = 44100)
{
    using (MediaWriter writer = new MediaWriter(output))
    using (MediaReader reader = new MediaReader(input))
    {
        // Index of the first audio stream in the input.
        int audioIndex = reader.First(_ => _.Codec.Type == AVMediaType.AVMEDIA_TYPE_AUDIO).Index;

        writer.AddStream(MediaEncoder.CreateAudioEncode(writer.Format, outChannels, outSampleRate));
        writer.Initialize();

        AudioFrame dst = AudioFrame.CreateFrameByCodec(writer[0].Codec);
        SampleConverter converter = new SampleConverter(dst);

        // Audio pts is measured in samples and must increase monotonically.
        long pts = 0;
        foreach (var packet in reader.ReadPacket())
        {
            foreach (var srcframe in reader[audioIndex].ReadFrame(packet))
            {
                foreach (var dstframe in converter.Convert(srcframe))
                {
                    // BUGFIX: pts must be the sample offset at the *start* of the
                    // frame (first frame pts == 0), so assign before accumulating;
                    // the original accumulated first, shifting every frame by one
                    // frame's worth of samples.
                    dstframe.Pts = pts;
                    pts += dstframe.AVFrame.nb_samples;
                    foreach (var outpacket in writer[0].WriteFrame(dstframe))
                    {
                        writer.WritePacket(outpacket);
                    }
                }
            }
        }
        writer.FlushMuxer();
    }
}
/// <summary>
/// Encode 1000 audio frames generated from a <c>Mat</c> into the given output
/// file (stereo, 44.1 kHz).
/// </summary>
/// <param name="output">output audio file path</param>
public EncodeAudioByMat(string output)
{
    using (MediaWriter writer = new MediaWriter(output))
    {
        writer.AddStream(MediaEncoder.CreateAudioEncode(writer.Format, 2, 44100));
        writer.Initialize();

        AudioFrame dstFrame = AudioFrame.CreateFrameByCodec(writer[0].Codec);
        SampleConverter converter = new SampleConverter(dstFrame);

        // Hoist loop-invariant codec parameters out of the encode loop instead
        // of re-reading writer[0].Codec.AVCodecContext on every iteration.
        int channels = writer[0].Codec.AVCodecContext.channels;
        int sampleRate = writer[0].Codec.AVCodecContext.sample_rate;

        using (Mat mat = CreateMat(channels))
        {
            // Audio pts is measured in samples and must increase monotonically.
            long pts = 0;
            for (int i = 0; i < 1000; i++)
            {
                foreach (var item in converter.Convert(mat.ToAudioFrame(dstSampleRate: sampleRate)))
                {
                    // BUGFIX: pts is the sample offset at the *start* of the
                    // frame (first frame pts == 0): assign before accumulating.
                    item.Pts = pts;
                    pts += item.NbSamples;
                    foreach (var packet in writer[0].WriteFrame(item))
                    {
                        writer.WritePacket(packet);
                    }
                }
            }
        }
        writer.FlushMuxer();
    }
}
/// <summary>
/// Lazily converts each source frame to this codec context's target format:
/// video frames are scaled / pixel-format converted, audio frames are resampled.
/// <see cref="sws_getCachedContext(SwsContext*, int, int, AVPixelFormat, int, int, AVPixelFormat, int, SwsFilter*, SwsFilter*, double*)"/>
/// <see cref="sws_scale(SwsContext*, byte*[], int[], int, int, byte*[], int[])"/>
/// </summary>
/// <param name="c">codec context describing the destination format</param>
/// <param name="sourceFrames">frames to convert (enumerated lazily)</param>
/// <param name="flags">scaling algorithm used for video conversion</param>
/// <returns>
/// Lazily-yielded converted frames. NOTE: the SAME destination frame instance
/// is reused and yielded on every iteration — consumers must fully process
/// each frame before advancing and must not buffer the yielded references.
/// </returns>
public static IEnumerable <Frame> ConvertFrames(this CodecContext c, IEnumerable <Frame> sourceFrames, ScaleFlag flags = ScaleFlag.Bilinear)
{
    // One destination frame reused for every conversion; disposed when the
    // iterator completes or is disposed.
    using var destFrame = c.CreateFrame();
    int pts = 0;
    if (c.Codec.Type == MediaType.Video)
    {
        using var frameConverter = new FrameConverter();
        foreach (var sourceFrame in sourceFrames)
        {
            frameConverter.ConvertFrame(sourceFrame, destFrame, flags);
            // Video pts counts frames: 0, 1, 2, ...
            destFrame.Pts = pts++;
            yield return(destFrame);
        }
    }
    else if (c.Codec.Type == MediaType.Audio)
    {
        using var frameConverter = new SampleConverter();
        foreach (var sourceFrame in sourceFrames)
        {
            frameConverter.ConvertFrame(destFrame, sourceFrame);
            // Audio pts counts samples, advancing one full codec frame at a
            // time; note the first yielded frame gets pts == FrameSize, not 0.
            destFrame.Pts = pts += c.FrameSize;
            yield return(destFrame);
        }
    }
    // Frames of any other media type are silently skipped (nothing yielded).
}
/// <summary>
/// A "Complex" property mapped through a value-type converter backed by an
/// int type-converter should produce an integer-typed sort field.
/// </summary>
public void SortType_Complex()
{
    var converter = new SampleConverter();
    var complexProperty = typeof(Sample).GetProperty("Complex");
    mapper = new NumericReflectionFieldMapper <Sample>(complexProperty, StoreMode.Yes, converter, TypeDescriptor.GetConverter(typeof(int)), "Complex", 128, 1.0f);

    var sortField = mapper.CreateSortField(false);

    Assert.That(sortField.Type, Is.EqualTo(SortField.INT));
}
/// <summary>
/// Apple II sound player: stores the engine/mixer references, resets the
/// player state, matches the output sample rate to the mixer, and starts a
/// plain sound stream on the mixer.
/// </summary>
/// <param name="scumm">owning SCUMM engine instance</param>
/// <param name="mixer">mixer used for audio output</param>
public Player_AppleII(ScummEngine scumm, IMixer mixer)
{
    _mixer = mixer;
    _vm = scumm;
    _sampleConverter = new SampleConverter();
    // Reset internal state before configuring the sample rate.
    ResetState();
    // Drive the player at the mixer's output rate.
    SetSampleRate(_mixer.OutputRate);
    // NOTE(review): the trailing bool arguments look like autofree=false,
    // permanent=true — confirm against IMixer.PlayStream's signature.
    _soundHandle = _mixer.PlayStream(SoundType.Plain, this, -1, Mixer.MaxChannelVolume, 0, false, true);
}
/// <summary>
/// Converting a stored string field value through the mapper should yield an
/// instance of the non-value target type (Complex), not the raw int.
/// </summary>
public void ConvertsFieldValueToNonValueType()
{
    var converter = new SampleConverter();
    var complexProperty = typeof(Sample).GetProperty("Complex");
    mapper = new NumericReflectionFieldMapper <Sample>(complexProperty, StoreMode.Yes, converter, TypeDescriptor.GetConverter(typeof(int)), "Complex", 128, 1.0f);

    var storedField = new Field("Complex", "100", Field.Store.YES, Field.Index.NO);
    var result = mapper.ConvertFieldValue(storedField);

    Assert.That(result, Is.InstanceOf <Complex>());
}
/// <summary>
/// Adds one audio stream to the writer; a no-op if an audio stream already
/// exists.
/// </summary>
/// <param name="dstChannels">output channel count</param>
/// <param name="dstSampleRate">output sample rate in Hz</param>
/// <returns>this instance, for fluent chaining</returns>
public Mp4VideoWriter AddAudio(int dstChannels, int dstSampleRate)
{
    // IDIOM: Any() instead of Where(...).Count() == 0 — stops at the first
    // audio stream instead of enumerating all streams.
    if (!writer.Any(_ => _.Codec.Type == AVMediaType.AVMEDIA_TYPE_AUDIO))
    {
        Channels = dstChannels;
        SampleRate = dstSampleRate;
        var stream = writer.AddStream(MediaEncoder.CreateAudioEncode(writer.Format, dstChannels, dstSampleRate));
        // The freshly added stream is the last one.
        audioIndex = writer.Count - 1;
        sampleConverter = new SampleConverter(stream.Codec);
    }
    return(this);
}
/// <summary>
/// Reads samples from the underlying wave stream as 32-bit IEEE floats,
/// converting from integer PCM (8/16/32-bit) when necessary.
/// </summary>
/// <param name="buffer">destination buffer receiving float samples</param>
/// <param name="count">number of samples to read</param>
/// <returns>the value returned by the underlying read call</returns>
private int readAsFloat(float[] buffer, int count)
{
    int reads = 0;

    var encoding = waveStream.Format.Encoding;
    if (encoding == WaveFormatEncoding.Pcm)
    {
        // Integer PCM source: read into an intermediate buffer of the native
        // sample width, then widen to IEEE float.
        switch (waveStream.Format.BitsPerSample)
        {
            case 8:
            {
                byte[] temp = new byte[bufferSize];
                reads = read(temp, count);
                SampleConverter.Convert(temp, buffer);
                break;
            }
            case 16:
            {
                short[] temp = new short[bufferSize];
                reads = read(temp, count);
                SampleConverter.Convert(temp, buffer);
                break;
            }
            case 32:
            {
                int[] temp = new int[bufferSize];
                reads = read(temp, count);
                SampleConverter.Convert(temp, buffer);
                break;
            }
            default:
                throw new NotSupportedException();
        }
    }
    else if (encoding == WaveFormatEncoding.IeeeFloat)
    {
        // Source is already IEEE float — read straight into the caller's buffer.
        reads = read(buffer, count);
    }
    else
    {
        throw new NotSupportedException("The wave format isn't supported");
    }

    return(reads);
}
/// <summary>
/// Convert a Mat to an audio frame in <paramref name="dstFotmat"/>.
/// <para><see cref="DepthType"/> to <see cref="AVSampleFormat"/> mapping table.
/// If <see cref="Mat.NumberOfChannels"/> > 1 the packed format is used, otherwise the planar one.</para>
/// <list type="table" >
/// <item>
/// <term><see cref="DepthType.Cv8U"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_U8"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_U8P"/></description>
/// </item>
/// <item>
/// <term><see cref="DepthType.Cv16S"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_S16"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S16P"/></description>
/// </item>
/// <item>
/// <term><see cref="DepthType.Cv32S"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_S32"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S32P"/></description>
/// </item>
/// <item>
/// <term><see cref="DepthType.Cv32F"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_FLT"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_FLTP"/></description>
/// </item>
/// <item>
/// <term><see cref="DepthType.Cv64F"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_DBL"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_DBLP"/></description>
/// </item>
/// <item>
/// <term><see cref="DepthType.Cv64F"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_S64"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S64P"/></description>
/// </item>
/// <item>NOTE: Emgucv does not support int64, so <see cref="DepthType.Cv64F"/> is also mapped to int64;
/// fill the Mat with int64 data when <paramref name="dstFotmat"/> is <see cref="AVSampleFormat.AV_SAMPLE_FMT_S64"/> or <see cref="AVSampleFormat.AV_SAMPLE_FMT_S64P"/>.
/// </item>
/// </list>
/// </summary>
/// <param name="mat">source audio data</param>
/// <param name="dstFotmat">Defaults to the format chosen automatically from <see cref="Mat.Depth"/> and <see cref="Mat.NumberOfChannels"/> using the mapping table</param>
/// <param name="dstSampleRate">A Mat does not carry a sample rate; set the value here or later</param>
/// 
/// <returns>the converted <c>AudioFrame</c></returns>
public static AudioFrame ToAudioFrame(this Mat mat, AVSampleFormat dstFotmat = AVSampleFormat.AV_SAMPLE_FMT_NONE, int dstSampleRate = 0)
{
    // Choose the source sample format from the Mat's depth; multi-channel
    // Mats map to packed formats, single-channel Mats to planar formats.
    AVSampleFormat srcformat;
    switch (mat.Depth)
    {
        case DepthType.Default:
        case DepthType.Cv8U:
        case DepthType.Cv8S:
            srcformat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_U8 : AVSampleFormat.AV_SAMPLE_FMT_U8P;
            break;
        case DepthType.Cv16U:
        case DepthType.Cv16S:
            srcformat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_S16 : AVSampleFormat.AV_SAMPLE_FMT_S16P;
            break;
        case DepthType.Cv32S:
            srcformat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_S32 : AVSampleFormat.AV_SAMPLE_FMT_S32P;
            break;
        case DepthType.Cv32F:
            srcformat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_FLT : AVSampleFormat.AV_SAMPLE_FMT_FLTP;
            break;
        case DepthType.Cv64F:
            srcformat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_DBL : AVSampleFormat.AV_SAMPLE_FMT_DBLP;
            break;
        default:
            throw new FFmpegException(FFmpegException.NotSupportFormat);
    }

    if (dstFotmat != AVSampleFormat.AV_SAMPLE_FMT_NONE && dstFotmat != srcformat)
    {
        // The converter requires a positive sample rate even when the caller
        // has not set one yet.
        // BUGFIX: was Math.Min(1, dstSampleRate), which clamped every real
        // sample rate down to 1; Math.Max keeps the caller's rate and only
        // substitutes 1 when none was provided.
        int safeSampleRate = Math.Max(1, dstSampleRate);
        using (SampleConverter converter = new SampleConverter(dstFotmat, mat.NumberOfChannels > 1 ? mat.NumberOfChannels : mat.Height, mat.Width, safeSampleRate))
        {
            AudioFrame frame = converter.ConvertFrame(MatToAudioFrame(mat, srcformat, safeSampleRate), out int a, out int b);
            unsafe
            {
                // Restore the caller's real sample rate after conversion.
                ((AVFrame *)frame)->sample_rate = dstSampleRate;
            }
            // BUGFIX: the converted frame was previously discarded and the
            // unconverted source frame returned instead.
            // NOTE(review): assumes ConvertFrame's result outlives the
            // converter's disposal — confirm against SampleConverter.
            return frame;
        }
    }
    return(MatToAudioFrame(mat, srcformat, dstSampleRate));
}
/// <summary>
/// Recording audio.
/// <para>
/// First set inputDeviceName = null: the available device names are printed
/// to the console/log output.
/// </para>
/// <para>
/// Then set inputDeviceName to your real device name and run again to record
/// to the output file.
/// </para>
/// <para>
/// To stop recording, exit the console.
/// </para>
/// <para>ffmpeg </para>
/// </summary>
/// <param name="outputFile">output audio file path</param>
/// <param name="inputDeviceName">dshow audio device name; null to list devices</param>
public RecordingAudio(string outputFile, string inputDeviceName = null)
{
    // console output
    FFmpegHelper.SetupLogging(logWrite: _ => Console.Write(_));
    // register all device
    FFmpegHelper.RegisterDevice();
    var dshowInput = new InFormat("dshow");
    // list all "dshow" device at console output, ffmpeg does not support direct reading of device names
    MediaDevice.PrintDeviceInfos(dshowInput, "list", MediaDevice.ListDevicesOptions);

    if (string.IsNullOrWhiteSpace(inputDeviceName))
    {
        return;
    }

    // get your audio input device name from console output
    // NOTE: DO NOT delete "audio="
    using (MediaReader reader = new MediaReader($"audio={inputDeviceName}", dshowInput))
    using (MediaWriter writer = new MediaWriter(outputFile))
    {
        var stream = reader.Where(_ => _.Codec.Type == AVMediaType.AVMEDIA_TYPE_AUDIO).First();
        writer.AddStream(MediaEncode.CreateAudioEncode(writer.Format, stream.Codec.AVCodecContext.channels, stream.Codec.AVCodecContext.sample_rate));
        writer.Initialize();

        AudioFrame dstFrame = AudioFrame.CreateFrameByCodec(writer[0].Codec);
        SampleConverter converter = new SampleConverter(dstFrame);

        // Audio pts is measured in samples and must increase monotonically.
        long pts = 0;
        foreach (var packet in reader.ReadPacket())
        {
            foreach (var frame in stream.ReadFrame(packet))
            {
                foreach (var dstframe in converter.Convert(frame))
                {
                    // BUGFIX: the loop previously read nb_samples from, set Pts
                    // on, and encoded the *template* frame (dstFrame) instead of
                    // the converted frame (dstframe) yielded by the converter.
                    // Also assign the start-of-frame sample offset *before*
                    // accumulating so the first frame gets pts == 0.
                    dstframe.Pts = pts;
                    pts += dstframe.AVFrame.nb_samples;
                    foreach (var dstpacket in writer[0].WriteFrame(dstframe))
                    {
                        writer.WritePacket(dstpacket);
                    }
                }
            }
        }
        writer.FlushMuxer();
    }
}
/// <summary>
/// Fills up to <paramref name="count"/> ring-buffer slots with freshly read
/// samples, queues them on the OpenAL source(s), and restarts playback if the
/// source stopped (buffer underrun) while it was supposed to be playing.
/// </summary>
/// <param name="count">maximum number of ring-buffer slots to fill</param>
private void Refill(int count)
{
    lock (locker)
    {
        // At end of stream: rewind when looping, otherwise nothing to refill.
        if (source.Position >= source.TotalSamples)
        {
            if (isLooping)
            {
                source.Position = 0;
            }
            else
            {
                return;
            }
        }
        // Remember the pre-queue state so an underrun can be detected below.
        var state = AL.GetSourceState(IDs[0]);
        for (int a = 0; a < count; a++)
        {
            var sampleRate = source.SampleRate;
            // 3D sound: one mono buffer per source channel;
            // otherwise a single interleaved buffer with all channels.
            var bufferCount = Sound.Supports3D ? source.Channels : 1;
            var channelCount = Sound.Supports3D ? 1 : source.Channels;
            float[] samples;
            samples = source.ReadSamples(RINGBUFFER_SAMPLES);
            // Nothing left to read — stop refilling.
            if (samples.Length == 0)
            {
                return;
            }
            short[] samplesChannel = new short[samples.Length / bufferCount];
            for (int i = 0; i < bufferCount; i++)
            {
                // Convert buffer i's float samples to 16-bit and queue them.
                SampleConverter.To16Bit(samples, samplesChannel, i, bufferCount);
                var buffer = ringbuffers[i, ringBufferIndex];
                buffer.SetData(AudioFormat.Short16, channelCount, samplesChannel, sampleRate);
                AL.SourceQueueBuffer(IDs[i], buffer.ID);
            }
            // Advance to the next ring-buffer slot (wraps around).
            ringBufferIndex = (ringBufferIndex + 1) % RINGBUFFER_COUNT;
        }
        // If the source stopped while we thought it was playing, it underran:
        // restart playback now that fresh buffers are queued.
        var newState = AL.GetSourceState(IDs[0]);
        if (newState != ALSourceState.Playing && state == ALSourceState.Playing)
        {
            Play();
        }
    }
}
/// <summary>
/// Decode the audio stream of a file, converting each decoded frame to a
/// <c>Mat</c>.
/// </summary>
/// <param name="inputFile">input media file path</param>
public DecodeAudioToMat(string inputFile)
{
    using (MediaReader reader = new MediaReader(inputFile))
    {
        // PERF: the stream lookup, template frame and converter are invariant,
        // so create them once instead of per packet as the original did.
        // audio maybe have one more stream, e.g. 0 is mp3 audio, 1 is mpeg cover
        var audioIndex = reader.Where(_ => _.Codec.AVCodecContext.codec_type == AVMediaType.AVMEDIA_TYPE_AUDIO).First().Index;
        AudioFrame audioFrame = new AudioFrame(AVSampleFormat.AV_SAMPLE_FMT_S16P, 2, 1024, 44100);
        SampleConverter converter = new SampleConverter(audioFrame);

        foreach (var packet in reader.ReadPacket())
        {
            foreach (var frame in reader[audioIndex].ReadFrame(packet))
            {
                Mat mat = frame.ToMat();
            }
        }
    }
}
/// <summary>
/// Loads a sound from a file. Streamed sounds defer decoding; non-streamed
/// sounds are fully decoded up front into one 16-bit buffer per channel (3D)
/// or a single interleaved buffer (non-3D).
/// </summary>
/// <param name="audioDevice">device the sound's buffers belong to</param>
/// <param name="file">path of the sound file to load</param>
/// <param name="flags">behavioral flags (3D support, streaming, readability)</param>
internal Sound(AudioDevice audioDevice, string file, SoundFlags flags) : base(audioDevice)
{
    if (string.IsNullOrEmpty(file))
    {
        throw new ArgumentNullException("file");
    }
    if (!System.IO.File.Exists(file))
    {
        throw new System.IO.FileNotFoundException("file", file);
    }

    this.File = file;
    this.Flags = flags;
    this.Supports3D = flags.HasFlag(SoundFlags.Support3D);
    this.IsStreamed = flags.HasFlag(SoundFlags.Streamed);
    this.AllowRead = flags.HasFlag(SoundFlags.AllowRead);
    instances = new List <WeakReference <SoundInstance> >();

    // Streamed sounds decode on demand — nothing more to do here.
    if (IsStreamed)
    {
        return;
    }

    using (var source = SampleSourceFactory.FromFile(file))
    {
        // 3D: one mono buffer per channel; otherwise one interleaved buffer.
        var bufferCount = Supports3D ? source.Channels : 1;
        var channelCount = Supports3D ? 1 : source.Channels;
        var sampleRate = source.SampleRate;
        var samples = source.ReadAll();

        buffers = new List <AudioBuffer <short> >();
        short[] channelSamples = new short[samples.Length / bufferCount];
        for (int channel = 0; channel < bufferCount; channel++)
        {
            // Convert this channel's samples to 16-bit and upload them.
            SampleConverter.To16Bit(samples, channelSamples, channel, bufferCount);
            var buffer = new AudioBuffer <short>(audioDevice, !AllowRead);
            buffer.SetData(AudioFormat.Short16, channelCount, channelSamples, sampleRate);
            buffers.Add(buffer);
        }
    }
}
/// <summary>
/// Converts samples from the source signal's integer format (16-bit or
/// 32-bit) to 32-bit IEEE float and stores them in the destination signal.
/// Other format combinations are left untouched.
/// </summary>
/// <param name="sourceData">input signal (16-bit or 32-bit integer samples)</param>
/// <param name="destinationData">output signal (32-bit IEEE float samples)</param>
protected override void ProcessFilter(Signal sourceData, Signal destinationData)
{
    int channels = sourceData.Channels;
    int length = sourceData.Length;

    SampleFormat dstFormat = destinationData.SampleFormat;
    SampleFormat srcFormat = sourceData.SampleFormat;

    if (dstFormat == SampleFormat.Format32BitIeeeFloat)
    {
        float dst;
        if (srcFormat == SampleFormat.Format16Bit)
        {
            short src;
            for (int c = 0; c < channels; c++)
            {
                for (int i = 0; i < length; i++)
                {
                    src = (short)sourceData.GetSample(c, i);
                    SampleConverter.Convert(src, out dst);
                    // BUGFIX: the converted value was computed but never stored,
                    // leaving the destination signal unwritten.
                    // NOTE(review): assumes Signal exposes SetSample(channel,
                    // index, value) mirroring the GetSample used above — confirm.
                    destinationData.SetSample(c, i, dst);
                }
            }
        }
        else if (srcFormat == SampleFormat.Format32Bit)
        {
            int src;
            for (int c = 0; c < channels; c++)
            {
                for (int i = 0; i < length; i++)
                {
                    src = (int)sourceData.GetSample(c, i);
                    SampleConverter.Convert(src, out dst);
                    // BUGFIX: store the converted value (was discarded).
                    destinationData.SetSample(c, i, dst);
                }
            }
        }
    }
}