/// <summary>
/// Builds an Apple Lossless output description that mirrors the input's sample rate
/// and channel layout, encoding the source bit depth in the format flags.
/// </summary>
/// <param name="inputDescription">The PCM input format being encoded.</param>
/// <returns>An ALAC <see cref="AudioStreamBasicDescription"/> suitable for output.</returns>
/// <exception cref="IOException">The input bit depth is not one ALAC supports.</exception>
static AudioStreamBasicDescription GetOutputDescription(AudioStreamBasicDescription inputDescription)
{
    var result = new AudioStreamBasicDescription()
    {
        SampleRate = inputDescription.SampleRate,
        FramesPerPacket = 4096,
        AudioFormat = AudioFormat.AppleLossless,
        ChannelsPerFrame = inputDescription.ChannelsPerFrame
    };

    // ALAC stores the source bit depth as a format flag; only these four depths exist.
    var bitsPerChannel = inputDescription.BitsPerChannel;
    if (bitsPerChannel == 16)
    {
        result.Flags = AudioFormatFlags.Alac16BitSourceData;
    }
    else if (bitsPerChannel == 20)
    {
        result.Flags = AudioFormatFlags.Alac20BitSourceData;
    }
    else if (bitsPerChannel == 24)
    {
        result.Flags = AudioFormatFlags.Alac24BitSourceData;
    }
    else if (bitsPerChannel == 32)
    {
        result.Flags = AudioFormatFlags.Alac32BitSourceData;
    }
    else
    {
        throw new IOException(string.Format(CultureInfo.CurrentCulture, Resources.LosslessSampleEncoderBitRateError, inputDescription.BitsPerChannel));
    }

    return result;
}
/// <summary>
/// Opens the M4A stream, validates that it contains Apple Lossless audio, and sets up
/// the native converter and magic cookie needed for decoding.
/// </summary>
/// <param name="stream">The stream containing the M4A file.</param>
/// <exception cref="UnsupportedAudioException">The stream is not Apple Lossless.</exception>
public void Initialize(Stream stream)
{
    Contract.Ensures(_converter != null);
    Contract.Ensures(_magicCookie != IntPtr.Zero);
    Contract.Ensures(_divisor > 0);

    try
    {
        // Open the container and confirm the audio inside is actually ALAC:
        var audioFile = new NativeAudioFile(AudioFileType.M4A, stream);
        _inputDescription = audioFile.GetProperty<AudioStreamBasicDescription>(AudioFilePropertyId.DataFormat);
        if (_inputDescription.AudioFormat != AudioFormat.AppleLossless)
        {
            throw new UnsupportedAudioException(Resources.LosslessSampleDecoderFormatError);
        }

        // Derive the PCM output format, plus a scale factor based on its bit depth:
        var outputDescription = InitializeOutputDescription(_inputDescription);
        _divisor = (float)Math.Pow(2, outputDescription.BitsPerChannel - 1);

        _converter = new NativeAudioConverter(ref _inputDescription, ref outputDescription, audioFile);
        _magicCookie = InitializeMagicCookie(audioFile, _converter);
    }
    catch (TypeInitializationException e)
    {
        // Surface extension-loading failures directly; rethrow everything else unchanged.
        var inner = e.InnerException;
        if (inner != null && inner.GetType() == typeof(ExtensionInitializationException))
        {
            throw inner;
        }
        throw;
    }
}
/// <summary>
/// Initializes a new audio file of the given type over a read/write/seekable stream,
/// routing all native I/O through managed callbacks.
/// </summary>
/// <param name="description">The data format for the new audio file.</param>
/// <param name="fileType">The container type to create.</param>
/// <param name="stream">The backing stream; must support read, write and seek.</param>
/// <exception cref="IOException">The native initialization call failed.</exception>
internal NativeAudioFile(AudioStreamBasicDescription description, AudioFileType fileType, Stream stream)
{
    Contract.Requires(stream != null);
    Contract.Requires(stream.CanRead);
    Contract.Requires(stream.CanWrite);
    Contract.Requires(stream.CanSeek);
    Contract.Ensures(_stream != null);
    Contract.Ensures(_stream == stream);
    Contract.Ensures(Handle != null);
    Contract.Ensures(!Handle.IsClosed);
    Contract.Ensures(_readCallback != null);
    Contract.Ensures(_writeCallback != null);
    Contract.Ensures(_getSizeCallback != null);
    Contract.Ensures(_setSizeCallback != null);

    _stream = stream;

    // Hold the delegates in fields so they stay reachable while the native side
    // may still invoke them through the stored function pointers:
    _readCallback = ReadCallback;
    _writeCallback = WriteCallback;
    _getSizeCallback = GetSizeCallback;
    _setSizeCallback = SetSizeCallback;

    NativeAudioFileHandle newHandle;
    var status = SafeNativeMethods.AudioFileInitializeWithCallbacks(IntPtr.Zero, _readCallback, _writeCallback, _getSizeCallback, _setSizeCallback, fileType, ref description, 0, out newHandle);
    if (status != AudioFileStatus.Ok)
    {
        throw new IOException(string.Format(CultureInfo.CurrentCulture, Resources.NativeAudioFileInitializationError, status));
    }

    Handle = newHandle;
}
/// <summary>
/// Creates the base audio file, then wraps it so the extended (converter-aware)
/// ExtAudioFile API can be used against it.
/// </summary>
/// <param name="description">The data format for the new audio file.</param>
/// <param name="fileType">The container type to create.</param>
/// <param name="stream">The backing stream.</param>
/// <exception cref="IOException">The native wrap call failed.</exception>
internal NativeExtendedAudioFile(AudioStreamBasicDescription description, AudioFileType fileType, Stream stream)
    : base(description, fileType, stream)
{
    Contract.Requires(stream != null);
    Contract.Ensures(_handle != null);
    Contract.Ensures(!_handle.IsClosed);

    var status = SafeNativeMethods.ExtAudioFileWrapAudioFile(Handle, true, out _handle);
    if (status == ExtendedAudioFileStatus.Ok)
    {
        return;
    }

    throw new IOException(string.Format(CultureInfo.CurrentCulture, Resources.NativeExtendedAudioFileInitializationError, status));
}
/// <summary>
/// Builds an AAC-LC output description from the input format, mapping the input sample
/// rate onto the nearest rate the encoder supports.
/// </summary>
/// <param name="inputDescription">The PCM input format being encoded.</param>
/// <returns>An AAC <see cref="AudioStreamBasicDescription"/> suitable for output.</returns>
static AudioStreamBasicDescription GetOutputDescription(AudioStreamBasicDescription inputDescription)
{
    var result = new AudioStreamBasicDescription
    {
        FramesPerPacket = 1024,
        AudioFormat = AudioFormat.AacLowComplexity,
        ChannelsPerFrame = inputDescription.ChannelsPerFrame
    };

    // Some sample rates aren't supported on output, so pick the best match; rates
    // marked "conversion required" imply a sample-rate conversion by the encoder.
    var rate = (int)inputDescription.SampleRate;
    if (rate == 192000 || rate == 144000 || rate == 128000 || rate == 96000 || rate == 64000 || rate == 48000)
    {
        // 128000 and 64000 require conversion.
        result.SampleRate = 48000;
    }
    else if (rate == 176400 || rate == 88200 || rate == 44100 || rate == 37800 || rate == 36000)
    {
        // 37800 and 36000 require conversion.
        result.SampleRate = 44100;
    }
    else if (rate == 32000 || rate == 28000)
    {
        // 28000 requires conversion.
        result.SampleRate = 32000;
    }
    else if (rate == 22050 || rate == 18900)
    {
        // 18900 requires conversion.
        result.SampleRate = 22050;
    }
    // NOTE(review): any other input rate leaves result.SampleRate at its default value,
    // exactly as the original switch did — confirm downstream code rejects that case.

    return result;
}
/// <summary>
/// Stores the encoding context and creates the native extended audio file, setting
/// its client data format to match the PCM input.
/// </summary>
/// <param name="stream">The stream the encoded output is written to.</param>
/// <param name="audioInfo">Describes the source audio.</param>
/// <param name="metadata">The metadata to embed.</param>
/// <param name="settings">Encoder settings.</param>
/// <exception cref="IOException">The native file rejected the client data format.</exception>
public void Initialize(Stream stream, AudioInfo audioInfo, MetadataDictionary metadata, SettingsDictionary settings)
{
    Contract.Ensures(_stream != null);
    Contract.Ensures(_stream == stream);
    Contract.Ensures(_metadata != null);
    Contract.Ensures(_metadata == metadata);
    Contract.Ensures(_settings != null);
    Contract.Ensures(_settings == settings);
    Contract.Ensures(_audioFile != null);

    _stream = stream;
    _metadata = metadata;
    _settings = settings;

    // Scale factor derived from the source bit depth:
    _multiplier = (float)Math.Pow(2, audioInfo.BitsPerSample - 1);

    var inputDescription = GetInputDescription(audioInfo);
    var outputDescription = GetOutputDescription(inputDescription);

    try
    {
        _audioFile = new NativeExtendedAudioFile(outputDescription, AudioFileType.M4A, stream);

        var status = _audioFile.SetProperty(ExtendedAudioFilePropertyId.ClientDataFormat, inputDescription);
        if (status != ExtendedAudioFileStatus.Ok)
        {
            throw new IOException(string.Format(CultureInfo.CurrentCulture, Resources.SampleEncoderInitializationError, status));
        }
    }
    catch (TypeInitializationException e)
    {
        // Surface extension-loading failures directly; rethrow everything else unchanged.
        var inner = e.InnerException;
        if (inner != null && inner.GetType() == typeof(ExtensionInitializationException))
        {
            throw inner;
        }
        throw;
    }
}
/// <summary>
/// Creates a native audio converter for the given input/output formats, reading
/// source data from the supplied audio file via a managed callback.
/// </summary>
/// <param name="inputDescription">The source data format.</param>
/// <param name="outputDescription">The destination data format.</param>
/// <param name="audioFile">The file supplying the source packets.</param>
/// <exception cref="IOException">The native converter could not be created.</exception>
internal NativeAudioConverter(ref AudioStreamBasicDescription inputDescription, ref AudioStreamBasicDescription outputDescription, NativeAudioFile audioFile)
{
    Contract.Requires(audioFile != null);
    Contract.Ensures(_handle != null);
    Contract.Ensures(!_handle.IsClosed);
    Contract.Ensures(_inputCallback != null);
    Contract.Ensures(_audioFile != null);
    Contract.Ensures(_audioFile == audioFile);

    var status = SafeNativeMethods.AudioConverterNew(ref inputDescription, ref outputDescription, out _handle);
    if (status != AudioConverterStatus.Ok)
    {
        throw new IOException(string.Format(CultureInfo.CurrentCulture, Resources.NativeAudioConverterInitializationError, status));
    }

    // The field reference keeps the callback delegate alive for the native side.
    _inputCallback = InputCallback;
    _audioFile = audioFile;
}
/// <summary>
/// Derives the PCM output format for decoding: the ALAC source bit depth is recovered
/// from the input's format flags, and samples are delivered as interleaved signed
/// integers in 4-byte containers, one frame per packet.
/// </summary>
/// <param name="inputDescription">The ALAC input format.</param>
/// <returns>A linear-PCM <see cref="AudioStreamBasicDescription"/> for decoder output.</returns>
/// <exception cref="IOException">The flags do not name a known ALAC bit depth.</exception>
static AudioStreamBasicDescription InitializeOutputDescription(AudioStreamBasicDescription inputDescription)
{
    uint bitsPerSample;
    if (inputDescription.Flags == AudioFormatFlags.Alac16BitSourceData)
    {
        bitsPerSample = 16;
    }
    else if (inputDescription.Flags == AudioFormatFlags.Alac20BitSourceData)
    {
        bitsPerSample = 20;
    }
    else if (inputDescription.Flags == AudioFormatFlags.Alac24BitSourceData)
    {
        bitsPerSample = 24;
    }
    else if (inputDescription.Flags == AudioFormatFlags.Alac32BitSourceData)
    {
        bitsPerSample = 32;
    }
    else
    {
        throw new IOException(string.Format(CultureInfo.CurrentCulture, Resources.LosslessSampleDecoderFlagsError, inputDescription.Flags));
    }

    // Every depth is delivered in a sizeof(int) container, so bytes per frame and
    // bytes per packet are fixed regardless of the recovered bit depth.
    var bytesPerFrame = sizeof(int) * inputDescription.ChannelsPerFrame;
    return new AudioStreamBasicDescription
    {
        AudioFormat = AudioFormat.LinearPcm,
        Flags = AudioFormatFlags.PcmIsSignedInteger,
        BytesPerPacket = bytesPerFrame,
        FramesPerPacket = 1,
        BytesPerFrame = bytesPerFrame,
        ChannelsPerFrame = inputDescription.ChannelsPerFrame,
        BitsPerChannel = bitsPerSample,
        SampleRate = inputDescription.SampleRate
    };
}
// P/Invoke declaration for Apple's AudioConverterNew (AudioToolbox): creates a native
// converter between the two formats. Failures are reported via the returned status
// code, not exceptions — callers must check it.
internal static extern AudioConverterStatus AudioConverterNew(ref AudioStreamBasicDescription sourceFormat, ref AudioStreamBasicDescription destinationFormat, out NativeAudioConverterHandle handle);
// P/Invoke declaration for Apple's AudioFileInitializeWithCallbacks (AudioToolbox):
// creates a new audio file whose I/O is routed through the supplied managed delegates.
// NOTE(review): the delegate arguments must be kept alive (e.g. stored in fields) for
// the lifetime of the returned handle, since native code holds raw function pointers.
internal static extern AudioFileStatus AudioFileInitializeWithCallbacks(IntPtr userData, AudioFileReadCallback readCallback, AudioFileWriteCallback writeCallback, AudioFileGetSizeCallback getSizeCallback, AudioFileSetSizeCallback setSizeCallback, AudioFileType fileType, ref AudioStreamBasicDescription description, uint flags, out NativeAudioFileHandle handle);
/// <summary>
/// Stores the encoding context, loads the external ReplayGain filter, creates the
/// native extended audio file, and configures its converter.
/// </summary>
/// <param name="stream">The stream the encoded output is written to.</param>
/// <param name="audioInfo">Describes the source audio.</param>
/// <param name="metadata">The metadata to embed.</param>
/// <param name="settings">Encoder settings.</param>
/// <exception cref="ExtensionInitializationException">The ReplayGain filter is missing.</exception>
/// <exception cref="IOException">A native property call failed.</exception>
public void Initialize(Stream stream, AudioInfo audioInfo, MetadataDictionary metadata, SettingsDictionary settings)
{
    Contract.Ensures(_stream != null);
    Contract.Ensures(_stream == stream);
    Contract.Ensures(_metadata != null);
    Contract.Ensures(_metadata == metadata);
    Contract.Ensures(_settings != null);
    Contract.Ensures(_settings == settings);
    Contract.Ensures(_audioFile != null);

    _stream = stream;
    _metadata = metadata;
    _settings = settings;

    // Load the external gain filter:
    ExportFactory<ISampleFilter> sampleFilterFactory = ExtensionProvider.GetFactories<ISampleFilter>("Name", "ReplayGain").SingleOrDefault();
    if (sampleFilterFactory == null)
    {
        throw new ExtensionInitializationException(Resources.AacSampleEncoderReplayGainFilterError);
    }
    _replayGainFilterLifetime = sampleFilterFactory.CreateExport();
    _replayGainFilterLifetime.Value.Initialize(metadata, settings);

    AudioStreamBasicDescription inputDescription = GetInputDescription(audioInfo);
    AudioStreamBasicDescription outputDescription = GetOutputDescription(inputDescription);

    try
    {
        _audioFile = new NativeExtendedAudioFile(outputDescription, AudioFileType.M4A, stream);

        ExtendedAudioFileStatus status = _audioFile.SetProperty(ExtendedAudioFilePropertyId.ClientDataFormat, inputDescription);
        if (status != ExtendedAudioFileStatus.Ok)
        {
            throw new IOException(string.Format(CultureInfo.CurrentCulture, Resources.SampleEncoderInitializationError, status));
        }

        // Configure the audio converter:
        ConfigureConverter(settings, audioInfo.Channels, _audioFile.GetProperty<IntPtr>(ExtendedAudioFilePropertyId.AudioConverter));

        // Setting the ConverterConfig property to null resynchronizes the converter settings:
        ExtendedAudioFileStatus fileStatus = _audioFile.SetProperty(ExtendedAudioFilePropertyId.ConverterConfig, IntPtr.Zero);
        if (fileStatus != ExtendedAudioFileStatus.Ok)
        {
            // Fixed: previously formatted this message with the stale 'status' variable
            // (always Ok at this point), hiding the actual ConverterConfig failure code.
            throw new IOException(string.Format(CultureInfo.CurrentCulture, Resources.SampleEncoderConverterError, fileStatus));
        }
    }
    catch (TypeInitializationException e)
    {
        if (e.InnerException != null && e.InnerException.GetType() == typeof(ExtensionInitializationException))
        {
            throw e.InnerException;
        }
        throw;
    }
}