internal static AudioCodec TypeOf(WAVEFORMATEX format)
{
    AudioCodec result = AudioCodec.Undefined;

    switch (format.wFormatTag)
    {
        case 1: // WAVE_FORMAT_PCM: distinguish 8-bit and 16-bit by bytes per sample
            switch (format.nBlockAlign / format.nChannels)
            {
                case 1:
                    result = AudioCodec.PCM8;
                    break;

                case 2:
                    result = AudioCodec.PCM16;
                    break;
            }
            break;

        case 6: // WAVE_FORMAT_ALAW
            result = AudioCodec.G711A;
            break;

        case 7: // WAVE_FORMAT_MULAW
            result = AudioCodec.G711U;
            break;
    }

    return result;
}
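// A minimal usage sketch for TypeOf (hypothetical helper, assuming the interop
// WAVEFORMATEX type from the surrounding code has settable fields): a 16-bit
// stereo PCM format should be detected as PCM16.
internal static AudioCodec DetectPcm16Example()
{
    WAVEFORMATEX pcmStereo = new WAVEFORMATEX
    {
        wFormatTag = 1,          // WAVE_FORMAT_PCM
        nChannels = 2,
        nSamplesPerSec = 44100,
        wBitsPerSample = 16,
        nBlockAlign = 4,         // nChannels * wBitsPerSample / 8
        nAvgBytesPerSec = 176400 // nSamplesPerSec * nBlockAlign
    };
    return TypeOf(pcmStereo);    // AudioCodec.PCM16
}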
public static string AudioCodecAsString(AudioCodec codec)
{
    string audioCodec;

    switch (codec)
    {
        case AudioCodec.Copy:
            audioCodec = "copy";
            break;

        case AudioCodec.MP3:
            audioCodec = "libmp3lame";
            break;

        case AudioCodec.Vorbis:
            audioCodec = "libvorbis";
            break;

        case AudioCodec.WavPack:
            audioCodec = "wavpack";
            break;

        case AudioCodec.Remove:
        default:
            audioCodec = "none";
            break;
    }

    return audioCodec;
}
/// <summary>
/// Gets the overhead a given audio type will incur in the Matroska container,
/// given its length and sampling rate.
/// </summary>
/// <param name="audioType">type of the audio track</param>
/// <param name="samplingRate">sampling rate of the audio track</param>
/// <param name="length">length of the audio track in seconds</param>
/// <returns>overhead this audio track will incur, in bytes</returns>
private static int GetMkvAudioOverhead(AudioCodec audioType, int samplingRate, double length)
{
    Int64 nbSamples = Convert.ToInt64((double)samplingRate * length);
    int headerSize = mkvAudioTrackHeaderSize;
    int samplesPerBlock;

    if (audioType == AudioCodec.AacVbr || audioType == AudioCodec.AacCbr)
    {
        samplesPerBlock = AACBlockSize;
    }
    else if (audioType == AudioCodec.Mp3Cbr || audioType == AudioCodec.Mp3Vbr || audioType == AudioCodec.Dts)
    {
        samplesPerBlock = MP3BlockSize;
    }
    else if (audioType == AudioCodec.Ac3)
    {
        samplesPerBlock = AC3BlockSize;
    }
    else if (audioType == AudioCodec.OggVorbis)
    {
        samplesPerBlock = VorbisBlockSize;
        headerSize = mkvVorbisTrackHeaderSize;
    }
    else // unknown types: assume AC-3-sized blocks
    {
        samplesPerBlock = AC3BlockSize;
    }

    // ~22 bits of container overhead per block, plus ~5 bytes per second of cluster overhead
    double blockOverhead = (double)nbSamples / (double)samplesPerBlock * 22.0 / 8.0;
    int overhead = (int)(headerSize + 5 * length + blockOverhead);
    return overhead;
}
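// A worked example of the overhead formula above (hypothetical helper; assumes
// AC3BlockSize is the usual 1536 samples per AC-3 frame, though the actual
// constant is defined elsewhere in this class): a 48 kHz AC-3 track of 3600 s
// has 48000 * 3600 = 172,800,000 samples, i.e. 172,800,000 / 1536 = 112,500
// blocks. Block overhead is 112,500 * 22 / 8 = 309,375 bytes, so the total
// comes to mkvAudioTrackHeaderSize + 5 * 3600 + 309,375 bytes.
private static int MkvAc3OverheadExample()
{
    return GetMkvAudioOverhead(AudioCodec.Ac3, 48000, 3600.0);
}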
/// <summary>
/// Gets the AVI container overhead for the given audio type.
/// Bitrate mode only matters for MP3, which is why VBR and CBR appear as distinct codec values here.
/// </summary>
/// <param name="audioType">the type of audio</param>
/// <returns>the overhead in bytes per frame</returns>
private static decimal GetAviAudioOverhead(AudioCodec audioType)
{
    if (audioType == AudioCodec.Ac3)
        return ac3Overhead;
    else if (audioType == AudioCodec.Mp3Vbr)
        return vbrMP3Overhead;
    else if (audioType == AudioCodec.Mp3Cbr)
        return cbrMP3Overhead;
    else if (audioType == AudioCodec.AacVbr)
        return vbrMP3Overhead; // AAC reuses the MP3 overhead figures
    else if (audioType == AudioCodec.AacCbr)
        return cbrMP3Overhead;
    else if (audioType == AudioCodec.Dts)
        return ac3Overhead;    // DTS reuses the AC-3 overhead figure
    else
        return 0;
}
protected override void PushNextPacket()
{
    if (AudioCodec == null)
    {
        return;
    }

    RTPPacket packet = IncomingRTPPacketBuffer.GetPacket();
    if (packet == null)
    {
        return;
    }

    byte[] bNewAudioData = AudioCodec.DecodeToBytes(packet);
    if (bNewAudioData != null)
    {
        ReceiveAudioQueue.AppendData(bNewAudioData);

        // Someone isn't taking our packets (either directly or through IAudioSource),
        // so cap the queue rather than let it grow without bound.
        if (ReceiveAudioQueue.Size > m_nPacketBytes * MaxAudioPacketsQueue)
        {
            ReceiveAudioQueue.GetNSamples(ReceiveAudioQueue.Size - m_nPacketBytes * MaxAudioPacketsQueue);
        }

        if (RenderSink != null)
        {
            MediaSample samp = new MediaSample(bNewAudioData, AudioCodec.AudioFormat);
            RenderSink.PushSample(samp, this);
        }
    }
}
internal bool PrepareConverter(ref WAVEFORMATEX inWavFormat, ref WAVEFORMATEX outWavFormat)
{
    bool result = true;

    if (inWavFormat.nSamplesPerSec <= 0 || inWavFormat.nChannels <= 0 || inWavFormat.nChannels > 2 ||
        outWavFormat.nSamplesPerSec <= 0 || outWavFormat.nChannels <= 0 || outWavFormat.nChannels > 2)
    {
        throw new FormatException();
    }

    _iInFormatType = AudioFormatConverter.TypeOf(inWavFormat);
    _iOutFormatType = AudioFormatConverter.TypeOf(outWavFormat);

    if (_iInFormatType < AudioCodec.G711U || _iOutFormatType < AudioCodec.G711U)
    {
        throw new FormatException();
    }

    if (outWavFormat.nSamplesPerSec == inWavFormat.nSamplesPerSec &&
        _iOutFormatType == _iInFormatType &&
        outWavFormat.nChannels == inWavFormat.nChannels)
    {
        // Formats already match; no conversion needed.
        result = false;
    }
    else
    {
        if (inWavFormat.nSamplesPerSec != outWavFormat.nSamplesPerSec)
        {
            CreateResamplingFilter(inWavFormat.nSamplesPerSec, outWavFormat.nSamplesPerSec);
        }

        _inWavFormat = inWavFormat;
        _outWavFormat = outWavFormat;
    }

    return result;
}
public static INetworkChatCodec GetCodec(AudioCodec codec)
{
    switch (codec)
    {
        case AudioCodec.AcmALaw: return new AcmALawChatCodec();
        case AudioCodec.ALaw: return new ALawChatCodec();
        case AudioCodec.G722: return new G722ChatCodec();
        case AudioCodec.Gsm610: return new Gsm610ChatCodec();
        case AudioCodec.MicrosoftAdpcm: return new MicrosoftAdpcmChatCodec();
        case AudioCodec.MuLaw: return new MuLawChatCodec();
        case AudioCodec.NarrowBandSpeex: return new NarrowBandSpeexCodec();
        case AudioCodec.WideBandSpeex: return new WideBandSpeexCodec();
        case AudioCodec.UltraWideBandSpeex: return new UltraWideBandSpeexCodec();
        case AudioCodec.TrueSpeech: return new TrueSpeechChatCodec();
        case AudioCodec.UnCompressedPcm: return new UncompressedPcmChatCodec();
        default: return null;
    }
}
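// A minimal usage sketch (hypothetical helper): GetCodec returns null for
// unmapped values, so callers should guard before use.
public static INetworkChatCodec RequireCodec(AudioCodec codec)
{
    INetworkChatCodec chatCodec = GetCodec(codec);
    if (chatCodec == null)
    {
        throw new NotSupportedException(string.Format("No codec implementation registered for {0}", codec));
    }
    return chatCodec;
}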
internal static byte[] Convert(short[] data, AudioCodec from, AudioCodec to)
{
    ConvertShortByte convertShortByte = null;

    if (from == AudioCodec.PCM16)
    {
        switch (to)
        {
            case AudioCodec.PCM8:
                convertShortByte = ConvertLinear8LinearShortByte;
                break;

            case AudioCodec.PCM16:
                convertShortByte = ConvertLinear2LinearShortByte;
                break;

            case AudioCodec.G711U:
                convertShortByte = ConvertLinear2ULaw;
                break;

            case AudioCodec.G711A:
                convertShortByte = ConvertLinear2ALaw;
                break;
        }

        // An unmapped target codec is a format error, not a null delegate call.
        if (convertShortByte == null)
        {
            throw new FormatException();
        }

        return convertShortByte(data, data.Length);
    }

    throw new FormatException();
}
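// A minimal usage sketch (hypothetical helper): converts a buffer of 16-bit
// PCM samples to G.711 mu-law, yielding one byte per input sample.
internal static byte[] EncodeULawExample(short[] pcmSamples)
{
    // e.g. pcmSamples holds one 20 ms frame at 8 kHz (160 samples)
    return Convert(pcmSamples, AudioCodec.PCM16, AudioCodec.G711U);
}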
internal static ESPlayer.AudioMimeType GetCodecMimeType(AudioCodec audioCodec)
{
    switch (audioCodec)
    {
        case AudioCodec.AAC: return ESPlayer.AudioMimeType.Aac;
        case AudioCodec.MP2: return ESPlayer.AudioMimeType.Mp2;
        case AudioCodec.MP3: return ESPlayer.AudioMimeType.Mp3;
        case AudioCodec.VORBIS: return ESPlayer.AudioMimeType.Vorbis;
        case AudioCodec.PCM_S16BE: return ESPlayer.AudioMimeType.PcmS16be;
        case AudioCodec.PCM_S24BE: return ESPlayer.AudioMimeType.PcmS24be;
        case AudioCodec.EAC3: return ESPlayer.AudioMimeType.Eac3;
        case AudioCodec.AC3: return ESPlayer.AudioMimeType.Ac3;
        case AudioCodec.PCM:
        case AudioCodec.FLAC:
        case AudioCodec.AMR_NB:
        case AudioCodec.AMR_WB:
        case AudioCodec.PCM_MULAW:
        case AudioCodec.GSM_MS:
        case AudioCodec.OPUS:
        case AudioCodec.WMAV1:
        case AudioCodec.WMAV2:
        default:
            throw new ArgumentOutOfRangeException(
                $"No mapping from Juvo audio codec {audioCodec} to ESPlayer audio codec");
    }
}
public static int? GetAudioNumberOfChannels(AudioCodec sourceCodec, AudioCodec targetCodec, int? sourceChannels, bool forceStereo)
{
    bool downmixingSupported = sourceCodec != AudioCodec.Flac;

    if (!sourceChannels.HasValue)
    {
        if (forceStereo)
        {
            return 2;
        }
    }
    else
    {
        int maxChannels = GetMaxNumberOfChannels(targetCodec);

        if (sourceChannels > 2 && forceStereo && downmixingSupported)
        {
            return 2;
        }

        if (maxChannels > 0 && maxChannels < sourceChannels)
        {
            return maxChannels;
        }

        // The AAC encoder has no 7-channel layout, so drop to 6 (5.1).
        if (targetCodec == AudioCodec.Aac && sourceChannels == 7)
        {
            return 6;
        }

        return sourceChannels;
    }

    return null;
}
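// A minimal usage sketch (hypothetical helper; the Ac3 and Aac enum members
// are assumed from the surrounding code): a 6-channel AC-3 source transcoded
// to AAC with forceStereo requested collapses to 2 channels.
public static int? StereoDownmixExample()
{
    return GetAudioNumberOfChannels(AudioCodec.Ac3, AudioCodec.Aac, 6, forceStereo: true); // 2
}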
/// <summary>
/// Initializes a new instance of the <see cref="AudioEncoderSettings"/> class with default audio settings values.
/// </summary>
/// <param name="sampleRate">The sample rate of the stream.</param>
/// <param name="channels">The number of channels in the stream.</param>
/// <param name="codec">The audio codec to encode with.</param>
public AudioEncoderSettings(int sampleRate, int channels, AudioCodec codec = AudioCodec.Default)
{
    SampleRate = sampleRate;
    Channels = channels;
    Codec = codec;
    CodecOptions = new Dictionary<string, string>();
}
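// A minimal usage sketch (hypothetical helper): 44.1 kHz stereo with the
// library's default codec, plus one codec option. The "bitrate" key is
// illustrative; valid keys depend on the codec selected.
public static AudioEncoderSettings CreateDefaultSettingsExample()
{
    var settings = new AudioEncoderSettings(44100, 2);
    settings.CodecOptions["bitrate"] = "192k"; // hypothetical option key
    return settings;
}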
/// <summary>
/// Asynchronously streams a Windows screen and audio capture to a specified IP address.
/// </summary>
/// <returns>The windows screen to ip async.</returns>
/// <param name="videoDeviceName">Video device name.</param>
/// <param name="audioDeviceName">Audio device name.</param>
/// <param name="ip">IP address.</param>
/// <param name="port">UDP port to stream to.</param>
/// <param name="vcodec">Video codec.</param>
/// <param name="acodec">Audio codec.</param>
/// <param name="mode">Streaming mode.</param>
/// <param name="frameRate">Desired frame rate.</param>
/// <param name="outputSize">Output resolution.</param>
/// <param name="videoExtras">Extra video arguments passed straight to ffmpeg.</param>
/// <param name="quality">Quality of compression.</param>
public async Task<bool> StreamWindowsScreenToIpAsync(string videoDeviceName, string audioDeviceName,
    string ip, string port, VideoCodec vcodec, AudioCodec acodec, StreamingMode mode,
    int frameRate, Resolution outputSize, string videoExtras, int quality = 20)
{
    // TODO: -b for bitrate
    string input = string.Format(
        "-f dshow -i video=\"{0}\":audio=\"{1}\" -r {2} -async 1 -vcodec {3} {4} -q {5} -s {6} -maxrate 750k -bufsize 3000k -acodec {7} -ab 128k",
        videoDeviceName,
        audioDeviceName,
        frameRate.ToString(),
        FFmpegManager.GetCodecName(vcodec),
        videoExtras,
        quality.ToString(),
        outputSize,
        FFmpegManager.GetCodecName(acodec));

    string output = string.Format(
        "-f mpegts udp://{0}:{1}?pkt_size=188?buffer_size=10000000?fifo_size=100000",
        ip,
        port);

    string args = input + " " + output;

    try
    {
        FFmpegProcess = FFmpegManager.GetFFmpegProcess(args);
    }
    catch (FileNotFoundException e)
    {
        throw new FileNotFoundException(e.Message, e);
    }

    FFmpegProcess.Start();
    await Task.Run(() => FFmpegProcess.WaitForExit());
    return true;
}
/// <summary>
/// Format an audio codec argument for ffmpeg.
/// </summary>
public static string formatAudioCodecArg(AudioCodec audioCodec)
{
    string audioCodecArg = "";
    string command = "-codec:a ";

    switch (audioCodec)
    {
        case AudioCodec.COPY:
            audioCodecArg = command + "copy";
            break;

        case AudioCodec.AAC:
            audioCodecArg = command + "aac";
            break;

        case AudioCodec.MP3:
            audioCodecArg = command + "mp3";
            break;

        default:
            audioCodecArg = "";
            break;
    }

    return audioCodecArg;
}
private void ClearAudio()
{
    if (AudioCodecId == AudioCodec.Aac)
    {
        Aac.Clear();
    }

    AudioCodecId = AudioCodec.PassThrough;
}
public static int GetMaxNumberOfChannels(AudioCodec codec)
{
    if (codec != AudioCodec.Unknown && MAX_CHANNEL_NUMBER.ContainsKey(codec))
    {
        return MAX_CHANNEL_NUMBER[codec];
    }

    // Default to stereo for unknown or unmapped codecs.
    return 2;
}
public static string GetAudioCodec(AudioCodec codec)
{
    switch (codec)
    {
        case AudioCodec.Mp3: return "libmp3lame"; //return "libshine";
        case AudioCodec.Mp2: return "mp2";
        case AudioCodec.Aac: return "aac";
        case AudioCodec.Ac3: return "ac3";
        case AudioCodec.Lpcm: return "pcm_s16le";
        case AudioCodec.Dts: return "dca";
        case AudioCodec.Wma: return "wmav2";
        case AudioCodec.Flac: return "flac";
        case AudioCodec.Vorbis: return "libvorbis";
        case AudioCodec.Amr: return "amrnb";
        case AudioCodec.Real: return "ralf";
        case AudioCodec.Alac: return "alac";
        case AudioCodec.Speex: return "libspeex";
        case AudioCodec.EAc3: return "eac3";
        case AudioCodec.DtsHd: return "dca";
        case AudioCodec.WmaPro: return "wmapro";
        case AudioCodec.TrueHd: return "truehd";
    }

    return "copy";
}
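// A minimal usage sketch (hypothetical helper): the returned encoder name
// plugs straight into an ffmpeg "-acodec" argument, with unmapped values
// falling back to stream copy.
public static string BuildAcodecArgExample()
{
    return string.Format("-acodec {0} -ab 192k", GetAudioCodec(AudioCodec.Aac)); // "-acodec aac -ab 192k"
}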
private string GetAudioCodec(AudioCodec codec)
{
    switch (codec)
    {
        case AudioCodec.MP3:
        default:
            return "MP3";
    }
}
public async Task<byte[]> TextToSpeechAsync(string text, AudioCodec codec, AudioFormat format, OutputLanguage language)
{
    string baseAddress = _settingsService.GetVoiceRssApiUrl();
    string query = BuildQuery(text, codec, format, language);
    byte[] response = await _httpClient.GetContentAsByte(baseAddress + query);
    return response;
}
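// A minimal usage sketch (hypothetical helper; the AudioFormat and
// OutputLanguage values come from the surrounding VoiceRSS wrapper):
// synthesize speech and persist the returned audio bytes to disk.
public async Task SpeakToFileExample(AudioFormat format, OutputLanguage language)
{
    byte[] speech = await TextToSpeechAsync("Hello world", AudioCodec.MP3, format, language);
    System.IO.File.WriteAllBytes("hello.mp3", speech);
}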
public AudioFormat(uint channels, uint sampleRate, AudioCodec codec, uint sampleSize = 0, uint sampleType = 0)
{
    Channels = channels;
    SampleRate = sampleRate;
    Codec = codec;
    SampleSize = sampleSize;
    SampleType = sampleType;
}
public void InitializeAudio()
{
    try
    {
        string codec = System.Configuration.ConfigurationManager.AppSettings["TargetAudioSampleRate"];
        defaultCodec = (AudioCodec)Enum.Parse(typeof(AudioCodec), codec);
    }
    catch
    {
        // Missing or unparsable setting: keep the current defaultCodec.
    }

    ConnectAudio();
}
public bool InitAudioAAC(Stream pBuffer, int length)
{
    ClearAudio();

    if (!Aac.Init(pBuffer, length))
    {
        ClearAudio();
        return false;
    }

    AudioCodecId = AudioCodec.Aac;
    return true;
}
public static IMediaCodec CreateMedia(AudioCodec codec)
{
    if (!CodecFactory.ContainsKey(codec))
    {
        throw new ArgumentOutOfRangeException("codec", string.Format("Factory for codec {0} not found", codec));
    }

    Interlocked.CompareExchange(ref currentPort, InitialPort, InitialPort);
    return CodecFactory[codec]();
}
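// A minimal usage sketch (hypothetical helper): unknown codec values surface
// as ArgumentOutOfRangeException rather than a KeyNotFoundException from the
// factory dictionary, so callers can catch a single, well-defined exception.
public static IMediaCodec TryCreateMediaExample(AudioCodec codec)
{
    try
    {
        return CreateMedia(codec);
    }
    catch (ArgumentOutOfRangeException)
    {
        return null; // no factory registered for this codec
    }
}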
public static string GetCodecName(AudioCodec ac)
{
    switch (ac)
    {
        case AudioCodec.LameMP3:
            return "libmp3lame";

        case AudioCodec.WMA:
            return "";

        default:
            return "";
    }
}
public static IRecorder CreateRecorder(AudioCodec codec, bool loopback, int recordDeviceNumber = 0)
{
    if (loopback)
    {
        return new WasapiLoopbackRecorder(GetCodec(codec));
    }
    else
    {
        return new WaveInRecorder(GetCodec(codec), recordDeviceNumber);
    }
}
public CmdConnect(byte[] payload)
{
    int payloadLen = payload.Length;

    double result = 0;
    if (!ArrayUtil.AMF0Number(payload, posID, ref result))
        return;
    TransactionID = result;

    var posConnObjEnd = ArrayUtil.FindPattern(payload, objectEnd, 20);
    if (posConnObjEnd < 0)
        return;

    var connObjectData = ArrayUtil.Mid(payload, 20, posConnObjEnd - 20);
    connObject = new AMFObject(connObjectData);

    AudioCodecs = new Dictionary<AudioCodec, bool>();
    VideoCodecs = new Dictionary<VideoCodec, bool>();

    var audioCodecs = new AudioCodec[]
    {
        AudioCodec.Raw, AudioCodec.ADPCM, AudioCodec.MP3, AudioCodec.NotUsed1,
        AudioCodec.NotUsed2, AudioCodec.NellyMoser8KHz, AudioCodec.NellyMoser44KHz,
        AudioCodec.G711A, AudioCodec.G711U, AudioCodec.NellyMoser16KHz,
        AudioCodec.AAC, AudioCodec.Speex, AudioCodec.All
    };

    var videoCodecs = new VideoCodec[]
    {
        VideoCodec.Obsolete1, VideoCodec.Obsolete2, VideoCodec.FlashVideo,
        VideoCodec.V1ScrSharing, VideoCodec.VP6, VideoCodec.VP6Alpha,
        VideoCodec.HomeBrewV, VideoCodec.H264, VideoCodec.All
    };

    foreach (var codec in audioCodecs)
        AudioCodecs.Add(codec, false);

    foreach (var codec in videoCodecs)
        VideoCodecs.Add(codec, false);
}
/// <summary>
/// Convert the input video using the specified options.
///
/// Note:
/// h.264 and .mp4 have timing/cutting issues. h.264 only cuts on the last keyframe,
/// which could be several seconds before the time that you actually want to cut.
///
/// When cutting an .mp4 (even with MPEG4 video and MP3 audio), the cut will take place
/// ~0.5 seconds before it should.
///
/// (Is this still true?)
/// </summary>
public static void convertVideo(string inFile, string audioStream, DateTime startTime, DateTime endTime,
    ImageSize size, ImageCrop crop, int bitrateVideo, int bitrateAudio, VideoCodec videoCodec,
    AudioCodec audioCodec, Profilex264 profile, Presetx264 preset, string outFile, DialogProgress dialogProgress)
{
    string videoMapArg = formatVideoMapArg();
    string audioMapArg = formatAudioMapArg(audioStream);
    string videoCodecArg = formatVideoCodecArg(videoCodec);
    string presetArg = formatPresetFileArg(preset);
    string keyframeOptionsArg = formatKeyframeOptionsArg(videoCodec);
    string profileArg = formatProfileFileArg(profile);
    string videoSizeArg = formatVideoSizeArg(inFile, size, crop, 16, 2);
    string videoBitrateArg = $"-b:v {bitrateVideo}k";
    string audioCodecArg = formatAudioCodecArg(audioCodec);
    string audioBitrateArg = formatAudioBitrateArg(bitrateAudio);
    string timeArg = formatStartTimeAndDurationArg(startTime, endTime);
    string cropArg = formatCropArg(inFile, size, crop);
    string threadsArg = "-threads 0";

    // Good ffmpeg resource: http://howto-pages.org/ffmpeg/
    // 0:0 is assumed to be the video stream.
    // Audio stream: 0:n where n is the number of the audio stream (usually 1).
    //
    // Example format:
    // -y -i "G:\Temp\input.mkv" -ac 2 -map 0:v:0 -map 0:a:0 -codec:v libx264 -preset superfast -g 6 -keyint_min 6
    // -fpre "E:\subs2srs\subs2srs\bin\Release\Utils\ffmpeg\presets\libx264-ipod640.ffpreset"
    // -b:v 800k -codec:a aac -b:a 128k -ss 00:03:32.420 -t 00:02:03.650 -vf "scale 352:202, crop=352:202:0:0" -threads 0
    // "C:\Documents and Settings\cb4960\Local Settings\Temp\~subs2srs_temp.mp4"
    string ffmpegConvertArgs =
        $"-y -i \"{inFile}\" -ac 2 {videoMapArg} {audioMapArg} {videoCodecArg} {presetArg} {keyframeOptionsArg} {profileArg} {videoBitrateArg} {audioCodecArg} {audioBitrateArg}" +
        $" {timeArg} -vf \"{videoSizeArg}, {cropArg}\" {threadsArg} \"{outFile}\" ";

    if (dialogProgress == null)
    {
        UtilsCommon.startFFmpeg(ffmpegConvertArgs, true, true);
    }
    else
    {
        UtilsCommon.startFFmpegProgress(ffmpegConvertArgs, dialogProgress);
    }
}
static void TestFFMPEG2()
{
    string outputPath = Path.GetFullPath("output.avi");

    // First, we create a new VideoFileWriter:
    var videoWriter = new VideoFileWriter()
    {
        // Our video will have the following characteristics:
        Width = 800,
        Height = 600,
        FrameRate = 24,
        BitRate = 1200 * 1000,
        VideoCodec = VideoCodec.Mpeg4,
        //PixelFormat = Accord.Video.FFMPEG.PixelFormat.FormatYUV420P
    };

    // We can open it for writing:
    videoWriter.Open(outputPath);

    // At this point, we can check the console of our application for useful
    // information regarding our media streams created by FFMPEG. We can also
    // check those properties using the class itself, especially for properties
    // that we didn't set beforehand but that have been filled by FFMPEG:
    int width = videoWriter.Width;
    int height = videoWriter.Height;
    int frameRate = videoWriter.FrameRate.Numerator;
    int bitRate = videoWriter.BitRate;
    VideoCodec videoCodec = videoWriter.VideoCodec;

    // We haven't set those properties, but FFMPEG has filled them for us:
    AudioCodec audioCodec = videoWriter.AudioCodec;
    int audioSampleRate = videoWriter.SampleRate;
    AudioLayout audioChannels = videoWriter.AudioLayout;
    int numberOfChannels = videoWriter.NumberOfChannels;

    // Now, let's say we would like to save dummy images of changing color
    var m2i = new MatrixToImage();
    Bitmap frame;

    for (byte i = 0; i < 255; i++)
    {
        // Create bitmap matrix from a matrix of RGB values:
        byte[,] matrix = Matrix.Create(height, width, i);
        m2i.Convert(matrix, out frame);

        // Write the frame to the stream. We can optionally specify
        // the duration that this frame should remain in the stream:
        videoWriter.WriteVideoFrame(frame, TimeSpan.FromSeconds(i));
    }

    // Close the writer so buffered frames are flushed to disk.
    videoWriter.Close();
}
/// <summary>
/// Gets the overhead a given audio type will incur in the M2TS container,
/// given its length and sampling rate.
/// </summary>
/// <param name="audioType">type of the audio track</param>
/// <param name="samplingRate">sampling rate of the audio track</param>
/// <param name="length">length of the audio track in seconds</param>
/// <returns>overhead this audio track will incur, in bytes</returns>
private static int GetM2tsAudioOverhead(AudioCodec audioType, int samplingRate, double length)
{
    // TODO: M2TS overhead is not modeled yet; treat it as zero for now.
    return 0;
}
/// <summary>
/// Parse all the values that are shared between shows and movies.
/// </summary>
/// <param name="input">Input string</param>
/// <param name="dir">Set when the input is a directory name.</param>
private void ParseShared(String input, Boolean dir = false)
{
    String inputCl = helperDictionary.CleanFileName(input);
    Int32 TmpStart;
    String TmpString;

    #region videoQuality
    if (videoQuality == VideoQuality.Unknown)
    {
        TmpString = Check(inputCl, helperDictionary.VideoQualityStrings, out TmpStart);
        videoQuality = helperDictionary.StrToVideoQuality(TmpString);
        if (TmpString.Length > 0)
            Index.Add(new StringLocation(TmpString, TmpStart, TmpString.Length, true, dir));
    }
    #endregion

    #region videoSource
    if (videoSource == VideoSource.Unknown)
    {
        TmpString = Check(inputCl, helperDictionary.VideoSourceStrings, out TmpStart);
        videoSource = helperDictionary.StrToVideoSource(TmpString);
        if (TmpString.Length > 0)
            Index.Add(new StringLocation(TmpString, TmpStart, TmpString.Length, true, dir));
    }
    #endregion

    #region container
    if (container == Container.Unknown && !dir)
    {
        // The container is taken from the file extension directly.
        //TmpString = Check(fileExt, helperDictionary.ContainerStrings, out TmpStart);
        container = helperDictionary.StrToContainer(fileExt);
        //if (TmpString.Length > 0)
        //    Index.Add(new StringLocation(TmpString, TmpStart, TmpString.Length, true, dir));
    }
    #endregion

    #region videoCodec
    if (videoCodec == VideoCodec.Unknown)
    {
        TmpString = Check(inputCl, helperDictionary.VideoCodecStrings, out TmpStart);
        videoCodec = helperDictionary.StrToVideoCodec(TmpString);
        if (TmpString.Length > 0)
            Index.Add(new StringLocation(TmpString, TmpStart, TmpString.Length, true, dir));
    }
    #endregion

    #region audioCodec
    if (audioCodec == AudioCodec.Unknown)
    {
        TmpString = Check(inputCl, helperDictionary.AudioCodecStrings, out TmpStart);
        audioCodec = helperDictionary.StrToAudioCodec(TmpString);
        if (TmpString.Length > 0)
            Index.Add(new StringLocation(TmpString, TmpStart, TmpString.Length, true, dir));
    }
    #endregion

    #region sample
    // Check if our file is a sample (name contains "sample" and it is smaller than 1 GiB).
    if (!sample)
    {
        TmpStart = inputCl.IndexOf("sample");
        if (TmpStart > -1 && fileSize < 1024 * 1024 * 1024)
        {
            sample = true;
            Index.Add(new StringLocation("sample", TmpStart, 6, true, dir));
        }
    }
    #endregion
}
public static INetworkPlayer CreatePlayer(AudioCodec codec, PlayerType type)
{
    switch (type)
    {
        case PlayerType.DirectSound:
            return new DirectSoundPlayer(GetCodec(codec));

        case PlayerType.WaveOut:
            return new WaveOutPlayer(GetCodec(codec));

        default:
            return null;
    }
}
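// A minimal usage sketch (hypothetical helper) tying the two factories
// together: capture from the default input device and play back through
// WaveOut, both driven by the same codec so the stream formats agree.
public static void CreateChatPipelineExample()
{
    IRecorder recorder = CreateRecorder(AudioCodec.MuLaw, loopback: false);
    INetworkPlayer player = CreatePlayer(AudioCodec.MuLaw, PlayerType.WaveOut);
    // Wiring recorder output into the player is left to the surrounding code.
}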