/// <summary>
/// Validates arguments, then starts the asynchronous transcode of
/// <paramref name="sourceFile"/> into <paramref name="targetDirectory"/>.
/// </summary>
/// <exception cref="ArgumentNullException">Any reference argument is null.</exception>
public Task Transcode(CancellationToken ct, FileInfo sourceFile, AudioFormat format, DirectoryInfo targetDirectory)
{
    // Guard clauses run before the async state machine starts, so argument
    // exceptions are thrown synchronously to the caller (checked in this order).
    if (sourceFile == null)
        throw new ArgumentNullException(nameof(sourceFile));
    if (targetDirectory == null)
        throw new ArgumentNullException(nameof(targetDirectory));
    if (format == null)
        throw new ArgumentNullException(nameof(format));

    return TranscodeInternal(ct, sourceFile, format, targetDirectory);
}
/// <summary>Two formats built from identical parameters must compare equal.</summary>
public void Equals()
{
    var left = new AudioFormat (WaveFormatEncoding.LPCM, 2, 16, 48000);
    var right = new AudioFormat (WaveFormatEncoding.LPCM, 2, 16, 48000);

    Assert.IsTrue (left.Equals (right));
}
/// <summary>
/// Creates an AudioStream and fills in the underlying audio spec struct.
/// </summary>
/// <param name="sampleFrequency">Frequency in Hz.</param>
/// <param name="format">Format of stream data (cast into the spec's format field).</param>
/// <param name="channels">Mono or Stereo.</param>
/// <param name="samples">Number of samples per callback buffer.</param>
/// <param name="callback">Method callback to get more data; marshalled to a native function pointer.</param>
/// <param name="data">Opaque user-data object passed through the spec.</param>
public AudioStream(int sampleFrequency, AudioFormat format, SoundChannel channels, short samples, AudioCallback callback, object data)
{
    this.samples = samples;
    this.queue = new Queue<short[]>(5); // small backlog of pending sample buffers
    this.sampleFrequency = sampleFrequency;
    // To keep compiler happy, we must 'initialize' these values
    spec.padding = 0;
    spec.size = 0;
    spec.silence = 0;
    spec.freq = sampleFrequency;
    spec.format = (short)format;
    spec.channels = (byte)channels;
    if (callback != null)
    {
        // The native side invokes this pointer whenever it needs more audio data.
        spec.callback = Marshal.GetFunctionPointerForDelegate(callback);
    }
    spec.samples = samples;
    spec.userdata = data;
    // Bit 15 of the format value flags signedness (SDL-style convention —
    // NOTE(review): confirm against the AudioFormat enum definition).
    if (((ushort)spec.format & 0x8000) != 0x8000) // signed
    {
        // Midpoint offset computed from the format's low byte (bit depth).
        this.offset = 2 << ((byte)spec.format - 2);
        //this.offset = 0;
    }
    //else
    //{
    //    this.offset = 2 << ((byte)spec.format - 2);
    //}
}
/// <summary>
/// Initializes the codec with the given format and encoder settings.
/// </summary>
/// <param name="format">Audio format to encode; must not be null.</param>
/// <param name="bitrate">Target bitrate for the encoder.</param>
/// <param name="frameSize">Frame size passed to the codec settings.</param>
/// <param name="complexity">Encoder complexity level.</param>
/// <exception cref="ArgumentNullException"><paramref name="format"/> is null.</exception>
public AudioCodec(AudioFormat format, int bitrate, short frameSize, byte complexity)
{
    if (format == null)
        throw new ArgumentNullException (nameof(format)); // nameof is refactor-safe (was a string literal)

    this.settings = new AudioCodecArgs (format, bitrate, frameSize, complexity);
}
/// <summary>
/// Opens a device for input and immediately starts capturing.
/// </summary>
/// <param name="deviceName">Name of input device to open.</param>
/// <param name="recordFormat">Audio recording format (8/16 bit, 1/2 channels only).</param>
/// <param name="internalBufferSize">Size of internal sample buffer in bytes.</param>
/// <exception cref="ArgumentNullException"><paramref name="deviceName"/> or <paramref name="recordFormat"/> is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">Bit depth or channel count is unsupported.</exception>
public OpenALInput(string deviceName, AudioFormat recordFormat, int internalBufferSize)
{
    if (deviceName == null)
        throw new ArgumentNullException(nameof(deviceName));
    if (recordFormat == null)
        throw new ArgumentNullException(nameof(recordFormat));
    if (recordFormat.BitDepth != 8 && recordFormat.BitDepth != 16)
        throw new ArgumentOutOfRangeException(nameof(recordFormat), "Only 8 or 16 bitdepths are supported.");
    if (recordFormat.Channels != 1 && recordFormat.Channels != 2)
        throw new ArgumentOutOfRangeException(nameof(recordFormat), "Only 1 or 2 channels are supported.");

    Name = deviceName;
    // Defensive copy so later mutation of the caller's format can't affect us.
    Format = new AudioFormat
    {
        BitDepth = recordFormat.BitDepth,
        Channels = recordFormat.Channels,
        SampleRate = recordFormat.SampleRate
    };

    // The validation above guarantees exactly one of the four combinations,
    // so the former ALAudioFormat.Unknown fallback is unreachable.
    var format = recordFormat.BitDepth == 8
        ? (recordFormat.Channels == 1 ? ALAudioFormat.Mono8Bit : ALAudioFormat.Stereo8Bit)
        : (recordFormat.Channels == 1 ? ALAudioFormat.Mono16Bit : ALAudioFormat.Stereo16Bit);

    _sampleSize = FormatHelper.SampleSize(recordFormat.BitDepth, recordFormat.Channels);
    _device = API.alcCaptureOpenDevice(deviceName, (uint)recordFormat.SampleRate, format, internalBufferSize);
    API.alcCaptureStart(_device);
}
/// <summary>
/// Converts the file with ffmpeg. If conversion fails (missing/empty output),
/// the original file is moved to the target path as a fallback instead.
/// </summary>
/// <param name="fileName">The path to the file which should become converted</param>
/// <param name="newFileName">The name of the new file WITHOUT extension</param>
/// <param name="bitrate">The audio bitrate</param>
/// <param name="format">Target audio format.</param>
public static async Task ConvertFile(string fileName, string newFileName, AudioBitrate bitrate, AudioFormat format)
{
    var fileToConvert = new FileInfo(fileName);
    // Process is IDisposable — dispose it once ffmpeg has exited.
    using (var p = new Process
    {
        StartInfo =
        {
            CreateNoWindow = true,
            FileName = HurricaneSettings.Paths.FFmpegPath,
            Arguments = GetParameter(fileName, newFileName, bitrate, format),
            UseShellExecute = false
        }
    })
    {
        p.Start();
        await Task.Run(() => p.WaitForExit());
    }

    var newFile = new FileInfo(newFileName);
    if (!newFile.Exists || newFile.Length == 0)
    {
        if (newFile.Exists) newFile.Delete();
        // If the convert failed, we just use the "old" file.
        fileToConvert.MoveTo(newFileName);
        // BUG FIX: FileInfo.MoveTo re-points fileToConvert at newFileName, so the
        // old code's unconditional Delete below destroyed the fallback copy.
        return;
    }
    fileToConvert.Delete();
}
/// <summary>Equal formats must produce equal hash codes.</summary>
public void GetHashCodeTest()
{
    var left = new AudioFormat (WaveFormatEncoding.LPCM, 2, 16, 48000);
    var right = new AudioFormat (WaveFormatEncoding.LPCM, 2, 16, 48000);

    Assert.AreEqual (left.GetHashCode(), right.GetHashCode());
}
/// <summary>
/// Loads a WAVE file into an OpenAL buffer and prepares a pool of sources so
/// several instances of the sound can play at the same time.
/// </summary>
/// <param name="name">Logical name of the sound.</param>
/// <param name="file">Path to the audio file.</param>
/// <param name="format">Container format; only WAVE is implemented.</param>
/// <exception cref="NotImplementedException">Any format other than WAVE.</exception>
internal Sound(string name, string file, AudioFormat format = AudioFormat.Wav)
{
    if (format != AudioFormat.Wav)
        throw new NotImplementedException("Support for formats other than WAVE has not yet been implemented.");
    _name = name;
    _file = file;
    // Generate a buffer
    _buffer = AL.GenBuffer();
    // Generate our sources
    _sources = AL.GenSources(SourceCount);
    // Create the states array
    _states = new ALSourceState[SourceCount];
    // Now read in our wave file
    _data = LoadWave(_file, out _channels, out _bitsPerSample, out _sampleRate);
    // Set up the buffer with our wave data
    AL.BufferData(_buffer, GetSoundFormat(_channels, _bitsPerSample), _data, _data.Length, _sampleRate);
    // Set up each individual source to use our buffer
    // We need multiple sources if we want to play multiple instances
    // of our wave at the same time.
    // We also get the initial source states here.
    for (var i = 0; i < SourceCount; i++)
    {
        AL.Source(_sources[i], ALSourcei.Buffer, _buffer);
        _states[i] = GetSourceState(_sources[i]);
    }
}
/// <summary>
/// Calls the Bing Speech synthesis endpoint and returns the rendered audio bytes.
/// </summary>
/// <param name="lang">Language tag placed on the SSML root (e.g. "en-US").</param>
/// <param name="voiceName">Name of the synthesis voice.</param>
/// <param name="format">Silk vs. RIFF/PCM output selection.</param>
/// <param name="text">Plain text to speak; XML-escaped before being embedded in SSML.</param>
/// <param name="prosodyRate">Speaking rate, clamped to [0.1, 2.0].</param>
/// <returns>The raw audio payload returned by the service.</returns>
public static byte[] TtsAudioOutput(string lang, string voiceName, AudioFormat format, string text, float prosodyRate = 1.0f)
{
    AccessTokenInfo token = auth.GetAccessToken();

    string uri = "https://speech.platform.bing.com/synthesize";
    HttpWebRequest webRequest = (HttpWebRequest)WebRequest.Create(uri);
    webRequest.ContentType = "application/ssml+xml";
    webRequest.UserAgent = "QueuingMachine";
    string formatName = (format == AudioFormat.Silk) ? "ssml-16khz-16bit-mono-silk" : "riff-16khz-16bit-mono-pcm";
    webRequest.Headers.Add("X-MICROSOFT-OutputFormat", formatName);
    webRequest.Headers.Add("X-Search-AppId", "07D3234E49CE426DAA29772419F436CA");
    webRequest.Headers.Add("X-Search-ClientID", "1ECFAE91408841A480F00935DC390960");
    webRequest.Headers.Add("Authorization", "Bearer " + token.access_token);
    webRequest.Method = "POST";

    string bodyTemplate = "<speak version=\"1.0\" xmlns=\"http://www.w3.org/2001/10/synthesis\" xmlns:mstts=\"http://www.w3.org/2001/mstts\" xmlns:emo=\"http://www.w3.org/2009/10/emotionml\" xml:lang=\"{0}\">{1}<emo:emotion><emo:category name=\"CALM\" value=\"1.0\"/><prosody rate=\"{2:F1}\">{3}</prosody></emo:emotion></voice></speak>";
    string voiceTag = "<voice name=\"" + voiceName + "\">";
    string deviceLanguage = lang;
    // BUG FIX: the previous Replace calls mapped each character to itself
    // (e.g. Replace("&", "&")), so the text was embedded unescaped and could
    // break or inject into the SSML. SecurityElement.Escape handles the five
    // XML special characters (& < > " ').
    string encodedXml = System.Security.SecurityElement.Escape(text);

    // Clamp the prosody rate to the service-accepted range.
    if (prosodyRate < 0.1f)
    {
        prosodyRate = 0.1f;
    }
    else if (prosodyRate > 2.0f)
    {
        prosodyRate = 2.0f;
    }

    string body = string.Format(bodyTemplate, deviceLanguage, voiceTag, prosodyRate, encodedXml);
    byte[] bytes = Encoding.UTF8.GetBytes(body);
    webRequest.ContentLength = bytes.Length;
    using (Stream requestStream = webRequest.GetRequestStream())
    {
        requestStream.Write(bytes, 0, bytes.Length);
    }

    // Dispose the response as well as its stream (the old code leaked the response).
    using (WebResponse webResponse = webRequest.GetResponse())
    using (Stream responseStream = webResponse.GetResponseStream())
    using (MemoryStream ms = new MemoryStream())
    {
        responseStream.CopyTo(ms);
        return ms.ToArray();
    }
}
/// <summary>
/// Buffers the file contents into memory and opens a music stream over the copy,
/// recording the matching audio format (mono vs. stereo, 16-bit).
/// </summary>
private void TryLoadData(Stream fileData)
{
    // The music stream keeps reading from this in-memory copy afterwards,
    // so it is intentionally not disposed here.
    var buffered = new MemoryStream();
    fileData.CopyTo(buffered);
    buffered.Seek(0, SeekOrigin.Begin); // rewind before handing off
    musicStream = new MusicStreamFactory().Load(buffered);
    format = musicStream.Channels == 2 ? AudioFormat.Stereo16 : AudioFormat.Mono16;
}
/// <summary>Resets all converter options to their default values.</summary>
public void SetDefault()
{
    // Converter off by default; copy the source codec rather than re-encoding.
    IsConverterEnabled = false;
    Bitrate = AudioBitrate.B256;
    Format = AudioFormat.Copy;
    AddTags = true;
    DownloadFolder2Serialize = string.Empty;
}
/// <summary>Gets the string representation of an audio format.</summary>
/// <param name="format">The audio format enumerator.</param>
/// <returns>"PCM", "Float", or "Unknown" for anything else.</returns>
public static String AudioFormatToString(AudioFormat format)
{
    if (format == AudioFormat.AUDIO_FORMAT_PCM)
    {
        return "PCM";
    }
    if (format == AudioFormat.AUDIO_FORMAT_IEEE_FLOAT)
    {
        return "Float";
    }
    return "Unknown";
}
/// <summary>
/// Accumulates the delivered frame count as elapsed milliseconds and updates
/// the normalized playback position. Always accepts all delivered frames.
/// </summary>
public override int onMusicDelivery(AudioFormat audioFormat, short[] pcmData, int numFrames)
{
    // frames / (frames per millisecond) => elapsed milliseconds for this delivery
    double deliveredMs = numFrames / (audioFormat.SampleRate / 1000.0);
    consumedTrackLength += deliveredMs;
    // Position is a 0..1 fraction; zero until the total length is known.
    PlaybackPosition = totalTrackLength > 0 ? consumedTrackLength / totalTrackLength : 0;
    return numFrames;
}
/// <summary>
/// Latches the audio format exactly once; any subsequent format change is
/// treated as a programming error.
/// </summary>
/// <exception cref="InvalidOperationException">The format was already set.</exception>
protected override void OnFormatChange(AudioFormat audioFormat)
{
    if (this.audioFormat != null)
    {
        throw new InvalidOperationException();
    }
    this.audioFormat = audioFormat;
}
/// <summary>
/// Creates a downsampler whose output format has the input's bit depth and
/// channel count but the sample rate divided by <paramref name="downsampleFactor"/>.
/// </summary>
/// <param name="inputFormat">Format of the incoming audio.</param>
/// <param name="downsampleFactor">Integer decimation factor; must be positive.</param>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="downsampleFactor"/> is zero or negative.</exception>
public DecimationDownSampler(AudioFormat inputFormat, int downsampleFactor)
{
    // Guard against a zero/negative factor, which previously surfaced later as a
    // DivideByZeroException or a nonsensical output sample rate.
    if (downsampleFactor <= 0)
        throw new ArgumentOutOfRangeException(nameof(downsampleFactor), "Downsample factor must be positive.");

    _downsampleFactor = downsampleFactor;
    InputAudioFormat = inputFormat;
    OutputAudioFormat = new AudioFormat
    {
        BitDepth = InputAudioFormat.BitDepth,
        Channels = InputAudioFormat.Channels,
        SampleRate = InputAudioFormat.SampleRate / downsampleFactor
    };
}
/// <summary>
/// Writes every delivered PCM sample to the playback writer (when a playback
/// stream is open), serialized under the stream lock. Always accepts all frames.
/// </summary>
public override int onMusicDelivery(AudioFormat audioFormat, short[] pcmData, int numFrames)
{
    if (playbackStream == null)
    {
        return numFrames;
    }
    lock (streamLock)
    {
        foreach (short sample in pcmData)
        {
            playbackWriter.Write(sample);
        }
    }
    return numFrames;
}
//------------------------------------------------------------------------------------------------------------------------
/// <summary>
/// Starts capturing from the default microphone at 8 kHz / 16-bit / stereo.
/// </summary>
public void Start()
{
    audioFormat = new AudioFormat(8000, 16, 2);
    IsActive = true;

    // Configure the wave-in device to match our audio format.
    waveSource = new WaveInEvent
    {
        WaveFormat = new WaveFormat(audioFormat.samplerate, audioFormat.bitsperchannel, audioFormat.channels)
    };

    // Register event callbacks (method-group syntax; same delegates as before).
    waveSource.DataAvailable += waveSource_DataAvailable;
    waveSource.RecordingStopped += waveSource_RecordingStopped;

    // Start recording from the mic.
    waveSource.StartRecording();
}
/// <summary>
/// Wraps a mono 16-bit sample provider and exposes a stereo format with the
/// same sample rate and bit depth.
/// </summary>
/// <param name="audioSampleProvider">Source provider; must be mono and 16-bit.</param>
/// <exception cref="ArgumentNullException"><paramref name="audioSampleProvider"/> is null.</exception>
/// <exception cref="ArgumentException">Source is not mono or not 16-bit.</exception>
public MonoToStereoAudioSampleProvider16(IAudioSampleProvider audioSampleProvider)
{
    // Explicit null check — previously a null argument surfaced as an NRE below.
    if (audioSampleProvider == null)
        throw new ArgumentNullException(nameof(audioSampleProvider));
    if (audioSampleProvider.AudioFormat.Channels != 1)
        throw new ArgumentException("audioSampleProvider expected to be Mono", nameof(audioSampleProvider));
    if (audioSampleProvider.AudioFormat.BitsPerSample != 16)
        throw new ArgumentException("audioSampleProvider expected to be 16 bit", nameof(audioSampleProvider));

    _audioSampleProvider = audioSampleProvider;
    // Same rate and depth as the source, but two channels.
    _audioFormat = new AudioFormat(_audioSampleProvider.AudioFormat.SampleRate, 2, _audioSampleProvider.AudioFormat.BitsPerSample);
}
/// <summary>
/// Ensures the target directory exists, then streams the source file through
/// the wave reader and transcoder into the target file.
/// </summary>
private async Task TranscodeInternal(CancellationToken ct, FileInfo sourceFile, AudioFormat format, DirectoryInfo targetDirectory)
{
    var targetDirPath = targetDirectory.FullName;
    if (!await _asyncDirectoryOperations.Exists(targetDirPath))
    {
        await _asyncDirectoryOperations.CreateDirectory(targetDirPath);
    }

    var targetFile = Path.Combine(targetDirPath, GetTranscodedFileName(sourceFile.Name));

    // Open source, decode to wave, write transcoded output; all three streams
    // are disposed in reverse order when the transcode finishes or faults.
    using (var sourceStream = await _asyncFileOperations.OpenRead(sourceFile.FullName))
    using (var sourceWaveStream = await _audioStreamReader.ReadWave(ct, sourceStream, format))
    using (var targetStream = await _asyncFileOperations.OpenWrite(targetFile))
    {
        await _waveStreamTranscoder.Transcode(ct, sourceWaveStream, targetStream);
    }
}
/// <summary>
/// Creates a voice-activation detector for a mono LPCM source.
/// </summary>
/// <param name="format">Source format; must be mono LPCM.</param>
/// <param name="frameSize">Samples per frame; used to compute the frame length in seconds.</param>
/// <param name="startVolume">Volume level that triggers activation.</param>
/// <param name="continueVolume">Volume level that sustains activation.</param>
/// <param name="threshold">How long activation persists after the volume drops.</param>
/// <exception cref="ArgumentNullException"><paramref name="format"/> is null.</exception>
/// <exception cref="ArgumentException">Source is not mono or not LPCM.</exception>
public VoiceActivation(AudioFormat format, int frameSize, int startVolume, int continueVolume, TimeSpan threshold)
{
    if (format == null)
        throw new ArgumentNullException (nameof(format)); // nameof instead of a string literal
    if (format.Channels != 1 || format.WaveEncoding != WaveFormatEncoding.LPCM)
        throw new ArgumentException ("Can not perform voice activation on a non-mono or non-LPCM source.");

    this.format = format;
    // Frame duration in seconds: samples per frame divided by samples per second.
    this.length = (double) frameSize / this.format.SampleRate;
    this.startVol = startVolume;
    this.contVol = continueVolume;
    this.threshold = threshold.TotalSeconds;
}
/// <summary>
/// Loads the video stream for this content item and records its audio format.
/// Failures are logged and swallowed in release runs; with a debugger attached
/// they are rethrown as <c>VideoNotFoundOrAccessible</c>.
/// </summary>
protected override void LoadData(Stream fileData)
{
    try
    {
        video = new VideoStreamFactory().Load(fileData, "Content/" + Name);
        format = video.Channels == 2 ? AudioFormat.Stereo16 : AudioFormat.Mono16;
    }
    catch (Exception ex)
    {
        Logger.Error(ex);
        if (!Debugger.IsAttached)
        {
            return; // best-effort in release: leave the item unloaded
        }
        throw new VideoNotFoundOrAccessible(Name, ex);
    }
}
/// <summary>
/// Parses the RIFF/WAVE headers of <paramref name="data"/>, leaving the stream
/// positioned at the first byte of the "data" chunk (the PCM samples).
/// </summary>
/// <param name="logger">Diagnostics sink.</param>
/// <param name="data">Stream containing a RIFF/WAVE file, read from its current position.</param>
/// <param name="stopEvent">Stop signal stored for use elsewhere in the class.</param>
/// <exception cref="Exception">The stream is not an 8/16-bit uncompressed PCM WAVE file.</exception>
public WavData(ILogger logger, Stream data, AutoResetEvent stopEvent)
{
    _logger = logger;
    _stopEvent = stopEvent;
    _stream = data;
    var buffer = new byte[40];
    // RIFF chunk descriptor: "RIFF" <size> "WAVE" (12 bytes).
    var readBytes = _stream.Read(buffer, 0, 12);
    if (readBytes != 12 || buffer[0] != 'R' || buffer[1] != 'I' || buffer[2] != 'F' || buffer[3] != 'F' || buffer[8] != 'W' || buffer[9] != 'A' || buffer[10] != 'V' || buffer[11] != 'E')
        throw new Exception("incorrect RIFF chunk descriptor");
    int chunkSize;
    // "fmt " sub-chunk describes the sample format.
    if (string.CompareOrdinal(ReadChunkHeader(_stream, out chunkSize), "fmt ") != 0)
        throw new Exception("incorrect format (fmt)");
    readBytes = _stream.Read(buffer, 0, chunkSize);
    if (readBytes != chunkSize)
        throw new Exception("incorrect format (Subchunk1)");
    if (GetShort(buffer, 0) != 1) // compression: 1 == uncompressed PCM
        throw new Exception("incorrect format (not PCM)");
    _channels = GetShort(buffer, 2);
    _sampleRate = GetInt(buffer, 4);
    /* int avg_bytes = GetInt(buffer, 8); ushort block_align = GetShort(buffer, 12); */
    var signBits = GetShort(buffer, 14);
    // Skip an optional "fact" chunk, then require the "data" chunk.
    var chunkName = ReadChunkHeader(_stream, out chunkSize);
    if (string.CompareOrdinal(chunkName, "fact") == 0)
    {
        _stream.Seek(chunkSize, SeekOrigin.Current);
        chunkName = ReadChunkHeader(_stream, out chunkSize);
    }
    if (string.CompareOrdinal(chunkName, "data") != 0)
        throw new Exception("incorrect format (data)");
    _dataLen = chunkSize;
    // Map bits-per-sample to the sample byte width (frame divider) and format tag.
    switch (signBits)
    {
        case 8:
            _frameDivider = 1;
            _format = AudioFormat.U8;
            break;
        case 16:
            _frameDivider = 2;
            _format = AudioFormat.S16_LE;
            break;
        default:
            throw new Exception("bits per sample");
    }
}
/// <summary>Changing any single constructor argument must change the hash code.</summary>
public void GetHashCodeNotMatching()
{
    var reference = new AudioFormat (WaveFormatEncoding.LPCM, 2, 16, 48000);
    var variants = new[]
    {
        new AudioFormat (WaveFormatEncoding.Unknown, 2, 16, 48000), // different encoding
        new AudioFormat (WaveFormatEncoding.LPCM, 1, 16, 48000),    // different channel count
        new AudioFormat (WaveFormatEncoding.LPCM, 2, 8, 48000),     // different bit depth
        new AudioFormat (WaveFormatEncoding.LPCM, 2, 16, 44100),    // different sample rate
    };

    foreach (var variant in variants)
    {
        Assert.AreNotEqual (variant.GetHashCode(), reference.GetHashCode());
    }
}
/// <summary>Formats differing in any single constructor argument must not compare equal.</summary>
public void DoesNotEqual()
{
    var reference = new AudioFormat (WaveFormatEncoding.LPCM, 2, 16, 48000);
    var variants = new[]
    {
        new AudioFormat (WaveFormatEncoding.Unknown, 2, 16, 48000), // different encoding
        new AudioFormat (WaveFormatEncoding.LPCM, 1, 16, 48000),    // different channel count
        new AudioFormat (WaveFormatEncoding.LPCM, 2, 8, 48000),     // different bit depth
        new AudioFormat (WaveFormatEncoding.LPCM, 2, 16, 44100),    // different sample rate
    };

    foreach (var variant in variants)
    {
        Assert.IsFalse (variant.Equals (reference));
    }
}
/// <summary>
/// Logging wrapper around the file transcoder: records start, completion, and
/// failure (rethrowing the original exception).
/// </summary>
public async Task Transcode(CancellationToken ct, FileInfo sourceFile, AudioFormat format, DirectoryInfo targetDirectory)
{
    var source = sourceFile.FullName;
    var target = GetTranscodedFileName(sourceFile.FullName);

    _log.Info("Transcoding file {0} to {1}", source, target);
    try
    {
        await _fileTranscoder.Transcode(ct, sourceFile, format, targetDirectory);
        _log.Info("Transcoding complete for file {0} {1}", source, target);
    }
    catch (Exception ex)
    {
        _log.Error(ex, "Error while transcoding file {0} to {1}", source, target);
        throw; // preserve the original stack trace
    }
}
/// <summary>
/// Maps an <see cref="AudioFormat"/> to the corresponding ffmpeg codec library name.
/// </summary>
/// <param name="format">Target format.</param>
/// <returns>The ffmpeg encoder name (or "copy" for pass-through).</returns>
/// <exception cref="ArgumentOutOfRangeException">The format has no known encoder mapping.</exception>
public static string GetAudioLibraryFromFormat(AudioFormat format)
{
    switch (format)
    {
        case AudioFormat.Copy:
            return "copy"; // pass-through, no re-encode
        case AudioFormat.MP3:
            return "libmp3lame";
        case AudioFormat.AAC:
            return "libfdk_aac";
        case AudioFormat.WMA:
            return "wmav2";
        default:
            // Include the parameter name and offending value (the old throw carried neither).
            throw new ArgumentOutOfRangeException(nameof(format), format, null);
    }
}
/// <summary>
/// Builds a 44-byte canonical PCM WAV header for a data payload of
/// <paramref name="audioLength"/> bytes in the given format.
/// </summary>
/// <param name="audioLength">Length of the PCM data that will follow the header, in bytes.</param>
/// <param name="audioFormat">Channel count, sample rate, and bits per sample.</param>
/// <returns>Exactly the 44 header bytes.</returns>
public static byte[] GetWavFileHeader(long audioLength, AudioFormat audioFormat)
{
    const int HeaderSize = 44;
    using (MemoryStream stream = new MemoryStream(HeaderSize))
    {
        // "RIFF"
        stream.Write(new byte[] { 0x52, 0x49, 0x46, 0x46 }, 0, 4);
        // RIFF chunk size: data length + header length - 8 bytes occupied by the first 2 fields
        stream.Write(BitConverter.GetBytes((UInt32)(audioLength + HeaderSize - 8)), 0, 4);
        // "WAVE"
        stream.Write(new byte[] { 0x57, 0x41, 0x56, 0x45 }, 0, 4);
        // "fmt "
        stream.Write(new byte[] { 0x66, 0x6D, 0x74, 0x20 }, 0, 4);
        // fmt chunk size: 16 bytes for plain PCM
        stream.Write(BitConverter.GetBytes((UInt32)16), 0, 4);
        // Audio format tag: 1 == uncompressed PCM
        stream.Write(BitConverter.GetBytes((UInt16)1), 0, 2);
        // Channel count
        stream.Write(BitConverter.GetBytes((UInt16)audioFormat.Channels), 0, 2);
        // Sample rate
        stream.Write(BitConverter.GetBytes((UInt32)audioFormat.SamplesPerSecond), 0, 4);
        // Byte rate
        stream.Write(BitConverter.GetBytes((UInt32)((audioFormat.SamplesPerSecond * audioFormat.Channels * audioFormat.BitsPerSample) / 8)), 0, 4);
        // Block alignment
        stream.Write(BitConverter.GetBytes((UInt16)((audioFormat.Channels * audioFormat.BitsPerSample) / 8)), 0, 2);
        // Bits per sample
        stream.Write(BitConverter.GetBytes((UInt16)audioFormat.BitsPerSample), 0, 2);
        // "data"
        stream.Write(new byte[] { 0x64, 0x61, 0x74, 0x61 }, 0, 4);
        // Length of the rest of the file
        stream.Write(BitConverter.GetBytes((UInt32)audioLength), 0, 4);
        // ToArray (not GetBuffer) so the result is exactly the bytes written,
        // independent of the stream's internal buffer capacity.
        return stream.ToArray();
    }
}
/// <summary>
/// Loads an Ogg Vorbis file and prepares it for playback.
/// </summary>
/// <param name="name">Logical name of the song.</param>
/// <param name="file">Path to the audio file.</param>
/// <param name="format">Container format; only Ogg is implemented.</param>
/// <exception cref="NotImplementedException">Any format other than Ogg.</exception>
internal Song(string name, string file, AudioFormat format = AudioFormat.Ogg)
{
    if (format != AudioFormat.Ogg)
        throw new NotImplementedException("Support for formats other than ogg is not yet implemented.");
    _name = name;
    _file = file;
    try
    {
        _stream = new OggStream(_file);
        _stream.Prepare();
    }
    catch (InvalidDataException)
    {
        // Single retry after disposing the failed stream — presumably works
        // around a transient/stateful failure in OggStream.Prepare (TODO
        // confirm). A second InvalidDataException propagates to the caller.
        _stream.Dispose();
        _stream = new OggStream(_file);
        _stream.Prepare();
    }
}
/// <summary>
/// Creates an audio reader for <paramref name="stream"/> based on the format.
/// Support for each format is compiled in via the OGG/WAV preprocessor symbols.
/// </summary>
/// <param name="stream">Stream containing the encoded audio.</param>
/// <param name="format">Detected container format.</param>
/// <returns>A reader for the requested format.</returns>
/// <exception cref="NotSupportedException">The format's support was compiled out, or the format is Unknown.</exception>
/// <exception cref="ArgumentOutOfRangeException">An unhandled format value.</exception>
public static IAudioReader GetReader(Stream stream, AudioFormat format)
{
    switch (format)
    {
        case AudioFormat.OGG:
#if OGG
            return new OggAudioReader(stream, false);
#else
            // More specific than the former bare Exception; still caught by catch (Exception).
            throw new NotSupportedException("OGG support disabled");
#endif
        case AudioFormat.WAV:
#if WAV
            return new WavAudioReader(stream, false);
#else
            throw new NotSupportedException("WAV support disabled");
#endif
        case AudioFormat.Unknown:
            throw new NotSupportedException("Can't get reader of unknown format");
    }
    // Replaces the old throw new Exception("WTF") fall-through.
    throw new ArgumentOutOfRangeException(nameof(format), format, "Unhandled audio format.");
}
/// <summary>
/// Persists a new Audio row with the given format/data, then a Riff row that
/// references it, and returns both as a DTO.
/// </summary>
public RiffDTO SaveRiff(string name, AudioFormat audioFormat, ChannelConfiguration channelConfiguration, int sampleRate, byte[] data, string userId)
{
    // Build and store the audio payload first so its generated id is available.
    var audio = new Audio();
    audio.SetAudioFormat(audioFormat);
    audio.SetChannelConfiguration(channelConfiguration);
    audio.SampleRate = sampleRate;
    audio.Data = data;
    var savedAudio = _ar.Insert(audio);

    // Then the riff record pointing at the stored audio.
    var riff = new Riff
    {
        AudioID = savedAudio.AudioID,
        Name = name,
        UserID = userId
    };
    var savedRiff = _rr.Insert(riff);

    return new RiffDTO(savedRiff, savedAudio);
}
/// <summary>Create with video and audio stream.</summary>
/// <param name="outputAvi">Destination stream; disposed when Close fires.</param>
/// <param name="fourCC">Video codec FOURCC written to the header list.</param>
/// <param name="videoFormat">Video stream format; null means no video stream.</param>
/// <param name="audioFormat">Audio stream format; null means no audio stream.</param>
public AviWriter(System.IO.Stream outputAvi, string fourCC, VideoFormat videoFormat, AudioFormat audioFormat)
{
    // A RIFF file is a RIFF header followed by zero or more lists and chunks.
    // The RIFF header is the FOURCC 'RIFF', a 4-byte data size, a FOURCC identifying the data, and the data.
    // A list is the FOURCC 'LIST', a 4-byte data size, a FOURCC identifying the data, and the data.
    // A chunk is a FOURCC identifying the data, a 4-byte data size, and the data.
    // A chunk-data FOURCC is a 2-digit stream number followed by a 2-character code (dc=video, wb=audio, tx=subtitles, ...).
    // An AVI file is a RIFF file with the 'AVI ' FOURCC, two mandatory LIST chunks ('hdrl', 'movi'),
    // and an optional index chunk.
    var riffFile = new RiffFile(outputAvi, "AVI ");

    // Write the hdrl list with frame count = 0 for now (overwritten on close).
    var hdrlList = riffFile.CreateList("hdrl");
    WriteHdrlList(hdrlList, fourCC, videoFormat, audioFormat, 0, 0);
    hdrlList.Close();

    // Create the movi list; one data chunk is appended per AddImage/AddAudio call.
    var idx1List = new List <Idx1Entry>();
    var moviList = riffFile.CreateList("movi");
    this.AddImage += (data) =>
    {
        if (videoFormat == null)
        {
            throw new InvalidOperationException("no video stream.");
        }
        var idx1 = WriteMoviList(moviList, "00dc", data);
        idx1List.Add(idx1);
    };
    this.AddAudio += (data) =>
    {
        if (audioFormat == null)
        {
            throw new InvalidOperationException("no audio stream.");
        }
        var idx1 = WriteMoviList(moviList, "01wb", data);
        idx1List.Add(idx1);
    };

    // Finalize the file.
    this.Close += () =>
    {
        // Close the movi list.
        moviList.Close();

        // Create the idx1 index chunk.
        WriteIdx1Chunk(riffFile, idx1List);

        var videoFrames = idx1List.Where(x => x.ChunkId == "00dc").Count();
        var audioFrames = idx1List.Where(x => x.ChunkId == "01wb").Count();

        // Overwrite the hdrl list with the real frame counts.
        var offset = hdrlList.Offset;
        riffFile.BaseStream.Seek(offset, System.IO.SeekOrigin.Begin);  // seek back to the start of the hdrl list
        riffFile.BaseStream.Seek(12, System.IO.SeekOrigin.Current);    // skip the hdrl list header
        WriteHdrlList(riffFile, fourCC, videoFormat, audioFormat, videoFrames, audioFrames); // rewrite hdrl data with the correct counts
        riffFile.BaseStream.Seek(0, System.IO.SeekOrigin.End);         // return to the end of the file

        // Close the file.
        riffFile.Close();
        outputAvi.Dispose();
    };
}
/// <summary>Captures the audio format and sample buffer for an interception event.</summary>
/// <param name="format">Format describing the samples in <paramref name="buffer"/>.</param>
/// <param name="buffer">Intercepted sample data.</param>
public InterceptedEventArgs(AudioFormat format, float[] buffer)
{
    Buffer = buffer;
    Format = format;
}
/// <summary>Forwards to the three-argument overload with a default frequency of <c>Frequency.Zero</c>.</summary>
public void DataFile(AudioFormat format, string path) => DataFile(format, path, Frequency.Zero);
/// <summary>
/// Prepares a transcode from <paramref name="sourceAudio"/> to
/// <paramref name="destinationAudio"/> using the encoding profile matching
/// <paramref name="AudioType"/>.
/// </summary>
/// <exception cref="ArgumentNullException">Either file argument is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">The format has no encoding profile.</exception>
public ConvertToMp3Manager(StorageFile sourceAudio, StorageFile destinationAudio, AudioFormat AudioType = AudioFormat.MP3, AudioEncodingQuality audioEncodingQuality = AudioEncodingQuality.High)
{
    // Separate checks so the exception names the actual offending parameter.
    // (The old single check passed a prose message as the paramName argument.)
    if (sourceAudio == null)
        throw new ArgumentNullException(nameof(sourceAudio));
    if (destinationAudio == null)
        throw new ArgumentNullException(nameof(destinationAudio));

    switch (AudioType)
    {
        case AudioFormat.AAC:
        case AudioFormat.M4A:
            profile = MediaEncodingProfile.CreateM4a(audioEncodingQuality);
            break;
        case AudioFormat.MP3:
            profile = MediaEncodingProfile.CreateMp3(audioEncodingQuality);
            break;
        case AudioFormat.WMA:
            profile = MediaEncodingProfile.CreateWma(audioEncodingQuality);
            break;
        default:
            // Previously an unmapped format silently left 'profile' null and
            // failed later with a NullReferenceException.
            throw new ArgumentOutOfRangeException(nameof(AudioType), AudioType, "Unsupported audio format.");
    }

    this.SourceAudio = sourceAudio;
    this.DestinationAudio = destinationAudio;
    this.AudioFormat = AudioType;
    this.AudioQuality = audioEncodingQuality;
    this.TransCoder = new MediaTranscoder();
}
// Token: 0x06000167 RID: 359 RVA: 0x000049B0 File Offset: 0x00002BB0
/// <summary>
/// Decodes one Opus packet into <paramref name="target"/>, re-initializing the
/// decoder when the packet's channel count differs from the current format,
/// and slicing <paramref name="target"/> down to the decoded byte count.
/// </summary>
public void Decode(OpusDecoder decoder, ReadOnlySpan <byte> opus, ref Span <byte> target, bool useFec, out AudioFormat outputFormat)
{
    // First metric is compared against ChannelCount below, so it is the packet's
    // channel count; the middle two metrics are unused here (decompiled names).
    int channels;
    int metricB;
    int metricC;
    int frameSize;
    Interop.OpusGetPacketMetrics(opus, this.AudioFormat.SampleRate, out channels, out metricB, out metricC, out frameSize);

    // If the packet's channel layout differs from ours, report (and switch the
    // decoder to) a format carrying the packet's channel count.
    outputFormat = (this.AudioFormat.ChannelCount != channels)
        ? new AudioFormat(this.AudioFormat.SampleRate, channels, this.AudioFormat.VoiceApplication)
        : this.AudioFormat;
    if (decoder.AudioFormat.ChannelCount != channels)
    {
        decoder.Initialize(outputFormat);
    }

    int sampleCount = Interop.OpusDecode(decoder.Decoder, opus, frameSize, target, useFec);
    int decodedBytes = outputFormat.SampleCountToSampleSize(sampleCount);
    target = target.Slice(0, decodedBytes);
}
/// <summary>
/// Creates a DTX (discontinuous transmission) filter backed by a
/// normal-aggressiveness voice-activity detector for the given format.
/// </summary>
public DtxFilter(AudioFormat audioFormat)
{
    _audioFormat = audioFormat;
    _vad = new VoiceActivityDetector(audioFormat, VoiceActivityDetector.Aggressiveness.Normal);
}
/// <summary>
/// Persists the audio job, queues it for background synthesis via the message
/// queue, and returns the job id immediately. The conversion itself runs
/// fire-and-forget on a background task; callers poll the job status.
/// </summary>
/// <param name="job">Job to process; must not be null.</param>
/// <returns>The id of the submitted job.</returns>
/// <exception cref="ArgumentNullException"><paramref name="job"/> is null.</exception>
public async Task <Guid> SubmitWorkItem(AudioJob job)
{
    if (job == null)
    {
        throw new ArgumentNullException(nameof(job), "audio job is null");
    }

    // NOTE(review): a very large block of commented-out local speech-synthesis
    // code (per-language voice selection and SpeechSynthesizer usage) was
    // removed here; recover it from source control history if ever needed.

    try
    {
        _context.Jobs.Add(job);
        _context.SaveChanges();
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex);
        throw; // rethrow without resetting the stack trace (was 'throw ex;')
    }

    Task.Factory.StartNew(t =>
    {
        AudioJob auJob = (AudioJob)t;
        try
        {
            // Send the job to the RabbitMQ cluster; the returned payload is the
            // path on WEBSERVER2 where RBA16 wrote the rendered audio.
            byte[] result = _auSender.SendAudioJobToQueue(auJob);
            string outputPath = Encoding.UTF8.GetString(result);
            // Translate the server-local path into the reachable UNC share.
            // note: it may be C:\RoboBrailleWebApi\Temp instead of just temp.
            outputPath = outputPath.Replace(@"C:\RoboBrailleWebApi", @"\\WEBSERVER2");
            if (File.Exists(outputPath))
            {
                result = File.ReadAllBytes(outputPath);
                File.Delete(outputPath);
            }
            else
            {
                result = null;
            }
            if (result == null)
            {
                RoboBrailleProcessor.SetJobFaulted(auJob, _context);
                throw new Exception("Job result is null!");
            }
            auJob.ResultContent = result;
        }
        catch (Exception ex)
        {
            Trace.WriteLine(ex.Message);
            RoboBrailleProcessor.SetJobFaulted(auJob, _context);
            throw; // preserve the original stack trace (was 'throw ex;')
        }

        // Map the requested output format to a MIME type and file extension.
        string mime;
        string fileExtension;
        switch (auJob.FormatOptions)
        {
            case AudioFormat.Mp3:
                mime = "audio/mpeg3";
                fileExtension = ".mp3";
                break;
            case AudioFormat.Aac:
                mime = "audio/aac";
                fileExtension = ".aac";
                break;
            case AudioFormat.Wav:
            default:
                // Wav and the fallback share the same mapping.
                mime = "audio/wav";
                fileExtension = ".wav";
                break;
        }

        try
        {
            auJob.DownloadCounter = 0;
            auJob.ResultFileExtension = fileExtension;
            auJob.ResultMimeType = mime;
            auJob.FinishTime = DateTime.Now;
            auJob.Status = JobStatus.Done;
            _context.Jobs.Attach(auJob);
            _context.Entry(auJob).State = EntityState.Modified;
            _context.SaveChanges();
        }
        catch (Exception ex)
        {
            Trace.WriteLine(ex.Message);
            throw; // preserve the original stack trace (was 'throw ex;')
        }
    }, job);

    return (job.Id);
}
/// <summary>Builds the ffmpeg argument string for transcoding <paramref name="inputFile"/> to <paramref name="outputFile"/>.</summary>
private static string GetParameter(string inputFile, string outputFile, AudioBitrate bitrate, AudioFormat format)
{
    // AudioBitrate members are named like "B256" — strip the leading letter to get the kbps value.
    var kbps = bitrate.ToString().Remove(0, 1);
    return $"-i \"{inputFile}\" -c:a {GetAudioLibraryFromFormat(format)} -vn -b:a {kbps}k \"{outputFile}\"";
}
/// <summary>
/// Creates an utterance that buffers its audio data in memory.
/// </summary>
/// <param name="name">Identifier for this utterance.</param>
/// <param name="format">Format of the audio that will be buffered.</param>
public Utterance(string name, AudioFormat format)
{
    this.audioBuffer = new ByteArrayOutputStream();
    this.audioFormat = format;
    this.name = name;
}
/// <summary>
/// Converts the file with ffmpeg. On success the source file is deleted;
/// if the conversion produced no usable output, the source file is kept by
/// moving it to the target name instead.
/// </summary>
/// <param name="fileName">The path to the file which should become converted</param>
/// <param name="newFileName">The name of the new file WITHOUT extension</param>
/// <param name="bitrate">The audio bitrate</param>
/// <param name="format">The target audio format</param>
public static async Task ConvertFile(string fileName, string newFileName, AudioBitrate bitrate, AudioFormat format)
{
    var fileToConvert = new FileInfo(fileName);
    // Process is IDisposable — dispose it once ffmpeg has exited.
    using (var p = new Process
    {
        StartInfo =
        {
            CreateNoWindow = true,
            FileName = AnyListenSettings.Paths.FFmpegPath,
            Arguments = GetParameter(fileName, newFileName, bitrate, format),
            UseShellExecute = false
        }
    })
    {
        p.Start();
        await Task.Run(() => p.WaitForExit());
    }

    var newFile = new FileInfo(newFileName);
    if (!newFile.Exists || newFile.Length == 0)
    {
        if (newFile.Exists)
        {
            newFile.Delete();
        }
        // If the convert failed, we just use the "old" file under the new name.
        fileToConvert.MoveTo(newFileName);
        // BUG FIX: FileInfo.MoveTo retargets fileToConvert to newFileName, so the
        // Delete() below would have destroyed the fallback file. Return instead.
        return;
    }
    fileToConvert.Delete();
}
/// <summary>
/// Creates a puppet-string controller for the given token and audio format.
/// </summary>
public PuppetString(string token, AudioFormat format)
{
    _token = token;
    _format = format;
    // Configure the simulated mouse wheel step size used by the input simulator.
    _inputSimulator.Mouse.MouseWheelClickSize = 20;
}
/// <summary>
/// Loads an AudioClip from a stream, either streaming samples on demand via PCM
/// callbacks or decoding the whole buffer up front (optionally on a background thread).
/// </summary>
/// <param name="dataStream">Stream containing the encoded audio data.</param>
/// <param name="audioFormat">Format passed to the AudioFileReader.</param>
/// <param name="unityAudioClipName">Name given to the created Unity AudioClip.</param>
/// <param name="doStream">True to stream samples on demand instead of preloading.</param>
/// <param name="loadInBackground">When preloading, decode on the deferred loader thread.</param>
/// <param name="diposeDataStreamIfNotNeeded">When preloading, dispose the stream once decoding is done.</param>
/// <returns>The created AudioClip, or null if loading failed.</returns>
public static AudioClip Load(Stream dataStream, AudioFormat audioFormat, string unityAudioClipName, bool doStream = false, bool loadInBackground = true, bool diposeDataStreamIfNotNeeded = true)
{
    AudioClip audioClip = null;
    AudioFileReader reader = null;
    try
    {
        reader = new AudioFileReader(dataStream, audioFormat);
        AudioInstance audioInstance = new AudioInstance
        {
            reader = reader,
            // Total sample count = byte length / bytes per sample.
            samplesCount = (int)(reader.Length / (reader.WaveFormat.BitsPerSample / 8))
        };
        if (doStream)
        {
            // Streaming clip: Unity pulls samples through the read delegate and
            // repositions through the seek delegate.
            audioClip = AudioClip.Create(unityAudioClipName, audioInstance.samplesCount / audioInstance.channels, audioInstance.channels, audioInstance.sampleRate, doStream,
                delegate(float[] target)
                {
                    reader.Read(target, 0, target.Length);
                },
                delegate(int target)
                {
                    // Seek factor: 4 bytes per frame for mono, 8 for stereo —
                    // presumably matches the reader's output sample size; TODO confirm.
                    if (audioInstance.channels == 1)
                    {
                        reader.Seek(target * 4, SeekOrigin.Begin);
                    }
                    else
                    {
                        reader.Seek(target * 8, SeekOrigin.Begin);
                    }
                });
            audioInstance.audioClip = audioClip;
            SetAudioClipLoadType(audioInstance, AudioClipLoadType.Streaming);
            SetAudioClipLoadState(audioInstance, AudioDataLoadState.Loaded);
        }
        else
        {
            audioClip = AudioClip.Create(unityAudioClipName, audioInstance.samplesCount / audioInstance.channels, audioInstance.channels, audioInstance.sampleRate, doStream);
            audioInstance.audioClip = audioClip;
            if (diposeDataStreamIfNotNeeded)
            {
                // Stream is only disposed once the deferred/synchronous decode completes.
                audioInstance.streamToDisposeOnceDone = dataStream;
            }
            SetAudioClipLoadType(audioInstance, AudioClipLoadType.DecompressOnLoad);
            SetAudioClipLoadState(audioInstance, AudioDataLoadState.Loading);
            if (loadInBackground)
            {
                // Enqueue for the deferred loader thread; the lock guards the shared queue.
                object obj = deferredLoadQueue;
                lock (obj)
                {
                    deferredLoadQueue.Enqueue(audioInstance);
                }
                RunDeferredLoaderThread();
                EnsureInstanceExists();
            }
            else
            {
                // Synchronous decode: read all samples and hand them to the clip immediately.
                audioInstance.dataToSet = new float[audioInstance.samplesCount];
                audioInstance.reader.Read(audioInstance.dataToSet, 0, audioInstance.dataToSet.Length);
                audioInstance.audioClip.SetData(audioInstance.dataToSet, 0);
                SetAudioClipLoadState(audioInstance, AudioDataLoadState.Loaded);
            }
        }
    }
    catch (Exception ex)
    {
        // SetAudioClipLoadState(audioClip, AudioDataLoadState.Failed);
        MSCLoader.ModConsole.Error(string.Concat(new object[] { unityAudioClipName, " - Failed:", ex.Message }));
        System.Console.WriteLine(string.Concat(new object[] { "Could not load AudioClip named '", unityAudioClipName, "', exception:", ex }));
    }
    return(audioClip);
}
/// <summary>
/// Sets the audio format used for audio data to enqueue and play on cast remote display.
/// </summary>
/// <param name="audioFormat">Audio format; forwarded to the native layer as its integer value.</param>
/// <param name="sampleRate">Sample rate of the audio data.</param>
/// <param name="numberChannels">Number of audio channels.</param>
/// <param name="isInterleaved">Whether the channel samples are interleaved.</param>
public static void SetAudioFormat(AudioFormat audioFormat, int sampleRate, int numberChannels, bool isInterleaved)
{
    // Thin wrapper over the native plugin entry point.
    _native_GCKUnitySetAudioFormat((int)audioFormat, sampleRate, numberChannels, isInterleaved);
}
/// <summary>
/// Lazily maps raw response variants to domain <c>Variant</c> objects,
/// resolving pronunciations for the given language and audio format.
/// </summary>
public static IEnumerable <Variant> Parse(Response.Variant[] sources, Language language, AudioFormat audioFormat)
{
    foreach (var source in sources)
    {
        var mapped = new Variant
        {
            Cutback = source.Cutback,
            Label = source.VariantLabel,
            SenseSpecificInflectionPluralLabel = source.SenseSpecificInflectionPluralLabel,
            Text = source.Text
        };

        // Pronunciations stays null when the source has none (preserved behavior).
        if (source.Pronunciations.Any())
        {
            mapped.Pronunciations = source.Pronunciations
                .Select(p => PronunciationHelper.Parse(p, language, audioFormat))
                .ToList();
        }

        yield return mapped;
    }
}
/// <summary>
/// Bundles the audio and video formats selected for an extraction.
/// </summary>
internal ExtractionInfo(AudioFormat audioFormat, VideoFormat videoFormat)
{
    VideoFormat = videoFormat;
    AudioFormat = audioFormat;
}
/// <summary>
/// Initializes a POST text-to-speech request against the "cognitiveservices/v1" path.
/// </summary>
/// <param name="region">Service region forwarded to the base request.</param>
/// <param name="resourceName">Name of the service resource this request targets.</param>
/// <param name="outputFormat">Desired audio output format; its ContentType is passed to the base request.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="outputFormat"/> is null.</exception>
public TextToSpeechRequest(string region, string resourceName, AudioFormat outputFormat)
    // The null check happens inside the base-call argument via the throw expression,
    // so it runs before the base constructor executes.
    : base(HttpMethod.Post, region, "cognitiveservices/v1", outputFormat?.ContentType ?? throw new ArgumentNullException(nameof(outputFormat)))
{
    this.resourceName = resourceName;
    OutputFormat = outputFormat;
}
/// <summary>
/// Initializes a new instance of the RecognitionClient class.
/// </summary>
/// <param name="clientId">ID associated with all requests related to this client</param>
/// <param name="speakerIds">Speaker IDs for identification</param>
/// <param name="stepSize">Step size in seconds</param>
/// <param name="windowSize">Number of seconds sent per request</param>
/// <param name="audioFormat">Audio format</param>
/// <param name="resultCallback">Value callback action consisted of identification result, client ID and request ID</param>
/// <param name="serviceClient">Client used in identifying the streamed audio file</param>
internal RecognitionClient(Guid clientId, Guid[] speakerIds, int stepSize, int windowSize, AudioFormat audioFormat, Action <RecognitionResult> resultCallback, SpeakerIdentificationServiceClient serviceClient)
{
    this.ClientId = clientId;
    this.SpeakerIds = speakerIds;
    this.StepSize = stepSize;
    this.WindowSize = windowSize;
    // Request IDs start at 0 and are presumably incremented per request — confirm in SendingRequestsTask.
    this.requestID = 0;
    this.AudioFormat = audioFormat;
    this.audioFormatHandler = new AudioFormatHandler(audioFormat);
    this.serviceClient = serviceClient;
    // The processor depends on the format handler created just above.
    this.audioProcessor = new AudioProcessor(this.WindowSize, this.StepSize, this.audioFormatHandler);
    this.idClient = new IdentificationClient(this.SpeakerIds, resultCallback);
    // Background loop that sends identification requests until the token is cancelled.
    this.requestingTaskCancelletionTokenSource = new CancellationTokenSource();
    this.requestingTask = Task.Run(async() =>
    {
        await SendingRequestsTask(requestingTaskCancelletionTokenSource.Token).ConfigureAwait(false);
    });
}
/// <summary>
/// Returns the audio decoder to use for the given codec.
/// </summary>
/// <remarks>
/// NOTE(review): <paramref name="codecTypeType"/> and <paramref name="mediaStatistics"/>
/// are ignored — a Speex decoder with a default AudioFormat is always returned;
/// confirm this is intentional.
/// </remarks>
public IAudioDecoder GetAudioDecoder(AudioCodecType codecTypeType, MediaStatistics mediaStatistics = null)
{
    return new SpeexDecoder(new AudioFormat());
}
// Native entry point that applies an AudioFormat to the unmanaged object behind thisPtr.
// NOTE(review): the interop attribute (e.g. [DllImport] or [MethodImpl(InternalCall)])
// is expected immediately above this declaration, outside this view — confirm.
private static extern void Internal_SetFormat(IntPtr thisPtr, AudioFormat format);
/// <summary>
/// Initializes the echo canceller.
/// </summary>
/// <param name="systemLatency">The amount of latency that the operating environment adds (in milliseconds).
/// Determines how long a played frame is held before being submitted to the echo canceller.
/// For Silverlight v4, this is typically ~150ms.</param>
/// <param name="filterLength">The length of the echo cancellation filter in milliseconds (typically ~150).</param>
/// <param name="recordedAudioFormat">The format of the recorded audio</param>
/// <param name="playedAudioFormat">The format of the played audio</param>
/// <param name="playedResampler">An instance of an IAudioFilter&lt;short&gt; which can be used to resample or synchronize played frames.</param>
/// <param name="recordedResampler">An instance of an IAudioFilter&lt;short&gt; which can be used to resample or synchronize played frames.</param>
protected EchoCancelFilter(int systemLatency, int filterLength, AudioFormat recordedAudioFormat, AudioFormat playedAudioFormat, IAudioFilter playedResampler = null, IAudioFilter recordedResampler = null)
{
    _recordedAudioFormat = recordedAudioFormat;
    _playedAudioFormat = playedAudioFormat;

    // We need to resample the audio we play (typically 16Khz) so that it matches the audio we
    // get from the AudioSinkAdapter (sometimes 16Khz, but often 8Khz); otherwise, the echo cancellation
    // wouldn't work.
    if (playedResampler == null)
    {
        playedResampler = new ResampleFilter(playedAudioFormat, recordedAudioFormat);
        playedResampler.InstanceName = "EchoCanceller_PlayedResampler";
    }

    // We don't typically need to resample the audio we get from the AudioSinkAdapter, but
    // this is here for historical reasons, as we have at times in the past tried to experiment with
    // synchronizing the played and the recorded streams, to account for differences in clock speed.
    // In general, that didn't seem to work, but I like the architectural ability to specify
    // a resampler here, so I've kept it in the pipeline.
    if (recordedResampler == null)
    {
        // Pass-through filter sized in bytes (samples per frame * 2 bytes per 16-bit sample).
        recordedResampler = new NullAudioFilter(recordedAudioFormat.SamplesPerFrame * sizeof(short));
        recordedResampler.InstanceName = "EchoCanceller_RecordedResampler";
    }

    _logger = new EchoCancelFilterLogger();
    SystemLatency = systemLatency;
    // Convert the filter length from milliseconds to samples.
    FilterLength = filterLength * (recordedAudioFormat.SamplesPerSecond / 1000);
    SamplesPerFrame = recordedAudioFormat.SamplesPerFrame;
    SamplesPerSecond = recordedAudioFormat.SamplesPerSecond;
    _recorded = new short[SamplesPerFrame];

    // Configure the latency queue. At least one frame is always held back.
    QueueSize = Math.Max(systemLatency / recordedAudioFormat.MillisecondsPerFrame, 1);
    _maxQueueSize = QueueSize + 1;
    _playedQueue = new Queue <short[]>();
    _playedResampler = playedResampler;
    _recordedResampler = recordedResampler;
}
/// <summary>
/// Converts the source audio file into the destination file using the requested
/// format and encoding quality.
/// </summary>
/// <returns>True when the conversion succeeded.</returns>
public static async Task <bool> ConvertAudioAsync(StorageFile sourceAudio, StorageFile destinationAudio, AudioFormat AudioType = AudioFormat.MP3, AudioEncodingQuality audioEncodingQuality = AudioEncodingQuality.High)
{
    // Delegate the actual transcode to a per-call manager instance.
    var manager = new ConvertToMp3Manager(sourceAudio, destinationAudio, AudioType, audioEncodingQuality);
    return await manager.ConvertAudioAsync();
}
// Constructor for writing: creates a new WAV file (fails if the path already exists)
// and writes the initial header for the requested format.
public SuperWAV(string path, WavFormat wavFormatForWritingA, UInt32 sampleRateA, UInt16 channelCountA, AudioFormat audioFormatA, UInt16 bitsPerSampleA, UInt64 initialDataLengthInTicks = 0)
{
    openMode = OpenMode.CREATE_FOR_READ_WRITE;
    // CreateNew: refuse to overwrite an existing file; readers may share the file.
    fs = new FileStream(path, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.Read);
    br = new BinaryReader(fs);
    bw = new BinaryWriter(fs);
    // Integer division — assumes the bit depth is a multiple of 8; TODO confirm.
    bytesPerSample = (UInt16)(bitsPerSampleA / 8);
    dataLengthInTicks = initialDataLengthInTicks;
    wavInfo.sampleRate = sampleRateA;
    wavInfo.channelCount = channelCountA;
    wavInfo.audioFormat = audioFormatA;
    wavInfo.bitsPerSample = bitsPerSampleA;
    // A "tick" is one sample across all channels (bytesPerSample * channelCount bytes).
    wavInfo.bytesPerTick = (UInt16)(bytesPerSample * channelCountA);
    wavInfo.dataLength = initialDataLengthInTicks * wavInfo.bytesPerTick;
    wavInfo.byteRate = wavInfo.sampleRate * wavInfo.bytesPerTick;
    wavFormat = wavFormatForWritingA;
    // Write the initial RIFF/WAVE header skeleton for the chosen format.
    writeFileHusk(wavFormatForWritingA, ref wavInfo);
}
/// <summary>
/// Uploads an entire array of sample data; a null array is forwarded as an empty range.
/// </summary>
public void Data <T>(AudioFormat format, T[] data, Frequency frequency) where T : struct
{
    int elementCount = data?.Length ?? 0;
    Data(format, data, 0, elementCount, frequency);
}
/// <summary>
/// Parses the command line into an Options object. Switches start with '-' or '/';
/// the first argument selects the job (convert/batch/metadata/help/version), later
/// switches configure inputs, outputs, loop points, formats and codec settings.
/// Bare arguments become the input file, then the output file. Returns null (after
/// printing usage or version text) on any parse error or for help/version requests.
/// </summary>
/// <param name="args">Raw command-line arguments.</param>
/// <returns>The populated, validated Options, or null on error/help/version.</returns>
public static Options Parse(string[] args) { var options = new Options(); for (int i = 0; i < args.Length; i++) { if (string.IsNullOrEmpty(args[i])) { continue; } if (args[i][0] == '-' || args[i][0] == '/') { switch (args[i].Split(':')[0].Substring(1).ToUpper()) { case "C" when i == 0: case "-CONVERT" when i == 0: options.Job = JobType.Convert; continue; case "B" when i == 0: case "-BATCH" when i == 0: options.Job = JobType.Batch; continue; case "M" when i == 0: case "-METADATA" when i == 0: options.Job = JobType.Metadata; continue; case "H" when i == 0: case "-HELP" when i == 0: PrintUsage(); return(null); case "-VERSION" when i == 0: Console.WriteLine($"VGAudio v{GetProgramVersion()}"); return(null); case "I" when options.Job == JobType.Batch: if (i + 1 >= args.Length) { PrintWithUsage("No argument after -i switch."); return(null); } options.InDir = args[i + 1]; i++; continue; case "I": List <int> range = null; if (i + 1 >= args.Length) { PrintWithUsage("No argument after -i switch."); return(null); } if (args[i].Length > 2 && args[i][2] == ':') { range = ParseIntRange(args[i].Substring(3)); } options.InFiles.Add(new AudioFile { Path = args[i + 1], Channels = range }); i++; continue; case "O" when options.Job == JobType.Convert: if (options.OutFiles.Count > 0) { PrintWithUsage("Can't set multiple outputs."); return(null); } if (i + 1 >= args.Length) { PrintWithUsage("No argument after -o switch."); return(null); } options.OutFiles.Add(new AudioFile { Path = args[i + 1] }); i++; continue; case "O" when options.Job == JobType.Batch: if (i + 1 >= args.Length) { PrintWithUsage("No argument after -o switch."); return(null); } options.OutDir = args[i + 1]; i++; continue; case "R": options.Recurse = true; continue; case "L": if (options.NoLoop) { PrintWithUsage("Can't set loop points while using --no-loop."); return(null); } if (i + 1 >= args.Length) { PrintWithUsage("No argument after -l switch."); return(null); } string[] loopPoints = args[i + 1].Split('-'); if
(loopPoints.Length != 2) { PrintWithUsage("-l switch requires two loop points in the format <start>-<end>."); return(null); } if (!(int.TryParse(loopPoints[0], out int loopStart) && int.TryParse(loopPoints[1], out int loopEnd))) { PrintWithUsage("Error parsing loop points."); return(null); } options.Loop = true; options.LoopStart = loopStart; options.LoopEnd = loopEnd; i++; continue; case "-NO-LOOP": if (options.Loop) { PrintWithUsage("Can't set loop points while using --no-loop."); return(null); } options.NoLoop = true; continue; case "-LOOP-ALIGN": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --loop-align."); return(null); } if (!int.TryParse(args[i + 1], out int align)) { PrintWithUsage("Error parsing loop alignment."); return(null); } options.LoopAlignment = align; i++; continue; case "F": if (options.OutFormat != AudioFormat.None) { PrintWithUsage("Can't set multiple formats."); return(null); } if (i + 1 >= args.Length) { PrintWithUsage("No argument after -f switch."); return(null); } AudioFormat format = GetFormat(args[i + 1]); if (format == AudioFormat.None) { PrintWithUsage("Format must be one of pcm16, pcm8, or GcAdpcm"); return(null); } options.OutFormat = format; i++; continue; case "-VERSION": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --version."); return(null); } if (!int.TryParse(args[i + 1], out int version)) { PrintWithUsage("Error parsing version."); return(null); } options.Version = version; i++; continue; case "-FRAMESIZE": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --FrameSize."); return(null); } if (!int.TryParse(args[i + 1], out int framesize)) { PrintWithUsage("Error parsing frame size."); return(null); } options.FrameSize = framesize; i++; continue; case "-FILTER": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --filter."); return(null); } if (!int.TryParse(args[i + 1], out int filter)) { PrintWithUsage("Error parsing filter value."); return(null); } options.Filter =
filter; i++; continue; case "-ADXTYPE": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --AdxType."); return(null); } string type = args[i + 1]; CriAdxType adxType; switch (type.ToUpper()) { case "LINEAR": adxType = CriAdxType.Linear; break; case "FIXED": adxType = CriAdxType.Fixed; break; case "EXP": case "EXPONENTIAL": adxType = CriAdxType.Exponential; break; default: Console.WriteLine("Valid ADX types are Linear, Fixed, or Exp(onential)"); return(null); } options.AdxType = adxType; i++; continue; case "-KEYSTRING": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --keystring."); return(null); } options.KeyString = args[i + 1]; i++; continue; case "-OUT-FORMAT": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --out-format."); return(null); } options.OutTypeName = args[i + 1]; i++; continue; case "-KEYCODE": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --keycode."); return(null); } if (!ulong.TryParse(args[i + 1], out ulong keycode)) { PrintWithUsage("Error parsing key code."); return(null); } options.KeyCode = keycode; i++; continue; case "-HCAQUALITY": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --hcaquality."); return(null); } string quality = args[i + 1]; CriHcaQuality hcaQuality; switch (quality.ToUpper()) { case "HIGHEST": hcaQuality = CriHcaQuality.Highest; break; case "HIGH": hcaQuality = CriHcaQuality.High; break; case "MIDDLE": hcaQuality = CriHcaQuality.Middle; break; case "LOW": hcaQuality = CriHcaQuality.Low; break; case "LOWEST": hcaQuality = CriHcaQuality.Lowest; break; default: Console.WriteLine("Valid qualities are Highest, High, Middle, Low, or Lowest."); return(null); } options.HcaQuality = hcaQuality; i++; continue; case "-BITRATE": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --bitrate."); return(null); } if (!int.TryParse(args[i + 1], out int bitrate)) { PrintWithUsage("Error parsing bitrate."); return(null); } options.Bitrate = bitrate; i++;
continue; case "-LIMIT-BITRATE": options.LimitBitrate = true; continue; case "-BIG-ENDIAN": options.Endianness = Endianness.BigEndian; continue; case "-LITTLE-ENDIAN": options.Endianness = Endianness.LittleEndian; continue; case "-OPUSHEADER": if (i + 1 >= args.Length) { PrintWithUsage("No argument after --OpusHeader"); return(null); } string headerType = args[i + 1]; NxOpusHeaderType nxHeaderType; switch (headerType.ToUpper()) { case "STANDARD": nxHeaderType = NxOpusHeaderType.Standard; break; case "NAMCO": nxHeaderType = NxOpusHeaderType.Namco; break; default: Console.WriteLine("Invalid header type"); return(null); } options.NxOpusHeaderType = nxHeaderType; i++; continue; } } if (options.InFiles.Count == 0) { options.InFiles.Add(new AudioFile { Path = args[i] }); continue; } if (options.OutFiles.Count == 0) { options.OutFiles.Add(new AudioFile { Path = args[i] }); continue; } PrintWithUsage($"Unknown parameter: {args[i]}"); return(null); } if (!ValidateFileNameAndType(options)) { return(null); } return(options); }
/// <summary>
/// Reads an entire file into memory and forwards its bytes to the byte-array overload.
/// </summary>
public void DataFile(AudioFormat format, string path, Frequency frequency)
{
    byte[] fileBytes = File.ReadAllBytes(path);
    Data(format, fileBytes, frequency);
}
/// <summary>
/// Starts a recording, normalizing forward slashes in the reported path to
/// Windows-style backslashes before invoking the caller's callback.
/// </summary>
public override void StartRecording(Container container, VideoFormat videoFormat, AudioFormat audioFormat, RecordingCallback recordingCallback)
{
    RecordingCallback normalized = path => recordingCallback(path.Replace('/', '\\'));
    base.StartRecording(container, videoFormat, audioFormat, normalized);
}
/// <summary>
/// Test: converting audio in small streamed chunks (random poll sizes, several
/// concurrent tasks) must produce a buffer of the same total length as converting
/// the whole asset at once. Exercised for a stereo asset (pepsi.wav) and a mono
/// asset (money.wav). Byte-for-byte equality is no longer asserted due to
/// floating point precision.
/// </summary>
public void StreamConvert()
{
    var pepsi = Engine.AssetLoader.Get <AudioAsset>("Sounds/pepsi.wav");
    // Target format: 32-bit float, stereo, 48KHz.
    var format = new AudioFormat(32, true, 2, 48000);
    var copy = new byte[pepsi.SoundData.Length * 4];
    CopyToByteBuffer(pepsi, copy);
    // Reference conversion of the full buffer, for length comparison below.
    AudioUtil.ConvertFormat(pepsi.Format, format, ref copy);
    var testTasks = new List <Task>();
    for (var io = 0; io < 5; io++)
    {
        testTasks.Add(Task.Run(() =>
        {
            var streamer = new AudioConverter(pepsi.Format, pepsi.SoundData);
            var segmentConvert = new List <byte>();
            // Each task polls with a different random chunk size.
            int framesGet = new Random().Next(1, 500);
            Engine.Log.Info($"StreamConvert has chosen {framesGet} for its poll size.", TestRunnerLogger.TestRunnerSrc);
            var minutesTimeout = 2;
            DateTime start = DateTime.Now;
            var playHead = 0;
            while (DateTime.Now.Subtract(start).TotalMinutes < minutesTimeout) // timeout
            {
                var spanData = new Span <byte>(new byte[framesGet * format.FrameSize]);
                int samplesAmount = streamer.GetSamplesAtByte(format, playHead, framesGet, spanData);
                if (samplesAmount == 0)
                {
                    break;
                }
                playHead += samplesAmount;
                Assert.True(spanData.Length >= samplesAmount * format.SampleSize);
                segmentConvert.AddRange(spanData.Slice(0, samplesAmount * format.SampleSize).ToArray());
            }
            if (DateTime.Now.Subtract(start).TotalMinutes >= minutesTimeout)
            {
                Engine.Log.Info("StreamConvert timeout.", TestRunnerLogger.TestRunnerSrc);
            }
            Assert.Equal(segmentConvert.Count, copy.Length);
            // V No longer true due to floating point precision.
            //for (var i = 0; i < copy.Length; i++)
            //{
            //    Assert.Equal(copy[i], segmentConvert[i]);
            //}
        }));
    }
    Task.WaitAll(testTasks.ToArray());
    testTasks.Clear();
    // Repeat the same check with a mono source asset.
    var money = Engine.AssetLoader.Get <AudioAsset>("Sounds/money.wav");
    copy = new byte[money.SoundData.Length * 4];
    CopyToByteBuffer(money, copy);
    AudioUtil.ConvertFormat(money.Format, format, ref copy);
    for (var io = 0; io < 5; io++)
    {
        testTasks.Add(Task.Run(() =>
        {
            var streamer = new AudioConverter(money.Format, money.SoundData);
            var segmentConvert = new List <byte>();
            int framesGet = new Random().Next(1, 500);
            Engine.Log.Info($"StreamConvert (Mono) has chosen {framesGet} for its poll size.", TestRunnerLogger.TestRunnerSrc);
            DateTime start = DateTime.Now;
            int playHead = 0;
            while (DateTime.Now.Subtract(start).TotalMinutes < 1f) // timeout
            {
                var data = new byte[framesGet * format.FrameSize];
                var spanData = new Span <byte>(data);
                int sampleAmount = streamer.GetSamplesAtByte(format, playHead, framesGet, spanData);
                if (sampleAmount == 0)
                {
                    break;
                }
                playHead += sampleAmount;
                Assert.True(data.Length >= sampleAmount * format.SampleSize);
                segmentConvert.AddRange(spanData.Slice(0, sampleAmount * format.SampleSize).ToArray());
            }
            Assert.Equal(segmentConvert.Count, copy.Length);
            // V No longer true due to floating point precision.
            //for (var i = 0; i < copy.Length; i++)
            //{
            //    Assert.Equal(copy[i], segmentConvert[i]);
            //}
        }));
    }
    Task.WaitAll(testTasks.ToArray());
}
/// <summary>
/// Bundles the audio format selected for an audio-only extraction.
/// </summary>
internal ExtractionInfo(AudioFormat audioFormat)
{
    AudioFormat = audioFormat;
}
/// <summary>
/// Creates new identification-streaming recognition client
/// </summary>
/// <param name="clientId">ID associated with all requests related to this client</param>
/// <param name="speakerIds">Speaker ids for recognition</param>
/// <param name="stepSize">Frequency of sending requests to the server in seconds.
/// If set to 1, the client will send a request to the server for every second received from the user</param>
/// <param name="windowSize">Number of seconds sent per request</param>
/// <param name="audioFormat">Audio format</param>
/// <param name="resultCallBack">Value callback action consisted of identification result, client ID and request ID</param>
/// <param name="serviceClient">Client used in identifying the streamed audio file</param>
/// <returns>Identification-Streaming and recognition client</returns>
/// <exception cref="ArgumentException">Thrown when no speaker IDs are supplied.</exception>
public RecognitionClient CreateRecognitionClient(Guid clientId, Guid[] speakerIds, int stepSize, int windowSize, AudioFormat audioFormat, Action <RecognitionResult> resultCallBack, SpeakerIdentificationServiceClient serviceClient)
{
    // Guard clause: at least one speaker is required for identification.
    if (speakerIds.Length < 1)
    {
        throw new ArgumentException("Speakers count can't be smaller than 1.");
    }

    return new RecognitionClient(clientId, speakerIds, stepSize, windowSize, audioFormat, resultCallBack, serviceClient);
}
/// <summary>
/// Creates an NDI audio sink for the given sender and stream parameters.
/// </summary>
public NdiAudioSink(NDI.Sender ndiSender, int maxRate, int sampleRate, int channelCount, AudioFormat format)
    : base(format)
{
    // 20ms audio samples (per the original note) — all setup happens in Initialize.
    Initialize(ndiSender, maxRate, sampleRate, channelCount);
}