/// <summary>
/// Converts an <see cref="AudioEncoding"/> value to the equivalent <see cref="AudioSubTypes"/> value.
/// </summary>
/// <param name="audioEncoding">The <see cref="AudioEncoding"/> to convert.</param>
/// <returns>The <see cref="AudioSubTypes"/> value which belongs to the specified <paramref name="audioEncoding"/>.</returns>
public static Guid SubTypeFromEncoding(AudioEncoding audioEncoding)
{
    if (Enum.IsDefined(typeof(AudioEncoding), (short)audioEncoding))
        return new Guid((int)audioEncoding, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71);
    throw new ArgumentException("Invalid encoding.", "audioEncoding");
}
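A quick usage sketch (assuming CSCore's AudioEncoding enum, where Pcm = 1): the helper substitutes the numeric wave-format tag into the first field of the standard media-subtype base GUID, so WAVE_FORMAT_PCM yields the well-known KSDATAFORMAT_SUBTYPE_PCM value.

// Hedged sketch: AudioEncoding.Pcm (= 1) plugged into the base GUID.
Guid pcmSubType = AudioSubTypes.SubTypeFromEncoding(AudioEncoding.Pcm);
Console.WriteLine(pcmSubType); // 00000001-0000-0010-8000-00aa00389b71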
public WaveToSampleBase(IWaveSource source, int bits, AudioEncoding encoding)
{
    if (source == null)
        throw new ArgumentNullException("source");

    _source = source;
    _waveFormat = new WaveFormat(source.WaveFormat.SampleRate, 32, source.WaveFormat.Channels, AudioEncoding.IeeeFloat);
    _bpsratio = 32.0 / bits;
}
public SampleToWaveBase(ISampleSource source, int bits, AudioEncoding encoding)
{
    if (source == null)
        throw new ArgumentNullException("source");
    if (bits < 1)
        throw new ArgumentOutOfRangeException("bits");

    _waveFormat = new WaveFormat(source.WaveFormat.SampleRate, (short)bits, source.WaveFormat.Channels, encoding);
    _source = source;
    _ratio = 32.0 / bits;
}
/// <summary>
/// Initializes a new instance of the <see cref="SampleToWaveBase"/> class.
/// </summary>
/// <param name="source">The underlying <see cref="ISampleSource"/> which has to get converted to a <see cref="IWaveSource"/>.</param>
/// <param name="bits">The <see cref="CSCore.WaveFormat.BitsPerSample"/> of the output <see cref="WaveFormat"/>.</param>
/// <param name="encoding">The <see cref="CSCore.WaveFormat.WaveFormatTag"/> of the output <see cref="WaveFormat"/>.</param>
/// <exception cref="ArgumentNullException">The <paramref name="source"/> is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">Invalid number of bits per sample specified by the <paramref name="bits"/> argument.</exception>
protected SampleToWaveBase(ISampleSource source, int bits, AudioEncoding encoding)
{
    if (source == null)
        throw new ArgumentNullException("source");
    if (bits < 1)
        throw new ArgumentOutOfRangeException("bits");

    _waveFormat = (WaveFormat)source.WaveFormat.Clone();
    _waveFormat.BitsPerSample = bits;
    _waveFormat.SetWaveFormatTagInternal(encoding);

    Source = source;
    _ratio = 32.0 / bits;
}
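In both converter bases the 32.0 / bits ratio relates byte counts on the 32-bit float side to byte counts on the n-bit wave side. A minimal worked example, assuming a 16-bit source:

// 16-bit PCM <-> 32-bit float: each 2-byte sample corresponds to a 4-byte float.
int bits = 16;
double ratio = 32.0 / bits;                // 2.0
int waveBytes = 4096;                      // bytes on the n-bit wave side
int floatBytes = (int)(waveBytes * ratio); // 8192 bytes on the float side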
public WaveFormat(int sampleRate, int bits, int channels, AudioEncoding encoding, int extraSize)
{
    if (sampleRate < 1)
        throw new ArgumentOutOfRangeException("sampleRate");
    if (bits < 0)
        throw new ArgumentOutOfRangeException("bits");
    if (channels < 1)
        throw new ArgumentOutOfRangeException("channels", "Channels must be > 0");

    this._sampleRate = sampleRate;
    this.bitsPerSample = (short)bits;
    this._channels = (short)channels;
    this._encoding = encoding;
    this.blockAlign = (short)(channels * (bits / 8));
    this._bytesPerSecond = sampleRate * blockAlign;
    this.ExtraSize = (short)extraSize;
}
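The derived fields follow directly from the arguments; a sketch of the arithmetic for CD-style audio:

// 44.1 kHz, 16-bit, stereo PCM:
// blockAlign     = channels * (bits / 8)   = 2 * 2     = 4 bytes per frame
// bytesPerSecond = sampleRate * blockAlign = 44100 * 4 = 176400
var cdFormat = new WaveFormat(44100, 16, 2, AudioEncoding.Pcm, 0);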
public CommandLineBuilder AudioCodec(AudioEncoding codec)
{
    AudioEncoding = codec;
    return this;
}
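Because the setter returns the builder, calls chain fluently. A hypothetical call site (the Aac member is an assumption, not confirmed by the snippet):

var builder = new CommandLineBuilder()
    .AudioCodec(AudioEncoding.Aac); // returns this, so further setters can chain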
MediaUri GetAudioMediaUri( TestAudioEncoderConfigurationOptions audioTest, string audioCodec, AudioEncoding audioEncoding, StreamType streamType, TransportProtocol protocol, IPType?multicastAddressType) { Profile[] profiles = GetProfiles(); Profile profile = null; AudioEncoderConfigurationOptions audioOptions = null; int bitrate = 0; int sampleRate = 0; RunStep(() => { foreach (Profile p in profiles) { if (p.AudioEncoderConfiguration != null && p.AudioSourceConfiguration != null) { LogStepEvent("GetAudioEncoderConfigurationOptions"); audioOptions = Client.GetAudioEncoderConfigurationOptions(p.AudioEncoderConfiguration.token, p.token); DoRequestDelay(); if (audioTest(audioOptions)) { profile = p; LogStepEvent("OK - profile found"); break; } } else { LogStepEvent("GetAudioEncoderConfigurations"); AudioEncoderConfiguration[] audioEncoderConfigurations = Client.GetAudioEncoderConfigurations(); DoRequestDelay(); bool audioEncoderConfigurationFound = false; foreach (AudioEncoderConfiguration configuration in audioEncoderConfigurations) { LogStepEvent("GetAudioEncoderConfigurationOptions"); audioOptions = Client.GetAudioEncoderConfigurationOptions(configuration.token, p.token); DoRequestDelay(); if (audioTest(audioOptions)) { if (p.AudioSourceConfiguration == null) { AudioSourceConfiguration[] audioSourceConfigurations = Client.GetAudioSourceConfigurations(); DoRequestDelay(); if (audioSourceConfigurations.Length > 0) { LogStepEvent("AddAudioSourceConfiguration"); Client.AddAudioSourceConfiguration(p.token, audioSourceConfigurations[0].token); DoRequestDelay(); } else { throw new DutPropertiesException("Audio Source Configurations not found"); } } LogStepEvent("AddAudioEncoderConfiguration"); Client.AddAudioEncoderConfiguration(p.token, configuration.token); DoRequestDelay(); p.AudioEncoderConfiguration = configuration; profile = p; LogStepEvent(string.Format("Add Audio configuration to the {0} profile - OK", profile.Name)); audioEncoderConfigurationFound = true; break; } } if (!audioEncoderConfigurationFound) { throw new DutPropertiesException("Audio Encoder Configuration with required properties not found"); } } } if (profile == null) { throw new DutPropertiesException("Respective profile cannot be found or created"); } }, string.Format("Select or create profile with {0} Audio encoder configuration", audioCodec)); // find nearest bitrate and samplerate bitrate = FindNearestAudioBitrate(profile.AudioEncoderConfiguration.Bitrate, audioEncoding, audioOptions); sampleRate = FindNearestAudioSamplerate(profile.AudioEncoderConfiguration.SampleRate, audioEncoding, audioOptions); if (multicastAddressType.HasValue) { SetMulticastSettings(profile, false, true, multicastAddressType.Value); } profile.AudioEncoderConfiguration.Encoding = audioEncoding; profile.AudioEncoderConfiguration.Bitrate = bitrate; profile.AudioEncoderConfiguration.SampleRate = sampleRate; SetAudioEncoderConfiguration(profile.AudioEncoderConfiguration, false, multicastAddressType.HasValue); StreamSetup streamSetup = new StreamSetup(); streamSetup.Transport = new Transport(); streamSetup.Transport.Protocol = protocol; streamSetup.Stream = streamType; UsedProfileToken = profile.token; MediaUri streamUri = GetStreamUri(streamSetup, profile.token); AdjustVideo(protocol, streamType, streamUri, profile.VideoEncoderConfiguration); return(streamUri); }
MediaUri GetAudioVideoMediaUri(TestVideoEncoderConfigurationOptions videoTest, string videoCodec, VideoEncoding encoding, TestAudioEncoderConfigurationOptions audioTest, string audioCodec, AudioEncoding audioEncoding, StreamType streamType, TransportProtocol protocol, IPType?multicastAddressType) { Profile[] profiles = GetProfiles(); Profile profile = null; VideoEncoderConfigurationOptions options = null; int bitrate = 0; int sampleRate = 0; RunStep(() => { foreach (Profile p in profiles) { LogStepEvent(string.Format("Check if {0} profile satisfies current needs", p.Name)); if (p.VideoEncoderConfiguration != null) { LogStepEvent("GetVideoEncoderConfigurationOptions"); VideoEncoderConfigurationOptions videoOptions = Client.GetVideoEncoderConfigurationOptions(p.VideoEncoderConfiguration.token, p.token); DoRequestDelay(); if (videoTest(videoOptions)) { // Video configuration OK - configure Audio, if needed. options = videoOptions; if (p.AudioEncoderConfiguration != null && p.AudioSourceConfiguration != null) { LogStepEvent("GetAudioEncoderConfigurationOptions"); AudioEncoderConfigurationOptions audioOptions = Client.GetAudioEncoderConfigurationOptions(p.AudioEncoderConfiguration.token, p.token); DoRequestDelay(); if (audioTest(audioOptions)) { profile = p; LogStepEvent("OK - profile found"); // find nearest bitrate and samplerate bitrate = FindNearestAudioBitrate(p.AudioEncoderConfiguration.Bitrate, audioEncoding, audioOptions); sampleRate = FindNearestAudioSamplerate(p.AudioEncoderConfiguration.SampleRate, audioEncoding, audioOptions); break; } } else { LogStepEvent("GetAudioEncoderConfigurations"); AudioEncoderConfiguration[] audioEncoderConfigurations = Client.GetAudioEncoderConfigurations(); DoRequestDelay(); bool audioEncoderConfigurationFound = false; foreach (AudioEncoderConfiguration configuration in audioEncoderConfigurations) { LogStepEvent("GetAudioEncoderConfigurationOptions"); AudioEncoderConfigurationOptions audioOptions = Client.GetAudioEncoderConfigurationOptions(configuration.token, p.token); DoRequestDelay(); if (audioTest(audioOptions)) { if (p.AudioSourceConfiguration == null) { AudioSourceConfiguration[] audioSourceConfigurations = Client.GetAudioSourceConfigurations(); DoRequestDelay(); if (audioSourceConfigurations.Length > 0) { LogStepEvent("AddAudioSourceConfiguration"); Client.AddAudioSourceConfiguration(p.token, audioSourceConfigurations[0].token); DoRequestDelay(); } else { throw new DutPropertiesException("Audio Source Configurations not found"); } } bitrate = FindNearestAudioBitrate(configuration.Bitrate, audioEncoding, audioOptions); sampleRate = FindNearestAudioSamplerate(configuration.SampleRate, audioEncoding, audioOptions); LogStepEvent("AddAudioEncoderConfiguration"); Client.AddAudioEncoderConfiguration(p.token, configuration.token); DoRequestDelay(); p.AudioEncoderConfiguration = configuration; profile = p; LogStepEvent(string.Format("Add Audio configuration to the {0} profile - OK", profile.Name)); audioEncoderConfigurationFound = true; break; } } if (!audioEncoderConfigurationFound) { throw new DutPropertiesException("Audio Encoder Configuration with required properties not found"); } } } } } if (profile == null) { throw new DutPropertiesException("Respective profile cannot be found or created"); } }, string.Format("Select or create profile with {0} Video encoder configuration and {1} Audio encoder configuration", videoCodec, audioCodec)); // profile found profile.VideoEncoderConfiguration.Encoding = encoding; // support for extensions (bitrate limits) 
// fix for Panasonic if (encoding == VideoEncoding.JPEG) { profile.VideoEncoderConfiguration.MPEG4 = null; profile.VideoEncoderConfiguration.H264 = null; // support for extensions (bitrate limits) if (options.Extension != null) { if (options.Extension.JPEG != null) { if (options.Extension.JPEG.BitrateRange != null) { if (profile.VideoEncoderConfiguration.RateControl.BitrateLimit < options.Extension.JPEG.BitrateRange.Min) { profile.VideoEncoderConfiguration.RateControl.BitrateLimit = options.Extension.JPEG.BitrateRange.Min; } if (profile.VideoEncoderConfiguration.RateControl.BitrateLimit > options.Extension.JPEG.BitrateRange.Max) { profile.VideoEncoderConfiguration.RateControl.BitrateLimit = options.Extension.JPEG.BitrateRange.Max; } } } } } if (encoding == VideoEncoding.MPEG4) { profile.VideoEncoderConfiguration.H264 = null; // support for extensions (bitrate limits) if (options.Extension != null) { if (options.Extension.MPEG4 != null) { if (options.Extension.MPEG4.BitrateRange != null) { if (profile.VideoEncoderConfiguration.RateControl.BitrateLimit < options.Extension.MPEG4.BitrateRange.Min) { profile.VideoEncoderConfiguration.RateControl.BitrateLimit = options.Extension.MPEG4.BitrateRange.Min; } if (profile.VideoEncoderConfiguration.RateControl.BitrateLimit > options.Extension.MPEG4.BitrateRange.Max) { profile.VideoEncoderConfiguration.RateControl.BitrateLimit = options.Extension.MPEG4.BitrateRange.Max; } } } } } if (encoding == VideoEncoding.H264) { profile.VideoEncoderConfiguration.MPEG4 = null; // support for extensions (bitrate limits) if (options.Extension != null) { if (options.Extension.H264 != null) { if (options.Extension.H264.BitrateRange != null) { if (profile.VideoEncoderConfiguration.RateControl.BitrateLimit < options.Extension.H264.BitrateRange.Min) { profile.VideoEncoderConfiguration.RateControl.BitrateLimit = options.Extension.H264.BitrateRange.Min; } if (profile.VideoEncoderConfiguration.RateControl.BitrateLimit > options.Extension.H264.BitrateRange.Max) { profile.VideoEncoderConfiguration.RateControl.BitrateLimit = options.Extension.H264.BitrateRange.Max; } } } } } if (multicastAddressType.HasValue) { SetMulticastSettings(profile, true, true, multicastAddressType.Value); } SetVideoEncoderConfiguration(profile.VideoEncoderConfiguration, false, multicastAddressType.HasValue); profile.AudioEncoderConfiguration.Encoding = audioEncoding; profile.AudioEncoderConfiguration.Bitrate = bitrate; profile.AudioEncoderConfiguration.SampleRate = sampleRate; SetAudioEncoderConfiguration(profile.AudioEncoderConfiguration, false, multicastAddressType.HasValue); StreamSetup streamSetup = new StreamSetup(); streamSetup.Transport = new Transport(); streamSetup.Transport.Protocol = protocol; streamSetup.Stream = streamType; UsedProfileToken = profile.token; MediaUri streamUri = GetStreamUri(streamSetup, profile.token); AdjustVideo(protocol, streamType, streamUri, profile.VideoEncoderConfiguration); return(streamUri); }
public VideoListItemStatusEnum IsHigherQualityAvailableInternal(BestFormatInfo serverFile, string localFile, FFmpegProcess InfoReader)
{
    string LocalFileExt = Path.GetExtension(localFile).ToLower();
    // If the local file is FLV and another format is available, it should be downloaded.
    if (LocalFileExt == ".flv" && serverFile.BestVideo.Container != Container.Flv)
    {
        return VideoListItemStatusEnum.HigherQualityAvailable;
    }
    // Original VCD files and files with unrecognized extensions should not be replaced.
    if (!DownloadManager.DownloadedExtensions.Contains(LocalFileExt) || InfoReader?.VideoStream?.Format == "mpeg1video")
    {
        serverFile.StatusText = "Not from YouTube";
        return VideoListItemStatusEnum.OK;
    }
    // For the server file size, estimate 4% extra for audio and a 30% size advantage for the VP9 format.
    // Non-DASH WebM is VP8 and doesn't get that bonus.
    long ServerFileSize = (long)(serverFile.BestVideo.Size * 1.04);
    if (DownloadManager.GetVideoEncoding(serverFile.BestVideo) == VideoEncoding.Vp9)
    {
        ServerFileSize = (long)(ServerFileSize * 1.3);
    }
    long LocalFileSize = new FileInfo(localFile).Length;
    if (InfoReader?.VideoStream?.Format == "vp9")
    {
        LocalFileSize = (long)(LocalFileSize * 1.3);
    }
    // If the server resolution is better, download unless the local file is bigger.
    int LocalFileHeight = InfoReader?.VideoStream?.Height ?? 0;
    if (DownloadManager.GetVideoHeight(serverFile.BestVideo) > LocalFileHeight)
    {
        if (ServerFileSize > LocalFileSize)
        {
            return VideoListItemStatusEnum.HigherQualityAvailable;
        }
        else if (ServerFileSize != 0)
        {
            return VideoListItemStatusEnum.OK;
        }
    }
    else if (DownloadManager.GetVideoHeight(serverFile.BestVideo) < LocalFileHeight)
    {
        // If the local resolution is higher, keep the local file.
        return VideoListItemStatusEnum.OK;
    }

    // Choose whether to download only audio, only video, or both.
    bool DownloadVideo = false;
    bool DownloadAudio = false;
    // If the estimated server file size is at least 15% larger than the local file (at the same resolution), download.
    if (ServerFileSize > LocalFileSize * 1.15)
    {
        DownloadVideo = true;
    }
    // If PreferredFormat is set to a format, download that format.
    else if (Manager != null)
    {
        if (Manager.Options.PreferredFormat == SelectStreamFormat.MP4 && InfoReader.VideoStream?.Format == "vp9" && (DownloadManager.GetVideoEncoding(serverFile.BestVideo) == VideoEncoding.H264 || DownloadManager.GetVideoEncoding(serverFile.BestVideo) == VideoEncoding.H263))
        {
            DownloadVideo = true;
        }
        else if (Manager.Options.PreferredFormat == SelectStreamFormat.VP9 && InfoReader.VideoStream?.Format == "h264" && (DownloadManager.GetVideoEncoding(serverFile.BestVideo) == VideoEncoding.Vp9 || DownloadManager.GetVideoEncoding(serverFile.BestVideo) == VideoEncoding.Vp8))
        {
            DownloadVideo = true;
        }
    }

    if (InfoReader.AudioStream == null)
    {
        DownloadAudio = true;
    }
    // Can only upgrade if the video length is the same.
    else if (Math.Abs((InfoReader.FileDuration - serverFile.Duration).TotalSeconds) < 1)
    {
        // Download audio and merge with the local video.
        string LocalAudio = InfoReader.AudioStream.Format;
        AudioEncoding RemoteAudio = serverFile.BestAudio?.AudioEncoding ?? AudioEncoding.Aac;
        if (InfoReader.AudioStream.Bitrate == 0 && (LocalAudio == "opus" || LocalAudio == "vorbis"))
        {
            InfoReader.AudioStream.Bitrate = 160; // FFmpeg doesn't report the bitrate of Opus and Vorbis audio, but it's 160.
        }
        if (InfoReader.AudioStream.Bitrate == 0)
        {
            InfoReader.AudioStream.Bitrate = GetAudioBitrateMuxe(localFile);
        }
        int LocalAudioBitRate = InfoReader.AudioStream.Bitrate;
        long ServerAudioBitRate = serverFile.BestAudio != null ? serverFile.BestAudio.Bitrate / 1024 : 0;
        // MediaInfo returns no bitrate for MKV containers with AAC audio.
        InfoReader.AudioStream.Bitrate = 160;
        if (LocalAudioBitRate > 0 || LocalFileExt != ".mkv")
        {
            if (LocalAudioBitRate == 0 || LocalAudioBitRate < ServerAudioBitRate * .8)
            {
                DownloadAudio = true;
            }
        }
        if ((LocalAudio == "opus" && RemoteAudio == AudioEncoding.Vorbis) || (LocalAudio == "vorbis" && RemoteAudio == AudioEncoding.Opus))
        {
            DownloadAudio = true;
        }
    }
    else
    {
        DownloadAudio = DownloadVideo;
    }

    if (DownloadVideo && DownloadAudio)
    {
        return VideoListItemStatusEnum.HigherQualityAvailable;
    }
    else if (DownloadVideo)
    {
        return VideoListItemStatusEnum.BetterVideoAvailable;
    }
    else if (DownloadAudio)
    {
        return VideoListItemStatusEnum.BetterAudioAvailable;
    }
    return VideoListItemStatusEnum.OK;
}
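The tail of the method collapses the two flags into a status; restated as a pure function for clarity (function name hypothetical):

static VideoListItemStatusEnum ToStatus(bool downloadVideo, bool downloadAudio)
{
    if (downloadVideo && downloadAudio) { return VideoListItemStatusEnum.HigherQualityAvailable; }
    if (downloadVideo) { return VideoListItemStatusEnum.BetterVideoAvailable; }
    if (downloadAudio) { return VideoListItemStatusEnum.BetterAudioAvailable; }
    return VideoListItemStatusEnum.OK;
}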
public void setEncoding(AudioEncoding encoding)
{
    this.encoding = encoding;
}
/// <summary> /// Get an EncodeJob model for a LibHB Encode. /// </summary> /// <param name="task"> /// The task. /// </param> /// <param name="configuration"> /// The configuration. /// </param> /// <returns> /// An Interop.EncodeJob model. /// </returns> public static EncodeJob GetEncodeJob(EncodeTask task, HBConfiguration configuration) { // The current Job Configuration EncodeTask work = task; // Which will be converted to this EncodeJob Model. EncodeJob job = new EncodeJob(); // Audio Settings job.AudioEncodings = new List <AudioEncoding>(); foreach (AudioTrack track in work.AudioTracks) { AudioEncoding newTrack = new AudioEncoding { Bitrate = track.Bitrate, Drc = track.DRC, Gain = track.Gain, Encoder = Converters.GetCliAudioEncoder(track.Encoder), InputNumber = track.Track.HasValue ? track.Track.Value : 0, Mixdown = Converters.GetCliMixDown(track.MixDown), SampleRateRaw = GetSampleRateRaw(track.SampleRate), EncodeRateType = AudioEncodeRateType.Bitrate, Name = track.TrackName, IsPassthru = track.IsPassthru, }; job.AudioEncodings.Add(newTrack); } // Title Settings job.OutputPath = work.Destination; job.SourcePath = work.Source; job.Title = work.Title; // job.SourceType = work.Type; switch (work.PointToPointMode) { case PointToPointMode.Chapters: job.RangeType = VideoRangeType.Chapters; break; case PointToPointMode.Seconds: job.RangeType = VideoRangeType.Seconds; break; case PointToPointMode.Frames: job.RangeType = VideoRangeType.Frames; break; case PointToPointMode.Preview: job.RangeType = VideoRangeType.Preview; break; } if (work.PointToPointMode == PointToPointMode.Seconds) { job.SecondsEnd = work.EndPoint; job.SecondsStart = work.StartPoint; } if (work.PointToPointMode == PointToPointMode.Chapters) { job.ChapterStart = work.StartPoint; job.ChapterEnd = work.EndPoint; } if (work.PointToPointMode == PointToPointMode.Frames) { job.FramesEnd = work.EndPoint; job.FramesStart = work.StartPoint; } if (work.PointToPointMode == PointToPointMode.Preview) { job.StartAtPreview = work.PreviewEncodeStartAt.HasValue ? work.PreviewEncodeStartAt.Value + 1 : 1; job.SecondsEnd = work.PreviewEncodeDuration.HasValue ? work.PreviewEncodeDuration.Value : 30; job.SeekPoints = configuration.PreviewScanCount; } job.Angle = work.Angle; // Output Settings job.IPod5GSupport = work.IPod5GSupport; job.Optimize = work.OptimizeMP4; switch (work.OutputFormat) { case OutputFormat.Mp4: job.ContainerName = "av_mp4"; // TODO make part of enum. break; case OutputFormat.Mkv: job.ContainerName = "av_mkv"; // TODO make part of enum. break; } // Picture Settings job.Anamorphic = work.Anamorphic; job.Cropping = new Cropping { Top = work.Cropping.Top, Bottom = work.Cropping.Bottom, Left = work.Cropping.Left, Right = work.Cropping.Right }; job.DisplayWidth = work.DisplayWidth.HasValue ? int.Parse(Math.Round(work.DisplayWidth.Value, 0).ToString()) : 0; job.PixelAspectX = work.PixelAspectX; job.PixelAspectY = work.PixelAspectY; job.Height = work.Height.HasValue ? work.Height.Value : 0; job.KeepDisplayAspect = work.KeepDisplayAspect; job.MaxHeight = work.MaxHeight.HasValue ? work.MaxHeight.Value : 0; job.MaxWidth = work.MaxWidth.HasValue ? work.MaxWidth.Value : 0; job.Modulus = work.Modulus.HasValue ? work.Modulus.Value : 16; job.UseDisplayWidth = true; job.Width = work.Width.HasValue ? 
work.Width.Value : 0; // Filter Settings job.CustomDecomb = work.CustomDecomb; job.CustomDeinterlace = work.CustomDeinterlace; job.CustomDenoise = work.CustomDenoise; job.DenoisePreset = work.DenoisePreset.ToString().ToLower().Replace(" ", string.Empty); job.DenoiseTune = work.DenoiseTune.ToString().ToLower().Replace(" ", string.Empty); job.CustomDetelecine = work.CustomDetelecine; if (work.Deblock > 4) { job.Deblock = work.Deblock; } job.Decomb = work.Decomb; job.Deinterlace = work.Deinterlace; job.Denoise = work.Denoise; job.Detelecine = work.Detelecine; job.Grayscale = work.Grayscale; // Video Settings job.Framerate = work.Framerate.HasValue ? work.Framerate.Value : 0; job.ConstantFramerate = work.FramerateMode == FramerateMode.CFR; job.PeakFramerate = work.FramerateMode == FramerateMode.PFR; job.Quality = work.Quality.HasValue ? work.Quality.Value : 0; job.VideoBitrate = work.VideoBitrate.HasValue ? work.VideoBitrate.Value : 0; job.VideoEncodeRateType = work.VideoEncodeRateType; job.VideoEncoder = Converters.GetVideoEncoder(work.VideoEncoder); job.TwoPass = work.TwoPass; job.TurboFirstPass = work.TurboFirstPass; if (work.VideoEncoder == VideoEncoder.X264 || work.VideoEncoder == VideoEncoder.X265 || work.VideoEncoder == VideoEncoder.QuickSync) { job.VideoPreset = work.VideoPreset.ShortName; job.VideoProfile = work.VideoProfile.ShortName; job.VideoLevel = work.VideoLevel.ShortName; if (work.VideoEncoder != VideoEncoder.QuickSync) { job.VideoTunes = new List <string>(); foreach (var item in work.VideoTunes) { job.VideoTunes.Add(item.ShortName); } } } // Chapter Markers job.IncludeChapterMarkers = work.IncludeChapterMarkers; job.CustomChapterNames = work.ChapterNames.Select(item => item.ChapterName).ToList(); job.UseDefaultChapterNames = work.IncludeChapterMarkers; // Advanced Settings job.VideoOptions = work.ShowAdvancedTab ? work.AdvancedEncoderOptions : work.ExtraAdvancedArguments; // Subtitles job.Subtitles = new Subtitles { SourceSubtitles = new List <SourceSubtitle>(), SrtSubtitles = new List <SrtSubtitle>() }; foreach (SubtitleTrack track in work.SubtitleTracks) { if (track.IsSrtSubtitle) { job.Subtitles.SrtSubtitles.Add( new SrtSubtitle { CharacterCode = track.SrtCharCode, Default = track.Default, FileName = track.SrtPath, LanguageCode = track.SrtLang, Offset = track.SrtOffset, BurnedIn = track.Burned }); } else { if (track.SourceTrack != null) { job.Subtitles.SourceSubtitles.Add( new SourceSubtitle { BurnedIn = track.Burned, Default = track.Default, Forced = track.Forced, TrackNumber = track.SourceTrack.TrackNumber }); } } } return(job); }
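A hypothetical call site for the conversion above (the containing class name is an assumption; the EncodeTask and HBConfiguration instances come from the surrounding HandBrake GUI layer):

// Sketch only: task and configuration are populated elsewhere by the UI.
EncodeJob job = InteropModelCreator.GetEncodeJob(task, configuration);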
public static Preset ConvertHandBrakePresetToVC(HBPreset hbPreset) { var profile = new VCProfile(); // Container profile.ContainerName = hbPreset.FileFormat; profile.PreferredExtension = VCOutputExtension.Mp4; profile.IncludeChapterMarkers = hbPreset.ChapterMarkers; profile.AlignAVStart = hbPreset.AlignAVStart; profile.Optimize = hbPreset.Mp4HttpOptimize; profile.IPod5GSupport = hbPreset.Mp4iPodCompatible; // Sizing profile.Cropping = new VCCropping(hbPreset.PictureTopCrop, hbPreset.PictureBottomCrop, hbPreset.PictureLeftCrop, hbPreset.PictureRightCrop); profile.CroppingType = hbPreset.PictureAutoCrop ? VCCroppingType.Automatic : VCCroppingType.Custom; profile.SizingMode = VCSizingMode.Automatic; profile.ScalingMode = VCScalingMode.DownscaleOnly; profile.UseAnamorphic = hbPreset.PicturePAR != "off"; profile.PixelAspectX = 1; profile.PixelAspectY = 1; profile.Width = hbPreset.PictureWidth ?? 0; profile.Height = hbPreset.PictureHeight ?? 0; profile.PaddingMode = VCPaddingMode.None; profile.Modulus = hbPreset.PictureModulus; profile.Rotation = VCPictureRotation.None; profile.FlipHorizontal = false; profile.FlipVertical = false; // Video filters profile.Grayscale = hbPreset.VideoGrayScale; profile.Detelecine = hbPreset.PictureDetelecine; profile.CustomDetelecine = hbPreset.PictureDetelecineCustom; switch (hbPreset.PictureDeinterlaceFilter) { case "yadif": profile.DeinterlaceType = VCDeinterlace.Yadif; break; case "decomb": profile.DeinterlaceType = VCDeinterlace.Decomb; break; case "off": default: profile.DeinterlaceType = VCDeinterlace.Off; break; } profile.DeinterlacePreset = hbPreset.PictureDeinterlacePreset; profile.CustomDeinterlace = hbPreset.PictureDeinterlaceCustom; profile.CombDetect = hbPreset.PictureCombDetectPreset; profile.CustomCombDetect = hbPreset.PictureCombDetectCustom; // Video encoding profile.VideoEncoder = hbPreset.VideoEncoder; profile.VideoProfile = hbPreset.VideoProfile; profile.VideoPreset = hbPreset.VideoPreset; profile.VideoLevel = hbPreset.VideoLevel; profile.VideoTunes = new List <string>(); if (!string.IsNullOrEmpty(hbPreset.VideoTune)) { profile.VideoTunes.Add(hbPreset.VideoTune); } profile.VideoOptions = hbPreset.VideoOptionExtra; profile.TwoPass = hbPreset.VideoTwoPass; profile.TurboFirstPass = hbPreset.VideoTurboTwoPass; profile.Quality = hbPreset.VideoQualitySlider; profile.VideoBitrate = hbPreset.VideoAvgBitrate ?? 
0; profile.QsvDecode = false; switch (hbPreset.VideoQualityType) { case 0: profile.VideoEncodeRateType = VCVideoEncodeRateType.TargetSize; break; case 1: profile.VideoEncodeRateType = VCVideoEncodeRateType.AverageBitrate; break; case 2: default: profile.VideoEncodeRateType = VCVideoEncodeRateType.ConstantQuality; break; } double parsedFramerate; double.TryParse(hbPreset.VideoFramerate, out parsedFramerate); switch (hbPreset.VideoFramerateMode) { case "cfr": profile.ConstantFramerate = true; profile.Framerate = parsedFramerate; break; case "pfr": profile.ConstantFramerate = false; profile.Framerate = parsedFramerate; break; case "vfr": default: profile.ConstantFramerate = false; profile.Framerate = 0; break; } profile.TargetSize = 0; // Audio profile.AudioCopyMask = new List <CopyMaskChoice>(); foreach (string audioCodec in hbPreset.AudioCopyMask) { string codec; if (audioCodec.StartsWith("copy:", StringComparison.Ordinal)) { codec = audioCodec.Substring(5); } else { codec = audioCodec; } profile.AudioCopyMask.Add(new CopyMaskChoice { Codec = codec, Enabled = true }); } profile.AudioEncoderFallback = hbPreset.AudioEncoderFallback; profile.AudioEncodings = new List <AudioEncoding>(); foreach (var hbAudio in hbPreset.AudioList) { var audioEncoding = new AudioEncoding(); audioEncoding.InputNumber = 0; audioEncoding.Encoder = hbAudio.AudioEncoder; audioEncoding.Bitrate = hbAudio.AudioBitrate; audioEncoding.PassthroughIfPossible = false; audioEncoding.Mixdown = hbAudio.AudioMixdown; audioEncoding.Quality = (float)hbAudio.AudioTrackQuality; audioEncoding.EncodeRateType = hbAudio.AudioTrackQualityEnable ? AudioEncodeRateType.Quality : AudioEncodeRateType.Bitrate; audioEncoding.Compression = (float)hbAudio.AudioCompressionLevel; if (hbAudio.AudioSamplerate == "auto") { audioEncoding.SampleRateRaw = 0; } else { double parsedSampleRate; if (double.TryParse(hbAudio.AudioSamplerate, out parsedSampleRate)) { audioEncoding.SampleRateRaw = (int)(parsedSampleRate * 1000); } else { audioEncoding.SampleRateRaw = 0; } } audioEncoding.Gain = (int)hbAudio.AudioTrackGainSlider; audioEncoding.Drc = hbAudio.AudioTrackDRCSlider; profile.AudioEncodings.Add(audioEncoding); } return(new Preset { Name = hbPreset.PresetName, EncodingProfile = profile, IsBuiltIn = true, }); }
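A hypothetical call site, assuming hbPreset was deserialized elsewhere from a HandBrake preset JSON file:

Preset vcPreset = ConvertHandBrakePresetToVC(hbPreset);
Console.WriteLine($"{vcPreset.Name} -> {vcPreset.EncodingProfile.VideoEncoder}");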
public AudioEncodingViewModel(AudioEncoding audioEncoding, Title selectedTitle, List <int> chosenAudioTracks, string containerName, AudioPanelViewModel audioPanelVM) { this.initializing = true; this.audioPanelVM = audioPanelVM; this.targetStreams = new ObservableCollection <TargetStreamViewModel>(); this.targetStreamIndex = audioEncoding.InputNumber; this.SetChosenTracks(chosenAudioTracks, selectedTitle); this.audioEncoders = new List <AudioEncoderViewModel>(); this.mixdownChoices = new List <MixdownViewModel>(); this.containerName = containerName; this.RefreshEncoderChoices(); HBAudioEncoder hbAudioEncoder = Encoders.GetAudioEncoder(audioEncoding.Encoder); if (hbAudioEncoder.IsPassthrough) { this.selectedAudioEncoder = this.audioEncoders[0]; this.selectedPassthrough = audioEncoding.Encoder; } else { this.selectedAudioEncoder = this.audioEncoders.Skip(1).FirstOrDefault(e => e.Encoder.ShortName == audioEncoding.Encoder); this.selectedPassthrough = "copy"; } if (this.selectedAudioEncoder == null) { this.selectedAudioEncoder = this.audioEncoders[1]; } this.RefreshMixdownChoices(); this.RefreshBitrateChoices(); this.RefreshSampleRateChoices(); this.SelectMixdown(Encoders.GetMixdown(audioEncoding.Mixdown)); this.sampleRate = audioEncoding.SampleRateRaw; if (!this.HBAudioEncoder.SupportsQuality) { this.encodeRateType = AudioEncodeRateType.Bitrate; } else { this.encodeRateType = audioEncoding.EncodeRateType; } this.audioQuality = audioEncoding.Quality; if (audioEncoding.Compression >= 0) { this.audioCompression = audioEncoding.Compression; } else { this.audioCompression = this.HBAudioEncoder.DefaultCompression; } this.selectedBitrate = this.BitrateChoices.SingleOrDefault(b => b.Bitrate == audioEncoding.Bitrate); if (this.selectedBitrate == null) { this.selectedBitrate = this.BitrateChoices.First(); } this.gain = audioEncoding.Gain; this.drc = audioEncoding.Drc; this.passthroughIfPossible = audioEncoding.PassthroughIfPossible; this.name = audioEncoding.Name; Messenger.Default.Register <SelectedTitleChangedMessage>( this, message => { this.RefreshMixdownChoices(); this.RefreshBitrateChoices(); this.RefreshDrc(); }); Messenger.Default.Register <AudioInputChangedMessage>( this, message => { this.RefreshMixdownChoices(); this.RefreshBitrateChoices(); this.RefreshDrc(); }); Messenger.Default.Register <OptionsChangedMessage>( this, message => { this.RaisePropertyChanged(() => this.NameVisible); }); Messenger.Default.Register <ContainerChangedMessage>( this, message => { this.containerName = message.ContainerName; this.RefreshEncoderChoices(); }); this.initializing = false; }
internal override void SetWaveFormatTagInternal(AudioEncoding waveFormatTag)
{
    SubFormat = AudioSubTypes.SubTypeFromEncoding(waveFormatTag);
}
private string EncodeFileToMp4(string inputPath, string outputPath, bool encodeAudio = true, Android.Net.Uri inputUri = null) { LatestInputVideoLength = AudioEncoding.GetVideoLength(inputPath, inputUri); LatestAudioInputFormat = AudioEncoding.GetAudioTrackFormat(inputPath, inputUri); EstimateTotalSize(LatestInputVideoLength, _bitRate); try { prepareMediaPlayer(inputPath, inputUri); prepareEncoder(outputPath); _inputSurface.MakeCurrent(); prepareWeakSurfaceTexture(); _mediaPlayer.Start(); _mediaPlayer.SetAudioStreamType(Android.Media.Stream.VoiceCall); _mediaPlayer.SetVolume(0, 0); _frameCount = 0; } catch (System.Exception ex) { Log.Debug("VideoEncoder", ex.Message); } VideoEncodingInProgress = true; while (true) { D(false); _frameCount++; /* * Disable this to make it faster when not debugging */ #if DEBUG if (_frameCount >= 120 && AppSettings.Logging.SendToConsole) { System.Console.WriteLine($"FileToMp4 exited @ {_outputSurface.WeakSurfaceTexture.Timestamp} " + $" | encoded bits {_bitsEncodedSoFar} of estimated {_estimatedTotalSize}"); } #endif // Acquire a new frame of input, and render it to the Surface. If we had a // GLSurfaceView we could switch EGL contexts and call drawImage() a second // time to render it on screen. The texture can be shared between contexts by // passing the GLSurfaceView's EGLContext as eglCreateContext()'s share_context // argument. if (!_outputSurface.AwaitNewImage(true)) { break; } _outputSurface.DrawImage(); // Set the presentation time stamp from the WeakSurfaceTexture's time stamp. This // will be used by MediaMuxer to set the PTS in the video. _inputSurface.SetPresentationTime(_outputSurface.WeakSurfaceTexture.Timestamp); //if (AppSettings.Logging.SendToConsole) Log.Debug("MediaLoop", "Set Time " + st.Timestamp); // Submit it to the encoder. The eglSwapBuffers call will block if the input // is full, which would be bad if it stayed full until we dequeued an output // buffer (which we can't do, since we're stuck here). So long as we fully drain // the encoder before supplying additional input, the system guarantees that we // can supply another frame without blocking. //if (AppSettings.Logging.SendToConsole) Log.Debug(TAG, "sending frame to encoder:"); _inputSurface.SwapBuffers(); if (_bitsEncodedSoFar >= _estimatedTotalSize) { break; } } D(true); VideoEncodingInProgress = false; #if DEBUG if (AppSettings.Logging.SendToConsole) { System.Console.WriteLine($"DrainEncoder started @ {_firstKnownBuffer} exited @ " + $"{_outputSurface.WeakSurfaceTexture.Timestamp} " + $"| encoded bits {_bitsEncodedSoFar} of estimated {_estimatedTotalSize}"); } #endif try { releaseMediaPlayer(); releaseEncoder(); releaseWeakSurfaceTexture(); }catch { } _firstKnownBuffer = 0; _estimatedTotalSize = 0; _frameCount = 0; _bitsEncodedSoFar = 0; _bfi = new BufferInfo(); if (!AudioEncodingInProgress) { _muxer.Stop(); // if the audio encoding isn't still running then we'll stop everything and return _muxer.Release(); _muxer = null; if (File.Exists(outputPath)) { this.Progress.Invoke(new EncoderMinArgs(EncodedBits(_bfi.Size), _estimatedTotalSize, true, false, outputPath)); return(outputPath); } } this.Progress.Invoke(new EncoderMinArgs(EncodedBits(_bfi.Size), _estimatedTotalSize, false, false, null)); return(null); //file isn't finished processing yet }
public static NullAudioSink CreateNullSink(this AudioEncoding encoding, bool isPacketized)
{
    return new NullAudioSink(CreateFormat(encoding, isPacketized));
}
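Usage sketch (the Opus member is assumed here): the extension builds a format via CreateFormat and wraps it in a sink that discards all frames, which is handy in tests.

NullAudioSink sink = AudioEncoding.Opus.CreateNullSink(isPacketized: true);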
public WaveFormat(int sampleRate, int bits, int channels, AudioEncoding encoding)
    : this(sampleRate, bits, channels, encoding, 0)
{
}
private void ReadWaveHeader(Stream stream)
{
    byte[] buffer = new byte[12];
    stream.Read(buffer, 0, 12);

    if (Encoding.UTF8.GetString(buffer, 0, 4) != "RIFF")
    {
        Debug.LogWarning("Invalid riff header");
        return;
    }
    uint riffSize = BitConverter.ToUInt32(buffer, 4);
    if (Encoding.UTF8.GetString(buffer, 8, 4) != "WAVE")
    {
        Debug.LogWarning("Invalid wave header");
        return;
    }

    bool readChunks = false;
    string chunk;
    while (!readChunks)
    {
        stream.Read(buffer, 0, 4);
        chunk = Encoding.UTF8.GetString(buffer, 0, 4);
        if (chunk == "fmt ")
        {
            // 4-byte chunk size followed by the 16-byte WAVEFORMATEX-style block.
            buffer = new byte[20];
            stream.Read(buffer, 0, 20);
            chunkSize = (int)BitConverter.ToUInt32(buffer, 0);
            encoding = (AudioEncoding)BitConverter.ToUInt16(buffer, 4);
            channels = BitConverter.ToUInt16(buffer, 6);
            sampleRate = BitConverter.ToInt32(buffer, 8);
            uint bytesPerSec = BitConverter.ToUInt32(buffer, 12);
            ushort frameSize = BitConverter.ToUInt16(buffer, 16); // nBlockAlign
            ushort bits = BitConverter.ToUInt16(buffer, 18);      // wBitsPerSample
            if (encoding == AudioEncoding.ADPCM)
            {
                // Extra bytes: 2-byte cbSize followed by the codec-specific blob.
                stream.Read(buffer, 0, 2);
                ushort blobSize = BitConverter.ToUInt16(buffer, 0);
                byte[] blobData = new byte[blobSize];
                stream.Read(blobData, 0, blobSize);
            }
        }
        else if (chunk == "fact")
        {
            stream.Read(buffer, 0, 8);
            uint factSize = BitConverter.ToUInt32(buffer, 0);
            uint factBOH = BitConverter.ToUInt32(buffer, 4);
        }
        else if (chunk == "data")
        {
            stream.Read(buffer, 0, 4);
            uint dataSize = BitConverter.ToUInt32(buffer, 0);
            dataOffset = stream.Position;
        }
        else
        {
            readChunks = true;
        }
    }
    dataStream = ReadDataStream(stream);
}
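For reference, the canonical PCM "fmt " chunk layout the reader expects, as byte offsets within the 20-byte buffer above (the 4-byte chunk size precedes the format fields):

// offset 0   UInt32 chunk size (16 for plain PCM)
// offset 4   UInt16 wFormatTag      -> AudioEncoding
// offset 6   UInt16 nChannels
// offset 8   UInt32 nSamplesPerSec
// offset 12  UInt32 nAvgBytesPerSec
// offset 16  UInt16 nBlockAlign     -> frameSize
// offset 18  UInt16 wBitsPerSample  -> bits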
public AudioEncodingViewModel(AudioEncoding audioEncoding, SourceTitle selectedTitle, List <int> chosenAudioTracks, AudioPanelViewModel audioPanelVM) { this.initializing = true; this.audioPanelVM = audioPanelVM; this.targetStreams = new ObservableCollection <TargetStreamViewModel>(); this.targetStreamIndex = audioEncoding.InputNumber; this.SetChosenTracks(chosenAudioTracks, selectedTitle); this.audioEncoders = new List <AudioEncoderViewModel>(); this.mixdownChoices = new List <MixdownViewModel>(); this.RefreshEncoderChoices(); this.presetChangeSubscription = this.presetsService.WhenAnyValue(x => x.SelectedPreset.Preset.EncodingProfile) .Subscribe(profile => { this.containerSubscription?.Dispose(); this.containerSubscription = profile.WhenAnyValue(x => x.ContainerName) .Skip(1) .Subscribe(_ => { this.RefreshEncoderChoices(isContainerChange: true); }); this.audioPanelVM.SuppressProfileEdits = true; this.RefreshEncoderChoices(); this.audioPanelVM.SuppressProfileEdits = false; }); HBAudioEncoder hbAudioEncoder = HandBrakeEncoderHelpers.GetAudioEncoder(audioEncoding.Encoder); if (hbAudioEncoder.IsPassthrough) { this.selectedAudioEncoder = this.audioEncoders[0]; this.selectedPassthrough = audioEncoding.Encoder; } else { this.selectedAudioEncoder = this.audioEncoders.Skip(1).FirstOrDefault(e => e.Encoder.ShortName == audioEncoding.Encoder); this.selectedPassthrough = "copy"; } if (this.selectedAudioEncoder == null) { this.selectedAudioEncoder = this.audioEncoders[1]; } this.RefreshMixdownChoices(); this.RefreshBitrateChoices(); this.RefreshSampleRateChoices(); this.SelectMixdown(HandBrakeEncoderHelpers.GetMixdown(audioEncoding.Mixdown)); this.sampleRate = audioEncoding.SampleRateRaw; if (!this.HBAudioEncoder.SupportsQuality) { this.encodeRateType = AudioEncodeRateType.Bitrate; } else { this.encodeRateType = audioEncoding.EncodeRateType; } this.audioQuality = audioEncoding.Quality; if (audioEncoding.Compression >= 0) { this.audioCompression = audioEncoding.Compression; } else { this.audioCompression = this.HBAudioEncoder.DefaultCompression; } this.selectedBitrate = this.BitrateChoices.SingleOrDefault(b => b.Bitrate == audioEncoding.Bitrate); if (this.selectedBitrate == null) { this.selectedBitrate = this.BitrateChoices.First(); } this.gain = audioEncoding.Gain; this.drc = audioEncoding.Drc; this.passthroughIfPossible = audioEncoding.PassthroughIfPossible; this.name = audioEncoding.Name; // EncoderSettingsVisible this.WhenAnyValue(x => x.SelectedAudioEncoder, x => x.SelectedPassthrough, (audioEncoder, passthrough) => { if (audioEncoder == null) { return(false); } if (passthrough == null) { return(false); } return(!GetHBAudioEncoder(audioEncoder, passthrough).IsPassthrough); }).ToProperty(this, x => x.EncoderSettingsVisible, out this.encoderSettingsVisible); // AudioCompressionVisible this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { return(!audioEncoder.IsPassthrough && audioEncoder.Encoder.SupportsCompression); }).ToProperty(this, x => x.AudioCompressionVisible, out this.audioCompressionVisible); // PassthroughIfPossibleVisible this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { if (audioEncoder.IsPassthrough) { return(false); } if (HandBrakeEncoderHelpers.AudioEncoders.Any(e => e.Id == (NativeConstants.HB_ACODEC_PASS_FLAG | audioEncoder.Encoder.Id))) { return(true); } return(audioEncoder.Encoder.ShortName.ToLowerInvariant().Contains("aac") || audioEncoder.Encoder.ShortName.ToLowerInvariant().Contains("mp3")); }).ToProperty(this, x => x.PassthroughIfPossibleVisible, out 
this.passthroughIfPossibleVisible); // BitrateVisible this.WhenAnyValue( x => x.SelectedAudioEncoder, x => x.EncoderSettingsVisible, x => x.SelectedMixdown, x => x.EncodeRateType, (audioEncoder, encoderSettingsVisible, mixdown, encodeRateType) => { if (audioEncoder.IsPassthrough) { return(false); } if (encoderSettingsVisible && mixdown != null && encodeRateType == AudioEncodeRateType.Bitrate) { // We only need to find out if the bitrate limits exist, so pass in some normal values for sample rate and mixdown. BitrateLimits bitrateLimits = HandBrakeEncoderHelpers.GetBitrateLimits(audioEncoder.Encoder, 48000, HandBrakeEncoderHelpers.GetMixdown("dpl2")); return(bitrateLimits.High > 0); } return(false); }).ToProperty(this, x => x.BitrateVisible, out this.bitrateVisible); // BitrateLabelVisible this.WhenAnyValue(x => x.BitrateVisible, x => x.SelectedAudioEncoder, (bitrateVisible, audioEncoder) => { return(!audioEncoder.IsPassthrough && bitrateVisible && !audioEncoder.Encoder.SupportsQuality); }).ToProperty(this, x => x.BitrateLabelVisible, out this.bitrateLabelVisible); // AudioQualityVisible this.WhenAnyValue(x => x.SelectedAudioEncoder, x => x.EncodeRateType, (audioEncoder, encodeRateType) => { return(!audioEncoder.IsPassthrough && audioEncoder.Encoder.SupportsQuality && encodeRateType == AudioEncodeRateType.Quality); }).ToProperty(this, x => x.AudioQualityVisible, out this.audioQualityVisible); // AudioQualityRadioVisible this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { return(!audioEncoder.IsPassthrough && audioEncoder.Encoder.SupportsQuality); }).ToProperty(this, x => x.AudioQualityRadioVisible, out this.audioQualityRadioVisible); // AudioQualityMinimum this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { if (audioEncoder.IsPassthrough) { return(0); } return(Math.Round(audioEncoder.Encoder.QualityLimits.Low, RangeRoundDigits)); }).ToProperty(this, x => x.AudioQualityMinimum, out this.audioQualityMinimum); // AudioQualityMaximum this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { if (audioEncoder.IsPassthrough) { return(0); } return(Math.Round(audioEncoder.Encoder.QualityLimits.High, RangeRoundDigits)); }).ToProperty(this, x => x.AudioQualityMaximum, out this.audioQualityMaximum); // AudioQualityGranularity this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { if (audioEncoder.IsPassthrough) { return(0); } return(Math.Round(audioEncoder.Encoder.QualityLimits.Granularity, RangeRoundDigits)); }).ToProperty(this, x => x.AudioQualityGranularity, out this.audioQualityGranularity); // AudioQualityToolTip this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { if (audioEncoder.IsPassthrough) { return(string.Empty); } string directionSentence; if (audioEncoder.Encoder.QualityLimits.Ascending) { directionSentence = EncodingRes.AscendingQualityToolTip; } else { directionSentence = EncodingRes.DescendingQualityToolTip; } return(string.Format( EncodingRes.AudioQualityToolTip, directionSentence, audioEncoder.Encoder.QualityLimits.Low, audioEncoder.Encoder.QualityLimits.High)); }).ToProperty(this, x => x.AudioQualityToolTip, out this.audioQualityToolTip); // AudioCompressionMinimum this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { if (audioEncoder.IsPassthrough) { return(0); } return(Math.Round(audioEncoder.Encoder.CompressionLimits.Low, RangeRoundDigits)); }).ToProperty(this, x => x.AudioCompressionMinimum, out this.audioCompressionMinimum); // AudioCompressionMaximum this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => 
{ if (audioEncoder.IsPassthrough) { return(0); } return(Math.Round(audioEncoder.Encoder.CompressionLimits.High, RangeRoundDigits)); }).ToProperty(this, x => x.AudioCompressionMaximum, out this.audioCompressionMaximum); // AudioCompressionGranularity this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { if (audioEncoder.IsPassthrough) { return(0); } return(Math.Round(audioEncoder.Encoder.CompressionLimits.Granularity, RangeRoundDigits)); }).ToProperty(this, x => x.AudioCompressionGranularity, out this.audioCompressionGranularity); // AudioCompressionToolTip this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { if (audioEncoder.IsPassthrough) { return(string.Empty); } string directionSentence; if (audioEncoder.Encoder.QualityLimits.Ascending) { directionSentence = EncodingRes.AscendingCompressionToolTip; } else { directionSentence = EncodingRes.DescendingCompressionToolTip; } return(string.Format( EncodingRes.AudioCompressionToolTip, directionSentence, audioEncoder.Encoder.CompressionLimits.Low, audioEncoder.Encoder.CompressionLimits.High)); }).ToProperty(this, x => x.AudioCompressionToolTip, out this.audioCompressionToolTip); // PassthroughChoicesVisible this.WhenAnyValue(x => x.SelectedAudioEncoder, audioEncoder => { return(audioEncoder.IsPassthrough); }).ToProperty(this, x => x.PassthroughChoicesVisible, out this.passthroughChoicesVisible); this.selectedTitleSubscription = this.main.WhenAnyValue(x => x.SelectedTitle) .Skip(1) .Subscribe(_ => { this.RefreshFromNewInput(); }); this.audioTrackChangedSubscription = this.main.AudioTracks.Connect().WhenAnyPropertyChanged().Subscribe(_ => { this.RefreshFromNewInput(); }); Config.Observables.ShowAudioTrackNameField.ToProperty(this, x => x.NameVisible, out this.nameVisible); this.initializing = false; }
/// <summary> /// Get an EncodeJob model for a LibHB Encode. /// </summary> /// <param name="task"> /// The task. /// </param> /// <returns> /// An Interop.EncodeJob model. /// </returns> public static EncodeJob GetEncodeJob(EncodeTask task) { // The current Job Configuration EncodeTask work = task; // Which will be converted to this EncodeJob Model. EncodeJob job = new EncodeJob(); EncodingProfile profile = new EncodingProfile(); job.EncodingProfile = profile; // Audio Settings profile.AudioEncodings = new List <AudioEncoding>(); job.ChosenAudioTracks = new List <int>(); foreach (AudioTrack track in work.AudioTracks) { AudioEncoding newTrack = new AudioEncoding { Bitrate = track.Bitrate, Drc = track.DRC, Gain = track.Gain, Encoder = Converters.GetCliAudioEncoder(track.Encoder), InputNumber = track.Track.HasValue ? track.Track.Value : 0, Mixdown = Converters.GetCliMixDown(track.MixDown), SampleRateRaw = GetSampleRateRaw(track.SampleRate), }; profile.AudioEncodings.Add(newTrack); if (track.Track != null) { job.ChosenAudioTracks.Add(track.Track.Value); } } // Title Settings job.OutputPath = work.Destination; job.SourcePath = work.Source; job.Title = work.Title; // job.SourceType = work.Type; switch (work.PointToPointMode) { case PointToPointMode.Chapters: job.RangeType = VideoRangeType.Chapters; break; case PointToPointMode.Seconds: job.RangeType = VideoRangeType.Seconds; break; case PointToPointMode.Frames: job.RangeType = VideoRangeType.Frames; break; } if (work.PointToPointMode == PointToPointMode.Seconds) { job.SecondsEnd = work.EndPoint; job.SecondsStart = work.StartPoint; } if (work.PointToPointMode == PointToPointMode.Chapters) { job.ChapterStart = work.StartPoint; job.ChapterEnd = work.EndPoint; } if (work.PointToPointMode == PointToPointMode.Frames) { job.FramesEnd = work.EndPoint; job.FramesStart = work.StartPoint; } job.Angle = work.Angle; job.EncodingProfile = profile; // Output Settings profile.IPod5GSupport = work.IPod5GSupport; profile.Optimize = work.OptimizeMP4; switch (work.OutputFormat) { case OutputFormat.Mp4: case OutputFormat.M4V: profile.ContainerName = "av_mp4"; // TODO make part of enum. break; case OutputFormat.Mkv: profile.ContainerName = "av_mkv"; // TODO make part of enum. break; } // Picture Settings profile.Anamorphic = work.Anamorphic; profile.Cropping = new Cropping { Top = work.Cropping.Top, Bottom = work.Cropping.Bottom, Left = work.Cropping.Left, Right = work.Cropping.Right }; profile.CroppingType = CroppingType.Custom; // TODO deal with this better profile.DisplayWidth = work.DisplayWidth.HasValue ? int.Parse(Math.Round(work.DisplayWidth.Value, 0).ToString()) : 0; profile.PixelAspectX = work.PixelAspectX; profile.PixelAspectY = work.PixelAspectY; profile.Height = work.Height.HasValue ? work.Height.Value : 0; profile.KeepDisplayAspect = work.KeepDisplayAspect; profile.MaxHeight = work.MaxHeight.HasValue ? work.MaxHeight.Value : 0; profile.MaxWidth = work.MaxWidth.HasValue ? work.MaxWidth.Value : 0; profile.Modulus = work.Modulus.HasValue ? work.Modulus.Value : 16; profile.UseDisplayWidth = true; profile.Width = work.Width.HasValue ? 
work.Width.Value : 0; // Filter Settings profile.CustomDecomb = work.CustomDecomb; profile.CustomDeinterlace = work.CustomDeinterlace; profile.CustomDenoise = work.CustomDenoise; profile.CustomDetelecine = work.CustomDetelecine; if (work.Deblock > 4) { profile.Deblock = work.Deblock; } profile.Decomb = work.Decomb; profile.Deinterlace = work.Deinterlace; profile.Denoise = work.Denoise; profile.Detelecine = work.Detelecine; profile.Grayscale = work.Grayscale; // Video Settings profile.Framerate = work.Framerate.HasValue ? work.Framerate.Value : 0; profile.ConstantFramerate = work.FramerateMode == FramerateMode.CFR; profile.Quality = work.Quality.HasValue ? work.Quality.Value : 0; profile.VideoBitrate = work.VideoBitrate.HasValue ? work.VideoBitrate.Value : 0; profile.VideoEncodeRateType = work.VideoEncodeRateType; profile.VideoEncoder = Converters.GetVideoEncoder(work.VideoEncoder); profile.H264Level = work.H264Level; profile.X264Profile = work.H264Profile.ToString().ToLower().Replace(" ", string.Empty); // TODO change these away from strings. profile.X264Preset = work.X264Preset.ToString().ToLower().Replace(" ", string.Empty); profile.X264Tunes = new List <string>(); if (work.X264Tune != x264Tune.None) { profile.X264Tunes.Add(work.X264Tune.ToString().ToLower().Replace(" ", string.Empty)); } if (work.FastDecode) { profile.X264Tunes.Add("fastdecode"); } // Chapter Markers profile.IncludeChapterMarkers = work.IncludeChapterMarkers; job.CustomChapterNames = work.ChapterNames.Select(item => item.ChapterName).ToList(); job.UseDefaultChapterNames = work.IncludeChapterMarkers; // Advanced Settings profile.X264Options = work.AdvancedEncoderOptions; // Subtitles job.Subtitles = new Subtitles { SourceSubtitles = new List <SourceSubtitle>(), SrtSubtitles = new List <SrtSubtitle>() }; foreach (SubtitleTrack track in work.SubtitleTracks) { if (track.IsSrtSubtitle) { job.Subtitles.SrtSubtitles.Add( new SrtSubtitle { CharacterCode = track.SrtCharCode, Default = track.Default, FileName = track.SrtFileName, LanguageCode = track.SrtLang, Offset = track.SrtOffset }); } else { if (track.SourceTrack != null) { job.Subtitles.SourceSubtitles.Add( new SourceSubtitle { BurnedIn = track.Burned, Default = track.Default, Forced = track.Forced, TrackNumber = track.SourceTrack.TrackNumber }); } } } return(job); }
internal virtual void SetWaveFormatTagInternal(AudioEncoding waveFormatTag)
{
    WaveFormatTag = waveFormatTag;
}
/// <summary>
/// Initializes a new instance of the <see cref="WaveFormat"/> struct.
/// </summary>
/// <param name="sampleRate">The sample rate.</param>
/// <param name="bitDepth">The bit depth.</param>
/// <param name="channels">The channels.</param>
/// <param name="encoding">The encoding.</param>
/// <param name="extraSize">Size of the extra data.</param>
public WaveFormat(int sampleRate, int bitDepth, int channels, AudioEncoding encoding, int extraSize)
    : this(sampleRate, bitDepth, channels, encoding)
{
    ExtraSize = extraSize;
}
protected void CheckRecording(RecordingServiceCapabilities capabilities, GetRecordingsResponseItem recording, RecordingConfiguration conf) { //var recording = recordings.Where(r => r.RecordingToken == recordingToken).FirstOrDefault(); //Assert(recording != null, // "recording list doesn't contain new recording", // "Check that recording list contains new recording after refresh"); bool ok = true; StringBuilder logger = new StringBuilder(); if (recording.Configuration.MaximumRetentionTime != conf.MaximumRetentionTime) { ok = false; logger.Append(string.Format("MaximumRetentionTime is invalid{0}", Environment.NewLine)); } if (recording.Configuration.Content != conf.Content) { ok = false; logger.Append(string.Format("Content is '{0}' but must be '{1}'{2}", recording.Configuration.Content, conf.Content, Environment.NewLine)); } if (recording.Configuration.Source != null) { if (recording.Configuration.Source.Address != conf.Source.Address) { ok = false; logger.Append(string.Format("Source Address is '{0}' but must be '{1}'{2}", recording.Configuration.Source.Address, conf.Source.Address, Environment.NewLine)); } if (recording.Configuration.Source.Description != conf.Source.Description) { ok = false; logger.Append(string.Format("Source Description is '{0}' but must be '{1}'{2}", recording.Configuration.Source.Description, conf.Source.Description, Environment.NewLine)); } if (recording.Configuration.Source.Location != conf.Source.Location) { ok = false; logger.Append(string.Format("Source Location is '{0}' but must be '{1}'{2}", recording.Configuration.Source.Location, conf.Source.Location, Environment.NewLine)); } if (recording.Configuration.Source.Name != conf.Source.Name) { ok = false; logger.Append(string.Format("Source Name is '{0}' but must be '{1}'{2}", recording.Configuration.Source.Name, conf.Source.Name, Environment.NewLine)); } if (recording.Configuration.Source.SourceId != conf.Source.SourceId) { ok = false; logger.Append(string.Format("Source SourceId is '{0}' but must be '{1}'{2}", recording.Configuration.Source.SourceId, conf.Source.SourceId, Environment.NewLine)); } } else { ok = false; logger.Append(string.Format("recording doesn't contain Source{0}", Environment.NewLine)); } if (recording.Tracks == null || recording.Tracks.Track == null) { ok = false; logger.Append(string.Format("Track list of recording '{0}' is empty", recording.RecordingToken)); } else { foreach (TrackType type in Enum.GetValues(typeof(TrackType))) { if (type != TrackType.Extended) { //if (recording.Tracks.Track.FirstOrDefault(t => t.Configuration.TrackType == type) == null) //{ // ok = false; // logger.Append(string.Format("Recording doesn't contain tracks with track type '{0}'{1}", type, Environment.NewLine)); //} var actualTrackCount = recording.Tracks.Track.Count(t => t.Configuration.TrackType == type); if (TrackType.Audio == type) { var audioEncodingsCount = capabilities.Encoding.Count(e => { AudioEncoding v; return(AudioEncoding.TryParse(e, out v)); }); if (0 != audioEncodingsCount) { var flag = (1 <= actualTrackCount); if (!flag) { logger.AppendLine(string.Format("There are no tracks of type: '{0}'.", type)); } ok = ok && flag; } } if (TrackType.Video == type) { var videoEncodingsCount = capabilities.Encoding.Count(e => { VideoEncoding v; return(VideoEncoding.TryParse(e, out v)); }); if (0 != videoEncodingsCount) { var flag = (1 <= actualTrackCount); if (!flag) { logger.AppendLine(string.Format("There are no tracks of type: '{0}'.", type)); } ok = ok && flag; } } if (TrackType.Metadata == type && 
Features.ContainsFeature(Feature.MetadataRecording)) { var flag = (1 <= actualTrackCount); if (!flag) { logger.AppendLine(string.Format("There are no tracks of type: '{0}'.", type)); } ok = ok && flag; } } } } Assert(ok, logger.ToStringTrimNewLine(), "Check that configuration parameters of new recording are valid"); }
public void SetAudioSourceConfigurationTest() { Profile deletedProfile = null; Profile createdProfile = null; Profile modifiedProfile = null; AudioEncoding backupEncoding = AudioEncoding.G711; AudioEncoderConfiguration configBackup = null; RunTest( () => { Profile profile = CreateProfileByAnnex3("testprofileX", null, out deletedProfile, out createdProfile, out modifiedProfile); string reason; //5. ONVIF Client will invoke GetCompatibleAudioSourceConfigurationsRequest message // (ProfileToken = ‘testprofileX’) to retrieve the list of audio source configurations // compatible with profile. //6. ONVIF Client verifies the list of audio source configurations sent by DUT. // Audio source AudioSourceConfiguration[] configs = GetCompatibleAudioSourceConfigurations(profile.token); Assert(ValidateAudioSourceConfigs(configs, out reason), reason, Resources.StepValidatingAudioSources_Title); //7. ONVIF Client invokes AddAudioSourceConfigurationRequest message // (ProfileToken = ‘testprofileX’, ConfigurationToken as one of the tokens received in the // GetCompatibleAudioSourceConfigurationsResponse message) to add audio source configuration // to profile. //8. DUT adds the audio source configuration to the profile and sends the response. AudioSourceConfiguration config = configs[0]; AddAudioSourceConfiguration(profile.token, config.token); //9. ONVIF Client invokes GetCompatibleAudioEncoderConfigurationsRequest message // (ProfileToken = ‘testprofileX’) to retrieve audio encoder configurations compatible with // profile. //10. DUT sends the list of audio encoder configurations compatible with the received // media profile token. // Audio encoder AudioEncoderConfiguration[] encoderConfigurations = GetCompatibleAudioEncoderConfigurations(profile.token); Assert(ValidateAudioEncoderConfigs(encoderConfigurations, out reason), reason, Resources.StepValidatingAudioEncoders_Title); //11. ONVIF Client invokes AddAudioEncoderConfigurationRequest message (ProfileToken = // ‘testprofileX’, ConfigurationToken as one of the tokens received in the // GetCompatibleAudioencoderConfigurationsResponse message) to add audio encoder // configuration to profile. //12. DUT adds the audio encoder configuration to the profile and sends the response. AudioEncoderConfiguration encoderConfig = encoderConfigurations[0]; AddAudioEncoderConfiguration(profile.token, encoderConfig.token); //13. ONVIF Client invokes GetAudioEncoderConfigurationOptionsRequest // (ProfileToken = ‘testprofileX’) request to retrieve audio encoder options for // specified profile. //14. DUT sends the audio encoder configuration options which could be applied // to audio encoder from specified profile. 
AudioEncoderConfigurationOptions options = GetAudioEncoderConfigurationOptions(null, profile.token); Assert(options != null && options.Options != null, "No Audio Encoder Configuration options returned", "Validate response received"); // Select valid options List<AudioEncoderConfigurationOption> validOptions = options.Options.Where(o => o.BitrateList != null && o.SampleRateList != null).ToList(); Assert(validOptions.Count > 0, "No valid options can be selected", "Select AudioEncoderConfigurationOption to check configuration changing"); configBackup = Utils.CopyMaker.CreateCopy(encoderConfig); backupEncoding = encoderConfig.Encoding; List<AudioEncoderConfigurationOption> opts = validOptions.Where(O => O.Encoding != backupEncoding).ToList(); AudioEncoderConfigurationOption encodingDifferent = opts.FirstOrDefault(); if (opts.Count == 0) { opts = validOptions; } // select with different encoding AudioEncoderConfigurationOption selectedOptions = null; AudioEncoderConfigurationOption bitrateDifferent = null; AudioEncoderConfigurationOption sampleRateDifferent = null; foreach (AudioEncoderConfigurationOption opt in opts) { bool bitrateDiffers = opt.BitrateList.Any(B => B != encoderConfig.Bitrate); bool sampleRateDiffers = opt.SampleRateList.Any(SR => SR != encoderConfig.SampleRate); if (bitrateDiffers && sampleRateDiffers) { selectedOptions = opt; break; } if (bitrateDiffers) { bitrateDifferent = opt; } if (sampleRateDiffers) { sampleRateDifferent = opt; } } if (selectedOptions == null) { selectedOptions = (encodingDifferent != null) ? encodingDifferent : (bitrateDifferent != null ? bitrateDifferent : sampleRateDifferent); } if (selectedOptions != null) { //15. ONVIF Client invokes SetAudioEncoderConfigurationRequest message // (ConfigurationToken, Encoding=[other than current], Bitrate = [other than current], // SampleRate = [other than current], ForcePersistence = false, where all values were // taken from audio encoder configuration options) to change the audio encoder configuration. //16. DUT sends SetAudioEncoderConfigurationResponse message. // Update encoder configuration encoderConfig.Encoding = selectedOptions.Encoding; List<int> bitrates = selectedOptions.BitrateList.Where(B => B != encoderConfig.Bitrate).ToList(); if (bitrates.Count > 0) { encoderConfig.Bitrate = bitrates[0]; } List<int> sampleRates = selectedOptions.SampleRateList.Where(SR => SR != encoderConfig.SampleRate).ToList(); if (sampleRates.Count > 0) { encoderConfig.SampleRate = sampleRates[0]; } SetAudioEncoderConfiguration(encoderConfig, false); //17. ONVIF Client invokes GetAudioEncoderConfigurationRequest message // (ConfigurationToken) to get new audio encoder configuration parameters. //18. DUT sends GetAudioEncoderConfigurationResponse message with parameters // specified in set request. AudioEncoderConfiguration actual = GetAudioEncoderConfiguration(encoderConfig.token); //19. ONVIF Client checks that Audio configuration in GetAudioEncoderConfigurationResponse // message is the same as in SetAudioEncoderConfigurationRequest message. string err = null; bool equal = EqualConfigurations(encoderConfig, actual, out err); string message = string.Format(Resources.ErrorAudioEncoderConfigNotEqual_Format, System.Environment.NewLine + err); Assert(equal, message, Resources.StepCompareAudioEncoderConfigs_Title); } }, () => { if (configBackup != null) { SetAudioEncoderConfiguration(configBackup, true); } RestoreProfileByAnnex3(deletedProfile, createdProfile, modifiedProfile); }); }
internal override void SetWaveFormatTagInternal(AudioEncoding waveFormatTag) { _subFormat = AudioSubTypes.SubTypeFromEncoding(waveFormatTag); }
/// <summary> /// Initializes an instance of <see cref="AudioStreamInfo"/>. /// </summary> public AudioStreamInfo(int itag, string url, Container container, long size, long bitrate, AudioEncoding audioEncoding) : base(itag, url, container, size) { Bitrate = bitrate; AudioEncoding = audioEncoding; }
/// <summary> /// Initializes an instance of <see cref="AudioStreamInfo"/>. /// </summary> public AudioStreamInfo(int itag, string url, long size, long bitrate) : base(itag, url, size) { Bitrate = bitrate.EnsureNotNegative(nameof(bitrate)); AudioEncoding = GetAudioEncoding(itag); }
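// EnsureNotNegative is an argument-guard extension that is not shown in this snippet.
// A plausible minimal implementation, assuming it simply validates and returns the
// value (the real helper may differ):
using System;

internal static class GuardExtensions
{
    // Returns the value unchanged when it is non-negative; otherwise throws,
    // naming the offending argument.
    public static long EnsureNotNegative(this long value, string argName)
    {
        if (value < 0)
            throw new ArgumentOutOfRangeException(argName);
        return value;
    }
}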
// ANNEX A.X1 Media Profile Configuration for Audio Streaming protected Profile SelectAudioProfile( AudioEncoding audioEncoding, string audioCodec, TestAudioEncoderConfigurationOptions testAudio, ref AudioEncoderConfigurationOptions audioOptions) { Profile[] profiles = GetProfiles(); Profile profile = null; AudioEncoderConfigurationOptions audioOptionsTmp = null; RunStep(() => { foreach (Profile p in profiles) { if (p.AudioEncoderConfiguration != null && p.AudioSourceConfiguration != null) { LogStepEvent("GetAudioEncoderConfigurationOptions"); audioOptionsTmp = Client.GetAudioEncoderConfigurationOptions(p.AudioEncoderConfiguration.token, p.token); DoRequestDelay(); if (testAudio(audioOptionsTmp)) { profile = p; LogStepEvent("OK - profile found"); break; } } else { LogStepEvent("GetAudioEncoderConfigurations"); AudioEncoderConfiguration[] audioEncoderConfigurations = Client.GetAudioEncoderConfigurations(); DoRequestDelay(); bool audioEncoderConfigurationFound = false; foreach (AudioEncoderConfiguration configuration in audioEncoderConfigurations) { LogStepEvent("GetAudioEncoderConfigurationOptions"); audioOptionsTmp = Client.GetAudioEncoderConfigurationOptions(configuration.token, p.token); DoRequestDelay(); if (testAudio(audioOptionsTmp)) { if (p.AudioSourceConfiguration == null) { AudioSourceConfiguration[] audioSourceConfigurations = Client.GetAudioSourceConfigurations(); DoRequestDelay(); if (audioSourceConfigurations.Length > 0) { LogStepEvent("AddAudioSourceConfiguration"); Client.AddAudioSourceConfiguration(p.token, audioSourceConfigurations[0].token); DoRequestDelay(); } else { throw new DutPropertiesException("Audio Source Configurations not found"); } } LogStepEvent("AddAudioEncoderConfiguration"); Client.AddAudioEncoderConfiguration(p.token, configuration.token); DoRequestDelay(); p.AudioEncoderConfiguration = configuration; profile = p; LogStepEvent(string.Format("Add Audio configuration to the {0} profile - OK", profile.Name)); audioEncoderConfigurationFound = true; break; } } if (!audioEncoderConfigurationFound) { throw new DutPropertiesException("Audio Encoder Configuration with required properties not found"); } } } if (profile == null) { throw new DutPropertiesException("Respective profile cannot be found or created"); } }, string.Format("Select or create profile with {0} Audio encoder configuration", audioCodec)); // propagate the freshly retrieved options before they are used, then find nearest bitrate and samplerate audioOptions = audioOptionsTmp; profile.AudioEncoderConfiguration.Bitrate = FindNearestAudioBitrate(profile.AudioEncoderConfiguration.Bitrate, audioEncoding, audioOptions); profile.AudioEncoderConfiguration.SampleRate = FindNearestAudioSamplerate(profile.AudioEncoderConfiguration.SampleRate, audioEncoding, audioOptions); return(profile); }
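// FindNearestAudioBitrate and FindNearestAudioSamplerate are framework helpers defined
// elsewhere. A hedged sketch of the "nearest value" selection they presumably perform
// over an option's BitrateList/SampleRateList (the real helpers also match on the
// requested AudioEncoding, which is omitted here):
using System;
using System.Linq;

internal static class NearestValueSketch
{
    // Picks the candidate closest to the requested value; assumes the
    // candidate list is non-empty.
    public static int FindNearest(int requested, int[] candidates)
    {
        return candidates.OrderBy(c => Math.Abs(c - requested)).First();
    }
}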
private AudioFormat ParseContainerHeader(byte[] header) { AudioFormat parsedFormat = null; using (Stream stream = new MemoryStream(header)) using (BinaryReader reader = new BinaryReader(stream)) { if (this.InputAudioFormat.Container.ContainerType.Equals(AudioContainerType.WAV)) { string label = this.GetChunkLabel(reader, stream, 0); if (string.CompareOrdinal(label, "RIFF") != 0) { throw new InvalidDataException("Unable to find RIFF signature in header"); } label = this.GetChunkLabel(reader, stream, 8); if (string.CompareOrdinal(label, "WAVE") != 0) { throw new InvalidDataException("Unable to find WAVE signature in header"); } bool isParsed = false; while (!isParsed) { // Safe to cast to int because the header size can't be > 5k label = this.GetChunkLabel(reader, stream, (int)stream.Position); int chunkSize = reader.ReadInt32(); switch (label) { case "fmt ": long currentStreamPosition = stream.Position; AudioEncoding encoding = AudioEncoding.None; if (reader.ReadInt16() == 1) { encoding = AudioEncoding.PCM; } int channelsNumber = reader.ReadInt16(); int sampleRate = reader.ReadInt32(); // Skipping the unneeded format specs stream.Position += 6; int bitsPerSample = reader.ReadInt16(); parsedFormat = new AudioFormat(encoding, channelsNumber, sampleRate, bitsPerSample, new AudioContainer(AudioContainerType.WAV)); stream.Position = currentStreamPosition + chunkSize; break; case "data": isParsed = true; this.parsingResult.DataChunckStart = (int)stream.Position; if (parsedFormat == null) { throw new InvalidDataException("Unable to find the fmt chunk in header"); } break; default: stream.Position += chunkSize; break; } } } else { throw new InvalidDataException($"Unsupported container format: {this.InputAudioFormat.Container.ContainerType.ToString()}"); } } return(parsedFormat); }
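// For reference, a self-contained sketch of the same RIFF chunk walk the parser above
// performs, reading only the fields it consumes (the "fmt " format tag, channel count,
// and sample rate). Assumes a canonical WAV file passed as the first command-line
// argument; throws at end of stream if no fmt chunk is found:
using System;
using System.IO;

static class WavFmtDemo
{
    static void Main(string[] args)
    {
        using (var reader = new BinaryReader(File.OpenRead(args[0])))
        {
            reader.BaseStream.Position = 12; // skip "RIFF", riff size, "WAVE"
            while (true)
            {
                string label = new string(reader.ReadChars(4));
                int chunkSize = reader.ReadInt32();
                if (label == "fmt ")
                {
                    short formatTag = reader.ReadInt16(); // 1 == PCM
                    short channels = reader.ReadInt16();
                    int sampleRate = reader.ReadInt32();
                    Console.WriteLine($"PCM={formatTag == 1}, channels={channels}, rate={sampleRate}");
                    return;
                }
                reader.BaseStream.Position += chunkSize; // skip unrelated chunks
            }
        }
    }
}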
public void SetAudioSourceConfigurationTest() { Profile deletedProfile = null; Profile createdProfile = null; Profile modifiedProfile = null; AudioEncoding backupEncoding = AudioEncoding.G711; AudioEncoderConfiguration configBackup = null; RunTest( () => { string reason; bool executeAnnex = false; Profile profile = null; //3. ONVIF Client invokes CreateProfileRequest message (ProfileToken = ‘testprofileX’) // to create new profile. //4. Verify CreateProfileResponse message or SOAP 1.2 fault message (Action/MaxNVTProfiles) // from the DUT. If a fault was received, execute Annex A.1 // create profile try { BeginStep("Create profile"); profile = Client.CreateProfile("testprofileX", null); StepPassed(); Assert(IsEmptyProfile(profile, out reason), reason, Resources.StepValidatingNewProfile_Title); createdProfile = profile; } catch (FaultException exc) { LogFault(exc); string faultDump; if (exc.IsValidOnvifFault("Receiver/Action/MaxNVTProfiles", out faultDump)) { LogStepEvent("Unable to create profile - delete one or select existing for test"); executeAnnex = true; StepPassed(); } else { throw; // rethrow without resetting the stack trace } } if (executeAnnex) { Profile[] profiles = GetProfiles(); bool nonFixedFound = false; foreach (Profile p in profiles) { if (!(p.fixedSpecified && p.@fixed)) { nonFixedFound = true; deletedProfile = p; DeleteProfile(p.token); break; } } if (nonFixedFound) { profile = CreateProfile("testprofileX", null); createdProfile = profile; } else { bool audioProfileFound = false; foreach (Profile p in profiles) { if (p.AudioSourceConfiguration != null) { profile = p; LogTestEvent(string.Format("Use profile with token '{0}' for test{1}", p.token, System.Environment.NewLine)); modifiedProfile = p; audioProfileFound = true; break; } } if (!audioProfileFound) { LogTestEvent("Unable to create or select profile with Audio configuration for test."); return; } } } //5. ONVIF Client invokes GetCompatibleAudioSourceConfigurationsRequest message // (ProfileToken = ‘testprofileX’) to retrieve the list of audio source configurations // compatible with profile. //6. ONVIF Client verifies the list of audio source configurations sent by DUT. // Audio source AudioSourceConfiguration[] configs = GetCompatibleAudioSourceConfigurations(profile.token); Assert(ValidateAudioSourceConfigs(configs, out reason), reason, Resources.StepValidatingAudioSources_Title); //7. ONVIF Client invokes AddAudioSourceConfigurationRequest message // (ProfileToken = ‘testprofileX’, ConfigurationToken as one of the tokens received in the // GetCompatibleAudioSourceConfigurationsResponse message) to add audio source configuration // to profile. //8. DUT adds the audio source configuration to the profile and sends the response. AudioSourceConfiguration config = configs[0]; AddAudioSourceConfiguration(profile.token, config.token); //9. ONVIF Client invokes GetCompatibleAudioEncoderConfigurationsRequest message // (ProfileToken = ‘testprofileX’) to retrieve audio encoder configurations compatible with // profile. //10. DUT sends the list of audio encoder configurations compatible with the received // media profile token. // Audio encoder AudioEncoderConfiguration[] encoderConfigurations = GetCompatibleAudioEncoderConfigurations(profile.token); Assert(ValidateAudioEncoderConfigs(encoderConfigurations, out reason), reason, Resources.StepValidatingAudioEncoders_Title); //11. 
ONVIF Client invokes AddAudioEncoderConfigurationRequest message (ProfileToken = // ‘testprofileX’, ConfigurationToken as one of the tokens received in the // GetCompatibleAudioEncoderConfigurationsResponse message) to add audio encoder // configuration to profile. //12. DUT adds the audio encoder configuration to the profile and sends the response. AudioEncoderConfiguration encoderConfig = encoderConfigurations[0]; AddAudioEncoderConfiguration(profile.token, encoderConfig.token); //13. ONVIF Client invokes GetAudioEncoderConfigurationOptionsRequest // (ProfileToken = ‘testprofileX’) request to retrieve audio encoder options for // specified profile. //14. DUT sends the audio encoder configuration options which could be applied // to audio encoder from specified profile. AudioEncoderConfigurationOptions options = GetAudioEncoderConfigurationOptions(null, profile.token); Assert(options != null && options.Options != null, "No Audio Encoder Configuration options returned", "Validate response received"); // Select valid options List<AudioEncoderConfigurationOption> validOptions = options.Options.Where(o => o.BitrateList != null && o.SampleRateList != null).ToList(); Assert(validOptions.Count > 0, "No valid options can be selected", "Select AudioEncoderConfigurationOption to check configuration changing"); configBackup = Utils.CopyMaker.CreateCopy(encoderConfig); backupEncoding = encoderConfig.Encoding; List<AudioEncoderConfigurationOption> opts = validOptions.Where(O => O.Encoding != backupEncoding).ToList(); AudioEncoderConfigurationOption encodingDifferent = opts.FirstOrDefault(); if (opts.Count == 0) { opts = validOptions; } // select with different encoding AudioEncoderConfigurationOption selectedOptions = null; AudioEncoderConfigurationOption bitrateDifferent = null; AudioEncoderConfigurationOption sampleRateDifferent = null; foreach (AudioEncoderConfigurationOption opt in opts) { bool bitrateDiffers = opt.BitrateList.Any(B => B != encoderConfig.Bitrate); bool sampleRateDiffers = opt.SampleRateList.Any(SR => SR != encoderConfig.SampleRate); if (bitrateDiffers && sampleRateDiffers) { selectedOptions = opt; break; } if (bitrateDiffers) { bitrateDifferent = opt; } if (sampleRateDiffers) { sampleRateDifferent = opt; } } if (selectedOptions == null) { selectedOptions = (encodingDifferent != null) ? encodingDifferent : (bitrateDifferent != null ? bitrateDifferent : sampleRateDifferent); } if (selectedOptions != null) { //15. ONVIF Client invokes SetAudioEncoderConfigurationRequest message // (ConfigurationToken, Encoding=[other than current], Bitrate = [other than current], // SampleRate = [other than current], ForcePersistence = false, where all values were // taken from audio encoder configuration options) to change the audio encoder configuration. //16. DUT sends SetAudioEncoderConfigurationResponse message. // Update encoder configuration encoderConfig.Encoding = selectedOptions.Encoding; List<int> bitrates = selectedOptions.BitrateList.Where(B => B != encoderConfig.Bitrate).ToList(); if (bitrates.Count > 0) { encoderConfig.Bitrate = bitrates[0]; } List<int> sampleRates = selectedOptions.SampleRateList.Where(SR => SR != encoderConfig.SampleRate).ToList(); if (sampleRates.Count > 0) { encoderConfig.SampleRate = sampleRates[0]; } SetAudioEncoderConfiguration(encoderConfig, false); //17. ONVIF Client invokes GetAudioEncoderConfigurationRequest message // (ConfigurationToken) to get new audio encoder configuration parameters. //18. 
DUT sends GetAudioEncoderConfigurationResponse message with parameters // specified in set request. AudioEncoderConfiguration actual = GetAudioEncoderConfiguration(encoderConfig.token); //19. ONVIF Client checks that Audio configuration in GetAudioEncoderConfigurationResponse // message is the same as in SetAudioEncoderConfigurationRequest message. string err = null; bool equal = EqualConfigurations(encoderConfig, actual, out err); string message = string.Format(Resources.ErrorAudioEncoderConfigNotEqual_Format, System.Environment.NewLine + err); Assert(equal, message, Resources.StepCompareAudioEncoderConfigs_Title); } //20. If the created Media Profile was used, then ONVIF Client invokes DeleteProfileRequest // message (ProfileToken = ‘testprofileX’). Otherwise ONVIF Client skips the remaining steps // and restores profile settings. //21. DUT deletes the media profile and sends the response. if (modifiedProfile == null) { // // if modifiedProfile != null, it means that all profiles have the "fixed" attribute // and one of them has been selected for test. // In this case we cannot delete this profile. // DeleteProfile(profile.token); createdProfile = null; } }, () => { if (configBackup != null) { SetAudioEncoderConfiguration(configBackup, true); } if (createdProfile != null) { DeleteProfile(createdProfile.token); } string remark = null; bool create = true; Profile profileToRestore = null; if (deletedProfile != null) { profileToRestore = deletedProfile; remark = "Restore profile deleted during the test"; } if (modifiedProfile != null) { // // We did not change video etc. configurations during the test. // The only configurations which (possibly) need to be restored are // audio source and audio encoder (add) // profileToRestore = modifiedProfile; remark = "Restore profile modified during the test"; create = false; } if (profileToRestore != null) { // profile has been deleted for test: add all possible configurations // profile has been selected for test: it has been deleted at the end. RestoreProfile(profileToRestore, remark, create); } }); }
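// RunTest above takes a test body plus a cleanup delegate; the restore logic
// (re-applying the encoder configuration backup, deleting or restoring profiles) must
// run whether or not an assertion throws. A minimal sketch of that contract (the real
// framework method is not shown and presumably also handles logging and reporting):
using System;

internal abstract class TestToolSketch
{
    protected void RunTest(Action test, Action cleanup)
    {
        try
        {
            test();
        }
        finally
        {
            cleanup(); // always restore DUT state, even on failure
        }
    }
}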
/// <inheritdoc /> public AudioStreamInfo(int itag, string url, long contentLength, long bitrate) : base(itag, url, contentLength) { Bitrate = bitrate >= 0 ? bitrate : throw new ArgumentOutOfRangeException(nameof(bitrate)); AudioEncoding = GetAudioEncoding(itag); }
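// Note: the throw expression inside this constructor's conditional requires C# 7.0 or
// later; on older compilers the validation must be written as a statement, as in the
// EnsureNotNegative variant sketched above.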
/// <summary> /// Run the program service. /// </summary> /// <returns>A task.</returns> public async Task RunAsync() { if (_speechService is null) { try { _speechService = new GoogleSpeechService(_options.CredentialsPath); } catch (Exception ex) { _logger.Error(ex, "Failed to start the speech service."); return; } } if (_storageService is null) { try { _storageService = new GoogleStorageService(_options.CredentialsPath); } catch (Exception ex) { _logger.Error(ex, "Failed to start the storage service."); return; } } if (_taglibService is null) { try { _taglibService = new TaglibService(); } catch (Exception ex) { _logger.Error(ex, "Failed to start the taglib service."); return; } } if (!File.Exists(_options.AudioPath)) { _logger.Error("The audio file at path {audioPath} does not exist.", _options.AudioPath); return; } _logger.Information("Starting transcription for {audioPath}.", _options.AudioPath); // Retrieve audio metadata. var codec = _taglibService.GetAudioCodec(_options.AudioPath); var sampleRate = _taglibService.GetAudioSampleRate(_options.AudioPath); // Match audio metadata against supported formats. AudioEncoding encoding = default; switch (codec) { case TagLib.Riff.WaveFormatEx _: encoding = AudioEncoding.Linear16; break; case TagLib.Flac.StreamHeader _: encoding = AudioEncoding.Flac; break; default: throw new NotImplementedException("The codec is not supported."); } // Asynchronously create the bucket if it doesn't already exist. if (await _storageService.GetBucketAsync(_options.Bucket) is null) { var bucket = await _storageService.CreateBucketAsync(_options.Bucket); if (bucket is null) { throw new InvalidOperationException("Unable to create bucket."); } _logger.Information("Bucket {bucketName} was created.", _options.Bucket); } // Asynchronously upload the audio. _logger.Information("Uploading audio to bucket {bucketName}.", _options.Bucket); var objectName = $"{Guid.NewGuid()}{Path.GetExtension(_options.AudioPath)}"; var uploadedAudio = await _storageService.UploadAsync(_options.Bucket, objectName, _options.AudioPath); var uploadedAudioUri = $"gs://{_options.Bucket}/{objectName}"; _logger.Information("Uploaded audio to {audioUri}.", uploadedAudioUri); // Asynchronously transcribe the audio. try { _logger.Information("Transcription started."); IReadOnlyList<SpeechRecognitionAlternative> transcription = null; await foreach (var result in _speechService.LongRunningRecognizeAsync(uploadedAudioUri, encoding, sampleRate, _options.LanguageCode)) { if (result.Progress < 100) { _logger.Information("Transcription progress {progress}%.", result.Progress); continue; } transcription = result.Transcription; } _logger.Information("Transcription completed."); // Analyze transcription by speaker. var textBlocks = new List<TranscribedTextBlock>(); var wordsBySpeakerTag = transcription.SelectMany(q => q.Words).Where(q => q.SpeakerTag != 0).GroupAdjacent(q => q.SpeakerTag); foreach (var group in wordsBySpeakerTag) { var textBlock = new TranscribedTextBlock() { SpeakerTag = group.Key, Text = string.Join(" ", group.Select(x => x.Word.ToString())) }; textBlocks.Add(textBlock); } // Write to .json file. 
var transcribedFile = new TranscribedFile() { AudioPath = _options.AudioPath, AudioUri = uploadedAudioUri, Created = DateTime.Now, TextBlocks = textBlocks.ToArray() }; var json = JsonConvert.SerializeObject(transcribedFile); var jsonPath = Path.Combine(Path.GetDirectoryName(Process.GetCurrentProcess().MainModule.FileName), $"Transcription-{Path.GetFileNameWithoutExtension(_options.AudioPath)}.json"); File.WriteAllText(jsonPath, json); // Write to .txt file. var text = string.Join("\n", textBlocks.Select(q => $"Speaker {q.SpeakerTag}: {q.Text}")); var textPath = Path.Combine(Path.GetDirectoryName(Process.GetCurrentProcess().MainModule.FileName), $"Transcription-{Path.GetFileNameWithoutExtension(_options.AudioPath)}.txt"); File.WriteAllText(textPath, text); } catch (Exception ex) { _logger.Error(ex, "Transcription failed."); } // Asynchronously delete the uploaded audio. if (await _storageService.DeleteAsync(_options.Bucket, objectName)) { _logger.Information("Deleted uploaded audio."); } else { _logger.Information("Failed to delete uploaded audio."); } }
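// GroupAdjacent (a MoreLINQ-style operator) groups consecutive elements sharing a key,
// which is what turns the word stream above into per-speaker text blocks: a speaker who
// talks, stops, and talks again yields two separate blocks, unlike GroupBy. A hand-rolled
// sketch of that behavior (the name and return shape here are simplifications):
using System;
using System.Collections.Generic;

internal static class SequenceExtensions
{
    public static IEnumerable<KeyValuePair<TKey, List<T>>> GroupAdjacentSketch<T, TKey>(
        this IEnumerable<T> source, Func<T, TKey> keySelector)
    {
        var comparer = EqualityComparer<TKey>.Default;
        List<T> current = null;
        TKey currentKey = default(TKey);
        foreach (var item in source)
        {
            var key = keySelector(item);
            if (current == null || !comparer.Equals(key, currentKey))
            {
                // Key changed: emit the finished run and start a new one.
                if (current != null)
                    yield return new KeyValuePair<TKey, List<T>>(currentKey, current);
                current = new List<T>();
                currentKey = key;
            }
            current.Add(item);
        }
        if (current != null)
            yield return new KeyValuePair<TKey, List<T>>(currentKey, current);
    }
}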
bool CheckAudioSupport(AudioEncoderConfigurationOptions options, AudioEncoding audioEncoding) { return((options.Options != null) && options.Options.Any(o => o.Encoding == audioEncoding)); }
/* * TODO: This conversion class needs to be finished off before libencode will work. */ /// <summary> /// Get an EncodeJob model for a LibHB Encode. /// </summary> /// <param name="task"> /// The task. /// </param> /// <returns> /// An Interop.EncodeJob model. /// </returns> public static EncodeJob GetEncodeJob(QueueTask task) { // Sanity Checking if (task == null || task.Task == null) { return(null); } // The current Job Configuration EncodeTask work = task.Task; // Which will be converted to this EncodeJob Model. EncodeJob job = new EncodeJob(); EncodingProfile profile = new EncodingProfile(); job.EncodingProfile = profile; profile.Anamorphic = work.Anamorphic; profile.AudioEncodings = new List<AudioEncoding>(); job.ChosenAudioTracks = new List<int>(); foreach (AudioTrack track in work.AudioTracks) { AudioEncoding newTrack = new AudioEncoding { Bitrate = track.Bitrate, Drc = track.DRC, Gain = track.Gain, Encoder = track.Encoder, InputNumber = track.Track.HasValue ? track.Track.Value : 0, Mixdown = track.MixDown, SampleRateRaw = GetSampleRateRaw(track.SampleRate), }; profile.AudioEncodings.Add(newTrack); if (track.Track != null) { job.ChosenAudioTracks.Add(track.Track.Value); } } profile.Cropping = new Cropping { Top = work.Cropping.Top, Bottom = work.Cropping.Bottom, Left = work.Cropping.Left, Right = work.Cropping.Right }; profile.CustomCropping = true; // TODO deal with this better profile.CustomDecomb = work.CustomDecomb; profile.CustomDeinterlace = work.CustomDeinterlace; profile.CustomDenoise = work.CustomDenoise; profile.CustomDetelecine = work.CustomDetelecine; profile.Deblock = work.Deblock; profile.Decomb = work.Decomb; profile.Deinterlace = work.Deinterlace; profile.Denoise = work.Denoise; profile.Detelecine = work.Detelecine; profile.DisplayWidth = work.DisplayWidth.HasValue ? (int)Math.Round(work.DisplayWidth.Value, 0) : 0; profile.Framerate = work.Framerate.HasValue ? work.Framerate.Value : 0; profile.Grayscale = work.Grayscale; profile.Height = work.Height.HasValue ? work.Height.Value : 0; profile.IPod5GSupport = work.IPod5GSupport; profile.IncludeChapterMarkers = work.IncludeChapterMarkers; profile.KeepDisplayAspect = work.KeepDisplayAspect; profile.LargeFile = work.LargeFile; profile.MaxHeight = work.MaxHeight.HasValue ? work.MaxHeight.Value : 0; profile.MaxWidth = work.MaxWidth.HasValue ? work.MaxWidth.Value : 0; profile.Modulus = work.Modulus.HasValue ? work.Modulus.Value : 16; profile.Optimize = work.OptimizeMP4; switch (work.OutputFormat) { case OutputFormat.Mp4: case OutputFormat.M4V: profile.OutputFormat = Interop.Model.Encoding.OutputFormat.Mp4; break; case OutputFormat.Mkv: profile.OutputFormat = Interop.Model.Encoding.OutputFormat.Mkv; break; } profile.PeakFramerate = work.FramerateMode == FramerateMode.PFR; profile.PixelAspectX = work.PixelAspectX; profile.PixelAspectY = work.PixelAspectY; switch (work.OutputFormat) { case OutputFormat.Mp4: profile.PreferredExtension = OutputExtension.Mp4; break; case OutputFormat.M4V: profile.PreferredExtension = OutputExtension.M4v; break; } profile.Quality = work.Quality.HasValue ? work.Quality.Value : 0; profile.UseDisplayWidth = true; profile.VideoBitrate = work.VideoBitrate.HasValue ? work.VideoBitrate.Value : 0; profile.VideoEncodeRateType = work.VideoEncodeRateType; profile.VideoEncoder = work.VideoEncoder; profile.Width = work.Width.HasValue ? 
work.Width.Value : 0; profile.X264Options = work.AdvancedEncoderOptions; if (work.PointToPointMode == PointToPointMode.Chapters) { job.ChapterStart = work.StartPoint; job.ChapterEnd = work.EndPoint; } job.Angle = work.Angle; job.EncodingProfile = profile; if (work.PointToPointMode == PointToPointMode.Frames) { job.FramesEnd = work.EndPoint; job.FramesStart = work.StartPoint; } job.CustomChapterNames = work.ChapterNames; job.UseDefaultChapterNames = work.IncludeChapterMarkers; job.OutputPath = work.Destination; switch (work.PointToPointMode) { case PointToPointMode.Chapters: job.RangeType = VideoRangeType.Chapters; break; case PointToPointMode.Seconds: job.RangeType = VideoRangeType.Seconds; break; case PointToPointMode.Frames: job.RangeType = VideoRangeType.Frames; break; } if (work.PointToPointMode == PointToPointMode.Seconds) { job.SecondsEnd = work.EndPoint; job.SecondsStart = work.StartPoint; } job.SourcePath = work.Source; // job.SourceType = work.Type; job.Title = work.Title; // TODO Setup subtitles job.Subtitles = new Subtitles { SourceSubtitles = new List <SourceSubtitle>(), SrtSubtitles = new List <SrtSubtitle>() }; //foreach (SubtitleTrack track in work.SubtitleTracks) //{ // // TODO //} return(job); }
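// GetSampleRateRaw is referenced above but not shown. A hypothetical sketch, assuming
// the GUI exposes sample rates as kHz strings ("44.1", "48") while the interop layer
// expects raw Hz, with 0 meaning "auto / same as source" (the real helper may differ):
using System;
using System.Globalization;

internal static class SampleRateSketch
{
    public static int GetSampleRateRaw(string sampleRate)
    {
        double khz;
        if (double.TryParse(sampleRate, NumberStyles.Float,
                CultureInfo.InvariantCulture, out khz))
        {
            return (int)Math.Round(khz * 1000); // e.g. "44.1" -> 44100
        }
        return 0; // unparsable ("Auto") -> let the encoder decide
    }
}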
/// <summary> /// Returns the file extension for the specified audio type. /// </summary> /// <param name="audio">The audio type to get the file extension for.</param> /// <returns>The file extension.</returns> public string GetAudioExtension(AudioEncoding audio) { return("." + audio.ToString().ToLowerInvariant()); }
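// Quick usage sketch: the mapping only holds while the enum member names match real
// file extensions (the members below are assumptions for illustration). ToLowerInvariant
// avoids culture-specific casing surprises such as the Turkish dotless i:
using System;

enum AudioEncodingDemo { Mp3, Aac, Vorbis }

static class AudioExtensionDemo
{
    static void Main()
    {
        var audio = AudioEncodingDemo.Aac;
        Console.WriteLine("." + audio.ToString().ToLowerInvariant()); // prints ".aac"
    }
}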