Example #1
0
 public OneClickAudioTrack(AudioJob oAudioJob, MuxStream oMuxStream, AudioTrackInfo oAudioTrackInfo, bool bMKVTrack)
 {
     // Capture every component of the track; depending on which kind of
     // track this instance represents, some of these may be null.
     _audioTrackInfo = oAudioTrackInfo;
     _directMuxAudio = oMuxStream;
     _audioJob       = oAudioJob;
     _bMKVTrack      = bMKVTrack;
 }
Example #2
0
 public OneClickAudioTrack(AudioJob oAudioJob, MuxStream oMuxStream, AudioTrackInfo oAudioTrackInfo, bool bMKVTrack)
 {
     _audioJob       = oAudioJob;
     _directMuxAudio = oMuxStream;
     _audioTrackInfo = oAudioTrackInfo;

     // Propagate the MKV-extraction flag onto the track info, when one was supplied.
     if (oAudioTrackInfo != null)
     {
         oAudioTrackInfo.ExtractMKVTrack = bMKVTrack;
     }
 }
 /// <summary>
 /// Checks every audio configuration tab for problems. Tabs that have no
 /// audio job are merely unconfigured and are skipped; only a serious
 /// (not just incomplete) configuration error is reported.
 /// </summary>
 /// <returns>the first serious error message found, or null when every
 /// configured tab verifies cleanly</returns>
 internal string verifyAudioSettings()
 {
     foreach (AudioEncodingTab tab in tabs)
     {
         // An unconfigured tab has no job attached — nothing to verify.
         if (tab.AudioJob == null)
         {
             continue;
         }

         string error = tab.verifyAudioSettings();
         if (error != null)
         {
             return error;
         }
     }
     return null;
 }
Example #4
0
        /// <summary>
        /// Prepares the encoder for the given audio job: builds the AviSynth
        /// script that decodes and post-processes the source audio (source
        /// filter selection, delay, cuts, normalization, downmix/upmix), and
        /// selects the external encoder executable plus its command line.
        /// The decoded stream is later piped to that encoder's stdin.
        /// </summary>
        /// <param name="job">the job to run; must be an AudioJob</param>
        /// <param name="su">status object used for progress updates</param>
        public void setup(Job job, StatusUpdate su)
        {
            this.audioJob = (AudioJob)job;

            this.su = su;


            //let's create avisynth script
            StringBuilder script = new StringBuilder();

            string id  = _uniqueId;
            string tmp = Path.Combine(Path.GetTempPath(), id);



            bool directShow = audioJob.Settings.ForceDecodingViaDirectShow;

            if (!directShow)
            {
                switch (Path.GetExtension(audioJob.Input).ToLower())
                {
                case ".ac3":
                    // The closing paren is appended below, optionally preceded
                    // by the DRC argument.
                    script.AppendFormat("NicAc3Source(\"{0}\"", audioJob.Input);
                    if (audioJob.Settings.AutoGain)
                    {
                        script.AppendFormat(", DRC=1){0}", Environment.NewLine);
                    }
                    else
                    {
                        // BUGFIX: terminate the statement with a newline;
                        // otherwise the next script statement would end up on
                        // the same line, which is invalid AviSynth syntax.
                        script.Append(")" + Environment.NewLine);
                    }
                    break;

                case ".avs":
                    script.AppendFormat("Import(\"{0}\"){1}", audioJob.Input, Environment.NewLine);
                    break;

                case ".wav":
                    script.AppendFormat("WavSource(\"{0}\"){1}", audioJob.Input, Environment.NewLine);
                    break;

                case ".dts":
                    // BUGFIX: the format string previously contained a closing
                    // paren ("NicDtsSource(\"{0}\")"), which produced
                    // 'NicDtsSource("x"), DRC=1)' or 'NicDtsSource("x"))'.
                    // Like the ac3 case, the paren is appended below.
                    script.AppendFormat("NicDtsSource(\"{0}\"", audioJob.Input);
                    if (audioJob.Settings.AutoGain)
                    {
                        script.AppendFormat(", DRC=1){0}", Environment.NewLine);
                    }
                    else
                    {
                        script.Append(")" + Environment.NewLine);
                    }
                    break;

                case ".mpa":
                case ".mpg":
                case ".mp2":
                    script.AppendFormat("NicMPASource(\"{0}\"){1}", audioJob.Input, Environment.NewLine);
                    break;

                case ".mp3":
                    script.AppendFormat("NicMPG123Source(\"{0}\"){1}", audioJob.Input, Environment.NewLine);
                    break;

                default:
                    // No dedicated source filter for this extension — fall back
                    // to DirectShow decoding.
                    directShow = true;
                    break;
                }
            }
            if (directShow)
            {
                script.AppendFormat("DirectShowSource(\"{0}\"){1}", audioJob.Input, Environment.NewLine);
            }

            script.AppendFormat("EnsureVBRMP3Sync(){0}", Environment.NewLine);


            // Delay is stored in milliseconds; DelayAudio takes seconds.
            if (audioJob.Delay != 0)
            {
                script.AppendFormat("DelayAudio({0}.0/1000.0){1}", audioJob.Delay, Environment.NewLine);
            }

            if (audioJob.Settings.ImproveAccuracy || audioJob.Settings.AutoGain /* to fix the bug */)
            {
                script.AppendFormat("ConvertAudioToFloat(){0}", Environment.NewLine);
            }

            if (!string.IsNullOrEmpty(audioJob.CutFile))
            {
                try
                {
                    Cuts cuts = FilmCutter.ReadCutsFromFile(audioJob.CutFile);
                    script.AppendLine(FilmCutter.GetCutsScript(cuts, true));
                }
                catch (FileNotFoundException)
                {
                    deleteTempFiles();
                    throw new MissingFileException(audioJob.CutFile);
                }
                catch (Exception)
                {
                    deleteTempFiles();
                    throw new JobRunException("Broken cuts file, " + audioJob.CutFile + ", can't continue.");
                }
            }

            if (audioJob.Settings.AutoGain)
            {
                script.AppendFormat("Normalize(){0}", Environment.NewLine);
            }

            // Channel conversion: the x_* helper functions are defined in the
            // script footer appended further below.
            switch (audioJob.Settings.DownmixMode)
            {
            case ChannelMode.KeepOriginal:
                break;

            case ChannelMode.ConvertToMono:
                script.AppendFormat("ConvertToMono(){0}", Environment.NewLine);
                break;

            case ChannelMode.DPLDownmix:
                script.Append("6<=Audiochannels(last)?x_dpl" + id + @"(ConvertAudioToFloat(last)):last" + Environment.NewLine);
                break;

            case ChannelMode.DPLIIDownmix:
                script.Append("6<=Audiochannels(last)?x_dpl2" + id + @"(ConvertAudioToFloat(last)):last" + Environment.NewLine);
                break;

            case ChannelMode.StereoDownmix:
                script.Append("6<=Audiochannels(last)?x_stereo" + id + @"(ConvertAudioToFloat(last)):last" + Environment.NewLine);
                break;

            case ChannelMode.Upmix:
                createTemporallyEqFiles(tmp);
                script.Append("2==Audiochannels(last)?x_upmix" + id + @"(last):last" + Environment.NewLine);
                break;

            case ChannelMode.UpmixUsingSoxEq:
                script.Append("2==Audiochannels(last)?x_upmixR" + id + @"(last):last" + Environment.NewLine);
                break;

            case ChannelMode.UpmixWithCenterChannelDialog:
                script.Append("2==Audiochannels(last)?x_upmixC" + id + @"(last):last" + Environment.NewLine);
                break;
            }

            //let's obtain command line & other staff
            if (audioJob.Settings is AC3Settings)
            {
                script.Append("6<=Audiochannels(last)?GetChannel(last,1,3,2,5,6,4):last" + Environment.NewLine);
                _mustSendWavHeaderToEncoderStdIn = true;
                AC3Settings n = audioJob.Settings as AC3Settings;
                _encoderExecutablePath = this._settings.FFMpegPath;
                _encoderCommandLine    = "-i - -y -acodec ac3 -ab " + n.Bitrate + "k \"{0}\"";
            }
            if (audioJob.Settings is MP2Settings)
            {
                _mustSendWavHeaderToEncoderStdIn = true;
                MP2Settings n = audioJob.Settings as MP2Settings;
                _encoderExecutablePath = this._settings.FFMpegPath;
                _encoderCommandLine    = "-i - -y -acodec mp2 -ab " + n.Bitrate + "k \"{0}\"";
            }
            if (audioJob.Settings is WinAmpAACSettings)
            {
                _mustSendWavHeaderToEncoderStdIn = false;
                WinAmpAACSettings n = audioJob.Settings as WinAmpAACSettings;
                _encoderExecutablePath = this._settings.EncAacPlusPath;
                StringBuilder sb = new StringBuilder("- \"{0}\" --rawpcm {1} {3} {2} --cbr ");
                sb.Append(n.Bitrate * 1000);
                if (n.Mpeg2AAC)
                {
                    sb.Append(" --mpeg2aac");
                }
                switch (n.Profile)
                {
                case AacProfile.PS:
                    break;

                case AacProfile.HE:
                    sb.Append(" --nops");
                    break;

                case AacProfile.LC:
                    sb.Append(" --lc");
                    break;
                }
                switch (n.StereoMode)
                {
                case WinAmpAACSettings.AacStereoMode.Dual:
                    sb.Append(" --dc");
                    break;

                case WinAmpAACSettings.AacStereoMode.Joint:
                    break;

                case WinAmpAACSettings.AacStereoMode.Independent:
                    sb.Append(" --is");
                    break;
                }
                _encoderCommandLine = sb.ToString();
            }

            if (audioJob.Settings is AudXSettings)
            {
                script.Append("ResampleAudio(last,48000)" + Environment.NewLine);
                script.Append("6==Audiochannels(last)?last:GetChannel(last,1,1,1,1,1,1)" + Environment.NewLine);
                _mustSendWavHeaderToEncoderStdIn = false;
                AudXSettings n = audioJob.Settings as AudXSettings;
                _encoderExecutablePath = this._settings.EncAudXPath;
                _encoderCommandLine    = "- \"{0}\" --q " + ((int)n.Quality) + " --raw {1}";
            }
            if (audioJob.Settings is OggVorbisSettings)
            {
                // http://forum.doom9.org/showthread.php?p=831098#post831098
                //if(!this._settings.FreshOggEnc2)
                script.Append("6==Audiochannels(last)?GetChannel(last,1,3,2,5,6,4):last" + Environment.NewLine);
                _mustSendWavHeaderToEncoderStdIn = false;
                OggVorbisSettings n = audioJob.Settings as OggVorbisSettings;
                _encoderExecutablePath = this._settings.OggEnc2Path;
                _encoderCommandLine    = "-Q --raw --raw-bits={2} --raw-chan={3} --raw-rate={1} --quality " + n.Quality.ToString(System.Globalization.CultureInfo.InvariantCulture) + " -o \"{0}\" -";
            }
            if (audioJob.Settings is NeroAACSettings)
            {
                _mustSendWavHeaderToEncoderStdIn = true;
                NeroAACSettings n   = audioJob.Settings as NeroAACSettings;
                NeroAACSettings nas = n;
                _encoderExecutablePath = this._settings.NeroAacEncPath;
                StringBuilder sb = new StringBuilder("-ignorelength ");
                switch (n.Profile)
                {
                case AacProfile.HE:
                    sb.Append("-he ");
                    break;

                case AacProfile.PS:
                    sb.Append("-hev2 ");
                    break;

                case AacProfile.LC:
                    sb.Append("-lc ");
                    break;
                }
                if (n.CreateHintTrack)
                {
                    sb.Append("-hinttrack ");
                }

                switch (n.BitrateMode)
                {
                case BitrateManagementMode.ABR:
                    sb.AppendFormat(System.Globalization.CultureInfo.InvariantCulture, "-br {0} ", n.Bitrate * 1000);
                    break;

                case BitrateManagementMode.CBR:
                    sb.AppendFormat(System.Globalization.CultureInfo.InvariantCulture, "-cbr {0} ", n.Bitrate * 1000);
                    break;

                case BitrateManagementMode.VBR:
                    sb.AppendFormat(System.Globalization.CultureInfo.InvariantCulture, "-q {0} ", n.Quality);
                    break;
                }

                sb.Append("-if - -of \"{0}\"");

                _encoderCommandLine = sb.ToString();
            }
            if (audioJob.Settings is FaacSettings)
            {
                FaacSettings f = audioJob.Settings as FaacSettings;
                _encoderExecutablePath           = this._settings.FaacPath;
                _mustSendWavHeaderToEncoderStdIn = false;
                switch (f.BitrateMode)
                {
                // {0} means output file name
                // {1} means samplerate in Hz
                // {2} means bits per sample
                // {3} means channel count
                // {4} means samplecount
                // {5} means size in bytes

                case BitrateManagementMode.VBR:
                    _encoderCommandLine = "-q " + f.Quality + " -o \"{0}\" -P -X -R {1} -B {2} -C {3} --mpeg-vers 4 -";
                    break;

                default:
                    _encoderCommandLine = "-b " + f.Bitrate + " -o \"{0}\" -P -X -R {1} -B {2} -C {3} --mpeg-vers 4 -";
                    break;
                }
            }
            if (audioJob.Settings is MP3Settings)
            {
                MP3Settings m = audioJob.Settings as MP3Settings;
                _mustSendWavHeaderToEncoderStdIn = true;
                _encoderExecutablePath           = this._settings.LamePath;

                switch (m.BitrateMode)
                {
                case BitrateManagementMode.VBR:
                    _encoderCommandLine = "-V " + (m.Quality / 10 - 1) + " -h --silent - \"{0}\"";
                    break;

                case BitrateManagementMode.CBR:
                    _encoderCommandLine = "-b " + m.Bitrate + " --cbr -h --silent - \"{0}\"";
                    break;

                case BitrateManagementMode.ABR:
                    _encoderCommandLine = "--abr " + m.Bitrate + " -h --silent - \"{0}\"";
                    break;
                }
            }

            //Just check encoder existance
            // NOTE(review): if no settings type matched above,
            // _encoderExecutablePath may still be null here and Path.Combine
            // will throw ArgumentNullException — confirm all settings types
            // are covered by the branches above.
            _encoderExecutablePath = Path.Combine(AppDomain.CurrentDomain.SetupInformation.ApplicationBase, _encoderExecutablePath);
            if (!File.Exists(_encoderExecutablePath))
            {
                deleteTempFiles();
                throw new EncoderMissingException(_encoderExecutablePath);
            }

            script.AppendFormat("ConvertAudioTo16bit(){0}", Environment.NewLine);


            // Footer: the channel-mixing helper functions referenced by the
            // downmix/upmix statements above. The id suffix keeps the names
            // unique per job.
            script.AppendLine(
                @"

return last

function x_dpl" + id + @"(clip a) 
{
	fl = GetChannel(a, 1)
	fr = GetChannel(a, 2)
	c = GetChannel(a, 3)
	sl = GetChannel(a, 5)
	sr = GetChannel(a, 6)
	ssr = MixAudio(sl, sr, 0.2222, 0.2222)
	ssl = Amplify(ssr, -1.0)
	fl_c = MixAudio(fl, c, 0.3254, 0.2301)
	fr_c = MixAudio(fr, c, 0.3254, 0.2301)
	l = MixAudio(ssl, fl_c, 1.0, 1.0)
	r = MixAudio(ssr, fr_c, 1.0, 1.0)
	return MergeChannels(l, r)
}

function x_dpl2" + id + @"(clip a) 
{
	fl = GetChannel(a, 1)
	fr = GetChannel(a, 2)
	c = GetChannel(a, 3)
	sl = GetChannel(a, 5)
	sr = GetChannel(a, 6)
	ssl = MixAudio(sl, sr, 0.2818, 0.1627).Amplify(-1.0)
	fl_c = MixAudio(fl, c, 0.3254, 0.2301)
	ssr = MixAudio(sl, sr, 0.1627, 0.2818)
	fr_c = MixAudio(fr, c, 0.3254, 0.2301)
	l = MixAudio(ssl, fl_c, 1.0, 1.0)
	r = MixAudio(ssr, fr_c, 1.0, 1.0)
	return MergeChannels(l, r)
}

function x_stereo" + id + @"(clip a) 
{
	fl = GetChannel(a, 1)
	fr = GetChannel(a, 2)
	c = GetChannel(a, 3)
	lfe = GetChannel(a, 4)
	sl = GetChannel(a, 5)
	sr = GetChannel(a, 6)
	l_sl = MixAudio(fl, sl, 0.2929, 0.2929)
	c_lfe = MixAudio(lfe, c, 0.2071, 0.2071)
	r_sr = MixAudio(fr, sr, 0.2929, 0.2929)
	l = MixAudio(l_sl, c_lfe, 1.0, 1.0)
	r = MixAudio(r_sr, c_lfe, 1.0, 1.0)
	return MergeChannels(l, r)
}

function x_upmix" + id + @"(clip a) 
{
    m = ConvertToMono(a)
    f = SuperEQ(a,""" + tmp + @"front.feq"")
    s = SuperEQ(a,""" + tmp + @"back.feq"") 
    c = SuperEQ(m,""" + tmp + @"center.feq"") 
    lfe = SuperEQ(m,""" + tmp + @"lfe.feq"") 
    return MergeChannels( f.getleftchannel, f.getrightchannel , c, lfe, s.getleftchannel, s.getrightchannel)
}

function x_upmixR" + id + @"(clip Stereo) 
{
	Front = mixaudio(Stereo.soxfilter(""filter 0-600""),mixaudio(Stereo.soxfilter(""filter 600-1200""),Stereo.soxfilter(""filter 1200-7000""),0.45,0.25),0.50,1)
	Back = mixaudio(Stereo.soxfilter(""filter 0-600""),mixaudio(Stereo.soxfilter(""filter 600-1200""),Stereo.soxfilter(""filter 1200-7000""),0.35,0.15),0.40,1)
	fl = GetLeftChannel(Front)
	fr = GetRightChannel(Front)
	cc = ConvertToMono(stereo).SoxFilter(""filter 625-24000"")
	lfe = ConvertToMono(stereo).SoxFilter(""lowpass 100"",""vol -0.5"")
	sl = GetLeftChannel(Back)
	sr = GetRightChannel(Back)
	sl = DelayAudio(sl,0.02)
	sr = DelayAudio(sr,0.02)
    return MergeChannels(fl,fr,cc,lfe,sl,sr)
}

function x_upmixC" + id + @"(clip stereo) 
{
	left = stereo.GetLeftChannel()
	right = stereo.GetRightChannel()
	fl = mixaudio(left.soxfilter(""filter 0-24000""),right.soxfilter(""filter 0-24000""),0.6,-0.5)
	fr = mixaudio(right.soxfilter(""filter 0-24000""),left.soxfilter(""filter 0-24000""),0.6,-0.5)
	cc = ConvertToMono(stereo).SoxFilter(""filter 625-24000"")
	lfe = ConvertToMono(stereo).SoxFilter(""lowpass 100"",""vol -0.5"")
	sl = mixaudio(left.soxfilter(""filter 0-24000""),right.soxfilter(""filter 0-24000""),0.5,-0.4)
	sr = mixaudio(right.soxfilter(""filter 0-24000""),left.soxfilter(""filter 0-24000""),0.5,-0.4)
	sl = DelayAudio(sl,0.02)
	sr = DelayAudio(sr,0.02)
    return MergeChannels(fl,fr,cc,lfe,sl,sr)
}
                                                                                                                                                     

"
                );
            _avisynthAudioScript = script.ToString();
        }
 /// <summary>
 /// Splits the audio configuration into encodable jobs and the streams the
 /// muxer will consume. For every job that survives configuration checking,
 /// a parallel MuxStream (file name for the muxer) and encoder-type entry
 /// is produced.
 /// </summary>
 /// <param name="encodable">audio jobs that must be encoded</param>
 /// <param name="muxable">mux streams derived from those jobs, path filled out and a blank language</param>
 /// <param name="muxTypes">encoder type of each corresponding stream</param>
 private void separateEncodableAndMuxableAudioStreams(out AudioJob[] encodable, out MuxStream[] muxable, out AudioEncoderType[] muxTypes)
 {
     // Improperly configured jobs are discarded here; the remainder is
     // all encodable.
     encodable = this.getConfiguredAudioJobs();

     muxable  = new MuxStream[encodable.Length];
     muxTypes = new AudioEncoderType[encodable.Length];
     for (int i = 0; i < encodable.Length; i++)
     {
         muxable[i]  = encodable[i].ToMuxStream();
         muxTypes[i] = encodable[i].Settings.EncoderType;
     }
 }
 /// <summary>
 /// Shrinks the audio array so it only contains streams that are muxed
 /// directly; any stream whose path matches the output of one of the
 /// encoding jobs (i.e. it will be produced by encoding first) is dropped.
 /// </summary>
 /// <param name="audio">all files to be muxed, including those produced by encoding; replaced in place</param>
 /// <param name="aStreams">all files being encoded; their outputs are removed from the audio array</param>
 private void removeStreamsToBeEncoded(ref MuxStream[] audio, AudioJob[] aStreams)
 {
     List<MuxStream> direct = new List<MuxStream>();
     foreach (MuxStream candidate in audio)
     {
         // Is this stream the output of one of the encode jobs?
         bool producedByEncoder = false;
         foreach (AudioJob job in aStreams)
         {
             if (candidate.path == job.Output)
             {
                 producedByEncoder = true;
                 break;
             }
         }

         // Keep only streams that no encode job produces.
         if (!producedByEncoder)
         {
             direct.Add(candidate);
         }
     }
     audio = direct.ToArray();
 }