/// <summary>
        /// Prepare an audio file. This will be a single segment of a larger audio file,
        /// modified based on the provided settings.
        /// </summary>
        /// <param name="outputDirectory">
        /// The analysis Base Directory.
        /// </param>
        /// <param name="source">
        /// The source audio file.
        /// </param>
        /// <param name="outputMediaType">
        /// The output Media Type.
        /// </param>
        /// <param name="startOffset">
        /// The start Offset from start of entire original file.
        /// </param>
        /// <param name="endOffset">
        /// The end Offset from start of entire original file.
        /// </param>
        /// <param name="targetSampleRateHz">
        /// The target Sample Rate Hz.
        /// </param>
        /// <returns>
        /// The prepared file. The returned FileSegment will have the targetFile and OriginalFileDuration set -
        /// these are the path to the segmented file and the duration of the segmented file.
        /// The start and end offsets will not be set.
        /// </returns>
        public async Task<FileSegment> PrepareFile(
            DirectoryInfo outputDirectory,
            string source,
            string outputMediaType,
            TimeSpan startOffset,
            TimeSpan endOffset,
            int targetSampleRateHz)
        {
            throw new NotImplementedException();
            var request = new AudioUtilityRequest
            {
                OffsetStart      = startOffset,
                OffsetEnd        = endOffset,
                TargetSampleRate = targetSampleRateHz,
            };
            var preparedFile = AudioFilePreparer.PrepareFile(
                outputDirectory,
                source.ToFileInfo(),
                outputMediaType,
                request,
                TempFileHelper.TempDir());

            return(new FileSegment(
                       preparedFile.TargetInfo.SourceFile,
                       preparedFile.TargetInfo.SampleRate.Value,
                       preparedFile.TargetInfo.Duration.Value));
        }
Example #2
        /// <summary>
        /// Prepare an audio file. This will be a single segment of a larger audio file, modified based on the analysisSettings.
        /// </summary>
        /// <param name="outputDirectory">
        /// The analysis Base Directory.
        /// </param>
        /// <param name="source">
        /// The source audio file.
        /// </param>
        /// <param name="outputMediaType">
        /// The output Media Type.
        /// </param>
        /// <param name="startOffset">
        /// The start Offset from start of entire original file.
        /// </param>
        /// <param name="endOffset">
        /// The end Offset from start of entire original file.
        /// </param>
        /// <param name="targetSampleRateHz">
        /// The target Sample Rate Hz.
        /// </param>
        /// <returns>
        /// The prepared file. The returned FileSegment will have the targetFile and OriginalFileDuration set -
        /// these are the path to the segmented file and the duration of the segmented file.
        /// The start and end offsets will not be set.
        /// </returns>
        public async Task<FileSegment> PrepareFile(
            DirectoryInfo outputDirectory,
            string source,
            string outputMediaType,
            TimeSpan startOffset,
            TimeSpan endOffset,
            int targetSampleRateHz)
        {
            return(await Task.Run(() =>
            {
                FileInfo sourceFileInfo = source.ToFileInfo();

                var request = new AudioUtilityRequest
                {
                    OffsetStart = startOffset,
                    OffsetEnd = endOffset,
                    TargetSampleRate = targetSampleRateHz,
                };
                var preparedFile = AudioFilePreparer.PrepareFile(
                    outputDirectory,
                    sourceFileInfo,
                    outputMediaType,
                    request,
                    TempFileHelper.TempDir(),
                    oldFormat: this.useOldNamingFormat);

                return new FileSegment(
                    preparedFile.TargetInfo.SourceFile,
                    preparedFile.TargetInfo.SampleRate.Value,
                    preparedFile.TargetInfo.Duration.Value);
            }));
        }
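For context, a minimal sketch of how a caller might consume this overload. The ISourcePreparer parameter type and all paths are assumptions for illustration only; they are not part of the snippet above.

        // Hypothetical caller: "preparer" is assumed to be an instance of the class
        // (or an ISourcePreparer-style interface) declaring the PrepareFile overload above.
        public static async Task<FileSegment> CutOneMinuteSegment(ISourcePreparer preparer)
        {
            FileSegment segment = await preparer.PrepareFile(
                new DirectoryInfo(@"C:\temp\analysis"),  // output directory (placeholder path)
                @"C:\recordings\site1.wav",              // source audio file (placeholder path)
                MediaTypes.MediaTypeWav,                 // output media type
                TimeSpan.FromMinutes(1),                 // start offset from start of the original file
                TimeSpan.FromMinutes(2),                 // end offset from start of the original file
                targetSampleRateHz: 22050);

            // The returned FileSegment carries the cut file, its sample rate and its duration;
            // start and end offsets are not set (see the <returns> remarks above).
            return segment;
        }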
Example #3
        public static void SeparateChannels(
            Arguments args,
            FileInfo ipFile,
            out double[] samplesL,
            out double[] samplesR,
            out double epsilon)
        {
            // You would then use WavReader on the resulting preparedFile.
            // The channel-select functionality does not currently exist in AnalyzeLongRecording. I need to add it.
            var request = new AudioUtilityRequest
            {
                OffsetStart      = args.StartOffset,
                OffsetEnd        = args.EndOffset,
                TargetSampleRate = args.SamplingRate,
                Channels         = new[] { 1, 2 },
                MixDownToMono    = false,
            };
            var audioFile = AudioFilePreparer.PrepareFile(args.OpDir, ipFile, args.OutputMediaType, request, args.OpDir).TargetInfo.SourceFile;

            var wavReader = new WavReader(audioFile);

            var recording = new AudioRecording(wavReader);

            samplesL = recording.WavReader.GetChannel(0);
            samplesR = recording.WavReader.GetChannel(1);
            epsilon  = Math.Pow(0.5, recording.BitsPerSample - 1);
        }
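A brief usage sketch follows, assuming this method is reachable from the caller and that args already carries the StartOffset, EndOffset, SamplingRate, OutputMediaType and OpDir values read above; everything else is illustrative.

        // Hypothetical call site; all names outside SeparateChannels are placeholders.
        public static void CompareChannels(Arguments args, FileInfo inputFile)
        {
            SeparateChannels(args, inputFile, out double[] left, out double[] right, out double epsilon);

            // Largest per-sample difference between the two channels, reported against epsilon
            // (the quantisation floor computed by SeparateChannels).
            double maxDifference = 0;
            int length = Math.Min(left.Length, right.Length);
            for (int i = 0; i < length; i++)
            {
                maxDifference = Math.Max(maxDifference, Math.Abs(left[i] - right[i]));
            }

            Console.WriteLine($"Max inter-channel difference: {maxDifference} (epsilon = {epsilon})");
        }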
Example #4
        /// <summary>
        /// A WRAPPER AROUND THE analyser.Analyze(analysisSettings) METHOD
        /// To be called as an executable with command line arguments.
        /// </summary>
        public static void Execute(Arguments arguments)
        {
            Contract.Requires(arguments != null);

            var (analysisSettings, segmentSettings) = arguments.ToAnalysisSettings();
            TimeSpan offsetStart  = TimeSpan.FromSeconds(arguments.Start ?? 0);
            TimeSpan duration     = TimeSpan.FromSeconds(arguments.Duration ?? 0);
            int      resampleRate = ConfigDictionary.GetInt(AnalysisKeys.ResampleRate, analysisSettings.ConfigDict);

            // EXTRACT THE REQUIRED RECORDING SEGMENT
            FileInfo tempF = segmentSettings.SegmentAudioFile;

            if (tempF.Exists)
            {
                tempF.Delete();
            }

            if (duration == TimeSpan.Zero)
            {
                // Process entire file
                AudioFilePreparer.PrepareFile(arguments.Source, tempF, new AudioUtilityRequest {
                    TargetSampleRate = resampleRate
                }, analysisSettings.AnalysisTempDirectoryFallback);
                ////var fiSegment = AudioFilePreparer.PrepareFile(diOutputDir, fiSourceFile, , Human2.RESAMPLE_RATE);
            }
            else
            {
                AudioFilePreparer.PrepareFile(arguments.Source, tempF, new AudioUtilityRequest {
                    TargetSampleRate = resampleRate, OffsetStart = offsetStart, OffsetEnd = offsetStart.Add(duration)
                }, analysisSettings.AnalysisTempDirectoryFallback);
                ////var fiSegmentOfSourceFile = AudioFilePreparer.PrepareFile(diOutputDir, new FileInfo(recordingPath), MediaTypes.MediaTypeWav, TimeSpan.FromMinutes(2), TimeSpan.FromMinutes(3), RESAMPLE_RATE);
            }

            //DO THE ANALYSIS
            // #############################################################################################################################################
            // BROKEN!
            throw new NotImplementedException("Broken in code updates");
            IAnalyser2      analyser = null; //new Rain_OBSOLETE();
            AnalysisResult2 result   = analyser.Analyze<FileInfo>(analysisSettings, null /* broken */);

            /*DataTable dt = result.Data;
             * //#############################################################################################################################################
             *
             * // ADD IN ADDITIONAL INFO TO RESULTS TABLE
             * if (dt != null)
             * {
             *  int iter = 0; // dummy - iteration number would ordinarily be available at this point.
             *  int startMinute = (int)offsetStart.TotalMinutes;
             *  foreach (DataRow row in dt.Rows)
             *  {
             *      row[InitialiseIndexProperties.KEYRankOrder] = iter;
             *      row[InitialiseIndexProperties.KEYStartMinute] = startMinute;
             *      row[InitialiseIndexProperties.KEYSegmentDuration] = result.AudioDuration.TotalSeconds;
             *  }
             *
             *  CsvTools.DataTable2CSV(dt, segmentSettings.SegmentSummaryIndicesFile.FullName);
             *  //DataTableTools.WriteTable2Console(dt);
             * }*/
        }
        /// <summary>
        /// A WRAPPER AROUND THE analyzer.Analyze(analysisSettings) METHOD
        ///     To be called as an executable with command line arguments.
        /// </summary>
        /// <param name="arguments">
        /// The command line arguments.
        /// </param>
        public static void Execute(Arguments arguments)
        {
            Contract.Requires(arguments != null);

            var (analysisSettings, segmentSettings) = arguments.ToAnalysisSettings();
            TimeSpan start    = TimeSpan.FromSeconds(arguments.Start ?? 0);
            TimeSpan duration = TimeSpan.FromSeconds(arguments.Duration ?? 0);

            // EXTRACT THE REQUIRED RECORDING SEGMENT
            FileInfo tempF = segmentSettings.SegmentAudioFile;

            if (duration == TimeSpan.Zero)
            {
                // Process entire file
                AudioFilePreparer.PrepareFile(
                    arguments.Source,
                    tempF,
                    new AudioUtilityRequest
                    {
                        TargetSampleRate = ResampleRate,
                    },
                    analysisSettings.AnalysisTempDirectoryFallback);
            }
            else
            {
                AudioFilePreparer.PrepareFile(
                    arguments.Source,
                    tempF,
                    new AudioUtilityRequest
                    {
                        TargetSampleRate = ResampleRate,
                        OffsetStart      = start,
                        OffsetEnd        = start.Add(duration),
                    },
                    analysisSettings.AnalysisTempDirectoryFallback);
            }

            // DO THE ANALYSIS
            /* ############################################################################################################################################# */
            IAnalyser2 analyser = new LitoriaFallax_OBSOLETE();

            //IAnalyser2 analyser = new Canetoad();
            analyser.BeforeAnalyze(analysisSettings);
            AnalysisResult2 result = analyser.Analyze(analysisSettings, segmentSettings);

            /* ############################################################################################################################################# */

            if (result.Events.Length > 0)
            {
                LoggedConsole.WriteLine("{0} events found", result.Events.Length);
            }
            else
            {
                LoggedConsole.WriteLine("No events found");
            }
        }
        /// <summary>
        /// This entrypoint should be used for testing short files (less than 2 minutes)
        /// </summary>
        public static void Execute(Arguments arguments)
        {
            MainEntry.WarnIfDevleoperEntryUsed("EventRecognizer entry does not do any audio manipulation.");
            Log.Info("Running event recognizer");

            var sourceAudio     = arguments.Source;
            var configFile      = arguments.Config.ToFileInfo();
            var outputDirectory = arguments.Output;

            if (configFile == null)
            {
                throw new FileNotFoundException("No config file argument provided");
            }
            else if (!configFile.Exists)
            {
                Log.Warn($"Config file {configFile.FullName} not found... attempting to resolve config file");
                configFile = ConfigFile.Resolve(configFile.Name, Directory.GetCurrentDirectory().ToDirectoryInfo());
            }

            LoggedConsole.WriteLine("# Recording file:      " + sourceAudio.FullName);
            LoggedConsole.WriteLine("# Configuration file:  " + configFile);
            LoggedConsole.WriteLine("# Output folder:       " + outputDirectory);

            // find an appropriate event IAnalyzer
            IAnalyser2 recognizer = AnalyseLongRecording.FindAndCheckAnalyser<IEventRecognizer>(
                arguments.AnalysisIdentifier,
                configFile.Name);

            Log.Info("Attempting to run recognizer: " + recognizer.Identifier);

            Log.Info("Reading configuration file");
            Config configuration = ConfigFile.Deserialize<RecognizerBase.RecognizerConfig>(configFile);

            // get default settings
            AnalysisSettings analysisSettings = recognizer.DefaultSettings;

            // convert arguments to analysis settings
            analysisSettings = arguments.ToAnalysisSettings(
                analysisSettings,
                outputIntermediate: true,
                resultSubDirectory: recognizer.Identifier,
                configuration: configuration);

            // Enable this if you want the Config file ResampleRate parameter to work.
            // Generally however the ResampleRate should remain at 22050Hz for all recognizers.
            //analysisSettings.AnalysisTargetSampleRate = (int) configuration[AnalysisKeys.ResampleRate];

            // get transform input audio file - if needed
            Log.Info("Querying source audio file");
            var audioUtilityRequest = new AudioUtilityRequest()
            {
                TargetSampleRate = analysisSettings.AnalysisTargetSampleRate,
            };
            var preparedFile = AudioFilePreparer.PrepareFile(
                outputDirectory,
                sourceAudio,
                MediaTypes.MediaTypeWav,
                audioUtilityRequest,
                outputDirectory);

            var source          = preparedFile.SourceInfo.ToSegment();
            var prepared        = preparedFile.TargetInfo.ToSegment(FileSegment.FileDateBehavior.None);
            var segmentSettings = new SegmentSettings<FileInfo>(
                analysisSettings,
                source,
                (analysisSettings.AnalysisOutputDirectory, analysisSettings.AnalysisTempDirectory),
                prepared);

            if (preparedFile.TargetInfo.SampleRate.Value != analysisSettings.AnalysisTargetSampleRate)
            {
                Log.Warn("Input audio sample rate does not match target sample rate");
            }

            // Execute a pre analyzer hook
            recognizer.BeforeAnalyze(analysisSettings);

            // execute actual analysis - output data will be written
            Log.Info("Running recognizer: " + recognizer.Identifier);
            AnalysisResult2 results = recognizer.Analyze(analysisSettings, segmentSettings);

            // run summarize code - output data can be written
            Log.Info("Running recognizer summary: " + recognizer.Identifier);
            recognizer.SummariseResults(
                analysisSettings,
                source,
                results.Events,
                results.SummaryIndices,
                results.SpectralIndices,
                new[] { results });

            //Log.Info("Recognizer run, saving extra results");
            // TODO: Michael, output anything else as you wish.

            Log.Debug("Clean up temporary files");
            if (source.Source.FullName != prepared.Source.FullName)
            {
                prepared.Source.Delete();
            }

            int eventCount = results?.Events?.Length ?? 0;

            Log.Info($"Number of detected events: {eventCount}");
            Log.Success(recognizer.Identifier + " recognizer has completed");
        }
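A minimal sketch of driving this entry point programmatically, assuming the Arguments type exposes settable Source, Config, Output and AnalysisIdentifier members as read above; the paths and recognizer identifier are placeholders.

        // Hypothetical driver; every value below is a placeholder.
        public static void RunRecognizerExample()
        {
            var arguments = new Arguments
            {
                Source = new FileInfo(@"C:\recordings\two-minute-clip.wav"),
                Config = "ExampleRecognizer.yml",            // resolved via ConfigFile.Resolve if not found locally
                Output = new DirectoryInfo(@"C:\temp\recognizer-output"),
                AnalysisIdentifier = "Example.Recognizer",   // must resolve to an IEventRecognizer
            };

            Execute(arguments);
        }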
Example #7
        /// <summary>
        /// A WRAPPER AROUND THE analyser.Analyse(analysisSettings) METHOD
        /// To be called as an executable with command line arguments.
        /// </summary>
        /// <param name="sourcePath"></param>
        /// <param name="configPath"></param>
        /// <param name="outputPath"></param>
        public static int Execute(string[] args)
        {
            int status = 0;

            if (args.Length < 4)
            {
                Console.WriteLine("Require at least 4 command line arguments.");
                status = 1;
                return(status);
            }
            //GET FIRST THREE OBLIGATORY COMMAND LINE ARGUMENTS
            string   recordingPath = args[0];
            string   configPath    = args[1];
            string   outputDir     = args[2];
            FileInfo fiSource      = new FileInfo(recordingPath);

            if (!fiSource.Exists)
            {
                Console.WriteLine("Source file does not exist: " + recordingPath);
                status = 2;
                return(status);
            }
            FileInfo fiConfig = new FileInfo(configPath);

            if (!fiConfig.Exists)
            {
                Console.WriteLine("Config file does not exist: " + configPath);
                status = 2;
                return(status);
            }
            DirectoryInfo diOP = new DirectoryInfo(outputDir);

            if (!diOP.Exists)
            {
                Console.WriteLine("Output directory does not exist: " + outputDir);
                status = 2;
                return(status);
            }

            //INIT SETTINGS
            AnalysisSettings analysisSettings = new AnalysisSettings();

            analysisSettings.ConfigFile           = fiConfig;
            analysisSettings.AnalysisRunDirectory = diOP;
            analysisSettings.AudioFile            = null;
            analysisSettings.EventsFile           = null;
            analysisSettings.IndicesFile          = null;
            analysisSettings.ImageFile            = null;
            TimeSpan tsStart       = new TimeSpan(0, 0, 0);
            TimeSpan tsDuration    = new TimeSpan(0, 0, 0);
            var      configuration = new ConfigDictionary(analysisSettings.ConfigFile.FullName);

            analysisSettings.ConfigDict = configuration.GetTable();

            //PROCESS REMAINDER OF THE OPTIONAL COMMAND LINE ARGUMENTS
            for (int i = 3; i < args.Length; i++)
            {
                string[] parts = args[i].Split(':');
                if (parts[0].StartsWith("-tmpwav"))
                {
                    var outputWavPath = Path.Combine(outputDir, parts[1]);
                    analysisSettings.AudioFile = new FileInfo(outputWavPath);
                }
                else if (parts[0].StartsWith("-events"))
                {
                    string eventsPath = Path.Combine(outputDir, parts[1]);
                    analysisSettings.EventsFile = new FileInfo(eventsPath);
                }
                else if (parts[0].StartsWith("-indices"))
                {
                    string indicesPath = Path.Combine(outputDir, parts[1]);
                    analysisSettings.IndicesFile = new FileInfo(indicesPath);
                }
                else if (parts[0].StartsWith("-sgram"))
                {
                    string sonoImagePath = Path.Combine(outputDir, parts[1]);
                    analysisSettings.ImageFile = new FileInfo(sonoImagePath);
                }
                else if (parts[0].StartsWith("-start"))
                {
                    int s = Int32.Parse(parts[1]);
                    tsStart = new TimeSpan(0, 0, s);
                }
                else if (parts[0].StartsWith("-duration"))
                {
                    int s = Int32.Parse(parts[1]);
                    tsDuration = new TimeSpan(0, 0, s);
                    if (tsDuration.TotalMinutes > 10)
                    {
                        Console.WriteLine("Segment duration cannot exceed 10 minutes.");
                        status = 3;
                        return(status);
                    }
                }
            }

            //EXTRACT THE REQUIRED RECORDING SEGMENT
            FileInfo tempF = analysisSettings.AudioFile;

            if (tsDuration.TotalSeconds == 0)   //Process entire file
            {
                AudioFilePreparer.PrepareFile(fiSource, tempF, new AudioUtilityRequest {
                    SampleRate = LSKiwiHelper.RESAMPLE_RATE
                });
                //var fiSegment = AudioFilePreparer.PrepareFile(diOutputDir, fiSourceFile, , Human2.RESAMPLE_RATE);
            }
            else
            {
                AudioFilePreparer.PrepareFile(fiSource, tempF, new AudioUtilityRequest {
                    SampleRate = LSKiwiHelper.RESAMPLE_RATE, OffsetStart = tsStart, OffsetEnd = tsStart.Add(tsDuration)
                });
                //var fiSegmentOfSourceFile = AudioFilePreparer.PrepareFile(diOutputDir, new FileInfo(recordingPath), MediaTypes.MediaTypeWav, TimeSpan.FromMinutes(2), TimeSpan.FromMinutes(3), RESAMPLE_RATE);
            }

            //DO THE ANALYSIS
            //#############################################################################################################################################
            IAnalyser      analyser = new LSKiwi2();
            AnalysisResult result   = analyser.Analyse(analysisSettings);
            DataTable      dt       = result.Data;

            //#############################################################################################################################################

            //ADD IN ADDITIONAL INFO TO RESULTS TABLE
            if (dt != null)
            {
                AddContext2Table(dt, tsStart, result.AudioDuration);
                CsvTools.DataTable2CSV(dt, analysisSettings.EventsFile.FullName);
                //DataTableTools.WriteTable(augmentedTable);
            }
            else
            {
                return(-993);  //error!!
            }

            return(status);
        }
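For reference, a sketch of the command line this parser expects: three positional arguments followed by optional -name:value switches. All paths below are placeholders.

        // Hypothetical invocation matching the argument-parsing loop above.
        public static int ExecuteExample()
        {
            string[] args =
            {
                @"C:\recordings\site1.wav",  // recordingPath
                @"C:\config\LSKiwi2.cfg",    // configPath
                @"C:\temp\output",           // outputDir
                "-tmpwav:temp.wav",          // analysisSettings.AudioFile
                "-events:events.csv",        // analysisSettings.EventsFile
                "-start:60",                 // tsStart    = 00:01:00
                "-duration:120",             // tsDuration = 00:02:00 (must not exceed 10 minutes)
            };

            return Execute(args);
        }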
Example #8
        /// <summary>
        /// A WRAPPER AROUND THE analyzer.Analyze(analysisSettings) METHOD
        ///     To be called as an executable with command line arguments.
        /// </summary>
        /// <param name="arguments">
        /// The command line arguments.
        /// </param>
        public static void Execute(Arguments arguments)
        {
            Contract.Requires(arguments != null);

            TimeSpan start    = TimeSpan.FromSeconds(arguments.Start ?? 0);
            TimeSpan duration = TimeSpan.FromSeconds(arguments.Duration ?? 0);

            // EXTRACT THE REQUIRED RECORDING SEGMENT
            AudioUtilityRequest audioUtilityRequest;

            if (duration == TimeSpan.Zero)
            {
                // Process entire file
                audioUtilityRequest = new AudioUtilityRequest
                {
                    TargetSampleRate = RESAMPLE_RATE,
                };
            }
            else
            {
                audioUtilityRequest = new AudioUtilityRequest
                {
                    TargetSampleRate = RESAMPLE_RATE,
                    OffsetStart      = start,
                    OffsetEnd        = start.Add(duration),
                };
            }

            var preparedFile = AudioFilePreparer.PrepareFile(
                arguments.Output,
                arguments.Source,
                MediaTypes.MediaTypeWav,
                audioUtilityRequest,
                arguments.Output);

            var (analysisSettings, segmentSettings) = arguments.ToAnalysisSettings(
                sourceSegment: preparedFile.SourceInfo.ToSegment(),
                preparedSegment: preparedFile.TargetInfo.ToSegment());

            // DO THE ANALYSIS
            /* ############################################################################################################################################# */
            IAnalyser2 analyser = new CanetoadOld_OBSOLETE();

            analyser.BeforeAnalyze(analysisSettings);
            AnalysisResult2 result = analyser.Analyze(analysisSettings, segmentSettings);

            /* ############################################################################################################################################# */

            if (result.Events.Length > 0)
            {
                LoggedConsole.WriteLine("{0} events found", result.Events.Length);
                if (Log.IsDebugEnabled)
                {
                    var firstEvent = (AcousticEvent)result.Events.First();
                    Log.Debug($"Event 0 profile: start={firstEvent.TimeStart}, duration={firstEvent.TimeStart - firstEvent.TimeEnd}");
                }
            }
            else
            {
                LoggedConsole.WriteLine("No events found");
            }
        }
Example #9
        public static void Execute(Arguments arguments)
        {
            const string Title = "# DETERMINING SIGNAL TO NOISE RATIO IN RECORDING";
            string       date  = "# DATE AND TIME: " + DateTime.Now;

            Log.WriteLine(Title);
            Log.WriteLine(date);
            Log.Verbosity = 1;

            var input                    = arguments.Source;
            var sourceFileName           = input.Name;
            var outputDir                = arguments.Output;
            var fileNameWithoutExtension = Path.GetFileNameWithoutExtension(input.FullName);
            var outputTxtPath            = Path.Combine(outputDir.FullName, fileNameWithoutExtension + ".txt").ToFileInfo();

            Log.WriteIfVerbose("# Recording file: " + input.FullName);
            Log.WriteIfVerbose("# Config file:    " + arguments.Config);
            Log.WriteIfVerbose("# Output folder =" + outputDir.FullName);
            FileTools.WriteTextFile(outputTxtPath.FullName, date + "\n# Recording file: " + input.FullName);

            // READ PARAMETER VALUES FROM THE YAML CONFIGURATION FILE
            Config configuration = ConfigFile.Deserialize(arguments.Config);

            //ii: SET SONOGRAM CONFIGURATION
            SonogramConfig sonoConfig = new SonogramConfig(); //default values config

            sonoConfig.SourceFName        = input.FullName;
            sonoConfig.WindowSize         = configuration.GetIntOrNull(AnalysisKeys.KeyFrameSize) ?? 512;
            sonoConfig.WindowOverlap      = configuration.GetDoubleOrNull(AnalysisKeys.FrameOverlap) ?? 0.5;
            sonoConfig.WindowFunction     = configuration[AnalysisKeys.KeyWindowFunction];
            sonoConfig.NPointSmoothFFT    = configuration.GetIntOrNull(AnalysisKeys.KeyNPointSmoothFft) ?? 256;
            sonoConfig.NoiseReductionType = SNR.KeyToNoiseReductionType(configuration[AnalysisKeys.NoiseReductionType]);

            int    minHz          = configuration.GetIntOrNull("MIN_HZ") ?? 0;
            int    maxHz          = configuration.GetIntOrNull("MAX_HZ") ?? 11050;
            double segK1          = configuration.GetDoubleOrNull("SEGMENTATION_THRESHOLD_K1") ?? 0;
            double segK2          = configuration.GetDoubleOrNull("SEGMENTATION_THRESHOLD_K2") ?? 0;
            double latency        = configuration.GetDoubleOrNull("K1_K2_LATENCY") ?? 0;
            double vocalGap       = configuration.GetDoubleOrNull("VOCAL_GAP") ?? 0;
            double minVocalLength = configuration.GetDoubleOrNull("MIN_VOCAL_DURATION") ?? 0;

            //bool DRAW_SONOGRAMS = (bool?)configuration.DrawSonograms ?? true;    //options to draw sonogram

            //double intensityThreshold = Acoustics.AED.Default.intensityThreshold;
            //if (dict.ContainsKey(key_AED_INTENSITY_THRESHOLD)) intensityThreshold = Double.Parse(dict[key_AED_INTENSITY_THRESHOLD]);
            //int smallAreaThreshold = Acoustics.AED.Default.smallAreaThreshold;
            //if( dict.ContainsKey(key_AED_SMALL_AREA_THRESHOLD))   smallAreaThreshold = Int32.Parse(dict[key_AED_SMALL_AREA_THRESHOLD]);

            // Convert input recording into wav
            var convertParameters = new AudioUtilityRequest {
                TargetSampleRate = 17640
            };
            var fileToAnalyse = new FileInfo(Path.Combine(outputDir.FullName, "temp.wav"));

            if (File.Exists(fileToAnalyse.FullName))
            {
                File.Delete(fileToAnalyse.FullName);
            }

            var convertedFileInfo = AudioFilePreparer.PrepareFile(
                input,
                fileToAnalyse,
                convertParameters,
                outputDir);

            // (A) ##########################################################################################################################
            AudioRecording recording              = new AudioRecording(fileToAnalyse.FullName);
            int            signalLength           = recording.WavReader.Samples.Length;
            TimeSpan       wavDuration            = TimeSpan.FromSeconds(recording.WavReader.Time.TotalSeconds);
            double         frameDurationInSeconds = sonoConfig.WindowSize / (double)recording.SampleRate;
            TimeSpan       frameDuration          = TimeSpan.FromTicks((long)(frameDurationInSeconds * TimeSpan.TicksPerSecond));
            int            stepSize = (int)Math.Floor(sonoConfig.WindowSize * (1 - sonoConfig.WindowOverlap));
            double         stepDurationInSeconds = sonoConfig.WindowSize * (1 - sonoConfig.WindowOverlap)
                                                   / recording.SampleRate;
            TimeSpan stepDuration    = TimeSpan.FromTicks((long)(stepDurationInSeconds * TimeSpan.TicksPerSecond));
            double   framesPerSecond = 1 / stepDuration.TotalSeconds;
            int      frameCount      = signalLength / stepSize;
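            // Worked example of the frame parameters above, assuming the default WindowSize (512),
            // WindowOverlap (0.5) and the 17640 Hz target sample rate requested above:
            //   frameDuration   = 512 / 17640      ≈ 29.02 ms
            //   stepSize        = floor(512 * 0.5) = 256 samples
            //   stepDuration    = 256 / 17640      ≈ 14.51 ms
            //   framesPerSecond = 1 / 0.01451      ≈ 68.9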

            // (B) ################################## EXTRACT ENVELOPE and SPECTROGRAM ##################################
            var dspOutput = DSP_Frames.ExtractEnvelopeAndFfts(
                recording,
                sonoConfig.WindowSize,
                sonoConfig.WindowOverlap);

            //double[] avAbsolute = dspOutput.Average; //average absolute value over the minute recording

            // (C) ################################## GET SIGNAL WAVEFORM ##################################
            double[] signalEnvelope   = dspOutput.Envelope;
            double   avSignalEnvelope = signalEnvelope.Average();

            // (D) ################################## GET Amplitude Spectrogram ##################################
            double[,] amplitudeSpectrogram = dspOutput.AmplitudeSpectrogram; // get amplitude spectrogram.

            // (E) ################################## Generate deciBel spectrogram from amplitude spectrogram
            double epsilon = Math.Pow(0.5, recording.BitsPerSample - 1);
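            // For a 16-bit recording this evaluates to 0.5^15 = 2^-15 ≈ 3.05e-5, i.e. one
            // quantisation step once samples are normalised to the range [-1, 1).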

            double[,] deciBelSpectrogram = MFCCStuff.DecibelSpectra(
                dspOutput.AmplitudeSpectrogram,
                dspOutput.WindowPower,
                recording.SampleRate,
                epsilon);

            LoggedConsole.WriteLine("# Finished calculating decibel spectrogram.");

            StringBuilder sb = new StringBuilder();

            sb.AppendLine("\nSIGNAL PARAMETERS");
            sb.AppendLine("Signal Duration     =" + wavDuration);
            sb.AppendLine("Sample Rate         =" + recording.SampleRate);
            sb.AppendLine("Min Signal Value    =" + dspOutput.MinSignalValue);
            sb.AppendLine("Max Signal Value    =" + dspOutput.MaxSignalValue);
            sb.AppendLine("Max Absolute Ampl   =" + signalEnvelope.Max().ToString("F3") + "  (See Note 1)");
            sb.AppendLine("Epsilon Ampl (1 bit)=" + epsilon);

            sb.AppendLine("\nFRAME PARAMETERS");
            sb.AppendLine("Window Size    =" + sonoConfig.WindowSize);
            sb.AppendLine("Frame Count    =" + frameCount);
            sb.AppendLine("Envelope length=" + signalEnvelope.Length);
            sb.AppendLine("Frame Duration =" + frameDuration.TotalMilliseconds.ToString("F3") + " ms");
            sb.AppendLine("Frame overlap  =" + sonoConfig.WindowOverlap);
            sb.AppendLine("Step Size      =" + stepSize);
            sb.AppendLine("Step duration  =" + stepDuration.TotalMilliseconds.ToString("F3") + " ms");
            sb.AppendLine("Frames Per Sec =" + framesPerSecond.ToString("F1"));

            sb.AppendLine("\nFREQUENCY PARAMETERS");
            sb.AppendLine("Nyquist Freq    =" + dspOutput.NyquistFreq + " Hz");
            sb.AppendLine("Freq Bin Width  =" + dspOutput.FreqBinWidth.ToString("F2") + " Hz");
            sb.AppendLine("Nyquist Bin     =" + dspOutput.NyquistBin);

            sb.AppendLine("\nENERGY PARAMETERS");
            double val = dspOutput.FrameEnergy.Min();

            sb.AppendLine(
                "Minimum dB / frame       =" + (10 * Math.Log10(val)).ToString("F2") + "  (See Notes 2, 3 & 4)");
            val = dspOutput.FrameEnergy.Max();
            sb.AppendLine("Maximum dB / frame       =" + (10 * Math.Log10(val)).ToString("F2"));

            sb.AppendLine("\ndB NOISE SUBTRACTION");
            double noiseRange = 2.0;

            //sb.AppendLine("Noise (estimate of mode) =" + sonogram.SnrData.NoiseSubtracted.ToString("F3") + " dB   (See Note 5)");
            //double noiseSpan = sonogram.SnrData.NoiseRange;
            //sb.AppendLine("Noise range              =" + noiseSpan.ToString("F2") + " to +" + (noiseSpan * -1).ToString("F2") + " dB   (See Note 6)");
            //sb.AppendLine("SNR (max frame-noise)    =" + sonogram.SnrData.Snr.ToString("F2") + " dB   (See Note 7)");

            //sb.Append("\nSEGMENTATION PARAMETERS");
            //sb.Append("Segment Thresholds K1: {0:f2}.  K2: {1:f2}  (See Note 8)", segK1, segK2);
            //sb.Append("# Event Count = " + predictedEvents.Count());

            FileTools.Append2TextFile(outputTxtPath.FullName, sb.ToString());
            FileTools.Append2TextFile(outputTxtPath.FullName, GetSNRNotes(noiseRange).ToString());

            // (F) ################################## DRAW IMAGE 1: original spectrogram
            Log.WriteLine("# Start drawing noise reduced sonograms.");
            TimeSpan X_AxisInterval = TimeSpan.FromSeconds(1);

            //int Y_AxisInterval = (int)Math.Round(1000 / dspOutput.FreqBinWidth);
            int nyquist    = recording.SampleRate / 2;
            int hzInterval = 1000;

            var image1 = DrawSonogram(deciBelSpectrogram, wavDuration, X_AxisInterval, stepDuration, nyquist, hzInterval);

            // (G) ################################## Calculate modal background noise spectrum in decibels
            //double SD_COUNT = -0.5; // number of SDs above the mean for noise removal
            //NoiseReductionType nrt = NoiseReductionType.MODAL;
            //System.Tuple<double[,], double[]> tuple = SNR.NoiseReduce(deciBelSpectrogram, nrt, SD_COUNT);

            //double upperPercentileBound = 0.2;    // lowest percentile for noise removal
            //NoiseReductionType nrt = NoiseReductionType.LOWEST_PERCENTILE;
            //System.Tuple<double[,], double[]> tuple = SNR.NoiseReduce(deciBelSpectrogram, nrt, upperPercentileBound);

            // (H) ################################## Calculate BRIGGS noise removal from amplitude spectrum
            int percentileBound = 20; // low energy percentile for noise removal

            //double binaryThreshold   = 0.6;   //works for higher SNR recordings
            double binaryThreshold = 0.4; //works for lower SNR recordings

            //double binaryThreshold = 0.3;   //works for lower SNR recordings
            double[,] m = NoiseRemoval_Briggs.BriggsNoiseFilterAndGetMask(
                amplitudeSpectrogram,
                percentileBound,
                binaryThreshold);

            string title  = "TITLE";
            var    image2 = NoiseRemoval_Briggs.DrawSonogram(
                m,
                wavDuration,
                X_AxisInterval,
                stepDuration,
                nyquist, hzInterval,
                title);

            //Image image2 = NoiseRemoval_Briggs.BriggsNoiseFilterAndGetSonograms(amplitudeSpectrogram, upperPercentileBound, binaryThreshold,
            //                                                                          wavDuration, X_AxisInterval, stepDuration, Y_AxisInterval);

            // (I) ################################## Calculate MEDIAN noise removal from amplitude spectrum

            //double upperPercentileBound = 0.8;    // lowest percentile for noise removal
            //NoiseReductionType nrt = NoiseReductionType.MEDIAN;
            //System.Tuple<double[,], double[]> tuple = SNR.NoiseReduce(deciBelSpectrogram, nrt, upperPercentileBound);

            //double[,] noiseReducedSpectrogram1 = tuple.Item1;  //
            //double[] noiseProfile              = tuple.Item2;  // smoothed modal profile

            //SNR.NoiseProfile dBProfile = SNR.CalculateNoiseProfile(deciBelSpectrogram, SD_COUNT);       // calculate noise value for each freq bin.
            //double[] noiseProfile = DataTools.filterMovingAverage(dBProfile.noiseThresholds, 7);        // smooth modal profile
            //double[,] noiseReducedSpectrogram1 = SNR.TruncateBgNoiseFromSpectrogram(deciBelSpectrogram, dBProfile.noiseThresholds);
            //Image image2 = DrawSonogram(noiseReducedSpectrogram1, wavDuration, X_AxisInterval, stepDuration, Y_AxisInterval);

            var combinedImage = ImageTools.CombineImagesVertically(image1, image2);

            string imagePath = Path.Combine(outputDir.FullName, fileNameWithoutExtension + ".png");

            combinedImage.Save(imagePath);

            // (G) ################################## Calculate modal background noise spectrum in decibels

            Log.WriteLine("# Finished recording:- " + input.Name);
        }
        /// <summary>
        /// A WRAPPER AROUND THE Analysis() METHOD
        /// To be called as an executable with command line arguments.
        /// </summary>
        public static void Execute(Arguments arguments)
        {
            Contract.Requires(arguments != null);

            TimeSpan tsStart    = TimeSpan.FromSeconds(arguments.Start ?? 0);
            TimeSpan tsDuration = TimeSpan.FromSeconds(arguments.Duration ?? 0);

            string outputDir = arguments.Output.FullName;

            // EXTRACT THE REQUIRED RECORDING SEGMENT
            FileInfo sourceF = arguments.Source;
            FileInfo tempF   = TempFileHelper.NewTempFile(arguments.Output);

            if (tempF.Exists)
            {
                tempF.Delete();
            }

            // GET INFO ABOUT THE SOURCE and the TARGET files - esp need the sampling rate
            AudioUtilityModifiedInfo preparedFile;

            if (tsDuration == TimeSpan.Zero)  // Process entire file
            {
                preparedFile = AudioFilePreparer.PrepareFile(sourceF, tempF, new AudioUtilityRequest {
                    TargetSampleRate = ResampleRate
                }, arguments.Output);
            }
            else
            {
                preparedFile = AudioFilePreparer.PrepareFile(sourceF, tempF, new AudioUtilityRequest {
                    TargetSampleRate = ResampleRate, OffsetStart = tsStart, OffsetEnd = tsStart.Add(tsDuration)
                }, arguments.Output);
            }

            var (analysisSettings, segmentSettings) = arguments.ToAnalysisSettings(
                sourceSegment: preparedFile.SourceInfo.ToSegment(),
                preparedSegment: preparedFile.TargetInfo.ToSegment());

            //get the data file to identify frog calls. Check it exists and then store full path in dictionary.
            string   frogParametersPath = analysisSettings.ConfigDict[key_FROG_DATA];
            FileInfo fi_FrogData        = new FileInfo(Path.Combine(outputDir, frogParametersPath));

            if (!fi_FrogData.Exists)
            {
                LoggedConsole.WriteLine("INVALID PATH: " + fi_FrogData.FullName);
                LoggedConsole.WriteLine("The config file must contain the name of a valid .csv file (containing frog call parameters) located in same directory as the .cfg file.");
                LoggedConsole.WriteLine("For example, use Key/Value pair:  FROG_DATA_FILE=FrogDataAndCompilationFile.csv");
                throw new InvalidOperationException();
            }
            analysisSettings.ConfigDict[key_FROG_DATA] = fi_FrogData.FullName; // store full path in the dictionary.

            // DO THE ANALYSIS
            // #############################################################################################################################################
            IAnalyser2      analyser = new Frogs_OBSOLETE();
            AnalysisResult2 result   = analyser.Analyze(analysisSettings, segmentSettings);

            throw new NotImplementedException("Intentionally broken");

            /*
             * DataTable dt = result.Data;
             * if (dt == null) { throw new InvalidOperationException("Data table of results is null"); }
             * // #############################################################################################################################################
             *
             * // ADD IN ADDITIONAL INFO TO RESULTS TABLE
             * AddContext2Table(dt, tsStart, result.AudioDuration);
             * CsvTools.DataTable2CSV(dt, segmentSettings.SegmentEventsFile.FullName);
             * // DataTableTools.WriteTable(augmentedTable);
             */
        }