Example #1
        /// <summary>
        /// Plays an audio using the underlying OS's native method. A number stored in this object determines which loaded audio data at the native side we would like to play. If you previously called `Prepare()`, it takes effect here.
        ///
        /// [iOS] (Unprepared) Native Audio remembers which of the 32 OpenAL sources it just played. It takes the next one (wrapping back to the first source when already at the 32nd), finds the correct sound buffer, assigns it to that source, then plays it.
        /// [iOS] (Prepared) Instead of using a buffer index (the sound, not the sound player), it immediately plays the source selected at the time you called `Prepare`, without checking whether the sound in that source is still the same as when you called `Prepare`. After this `Play`, the prepare status is reset to unprepared and the next `Play` uses a buffer index as usual.
        ///
        /// [Android] (Unprepared) It searches for an `AudioTrack` which already contains the `byte[]` representing this sound and is not currently playing. If none is found, we load the byte array into any non-playing `AudioTrack`. If every `AudioTrack` is playing, it stops and replaces the first instance, then plays. We use this "searching" approach instead of iOS's assign-and-play round-robin because on Android we are bound to a very low number of `AudioTrack` instances and the `write` method takes more time than searching.
        /// [Android] (Prepared) Same as unprepared, but the `Prepare` method has already made sure an `AudioTrack` with the correct `byte[]` data exists, so the search should find it quickly. The prepare status is never reset by calling `Play()`.
        ///
        /// </summary>
        /// <param name="volume">From 0.0f to 1.0f. The interpolation is in linear space.</param>
        /// <param name="pan">-1.0f : Left, 0.0f : Center, 1.0f : Right [iOS] Not implemented yet. All sounds will be at center 0.0f. [Android] It works.</param>
        /// <returns> Treat this return value as `void` for now; the class will become usable in a future version.</returns>
        public NativeAudioController Play(float volume = 1, float pan = 0)
        {
            if (isUnloaded)
            {
                throw new System.Exception("You cannot play an unloaded NativeAudio.");
            }

            int playedSourceIndex = -1;

#if UNITY_IOS
            if (prepared)
            {
                //This is using source index. It means we have already loaded our sound to that source with Prepare.
                NativeAudio._PlayAudioWithSourceIndex(this.prepareIndex, volume, pan);
                playedSourceIndex = this.prepareIndex;
            }
            else
            {
                //This is using buffer index. Which source it use will be determined at native side.
                playedSourceIndex = NativeAudio._PlayAudio(this.NextIndex, volume, pan);
            }
            prepared = false;
#elif UNITY_ANDROID
            playedSourceIndex = NativeAudio.AndroidNativeAudio.CallStatic<int>(NativeAudio.AndroidPlayAudio, this.NextIndex, volume, pan);
#endif
            return new NativeAudioController(playedSourceIndex); //Empty object for now
        }
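A minimal usage sketch of this overload; the file name and the parameter values are placeholders, not from the original source (the `Load` overloads are shown in the later examples on this page):

        // Hypothetical sketch : load once, then play with an explicit volume and pan.
        NativeAudioPointer hitSound = NativeAudio.Load("Hit.wav");
        if (hitSound != null)
        {
            hitSound.Play(volume: 0.8f, pan: 0.5f); // pan is effective on Android; iOS stays centered for now
        }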
        public NativeAudioController Play(NativeAudio.PlayOptions playOptions)
        {
            if (isUnloaded)
            {
                throw new System.Exception("You cannot play an unloaded NativeAudio.");
            }

            int playedSourceIndex = -1;

#if UNITY_IOS
            if (prepared)
            {
                //This is using source index. It means we have already loaded our sound to that source with Prepare.
                NativeAudio._PlayAudioWithSourceCycle(this.prepareIndex, playOptions);
                playedSourceIndex = this.prepareIndex;
            }
            else
            {
                //-1 audioPlayerIndex results in round-robin, 0~15 results in hard-specifying the track.
                playedSourceIndex = NativeAudio._PlayAudio(this.NextIndex, playOptions.audioPlayerIndex, playOptions);
            }
            prepared = false;
#elif UNITY_ANDROID
            playedSourceIndex = NativeAudio.playAudio(this.NextIndex, playOptions);
#endif
            return new NativeAudioController(playedSourceIndex);
        }
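A sketch of the `PlayOptions` overload. Only the `audioPlayerIndex` field and its -1 / 0~15 semantics are confirmed by the comment in the code above; the object-initializer construction and the `loadedPointer` variable are assumptions for illustration:

        // Hypothetical sketch : -1 means round-robin, 0~15 hard-specifies a native player.
        var options = new NativeAudio.PlayOptions { audioPlayerIndex = 3 };
        NativeAudioController controller = loadedPointer.Play(options);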
        /// <summary>
        /// -1 for full left, 0 for center, 1 for full right. This pan is based on a "balance effect" and not a "constant energy pan". That is,
        /// at the center you hear each side fully. (A constant energy pan applies 3 dB of attenuation to both sides at the center.)
        ///
        /// [iOS] 2D panning on iOS is emulated in OpenAL's 3D audio engine by splitting your stereo sound into two separate mono sounds,
        /// then positioning one on each of the listener's left and right ears. When panning, instead of adjusting gain we just move the source
        /// further from the listener and let the distance attenuation do the work. (Gain is reserved for the set-volume command,
        /// so we have two stages of gain adjustment this way.)
        ///
        /// [Android] Maps to the `SLVolumeItf` interface -> `SetStereoPosition`.
        /// </summary>
        /// <param name="pan"></param>
        public void SetPan(float pan)
        {
#if UNITY_IOS
            NativeAudio._SetPan(InstanceIndex, pan);
#elif UNITY_ANDROID
            NativeAudio.setPan(InstanceIndex, pan);
#endif
        }
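To make the two pan laws in the summary concrete, here is an illustrative gain computation for each law. This is not the plugin's internal code, just a sketch of the math described above:

        // Illustration only : per-channel gains for the two pan laws, pan in [-1, 1].
        static void ComparePanLaws(float pan)
        {
            // Balance pan : the center (0) keeps both channels at full gain.
            float balanceLeft  = Mathf.Clamp01(1f - pan);
            float balanceRight = Mathf.Clamp01(1f + pan);

            // Constant energy pan : each channel sits at -3 dB (~0.707) at the center.
            float angle = (pan + 1f) * Mathf.PI / 4f; // maps [-1, 1] to [0, pi/2]
            float constantLeft  = Mathf.Cos(angle);
            float constantRight = Mathf.Sin(angle);

            UnityEngine.Debug.Log($"balance {balanceLeft}/{balanceRight} vs constant energy {constantLeft}/{constantRight}");
        }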
Example #4
        /// <summary>
        /// Immediately stop a specific played sound.
        /// [iOS] One of the 32 OpenAL sources will stop.
        /// [Android] One of the `AudioTrack` instances will stop.
        /// </summary>
        public void Stop()
        {
#if UNITY_IOS
            NativeAudio._StopAudio(instanceIndex);
#elif UNITY_ANDROID
            NativeAudio.AndroidNativeAudio.CallStatic(NativeAudio.AndroidStopAudio, instanceIndex);
#endif
        }
        /// <summary>
        /// Immediately stop a source that was used to play the sound.
        /// [iOS] The OpenAL source that was used to play this sound will stop.
        /// [Android] The `SLAndroidSimpleBufferQueue` that was used to play this sound will stop.
        /// </summary>
        public void Stop()
        {
#if UNITY_IOS
            NativeAudio._StopAudio(InstanceIndex);
#elif UNITY_ANDROID
            NativeAudio.stopAudio(InstanceIndex);
#endif
        }
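A usage sketch, assuming this `Stop` lives on the `NativeAudioController` returned from `Play` (the `loadedPointer` variable is hypothetical):

        // Sketch : keep the controller from Play() to stop that specific play later.
        NativeAudioController controller = loadedPointer.Play();
        // ... some time later, before the sound ends naturally :
        controller.Stop();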
        /// <summary>
        /// Resume the underlying audio track chosen for a particular audio.
        /// If by the time you call resume the track has already been used to play another audio, the resume has no effect since the pause status has already been cleared.
        /// </summary>
        public void TrackResume()
        {
#if UNITY_IOS
            NativeAudio._TrackResume(InstanceIndex);
#elif UNITY_ANDROID
            NativeAudio.trackResume(InstanceIndex);
#endif
        }
        /// <summary>
        /// Set the playback time of this audio player. If the track is in a paused state, it is immediately resumed.
        /// You can set it even while the track is playing.
        /// </summary>
        /// <param name="offsetSeconds"></param>
        public void SetPlaybackTime(float offsetSeconds)
        {
#if UNITY_IOS
            NativeAudio._SetPlaybackTime(InstanceIndex, offsetSeconds);
#elif UNITY_ANDROID
            NativeAudio.setPlaybackTime(InstanceIndex, offsetSeconds);
#endif
        }
        /// <summary>
        /// [iOS] Maps to `AL_GAIN`. It is a scalar amplitude multiplier, so the value can go over 1.0 to increase volume, but the result may clip.
        /// If you put 0.5f, the sound is attenuated by about 6 dB.
        ///
        /// [Android] Maps to the `SLVolumeItf` interface -> `SetVolumeLevel`.
        /// The floating-point volume parameter is converted to millibels (100 × 20 × log10(volume)), so putting 0.5f here results in about 6 dB of attenuation.
        /// </summary>
        public void SetVolume(float volume)
        {
#if UNITY_IOS
            NativeAudio._SetVolume(InstanceIndex, volume);
#elif UNITY_ANDROID
            NativeAudio.setVolume(InstanceIndex, volume);
#endif
        }
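For reference, the millibel conversion described above can be reproduced with this standalone sketch. It is an illustration of the formula, not necessarily the plugin's exact code:

        // Illustration : dB = 20 * log10(volume), and 1 dB = 100 millibels.
        static short LinearToMillibel(float volume)
        {
            // 0.5f -> 2000 * log10(0.5) = about -602 mB, i.e. about -6 dB.
            // (A real implementation would clamp volume above 0 to avoid -infinity.)
            return (short)(2000f * Mathf.Log10(volume));
        }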
        /// <summary>
        /// Immediately stop a source that was selected to play the sound.
        /// [iOS] The OpenAL source that was used to play this sound will stop.
        /// [Android] The `SLAndroidSimpleBufferQueue` that was used to play this sound will stop.
        /// </summary>
        public void Stop()
        {
            AssertInitialized();
#if UNITY_IOS
            NativeAudio._StopAudio(InstanceIndex);
#elif UNITY_ANDROID
            NativeAudio.stopAudio(InstanceIndex);
#endif
        }
        /// <summary>
        /// Resume the underlying audio track chosen for a particular audio.
        /// If by the time you call resume the track has already been used to play another audio, the resume has no effect since the pause status has already been cleared.
        /// </summary>
        public void TrackResume()
        {
            AssertInitialized();
#if UNITY_IOS
            NativeAudio._TrackResume(InstanceIndex);
#elif UNITY_ANDROID
            NativeAudio.trackResume(InstanceIndex);
#endif
        }
Example #11
        /// <summary>
        /// Shaves off as much start-up time as possible before playing a sound. The majority of the load time is already in `Load`, but `Prepare` might help a bit more, or not at all. You can also call `Play()` without calling this first. The effectiveness depends on the platform audio library's approach :
        ///
        /// [iOS] Assigns an OpenAL audio buffer to a source. `NativeAudioPointer` then remembers this source index. The next `Play()` you call will immediately play this remembered source without checking what sound is in it, instead of using a buffer index to pair the sound with the next available source. This means that if you have played 32 sounds between `Prepare()` and `Play()`, the resulting sound will be something else, as other sounds have already put their buffers into the source you remembered.
        ///
        /// [Android] `write` a loaded audio byte array to any non-playing `AudioTrack` so that the next `Play()` does not require a `write` and can play right away. If every `AudioTrack` is playing, the first `AudioTrack` will immediately stop to receive the new `byte[]` data.
        ///
        /// </summary>
        public void Prepare()
        {
#if UNITY_IOS
            prepareIndex = NativeAudio._PrepareAudio(NextIndex);
            prepared     = true;
#elif UNITY_ANDROID
            NativeAudio.AndroidNativeAudio.CallStatic(NativeAudio.AndroidPrepareAudio, NextIndex);
#endif
        }
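A usage sketch of the prepare-then-play flow described above (the `loadedPointer` variable is hypothetical):

        // Sketch : prepare ahead of a timing-critical moment, then fire instantly.
        loadedPointer.Prepare();
        // ... later, ideally before ~32 other sounds get played (iOS caveat above) :
        loadedPointer.Play();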
        /// <summary>
        /// Shaves off as much start-up time as possible before playing a sound. The majority of the load time is already in `Load`, but `Prepare` might help a bit more, or not at all. You can also call `Play()` without calling this first. The effectiveness depends on the platform audio library's approach :
        ///
        /// [iOS] Assigns an OpenAL audio buffer to a source. `NativeAudioPointer` then remembers this source index. The next `Play()` you call will immediately play this remembered source without checking what sound is in it, instead of using a buffer index to pair the sound with the next available source. This means that if you have played 16 sounds between `Prepare()` and `Play()`, the resulting sound will be something else, as other sounds have already put their buffers into the source you remembered.
        ///
        /// [Android] No effect, as OpenSL ES plays audio by pushing data into `SLAndroidSimpleBufferQueueItf`. All the preparation already happened in `Load()`.
        ///
        /// </summary>
        public void Prepare()
        {
#if UNITY_IOS
            prepareIndex = NativeAudio._PrepareAudio(NextIndex);
            prepared     = true;
#elif UNITY_ANDROID
            //There is no possible preparation for OpenSL ES at the moment.
#endif
        }
        /// <summary>
        /// -1 for full left, 0 for center, 1 for full right. This pan is based on a "balance effect" and not a "constant energy pan". That is,
        /// at the center you hear each side fully. (A constant energy pan applies 3 dB of attenuation to both sides at the center.)
        ///
        /// [iOS] 2D panning on iOS is emulated in OpenAL's 3D audio engine by splitting your stereo sound into two separate mono sounds,
        /// then positioning one on each of the listener's left and right ears. When panning, instead of adjusting gain we just move the source
        /// further from the listener and let the distance attenuation do the work. (Gain is reserved for the set-volume command,
        /// so we have two stages of gain adjustment this way.)
        ///
        /// [Android] Maps to the `SLVolumeItf` interface -> `SetStereoPosition`.
        /// </summary>
        /// <param name="pan"></param>
        public void SetPan(float pan)
        {
            AssertInitialized();
#if UNITY_IOS
            NativeAudio._SetPan(InstanceIndex, pan);
#elif UNITY_ANDROID
            NativeAudio.setPan(InstanceIndex, pan);
#endif
        }
        /// <summary>
        /// Asks the native audio player for its playback time, relative to the start of the currently loaded audio data, in **seconds**.
        /// The API is very time sensitive and may or may not change the value within the same frame, depending on where in the script you call it.
        /// [Android] Because of how the "stop hack" was implemented, any stopped audio will have a playback time equal to the audio's length (not 0).
        ///
        /// This behaviour is similar to calling `AudioSettings.dspTime` or the `audioSource.time` property; those two are in the same update step.
        ///
        /// Note that `Time.realtimeSinceStartup` is not in an update step, unlike audio time, and will change every time you call it, even on 2 consecutive lines of code.
        ///
        /// [iOS] Gets the `AL_SEC_OFFSET` attribute. It updates in certain discrete steps, and if that step happens in the middle of
        /// a frame, this method returns a different value depending on where in the script you call it. The update step timing is THE SAME as
        /// `AudioSettings.dspTime` and `audioSource.time`.
        ///
        /// I observed (on iPad 3, iOS 9) that this function sometimes lags on the first few calls.
        /// It might help to pre-warm it by calling it several times in a loading screen or similar.
        ///
        /// [Android] Uses `GetPosition` of the `SLPlayItf` interface. It updates in certain discrete steps, and if that step happens in the middle of
        /// a frame, this method returns a different value depending on where in the script you call it. The update step timing is INDEPENDENT of
        /// `AudioSettings.dspTime` and `audioSource.time`.
        /// </summary>
        public float GetPlaybackTime()
        {
#if UNITY_IOS
            return NativeAudio._GetPlaybackTime(InstanceIndex);
#elif UNITY_ANDROID
            return NativeAudio.getPlaybackTime(InstanceIndex);
#else
            return 0;
#endif
        }
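A polling sketch matching the discrete-step behaviour described above. It assumes the usual `System.Collections` import of a Unity script, and `playedController` is a hypothetical object exposing this `GetPlaybackTime`:

        // Sketch : poll the native playback time once per frame for debugging.
        // Consecutive frames may log the same value because it updates in discrete steps.
        IEnumerator LogPlaybackTime()
        {
            while (true)
            {
                UnityEngine.Debug.Log(playedController.GetPlaybackTime());
                yield return null;
            }
        }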
Example #15
        /// <summary>
        /// [Android] Undo the <see cref="Initialize"/>. It doesn't affect any loaded audio; it just disposes all the native players.
        /// Disposing twice is safe; it does nothing.
        /// </summary>
        public static void Dispose()
        {
#if UNITY_ANDROID
            if (Initialized)
            {
                NativeAudio.disposeIfAllocated();
                Initialized = false;
            }
#else
            return;
#endif
        }
Example #16
        /// <summary>
        /// You cannot call <see cref="Play"/> anymore after unloading. It will throw an exception if you do so.
        ///
        /// [iOS] Unloads the OpenAL buffer. The total number of OpenAL sources (16) does not change. Immediately stops the sound if it is playing.
        ///
        /// [Android] `free` the unmanaged audio data array at the C part of the OpenSL ES code.
        ///
        /// It is HIGHLY recommended to stop the audio players via <see cref="NativeAudioController.Stop"/> before unloading, because the play head will continue
        /// running into oblivion if you unload data while it is still being read. I have seen 2 cases :
        ///
        /// - The game immediately crashes with signal 11 (SIGSEGV), code 1 (SEGV_MAPERR) on my $36 low-end phone. It probably does not permit reading freed memory.
        /// - On some devices it produces a scary noise if you load something new, `malloc` decides to reuse the memory area you just freed,
        /// and the still-running play head picks that up.
        /// </summary>
        public void Unload()
        {
            if (!isUnloaded)
            {
#if UNITY_IOS
                NativeAudio._UnloadAudio(startingIndex);
#elif UNITY_ANDROID
                for (int i = startingIndex; i < startingIndex + amount; i++)
                {
                    NativeAudio.unloadAudio(i);
                }
#endif
                isUnloaded = true;
            }
        }
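A sketch of the recommended order from the summary above: stop every player of this sound first, then unload (variable names hypothetical):

        // Sketch : stop first so no play head keeps reading freed memory, then unload.
        NativeAudioController controller = loadedPointer.Play();
        // ... when this sound is no longer needed :
        controller.Stop();
        loadedPointer.Unload();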
Example #17
        /// <summary>
        /// Do not call `Play` anymore after unloading.
        ///
        /// [iOS] Unloads the OpenAL buffer. The total number of OpenAL sources (32) does not change.
        /// [Android] Unloads the `byte[]` array by dereferencing it. We have to wait for Java's garbage collector to perform the actual unload. The total number of `AudioTrack` instances does not change.
        ///
        /// </summary>
        public void Unload()
        {
#if UNITY_IOS
            if (!isUnloaded)
            {
                NativeAudio._UnloadAudio(startingIndex);
                isUnloaded = true;
            }
#elif UNITY_ANDROID
            if (!isUnloaded)
            {
                for (int i = startingIndex; i < startingIndex + amount; i++)
                {
                    NativeAudio.AndroidNativeAudio.CallStatic(NativeAudio.AndroidUnloadAudio, i);
                }
                isUnloaded = true;
            }
#endif
        }
Example #18
        /// <summary>
        /// Loads by copying Unity-imported `AudioClip`'s raw audio memory to the native side. You are free to unload the `AudioClip`'s audio data without affecting what's loaded at the native side.
        ///
        /// Hard requirements :
        /// - Load type MUST be Decompress On Load so Native Audio could read raw PCM byte array from your compressed audio.
        /// - If you use Load In Background, you must call `audioClip.LoadAudioData()` beforehand and ensure that `audioClip.loadState` is `AudioDataLoadState.Loaded` before calling `NativeAudio.Load`. Otherwise it would throw an exception. If you are not using Load In Background but also not using Preload Audio Data, Native Audio can load for you if not yet loaded.
        /// - Must not be ambisonic.
        ///
        /// It supports all compression formats, Force To Mono, overriding to any sample rate, and the quality slider.
        ///
        /// If this is the first time loading any audio it will call `NativeAudio.Initialize()` automatically which might take a bit more time.
        ///
        /// [iOS] Loads an audio into OpenAL's output audio buffer. (Max 256) This buffer will be paired with one of 16 OpenAL sources when you play it.
        ///
        /// [Android] Loads an audio into a `short*` array at the unmanaged native side. This array will be pushed into one of the available `SLAndroidSimpleBufferQueue`s when you play it.
        /// The audio is resampled at this moment to match the device's native rate. The SLES audio player must be created to match the device rate
        /// to enable the special "fast path" audio. What's left is to make our audio compatible with that fast-path player, which the resampler takes care of.
        ///
        /// You can change the sampling quality of the SRC (libsamplerate) library on a per-audio basis with the `LoadOptions` overload.
        /// </summary>
        /// <param name="audioClip">
        /// Hard requirements :
        /// - Load type MUST be Decompress On Load so Native Audio could read raw PCM byte array from your compressed audio.
        /// - If you use Load In Background, you must call `audioClip.LoadAudioData()` beforehand and ensure that `audioClip.loadState` is `AudioDataLoadState.Loaded` before calling `NativeAudio.Load`. Otherwise it would throw an exception. If you are not using Load In Background but also not using Preload Audio Data, Native Audio can load for you if not yet loaded.
        /// - Must not be ambisonic.
        /// </param>
        /// <returns> An object that stores a number. The native side can pair this number with the actual loaded audio data when you want to play it. You can `Play`, `Prepare`, or `Unload` with this object. `Load` returns null on error, for example : a wrong name, or calling it in the Editor. </returns>
        public static NativeAudioPointer Load(AudioClip audioClip, LoadOptions loadOptions)
        {
            AssertAudioClip(audioClip);
            if (!initialized)
            {
                NativeAudio.Initialize();
            }

            //We have to wait for GC to collect this big array, or you could do `GC.Collect()` immediately after.
            short[] shortArray = AudioClipToShortArray(audioClip);

#if UNITY_IOS
            int startingIndex = _SendByteArray(shortArray, shortArray.Length * 2, audioClip.channels, audioClip.frequency, loadOptions.resamplingQuality);
            if (startingIndex == -1)
            {
                throw new Exception("Error loading NativeAudio with AudioClip named : " + audioClip.name);
            }
            else
            {
                float length = _LengthBySource(startingIndex);
                return new NativeAudioPointer(audioClip.name, startingIndex, length);
            }
#elif UNITY_ANDROID
            //The native side will interpret short array as byte array, thus we double the length.
            int startingIndex = sendByteArray(shortArray, shortArray.Length * 2, audioClip.channels, audioClip.frequency, loadOptions.resamplingQuality);


            if (startingIndex == -1)
            {
                throw new Exception("Error loading NativeAudio with AudioClip named : " + audioClip.name);
            }
            else
            {
                float length = lengthBySource(startingIndex);
                return new NativeAudioPointer(audioClip.name, startingIndex, length);
            }
#else
            //Load is defined on editor so that autocomplete shows up, but it is a stub. If you mistakenly use the pointer in editor instead of forwarding to normal sound playing method you will get a null reference error.
            return null;
#endif
        }
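A usage sketch for this overload inside a MonoBehaviour. The `hitClip` field and the default-constructed `LoadOptions` are assumptions for illustration; the real `LoadOptions` may expose a preset or required fields:

        // Hypothetical sketch : the clip's import settings must be Decompress On Load.
        [SerializeField] AudioClip hitClip;

        void Start()
        {
            NativeAudioPointer hit = NativeAudio.Load(hitClip, new NativeAudio.LoadOptions());
            if (hit != null)
            {
                hit.Play();
            }
        }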
Example #19
        /// <summary>
        /// (**Advanced**) Loads an audio from the `StreamingAssets` folder's destination at runtime. In most cases you should use the `AudioClip` overload instead.
        /// It only supports the .wav PCM 16-bit format, stereo or mono, at any sampling rate, since it will be resampled to fit the device.
        ///
        /// If this is the first time loading any audio it will call `NativeAudio.Initialize()` automatically which might take a bit more time.
        ///
        /// [iOS] Loads an audio into OpenAL's output audio buffer. (Max 256) This buffer will be paired with one of 16 OpenAL sources when you play it.
        ///
        /// [Android] Loads an audio into a `short*` array at the unmanaged native side. This array will be pushed into one of the available `SLAndroidSimpleBufferQueue`s when you play it.
        /// The audio is resampled at this moment to match the device's native rate. The SLES audio player must be created to match the device rate
        /// to enable the special "fast path" audio. What's left is to make our audio compatible with that fast-path player, which the resampler takes care of.
        ///
        /// You can change the sampling quality of the SRC (libsamplerate) library on a per-audio basis with the `LoadOptions` overload.
        ///
        /// If the audio is not found in the main app's persistent space (the destination of `StreamingAssets`), it will continue to search for the audio
        /// in all OBB packages you might have. (Often, if your game is a split OBB, things in `StreamingAssets` go there by default even if the main package is not that large.)
        /// </summary>
        /// <param name="streamingAssetsRelativePath">If the file is `SteamingAssets/Hit.wav` use "Hit.wav" (WITH the extension).</param>
        /// <returns> An object that stores a number. The native side can pair this number with the actual loaded audio data when you want to play it. You can `Play`, `Prepare`, or `Unload` with this object. `Load` returns null on error, for example : a wrong name, a file not existing in StreamingAssets, or calling it in the Editor. </returns>
        public static NativeAudioPointer Load(string audioPath, LoadOptions loadOptions)
        {
            if (!initialized)
            {
                NativeAudio.Initialize();
            }

            if (System.IO.Path.GetExtension(audioPath).ToLower() == ".ogg")
            {
                throw new Exception("Loading via StreamingAssets does not support OGG. Please use the AudioClip overload and set the import settings to Vorbis.");
            }

#if UNITY_IOS
            int startingIndex = _LoadAudio(audioPath, loadOptions.resamplingQuality);
            if (startingIndex == -1)
            {
                throw new Exception("Error loading audio at path : " + audioPath);
            }
            else
            {
                float length = _LengthBySource(startingIndex);
                return new NativeAudioPointer(audioPath, startingIndex, length);
            }
#elif UNITY_ANDROID
            int startingIndex = AndroidNativeAudio.CallStatic <int>(AndroidLoadAudio, audioPath, loadOptions.resamplingQuality);

            if (startingIndex == -1)
            {
                throw new Exception("Error loading audio at path : " + audioPath);
            }
            else
            {
                float length = lengthBySource(startingIndex);
                return new NativeAudioPointer(audioPath, startingIndex, length);
            }
#else
            //Load is defined on editor so that autocomplete shows up, but it is a stub. If you mistakenly use the pointer in editor instead of forwarding to normal sound playing method you will get a null reference error.
            return null;
#endif
        }
        private IEnumerator AnalyzeRoutine()
        {
            UnityEngine.Debug.Log("Built in analyze start");
            sw       = new Stopwatch();
            allTicks = new List <long>();

            if (silence != null)
            {
                silence.Unload();
            }
            //This "" is a special path to load a silence.
            silence = NativeAudio.Load("");

            //To warm up the audio circuit, we discard the first half of the test.
            for (int i = 0; i < framesOfPlay / 2; i++)
            {
                silence.Play();
                yield return null;
            }

            //Ok this is the real thing.
            for (int i = 0; i < framesOfPlay / 2; i++)
            {
                sw.Start();
                silence.Play();
                yield return null;

                sw.Stop();
                allTicks.Add(sw.ElapsedTicks);
                sw.Reset();
            }

            analysisResult = new NativeAudioAnalyzerResult()
            {
                averageFps = 1000 / TicksToMs(allTicks.Average())
            };
            analyzeRoutine = null;
            UnityEngine.Debug.Log("Built in analyze end");
        }
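The `TicksToMs` helper used in `AnalyzeRoutine` is not shown on this page. A plausible implementation (an assumption, not the original source) converts through `Stopwatch.Frequency`, since `Stopwatch.ElapsedTicks` is not in `TimeSpan` tick units:

        // Assumed helper (not in the original source) : Stopwatch ticks -> milliseconds.
        private static double TicksToMs(double ticks)
        {
            return ticks * 1000.0 / Stopwatch.Frequency;
        }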
Example #21
        /// <summary>
        /// Loads audio at `audioPath`. If this is the first time loading any audio it will call `NativeAudio.Initialize()` automatically which might take a bit more time.
        /// [iOS] Loads an audio into OpenAL's output audio buffer. (Max 256) This buffer will be paired with one of 32 OpenAL sources when you play it.
        /// [Android] Loads an audio into a `byte[]` array at the native side. This array will be `write`-ed into one of the available `AudioTrack`s when you play it.
        /// </summary>
        /// <param name="audioPath">The file must be in the `StreamingAssets` folder and in .wav PCM 16-bit format with a 44100 Hz sampling rate. If the file is `StreamingAssets/Hit.wav`, use "Hit.wav" (WITH the extension).</param>
        /// <returns> An object that stores a number. The native side can pair this number with the actual loaded audio data when you want to play it. You can `Play`, `Prepare`, or `Unload` with this object. `Load` returns null on error, for example : a wrong name, a file not existing in StreamingAssets, or calling it in the Editor. </returns>
        public static NativeAudioPointer Load(string audioPath)
        {
            if (!initialized)
            {
                NativeAudio.Initialize();
            }

#if UNITY_IOS
            int startingIndex = _LoadAudio(audioPath);
            if (startingIndex == -1)
            {
                //Debug.LogError("You have either Load-ed a nonexistent audio file or allocated over the iOS's OpenAL buffer amount hard limit. Check your StreamingAssets folder for the correct name, or call nativeAudioPointer.Unload() to free up the quota.");
                return null;
            }
            else
            {
                return new NativeAudioPointer(audioPath, startingIndex);
            }
#elif UNITY_ANDROID
            int startingIndex = AndroidNativeAudio.CallStatic <int>(AndroidLoadAudio, audioPath);
            if (startingIndex == -1)
            {
                //This "error" is expected, it is not recomended to make it verbose but you should handle the returned null.

                //Debug.LogError("You have either Load-ed a nonexistent audio file or allocated over the Android's AudioTrack hard limit. Check your StreamingAssets folder for the correct name, or call nativeAudioPointer.Unload() to free up the quota.");
                return null;
            }
            else
            {
                return new NativeAudioPointer(audioPath, startingIndex);
            }
#else
            //Load is defined on editor so that autocomplete shows up, but it is a stub. If you mistakenly use the pointer in editor instead of forwarding to normal sound playing method you will get a null reference error.
            return null;
#endif
        }
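A usage sketch for this overload; as the comments above note, the `null` return is expected on failure and should be handled (the file name is a placeholder):

        // Sketch : handle the expected null instead of assuming the load succeeded.
        NativeAudioPointer hit = NativeAudio.Load("Hit.wav");
        if (hit == null)
        {
            // Wrong file name, over the native player limit, or running in the Editor.
        }
        else
        {
            hit.Play();
        }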