Code example #1
0
        /// <summary>
        /// Creates a static-mode <see cref="AudioTrack"/> preloaded with the raw
        /// progress-tone resource, looping 30 times over the whole buffer.
        /// </summary>
        /// <param name="context">Context used to resolve the raw resource.</param>
        /// <returns>An AudioTrack with the tone written and loop points set.</returns>
        static AudioTrack CreateProgressTone(Context context)
        {
            // FIX: the original leaked the AssetFileDescriptor; dispose it once
            // the asset bytes have been copied out.
            using (AssetFileDescriptor fd = context.Resources.OpenRawResourceFd(Resource.Raw.progress_tone))
            {
                int length = (int)fd.Length;

                AudioTrack audioTrack = new AudioTrack(Stream.VoiceCall,
                                                       SAMPLE_RATE,
                                                       ChannelOut.Mono,
                                                       Encoding.Pcm16bit,
                                                       length,
                                                       AudioTrackMode.Static);

                byte[] data = new byte[length];

                ReadFileToBytes(fd, data);

                audioTrack.Write(data, 0, data.Length);
                // End frame = bytes / 2 for 16-bit mono PCM; loop the tone 30 times.
                audioTrack.SetLoopPoints(0, data.Length / 2, 30);

                return audioTrack;
            }
        }
Code example #2
0
File: SoundWAV.cs  Project: damian-666/ReignSDK
        /// <summary>
        /// Builds the static-mode AudioTrack for this WAV sound and uploads its
        /// PCM data, enabling infinite looping when <c>looped</c> is set.
        /// </summary>
        private void createPlayInstance()
        {
            bool sixteenBit = sound.bitDepth == 16;
            bool stereo     = sound.channels == 2;
            var  encoding   = sixteenBit ? Encoding.Pcm16bit : Encoding.Pcm8bit;

            // Convert the raw byte length into a sample/frame count: halve once
            // for 16-bit depth and once for interleaved stereo.
            totalSamples = sound.data.Length;
            if (sixteenBit)
            {
                totalSamples /= 2;
            }
            if (stereo)
            {
                totalSamples /= 2;
            }

            var channelConfig = stereo ? ChannelConfiguration.Stereo : ChannelConfiguration.Mono;

            instance = new AudioTrack(Stream.Music,
                                      sound.sampleRate,
                                      channelConfig,
                                      encoding,
                                      sound.data.Length,
                                      AudioTrackMode.Static);
            instance.Write(sound.data, 0, sound.data.Length);

            if (looped)
            {
                // -1 loop count: repeat the whole buffer indefinitely.
                instance.SetLoopPoints(0, totalSamples, -1);
            }
        }
Code example #3
0
        /// <summary>
        /// Returns an AudioTrack ready to play <paramref name="soundBuffer"/>,
        /// reusing cached tracks when possible. Cache reuse is attempted only for
        /// non-looped sounds, when caching is enabled and the cache holds at least
        /// 16 entries. Falls back to creating a fresh static AudioTrack; returns
        /// null when the cache is saturated or track creation fails.
        /// </summary>
        /// <param name="soundBuffer">PCM data and format of the sound to play.</param>
        /// <param name="isLooped">True to configure infinite looping instead of caching.</param>
        /// <returns>An AudioTrack, or null on cache saturation / creation failure.</returns>
        public static AudioTrack GetAudioTrack(SoundBuffer soundBuffer, bool isLooped)
        {
            if (!isLooped && Mixer.EnableAudioTrackCaching && m_audioTracks.Count >= 16)
            {
                // Stage 1: fast path — an idle cached track already holding this
                // exact buffer can be reused without rewriting any data.
                foreach (AudioTrackData audioTrack2 in m_audioTracks)
                {
                    if (audioTrack2.IsAvailable && audioTrack2.SoundBuffer == soundBuffer)
                    {
                        audioTrack2.IsAvailable = false;
                        m_cacheHits++;
                        LogCacheStats();
                        return(audioTrack2.AudioTrack);
                    }
                }
                // Stage 2: best-fit search — among idle tracks with matching channel
                // count and sampling frequency whose buffer is large enough, pick the
                // smallest one (minimizes wasted capacity).
                AudioTrackData audioTrackData = null;
                foreach (AudioTrackData audioTrack3 in m_audioTracks)
                {
                    if (audioTrack3.IsAvailable && audioTrack3.SoundBuffer.ChannelsCount == soundBuffer.ChannelsCount && audioTrack3.SoundBuffer.SamplingFrequency == soundBuffer.SamplingFrequency && audioTrack3.BytesCount >= soundBuffer.m_data.Length && (audioTrackData == null || audioTrack3.BytesCount <= audioTrackData.BytesCount))
                    {
                        audioTrackData = audioTrack3;
                    }
                }
                if (audioTrackData != null)
                {
                    // Reuse the best-fit track: copy the new PCM data into a scratch
                    // buffer sized to the track, zero-padding the tail so leftover
                    // audio from the previous sound is not heard.
                    if (m_buffer == null || m_buffer.Length < audioTrackData.BytesCount)
                    {
                        m_buffer = new byte[audioTrackData.BytesCount];
                    }
                    Array.Copy(soundBuffer.m_data, 0, m_buffer, 0, soundBuffer.m_data.Length);
                    Array.Clear(m_buffer, soundBuffer.m_data.Length, audioTrackData.BytesCount - soundBuffer.m_data.Length);
                    audioTrackData.AudioTrack.Write(m_buffer, 0, audioTrackData.BytesCount);
                    audioTrackData.SoundBuffer = soundBuffer;
                    audioTrackData.IsAvailable = false;
                    m_cacheHitsWithWrite++;
                    LogCacheStats();
                    return(audioTrackData.AudioTrack);
                }
                // Stage 3: if every cached track is at least as large as this buffer
                // (flag stays true), the misses above mean no track is *available*,
                // so the cache is effectively full — give up rather than grow it.
                bool flag = true;
                foreach (AudioTrackData audioTrack4 in m_audioTracks)
                {
                    if (audioTrack4.BytesCount < soundBuffer.m_data.Length)
                    {
                        flag = false;
                        break;
                    }
                }
                if (flag)
                {
                    m_cacheFulls++;
                    Log.Warning("AudioTrackCache full, no audio tracks available.");
                    LogCacheStats();
                    return(null);
                }
            }
            // Stage 4: create a brand-new static track sized to this buffer.
            // NOTE(review): mono sounds are created with ChannelOut.FrontLeft rather
            // than ChannelOut.Mono — presumably intentional; verify against Mixer.
            AudioTrack audioTrack = new AudioTrack(Stream.Music, soundBuffer.SamplingFrequency, (soundBuffer.ChannelsCount == 1) ? ChannelOut.FrontLeft : ChannelOut.Stereo, Encoding.Pcm16bit, soundBuffer.m_data.Length, AudioTrackMode.Static);

//			AudioTrack audioTrack = new AudioTrack(new AudioAttributes.Builder().SetUsage(AudioUsageKind.Media).SetContentType(AudioContentType.Music).Build(),new AudioFormat(), soundBuffer.m_data.Length, AudioTrackMode.Static,0);

            // Nonzero State — presumably "initialized"; TODO confirm against the
            // AudioTrackState enum mapping.
            if (audioTrack.State != 0)
            {
                audioTrack.Write(soundBuffer.m_data, 0, soundBuffer.m_data.Length);
                if (!isLooped)
                {
                    // Non-looped tracks join the cache (marked in-use) so later
                    // calls can reuse them.
                    m_audioTracks.Add(new AudioTrackData
                    {
                        AudioTrack  = audioTrack,
                        SoundBuffer = soundBuffer,
                        BytesCount  = soundBuffer.m_data.Length,
                        IsAvailable = false
                    });
                }
                else
                {
                    // Looped tracks are never cached; -1 loop count = loop forever.
                    Mixer.CheckTrackStatus(audioTrack.SetLoopPoints(0, soundBuffer.SamplesCount, -1));
                }
            }
            else
            {
                // Track never reached a usable state; release native resources.
                audioTrack.Release();
                audioTrack = null;
                Log.Warning("Failed to create Cache AudioTrack.");
            }
            m_cacheMisses++;
            if (Mixer.EnableAudioTrackCaching && m_cacheMisses > 200 && m_cacheMisses % 100 == 0)
            {
                Log.Warning("Over {0} AudioTrack objects created.", m_cacheMisses);
            }
            LogCacheStats();
            return(audioTrack);
        }