示例#1
0
    /// <summary>
    /// Serializes an AudioClip to a WAV byte array: HEADER_SIZE header bytes
    /// (filled in by WriteHeader) followed by little-endian 16-bit PCM data.
    /// </summary>
    /// <param name="clip">Clip whose samples are converted.</param>
    /// <returns>WAV bytes: HEADER_SIZE + 2 bytes per sample.</returns>
    static byte[] ConvertToWav(AudioClip clip)
    {
        // NOTE(review): reads clip.samples floats only — assumes a mono clip;
        // a multi-channel clip would need samples * channels. TODO confirm.
        var samples = new float[clip.samples];
        clip.GetData(samples, 0);

        // Each float sample becomes one Int16 (2 bytes) after the header.
        Byte[] bytesData = new Byte[HEADER_SIZE + samples.Length * 2];

        const int rescaleFactor = 32767; // map [-1, 1] float onto Int16 range

        WriteHeader(bytesData, clip);

        for (int i = 0; i < samples.Length; i++)
        {
            // Convert and copy directly; the original allocated a throwaway
            // Byte[2] (immediately overwritten by GetBytes) and a full
            // Int16[] scratch array — both dead weight.
            short sample = (short)(samples[i] * rescaleFactor);
            BitConverter.GetBytes(sample).CopyTo(bytesData, HEADER_SIZE + i * 2);
        }

        return bytesData;
    }
示例#2
0
    // Loads an audio file at fullpath via RuntimeAudioClipLoader, shows a
    // spinner prefab while decoding runs, then copies and pins the decoded
    // samples and hands them to every player.
    IEnumerator streamRoutine(string fullpath)
    {
        // Kick off the load. NOTE(review): the three bool flags' meanings come
        // from RuntimeAudioClipLoader.Manager.Load — confirm against its docs.
        AudioClip c = RuntimeAudioClipLoader.Manager.Load(fullpath, false, true, false);

        // Show a loading indicator parented to this object while we wait.
        loaderObject = Instantiate(loadingPrefab, transform, false) as GameObject;
        loaderObject.transform.localPosition = new Vector3(-.05f, .013f, 0.061f);
        loaderObject.transform.localRotation = Quaternion.Euler(0, 180, 0);
        loaderObject.transform.localScale    = Vector3.one * .1f;

        // Wait until the loader reports the clip decoded.
        while (RuntimeAudioClipLoader.Manager.GetAudioClipLoadState(c) != AudioDataLoadState.Loaded)
        {
            yield return(null);
        }
        if (loaderObject != null)
        {
            Destroy(loaderObject);
        }

        // Release whatever the players were using before swapping in new data.
        for (int i = 0; i < players.Length; i++)
        {
            players[i].UnloadClip();
        }

        // Also wait on the clip's own load state before reading sample data.
        while (c.loadState != AudioDataLoadState.Loaded)
        {
            yield return(null);
        }

        // Copy the interleaved samples (frames * channels) out of the clip.
        clipSamples = new float[c.samples * c.channels];
        c.GetData(clipSamples, 0);

        // Pin the managed array so unmanaged/native consumers get a stable
        // pointer. NOTE(review): no matching Free() is visible here —
        // presumably the handle is released elsewhere; verify to avoid
        // leaking the pin.
        m_ClipHandle = GCHandle.Alloc(clipSamples, GCHandleType.Pinned);
        for (int i = 0; i < players.Length; i++)
        {
            players[i].LoadSamples(clipSamples, m_ClipHandle, c.channels);
        }
    }
        // Callback for a received audio buffer. `name` carries the buffer
        // index as a string; when that index is one we are expecting, the
        // clip's samples are appended (truncated to one combined-frame
        // interval) to m_audioSampleData. Compiled out on WebGL builds.
        void OnAudioDataReceived(string name, AudioClip audio)
        {
#if !UNITY_WEBGL
            int index;
            if (int.TryParse(name, out index))
            {
                if (m_audioBufferList.Contains(index))
                {
                    if (audio == null)
                    {
                        Debug.LogWarning("AudioBuffer is null, abort processing.");
                        return;
                    }
                    // Interleaved buffer: frames * channels floats.
                    float[] srcSample = new float[audio.samples * audio.channels];
                    audio.GetData(srcSample, 0);
                    // Number of samples spanning m_combinedFrames * m_frameInterval
                    // seconds. NOTE(review): assumes srcSample.Length >=
                    // audioBufferSize, otherwise GetRange throws — confirm
                    // against the sender's buffer sizing.
                    int          audioBufferSize = (int)(audio.frequency * audio.channels * (m_combinedFrames * m_frameInterval));
                    List <float> destSample      = new List <float>(srcSample);
                    m_audioSampleData.AddRange(destSample.GetRange(0, audioBufferSize));
                }
            }
#endif
        }
示例#4
0
    // Polls the microphone clip each frame, diffs it against the last-seen
    // snapshot to find which sample indices changed, and calls PackClip()
    // to package the dirty region when anything changed.
    void Update()
    {
        if (!isActive)
        {
            return;
        }

        // Snapshot the mic ring buffer into m_activeSamples.
        m_audioClip.GetData(m_activeSamples, 0);
        numDifferent = 0;

        // Scan for the dirty range [firstDifferent, lastDifferent], copying
        // each changed sample into the persistent m_samples mirror as we go.
        firstDifferent = -1;
        lastDifferent  = -1;
        for (int i = 0; i < m_activeSamples.Length; i++)
        {
            if (m_activeSamples [i] != m_samples [i])
            {
                numDifferent++;
                if (firstDifferent == -1)
                {
                    firstDifferent = i;
                }
                lastDifferent = i;

                m_samples [i] = m_activeSamples [i];
            }
        }

        // Nothing changed since last frame — nothing to pack.
        if (firstDifferent == -1)
        {
            return;
        }

        // Round first different to nearest multiple of 20
        // NOTE(review): the rounding described above is not implemented here —
        // presumably PackClip() does it; confirm.

        PackClip();
    }
示例#5
0
    // Returns the peak squared amplitude over the most recent _sampleWindow
    // microphone samples, or 0 while the mic has not yet produced a full window.
    float LevelMax()
    {
        var window = new float[_sampleWindow];
        int readPos = Microphone.GetPosition(null) - (_sampleWindow + 1);

        // Not enough audio captured yet to fill one window.
        if (readPos < 0)
        {
            return 0;
        }

        _clipRecord.GetData(window, readPos);

        // Peak of sample^2 across the window.
        float peak = 0;
        foreach (float sample in window)
        {
            float squared = sample * sample;
            if (squared > peak)
            {
                peak = squared;
            }
        }
        return peak;
    }
示例#6
0
        /// <summary>
        /// Converts the clip's float samples to little-endian 16-bit PCM and
        /// writes the raw bytes into <paramref name="mstream"/>.
        /// </summary>
        /// <param name="mstream">Destination stream (written, not rewound).</param>
        /// <param name="clip">Source clip. NOTE(review): only clip.samples
        /// floats are read, ignoring clip.channels — confirm callers only
        /// pass mono clips.</param>
        private static void ConvertAndWrite(MemoryStream mstream, AudioClip clip)
        {
            var samples = new float[clip.samples];
            clip.GetData(samples, 0);

            // Two bytes per sample after the Int16 conversion.
            Byte[] bytesData = new Byte[samples.Length * 2];

            const int rescaleFactor = 32767; // map [-1, 1] float onto Int16 range

            for (int i = 0; i < samples.Length; i++)
            {
                // Write directly; the original allocated a throwaway Byte[2]
                // (immediately overwritten by GetBytes) and a full Int16[]
                // scratch array per call — both removed.
                short sample = (short)(samples[i] * rescaleFactor);
                BitConverter.GetBytes(sample).CopyTo(bytesData, i * 2);
            }

            mstream.Write(bytesData, 0, bytesData.Length);
        }
示例#7
0
    /// <summary>
    /// Prepends <paramref name="delay"/> milliseconds of silence to the song,
    /// assigns the shifted clip to the Player, and fades it in over
    /// <paramref name="fadeTime"/> ms (capped at the first note's time).
    /// </summary>
    private IEnumerator PlayCoroutine(AudioClip song, int delay, int fadeTime)
    {
        // BUG FIX: the original computed `delay / 1000` with integer
        // division, truncating the silence to whole seconds (500 ms -> 0).
        // Multiply first (in long to avoid overflow for large delays), divide
        // last, and scale by channels afterwards so the offset stays
        // frame-aligned.
        int extraData = (int)((long)song.frequency * delay / 1000) * song.channels;

        // Shift the original samples right by the silence length.
        float[] songData = new float[song.samples * song.channels];
        song.GetData(songData, 0);
        song = AudioClip.Create(song.name, songData.Length + extraData, song.channels, song.frequency, false);
        float[] newSongData = new float[songData.Length + extraData];
        Array.Copy(songData, 0, newSongData, extraData, songData.Length);
        song.SetData(newSongData, 0);

        m_extraDataLength = extraData;

        Player.clip = song;
        // Never fade past the first note of the beatmap.
        if (Beatmap.CurrentlyLoaded.Notes[0].time.Ms < fadeTime)
        {
            fadeTime = Beatmap.CurrentlyLoaded.Notes[0].time.Ms;
        }
        // NOTE(review): fadeTime / 1000 is also integer division — presumably
        // Helper.FadeIn takes whole seconds; confirm.
        StartCoroutine(Helper.FadeIn(Player, fadeTime / 1000));
        yield return(null);
    }
    ///-----------------------------------------------------------
    /// <summary>Stops recording and trims the clip to the recorded length.</summary>
    ///-----------------------------------------------------------
    public void StopRecord()
    {
        // Get the microphone's current write position (in samples).
        int position = Microphone.GetPosition(mic);

        // Force the microphone recording to stop.
        Microphone.End(mic);

        // Only trim when something was actually recorded.
        if (position > 0)
        {
            // The clip's length always equals maxDuration regardless of when
            // recording stopped, so the tail is padded with silence.
            Debug.Log("修正前の録音時間: " + audioClip.length);

            // Copy the full (padded) sample data out of the recorded clip.
            float[] soundData = new float[audioClip.samples * audioClip.channels];
            audioClip.GetData(soundData, 0);

            // Allocate a buffer sized to just the recorded portion.
            float[] newData = new float[position * audioClip.channels];

            // Copy only the recorded samples.
            for (int i = 0; i < newData.Length; i++)
            {
                newData[i] = soundData[i];
            }

            // Create a new AudioClip of the trimmed length and fill it.
            AudioClip newClip = AudioClip.Create(audioClip.name, position, audioClip.channels, audioClip.frequency, false);
            newClip.SetData(newData, 0);

            // Swap the trimmed clip in place of the padded one.
            AudioClip.Destroy(audioClip);
            audioClip = newClip;

            // Log the corrected recording length.
            Debug.Log("修正後の録音時間: " + newClip.length);
        }
    }
示例#9
0
    /// <summary>
    /// Checks whether the most recent microphone audio is loud enough to be
    /// worth sending over the network.
    /// </summary>
    /// <returns>true when the level of the last 128 samples reaches
    /// m_fMicSensitivity; false otherwise (or when no mic is ready).</returns>
    bool CheckRecordedLevel()
    {
        if (m_bMicrophoneInitialized == false)
        {
            Debug.Log("No microphone has been initialized.");
            return(false);
        }

        //get mic volume
        int dec = 128;

        float[] waveData    = new float[dec];
        int     micPosition = Microphone.GetPosition(null) - (dec + 1); // null means the first microphone

        // BUG FIX: just after recording starts GetPosition can be smaller
        // than dec + 1, and GetData with a negative offset fails; treat the
        // warm-up window as "no usable signal yet".
        if (micPosition < 0)
        {
            return(false);
        }

        m_pRecordedAudioClip.GetData(waveData, micPosition);

        // Getting a peak on the last 128 samples
        float levelMax = 0;

        for (int i = 0; i < dec; i++)
        {
            float wavePeak = waveData[i] * waveData[i];
            if (levelMax < wavePeak)
            {
                levelMax = wavePeak;
            }
        }
        // sqrt(sqrt(peak^2)) == sqrt(|peak|): a roughly linear level value.
        float level = Mathf.Sqrt(Mathf.Sqrt(levelMax));

        if (level >= m_fMicSensitivity)
        {
            Debug.LogWarning("Detected Voice input above the sensitivy value, this audio will be sent over the network.");
            return(true);
        }
        Debug.Log("This audio doesn't have enough level, it will be rejected.");
        return(false);
    }
示例#10
0
        // Pulls the song's interleaved samples, selects the game-mode
        // handler, shows the progress UI, and starts the background Loader task.
        void Start()
        {
            ChunkStep = ChunkSize / Subchunks;
            Channels  = Song.channels;
            // Interleaved buffer: frames * channels floats.
            Samples   = new float[Song.samples * Channels];
            Song.GetData(Samples, 0);
            SongName   = Song.name;
            // NOTE(review): frequency * channels is total samples per second
            // across all channels, not the per-channel rate — confirm
            // consumers of SampleRate expect that.
            SampleRate = Song.frequency * Channels;
            switch (Mode)
            {
            case GameModeType.Ninja:
                ModeClass = new Ninja();
                break;

            default:
                ModeClass = new FreeForAll();
                break;
            }
            Progress.transform.parent.gameObject.SetActive(true);
            // Run the Loader off the main thread.
            Worker = new Task(Loader);
            Worker.Start();
        }
示例#11
0
        // Pulls the samples played since the previous call (tracked via the
        // lastTime cursor) out of audioClip and forwards them to
        // AddSamples(float[]).
        public void AddSamples(AudioClip audioClip, float time)
        {
            // A different clip resets the time cursor.
            if (audioClip != lastAudioClip)
            {
                lastAudioClip = audioClip;
                lastTime      = -1;
            }

            // First call for this clip: start reading from `time`.
            if (lastTime == -1)
            {
                lastTime = time;
            }

            // samples / length = samples per second of playback.
            var samplesPerSecond = audioClip.samples / audioClip.length;
            // Elapsed span rounded down to whole channel groups, but at least
            // one group. NOTE(review): the division/multiplication by channels
            // presumably keeps reads frame-aligned — confirm against
            // AddSamples(float[])'s expectations.
            var dataLength       = Math.Max(audioClip.channels, (int)((time - lastTime) * samplesPerSecond / audioClip.channels) * audioClip.channels);
            var data             = new float[dataLength];

            audioClip.GetData(data, (int)(lastTime * samplesPerSecond));
            lastTime = time;

            AddSamples(data);
        }
示例#12
0
    /// <summary>
    /// Returns a copy of <paramref name="clipToModify"/> whose tail fades
    /// linearly to silence.
    /// </summary>
    /// <param name="clipToModify">Source clip (left unmodified).</param>
    /// <param name="fadeOutDuration">Fraction (0..1) of the sample buffer over
    /// which the fade happens. NOTE(review): despite the name this is a
    /// fraction, not seconds — confirm callers.</param>
    /// <returns>A new AudioClip with the fade applied.</returns>
    public AudioClip Fade(AudioClip clipToModify, float fadeOutDuration)
    {
        // Interleaved sample buffer covering every channel.
        samples = new float[clipToModify.samples * clipToModify.channels];
        clipToModify.GetData(samples, 0);

        // Gain starts at full volume and decreases per sample past the
        // fade-out start index.
        float value = 1.0f;
        float fadeOutStart = samples.Length - (fadeOutDuration * samples.Length);
        float gainRegressionPerSample = 1.0f / (fadeOutDuration * samples.Length);

        for (int i = 0; i < samples.Length; i++)
        {
            if (i > fadeOutStart)
            {
                value -= gainRegressionPerSample;
                // Clamp at zero so the waveform is never inverted.
                if (value < 0)
                {
                    value = 0.0f;
                }
            }
            samples[i] = samples[i] * value;
        }

        // BUG FIX: the original created the clip at a hard-coded 44100 Hz, so
        // any source at a different rate came back pitch/length-shifted, and
        // it passed the interleaved float count as the per-channel sample
        // count, over-sizing multi-channel clips. Use the source's own frame
        // count, channel count, and frequency.
        AudioClip temp = AudioClip.Create("TemporaryAudioClip", clipToModify.samples, clipToModify.channels, clipToModify.frequency, false);

        temp.SetData(samples, 0);
        return temp;
    }
示例#13
0
    // Shortens `clip` (by reference) to its first `endPos` frames by copying
    // the kept samples into a freshly created AudioClip and destroying the
    // original.
    private void ClipAudio(ref AudioClip clip, int endPos)
    {
        // from https://answers.unity.com/questions/544264/record-dynamic-length-from-microphone.html
        // Grab the samples to keep: endPos frames, interleaved per channel.
        float[] keptSamples = new float[endPos * clip.channels];
        clip.GetData(keptSamples, 0);

        // An AudioClip cannot be resized in place, so build a replacement of
        // exactly the desired length with the same channel count and rate.
        AudioClip trimmed = AudioClip.Create(clip.name,
                                             endPos,
                                             clip.channels,
                                             clip.frequency,
                                             false);
        trimmed.SetData(keptSamples, 0);

        // Swap the trimmed clip in and dispose of the original.
        AudioClip.DestroyImmediate(clip);
        clip = trimmed;
    }
示例#14
0
    // Copies the microphone clip into microphoneBuffer, then streams the
    // region between _head and _position to the sender, handling the
    // ring-buffer wrap-around case. Returns the measured volume of the
    // written region (presumably in dB, given the "db" naming — confirm
    // against getVolume's implementation).
    private float wavBufferWrite(int _head, int _position, AudioClip _clip)
    {
        // Pull the current audio data into the buffer.
        _clip.GetData(microphoneBuffer, 0);

        // Append the audio data to the output.
        if (_head < _position)
        {
            // No wrap: a single contiguous region [_head, _position).
            sendWaveData(_head, _position);
            dbTemp = getVolume(_head, _position);
            return(dbTemp);
        }
        else
        {
            // Wrapped: send the tail (head..end) then the start (0..position),
            // and average the two volume readings.
            sendWaveData(_head, microphoneBuffer.Length);
            sendWaveData(0, _position);
            float db_1 = getVolume(_head, microphoneBuffer.Length);
            float db_2 = getVolume(0, _position);
            dbTemp = (db_1 + db_2) / 2f;
            return(dbTemp);
        }
    }
示例#15
0
    /// <summary>
    /// Removes leading and trailing samples whose absolute value is at or
    /// below <paramref name="min"/>, normalizes the remainder, and returns it
    /// as a new clip — or null when the whole clip was silence.
    /// NOTE(review): reads clip.samples floats only — assumes mono; confirm.
    /// </summary>
    public static AudioClip TrimSilenceAndNormalize(AudioClip clip, float min)
    {
        var data = new float[clip.samples];

        clip.GetData(data, 0);
        var samples = new List <float>(data);

        int i;

        // Find the first sample above the threshold and drop everything
        // before it.
        for (i = 0; i < samples.Count; i++)
        {
            if (Mathf.Abs(samples[i]) > min)
            {
                break;
            }
        }

        samples.RemoveRange(0, i);
        if (samples.Count == 0)
        {
            return(null);
        }

        // Find the last sample above the threshold.
        for (i = samples.Count - 1; i > 0; i--)
        {
            if (Mathf.Abs(samples[i]) > min)
            {
                break;
            }
        }

        // BUG FIX: the original removed from index i, which also deleted the
        // last non-silent sample (and emptied a 1-sample remainder entirely).
        // Keep samples[i] and trim from i + 1.
        samples.RemoveRange(i + 1, samples.Count - (i + 1));
        Normalize(samples);

        var result = AudioClip.Create(clip.name, samples.Count, clip.channels, clip.frequency, false);

        result.SetData(samples.ToArray(), 0);
        return(result);
    }
示例#16
0
    /// <summary>
    /// Writes <paramref name="audioClip"/> to <paramref name="filename"/> as
    /// 16-bit PCM WAV and returns the file's bytes.
    /// NOTE(review): the three header chunks are hard-coded (mono, 44100 Hz,
    /// fixed data size) and are not recomputed from the clip — confirm
    /// downstream readers tolerate that.
    /// </summary>
    public byte[] SaveAudioClipToWav(AudioClip audioClip, string filename)
    {
        // `using` guarantees the stream and writer are closed; the original's
        // fsWrite.Close() sat after `return` and was unreachable, leaking the
        // handle.
        using (FileStream fsWrite = File.Open(filename, FileMode.Create))
        using (BinaryWriter bw = new BinaryWriter(fsWrite))
        {
            // RIFF / WAVE / "fmt " chunk header.
            Byte[] header = { 82, 73, 70, 70, 22, 10, 4, 0, 87, 65, 86, 69, 102, 109, 116, 32 };
            bw.Write(header);

            // fmt chunk body: PCM, 1 channel, 44100 Hz.
            Byte[] header2 = { 16, 0, 0, 0, 1, 0, 1, 0, 68, 172, 0, 0, 136, 88, 1, 0 };
            bw.Write(header2);

            // block align / bits-per-sample + "data" chunk header.
            Byte[] header3 = { 2, 0, 16, 0, 100, 97, 116, 97, 152, 9, 4, 0 };
            bw.Write(header3);

            float[] samples = new float[audioClip.samples];
            audioClip.GetData(samples, 0);

            // Convert each float to a little-endian 16-bit sample.
            int i = 0;
            while (i < audioClip.samples)
            {
                int sampleInt = (int)(32000.0 * samples[i++]);
                int msb       = sampleInt / 256;
                int lsb       = sampleInt - (msb * 256);
                bw.Write((Byte)lsb);
                bw.Write((Byte)msb);
            }
            bw.Flush();

            // BUG FIX: the original read from the stream's current position
            // (end of file), so the returned buffer stayed all zeros. Rewind
            // first, and loop because Read may return fewer bytes than asked.
            fsWrite.Seek(0, SeekOrigin.Begin);
            int    lengthInt = Convert.ToInt32(fsWrite.Length);
            byte[] buffer    = new byte[lengthInt];
            int    total     = 0;
            while (total < lengthInt)
            {
                int read = fsWrite.Read(buffer, total, lengthInt - total);
                if (read == 0)
                {
                    break;
                }
                total += read;
            }
            return buffer;
        }
    }
示例#17
0
    // Recomputes the recording's sample count (SamplingRate * actual recorded
    // seconds), converts float samples to 16-bit PCM bytes, and buffers the
    // result in a MemoryStream for WriteFileStream() to persist.
    void ConvertAndWrite(AudioClip clip)
    {
        // One extra second is added to avoid losing data at the tail.
        // NOTE(review): the trailing * 2 presumably accounts for stereo or a
        // further safety margin — confirm against how audioLength_time is
        // measured.
        int actual_Length = (audioLength_time + 1) * SamplingRate * 2;

        float[] samples = new float[actual_Length];

        clip.GetData(samples, 0);

        // Convert float [-1, 1] -> Int16, then Int16 -> bytes.
        Int16[] intData = new Int16[samples.Length];

        // bytesData is twice the sample count because each Int16 is 2 bytes.
        Byte[] bytesData = new Byte[samples.Length * 2];


        int rescaleFactor = 32767; //to convert float to Int16

        for (int i = 0; i < samples.Length; i++)
        {
            intData[i] = (short)(samples[i] * rescaleFactor);

            Byte[] byteArr = new Byte[2];
            byteArr = BitConverter.GetBytes(intData[i]);
            byteArr.CopyTo(bytesData, i * 2);
        }

        speech_Byte = null;

        // Cache the converted PCM bytes in a read-only MemoryStream.
        memoryStream = new MemoryStream(bytesData, false);


        StartCoroutine(WriteFileStream());
    }
示例#18
0
    // Plays the solve sound and, while it plays, jitters the submit buttons'
    // text material (texture scale/offset and color) in time with the clip's
    // amplitude envelope.
    IEnumerator SolveAnimation()
    {
        Animating = true;
        BombAudio.PlaySoundAtTransform("Solve", transform);

        // Interleaved samples of the solve clip.
        float[] samples = new float[SolveClip.samples * SolveClip.channels];
        SolveClip.GetData(samples, 0);

        // Normalize so the loudest sample has amplitude 1.
        // NOTE(review): a fully-silent clip would make max == 0 and produce
        // NaNs below — presumably SolveClip is never silent; confirm.
        float max = samples.Max(sample => Mathf.Abs(sample));

        samples = samples.Select(sample => sample / max).ToArray();

        // Random per-button texture jitter directions (one pair per button).
        Vector2[] scaleVectors  = new[] { new Vector2(Random.Range(-1f, 1f), Random.Range(-1f, 1f)), new Vector2(Random.Range(-1f, 1f), Random.Range(-1f, 1f)) };
        Vector2[] offsetVectors = new[] { new Vector2(Random.Range(-2f, 2f), Random.Range(-2f, 2f)), new Vector2(Random.Range(-2f, 2f), Random.Range(-2f, 2f)) };

        float prevAlpha = 0;

        // alpha runs 0..1 over the clip's duration; each frame take the peak
        // amplitude of the sample span covered since the previous frame.
        foreach (float alpha in TimeBasedAnimation(SolveClip.length))
        {
            int   startSample = (int)(prevAlpha * (samples.Length - 1));
            int   endSample   = (int)(alpha * (samples.Length - 1));
            float sample      = samples.Skip(startSample).Take(endSample - startSample + 1).Max(s => Mathf.Abs(s));

            for (int i = 0; i < 2; i++)
            {
                TextMesh textMesh = GetTextMesh(SubmitButtons[i]);
                Material mat      = textMesh.GetComponent <Renderer>().material;
                // Louder span => larger texture scale/offset and brighter,
                // randomly-hued text.
                mat.mainTextureScale  = Vector2.one + Vector2.Lerp(Vector2.zero, scaleVectors[i], sample);
                mat.mainTextureOffset = Vector2.Lerp(Vector2.zero, offsetVectors[i], sample * 0.1f);
                textMesh.color        = Color.HSVToRGB(Random.value, 1, Math.Abs(sample));
            }

            prevAlpha = alpha;

            yield return(null);
        }

        Animating = false;
    }
示例#19
0
    // Converts the clip's float samples to little-endian 16-bit PCM and
    // writes the raw bytes to fileStream (no WAV header is emitted here).
    private void ConvertAndWrite(FileStream fileStream, AudioClip clip)
    {
        const int rescaleFactor = 32767;         //to convert float to Int16

        float[] samples = new float[clip.samples];
        clip.GetData(samples, 0);

        // Two output bytes per converted sample.
        Byte[] pcmBytes = new Byte[samples.Length * 2];

        for (int sampleIndex = 0; sampleIndex < samples.Length; sampleIndex++)
        {
            Int16 value = (short)(samples[sampleIndex] * rescaleFactor);
            Byte[] encoded = BitConverter.GetBytes(value);
            encoded.CopyTo(pcmBytes, sampleIndex * 2);
        }

        fileStream.Write(pcmBytes, 0, pcmBytes.Length);
    }
示例#20
0
    /// <summary>
    /// Creates a new clip containing the audio of <paramref name="clip"/>
    /// between <paramref name="start"/> and <paramref name="stop"/> seconds.
    /// A non-positive span is treated as wrapped and gets 30 seconds added.
    /// </summary>
    private AudioClip MakeSubclip(AudioClip clip, float start, float stop)
    {
        /* Create a new audio clip */
        int   frequency  = clip.frequency;
        float timeLength = stop - start;

        if (timeLength <= 0)
        {
            timeLength += 30.0f;
        }
        int samplesLength = (int)(frequency * timeLength);

        // BUG FIX: the original always created a mono (1-channel) clip and
        // sized the copy buffer without channels, so multi-channel sources
        // were copied half-length and played back wrong. Preserve the source
        // channel count and size the buffer in floats (frames * channels).
        int       channels = clip.channels;
        AudioClip newClip  = AudioClip.Create(clip.name + "-sub", samplesLength, channels, frequency, false);

        /* Create a temporary buffer for the samples */
        float[] data = new float[samplesLength * channels];
        /* Get the data from the original clip (offset is in frames) */
        clip.GetData(data, (int)(frequency * start));
        /* Transfer the data to the new clip */
        newClip.SetData(data, 0);
        /* Return the sub clip */
        return newClip;
    }
示例#21
0
    // Starts a 1-second looping microphone capture at 8192 Hz and seeds the
    // change-tracking buffers that Update() diffs for VOIP packaging.
    void Start()
    {
        // No microphone available: disable this component's processing.
        if (Microphone.devices.Length == 0)
        {
            isActive = false;
            return;
        }

        // 1-second looping clip at 8192 Hz => an 8192-sample ring buffer.
        m_audioClip = Microphone.Start(Microphone.devices[0], true, 1, 8192);

        m_samples       = new float[8192];
        m_activeSamples = new float[8192];

        // Seed the mirror buffer so the first Update() diff is meaningful.
        m_audioClip.GetData(m_samples, 0);
        lastDifferent  = 0;
        firstDifferent = 8191;

        // Pre-allocate the outgoing network message and its payload.
        m_dataMessage           = new VNetMessageVOIPData();
        m_dataMessage.voiceData = new float[200];

        isActive = true;
    }
示例#22
0
    // Returns the largest raw sample value among the most recent
    // VOLAUM_DATA_LENGTH microphone samples, or 0 while the mic has not yet
    // produced a full window.
    private float GetMaxVolume()
    {
        var recentSamples = new float[VOLAUM_DATA_LENGTH];
        int readOffset = Microphone.GetPosition(mDeviceName) - VOLAUM_DATA_LENGTH + 1;

        // The mic hasn't captured a full window yet.
        if (readOffset < 0)
        {
            return 0f;
        }

        mMicroRecord.GetData(recentSamples, readOffset);

        // Track the loudest raw sample (not squared, not absolute).
        float loudest = 0f;
        foreach (float sample in recentSamples)
        {
            if (sample > loudest)
            {
                loudest = sample;
            }
        }
        return loudest;
    }
示例#23
0
    // Stops any live microphone capture, then returns the peak squared
    // sample value (max of sample^2) across the entire supplied clip.
    public float FloatLinearOfClip(AudioClip clip)
    {
        StopMicrophone();

        _recordedClip = clip;

        // Copy every sample out of the clip.
        float[] waveData = new float[_recordedClip.samples];
        _recordedClip.GetData(waveData, 0);

        // Track the largest squared amplitude seen so far.
        float peak = 0;
        for (int sampleIndex = 0; sampleIndex < _recordedClip.samples; sampleIndex++)
        {
            float squared = waveData[sampleIndex] * waveData[sampleIndex];
            if (squared > peak)
            {
                peak = squared;
            }
        }
        return peak;
    }
示例#24
0
    /// <summary>
    /// Converts the clip's float samples to little-endian 16-bit PCM and
    /// writes the raw bytes to <paramref name="fileStream"/>.
    /// NOTE(review): reads clip.samples floats only, ignoring clip.channels —
    /// confirm callers only pass mono clips.
    /// </summary>
    public static void ConvertAndWrite(Stream fileStream, AudioClip clip)
    {
        var samples = new float[clip.samples];
        clip.GetData(samples, 0);

        // Two bytes per converted Int16 sample.
        Byte[] bytesData = new Byte[samples.Length * 2];

        // The scale is integral, so declare it as a const int (the original's
        // float produced the identical product).
        const int rescaleFactor = 32767; //to convert float to Int16

        for (int i = 0; i < samples.Length; i++)
        {
            // Write directly; the original allocated a throwaway Byte[2]
            // that GetBytes immediately replaced.
            Int16 intData = (short)(samples[i] * rescaleFactor);
            BitConverter.GetBytes(intData).CopyTo(bytesData, i * 2);
        }

        fileStream.Write(bytesData, 0, bytesData.Length);
    }
示例#25
0
    // Returns the peak of (sample^3 * 12) over the latest 128 microphone
    // samples, or 0 when fewer than 128 samples have been captured so far.
    float GetMaxVolume()
    {
        var window = new float[128];
        int readOffset = Microphone.GetPosition(device) - 128 + 1;

        // Not enough audio recorded yet for a full 128-sample window.
        if (readOffset < 0)
        {
            return 0;
        }

        microrecord.GetData(window, readOffset);

        float peak = 0f;
        foreach (float sample in window)
        {
            // Cubing keeps the sign while exaggerating louder samples; the
            // * 12 gain matches the original tuning.
            float weighted = sample * sample * sample * 12;
            if (weighted > peak)
            {
                peak = weighted;
            }
        }
        return peak;
    }
示例#26
0
    /// <summary>
    /// Adds the audio source clip and initiates a generation of impulse
    /// responses: attaches a configured AudioSource to every tagged "Audio
    /// Source" object that lacks one, then registers the clip's samples for
    /// impulse-response processing.
    /// </summary>
    /// <param name="c">The audio clip</param>
    public void AddAudioSource(AudioClip c)
    {
        audioSources = GameObject.FindGameObjectsWithTag("Audio Source");

        foreach (GameObject a in audioSources)
        {
            // The guitar stem is mixed louder than everything else.
            float volume = 0.2f;
            if (Equals(a.name, "Guitar Play"))
            {
                volume = 0.5f;
            }
            if (a.transform.childCount == 0)
            {
                GameObject go = new GameObject();
                // Cache the component: the original performed nine separate
                // GetComponent<AudioSource>() lookups plus one whose result
                // was discarded entirely.
                AudioSource source = go.AddComponent <AudioSource>();
                source.clip         = c;
                source.loop         = true;
                source.volume       = volume;
                source.mute         = true;
                source.spatialBlend = 1; // fully 3D-spatialized
                source.minDistance  = 1;
                source.maxDistance  = 60;
                source.Play();
                // Clone the configured template under the tagged object,
                // then discard the template itself.
                Instantiate(go, a.transform);
                Destroy(go);
            }
        }

        originalAudioClips.Add(c);
        mixedAudioClips.Add(c);
        // Interleaved samples (frames * channels) for the IR pipeline.
        float[] audioSample = new float[c.samples * c.channels];
        c.GetData(audioSample, 0);
        audioSamples.Add(audioSample);
        // NOTE(review): Allocator.Persistent NativeArrays must be Disposed
        // somewhere — verify signals' cleanup path to avoid a native leak.
        signals.Add(new NativeArray <float>(audioSample, Allocator.Persistent));
        offsetSamples.Add(0);

        rirScript.ToggleCalculateImpulseResponse();
    }
示例#27
0
    /// <summary>
    /// Stops the microphone, trims the recorded clip to the elapsed recording
    /// time, assigns it for playback, and persists it via SaveSystem.
    /// </summary>
    public void EndRecording()
    {
        //End the Recording the voice of Player
        Microphone.End("");

        // Compute the trimmed length once instead of duplicating the
        // expression (the original evaluated it twice).
        // NOTE(review): this ignores recording.channels when sizing `data` —
        // presumably the mic clip is mono; confirm.
        int trimmedSamples = (int)((Time.time - startRecordingTime) * recording.frequency);

        //Trim the audioclip by the length of the recording
        recordingNew = AudioClip.Create(recording.name, trimmedSamples, recording.channels, recording.frequency, false);
        float[] data = new float[trimmedSamples];
        recording.GetData(data, 0);
        recordingNew.SetData(data, 0);
        this.recording = recordingNew;

        //Play recording
        audioSource.clip = recording;

        //Save recording
        SaveSystem.SaveVoiceClip(audioSource.clip, clipNumber);
        clipNumber++;
    }
示例#28
0
        // Initializes the native Quinoa engine: allocates the native instance
        // once, starts a 1-sample looping dummy clip so Unity applies 3D
        // positioning to this source, downmixes Clip to one channel (first
        // channel of each frame, not an average), and pushes the samples plus
        // all tuning parameters to the native side.
        private void OnEnable()
        {
            // Create the native instance only once per component lifetime.
            if (_ptr == IntPtr.Zero)
            {
                _ptr = Quinoa_New(AudioSettings.outputSampleRate);
                Debug.Log(AudioSettings.outputSampleRate);
            }

            // create a dummy clip and start playing it so 3d positioning works
            var dummyClip = AudioClip.Create("dummyclip", 1, 1, AudioSettings.outputSampleRate, false);

            dummyClip.SetData(new float[] { 1 }, 0);
            var audioSource = GetComponent <AudioSource>();

            audioSource.clip = dummyClip;
            audioSource.loop = true;
            audioSource.Play();

            // Interleaved samples of the source clip (frames * channels).
            float[] d = new float[Clip.samples * Clip.channels];
            if (Clip.GetData(d, 0))
            {
                // Keep only the first channel of every frame.
                float[] c = new float[Clip.samples];

                for (int i = 0; i < c.Length; ++i)
                {
                    c[i] = d[i * Clip.channels];
                }

                Quinoa_SetSample(_ptr, c, c.Length, Clip.frequency);
                Debug.Log(Clip.frequency);
            }

            // Forward the current inspector parameters to the native engine.
            Quinoa_SetSpeed(_ptr, Speed);
            Quinoa_SetPitch(_ptr, Pitch);
            Quinoa_SetWindowSize(_ptr, WindowSize);
            Quinoa_SetStart(_ptr, Start);
            Quinoa_SetLength(_ptr, Length);
            Quinoa_SetWindowSmoothness(_ptr, Smoothness);
        }
    /// <summary>
    /// Samples the last 256 microphone values into _window and updates
    /// _window_mean (floored at _reference_power — presumably a noise floor
    /// for later level math; confirm against the caller).
    /// </summary>
    private void UpdateWindow()
    {
        const int windowSize = 256;
        int micPos = Microphone.GetPosition(null);

        // BUG FIX: the original only checked micPos > 0, so during the first
        // 256 samples GetData was called with a negative offset (an error in
        // Unity). Require a full window before reading.
        if (micPos > windowSize)
        {
            _window = new float[windowSize];

            _mic.GetData(_window, micPos - (windowSize + 1));

            // Mean of the raw (signed) samples over the window.
            _window_mean = 0F;
            foreach (float f in _window)
            {
                _window_mean += f;
            }
            _window_mean /= _window.Length;
            if (_window_mean < _reference_power)
            {
                _window_mean = _reference_power;
            }
        }
    }
示例#30
0
    // Renders a mirrored waveform of `audio` into a new width x height
    // texture: black background with bars of `col` centered on the vertical
    // midline, scaled to 75% of the texture height. (`saturation` is accepted
    // but unused, matching the original signature.)
    public Texture2D PaintWaveformSpectrum(AudioClip audio, float saturation, int width, int height, Color col)
    {
        Debug.Log("Texture Width: " + width);
        Texture2D tex = new Texture2D(width, height, TextureFormat.RGBA32, false);

        // Pull all samples, then decimate to one |sample| per horizontal pixel.
        float[] samples = new float[audio.samples];
        audio.GetData(samples, 0);
        Debug.Log("Sample length: " + samples.Length);

        int packSize = (audio.samples / width) + 1;
        float[] waveform = new float[width];
        int column = 0;
        for (int sampleIndex = 0; sampleIndex < audio.samples; sampleIndex += packSize)
        {
            waveform[column] = Mathf.Abs(samples[sampleIndex]);
            column++;
        }

        // Fill the background with black.
        for (int x = 0; x < width; x++)
        {
            for (int y = 0; y < height; y++)
            {
                tex.SetPixel(x, y, Color.black);
            }
        }

        // Draw each amplitude as a bar mirrored about the midline.
        for (int x = 0; x < waveform.Length; x++)
        {
            for (int y = 0; y <= waveform[x] * (height * .75f); y++)
            {
                tex.SetPixel(x, (height / 2) + y, col);
                tex.SetPixel(x, (height / 2) - y, col);
            }
        }

        tex.Apply();
        return tex;
    }