Example #1
    /*
     *  RecordingHandler() records the real-time broadcast from the device's microphone.
     *  Recording relies on the microphone's sample rate.
     *  Warning: recordingHZ must be an even integer multiple of chunkSize.
     *  Thanks to Michael Pickering - RMichaelPickering (GitHub) for explaining how to
     *  reduce the significant recording delay.
     */
    private IEnumerator RecordingHandler()
    {
        // Tracks how much of the buffer has already been read; starts at zero
        int chunkEnd = 0;
        // Current read position in the broadcast
        int readPosition = 0;
        // Downsampling factor; a value of 1 disables downsampling
        int downSampleFactor = 1;
        // RMS value of the sound level, calculated per chunk of samples
        float rmsValue;
        // dB value of the sound level, calculated per chunk of samples
        float dbValue;

        // Float array holding one chunk of audio samples for processing
        float[] samplesChunk = null;

        Log.Debug("{0}", "devices: {1}", runningProcess, Microphone.devices);

        // Start recording; the 'true' flag enables a looping ring buffer
        recording = Microphone.Start(microphoneID, true, recordingBufferSize, recordingHZ);

        // Microphone.Start returns null only on failure,
        // so test for failure before touching the clip
        if (recording == null)
        {
            StopRecording();
            yield break;
        }

        Log.Debug(runningProcess, "Microphone ring buffer includes {0} channels with a total of {1} samples.", recording.channels.ToString(), recording.samples.ToString());

        // The end of the first chunk is at 'chunkSize * downSampleFactor - 1'
        // (the first sample is at position zero)
        chunkEnd = chunkSize * downSampleFactor - 1;

        // Wait until at least one audio chunk is ready
        // (cast to float so integer division doesn't truncate the wait time to zero)
        yield return(new WaitForSecondsRealtime((float)(chunkSize * downSampleFactor) / recordingHZ));


        while (recordingRoutine != 0 && recording != null)
        {
            // Get current writePosition of the microphone in the recording
            int writePosition = Microphone.GetPosition(microphoneID);
            // Testing if the microphone is still recording
            if (!Microphone.IsRecording(microphoneID))
            {
                Log.Error("MicrophoneWidget", "Microphone disconnected.");
                StopRecording();
                yield break;
            }

            // Make sure that at least chunkSize samples have been written
            while (writePosition > readPosition + chunkEnd || writePosition < readPosition)
            {
                // At least one chunk has been recorded; create a RecordClip and pass it to our callback
                samplesChunk = new float[chunkSize * downSampleFactor];
                recording.GetData(samplesChunk, readPosition);


                AudioData record = new AudioData();
                // 20171018 RMPickering - The next statement seems to be setting the MaxLevel to the highest value from the samples, not taking into account the negative values.
                // record.MaxLevel = Mathf.Max(samples);


                // Accumulators for the level calculation; squared samples feed the RMS,
                // absolute values ignore sign so negative samples still count
                float sumSquaredSamples  = 0; // sum of squared samples
                float sumAbsoluteSamples = 0; // sum of absolute values

                // One-pole low-pass filter used as an anti-aliasing filter
                // The cutoff must stay below the Nyquist frequency (8000 Hz at a 16 kHz sample rate)
                float CUTOFF = 6500.0f;
                float RC     = 1.0f / (CUTOFF * 2.0f * Mathf.PI);
                // Using the initial sample rate of 16 kHz
                float dt    = 1.0f / 16000.0f;
                float alpha = dt / (RC + dt);

                // Seed the accumulators with the first sample (the filter loop starts at index 1)
                sumSquaredSamples  += samplesChunk[0] * samplesChunk[0];
                sumAbsoluteSamples += Mathf.Abs(samplesChunk[0]);

                // Apply the low-pass filter and accumulate the level sums
                int i = 0;
                for (i = 1; i < chunkSize * downSampleFactor; i++)
                {
                    // The filter smooths out frequency content above the cutoff
                    samplesChunk[i]     = samplesChunk[i - 1] + alpha * (samplesChunk[i] - samplesChunk[i - 1]);
                    sumSquaredSamples  += samplesChunk[i] * samplesChunk[i];
                    sumAbsoluteSamples += Mathf.Abs(samplesChunk[i]);
                }

                // RMS value is the square root of the mean of the squared samples
                rmsValue = Mathf.Sqrt(sumSquaredSamples / (chunkSize * downSampleFactor));
                // Convert to decibels relative to refValue
                dbValue = 20 * Mathf.Log10(rmsValue / refValue);
                // Set minimum dbValue to -160 dB
                if (dbValue < -160)
                {
                    dbValue = -160;
                }

                // Set MaxLevel
                record.MaxLevel = rmsValue;

                // Set the clip recorded
                record.Clip = AudioClip.Create("audioChunk", chunkSize, 1, recordingHZ, false);

                // Copy the audio samples from samplesChunk into the recorded clip
                record.Clip.SetData(samplesChunk, 0);

                // Send the recorded clip to IBM Watson Speech To Text
                speechToText.OnListen(record);

                // Advance the read position past the block that has just been copied
                readPosition += chunkSize * downSampleFactor;
                if (readPosition > recordingHZ * recording.channels - 1)
                {
                    // Reset readPosition and chunkEnd to begin a new pass over the buffer
                    readPosition = 0;
                    chunkEnd     = chunkSize * downSampleFactor - 1;
                }
                else
                {
                    chunkEnd += chunkSize * downSampleFactor;
                }
            }

            // Wait before the next update and continue streaming from the microphone
            yield return(new WaitForSecondsRealtime((float)(chunkSize * downSampleFactor) / recordingHZ));
        }

        yield break;
    }
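The RMS and dB metering above is independent of the Watson pipeline and easy to test on its own. Below is a minimal sketch of the same calculation as a standalone helper, assuming UnityEngine's Mathf and that refValue is the 0 dB reference used by the handler (the class and method names are illustrative):

    public static class AudioLevelUtil
    {
        // Returns the RMS of the samples and outputs the dB level relative to refValue,
        // clamped to -160 dB exactly as RecordingHandler() does above.
        public static float Measure(float[] samples, float refValue, out float dbValue)
        {
            float sumSquared = 0f;
            for (int i = 0; i < samples.Length; i++)
            {
                sumSquared += samples[i] * samples[i];
            }

            float rms = Mathf.Sqrt(sumSquared / samples.Length);
            dbValue = 20f * Mathf.Log10(rms / refValue);
            if (dbValue < -160f)
            {
                dbValue = -160f;
            }
            return rms;
        }
    }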
Example #2
    void InitSora()
    {
        DisposeSora();

        sora = new Sora();
        if (!MultiSub)
        {
            sora.OnAddTrack = (trackId) =>
            {
                Debug.LogFormat("OnAddTrack: trackId={0}", trackId);
                this.trackId = trackId;
            };
            sora.OnRemoveTrack = (trackId) =>
            {
                Debug.LogFormat("OnRemoveTrack: trackId={0}", trackId);
                this.trackId = 0;
            };
        }
        else
        {
            sora.OnAddTrack = (trackId) =>
            {
                Debug.LogFormat("OnAddTrack: trackId={0}", trackId);
                var obj = GameObject.Instantiate(baseContent, Vector3.zero, Quaternion.identity);
                obj.name = string.Format("track {0}", trackId);
                obj.transform.SetParent(scrollViewContent.transform);
                obj.SetActive(true);
                var image = obj.GetComponent <UnityEngine.UI.RawImage>();
                image.texture = new Texture2D(320, 240, TextureFormat.RGBA32, false);
                tracks.Add(trackId, obj);
            };
            sora.OnRemoveTrack = (trackId) =>
            {
                Debug.LogFormat("OnRemoveTrack: trackId={0}", trackId);
                if (tracks.ContainsKey(trackId))
                {
                    GameObject.Destroy(tracks[trackId]);
                    tracks.Remove(trackId);
                }
            };
        }
        sora.OnNotify = (json) =>
        {
            Debug.LogFormat("OnNotify: {0}", json);
        };
        // Note: this callback arrives from a different thread
        sora.OnHandleAudio = (buf, samples, channels) =>
        {
            lock (audioBuffer)
            {
                audioBuffer.Enqueue(buf);
                audioBufferSamples += samples;
            }
        };

        if (unityAudioOutput)
        {
            var audioClip = AudioClip.Create("AudioClip", 480000, 1, 48000, true, (data) =>
            {
                lock (audioBuffer)
                {
                    if (audioBufferSamples < data.Length)
                    {
                        for (int i = 0; i < data.Length; i++)
                        {
                            data[i] = 0.0f;
                        }
                        return;
                    }

                    var p = audioBuffer.Peek();
                    for (int i = 0; i < data.Length; i++)
                    {
                        data[i] = p[audioBufferPosition] / 32768.0f;
                        ++audioBufferPosition;
                        if (audioBufferPosition >= p.Length)
                        {
                            audioBuffer.Dequeue();
                            audioBufferPosition = 0;
                            // Guard against Peek() on an empty queue when the final chunk
                            // ends exactly at the output buffer boundary
                            if (audioBuffer.Count == 0)
                            {
                                break;
                            }
                            p = audioBuffer.Peek();
                        }
                    }
                    audioBufferSamples -= data.Length;
                }
            });
            audioSourceOutput.clip = audioClip;
            audioSourceOutput.Play();
        }

        if (!Recvonly)
        {
            AudioRenderer.Start();
            audioSourceInput.Play();
        }
    }
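Both OnHandleAudio and the AudioClip's PCM reader callback above run off Unity's main thread, which is why every access to audioBuffer is wrapped in a lock. A sketch of the shared state this pattern assumes (field names mirror the usage above; the short[] element type follows from buf being indexed and divided by 32768, and System.Collections.Generic is needed for Queue):

    // Shared between Sora's audio callback (producer) and the AudioClip
    // PCM reader callback (consumer); both run outside the main thread.
    private readonly Queue<short[]> audioBuffer = new Queue<short[]>();
    private int audioBufferSamples  = 0; // total samples currently queued
    private int audioBufferPosition = 0; // read offset into the chunk at the head of the queue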
Example #3
    // This function synthesizes the given text and streams the resulting audio
    private void streamAudio(GameObject gameObject, string text)
    {
        SpeechConfig      speechConfig;
        SpeechSynthesizer synthesizer;

        // Creates an instance of a speech config with the specified subscription key and service region.
        // (Note: avoid hard-coding subscription keys like this in production code.)
        speechConfig = SpeechConfig.FromSubscription("c5ab91b760b24599b3667791c08aa7d9", "uksouth");

        // The default format is Riff16Khz16BitMonoPcm.
        // We play the audio in memory as an AudioClip, which doesn't require a RIFF header,
        // so we set the format to Raw16Khz16BitMonoPcm.
        speechConfig.SetSpeechSynthesisOutputFormat(SpeechSynthesisOutputFormat.Raw16Khz16BitMonoPcm);

        // Creates a speech synthesizer.
        using (synthesizer = new SpeechSynthesizer(speechConfig, null))
        {
            text = cleanText(text);

            // This string defines the voice and the text of what will be spoken.
            string ssml = @"<speak version='1.0' xmlns='http://www.w3.org/2001/10/synthesis' xml:lang='en-US'><voice name='" + masterScript.voice + "'>" + text + "</voice></speak>";

            // Starts speech synthesis, and returns after a single utterance is synthesized.
            using (var result = synthesizer.SpeakSsmlAsync(ssml).Result)//synthesizer.SpeakTextAsync(getTextboxText()).Result
            {
                // Checks result
                if (result.Reason == ResultReason.SynthesizingAudioCompleted)
                {
                    Debug.Log("Streaming Audio");

                    var sampleCount = result.AudioData.Length / 2;
                    var audioData   = new float[sampleCount];
                    for (var i = 0; i < sampleCount; ++i)
                    {
                        audioData[i] = (short)(result.AudioData[i * 2 + 1] << 8 | result.AudioData[i * 2]) / 32768.0F;
                    }

                    // The output audio format is 16K 16bit mono
                    var audioClip = AudioClip.Create("SynthesizedAudio", sampleCount, 1, 16000, false);
                    audioClip.SetData(audioData, 0);

                    if (gameObject.GetComponent <AudioSource>() == null)
                    {
                        // Components cannot be constructed with 'new'; add one instead
                        gameObject.AddComponent(typeof(AudioSource));
                    }

                    gameObject.GetComponent <AudioSource>().clip   = audioClip;
                    gameObject.GetComponent <AudioSource>().volume = (float)masterScript.audioVolume / 100;

                    if (gameObject.GetComponent <AudioSource>().isPlaying)
                    {
                        gameObject.GetComponent <AudioSource>().Pause();
                    }
                    else
                    {
                        gameObject.GetComponent <AudioSource>().Play();
                    }
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
                    Debug.Log(cancellation.Reason);
                }
            }
        }
    }
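The decode loop above, which turns raw Raw16Khz16BitMonoPcm bytes into floats, recurs in several of these examples (Example #4 and Example #5 do the same byte-pair arithmetic). A small helper capturing the conversion, assuming little-endian signed 16-bit mono PCM (the method name is illustrative):

    // Converts little-endian signed 16-bit PCM bytes to normalized floats in [-1, 1).
    // Assumes an even byte count and mono audio.
    private static float[] Pcm16ToFloats(byte[] pcm)
    {
        var samples = new float[pcm.Length / 2];
        for (int i = 0; i < samples.Length; i++)
        {
            short s = (short)(pcm[i * 2 + 1] << 8 | pcm[i * 2]);
            samples[i] = s / 32768.0f;
        }
        return samples;
    }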
Example #4
    public void ButtonClick()
    {
        lock (threadLocker)
        {
            waitingForSpeak = true;
        }

        string newMessage = null;
        var    startTime  = DateTime.Now;

        // Starts speech synthesis, and returns once the synthesis is started.
        using (var result = synthesizer.StartSpeakingTextAsync(inputField.text).Result)
        {
            // Native playback is not supported on Unity yet (currently only supported on Windows/Linux Desktop).
            // Use the Unity API to play audio here as a short term solution.
            // Native playback support will be added in the future release.
            var audioDataStream   = AudioDataStream.FromResult(result);
            var isFirstAudioChunk = true;
            var audioClip         = AudioClip.Create(
                "Speech",
                SampleRate * 600, // Up to 10 minutes of audio (600 seconds at SampleRate)
                1,
                SampleRate,
                true,
                (float[] audioChunk) =>
            {
                var chunkSize       = audioChunk.Length;
                var audioChunkBytes = new byte[chunkSize * 2];
                var readBytes       = audioDataStream.ReadData(audioChunkBytes);
                if (isFirstAudioChunk && readBytes > 0)
                {
                    var endTime       = DateTime.Now;
                    var latency       = endTime.Subtract(startTime).TotalMilliseconds;
                    newMessage        = $"Speech synthesis succeeded!\nLatency: {latency} ms.";
                    isFirstAudioChunk = false;
                }

                for (int i = 0; i < chunkSize; ++i)
                {
                    if (i < readBytes / 2)
                    {
                        audioChunk[i] = (short)(audioChunkBytes[i * 2 + 1] << 8 | audioChunkBytes[i * 2]) / 32768.0F;
                    }
                    else
                    {
                        audioChunk[i] = 0.0f;
                    }
                }

                if (readBytes == 0)
                {
                    Thread.Sleep(200);     // Leave some time for the audioSource to finish playback
                    audioSourceNeedStop = true;
                }
            });

            audioSource.clip = audioClip;
            audioSource.Play();
        }

        lock (threadLocker)
        {
            if (newMessage != null)
            {
                message = newMessage;
            }

            waitingForSpeak = false;
        }
    }
Example #5
    public void FixedUpdate()
    {
        if (animateAgent)
        {
            AnimationParametersFrame currentFAPFrame = null;
            AnimationParametersFrame currentBAPFrame = null;

            AudioElement currentAudio = null;

            // Update of frames
            if (distantConnection)
            {
                // uses THRIFT for updating animation

                if (!thriftConsumerOpened)
                {
                    // standard connection
                    if (!fapReceiver.isConnected() && !fapReceiver.isOnConnection())
                    {
                        fapReceiver.startConnection();
                    }
                    else if (!bapReceiver.isConnected() && !bapReceiver.isOnConnection() && fapReceiver.isConnected())
                    {
                        Debug.Log("FAP Receiver started");
                        bapReceiver.startConnection();
                    }
                    else if (!audioReceiver.isConnected() && !audioReceiver.isOnConnection() && bapReceiver.isConnected())
                    {
                        Debug.Log("BAP Receiver started");
                        audioReceiver.startConnection();
                    }
                    else if (!commandSender.isConnected() && !commandSender.isOnConnection() && audioReceiver.isConnected())
                    {
                        Debug.Log("Audio Receiver started");
                        commandSender.startConnection();
                    }
                    else if (commandSender.isConnected())
                    {
                        Debug.Log("Connection Sender started");
                        thriftConsumerOpened = true;
                    }
                }
                else
                {
                    // FAP animation
                    if (fapReceiver.timer.isSynchronized())
                    {
                        //if (SceneManager.gretaClock <= 0)
                        characterTimer.setTimeMillis(fapReceiver.timer.getTimeMillis() - ANIM_DELAY);// the ANIM_DELAY is to take into account delays on the network
                        SceneManager.gretaClock = (float)characterTimer.getTimeMillis();
                        // Debug.Log(fapReceiver.timer.getTimeMillis()/40 );
                        //currentFAPFrame = fapReceiver.getCurrentFrame (fapReceiver.timer.getTimeMillis () / 40);
                        currentFAPFrame = fapReceiver.getCurrentFrame(characterTimer.getTimeMillis() / 40);
                    }
                    // BAP Animation
                    if (bapReceiver.timer.isSynchronized())
                    {
                        if (SceneManager.gretaClock <= 0)
                        {
                            characterTimer.setTimeMillis(bapReceiver.timer.getTimeMillis() - ANIM_DELAY);// the ANIM_DELAY is to take into account delays on the network
                            SceneManager.gretaClock = (float)(characterTimer.getTimeMillis());
                        }
                        currentBAPFrame = bapReceiver.getCurrentFrame(characterTimer.getTimeMillis() / 40);
                    }
                    // AudioBuffer
                    if (fapReceiver.timer.isSynchronized())
                    { // consume the audio buffer
                        currentAudio = audioReceiver.getCurrentAudioElement(characterTimer.getTimeMillis() / 40);
                    }
                }
            }

            // Animates agent using local files
            else
            {
                if (fapReceiver.isConnected())
                {
                    fapReceiver.stopConnector();
                    thriftConsumerOpened = false;
                }
                if (bapReceiver.isConnected())
                {
                    bapReceiver.stopConnector();
                    thriftConsumerOpened = false;
                }
                if (audioReceiver.isConnected())
                {
                    audioReceiver.stopConnector();
                    thriftConsumerOpened = false;
                }
            }

            // Update of animation
            if (currentFAPFrame != null)
            {
                if (lastFAPFrame.isEqualTo(currentFAPFrame))
                {
                    cptFrames++;
                    if (cptFrames > 2)
                    {
                        agentPlaying = false;
                        cptFrames    = 0;
                    }
                }
                else
                {
                    agentPlaying = true;
                    cptFrames    = 0;
                    lastFAPFrame = new AnimationParametersFrame(currentFAPFrame);
                }

                applyFapFrame(currentFAPFrame);
            }
            if (currentBAPFrame != null)
            {
                if (lastBAPFrame.isEqualTo(currentBAPFrame))
                {
                    cptFrames++;
                    if (cptFrames > 2)
                    {
                        agentPlaying = false;
                        cptFrames    = 0;
                    }
                }
                else
                {
                    agentPlaying = true;
                    cptFrames    = 0;
                    lastBAPFrame = new AnimationParametersFrame(currentBAPFrame);
                }

                applyBapFrame(currentBAPFrame);
            }

            /*EB : START TEST FOR AUDIO BUFFER*/
            // Guard against currentAudio being null; it is only assigned while the receivers are synchronized
            if ((audioFilePlayer.isNewAudio() || audioReceiver.isNewAudio()) && currentAudio != null)
            {
                //EB : I reconstructed the short values computed by cereproc from the byte buffer sent by VIB
                // and used these short values to fill the float buffer needed by the audio clip
                if (currentAudio.getSampleRate() > 0 && currentAudio.rawData.Length > 0)
                {
                    int len = currentAudio.rawData.Length / 2;
                    //EB: I couldn't find in Unity how to clear an audio clip nor how to modify its buffer length,
                    // so I preferred to destroy the audio clip (to free the memory) and to create a new audio clip
                    // with the appropriate float buffer size.
                    // In theory the frequency should be provided by the currentAudio object (which should
                    // receive that information in the message from VIB), but since this is not the case
                    // I hard-coded the frequency (47250). It works fine with cereproc, but not with MaryTTS.
                    // For Mary you need to set the frequency to 16000. This is ugly, really!
                    // It should be an input and not hard-coded. The problem is that the Thrift message doesn't
                    // contain the information at all and I don't want to put my hands in that part of your code.
                    Destroy(_currentAudioSource.clip);

                    _currentAudioSource.clip = AudioClip.Create("text", len, 1, currentAudio.getSampleRate(), false);
                    float[] buffer = new float[len];
                    // Starting at 44 skips the WAV header region (note: 44 is a byte offset used here as a sample index)
                    for (int iPCM = 44; iPCM < len; iPCM++)
                    {
                        float f;
                        short i = (short)((currentAudio.rawData[iPCM * 2 + 1] << 8) | currentAudio.rawData[iPCM * 2]);
                        f = ((float)i) / (float)32768;
                        if (f > 1)
                        {
                            f = 1;
                        }
                        if (f < -1)
                        {
                            f = -1;
                        }
                        buffer[iPCM] = f;
                    }
                    _currentAudioSource.clip.SetData(buffer, 0);
                    _currentAudioSource.Play();

                    audioReceiver.setNewAudio(false);
                    audioFilePlayer.setNewAudio(false);
                }
                else
                {
                    if ((_currentAudioSource != null) && (_currentAudioSource.clip != null))
                    {
                        float offSet        = ((float)characterTimer.getTimeMillis() - ((float)currentAudio.getFrameNumber() * 40)) / 1000;
                        int   samplesOffset = (int)(_currentAudioSource.clip.frequency * offSet * _currentAudioSource.clip.channels);
                        _currentAudioSource.timeSamples = samplesOffset;
                        _currentAudioSource.Play();
                    }
                    audioReceiver.setNewAudio(false);
                    audioFilePlayer.setNewAudio(false);
                }
            }
        }
        else
        {
            if (_currentAudioSource != null)
            {
                _currentAudioSource.Stop();
            }
        }
        if (animationIDold != animationID)
        {
            PlayAgentAnimation(animationID);
            animationIDold = animationID;
        }
    }
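The recurring getTimeMillis() / 40 above converts the playback clock into a frame index: FAP/BAP frames here arrive at one frame per 40 ms (25 fps), so dividing milliseconds by 40 selects the current frame. A one-line sketch of that conversion (the method name is illustrative):

    // FAP/BAP animation runs at 25 fps, i.e. one frame every 40 ms,
    // so a millisecond clock maps to a frame index by integer division.
    private static long FrameIndexFromMillis(long timeMillis)
    {
        return timeMillis / 40;
    }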
Example #6
    private static object DoLoad(string file, string ext)
    {
        if (ext == "grf")
        {
            return(File.OpenRead(file));
        }
        else
        {
            using (var br = ReadSync(file)) {
                if (br == null)
                {
                    throw new Exception($"Could not load file: {file}");
                }

                switch (ext)
                {
                // Images
                case "jpg":
                case "jpeg":
                case "png":
                    return(new RawImage()
                    {
                        data = br.ToArray()
                    });

                case "bmp":
                    return(loader.LoadBMP(br));

                case "tga":
                    return(TGALoader.LoadTGA(br));

                // Text
                case "txt":
                case "xml":
                case "lua":
                    return(Encoding.UTF8.GetString(br.ToArray()));

                case "spr":
                    SPR spr = SpriteLoader.Load(br);
                    spr.SwitchToRGBA();
                    spr.Compile();
                    spr.filename = file;
                    return(spr);

                case "str":
                    return(EffectLoader.Load(br));

                case "act":
                    return(ActionLoader.Load(br));

                // Binary
                case "gat":
                    return(new Altitude(br));

                case "rsw":
                    return(WorldLoader.Load(br));

                case "gnd":
                    return(GroundLoader.Load(br));

                case "rsm":
                    return(ModelLoader.Load(br));

                // Audio
                case "wav":
                    WAVLoader.WAVFile wav  = WAVLoader.OpenWAV(br.ToArray());
                    AudioClip         clip = AudioClip.Create(file, wav.samples, wav.channels, wav.sampleRate, false);
                    clip.SetData(wav.leftChannel, 0);
                    return(clip);

                case "mp3":
                case "ogg":
                    break;

                default:
                    throw new Exception($"Unsuported file format: {ext} for file {file}");
                }
            }
        }
        return(null);
    }
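A hedged usage sketch of the dispatch-by-extension contract above: the caller must cast the returned object to match the branch its extension selects (the paths are illustrative, and since DoLoad is private these calls would live inside the same class):

    AudioClip clip = (AudioClip)DoLoad("data/wav/button_click.wav", "wav");
    string    lua  = (string)DoLoad("data/lua/config.lua", "lua"); // text extensions return a UTF-8 string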
Example #7
    // Saved colors.
    private void obtenerDatos1(int id)
    {
        Debug.Log("funcion con id " + id);
        byte[]        son = new byte[0];
        byte[]        imagen_personaje = new byte[0];
        string        conn             = "URI=file:" + Application.dataPath + "/Recursos/BD/dbdata.db";
        IDbConnection dbconn;

        dbconn = (IDbConnection) new SqliteConnection(conn);
        dbconn.Open();
        IDbCommand dbcmd = dbconn.CreateCommand();
        string     sqlQuery;

        sqlQuery = "select t2.r_color, t2.g_color, t2.b_color, t3.id_personaje, t4.nombre_ubicacion,  t3.audio_personaje, t3.imagen_personaje from detalle_aprendizaje as t1 inner join color as t2 on t2.id = t1.id_color inner join personaje as t3 on t1.id_personaje = t3.id_personaje inner join ubicacion as t4 on t1.id_ubicacion = t4.id where t1.id_detalle_apre = " + id;
        //"select color.r_color, color.g_color, color.b_color, ubicacion.nombre_ubicacion, tpersonaje.audio_personaje, tpersonaje.imagen_personaje from detalle_aprendizaje  as detalle_aprendizaje inner join color on color.id = id_color inner join ubicacion on ubicacion.id = id_ubicacion inner join personaje as tpersonaje on tpersonaje.id_personaje = detalle_aprendizaje.id_personaje where id_detalle_apre = " + id;
        Debug.Log(sqlQuery);
        dbcmd.CommandText = sqlQuery;
        IDataReader reader = dbcmd.ExecuteReader();

        while (reader.Read())
        {
            nombre_ubicacion = reader.GetString(4);
            id_personaje_1   = reader.GetInt32(3);
            imagen_personaje = (byte[])reader["imagen_personaje"];
            son = (byte[])reader["audio_personaje"];
            if (nombre_ubicacion == "derecha")
            {
                id_boton_derecha = id_personaje_1;
                colorDer.r       = reader.GetInt32(0);
                colorDer.g       = reader.GetInt32(1);
                colorDer.b       = reader.GetInt32(2);
            }
            else
            {
                id_boton_izquierda = id_personaje_1;
                colorIzq.r         = reader.GetInt32(0);
                colorIzq.g         = reader.GetInt32(1);
                colorIzq.b         = reader.GetInt32(2);
            }
        }
        Debug.Log(colorIzq.r);
        WAV sonido = new WAV(son);

        audio_personaje_1 = AudioClip.Create("personaje_1", sonido.SampleCount, 1, sonido.Frequency, false, false);
        audio_personaje_1.SetData(sonido.LeftChannel, 0);
        if (nombre_ubicacion == "derecha")
        {
            texturaDerecha.LoadImage(imagen_personaje);
        }
        else
        {
            texturaIzquierda.LoadImage(imagen_personaje);
        }
        playsoundOtravez(audio_personaje_1);
        reader.Close();
        reader = null;
        dbcmd.Dispose();
        dbcmd = null;
        dbconn.Close();
        Debug.Log("Ya salio de la base de datos");
        nombre_ubicacion = "";
        StartCoroutine(playsound());
    }
Example #8
 private void MakeBeep()
 {
     beep           = AudioClip.Create("beepx", beepLength * 512, 1, 44100, false, true, OnAudioRead, OnAudioSetPosition);
     obj.audio.clip = beep;
 }
Example #9
    IEnumerator ShowLoadDialogCoroutine()
    {
        // Show a load file dialog and wait for a response from user
        // Load file/folder: file, Initial path: default (Documents), Title: "Load File", submit button text: "Load"
        yield return(FileBrowser.WaitForLoadDialog(false, null, "Load File", "Load"));

        // Dialog is closed
        // Print whether a file is chosen (FileBrowser.Success)
        // and the path to the selected file (FileBrowser.Result) (null, if FileBrowser.Success is false)
        Debug.Log(FileBrowser.Success + " " + FileBrowser.Result);

        if (FileBrowser.Success)
        {
            // If a file was chosen, read its bytes via FileBrowserHelpers
            // Contrary to File.ReadAllBytes, this function works on Android 10+, as well
            mPath = FileBrowser.Result;
            Debug.Log(mPath);

            if (mPath.Length != 0)
            {
                Image o = Instantiate(prefab, transform);
                Debug.Log(mPath);

                //audioSource = (AudioSource)gameObject.GetComponent(typeof(AudioSource));
                //if (audioSource == null) audioSource = (AudioSource)gameObject.AddComponent<AudioSource>();


                MPGImport.mpg123_init();
                handle_mpg = MPGImport.mpg123_new(null, errPtr);
                x          = MPGImport.mpg123_open(handle_mpg, mPath);
                MPGImport.mpg123_getformat(handle_mpg, out rate, out channels, out encoding);
                intRate     = rate.ToInt32();
                intChannels = channels.ToInt32();
                intEncoding = encoding.ToInt32();

                MPGImport.mpg123_id3(handle_mpg, out id3v1, out id3v2);
                MPGImport.mpg123_format_none(handle_mpg);
                MPGImport.mpg123_format(handle_mpg, intRate, intChannels, 208); // 208 == MPG123_ENC_SIGNED_16

                FrameSize = MPGImport.mpg123_outblock(handle_mpg);
                byte[] Buffer = new byte[FrameSize];
                lengthSamples = MPGImport.mpg123_length(handle_mpg);

                myClip = AudioClip.Create(mPath, lengthSamples, intChannels, intRate, false, false);

                int importIndex = 0;

                while (0 == MPGImport.mpg123_read(handle_mpg, Buffer, FrameSize, out done))
                {
                    float[] fArray;
                    fArray = ByteToFloat(Buffer);

                    myClip.SetData(fArray, (importIndex * fArray.Length) / 2);

                    importIndex++;
                }

                MPGImport.mpg123_close(handle_mpg);

                audioSource.clip = myClip;
                audioSource.loop = true;

                //audioSource.Play();
            }
        }
    }
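The ByteToFloat helper used in the read loop above is not shown. Given that the code requests encoding 208 (MPG123_ENC_SIGNED_16, i.e. little-endian signed 16-bit PCM), one plausible shape for it is the following sketch:

    // Plausible ByteToFloat implementation for MPG123_ENC_SIGNED_16 output:
    // each consecutive byte pair is one little-endian signed 16-bit sample.
    private static float[] ByteToFloat(byte[] bytes)
    {
        var floats = new float[bytes.Length / 2];
        for (int i = 0; i < floats.Length; i++)
        {
            floats[i] = System.BitConverter.ToInt16(bytes, i * 2) / 32768.0f;
        }
        return floats;
    }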
Example #10
    void datosPosicion(int id)
    {
        Debug.Log("Numero aleatorio = " + id);
        byte[]        son  = new byte[0];
        string        conn = "URI=file:" + Application.dataPath + "/Recursos/BD/dbdata.db";
        IDbConnection dbconn;

        dbconn = (IDbConnection) new SqliteConnection(conn);
        dbconn.Open();
        IDbCommand dbcmd    = dbconn.CreateCommand();
        string     sqlQuery = "Select * from ubicacion where id = " + id;

        dbcmd.CommandText = sqlQuery;
        IDataReader reader = dbcmd.ExecuteReader();

        while (reader.Read())
        {
            son = (byte[])reader["audio_ubicacion"];
            Debug.Log(son.Length);
            xi = reader.GetInt32(4);
            yi = reader.GetInt32(6);
            xf = reader.GetInt32(5);
            yf = reader.GetInt32(7);
        }

        WAV sonido = new WAV(son);

        b = AudioClip.Create("testSound", sonido.SampleCount, 1, sonido.Frequency, false, false);
        b.SetData(sonido.LeftChannel, 0);
        //audioUbicacion.clip = audioClip;
        //audioA.Play();
        reader.Close();
        reader = null;
        dbcmd.Dispose();
        dbcmd = null;
        dbconn.Close();

        if (id == 5)
        {
            if (contador_repeticiones == 2)
            {
                Debug.Log("Solo la primera vez a");
                saveDetalleAprendizaje(Menu_Aprendizaje_1.cod_aprendizaje, id, Menu_Aprendizaje_1.cod_color_1, Menu_Aprendizaje_1.cod_personaje_1);
                colors(Menu_Aprendizaje_1.colora, imgA);
            }
            else
            {
                colors(Menu_Aprendizaje_1.colorb, imgA);
            }
            contador = 0;
            InstanciarIzqSuperior();
        }
        if (id == 6)
        {
            // Only the first time.
            if (contador_repeticiones == 2)
            {
                Debug.Log("Solo la primera vez b");
                saveDetalleAprendizaje(Menu_Aprendizaje_1.cod_aprendizaje, id, Menu_Aprendizaje_1.cod_color_1, Menu_Aprendizaje_1.cod_personaje_1);
                colors(Menu_Aprendizaje_1.colora, imgB);
            }
            else
            {
                colors(Menu_Aprendizaje_1.colorb, imgB);
            }
            contador = 1;
            InstanciarIzqInferior();
        }
        if (id == 7)
        {
            if (contador_repeticiones == 2)
            {
                Debug.Log("Solo la primera vez a");
                saveDetalleAprendizaje(Menu_Aprendizaje_1.cod_aprendizaje, id, Menu_Aprendizaje_1.cod_color_1, Menu_Aprendizaje_1.cod_personaje_1);
                colors(Menu_Aprendizaje_1.colora, imgC);
            }
            else
            {
                colors(Menu_Aprendizaje_1.colorb, imgC);
            }
            contador = 2;
            InstanciarDerInferior();
        }
        if (id == 8)
        {
            if (contador_repeticiones == 2)
            {
                Debug.Log("Solo la primera vez a");
                saveDetalleAprendizaje(Menu_Aprendizaje_1.cod_aprendizaje, id, Menu_Aprendizaje_1.cod_color_1, Menu_Aprendizaje_1.cod_personaje_1);
                colors(Menu_Aprendizaje_1.colora, imgD);
            }
            else
            {
                colors(Menu_Aprendizaje_1.colorb, imgD);
            }
            contador = 3;
            InstanciarDerSuperior();
        }
        codigo_ubicacion_2 = id;
    }
Example #11
 public override object Instantiate(Type type)
 {
     return(AudioClip.Create(m_name, m_lengthSamples, m_channels, m_frequency, false));
 }
Example #12
    private IEnumerator RecordingHandler()
    {
        m_Recording = Microphone.Start(null, true, m_RecordingBufferSize, m_RecordingHZ);
        yield return(null);      // let m_RecordingRoutine get set..


        //If the recording doesn't initialise properly
        if (m_Recording == null)
        {
            //Stop recording
            StopRecording();
            //Break out of function
            yield break;
        }

        bool bFirstBlock = true;
        int  midPoint    = m_Recording.samples / 2;

        float[] samples = null;

        //While our recording routine is still running and the recording isn't null
        while (m_RecordingRoutine != 0 && m_Recording != null)
        {
            //Get the position to write to
            int writePos = Microphone.GetPosition(null);
            //If we are going to overload the samples array or the mic isn't recording anymore
            if (writePos > m_Recording.samples || !Microphone.IsRecording(null))
            {
                Log.Error("MicrophoneWidget", "Microphone disconnected.");

                //Stop recording
                StopRecording();
                yield break;
            }

            //The ring buffer is processed in two halves: while one half is being written, the other half is read
            if ((bFirstBlock && writePos >= midPoint) ||
                (!bFirstBlock && writePos < midPoint))
            {
                // front block is recorded, make a RecordClip and pass it onto our callback.
                samples = new float[midPoint];
                m_Recording.GetData(samples, bFirstBlock ? 0 : midPoint);

                AudioData record = new AudioData();
                record.MaxLevel = Mathf.Max(samples);
                record.Clip     = AudioClip.Create("Recording", midPoint, m_Recording.channels, m_RecordingHZ, false);
                record.Clip.SetData(samples, 0);

                m_SpeechToText.OnListen(record);

                bFirstBlock = !bFirstBlock;
            }
            else
            {
                // calculate the number of samples remaining until we're ready for a block of audio,
                // and wait the amount of time it will take to record them.
                int   remaining     = bFirstBlock ? (midPoint - writePos) : (m_Recording.samples - writePos);
                float timeRemaining = (float)remaining / (float)m_RecordingHZ;

                yield return(new WaitForSeconds(timeRemaining));
            }
        }

        yield break;
    }
Example #13
 void StartOscillator()
 {
     myClip = AudioClip.Create("MySinusoid", samplerate, 1, samplerate, false, OnAudioRead, OnAudioSetPosition);
     audioClipReceiver.GetComponent <AudioClipReceiver>().ReceiveAudioClip(myClip);
 }
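This snippet (like MakeBeep in Example #8) hands AudioClip.Create a pair of OnAudioRead/OnAudioSetPosition callbacks that are not shown. A minimal sine-wave pair could look like the sketch below, assuming a samplerate field as above and an illustrative frequency field:

 int   position  = 0;    // current playback position in samples (assumed field)
 float frequency = 440f; // tone frequency in Hz (illustrative)

 void OnAudioRead(float[] data)
 {
     // Unity asks for data.Length samples at a time; fill them with a sine wave
     for (int i = 0; i < data.Length; i++)
     {
         data[i] = Mathf.Sin(2f * Mathf.PI * frequency * position / samplerate);
         position++;
     }
 }

 void OnAudioSetPosition(int newPosition)
 {
     // Called when the clip loops or is repositioned
     position = newPosition;
 }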
Example #14
        public static AudioClip Load(Stream dataStream, AudioFormat audioFormat, string unityAudioClipName, bool doStream = false, bool loadInBackground = true, bool disposeDataStreamIfNotNeeded = true)
        {
            AudioClip             audioClip = null;
            CustomAudioFileReader reader    = null;

            try
            {
                reader = new CustomAudioFileReader(dataStream, audioFormat);
                AudioInstance audioInstance = new AudioInstance();
                audioInstance.reader       = reader;
                audioInstance.samplesCount = (int)(reader.Length / (reader.WaveFormat.BitsPerSample / 8));
                AudioInstance audioInstance2 = audioInstance;
                if (doStream)
                {
                    audioClip = (audioInstance2.audioClip = AudioClip.Create(unityAudioClipName, audioInstance2.samplesCount / audioInstance2.channels, audioInstance2.channels, audioInstance2.sampleRate, doStream, delegate(float[] target)
                    {
                        reader.Read(target, 0, target.Length);
                    }, delegate(int target)
                    {
                        reader.Seek(target, SeekOrigin.Begin);
                    }));
                    Manager.SetAudioClipLoadType(audioInstance2, AudioClipLoadType.Streaming);
                    Manager.SetAudioClipLoadState(audioInstance2, AudioDataLoadState.Loaded);
                    return(audioClip);
                }
                audioClip = (audioInstance2.audioClip = AudioClip.Create(unityAudioClipName, audioInstance2.samplesCount / audioInstance2.channels, audioInstance2.channels, audioInstance2.sampleRate, doStream));
                if (disposeDataStreamIfNotNeeded)
                {
                    audioInstance2.streamToDisposeOnceDone = dataStream;
                }
                Manager.SetAudioClipLoadType(audioInstance2, AudioClipLoadType.DecompressOnLoad);
                Manager.SetAudioClipLoadState(audioInstance2, AudioDataLoadState.Loading);
                if (loadInBackground)
                {
                    // Decompiled Monitor.Enter/Exit pattern, equivalent to a lock statement
                    lock (Manager.deferredLoadQueue)
                    {
                        Manager.deferredLoadQueue.Enqueue(audioInstance2);
                    }
                    Manager.RunDeferredLoaderThread();
                    Manager.EnsureInstanceExists();
                    return(audioClip);
                }
                audioInstance2.dataToSet = new float[audioInstance2.samplesCount];
                audioInstance2.reader.Read(audioInstance2.dataToSet, 0, audioInstance2.dataToSet.Length);
                audioInstance2.audioClip.SetData(audioInstance2.dataToSet, 0);
                Manager.SetAudioClipLoadState(audioInstance2, AudioDataLoadState.Loaded);
                return(audioClip);
            }
            catch (Exception ex)
            {
                Manager.SetAudioClipLoadState(audioClip, AudioDataLoadState.Failed);
                Debug.LogError("Could not load AudioClip named '" + unityAudioClipName + "', exception:" + ex);
                return(audioClip);
            }
        }
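This looks like decompiled library code, so its public surface is partly an assumption; with that caveat, a hypothetical synchronous call could look like this (the AudioFormat value and path are illustrative):

        // Hypothetical usage; with loadInBackground == false the data is fully
        // read before the call returns, and the loader disposes the stream itself.
        Stream stream = File.OpenRead("Assets/StreamingAssets/voice.wav");
        AudioClip clip = Load(stream, AudioFormat.wav, "voice",
                              doStream: false, loadInBackground: false);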
Example #15
    // Creates the AudioClipToReturn object into which audio data received from the network is copied for later playback
    private static AudioClip AudioClipCreateEmpty(string ClipName, int Length)
    {
        AudioClip AudioClipToReturn = AudioClip.Create(ClipName, Length, 1, FREQUENCY_RATE, false);

        return(AudioClipToReturn);
    }
Example #16
        /// <summary>
        /// This function will combine any number of AudioClips into a single AudioClip. The clips must be the same number of channels
        /// and frequency.
        /// </summary>
        /// <param name="clips">Variable number of AudioClip objects may be provided.</param>
        /// <returns>Returns the resulting AudioClip.</returns>
        public static AudioClip Combine(params AudioClip[] clips)
        {
            if (clips == null || clips.Length == 0)
            {
                return(null);
            }

            AudioClip firstClip = null;

            int length = 0;

            for (int i = 0; i < clips.Length; i++)
            {
                if (clips[i] == null)
                {
                    continue;
                }

                if (firstClip != null)
                {
                    if (firstClip.channels != clips[i].channels ||
                        firstClip.frequency != clips[i].frequency)
                    {
                        Log.Error("AudioClipUtil", "Combine() requires clips to have the sample number of channels and same frequency.");
                        return(null);
                    }
                }
                else
                {
                    firstClip = clips[i];
                }

                length += clips[i].samples * clips[i].channels;
            }

            float[] data = new float[length];
            length = 0;
            for (int i = 0; i < clips.Length; i++)
            {
                if (clips[i] == null)
                {
                    continue;
                }

                float[] buffer = new float[clips[i].samples * clips[i].channels];
                clips[i].GetData(buffer, 0);
                buffer.CopyTo(data, length);
                length += buffer.Length;
            }

            if (length == 0)
            {
                return(null);
            }

            AudioClip result = AudioClip.Create(firstClip.name, length / firstClip.channels, firstClip.channels, firstClip.frequency, false);

            result.SetData(data, 0);

            return(result);
        }
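A quick usage sketch; the clip fields are illustrative, and the class name AudioClipUtil is taken from the Log.Error tag above:

        // Hypothetical clips; all must share channel count and frequency.
        AudioClip combined = AudioClipUtil.Combine(helloClip, pauseClip, worldClip);
        if (combined != null)
        {
            audioSource.clip = combined;
            audioSource.Play();
        }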
Example #17
    private IEnumerator RecordingHandler()
    {
        Debug.Log("recording handler started");
        _recording = Microphone.Start(_microphoneID, true, _recordingBufferSize, _recordingHZ);
        yield return(null);      // let _recordingRoutine get set..

        if (_recording == null)
        {
            StopRecording();
            Debug.Log("stopped recording because not able to initialise recording");

            yield break;
        }

        bool bFirstBlock = true;
        int  midPoint    = _recording.samples / 2;

        float[] samples = null;

        while (_recordingRoutine != 0 && _recording != null)
        {
            Debug.Log("Recording...");
            loadingText.text = "Waiting for you to say something...";

            int writePos = Microphone.GetPosition(_microphoneID);
            if (writePos > _recording.samples || !Microphone.IsRecording(_microphoneID))
            {
                Log.Error("MicrophoneWidget", "Microphone disconnected.");

                StopRecording();
                yield break;
            }

            if ((bFirstBlock && writePos >= midPoint) ||
                (!bFirstBlock && writePos < midPoint))
            {
                Debug.Log("Making a recording...");
                // front block is recorded, make a RecordClip and pass it onto our callback.
                samples = new float[midPoint];
                _recording.GetData(samples, bFirstBlock ? 0 : midPoint);

                AudioData record = new AudioData();
                record.MaxLevel = Mathf.Max(samples);
                record.Clip     = AudioClip.Create("Recording", midPoint, _recording.channels, _recordingHZ, false);
                record.Clip.SetData(samples, 0);

                //here we send the recorded audio to be processed...
                _speechToText.OnListen(record);

                bFirstBlock = !bFirstBlock;
            }
            else
            {
                // calculate the number of samples remaining until we're ready for a block of audio,
                // and wait the amount of time it will take to record them.
                Debug.Log("Waiting for enough audio data...");
                int   remaining     = bFirstBlock ? (midPoint - writePos) : (_recording.samples - writePos);
                float timeRemaining = (float)remaining / (float)_recordingHZ;

                yield return(new WaitForSeconds(timeRemaining));
            }
        }

        yield break;
    }
Example #18
    IEnumerator Start()
    {
        // Setup camera
        FindObjectOfType <Camera>().orthographic = true;
        // Create audio clip for hit SE.
        var se = gameObject.AddComponent <AudioSource>();

        (se.clip = AudioClip.Create("hit", 600, 1, 44100, false))
        .SetData(Enumerable.Range(0, 600).Select(t => Mathf.Sin(t * 0.1f)).ToArray(), 0);
        // Create sprites
        var field  = CreateSpriteObject("Field", Color.black, new Vector3(0, 1, 1), new Vector2(120, 200));
        var blocks = Enumerable.Range(0, 40).Select(i =>
                                                    CreateSpriteObject("Block " + i, Color.red, new Vector3(-1.7f + ((3.4f / 4) * (i % 5)), 4 - (0.2f * (i / 5)), 0), new Vector2(20, 4))
                                                    ).ToList();
        var bar  = CreateSpriteObject("Bar", Color.cyan, new Vector3(0, -2, 0), new Vector2(20, 4));
        var ball = CreateSpriteObject("Ball", Color.white, new Vector3(0, -1.5f, 0), new Vector2(3, 3));

        ball.transform.rotation = Quaternion.Euler(0, 0, 45);
        // Start main loop
        var velocity = new Vector3(1, 1, 0).normalized * ballSpeed;

        for (;;)
        {
            // Move bar by user input
            var barpos = bar.transform.position;
            barpos.x = Mathf.Clamp(barpos.x + Input.GetAxisRaw("Horizontal") * Time.deltaTime * barSpeed, -2, 2);
            bar.transform.position = barpos;
            // Move ball by velocity
            ball.transform.Translate(velocity * Time.deltaTime, Space.World);
            // Check collision with blocks and remove it if collided
            var ballpos = ball.transform.position;
            if (blocks.RemoveAll(block => {
                if (block.bounds.Intersects(ball.bounds))
                {
                    // Bounce by angle from center of collided block
                    if (Mathf.Abs(Vector3.Dot(Vector3.up, (ballpos - block.transform.position).normalized)) < 0.2f)
                    {
                        velocity.x *= -1;
                    }
                    else
                    {
                        velocity.y *= -1;
                    }
                    velocity *= ballAccel;                     // Speed up!
                    Destroy(block.gameObject);
                    se.Play();
                    return(true);
                }
                return(false);
            }) <= 0)             // Detect no blocks broke
            {
                // Check fumble
                if (ballpos.y < -3)
                {
                    field.color = Color.blue;
                    yield break;
                }
                // Check vertical bounce
                if ((velocity.y < 0 && bar.bounds.Intersects(ball.bounds)) || (velocity.y > 0 && ballpos.y > 4.9f))
                {
                    velocity.y *= -1;
                    se.Play();
                }
                // Check horizontal bounce
                if ((velocity.x < 0 && ballpos.x < -2.33f) || (velocity.x > 0 && ballpos.x > 2.33f))
                {
                    velocity.x *= -1;
                    se.Play();
                }
            }
            // Win if no blocks exist any more
            else if (blocks.Count <= 0)
            {
                field.color = Color.yellow;
                yield break;
            }
            yield return(null);
        }
    }
Example #19
    private IEnumerator RecordingHandler()
    {
        Log.Debug("ExampleStreamingSplitSamples.RecordingHandler()", "devices: {0}", Microphone.devices);
        //  Start recording
        _recording = Microphone.Start(_microphoneID, true, _recordingBufferSize, _recordingHZ);
        yield return(null);

        if (_recording == null)
        {
            StopRecording();
            yield break;
        }

#if ENABLE_TIME_LOGGING
        //  Set a reference to now to check timing
        DateTime now = DateTime.Now;
#endif

        //  Current sample segment number
        int sampleSegmentNum = 0;

        //  Size of the sample segment in samples
        int sampleSegmentSize = _recording.samples / _sampleSegments;

        //  Init samples
        float[] samples = null;

        while (_recordingRoutine != 0 && _recording != null)
        {
            //  Get the mic position
            int microphonePosition = Microphone.GetPosition(_microphoneID);
            if (microphonePosition > _recording.samples || !Microphone.IsRecording(_microphoneID))
            {
                Log.Error("ExampleStreamingSplitSamples.RecordingHandler()", "Microphone disconnected.");

                StopRecording();
                yield break;
            }

            int sampleStart = sampleSegmentSize * sampleSegmentNum;
            int sampleEnd   = sampleSegmentSize * (sampleSegmentNum + 1);

#if ENABLE_DEBUGGING
            Log.Debug("ExampleStreamingSplitSamples.RecordinHandler", "microphonePosition: {0} | sampleStart: {1} | sampleEnd: {2} | sampleSegmentNum: {3}",
                      microphonePosition.ToString(),
                      sampleStart.ToString(),
                      sampleEnd.ToString(),
                      sampleSegmentNum.ToString());
#endif
            //If the write position is past the end of the sample segment or if write position is before the start of the sample segment
            while (microphonePosition > sampleEnd || microphonePosition < sampleStart)
            {
                //  Init samples
                samples = new float[sampleSegmentSize];
                //  Write data from recording into samples starting from the sampleSegmentStart
                _recording.GetData(samples, sampleStart);

                //  Create AudioData and use the samples we just created
                AudioData record = new AudioData();
                record.MaxLevel = Mathf.Max(Mathf.Abs(Mathf.Min(samples)), Mathf.Max(samples));
                record.Clip     = AudioClip.Create("Recording", sampleSegmentSize, _recording.channels, _recordingHZ, false);
                record.Clip.SetData(samples, 0);

                //  Send the newly created AudioData to the service
                _speechToText.OnListen(record);

                //  Iterate or reset sampleSegmentNum
                if (sampleSegmentNum < _sampleSegments - 1)
                {
                    sampleSegmentNum++;
#if ENABLE_DEBUGGING
                    Log.Debug("ExampleStreamingSplitSamples.RecordingHandler()", "Iterating sampleSegmentNum: {0}", sampleSegmentNum);
#endif
                }
                else
                {
                    sampleSegmentNum = 0;
#if ENABLE_DEBUGGING
                    Log.Debug("ExampleStreamingSplitSamples.RecordingHandler()", "Resetting sampleSegmentNum: {0}", sampleSegmentNum);
#endif
                }

#if ENABLE_TIME_LOGGING
                Log.Debug("ExampleStreamingSplitSamples.RecordingHandler", "Sending data - time since last transmission: {0} ms", Mathf.Floor((float)(DateTime.Now - now).TotalMilliseconds));
                now = DateTime.Now;
#endif
                sampleStart = sampleSegmentSize * sampleSegmentNum;
                sampleEnd   = sampleSegmentSize * (sampleSegmentNum + 1);
            }

            yield return(0);
        }

        yield break;
    }
Example #20
 public static AudioClip CreateAudioClip(int lenSamples, int channels, int frequency, bool threeD)
 {
     return(AudioClip.Create("clip", lenSamples / channels, channels, frequency, threeD, false));
 }
Example #21
        public static AudioClip Load(Stream dataStream, AudioFormat audioFormat, string unityAudioClipName, bool doStream = false, bool loadInBackground = true, bool disposeDataStreamIfNotNeeded = true)
        {
            AudioClip             audioClip = null;
            CustomAudioFileReader reader    = null;

            try
            {
                reader = new CustomAudioFileReader(dataStream, audioFormat);
                Manager.AudioInstance audioInstance = new Manager.AudioInstance
                {
                    reader       = reader,
                    samplesCount = (int)(reader.Length / (long)(reader.WaveFormat.BitsPerSample / 8))
                };
                if (doStream)
                {
                    audioClip = AudioClip.Create(unityAudioClipName, audioInstance.samplesCount / audioInstance.channels, audioInstance.channels, audioInstance.sampleRate, doStream, delegate(float[] target)
                    {
                        reader.Read(target, 0, target.Length);
                    }, delegate(int target)
                    {
                        reader.Seek((long)target, SeekOrigin.Begin);
                    });
                    audioInstance.audioClip = audioClip;
                    Manager.SetAudioClipLoadType(audioInstance, AudioClipLoadType.Streaming);
                    Manager.SetAudioClipLoadState(audioInstance, AudioDataLoadState.Loaded);
                }
                else
                {
                    audioClip = AudioClip.Create(unityAudioClipName, audioInstance.samplesCount / audioInstance.channels, audioInstance.channels, audioInstance.sampleRate, doStream);
                    audioInstance.audioClip = audioClip;
                    if (disposeDataStreamIfNotNeeded)
                    {
                        audioInstance.streamToDisposeOnceDone = dataStream;
                    }
                    Manager.SetAudioClipLoadType(audioInstance, AudioClipLoadType.DecompressOnLoad);
                    Manager.SetAudioClipLoadState(audioInstance, AudioDataLoadState.Loading);
                    if (loadInBackground)
                    {
                        lock (Manager.deferredLoadQueue)
                        {
                            Manager.deferredLoadQueue.Enqueue(audioInstance);
                        }
                        Manager.RunDeferredLoaderThread();
                        Manager.EnsureInstanceExists();
                    }
                    else
                    {
                        audioInstance.dataToSet = new float[audioInstance.samplesCount];
                        audioInstance.reader.Read(audioInstance.dataToSet, 0, audioInstance.dataToSet.Length);
                        audioInstance.audioClip.SetData(audioInstance.dataToSet, 0);
                        Manager.SetAudioClipLoadState(audioInstance, AudioDataLoadState.Loaded);
                    }
                }
            }
            catch (Exception ex)
            {
                Manager.SetAudioClipLoadState(audioClip, AudioDataLoadState.Failed);
                Debug.LogError("Could not load AudioClip named '" + unityAudioClipName + "', exception:" + ex);
            }
            return(audioClip);
        }
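The streaming branch above uses the AudioClip.Create overload that takes a PCMReaderCallback and a PCMSetPositionCallback. A minimal standalone sketch of that overload, generating a sine tone on demand instead of pulling from a reader (names and constants are illustrative):

    // Hedged sketch: a procedurally streamed 440 Hz mono tone. Unity invokes
    // the read callback whenever it needs more samples.
    private const int ToneRate = 44100; // assumed output sample rate
    private int _readHead;

    private AudioClip CreateStreamedTone()
    {
        return AudioClip.Create("tone", ToneRate, 1, ToneRate, true,
            data =>
            {
                for (int i = 0; i < data.Length; i++)
                {
                    data[i] = Mathf.Sin(2f * Mathf.PI * 440f * (_readHead + i) / ToneRate);
                }
                _readHead += data.Length;
            },
            newPosition => { _readHead = newPosition; });
    }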
Example #22
 void Start()
 {
     audioSource.clip = AudioClip.Create("test", 1000, 2, FREQUENCY, false);
 }
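As written, the clip above holds unset (silent) data, and FREQUENCY is assumed to be a constant defined elsewhere in the class. A hedged variant that fills the buffer with a tone before playing it:

 void Start()
 {
     const int FREQUENCY = 44100;           // assumed sample rate
     const int LENGTH    = 1000;            // samples per channel, as above
     float[] data = new float[LENGTH * 2];  // interleaved stereo buffer
     for (int i = 0; i < data.Length; i += 2)
     {
         float s = Mathf.Sin(2f * Mathf.PI * 440f * (i / 2) / FREQUENCY);
         data[i]     = s;                   // left channel
         data[i + 1] = s;                   // right channel
     }
     audioSource.clip = AudioClip.Create("test", LENGTH, 2, FREQUENCY, false);
     audioSource.clip.SetData(data, 0);
     audioSource.Play();
 }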
Example #23
        /// <summary>Load audio file into AudioClip</summary>
        public static AudioClip ByteArrayToAudioClip(byte[] wavFile, string name = "", bool stream = false)
        {
            /* WAV file format:
             *
             * size - Name              - (index)   Description.
             *
             * 4    - ChunkID           - (0)       "RIFF"
             * 4    - ChunkSize         - (4)       file size minus 8 (RIFF(4) + ChunkSize(4)).
             * 4    - Format            - (8)       "WAVE"
             *
             * 4    - Subchunk1ID       - (12)      "fmt "
             * 4    - Subchunk1Size     - (16)      16 for PCM (20 to 36)
             * 2    - AudioFormat       - (20)      1 for PCM (other values implies some compression).
             * 2    - NumChannels       - (22)      Mono = 1, Stereo = 2, etc.
             * 4    - SampleRate        - (24)      8000, 22050, 44100, etc.
             * 4    - ByteRate          - (28)      == SampleRate * NumChannels * (BitsPerSample/8)
             * 2    - BlockAlign        - (32)      == NumChannels * (BitsPerSample/8)
             * 2    - BitsPerSample     - (34)      8 bits = 8, 16 bits = 16, etc.
             * (Here goes the extra data pointed by Subchunk1Size > 16)
             *
             * 4    - Subchunk2ID       - (36)      "data"
             * 4    - Subchunk2Size     - (40)
             * Subchunk2Size (Data)     - (44)
             */

            // Check if the provided file is a valid PCM file:
            if (IsCompatible(wavFile))
            {
                //int _chunkSize = System.BitConverter.ToInt32(wavFile, 4);               // Not used.
                int _subchunk1Size = System.BitConverter.ToInt32(wavFile, 16);
                int _audioFormat   = System.BitConverter.ToInt16(wavFile, 20);
                int _numChannels   = System.BitConverter.ToInt16(wavFile, 22);
                int _sampleRate    = System.BitConverter.ToInt32(wavFile, 24);
                //int _byteRate = System.BitConverter.ToInt32(wavFile, 28);               // Not used.
                //int _blockAlign = System.BitConverter.ToInt16(wavFile, 32);             // Not used.
                int _bitsPerSample = System.BitConverter.ToInt16(wavFile, 34);
                // Find where data starts:
                int _dataIndex = 20 + _subchunk1Size;
                for (int i = _dataIndex; i < wavFile.Length - 3; i++)  // -3 guards the 4-byte lookahead
                {
                    if (wavFile[i] == 'd' && wavFile[i + 1] == 'a' && wavFile[i + 2] == 't' && wavFile[i + 3] == 'a')
                    {
                        _dataIndex = i + 4;     // "data" string size = 4
                        break;
                    }
                }
                // Data parameters:
                int _subchunk2Size = System.BitConverter.ToInt32(wavFile, _dataIndex);  // Data size in bytes (Subchunk2Size).
                _dataIndex += 4;                                                        // Skip the 4-byte Subchunk2Size field.
                int _sampleSize  = _bitsPerSample / 8;                                  // Size of one sample in bytes.
                int _sampleCount = _subchunk2Size / _sampleSize;                        // Total samples across all channels.
                // Uncompressed PCM:
                if (_audioFormat == 1)
                {
                    // Note: this conversion assumes 16-bit samples (_bitsPerSample == 16);
                    // 8-bit or 24-bit PCM would need a different decode step.
                    float[] _audioBuffer = new float[_sampleCount];  // Holds interleaved data for all channels.
                    for (int i = 0; i < _sampleCount; i++)
                    {
                        int   sampleIndex = _dataIndex + i * _sampleSize;
                        short intSample   = System.BitConverter.ToInt16(wavFile, sampleIndex);
                        float sample      = intSample / 32768.0f;
                        _audioBuffer[i] = sample;
                    }
                    // Create the AudioClip. AudioClip.Create expects the sample count
                    // per channel, so divide the total count by the channel count:
                    AudioClip audioClip = AudioClip.Create(name, _sampleCount / _numChannels, _numChannels, _sampleRate, stream);
                    audioClip.SetData(_audioBuffer, 0);
                    return(audioClip);
                }
                else
                {
                    Debug.LogError("[OpenWavParser.ByteArrayToAudioClip] Compressed wav format not supported.");
                    return(null);
                }
            }
            else
            {
                Debug.LogError("[OpenWavParser.ByteArrayToAudioClip] Format not supported.");
                return(null);
            }
        }
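IsCompatible is referenced above but not shown. A plausible minimal implementation (an assumption, not the original) validates only the RIFF/WAVE magic bytes:

        /// <summary>Hedged sketch of the header check assumed above.</summary>
        public static bool IsCompatible(byte[] wavFile)
        {
            // A canonical WAV file has "RIFF" at offset 0 and "WAVE" at offset 8.
            return wavFile != null && wavFile.Length >= 12 &&
                   wavFile[0] == 'R' && wavFile[1] == 'I' && wavFile[2] == 'F' && wavFile[3] == 'F' &&
                   wavFile[8] == 'W' && wavFile[9] == 'A' && wavFile[10] == 'V' && wavFile[11] == 'E';
        }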
Example #24
    private IEnumerator RecordingHandler()
    {
        Log.Debug("ExampleStreaming.RecordingHandler()", "devices: {0}", Microphone.devices);
        _recording = Microphone.Start(_microphoneID, true, _recordingBufferSize, _recordingHZ);
        //audioSource = GetComponent<AudioSource>();
        audioSource.clip = _recording;
        audioSource.loop = true;
        //audioSource.Play();

        yield return(null);      // let _recordingRoutine get set..

        if (_recording == null)
        {
            StopRecording();
            yield break;
        }

        bool bFirstBlock = true;
        int  midPoint    = _recording.samples / 2;

        float[] samples = null;


        while (_recordingRoutine != 0 && _recording != null)
        {
            int writePos = Microphone.GetPosition(_microphoneID);
            if (writePos > _recording.samples || !Microphone.IsRecording(_microphoneID))
            {
                Log.Error("ExampleStreaming.RecordingHandler()", "Microphone disconnected.");

                StopRecording();
                yield break;
            }

            if ((bFirstBlock && writePos >= midPoint) ||
                (!bFirstBlock && writePos < midPoint))
            {
                // front block is recorded, make a RecordClip and pass it onto our callback.
                samples = new float[midPoint];
                _recording.GetData(samples, bFirstBlock ? 0 : midPoint);

                AudioData record = new AudioData();
                record.MaxLevel = Mathf.Max(Mathf.Abs(Mathf.Min(samples)), Mathf.Max(samples));
                record.Clip     = AudioClip.Create("Recording", midPoint, _recording.channels, _recordingHZ, false);
                record.Clip.SetData(samples, 0);

                _service.OnListen(record);

                bFirstBlock = !bFirstBlock;
            }
            else
            {
                // calculate the number of samples remaining until we're ready for a block
                // of audio, and wait the amount of time it will take to record them.
                int   remaining     = bFirstBlock ? (midPoint - writePos) : (_recording.samples - writePos);
                float timeRemaining = (float)remaining / (float)_recordingHZ;

                yield return(new WaitForSeconds(timeRemaining));
            }
        }
        yield break;
    }
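The ping-pong pattern above halves the looping ring buffer and sleeps until the half it is about to read has finished recording. The wait-time arithmetic in isolation (a sketch; the call below is illustrative):

    // Hedged sketch of the wait computation used by the half-buffer pattern.
    private static float SecondsUntilHalfFull(int writePos, bool firstBlock,
                                              int totalSamples, int recordingHZ)
    {
        int midPoint  = totalSamples / 2;
        int remaining = firstBlock ? (midPoint - writePos)
                                   : (totalSamples - writePos);
        return (float)remaining / recordingHZ;
    }
    // e.g. SecondsUntilHalfFull(4000, true, 22050, 22050) ≈ 0.319f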
Example #25
        private IEnumerator RecordingHandler()
        {
#if UNITY_WEBPLAYER
            yield return(Application.RequestUserAuthorization(UserAuthorization.Microphone));
#endif
            m_Recording = Microphone.Start(m_MicrophoneID, true, m_RecordingBufferSize, m_RecordingHZ);
            yield return(null);      // let m_RecordingRoutine get set..

            if (m_Recording == null)
            {
                Log.Error("MicrophoneWidget", "Failed to start recording.");
                yield break;
            }

            bool bFirstBlock = true;
            int  midPoint    = m_Recording.samples / 2;

            bool bOutputLevelData = m_LevelOutput.IsConnected;
            bool bOutputAudio     = m_AudioOutput.IsConnected || m_PlaybackRecording;

            int     lastReadPos = 0;
            float[] samples     = null;

            while (m_RecordingRoutine != 0 && m_Recording != null)
            {
                int writePos = Microphone.GetPosition(m_MicrophoneID);
                if (bOutputAudio)
                {
                    if ((bFirstBlock && writePos >= midPoint) ||
                        (!bFirstBlock && writePos < midPoint))
                    {
                        // front block is recorded, make a RecordClip and pass it onto our callback.
                        samples = new float[midPoint];
                        m_Recording.GetData(samples, bFirstBlock ? 0 : midPoint);

                        AudioData record = new AudioData();
                        // Use absolute values so negative peaks are not ignored
                        // (the same fix applied in Example #24 above).
                        record.MaxLevel = Mathf.Max(Mathf.Abs(Mathf.Min(samples)), Mathf.Max(samples));
                        record.Clip     = AudioClip.Create("Recording", midPoint, m_Recording.channels, m_RecordingHZ, false);
                        record.Clip.SetData(samples, 0);

                        if (m_PlaybackRecording)
                        {
                            m_Playback.Add(record.Clip);
                        }
                        if (m_AudioOutput.IsConnected && !m_AudioOutput.SendData(record))
                        {
                            StopRecording();        // automatically stop recording if the callback goes away.
                        }
                        bFirstBlock = !bFirstBlock;
                    }
                    else
                    {
                        // calculate the number of samples remaining until we're ready for a block
                        // of audio, and wait the amount of time it will take to record them.
                        int   remaining     = bFirstBlock ? (midPoint - writePos) : (m_Recording.samples - writePos);
                        float timeRemaining = (float)remaining / (float)m_RecordingHZ;
                        if (bOutputLevelData && timeRemaining > m_LevelOutputInterval)
                        {
                            timeRemaining = m_LevelOutputInterval;
                        }
                        yield return(new WaitForSeconds(timeRemaining));
                    }
                }
                else
                {
                    yield return(new WaitForSeconds(m_LevelOutputInterval));
                }

                if (m_Recording != null && bOutputLevelData)
                {
                    float fLevel = 0.0f;
                    if (writePos < lastReadPos)
                    {
                        // write has wrapped, grab the last bit from the buffer..
                        samples = new float[m_Recording.samples - lastReadPos];
                        m_Recording.GetData(samples, lastReadPos);
                        fLevel = Mathf.Max(fLevel, Mathf.Max(samples));

                        lastReadPos = 0;
                    }

                    if (lastReadPos < writePos)
                    {
                        samples = new float[writePos - lastReadPos];
                        m_Recording.GetData(samples, lastReadPos);
                        fLevel = Mathf.Max(fLevel, Mathf.Max(samples));

                        lastReadPos = writePos;
                    }

                    m_LevelOutput.SendData(new LevelData(fLevel * m_LevelOutputModifier));
                }
            }

            yield break;
        }
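The level-metering branch above reads the microphone ring buffer in up to two pieces when the write head has wrapped past the end. The same idea as a small helper (a sketch, not part of the original widget):

        // Hedged sketch: peak level over [lastReadPos, writePos), wrap-aware.
        private static float PeakSince(AudioClip recording, ref int lastReadPos, int writePos)
        {
            float level = 0f;
            if (writePos < lastReadPos)
            {
                // The writer wrapped: consume the tail of the buffer first.
                float[] tail = new float[recording.samples - lastReadPos];
                recording.GetData(tail, lastReadPos);
                level = Mathf.Max(level, Mathf.Max(tail));
                lastReadPos = 0;
            }
            if (lastReadPos < writePos)
            {
                float[] head = new float[writePos - lastReadPos];
                recording.GetData(head, lastReadPos);
                level = Mathf.Max(level, Mathf.Max(head));
                lastReadPos = writePos;
            }
            return level;
        }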
Example #26
        public IEnumerator GetStatsReturnsReport()
        {
            if (SystemInfo.processorType == "Apple M1")
            {
                Assert.Ignore("todo:: This test will hang up on Apple M1");
            }

            var stream = new MediaStream();

            var go  = new GameObject("Test");
            var cam = go.AddComponent <Camera>();

            stream.AddTrack(cam.CaptureStreamTrack(1280, 720, 0));

            var source = go.AddComponent <AudioSource>();

            source.clip = AudioClip.Create("test", 480, 2, 48000, false);
            stream.AddTrack(new AudioStreamTrack(source));

            yield return(new WaitForSeconds(0.1f));

            var test = new MonoBehaviourTest <SignalingPeers>();

            test.component.AddStream(0, stream);
            yield return(test);

            test.component.CoroutineUpdate();
            yield return(new WaitForSeconds(0.1f));

            var op = test.component.GetPeerStats(0);

            yield return(op);

            Assert.That(op.IsDone, Is.True);
            Assert.That(op.Value.Stats, Is.Not.Empty);
            Assert.That(op.Value.Stats.Keys, Is.Not.Empty);
            Assert.That(op.Value.Stats.Values, Is.Not.Empty);
            Assert.That(op.Value.Stats.Count, Is.GreaterThan(0));

            foreach (RTCStats stats in op.Value.Stats.Values)
            {
                Assert.That(stats, Is.Not.Null);
                Assert.That(stats.Timestamp, Is.GreaterThan(0));
                Assert.That(stats.Id, Is.Not.Empty);
                foreach (var pair in stats.Dict)
                {
                    Assert.That(pair.Key, Is.Not.Empty);
                }
                StatsCheck.Test(stats);
            }
            op.Value.Dispose();

            test.component.Dispose();
            foreach (var track in stream.GetTracks())
            {
                track.Dispose();
            }
            stream.Dispose();
            Object.DestroyImmediate(go);
            Object.DestroyImmediate(test.gameObject);
        }
Example #27
    /// <summary>
    /// Setup the word profile
    /// </summary>
    protected virtual void SetupWordProfile(bool playAudio, bool isNoise, int wordIndex)
    {
        if (null == AudioWordDetection ||
            null == Mic ||
            string.IsNullOrEmpty(Mic.DeviceName))
        {
            return;
        }

        if (wordIndex < 0 ||
            wordIndex >= AudioWordDetection.Words.Count)
        {
            return;
        }

        WordDetails details = AudioWordDetection.Words[wordIndex];

        float[] wave = Mic.GetLastData();
        if (null != wave)
        {
            //allocate for the wave copy
            int size = wave.Length;
            if (null == details.Wave ||
                details.Wave.Length != size)
            {
                details.Wave = new float[size];
                if (null != details.Audio)
                {
                    UnityEngine.Object.DestroyImmediate(details.Audio, true);
                    details.Audio = null;
                }
            }

            //trim the wave
            int position = Mic.GetPosition();

            //get the trim size
            int trim = 0;
            if (m_startPosition < position)
            {
                trim = position - m_startPosition;
            }
            else
            {
                trim = size - m_startPosition + position;
            }

            //zero the existing wave
            for (int index = 0; index < size; ++index)
            {
                details.Wave[index] = 0f;
            }

            //shift array
            for (int index = 0, i = m_startPosition; index < trim; ++index, i = (i + 1) % size)
            {
                details.Wave[index] = wave[i];
            }

            //clear existing mic data
            for (int index = 0; index < size; ++index)
            {
                wave[index] = 0;
            }

            if (NormalizeWave &&
                !isNoise)
            {
                //normalize the array
                Mic.NormalizeWave(details.Wave);
            }

            SetupWordProfile(details, isNoise);

            //play the audio
            if (null == details.Audio)
            {
                details.Audio = AudioClip.Create(string.Empty, size, 1, Mic.SampleRate, false);
            }
            details.Audio.SetData(details.Wave, 0);
            GetComponent <AudioSource>().loop = false;
            GetComponent <AudioSource>().mute = false;
            if (playAudio)
            {
                if (NormalizeWave)
                {
                    GetComponent <AudioSource>().PlayOneShot(details.Audio, 0.1f);
                }
                else
                {
                    GetComponent <AudioSource>().PlayOneShot(details.Audio);
                }
            }

            // show profile
            RefExample.OverrideSpectrumImag = true;
            RefExample.SpectrumImag         = details.SpectrumReal;
        }
    }
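The trim-and-shift loop above copies the most recent samples out of a circular microphone buffer into a linear array. The unwrap step in isolation (a hedged sketch):

    // Hedged sketch: copy `count` samples from a ring buffer into a linear
    // array, starting at `start` and wrapping modulo the buffer length.
    private static float[] Unwrap(float[] ring, int start, int count)
    {
        float[] result = new float[count];
        for (int index = 0, i = start; index < count; ++index, i = (i + 1) % ring.Length)
        {
            result[index] = ring[i];
        }
        return result;
    }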
Example #28
        public void Next()
        {
            if (!vidFile.EndOfFile && Playing)
            {
                double time = AudioSettings.dspTime;
                if (time + vidFile.FrameDelay >= nextEventTime)
                {
                    // Read next block or skip over null block
                    vidFile.ReadNextBlock();
                    if (vidFile.LastBlockType == VidBlockTypes.Null)
                    {
                        vidFile.ReadNextBlock();
                    }

                    if (vidFile.LastBlockType == VidBlockTypes.Audio_StartFrame ||
                        vidFile.LastBlockType == VidBlockTypes.Audio_IncrementalFrame)
                    {
                        // Add empty sample at front and end of clip to prevent clicks and pops
                        int srcLength = vidFile.AudioBuffer.Length;
                        int dstLength = srcLength + 2;
                        int pos       = 1;

                        // Create audio clip for this block
                        AudioClip clip;
                        clip = AudioClip.Create(string.Empty, dstLength, 1, vidFile.SampleRate, false);

                        // Fill clip data
                        const float divisor = 1.0f / 128.0f;
                        float[]     data    = new float[dstLength];
                        for (int i = 0; i < srcLength; i++)
                        {
                            data[pos++] = (vidFile.AudioBuffer[i] - 128) * divisor;
                        }
                        clip.SetData(data, 0);
                        clips[flip] = clip;

                        // Schedule clip
                        audioSources[flip].clip   = clips[flip];
                        audioSources[flip].volume = DaggerfallUnity.Settings.SoundVolume;
                        audioSources[flip].PlayScheduled(nextEventTime);
                        nextEventTime       += vidFile.FrameDelay;
                        flip                 = (clipQueueLength - 1) - flip;
                        lastPlayedAudioFrame = true;
                    }

                    if (vidFile.LastBlockType == VidBlockTypes.Video_StartFrame ||
                        vidFile.LastBlockType == VidBlockTypes.Video_IncrementalFrame ||
                        vidFile.LastBlockType == VidBlockTypes.Video_IncrementalRowOffsetFrame)
                    {
                        // Update video
                        vidTexture.SetPixels32(vidFile.FrameBuffer);
                        vidTexture.Apply(false);

                        // Several videos have parts that are only video frames.
                        // If nextEventTime is not updated, the playback becomes too fast in these parts.
                        if (!lastPlayedAudioFrame)
                        {
                            nextEventTime += vidFile.FrameDelay;
                        }

                        lastPlayedAudioFrame = false;
                    }
                }
            }
        }
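The playback above alternates two AudioSources on the DSP clock so consecutive audio blocks start sample-accurately with no gaps. A minimal sketch of the same PlayScheduled ping-pong (the field wiring and lead-in delay are illustrative):

        // Hedged sketch: gapless back-to-back playback on the DSP clock.
        private AudioSource[] audioSources; // two sources, assumed assigned
        private double nextEventTime;
        private int flip;

        private void ScheduleNext(AudioClip clip, double blockDuration)
        {
            if (nextEventTime <= 0d)
                nextEventTime = AudioSettings.dspTime + 0.1d; // small lead-in
            audioSources[flip].clip = clip;
            audioSources[flip].PlayScheduled(nextEventTime);  // sample-accurate start
            nextEventTime += blockDuration;                   // queue the next block
            flip = 1 - flip;                                  // alternate sources
        }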
Example #29
    private static AudioClip createAudioFromWave(int waveLength, LeanAudioOptions options)
    {
        float time = longList[waveLength - 2];

        float[] audioArr = new float[(int)(options.frequencyRate * time)];

        int   waveIter        = 0;
        float subWaveDiff     = longList[waveIter];
        float subWaveTimeLast = 0f;
        float subWaveTime     = longList[waveIter];
        float waveHeight      = longList[waveIter + 1];

        for (int i = 0; i < audioArr.Length; i++)
        {
            float passedTime = (float)i / (float)options.frequencyRate;
            if (passedTime > longList[waveIter])
            {
                subWaveTimeLast = longList[waveIter];
                waveIter       += 2;
                subWaveDiff     = longList[waveIter] - longList[waveIter - 2];
                waveHeight      = longList[waveIter + 1];
                // Debug.Log("passed wave i:"+i);
            }

            subWaveTime = passedTime - subWaveTimeLast;
            float ratioElapsed = subWaveTime / subWaveDiff;

            float value = Mathf.Sin(ratioElapsed * Mathf.PI);

            if (options.waveStyle == LeanAudioOptions.LeanAudioWaveStyle.Square)
            {
                if (value > 0f)
                {
                    value = 1f;
                }
                if (value < 0f)
                {
                    value = -1f;
                }
            }
            else if (options.waveStyle == LeanAudioOptions.LeanAudioWaveStyle.Sawtooth)
            {
                float sign = value > 0f ? 1f : -1f;
                if (ratioElapsed < 0.5f)
                {
                    value = (ratioElapsed * 2f) * sign;
                }
                else
                {
                    // 0.5f - 1f
                    value = (1f - ratioElapsed) * 2f * sign;
                }
            }
            else if (options.waveStyle == LeanAudioOptions.LeanAudioWaveStyle.Noise)
            {
                float peakMulti = (1f - options.waveNoiseInfluence) +
                                  Mathf.PerlinNoise(0f, passedTime * options.waveNoiseScale) *
                                  options.waveNoiseInfluence;

                /*if(i<25){
                 *  Debug.Log("passedTime:"+passedTime+" peakMulti:"+peakMulti+" infl:"+options.waveNoiseInfluence);
                 * }*/

                value *= peakMulti;
            }

            //if(i<25)
            //	Debug.Log("passedTime:"+passedTime+" value:"+value+" ratioElapsed:"+ratioElapsed+" subWaveTime:"+subWaveTime+" subWaveDiff:"+subWaveDiff);

            value *= waveHeight;


            if (options.modulation != null)
            {
                for (int k = 0; k < options.modulation.Length; k++)
                {
                    float peakMulti =
                        Mathf.Abs(Mathf.Sin(1.5708f + passedTime * (1f / options.modulation[k][0]) * Mathf.PI));
                    float diff = (1f - options.modulation[k][1]);
                    peakMulti = options.modulation[k][1] + diff * peakMulti;
                    // if(k<10){
                    // Debug.Log("k:"+k+" peakMulti:"+peakMulti+" value:"+value+" after:"+(value*peakMulti));
                    // }
                    value *= peakMulti;
                }
            }

            audioArr[i] = value;
            // Debug.Log("pt:"+pt+" i:"+i+" val:"+audioArr[i]+" len:"+audioArr.Length);
        }


        int lengthSamples = audioArr.Length;

#if UNITY_3_5 || UNITY_4_0 || UNITY_4_0_1 || UNITY_4_1 || UNITY_4_2 || UNITY_4_3 || UNITY_4_5 || UNITY_4_6 || UNITY_4_7
        bool      is3dSound = false;
        AudioClip audioClip =
            AudioClip.Create("Generated Audio", lengthSamples, 1, options.frequencyRate, is3dSound, false);
#else
        AudioClip audioClip = null;
        if (options.useSetData)
        {
            audioClip = AudioClip.Create("Generated Audio", lengthSamples, 1, options.frequencyRate, false, null,
                                         OnAudioSetPosition);
            audioClip.SetData(audioArr, 0);
        }
        else
        {
            options.stream = new LeanAudioStream(audioArr);
            // Debug.Log("len:"+audioArr.Length+" lengthSamples:"+lengthSamples+" freqRate:"+options.frequencyRate);
            audioClip = AudioClip.Create("Generated Audio", lengthSamples, 1, options.frequencyRate, false,
                                         options.stream.OnAudioRead, options.stream.OnAudioSetPosition);
            options.stream.audioClip = audioClip;
        }
#endif

        return(audioClip);
    }
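The style branches above reshape the base sine half-wave into square, ramped, or noise-modulated variants. The per-sample shaping as a pure function (a sketch mirroring the branches, not LeanTween's own API):

    // Hedged sketch: reshape one half-wave sample from its phase ratio in [0, 1].
    private static float ShapeSample(float ratioElapsed, LeanAudioOptions.LeanAudioWaveStyle style)
    {
        float value = Mathf.Sin(ratioElapsed * Mathf.PI);
        switch (style)
        {
            case LeanAudioOptions.LeanAudioWaveStyle.Square:
                return value > 0f ? 1f : (value < 0f ? -1f : 0f);
            case LeanAudioOptions.LeanAudioWaveStyle.Sawtooth:
                float sign = value > 0f ? 1f : -1f;
                // Linear rise to the midpoint, then linear fall back down.
                return ratioElapsed < 0.5f ? ratioElapsed * 2f * sign
                                           : (1f - ratioElapsed) * 2f * sign;
            default:
                return value; // Sine; noise modulation is applied separately above.
        }
    }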
Example #30
	//=============================================
	//=============================================
	public static AudioClip SetADPCMData(byte[] bytes)
	{
		// Read the header, rebuild the ADPCM buffer, then decode.
		int headersize = 60; // hardcoded for files encoded by the ADPCM app
		if (bytes[0] == 'R' && bytes[1] == 'I' && bytes[2] == 'F' && bytes[3] == 'F')
		{
			Debug.Log ("ok, this is a RIFF file");
			
			ushort format = System.BitConverter.ToUInt16(bytes, 20); // wFormatTag (17 = IMA ADPCM)
			if (format != 17)
			{
				Debug.Log ("ERROR, bad format : " + format + " - should be IMA ADPCM");
				return null;
			}
			uint totalsamples = System.BitConverter.ToUInt32(bytes, 48);
			ushort blockalign = System.BitConverter.ToUInt16(bytes, 32); // block alignment in bytes
			uint SampleRate = System.BitConverter.ToUInt32(bytes, 24);
			Debug.Log ("total samples=" + totalsamples);
			Debug.Log ("block align=" + blockalign);
			
			// Sanity-check the declared sample count against what the data can hold:
			// each block carries one reference sample plus two samples per data byte.
			int datasize = bytes.Length-headersize;
			int blocks = datasize/blockalign;
			int estimated_samples = blocks*((blockalign-4)*2+1);
			Debug.Log ("estimated samples=" + estimated_samples);
			if (totalsamples >= estimated_samples)
			{
				Debug.Log ("bad totalsamples!");
				totalsamples = (uint)estimated_samples;
			}
			AudioClip dest = AudioClip.Create("adpcm_downloaded", (int)totalsamples, 1, (int)SampleRate, false, false);
			float[] data = new float[totalsamples];
			for (int k=0;k<totalsamples;++k)
				data[k] = 0f;
			IMAADPCM.ADPCMState state = new IMAADPCM.ADPCMState(); 
			state.valprev = 0;
			state.index = 0;
			int j = 0; // destination index
			int i = 0; // source index
			while (i<totalsamples/2)
			{
				if (i%blockalign == 0)
				{
					// We are on the first byte of a block header.
					// Special case: the 4-byte header seeds the decoder state
					// and contributes one reference sample.
					byte[] b = new byte[2];
					b[0] = bytes[headersize+i+0];
					b[1] = bytes[headersize+i+1];
					short sample = System.BitConverter.ToInt16(b, 0); // reference sample
					byte index = bytes[headersize+i+2]; // reference step index
					// The 4th header byte carries nothing significant.
					state.valprev = sample;
					state.index = index;
					data[j] = (float)(sample)/32768f;
					j++;
					i+=4; // advance 4 bytes to skip past the header
				}
				else
				{
					// Normal case: each data byte holds two 4-bit samples.
					byte b = bytes[headersize+i];
					byte b1 = (byte)((b/16)&15); // high nibble
					byte b2 = (byte)(b&15); // low nibble
					short i1 = IMAADPCM.decodeADPCM(b2, ref state);
					short i2 = IMAADPCM.decodeADPCM(b1, ref state);
					data[j] = ((float)i1)/32768f;
					j++;
					data[j] = ((float)i2)/32768f;
					j++;
					i++; // next input byte
				}
			}
			
			dest.SetData(data, 0);
			return dest;
		}
		else
		{
			Debug.Log ("ERROR, this is not a RIFF file");
		}
		
		return null;
	}
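For reference, the 4-byte block header parsed above follows the standard IMA ADPCM layout: a little-endian 16-bit predictor sample, an 8-bit step-table index, and one reserved byte. A hedged sketch of just that seeding step, reusing the example's own IMAADPCM.ADPCMState type:

	// Hedged sketch: seed the decoder from a 4-byte IMA ADPCM block header.
	// Layout: bytes 0-1 = predictor (Int16 LE), byte 2 = step index, byte 3 = reserved.
	private static float SeedBlock(byte[] bytes, int blockStart, ref IMAADPCM.ADPCMState state)
	{
		short predictor = System.BitConverter.ToInt16(bytes, blockStart);
		state.valprev = predictor;
		state.index   = bytes[blockStart + 2];
		return predictor / 32768f; // the header also yields one PCM sample
	}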