Code Example #1

A streaming two-oscillator synthesizer: each update tops up a BufferedSoundStream so that roughly 0.1 seconds of audio stays queued, generating 16-bit samples on the fly.
        void UpdateSound()
        {
            // Try to keep 1/10 seconds of sound in the buffer, to avoid both dropouts and unnecessary latency
            float targetLength   = 1.0f / 10.0f;
            float requiredLength = targetLength - soundStream.BufferLength;

            if (requiredLength < 0.0f)
            {
                return;
            }

            uint numSamples = (uint)(soundStream.Frequency * requiredLength);

            if (numSamples == 0)
            {
                return;
            }

            // Allocate a new buffer and fill it with a simple two-oscillator algorithm. The sound is over-amplified
            // (distorted), clamped to the 16-bit range, and finally lowpass-filtered according to the coefficient
            var newData = new short[numSamples];

            for (int i = 0; i < numSamples; ++i)
            {
                // C#'s % binds tighter than +, so "osc1 + 1.0f % 360.0f" never wraps the phase;
                // wrap the degree-based phases explicitly
                osc1 = (osc1 + 1.0f) % 360.0f;
                osc2 = (osc2 + 1.002f) % 360.0f;

                // Math.Sin expects radians, so convert the degree phases before sampling the oscillators
                float newValue = MathHelper.Clamp((float)((Math.Sin(osc1 * Math.PI / 180.0) + Math.Sin(osc2 * Math.PI / 180.0)) * 100000.0f), -32767.0f, 32767.0f);
                accumulator = MathHelper.Lerp(accumulator, newValue, filter);
                newData[i]  = (short)accumulator;
            }

            // Queue buffer to the stream for playback
            soundStream.AddData(newData, 0, newData.Length);
        }
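
For reference, here is a minimal setup sketch for the fields UpdateSound() relies on. It is an assumption, not part of the original: the 44.1 kHz mono format and the filter coefficient of 0.5 are placeholder choices, and the SetFormat signature (frequency, 16-bit flag, stereo flag) follows the call that appears in Code Example #2 below.

        BufferedSoundStream soundStream;
        float osc1, osc2;     // oscillator phases in degrees
        float accumulator;    // lowpass filter state
        float filter = 0.5f;  // assumed coefficient; smaller values filter more strongly

        void CreateSound()
        {
            soundStream = new BufferedSoundStream();
            // 16-bit mono at 44.1 kHz (assumed values)
            soundStream.SetFormat(44100, true, false);
        }

Once playback of soundStream has started, UpdateSound() is meant to be called periodically (e.g. once per frame) so the buffer never runs dry.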
Code Example #2

Converts a UWP SpeechSynthesisStream, which contains a complete WAV file, into a BufferedSoundStream: the stream is copied into a byte array, the WAV header is parsed for channel count and sample rate, and the raw PCM data is queued for playback.
        // https://github.com/microsoft/MixedRealityCompanionKit/blob/master/LegacySpectatorView/Samples/SharedHolograms/Assets/HoloToolkit/Utilities/Scripts/TextToSpeech.cs
        private async Task<BufferedSoundStream> SpeechStreamToSoundStream(SpeechSynthesisStream inStream)
        {
            var outStream = new BufferedSoundStream();

            uint size = (uint)inStream.Size;

            byte[] wavAudio = new byte[size];

            using (var inputStream = inStream.GetInputStreamAt(0))
            {
                // The input stream keeps its own view of the data, so the original stream can be closed here
                inStream.Dispose();

                using (var reader = new DataReader(inputStream))
                {
                    // Load the entire stream, then copy it into the byte array
                    await reader.LoadAsync(size);
                    reader.ReadBytes(wavAudio);
                }
            }

            // WAV header: byte 22 holds the channel count, bytes 24-27 the sample rate (little-endian)
            int channelCount = wavAudio[22];
            int frequency    = BytesToInt(wavAudio, 24);

            // Frequency, 16-bit samples, stereo if the header reports two channels
            outStream.SetFormat((uint)frequency, true, channelCount == 2);

            // Get past all the other sub chunks to get to the data subchunk:
            int pos = 12; // First subchunk ID from 12 to 16

            // Keep iterating until we find the "data" chunk ID (hex 64 61 74 61, i.e. 100 97 116 97 in decimal)
            while (!(wavAudio[pos] == 100 && wavAudio[pos + 1] == 97 && wavAudio[pos + 2] == 116 && wavAudio[pos + 3] == 97))
            {
                // Skip this chunk: 4-byte ID, 32-bit little-endian size, then the payload
                pos += 4;
                int chunkSize = wavAudio[pos] + wavAudio[pos + 1] * 256 + wavAudio[pos + 2] * 65536 + wavAudio[pos + 3] * 16777216;
                pos += 4 + chunkSize;
            }
            pos += 8; // Skip the "data" chunk ID and its size field to reach the samples

            // Pos is now positioned at the start of the actual sound data
            // (sampleCount is retained from the original sample but is not used below)
            int sampleCount = (wavAudio.Length - pos) / 2;  // 16-bit sound: 2 bytes per sample

            if (channelCount == 2)
            {
                sampleCount /= 2;                           // 16-bit stereo: 4 bytes per sample frame
            }

            // Queue the PCM data, starting at the data chunk, to the stream for playback
            outStream.AddData(wavAudio, pos);

            return outStream;
        }
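
The BytesToInt helper used to read the sample rate is not shown in this excerpt. A minimal sketch of what the linked TextToSpeech.cs does, reading a 32-bit little-endian integer from the header:

        // Assembles four bytes, least significant first, into an int (WAV headers are little-endian)
        private static int BytesToInt(byte[] bytes, int offset = 0)
        {
            int value = 0;
            for (int i = 0; i < 4; i++)
            {
                value |= bytes[offset + i] << (i * 8);
            }
            return value;
        }

On little-endian platforms, BitConverter.ToInt32(wavAudio, 24) would give the same result.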