Example #1
    // No audio is rendered in this method.
    // This code is only here to keep updates synchronized to audio frame time.
    private void OnAudioFilterRead(float[] data, int channels)
    {
        bool playNote = (audioFrameCount % speed) == 0;

        if (playNote)
        {
            int note = sequence[sequenceIdx++];
            sequenceIdx %= sequence.Length;
            if (note != -1)
            {
                note += pitch;

                synth.HandleEventNow(new EventQueue.QueuedEvent(EventQueue.EventType.Note_off, 0, -1));
                synth.HandleEventNow(new EventQueue.QueuedEvent(EventQueue.EventType.Note_on, note, -1));
            }
        }

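        // Update the LFOs once per buffer and apply each as an offset around its parameter's base value.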
        if (lfo1enabled)
        {
            lfo1.set_freq(lfo1Freq * 1024);
            lfo1.update();
            synth.set_parameter(lfo1Param, lfo1BaseValue + lfo1.sin() * lfo1Strength);
        }
        if (lfo2enabled)
        {
            lfo2.set_freq(lfo2Freq * 1024);
            lfo2.update();
            synth.set_parameter(lfo2Param, lfo2BaseValue + lfo2.sin() * lfo2Strength);
        }

        audioFrameCount++;
    }
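OnAudioFilterRead is called once per DSP buffer, so audioFrameCount counts buffers rather than samples, and speed is the sequencer step length measured in buffers. As a minimal sketch, assuming Unity's AudioSettings API (the SequencerTiming helper below is hypothetical and not part of the code above), a step duration in seconds could be converted into that buffer count like this:

    using UnityEngine;

    public static class SequencerTiming
    {
        // Hypothetical helper: converts a step duration in seconds into the number
        // of DSP buffers between notes, matching the (audioFrameCount % speed) check above.
        public static int StepLengthInBuffers(float stepSeconds)
        {
            AudioSettings.GetDSPBufferSize(out int bufferLength, out int numBuffers);
            float bufferSeconds = bufferLength / (float)AudioSettings.outputSampleRate;
            return Mathf.Max(1, Mathf.RoundToInt(stepSeconds / bufferSeconds));
        }
    }

For example, with a 1024-sample DSP buffer at 48 kHz, a 0.125 s step works out to roughly 6 buffers.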
Example #2
    private void render_float32_stereo_interleaved(float[] buffer, int sample_frames)
    {
        int smp     = 0;
        int buf_idx = 0;

        //int time_smp = masterClock_smp;

        update_params();

        // Cache this for the entire buffer; we don't need to check on
        // every sample whether new events have been enqueued.
        // This assumes that no other methods call GetFrontAndDequeue.
        int queueSize = queue.GetSize();

        // Render loop
        for (; smp < sample_frames; ++smp)
        {
            // Event handling
            // This is sample-accurate event handling.
            // If it's too slow, we can decide to handle only one event per buffer and
            // move this code outside the loop.
            while (true)
            {
                if (!eventIsWaiting && queueSize > 0)
                {
                    //queueLock = true;
                    if (queue.GetFrontAndDequeue(ref nextEvent))
                    {
                        eventIsWaiting = true;
                        queueSize--;
                    }
                    //queueLock = false;
                }

                if (eventIsWaiting)
                {
                    if (nextEvent.time_smp <= time_smp)
                    {
                        HandleEventNow(nextEvent);
                        eventIsWaiting = false;
                    }
                    else
                    {
                        // We assume that queued events are in order, so if this one
                        // is not due yet, we stop pulling events from the queue.
                        break;
                    }
                }
                else
                {
                    // no more events
                    break;
                }
            }

            // Rendering
            if (note_is_on)
            {
                // Render sample
                float amp = aenv.process() * 0.5f;

                float lfo_val = lfo.sin() * 0.48f * pwmStrength + 0.5f;

                //float saw = osc1.saw() * sawAmp;
                //float square = osc1.square(lfo_val) * squareAmp;
                //float sawDPW = osc1.sawDPW() * sawDPWAmp;
                float sine           = osc2.sin() * subSine;
                float sawPolyBLEP    = osc1.sawPolyBLEP() * sawAmp;
                float squarePolyBLEP = osc1.squarePolyBLEP(lfo_val) * squareAmp;

                float sample = (sine + sawPolyBLEP + squarePolyBLEP)
                               * /*(current_velocity * 0.0079f) **/ amp;

                buffer[buf_idx++] = sample;
                buffer[buf_idx++] = sample;

                // Update oscillators
                osc1.update();
                osc2.update();
                lfo.update();
                fenv.update_oneshot();
            }
            else
            {
                buffer[buf_idx++] = 0.0f;
                buffer[buf_idx++] = 0.0f;
            }
            time_smp++;
        }

        // Filter entire buffer
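        // filterEnabled is stored as a float parameter, so values >= 0.5f are treated as "on".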
        if (filterEnabled >= 0.5f)
        {
            if (filterType == FilterType.Schmid)
            {
                filter1.process_mono_stride(buffer, sample_frames, 0, 2);
                filter2.process_mono_stride(buffer, sample_frames, 1, 2);
            }
#if LAZZARINI_FILTER
            else if (filterType == FilterType.Lazzarini)
            {
                filter1Laz.process_mono_stride(buffer, sample_frames, 0, 2);
                filter2Laz.process_mono_stride(buffer, sample_frames, 1, 2);
            }
#endif
        }
    }
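For context, a renderer with this signature maps directly onto Unity's OnAudioFilterRead callback, where the buffer is interleaved and the frame count is data.Length / channels. A minimal sketch of that hookup, assuming the surrounding class is a MonoBehaviour that owns this renderer (the wrapper itself is not shown in the source):

    // Hypothetical glue code, not part of the original class: forwards Unity's
    // interleaved buffer to the stereo renderer above.
    private void OnAudioFilterRead(float[] data, int channels)
    {
        if (channels != 2)
            return; // this renderer only writes stereo interleaved output

        int sampleFrames = data.Length / channels;
        render_float32_stereo_interleaved(data, sampleFrames);
    }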
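The sawPolyBLEP and squarePolyBLEP calls refer to PolyBLEP band-limiting, which subtracts a small polynomial correction around each waveform discontinuity to reduce aliasing. A minimal sketch of a PolyBLEP saw in the standard textbook form (this is not the project's osc1 implementation; phase and phaseInc are assumed to be normalized to [0, 1)):

    // Standard PolyBLEP residual: non-zero only within one sample of the
    // discontinuity at the phase wrap (t in [0, 1), dt = phase increment per sample).
    static float PolyBlep(float t, float dt)
    {
        if (t < dt)        { t /= dt; return t + t - t * t - 1.0f; }
        if (t > 1.0f - dt) { t = (t - 1.0f) / dt; return t * t + t + t + 1.0f; }
        return 0.0f;
    }

    // Naive sawtooth in [-1, 1] with the PolyBLEP correction subtracted at the wrap.
    static float SawPolyBlep(float phase, float phaseInc)
    {
        float naive = 2.0f * phase - 1.0f;
        return naive - PolyBlep(phase, phaseInc);
    }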