Example #1
        /// <summary>
        /// Reads the samples of the given time interval from the stream into the target array.
        /// The stream is expected to deliver 32-bit IEEE 754 float samples.
        /// </summary>
        public static int ForceReadIntervalSamples(IAudioStream s, Interval i, float[] array)
        {
            // Seek to the byte position at which the interval starts
            s.Position = TimeUtil.TimeSpanToBytes(i.TimeFrom, s.Properties);
            long bytesRead        = 0;
            long samplesToRead    = TimeUtil.TimeSpanToBytes(i.TimeLength, s.Properties) / s.Properties.SampleByteSize;
            int  totalSamplesRead = 0;
            int  channels         = s.Properties.Channels;

            // Temporary buffer for block-wise reading from the stream
            byte[] temp = new byte[1024 * 32 * channels];

            if (samplesToRead > array.Length)
            {
                throw new ArgumentException("cannot read the requested interval (" + samplesToRead
                                            + " samples) - the target array is too small (" + array.Length + ")");
            }

            // Read block by block until the stream is exhausted or the interval is complete
            while ((bytesRead = s.Read(temp, 0, temp.Length)) > 0)
            {
                unsafe
                {
                    fixed (byte* sampleBuffer = &temp[0])
                    {
                        // Reinterpret the raw bytes as 32-bit float samples
                        float* samples = (float*)sampleBuffer;

                        for (int x = 0; x < bytesRead / sizeof(float); x++)
                        {
                            array[totalSamplesRead++] = samples[x];
                            if (samplesToRead == totalSamplesRead)
                            {
                                return totalSamplesRead;
                            }
                        }
                    }
                }
            }

            // The stream ended before the full interval could be read
            return totalSamplesRead;
        }
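
A minimal usage sketch for Example #1 (the stream variable and the tick-based Interval constructor are assumptions for illustration, not part of the original code):

        // Hypothetical usage: read the first second of audio into a float buffer.
        // 'stream' stands for any IAudioStream that delivers 32-bit float samples.
        Interval firstSecond = new Interval(0, TimeSpan.TicksPerSecond); // assumed (from, to) constructor in ticks
        long sampleCount = TimeUtil.TimeSpanToBytes(firstSecond.TimeLength, stream.Properties)
                           / stream.Properties.SampleByteSize;
        float[] buffer = new float[sampleCount];
        int samplesRead = ForceReadIntervalSamples(stream, firstSecond, buffer);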
Example #2
        private void AddTrack(AudioTrack audioTrack)
        {
            if (audioTrack.SourceProperties.SampleRate > audioMixer.SampleRate)
            {
                // The newly added track has a higher sample rate than the current tracks,
                // so we raise the processing sample rate to the highest rate
                ChangeMixingSampleRate(audioTrack.SourceProperties.SampleRate);
            }

            IAudioStream input        = audioTrack.CreateAudioStream();
            IAudioStream baseStream   = new TolerantStream(new BufferedStream(input, 1024 * 256 * input.SampleBlockSize, true));
            OffsetStream offsetStream = new OffsetStream(baseStream)
            {
                Offset = TimeUtil.TimeSpanToBytes(audioTrack.Offset, baseStream.Properties)
            };

            audioTrack.OffsetChanged += new EventHandler<ValueEventArgs<TimeSpan>>(
                delegate(object sender, ValueEventArgs<TimeSpan> e) {
                    offsetStream.Offset = TimeUtil.TimeSpanToBytes(e.Value, offsetStream.Properties);
                    audioMixer.UpdateLength();
                });

            // Upmix mono inputs to dual channel stereo or downmix surround to allow channel balancing
            // TODO add better multichannel stream support and allow balancing of surround
            IAudioStream mixToStereoStream = offsetStream;

            if (mixToStereoStream.Properties.Channels == 1)
            {
                mixToStereoStream = new MonoStream(mixToStereoStream, 2);
            }
            else if (mixToStereoStream.Properties.Channels > 2)
            {
                mixToStereoStream = new SurroundDownmixStream(mixToStereoStream);
            }

            // control the track phase
            PhaseInversionStream phaseInversion = new PhaseInversionStream(mixToStereoStream)
            {
                Invert = audioTrack.InvertedPhase
            };

            // optional mono downmix of the track (active when the Downmix property is set)
            MonoStream monoStream = new MonoStream(phaseInversion, phaseInversion.Properties.Channels)
            {
                Downmix = audioTrack.MonoDownmix
            };

            // necessary to control each track individually
            VolumeControlStream volumeControl = new VolumeControlStream(monoStream)
            {
                Mute    = audioTrack.Mute,
                Volume  = audioTrack.Volume,
                Balance = audioTrack.Balance
            };

            // when the AudioTrack.Mute property changes, just set it accordingly on the audio stream
            audioTrack.MuteChanged += new EventHandler<ValueEventArgs<bool>>(
                delegate(object vsender, ValueEventArgs<bool> ve) {
                    volumeControl.Mute = ve.Value;
                });

            // when the AudioTrack.Solo property changes, we have to react in different ways:
            audioTrack.SoloChanged += new EventHandler<ValueEventArgs<bool>>(
                delegate(object vsender, ValueEventArgs<bool> ve) {
                    AudioTrack senderTrack  = (AudioTrack)vsender;
                    bool isOtherTrackSoloed = false;

                    foreach (AudioTrack vaudioTrack in trackList)
                    {
                        if (vaudioTrack != senderTrack && vaudioTrack.Solo)
                        {
                            isOtherTrackSoloed = true;
                            break;
                        }
                    }

                    /* if there's at least one other track that is soloed, we set the mute property of
                     * the current track to the opposite of the solo property:
                     * - if the track is soloed, we unmute it
                     * - if the track is unsoloed, we mute it
                     */
                    if (isOtherTrackSoloed)
                    {
                        senderTrack.Mute = !ve.Value;
                    }

                    /* if this is the only soloed track, we mute all other tracks
                     * if this track just got unsoloed, we unmute all other tracks
                     */
                    else
                    {
                        foreach (AudioTrack vaudioTrack in trackList)
                        {
                            if (vaudioTrack != senderTrack && !vaudioTrack.Solo)
                            {
                                vaudioTrack.Mute = ve.Value;
                            }
                        }
                    }
                });
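
            /* Hypothetical walk-through of the solo logic above with three tracks A, B and C
             * (illustrative names, not part of this class):
             *   A.Solo = true;   // -> B.Mute = true,  C.Mute = true   (A is the only soloed track)
             *   B.Solo = true;   // -> B.Mute = false                  (another track is already soloed)
             *   B.Solo = false;  // -> B.Mute = true                   (A is still soloed)
             *   A.Solo = false;  // -> B.Mute = false, C.Mute = false  (no track is soloed anymore)
             */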

            // when the AudioTrack.Volume property changes, just set it accordingly on the audio stream
            audioTrack.VolumeChanged += new EventHandler<ValueEventArgs<float>>(
                delegate(object vsender, ValueEventArgs<float> ve) {
                    volumeControl.Volume = ve.Value;
                });

            audioTrack.BalanceChanged += new EventHandler<ValueEventArgs<float>>(
                delegate(object vsender, ValueEventArgs<float> ve) {
                    volumeControl.Balance = ve.Value;
                });

            audioTrack.InvertedPhaseChanged += new EventHandler<ValueEventArgs<bool>>(
                delegate(object vsender, ValueEventArgs<bool> ve) {
                    phaseInversion.Invert = ve.Value;
                });
            audioTrack.MonoDownmixChanged += new EventHandler<ValueEventArgs<bool>>(
                delegate(object vsender, ValueEventArgs<bool> ve) {
                    monoStream.Downmix = ve.Value;
                });

            // adjust sample rate to mixer output rate
            ResamplingStream resamplingStream = new ResamplingStream(
                volumeControl, ResamplingQuality.Medium, audioMixer.Properties.SampleRate);

            IAudioStream trackStream = resamplingStream;

            // upmix a mono track stream to the mixer's channel count
            if (trackStream.Properties.Channels == 1 && audioMixer.Properties.Channels > 1)
            {
                trackStream = new MonoStream(trackStream, audioMixer.Properties.Channels);
            }

            audioMixer.Add(trackStream);
            trackListStreams.Add(audioTrack, trackStream);
        }
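
Example #2 wires each track into a per-track decorator chain before handing it to the mixer: source stream -> TolerantStream(BufferedStream) for buffered reading -> OffsetStream for the track's timeline offset -> MonoStream upmix or SurroundDownmixStream to normalize the channel layout to stereo -> PhaseInversionStream -> MonoStream (optional mono downmix) -> VolumeControlStream (mute/volume/balance) -> ResamplingStream to the mixer's sample rate -> optional MonoStream upmix to the mixer's channel count. The attached event handlers keep the stream parameters in sync with the AudioTrack properties, so property changes take effect without rebuilding the chain.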