/// <summary>
/// Called by GATPlayer to mix to interlaced data according to specified gains per channel.
/// This method may be useful if you subscribe to GATPlayer's mixing delegate and choose to
/// override default mixing.
/// </summary>
public override void PanMixSample(IGATBufferedSample sample, int length, float[] audioBuffer, float gain = 1f)
{
    // Interlaced offset of this sample's first frame in the output buffer.
    int interlacedOffset = sample.OffsetInBuffer * GATInfo.NbOfChannels;
    int nbChannels = channelGains.Count;

    if (gain == 1f)
    {
        // Unity gain: use the overload without the extra gain multiplier.
        for (int channel = 0; channel < nbChannels; channel++)
        {
            sample.AudioData.GainMixToInterlaced(audioBuffer, interlacedOffset, sample.NextIndex, length, channelGains[channel]);
        }
    }
    else
    {
        for (int channel = 0; channel < nbChannels; channel++)
        {
            sample.AudioData.GainMixToInterlaced(audioBuffer, interlacedOffset, sample.NextIndex, length, channelGains[channel], gain);
        }
    }
}
/// <summary>
/// This method may be used if you subscribe to GATPlayer's mixing delegate and choose to
/// override default mixing.
/// </summary>
public override void PanMixProcessingBuffer( IGATBufferedSample sample, int length, float[] audioBuffer, float gain = 1f )
{
    // Interlaced offset of this sample's first frame in the output buffer.
    int interlacedOffset = sample.OffsetInBuffer * GATInfo.NbOfChannels;
    int nbChannels = channelGains.Count;

    if( gain == 1f )
    {
        // Unity gain: use the overload without the extra gain multiplier.
        for( int channel = 0; channel < nbChannels; channel++ )
        {
            sample.ProcessingBuffer.GainMixToInterlaced( audioBuffer, interlacedOffset, 0, length, channelGains[ channel ] );
        }
    }
    else
    {
        for( int channel = 0; channel < nbChannels; channel++ )
        {
            sample.ProcessingBuffer.GainMixToInterlaced( audioBuffer, interlacedOffset, 0, length, channelGains[ channel ], gain );
        }
    }
}
/// <summary>
/// This method may be used if you subscribe to GATPlayer's mixing delegate and choose to
/// override default mixing.
/// </summary>
public override void PanMixProcessingBuffer(IGATBufferedSample sample, int length, float[] audioBuffer, float gain = 1f)
{
    // Interlaced offset of this sample's first frame in the output buffer.
    int interlacedOffset = sample.OffsetInBuffer * GATInfo.NbOfChannels;
    // Never interpolate on a sample's first chunk: there is no previous gain to ramp from.
    bool mayInterpolate = !sample.IsFirstChunk;
    int nbChannels = channelGains.Count;

    if (gain == 1f)
    {
        for (int channel = 0; channel < nbChannels; channel++)
        {
            var channelGain = channelGains[channel];
            if (channelGain.ShouldInterpolate && mayInterpolate)
            {
                // Gain changed since the previous buffer: ramp smoothly to avoid clicks.
                sample.ProcessingBuffer.SmoothedGainMixToInterlaced(audioBuffer, interlacedOffset, 0, length, channelGain);
            }
            else if (channelGain.Gain != 0f) // skip silent channels entirely
            {
                sample.ProcessingBuffer.GainMixToInterlaced(audioBuffer, interlacedOffset, 0, length, channelGain);
            }
        }
    }
    else
    {
        for (int channel = 0; channel < nbChannels; channel++)
        {
            var channelGain = channelGains[channel];
            if (channelGain.ShouldInterpolate && mayInterpolate)
            {
                sample.ProcessingBuffer.SmoothedGainMixToInterlaced(audioBuffer, interlacedOffset, 0, length, channelGain, gain);
            }
            else if (channelGain.Gain != 0f)
            {
                sample.ProcessingBuffer.GainMixToInterlaced(audioBuffer, interlacedOffset, 0, length, channelGain, gain);
            }
        }
    }
}
/// <summary>
/// Mixing hook: fills the processing buffer from the data source (honoring pitch,
/// looping, scheduled fades and early stops), applies filters, then mixes to the
/// track or pans directly to the output buffer. Always returns false, signalling
/// that default player mixing must be skipped.
/// </summary>
protected override bool PlayerWillMixSample(IGATBufferedSample sample, int length, float[] audioBuffer)
{
    int processedSamples;
    int i;
    double dspTime = AudioSettings.dspTime;

    if (sample.IsFirstChunk)
    {
        // First chunk of playback: initialize state and seek the data source.
        PlayingStatus = Status.Playing;
        sample.NextIndex = 1; // NOTE(review): starts at 1, not 0 — presumably reserves index 0 for interpolation; confirm against the data source.

        if (_pitch < 0d)
        {
            // Reverse playback: a start position of 0 means "start from the end".
            if (StartPosition == 0)
            {
                StartPosition = _dataOwner.AudioData.Count - 2;
            }
        }
        else
        {
            // Forward playback: a start position at the end means "start from the beginning".
            if (StartPosition == _dataOwner.AudioData.Count - 2)
            {
                StartPosition = 0;
            }
        }

        _dataSource.Seek(StartPosition);

        if (FadesIn && _fadeInDuration > 0d)
        {
            _fader.SetFadeInfo(new FadeInfo(0f, 1f, _fadeInDuration));
            _shouldFade = true;
        }
    }
    else if (_scheduledFade != null && _shouldFade == false)
    {
        // A fade was scheduled: arm it if it starts within the upcoming buffer.
        if (dspTime + GATInfo.AudioBufferDuration > _scheduledFade.StartDspTime)
        {
            _fader.SetFadeInfo(_scheduledFade);
            _shouldFade = true;
            _scheduledFade = null;
        }
    }

    // Pull audio: direct copy for pitch +/-1, resampling otherwise.
    if (_pitch == 1d)
    {
        processedSamples = _dataSource.GetData(sample.ProcessingBuffer, length, 0, false);
    }
    else if (_pitch == -1d)
    {
        processedSamples = _dataSource.GetData(sample.ProcessingBuffer, length, 0, true); // true: reversed
    }
    else
    {
        processedSamples = _dataSource.GetResampledData(sample.ProcessingBuffer, length, 0, _pitch);
    }

    if (StopsEarly)
    {
        if (dspTime >= _endDspTime)
        {
            _shouldStop = true;
        }
    }

    if (processedSamples < length)
    {
        // Data source exhausted mid-buffer.
        if (_loop)
        {
            if (onSampleWillLoop != null)
            {
                // Delegate may veto the loop; if so, end playback after this chunk.
                if (onSampleWillLoop(this) == false)
                {
                    sample.IsLastChunk = true;
                    goto Processing;
                }
            }

            // Pick the wrap-around seek position according to playback direction.
            int seekPos;
            if (_pitch < 0d)
            {
                seekPos = StartPosition == 0 ? _dataOwner.AudioData.Count - 2 : StartPosition;
            }
            else
            {
                seekPos = StartPosition >= _dataOwner.AudioData.Count - 2 ? 0 : StartPosition;
            }

            _dataSource.Seek(seekPos);
            // Fill the remainder of the buffer from the loop start.
            _dataSource.GetResampledData(sample.ProcessingBuffer, length - processedSamples, processedSamples, _pitch);
            processedSamples = length;
        }
        else
        {
            sample.IsLastChunk = true;
        }
    }
    else if (_shouldStop)
    {
        // Early stop requested and buffer is full: fade this chunk out and finish.
        sample.ProcessingBuffer.FadeOut(0, length);
        sample.IsLastChunk = true;
        _shouldStop = false;
    }

Processing:
    if (_shouldFade)
    {
        if (sample.IsFirstChunk)
        {
            // Account for the sample's offset within the buffer when timing the fade.
            dspTime += ( double )sample.OffsetInBuffer / GATInfo.OutputSampleRate;
        }
        int fadedSamples = _fader.DoFade(sample.ProcessingBuffer, dspTime, processedSamples);
        if (fadedSamples < processedSamples) //end of fade
        {
            if (_fader.ToGain == 0f) //Faded out, don't mix all!
            {
                sample.IsLastChunk = true;
                processedSamples = fadedSamples;
            }
            _shouldFade = false;
        }
    }

    // Run the filter chain in place on the processing buffer.
    for (i = 0; i < _filters.Count; i++)
    {
        _filters[i].ProcessChunk(sample.ProcessingBuffer.ParentArray, sample.ProcessingBuffer.MemOffset, processedSamples, false);
    }

    if (ReferenceEquals(sample.Track, null) == false) //sample is played in a track, which will handle Panning via its own processing buffer. Copy data to the track's buffer:
    {
        sample.Track.MixFrom(sample.ProcessingBuffer, 0, sample.OffsetInBuffer, processedSamples, sample.PlayingGain);
    }
    else
    {
        // No track: pan directly into the interleaved output buffer.
        sample.PanInfo.PanMixProcessingBuffer(sample, processedSamples, audioBuffer, sample.PlayingGain);
    }

    if (sample.IsLastChunk)
    {
        PlayingStatus = Status.ReadyToPlay;
    }

    // Always false: this override has already mixed; skip default player mixing.
    return(false);
}
/// <summary>
/// Hook invoked by the player before mixing a chunk of this sample.
/// NOTE(review): overrides in this file return false when they have mixed the chunk
/// themselves and true to let the player perform default mixing — confirm against GATPlayer.
/// </summary>
/// <param name="sample">The buffered sample about to be mixed.</param>
/// <param name="length">Number of mono samples in the chunk.</param>
/// <param name="audioBuffer">The interleaved output buffer.</param>
protected abstract bool PlayerWillMixSample(IGATBufferedSample sample, int length, float[] audioBuffer);
/// <summary>
/// Pans and adds the mono data of the processing buffer to the provided buffer.
/// </summary>
/// <param name="sample">The buffered sample whose processing buffer is mixed.</param>
/// <param name="length">Number of mono samples to mix.</param>
/// <param name="audioBuffer">The interleaved destination buffer.</param>
/// <param name="gain">Extra gain applied on top of per-channel gains; 1f leaves levels untouched.</param>
public abstract void PanMixProcessingBuffer(IGATBufferedSample sample, int length, float[] audioBuffer, float gain = 1f);
/// <summary>
/// Mixing hook handling early stops and looped playback.
/// Returns true to let the player mix the chunk normally, false when this
/// override has already mixed the chunk itself.
/// </summary>
protected override bool PlayerWillMixSample( IGATBufferedSample sample, int length, float[] audioBuffer )
{
    if( sample.IsFirstChunk )
    {
        PlayingStatus = Status.Playing;
    }

    if( StopsEarly && AudioSettings.dspTime >= _endDspTime )
    {
        _shouldStop = true;
    }

    if( _shouldStop )
    {
        // Stop requested: fade this chunk out, flush it ourselves, and reset state.
        sample.CacheToProcessingBuffer( length );
        sample.ProcessingBuffer.FadeOut( 0, length );
        sample.IsLastChunk = true;

        if( !ReferenceEquals( sample.Track, null ) )
        {
            // Track handles panning via its own processing buffer.
            sample.Track.MixFrom( sample.ProcessingBuffer, 0, sample.OffsetInBuffer, length, sample.PlayingGain );
        }
        else
        {
            sample.PanInfo.PanMixProcessingBuffer( sample, length, audioBuffer, sample.PlayingGain );
        }

        PlayingStatus = Status.ReadyToPlay;
        CurrentLoop = 0;
        _shouldStop = false;
        return false;
    }

    if( !sample.IsLastChunk )
    {
        return true; // Mid-sample chunk: default mixing applies.
    }

    bool shouldLoop = Loop && ( NumberOfLoops == -1 || CurrentLoop < NumberOfLoops );
    if( !shouldLoop )
    {
        // Natural end of playback.
        PlayingStatus = Status.ReadyToPlay;
        CurrentLoop = 0;
        return true;
    }

    // Wrap around: fill the processing buffer with the sample's tail, then its head.
    int wrappedSamples = GATInfo.AudioBufferSizePerChannel - length;
    sample.IsLastChunk = false;
    sample.AudioData.CopyTo( sample.ProcessingBuffer, 0, sample.NextIndex, length );
    sample.AudioData.CopyTo( sample.ProcessingBuffer, length, 0, wrappedSamples );
    sample.NextIndex = wrappedSamples;

    if( ReferenceEquals( sample.Track, null ) )
    {
        sample.PanInfo.PanMixProcessingBuffer( sample, GATInfo.AudioBufferSizePerChannel, audioBuffer, sample.PlayingGain );
    }
    else
    {
        // Track handles panning via its own processing buffer.
        sample.Track.MixFrom( sample.ProcessingBuffer, 0, sample.OffsetInBuffer, GATInfo.AudioBufferSizePerChannel, sample.PlayingGain );
    }

    CurrentLoop++;
    return false;
}
/// <summary>
/// Envelope state machine (Attack -> Decay -> Sustain <-> SustainCrossfade -> Release).
/// Fills the processing buffer for this chunk, advancing through states with
/// goto-case fall-through when a state boundary lands inside the buffer, then
/// mixes the result into the sample's track. Always returns false (mixing handled here).
/// </summary>
bool PlayerWillMixSample(IGATBufferedSample sample, int length, float[] audioBuffer)
{
    // Write cursor inside the processing buffer; advanced when a state fills only part of the chunk.
    int indexInProcessingBuffer = 0;
    int appliedLength;
    int lengthToMix = length;
    float fromGain, toGain;

    switch (_currentState)
    {
    case State.Attack:
        if (_nextIndex >= _decayStartIndex) //handle attack = 0;
        {
            _currentState = State.Decay;
            goto case State.Decay;
        }
        else
        {
            // Cap the first chunk to the attack length so the ramp math stays in range.
            if (sample.IsFirstChunk && length > _attackLength)
            {
                appliedLength = _attackLength;
            }
            else
            {
                appliedLength = length;
            }

            //Interpolate gain: linear ramp position within the attack segment.
            fromGain = (( float )(_nextIndex - _attackStartIndex)) / (_attackLength);

            if (_nextIndex + appliedLength >= _decayStartIndex) //attack slope will finish before the end of the buffer
            {
                appliedLength = _decayStartIndex - _nextIndex;
                toGain = 1f;
                _data.CopySmoothedGainTo(_nextIndex, sample.ProcessingBuffer, 0, appliedLength, fromGain, toGain);
                _nextIndex = _decayStartIndex;
                indexInProcessingBuffer = appliedLength;
                _currentState = State.Decay;
                goto case State.Decay; // fill the rest of the buffer in Decay
            }
            else
            {
                toGain = (( float )(_nextIndex + appliedLength - _attackStartIndex)) / (_attackLength);
                _data.CopySmoothedGainTo(_nextIndex, sample.ProcessingBuffer, 0, appliedLength, fromGain, toGain);
                _nextIndex += appliedLength;
            }
        }
        break;

    case State.Decay:
        // Fill whatever remains of the buffer at unity gain until the loop start.
        appliedLength = GATInfo.AudioBufferSizePerChannel - indexInProcessingBuffer;
        if (_nextIndex + appliedLength >= _loopStartIndex) // decay will end this buffer
        {
            appliedLength = _loopStartIndex - _nextIndex;
            _data.CopyTo(sample.ProcessingBuffer, indexInProcessingBuffer, _nextIndex, appliedLength);
            _nextIndex = _loopStartIndex;
            indexInProcessingBuffer += appliedLength;
            if (_noLoop)
            {
                _currentState = State.Release;
                goto case State.Release;
            }
            else if (_loopCrossfadeLength == _loopLength)
            {
                // Crossfade spans the whole loop: no plain sustain segment exists.
                _currentState = State.SustainCrossfade;
                goto case State.SustainCrossfade;
            }
            else
            {
                _currentState = State.Sustain;
                goto case State.Sustain;
            }
        }
        else
        {
            _data.CopyTo(sample.ProcessingBuffer, indexInProcessingBuffer, _nextIndex, appliedLength);
            _nextIndex += appliedLength;
        }
        break;

    case State.Sustain:
        // Straight copy at unity gain until the loop crossfade region begins.
        appliedLength = GATInfo.AudioBufferSizePerChannel - indexInProcessingBuffer;
        if (_nextIndex + appliedLength >= _loopCrossfadeIndex) // will start crossfading in this buffer
        {
            appliedLength = _loopCrossfadeIndex - _nextIndex;
            _data.CopyTo(sample.ProcessingBuffer, indexInProcessingBuffer, _nextIndex, appliedLength);
            indexInProcessingBuffer += appliedLength;
            _nextIndex += appliedLength;
            if (_keepLooping)
            {
                _currentState = State.SustainCrossfade;
                goto case State.SustainCrossfade;
            }
            else
            {
                // Note-off: begin the release from the current position.
                _releaseIndex = _nextIndex;
                _endIndex = _nextIndex + _releaseLength;
                _currentState = State.Release;
                goto case State.Release;
            }
        }
        else
        {
            _data.CopyTo(sample.ProcessingBuffer, indexInProcessingBuffer, _nextIndex, appliedLength);
            _nextIndex += appliedLength;
        }
        break;

    case State.SustainCrossfade:
        // Crossfade the loop tail (fading out) with the loop head (fading in).
        appliedLength = GATInfo.AudioBufferSizePerChannel - indexInProcessingBuffer;
        int crossfadeOffset = _nextIndex - _loopCrossfadeIndex;
        //Crossfade gains
        fromGain = 1f - ( float )(crossfadeOffset) / _loopCrossfadeLength;
        if (_nextIndex + appliedLength > _loopEndIndex) //will finish loop in current buffer
        {
            appliedLength = _loopEndIndex - _nextIndex;
            // Tail fades out to silence...
            _data.CopySmoothedGainTo(_nextIndex, sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, fromGain, 0f);
            // ...while the head (offset back before the loop start) fades in to unity.
            _data.MixSmoothedGainTo(_loopStartIndex - (_loopCrossfadeLength - crossfadeOffset), sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, 1f - fromGain, 1f);
            indexInProcessingBuffer += appliedLength;
            _nextIndex = _loopStartIndex; //need to loop even if release, as xfade has already started
            if (_keepLooping)
            {
                _currentState = State.Sustain;
                goto case State.Sustain;
            }
            else
            {
                _releaseIndex = _loopStartIndex;
                _endIndex = _loopStartIndex + _releaseLength;
                _currentState = State.Release;
                goto case State.Release;
            }
        }
        else
        {
            //crossfade gain at the end of this partial segment
            toGain = 1f - ( float )(crossfadeOffset + appliedLength) / _loopCrossfadeLength;
            _data.CopySmoothedGainTo(_nextIndex, sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, fromGain, toGain);
            _data.MixSmoothedGainTo(_loopStartIndex - (_loopCrossfadeLength - crossfadeOffset), sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, 1f - fromGain, 1f - toGain);
            _nextIndex += appliedLength;
        }
        break;

    case State.Release:
        // Linear fade from the current release position down to silence.
        appliedLength = GATInfo.AudioBufferSizePerChannel - indexInProcessingBuffer;
        fromGain = 1f - (( float )(_nextIndex - _releaseIndex)) / (_releaseLength);
        if (_nextIndex + appliedLength >= _endIndex) //release slope will finish before the end of the buffer
        {
            toGain = 0f;
            sample.IsLastChunk = true;
            IsPlaying = false;
            appliedLength = _endIndex - _nextIndex;
            // Only mix what was actually written this chunk.
            lengthToMix = appliedLength + indexInProcessingBuffer;
            _data.CopySmoothedGainTo(_nextIndex, sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, fromGain, toGain);
        }
        else
        {
            toGain = 1f - (( float )(_nextIndex + appliedLength - _releaseIndex)) / (_releaseLength);
            _data.CopySmoothedGainTo(_nextIndex, sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, fromGain, toGain);
            _nextIndex += appliedLength;
        }
        break;
    }

    sample.NextIndex = _nextIndex;
    sample.Track.MixFrom(sample.ProcessingBuffer, 0, sample.OffsetInBuffer, lengthToMix);
    // Always false: mixing was handled here, skip default player mixing.
    return(false);
}
/// <summary>
/// Envelope state machine (Attack -> Decay -> Sustain <-> SustainCrossfade -> Release).
/// Fills the processing buffer for this chunk, advancing through states with
/// goto-case fall-through when a state boundary lands inside the buffer, then
/// mixes the result into the sample's track. Always returns false (mixing handled here).
/// </summary>
bool PlayerWillMixSample( IGATBufferedSample sample, int length, float[] audioBuffer )
{
    // Write cursor inside the processing buffer; advanced when a state fills only part of the chunk.
    int indexInProcessingBuffer = 0;
    int appliedLength;
    int lengthToMix = length;
    float fromGain, toGain;

    switch( _currentState )
    {
    case State.Attack:
        if( _nextIndex >= _decayStartIndex ) //handle attack = 0;
        {
            _currentState = State.Decay;
            goto case State.Decay;
        }
        else
        {
            // Cap the first chunk to the attack length so the ramp math stays in range.
            if( sample.IsFirstChunk && length > _attackLength )
            {
                appliedLength = _attackLength;
            }
            else
            {
                appliedLength = length;
            }

            //Interpolate gain: linear ramp position within the attack segment.
            fromGain = ( ( float )( _nextIndex - _attackStartIndex ) ) / ( _attackLength );

            if( _nextIndex + appliedLength >= _decayStartIndex ) //attack slope will finish before the end of the buffer
            {
                appliedLength = _decayStartIndex - _nextIndex;
                toGain = 1f;
                _data.CopySmoothedGainTo( _nextIndex, sample.ProcessingBuffer, 0, appliedLength, fromGain, toGain );
                _nextIndex = _decayStartIndex;
                indexInProcessingBuffer = appliedLength;
                _currentState = State.Decay;
                goto case State.Decay; // fill the rest of the buffer in Decay
            }
            else
            {
                toGain = ( ( float )( _nextIndex + appliedLength - _attackStartIndex ) ) / ( _attackLength );
                _data.CopySmoothedGainTo( _nextIndex, sample.ProcessingBuffer, 0, appliedLength, fromGain, toGain );
                _nextIndex += appliedLength;
            }
        }
        break;

    case State.Decay:
        // Fill whatever remains of the buffer at unity gain until the loop start.
        appliedLength = GATInfo.AudioBufferSizePerChannel - indexInProcessingBuffer;
        if( _nextIndex + appliedLength >= _loopStartIndex ) // decay will end this buffer
        {
            appliedLength = _loopStartIndex - _nextIndex;
            _data.CopyTo( sample.ProcessingBuffer, indexInProcessingBuffer, _nextIndex, appliedLength );
            _nextIndex = _loopStartIndex;
            indexInProcessingBuffer += appliedLength;
            if( _noLoop )
            {
                _currentState = State.Release;
                goto case State.Release;
            }
            else if( _loopCrossfadeLength == _loopLength )
            {
                // Crossfade spans the whole loop: no plain sustain segment exists.
                _currentState = State.SustainCrossfade;
                goto case State.SustainCrossfade;
            }
            else
            {
                _currentState = State.Sustain;
                goto case State.Sustain;
            }
        }
        else
        {
            _data.CopyTo( sample.ProcessingBuffer, indexInProcessingBuffer, _nextIndex, appliedLength );
            _nextIndex += appliedLength;
        }
        break;

    case State.Sustain:
        // Straight copy at unity gain until the loop crossfade region begins.
        appliedLength = GATInfo.AudioBufferSizePerChannel - indexInProcessingBuffer;
        if( _nextIndex + appliedLength >= _loopCrossfadeIndex ) // will start crossfading in this buffer
        {
            appliedLength = _loopCrossfadeIndex - _nextIndex;
            _data.CopyTo( sample.ProcessingBuffer, indexInProcessingBuffer, _nextIndex, appliedLength );
            indexInProcessingBuffer += appliedLength;
            _nextIndex += appliedLength;
            if( _keepLooping )
            {
                _currentState = State.SustainCrossfade;
                goto case State.SustainCrossfade;
            }
            else
            {
                // Note-off: begin the release from the current position.
                _releaseIndex = _nextIndex;
                _endIndex = _nextIndex + _releaseLength;
                _currentState = State.Release;
                goto case State.Release;
            }
        }
        else
        {
            _data.CopyTo( sample.ProcessingBuffer, indexInProcessingBuffer, _nextIndex, appliedLength );
            _nextIndex += appliedLength;
        }
        break;

    case State.SustainCrossfade:
        // Crossfade the loop tail (fading out) with the loop head (fading in).
        appliedLength = GATInfo.AudioBufferSizePerChannel - indexInProcessingBuffer;
        int crossfadeOffset = _nextIndex - _loopCrossfadeIndex;
        //Crossfade gains
        fromGain = 1f - ( float )( crossfadeOffset ) / _loopCrossfadeLength;
        if( _nextIndex + appliedLength > _loopEndIndex ) //will finish loop in current buffer
        {
            appliedLength = _loopEndIndex - _nextIndex;
            // Tail fades out to silence...
            _data.CopySmoothedGainTo( _nextIndex, sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, fromGain, 0f );
            // ...while the head (offset back before the loop start) fades in to unity.
            _data.MixSmoothedGainTo( _loopStartIndex - ( _loopCrossfadeLength - crossfadeOffset ) , sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, 1f - fromGain, 1f );
            indexInProcessingBuffer += appliedLength;
            _nextIndex = _loopStartIndex; //need to loop even if release, as xfade has already started
            if( _keepLooping )
            {
                _currentState = State.Sustain;
                goto case State.Sustain;
            }
            else
            {
                _releaseIndex = _loopStartIndex;
                _endIndex = _loopStartIndex + _releaseLength;
                _currentState = State.Release;
                goto case State.Release;
            }
        }
        else
        {
            //crossfade gain at the end of this partial segment
            toGain = 1f - ( float )( crossfadeOffset + appliedLength ) / _loopCrossfadeLength;
            _data.CopySmoothedGainTo( _nextIndex, sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, fromGain, toGain );
            _data.MixSmoothedGainTo( _loopStartIndex - ( _loopCrossfadeLength - crossfadeOffset ) , sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, 1f - fromGain, 1f - toGain );
            _nextIndex += appliedLength;
        }
        break;

    case State.Release:
        // Linear fade from the current release position down to silence.
        appliedLength = GATInfo.AudioBufferSizePerChannel - indexInProcessingBuffer;
        fromGain = 1f - ( ( float )( _nextIndex - _releaseIndex ) ) / ( _releaseLength );
        if( _nextIndex + appliedLength >= _endIndex ) //release slope will finish before the end of the buffer
        {
            toGain = 0f;
            sample.IsLastChunk = true;
            IsPlaying = false;
            appliedLength = _endIndex - _nextIndex;
            // Only mix what was actually written this chunk.
            lengthToMix = appliedLength + indexInProcessingBuffer;
            _data.CopySmoothedGainTo( _nextIndex, sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, fromGain, toGain );
        }
        else
        {
            toGain = 1f - ( ( float )( _nextIndex + appliedLength - _releaseIndex ) ) / ( _releaseLength );
            _data.CopySmoothedGainTo( _nextIndex, sample.ProcessingBuffer, indexInProcessingBuffer, appliedLength, fromGain, toGain );
            _nextIndex += appliedLength;
        }
        break;
    }

    sample.NextIndex = _nextIndex;
    sample.Track.MixFrom( sample.ProcessingBuffer, 0, sample.OffsetInBuffer, lengthToMix );
    // Always false: mixing was handled here, skip default player mixing.
    return false;
}
/// <summary>
/// Pans and adds the mono data of the sample to the provided buffer.
/// </summary>
/// <param name="sample">The buffered sample whose audio data is mixed.</param>
/// <param name="length">Number of mono samples to mix.</param>
/// <param name="audioBuffer">The interleaved destination buffer.</param>
/// <param name="gain">Extra gain applied on top of per-channel gains; 1f leaves levels untouched.</param>
public abstract void PanMixSample( IGATBufferedSample sample, int length, float[] audioBuffer, float gain = 1f );
/// <summary>
/// Hook invoked by the player before mixing a chunk of this sample.
/// NOTE(review): overrides in this file return false when they have mixed the chunk
/// themselves and true to let the player perform default mixing — confirm against GATPlayer.
/// </summary>
/// <param name="sample">The buffered sample about to be mixed.</param>
/// <param name="length">Number of mono samples in the chunk.</param>
/// <param name="audioBuffer">The interleaved output buffer.</param>
protected abstract bool PlayerWillMixSample( IGATBufferedSample sample, int length, float[] audioBuffer );
/// <summary>
/// Mixing hook handling early stops and looped playback.
/// Returns true to let the player mix the chunk normally, false when this
/// override has already mixed the chunk itself.
/// </summary>
protected override bool PlayerWillMixSample(IGATBufferedSample sample, int length, float[] audioBuffer)
{
    if (sample.IsFirstChunk)
    {
        PlayingStatus = Status.Playing;
    }

    if (StopsEarly)
    {
        if (AudioSettings.dspTime >= _endDspTime)
        {
            _shouldStop = true;
        }
    }

    if (_shouldStop)
    {
        // Stop requested: fade this chunk out, flush it ourselves, and reset state.
        sample.CacheToProcessingBuffer(length);
        sample.ProcessingBuffer.FadeOut(0, length);
        sample.IsLastChunk = true;
        if (ReferenceEquals(sample.Track, null) == false) //sample is played in a track, which will handle Panning via its own processing buffer. Copy data to the track's buffer:
        {
            sample.Track.MixFrom(sample.ProcessingBuffer, 0, sample.OffsetInBuffer, length, sample.PlayingGain);
        }
        else
        {
            sample.PanInfo.PanMixProcessingBuffer(sample, length, audioBuffer, sample.PlayingGain);
        }
        PlayingStatus = Status.ReadyToPlay;
        CurrentLoop = 0;
        _shouldStop = false;
        return(false);
    }

    if (sample.IsLastChunk)
    {
        // End of data reached: either wrap around for another loop, or finish.
        if (Loop && (NumberOfLoops == -1 || CurrentLoop < NumberOfLoops)) // -1 means loop forever
        {
            // Fill the processing buffer with the sample's tail, then its head.
            int extraSamples = GATInfo.AudioBufferSizePerChannel - length;
            sample.IsLastChunk = false;
            sample.AudioData.CopyTo(sample.ProcessingBuffer, 0, sample.NextIndex, length);
            sample.AudioData.CopyTo(sample.ProcessingBuffer, length, 0, extraSamples);
            sample.NextIndex = extraSamples;
            if (ReferenceEquals(sample.Track, null) == false) //sample is played in a track, which will handle Panning via its own processing buffer. Copy data to the track's buffer:
            {
                sample.Track.MixFrom(sample.ProcessingBuffer, 0, sample.OffsetInBuffer, GATInfo.AudioBufferSizePerChannel, sample.PlayingGain);
            }
            else
            {
                sample.PanInfo.PanMixProcessingBuffer(sample, GATInfo.AudioBufferSizePerChannel, audioBuffer, sample.PlayingGain);
            }
            CurrentLoop++;
            return(false);
        }
        else
        {
            // Natural end of playback.
            PlayingStatus = Status.ReadyToPlay;
            CurrentLoop = 0;
        }
    }

    // Default: let the player mix this chunk.
    return(true);
}