Example #1
0
 /// <summary>
 /// Factory for typed local audio voices: dispatches on the sample type T
 /// to the float or short specialization.
 /// </summary>
 /// <param name="voiceClient">Client that will own the new voice.</param>
 /// <param name="voiceId">Id assigned to the new voice.</param>
 /// <param name="encoder">Optional encoder; when non-null it must implement IEncoderDataFlow of T.</param>
 /// <param name="voiceInfo">Stream parameters for the new voice.</param>
 /// <param name="channelId">Transport channel the voice sends on.</param>
 /// <returns>A LocalVoiceAudioFloat or LocalVoiceAudioShort, cast to the generic type.</returns>
 /// <exception cref="Exception">A non-null encoder does not implement IEncoderDataFlow of T.</exception>
 /// <exception cref="UnsupportedSampleTypeException">T is neither float nor short.</exception>
 public static LocalVoiceAudio <T> Create <T>(VoiceClient voiceClient, byte voiceId, IEncoder encoder, VoiceInfo voiceInfo, int channelId)
 {
     if (typeof(T) == typeof(float))
     {
         if (encoder == null || encoder is IEncoderDataFlow <float> )
         {
             return new LocalVoiceAudioFloat(voiceClient, encoder as IEncoderDataFlow <float>, voiceId, voiceInfo, channelId) as LocalVoiceAudio <T>;
         }
         else
         {
             // Message fixed: the check above is for IEncoderDataFlow<float>,
             // not the previously mentioned IFrameStream/IBufferEncoder types.
             throw new Exception("[PV] CreateLocalVoice: encoder for float samples is not IEncoderDataFlow<float>: " + encoder.GetType());
         }
     }
     else if (typeof(T) == typeof(short))
     {
         if (encoder == null || encoder is IEncoderDataFlow <short> )
         {
             return new LocalVoiceAudioShort(voiceClient, encoder as IEncoderDataFlow <short>, voiceId, voiceInfo, channelId) as LocalVoiceAudio <T>;
         }
         else
         {
             // Message fixed: see float branch above.
             throw new Exception("[PV] CreateLocalVoice: encoder for short samples is not IEncoderDataFlow<short>: " + encoder.GetType());
         }
     }
     else
     {
         throw new UnsupportedSampleTypeException(typeof(T));
     }
 }
Example #2
0
 /// <summary>
 /// Builds a short-sample local audio voice and installs the built-in
 /// level meter and voice detector processors.
 /// </summary>
 internal LocalVoiceAudioShort(VoiceClient voiceClient, IEncoderDataFlow <short> encoder, byte id, VoiceInfo voiceInfo, int channelId)
     : base(voiceClient, encoder, id, voiceInfo, channelId)
 {
     var samplingRate = this.info.SamplingRate;
     var channels = this.info.Channels;

     // these 2 processors go after resampler
     this.levelMeter = new AudioUtil.LevelMeterShort(samplingRate, channels); //1/2 sec
     this.voiceDetector = new AudioUtil.VoiceDetectorShort(samplingRate, channels);

     initBuiltinProcessors();
 }
Example #3
0
 /// <summary>
 /// Base local voice: stores identity, stream info, and transport wiring.
 /// Transmission is enabled by default.
 /// </summary>
 internal LocalVoice(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, int channelId)
 {
     this.voiceClient = voiceClient;
     this.encoder = encoder;
     this.id = id;
     this.info = voiceInfo;
     this.channelId = channelId;
     this.Transmit = true;
 }
 /// <summary>
 /// Remote voice: records sender identity and creates an Opus decoder
 /// configured from the incoming voice info.
 /// </summary>
 internal RemoteVoice(VoiceClient client, int channelId, int playerId, byte voiceId, VoiceInfo info, byte lastEventNumber)
 {
     this.voiceClient = client;
     this.channelId = channelId;
     this.playerId = playerId;
     this.voiceId = voiceId;
     this.Info = info;
     this.lastEvNumber = lastEventNumber;
     this.opusDecoder = new OpusDecoder((SamplingRate)info.SamplingRate, (Channels)info.Channels);
 }
        }                                                                              // called by voice client action, so user still can use action

        /// <summary>
        /// Creates the client, subscribes voice handlers to the base client's
        /// event/state actions, and reserves enough transport channels.
        /// </summary>
        public LoadBalancingFrontend(ConnectionProtocol connectionProtocol = ConnectionProtocol.Udp) : base(connectionProtocol)
        {
            this.voiceClient = new VoiceClient(this);
            base.OnEventAction += onEventActionVoiceClient;
            base.OnStateChangeAction += onStateChangeVoiceClient;

            // One channel per codec plus channel 0, which is reserved for user events.
            var requiredChannels = Enum.GetValues(typeof(Codec)).Length + 1;
            if (this.loadBalancingPeer.ChannelCount < requiredChannels)
            {
                this.loadBalancingPeer.ChannelCount = (byte)requiredChannels;
            }
        }
Example #6
0
 /// <summary>
 /// Local voice that works on fixed-size frames; delegates all common
 /// setup to the LocalVoice base constructor.
 /// </summary>
 /// <param name="frameSize">Frame size stored for this voice; units (samples vs. bytes) are not visible here — confirm against the base class.</param>
 internal LocalVoiceFramed(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, int channelId, int frameSize)
     : base(voiceClient, encoder, id, voiceInfo, channelId)
 {
     this.FrameSize = frameSize;
 }
Example #7
0
        }                                                                              // called by voice client action, so user still can use action

        /// <summary>
        /// Creates the client and subscribes voice handlers to the base
        /// client's event and state-change actions.
        /// </summary>
        public LoadBalancingFrontend()
        {
            this.voiceClient = new VoiceClient(this);
            base.OnEventAction += onEventActionVoiceClient;
            base.OnStateChangeAction += onStateChangeVoiceClient;
        }