/// <summary>
/// Is called when wave-in has received a new audio frame. Assigns an RTP
/// timestamp to the frame, encodes it with the active codec and sends it
/// over the RTP stream.
/// </summary>
/// <param name="sender">Sender.</param>
/// <param name="e">Event data; <c>e.Value</c> carries the raw audio frame bytes.</param>
private void m_pWaveIn_AudioFrameReceived(object sender, EventArgs<byte[]> e)
{
    try{
        // We don't have RTP timestamp base, or the timestamp was recycled
        // (wrapped around): re-sync our local timestamp to the session clock.
        if (m_RtpTimeStamp == 0 || m_RtpTimeStamp > m_pRTP_Stream.Session.RtpClock.RtpTimestamp) {
            m_RtpTimeStamp = m_pRTP_Stream.Session.RtpClock.RtpTimestamp;
        }
        // Some sample block missing or silence suppression.
        // Doesn't work ... needs some more investigation.
        //else if((m_pRTP_Stream.Session.RtpClock.RtpTimestamp - m_RtpTimeStamp) > 2 * m_pRTP_Stream.Session.RtpClock.MillisecondsToRtpTicks(m_AudioFrameSize)){
        //    m_RtpTimeStamp = m_pRTP_Stream.Session.RtpClock.RtpTimestamp;
        //}
        else {
            // Normal case: advance the timestamp by one audio frame worth of
            // RTP clock ticks.
            // NOTE(review): m_AudioFrameSize is passed to MillisecondsToRtpTicks,
            // so it appears to be the frame duration in milliseconds — confirm
            // against its declaration.
            m_RtpTimeStamp += (uint)m_pRTP_Stream.Session.RtpClock.MillisecondsToRtpTicks(m_AudioFrameSize);
        }

        // Encode and send only when a codec is selected; otherwise the frame
        // is silently dropped (timestamp still advances above).
        if (m_pActiveCodec != null) {
            RTP_Packet rtpPacket = new RTP_Packet();
            rtpPacket.Data = m_pActiveCodec.Encode(e.Value, 0, e.Value.Length);
            rtpPacket.Timestamp = m_RtpTimeStamp;

            m_pRTP_Stream.Send(rtpPacket);
        }
    }
    catch (Exception x) {
        if (!this.IsDisposed) {
            // Raise error event (we can't throw the exception directly, we are
            // on a thread-pool thread where it would be unobserved).
            OnError(x);
        }
    }
}
/// <summary>
/// Simulated receive path: collects the NAL unit carried by the packet and,
/// when the marker bit signals end-of-frame, assembles the frame.
/// </summary>
/// <param name="packet">RTP packet whose payload is one NAL unit.</param>
private void testReceive(RTP_Packet packet)
{
    Nalu nalu = Nalu.Parse(packet.Data);
    _nlist.Add(nalu);

    if (!packet.IsMarker) {
        return;
    }

    // Marker bit = last packet of the frame: build it and start a fresh list.
    buildFrame();
    _nlist = new List<Nalu>();
}
/// <summary>
/// Sends the cached PPS (picture parameter set) NAL unit over the given
/// send stream, if one is available, and records the send time.
/// </summary>
/// <param name="sender">RTP send stream to transmit on.</param>
/// <param name="timestamp">RTP timestamp to stamp the packet with.</param>
public void SendPPS(RTP_SendStream sender, uint timestamp)
{
    if (_pps == null) {
        return;
    }

    RTP_Packet ppsPacket = new RTP_Packet();
    ppsPacket.Timestamp = timestamp;
    ppsPacket.Data = _pps.NaluBytes();
    sender.Send(ppsPacket);

    // Remember when the PPS was last sent (used for periodic re-send logic).
    _ppsStamp = Environment.TickCount;
}
/// <summary>
/// Sends audio from the configured file to RTP session target(s).
/// Reads the file in fixed-size chunks, encodes each chunk with the active
/// codec, transmits it as an RTP packet and updates the UI counters.
/// </summary>
private void SendAudio()
{
    try{
        using (FileStream fs = File.OpenRead(m_SendFile)){
            RTP_SendStream sendStream = m_pSession.CreateSendStream();

            byte[] buffer = new byte[400];
            int readedCount = fs.Read(buffer, 0, buffer.Length);
            long packetsSent = 0;
            long totalSent = 0;
            while (readedCount > 0) {
                if (m_pMainUI.ActiveCodec != null) {
                    // BUG FIX: encode only the bytes actually read. The original
                    // passed buffer.Length, so the final (short) read encoded and
                    // sent stale bytes from the previous chunk.
                    byte[] encodedData = m_pMainUI.ActiveCodec.Encode(buffer, 0, readedCount);

                    // Send audio frame.
                    RTP_Packet packet = new RTP_Packet();
                    packet.Timestamp = m_pSession.RtpClock.RtpTimestamp;
                    packet.Data = encodedData;
                    sendStream.Send(packet);

                    // Read next audio frame.
                    readedCount = fs.Read(buffer, 0, buffer.Length);

                    totalSent += encodedData.Length;
                    packetsSent++;

                    // Marshal the statistics update onto the UI thread.
                    this.BeginInvoke(new MethodInvoker(delegate(){
                        m_pCodec.Text = m_pMainUI.ActiveCodec.Name;
                        m_pPacketsSent.Text = packetsSent.ToString();
                        m_pKBSent.Text = Convert.ToString(totalSent / 1000);
                    }));
                }

                // Crude pacing so frames go out roughly in real time.
                Thread.Sleep(25);
            }

            sendStream.Close();
        }
    }
    catch (Exception x) {
        // NOTE(review): exception intentionally swallowed (best-effort demo
        // sender); consider surfacing it to the UI instead of discarding.
        string dummy = x.Message;
    }
}
/// <summary>
/// Simulated send path: packetizes the fragment into NAL units and feeds each
/// one straight into <c>testReceive</c>, marking the last packet of a
/// completed frame with the RTP marker bit.
/// </summary>
/// <param name="psf">Program-stream fragment to packetize.</param>
private void testSend(PSFragment psf)
{
    List<Nalu> nList = _helper.ToRTP(psf);
    uint timestamp = 1;

    int last = nList.Count - 1;
    for (int i = 0; i <= last; i++) {
        RTP_Packet packet = new RTP_Packet();
        packet.Timestamp = timestamp;
        packet.Data = nList[i].NaluBytes();
        // Marker flags the final packet of a finished frame.
        packet.IsMarker = psf.IsFrameEnd && i == last;
        testReceive(packet);
    }
}
/// <summary>
/// Handles an unpacked program-stream fragment: converts it to RTP NAL-unit
/// payloads and sends them, priming newly-joined receivers with SPS/PPS and
/// redundantly re-sending SPS/PPS when they time out.
/// </summary>
/// <param name="arg1">Event source (unused).</param>
/// <param name="psf">Unpacked program-stream fragment.</param>
private void _ups_Unpacked(object arg1, PSFragment psf)
{
    List<Nalu> nList = _rtpHelper.ToRTPPayload(psf);
    uint timestamp = _multiSession.Sessions[0].RtpClock.RtpTimestamp;

    // A new receiving client has joined: send SPS/PPS first so it can decode.
    if (_hasNewTarget) {
        _hTrigger.SendSPS(_sendStream, timestamp);
        _hTrigger.SendPPS(_sendStream, timestamp);
        // BUG FIX: was `_hasNewTarget = true;` (a no-op inside this branch),
        // which left the flag permanently set and resent SPS/PPS on every
        // fragment. Clear it once the new target has been primed.
        _hasNewTarget = false;
    }

    for (int i = 0; i < nList.Count; i++) {
        _hTrigger.Update(nList[i]);

        RTP_Packet packet = new RTP_Packet();
        packet.Timestamp = timestamp;
        packet.Data = nList[i].NaluBytes();
        // Marker bit marks the last packet of a completed frame.
        packet.IsMarker = psf.IsFrameEnd && i == nList.Count - 1;
        _sendStream.Send(packet);
    }

    // Redundantly re-send SPS/PPS if they have not been sent recently.
    if (_hTrigger.IsSPSTimeout) {
        _hTrigger.SendSPS(_sendStream, timestamp);
    }
    if (_hTrigger.IsPPSTimeout) {
        _hTrigger.SendPPS(_sendStream, timestamp);
    }
}