0

我正在使用 Sharppcap 库从 SIP 呼叫中获取数据包。到目前为止,一切都很好。但是,当我加入这些数据包(MemoryStream 中的 G.711 Alaw 数据包)并使用我从https://www.codeproject.com/Articles/14237/Using-the-G711-standard获得的 AlawDecoder dll 转换它并写入带有解码数组字节的 Wav 文件,我可以听录音,但它听起来不连贯且缓慢。我是音频编程的新手,所以我没有很多经验。下面是我为实现此目的而编写的代码片段:

private static void Device_OnPacketArrival(object sender, CaptureEventArgs e)
    {
        // Capture callback: classifies each captured packet as UDP (SIP signalling /
        // RTP audio) or TCP, and routes RTP audio payloads to RtpPacketsToWave.
        // NOTE(review): parsing and Console output inside the capture handler can
        // make the driver drop packets at high rates — a likely cause of the choppy
        // audio. Consider queuing packets for a background thread instead.
        var time = e.Packet.Timeval.Date.AddHours(-3); // shift capture timestamp to UTC-3 — TODO confirm intended zone
        var len = e.Packet.Data.Length;

        var packet = PacketDotNet.Packet.ParsePacket(e.Packet.LinkLayerType, e.Packet.Data);
        var device = sender as ICaptureDevice;
        var tcpPacket = packet.Extract<PacketDotNet.TcpPacket>();
        var udpPacket = packet.Extract<PacketDotNet.UdpPacket>();

        if (udpPacket != null)
        {
            var ipPacket = (PacketDotNet.IPPacket)udpPacket.ParentPacket;
            System.Net.IPAddress srcIp = ipPacket.SourceAddress;
            System.Net.IPAddress dstIp = ipPacket.DestinationAddress;
            int srcPort = udpPacket.SourcePort;
            int dstPort = udpPacket.DestinationPort;
            byte[] udpPayloadData = udpPacket.PayloadData;

            // BUG FIX: PayloadData can be null for zero-length datagrams; the
            // original code would throw ArgumentNullException in GetString below.
            if (udpPayloadData == null)
            {
                return;
            }
            string decodedUdpPayloadData = Encoding.UTF8.GetString(udpPayloadData);

            if (decodedUdpPayloadData.Contains("m=audio"))
            {
                // SDP media description announcing the RTP audio port:
                // retarget the capture filter at that port.
                FindRTPAudioPort(device, decodedUdpPayloadData);
            }
            else if (device.Filter != "udp port 5060")
            {
                // Filter has already switched away from SIP signalling,
                // so this datagram is treated as RTP audio.
                RtpPacketsToWave(device, udpPayloadData);
            }
            else
            {
                Console.WriteLine("{0}:{1}:{2},{3} Len={4} {5}:{6} -> {7}:{8} UDP Packet " +
                "\n {9} \n Hex DUMP: {10} \n",
                time.Hour, time.Minute, time.Second, time.Millisecond, len,
                srcIp, srcPort, dstIp, dstPort,
                decodedUdpPayloadData,
                BitConverter.ToString(udpPayloadData));
            }
        }
        else if (tcpPacket != null)
        {
            var ipPacket = (PacketDotNet.IPPacket)tcpPacket.ParentPacket;
            System.Net.IPAddress srcIp = ipPacket.SourceAddress;
            System.Net.IPAddress dstIp = ipPacket.DestinationAddress;
            int srcPort = tcpPacket.SourcePort;
            int dstPort = tcpPacket.DestinationPort;

            // TCP packets are only logged; SIP-over-TCP is not handled here.
            Console.WriteLine("{0}:{1}:{2},{3} Len={4} {5}:{6} -> {7}:{8}",
                time.Hour, time.Minute, time.Second, time.Millisecond, len,
                srcIp, srcPort, dstIp, dstPort);
        }
        else
        {
            Console.WriteLine("\n");
        }
    }

    private static void RtpPacketsToWave(ICaptureDevice dev, byte[] payloadData)
    {
        // Appends the G.711 A-law samples of one RTP packet to the shared capture
        // buffer and, once enough audio has accumulated, writes the WAV file and
        // stops the capture.
        try
        {
            MemoryStreamSingleton memoryStreamSingleton = MemoryStreamSingleton.GetInstance();

            // Fixed RTP header (12 bytes) + 160 bytes of A-law = 20 ms @ 8 kHz.
            const int RtpHeaderLength = 12;
            const int ExpectedPacketLength = RtpHeaderLength + 160;

            if (payloadData != null && payloadData.Length == ExpectedPacketLength)
            {
                // BUG FIX: the original allocated a temporary array via
                // Skip(12).ToArray() plus a MemoryStream that was never disposed,
                // just to copy bytes. Write the samples into the singleton directly.
                memoryStreamSingleton.Write(payloadData, RtpHeaderLength,
                    payloadData.Length - RtpHeaderLength);
                Console.WriteLine("Payload length: {0}", payloadData.Length - RtpHeaderLength);
            }
            Console.WriteLine(memoryStreamSingleton.Length);

            // 600000 bytes ≈ 75 s of 8 kHz 8-bit audio; stop once we have enough.
            if (memoryStreamSingleton.Length > 600000)
            {
                WaveFileGenerator(memoryStreamSingleton.ToArray());
                dev.StopCapture();
            }
        }
        catch (Exception ex)
        {
            // Best-effort: log and keep capturing; a bad packet must not kill the capture loop.
            Console.WriteLine(ex.ToString());
        }
    }

    private static void WaveFileGenerator(byte[] buffer)
    {
        // Decodes the buffered G.711 A-law bytes to 16-bit PCM and writes a stereo
        // WAV file ("test.wav"), using a temporary mono file as an intermediate.
        try
        {
            Console.WriteLine("Device closed, generating audio file..");

            // A-law source is 8 kHz; decoded output is 16-bit mono PCM.
            WaveFormat waveFormat = new WaveFormat(8000, 16, 1);

            short[] pcm16bit = ALawDecoder.ALawDecode(buffer);
            byte[] pcmBytes = new byte[pcm16bit.Length * sizeof(short)];
            Buffer.BlockCopy(pcm16bit, 0, pcmBytes, 0, pcmBytes.Length);

            // BUG FIX: the original assumed "tmp" already existed (otherwise
            // WaveFileWriter throws DirectoryNotFoundException) and never disposed
            // the writer/reader when an exception occurred, leaking file handles.
            Directory.CreateDirectory("tmp");
            string tempPath = Path.Combine("tmp", "test.wav");

            using (var outputWave = new WaveFileWriter(tempPath, waveFormat))
            {
                outputWave.Write(pcmBytes, 0, pcmBytes.Length);
            }

            using (var waveFileProvider = new WaveFileReader(tempPath))
            {
                // Duplicate the mono channel into left+right for the final file.
                MonoToStereoProvider16 toStereo = new MonoToStereoProvider16(waveFileProvider);
                WaveFileWriter.CreateWaveFile("test.wav", toStereo);
            }

            File.Delete(tempPath);
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.ToString());
            File.WriteAllText("log.txt", ex.ToString());
        }
    }

我无法理解我错过了什么......

4

1 回答 1

0

由于在接收函数中花费的时间过长,导致数据包溢出内部缓冲区,您可能会丢失数据包。

接收例程中的任何额外处理(包括打印)都可能在捕获大量数据包的情况下导致数据包丢失。

为了帮助减少在该例程中花费的时间并最大化数据包速率,您可以将数据包排队以在后台线程中进行处理。

在 SharpPcap 示例中有一个如何通过后台线程排队和处理数据包的示例,您可以在https://github.com/chmorgan/sharppcap/blob/master/Examples/QueuingPacketsForBackgroundProcessing/Program.cs找到该示例

使用队列方法,接收例程会很快退出,从而可以处理非常高的数据包率而不会丢失。

此外,您可以添加队列大小检查以确认后台线程跟上传入数据包的速率。

降低传入数据包速率的一种方法是使用网络数据包过滤器(在操作系统或驱动程序层中运行)并且仅包括潜在的 SIP 数据包。我不熟悉 SIP,但如果您有更多信息,我可以尝试提供帮助。您的代码似乎正在使用过滤器,但尚不清楚在找到 SIP 流的情况下会发生什么,在这种情况下您是否过滤以仅包含该端口?

让我知道这是否有助于减少或消除断断续续的声音。

于 2020-09-02T01:57:42.130 回答