我正在使用 Java 应用程序,通过 JAIN SIP Java API 与 Nuance 语音服务器建立 SIP 会话。然后我通过 mrcp4j API 发送一些 MRCP 命令(如 GET-PARAMS、SET-PARAMS、DEFINE-GRAMMAR 和 RECOGNIZE)来准备系统进行识别。
然后我使用 JMF API 与语音服务器建立 RTP 和 RTCP 会话,以发送音频进行识别。服务器已收到音频,但在收到 RTCP BYE 之前不会开始识别。
但问题是我无法发送 RTCP BYE 来结束 RTP 会话,因为我在 JMF 文档中找不到相应的方法。
如果有人可以指导我,那将非常有帮助。我附上了 RTP 会话的代码。
JMF API 文档的链接在这里。
// --- RTP session setup: create the manager, bind local RTP/RTCP ports,
// --- point it at the remote endpoint, and register stream listeners.
RTPManager rtpManager = RTPManager.newInstance();
// remote RTP port; RTCP conventionally uses port + 1 (see remoteAddress below)
int port = Integer.parseInt(rtpPORT); // FIX: removed stray duplicate ';'
// local endpoint: explicit host plus fixed RTP/RTCP port pair
SessionAddress localAddress = new SessionAddress();
InetAddress IP = InetAddress.getByName("hydhtc284704d");
localAddress.setControlHostAddress(IP);
localAddress.setControlPort(24501); // local RTCP port
localAddress.setDataHostAddress(IP);
localAddress.setDataPort(24500);    // local RTP port
// initialize the RTPManager with the local endpoint
rtpManager.initialize(localAddress);
// remote endpoint of this unicast session (RTP on `port`, RTCP on `port + 1`)
InetAddress ipAddress = InetAddress.getByName("hydhtc227033d");
SessionAddress remoteAddress = new SessionAddress(ipAddress, port, ipAddress, port + 1);
// open the connection to the remote party
rtpManager.addTarget(remoteAddress);
// log transmission statistics as the send stream progresses
rtpManager.addSendStreamListener(new SendStreamListener() {
    @Override
    public void update(SendStreamEvent arg0) {
        System.out.println("Number of bytes transmitted: "
                + arg0.getSendStream().getSourceTransmissionStats().getBytesTransmitted());
        System.out.println("Sender Report: " + arg0.getSendStream().getSenderReport());
    }
});
// no inbound media is expected from the speech server; no-op listener
rtpManager.addReceiveStreamListener(new ReceiveStreamListener() {
    @Override
    public void update(ReceiveStreamEvent arg0) {
        // intentionally empty — this client only sends audio
    }
});
// --- Processor setup: load the WAV file and program exactly one audio
// --- track to emit 8 kHz / 8-bit / mono u-law framed for RTP.
File audioFile = new File("C:\\Users\\Bhanu_Verma\\Desktop\\eclipse\\one.wav");
Processor processor = Manager.createProcessor(audioFile.toURI().toURL());
processor.configure();
// Block until the Processor has been configured.
// FIX: sleep inside the wait loop instead of spinning at 100% CPU
// (a ControllerListener would be the fully robust approach).
while (processor.getState() != Processor.Configured) {
    try {
        Thread.sleep(20);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
    }
}
// output must be raw RTP-framed data so it can feed a SendStream
processor.setContentDescriptor(new ContentDescriptor(ContentDescriptor.RAW_RTP));
TrackControl track[] = processor.getTrackControls();
boolean encodingOk = false;
// Try to program the first capable track to ULAW_RTP; every track that
// cannot (or need not) carry it is disabled.
for (int i = 0; i < track.length; i++) {
    if (!encodingOk && track[i] instanceof FormatControl) {
        if (((FormatControl) track[i]).setFormat(
                new AudioFormat(AudioFormat.ULAW_RTP, 8000, 8, 1)) == null) {
            // this track rejected the u-law format — disable it
            track[i].setEnabled(false);
        } else {
            encodingOk = true;
        }
    } else {
        // a track is already programmed, or this one has no FormatControl
        track[i].setEnabled(false);
    }
}
// --- Transmission and teardown: realize the processor, stream the audio,
// --- then close the SendStream so JMF emits the RTCP BYE the Nuance
// --- server waits for before it starts recognition.
// (NOTE: the closing '}' of this if-block lies beyond this snippet.)
if (encodingOk) {
    processor.realize();
    // block until realized — FIX: sleep instead of a busy spin
    while (processor.getState() != Processor.Realized) {
        try {
            Thread.sleep(20);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
    // the processor's output feeds the RTP send stream
    DataSource dataOutput = processor.getDataOutput();
    SendStream sendStream = rtpManager.createSendStream(dataOutput, 0);
    sendStream.start();
    System.out.println("Starting processor" + "\n");
    processor.start();
    // wait for transmission to finish — FIX: previously this printed
    // "Sending Audio.." in a tight loop, pegging the CPU
    while (processor.getState() == Processor.Started) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
    System.out.println("Processor was started and audio was sent to server");
    // give the server a moment to consume buffered audio
    // (FIX: undefined Wait(2000) helper replaced with Thread.sleep)
    try {
        Thread.sleep(2000);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    // FIX (the question's actual problem): closing the send stream is what
    // makes JMF send the RTCP BYE packet to the remote participant
    sendStream.close();
    // tear down the session; removeTarget reports the reason to the peer
    rtpManager.removeTarget(remoteAddress, "Client disconnected.");
    // dispose at the end of the RTPManager life-cycle so it can be GC'd
    rtpManager.dispose();