The following code snippet is from the Objective-C implementation file (.m) of an object that handles RTSP content (RTSPProviderObject). It was tested with Xcode 10.1 (10B61) and a manual build of the current FFmpeg version as of this writing (4.2.1 / 15.10.2019). If you need the build script configuration and/or the library versions used, just ask.
I had the same problem as the OP but could not use his solution. The full version of the interrupt callback I use is:
int interruptCallBack(void *ctx){
    RTSPProviderObject *whyFFmpeg = (__bridge RTSPProviderObject*)ctx;
    NSLog(@"What is this!");
    if(whyFFmpeg.whatIsHappeningSTR) {
        return 1;
    } else {
        return 0;
    }
}
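FFmpeg polls this callback during blocking operations such as avformat_open_input() and av_read_frame(); a non-zero return value makes the blocked call fail with AVERROR_EXIT instead of continuing.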
From my current understanding, returning 1 should interrupt and exit av_read_frame() without crashing. It still crashed. My solution was to let av_read_frame() finish reading and then terminate the session context, which is then released and no further reading is allowed. This was easy, since I was hitting the problem while deallocating RTSPProviderObject, when no reading was in progress.
The final usage is:
[self.rtspProvider cleanup];
self.rtspProvider = nil;
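In my case this teardown runs when the owning object goes away; a minimal sketch of that call site (the owner class and its rtspProvider property are placeholders):

- (void)dealloc
{
    // Hypothetical owner of the provider: flag the interrupt callback and the
    // read loop first, then drop the provider so it can close the input.
    [self.rtspProvider cleanup];
    self.rtspProvider = nil;
}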
Here is the full code snippet:
#import "Don't forget the required ffmpeg headers or header file"
int interruptCallBack(void *ctx){
    RTSPProviderObject *whyFFmpeg = (__bridge RTSPProviderObject*)ctx;
    NSLog(@"What is this!");
    if(whyFFmpeg.whatIsHappeningSTR) {
        return 1;
    } else {
        return 0;
    }
}
@interface RTSPProviderObject ()
@property (nonatomic, assign) AVFormatContext *sessionContext;
@property (nonatomic, copy) NSString *whatIsHappeningSTR;
@property (nonatomic, assign) AVDictionary *sessionOptions;
@property (nonatomic, assign) BOOL usesTcp;
@property (nonatomic, assign) BOOL isInputStreamOpen;
@property (nonatomic, assign) BOOL shouldTerminateStreams;
@property (nonatomic, strong) NSLock *audioPacketQueueLock;
@property (nonatomic, strong) NSLock *packetQueueLock;
@property (nonatomic, strong, readwrite) NSMutableArray *audioPacketQueue;
@property (nonatomic, assign) int selectedVideoStreamIndex;
@property (nonatomic, assign) int selectedAudioStreamIndex;
// Referenced below; created in the omitted codec/stream setup code.
@property (nonatomic, assign) AVCodecContext *videoCodecContext;
@property (nonatomic, assign) AVFrame *rawFrameData;
@end
@implementation RTSPProviderObject
{
    // Packet reused by prepareNextFrame; ivars are zero-initialized.
    AVPacket packet;
}
- (instancetype _Nullable)initWithURL:(NSURL *)rtspURL
{
    self = [super init];
    if (!self)
    {
        return nil;
    }
    self.sessionContext = avformat_alloc_context();
    AVFormatContext *pFormatCtx = self.sessionContext;
    if (!pFormatCtx)
    {
        // Error handling code...
        return nil;
    }
    // MUST be called before avformat_open_input().
    av_dict_free(&_sessionOptions);
    self.sessionOptions = NULL;
    if (self.usesTcp)
    {
        // "rtsp_transport" - Set the RTSP transport protocol.
        // Allowed values are: udp_multicast, tcp, udp, http.
        av_dict_set(&_sessionOptions, "rtsp_transport", "tcp", 0);
    }
    // Open an input stream and read the header with the demuxer options.
    // rtspURL - connection URL of your remote IP camera which supports RTSP.
    if (avformat_open_input(&pFormatCtx, rtspURL.UTF8String, NULL, &_sessionOptions) != 0)
    {
        self.isInputStreamOpen = NO;
        // Error handling code...
        return nil;
    }
    self.isInputStreamOpen = YES;
    // The user-supplied AVFormatContext pFormatCtx might have been modified.
    self.sessionContext = pFormatCtx;
    pFormatCtx->interrupt_callback.callback = interruptCallBack;
    pFormatCtx->interrupt_callback.opaque = (__bridge void *)(self);
    // ... Other needed but currently not relevant code for codec/stream and other setup.
    return self;
}
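// Note: interrupt_callback is assigned only after avformat_open_input() has
// returned, so the open call itself cannot be interrupted here. Assigning
// pFormatCtx->interrupt_callback before the open would cover that case too.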
- (BOOL)prepareNextFrame
{
    NSLog(@"%s", __PRETTY_FUNCTION__);
    int isVideoFrameAvailable = 0;
    // The session context is needed to provide frame data. Frame data is provided for video and audio.
    // av_read_frame reads from pFormatCtx.
    AVFormatContext *pFormatCtx = self.sessionContext;
    if (!pFormatCtx) { return NO; }
    // Audio packet access is forbidden.
    [self.packetQueueLock lock];
    BOOL readResult = YES;
    // Calling av_read_frame while it is already reading causes a bad_exception.
    // We read frames as long as the session context contains frames to be read and consumed (usually one).
    while (!isVideoFrameAvailable && self.isInputStreamOpen && readResult) {
        if (packet.buf == nil && self.whatIsHappeningSTR) {
            [self.packetQueueLock unlock];
            return NO;
        }
        NSLog(@"New frame will be read.");
        if (self.shouldTerminateStreams) {
            [self terminate];
            [self.packetQueueLock unlock];
            return NO;
        }
        readResult = av_read_frame(pFormatCtx, &packet) >= 0;
        // Video packet data decoding.
        // We need to make sure that the decoded video data matches the user-selected stream.
        if (packet.stream_index == self.selectedVideoStreamIndex) {
            // DEPRECATED:
            // avcodec_decode_video2(self.videoCodecContext, self.rawFrameData, &isVideoFrameAvailable, &packet);
            // Replaced by the new implementation below. Read more: https://blogs.gentoo.org/lu_zero/2016/03/29/new-avcodec-api/
            // *
            // We need a valid video context to decode video data.
            AVCodecContext *videoContext = self.videoCodecContext;
            if (!videoContext || videoContext->codec_type != AVMEDIA_TYPE_VIDEO) {
                [self.packetQueueLock unlock];
                return NO;
            }
            int ret;
            // Supply raw packet data as input to the decoder.
            ret = avcodec_send_packet(videoContext, &packet);
            if (ret < 0)
            {
                NSLog(@"codec: sending video packet failed");
                [self.packetQueueLock unlock];
                return NO;
            }
            // Return decoded output data from the decoder.
            ret = avcodec_receive_frame(videoContext, self.rawFrameData);
            if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            {
                [self.packetQueueLock unlock];
                return NO;
            }
            if (ret >= 0) { isVideoFrameAvailable = 1; }
            // *
        } else {
            // avcodec_decode_video2 used to unreference all the buffers referenced by self.rawFrameData and reset the frame fields.
            // We must do this manually if we don't use the video frame, or we will leak the frame data.
            av_frame_unref(self.rawFrameData);
            isVideoFrameAvailable = 1;
        }
        // Audio packet data consumption.
        // We need to make sure that the consumed audio data matches the user-selected stream.
        if (packet.stream_index == self.selectedAudioStreamIndex) {
            [self.audioPacketQueueLock lock];
            // Stores a shallow copy of the AVPacket struct; the consumer must unreference it.
            [self.audioPacketQueue addObject:[NSMutableData dataWithBytes:&packet length:sizeof(packet)]];
            [self.audioPacketQueueLock unlock];
        }
    }
    [self.packetQueueLock unlock];
    return isVideoFrameAvailable != 0;
}
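// For reference, the send/receive API linked above is normally driven as a
// loop, because a single packet can produce zero or more frames. A minimal
// sketch (videoContext and frame stand in for the properties used above):
//
//     int ret = avcodec_send_packet(videoContext, &packet);
//     while (ret >= 0) {
//         ret = avcodec_receive_frame(videoContext, frame);
//         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
//         if (ret < 0) { /* decoding error */ break; }
//         // ...consume the frame, then av_frame_unref(frame)...
//     }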
- (void)cleanup
{
    NSLog(@"%s", __PRETTY_FUNCTION__);
    self.shouldTerminateStreams = YES;
    // Any non-nil value makes interruptCallBack return 1.
    self.whatIsHappeningSTR = @"";
}
- (void)terminate
{
    avformat_close_input(&_sessionContext);
}
@end
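For completeness, a rough sketch of how a caller might drive this object; cameraURL, the surrounding context, and the rendering step are placeholders, while initWithURL:, prepareNextFrame, and cleanup match the code above:

// Hypothetical call site.
RTSPProviderObject *provider = [[RTSPProviderObject alloc] initWithURL:cameraURL];
// Pull decoded frames until the stream ends or teardown is requested.
while ([provider prepareNextFrame]) {
    // A decoded video frame is now available; hand it to the renderer here.
}
// Flag the interrupt callback and the read loop, then release the provider.
[provider cleanup];
provider = nil;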
I hope this helps someone. Thanks for reading and contributing.