2

我需要实现允许连接到两种类型的客户端的服务器。第一种类型必须将实时视频和音频流式传输到服务器。第二种类型必须从服务器流式传输此视频。我为视频选择了 h.264 编码,为音频选择了 vorbis ogg 编码。我想使用 RTSP 协议将视频从第一类客户端流式传输到服务器。我已经使用 ffmpeg 中的“libavformat”实现了客户端部分。我的代码如下所示。

#include "v_out_video_stream.h"

#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/avstring.h>
#include <libavformat/avio.h>
#ifdef __cplusplus
}
#endif
#include <stdexcept>

// One-time, process-wide FFmpeg initialization: registers all muxers/codecs
// and initializes the network layer. Triggered by the file-scope instance
// below, so it runs before main().
struct VStatticRegistrar
{
    VStatticRegistrar( )
    {
        av_register_all( );
        avformat_network_init( );
    }
};

// BUGFIX: the instance was named "__registrar" — identifiers containing a
// double underscore are reserved for the implementation in C++ and using
// them is undefined behavior. Renamed and given internal linkage, since
// nothing outside this translation unit needs it.
static VStatticRegistrar s_registrar;

// Pimpl state for VOutVideoStream: the FFmpeg muxing context plus the
// stream parameters captured at construction time.
struct VOutVideoStream::Private
{
    AVFormatContext * m_context;   // muxing context; null while disconnected
    int m_audioStreamIndex;        // index of the audio stream in m_context (-1 = not set)
    int m_videoStreamIndex;        // index of the video stream in m_context (-1 = not set)

    int m_videoBitrate;            // requested video bitrate (currently unused by connectToServer)
    int m_width;                   // video frame width, pixels
    int m_height;                  // video frame height, pixels
    int m_fps;                     // video frame rate, frames per second
    int m_audioSamplerate;         // audio sample rate, Hz
};

// Records the desired stream parameters only; no FFmpeg objects are
// created here — that happens later, in connectToServer().
VOutVideoStream::VOutVideoStream( int videoBitrate, int width, int height, int fps, int audioSamplerate )
{
    d = new Private( );

    // Start out disconnected: no context, no stream indices yet.
    d->m_context = 0;
    d->m_audioStreamIndex = -1;
    d->m_videoStreamIndex = -1;

    // Remember the encoding parameters for connectToServer().
    d->m_audioSamplerate = audioSamplerate;
    d->m_fps = fps;
    d->m_height = height;
    d->m_width = width;
    d->m_videoBitrate = videoBitrate;
}

bool VOutVideoStream::connectToServer( const std::string& rtp_address, int rtp_port )
{
    assert( ! d->m_context );

    // initalize the AV context
    d->m_context = avformat_alloc_context();
    if( !d->m_context )
        return false;
    // get the output format
    d->m_context->oformat = av_guess_format( "rtsp", NULL, NULL );
    if( ! d->m_context->oformat )
        return false;

    // try to open the RTSP stream
    snprintf( d->m_context->filename, sizeof( d->m_context->filename ), "rtsp://%s:%d", rtp_address.c_str(), rtp_port );
    if( avio_open( &d->m_context->pb, d->m_context->filename, AVIO_FLAG_WRITE ) < 0 )
        return false;

    // add an H.264 stream
    AVStream *stream = avformat_new_stream( d->m_context, NULL );
    if ( ! stream )
        return false;
    // initalize codec
    AVCodecContext* codec = stream->codec;
    if( d->m_context->oformat->flags & AVFMT_GLOBALHEADER )
        codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    codec->codec_id = CODEC_ID_H264;
    codec->codec_type = AVMEDIA_TYPE_VIDEO;
    //codec->bit_rate = d->m_videoBitrate;
    codec->width = d->m_width;
    codec->height = d->m_height;
    codec->time_base.den = d->m_fps;
    codec->time_base.num = 1;
    d->m_audioStreamIndex = stream->index;

    stream = avformat_new_stream( d->m_context, NULL );
    if ( ! stream )
        return false;
    // initalize codec
    codec = stream->codec;
    if( d->m_context->oformat->flags & AVFMT_GLOBALHEADER )
        codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    codec->codec_id = CODEC_ID_VORBIS;
    codec->codec_type = AVMEDIA_TYPE_AUDIO;
    codec->sample_fmt = AV_SAMPLE_FMT_S16;
    codec->channels = 2;
    codec->bit_rate = d->m_audioSamplerate * codec->channels * 16;
    codec->sample_rate = d->m_audioSamplerate;
    d->m_videoStreamIndex = stream->index;
    // write the header
    return avformat_write_header( d->m_context, NULL ) == 0;
}

void VOutVideoStream::disconnect( )
{
    assert( d->m_context );

    avio_close( d->m_context->pb );
    avformat_free_context( d->m_context );
    d->m_context = 0;
}

// Shuts down any live RTSP session before destroying the pimpl state.
VOutVideoStream::~VOutVideoStream( )
{
    const bool connected = ( d->m_context != 0 );
    if( connected )
        disconnect( );
    delete d;
}

// Wraps one payload unit in an AVPacket and muxes it onto the elementary
// stream matching its type. Returns true when the write succeeded.
bool VOutVideoStream::send( VNetworkAbstractionLayer& nal )
{
    AVPacket packet;
    av_init_packet( &packet );

    packet.data = nal.getPayload( );
    packet.size = nal.getSize( );
    // Route audio and video packets to their respective streams.
    if( nal.getType( ) == VNetworkAbstractionLayer::AUDIO_PACKET )
        packet.stream_index = d->m_audioStreamIndex;
    else
        packet.stream_index = d->m_videoStreamIndex;

    return av_write_frame( d->m_context, &packet ) >= 0;
}

VNetworkAbstractionLayer 是这样定义的:

#ifndef _V_NETWORK_ABSTRACTION_LAYER_H_
#define _V_NETWORK_ABSTRACTION_LAYER_H_

#include <cs/v_cs_global.h>

#include <stdint.h>
#include <cstring>
#include <boost/noncopyable.hpp>
#include <boost/enable_shared_from_this.hpp>

// Immutable-size container for one media packet (an H.264 NAL unit or an
// audio packet). Owns a heap copy of the payload; instances are reference
// counted via shared_ptr and are non-copyable.
class VNetworkAbstractionLayer : public boost::enable_shared_from_this<VNetworkAbstractionLayer>,
                                 private boost::noncopyable
{
public:
    // Which elementary stream this packet belongs to.
    enum PacketType
    {
        AUDIO_PACKET,
        VIDEO_PACKET
    };

    // Releases the owned payload copy.
    ~VNetworkAbstractionLayer( ) {
        delete[] m_data;
    }

    // Sole creation path: copies 'payload' into storage owned by the new
    // instance and returns it wrapped in the project's shared pointer.
    static VNetworkAbstractionLayerPtr factory( int size, const uint8_t* payload, PacketType type ) {
        return VNetworkAbstractionLayerPtr( new VNetworkAbstractionLayer( size, payload, type ) );
    }

    // Mutable pointer to the owned payload bytes.
    uint8_t* getPayload( ) {
        return m_data;
    }
    // Payload length in bytes.
    int getSize( ) const {
        return m_length;
    }
    // Audio/video classification of this packet.
    PacketType getType( ) const {
        return m_kind;
    }

private:
    // Private: construction only happens through factory().
    VNetworkAbstractionLayer( int size, const uint8_t* payload, PacketType type ) :
        m_length( size ),
        m_data( new uint8_t[ size ] ),
        m_kind( type )
    {
        memcpy( m_data, payload, size );
    }

    int m_length;       // payload size in bytes
    uint8_t *m_data;    // owned heap copy of the payload
    PacketType m_kind;  // audio/video tag
};


#endif // _V_NETWORK_ABSTRACTION_LAYER_H_

现在我需要实现服务器端。但我在 libavformat 中没有找到任何“监听”（listen）方法。谁能解释一下我该如何实现 RTSP 服务器？或者，我是否可以改用其他的库？

4

4 回答 4

0

从头开始编写 RTSP/RTCP/RTP 协议栈非常复杂。您可以看看用 C++ 实现了这类协议栈的 live555 库，它可以与 ffmpeg/libav 配合使用。

于 2012-12-11T19:01:58.833 回答
0
于 2012-09-15T12:12:23.613 回答
0

您也可以使用https://net7mma.codeplex.com/作为您的服务器!

于 2012-12-12T04:20:06.897 回答
-1

您可以使用VLC作为您的流媒体服务器。

于 2012-09-15T03:57:35.313 回答