溫馨提示×

您好,登錄后才能下訂單哦!

密碼登錄×
登錄注冊×
其他方式登錄
點擊 登錄注冊 即表示同意《億速云用戶服務條款》

Android 音視頻深入 十二 FFmpeg視頻替換聲音(附源碼下載)

發布時間:2020-07-23 09:41:50 來源:網絡 閱讀:994 作者:qq5a70561b29238 欄目:移動開發

項(xiàng)目地址,求star
https://github.com/979451341/AudioVideoStudyCodeTwo/tree/master/FFmpeg%E7%BB%99%E8%A7%86%E9%A2%91%E6%8D%A2%E5%A3%B0%E9%9F%B3

一個視頻有三個流:視頻流、音頻流、字幕流。我將視頻A的視頻流拿出來,將音樂B的音頻流拿出來,合在一起成新的視頻

還是老規(guī)矩直接說c代碼如何運(yùn)行

注冊組件,打開并獲得MP4文件和MP3文件的信息

// Register all muxers/demuxers/codecs with libavformat. Required once before
// any other libavformat call in the pre-4.0 FFmpeg API used by this sample.
av_register_all();
//Input: open both source files and probe their stream information.
if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {// open the input video file
    LOGE( "Could not open input file.");
    goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {// probe the video file's stream info
    LOGE( "Failed to retrieve input stream information");
    goto end;
}

if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {// open the input audio file
    LOGE( "Could not open input file.");
    goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {// probe the audio file's stream info
    LOGE( "Failed to retrieve input stream information");
    goto end;
}

創(chuàng)建輸出文件

 //Output: allocate the muxing context; the container format is guessed
 //from the out_filename extension (e.g. ".mp4").
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);// init the output AVFormatContext
if (!ofmt_ctx) {
    LOGE( "Could not create output context\n");
    ret = AVERROR_UNKNOWN;
    goto end;   // FIX: was `return -1`, which leaked the two already-opened input contexts
}
ofmt = ofmt_ctx->oformat;

獲取MP4的視頻流和MP3的音頻流

//從輸入的AVStream中獲取一個(gè)輸出的out_stream
// Find the first video stream in input A and mirror it as an output stream.
for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
    //Create output AVStream according to input AVStream
    if(ifmt_ctx_v->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
        AVStream *in_stream = ifmt_ctx_v->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);// create the output AVStream
        videoindex_v=i; // index of the video stream in the input file
        if (!out_stream) {
            LOGE( "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;   // FIX: was `break`, which fell through with no output stream; matches the audio loop below
        }
        videoindex_out=out_stream->index; // index of the video stream in the output file
        //Copy the settings of AVCodecContext (stream copy, no transcode)
        if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
            LOGE( "Failed to copy context from input to output stream codec context\n");
            goto end;   // FIX: was `break` — keep error handling consistent with the audio loop
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        break; // only the first video stream is used
    }
}

// Find the first audio stream in input B and mirror it as an output stream.
for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
    //Create output AVStream according to input AVStream
    if(ifmt_ctx_a->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
        AVStream *in_stream = ifmt_ctx_a->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        audioindex_a=i; // index of the audio stream in the input file
        if (!out_stream) {
            LOGE( "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        audioindex_out=out_stream->index; // index of the audio stream in the output file
        //Copy the settings of AVCodecContext (stream copy, no transcode)
        if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
            LOGE( "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

        break; // only the first audio stream is used
    }
}

獲取輸出文件的信息并打開輸出文件,獲得輸出流

LOGE("==========Output Information==========\n");
av_dump_format(ofmt_ctx, 0, out_filename, 1);
LOGE("======================================\n");
//Open output file. Skipped for formats that do their own I/O (AVFMT_NOFILE).
if (!(ofmt->flags & AVFMT_NOFILE)) {
    if ((ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE)) < 0) {// open the output file
        LOGE( "Could not open output file '%s'", out_filename);
        goto end;   // FIX: was `return -1`, leaking both input contexts and ofmt_ctx
    }
}
//Write file header
if ((ret = avformat_write_header(ofmt_ctx, NULL)) < 0) {
    LOGE( "Error occurred when opening output file\n");
    goto end;   // FIX: was `return -1` — route through the shared cleanup instead
}

接下來就是邊解碼邊編碼了,這個解碼是解碼視頻流和音頻流,這兩個流的解碼速度需要保持一致,
這個通過雙方時間軸來判斷,然后就將解碼的數據編碼放入輸出文件

    //Get an AVPacket. av_compare_ts() compares the two running timestamps in
    //their own stream time bases; it decides whether the next packet written
    //should come from the video input (<= 0) or the audio input (> 0).
    if(av_compare_ts(cur_pts_v,ifmt_ctx_v->streams[videoindex_v]->time_base,cur_pts_a,ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0)

開始是先解碼視頻,放入cur_pts_v

        // NOTE(review): `ifmt_ctx` and `stream_index` are presumably aliased to the
        // video input (ifmt_ctx_v / videoindex_out) just before this branch — the
        // assignment is not shown in this excerpt; confirm against the full source.
        if(av_read_frame(ifmt_ctx, &pkt) >= 0){
            do{
                in_stream  = ifmt_ctx->streams[pkt.stream_index];
                out_stream = ofmt_ctx->streams[stream_index];

                if(pkt.stream_index==videoindex_v){
                    //FIX: No PTS (Example: Raw H.264) — a raw H.264 elementary stream
                    //carries no PTS, so timestamps must be synthesized by hand.
                    //Simple Write PTS
                    if(pkt.pts==AV_NOPTS_VALUE){
                        //Write PTS
                        AVRational time_base1=in_stream->time_base;
                        //Duration between 2 frames (us)
                        int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
                        //Parameters: convert the frame counter into the stream's time base
                        pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                        pkt.dts=pkt.pts;
                        pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                        frame_index++;
                    }

                    cur_pts_v=pkt.pts; // remember the video clock for the next av_compare_ts()
                    break;
                }
            }while(av_read_frame(ifmt_ctx, &pkt) >= 0); // keep reading, skipping non-video packets
        }

接著是解碼音頻放入cur_pts_a

        // NOTE(review): here `ifmt_ctx` / `stream_index` are presumably re-aliased
        // to the audio input (ifmt_ctx_a / audioindex_out) — assignment not shown
        // in this excerpt; confirm against the full source.
        if(av_read_frame(ifmt_ctx, &pkt) >= 0){
            do{
                in_stream  = ifmt_ctx->streams[pkt.stream_index];
                out_stream = ofmt_ctx->streams[stream_index];

                if(pkt.stream_index==audioindex_a){

                    //FIX: No PTS — synthesize timestamps for streams without them
                    //Simple Write PTS
                    if(pkt.pts==AV_NOPTS_VALUE){
                        //Write PTS
                        AVRational time_base1=in_stream->time_base;
                        //Duration between 2 frames (us)
                        int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
                        //Parameters: convert the frame counter into the stream's time base
                        pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                        pkt.dts=pkt.pts;
                        pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                        frame_index++;
                    }
                    cur_pts_a=pkt.pts; // remember the audio clock for the next av_compare_ts()

                    break;
                }
            }while(av_read_frame(ifmt_ctx, &pkt) >= 0); // keep reading, skipping non-audio packets
        }else{
            break; // audio input exhausted — stop the mux loop
        }

然后就是將之前解碼的數(shù)據(jù)編碼放入輸出文件,釋放這個(gè)pkt

    //Convert PTS/DTS from the input stream's time base to the output stream's.
    //AV_ROUND_PASS_MINMAX passes AV_NOPTS_VALUE through the rescale unchanged.
    pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
    pkt.pos = -1; // unknown byte position; let the muxer fill it in
    pkt.stream_index=stream_index; // retarget the packet at the output stream
    LOGE("Write 1 Packet. size:%5d\tpts:%lld\n",pkt.size,pkt.pts);
    //Write AVPacket (audio or video elementary stream), interleaved by DTS
    if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
        LOGE( "Error muxing packet\n");
        break;
    }

av_free_packet(&pkt); // release the packet payload (deprecated; av_packet_unref() in newer FFmpeg)

通過不斷地循環:解碼一下視頻流,解碼一下音頻流,編碼放入輸出文件,這三個步驟不斷循環完成合成視頻

完成輸出視頻,并釋放資源

//Write file trailer (finalizes the container index/metadata)
av_write_trailer(ofmt_ctx);

#if USE_H264BSF
av_bitstream_filter_close(h264bsfc); // FIX: was `h364bsfc` — corrupted identifier; matches USE_H264BSF
#endif
#if USE_AACBSF
av_bitstream_filter_close(aacbsfc);
#endif

end:
//Shared cleanup: close inputs, then the output I/O and muxing context.
avformat_close_input(&ifmt_ctx_v);
avformat_close_input(&ifmt_ctx_a);
/* close output */
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
    avio_close(ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
if (ret < 0 && ret != AVERROR_EOF) {
    LOGE( "Error occurred.\n");
    return -1;
}

下一次依舊與音頻有關(guān)系

向AI問一下細(xì)節(jié)

免責(zé)聲明:本站發(fā)布的內(nèi)容(圖片、視頻和文字)以原創(chuàng)、轉(zhuǎn)載和分享為主,文章觀點(diǎn)不代表本網(wǎng)站立場(chǎng),如果涉及侵權(quán)請(qǐng)聯(lián)系站長(zhǎng)郵箱:is@yisu.com進(jìn)行舉報(bào),并提供相關(guān)證據(jù),一經(jīng)查實(shí),將立刻刪除涉嫌侵權(quán)內(nèi)容。

AI