Android FFmpeg audio/video development series:
- [FFmpeg Compilation and Integration]
- [Video Decoding and Playback with FFmpeg + ANativeWindow]
- [Audio Decoding and Playback with FFmpeg + OpenSL ES]
- [Audio Visualization with FFmpeg + OpenGL ES]
- [Video Decoding, Playback, and Filters with FFmpeg + OpenGL ES]
- [Three Simple Ways to Implement A/V Sync in an FFmpeg Player]
- [A 3D Panoramic Player with FFmpeg + OpenGL ES]
- [Video Rendering Optimization for an FFmpeg Player]
- [Building and Integrating FFmpeg, x264, and fdk-aac]
- [FFmpeg Video Recording: Adding Filters and Encoding]
- [Audio Recording and Encoding with FFmpeg + Android AudioRecorder]
In the last installment some readers complained that this series updates too slowly. The implementation code has actually been there all along; I can only spend a little time each day tidying it up, so it comes out gradually.
In earlier articles we used FFmpeg to implement [encoding the preview frames captured by Android Camera2 into an mp4 file] and [encoding the PCM audio captured by Android AudioRecorder into an aac file].
This article encodes the captured preview frames (with filters applied) and the PCM audio at the same time and muxes them into a single mp4 file, i.e. a WeChat-style short-video recording feature.
Audio/Video Recording and Encoding Flow
Audio/video encoding flowchart
This article uses software (CPU) encoding, so with high-resolution preview frames you need to consider whether the CPU can keep up. On a Snapdragon 8250, software-encoding frames larger than 1080p already strains the CPU, and the frame rate starts to drop.
Audio/Video Recording Code
The Java-layer video frames come from the Android Camera2 API callback.
private ImageReader.OnImageAvailableListener mOnPreviewImageAvailableListener = new ImageReader.OnImageAvailableListener() {
@Override
public void onImageAvailable(ImageReader reader) {
Image image = reader.acquireLatestImage();
if (image != null) {
if (mCamera2FrameCallback != null) {
mCamera2FrameCallback.onPreviewFrame(CameraUtil.YUV_420_888_data(image), image.getWidth(), image.getHeight());
}
image.close();
}
}
};
The Java-layer audio is captured with the Android AudioRecorder API. The AudioRecorder is wrapped in a thread and the PCM data is delivered through a callback interface; the defaults are a 44.1 kHz sample rate, two-channel stereo, and 16-bit PCM samples.
The JNI layer mainly does two things: when recording starts it receives the output file path, video bitrate, frame rate, video width/height, and other parameters, and afterwards it keeps pushing audio frames and video frames into the Native-layer encode queues for the encoders to consume.
//Start recording: pass in the output file path, video bitrate, frame rate, video width/height, etc.
extern "C"
JNIEXPORT jint JNICALL
Java_com_byteflow_learnffmpeg_media_MediaRecorderContext_native_1StartRecord(JNIEnv *env,
jobject thiz,
jint recorder_type,
jstring out_url,
jint frame_width,
jint frame_height,
jlong video_bit_rate,
jint fps) {
const char* url = env->GetStringUTFChars(out_url, nullptr);
MediaRecorderContext *pContext = MediaRecorderContext::GetContext(env, thiz);
int result = pContext ? pContext->StartRecord(recorder_type, url, frame_width, frame_height, video_bit_rate, fps) : 0;
env->ReleaseStringUTFChars(out_url, url); //release the string only after StartRecord has used it
return result;
}
//Push an audio frame into the encode queue
extern "C"
JNIEXPORT void JNICALL
Java_com_byteflow_learnffmpeg_media_MediaRecorderContext_native_1OnAudioData(JNIEnv *env,
jobject thiz,
jbyteArray data,
jint size) {
int len = env->GetArrayLength (data);
unsigned char* buf = new unsigned char[len];
env->GetByteArrayRegion(data, 0, len, reinterpret_cast<jbyte*>(buf));
MediaRecorderContext *pContext = MediaRecorderContext::GetContext(env, thiz);
if(pContext) pContext->OnAudioData(buf, len);
delete[] buf;
}
//Push a video frame into the encode queue
extern "C"
JNIEXPORT void JNICALL
Java_com_byteflow_learnffmpeg_media_MediaRecorderContext_native_1OnPreviewFrame(JNIEnv *env,
jobject thiz,
jint format,
jbyteArray data,
jint width,
jint height) {
int len = env->GetArrayLength (data);
unsigned char* buf = new unsigned char[len];
env->GetByteArrayRegion(data, 0, len, reinterpret_cast<jbyte*>(buf));
MediaRecorderContext *pContext = MediaRecorderContext::GetContext(env, thiz);
if(pContext) pContext->OnPreviewFrame(format, buf, width, height);
delete[] buf;
}
//Stop recording
extern "C"
JNIEXPORT jint JNICALL
Java_com_byteflow_learnffmpeg_media_MediaRecorderContext_native_1StopRecord(JNIEnv *env,
jobject thiz) {
MediaRecorderContext *pContext = MediaRecorderContext::GetContext(env, thiz);
if(pContext) return pContext->StopRecord();
return 0;
}
The audio/video encoding flow is encapsulated in a single class; the code essentially follows the flowchart above.
//Wrapper class for audio/video recording
class MediaRecorder {
public:
MediaRecorder(const char *url, RecorderParam *param);
~MediaRecorder();
//Start recording
int StartRecord();
//Push audio data into the audio queue
int OnFrame2Encode(AudioFrame *inputFrame);
//Push video data into the video queue
int OnFrame2Encode(VideoFrame *inputFrame);
//Stop recording
int StopRecord();
private:
//Start the audio encode thread
static void StartAudioEncodeThread(MediaRecorder *recorder);
//Start the video encode thread
static void StartVideoEncodeThread(MediaRecorder *recorder);
static void StartMediaEncodeThread(MediaRecorder *recorder);
//Allocate an audio frame buffer
AVFrame *AllocAudioFrame(AVSampleFormat sample_fmt, uint64_t channel_layout, int sample_rate, int nb_samples);
//Allocate a video frame buffer
AVFrame *AllocVideoFrame(AVPixelFormat pix_fmt, int width, int height);
//Write an encoded packet to the media file
int WritePacket(AVFormatContext *fmt_ctx, AVRational *time_base, AVStream *st, AVPacket *pkt);
//Add a media stream
void AddStream(AVOutputStream *ost, AVFormatContext *oc, AVCodec **codec, AVCodecID codec_id);
//Print packet info
void PrintfPacket(AVFormatContext *fmt_ctx, AVPacket *pkt);
//Open the audio encoder
int OpenAudio(AVFormatContext *oc, AVCodec *codec, AVOutputStream *ost);
//Open the video encoder
int OpenVideo(AVFormatContext *oc, AVCodec *codec, AVOutputStream *ost);
//Encode one audio frame
int EncodeAudioFrame(AVOutputStream *ost);
//Encode one video frame
int EncodeVideoFrame(AVOutputStream *ost);
//Release the encoder context
void CloseStream(AVOutputStream *ost);
private:
RecorderParam m_RecorderParam = {0};
AVOutputStream m_VideoStream;
AVOutputStream m_AudioStream;
char m_OutUrl[1024] = {0};
AVOutputFormat *m_OutputFormat = nullptr;
AVFormatContext *m_FormatCtx = nullptr;
AVCodec *m_AudioCodec = nullptr;
AVCodec *m_VideoCodec = nullptr;
//Video frame queue
ThreadSafeQueue<VideoFrame *> m_VideoFrameQueue;
//Audio frame queue
ThreadSafeQueue<AudioFrame *> m_AudioFrameQueue;
int m_EnableVideo = 0;
int m_EnableAudio = 0;
volatile bool m_Exit = false;
//Audio encode thread
thread *m_pAudioThread = nullptr;
//Video encode thread
thread *m_pVideoThread = nullptr;
};
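StartRecord() is where the setup half of the flowchart happens before any frame is encoded. The project's full implementation is not reproduced here; below is only a rough sketch of those steps, based on the member variables and helper methods declared above and the standard FFmpeg muxing calls, with error handling trimmed.
//Rough sketch of StartRecord(): allocate the muxer, add streams, open encoders, write the header, start the encode threads
int MediaRecorder::StartRecord() {
    //1. Allocate the output (muxer) context for the mp4 file
    avformat_alloc_output_context2(&m_FormatCtx, nullptr, nullptr, m_OutUrl);
    if (!m_FormatCtx) return -1;
    m_OutputFormat = m_FormatCtx->oformat;
    //2. Add a video stream and an audio stream and look up their encoders
    if (m_OutputFormat->video_codec != AV_CODEC_ID_NONE) {
        AddStream(&m_VideoStream, m_FormatCtx, &m_VideoCodec, m_OutputFormat->video_codec);
        m_EnableVideo = 1;
    }
    if (m_OutputFormat->audio_codec != AV_CODEC_ID_NONE) {
        AddStream(&m_AudioStream, m_FormatCtx, &m_AudioCodec, m_OutputFormat->audio_codec);
        m_EnableAudio = 1;
    }
    //3. Open the encoders and allocate the reusable frames
    if (m_EnableVideo) OpenVideo(m_FormatCtx, m_VideoCodec, &m_VideoStream);
    if (m_EnableAudio) OpenAudio(m_FormatCtx, m_AudioCodec, &m_AudioStream);
    //4. Open the output file and write the container header
    if (!(m_OutputFormat->flags & AVFMT_NOFILE)
        && avio_open(&m_FormatCtx->pb, m_OutUrl, AVIO_FLAG_WRITE) < 0) return -1;
    if (avformat_write_header(m_FormatCtx, nullptr) < 0) return -1;
    //5. Start the two threads that drain the frame queues and encode
    if (m_EnableAudio) m_pAudioThread = new thread(StartAudioEncodeThread, this);
    if (m_EnableVideo) m_pVideoThread = new thread(StartVideoEncodeThread, this);
    return 0;
}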
Encoding one video frame and encoding one audio frame work almost the same way: first convert the input to the encoder's target format, then call avcodec_send_frame/avcodec_receive_packet, and finally send an empty (null) frame to flush the encoder and mark the end of the stream.
int MediaRecorder::EncodeVideoFrame(AVOutputStream *ost) {
LOGCATE("MediaRecorder::EncodeVideoFrame");
int result = 0;
int ret;
AVCodecContext *c;
AVFrame *frame;
AVPacket pkt = { 0 };
c = ost->m_pCodecCtx;
av_init_packet(&pkt);
while (m_VideoFrameQueue.Empty() && !m_Exit) {
usleep(10* 1000);
}
frame = ost->m_pTmpFrame;
AVPixelFormat srcPixFmt = AV_PIX_FMT_YUV420P;
VideoFrame *videoFrame = m_VideoFrameQueue.Pop();
if(videoFrame) {
frame->data[0] = videoFrame->ppPlane[0];
frame->data[1] = videoFrame->ppPlane[1];
frame->data[2] = videoFrame->ppPlane[2];
frame->linesize[0] = videoFrame->pLineSize[0];
frame->linesize[1] = videoFrame->pLineSize[1];
frame->linesize[2] = videoFrame->pLineSize[2];
frame->width = videoFrame->width;
frame->height = videoFrame->height;
switch (videoFrame->format) {
case IMAGE_FORMAT_RGBA:
srcPixFmt = AV_PIX_FMT_RGBA;
break;
case IMAGE_FORMAT_NV21:
srcPixFmt = AV_PIX_FMT_NV21;
break;
case IMAGE_FORMAT_NV12:
srcPixFmt = AV_PIX_FMT_NV12;
break;
case IMAGE_FORMAT_I420:
srcPixFmt = AV_PIX_FMT_YUV420P;
break;
default:
LOGCATE("MediaRecorder::EncodeVideoFrame unSupport format pImage->format=%d", videoFrame->format);
break;
}
}
if((m_VideoFrameQueue.Empty() && m_Exit) || ost->m_EncodeEnd) frame = nullptr;
if(frame != nullptr) {
/* when we pass a frame to the encoder, it may keep a reference to it
* internally; make sure we do not overwrite it here */
if (av_frame_make_writable(ost->m_pFrame) < 0) {
result = 1;
goto EXIT;
}
if (srcPixFmt != AV_PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
if (!ost->m_pSwsCtx) {
ost->m_pSwsCtx = sws_getContext(c->width, c->height,
srcPixFmt,
c->width, c->height,
c->pix_fmt,
SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
if (!ost->m_pSwsCtx) {
LOGCATE("MediaRecorder::EncodeVideoFrame Could not initialize the conversion context\n");
result = 1;
goto EXIT;
}
}
sws_scale(ost->m_pSwsCtx, (const uint8_t * const *) frame->data,
frame->linesize, 0, c->height, ost->m_pFrame->data,
ost->m_pFrame->linesize);
}
ost->m_pFrame->pts = ost->m_NextPts++;
frame = ost->m_pFrame;
}
/* encode the image */
ret = avcodec_send_frame(c, frame);
if(ret == AVERROR_EOF) {
result = 1;
goto EXIT;
} else if(ret < 0) {
LOGCATE("MediaRecorder::EncodeVideoFrame video avcodec_send_frame fail. ret=%s", av_err2str(ret));
result = 0;
goto EXIT;
}
while(!ret) {
ret = avcodec_receive_packet(c, &pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
result = 0;
goto EXIT;
} else if (ret < 0) {
LOGCATE("MediaRecorder::EncodeVideoFrame video avcodec_receive_packet fail. ret=%s", av_err2str(ret));
result = 0;
goto EXIT;
}
LOGCATE("MediaRecorder::EncodeVideoFrame video pkt pts=%ld, size=%d", pkt.pts, pkt.size);
result = WritePacket(m_FormatCtx, &c->time_base, ost->m_pStream, &pkt);
if (result < 0) {
LOGCATE("MediaRecorder::EncodeVideoFrame Error while writing video frame: %s",
av_err2str(result));
result = 0;
goto EXIT;
}
}
EXIT:
NativeImageUtil::FreeNativeImage(videoFrame);
if(videoFrame) delete videoFrame;
return result;
}
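EncodeAudioFrame() follows the same pattern; the main difference is that the PCM data is converted with swr_convert before being sent to the AAC encoder. The following is only a condensed sketch: the SwrContext member (m_pSwrCtx) and the AudioFrame data field are assumptions based on the usage above, and error handling plus the end-of-stream branch are trimmed.
//Condensed sketch of the audio counterpart (assumed members: ost->m_pSwrCtx, audioFrame->data)
int MediaRecorder::EncodeAudioFrame(AVOutputStream *ost) {
    AVCodecContext *c = ost->m_pCodecCtx;
    while (m_AudioFrameQueue.Empty() && !m_Exit) usleep(10 * 1000);
    AudioFrame *audioFrame = m_AudioFrameQueue.Pop();
    AVFrame *frame = nullptr;
    if (audioFrame) {
        //Wrap the 16-bit stereo PCM in the temporary frame
        ost->m_pTmpFrame->data[0] = audioFrame->data;
        //Convert interleaved S16 samples to the encoder's sample format (AAC usually wants fltp)
        swr_convert(ost->m_pSwrCtx, ost->m_pFrame->data, ost->m_pFrame->nb_samples,
                    (const uint8_t **) ost->m_pTmpFrame->data, ost->m_pTmpFrame->nb_samples);
        ost->m_pFrame->pts = ost->m_NextPts;
        ost->m_NextPts += ost->m_pFrame->nb_samples; //audio pts advances by sample count
        frame = ost->m_pFrame;
    }
    //Same send/receive loop as the video path; a null frame flushes the encoder
    int ret = avcodec_send_frame(c, frame);
    while (ret >= 0) {
        AVPacket pkt = {0};
        av_init_packet(&pkt);
        ret = avcodec_receive_packet(c, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
        WritePacket(m_FormatCtx, &c->time_base, ost->m_pStream, &pkt);
    }
    if (audioFrame) delete audioFrame;
    return ret == AVERROR_EOF ? 1 : 0;
}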
最后注意編碼過程中,音視頻時間戳對齊,防止出現視頻聲音播放結束畫面還沒結束的情況。
void MediaRecorder::StartVideoEncodeThread(MediaRecorder *recorder) {
AVOutputStream *vOs = &recorder->m_VideoStream;
AVOutputStream *aOs = &recorder->m_AudioStream;
while (!vOs->m_EncodeEnd) {
double videoTimestamp = vOs->m_NextPts * av_q2d(vOs->m_pCodecCtx->time_base);
double audioTimestamp = aOs->m_NextPts * av_q2d(aOs->m_pCodecCtx->time_base);
LOGCATE("MediaRecorder::StartVideoEncodeThread [videoTimestamp, audioTimestamp]=[%lf, %lf]", videoTimestamp, audioTimestamp);
if (av_compare_ts(vOs->m_NextPts, vOs->m_pCodecCtx->time_base,
aOs->m_NextPts, aOs->m_pCodecCtx->time_base) <= 0 || aOs->m_EncodeEnd) {
LOGCATE("MediaRecorder::StartVideoEncodeThread start queueSize=%d", recorder->m_VideoFrameQueue.Size());
//Keep the video and audio timestamps aligned. People are more sensitive to audio, so avoid the case where the sound has finished playing but the picture has not
if(audioTimestamp <= videoTimestamp && aOs->m_EncodeEnd) vOs->m_EncodeEnd = aOs->m_EncodeEnd;
vOs->m_EncodeEnd = recorder->EncodeVideoFrame(vOs);
} else {
LOGCATE("MediaRecorder::StartVideoEncodeThread start usleep");
//When the video timestamp is ahead of the audio timestamp, the video encode thread sleeps and waits for the audio to catch up
usleep(5 * 1000);
}
}
}
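The audio encode thread is presumably the mirror image of this loop, encoding audio only while its timestamp is not ahead of the video timestamp; a short sketch under that assumption:
//Sketch of the mirrored audio-side loop (assumed to be symmetric to StartVideoEncodeThread)
void MediaRecorder::StartAudioEncodeThread(MediaRecorder *recorder) {
    AVOutputStream *vOs = &recorder->m_VideoStream;
    AVOutputStream *aOs = &recorder->m_AudioStream;
    while (!aOs->m_EncodeEnd) {
        //Encode audio while its timestamp is behind (or equal to) the video timestamp
        if (av_compare_ts(aOs->m_NextPts, aOs->m_pCodecCtx->time_base,
                          vOs->m_NextPts, vOs->m_pCodecCtx->time_base) <= 0 || vOs->m_EncodeEnd) {
            aOs->m_EncodeEnd = recorder->EncodeAudioFrame(aOs);
        } else {
            //Audio is ahead of video: sleep briefly and let the video thread catch up
            usleep(5 * 1000);
        }
    }
}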
At this point the short-video recording feature is done. For reasons of space not all of the code is shown here; the complete implementation is in the project:
https://github.com/githubhaohao/LearnFFmpeg
Short-Video Recording with Filters
With the code from the previous section we already have a WeChat-like short-video recording feature, but plain video recording is clearly not the point of this article; there are already plenty of articles on FFmpeg video recording, so this one tries to do something different.
Building on the previous section, we now add filters to the short-video recording.
Short-video recording with filters
Referring to the diagram above: in the GL thread we first create an FBO, render the preview frame onto the texture attached to the FBO and apply the filter there, then use glReadPixels to read back the filtered frame and hand it to the encode thread, and finally render the FBO-attached texture to the screen. This was covered in detail in [the FFmpeg video player with filters].
Here we define a class GLCameraRender that performs both the off-screen rendering (filter pass) and the on-screen rendering of the preview frame; this part of the code is described in [FFmpeg video player rendering optimization].
class GLCameraRender: public VideoRender, public BaseGLRender{
public:
//Initialize with the preview frame width and height
virtual void Init(int videoWidth, int videoHeight, int *dstSize);
//Render one video frame
virtual void RenderVideoFrame(NativeImage *pImage);
virtual void UnInit();
//The three GLSurfaceView callbacks
virtual void OnSurfaceCreated();
virtual void OnSurfaceChanged(int w, int h);
virtual void OnDrawFrame();
static GLCameraRender *GetInstance();
static void ReleaseInstance();
//Update the transform matrix; Camera preview frames need to be rotated
virtual void UpdateMVPMatrix(int angleX, int angleY, float scaleX, float scaleY);
virtual void UpdateMVPMatrix(TransformMatrix * pTransformMatrix);
//Callback invoked after the filter has been applied; the filtered video frame is then pushed into the encode queue
void SetRenderCallback(void *ctx, OnRenderFrameCallback callback) {
m_CallbackContext = ctx;
m_RenderFrameCallback = callback;
}
//Load the LUT image used by a filter
void SetLUTImage(int index, NativeImage *pLUTImg);
//Load a fragment shader script from the Java layer
void SetFragShaderStr(int index, char *pShaderStr, int strSize);
private:
GLCameraRender();
virtual ~GLCameraRender();
//Create the FBO
bool CreateFrameBufferObj();
void GetRenderFrameFromFBO();
//Create or update the filter LUT texture
void UpdateExtTexture();
static std::mutex m_Mutex;
static GLCameraRender* s_Instance;
GLuint m_ProgramObj = GL_NONE;
GLuint m_FboProgramObj = GL_NONE;
GLuint m_TextureIds[TEXTURE_NUM];
GLuint m_VaoId = GL_NONE;
GLuint m_VboIds[3];
GLuint m_DstFboTextureId = GL_NONE;
GLuint m_DstFboId = GL_NONE;
NativeImage m_RenderImage;
glm::mat4 m_MVPMatrix;
TransformMatrix m_transformMatrix;
int m_FrameIndex;
vec2 m_ScreenSize;
OnRenderFrameCallback m_RenderFrameCallback = nullptr;
void *m_CallbackContext = nullptr;
//Support switching filters by swiping
volatile bool m_IsShaderChanged = false;
volatile bool m_ExtImageChanged = false;
char * m_pFragShaderBuffer = nullptr;
NativeImage m_ExtImage;
GLuint m_ExtTextureId = GL_NONE;
int m_ShaderIndex = 0;
mutex m_ShaderMutex;
};
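GetRenderFrameFromFBO() is where the glReadPixels readback described above happens. Below is a minimal sketch of it, assuming the filtered result is read back as RGBA, that a NativeImageUtil::AllocNativeImage helper exists alongside the FreeNativeImage call seen earlier, and that the callback takes the context pointer plus a NativeImage*; the project's actual code may differ in the details.
//Sketch of the FBO readback: read the filtered frame as RGBA and pass it to the encoder
void GLCameraRender::GetRenderFrameFromFBO() {
    if (m_RenderFrameCallback == nullptr) return;
    NativeImage nativeImage;
    nativeImage.format = IMAGE_FORMAT_RGBA;
    nativeImage.width = m_RenderImage.width;
    nativeImage.height = m_RenderImage.height;
    NativeImageUtil::AllocNativeImage(&nativeImage); //assumed helper that allocates the pixel buffer
    //Read from the FBO that the filter pass just rendered into (m_DstFboId is still bound)
    glReadPixels(0, 0, nativeImage.width, nativeImage.height,
                 GL_RGBA, GL_UNSIGNED_BYTE, nativeImage.ppPlane[0]);
    //Hand the filtered RGBA frame over to the encode queue via the callback
    m_RenderFrameCallback(m_CallbackContext, &nativeImage);
    NativeImageUtil::FreeNativeImage(&nativeImage);
}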
At the JNI layer we pass in the shader scripts for the different filters and the LUT images used by the LUT filters, so that in the Java layer we can switch between filters by swiping left and right on the screen.
extern "C"
JNIEXPORT void JNICALL
Java_com_byteflow_learnffmpeg_media_MediaRecorderContext_native_1SetFilterData(JNIEnv *env,
jobject thiz,
jint index,
jint format,
jint width,
jint height,
jbyteArray bytes) {
int len = env->GetArrayLength (bytes);
uint8_t* buf = new uint8_t[len];
env->GetByteArrayRegion(bytes, 0, len, reinterpret_cast<jbyte*>(buf));
MediaRecorderContext *pContext = MediaRecorderContext::GetContext(env, thiz);
if(pContext) pContext->SetLUTImage(index, format, width, height, buf);
delete[] buf;
env->DeleteLocalRef(bytes);
}
extern "C"
JNIEXPORT void JNICALL
Java_com_byteflow_learnffmpeg_media_MediaRecorderContext_native_1SetFragShader(JNIEnv *env,
jobject thiz,
jint index,
jstring str) {
int length = env->GetStringUTFLength(str);
const char* cStr = env->GetStringUTFChars(str, nullptr);
char *buf = static_cast<char *>(malloc(length + 1));
memcpy(buf, cStr, length + 1);
MediaRecorderContext *pContext = MediaRecorderContext::GetContext(env, thiz);
if(pContext) pContext->SetFragShader(index, buf, length + 1);
free(buf);
env->ReleaseStringUTFChars(str, cStr);
}
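On the render side, switching filters can be handled by caching the new fragment shader source and rebuilding the filter program on the GL thread during the next draw. The sketch below only illustrates that idea and is not the project's exact code; GLUtils::CreateProgram and kDefaultVertexShader are hypothetical helpers standing in for whatever program-building utilities the project actually uses.
//Illustrative only: cache the new shader, then rebuild the program on the GL thread
void GLCameraRender::SetFragShaderStr(int index, char *pShaderStr, int strSize) {
    std::unique_lock<std::mutex> lock(m_ShaderMutex);
    //Cache the incoming fragment shader source and mark it as pending
    if (m_pFragShaderBuffer) delete[] m_pFragShaderBuffer;
    m_pFragShaderBuffer = new char[strSize];
    memcpy(m_pFragShaderBuffer, pShaderStr, strSize);
    m_ShaderIndex = index;
    m_IsShaderChanged = true; //picked up by OnDrawFrame on the GL thread
}

void GLCameraRender::OnDrawFrame() {
    if (m_IsShaderChanged) {
        std::unique_lock<std::mutex> lock(m_ShaderMutex);
        //Rebuild the filter program with the newly selected fragment shader
        if (m_FboProgramObj != GL_NONE) glDeleteProgram(m_FboProgramObj);
        m_FboProgramObj = GLUtils::CreateProgram(kDefaultVertexShader, m_pFragShaderBuffer); //hypothetical helper
        m_IsShaderChanged = false;
    }
    //... off-screen filter pass into the FBO, GetRenderFrameFromFBO(), then on-screen draw ...
}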
Again, the complete implementation can be found in the project:
https://github.com/githubhaohao/LearnFFmpeg
Also, if you want more filters, check out the project OpenGLCamera2, which implements 30 camera filters and effects.
https://github.com/githubhaohao/OpenGLCamera2
-- END --