This article covers NDK Development (6): loading FFmpeg in Android Studio 3.2.1. We hope it offers a useful reference for developers tackling this problem; follow along with the steps below!
- Create an Android project with C++ support
- Copy the compiled FFmpeg .so files into the project's libs directory
- Copy the FFmpeg header files (the include directory) into src/main/cpp
- Configure Gradle
```groovy
android {
    // ...
    sourceSets {
        main {
            jniLibs.srcDirs = ['libs']
        }
    }
    defaultConfig {
        // ...
        ndk {
            abiFilters "armeabi-v7a"
        }
    }
}
```

Here jniLibs.srcDirs points the build at the libs directory from the step above, and abiFilters restricts packaging to the armeabi-v7a .so files we copied.
- Create the JNI utility class and generate its header file (e.g. with javah)
```java
package com.qufu.ffmpeg_player.utils;

public class VideoUtils {

    public native static void decode(String input, String output);

    // Note the load order
    static {
        System.loadLibrary("hxg_ffmpeg");
        System.loadLibrary("avutil-54");
        System.loadLibrary("swresample-1");
        System.loadLibrary("avcodec-56");
        System.loadLibrary("avformat-56");
        System.loadLibrary("swscale-3");
        System.loadLibrary("postproc-53");
        System.loadLibrary("avfilter-5");
        System.loadLibrary("avdevice-56");
    }
}
```
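If any of these libraries fails to load, the class throws an UnsatisfiedLinkError before decode() can ever run, and the default stack trace does not always name the offending file. Below is a minimal sketch of the same static block (identical load order) that logs which library failed; it assumes nothing beyond android.util.Log:

```java
static {
    String[] libs = {
            "hxg_ffmpeg", "avutil-54", "swresample-1", "avcodec-56",
            "avformat-56", "swscale-3", "postproc-53", "avfilter-5", "avdevice-56"
    };
    for (String lib : libs) {
        try {
            System.loadLibrary(lib);
        } catch (UnsatisfiedLinkError e) {
            // Name the offending library in logcat before rethrowing
            android.util.Log.e("VideoUtils", "Failed to load lib" + lib + ".so", e);
            throw e;
        }
    }
}
```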
```c
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class com_qufu_ffmpeg_player_utils_VideoUtils */

#ifndef _Included_com_qufu_ffmpeg_player_utils_VideoUtils
#define _Included_com_qufu_ffmpeg_player_utils_VideoUtils
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Class:     com_qufu_ffmpeg_player_utils_VideoUtils
 * Method:    decode
 * Signature: (Ljava/lang/String;Ljava/lang/String;)V
 */
JNIEXPORT void JNICALL Java_com_qufu_ffmpeg_1player_utils_VideoUtils_decode
        (JNIEnv *, jclass, jstring, jstring);

#ifdef __cplusplus
}
#endif
#endif
```

Note that JNI escapes the underscore in the package name ffmpeg_player as `_1`, which is why the exported symbol is Java_com_qufu_ffmpeg_1player_utils_VideoUtils_decode.
- JNI implementation
```c
//
// Created by yuanxx on 2019/9/27.
//
#include "com_qufu_ffmpeg_player_utils_VideoUtils.h"
#include <android/log.h>
// Encoding/decoding
#include "include/libavcodec/avcodec.h"
// Container format (demuxing)
#include "include/libavformat/avformat.h"
// Pixel processing
#include "include/libswscale/swscale.h"

#define LOGI(FORMAT,...) __android_log_print(ANDROID_LOG_INFO,"huangxiaoguo",FORMAT,##__VA_ARGS__);
#define LOGE(FORMAT,...) __android_log_print(ANDROID_LOG_ERROR,"huangxiaoguo",FORMAT,##__VA_ARGS__);

JNIEXPORT void JNICALL Java_com_qufu_ffmpeg_1player_utils_VideoUtils_decode
        (JNIEnv *env, jclass jcls, jstring input_jstr, jstring output_jstr)
{
    // The video file to decode (input) and the output file path
    const char* input_cstr = (*env)->GetStringUTFChars(env, input_jstr, NULL);
    const char* output_cstr = (*env)->GetStringUTFChars(env, output_jstr, NULL);

    // 1. Register all components
    av_register_all();

    // Format context: the global struct holding the container format information
    AVFormatContext *pFormatCtx = avformat_alloc_context();

    // 2. Open the input video file
    if (avformat_open_input(&pFormatCtx, input_cstr, NULL, NULL) != 0)
    {
        LOGE("%s", "Could not open the input video file");
        return;
    }

    // 3. Retrieve the stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
    {
        LOGE("%s", "Could not retrieve the stream information");
        return;
    }

    // Find the index of the video stream: walk every stream
    // (audio, video, subtitle) until we hit the video one
    int v_stream_idx = -1;
    int i = 0;
    // nb_streams: number of streams
    for (; i < pFormatCtx->nb_streams; i++)
    {
        // Check the stream type
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            v_stream_idx = i;
            break;
        }
    }
    if (v_stream_idx == -1)
    {
        LOGE("%s", "No video stream found\n");
        return;
    }

    // Only the codec id tells us which decoder to use,
    // so grab the codec context of the video stream
    AVCodecContext *pCodecCtx = pFormatCtx->streams[v_stream_idx]->codec;

    // 4. Look up the decoder matching the codec id in the codec context
    // (players such as Xunlei Kankan download a decoder on the fly when none is found)
    AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        LOGE("%s", "Decoder not found\n");
        return;
    }

    // 5. Open the decoder
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        LOGE("%s", "Could not open the decoder\n");
        return;
    }

    // Print the video information
    LOGI("Container format: %s", pFormatCtx->iformat->name);
    // duration is in AV_TIME_BASE (microsecond) units; cast for %d
    LOGI("Duration: %d", (int) (pFormatCtx->duration / 1000000));
    LOGI("Width x height: %d,%d", pCodecCtx->width, pCodecCtx->height);
    LOGI("Decoder name: %s", pCodec->name);

    // Prepare to read:
    // AVPacket stores one frame of compressed data (e.g. H.264)
    AVPacket *packet = (AVPacket*) av_malloc(sizeof(AVPacket));

    // AVFrame stores decoded pixel data (YUV)
    AVFrame *pFrame = av_frame_alloc();
    // The YUV420 output frame
    AVFrame *pFrameYUV = av_frame_alloc();

    // An AVFrame only gets real memory once pixel format and size are specified
    uint8_t *out_buffer = (uint8_t *) av_malloc(
            avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    // Initialize the buffer
    avpicture_fill((AVPicture *) pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P,
                   pCodecCtx->width, pCodecCtx->height);

    // Conversion (scaling) parameters: source width/height/format,
    // destination width/height/format
    struct SwsContext *sws_ctx = sws_getContext(
            pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
            pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
            SWS_BICUBIC, NULL, NULL, NULL);

    int got_picture, ret;
    FILE *fp_yuv = fopen(output_cstr, "wb+");
    int frame_count = 0;

    // 6. Read the compressed data frame by frame
    while (av_read_frame(pFormatCtx, packet) >= 0)
    {
        // Keep only the video packets (judge by the stream index)
        if (packet->stream_index == v_stream_idx)
        {
            // 7. Decode one frame of compressed data into pixel data
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0)
            {
                LOGE("%s", "Decoding error");
                return;
            }
            // got_picture is non-zero when a complete frame was decoded
            if (got_picture)
            {
                // Convert the AVFrame to YUV420 pixel format at the given size.
                // Args 2/6: input/output data planes
                // Args 3/7: input/output line sizes (AVFrames convert line by line)
                // Arg 4:    first row of the input to convert, starting from 0
                // Arg 5:    height of the input picture
                sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                          pFrameYUV->data, pFrameYUV->linesize);

                // Write the pixel frame to the YUV file.
                // data holds the decoded picture (or audio samples for audio).
                // Y is luma, U/V are chroma (subsampled, since the eye is more
                // sensitive to luma); U and V each have 1/4 as many samples as Y.
                int y_size = pCodecCtx->width * pCodecCtx->height;
                fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);
                fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);
                fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);

                frame_count++;
                LOGI("Decoded frame %d", frame_count);
            }
        }
        // Release the packet's resources
        av_free_packet(packet);
    }

    fclose(fp_yuv);

    (*env)->ReleaseStringUTFChars(env, input_jstr, input_cstr);
    (*env)->ReleaseStringUTFChars(env, output_jstr, output_cstr);

    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_free_context(pFormatCtx);

    LOGI("Decoding finished");
}
```
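Because the output is raw YUV420P, every frame occupies exactly width × height × 3/2 bytes (width × height for Y, plus a quarter of that each for U and V), so the size of output_1.yuv alone reveals how many frames were written. Below is a minimal sketch of that arithmetic as a hypothetical Java helper, run on a desktop after pulling the file off the device (e.g. with adb pull); the class name is invented, and the path, width, and height must be replaced with the values logged by the decoder above:

```java
import java.io.File;

// Hypothetical helper: derive the frame count of a raw YUV420P file from its size.
public class YuvInfo {

    public static long frameCount(File yuvFile, int width, int height) {
        // One YUV420P frame = Y plane (width*height bytes)
        //                   + U and V planes (width*height/4 bytes each)
        long frameSize = (long) width * height * 3 / 2;
        return yuvFile.length() / frameSize;
    }

    public static void main(String[] args) {
        // Assumed path and dimensions; substitute the real values
        File f = new File("output_1.yuv");
        System.out.println("frames: " + frameCount(f, 640, 480));
    }
}
```

The raw file can also be previewed with ffplay, e.g. `ffplay -f rawvideo -pixel_format yuv420p -video_size 640x480 output_1.yuv`, again substituting the real dimensions.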
- Configure CMakeLists.txt
```cmake
# For more information about using CMake with Android Studio, read the
# documentation: https://d.android.com/studio/projects/add-native-code.html

# Sets the minimum version of CMake required to build the native library.
cmake_minimum_required(VERSION 3.4.1)

find_library( log-lib
              log )

# Import path so the compiler can find the FFmpeg headers
include_directories(src/main/cpp/include)

set(my_lib_path ${CMAKE_SOURCE_DIR}/libs)

# Add each prebuilt third-party .so and point it at its absolute path
add_library( libavcodec
             SHARED
             IMPORTED )
set_target_properties( libavcodec
                       PROPERTIES IMPORTED_LOCATION
                       ${my_lib_path}/${ANDROID_ABI}/libavcodec-56.so )

add_library( libavdevice
             SHARED
             IMPORTED )
set_target_properties( libavdevice
                       PROPERTIES IMPORTED_LOCATION
                       ${my_lib_path}/${ANDROID_ABI}/libavdevice-56.so )

add_library( libavfilter
             SHARED
             IMPORTED )
set_target_properties( libavfilter
                       PROPERTIES IMPORTED_LOCATION
                       ${my_lib_path}/${ANDROID_ABI}/libavfilter-5.so )

add_library( libavformat
             SHARED
             IMPORTED )
set_target_properties( libavformat
                       PROPERTIES IMPORTED_LOCATION
                       ${my_lib_path}/${ANDROID_ABI}/libavformat-56.so )

add_library( libavutil
             SHARED
             IMPORTED )
set_target_properties( libavutil
                       PROPERTIES IMPORTED_LOCATION
                       ${my_lib_path}/${ANDROID_ABI}/libavutil-54.so )

add_library( libpostproc
             SHARED
             IMPORTED )
set_target_properties( libpostproc
                       PROPERTIES IMPORTED_LOCATION
                       ${my_lib_path}/${ANDROID_ABI}/libpostproc-53.so )

add_library( libswresample
             SHARED
             IMPORTED )
set_target_properties( libswresample
                       PROPERTIES IMPORTED_LOCATION
                       ${my_lib_path}/${ANDROID_ABI}/libswresample-1.so )

add_library( libswscale
             SHARED
             IMPORTED )
set_target_properties( libswscale
                       PROPERTIES IMPORTED_LOCATION
                       ${my_lib_path}/${ANDROID_ABI}/libswscale-3.so )

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++11")

# Add the source file we write ourselves
add_library( hxg_ffmpeg
             SHARED
             src/main/cpp/ffmpeg_palyer.c )

# Note: hxg_ffmpeg must come first, otherwise linking keeps failing
target_link_libraries( hxg_ffmpeg
                       libavutil
                       libswresample
                       libavcodec
                       libavformat
                       libswscale
                       libpostproc
                       libavfilter
                       libavdevice
                       ${log-lib} )
```
Take special note here: hxg_ffmpeg must be listed first in target_link_libraries, otherwise the build fails with link errors.
- Calling the native method
```java
// Example of a call to a native method
findViewById(R.id.sample_text).setOnClickListener(new View.OnClickListener() {
    @Override
    public void onClick(View v) {
        final String input = new File(Environment.getExternalStorageDirectory(), "input.mp4").getAbsolutePath();
        final String output = new File(Environment.getExternalStorageDirectory(), "output_1.yuv").getAbsolutePath();
        new Thread(new Runnable() {
            @Override
            public void run() {
                VideoUtils.decode(input, output);
            }
        }).start();
    }
});
```
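One caveat: the sample reads input.mp4 from and writes output_1.yuv to external storage, so besides declaring READ_EXTERNAL_STORAGE/WRITE_EXTERNAL_STORAGE in the manifest, devices on Android 6.0+ also need the permission granted at runtime before the button is tapped. A minimal sketch using the support-library helpers (the request code 1 is arbitrary), to be run in the Activity first:

```java
// Requires: android.Manifest, android.content.pm.PackageManager,
// android.support.v4.app.ActivityCompat, android.support.v4.content.ContextCompat
if (ContextCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE)
        != PackageManager.PERMISSION_GRANTED) {
    ActivityCompat.requestPermissions(this,
            new String[]{Manifest.permission.WRITE_EXTERNAL_STORAGE},
            1); // arbitrary request code; handle the result in onRequestPermissionsResult
}
```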
- Done
That wraps up this article on NDK Development (6): loading FFmpeg in Android Studio 3.2.1. We hope it proves helpful to fellow developers!