Running C/C++ Programs on Android the Linux Way

Published on 2023-07-28


  We want to implement a program that reads from the camera and displays the images on screen (an Android app).

# Typical development workflow
program: camera -> process -> display

# Our approach
program_1 (linux_system): camera (v4l2) -> process -> rtsp_send
program_2 (user_android): rtsp_receive -> display

  There are several common ways to run C/C++ code on Android: JNI, Qt, or compiling the C/C++ program directly for the device. I chose the third approach. This requires the following work:

  1. Download Mediamtx to serve the stream.
  2. Set up the cross-compilation toolchain.
  3. Port FFmpeg.
  4. Write the C/C++ application.

Keywords: Android, Linux-like, C/C++, V4L2, FFmpeg

Note: the FFmpeg libraries in this article are linked statically.


1. Cross-Compilation Toolchain

  Download the toolchain that matches your architecture from developer.arm.com. My target CPU is ARM64-V8 and the host build environment is x86_64, so the compiler I use is gcc-arm-10.3-2021.07-x86_64-aarch64-none-linux-gnu.

  Extract it anywhere on the host; whether to add it to your environment variables is a matter of personal preference.
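
  To sanity-check the toolchain before going further, a minimal test program (the file name and commands below are just for illustration) can be cross-compiled on the host and inspected with file:

/* hello.c -- toolchain sanity check (illustrative) */
#include <stdio.h>

int main(void)
{
    printf("hello from aarch64\n");
    return 0;
}

/*
 * Cross-compile statically and confirm the target architecture, e.g.:
 *   ${TOOLS}/bin/aarch64-none-linux-gnu-gcc -static hello.c -o hello
 *   file hello    # expect: ELF 64-bit LSB executable, ARM aarch64
 */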

2. Building FFmpeg

  Download FFmpeg yourself from FFmpeg.org or via git; the version I use is 4.3.1.

  Enter the FFmpeg directory and create a script that drives the configure step.

#!/bin/bash

export TOOLS=/home/xjt/Gogs/gcc-arm-10.3-2021.07-x86_64-aarch64-none-linux-gnu
export SYSROOT=${TOOLS}/aarch64-none-linux-gnu/libc
export PREFIX=./linux/arm64
export LD=${TOOLS}/bin/aarch64-none-linux-gnu-ld
export AR=${TOOLS}/bin/aarch64-none-linux-gnu-ar
export RANLIB=${TOOLS}/bin/aarch64-none-linux-gnu-ranlib

function build_lib
{
  ./configure \
  --disable-shared \
  --enable-static \
  --prefix=$PREFIX \
  --cross-prefix=${TOOLS}/bin/aarch64-none-linux-gnu- \
  --cc=${TOOLS}/bin/aarch64-none-linux-gnu-gcc \
  --nm=${TOOLS}/bin/aarch64-none-linux-gnu-nm \
  --ld=${LD} \
  --ar=${AR} \
  --ranlib=${RANLIB} \
  --target-os=linux \
  --arch=arm64 \
  --sysroot=$SYSROOT \
  --enable-runtime-cpudetect \
  --enable-cross-compile \
  --enable-pic \
  --enable-gpl \
  --enable-nonfree \
  --enable-yasm \
  --enable-muxer=mpeg4 \
  --enable-muxer=rtsp \
  --enable-encoder=mpeg4 \
  --enable-decoder=mpeg4 \
  --enable-network \
  --enable-protocol=tcp \
  --enable-pthreads \
  --disable-ffmpeg \
  --disable-ffplay \
  --disable-ffprobe \
  --disable-avdevice \
  --disable-doc \
  --extra-ldflags="-L${SYSROOT}/lib -L/home/xjt/Gogs/x264/install/lib -lc" \
  --extra-cflags="-I${SYSROOT}/usr/include -I/home/xjt/Gogs/x264/install/include -Wfatal-errors -Wno-deprecated"
  # --enable-libx264 \
  # --extra-libs=-ldl
}
build_lib

Notes:

  1. To statically link x264, uncomment --enable-libx264 and append --extra-libs=-ldl.
  2. RTSP requires --enable-network, --enable-protocol=tcp, and --enable-muxer=rtsp.

  Next, run the script, then make and make install:

./build.sh
make -j8
make install

Afterwards you will find the output under the PREFIX directory, typically include, lib, and share.
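
  To verify the result, a small check program (illustrative; cross-compile it against the freshly built static libraries) can print the configure line baked into libavformat and confirm that the rtsp muxer made it in:

/* check_build.c -- verify the static FFmpeg build (illustrative) */
#include <stdio.h>
#include <string.h>
#include <libavformat/avformat.h>

int main(void)
{
    /* avformat_configuration() returns the ./configure line used for this build */
    printf("configuration: %s\n", avformat_configuration());

    /* av_muxer_iterate() walks every muxer compiled into libavformat */
    const AVOutputFormat *ofmt = NULL;
    void *opaque = NULL;
    while ((ofmt = av_muxer_iterate(&opaque)) != NULL) {
        if (strcmp(ofmt->name, "rtsp") == 0) {
            printf("rtsp muxer: available\n");
            return 0;
        }
    }
    printf("rtsp muxer: missing\n");
    return 1;
}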

3. The Application

  The application's directory layout is as follows:

.
├── build
│   ├── bin
│   ├── CMakeCache.txt
│   ├── CMakeFiles
│   ├── cmake_install.cmake
│   └── Makefile
├── CMakeLists.txt
├── lib
│   ├── ffmpeg
│   └── x264
└── src
    ├── CMakeLists.txt
    ├── main.cpp
    ├── v4l2_stream.c
    └── v4l2_stream.h

3.1 ./CMakeLists.txt

CMAKE_MINIMUM_REQUIRED(VERSION 3.5)

# The cross compiler must be set before PROJECT() triggers compiler detection
SET(COMPILER_PATH "/home/xjt/Gogs/gcc-arm-10.3-2021.07-x86_64-aarch64-none-linux-gnu")
SET(CMAKE_C_COMPILER ${COMPILER_PATH}/bin/aarch64-none-linux-gnu-gcc)
SET(CMAKE_CXX_COMPILER ${COMPILER_PATH}/bin/aarch64-none-linux-gnu-g++)

PROJECT(RK3568_APP)

SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -s -O3 -lc -ldl -lrt -lm")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -s -O3 -lstdc++ -lc -ldl -lrt -lm")

# INCLUDE_DIRECTORIES(./lib/x264/include)
# LINK_DIRECTORIES(./lib/x264/lib)
INCLUDE_DIRECTORIES(./lib/ffmpeg/include)
LINK_DIRECTORIES(./lib/ffmpeg/lib)

SET(CMAKE_EXE_LINKER_FLAGS "-static")
ADD_SUBDIRECTORY(src bin)

3.2 ./src/CMakeLists.txt

FILE(
    GLOB_RECURSE SRC_LIST 
    ./*.c
    ./*.cpp
)

# Exe output path
SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)

ADD_EXECUTABLE(demo ${SRC_LIST})

TARGET_LINK_LIBRARIES(
    demo

    # x264

    # Static archives are resolved left to right,
    # so each library must precede the ones it depends on
    avformat
    avcodec
    swresample
    swscale
    avutil

    pthread
)

3.3 ./src/main.cpp

#include <chrono>
#include <thread>
#include <iostream>
#include <cstdio>

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/time.h>
#include <libswscale/swscale.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libswresample/swresample.h>
#include <pthread.h>
#include "v4l2_stream.h"

#ifdef __cplusplus 
}
#endif

// using namespace std;

static int video_is_eof;

#define STREAM_FRAME_RATE 60
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P  /* default pix_fmt */
#define VIDEO_CODEC_ID AV_CODEC_ID_MPEG4

/* video output */
static AVFrame *frame;
static AVPicture dst_picture;

uint8_t input[640 * 480 * 2];
uint8_t output[640 * 480 * 2];
uint8_t output_128[640 * 480];

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st = NULL;    /* stays NULL if the encoder cannot be found */

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        av_log(NULL, AV_LOG_ERROR, "Could not find encoder for '%s'.\n", avcodec_get_name(codec_id));
    }
    else {
        st = avformat_new_stream(oc, *codec);
        if (!st) {
            av_log(NULL, AV_LOG_ERROR, "Could not allocate stream.\n");
        }
        else {
            st->id = oc->nb_streams - 1;
            st->time_base.den = STREAM_FRAME_RATE;
            st->time_base.num = 1;

            c = st->codec;
            c->codec_id = codec_id;
            c->bit_rate = 16000000;
            c->width = 640;
            c->height = 480;
            c->time_base.den = STREAM_FRAME_RATE;
            c->time_base.num = 1;
            c->gop_size = 15;
            c->pix_fmt = STREAM_PIX_FMT;
        }
    }

    return st;
}

static int open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;
    // AVCodecContext *c = avcodec_alloc_context3(codec);

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n", avcodec_get_name(c->codec_id));
    }
    else {

        /* allocate and init a re-usable frame */
        frame = av_frame_alloc();
        if (!frame) {
            av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
            ret = -1;
        }
        else {
            frame->format = c->pix_fmt;
            frame->width = c->width;
            frame->height = c->height;

            /* Allocate the encoded raw picture. */
            ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Could not allocate picture.\n");
            }
            else {
                /* copy data and linesize picture pointers to frame */
                *((AVPicture *)frame) = dst_picture;
            }
        }
    }

    return ret;
}

int VIDEOWRAP_YUYV422_to_YUV420P(int imgWidth, int imgHeight, uint8_t* inputData, uint8_t* outputData, int inputLength)
{
    struct SwsContext* ctx = sws_getContext(imgWidth, imgHeight, AV_PIX_FMT_YUYV422,
                                            imgWidth, imgHeight, AV_PIX_FMT_YUV420P,
                                            SWS_BICUBIC, NULL, NULL, NULL);
    if (!ctx) {
        fprintf(stderr, "Failed to allocate SwsContext.\n");
        return -1; // Error handling
    }

    struct AVFrame* input_pFrame = av_frame_alloc();
    struct AVFrame* output_pFrame = av_frame_alloc();
    if (!input_pFrame || !output_pFrame) {
        fprintf(stderr, "Failed to allocate AVFrame.\n");
        if (input_pFrame) av_frame_free(&input_pFrame);
        if (output_pFrame) av_frame_free(&output_pFrame);
        sws_freeContext(ctx);
        return -1; // Error handling
    }

    av_image_fill_arrays(input_pFrame->data, input_pFrame->linesize, inputData, AV_PIX_FMT_YUYV422, imgWidth, imgHeight, 1);
    av_image_fill_arrays(output_pFrame->data, output_pFrame->linesize, outputData, AV_PIX_FMT_YUV420P, imgWidth, imgHeight, 1);

    sws_scale(ctx,
              (const uint8_t**)input_pFrame->data, input_pFrame->linesize, 0, imgHeight,
              output_pFrame->data, output_pFrame->linesize);

#if 0
    /* dump the converted frame for inspection; YUV420P is 3/4 the YUYV422 size */
    FILE *fl = fopen("img.yuv", "wb");
    if (NULL == fl)
    {
        fprintf(stderr, "open write file failed.");
    }
    else
    {
        fwrite(outputData, inputLength * 3 / 4, 1, fl);
        fclose(fl);
    }
#endif

    // Release allocated resources
    av_frame_free(&input_pFrame);
    av_frame_free(&output_pFrame);
    sws_freeContext(ctx);

    return 0;
}

/* Grab the latest captured frame and fill the output picture. */
static void fill_yuv_image(AVPicture *pict, int frame_index, int width, int height)
{
    (void)frame_index;
    (void)width;
    (void)height;

    /* assumes the capture thread has already mapped and filled a buffer */
    memcpy(input, v4l2_sp_globalBuffer[v4l2_sp_globalBufferIndex], v4l2_sp_globalBufferLength);

    VIDEOWRAP_YUYV422_to_YUV420P(
        640, 480,
        input,
        output,
        v4l2_sp_globalBufferLength
    );

    /* Only the luma plane is taken from the converted frame; both chroma
     * planes are forced to 128, so the streamed image is grayscale.
     * Each YUV420P chroma plane is width * height / 4 bytes. */
    memcpy(pict->data[0], output, 640 * 480);
    memcpy(pict->data[1], output_128, 640 * 480 / 4);
    memcpy(pict->data[2], output_128, 640 * 480 / 4);
}

static int write_video_frame(AVFormatContext *oc, AVStream *st, int64_t frameCount)
{
    int ret = 0;
    AVCodecContext *c = st->codec;
    // AVCodecContext *c = avcodec_alloc_context3(st->codecpar);

    fill_yuv_image(&dst_picture, frameCount, c->width, c->height);

    AVPacket pkt = { 0 };
    int got_packet;
    av_init_packet(&pkt);

    /* encode the image */
    frame->pts = frameCount;
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);

    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error encoding video frame.\n");
    }
    else {
        if (got_packet) {
            pkt.stream_index = st->index;
            av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
            ret = av_write_frame(oc, &pkt);

            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error while writing video frame.\n");
            }
        }
    }

    av_packet_unref(&pkt);
    return ret;
}

int main(int argc, char* argv[])
{
    printf("starting...\n");

    pthread_t v4l2_thread;
    pthread_create(&v4l2_thread, NULL, ThreadEntryPoint, NULL);
    pthread_detach(v4l2_thread);

    memset(output_128, 128, sizeof(output_128));

    const char *url = "rtsp://192.168.50.84:8554/stream";

    AVFormatContext *outContext;
    AVStream *video_st;
    AVCodec *video_codec;
    int ret = 0;
    int64_t frameCount = 0;

    av_log_set_level(AV_LOG_DEBUG);

    av_register_all();
    avformat_network_init();

    avformat_alloc_output_context2(&outContext, NULL, "rtsp", url);

    if (!outContext) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate an output context for '%s'.\n", url);
        return 0;
    }

    if (!outContext->oformat) {
        av_log(NULL, AV_LOG_FATAL, "Could not create the output format for '%s'.\n", url);
        return 0;
    }

    video_st = add_stream(outContext, &video_codec, VIDEO_CODEC_ID);

    /* Now that all the parameters are set, we can open the video codec and allocate the necessary encode buffers. */
    if (video_st) {
        av_log(NULL, AV_LOG_DEBUG, "Video stream codec %s.\n ", avcodec_get_name(video_st->codec->codec_id));

        ret = open_video(outContext, video_codec, video_st);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Open video stream failed.\n");
            return 0;
        }
    }
    else {
        av_log(NULL, AV_LOG_FATAL, "Add video stream for the codec '%s' failed.\n", avcodec_get_name(VIDEO_CODEC_ID));
        return 0;
    }

    av_dump_format(outContext, 0, url, 1);

    ret = avformat_write_header(outContext, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Failed to connect to RTSP server for '%s'.\n", url);
        return 0;
    }

    while (video_st) {
        frameCount++;

        ret = write_video_frame(outContext, video_st, frameCount);

        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Write video frame failed.\n", url);
            return 0;
        }
    }

    avcodec_close(video_st->codec);
    av_free(dst_picture.data[0]);
    av_frame_free(&frame);

    avformat_free_context(outContext);

    printf("finished.\n");

    return 0;
}

3.4 ./src/v4l2_stream.h

#include <stdio.h>  
#include <stdlib.h>  
#include <string.h>  
#include <assert.h>  
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <asm/types.h>
#include <sys/mman.h>
#include <linux/videodev2.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

#define _Video_Device_SP_ "/dev/video9"
#define _V4L2_SP_PIX_FMT_ V4L2_PIX_FMT_YUYV
#define _V4L2_SP_BUF_TYPE_ V4L2_BUF_TYPE_VIDEO_CAPTURE
#define _V4L2_SP_BUF_REQ_COUNT_ 60

extern const int v4l2_sp_capture_width;
extern const int v4l2_sp_capture_height;

// file descriptor of the v4l2 single-planar capture device
extern int v4l2_sp_fd;

// buffers mmap'd from the v4l2 queue, _V4L2_SP_BUF_REQ_COUNT_ blocks
extern uint8_t* v4l2_sp_globalBuffer[_V4L2_SP_BUF_REQ_COUNT_];

// length of one mmap'd buffer / bytes used in the current frame
extern int v4l2_sp_globalBufferLength;

// index of the most recently dequeued buffer
extern int v4l2_sp_globalBufferIndex;

// stop capture or not
extern bool v4l2_sp_captureLoopFlag;

int V4L2_SP_Streaming(void);

void* ThreadEntryPoint(void* arg);

#ifdef __cplusplus
}
#endif

3.5 ./src/v4l2_stream.c

#include "v4l2_stream.h"

const int v4l2_sp_capture_width = 640;
const int v4l2_sp_capture_height = 480;

int v4l2_sp_fd;
uint8_t* v4l2_sp_globalBuffer[_V4L2_SP_BUF_REQ_COUNT_];
int v4l2_sp_globalBufferLength;
int v4l2_sp_globalBufferIndex;

// per-buffer mmap lengths, tracked separately so munmap() still gets the
// mapped size after v4l2_sp_globalBufferLength is overwritten with bytesused
static unsigned int v4l2_sp_mapLength[_V4L2_SP_BUF_REQ_COUNT_];

bool v4l2_sp_captureLoopFlag = true;

int V4L2_SP_Streaming(void)
{
    /* Step 0 : Pre-Define */
    int retval = 0;     // V4L2_SP_Streaming return value
    int ret = 0;        // ioctl check

#if Print_Debug_Info
    printf("V4L2_SP_Streaming(): Step 1 Open Device\n");
#endif
    /* Step 1 : Open Device */
    v4l2_sp_fd = open(_Video_Device_SP_, O_RDWR);
    if (v4l2_sp_fd == -1)
    {
        perror("V4L2_SP_Streaming() open device");
        retval = 1;
        goto out_return;
    }

#if Print_Debug_Info
    printf("V4L2_SP_Streaming(): Step 2 Setting Capture Format\n");
#endif
    /* Step 2: Setting Capture Format */
    struct v4l2_format format;
    memset(&format, 0, sizeof(format));
    format.type = _V4L2_SP_BUF_TYPE_;
    format.fmt.pix.width = v4l2_sp_capture_width;
    format.fmt.pix.height = v4l2_sp_capture_height;
    format.fmt.pix.pixelformat = _V4L2_SP_PIX_FMT_;

    ret = ioctl(v4l2_sp_fd, VIDIOC_S_FMT, &format);             // set FMT
    if (ret == -1)
    {
        perror("V4L2_SP_Streaming() setting format");
        retval = 2;
        goto error_close;
    }

#if Print_Debug_Info
    printf("V4L2_SP_Streaming(): Step 3 Checking FMT Setting\n");
#endif
    /* Step 3: Checking FMT Setting */
    ret = ioctl(v4l2_sp_fd, VIDIOC_G_FMT, &format);             // get FMT
    if (ret == -1)
    {
        perror("V4L2_SP_Streaming() checking format");
        retval = 3;
        goto error_close;
    }

    if (format.fmt.pix.pixelformat == _V4L2_SP_PIX_FMT_)        // set FMT == get FMT ?
    {
        printf("ioctl VIDIOC_S_FMT sucessful\n");
    }
    else
    {
        printf("ioctl VIDIOC_S_FMT failed\n");
        retval = 4;
        goto error_close;
    }

#if Print_Debug_Info
    printf("V4L2_SP_Streaming(): Step 4 Request Buffer\n");
#endif
    /* Step 4: Request Buffer */
    struct v4l2_requestbuffers reqbuf;
    memset(&reqbuf, 0, sizeof(reqbuf));
    reqbuf.count = _V4L2_SP_BUF_REQ_COUNT_;
    reqbuf.type = _V4L2_SP_BUF_TYPE_;
    reqbuf.memory = V4L2_MEMORY_MMAP;
    ret = ioctl(v4l2_sp_fd, VIDIOC_REQBUFS, &reqbuf);
    if (-1 == ret)
    {
        perror("V4L2_SP_Streaming() reqeuset buff");
        retval = 1;
        goto error_close;
    }

#if Print_Debug_Info
    printf("V4L2_SP_Streaming(): Step 5 mmap Buffer\n");
#endif
    /* Step 5: mmap Buffer */
    struct v4l2_buffer buff;
    memset(&buff, 0, sizeof(buff));
    buff.type = _V4L2_SP_BUF_TYPE_;
    buff.memory = V4L2_MEMORY_MMAP;
    for (int i = 0; i < _V4L2_SP_BUF_REQ_COUNT_; ++i)
    {
        // query buf
        buff.index = i;
        ret = ioctl(v4l2_sp_fd, VIDIOC_QUERYBUF, &buff);
        if (ret == -1)
        {
            perror("V4L2_SP_Streaming() query");
            retval = 2;
            goto error_close;
        }

        printf("buf[%d]: len = %d offset: %d\n", i, buff.length, buff.m.offset);

        // mmap; remember each mapping length for cleanup
        v4l2_sp_globalBuffer[i] = mmap(NULL, buff.length, PROT_READ, MAP_SHARED, v4l2_sp_fd, buff.m.offset);
        v4l2_sp_mapLength[i] = buff.length;
        v4l2_sp_globalBufferLength = buff.length;
        if (MAP_FAILED == v4l2_sp_globalBuffer[i])
        {
            perror("V4L2_SP_Streaming() mmap");
            retval = 3;
            goto error_munmap;
        }

        // queue
        ret = ioctl(v4l2_sp_fd, VIDIOC_QBUF, &buff);
        if (-1 == ret)
        {
            perror("V4L2_SP_Streaming() queue");
            retval = 4;
            goto error_munmap;
        }
    }

#if Print_Debug_Info
    printf("V4L2_SP_Streaming(): Step 6 Start Streaming\n");
#endif
    /* Step 6: Start Streaming */
    int on = _V4L2_SP_BUF_TYPE_;
    ret = ioctl(v4l2_sp_fd, VIDIOC_STREAMON, &on);
    if (-1 == ret)
    {
        perror("V4L2_SP_Streaming() VIDIOC_STREAMON");
        retval = 5;
        goto error_munmap;
    }

#if Print_Debug_Info
    printf("V4L2_SP_Streaming(): Step 7 Capture\n");
#endif
    /* Step 7: Capture */
    while (v4l2_sp_captureLoopFlag)
    {
        // deque
        ret = ioctl(v4l2_sp_fd, VIDIOC_DQBUF, &buff);
        if (ret != -1)
        {
            // update global vars
            v4l2_sp_globalBufferIndex = buff.index;
            v4l2_sp_globalBufferLength = buff.bytesused;

            // queue
            ret = ioctl(v4l2_sp_fd, VIDIOC_QBUF, &buff);
        }
    }

#if Print_Debug_Info
    printf("V4L2_SP_Streaming(): Exit\n");
#endif
error_munmap:
    for (int i = 0; i < _V4L2_SP_BUF_REQ_COUNT_; ++i)
    {
        if (v4l2_sp_globalBuffer[i] != NULL && v4l2_sp_globalBuffer[i] != MAP_FAILED)
            munmap(v4l2_sp_globalBuffer[i], v4l2_sp_mapLength[i]);
    }

error_close:
    close(v4l2_sp_fd);

out_return:
    return retval;
}

void* ThreadEntryPoint(void* arg) {
    int result = V4L2_SP_Streaming();
    // return the int result through pthread_exit()
    pthread_exit((void*)(intptr_t)result);
    return NULL;
}

3.6 Caveats

  There is no locking between fill_yuv_image() and the V4L2 capture loop, so watch for I/O races on the shared buffers; section 4.2 discusses a fix.

4. Preview and Optimization

4.1 Preview

# on Windows 10
.\ffplay.exe rtsp://192.168.50.84:8554/stream

  For latency that accumulates on the player side, -flags low_delay -vf setpts=0 worked for me; for other situations see: stackoverflow

4.2 Sender-Side Issues

  The sender has the following two issues:

  1. The mutual-exclusion issue described in section 3.6.
  2. CPU scheduling can stall the V4L2 capture queue.

  Issue 1 must be addressed in the application code. I "avoid" it by requesting 60 buffers, which makes it unlikely that the encoder reads a buffer while V4L2 is refilling it; avoiding it completely requires a mutex and a condition-variable broadcast, as sketched below.
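
  A minimal sketch of such a fix (the names here are hypothetical, not part of the current code): the capture thread copies each dequeued frame into a staging buffer under a mutex and broadcasts on a condition variable; the encoder waits until a fresh frame is available before reading it.

/* Illustrative sketch only -- these names do not exist in the project. */
#include <pthread.h>
#include <stdint.h>
#include <string.h>

static pthread_mutex_t frame_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  frame_ready = PTHREAD_COND_INITIALIZER;
static uint8_t shared_frame[640 * 480 * 2];
static int     frame_fresh = 0;

/* capture thread: call after VIDIOC_DQBUF succeeds */
void publish_frame(const uint8_t *buf, size_t len)
{
    pthread_mutex_lock(&frame_lock);
    memcpy(shared_frame, buf, len);
    frame_fresh = 1;
    pthread_cond_broadcast(&frame_ready);   /* wake every waiting consumer */
    pthread_mutex_unlock(&frame_lock);
}

/* encoder thread: call instead of reading v4l2_sp_globalBuffer directly */
void consume_frame(uint8_t *dst, size_t len)
{
    pthread_mutex_lock(&frame_lock);
    while (!frame_fresh)                    /* guard against spurious wakeups */
        pthread_cond_wait(&frame_ready, &frame_lock);
    memcpy(dst, shared_frame, len);
    frame_fresh = 0;
    pthread_mutex_unlock(&frame_lock);
}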

  For issue 2, use taskset to set the process's affinity mask:

taskset -a -p e <pid>   # repeat for every other process: confine to CPUs 1-3 (mask 0xE)
taskset -a 1 demo       # run demo pinned to CPU0 (mask 0x1)

The commands above pin demo to CPU0, while the remaining processes may only run on CPUs 1-3.
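
  Alternatively, the process can pin itself at startup with sched_setaffinity(2); a minimal sketch (hypothetical helper, not part of the current code):

/* Illustrative: pin the calling process to CPU0 (taskset mask 0x1). */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int pin_to_cpu0(void)
{
    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(0, &set);

    /* pid 0 means "the calling process" */
    if (sched_setaffinity(0, sizeof(set), &set) != 0) {
        perror("sched_setaffinity");
        return -1;
    }
    return 0;
}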