NDK OpenCV Face Detection

This installment of the NDK series is a hands-on walkthrough of face detection with OpenCV: using the OpenCV C++ library, we locate faces in the camera preview and draw a red rectangle around every face that is found.

Demo result:

Implementation flow:

1. Initialize CameraX, bind an ImageAnalysis analyzer, and listen for camera frames;

2. Copy OpenCV's face detection training data (lbpcascade_frontalface) to local storage;

3. Initialize the FaceTracker bridge class and pass the path of the training data down to the Native layer;

4. In the Native layer, load the training data and create the face detection tracker Ptr<DetectionBasedTracker> tracker;

5. Through the FaceTracker bridge, ask the Native tracker to start face tracking;

6. Through the FaceTracker bridge, create the Native render window ANativeWindow and bind it to the SurfaceView;

7. Grab each camera frame, pass it to the Native layer, locate the faces, draw the face rectangles, and render the result to the screen.

This article covers:

1. Importing the OpenCV library;

2. Using CameraX in the Java layer;

3. Face detection and rendering in the Native layer.

Source code:

NdkHeadTest: NDK OpenCV face detection

I. Importing the OpenCV Library

1) Copy the OpenCV source/header files into the cpp directory, and copy the dynamic library (.so) files into the jniLibs directory:

2) In the CMakeLists.txt file, add the source files and link the library files; a sketch of the relevant configuration is shown below.
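A minimal sketch of the CMakeLists.txt configuration, assuming the OpenCV headers were copied into cpp/include, the native sources are native-lib.cpp and FaceTracker.cpp, and the prebuilt libopencv_java4.so sits under jniLibs/<ABI>; these names are illustrative and may differ from the actual project:

cmake_minimum_required(VERSION 3.10.2)
project("ndkhead")

# OpenCV headers copied into the cpp directory (path is an assumption)
include_directories(${CMAKE_SOURCE_DIR}/include)

# Native sources used in this article (file names are assumptions)
add_library(native-lib SHARED
        native-lib.cpp
        FaceTracker.cpp)

find_library(log-lib log)

# Link the prebuilt OpenCV shared library from jniLibs, plus the NDK libraries used here
# (android for ANativeWindow, log for logging)
target_link_libraries(native-lib
        ${CMAKE_SOURCE_DIR}/../jniLibs/${ANDROID_ABI}/libopencv_java4.so
        android
        ${log-lib})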

II. Using CameraX in the Java Layer

1) Initialize CameraX, bind an ImageAnalysis analyzer, and listen for camera frames:

private void initCamera() {
	/**
	 *  CameraX
	 */
	cameraProviderFuture = ProcessCameraProvider.getInstance(this);
	cameraProviderFuture.addListener(() -> {
		try {
			ProcessCameraProvider cameraProvider = cameraProviderFuture.get();
			bindAnalysis(cameraProvider);
		} catch (Exception e) {
			e.printStackTrace();
		}

	}, ContextCompat.getMainExecutor(this));
}

private void bindAnalysis(ProcessCameraProvider cameraProvider) {
	// STRATEGY_KEEP_ONLY_LATEST: non-blocking mode, the analyzer always receives the latest frame
	// STRATEGY_BLOCK_PRODUCER: blocking mode, slow analysis will lower the frame rate
	// ImageAnalysis: delivers camera frames for analysis
	ImageAnalysis imageAnalysis =
			new ImageAnalysis.Builder()
					.setTargetResolution(new Size(640, 480))
					.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
					.build();
	imageAnalysis.setAnalyzer(ContextCompat.getMainExecutor(this), this);
	cameraProvider.unbindAll();
	// Bind to the Activity's lifecycle
	cameraProvider.bindToLifecycle(this,
			CameraSelector.DEFAULT_BACK_CAMERA, imageAnalysis);
}

2) Camera frames are delivered through the ImageAnalysis.Analyzer callback analyze(@NonNull ImageProxy image); the Activity implements ImageAnalysis.Analyzer, which is why this is passed to setAnalyzer above:

@Override
public void analyze(@NonNull ImageProxy image) {
	byte[] bytes = Utils.getDataFromImage(image);
	// Locate the faces and render the camera frame
	faceTracker.detect(bytes, image.getWidth(), image.getHeight(), image.getImageInfo().getRotationDegrees());
	image.close();
}
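The helper Utils.getDataFromImage is not shown in this article. The sketch below illustrates what such a helper might do, assuming the analyzer delivers YUV_420_888 frames and the Native layer expects a tightly packed I420 buffer (all Y bytes, then all U, then all V); the actual implementation in the repository may differ:

// Hypothetical sketch: repack a YUV_420_888 ImageProxy into a tightly packed I420 byte array.
// Requires java.nio.ByteBuffer and androidx.camera.core.ImageProxy.
public static byte[] getDataFromImage(ImageProxy image) {
	int width = image.getWidth();
	int height = image.getHeight();
	byte[] out = new byte[width * height * 3 / 2];
	ImageProxy.PlaneProxy[] planes = image.getPlanes();
	int offset = 0;
	for (int i = 0; i < planes.length; i++) {
		ByteBuffer buffer = planes[i].getBuffer();
		int rowStride = planes[i].getRowStride();
		int pixelStride = planes[i].getPixelStride();
		int planeWidth = (i == 0) ? width : width / 2;
		int planeHeight = (i == 0) ? height : height / 2;
		byte[] row = new byte[rowStride];
		for (int r = 0; r < planeHeight; r++) {
			buffer.position(r * rowStride);
			if (pixelStride == 1) {
				// Rows are already tightly packed: copy planeWidth bytes directly
				buffer.get(out, offset, planeWidth);
				offset += planeWidth;
			} else {
				// U/V samples are interleaved: pick every pixelStride-th byte
				int len = Math.min(rowStride, buffer.remaining());
				buffer.get(row, 0, len);
				for (int c = 0; c < planeWidth; c++) {
					out[offset++] = row[c * pixelStride];
				}
			}
		}
	}
	return out;
}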

III. Face Detection and Rendering in the Native Layer

1) Copy OpenCV's face detection training data lbpcascade_frontalface.xml to local storage:

String path = Utils.copyAsset2Dir(this, "lbpcascade_frontalface.xml");
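Utils.copyAsset2Dir is likewise not shown; it only needs to copy the asset into app-private storage and return its absolute path so the Native layer can open it as a regular file. A minimal sketch, assuming the file is written to getFilesDir():

// Hypothetical sketch: copy an asset into the app's files directory and return its path.
// Requires android.content.Context, java.io.File, java.io.FileOutputStream,
// java.io.IOException and java.io.InputStream.
public static String copyAsset2Dir(Context context, String assetName) {
	File dst = new File(context.getFilesDir(), assetName);
	if (!dst.exists()) {
		try (InputStream in = context.getAssets().open(assetName);
			 FileOutputStream out = new FileOutputStream(dst)) {
			byte[] buffer = new byte[4096];
			int len;
			while ((len = in.read(buffer)) != -1) {
				out.write(buffer, 0, len);
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
	return dst.getAbsolutePath();
}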

2) Initialize the FaceTracker bridge class and pass the training data path down to the Native layer:

faceTracker = new FaceTracker(path);

public FaceTracker(String model) {
	mNativeObj = nativeCreateObject(model);
}

private static native long nativeCreateObject(String model);
	

The Native layer receives the training data path and constructs the native FaceTracker (FaceTracker.cpp):

extern "C"
JNIEXPORT jlong JNICALL
Java_com_ndk_head_FaceTracker_nativeCreateObject(JNIEnv *env, jclass clazz, jstring model_) {
    // Convert the jstring model path into a C string
    const char *model = env->GetStringUTFChars(model_, 0);
    FaceTracker *tracker = new FaceTracker(model);
    env->ReleaseStringUTFChars(model_, model);
    // Return the native tracker's address to the Java layer as a handle
    return (jlong) tracker;
}	
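The FaceTracker class itself is declared in a header that the article does not list. A minimal sketch of what FaceTracker.h might look like, inferred from the members used in the rest of the code (tracker, window, mutex):

// Hypothetical sketch of FaceTracker.h, inferred from the members used below.
#include <opencv2/opencv.hpp>
#include <android/native_window.h>
#include <pthread.h>

class FaceTracker {
public:
    FaceTracker(const char *model);
    // Replace the render window, releasing any previously held one
    void setANativeWindow(ANativeWindow *window);
    // Draw an RGBA frame to the render window
    void draw(cv::Mat img);

    cv::Ptr<cv::DetectionBasedTracker> tracker; // OpenCV face detection tracker
    ANativeWindow *window = 0;                  // Render target bound to the SurfaceView
    pthread_mutex_t mutex;                      // Guards access to window
};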

3) The Native layer loads the training data and creates the face detection tracker Ptr<DetectionBasedTracker> tracker:

FaceTracker::FaceTracker(const char *model) {
    // Initialize the mutex that guards the render window
    pthread_mutex_init(&mutex, 0);
    // Create two detector adapters (main detector + tracking detector), both backed by the cascade model
    Ptr<CascadeDetectorAdapter> mainDetector = makePtr<CascadeDetectorAdapter>(
            makePtr<CascadeClassifier>(model));
    Ptr<CascadeDetectorAdapter> trackingDetector = makePtr<CascadeDetectorAdapter>(
            makePtr<CascadeClassifier>(model));
    // Create the face detection tracker from the two adapters
    DetectionBasedTracker::Parameters DetectorParams;
    tracker = makePtr<DetectionBasedTracker>(mainDetector, trackingDetector, DetectorParams);
}
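CascadeDetectorAdapter used above is not part of OpenCV's public API; it is a small adapter class, modeled on the one in OpenCV's Android face-detection sample, that exposes a CascadeClassifier through the DetectionBasedTracker::IDetector interface. A sketch along those lines:

// Adapter that lets DetectionBasedTracker drive a CascadeClassifier
// (modeled on CascadeDetectorAdapter from OpenCV's Android face-detection sample).
class CascadeDetectorAdapter : public cv::DetectionBasedTracker::IDetector {
public:
    CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector)
            : IDetector(), Detector(detector) {}

    // Called by the tracker: run the cascade and collect the detected rectangles
    void detect(const cv::Mat &image, std::vector<cv::Rect> &objects) {
        Detector->detectMultiScale(image, objects, scaleFactor, minNeighbours, 0,
                                   minObjSize, maxObjSize);
    }

    virtual ~CascadeDetectorAdapter() {}

private:
    cv::Ptr<cv::CascadeClassifier> Detector;
};

scaleFactor, minNeighbours, minObjSize and maxObjSize are protected members inherited from IDetector.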

4) Through the FaceTracker bridge, start face tracking on the Native tracker:

faceTracker.start();

public void start() {
	nativeStart(mNativeObj);
}
	
private static native void nativeStart(long thiz);	

The Native layer starts face tracking:

extern "C"
JNIEXPORT void JNICALL
Java_com_ndk_head_FaceTracker_nativeStart(JNIEnv *env, jclass clazz, jlong thiz) {
    if (thiz != 0) {
        FaceTracker *tracker = reinterpret_cast<FaceTracker *>(thiz);
        tracker->tracker->run();
    }
}

5) Through the FaceTracker bridge, create the Native render window ANativeWindow and bind it to the SurfaceView:

@Override
public void surfaceChanged(@NonNull SurfaceHolder holder, int format, int width, int height) {
	if (faceTracker != null)
		faceTracker.setSurface(holder.getSurface());
}

public void setSurface(Surface surface) {
	nativeSetSurface(mNativeObj, surface);
}

private static native void nativeSetSurface(long thiz, Surface surface);

The Native layer creates the ANativeWindow from the Surface and binds it to the SurfaceView:

extern "C"
JNIEXPORT void JNICALL
Java_com_ndk_head_FaceTracker_nativeSetSurface(JNIEnv *env, jclass clazz, jlong thiz,
                                               jobject surface) {
    if (thiz != 0) {
        FaceTracker *tracker = reinterpret_cast<FaceTracker *>(thiz);
        if (!surface) {
            tracker->setANativeWindow(0);
            return;
        }
        tracker->setANativeWindow(ANativeWindow_fromSurface(env, surface));
    }
}
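FaceTracker::setANativeWindow is not listed in the article; a minimal sketch of what it could look like, releasing any previously held window under the mutex before storing the new one:

// Hypothetical sketch of FaceTracker::setANativeWindow.
void FaceTracker::setANativeWindow(ANativeWindow *win) {
    pthread_mutex_lock(&mutex);
    if (window) {
        // Drop the reference obtained via ANativeWindow_fromSurface on the old window
        ANativeWindow_release(window);
        window = 0;
    }
    window = win;
    pthread_mutex_unlock(&mutex);
}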

6) Grab camera frames, pass them to the Native layer, locate the faces, draw the face rectangles, and render the result to the screen:

@Override
public void analyze(@NonNull ImageProxy image) {
	byte[] bytes = Utils.getDataFromImage(image);
	// Locate the faces and render the camera frame
	faceTracker.detect(bytes, image.getWidth(), image.getHeight(), image.getImageInfo().getRotationDegrees());
	image.close();
}

public void detect(byte[] inputImage, int width, int height, int rotationDegrees) {
	nativeDetect(mNativeObj, inputImage, width, height, rotationDegrees);
}

private static native void nativeDetect(long thiz, byte[] inputImage, int width, int height, int rotationDegrees);

The Native layer detects the faces and draws a red rectangle around each one:

extern "C"
JNIEXPORT void JNICALL
Java_com_ndk_head_FaceTracker_nativeDetect(JNIEnv *env, jclass clazz, jlong thiz,
                                           jbyteArray inputImage_, jint width, jint height,
                                           jint rotationDegrees) {
    if (thiz == 0) {
        return;
    }
    FaceTracker *tracker = reinterpret_cast<FaceTracker *>(thiz);
    // Get a pointer to the raw frame bytes (tightly packed I420)
    jbyte *inputImage = env->GetByteArrayElements(inputImage_, 0);
    // Wrap the I420 data in a single-channel Mat: an I420 frame is height * 3/2 rows of width bytes
    Mat src(height * 3 / 2, width, CV_8UC1, inputImage);
    // Convert YUV (I420) to RGBA
    cvtColor(src, src, CV_YUV2RGBA_I420);
    // Rotate the frame so it is upright
    if (rotationDegrees == 90) {
        rotate(src, src, ROTATE_90_CLOCKWISE);
    } else if (rotationDegrees == 270) {
        rotate(src, src, ROTATE_90_COUNTERCLOCKWISE);
    }
    Mat gray; // Grayscale copy used for detection
    // Convert to grayscale
    cvtColor(src, gray, CV_RGBA2GRAY);
    // Equalize the histogram to improve contrast
    equalizeHist(gray, gray);
    // Run face detection/tracking on the grayscale frame
    tracker->tracker->process(gray);

    std::vector<Rect> faces; // Detected face rectangles
    tracker->tracker->getObjects(faces);

    for (Rect face : faces) {
        // Draw a rectangle around each detected face; the Mat is RGBA, so Scalar(255, 0, 0) is red
        rectangle(src, face, Scalar(255, 0, 0));
    }

    tracker->draw(src);
    env->ReleaseByteArrayElements(inputImage_, inputImage, 0);
}

Finally, the annotated frame is drawn to the screen:

void FaceTracker::draw(Mat img) {
    pthread_mutex_lock(&mutex);
    do {
        if (!window) {
            break;
        }
        // Configure the window buffer geometry to match the frame (RGBA8888)
        ANativeWindow_setBuffersGeometry(window, img.cols, img.rows,
                                         WINDOW_FORMAT_RGBA_8888);
        // Lock the window to obtain the buffer we will write into
        ANativeWindow_Buffer buffer;
        if (ANativeWindow_lock(window, &buffer, 0)) {
            // Locking failed: drop the window (ANATIVEWINDOW_RELEASE is the project's release helper macro)
            ANATIVEWINDOW_RELEASE(window);
            break;
        }
        // Copy the frame data into the window buffer
        uint8_t *dstData = static_cast<uint8_t *>(buffer.bits);
        int dstlineSize = buffer.stride * 4;
        // Source RGBA pixel data from the Mat
        uint8_t *srcData = img.data;
        int srclineSize = img.cols * 4;
        // Copy row by row so the window buffer's stride is respected
        for (int i = 0; i < buffer.height; ++i) {
            memcpy(dstData + i * dstlineSize, srcData + i * srclineSize, srclineSize);
        }
        // Post the buffer to the screen
        ANativeWindow_unlockAndPost(window);
    } while (0);
    pthread_mutex_unlock(&mutex);
}

That completes the OpenCV face detection project. Eye detection and similar tasks follow the same pattern and can also be built on OpenCV; a follow-up article will combine OpenCV with OpenGL to implement a big-eye beautification effect.

Source code:

NdkHeadTest: NDK OpenCV face detection
