Commit 719611fb authored by 姜天宇

feat(v1.0.1): add RK3568 video-stream algorithm

parent ad40cf69
......@@ -8,8 +8,15 @@
/.idea/navEditor.xml
/.idea/assetWizardSettings.xml
.DS_Store
/build
/captures
.externalNativeBuild
.cxx
local.properties
/build
/core/build/
/common/build/
/camera/build/
/data-local/build/
/data-remote/build/
/module-demo/build/
/.idea/
......@@ -14,6 +14,10 @@ android {
versionCode 1000100
versionName "1.0.1"
ndk {
abiFilters 'armeabi-v7a'
}
testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
javaCompileOptions {
......
......@@ -29,6 +29,7 @@
android:supportsRtl="true"
android:theme="@style/Theme.CateringDetect"
android:usesCleartextTraffic="true"
android:hardwareAccelerated="true"
tools:targetApi="31">
<activity
android:name=".ui.SplashActivity"
......
......@@ -3,13 +3,20 @@ package com.wmdigit.camera;
import android.annotation.SuppressLint;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.SurfaceTexture;
import android.hardware.camera2.CaptureRequest;
import android.opengl.GLES11Ext;
import android.util.Range;
import android.util.Size;
import androidx.camera.camera2.interop.Camera2Interop;
import androidx.camera.core.Camera;
import androidx.camera.core.CameraInfo;
import androidx.camera.core.CameraSelector;
import androidx.camera.core.ImageAnalysis;
import androidx.camera.core.ImageProxy;
import androidx.camera.core.Preview;
import androidx.camera.core.UseCaseGroup;
import androidx.camera.lifecycle.ProcessCameraProvider;
import androidx.core.content.ContextCompat;
import androidx.lifecycle.LifecycleOwner;
......@@ -50,7 +57,7 @@ public class CameraxController {
/**
* Image analysis thread
*/
private final ExecutorService executors = Executors.newFixedThreadPool(1);
private final ExecutorService executors = Executors.newSingleThreadExecutor();
/**
* YUV-to-RGB converter
*/
......@@ -60,6 +67,8 @@ public class CameraxController {
*/
private OnImageAnalyzeListener onImageAnalyzeListener;
// private SurfaceTexture mColorSurfaceTexture = new SurfaceTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
public static CameraxController getInstance(Context context) {
if (instance == null){
synchronized (CameraxController.class){
......@@ -124,6 +133,7 @@ public class CameraxController {
}, ContextCompat.getMainExecutor(context));
}
int count = 0;
/**
* Create the image analyzer
* @return
......@@ -140,11 +150,14 @@ public class CameraxController {
imageAnalysis.setAnalyzer(executors, imageProxy->{
long startTime = System.currentTimeMillis();
// Process the image
analyzeImage(imageProxy);
count++;
// Only analyze every 5th frame to reduce the processing load
if (count % 5 == 0) {
analyzeImage(imageProxy);
}
imageProxy.close();
// Sleep to pace the analysis interval
long costTime = System.currentTimeMillis() - startTime;
System.out.println("Processing time: " + costTime);
/*long costTime = System.currentTimeMillis() - startTime;
// System.out.println("Processing time: " + costTime);
long leftTime = INTERVAL_FRAMES_ANALYZE - costTime;
if (leftTime <= INTERVAL_FRAMES_ANALYZE && leftTime >= 0){
try {
......@@ -152,7 +165,7 @@ public class CameraxController {
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}*/
});
return imageAnalysis;
......
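The Camera2Interop, CaptureRequest, Range and Size imports added above suggest that the (collapsed) builder code pins a target resolution and a capture frame-rate range. Below is a minimal sketch of that pattern; the AnalysisFactory name, the 640x480 resolution and the 15 fps range are illustrative assumptions, not values taken from this commit.

import android.hardware.camera2.CaptureRequest;
import android.util.Range;
import android.util.Size;
import androidx.camera.camera2.interop.Camera2Interop;
import androidx.camera.core.ImageAnalysis;

public class AnalysisFactory {

    // Build an ImageAnalysis use case with a fixed capture frame-rate range (sketch only)
    @SuppressWarnings("UnsafeOptInUsageError")
    public static ImageAnalysis create() {
        ImageAnalysis.Builder builder = new ImageAnalysis.Builder()
                .setTargetResolution(new Size(640, 480))
                .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST);
        // Camera2 interop lets CameraX pass raw capture-request options to the camera
        new Camera2Interop.Extender<>(builder)
                .setCaptureRequestOption(CaptureRequest.CONTROL_AE_TARGET_FPS_RANGE, new Range<Integer>(15, 15));
        return builder.build();
    }
}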
package com.wmdigit.common.utils
import android.content.Context
import android.graphics.Bitmap
import android.graphics.ImageFormat
import android.graphics.Rect
import android.media.Image
import android.renderscript.Allocation
import android.renderscript.Element
import android.renderscript.RenderScript
import android.renderscript.ScriptIntrinsicYuvToRGB
import android.renderscript.Type
import java.nio.ByteBuffer
/**
* Helper class used to efficiently convert a [Media.Image] object from
* [ImageFormat.YUV_420_888] format to an RGB [Bitmap] object.
*
* The [yuvToRgb] method is able to achieve the same FPS as the CameraX image
* analysis use case on a Pixel 3 XL device at the default analyzer resolution,
* which is 30 FPS with 640x480.
*
* NOTE: This has been tested in a limited number of devices and is not
* considered production-ready code. It was created for illustration purposes,
* since this is not an efficient camera pipeline due to the multiple copies
* required to convert each frame.
*/
class YuvToRgbConverter(context: Context) {
private val rs = RenderScript.create(context)
private val scriptYuvToRgb = ScriptIntrinsicYuvToRGB.create(rs, Element.U8_4(rs))
private var pixelCount: Int = -1
private lateinit var yuvBuffer: ByteBuffer
private lateinit var inputAllocation: Allocation
private lateinit var outputAllocation: Allocation
@Synchronized
fun yuvToRgb(image: Image, output: Bitmap) {
// Ensure that the intermediate output byte buffer is allocated
if (!::yuvBuffer.isInitialized) {
pixelCount = image.cropRect.width() * image.cropRect.height()
// Bits per pixel is an average for the whole image, so it's useful to compute the size
// of the full buffer but should not be used to determine pixel offsets
val pixelSizeBits = ImageFormat.getBitsPerPixel(ImageFormat.YUV_420_888)
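// e.g. a 640x480 frame at YUV_420_888's average of 12 bits per pixel needs 640 * 480 * 12 / 8 = 460,800 bytes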
yuvBuffer = ByteBuffer.allocateDirect(pixelCount * pixelSizeBits / 8)
}
// Rewind the buffer; no need to clear it since it will be filled
yuvBuffer.rewind()
// Get the YUV data in byte array form using NV21 format
imageToByteBuffer(image, yuvBuffer.array())
// Ensure that the RenderScript inputs and outputs are allocated
if (!::inputAllocation.isInitialized) {
// Explicitly create an element with type NV21, since that's the pixel format we use
val elemType = Type.Builder(rs, Element.YUV(rs)).setYuvFormat(ImageFormat.NV21).create()
inputAllocation = Allocation.createSized(rs, elemType.element, yuvBuffer.array().size)
}
if (!::outputAllocation.isInitialized) {
outputAllocation = Allocation.createFromBitmap(rs, output)
}
// Convert NV21 format YUV to RGB
inputAllocation.copyFrom(yuvBuffer.array())
scriptYuvToRgb.setInput(inputAllocation)
scriptYuvToRgb.forEach(outputAllocation)
outputAllocation.copyTo(output)
}
private fun imageToByteBuffer(image: Image, outputBuffer: ByteArray) {
assert(image.format == ImageFormat.YUV_420_888)
val imageCrop = image.cropRect
val imagePlanes = image.planes
imagePlanes.forEachIndexed { planeIndex, plane ->
// How many values are read in input for each output value written
// Only the Y plane has a value for every pixel, U and V have half the resolution i.e.
//
// Y Plane U Plane V Plane
// =============== ======= =======
// Y Y Y Y Y Y Y Y U U U U V V V V
// Y Y Y Y Y Y Y Y U U U U V V V V
// Y Y Y Y Y Y Y Y U U U U V V V V
// Y Y Y Y Y Y Y Y U U U U V V V V
// Y Y Y Y Y Y Y Y
// Y Y Y Y Y Y Y Y
// Y Y Y Y Y Y Y Y
val outputStride: Int
// The index in the output buffer the next value will be written at
// For Y it's zero, for U and V we start at the end of Y and interleave them i.e.
//
// First chunk Second chunk
// =============== ===============
// Y Y Y Y Y Y Y Y V U V U V U V U
// Y Y Y Y Y Y Y Y V U V U V U V U
// Y Y Y Y Y Y Y Y V U V U V U V U
// Y Y Y Y Y Y Y Y V U V U V U V U
// Y Y Y Y Y Y Y Y
// Y Y Y Y Y Y Y Y
// Y Y Y Y Y Y Y Y
var outputOffset: Int
when (planeIndex) {
0 -> {
outputStride = 1
outputOffset = 0
}
1 -> {
outputStride = 2
// For NV21 format, U is in odd-numbered indices
outputOffset = pixelCount + 1
}
2 -> {
outputStride = 2
// For NV21 format, V is in even-numbered indices
outputOffset = pixelCount
}
else -> {
// Image contains more than 3 planes, something strange is going on
return@forEachIndexed
}
}
val planeBuffer = plane.buffer
val rowStride = plane.rowStride
val pixelStride = plane.pixelStride
// We have to divide the width and height by two if it's not the Y plane
val planeCrop = if (planeIndex == 0) {
imageCrop
} else {
Rect(
imageCrop.left / 2,
imageCrop.top / 2,
imageCrop.right / 2,
imageCrop.bottom / 2
)
}
val planeWidth = planeCrop.width()
val planeHeight = planeCrop.height()
// Intermediate buffer used to store the bytes of each row
val rowBuffer = ByteArray(plane.rowStride)
// Size of each row in bytes
val rowLength = if (pixelStride == 1 && outputStride == 1) {
planeWidth
} else {
// Take into account that the stride may include data from pixels other than this
// particular plane and row, and that could be between pixels and not after every
// pixel:
//
// |---- Pixel stride ----| Row ends here --> |
// | Pixel 1 | Other Data | Pixel 2 | Other Data | ... | Pixel N |
//
// We need to get (N-1) * (pixel stride bytes) per row + 1 byte for the last pixel
(planeWidth - 1) * pixelStride + 1
}
for (row in 0 until planeHeight) {
// Move buffer position to the beginning of this row
planeBuffer.position(
(row + planeCrop.top) * rowStride + planeCrop.left * pixelStride)
if (pixelStride == 1 && outputStride == 1) {
// When there is a single stride value for pixel and output, we can just copy
// the entire row in a single step
planeBuffer.get(outputBuffer, outputOffset, rowLength)
outputOffset += rowLength
} else {
// When either pixel or output have a stride > 1 we must copy pixel by pixel
planeBuffer.get(rowBuffer, 0, rowLength)
for (col in 0 until planeWidth) {
outputBuffer[outputOffset] = rowBuffer[col * pixelStride]
outputOffset += outputStride
}
}
}
}
}
}
\ No newline at end of file
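A minimal usage sketch (not part of this commit) showing how the converter above can be driven from Java inside a CameraX analyzer; the FrameConverter name and the reuse of a single output Bitmap are illustrative assumptions, and ImageProxy.getImage() is an experimental CameraX API.

import android.content.Context;
import android.graphics.Bitmap;
import android.media.Image;
import androidx.camera.core.ImageProxy;
import com.wmdigit.common.utils.YuvToRgbConverter;

public class FrameConverter {
    private final YuvToRgbConverter converter;
    private Bitmap buffer;

    public FrameConverter(Context context) {
        converter = new YuvToRgbConverter(context);
    }

    // Convert a YUV_420_888 ImageProxy into a reusable RGB Bitmap
    @SuppressWarnings("UnsafeOptInUsageError")
    public Bitmap toBitmap(ImageProxy imageProxy) {
        Image image = imageProxy.getImage();
        if (image == null) {
            return null;
        }
        if (buffer == null) {
            // Allocate the output Bitmap once; the converter writes into it on every call
            buffer = Bitmap.createBitmap(image.getWidth(), image.getHeight(), Bitmap.Config.ARGB_8888);
        }
        converter.yuvToRgb(image, buffer);
        return buffer;
    }
}

The caller remains responsible for closing the ImageProxy after the conversion, as CameraxController does in its analyzer.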
......@@ -10,7 +10,7 @@ android {
minSdk 24
ndk {
abiFilters 'armeabi-v7a', "arm64-v8a"
abiFilters 'armeabi-v7a'
// abiFilters 'armeabi-v7a','arm64-v8a','x86','x86_64'
}
......
......@@ -54,6 +54,39 @@ set_target_properties(
# ${CMAKE_SOURCE_DIR}/../jniLibs/${ANDROID_ABI}/libdetfea.a
#)
# Video stream libraries
add_library(videopipe STATIC IMPORTED)
set_target_properties(
videopipe
PROPERTIES IMPORTED_LOCATION
${CMAKE_SOURCE_DIR}/../jniLibs/${ANDROID_ABI}/libvideopipe.a
)
add_library(io3_lat3d_c3d STATIC IMPORTED)
set_target_properties(
io3_lat3d_c3d
PROPERTIES IMPORTED_LOCATION
${CMAKE_SOURCE_DIR}/../jniLibs/${ANDROID_ABI}/libio3_lat3d_c3d.a
)
add_library(io3_lat3d_backbone STATIC IMPORTED)
set_target_properties(
io3_lat3d_backbone
PROPERTIES IMPORTED_LOCATION
${CMAKE_SOURCE_DIR}/../jniLibs/${ANDROID_ABI}/libio3_lat3d_backbone.a
)
# RKNN libraries
add_library(rknnapi SHARED IMPORTED)
set_target_properties(rknnapi PROPERTIES
IMPORTED_LOCATION
${CMAKE_SOURCE_DIR}/../jniLibs/${ANDROID_ABI}/librknn_api.so)
add_library(rknnrt SHARED IMPORTED)
set_target_properties(rknnrt PROPERTIES
IMPORTED_LOCATION
${CMAKE_SOURCE_DIR}/../jniLibs/${ANDROID_ABI}/librknnrt.so)
# Log library
find_library( # Sets the name of the path variable.
log-lib
......@@ -79,6 +112,28 @@ target_link_libraries(
${log-lib}
)
# Video stream
add_library(
video_pipe
SHARED
videopipe_rk3568.cpp
)
target_link_libraries(
video_pipe
android
wmai
io3_lat3d_backbone
io3_lat3d_c3d
opencv
jnigraphics
c++_shared
videopipe
rknnrt
rknnapi
image_tools
${log-lib})
# Catering target detection
add_library(
......
......@@ -37,11 +37,11 @@ cv::Mat convert_bitmap_to_mat(JNIEnv *env, jobject bitmap){
// Choose the Mat type based on the Bitmap pixel format
int type;
if (info.format == ANDROID_BITMAP_FORMAT_RGBA_8888){
LOGD("CV_8UC4");
// LOGD("CV_8UC4");
type = CV_8UC4;
}
else if (info.format == ANDROID_BITMAP_FORMAT_RGB_565){
LOGD("CV_8UC2");
// LOGD("CV_8UC2");
type = CV_8UC2;
}
else{
......
This diff is collapsed.
#ifndef _VIDEOPIPE_H_
#define _VIDEOPIPE_H_
#include <vector>
#include <map>
#include <string>
#include <opencv2/core.hpp>
namespace libvideopipe
{
#define WM_AI_VIDEO_PIPE_OK 0x00000000 // OK
#define WM_AI_VIDEO_FOOD_PIPE_MARK_EMPTY 0x10020001 // Input mark (temp) image is empty
#define WM_AI_VIDEO_FOOD_PIPE_INPUT_FRAME_EMPTY 0x10020002 // Input frame data is empty
#define WM_AI_VIDEO_FOOD_PIPE_INPUT_FRAME_SIZE_SMALLER_THAN_MARK 0x10020003 // Input frame is smaller than the mark image
#define WM_AI_VIDEO_FOOD_PIPE_ORB_TEMP_POINTS_ZERO 0x10020004
#define WM_AI_VIDEO_MOTION_PIPE_INPUT_FRAME_EMPTY 0x10020101
#define WM_AI_VIDEO_MOTION_PIPE_INIT_PARAM_ERROR 0x10020102
#define WM_AI_VIDEO_MOTION_PIPE_HANDLE_EMPTY 0x10020103
#define WM_AI_VIDEO_MOTION_PIPE_MARK_EMPTY 0x10020104
#define WM_AI_VIDEO_DEPTH_PIPE_INPUT_FRAME_EMPTY 0x10020201
#define WM_AI_VIDEO_DEPTH_PIPE_INPUT_PARAM_ERROR 0x10020202
// Video pipe result callback declaration
typedef void (*VideoPipeCallback)(const char* data, int len, void* user);
struct INIT_INFO
{
bool enable_ai = false; // Whether to enable AI match checking (FOOD)
bool gpu_enable = false; // Whether to enable the GPU (FOOD, Motion)
bool lat3d_enable = true; // Whether to enable the Lat3D algorithm (Motion)
int match_type = 0; // 0: edge matching, 1: ORB
unsigned char * lat3d_backbone_model_data = NULL;
int lat3d_backbone_model_len;
unsigned char * lat3d_c3d_model_data = NULL;
int lat3d_c3d_model_len;
};
//*****************************************
//food
//*****************************************
struct FOOD_VIDEOPIPE_INPUT
{
unsigned long int pts; // Timestamp
cv::Mat img; // Input frame image (after ROI cropping)
bool debug_mode; // Debug mode flag
};
enum FOOD_REALTIME_SERVICE_PLATE_STATUS_TYPES
{
STATIC_EMPTY = 0, // Stationary empty plate
MOVING_EMPTY = 1, // Moving empty plate
STATIC_FULL = 2, // Stationary full plate
MOVING_FULL = 3, // Moving full plate
MOVING_2_STATIC_EMPTY = 4, // Moving -> stationary empty
STATIC_EMPTY_2_MOVING = 5, // Stationary empty -> moving
STATIC_FULL_2_MOVING = 6, // Stationary full -> moving
MOVING_2_STATIC_FULL = 7, // Moving -> stationary full
};
struct DEBUG_INFO // Debug information
{
cv::Mat back, fore, shadow;
float movement_area;
float match;
};
struct ALG_PARAM
{
cv::Mat mark_mat;
float mark_threshold;
float mark_ai_threshold;
float move_lr_threshold;
float move_area_threshold;
float shadow_threshold;
float var_threshold;
};
enum FOOD_STATIC_FULL_TYPES // Output type
{
NORMAL = 0, // Normal output
CHANGED = 1, // Output after modification
};
struct FOOD_VIDEOPIPE_OUTPUT
{
int index; // Image index (currently unused)
bool has_best_obj; // Whether a target is present; if so, subsequent AI recognition is needed
unsigned long int output_pts; // Timestamp (same as the input timestamp)
cv::Mat best_img; // Best frame for recognition
bool debug_mode; // Debug mode flag
FOOD_STATIC_FULL_TYPES output_types; // Indicates whether the output was modified a second time
FOOD_REALTIME_SERVICE_PLATE_STATUS_TYPES service_plate_status; // Output status type
DEBUG_INFO debug_info; // Debug information
int error_code; // Error code
};
class FoodVideoPipe
{
public:
FoodVideoPipe(INIT_INFO init_info);
~FoodVideoPipe();
public:
// Start tracking and detecting the target
int Trace(FOOD_VIDEOPIPE_INPUT input);
// Start Lat3D tracking and detection
int TraceLat3D(FOOD_VIDEOPIPE_INPUT input);
// Set the result callback
void SetResultCallback(VideoPipeCallback cb, void* user);
// Currently has no effect
void SetROI(cv::Rect roi);
// Set the mark (empty-plate) image
void SetMarkMat(cv::Mat temp);
// Get the mark image
cv::Mat GetMarkMat();
// Set the mark threshold, range 0~1 (0.95~0.995 recommended); the larger the value, the more likely an empty plate is reported as a full plate.
void SetMarkThreshold(float threshold);
float GetMarkThreshold();
// Set the AI mark threshold, range 0~1 (0.90~0.93 recommended); only takes effect when enable_ai = true.
void SetAIMarkThreshold(float threshold);
float GetAIMarkThreshold();
// Set the motion learning-rate threshold, range 0.001~0.3; the smaller the value, the slower the transition to the static state
void SetMovementThreshold(float threshold);
float GetMovementThreshold();
// Shadow detection switch
void SetShadowEnable(bool enable);
bool GetShadowEnable();
// Shadow detection threshold; the smaller the value, the more easily foreground is classified as shadow [0.005~1], default 0.05
void SetShadowThreshold(float threshold);
float GetShadowThreshold();
// Foreground/background contrast threshold; the smaller the value, the more sensitive the foreground detection [1~1000], default 500
void SetVarThreshold(float threshold);
float GetVarThreshold();
// Motion-area threshold, 0.01~0.5; smaller values trigger motion detection more readily but less stably
void SetAreaThreshold(float threshold);
float GetAreaThreshold();
// Set the template scale factor, usually chosen based on how long a single Trace call takes
void SetTempScale(float scale);
float GetTempScale();
// Get the match result
float GetMatchResult();
private:
void trace_out(FOOD_VIDEOPIPE_INPUT input);
bool movement_detect(FOOD_VIDEOPIPE_INPUT input);
bool mark_detect(FOOD_VIDEOPIPE_INPUT input);
private:
// roi
cv::Rect roi_;
// call back
VideoPipeCallback cb_function_;
void* cb_param_;
// video track handle
void* video_handle_;
};
//*****************************************
//motion
//*****************************************
struct MOTION_VIDEOPIPE_INPUT
{
unsigned long int pts; // Timestamp
cv::Mat img; // Input frame image (after ROI cropping)
bool debug_mode; // Debug mode flag
int debug_dump_img_type; // -1: all, 0: only empty, 1: only full, 2: only in, 3: only out, 4: only empty->full, 5: only full->empty
};
// 'empty': 0, 'full': 1,'in': 2, 'out': 3
enum MOTION_STATUS_TYPES
{
MOTION_STATIC_EMPTY = 0, // Static empty
MOTION_STATIC_FULL = 1, // Static full
MOTION_IN = 2, // Object entering
MOTION_OUT = 3, // Object leaving
};
struct MOTION_VIDEOPIPE_OUTPUT
{
int index; // Image index (currently unused)
unsigned long int output_pts; // Timestamp (same as the input timestamp)
cv::Mat best_img; // Best frame for recognition
MOTION_STATUS_TYPES status; // Status
bool debug_mode; // Debug mode flag
int error_code; // Error code
};
class MotionVideoPipe
{
public:
MotionVideoPipe();
~MotionVideoPipe();
public:
int Init(INIT_INFO init_info);
void SetROI(cv::Rect roi);
// Feed a frame
int FeedFrame(MOTION_VIDEOPIPE_INPUT input);
// Set the result callback
void SetResultCallback(VideoPipeCallback cb, void* user);
// Set the mark (empty-plate) image
int SetMarkMat(cv::Mat temp);
// Get the mark image
cv::Mat GetMarkMat();
private:
void trace_out(MOTION_VIDEOPIPE_INPUT input);
private:
// roi
cv::Rect roi_;
// call back
VideoPipeCallback cb_function_;
void* cb_param_;
//
void* video_handle_;
};
//*****************************************
//depth
//*****************************************
enum DEPTH_VIDEOPIPE_STATUS // Output type
{
DEPTH_IN = 0, // Entering
DEPTH_OUT = 1, // Leaving
};
struct DEPTH_VIDEOPIPE_INPUT
{
unsigned long int pts; // Timestamp
cv::Mat img; // Input frame image (after ROI cropping)
bool debug_mode; // Debug mode flag
};
struct DEPTH_VIDEOPIPE_OUTPUT
{
int index; // Image index (currently unused)
unsigned long int output_pts; // Timestamp of the current frame (same as the input timestamp)
DEPTH_VIDEOPIPE_STATUS status; // Status
cv::Mat best_img; // Best frame
float best_area; // Best area
unsigned long int best_pts; // Timestamp of the best frame
bool debug_mode; // Debug mode flag
int error_code; // Error code
};
class DepthVideoPipe
{
public:
DepthVideoPipe();
~DepthVideoPipe();
public:
void SetROI(cv::Rect roi);
// Feed a frame
int FeedFrame(DEPTH_VIDEOPIPE_INPUT input);
// Set the valid depth detection range
int SetVaildDepthRange(float* valid_depth_range);
int GetVaildDepthRange(float* valid_depth_range);
// Set the result callback
void SetResultCallback(VideoPipeCallback cb, void* user);
// Motion-area threshold, range 0~1 (0.1~0.2 recommended);
// the smaller the value, the more sensitive the motion detection
void SetAreaThreshold(float threshold);
float GetAreaThreshold();
// Entering threshold, range 0~1 (0.1~0.2 recommended);
// the smaller the value, the more sensitive the entering detection
void SetInThreshold(float threshold);
float GetInThreshold();
// Leaving threshold, range 0~1 (0.1~0.2 recommended);
// the larger the value, the more sensitive the leaving detection
void SetOutThreshold(float threshold);
float GetOutThreshold();
private:
void trace_out(DEPTH_VIDEOPIPE_STATUS status,DEPTH_VIDEOPIPE_INPUT input);
bool detect_obj(DEPTH_VIDEOPIPE_INPUT input);
private:
// roi
cv::Rect roi_;
// call back
VideoPipeCallback cb_function_;
void* cb_param_;
//
void* video_handle_;
};
}
#endif
//
// Created by dizi on 2024/8/2.
//
#ifndef CATERINGDETECT_VIDEOPIPE_RK3568_H
#define CATERINGDETECT_VIDEOPIPE_RK3568_H
#include <jni.h>
#include "image_tools.h"
#include "videopipe.h"
libvideopipe::MotionVideoPipe* motion;
void* user;
libvideopipe::INIT_INFO init_info;
JavaVM *global_jvm;
// Global reference to the VideoPipeManager Java class
jclass java_class_video_pipe_manager;
long long get_timestamp();
void feed_frame_callback(int state);
void trace_result_callback(const char* data, int len, void* user);
unsigned char* read_file_byte(JNIEnv *env, jstring file_path, int& size);
#endif //CATERINGDETECT_VIDEOPIPE_RK3568_H
//
// Created by dizi on 2024/8/2.
//
#include "videopipe_rk3568.h"
JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM *vm, void *reserved) {
global_jvm = vm;
JNIEnv *env;
if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
return JNI_ERR;
}
java_class_video_pipe_manager = (jclass)env->NewGlobalRef(env->FindClass("com/wmdigit/core/videopipe/VideoPipeManager"));
return JNI_VERSION_1_6;
}
/**
* Initialize the video pipe algorithm
*/
extern "C"
JNIEXPORT jint JNICALL
Java_com_wmdigit_core_videopipe_VideoPipeManager_init(JNIEnv *env, jobject thiz,
jstring modelBackBonePath,
jstring modelC3dPath) {
int size_back_bone, size_c3d;
unsigned char* data_back_bone = read_file_byte(env, modelBackBonePath, size_back_bone);
unsigned char* data_c3d = read_file_byte(env, modelC3dPath, size_c3d);
init_info.lat3d_backbone_model_data = data_back_bone;
init_info.lat3d_backbone_model_len = size_back_bone;
init_info.lat3d_c3d_model_data = data_c3d;
init_info.lat3d_c3d_model_len = size_c3d;
init_info.lat3d_enable = true;
init_info.gpu_enable = true;
motion = new libvideopipe::MotionVideoPipe();
// Initialize
int ret_init = motion->Init(init_info);
// Register the result callback
if (ret_init == 0){
libvideopipe::VideoPipeCallback callback = trace_result_callback;
motion->SetResultCallback(callback, &user);
}
return ret_init;
}
/**
* Set the empty-plate (mark) image
*/
extern "C"
JNIEXPORT jint JNICALL
Java_com_wmdigit_core_videopipe_VideoPipeManager_setMarkMat(JNIEnv *env, jobject thiz,
jobject bitmap) {
// Convert the Bitmap to a cv::Mat
cv::Mat mat = convert_bitmap_to_mat(env, bitmap);
// Convert RGBA to RGB
cv::Mat mat_rgb = cv::Mat();
cv::cvtColor(mat, mat_rgb, CV_RGBA2RGB);
// Set the mark (empty-plate) image
return motion->SetMarkMat(mat_rgb.clone());
}
extern "C"
JNIEXPORT void JNICALL
Java_com_wmdigit_core_videopipe_VideoPipeManager_feedFrame(JNIEnv *env, jobject thiz,
jobject bitmap) {
// Convert the Bitmap to a cv::Mat
cv::Mat mat = convert_bitmap_to_mat(env, bitmap);
// Convert RGBA to RGB
cv::Mat mat_rgb = cv::Mat();
cv::cvtColor(mat, mat_rgb, CV_RGBA2RGB);
// Assemble the input parameters
libvideopipe::MOTION_VIDEOPIPE_INPUT input;
input.img = mat_rgb.clone();
input.pts = get_timestamp();
input.debug_mode = false;
motion->FeedFrame(input);
}
/**
* Callback invoked with FeedFrame results
* @param data
* @param len
* @param user
*/
void trace_result_callback(const char* data, int len, void* user)
{
auto* output = (libvideopipe::MOTION_VIDEOPIPE_OUTPUT*) data;
if (output->status == libvideopipe::MOTION_STATUS_TYPES::MOTION_IN)
{
LOGD("Object entering");
feed_frame_callback(2);
}
else if (output->status == libvideopipe::MOTION_STATUS_TYPES::MOTION_OUT)
{
LOGD("Object leaving");
feed_frame_callback(3);
}
else if (output->status == libvideopipe::MOTION_STATUS_TYPES::MOTION_STATIC_EMPTY)
{
// Static empty: no action required
}
else if (output->status == libvideopipe::MOTION_STATUS_TYPES::MOTION_STATIC_FULL)
{
// Static full: no action required
}
else{
// Other statuses are ignored
}
}
void feed_frame_callback(int state){
JNIEnv *env;
global_jvm->GetEnv((void**)&env, JNI_VERSION_1_6);
jmethodID j_method_instance = env->GetStaticMethodID(
java_class_video_pipe_manager,
"getInstance",
"()Lcom/wmdigit/core/videopipe/VideoPipeManager;"
);
jobject instance=env->CallStaticObjectMethod(java_class_video_pipe_manager, j_method_instance);
jmethodID j_method_in_out;
if (state == 2){
j_method_in_out = env->GetMethodID(java_class_video_pipe_manager, "objectIn", "()V");
}
else{
j_method_in_out = env->GetMethodID(java_class_video_pipe_manager, "objectOut", "()V");
}
env->CallVoidMethod(instance, j_method_in_out);
env->DeleteLocalRef(instance);
}
/**
* Read a local file into memory; used to load the model files
* @param env
* @param file_path
* @return
*/
unsigned char* read_file_byte(JNIEnv *env, jstring file_path, int& size){
const char *jpath = env->GetStringUTFChars(file_path, nullptr);
FILE *pFile;
pFile = fopen(jpath, "rb");
if (pFile == nullptr) {
env->ReleaseStringUTFChars(file_path, jpath);
return nullptr;
}
unsigned char *pBuf; // Buffer for the file contents
fseek(pFile, 0, SEEK_END); // Move to the end of the file to determine its length
size = (int) ftell(pFile); // File length in bytes
LOGD("size:%d", size);
pBuf = new unsigned char[size + 1]; // Allocate the buffer (one extra byte for the terminator)
rewind(pFile); // Move back to the beginning of the file before reading
fread(pBuf, 1, (size_t) size, pFile); // Read the whole file
pBuf[size] = 0; // Null-terminate the buffer
fclose(pFile); // Close the file
env->ReleaseStringUTFChars(file_path, jpath);
return pBuf;
}
/**
* Get the current timestamp in milliseconds
* @return
*/
long long get_timestamp()
{
long long tmp;
struct timeval tv;
gettimeofday(&tv, nullptr);
tmp = tv.tv_sec;
tmp = tmp * 1000;
tmp = tmp + (tv.tv_usec / 1000);
return tmp;
}
\ No newline at end of file
......@@ -3,6 +3,7 @@ package com.wmdigit.core;
import android.content.Context;
import com.wmdigit.core.catering.TargetDetectionManager;
import com.wmdigit.core.videopipe.VideoPipeManager;
/**
* Core模块初始化类
......@@ -26,7 +27,10 @@ public class CoreModule {
* 初始化算法库
*/
private static void initCore() {
// Initialize target detection
TargetDetectionManager.getInstance().initTargetDetection();
// Initialize the video pipe
VideoPipeManager.getInstance().initVideoPipe();
}
public static Context getAppContext() {
......
package com.wmdigit.core.videopipe;
import android.graphics.Bitmap;
import com.elvishew.xlog.XLog;
import com.wmdigit.common.utils.FileUtils;
import com.wmdigit.core.CoreModule;
import java.io.File;
import java.io.IOException;
/**
* Video pipe (video stream) manager
* @author dizi
*/
public class VideoPipeManager {
static {
System.loadLibrary("video_pipe");
}
/**
* Initialize the models
* @param modelBackBonePath
* @param modelC3dPath
* @return
*/
private native int init(String modelBackBonePath, String modelC3dPath);
/**
* Set the empty-plate (mark) image
* @param bitmap
*/
private native int setMarkMat(Bitmap bitmap);
/**
* Feed an image frame
* @param bitmap
*/
private native void feedFrame(Bitmap bitmap);
private static VideoPipeManager instance;
public static VideoPipeManager getInstance() {
if (instance == null){
synchronized (VideoPipeManager.class){
if (instance == null){
instance = new VideoPipeManager();
}
}
}
return instance;
}
/**
* Initialize the video pipe algorithm
*/
public void initVideoPipe(){
// Copy the model files
String modelRootPath = CoreModule.getAppContext().getExternalFilesDir("model").getAbsolutePath();
String modelBackBonePath = modelRootPath + "/lat3d_backbone.rknn";
String modelC3dPath = modelRootPath + "/lat3d_c3d.rknn";
File fileBackBone = new File(modelBackBonePath);
File fileC3d = new File(modelC3dPath);
// Check whether the model files already exist locally
if (!fileBackBone.exists() || !fileC3d.exists()){
// Copy the model files from assets to disk
try {
FileUtils.copyAssertsFolderToDisk(CoreModule.getAppContext(), "model", modelRootPath);
} catch (IOException e) {
XLog.e(e.toString());
}
}
// Initialize the models
int ret = init(modelBackBonePath, modelC3dPath);
XLog.i("Video pipe init result: " + ret);
}
/**
* Set the empty-plate image
* @param bitmap
*/
public void setEmptyImage(Bitmap bitmap){
int ret = setMarkMat(bitmap);
XLog.i("设置空盘结果:" + ret);
}
/**
* Feed an image frame
* @param bitmap
*/
public void processImage(Bitmap bitmap){
feedFrame(bitmap);
}
/**
* Called from native code when an object enters the frame
*/
public void objectIn(){
}

/**
* Called from native code when an object leaves the frame
*/
public void objectOut(){
}
}
v1.0.0 2024/07/18 1. Demo application
v1.0.1 2024/08/01 1. Add CameraX
2. TODO: add RK3568 video-stream algorithm
\ No newline at end of file
2. Add RK3568 video-stream algorithm
\ No newline at end of file
......@@ -14,6 +14,7 @@ import com.wmdigit.cateringdetect.demo.ui.adapter.DemoImagesAdapter;
import com.wmdigit.cateringdetect.demo.ui.adapter.ShoppingCartAdapter;
import com.wmdigit.cateringdetect.demo.ui.viewmodel.DemoHomeViewModel;
import com.wmdigit.common.base.mvvm.BaseMvvmFragment;
import com.wmdigit.core.videopipe.VideoPipeManager;
/**
......@@ -88,7 +89,19 @@ public class DemoHomeFragment extends BaseMvvmFragment<DemoHomeViewModel, Fragme
.apply();
}
int count = 0;
private void processBitmap(Bitmap bitmap){
if (count < 10) {
count++;
}
if (count == 10){
// The 10th frame is used as the empty-plate background image
VideoPipeManager.getInstance().setEmptyImage(bitmap);
count++;
}
else {
// Every other frame is fed to the video-pipe algorithm
VideoPipeManager.getInstance().processImage(bitmap);
}
}
......