Complete the .so library code

guanyuankai 2025-10-31 10:14:51 +08:00
parent dec2f2f6e4
commit 9f87f90af9
7 changed files with 1055 additions and 157 deletions

43
.vscode/settings.json vendored
View File

@ -14,5 +14,46 @@
"ANDROID_PLATFORM": "android-24"
},
"cmake.buildDirectory": "${workspaceFolder}/out/build/android-arm64-v8a-Debug"
"cmake.buildDirectory": "${workspaceFolder}/out/build/android-arm64-v8a-Debug",
"files.associations": {
"algorithm": "cpp",
"cmath": "cpp",
"__bit_reference": "cpp",
"__hash_table": "cpp",
"__locale": "cpp",
"__node_handle": "cpp",
"__split_buffer": "cpp",
"__verbose_abort": "cpp",
"array": "cpp",
"cctype": "cpp",
"clocale": "cpp",
"cstdarg": "cpp",
"cstddef": "cpp",
"cstdint": "cpp",
"cstdio": "cpp",
"cstdlib": "cpp",
"cstring": "cpp",
"ctime": "cpp",
"cwchar": "cpp",
"execution": "cpp",
"memory": "cpp",
"initializer_list": "cpp",
"ios": "cpp",
"iosfwd": "cpp",
"iostream": "cpp",
"istream": "cpp",
"limits": "cpp",
"locale": "cpp",
"map": "cpp",
"mutex": "cpp",
"new": "cpp",
"stdexcept": "cpp",
"streambuf": "cpp",
"string": "cpp",
"string_view": "cpp",
"typeinfo": "cpp",
"unordered_map": "cpp",
"variant": "cpp",
"vector": "cpp"
}
}

View File

@ -107,13 +107,10 @@ target_include_directories(face_sdk_jni PUBLIC
# 7. Link libraries
# -----------------------------------------------------------------
target_link_libraries(face_sdk_jni
# OpenCV
${OpenCV_LIBS}
# ONNX Runtime
onnxruntime
# Android NDK
log # __android_log_print
android # AConfiguration, AAssetManager
jnigraphics # Android Bitmap

View File

@ -2,66 +2,65 @@
#include <vector>
#include <string>
#include <memory> // std::unique_ptr for the Pimpl member
// Avoid exposing the full OpenCV headers here; forward-declare cv::Mat instead
namespace cv {
class Mat;
}
// SDK status codes
enum class SDKStatus {
SUCCESS = 0,
MODEL_LOAD_ERROR = -1, // model loading failed
PIPELINE_ERROR = -2, // pipeline (algorithm) execution failed
INVALID_INPUT = -3, // invalid input (e.g. empty image)
NOT_INITIALIZED = -4 // SDK not initialized
};
// Feature extraction result
struct SDKExtractResult {
SDKStatus status;
std::vector<float> feature;
std::string message;
};
/**
* @class FaceSDK
* @brief Public C++ API wrapping FacePipeline (uses the Pimpl idiom).
* Intended to be called from the JNI layer.
*/
class FaceSDK {
public:
/**
* @brief Construct the SDK and load all models
* @param model_dir Directory containing the 7 .onnx model files
*/
FaceSDK(const std::string& model_dir);
/**
* @brief Destructor (releases the internal pipeline)
*/
~FaceSDK();
/**
* @brief Check whether the SDK initialized successfully
*/
bool IsInitialized() const;
/**
* @brief Extract a face feature from an image
* @param image Input image as a BGR cv::Mat
* @return SDKExtractResult with status, feature vector, and message
*/
SDKExtractResult ExtractFeature(const cv::Mat& image);
/**
* @brief Compare two feature vectors
* @param feat1 Feature vector 1
* @param feat2 Feature vector 2
* @return Cosine similarity score
*/
float Compare(const std::vector<float>& feat1, const std::vector<float>& feat2);
private:
// Pimpl (Private Implementation): hides all internal details
// (FacePipeline, ONNX Runtime, OpenCV members) behind a
// forward-declared private class, keeping this header clean
// and fast to compile.
class Impl;
std::unique_ptr<Impl> m_impl;
};
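For reference, a minimal C++ calling sketch of this API (hypothetical model path, image files, and acceptance threshold; not part of the commit):

#include "face_sdk.h"
#include "opencv2/opencv.hpp"

int main() {
    // Directory holding the 7 .onnx files (hypothetical path)
    FaceSDK sdk("/data/local/tmp/face_models");
    if (!sdk.IsInitialized()) return -1;

    cv::Mat img1 = cv::imread("a.jpg"); // cv::imread yields BGR, which ExtractFeature expects
    cv::Mat img2 = cv::imread("b.jpg");
    SDKExtractResult r1 = sdk.ExtractFeature(img1);
    SDKExtractResult r2 = sdk.ExtractFeature(img2);
    if (r1.status != SDKStatus::SUCCESS || r2.status != SDKStatus::SUCCESS) return -1;

    // Cosine similarity of the two 512-dim features
    float sim = sdk.Compare(r1.feature, r2.feature);
    return sim > 0.5f ? 0 : 1; // 0.5f is an illustrative threshold, not from the source
}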

View File

@ -1,88 +1,596 @@
#include "face_pipeline.h"
#include <vector>
#include <string>
// Constructor: set up the ORT environment and session options, then load all models
FacePipeline::FacePipeline(const std::string& model_dir)
: m_env(ORT_LOGGING_LEVEL_WARNING, "FaceSDK"), // initialize the ORT environment
m_memory_info(Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault))
{
// Configure session options
m_session_options.SetIntraOpNumThreads(4); // 4 intra-op threads
m_session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
// Load the models immediately
m_initialized = LoadModels(model_dir);
if (m_initialized) {
InitMemoryAllocators();
LOGI("FacePipeline initialized successfully.");
} else {
LOGE("FacePipeline initialization failed.");
}
}
// Destructor (resources are released automatically via unique_ptr)
FacePipeline::~FacePipeline() {}
// (private) Load all 7 models
bool FacePipeline::LoadModels(const std::string& model_dir) {
// Note: no to_wstring() here — the Android ORT API takes const char* paths.
auto load_session = [&](std::unique_ptr<Ort::Session>& session, const std::string& model_name) {
std::string model_path = model_dir + "/" + model_name;
try {
session = std::make_unique<Ort::Session>(m_env, model_path.c_str(), m_session_options);
LOGI("Loaded model: %s", model_path.c_str());
} catch (const Ort::Exception& e) {
LOGE("Error loading model %s: %s", model_path.c_str(), e.what());
return false;
}
return true;
};
if (!load_session(m_session_rotator, "model_gray_mobilenetv2_rotcls.onnx")) return false;
if (!load_session(m_session_detector, "faceboxesv2-640x640.onnx")) return false;
if (!load_session(m_session_pose_var, "fsanet-var.onnx")) return false;
if (!load_session(m_session_pose_conv, "fsanet-conv.onnx")) return false;
if (!load_session(m_session_landmarker1, "face_landmarker_pts5_net1.onnx")) return false;
if (!load_session(m_session_landmarker2, "face_landmarker_pts5_net2.onnx")) return false;
if (!load_session(m_session_recognizer, "face_recognizer.onnx")) return false;
LOGI("All 7 models loaded successfully.");
return true;
}
// (private) Query model input/output names and shapes
void FacePipeline::InitMemoryAllocators() {
auto get_io_names = [&](Ort::Session* session,
std::vector<const char*>& input_names,
std::vector<const char*>& output_names,
std::vector<int64_t>& input_shape)
{
input_names.clear();
output_names.clear();
input_shape.clear();
for (size_t i = 0; i < session->GetInputCount(); ++i) {
auto input_name_ptr = session->GetInputNameAllocated(i, m_allocator);
input_names.push_back(strdup(input_name_ptr.get())); // strdup: keep the name alive after the ORT-allocated string is freed
}
for (size_t i = 0; i < session->GetOutputCount(); ++i) {
auto output_name_ptr = session->GetOutputNameAllocated(i, m_allocator);
output_names.push_back(strdup(output_name_ptr.get())); // these copies are intentionally never freed (live for the process lifetime)
}
auto input_type_info = session->GetInputTypeInfo(0);
auto tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
input_shape = tensor_info.GetShape();
if (input_shape[0] < 1) input_shape[0] = 1; // dynamic batch dimension (-1) -> fixed batch of 1
};
get_io_names(m_session_rotator.get(), m_rot_input_names, m_rot_output_names, m_rot_input_shape);
get_io_names(m_session_detector.get(), m_det_input_names, m_det_output_names, m_det_input_shape);
get_io_names(m_session_pose_var.get(), m_pose_var_input_names, m_pose_var_output_names, m_pose_var_input_shape);
get_io_names(m_session_pose_conv.get(), m_pose_conv_input_names, m_pose_conv_output_names, m_pose_conv_input_shape);
get_io_names(m_session_landmarker1.get(), m_lm1_input_names, m_lm1_output_names, m_lm1_input_shape);
get_io_names(m_session_landmarker2.get(), m_lm2_input_names, m_lm2_output_names, m_lm2_input_shape);
get_io_names(m_session_recognizer.get(), m_rec_input_names, m_rec_output_names, m_rec_input_shape);
// Generate the FaceBoxesV2 anchors
generate_anchors_faceboxes(m_det_input_shape[2], m_det_input_shape[3]); // H, W (640, 640)
// Size the blob buffer once, to the largest input any model needs
size_t max_blob_size = 0;
auto update_max = [&](const std::vector<int64_t>& shape) {
size_t s = std::accumulate(shape.begin() + 1, shape.end(), 1, std::multiplies<size_t>());
if (s > max_blob_size) max_blob_size = s;
};
update_max(m_rot_input_shape);
update_max(m_det_input_shape);
update_max(m_pose_var_input_shape);
update_max(m_lm1_input_shape);
update_max(m_rec_input_shape);
m_blob_buffer.resize(max_blob_size);
}
// --- Image preprocessing helpers ---
void FacePipeline::image_to_blob(const cv::Mat& img, std::vector<float>& blob, const float* mean, const float* std) {
int channels = img.channels();
int height = img.rows;
int width = img.cols;
for (int c = 0; c < channels; c++) {
for (int h = 0; h < height; h++) {
for (int w = 0; w < width; w++) {
float val;
if (channels == 3) {
val = static_cast<float>(img.at<cv::Vec3b>(h, w)[c]);
} else {
val = static_cast<float>(img.at<uchar>(h, w));
}
blob[c * width * height + h * width + w] = (val - mean[c]) * std[c];
}
}
}
}
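A quick sanity sketch of the NCHW layout produced above (illustrative only, and assuming access to this private helper): channel c of pixel (h, w) lands at index c*H*W + h*W + w, i.e. the full B plane first, then G, then R for a BGR input.

// Hypothetical layout check (not part of the commit):
cv::Mat tiny(2, 2, CV_8UC3, cv::Scalar(10, 20, 30)); // every pixel: B=10, G=20, R=30
std::vector<float> blob(3 * 2 * 2);
const float mean[3] = {0.0f, 0.0f, 0.0f};
const float stdv[3] = {1.0f, 1.0f, 1.0f};
image_to_blob(tiny, blob, mean, stdv);
// Expected: blob[0..3] == 10 (B plane), blob[4..7] == 20 (G), blob[8..11] == 30 (R)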
Ort::Value FacePipeline::create_tensor(const std::vector<float>& blob_data, const std::vector<int64_t>& input_shape) {
// Note: CreateTensor does not copy — the tensor merely wraps blob_data,
// so the buffer (m_blob_buffer) must stay alive until Run() returns.
return Ort::Value::CreateTensor<float>(m_memory_info,
const_cast<float*>(blob_data.data()),
blob_data.size(),
input_shape.data(),
input_shape.size());
}
// --- Core pipeline implementation ---
bool FacePipeline::Extract(const cv::Mat& image, std::vector<float>& feature) {
if (!m_initialized) {
LOGE("Extract failed: Pipeline is not initialized.");
return false;
}
if (image.empty()) {
LOGE("Extract failed: Input image is empty.");
return false;
}
// --- 1. Rotation detection ---
int rot_angle_code = RunRotation(image);
cv::Mat upright_image;
if (rot_angle_code >= 0) {
cv::rotate(image, upright_image, rot_angle_code);
} else {
upright_image = image;
}
// --- 2. Face detection ---
std::vector<FaceBox> boxes;
if (!RunDetection(upright_image, boxes)) {
LOGI("Extract failed: No face detected.");
return false;
}
// (Python uses topk=2; after NMS, boxes[0] is the highest-scoring face)
FaceBox best_box = boxes[0];
// Crop the face (used for pose estimation); Python's crop_face pads the
// crop with black when the box extends past the image border.
cv::Rect face_rect_raw(best_box.x1, best_box.y1, best_box.x2 - best_box.x1, best_box.y2 - best_box.y1);
int pad_top = std::max(0, -face_rect_raw.y);
int pad_bottom = std::max(0, (face_rect_raw.y + face_rect_raw.height) - upright_image.rows);
int pad_left = std::max(0, -face_rect_raw.x);
int pad_right = std::max(0, (face_rect_raw.x + face_rect_raw.width) - upright_image.cols);
cv::Mat face_crop_padded;
cv::copyMakeBorder(upright_image, face_crop_padded, pad_top, pad_bottom, pad_left, pad_right, cv::BORDER_CONSTANT, cv::Scalar(0,0,0));
cv::Rect face_rect_padded(face_rect_raw.x + pad_left, face_rect_raw.y + pad_top, face_rect_raw.width, face_rect_raw.height);
cv::Mat face_crop = face_crop_padded(face_rect_padded);
// --- Note on step ordering ---
// At first glance assess_quality (face_feature_extractor.py L345) calls
// pose_checker.check(aligned_face), which would require landmarks (L336)
// and boxes (L330) first. But following the Python source (QualityOfPose
// ctor L306 -> check L416 -> detect_angle L389 -> transform L370), the
// pose model actually receives the *unaligned* face crop
// (L379: canvas[ny1:ny1 + h, nx1:nx1 + w] = mat).
// So pose estimation runs on the raw crop here, before landmarks/alignment.
// --- 3. Pose estimation (quality filter) ---
FacePose pose;
if (!RunPose(face_crop, pose)) {
LOGI("Extract failed: Pose estimation failed.");
return false;
}
if (std::abs(pose.yaw) > m_pose_threshold || std::abs(pose.pitch) > m_pose_threshold) {
LOGI("Extract failed: Face pose (Y:%.1f, P:%.1f) exceeds threshold (%.1f)", pose.yaw, pose.pitch, m_pose_threshold);
return false;
}
// --- 4. Landmark detection ---
FaceLandmark landmark;
if (!RunLandmark(upright_image, best_box, landmark)) {
LOGI("Extract failed: Landmark detection failed.");
return false;
}
// --- 5. Face alignment ---
cv::Mat aligned_face = RunAlignment(upright_image, landmark);
// --- 6. Feature extraction ---
if (!RunRecognition(aligned_face, feature)) {
LOGI("Extract failed: Feature recognition failed.");
return false;
}
// --- 7. Normalization (done inside RunRecognition) ---
LOGI("Extract success.");
return true;
}
// --- Step 1: rotation detection (from face_feature_extractor.py) ---
void FacePipeline::preprocess_rotation(const cv::Mat& image, std::vector<float>& blob_data) {
cv::Mat gray_img, resized, cropped, gray_3d;
cv::cvtColor(image, gray_img, cv::COLOR_BGR2GRAY);
cv::resize(gray_img, resized, cv::Size(256, 256), 0, 0, cv::INTER_LINEAR);
int start = (256 - 224) / 2;
cv::Rect crop_rect(start, start, 224, 224);
cropped = resized(crop_rect);
cv::cvtColor(cropped, gray_3d, cv::COLOR_GRAY2BGR);
// Normalize: divide by 255.0 (mean = [0,0,0])
const float mean[3] = {0.0f, 0.0f, 0.0f};
const float std[3] = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f}; // multiplying by 1/255 == dividing by 255
image_to_blob(gray_3d, blob_data, mean, std);
}
int FacePipeline::RunRotation(const cv::Mat& image) {
preprocess_rotation(image, m_blob_buffer);
auto input_tensor = create_tensor(m_blob_buffer, m_rot_input_shape);
auto output_tensors = m_session_rotator->Run(Ort::RunOptions{nullptr},
m_rot_input_names.data(), &input_tensor, 1,
m_rot_output_names.data(), 1);
float* output_data = output_tensors[0].GetTensorMutableData<float>();
int max_index = std::distance(output_data, std::max_element(output_data, output_data + 4));
// Map the predicted class to an OpenCV rotation code (cf. correct_image_rotation)
if (max_index == 1) return cv::ROTATE_90_CLOCKWISE;
if (max_index == 2) return cv::ROTATE_180;
if (max_index == 3) return cv::ROTATE_90_COUNTERCLOCKWISE;
return -1;
}
// --- Step 2: face detection (from facedetector.py) ---
void FacePipeline::preprocess_detection(const cv::Mat& img, std::vector<float>& blob_data) {
cv::Mat resized;
cv::resize(img, resized, cv::Size(m_det_input_shape[3], m_det_input_shape[2])); // 640x640
// Normalize: (img - [104, 117, 123]) * 1.0
const float mean[3] = {104.0f, 117.0f, 123.0f}; // BGR
const float std[3] = {1.0f, 1.0f, 1.0f};
image_to_blob(resized, blob_data, mean, std);
}
bool FacePipeline::RunDetection(const cv::Mat& image, std::vector<FaceBox>& boxes) {
float img_height = (float)image.rows;
float img_width = (float)image.cols;
preprocess_detection(image, m_blob_buffer);
auto input_tensor = create_tensor(m_blob_buffer, m_det_input_shape);
auto output_tensors = m_session_detector->Run(Ort::RunOptions{nullptr},
m_det_input_names.data(), &input_tensor, 1,
m_det_output_names.data(), 2); // 2 outputs!
const float* bboxes_data = output_tensors[0].GetTensorData<float>(); // [1, N, 4]
const float* probs_data = output_tensors[1].GetTensorData<float>(); // [1, N, 2]
long num_anchors = output_tensors[0].GetTensorTypeAndShapeInfo().GetShape()[1];
if (num_anchors != (long)m_anchors.size()) {
LOGE("Anchor size mismatch! Expected %zu, Got %ld", m_anchors.size(), num_anchors);
return false;
}
std::vector<FaceBox> bbox_collection;
const float variance[2] = {0.1f, 0.2f}; // prior-box variances (from the Python config)
for (long i = 0; i < num_anchors; ++i) {
float conf = probs_data[i * 2 + 1]; // (probs[0, i, 1])
if (conf < m_det_threshold) continue;
const Anchor& anchor = m_anchors[i];
float dx = bboxes_data[i * 4 + 0];
float dy = bboxes_data[i * 4 + 1];
float dw = bboxes_data[i * 4 + 2];
float dh = bboxes_data[i * 4 + 3];
float cx = anchor.cx + dx * variance[0] * anchor.s_kx; // decode center x
float cy = anchor.cy + dy * variance[0] * anchor.s_ky; // decode center y
float w = anchor.s_kx * std::exp(dw * variance[1]); // decode width
float h = anchor.s_ky * std::exp(dh * variance[1]); // decode height
bbox_collection.push_back({
(cx - w / 2.0f) * img_width,
(cy - h / 2.0f) * img_height,
(cx + w / 2.0f) * img_width,
(cy + h / 2.0f) * img_height,
conf
});
}
boxes = hard_nms(bbox_collection, m_det_iou_threshold, m_det_topk); // hard NMS (Python nms_type=0)
return !boxes.empty();
}
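The decode above is the standard prior-box scheme with variances (0.1, 0.2); written out, with W_img, H_img the original image size:

cx = cx_a + dx * 0.1 * s_kx        cy = cy_a + dy * 0.1 * s_ky
w  = s_kx * exp(dw * 0.2)          h  = s_ky * exp(dh * 0.2)
(x1, y1) = ((cx - w/2) * W_img, (cy - h/2) * H_img)
(x2, y2) = ((cx + w/2) * W_img, (cy + h/2) * H_img)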
void FacePipeline::generate_anchors_faceboxes(int target_height, int target_width) {
// (generate_anchors)
m_anchors.clear();
std::vector<int> steps = {32, 64, 128};
std::vector<std::vector<int>> min_sizes = {{32, 64, 128}, {256}, {512}};
std::vector<std::vector<int>> feature_maps;
for (int step : steps) {
feature_maps.push_back({(int)std::ceil((float)target_height / step), (int)std::ceil((float)target_width / step)});
}
std::vector<float> offset_32 = {0.0f, 0.25f, 0.5f, 0.75f};
std::vector<float> offset_64 = {0.0f, 0.5f};
for (size_t k = 0; k < feature_maps.size(); ++k) {
auto f_map = feature_maps[k];
auto tmp_min_sizes = min_sizes[k];
int f_h = f_map[0];
int f_w = f_map[1];
for (int i = 0; i < f_h; ++i) {
for (int j = 0; j < f_w; ++j) {
for (int min_size : tmp_min_sizes) {
float s_kx = (float)min_size / target_width;
float s_ky = (float)min_size / target_height;
if (min_size == 32) {
for (float offset_y : offset_32) for (float offset_x : offset_32)
m_anchors.push_back({(j + offset_x) * steps[k] / target_width, (i + offset_y) * steps[k] / target_height, s_kx, s_ky});
} else if (min_size == 64) {
for (float offset_y : offset_64) for (float offset_x : offset_64)
m_anchors.push_back({(j + offset_x) * steps[k] / target_width, (i + offset_y) * steps[k] / target_height, s_kx, s_ky});
} else {
m_anchors.push_back({(j + 0.5f) * steps[k] / target_width, (i + 0.5f) * steps[k] / target_height, s_kx, s_ky});
}
}
}
}
}
}
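For the 640x640 input this produces a fixed anchor count, which is what the num_anchors check in RunDetection compares against. Working the arithmetic through (a sketch matching the loops above):

// step 32:  ceil(640/32)  = 20x20 cells; min_size 32 -> 4x4 offsets = 16,
//           64 -> 2x2 = 4, 128 -> 1       => 400 * (16 + 4 + 1) = 8400
// step 64:  ceil(640/64)  = 10x10 cells;  min_size 256 -> 1      => 100
// step 128: ceil(640/128) = 5x5 cells;    min_size 512 -> 1      => 25
// total: 8400 + 100 + 25 = 8525 anchors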
// --- Step 3: pose estimation (from imgchecker.py) ---
void FacePipeline::preprocess_pose(const cv::Mat& img, std::vector<float>& blob_data) {
float pad = 0.3f; // pad the crop with 30% context (from imgchecker.py)
int h = img.rows;
int w = img.cols;
int nh = (int)(h + pad * h);
int nw = (int)(w + pad * w);
int nx1 = std::max(0, (nw - w) / 2);
int ny1 = std::max(0, (nh - h) / 2);
cv::Mat canvas = cv::Mat::zeros(nh, nw, CV_8UC3);
img.copyTo(canvas(cv::Rect(nx1, ny1, w, h)));
cv::Mat resized;
cv::resize(canvas, resized, cv::Size(m_pose_var_input_shape[3], m_pose_var_input_shape[2])); // 64x64
// Normalize: (img - 127.5) / 127.5
const float mean[3] = {127.5f, 127.5f, 127.5f};
const float std[3] = {1.0f / 127.5f, 1.0f / 127.5f, 1.0f / 127.5f};
image_to_blob(resized, blob_data, mean, std);
}
bool FacePipeline::RunPose(const cv::Mat& face_crop, FacePose& pose) {
preprocess_pose(face_crop, m_blob_buffer);
// Run the VAR model
auto input_tensor_var = create_tensor(m_blob_buffer, m_pose_var_input_shape);
auto output_var = m_session_pose_var->Run(Ort::RunOptions{nullptr},
m_pose_var_input_names.data(), &input_tensor_var, 1,
m_pose_var_output_names.data(), 1);
// Run the CONV model (reuses the same blob)
auto input_tensor_conv = create_tensor(m_blob_buffer, m_pose_conv_input_shape);
auto output_conv = m_session_pose_conv->Run(Ort::RunOptions{nullptr},
m_pose_conv_input_names.data(), &input_tensor_conv, 1,
m_pose_conv_output_names.data(), 1);
const float* data_var = output_var[0].GetTensorData<float>();
const float* data_conv = output_conv[0].GetTensorData<float>();
// Combine: average yaw/pitch/roll from both models
pose.yaw = (data_var[0] + data_conv[0]) / 2.0f;
pose.pitch = (data_var[1] + data_conv[1]) / 2.0f;
pose.roll = (data_var[2] + data_conv[2]) / 2.0f;
return true;
}
// --- Step 4: landmark detection (from facelandmarks5er.py) ---
void FacePipeline::preprocess_landmark_net1(const cv::Mat& img, std::vector<float>& blob_data) {
cv::Mat resized, gray_img;
cv::resize(img, resized, cv::Size(m_lm1_input_shape[3], m_lm1_input_shape[2])); // 112x112
cv::cvtColor(resized, gray_img, cv::COLOR_BGR2GRAY); // net1 takes a single-channel grayscale input
// No normalization (raw 0-255 values)
const float mean[1] = {0.0f};
const float std[1] = {1.0f};
image_to_blob(gray_img, blob_data, mean, std);
}
// C++ port of facelandmarks5er.py::shape_index_process
std::vector<float> FacePipeline::shape_index_process(const Ort::Value& feat_val, const Ort::Value& pos_val) {
auto feat_shape = feat_val.GetTensorTypeAndShapeInfo().GetShape();
auto pos_shape = pos_val.GetTensorTypeAndShapeInfo().GetShape();
const float* feat_data = feat_val.GetTensorData<float>();
const float* pos_data = pos_val.GetTensorData<float>();
long feat_n = feat_shape[0]; // 1
long feat_c = feat_shape[1];
long feat_h = feat_shape[2];
long feat_w = feat_shape[3];
long pos_n = pos_shape[0]; // 1
long landmark_x2 = pos_shape[1]; // 10
int landmark_num = landmark_x2 / 2; // 5
float m_origin[] = {112.0f, 112.0f};
float m_origin_patch[] = {15.0f, 15.0f};
int x_patch_h = (int)(m_origin_patch[0] * feat_h / m_origin[0] + 0.5f);
int x_patch_w = (int)(m_origin_patch[1] * feat_w / m_origin[1] + 0.5f);
int feat_patch_h = x_patch_h;
int feat_patch_w = x_patch_w;
float r_h = (feat_patch_h - 1) / 2.0f;
float r_w = (feat_patch_w - 1) / 2.0f;
std::vector<long> out_shape = {feat_n, feat_c, x_patch_h, (long)landmark_num, x_patch_w}; // [N, C, patch_h, 5, patch_w] (kept for reference)
std::vector<float> buff(feat_n * feat_c * x_patch_h * landmark_num * x_patch_w, 0.0f);
for (int i = 0; i < landmark_num; ++i) {
for (int n = 0; n < feat_n; ++n) {
float y_pos = pos_data[n * landmark_x2 + 2 * i + 1];
float x_pos = pos_data[n * landmark_x2 + 2 * i];
int y = (int)(y_pos * (feat_h - 1) - r_h + 0.5f);
int x = (int)(x_pos * (feat_w - 1) - r_w + 0.5f);
for (int c = 0; c < feat_c; ++c) {
for (int ph = 0; ph < feat_patch_h; ++ph) {
for (int pw = 0; pw < feat_patch_w; ++pw) {
int y_p = y + ph;
int x_p = x + pw;
long out_idx = n * (feat_c * x_patch_h * landmark_num * x_patch_w) +
c * (x_patch_h * landmark_num * x_patch_w) +
ph * (landmark_num * x_patch_w) +
i * (x_patch_w) +
pw;
if (y_p < 0 || y_p >= feat_h || x_p < 0 || x_p >= feat_w) {
buff[out_idx] = 0.0f;
} else {
long feat_idx = n * (feat_c * feat_h * feat_w) +
c * (feat_h * feat_w) +
y_p * (feat_w) +
x_p;
buff[out_idx] = feat_data[feat_idx];
}
}
}
}
}
}
return buff;
}
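A worked shape example for the gather above (the feature-map size is illustrative; the real one comes from net1's output): with a 1x128x28x28 feature map,

x_patch_h = x_patch_w = (int)(15 * 28 / 112 + 0.5) = (int)(3.75 + 0.5) = 4
buff shape = [N=1, C=128, patch_h=4, landmarks=5, patch_w=4]

i.e. one 4x4 patch per landmark, centered on the net1 position and zero-padded where it falls outside the map.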
bool FacePipeline::RunLandmark(const cv::Mat& image, const FaceBox& box, FaceLandmark& landmark) {
// 1. Crop the face (same padded crop as in Extract)
cv::Rect face_rect_raw(box.x1, box.y1, box.x2 - box.x1, box.y2 - box.y1);
int pad_top = std::max(0, -face_rect_raw.y);
int pad_bottom = std::max(0, (face_rect_raw.y + face_rect_raw.height) - image.rows);
int pad_left = std::max(0, -face_rect_raw.x);
int pad_right = std::max(0, (face_rect_raw.x + face_rect_raw.width) - image.cols);
cv::Mat face_crop_padded;
cv::copyMakeBorder(image, face_crop_padded, pad_top, pad_bottom, pad_left, pad_right, cv::BORDER_CONSTANT, cv::Scalar(0,0,0));
cv::Rect face_rect_padded(face_rect_raw.x + pad_left, face_rect_raw.y + pad_top, face_rect_raw.width, face_rect_raw.height);
cv::Mat face_crop = face_crop_padded(face_rect_padded);
// 2. Preprocess for Net1
preprocess_landmark_net1(face_crop, m_blob_buffer);
auto input_tensor_net1 = create_tensor(m_blob_buffer, m_lm1_input_shape);
// 3. Run Net1
auto output_net1 = m_session_landmarker1->Run(Ort::RunOptions{nullptr},
m_lm1_input_names.data(), &input_tensor_net1, 1,
m_lm1_output_names.data(), 2); // 2 outputs
// 4. Shape Index Process
std::vector<float> shape_index_blob = shape_index_process(output_net1[0], output_net1[1]);
// 5. Prepare the Net2 input
auto input_tensor_net2 = Ort::Value::CreateTensor<float>(m_memory_info,
shape_index_blob.data(),
shape_index_blob.size(),
m_lm2_input_shape.data(),
m_lm2_input_shape.size());
// 6. Run Net2
auto output_net2 = m_session_landmarker2->Run(Ort::RunOptions{nullptr},
m_lm2_input_names.data(), &input_tensor_net2, 1,
m_lm2_output_names.data(), 1);
// 7. Post-process: combine net2 offsets with net1 coarse positions, then map back to image coordinates
const float* data_net1_pos = output_net1[1].GetTensorData<float>();
const float* data_net2 = output_net2[0].GetTensorData<float>();
auto shape_net1_pos = output_net1[1].GetTensorTypeAndShapeInfo().GetShape(); // [1, 10]
int landmark_x2 = shape_net1_pos[1];
float scale_x = (box.x2 - box.x1) / 112.0f;
float scale_y = (box.y2 - box.y1) / 112.0f;
for (int i = 0; i < 5; ++i) {
float x_norm = (data_net2[i * 2 + 0] + data_net1_pos[i * 2 + 0]) * 112.0f;
float y_norm = (data_net2[i * 2 + 1] + data_net1_pos[i * 2 + 1]) * 112.0f;
float x = box.x1 + x_norm * scale_x;
float y = box.y1 + y_norm * scale_y;
x = std::max(0.01f, std::min(x, (float)image.cols - 0.01f));
y = std::max(0.01f, std::min(y, (float)image.rows - 0.01f));
landmark.points[i] = cv::Point2f(x, y);
}
return true;
}
// --- Step 5: face alignment (from facealign.py) ---
cv::Mat FacePipeline::RunAlignment(const cv::Mat& image, const FaceLandmark& landmark) {
// (align)
std::vector<cv::Point2f> src_points;
std::vector<cv::Point2f> dst_points;
for (int i = 0; i < 5; ++i) {
src_points.push_back(landmark.points[i]);
dst_points.push_back(cv::Point2f(m_landmark_template.at<float>(i, 0),
m_landmark_template.at<float>(i, 1)));
}
// (transformation_maker) -> estimateAffinePartial2D
cv::Mat transform_matrix = cv::estimateAffinePartial2D(src_points, dst_points);
cv::Mat aligned_face;
// (spatial_transform) -> warpAffine
// (crop_width, crop_height = 256, 256)
cv::warpAffine(image, aligned_face, transform_matrix, m_align_output_size, cv::INTER_LINEAR);
return aligned_face;
}
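One caveat worth noting: cv::estimateAffinePartial2D can return an empty matrix for degenerate point sets, and cv::warpAffine would then fail. A defensive variant of the lines above (a sketch, not in this commit) could fall back to a plain resize:

cv::Mat transform_matrix = cv::estimateAffinePartial2D(src_points, dst_points);
if (transform_matrix.empty()) {
    // Degenerate landmarks: fall back to resizing the full frame (illustrative choice)
    cv::Mat fallback;
    cv::resize(image, fallback, m_align_output_size);
    return fallback;
}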
// --- Step 6: feature extraction (from facerecoger.py) ---
void FacePipeline::preprocess_recognition(const cv::Mat& img, std::vector<float>& blob_data) {
cv::Mat resized, rgb_img;
// (resize to 248, 248)
cv::resize(img, resized, cv::Size(m_rec_input_shape[3], m_rec_input_shape[2]));
// (BGR -> RGB)
cv::cvtColor(resized, rgb_img, cv::COLOR_BGR2RGB);
// No normalization (raw 0-255 values)
const float mean[3] = {0.0f, 0.0f, 0.0f};
const float std[3] = {1.0f, 1.0f, 1.0f};
image_to_blob(rgb_img, blob_data, mean, std);
}
void FacePipeline::normalize_sqrt_l2(std::vector<float>& v) {
// (Python: temp_result = np.sqrt(pred_result[0]))
double norm = 0.0;
for (float& val : v) {
val = std::sqrt(std::max(0.0f, val)); // take the square root first
norm += val * val;
}
// (Python: norm = temp_result / np.linalg.norm(temp_result))
if (norm > 1e-6) { // avoid division by zero
norm = std::sqrt(norm);
for (float& val : v) {
val = static_cast<float>(val / norm);
}
}
}
bool FacePipeline::RunRecognition(const cv::Mat& aligned_face, std::vector<float>& feature) {
preprocess_recognition(aligned_face, m_blob_buffer);
auto input_tensor = create_tensor(m_blob_buffer, m_rec_input_shape);
auto output_tensors = m_session_recognizer->Run(Ort::RunOptions{nullptr},
m_rec_input_names.data(), &input_tensor, 1,
m_rec_output_names.data(), 1);
long feature_dim = output_tensors[0].GetTensorTypeAndShapeInfo().GetShape()[1];
const float* output_data = output_tensors[0].GetTensorData<float>();
feature.resize(feature_dim);
memcpy(feature.data(), output_data, feature_dim * sizeof(float));
// Post-process: sqrt + L2 normalization
normalize_sqrt_l2(feature);
return true;
}

View File

@ -4,17 +4,21 @@
#include <vector>
#include <memory>
#include <stdexcept>
#include <array>
#include <cmath>
#include <numeric>
#include <algorithm>
#include <android/log.h>
#include "onnxruntime_cxx_api.h"
#include "opencv2/opencv.hpp"
#include "opencv2/calib3d.hpp" // for estimateAffinePartial2D
// --- Logging macros ---
#define LOG_TAG "FacePipeline_CPP"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
/**
* @brief Cosine similarity of two L2-normalized feature vectors
@ -30,6 +34,62 @@ inline float compare_features(const std::vector<float>& v1, const std::vector<fl
return std::max(-1.0f, std::min(1.0f, static_cast<float>(dot_product)));
}
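Because the pipeline L2-normalizes every feature, cosine similarity reduces to the dot product, clamped to its valid range:

sim(v1, v2) = Σ_i v1_i * v2_i, clamped to [-1, 1]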
// --- Helper structs (mirror facedetector.py::Box) ---
struct FaceBox {
float x1, y1, x2, y2, score;
};
struct FaceLandmark {
std::array<cv::Point2f, 5> points;
};
struct FacePose {
float yaw, pitch, roll;
};
// --- NMS helper (mirrors facedetector.py::hard_nms) ---
inline float iou_of(const FaceBox& a, const FaceBox& b) {
float inter_x1 = std::max(a.x1, b.x1);
float inter_y1 = std::max(a.y1, b.y1);
float inter_x2 = std::min(a.x2, b.x2);
float inter_y2 = std::min(a.y2, b.y2);
if (inter_x1 < inter_x2 && inter_y1 < inter_y2) {
float inter_area = (inter_x2 - inter_x1 + 1.0f) * (inter_y2 - inter_y1 + 1.0f);
float a_area = (a.x2 - a.x1 + 1.0f) * (a.y2 - a.y1 + 1.0f);
float b_area = (b.x2 - b.x1 + 1.0f) * (b.y2 - b.y1 + 1.0f);
float union_area = a_area + b_area - inter_area;
return inter_area / union_area;
}
return 0.0f;
}
inline std::vector<FaceBox> hard_nms(std::vector<FaceBox>& boxes, float iou_threshold, int topk) {
if (boxes.empty()) return {};
std::sort(boxes.begin(), boxes.end(), [](const FaceBox& a, const FaceBox& b) {
return a.score > b.score;
});
std::vector<int> merged(boxes.size(), 0);
std::vector<FaceBox> output;
for (size_t i = 0; i < boxes.size(); ++i) {
if (merged[i]) continue;
output.push_back(boxes[i]);
merged[i] = 1;
for (size_t j = i + 1; j < boxes.size(); ++j) {
if (merged[j]) continue;
if (iou_of(boxes[i], boxes[j]) > iou_threshold) {
merged[j] = 1;
}
}
if ((int)output.size() >= topk) break;
}
return output;
}
/**
* @class FacePipeline
* @brief Core 7-model inference pipeline (rotation, detection, pose, landmarks, alignment, recognition)
@ -37,43 +97,53 @@ inline float compare_features(const std::vector<float>& v1, const std::vector<fl
*/
class FacePipeline {
public:
/**
* @brief Construct the pipeline and load all models
* @param model_dir Directory containing the .onnx model files
* @note Check IsInitialized() after construction
*/
FacePipeline(const std::string& model_dir);
~FacePipeline();
/**
* @brief Whether all models loaded successfully
* @return true on success, false otherwise
*/
bool IsInitialized() const { return m_initialized; }
/**
* @brief Run the full pipeline on one image (single best face)
* @param image Input image (BGR format)
* @param feature (output) 512-dim L2-normalized feature vector
* @return true on success, false on failure
*/
bool Extract(const cv::Mat& image, std::vector<float>& feature);
private:
// --- Model loading & initialization ---
bool LoadModels(const std::string& model_dir);
void InitMemoryAllocators();
// --- Core pipeline steps ---
void preprocess_rotation(const cv::Mat &image, std::vector<float> &blob_data);
int RunRotation(const cv::Mat& image); // [model 5]
bool RunDetection(const cv::Mat& image, std::vector<FaceBox>& boxes); // [model 1]
bool RunPose(const cv::Mat& face_crop, FacePose& pose); // [models 6, 7]
bool RunLandmark(const cv::Mat& image, const FaceBox& box, FaceLandmark& landmark); // [models 2, 3]
cv::Mat RunAlignment(const cv::Mat& image, const FaceLandmark& landmark); // (no model; geometric warp)
bool RunRecognition(const cv::Mat& aligned_face, std::vector<float>& feature); // [model 4]
// --- Pre-/post-processing helpers ---
// [model 1] FaceBoxesV2
struct Anchor { float cx, cy, s_kx, s_ky; };
std::vector<Anchor> m_anchors;
void generate_anchors_faceboxes(int target_height, int target_width);
void preprocess_detection(const cv::Mat& img, std::vector<float>& blob_data);
// [models 6, 7] FSANet
void preprocess_pose(const cv::Mat& img, std::vector<float>& blob_data);
// [models 2, 3] Landmark5er
void preprocess_landmark_net1(const cv::Mat& img, std::vector<float>& blob_data);
std::vector<float> shape_index_process(const Ort::Value& feat_data, const Ort::Value& pos_data);
// [model 4] FaceRecognizer
void preprocess_recognition(const cv::Mat& img, std::vector<float>& blob_data);
void normalize_sqrt_l2(std::vector<float>& v); // sqrt, then L2-normalize
// Shared helpers
void image_to_blob(const cv::Mat& img, std::vector<float>& blob, const float* mean, const float* std);
Ort::Value create_tensor(const std::vector<float>& blob_data, const std::vector<int64_t>& input_shape);
// --- ONNX Runtime core objects ---
Ort::Env m_env;
Ort::SessionOptions m_session_options;
Ort::AllocatorWithDefaultOptions m_allocator;
Ort::MemoryInfo m_memory_info;
bool m_initialized = false;
// --- Sessions for the 7 models ---
@ -84,4 +154,43 @@ private:
std::unique_ptr<Ort::Session> m_session_rotator;
std::unique_ptr<Ort::Session> m_session_pose_var;
std::unique_ptr<Ort::Session> m_session_pose_conv;
// --- Model input/output names (C-style strings, filled at init) ---
std::vector<const char*> m_rot_input_names, m_rot_output_names;
std::vector<int64_t> m_rot_input_shape;
std::vector<const char*> m_det_input_names, m_det_output_names;
std::vector<int64_t> m_det_input_shape;
std::vector<const char*> m_pose_var_input_names, m_pose_var_output_names;
std::vector<int64_t> m_pose_var_input_shape;
std::vector<const char*> m_pose_conv_input_names, m_pose_conv_output_names;
std::vector<int64_t> m_pose_conv_input_shape;
std::vector<const char*> m_lm1_input_names, m_lm1_output_names;
std::vector<int64_t> m_lm1_input_shape;
std::vector<const char*> m_lm2_input_names, m_lm2_output_names;
std::vector<int64_t> m_lm2_input_shape;
std::vector<const char*> m_rec_input_names, m_rec_output_names;
std::vector<int64_t> m_rec_input_shape;
// --- Scratch buffer (reused across preprocessing calls) ---
std::vector<float> m_blob_buffer;
// --- Constants (from the Python reference) ---
const float m_det_threshold = 0.35f;
const float m_det_iou_threshold = 0.45f;
const int m_det_topk = 300;
const float m_pose_threshold = 30.0f; // degrees (from face_feature_extractor.py)
const cv::Mat m_landmark_template = (cv::Mat_<float>(5, 2) << // 5-point template on the 256x256 aligned face (from facealign.py)
89.3095f, 72.9025f,
169.3095f, 72.9025f,
127.8949f, 127.0441f,
96.8796f, 184.8907f,
159.1065f, 184.7601f);
const cv::Size m_align_output_size = cv::Size(256, 256);
};

View File

@ -0,0 +1,66 @@
#include "face_sdk.h"
#include "face_pipeline.h" // 在 .cpp 中包含实现
#include <memory>
// --- Pimpl implementation ---
// The private implementation class is defined here in the .cpp
class FaceSDK::Impl {
public:
// Impl's constructor actually creates the FacePipeline
Impl(const std::string& model_dir)
: pipeline(std::make_unique<FacePipeline>(model_dir))
{
}
// Check that the inner pipeline initialized OK
bool IsInitialized() const {
return pipeline && pipeline->IsInitialized();
}
// Owning pointer to the core pipeline
std::unique_ptr<FacePipeline> pipeline;
};
// --- FaceSDK public methods ---
// Constructor: create the Impl instance
FaceSDK::FaceSDK(const std::string& model_dir)
: m_impl(std::make_unique<Impl>(model_dir))
{
}
// Destructor: must be defined here in the .cpp, where Impl is a complete type
FaceSDK::~FaceSDK() = default;
// IsInitialized implementation
bool FaceSDK::IsInitialized() const {
if (!m_impl) return false;
return m_impl->IsInitialized();
}
// Compare implementation (a thin helper)
float FaceSDK::Compare(const std::vector<float>& feat1, const std::vector<float>& feat2) {
// Delegates to the free helper defined in face_pipeline.h
return compare_features(feat1, feat2);
}
// ExtractFeature implementation
SDKExtractResult FaceSDK::ExtractFeature(const cv::Mat& image) {
if (!IsInitialized()) {
return { SDKStatus::NOT_INITIALIZED, {}, "SDK not initialized" };
}
if (image.empty()) {
return { SDKStatus::INVALID_INPUT, {}, "Input image is empty" };
}
std::vector<float> feature;
bool success = m_impl->pipeline->Extract(image, feature);
if (success) {
return { SDKStatus::SUCCESS, feature, "Extraction succeeded" };
} else {
return { SDKStatus::PIPELINE_ERROR, {}, "No qualified face detected, or the pipeline failed" };
}
}

View File

@ -0,0 +1,184 @@
#include <jni.h>
#include <string>
#include <vector>
#include <android/log.h>
#include <android/bitmap.h> // for Bitmap -> cv::Mat
#include "face_sdk.h" // our C++ API
#include "opencv2/opencv.hpp"
// --- Logging macros ---
#define LOG_TAG "FaceSDK_JNI"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
// --- Global SDK instance ---
// The C++ SDK instance lives in a global smart pointer; its raw
// pointer is also returned to the Java layer as a long handle.
std::unique_ptr<FaceSDK> g_sdk_instance;
// --- Helper: Bitmap -> cv::Mat ---
bool ConvertBitmapToMat(JNIEnv* env, jobject j_bitmap, cv::Mat& out_mat) {
AndroidBitmapInfo bmp_info;
if (AndroidBitmap_getInfo(env, j_bitmap, &bmp_info) < 0) {
LOGE("AndroidBitmap_getInfo() failed");
return false;
}
// Only RGBA_8888 is supported
if (bmp_info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
LOGE("Unsupported bitmap format. Only RGBA_8888 is supported.");
return false;
}
void* bmp_pixels;
if (AndroidBitmap_lockPixels(env, j_bitmap, &bmp_pixels) < 0) {
LOGE("AndroidBitmap_lockPixels() failed");
return false;
}
// Wrap the Bitmap pixels in a cv::Mat (no copy)
// Note: the data is RGBA
cv::Mat tmp_mat(bmp_info.height, bmp_info.width, CV_8UC4, bmp_pixels);
// The face pipeline expects BGR
// TODO: confirm whether the Python pipeline wants RGB; doing the conversion here via cv::cvtColor is the safer choice.
cv::cvtColor(tmp_mat, out_mat, cv::COLOR_RGBA2BGR);
AndroidBitmap_unlockPixels(env, j_bitmap);
return true;
}
// --- JNI entry points ---
#ifdef __cplusplus
extern "C" {
#endif
// JNI naming rule: Java_<package>_<ClassName>_<methodName>
// NOTE: replace "com_facesdk_wrapper_FaceSDKWrapper" with your own package and class name.
/**
* @brief Initialize the SDK
* @param env JNIEnv
* @param thiz Java 'this'
* @param j_model_dir (String) directory containing the .onnx models
* @return (long) pointer to the C++ FaceSDK instance, or 0 on failure
*/
JNIEXPORT jlong JNICALL
Java_com_facesdk_wrapper_FaceSDKWrapper_nativeInit(JNIEnv *env, jobject thiz, jstring j_model_dir) {
const char *model_dir_cstr = env->GetStringUTFChars(j_model_dir, nullptr);
if (model_dir_cstr == nullptr) {
LOGE("Failed to get model dir string");
return 0; // return 0 (null handle)
}
std::string model_dir(model_dir_cstr);
env->ReleaseStringUTFChars(j_model_dir, model_dir_cstr);
LOGI("Initializing FaceSDK with model path: %s", model_dir.c_str());
try {
g_sdk_instance = std::make_unique<FaceSDK>(model_dir);
if (g_sdk_instance && g_sdk_instance->IsInitialized()) {
LOGI("SDK Initialized successfully.");
// Return the instance's pointer address as a long
return (jlong)g_sdk_instance.get();
} else {
LOGE("SDK g_sdk_instance->IsInitialized() failed.");
g_sdk_instance.reset(); // free the partially-initialized instance
return 0;
}
} catch (const std::exception& e) {
LOGE("SDK Initialization failed with exception: %s", e.what());
g_sdk_instance.reset();
return 0;
}
}
/**
* @brief Release the SDK instance
*/
JNIEXPORT void JNICALL
Java_com_facesdk_wrapper_FaceSDKWrapper_nativeRelease(JNIEnv *env, jobject thiz) {
LOGI("Releasing SDK instance.");
g_sdk_instance.reset(); // release the memory owned by the smart pointer
}
/**
* @brief Extract a face feature
* @param env JNIEnv
* @param thiz Java 'this'
* @param j_bitmap (Bitmap) input image
* @return (float[]) 512-dim feature vector, or null on failure
*/
JNIEXPORT jfloatArray JNICALL
Java_com_facesdk_wrapper_FaceSDKWrapper_nativeExtractFeature(JNIEnv *env, jobject thiz, jobject j_bitmap) {
if (!g_sdk_instance) {
LOGE("SDK not initialized. Call init() first.");
return nullptr;
}
// 1. Bitmap -> cv::Mat
cv::Mat image_bgr;
if (!ConvertBitmapToMat(env, j_bitmap, image_bgr)) {
LOGE("Failed to convert Bitmap to cv::Mat");
return nullptr;
}
// 2. Call the C++ API
SDKExtractResult result = g_sdk_instance->ExtractFeature(image_bgr);
if (result.status != SDKStatus::SUCCESS) {
LOGE("Feature extraction failed: %s", result.message.c_str());
return nullptr;
}
// 3. std::vector<float> -> jfloatArray
jfloatArray j_feature = env->NewFloatArray(result.feature.size());
if (j_feature == nullptr) {
LOGE("Failed to create new jfloatArray");
return nullptr;
}
env->SetFloatArrayRegion(j_feature, 0, result.feature.size(), result.feature.data());
return j_feature;
}
/**
* @brief Compare two feature vectors
* @param env JNIEnv
* @param thiz Java 'this'
* @param j_feat1 (float[]) feature 1
* @param j_feat2 (float[]) feature 2
* @return (float) cosine similarity
*/
JNIEXPORT jfloat JNICALL
Java_com_facesdk_wrapper_FaceSDKWrapper_nativeCompare(JNIEnv *env, jobject thiz, jfloatArray j_feat1, jfloatArray j_feat2) {
if (!g_sdk_instance) {
LOGE("SDK not initialized.");
return -2.0f; // sentinel invalid value (cosine similarity lies in [-1, 1])
}
// 1. jfloatArray -> std::vector<float>
jsize len1 = env->GetArrayLength(j_feat1);
jfloat* body1 = env->GetFloatArrayElements(j_feat1, nullptr);
std::vector<float> feat1(body1, body1 + len1);
env->ReleaseFloatArrayElements(j_feat1, body1, 0);
// 2. jfloatArray -> std::vector<float>
jsize len2 = env->GetArrayLength(j_feat2);
jfloat* body2 = env->GetFloatArrayElements(j_feat2, nullptr);
std::vector<float> feat2(body2, body2 + len2);
env->ReleaseFloatArrayElements(j_feat2, body2, 0);
// 3. Call the C++ API
return g_sdk_instance->Compare(feat1, feat2);
}
#ifdef __cplusplus
}
#endif