A Simple Implementation of Predicting Joint Keypoint Coordinates with OpenPose in a C++ Program


Although the OpenPose website already provides plenty of runnable demos, the official examples are not a good fit when you want to call OpenPose from inside your own C++ project to predict keypoint coordinates. So here is a simple implementation of predicting joint keypoint coordinates with OpenPose in a C++ program, including how to write the CMakeLists.txt.
This still assumes that OpenPose and OpenCV have already been installed and configured.
For the official demos themselves, there is an excellent blog post that documents the command-line options in detail and is a very useful companion to them: OpenPose命令行参数记录.

(1) A simple wrapper for predicting keypoint coordinates: op.h

#ifndef CAFFETEST_OP_H
#define CAFFETEST_OP_H
#include "caffe/caffe.hpp"
 //OpenPose dependencies
#include <openpose/core/headers.hpp>
#include <openpose/filestream/headers.hpp>
#include <openpose/gui/headers.hpp>
#include <openpose/pose/headers.hpp>
#include <openpose/utilities/headers.hpp>
using namespace std;
#define FLAGS_logging_level 3
#define FLAGS_output_resolution "-1x-1"
#define FLAGS_net_resolution "-1x368"
#define FLAGS_model_pose "BODY_25"
#define FLAGS_alpha_pose 0.6
#define FLAGS_scale_gap 0.3
#define FLAGS_scale_number 1
#define FLAGS_render_threshold 0.05
#define FLAGS_num_gpu_start 0
#define FLAGS_disable_blending false
#define FLAGS_model_folder "/home/litchi/project/openpose/models/"

class OpenPose{
public:
    std::unique_ptr<op::PoseExtractorCaffe> poseExtractorCaffe;
    std::unique_ptr<op::PoseCpuRenderer> poseRenderer;
    std::unique_ptr<op::FrameDisplayer> frameDisplayer;
    std::unique_ptr<op::ScaleAndSizeExtractor> scaleAndSizeExtractor;

    OpenPose(){
        caffe::Caffe::set_mode(caffe::Caffe::GPU);
        caffe::Caffe::SetDevice(0);

        op::log("OpenPose Library Tutorial - Example 1.", op::Priority::High);
        // ------------------------- INITIALIZATION -------------------------
        // Step 1 - Set logging level
        // - 0 will output all the logging messages
        // - 255 will output nothing
        op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
        op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
        // Step 2 - Read Google flags (user defined configuration)
        // outputSize
        const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
        // netInputSize
        const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
        // poseModel
        const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
        // Check no contradictory flags enabled
        if (FLAGS_alpha_pose < 0. || FLAGS_alpha_pose > 1.)
            op::error("Alpha value for blending must be in the range [0,1].", __LINE__, __FUNCTION__, __FILE__);
        if (FLAGS_scale_gap <= 0. && FLAGS_scale_number > 1)
            op::error("Incompatible flag configuration: scale_gap must be greater than 0 or scale_number = 1.",
                      __LINE__, __FUNCTION__, __FILE__);
        // Logging
        op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
        // Step 3 - Initialize all required classes
        scaleAndSizeExtractor = std::unique_ptr<op::ScaleAndSizeExtractor>(
                new op::ScaleAndSizeExtractor(netInputSize, outputSize, FLAGS_scale_number, FLAGS_scale_gap));

        poseExtractorCaffe = std::unique_ptr<op::PoseExtractorCaffe>(
                new op::PoseExtractorCaffe{poseModel, FLAGS_model_folder, FLAGS_num_gpu_start});

        poseRenderer = std::unique_ptr<op::PoseCpuRenderer>(
                new op::PoseCpuRenderer{poseModel, (float)FLAGS_render_threshold, !FLAGS_disable_blending,
                                        (float)FLAGS_alpha_pose});
        frameDisplayer = std::unique_ptr<op::FrameDisplayer>(
                new op::FrameDisplayer{"OpenPose Tutorial - Example 1", outputSize});


        // Step 4 - Initialize resources on desired thread (in this case single thread, i.e. we init resources here)
        poseExtractorCaffe->initializationOnThread();
        poseRenderer->initializationOnThread();
    }

    string forward(const cv::Mat& inputImage, bool display = false){
        op::OpOutputToCvMat opOutputToCvMat;
        op::CvMatToOpInput cvMatToOpInput;
        op::CvMatToOpOutput cvMatToOpOutput;
        if(inputImage.empty())
            op::error("Could not open or find the image: ", __LINE__, __FUNCTION__, __FILE__);
        const op::Point<int> imageSize{inputImage.cols, inputImage.rows};
        // Step 2 - Get desired scale sizes
        std::vector<double> scaleInputToNetInputs;
        std::vector<op::Point<int>> netInputSizes;
        double scaleInputToOutput;
        op::Point<int> outputResolution;
        std::tie(scaleInputToNetInputs, netInputSizes, scaleInputToOutput, outputResolution)
                = scaleAndSizeExtractor->extract(imageSize);
        // Step 3 - Format input image to OpenPose input and output formats
        const auto netInputArray = cvMatToOpInput.createArray(inputImage, scaleInputToNetInputs, netInputSizes);
        // Step 4 - Estimate poseKeypoints
        poseExtractorCaffe->forwardPass(netInputArray, imageSize, scaleInputToNetInputs);
        const auto poseKeypoints = poseExtractorCaffe->getPoseKeypoints();

        if(display){
            auto outputArray = cvMatToOpOutput.createArray(inputImage, scaleInputToOutput, outputResolution);
            // Step 5 - Render poseKeypoints
            poseRenderer->renderPose(outputArray, poseKeypoints, scaleInputToOutput);
            // Step 6 - OpenPose output format to cv::Mat
            auto outputImage = opOutputToCvMat.formatToCvMat(outputArray);

            // ------------------------- SHOWING RESULT AND CLOSING -------------------------
            // Step 1 - Show results
            frameDisplayer->displayFrame(outputImage, 0); // Alternative: cv::imshow(outputImage) + cv::waitKey(0)
            // Step 2 - Logging information message
            op::log("Example 1 successfully finished.", op::Priority::High);
            // Return successful message
        }
        return poseKeypoints.toString();
    }
};

#endif //CAFFETEST_OP_H

Things to note in this part:

#define FLAGS_model_folder "/home/litchi/project/openpose/models/" — change this to the location of openpose/models on your own machine.

#define FLAGS_model_pose "BODY_25" — this selects the pose model and thus the output format; other options such as "COCO" are also available (BODY_25 outputs 25 keypoints per person, COCO outputs 18). A sketch for inspecting the index-to-name mapping of the selected model is shown below.
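
If you are unsure which index corresponds to which joint for the selected model, OpenPose ships a lookup table in openpose/pose/poseParameters.hpp; the following is a minimal sketch, assuming the same OpenPose version as the wrapper above:

#include <openpose/pose/poseParameters.hpp>
#include <iostream>

// Print the body-part index -> name mapping of the configured model,
// e.g. for BODY_25: 0 -> Nose, 1 -> Neck, ...
void printBodyPartMapping() {
    const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
    for (const auto& pair : op::getPoseBodyPartMapping(poseModel))
        std::cout << pair.first << " -> " << pair.second << std::endl;
}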

Calling the wrapper

#include <iostream>
#include <opencv2/opencv.hpp>
#include "op.h"
using namespace std;
int main() {
    string videopath = "/home/litchi/data/20193.mp4";
    cv::VideoCapture capture;
    cv::Mat frame;
    capture.open(videopath);
    if (!capture.isOpened())
    {
        cout << "cant open " << videopath << endl;
        return -1;
    }
    OpenPose openPose;
    while (capture.read(frame))
    {
        // Each keypoint in the returned string is an (x, y, score) triple.
        cout << openPose.forward(frame, true) << endl;
    }
    capture.release();
    return 0;
}

Just point videopath at your own file and you get a simple prediction of the keypoint coordinates of the people in the video. Note that the third value of each keypoint in the output is the confidence score of the (x, y) estimate, not a z coordinate. If you need the coordinates as numbers rather than a string, see the sketch below.
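
The op::Array<float> returned by getPoseKeypoints() is indexed by {person, body part, x/y/score}, so a numeric readout is straightforward. A minimal sketch, assuming a hypothetical forwardRaw() variant of forward() that returns poseExtractorCaffe->getPoseKeypoints() directly instead of its toString():

// Dimension 0 = person, dimension 1 = body part, dimension 2 = {x, y, score}.
const op::Array<float> poseKeypoints = openPose.forwardRaw(frame); // hypothetical variant of forward()
for (auto person = 0; person < poseKeypoints.getSize(0); person++) {
    for (auto part = 0; part < poseKeypoints.getSize(1); part++) {
        const auto x = poseKeypoints[{person, part, 0}];
        const auto y = poseKeypoints[{person, part, 1}];
        const auto score = poseKeypoints[{person, part, 2}]; // confidence of (x, y)
        std::cout << "person " << person << ", part " << part << ": ("
                  << x << ", " << y << "), score " << score << std::endl;
    }
}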

CMakeLists.txt

cmake_minimum_required(VERSION 3.13)
project(easyopenpose)
set(CMAKE_CXX_STANDARD 14)
set(INC_DIR /usr/include
        /usr/local/cuda/include # CUDA include path; change to the correct location on your machine
        /home/litchi/project/openpose/3rdparty/caffe/include # correct caffe location on your machine
        /home/litchi/project/openpose/3rdparty/caffe/src # correct caffe location on your machine
        )
set(LINK_DIR /usr/lib
        /usr/lib/x86_64-linux-gnu/
        /usr/local/cuda/lib64 # CUDA lib path; change to the correct location
        /home/litchi/project/openpose/3rdparty/caffe/build/lib # caffe lib path; change to the correct location
        )
set(OpenCV_DIR /usr/local/share/OpenCV) # OpenCV location
find_package(OpenCV REQUIRED)
find_package(OpenPose REQUIRED)
include_directories(${INC_DIR} ${OpenCV_INCLUDE_DIRS} ${OpenPose_INCLUDE_DIRS} ${EIGEN3_INCLUDE_DIR})
link_directories(${LINK_DIR})
add_executable(easyopenpose main.cpp op.h)
target_link_libraries(easyopenpose
        caffe
        boost_system
        glog
        jsoncpp
        ${OpenPose_LIBS}
        ${OpenCV_LIBS}
        )

For the CMakeLists.txt, you only need to adjust the paths marked in the comments above and it should work.
To verify that the results are correct, run the same video file through the official demo and compare the outputs.
On the data I tested, this code and the official demo produce matching results.
The source code can be downloaded from my uploaded resources.

